Add the m5 parameter to the ns83820 device model so that we
[gem5.git] / dev / ns_gige.cc
1 /*
2 * Copyright (c) 2004 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /* @file
30 * Device module for modelling the National Semiconductor
31 * DP83820 ethernet controller. Does not support priority queueing
32 */
33 #include <cstdio>
34 #include <deque>
35 #include <string>
36
37 #include "base/inet.hh"
38 #include "cpu/exec_context.hh"
39 #include "dev/dma.hh"
40 #include "dev/etherlink.hh"
41 #include "dev/ns_gige.hh"
42 #include "dev/pciconfigall.hh"
43 #include "mem/bus/bus.hh"
44 #include "mem/bus/dma_interface.hh"
45 #include "mem/bus/pio_interface.hh"
46 #include "mem/bus/pio_interface_impl.hh"
47 #include "mem/functional_mem/memory_control.hh"
48 #include "mem/functional_mem/physical_memory.hh"
49 #include "sim/builder.hh"
50 #include "sim/debug.hh"
51 #include "sim/host.hh"
52 #include "sim/stats.hh"
53 #include "targetarch/vtophys.hh"
54
// Human-readable names for the receive state machine states; indexed by
// the RxState enum and used only for DPRINTF trace output.
const char *NsRxStateStrings[] =
{
    "rxIdle",
    "rxDescRefr",
    "rxDescRead",
    "rxFifoBlock",
    "rxFragWrite",
    "rxDescWrite",
    "rxAdvance"
};
65
// Human-readable names for the transmit state machine states; indexed by
// the TxState enum and used only for DPRINTF trace output.
const char *NsTxStateStrings[] =
{
    "txIdle",
    "txDescRefr",
    "txDescRead",
    "txFifoBlock",
    "txFragRead",
    "txDescWrite",
    "txAdvance"
};
76
// Human-readable names for the DMA engine states; indexed by the DmaState
// enum and used only for DPRINTF trace output.
const char *NsDmaState[] =
{
    "dmaIdle",
    "dmaReading",
    "dmaWriting",
    "dmaReadWaiting",
    "dmaWriteWaiting"
};
85
86 using namespace std;
87 using namespace Net;
88
89 ///////////////////////////////////////////////////////////////////////
90 //
91 // NSGigE PCI Device
92 //
/**
 * Construct the device model from its simulation parameters: size the
 * tx/rx FIFOs, zero all state machine bookkeeping, wire up the DMA and
 * timing events, then attach to the memory system busses and load the
 * station address into the filter ROM.
 */
NSGigE::NSGigE(Params *p)
    : PciDev(p), ioEnable(false),
      txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size),
      txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
      txXferLen(0), rxXferLen(0), cycleTime(p->cycle_time),
      txState(txIdle), txEnable(false), CTDD(false),
      txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
      rxEnable(false), CRDD(false), rxPktBytes(0),
      rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
      rxDmaReadEvent(this), rxDmaWriteEvent(this),
      txDmaReadEvent(this), txDmaWriteEvent(this),
      dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free),
      txDelay(p->tx_delay), rxDelay(p->rx_delay),
      rxKickTick(0), txKickTick(0),
      txEvent(this), rxFilterEnable(p->rx_filter), acceptBroadcast(false),
      acceptMulticast(false), acceptUnicast(false),
      acceptPerfect(false), acceptArp(false),
      physmem(p->pmem), intrTick(0), cpuPendingIntr(false),
      intrEvent(0), interface(0)
{
    // PIO (register) accesses go over the header bus if one is
    // configured; DMA traffic goes over the payload bus when present,
    // otherwise it shares the header bus.
    if (p->header_bus) {
        pioInterface = newPioInterface(name(), p->hier,
                                       p->header_bus, this,
                                       &NSGigE::cacheAccess);

        pioLatency = p->pio_latency * p->header_bus->clockRatio;

        if (p->payload_bus)
            dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                                 p->header_bus,
                                                 p->payload_bus, 1);
        else
            dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                                 p->header_bus,
                                                 p->header_bus, 1);
    } else if (p->payload_bus) {
        // No header bus: both PIO and DMA use the payload bus.
        pioInterface = newPioInterface(name(), p->hier,
                                       p->payload_bus, this,
                                       &NSGigE::cacheAccess);

        pioLatency = p->pio_latency * p->payload_bus->clockRatio;

        dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                             p->payload_bus,
                                             p->payload_bus, 1);
    }


    // Timing knobs for modelling interrupt and DMA latency.
    intrDelay = p->intr_delay;
    dmaReadDelay = p->dma_read_delay;
    dmaWriteDelay = p->dma_write_delay;
    dmaReadFactor = p->dma_read_factor;
    dmaWriteFactor = p->dma_write_factor;

    // Put the register file in its power-on state and seed the
    // perfect-match filter with the configured MAC address.
    regsReset();
    memcpy(&rom.perfectMatch, p->eaddr.bytes(), ETH_ADDR_LEN);
}
150
// All owned resources (events, interfaces) are reclaimed at simulator
// teardown; nothing needs explicit cleanup here.
NSGigE::~NSGigE()
{}
153
154 void
155 NSGigE::regStats()
156 {
157 txBytes
158 .name(name() + ".txBytes")
159 .desc("Bytes Transmitted")
160 .prereq(txBytes)
161 ;
162
163 rxBytes
164 .name(name() + ".rxBytes")
165 .desc("Bytes Received")
166 .prereq(rxBytes)
167 ;
168
169 txPackets
170 .name(name() + ".txPackets")
171 .desc("Number of Packets Transmitted")
172 .prereq(txBytes)
173 ;
174
175 rxPackets
176 .name(name() + ".rxPackets")
177 .desc("Number of Packets Received")
178 .prereq(rxBytes)
179 ;
180
181 txIpChecksums
182 .name(name() + ".txIpChecksums")
183 .desc("Number of tx IP Checksums done by device")
184 .precision(0)
185 .prereq(txBytes)
186 ;
187
188 rxIpChecksums
189 .name(name() + ".rxIpChecksums")
190 .desc("Number of rx IP Checksums done by device")
191 .precision(0)
192 .prereq(rxBytes)
193 ;
194
195 txTcpChecksums
196 .name(name() + ".txTcpChecksums")
197 .desc("Number of tx TCP Checksums done by device")
198 .precision(0)
199 .prereq(txBytes)
200 ;
201
202 rxTcpChecksums
203 .name(name() + ".rxTcpChecksums")
204 .desc("Number of rx TCP Checksums done by device")
205 .precision(0)
206 .prereq(rxBytes)
207 ;
208
209 txUdpChecksums
210 .name(name() + ".txUdpChecksums")
211 .desc("Number of tx UDP Checksums done by device")
212 .precision(0)
213 .prereq(txBytes)
214 ;
215
216 rxUdpChecksums
217 .name(name() + ".rxUdpChecksums")
218 .desc("Number of rx UDP Checksums done by device")
219 .precision(0)
220 .prereq(rxBytes)
221 ;
222
223 descDmaReads
224 .name(name() + ".descDMAReads")
225 .desc("Number of descriptors the device read w/ DMA")
226 .precision(0)
227 ;
228
229 descDmaWrites
230 .name(name() + ".descDMAWrites")
231 .desc("Number of descriptors the device wrote w/ DMA")
232 .precision(0)
233 ;
234
235 descDmaRdBytes
236 .name(name() + ".descDmaReadBytes")
237 .desc("number of descriptor bytes read w/ DMA")
238 .precision(0)
239 ;
240
241 descDmaWrBytes
242 .name(name() + ".descDmaWriteBytes")
243 .desc("number of descriptor bytes write w/ DMA")
244 .precision(0)
245 ;
246
247 txBandwidth
248 .name(name() + ".txBandwidth")
249 .desc("Transmit Bandwidth (bits/s)")
250 .precision(0)
251 .prereq(txBytes)
252 ;
253
254 rxBandwidth
255 .name(name() + ".rxBandwidth")
256 .desc("Receive Bandwidth (bits/s)")
257 .precision(0)
258 .prereq(rxBytes)
259 ;
260
261 totBandwidth
262 .name(name() + ".totBandwidth")
263 .desc("Total Bandwidth (bits/s)")
264 .precision(0)
265 .prereq(totBytes)
266 ;
267
268 totPackets
269 .name(name() + ".totPackets")
270 .desc("Total Packets")
271 .precision(0)
272 .prereq(totBytes)
273 ;
274
275 totBytes
276 .name(name() + ".totBytes")
277 .desc("Total Bytes")
278 .precision(0)
279 .prereq(totBytes)
280 ;
281
282 totPacketRate
283 .name(name() + ".totPPS")
284 .desc("Total Tranmission Rate (packets/s)")
285 .precision(0)
286 .prereq(totBytes)
287 ;
288
289 txPacketRate
290 .name(name() + ".txPPS")
291 .desc("Packet Tranmission Rate (packets/s)")
292 .precision(0)
293 .prereq(txBytes)
294 ;
295
296 rxPacketRate
297 .name(name() + ".rxPPS")
298 .desc("Packet Reception Rate (packets/s)")
299 .precision(0)
300 .prereq(rxBytes)
301 ;
302
303 postedSwi
304 .name(name() + ".postedSwi")
305 .desc("number of software interrupts posted to CPU")
306 .precision(0)
307 ;
308
309 totalSwi
310 .name(name() + ".totalSwi")
311 .desc("number of total Swi written to ISR")
312 .precision(0)
313 ;
314
315 coalescedSwi
316 .name(name() + ".coalescedSwi")
317 .desc("average number of Swi's coalesced into each post")
318 .precision(0)
319 ;
320
321 postedRxIdle
322 .name(name() + ".postedRxIdle")
323 .desc("number of rxIdle interrupts posted to CPU")
324 .precision(0)
325 ;
326
327 totalRxIdle
328 .name(name() + ".totalRxIdle")
329 .desc("number of total RxIdle written to ISR")
330 .precision(0)
331 ;
332
333 coalescedRxIdle
334 .name(name() + ".coalescedRxIdle")
335 .desc("average number of RxIdle's coalesced into each post")
336 .precision(0)
337 ;
338
339 postedRxOk
340 .name(name() + ".postedRxOk")
341 .desc("number of RxOk interrupts posted to CPU")
342 .precision(0)
343 ;
344
345 totalRxOk
346 .name(name() + ".totalRxOk")
347 .desc("number of total RxOk written to ISR")
348 .precision(0)
349 ;
350
351 coalescedRxOk
352 .name(name() + ".coalescedRxOk")
353 .desc("average number of RxOk's coalesced into each post")
354 .precision(0)
355 ;
356
357 postedRxDesc
358 .name(name() + ".postedRxDesc")
359 .desc("number of RxDesc interrupts posted to CPU")
360 .precision(0)
361 ;
362
363 totalRxDesc
364 .name(name() + ".totalRxDesc")
365 .desc("number of total RxDesc written to ISR")
366 .precision(0)
367 ;
368
369 coalescedRxDesc
370 .name(name() + ".coalescedRxDesc")
371 .desc("average number of RxDesc's coalesced into each post")
372 .precision(0)
373 ;
374
375 postedTxOk
376 .name(name() + ".postedTxOk")
377 .desc("number of TxOk interrupts posted to CPU")
378 .precision(0)
379 ;
380
381 totalTxOk
382 .name(name() + ".totalTxOk")
383 .desc("number of total TxOk written to ISR")
384 .precision(0)
385 ;
386
387 coalescedTxOk
388 .name(name() + ".coalescedTxOk")
389 .desc("average number of TxOk's coalesced into each post")
390 .precision(0)
391 ;
392
393 postedTxIdle
394 .name(name() + ".postedTxIdle")
395 .desc("number of TxIdle interrupts posted to CPU")
396 .precision(0)
397 ;
398
399 totalTxIdle
400 .name(name() + ".totalTxIdle")
401 .desc("number of total TxIdle written to ISR")
402 .precision(0)
403 ;
404
405 coalescedTxIdle
406 .name(name() + ".coalescedTxIdle")
407 .desc("average number of TxIdle's coalesced into each post")
408 .precision(0)
409 ;
410
411 postedTxDesc
412 .name(name() + ".postedTxDesc")
413 .desc("number of TxDesc interrupts posted to CPU")
414 .precision(0)
415 ;
416
417 totalTxDesc
418 .name(name() + ".totalTxDesc")
419 .desc("number of total TxDesc written to ISR")
420 .precision(0)
421 ;
422
423 coalescedTxDesc
424 .name(name() + ".coalescedTxDesc")
425 .desc("average number of TxDesc's coalesced into each post")
426 .precision(0)
427 ;
428
429 postedRxOrn
430 .name(name() + ".postedRxOrn")
431 .desc("number of RxOrn posted to CPU")
432 .precision(0)
433 ;
434
435 totalRxOrn
436 .name(name() + ".totalRxOrn")
437 .desc("number of total RxOrn written to ISR")
438 .precision(0)
439 ;
440
441 coalescedRxOrn
442 .name(name() + ".coalescedRxOrn")
443 .desc("average number of RxOrn's coalesced into each post")
444 .precision(0)
445 ;
446
447 coalescedTotal
448 .name(name() + ".coalescedTotal")
449 .desc("average number of interrupts coalesced into each post")
450 .precision(0)
451 ;
452
453 postedInterrupts
454 .name(name() + ".postedInterrupts")
455 .desc("number of posts to CPU")
456 .precision(0)
457 ;
458
459 droppedPackets
460 .name(name() + ".droppedPackets")
461 .desc("number of packets dropped")
462 .precision(0)
463 ;
464
465 coalescedSwi = totalSwi / postedInterrupts;
466 coalescedRxIdle = totalRxIdle / postedInterrupts;
467 coalescedRxOk = totalRxOk / postedInterrupts;
468 coalescedRxDesc = totalRxDesc / postedInterrupts;
469 coalescedTxOk = totalTxOk / postedInterrupts;
470 coalescedTxIdle = totalTxIdle / postedInterrupts;
471 coalescedTxDesc = totalTxDesc / postedInterrupts;
472 coalescedRxOrn = totalRxOrn / postedInterrupts;
473
474 coalescedTotal = (totalSwi + totalRxIdle + totalRxOk + totalRxDesc + totalTxOk
475 + totalTxIdle + totalTxDesc + totalRxOrn) / postedInterrupts;
476
477 txBandwidth = txBytes * Stats::constant(8) / simSeconds;
478 rxBandwidth = rxBytes * Stats::constant(8) / simSeconds;
479 totBandwidth = txBandwidth + rxBandwidth;
480 totBytes = txBytes + rxBytes;
481 totPackets = txPackets + rxPackets;
482
483 txPacketRate = txPackets / simSeconds;
484 rxPacketRate = rxPackets / simSeconds;
485 }
486
487 /**
488 * This is to read the PCI general configuration registers
489 */
490 void
491 NSGigE::ReadConfig(int offset, int size, uint8_t *data)
492 {
493 if (offset < PCI_DEVICE_SPECIFIC)
494 PciDev::ReadConfig(offset, size, data);
495 else
496 panic("Device specific PCI config space not implemented!\n");
497 }
498
/**
 * This is to write to the PCI general configuration registers.
 * After delegating to the base class, writes to the command register and
 * the BARs are intercepted to keep the PIO interface and ioEnable flag in
 * sync with the new configuration.
 */
void
NSGigE::WriteConfig(int offset, int size, uint32_t data)
{
    if (offset < PCI_DEVICE_SPECIFIC)
        PciDev::WriteConfig(offset, size, data);
    else
        panic("Device specific PCI config space not implemented!\n");

    // Need to catch writes to BARs to update the PIO interface
    switch (offset) {
        // seems to work fine without all these PCI settings, but i
        // put in the IO to double check, an assertion will fail if we
        // need to properly implement it
      case PCI_COMMAND:
        if (config.data[offset] & PCI_CMD_IOSE)
            ioEnable = true;
        else
            ioEnable = false;

#if 0
        if (config.data[offset] & PCI_CMD_BME) {
            bmEnabled = true;
        }
        else {
            bmEnabled = false;
        }

        if (config.data[offset] & PCI_CMD_MSE) {
            memEnable = true;
        }
        else {
            memEnable = false;
        }
#endif
        break;

      case PCI0_BASE_ADDR0:
        if (BARAddrs[0] != 0) {
            if (pioInterface)
                pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));

            // strip the uncached-space bits so the stored BAR is a
            // plain physical address (Alpha EV5 address map)
            BARAddrs[0] &= EV5::PAddrUncachedMask;
        }
        break;
      case PCI0_BASE_ADDR1:
        if (BARAddrs[1] != 0) {
            if (pioInterface)
                pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));

            BARAddrs[1] &= EV5::PAddrUncachedMask;
        }
        break;
    }
}
556
/**
 * This reads the device registers, which are detailed in the NS83820
 * spec sheet. Only 32-bit accesses are supported; the register offset is
 * the low 12 bits of the physical address.
 */
Fault
NSGigE::read(MemReqPtr &req, uint8_t *data)
{
    assert(ioEnable);

    //The mask is to give you only the offset into the device register file
    Addr daddr = req->paddr & 0xfff;
    DPRINTF(EthernetPIO, "read da=%#x pa=%#x va=%#x size=%d\n",
            daddr, req->paddr, req->vaddr, req->size);


    // there are some reserved registers, you can see ns_gige_reg.h and
    // the spec sheet for details
    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        // this range aliases the PCI configuration space
        ReadConfig(daddr & 0xff, req->size, data);
        return No_Fault;
    } else if (daddr >= MIB_START && daddr <= MIB_END) {
        // don't implement all the MIB's. hopefully the kernel
        // doesn't actually DEPEND upon their values
        // MIB are just hardware stats keepers
        uint32_t &reg = *(uint32_t *) data;
        reg = 0;
        return No_Fault;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    switch (req->size) {
      case sizeof(uint32_t):
        {
            uint32_t &reg = *(uint32_t *)data;

            switch (daddr) {
              case CR:
                reg = regs.command;
                //these are supposed to be cleared on a read
                reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
                break;

              case CFG:
                reg = regs.config;
                break;

              case MEAR:
                reg = regs.mear;
                break;

              case PTSCR:
                reg = regs.ptscr;
                break;

              case ISR:
                // reading the ISR acknowledges (clears) all interrupts
                reg = regs.isr;
                devIntrClear(ISR_ALL);
                break;

              case IMR:
                reg = regs.imr;
                break;

              case IER:
                reg = regs.ier;
                break;

              case IHR:
                reg = regs.ihr;
                break;

              case TXDP:
                reg = regs.txdp;
                break;

              case TXDP_HI:
                reg = regs.txdp_hi;
                break;

              case TXCFG:
                reg = regs.txcfg;
                break;

              case GPIOR:
                reg = regs.gpior;
                break;

              case RXDP:
                reg = regs.rxdp;
                break;

              case RXDP_HI:
                reg = regs.rxdp_hi;
                break;

              case RXCFG:
                reg = regs.rxcfg;
                break;

              case PQCR:
                reg = regs.pqcr;
                break;

              case WCSR:
                reg = regs.wcsr;
                break;

              case PCR:
                reg = regs.pcr;
                break;

                // see the spec sheet for how RFCR and RFDR work
                // basically, you write to RFCR to tell the machine
                // what you want to do next, then you act upon RFDR,
                // and the device will be prepared b/c of what you
                // wrote to RFCR
              case RFCR:
                reg = regs.rfcr;
                break;

              case RFDR:
                // only the perfect-match filter bytes are readable;
                // each access returns two bytes of the station address
                switch (regs.rfcr & RFCR_RFADDR) {
                  case 0x000:
                    reg = rom.perfectMatch[1];
                    reg = reg << 8;
                    reg += rom.perfectMatch[0];
                    break;
                  case 0x002:
                    reg = rom.perfectMatch[3] << 8;
                    reg += rom.perfectMatch[2];
                    break;
                  case 0x004:
                    reg = rom.perfectMatch[5] << 8;
                    reg += rom.perfectMatch[4];
                    break;
                  default:
                    panic("reading RFDR for something other than PMATCH!\n");
                    // didn't implement other RFDR functionality b/c
                    // driver didn't use it
                }
                break;

              case SRR:
                reg = regs.srr;
                break;

              case MIBC:
                reg = regs.mibc;
                // these bits are self-clearing on read
                reg &= ~(MIBC_MIBS | MIBC_ACLR);
                break;

              case VRCR:
                reg = regs.vrcr;
                break;

              case VTCR:
                reg = regs.vtcr;
                break;

              case VDR:
                reg = regs.vdr;
                break;

              case CCSR:
                reg = regs.ccsr;
                break;

              case TBICR:
                reg = regs.tbicr;
                break;

              case TBISR:
                reg = regs.tbisr;
                break;

              case TANAR:
                reg = regs.tanar;
                break;

              case TANLPAR:
                reg = regs.tanlpar;
                break;

              case TANER:
                reg = regs.taner;
                break;

              case TESR:
                reg = regs.tesr;
                break;

              case M5REG:
                // simulator-private register exposing m5 configuration
                // parameters to the guest driver
                reg = params()->m5reg;
                break;

              default:
                panic("reading unimplemented register: addr=%#x", daddr);
            }

            DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
                    daddr, reg, reg);
        }
        break;

      default:
        panic("accessing register with invalid size: addr=%#x, size=%d",
              daddr, req->size);
    }

    return No_Fault;
}
770
/**
 * Write a device register; the side effects (state machine kicks, resets,
 * interrupt posts, filter configuration) follow the NS83820 spec sheet.
 * Only 32-bit accesses are supported.
 */
Fault
NSGigE::write(MemReqPtr &req, const uint8_t *data)
{
    assert(ioEnable);

    Addr daddr = req->paddr & 0xfff;
    DPRINTF(EthernetPIO, "write da=%#x pa=%#x va=%#x size=%d\n",
            daddr, req->paddr, req->vaddr, req->size);

    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        // this range aliases the PCI configuration space
        WriteConfig(daddr & 0xff, req->size, *(uint32_t *)data);
        return No_Fault;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    if (req->size == sizeof(uint32_t)) {
        uint32_t reg = *(uint32_t *)data;
        DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);

        switch (daddr) {
          case CR:
            regs.command = reg;
            // disable takes precedence over enable; enabling kicks the
            // corresponding state machine if it is idle
            if (reg & CR_TXD) {
                txEnable = false;
            } else if (reg & CR_TXE) {
                txEnable = true;

                // the kernel is enabling the transmit machine
                if (txState == txIdle)
                    txKick();
            }

            if (reg & CR_RXD) {
                rxEnable = false;
            } else if (reg & CR_RXE) {
                rxEnable = true;

                if (rxState == rxIdle)
                    rxKick();
            }

            if (reg & CR_TXR)
                txReset();

            if (reg & CR_RXR)
                rxReset();

            if (reg & CR_SWI)
                devIntrPost(ISR_SWI);

            if (reg & CR_RST) {
                // full chip reset: both state machines and all registers
                txReset();
                rxReset();

                regsReset();
            }
            break;

          case CFG:
            if (reg & CFG_LNKSTS ||
                reg & CFG_SPDSTS ||
                reg & CFG_DUPSTS ||
                reg & CFG_RESERVED ||
                reg & CFG_T64ADDR ||
                reg & CFG_PCI64_DET)
                panic("writing to read-only or reserved CFG bits!\n");

            regs.config |= reg & ~(CFG_LNKSTS | CFG_SPDSTS | CFG_DUPSTS |
                                   CFG_RESERVED | CFG_T64ADDR | CFG_PCI64_DET);

// all these #if 0's are because i don't THINK the kernel needs to
// have these implemented. if there is a problem relating to one of
// these, you may need to add functionality in.
#if 0
            if (reg & CFG_TBI_EN) ;
            if (reg & CFG_MODE_1000) ;
#endif

            if (reg & CFG_AUTO_1000)
                panic("CFG_AUTO_1000 not implemented!\n");

#if 0
            if (reg & CFG_PINT_DUPSTS ||
                reg & CFG_PINT_LNKSTS ||
                reg & CFG_PINT_SPDSTS)
                ;

            if (reg & CFG_TMRTEST) ;
            if (reg & CFG_MRM_DIS) ;
            if (reg & CFG_MWI_DIS) ;

            if (reg & CFG_T64ADDR)
                panic("CFG_T64ADDR is read only register!\n");

            if (reg & CFG_PCI64_DET)
                panic("CFG_PCI64_DET is read only register!\n");

            if (reg & CFG_DATA64_EN) ;
            if (reg & CFG_M64ADDR) ;
            if (reg & CFG_PHY_RST) ;
            if (reg & CFG_PHY_DIS) ;
#endif

            // controls whether extended status words are attached to
            // descriptors (needed for checksum offload / vlan)
            if (reg & CFG_EXTSTS_EN)
                extstsEnable = true;
            else
                extstsEnable = false;

#if 0
            if (reg & CFG_REQALG) ;
            if (reg & CFG_SB) ;
            if (reg & CFG_POW) ;
            if (reg & CFG_EXD) ;
            if (reg & CFG_PESEL) ;
            if (reg & CFG_BROM_DIS) ;
            if (reg & CFG_EXT_125) ;
            if (reg & CFG_BEM) ;
#endif
            break;

          case MEAR:
            regs.mear = reg;
            // since phy is completely faked, MEAR_MD* don't matter
            // and since the driver never uses MEAR_EE*, they don't
            // matter
#if 0
            if (reg & MEAR_EEDI) ;
            if (reg & MEAR_EEDO) ; // this one is read only
            if (reg & MEAR_EECLK) ;
            if (reg & MEAR_EESEL) ;
            if (reg & MEAR_MDIO) ;
            if (reg & MEAR_MDDIR) ;
            if (reg & MEAR_MDC) ;
#endif
            break;

          case PTSCR:
            regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
            // these control BISTs for various parts of chip - we
            // don't care or do just fake that the BIST is done
            if (reg & PTSCR_RBIST_EN)
                regs.ptscr |= PTSCR_RBIST_DONE;
            if (reg & PTSCR_EEBIST_EN)
                regs.ptscr &= ~PTSCR_EEBIST_EN;
            if (reg & PTSCR_EELOAD_EN)
                regs.ptscr &= ~PTSCR_EELOAD_EN;
            break;

          case ISR: /* writing to the ISR has no effect */
            panic("ISR is a read only register!\n");

          case IMR:
            regs.imr = reg;
            // changing the mask may raise or lower the CPU intr line
            devIntrChangeMask();
            break;

          case IER:
            regs.ier = reg;
            break;

          case IHR:
            regs.ihr = reg;
            /* not going to implement real interrupt holdoff */
            break;

          case TXDP:
            // descriptor pointers are dword aligned
            regs.txdp = (reg & 0xFFFFFFFC);
            assert(txState == txIdle);
            CTDD = false;
            break;

          case TXDP_HI:
            regs.txdp_hi = reg;
            break;

          case TXCFG:
            regs.txcfg = reg;
#if 0
            if (reg & TXCFG_CSI) ;
            if (reg & TXCFG_HBI) ;
            if (reg & TXCFG_MLB) ;
            if (reg & TXCFG_ATP) ;
            if (reg & TXCFG_ECRETRY) {
                /*
                 * this could easily be implemented, but considering
                 * the network is just a fake pipe, wouldn't make
                 * sense to do this
                 */
            }

            if (reg & TXCFG_BRST_DIS) ;
#endif

#if 0
            /* we handle our own DMA, ignore the kernel's exhortations */
            if (reg & TXCFG_MXDMA) ;
#endif

            // also, we currently don't care about fill/drain
            // thresholds though this may change in the future with
            // more realistic networks or a driver which changes it
            // according to feedback

            break;

          case GPIOR:
            regs.gpior = reg;
            /* these just control general purpose i/o pins, don't matter */
            break;

          case RXDP:
            regs.rxdp = reg;
            CRDD = false;
            break;

          case RXDP_HI:
            regs.rxdp_hi = reg;
            break;

          case RXCFG:
            regs.rxcfg = reg;
#if 0
            if (reg & RXCFG_AEP) ;
            if (reg & RXCFG_ARP) ;
            if (reg & RXCFG_STRIPCRC) ;
            if (reg & RXCFG_RX_RD) ;
            if (reg & RXCFG_ALP) ;
            if (reg & RXCFG_AIRL) ;

            /* we handle our own DMA, ignore what kernel says about it */
            if (reg & RXCFG_MXDMA) ;

            //also, we currently don't care about fill/drain thresholds
            //though this may change in the future with more realistic
            //networks or a driver which changes it according to feedback
            if (reg & (RXCFG_DRTH | RXCFG_DRTH0)) ;
#endif
            break;

          case PQCR:
            /* there is no priority queueing used in the linux 2.6 driver */
            regs.pqcr = reg;
            break;

          case WCSR:
            /* not going to implement wake on LAN */
            regs.wcsr = reg;
            break;

          case PCR:
            /* not going to implement pause control */
            regs.pcr = reg;
            break;

          case RFCR:
            // receive-filter control: latch the acceptance flags used
            // by rxFilter() when deciding whether to keep a packet
            regs.rfcr = reg;

            rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
            acceptBroadcast = (reg & RFCR_AAB) ? true : false;
            acceptMulticast = (reg & RFCR_AAM) ? true : false;
            acceptUnicast = (reg & RFCR_AAU) ? true : false;
            acceptPerfect = (reg & RFCR_APM) ? true : false;
            acceptArp = (reg & RFCR_AARP) ? true : false;

#if 0
            if (reg & RFCR_APAT)
                panic("RFCR_APAT not implemented!\n");
#endif

            if (reg & RFCR_MHEN || reg & RFCR_UHEN)
                panic("hash filtering not implemented!\n");

            if (reg & RFCR_ULM)
                panic("RFCR_ULM not implemented!\n");

            break;

          case RFDR:
            panic("the driver never writes to RFDR, something is wrong!\n");

          case BRAR:
            panic("the driver never uses BRAR, something is wrong!\n");

          case BRDR:
            panic("the driver never uses BRDR, something is wrong!\n");

          case SRR:
            panic("SRR is read only register!\n");

          case MIBC:
            panic("the driver never uses MIBC, something is wrong!\n");

          case VRCR:
            regs.vrcr = reg;
            break;

          case VTCR:
            regs.vtcr = reg;
            break;

          case VDR:
            panic("the driver never uses VDR, something is wrong!\n");
            break;

          case CCSR:
            /* not going to implement clockrun stuff */
            regs.ccsr = reg;
            break;

          case TBICR:
            regs.tbicr = reg;
            if (reg & TBICR_MR_LOOPBACK)
                panic("TBICR_MR_LOOPBACK never used, something wrong!\n");

            if (reg & TBICR_MR_AN_ENABLE) {
                // fake instant auto-negotiation completion
                regs.tanlpar = regs.tanar;
                regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
            }

#if 0
            if (reg & TBICR_MR_RESTART_AN) ;
#endif

            break;

          case TBISR:
            panic("TBISR is read only register!\n");

          case TANAR:
            regs.tanar = reg;
            if (reg & TANAR_PS2)
                panic("this isn't used in driver, something wrong!\n");

            if (reg & TANAR_PS1)
                panic("this isn't used in driver, something wrong!\n");
            break;

          case TANLPAR:
            panic("this should only be written to by the fake phy!\n");

          case TANER:
            panic("TANER is read only register!\n");

          case TESR:
            regs.tesr = reg;
            break;

          default:
            panic("invalid register access daddr=%#x", daddr);
        }
    } else {
        panic("Invalid Request Size");
    }

    return No_Fault;
}
1129
/**
 * Post one or more device interrupts into the ISR, bump the per-cause
 * "total" stats for unmasked causes, and schedule the CPU interrupt
 * (delayed by intrDelay unless an ISR_NODELAY cause is pending).
 */
void
NSGigE::devIntrPost(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot set a reserved interrupt");

    if (interrupts & ISR_NOIMPL)
        warn("interrupt not implemented %#x\n", interrupts);

    interrupts &= ~ISR_NOIMPL;
    regs.isr |= interrupts;

    // only count causes the driver has unmasked; these feed the
    // interrupt-coalescing statistics
    if (interrupts & regs.imr) {
        if (interrupts & ISR_SWI) {
            totalSwi++;
        }
        if (interrupts & ISR_RXIDLE) {
            totalRxIdle++;
        }
        if (interrupts & ISR_RXOK) {
            totalRxOk++;
        }
        if (interrupts & ISR_RXDESC) {
            totalRxDesc++;
        }
        if (interrupts & ISR_TXOK) {
            totalTxOk++;
        }
        if (interrupts & ISR_TXIDLE) {
            totalTxIdle++;
        }
        if (interrupts & ISR_TXDESC) {
            totalTxDesc++;
        }
        if (interrupts & ISR_RXORN) {
            totalRxOrn++;
        }
    }

    DPRINTF(EthernetIntr,
            "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
            interrupts, regs.isr, regs.imr);

    if ((regs.isr & regs.imr)) {
        Tick when = curTick;
        if (!(regs.isr & regs.imr & ISR_NODELAY))
            when += intrDelay;
        cpuIntrPost(when);
    }
}
1180
/* writing this interrupt counting stats inside this means that this function
   is now limited to being used to clear all interrupts upon the kernel
   reading isr and servicing. just telling you in case you were thinking
   of expanding use.
*/
void
NSGigE::devIntrClear(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot clear a reserved interrupt");

    // Count each unmasked cause that was pending as having been
    // delivered (posted) to the CPU in this service.
    if (regs.isr & regs.imr & ISR_SWI) {
        postedSwi++;
    }
    if (regs.isr & regs.imr & ISR_RXIDLE) {
        postedRxIdle++;
    }
    if (regs.isr & regs.imr & ISR_RXOK) {
        postedRxOk++;
    }
    if (regs.isr & regs.imr & ISR_RXDESC) {
        postedRxDesc++;
    }
    if (regs.isr & regs.imr & ISR_TXOK) {
        postedTxOk++;
    }
    if (regs.isr & regs.imr & ISR_TXIDLE) {
        postedTxIdle++;
    }
    if (regs.isr & regs.imr & ISR_TXDESC) {
        postedTxDesc++;
    }
    if (regs.isr & regs.imr & ISR_RXORN) {
        postedRxOrn++;
    }

    // One post to the CPU may have coalesced many causes.
    if (regs.isr & regs.imr & (ISR_SWI | ISR_RXIDLE | ISR_RXOK | ISR_RXDESC |
                               ISR_TXOK | ISR_TXIDLE | ISR_TXDESC | ISR_RXORN) )
        postedInterrupts++;

    interrupts &= ~ISR_NOIMPL;
    regs.isr &= ~interrupts;

    DPRINTF(EthernetIntr,
            "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
            interrupts, regs.isr, regs.imr);

    // Deassert the CPU line once nothing unmasked remains pending.
    if (!(regs.isr & regs.imr))
        cpuIntrClear();
}
1231
1232 void
1233 NSGigE::devIntrChangeMask()
1234 {
1235 DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
1236 regs.isr, regs.imr, regs.isr & regs.imr);
1237
1238 if (regs.isr & regs.imr)
1239 cpuIntrPost(curTick);
1240 else
1241 cpuIntrClear();
1242 }
1243
/**
 * Schedule delivery of the interrupt to the CPU at tick 'when', merging
 * with any already-scheduled delivery: an earlier pending event wins.
 */
void
NSGigE::cpuIntrPost(Tick when)
{
    // If the interrupt you want to post is later than an interrupt
    // already scheduled, just let it post in the coming one and don't
    // schedule another.
    // HOWEVER, must be sure that the scheduled intrTick is in the
    // future (this was formerly the source of a bug)
    /**
     * @todo this warning should be removed and the intrTick code should
     * be fixed.
     */
    assert(when >= curTick);
    assert(intrTick >= curTick || intrTick == 0);
    if (when > intrTick && intrTick != 0) {
        DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
                intrTick);
        return;
    }

    intrTick = when;
    if (intrTick < curTick) {
        // should be unreachable given the asserts above; clamp and
        // drop into the debugger if it ever happens
        debug_break();
        intrTick = curTick;
    }

    DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
            intrTick);

    // replace any previously scheduled (later) delivery with this one
    if (intrEvent)
        intrEvent->squash();
    intrEvent = new IntrEvent(this, true);
    intrEvent->schedule(intrTick);
}
1278
1279 void
1280 NSGigE::cpuInterrupt()
1281 {
1282 assert(intrTick == curTick);
1283
1284 // Whether or not there's a pending interrupt, we don't care about
1285 // it anymore
1286 intrEvent = 0;
1287 intrTick = 0;
1288
1289 // Don't send an interrupt if there's already one
1290 if (cpuPendingIntr) {
1291 DPRINTF(EthernetIntr,
1292 "would send an interrupt now, but there's already pending\n");
1293 } else {
1294 // Send interrupt
1295 cpuPendingIntr = true;
1296
1297 DPRINTF(EthernetIntr, "posting interrupt\n");
1298 intrPost();
1299 }
1300 }
1301
1302 void
1303 NSGigE::cpuIntrClear()
1304 {
1305 if (!cpuPendingIntr)
1306 return;
1307
1308 if (intrEvent) {
1309 intrEvent->squash();
1310 intrEvent = 0;
1311 }
1312
1313 intrTick = 0;
1314
1315 cpuPendingIntr = false;
1316
1317 DPRINTF(EthernetIntr, "clearing interrupt\n");
1318 intrClear();
1319 }
1320
1321 bool
1322 NSGigE::cpuIntrPending() const
1323 { return cpuPendingIntr; }
1324
1325 void
1326 NSGigE::txReset()
1327 {
1328
1329 DPRINTF(Ethernet, "transmit reset\n");
1330
1331 CTDD = false;
1332 txEnable = false;;
1333 txFragPtr = 0;
1334 assert(txDescCnt == 0);
1335 txFifo.clear();
1336 txState = txIdle;
1337 assert(txDmaState == dmaIdle);
1338 }
1339
1340 void
1341 NSGigE::rxReset()
1342 {
1343 DPRINTF(Ethernet, "receive reset\n");
1344
1345 CRDD = false;
1346 assert(rxPktBytes == 0);
1347 rxEnable = false;
1348 rxFragPtr = 0;
1349 assert(rxDescCnt == 0);
1350 assert(rxDmaState == dmaIdle);
1351 rxFifo.clear();
1352 rxState = rxIdle;
1353 }
1354
void
NSGigE::regsReset()
{
    // Restore the register file to its hardware reset values.  Every
    // register starts at zero except the handful given non-zero
    // power-on defaults below.
    memset(&regs, 0, sizeof(regs));
    regs.config = CFG_LNKSTS;   // link-status bit set at reset
    regs.mear = MEAR_MDDIR | MEAR_EEDO;
    regs.txcfg = 0x120; // set drain threshold to 1024 bytes and
                        // fill threshold to 32 bytes
    regs.rxcfg = 0x4; // set drain threshold to 16 bytes
    regs.srr = 0x0103; // set the silicon revision to rev B or 0x103
    regs.mibc = MIBC_FRZ;
    regs.vdr = 0x81; // set the vlan tag type to 802.1q
    regs.tesr = 0xc000; // TBI capable of both full and half duplex

    // Also reset the cached (decoded) copies of the receive-filter and
    // extended-status configuration that the register writes maintain.
    extstsEnable = false;
    acceptBroadcast = false;
    acceptMulticast = false;
    acceptUnicast = false;
    acceptPerfect = false;
    acceptArp = false;
}
1376
void
NSGigE::rxDmaReadCopy()
{
    // Perform the actual memory read for a pending rx-side DMA and
    // return the rx DMA engine to idle.  Any modelled latency is the
    // caller's responsibility; this routine only moves the data.
    assert(rxDmaState == dmaReading);

    physmem->dma_read((uint8_t *)rxDmaData, rxDmaAddr, rxDmaLen);
    rxDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
            rxDmaAddr, rxDmaLen);
    DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
}
1389
1390 bool
1391 NSGigE::doRxDmaRead()
1392 {
1393 assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
1394 rxDmaState = dmaReading;
1395
1396 if (dmaInterface && !rxDmaFree) {
1397 if (dmaInterface->busy())
1398 rxDmaState = dmaReadWaiting;
1399 else
1400 dmaInterface->doDMA(Read, rxDmaAddr, rxDmaLen, curTick,
1401 &rxDmaReadEvent, true);
1402 return true;
1403 }
1404
1405 if (dmaReadDelay == 0 && dmaReadFactor == 0) {
1406 rxDmaReadCopy();
1407 return false;
1408 }
1409
1410 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
1411 Tick start = curTick + dmaReadDelay + factor;
1412 rxDmaReadEvent.schedule(start);
1413 return true;
1414 }
1415
1416 void
1417 NSGigE::rxDmaReadDone()
1418 {
1419 assert(rxDmaState == dmaReading);
1420 rxDmaReadCopy();
1421
1422 // If the transmit state machine has a pending DMA, let it go first
1423 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1424 txKick();
1425
1426 rxKick();
1427 }
1428
void
NSGigE::rxDmaWriteCopy()
{
    // Perform the actual memory write for a pending rx-side DMA and
    // return the rx DMA engine to idle.  Any modelled latency is the
    // caller's responsibility; this routine only moves the data.
    assert(rxDmaState == dmaWriting);

    physmem->dma_write(rxDmaAddr, (uint8_t *)rxDmaData, rxDmaLen);
    rxDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
            rxDmaAddr, rxDmaLen);
    DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
}
1441
1442 bool
1443 NSGigE::doRxDmaWrite()
1444 {
1445 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
1446 rxDmaState = dmaWriting;
1447
1448 if (dmaInterface && !rxDmaFree) {
1449 if (dmaInterface->busy())
1450 rxDmaState = dmaWriteWaiting;
1451 else
1452 dmaInterface->doDMA(WriteInvalidate, rxDmaAddr, rxDmaLen, curTick,
1453 &rxDmaWriteEvent, true);
1454 return true;
1455 }
1456
1457 if (dmaWriteDelay == 0 && dmaWriteFactor == 0) {
1458 rxDmaWriteCopy();
1459 return false;
1460 }
1461
1462 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
1463 Tick start = curTick + dmaWriteDelay + factor;
1464 rxDmaWriteEvent.schedule(start);
1465 return true;
1466 }
1467
1468 void
1469 NSGigE::rxDmaWriteDone()
1470 {
1471 assert(rxDmaState == dmaWriting);
1472 rxDmaWriteCopy();
1473
1474 // If the transmit state machine has a pending DMA, let it go first
1475 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1476 txKick();
1477
1478 rxKick();
1479 }
1480
void
NSGigE::rxKick()
{
    // Main receive state machine.  Drives a packet from the rxFifo
    // into guest memory through the descriptor ring at regs.rxdp,
    // using the rx DMA engine for both descriptor and data transfers.
    DPRINTF(EthernetSM, "receive kick rxState=%s (rxBuf.size=%d)\n",
            NsRxStateStrings[rxState], rxFifo.size());

    if (rxKickTick > curTick) {
        DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
                rxKickTick);
        return;
    }

  next:
    // First retry any DMA that was parked waiting on the bus.
    switch(rxDmaState) {
      case dmaReadWaiting:
        if (doRxDmaRead())
            goto exit;
        break;
      case dmaWriteWaiting:
        if (doRxDmaWrite())
            goto exit;
        break;
      default:
        break;
    }

    // see state machine from spec for details
    // the way this works is, if you finish work on one state and can
    // go directly to another, you do that through jumping to the
    // label "next". however, if you have intermediate work, like DMA
    // so that you can't go to the next state yet, you go to exit and
    // exit the loop. however, when the DMA is done it will trigger
    // an event and come back to this loop.
    switch (rxState) {
      case rxIdle:
        if (!rxEnable) {
            DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n");
            goto exit;
        }

        if (CRDD) {
            // Current descriptor was already consumed: refresh just the
            // link field to see if the driver appended a new one.
            rxState = rxDescRefr;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            // NOTE(review): '&rxDescCache + offsetof(...)' advances the
            // pointer in ns_desc-sized units, not bytes, so this does
            // not point at the link field -- looks like a bug; confirm.
            rxDmaData = &rxDescCache + offsetof(ns_desc, link);
            rxDmaLen = sizeof(rxDescCache.link);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        } else {
            // Fetch the full descriptor at regs.rxdp.
            rxState = rxDescRead;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = &rxDescCache;
            rxDmaLen = sizeof(ns_desc);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      case rxDescRefr:
        if (rxDmaState != dmaIdle)
            goto exit;

        rxState = rxAdvance;
        break;

      case rxDescRead:
        if (rxDmaState != dmaIdle)
            goto exit;

        DPRINTF(EthernetDesc,
                "rxDescCache: addr=%08x read descriptor\n",
                regs.rxdp & 0x3fffffff);
        DPRINTF(EthernetDesc,
                "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
                rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts,
                rxDescCache.extsts);

        if (rxDescCache.cmdsts & CMDSTS_OWN) {
            // Descriptor still owned by... the other side; ring is
            // full from the device's point of view, so go idle.
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            goto exit;
        } else {
            rxState = rxFifoBlock;
            rxFragPtr = rxDescCache.bufptr;
            rxDescCnt = rxDescCache.cmdsts & CMDSTS_LEN_MASK;
        }
        break;

      case rxFifoBlock:
        if (!rxPacket) {
            /**
             * @todo in reality, we should be able to start processing
             * the packet as it arrives, and not have to wait for the
             * full packet ot be in the receive fifo.
             */
            if (rxFifo.empty())
                goto exit;

            DPRINTF(EthernetSM, "****processing receive of new packet****\n");

            // If we don't have a packet, grab a new one from the fifo.
            rxPacket = rxFifo.front();
            rxPktBytes = rxPacket->length;
            rxPacketBufPtr = rxPacket->data;

#if TRACING_ON
            if (DTRACE(Ethernet)) {
                IpPtr ip(rxPacket);
                if (ip) {
                    DPRINTF(Ethernet, "ID is %d\n", ip->id());
                    TcpPtr tcp(ip);
                    if (tcp) {
                        DPRINTF(Ethernet,
                                "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
                                tcp->sport(), tcp->dport(), tcp->seq(),
                                tcp->ack());
                    }
                }
            }
#endif

            // sanity check - i think the driver behaves like this
            assert(rxDescCnt >= rxPktBytes);
            rxFifo.pop();
        }

        // dont' need the && rxDescCnt > 0 if driver sanity check
        // above holds
        if (rxPktBytes > 0) {
            // Data still left to DMA into the descriptor's buffer.
            rxState = rxFragWrite;
            // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
            // check holds
            rxXferLen = rxPktBytes;

            rxDmaAddr = rxFragPtr & 0x3fffffff;
            rxDmaData = rxPacketBufPtr;
            rxDmaLen = rxXferLen;
            rxDmaFree = dmaDataFree;

            if (doRxDmaWrite())
                goto exit;

        } else {
            // Whole packet has been copied: write back status into the
            // descriptor and hand ownership back to the driver.
            rxState = rxDescWrite;

            //if (rxPktBytes == 0) {  /* packet is done */
            assert(rxPktBytes == 0);
            DPRINTF(EthernetSM, "done with receiving packet\n");

            rxDescCache.cmdsts |= CMDSTS_OWN;
            rxDescCache.cmdsts &= ~CMDSTS_MORE;
            rxDescCache.cmdsts |= CMDSTS_OK;
            rxDescCache.cmdsts &= 0xffff0000;
            rxDescCache.cmdsts += rxPacket->length;   //i.e. set CMDSTS_SIZE

#if 0
            /*
             * all the driver uses these are for its own stats keeping
             * which we don't care about, aren't necessary for
             * functionality and doing this would just slow us down.
             * if they end up using this in a later version for
             * functional purposes, just undef
             */
            if (rxFilterEnable) {
                rxDescCache.cmdsts &= ~CMDSTS_DEST_MASK;
                const EthAddr &dst = rxFifoFront()->dst();
                if (dst->unicast())
                    rxDescCache.cmdsts |= CMDSTS_DEST_SELF;
                if (dst->multicast())
                    rxDescCache.cmdsts |= CMDSTS_DEST_MULTI;
                if (dst->broadcast())
                    rxDescCache.cmdsts |= CMDSTS_DEST_MASK;
            }
#endif

            // Checksum-offload status bits: verify IP/TCP/UDP checksums
            // of the received packet and record errors in extsts.
            IpPtr ip(rxPacket);
            if (extstsEnable && ip) {
                rxDescCache.extsts |= EXTSTS_IPPKT;
                rxIpChecksums++;
                if (cksum(ip) != 0) {
                    DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
                    rxDescCache.extsts |= EXTSTS_IPERR;
                }
                TcpPtr tcp(ip);
                UdpPtr udp(ip);
                if (tcp) {
                    rxDescCache.extsts |= EXTSTS_TCPPKT;
                    rxTcpChecksums++;
                    if (cksum(tcp) != 0) {
                        DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
                        rxDescCache.extsts |= EXTSTS_TCPERR;

                    }
                } else if (udp) {
                    rxDescCache.extsts |= EXTSTS_UDPPKT;
                    rxUdpChecksums++;
                    if (cksum(udp) != 0) {
                        DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
                        rxDescCache.extsts |= EXTSTS_UDPERR;
                    }
                }
            }
            rxPacket = 0;

            /*
             * the driver seems to always receive into desc buffers
             * of size 1514, so you never have a pkt that is split
             * into multiple descriptors on the receive side, so
             * i don't implement that case, hence the assert above.
             */

            DPRINTF(EthernetDesc,
                    "rxDescCache: addr=%08x writeback cmdsts extsts\n",
                    regs.rxdp & 0x3fffffff);
            DPRINTF(EthernetDesc,
                    "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
                    rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts,
                    rxDescCache.extsts);

            // Write back only cmdsts+extsts (assumes they are adjacent
            // in ns_desc; the combined length below relies on that).
            rxDmaAddr = (regs.rxdp + offsetof(ns_desc, cmdsts)) & 0x3fffffff;
            rxDmaData = &(rxDescCache.cmdsts);
            rxDmaLen = sizeof(rxDescCache.cmdsts) + sizeof(rxDescCache.extsts);
            rxDmaFree = dmaDescFree;

            descDmaWrites++;
            descDmaWrBytes += rxDmaLen;

            if (doRxDmaWrite())
                goto exit;
        }
        break;

      case rxFragWrite:
        if (rxDmaState != dmaIdle)
            goto exit;

        // A data fragment landed in memory; advance the cursors.
        rxPacketBufPtr += rxXferLen;
        rxFragPtr += rxXferLen;
        rxPktBytes -= rxXferLen;

        rxState = rxFifoBlock;
        break;

      case rxDescWrite:
        if (rxDmaState != dmaIdle)
            goto exit;

        assert(rxDescCache.cmdsts & CMDSTS_OWN);

        assert(rxPacket == 0);
        devIntrPost(ISR_RXOK);

        if (rxDescCache.cmdsts & CMDSTS_INTR)
            devIntrPost(ISR_RXDESC);

        if (!rxEnable) {
            DPRINTF(EthernetSM, "Halting the RX state machine\n");
            rxState = rxIdle;
            goto exit;
        } else
            rxState = rxAdvance;
        break;

      case rxAdvance:
        // Follow the descriptor ring's link pointer; a null link means
        // end of ring, remember that via CRDD for the refresh path.
        if (rxDescCache.link == 0) {
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            CRDD = true;
            goto exit;
        } else {
            rxState = rxDescRead;
            regs.rxdp = rxDescCache.link;
            CRDD = false;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = &rxDescCache;
            rxDmaLen = sizeof(ns_desc);
            rxDmaFree = dmaDescFree;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("Invalid rxState!");
    }

    DPRINTF(EthernetSM, "entering next rxState=%s\n",
            NsRxStateStrings[rxState]);

    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
            NsRxStateStrings[rxState]);
}
1793
void
NSGigE::transmit()
{
    // Try to push the packet at the head of txFifo out onto the wire.
    // On failure (link busy), reschedule ourselves after retryTime.
    if (txFifo.empty()) {
        DPRINTF(Ethernet, "nothing to transmit\n");
        return;
    }

    DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n",
            txFifo.size());
    if (interface->sendPacket(txFifo.front())) {
#if TRACING_ON
        if (DTRACE(Ethernet)) {
            IpPtr ip(txFifo.front());
            if (ip) {
                DPRINTF(Ethernet, "ID is %d\n", ip->id());
                TcpPtr tcp(ip);
                if (tcp) {
                    DPRINTF(Ethernet,
                            "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
                            tcp->sport(), tcp->dport(), tcp->seq(), tcp->ack());
                }
            }
        }
#endif

        DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length);
        txBytes += txFifo.front()->length;
        txPackets++;

        DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n",
                txFifo.avail());
        txFifo.pop();

        /*
         * normally do a writeback of the descriptor here, and ONLY
         * after that is done, send this interrupt. but since our
         * stuff never actually fails, just do this interrupt here,
         * otherwise the code has to stray from this nice format.
         * besides, it's functionally the same.
         */
        devIntrPost(ISR_TXOK);
    }

    // Retry later if the fifo still has data and no retry is queued.
    if (!txFifo.empty() && !txEvent.scheduled()) {
        DPRINTF(Ethernet, "reschedule transmit\n");
        txEvent.schedule(curTick + retryTime);
    }
}
1843
void
NSGigE::txDmaReadCopy()
{
    // Perform the actual memory read for a pending tx-side DMA and
    // return the tx DMA engine to idle.  Any modelled latency is the
    // caller's responsibility; this routine only moves the data.
    assert(txDmaState == dmaReading);

    physmem->dma_read((uint8_t *)txDmaData, txDmaAddr, txDmaLen);
    txDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
            txDmaAddr, txDmaLen);
    DDUMP(EthernetDMA, txDmaData, txDmaLen);
}
1856
1857 bool
1858 NSGigE::doTxDmaRead()
1859 {
1860 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
1861 txDmaState = dmaReading;
1862
1863 if (dmaInterface && !txDmaFree) {
1864 if (dmaInterface->busy())
1865 txDmaState = dmaReadWaiting;
1866 else
1867 dmaInterface->doDMA(Read, txDmaAddr, txDmaLen, curTick,
1868 &txDmaReadEvent, true);
1869 return true;
1870 }
1871
1872 if (dmaReadDelay == 0 && dmaReadFactor == 0.0) {
1873 txDmaReadCopy();
1874 return false;
1875 }
1876
1877 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
1878 Tick start = curTick + dmaReadDelay + factor;
1879 txDmaReadEvent.schedule(start);
1880 return true;
1881 }
1882
1883 void
1884 NSGigE::txDmaReadDone()
1885 {
1886 assert(txDmaState == dmaReading);
1887 txDmaReadCopy();
1888
1889 // If the receive state machine has a pending DMA, let it go first
1890 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1891 rxKick();
1892
1893 txKick();
1894 }
1895
void
NSGigE::txDmaWriteCopy()
{
    // Perform the actual memory write for a pending tx-side DMA and
    // return the tx DMA engine to idle.  Any modelled latency is the
    // caller's responsibility; this routine only moves the data.
    assert(txDmaState == dmaWriting);

    physmem->dma_write(txDmaAddr, (uint8_t *)txDmaData, txDmaLen);
    txDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
            txDmaAddr, txDmaLen);
    DDUMP(EthernetDMA, txDmaData, txDmaLen);
}
1908
1909 bool
1910 NSGigE::doTxDmaWrite()
1911 {
1912 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
1913 txDmaState = dmaWriting;
1914
1915 if (dmaInterface && !txDmaFree) {
1916 if (dmaInterface->busy())
1917 txDmaState = dmaWriteWaiting;
1918 else
1919 dmaInterface->doDMA(WriteInvalidate, txDmaAddr, txDmaLen, curTick,
1920 &txDmaWriteEvent, true);
1921 return true;
1922 }
1923
1924 if (dmaWriteDelay == 0 && dmaWriteFactor == 0.0) {
1925 txDmaWriteCopy();
1926 return false;
1927 }
1928
1929 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
1930 Tick start = curTick + dmaWriteDelay + factor;
1931 txDmaWriteEvent.schedule(start);
1932 return true;
1933 }
1934
1935 void
1936 NSGigE::txDmaWriteDone()
1937 {
1938 assert(txDmaState == dmaWriting);
1939 txDmaWriteCopy();
1940
1941 // If the receive state machine has a pending DMA, let it go first
1942 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1943 rxKick();
1944
1945 txKick();
1946 }
1947
void
NSGigE::txKick()
{
    // Main transmit state machine.  Walks the descriptor ring at
    // regs.txdp, DMAs packet fragments into a staging PacketData, and
    // hands completed packets to transmit().  Uses the same
    // goto-next / goto-exit structure as rxKick().
    DPRINTF(EthernetSM, "transmit kick txState=%s\n",
            NsTxStateStrings[txState]);

    if (txKickTick > curTick) {
        DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
                txKickTick);

        return;
    }

  next:
    // First retry any DMA that was parked waiting on the bus.
    switch(txDmaState) {
      case dmaReadWaiting:
        if (doTxDmaRead())
            goto exit;
        break;
      case dmaWriteWaiting:
        if (doTxDmaWrite())
            goto exit;
        break;
      default:
        break;
    }

    switch (txState) {
      case txIdle:
        if (!txEnable) {
            DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n");
            goto exit;
        }

        if (CTDD) {
            // Current descriptor already done: refresh only the link
            // field to see if the driver has chained a new one.
            txState = txDescRefr;

            txDmaAddr = regs.txdp & 0x3fffffff;
            // NOTE(review): '&txDescCache + offsetof(...)' advances the
            // pointer in ns_desc-sized units, not bytes, so this does
            // not point at the link field -- looks like a bug; confirm.
            txDmaData = &txDescCache + offsetof(ns_desc, link);
            txDmaLen = sizeof(txDescCache.link);
            txDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += txDmaLen;

            if (doTxDmaRead())
                goto exit;

        } else {
            // Fetch the full descriptor at regs.txdp.
            txState = txDescRead;

            txDmaAddr = regs.txdp & 0x3fffffff;
            txDmaData = &txDescCache;
            txDmaLen = sizeof(ns_desc);
            txDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += txDmaLen;

            if (doTxDmaRead())
                goto exit;
        }
        break;

      case txDescRefr:
        if (txDmaState != dmaIdle)
            goto exit;

        txState = txAdvance;
        break;

      case txDescRead:
        if (txDmaState != dmaIdle)
            goto exit;

        DPRINTF(EthernetDesc,
                "txDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
                txDescCache.link, txDescCache.bufptr, txDescCache.cmdsts,
                txDescCache.extsts);

        if (txDescCache.cmdsts & CMDSTS_OWN) {
            // Device owns this descriptor: start consuming it.
            txState = txFifoBlock;
            txFragPtr = txDescCache.bufptr;
            txDescCnt = txDescCache.cmdsts & CMDSTS_LEN_MASK;
        } else {
            // Nothing queued by the driver; go idle.
            devIntrPost(ISR_TXIDLE);
            txState = txIdle;
            goto exit;
        }
        break;

      case txFifoBlock:
        if (!txPacket) {
            DPRINTF(EthernetSM, "****starting the tx of a new packet****\n");
            txPacket = new PacketData(16384);
            txPacketBufPtr = txPacket->data;
        }

        if (txDescCnt == 0) {
            DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n");
            if (txDescCache.cmdsts & CMDSTS_MORE) {
                // Multi-descriptor packet: write back ownership of this
                // descriptor and move on to the next fragment.
                DPRINTF(EthernetSM, "there are more descriptors to come\n");
                txState = txDescWrite;

                txDescCache.cmdsts &= ~CMDSTS_OWN;

                txDmaAddr = regs.txdp + offsetof(ns_desc, cmdsts);
                txDmaAddr &= 0x3fffffff;
                txDmaData = &(txDescCache.cmdsts);
                txDmaLen = sizeof(txDescCache.cmdsts);
                txDmaFree = dmaDescFree;

                if (doTxDmaWrite())
                    goto exit;

            } else { /* this packet is totally done */
                DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
                /* deal with the the packet that just finished */
                // Checksum offload: fill in IP/TCP/UDP checksums if the
                // driver requested it via vtcr and the extsts bits.
                if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
                    IpPtr ip(txPacket);
                    if (txDescCache.extsts & EXTSTS_UDPPKT) {
                        UdpPtr udp(ip);
                        udp->sum(0);
                        udp->sum(cksum(udp));
                        txUdpChecksums++;
                    } else if (txDescCache.extsts & EXTSTS_TCPPKT) {
                        TcpPtr tcp(ip);
                        tcp->sum(0);
                        tcp->sum(cksum(tcp));
                        txTcpChecksums++;
                    }
                    if (txDescCache.extsts & EXTSTS_IPPKT) {
                        ip->sum(0);
                        ip->sum(cksum(ip));
                        txIpChecksums++;
                    }
                }

                txPacket->length = txPacketBufPtr - txPacket->data;
                // this is just because the receive can't handle a
                // packet bigger want to make sure
                assert(txPacket->length <= 1514);
#ifndef NDEBUG
                bool success =
#endif
                    txFifo.push(txPacket);
                assert(success);

                /*
                 * this following section is not tqo spec, but
                 * functionally shouldn't be any different. normally,
                 * the chip will wait til the transmit has occurred
                 * before writing back the descriptor because it has
                 * to wait to see that it was successfully transmitted
                 * to decide whether to set CMDSTS_OK or not.
                 * however, in the simulator since it is always
                 * successfully transmitted, and writing it exactly to
                 * spec would complicate the code, we just do it here
                 */

                txDescCache.cmdsts &= ~CMDSTS_OWN;
                txDescCache.cmdsts |= CMDSTS_OK;

                DPRINTF(EthernetDesc,
                        "txDesc writeback: cmdsts=%08x extsts=%08x\n",
                        txDescCache.cmdsts, txDescCache.extsts);

                // Write back cmdsts+extsts together (relies on the two
                // fields being adjacent in ns_desc).
                txDmaAddr = regs.txdp + offsetof(ns_desc, cmdsts);
                txDmaAddr &= 0x3fffffff;
                txDmaData = &(txDescCache.cmdsts);
                txDmaLen = sizeof(txDescCache.cmdsts) +
                    sizeof(txDescCache.extsts);
                txDmaFree = dmaDescFree;

                descDmaWrites++;
                descDmaWrBytes += txDmaLen;

                transmit();
                txPacket = 0;

                if (!txEnable) {
                    DPRINTF(EthernetSM, "halting TX state machine\n");
                    txState = txIdle;
                    goto exit;
                } else
                    txState = txAdvance;

                if (doTxDmaWrite())
                    goto exit;
            }
        } else {
            DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
            if (!txFifo.full()) {
                txState = txFragRead;

                /*
                 * The number of bytes transferred is either whatever
                 * is left in the descriptor (txDescCnt), or if there
                 * is not enough room in the fifo, just whatever room
                 * is left in the fifo
                 */
                txXferLen = min<uint32_t>(txDescCnt, txFifo.avail());

                txDmaAddr = txFragPtr & 0x3fffffff;
                txDmaData = txPacketBufPtr;
                txDmaLen = txXferLen;
                txDmaFree = dmaDataFree;

                if (doTxDmaRead())
                    goto exit;
            } else {
                // Fifo full: try to drain it onto the wire, then wait.
                txState = txFifoBlock;
                transmit();

                goto exit;
            }

        }
        break;

      case txFragRead:
        if (txDmaState != dmaIdle)
            goto exit;

        // A fragment arrived in the staging buffer; advance cursors
        // and account for the fifo space it will occupy.
        txPacketBufPtr += txXferLen;
        txFragPtr += txXferLen;
        txDescCnt -= txXferLen;
        txFifo.reserve(txXferLen);

        txState = txFifoBlock;
        break;

      case txDescWrite:
        if (txDmaState != dmaIdle)
            goto exit;

        if (txDescCache.cmdsts & CMDSTS_INTR)
            devIntrPost(ISR_TXDESC);

        txState = txAdvance;
        break;

      case txAdvance:
        // Follow the descriptor ring's link pointer; a null link means
        // end of ring, so go idle.
        if (txDescCache.link == 0) {
            devIntrPost(ISR_TXIDLE);
            txState = txIdle;
            goto exit;
        } else {
            txState = txDescRead;
            regs.txdp = txDescCache.link;
            CTDD = false;

            txDmaAddr = txDescCache.link & 0x3fffffff;
            txDmaData = &txDescCache;
            txDmaLen = sizeof(ns_desc);
            txDmaFree = dmaDescFree;

            if (doTxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("invalid state");
    }

    DPRINTF(EthernetSM, "entering next txState=%s\n",
            NsTxStateStrings[txState]);

    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
            NsTxStateStrings[txState]);
}
2226
2227 void
2228 NSGigE::transferDone()
2229 {
2230 if (txFifo.empty()) {
2231 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
2232 return;
2233 }
2234
2235 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
2236
2237 if (txEvent.scheduled())
2238 txEvent.reschedule(curTick + cycles(1));
2239 else
2240 txEvent.schedule(curTick + cycles(1));
2241 }
2242
2243 bool
2244 NSGigE::rxFilter(const PacketPtr &packet)
2245 {
2246 EthPtr eth = packet;
2247 bool drop = true;
2248 string type;
2249
2250 const EthAddr &dst = eth->dst();
2251 if (dst.unicast()) {
2252 // If we're accepting all unicast addresses
2253 if (acceptUnicast)
2254 drop = false;
2255
2256 // If we make a perfect match
2257 if (acceptPerfect && dst == rom.perfectMatch)
2258 drop = false;
2259
2260 if (acceptArp && eth->type() == ETH_TYPE_ARP)
2261 drop = false;
2262
2263 } else if (dst.broadcast()) {
2264 // if we're accepting broadcasts
2265 if (acceptBroadcast)
2266 drop = false;
2267
2268 } else if (dst.multicast()) {
2269 // if we're accepting all multicasts
2270 if (acceptMulticast)
2271 drop = false;
2272
2273 }
2274
2275 if (drop) {
2276 DPRINTF(Ethernet, "rxFilter drop\n");
2277 DDUMP(EthernetData, packet->data, packet->length);
2278 }
2279
2280 return drop;
2281 }
2282
bool
NSGigE::recvPacket(PacketPtr packet)
{
    // Entry point for packets arriving from the (simulated) wire.
    // Returns true if the packet was consumed (accepted OR
    // deliberately dropped); false only when the rx fifo is full, in
    // which case the link layer must retry later.
    rxBytes += packet->length;
    rxPackets++;

    DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
            rxFifo.avail());

    if (!rxEnable) {
        // Receiver disabled: drop silently (but trap into the debugger,
        // since the driver normally shouldn't let this happen).
        DPRINTF(Ethernet, "receive disabled...packet dropped\n");
        debug_break();
        interface->recvDone();
        return true;
    }

    if (rxFilterEnable && rxFilter(packet)) {
        DPRINTF(Ethernet, "packet filtered...dropped\n");
        interface->recvDone();
        return true;
    }

    if (rxFifo.avail() < packet->length) {
#if TRACING_ON
        IpPtr ip(packet);
        TcpPtr tcp(ip);
        if (ip) {
            DPRINTF(Ethernet,
                    "packet won't fit in receive buffer...pkt ID %d dropped\n",
                    ip->id());
            if (tcp) {
                DPRINTF(Ethernet, "Seq=%d\n", tcp->seq());
            }
        }
#endif
        // Overrun: raise ISR_RXORN and ask the link layer to retry.
        droppedPackets++;
        devIntrPost(ISR_RXORN);
        return false;
    }

    // Accepted: queue it and let the receive state machine run.
    rxFifo.push(packet);
    interface->recvDone();

    rxKick();
    return true;
}
2329
//=====================================================================
// Serialization (checkpoint) support
//
void
NSGigE::serialize(ostream &os)
{
    // Write the device's full state to a checkpoint stream.  The field
    // order here must match NSGigE::unserialize exactly.

    // Serialize the PciDev base class
    PciDev::serialize(os);

    /*
     * Finalize any DMA events now.
     * Copies that are only pending for latency-modelling purposes are
     * performed eagerly so no in-flight DMA needs to be checkpointed.
     */
    if (rxDmaReadEvent.scheduled())
        rxDmaReadCopy();
    if (rxDmaWriteEvent.scheduled())
        rxDmaWriteCopy();
    if (txDmaReadEvent.scheduled())
        txDmaReadCopy();
    if (txDmaWriteEvent.scheduled())
        txDmaWriteCopy();

    /*
     * Serialize the device registers
     */
    SERIALIZE_SCALAR(regs.command);
    SERIALIZE_SCALAR(regs.config);
    SERIALIZE_SCALAR(regs.mear);
    SERIALIZE_SCALAR(regs.ptscr);
    SERIALIZE_SCALAR(regs.isr);
    SERIALIZE_SCALAR(regs.imr);
    SERIALIZE_SCALAR(regs.ier);
    SERIALIZE_SCALAR(regs.ihr);
    SERIALIZE_SCALAR(regs.txdp);
    SERIALIZE_SCALAR(regs.txdp_hi);
    SERIALIZE_SCALAR(regs.txcfg);
    SERIALIZE_SCALAR(regs.gpior);
    SERIALIZE_SCALAR(regs.rxdp);
    SERIALIZE_SCALAR(regs.rxdp_hi);
    SERIALIZE_SCALAR(regs.rxcfg);
    SERIALIZE_SCALAR(regs.pqcr);
    SERIALIZE_SCALAR(regs.wcsr);
    SERIALIZE_SCALAR(regs.pcr);
    SERIALIZE_SCALAR(regs.rfcr);
    SERIALIZE_SCALAR(regs.rfdr);
    SERIALIZE_SCALAR(regs.srr);
    SERIALIZE_SCALAR(regs.mibc);
    SERIALIZE_SCALAR(regs.vrcr);
    SERIALIZE_SCALAR(regs.vtcr);
    SERIALIZE_SCALAR(regs.vdr);
    SERIALIZE_SCALAR(regs.ccsr);
    SERIALIZE_SCALAR(regs.tbicr);
    SERIALIZE_SCALAR(regs.tbisr);
    SERIALIZE_SCALAR(regs.tanar);
    SERIALIZE_SCALAR(regs.tanlpar);
    SERIALIZE_SCALAR(regs.taner);
    SERIALIZE_SCALAR(regs.tesr);

    SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);

    SERIALIZE_SCALAR(ioEnable);

    /*
     * Serialize the data Fifos
     */
    rxFifo.serialize("rxFifo", os);
    txFifo.serialize("txFifo", os);

    /*
     * Serialize the various helper variables
     */
    // Packets in flight are optional; a bool flag records presence and
    // buffer cursors are stored as offsets so they can be rebuilt.
    bool txPacketExists = txPacket;
    SERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        txPacket->length = txPacketBufPtr - txPacket->data;
        txPacket->serialize("txPacket", os);
        uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
        SERIALIZE_SCALAR(txPktBufPtr);
    }

    bool rxPacketExists = rxPacket;
    SERIALIZE_SCALAR(rxPacketExists);
    if (rxPacketExists) {
        rxPacket->serialize("rxPacket", os);
        uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
        SERIALIZE_SCALAR(rxPktBufPtr);
    }

    SERIALIZE_SCALAR(txXferLen);
    SERIALIZE_SCALAR(rxXferLen);

    /*
     * Serialize DescCaches
     */
    SERIALIZE_SCALAR(txDescCache.link);
    SERIALIZE_SCALAR(txDescCache.bufptr);
    SERIALIZE_SCALAR(txDescCache.cmdsts);
    SERIALIZE_SCALAR(txDescCache.extsts);
    SERIALIZE_SCALAR(rxDescCache.link);
    SERIALIZE_SCALAR(rxDescCache.bufptr);
    SERIALIZE_SCALAR(rxDescCache.cmdsts);
    SERIALIZE_SCALAR(rxDescCache.extsts);

    /*
     * Serialize tx state machine
     */
    // Enum states are stored as plain ints for portability.
    int txState = this->txState;
    SERIALIZE_SCALAR(txState);
    SERIALIZE_SCALAR(txEnable);
    SERIALIZE_SCALAR(CTDD);
    SERIALIZE_SCALAR(txFragPtr);
    SERIALIZE_SCALAR(txDescCnt);
    int txDmaState = this->txDmaState;
    SERIALIZE_SCALAR(txDmaState);

    /*
     * Serialize rx state machine
     */
    int rxState = this->rxState;
    SERIALIZE_SCALAR(rxState);
    SERIALIZE_SCALAR(rxEnable);
    SERIALIZE_SCALAR(CRDD);
    SERIALIZE_SCALAR(rxPktBytes);
    SERIALIZE_SCALAR(rxFragPtr);
    SERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState = this->rxDmaState;
    SERIALIZE_SCALAR(rxDmaState);

    SERIALIZE_SCALAR(extstsEnable);

    /*
     * If there's a pending transmit, store the time so we can
     * reschedule it later
     */
    // Stored as a delta from curTick so restore is position-independent.
    Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
    SERIALIZE_SCALAR(transmitTick);

    /*
     * receive address filter settings
     */
    SERIALIZE_SCALAR(rxFilterEnable);
    SERIALIZE_SCALAR(acceptBroadcast);
    SERIALIZE_SCALAR(acceptMulticast);
    SERIALIZE_SCALAR(acceptUnicast);
    SERIALIZE_SCALAR(acceptPerfect);
    SERIALIZE_SCALAR(acceptArp);

    /*
     * Keep track of pending interrupt status.
     */
    SERIALIZE_SCALAR(intrTick);
    SERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick = 0;
    if (intrEvent)
        intrEventTick = intrEvent->when();
    SERIALIZE_SCALAR(intrEventTick);

}
2487
void
NSGigE::unserialize(Checkpoint *cp, const std::string &section)
{
    // Unserialize the PciDev base class
    PciDev::unserialize(cp, section);

    // Restore the device register file.  The order and set of registers
    // here must exactly mirror NSGigE::serialize(); a mismatch corrupts
    // checkpoint restore.
    // NOTE(review): regs.m5reg is a parameter of this model but is not
    // restored here -- confirm against serialize() whether it is
    // checkpointed elsewhere or intentionally re-derived from the param.
    UNSERIALIZE_SCALAR(regs.command);
    UNSERIALIZE_SCALAR(regs.config);
    UNSERIALIZE_SCALAR(regs.mear);
    UNSERIALIZE_SCALAR(regs.ptscr);
    UNSERIALIZE_SCALAR(regs.isr);
    UNSERIALIZE_SCALAR(regs.imr);
    UNSERIALIZE_SCALAR(regs.ier);
    UNSERIALIZE_SCALAR(regs.ihr);
    UNSERIALIZE_SCALAR(regs.txdp);
    UNSERIALIZE_SCALAR(regs.txdp_hi);
    UNSERIALIZE_SCALAR(regs.txcfg);
    UNSERIALIZE_SCALAR(regs.gpior);
    UNSERIALIZE_SCALAR(regs.rxdp);
    UNSERIALIZE_SCALAR(regs.rxdp_hi);
    UNSERIALIZE_SCALAR(regs.rxcfg);
    UNSERIALIZE_SCALAR(regs.pqcr);
    UNSERIALIZE_SCALAR(regs.wcsr);
    UNSERIALIZE_SCALAR(regs.pcr);
    UNSERIALIZE_SCALAR(regs.rfcr);
    UNSERIALIZE_SCALAR(regs.rfdr);
    UNSERIALIZE_SCALAR(regs.srr);
    UNSERIALIZE_SCALAR(regs.mibc);
    UNSERIALIZE_SCALAR(regs.vrcr);
    UNSERIALIZE_SCALAR(regs.vtcr);
    UNSERIALIZE_SCALAR(regs.vdr);
    UNSERIALIZE_SCALAR(regs.ccsr);
    UNSERIALIZE_SCALAR(regs.tbicr);
    UNSERIALIZE_SCALAR(regs.tbisr);
    UNSERIALIZE_SCALAR(regs.tanar);
    UNSERIALIZE_SCALAR(regs.tanlpar);
    UNSERIALIZE_SCALAR(regs.taner);
    UNSERIALIZE_SCALAR(regs.tesr);

    // Receive-filter perfect-match station address bytes.
    UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);

    UNSERIALIZE_SCALAR(ioEnable);

    /*
     * unserialize the data fifos
     */
    rxFifo.unserialize("rxFifo", cp, section);
    txFifo.unserialize("txFifo", cp, section);

    /*
     * unserialize the various helper variables
     */
    // A packet in flight is checkpointed as a presence flag plus the
    // packet bytes and the offset of the in-progress buffer pointer,
    // which is rebuilt here as data-base + saved offset.
    bool txPacketExists;
    UNSERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        txPacket = new PacketData(16384);
        txPacket->unserialize("txPacket", cp, section);
        uint32_t txPktBufPtr;
        UNSERIALIZE_SCALAR(txPktBufPtr);
        txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
    } else
        txPacket = 0;

    bool rxPacketExists;
    UNSERIALIZE_SCALAR(rxPacketExists);
    rxPacket = 0;
    if (rxPacketExists) {
        rxPacket = new PacketData(16384);
        rxPacket->unserialize("rxPacket", cp, section);
        uint32_t rxPktBufPtr;
        UNSERIALIZE_SCALAR(rxPktBufPtr);
        rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
    } else
        rxPacket = 0;

    UNSERIALIZE_SCALAR(txXferLen);
    UNSERIALIZE_SCALAR(rxXferLen);

    /*
     * Unserialize DescCaches
     */
    UNSERIALIZE_SCALAR(txDescCache.link);
    UNSERIALIZE_SCALAR(txDescCache.bufptr);
    UNSERIALIZE_SCALAR(txDescCache.cmdsts);
    UNSERIALIZE_SCALAR(txDescCache.extsts);
    UNSERIALIZE_SCALAR(rxDescCache.link);
    UNSERIALIZE_SCALAR(rxDescCache.bufptr);
    UNSERIALIZE_SCALAR(rxDescCache.cmdsts);
    UNSERIALIZE_SCALAR(rxDescCache.extsts);

    /*
     * unserialize tx state machine
     */
    // Enum state is checkpointed as an int and cast back on restore.
    int txState;
    UNSERIALIZE_SCALAR(txState);
    this->txState = (TxState) txState;
    UNSERIALIZE_SCALAR(txEnable);
    UNSERIALIZE_SCALAR(CTDD);
    UNSERIALIZE_SCALAR(txFragPtr);
    UNSERIALIZE_SCALAR(txDescCnt);
    int txDmaState;
    UNSERIALIZE_SCALAR(txDmaState);
    this->txDmaState = (DmaState) txDmaState;

    /*
     * unserialize rx state machine
     */
    int rxState;
    UNSERIALIZE_SCALAR(rxState);
    this->rxState = (RxState) rxState;
    UNSERIALIZE_SCALAR(rxEnable);
    UNSERIALIZE_SCALAR(CRDD);
    UNSERIALIZE_SCALAR(rxPktBytes);
    UNSERIALIZE_SCALAR(rxFragPtr);
    UNSERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState;
    UNSERIALIZE_SCALAR(rxDmaState);
    this->rxDmaState = (DmaState) rxDmaState;

    UNSERIALIZE_SCALAR(extstsEnable);

    /*
     * If there's a pending transmit, reschedule it now
     */
    // The checkpoint stores a relative delay (0 means no pending event),
    // so the event is rescheduled relative to the current tick.
    Tick transmitTick;
    UNSERIALIZE_SCALAR(transmitTick);
    if (transmitTick)
        txEvent.schedule(curTick + transmitTick);

    /*
     * unserialize receive address filter settings
     */
    UNSERIALIZE_SCALAR(rxFilterEnable);
    UNSERIALIZE_SCALAR(acceptBroadcast);
    UNSERIALIZE_SCALAR(acceptMulticast);
    UNSERIALIZE_SCALAR(acceptUnicast);
    UNSERIALIZE_SCALAR(acceptPerfect);
    UNSERIALIZE_SCALAR(acceptArp);

    /*
     * Keep track of pending interrupt status.
     */
    // NOTE(review): intrEventTick appears to be an absolute tick here
    // (scheduled directly), unlike transmitTick above which is relative
    // -- confirm this matches what serialize() writes.
    UNSERIALIZE_SCALAR(intrTick);
    UNSERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick;
    UNSERIALIZE_SCALAR(intrEventTick);
    if (intrEventTick) {
        intrEvent = new IntrEvent(this, true);
        intrEvent->schedule(intrEventTick);
    }

    /*
     * re-add addrRanges to bus bridges
     */
    // BAR mappings are not part of the checkpoint proper; they are
    // re-registered with the PIO interface from the restored PCI BARs.
    if (pioInterface) {
        pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));
        pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));
    }
}
2647
2648 Tick
2649 NSGigE::cacheAccess(MemReqPtr &req)
2650 {
2651 DPRINTF(EthernetPIO, "timing access to paddr=%#x (daddr=%#x)\n",
2652 req->paddr, req->paddr - addr);
2653 return curTick + pioLatency;
2654 }
2655
// Parameters for the NSGigEInt (ethernet interface endpoint) SimObject.
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

    // Remote EtherInt endpoint this interface links to (may be NULL).
    SimObjectParam<EtherInt *> peer;
    // The NSGigE device this interface belongs to.
    SimObjectParam<NSGigE *> device;

END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt)

    INIT_PARAM_DFLT(peer, "peer interface", NULL),
    INIT_PARAM(device, "Ethernet device of this interface")

END_INIT_SIM_OBJECT_PARAMS(NSGigEInt)
2669
2670 CREATE_SIM_OBJECT(NSGigEInt)
2671 {
2672 NSGigEInt *dev_int = new NSGigEInt(getInstanceName(), device);
2673
2674 EtherInt *p = (EtherInt *)peer;
2675 if (p) {
2676 dev_int->setPeer(p);
2677 p->setPeer(dev_int);
2678 }
2679
2680 return dev_int;
2681 }
2682
2683 REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt)
2684
2685
// Configuration parameters accepted by the NSGigE device SimObject.
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE)

    // Device base address and core timing.
    Param<Addr> addr;
    Param<Tick> cycle_time;
    Param<Tick> tx_delay;
    Param<Tick> rx_delay;
    Param<Tick> intr_delay;
    // Memory-system hookup.
    SimObjectParam<MemoryController *> mmu;
    SimObjectParam<PhysicalMemory *> physmem;
    // Ethernet configuration.
    Param<bool> rx_filter;
    Param<string> hardware_address;
    // Bus attachments and PIO timing.
    SimObjectParam<Bus*> io_bus;
    SimObjectParam<Bus*> payload_bus;
    SimObjectParam<HierParams *> hier;
    Param<Tick> pio_latency;
    // DMA cost model knobs.
    Param<bool> dma_desc_free;
    Param<bool> dma_data_free;
    Param<Tick> dma_read_delay;
    Param<Tick> dma_write_delay;
    Param<Tick> dma_read_factor;
    Param<Tick> dma_write_factor;
    // PCI identity and configuration space.
    SimObjectParam<PciConfigAll *> configspace;
    SimObjectParam<PciConfigData *> configdata;
    SimObjectParam<Platform *> platform;
    Param<uint32_t> pci_bus;
    Param<uint32_t> pci_dev;
    Param<uint32_t> pci_func;
    // FIFO sizing and the m5 magic register value.
    Param<uint32_t> tx_fifo_size;
    Param<uint32_t> rx_fifo_size;
    Param<uint32_t> m5reg;

END_DECLARE_SIM_OBJECT_PARAMS(NSGigE)
2718
// Descriptions and defaults for the NSGigE parameters; entries must
// stay in the same order as the corresponding declarations.
BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE)

    INIT_PARAM(addr, "Device Address"),
    INIT_PARAM(cycle_time, "State machine processor frequency"),
    INIT_PARAM(tx_delay, "Transmit Delay"),
    INIT_PARAM(rx_delay, "Receive Delay"),
    INIT_PARAM(intr_delay, "Interrupt Delay in microseconds"),
    INIT_PARAM(mmu, "Memory Controller"),
    INIT_PARAM(physmem, "Physical Memory"),
    INIT_PARAM_DFLT(rx_filter, "Enable Receive Filter", true),
    INIT_PARAM_DFLT(hardware_address, "Ethernet Hardware Address",
                    "00:99:00:00:00:01"),
    INIT_PARAM_DFLT(io_bus, "The IO Bus to attach to for headers", NULL),
    INIT_PARAM_DFLT(payload_bus, "The IO Bus to attach to for payload", NULL),
    INIT_PARAM_DFLT(hier, "Hierarchy global variables", &defaultHierParams),
    INIT_PARAM_DFLT(pio_latency, "Programmed IO latency in bus cycles", 1),
    INIT_PARAM_DFLT(dma_desc_free, "DMA of Descriptors is free", false),
    INIT_PARAM_DFLT(dma_data_free, "DMA of Data is free", false),
    INIT_PARAM_DFLT(dma_read_delay, "fixed delay for dma reads", 0),
    INIT_PARAM_DFLT(dma_write_delay, "fixed delay for dma writes", 0),
    INIT_PARAM_DFLT(dma_read_factor, "multiplier for dma reads", 0),
    INIT_PARAM_DFLT(dma_write_factor, "multiplier for dma writes", 0),
    INIT_PARAM(configspace, "PCI Configspace"),
    INIT_PARAM(configdata, "PCI Config data"),
    INIT_PARAM(platform, "Platform"),
    INIT_PARAM(pci_bus, "PCI bus"),
    INIT_PARAM(pci_dev, "PCI device number"),
    INIT_PARAM(pci_func, "PCI function code"),
    INIT_PARAM_DFLT(tx_fifo_size, "max size in bytes of txFifo", 131072),
    INIT_PARAM_DFLT(rx_fifo_size, "max size in bytes of rxFifo", 131072),
    // m5reg has no default: configs must supply the m5 register value.
    INIT_PARAM(m5reg, "m5 register")

END_INIT_SIM_OBJECT_PARAMS(NSGigE)
2752
2753
2754 CREATE_SIM_OBJECT(NSGigE)
2755 {
2756 NSGigE::Params *params = new NSGigE::Params;
2757
2758 params->name = getInstanceName();
2759 params->mmu = mmu;
2760 params->configSpace = configspace;
2761 params->configData = configdata;
2762 params->plat = platform;
2763 params->busNum = pci_bus;
2764 params->deviceNum = pci_dev;
2765 params->functionNum = pci_func;
2766
2767 params->cycle_time = cycle_time;
2768 params->intr_delay = intr_delay;
2769 params->pmem = physmem;
2770 params->tx_delay = tx_delay;
2771 params->rx_delay = rx_delay;
2772 params->hier = hier;
2773 params->header_bus = io_bus;
2774 params->payload_bus = payload_bus;
2775 params->pio_latency = pio_latency;
2776 params->dma_desc_free = dma_desc_free;
2777 params->dma_data_free = dma_data_free;
2778 params->dma_read_delay = dma_read_delay;
2779 params->dma_write_delay = dma_write_delay;
2780 params->dma_read_factor = dma_read_factor;
2781 params->dma_write_factor = dma_write_factor;
2782 params->rx_filter = rx_filter;
2783 params->eaddr = hardware_address;
2784 params->tx_fifo_size = tx_fifo_size;
2785 params->rx_fifo_size = rx_fifo_size;
2786 params->m5reg = m5reg;
2787 return new NSGigE(params);
2788 }
2789
2790 REGISTER_SIM_OBJECT("NSGigE", NSGigE)