Merge from changes to Memtype and Bustype and Automerge
[gem5.git] / dev / ns_gige.cc
1 /*
2 * Copyright (c) 2004 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /* @file
30 * Device module for modelling the National Semiconductor
31 * DP83820 ethernet controller. Does not support priority queueing
32 */
33 #include <cstdio>
34 #include <deque>
35 #include <string>
36
37 #include "base/inet.hh"
38 #include "cpu/exec_context.hh"
39 #include "cpu/intr_control.hh"
40 #include "dev/dma.hh"
41 #include "dev/etherlink.hh"
42 #include "dev/ns_gige.hh"
43 #include "dev/pciconfigall.hh"
44 #include "mem/bus/bus.hh"
45 #include "mem/bus/dma_interface.hh"
46 #include "mem/bus/pio_interface.hh"
47 #include "mem/bus/pio_interface_impl.hh"
48 #include "mem/functional_mem/memory_control.hh"
49 #include "mem/functional_mem/physical_memory.hh"
50 #include "sim/builder.hh"
51 #include "sim/debug.hh"
52 #include "sim/host.hh"
53 #include "sim/stats.hh"
54 #include "targetarch/vtophys.hh"
55
// Human-readable names for the receive state machine states, indexed
// by the RxState enum; used only for debug/trace output.
const char *NsRxStateStrings[] =
{
    "rxIdle",
    "rxDescRefr",
    "rxDescRead",
    "rxFifoBlock",
    "rxFragWrite",
    "rxDescWrite",
    "rxAdvance"
};
66
// Human-readable names for the transmit state machine states, indexed
// by the TxState enum; used only for debug/trace output.
const char *NsTxStateStrings[] =
{
    "txIdle",
    "txDescRefr",
    "txDescRead",
    "txFifoBlock",
    "txFragRead",
    "txDescWrite",
    "txAdvance"
};
77
// Human-readable names for the DMA engine states, indexed by the
// DmaState enum; used only for debug/trace output.
const char *NsDmaState[] =
{
    "dmaIdle",
    "dmaReading",
    "dmaWriting",
    "dmaReadWaiting",
    "dmaWriteWaiting"
};
86
87 using namespace std;
88 using namespace Net;
89
90 ///////////////////////////////////////////////////////////////////////
91 //
92 // NSGigE PCI Device
93 //
/**
 * Construct the device.  Both state machines start idle and disabled;
 * real configuration happens later when the driver programs the
 * registers.  PIO and DMA bus interfaces are created according to
 * which buses were supplied in the parameters.
 */
NSGigE::NSGigE(Params *p)
    : PciDev(p), ioEnable(false),
      txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size),
      txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
      txXferLen(0), rxXferLen(0), txState(txIdle), txEnable(false),
      CTDD(false),
      txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
      rxEnable(false), CRDD(false), rxPktBytes(0),
      rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
      rxDmaReadEvent(this), rxDmaWriteEvent(this),
      txDmaReadEvent(this), txDmaWriteEvent(this),
      dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free),
      txDelay(p->tx_delay), rxDelay(p->rx_delay),
      rxKickTick(0), txKickTick(0),
      txEvent(this), rxFilterEnable(p->rx_filter), acceptBroadcast(false),
      acceptMulticast(false), acceptUnicast(false),
      acceptPerfect(false), acceptArp(false),
      physmem(p->pmem), intrTick(0), cpuPendingIntr(false),
      intrEvent(0), interface(0)
{
    // PIO goes on the header bus if one was given; DMA uses the
    // payload bus when present, otherwise the header bus for both.
    if (p->header_bus) {
        pioInterface = newPioInterface(name(), p->hier,
                                       p->header_bus, this,
                                       &NSGigE::cacheAccess);

        pioLatency = p->pio_latency * p->header_bus->clockRatio;

        if (p->payload_bus)
            dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                                 p->header_bus,
                                                 p->payload_bus, 1);
        else
            dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                                 p->header_bus,
                                                 p->header_bus, 1);
    } else if (p->payload_bus) {
        // no header bus: run both PIO and DMA over the payload bus
        pioInterface = newPioInterface(name(), p->hier,
                                       p->payload_bus, this,
                                       &NSGigE::cacheAccess);

        pioLatency = p->pio_latency * p->payload_bus->clockRatio;

        dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                             p->payload_bus,
                                             p->payload_bus, 1);
    }


    intrDelay = US2Ticks(p->intr_delay);
    dmaReadDelay = p->dma_read_delay;
    dmaWriteDelay = p->dma_write_delay;
    dmaReadFactor = p->dma_read_factor;
    dmaWriteFactor = p->dma_write_factor;

    // bring the register file to its hardware reset values and load
    // the configured MAC address into the perfect-match ROM
    regsReset();
    memcpy(&rom.perfectMatch, p->eaddr.bytes(), ETH_ADDR_LEN);
}
151
// Empty destructor: no explicit teardown is done here.
NSGigE::~NSGigE()
{}
154
155 void
156 NSGigE::regStats()
157 {
158 txBytes
159 .name(name() + ".txBytes")
160 .desc("Bytes Transmitted")
161 .prereq(txBytes)
162 ;
163
164 rxBytes
165 .name(name() + ".rxBytes")
166 .desc("Bytes Received")
167 .prereq(rxBytes)
168 ;
169
170 txPackets
171 .name(name() + ".txPackets")
172 .desc("Number of Packets Transmitted")
173 .prereq(txBytes)
174 ;
175
176 rxPackets
177 .name(name() + ".rxPackets")
178 .desc("Number of Packets Received")
179 .prereq(rxBytes)
180 ;
181
182 txIpChecksums
183 .name(name() + ".txIpChecksums")
184 .desc("Number of tx IP Checksums done by device")
185 .precision(0)
186 .prereq(txBytes)
187 ;
188
189 rxIpChecksums
190 .name(name() + ".rxIpChecksums")
191 .desc("Number of rx IP Checksums done by device")
192 .precision(0)
193 .prereq(rxBytes)
194 ;
195
196 txTcpChecksums
197 .name(name() + ".txTcpChecksums")
198 .desc("Number of tx TCP Checksums done by device")
199 .precision(0)
200 .prereq(txBytes)
201 ;
202
203 rxTcpChecksums
204 .name(name() + ".rxTcpChecksums")
205 .desc("Number of rx TCP Checksums done by device")
206 .precision(0)
207 .prereq(rxBytes)
208 ;
209
210 txUdpChecksums
211 .name(name() + ".txUdpChecksums")
212 .desc("Number of tx UDP Checksums done by device")
213 .precision(0)
214 .prereq(txBytes)
215 ;
216
217 rxUdpChecksums
218 .name(name() + ".rxUdpChecksums")
219 .desc("Number of rx UDP Checksums done by device")
220 .precision(0)
221 .prereq(rxBytes)
222 ;
223
224 descDmaReads
225 .name(name() + ".descDMAReads")
226 .desc("Number of descriptors the device read w/ DMA")
227 .precision(0)
228 ;
229
230 descDmaWrites
231 .name(name() + ".descDMAWrites")
232 .desc("Number of descriptors the device wrote w/ DMA")
233 .precision(0)
234 ;
235
236 descDmaRdBytes
237 .name(name() + ".descDmaReadBytes")
238 .desc("number of descriptor bytes read w/ DMA")
239 .precision(0)
240 ;
241
242 descDmaWrBytes
243 .name(name() + ".descDmaWriteBytes")
244 .desc("number of descriptor bytes write w/ DMA")
245 .precision(0)
246 ;
247
248
249 txBandwidth
250 .name(name() + ".txBandwidth")
251 .desc("Transmit Bandwidth (bits/s)")
252 .precision(0)
253 .prereq(txBytes)
254 ;
255
256 rxBandwidth
257 .name(name() + ".rxBandwidth")
258 .desc("Receive Bandwidth (bits/s)")
259 .precision(0)
260 .prereq(rxBytes)
261 ;
262
263 txPacketRate
264 .name(name() + ".txPPS")
265 .desc("Packet Tranmission Rate (packets/s)")
266 .precision(0)
267 .prereq(txBytes)
268 ;
269
270 rxPacketRate
271 .name(name() + ".rxPPS")
272 .desc("Packet Reception Rate (packets/s)")
273 .precision(0)
274 .prereq(rxBytes)
275 ;
276
277 postedSwi
278 .name(name() + ".postedSwi")
279 .desc("number of software interrupts posted to CPU")
280 .precision(0)
281 ;
282
283 totalSwi
284 .name(name() + ".totalSwi")
285 .desc("number of total Swi written to ISR")
286 .precision(0)
287 ;
288
289 coalescedSwi
290 .name(name() + ".coalescedSwi")
291 .desc("average number of Swi's coalesced into each post")
292 .precision(0)
293 ;
294
295 postedRxIdle
296 .name(name() + ".postedRxIdle")
297 .desc("number of rxIdle interrupts posted to CPU")
298 .precision(0)
299 ;
300
301 totalRxIdle
302 .name(name() + ".totalRxIdle")
303 .desc("number of total RxIdle written to ISR")
304 .precision(0)
305 ;
306
307 coalescedRxIdle
308 .name(name() + ".coalescedRxIdle")
309 .desc("average number of RxIdle's coalesced into each post")
310 .precision(0)
311 ;
312
313 postedRxOk
314 .name(name() + ".postedRxOk")
315 .desc("number of RxOk interrupts posted to CPU")
316 .precision(0)
317 ;
318
319 totalRxOk
320 .name(name() + ".totalRxOk")
321 .desc("number of total RxOk written to ISR")
322 .precision(0)
323 ;
324
325 coalescedRxOk
326 .name(name() + ".coalescedRxOk")
327 .desc("average number of RxOk's coalesced into each post")
328 .precision(0)
329 ;
330
331 postedRxDesc
332 .name(name() + ".postedRxDesc")
333 .desc("number of RxDesc interrupts posted to CPU")
334 .precision(0)
335 ;
336
337 totalRxDesc
338 .name(name() + ".totalRxDesc")
339 .desc("number of total RxDesc written to ISR")
340 .precision(0)
341 ;
342
343 coalescedRxDesc
344 .name(name() + ".coalescedRxDesc")
345 .desc("average number of RxDesc's coalesced into each post")
346 .precision(0)
347 ;
348
349 postedTxOk
350 .name(name() + ".postedTxOk")
351 .desc("number of TxOk interrupts posted to CPU")
352 .precision(0)
353 ;
354
355 totalTxOk
356 .name(name() + ".totalTxOk")
357 .desc("number of total TxOk written to ISR")
358 .precision(0)
359 ;
360
361 coalescedTxOk
362 .name(name() + ".coalescedTxOk")
363 .desc("average number of TxOk's coalesced into each post")
364 .precision(0)
365 ;
366
367 postedTxIdle
368 .name(name() + ".postedTxIdle")
369 .desc("number of TxIdle interrupts posted to CPU")
370 .precision(0)
371 ;
372
373 totalTxIdle
374 .name(name() + ".totalTxIdle")
375 .desc("number of total TxIdle written to ISR")
376 .precision(0)
377 ;
378
379 coalescedTxIdle
380 .name(name() + ".coalescedTxIdle")
381 .desc("average number of TxIdle's coalesced into each post")
382 .precision(0)
383 ;
384
385 postedTxDesc
386 .name(name() + ".postedTxDesc")
387 .desc("number of TxDesc interrupts posted to CPU")
388 .precision(0)
389 ;
390
391 totalTxDesc
392 .name(name() + ".totalTxDesc")
393 .desc("number of total TxDesc written to ISR")
394 .precision(0)
395 ;
396
397 coalescedTxDesc
398 .name(name() + ".coalescedTxDesc")
399 .desc("average number of TxDesc's coalesced into each post")
400 .precision(0)
401 ;
402
403 postedRxOrn
404 .name(name() + ".postedRxOrn")
405 .desc("number of RxOrn posted to CPU")
406 .precision(0)
407 ;
408
409 totalRxOrn
410 .name(name() + ".totalRxOrn")
411 .desc("number of total RxOrn written to ISR")
412 .precision(0)
413 ;
414
415 coalescedRxOrn
416 .name(name() + ".coalescedRxOrn")
417 .desc("average number of RxOrn's coalesced into each post")
418 .precision(0)
419 ;
420
421 coalescedTotal
422 .name(name() + ".coalescedTotal")
423 .desc("average number of interrupts coalesced into each post")
424 .precision(0)
425 ;
426
427 postedInterrupts
428 .name(name() + ".postedInterrupts")
429 .desc("number of posts to CPU")
430 .precision(0)
431 ;
432
433 droppedPackets
434 .name(name() + ".droppedPackets")
435 .desc("number of packets dropped")
436 .precision(0)
437 ;
438
439 coalescedSwi = totalSwi / postedInterrupts;
440 coalescedRxIdle = totalRxIdle / postedInterrupts;
441 coalescedRxOk = totalRxOk / postedInterrupts;
442 coalescedRxDesc = totalRxDesc / postedInterrupts;
443 coalescedTxOk = totalTxOk / postedInterrupts;
444 coalescedTxIdle = totalTxIdle / postedInterrupts;
445 coalescedTxDesc = totalTxDesc / postedInterrupts;
446 coalescedRxOrn = totalRxOrn / postedInterrupts;
447
448 coalescedTotal = (totalSwi + totalRxIdle + totalRxOk + totalRxDesc + totalTxOk
449 + totalTxIdle + totalTxDesc + totalRxOrn) / postedInterrupts;
450
451 txBandwidth = txBytes * Stats::constant(8) / simSeconds;
452 rxBandwidth = rxBytes * Stats::constant(8) / simSeconds;
453 txPacketRate = txPackets / simSeconds;
454 rxPacketRate = rxPackets / simSeconds;
455 }
456
457 /**
458 * This is to read the PCI general configuration registers
459 */
460 void
461 NSGigE::ReadConfig(int offset, int size, uint8_t *data)
462 {
463 if (offset < PCI_DEVICE_SPECIFIC)
464 PciDev::ReadConfig(offset, size, data);
465 else
466 panic("Device specific PCI config space not implemented!\n");
467 }
468
/**
 * Write to the PCI general configuration registers.  Device-specific
 * config space is not modelled; writes there abort the simulation.
 * After the base-class write, command-register and BAR writes are
 * inspected so the PIO interface and I/O-enable state stay in sync.
 */
void
NSGigE::WriteConfig(int offset, int size, uint32_t data)
{
    if (offset < PCI_DEVICE_SPECIFIC)
        PciDev::WriteConfig(offset, size, data);
    else
        panic("Device specific PCI config space not implemented!\n");

    // Need to catch writes to BARs to update the PIO interface
    switch (offset) {
        // seems to work fine without all these PCI settings, but i
        // put in the IO to double check, an assertion will fail if we
        // need to properly implement it
      case PCI_COMMAND:
        // track the I/O Space Enable bit; read()/write() assert on it
        if (config.data[offset] & PCI_CMD_IOSE)
            ioEnable = true;
        else
            ioEnable = false;

#if 0
        if (config.data[offset] & PCI_CMD_BME) {
            bmEnabled = true;
        }
        else {
            bmEnabled = false;
        }

        if (config.data[offset] & PCI_CMD_MSE) {
            memEnable = true;
        }
        else {
            memEnable = false;
        }
#endif
        break;

      case PCI0_BASE_ADDR0:
        if (BARAddrs[0] != 0) {
            // register the new address range with the PIO interface,
            // then strip the uncached-space bits from the stored BAR
            if (pioInterface)
                pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));

            BARAddrs[0] &= EV5::PAddrUncachedMask;
        }
        break;
      case PCI0_BASE_ADDR1:
        if (BARAddrs[1] != 0) {
            if (pioInterface)
                pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));

            BARAddrs[1] &= EV5::PAddrUncachedMask;
        }
        break;
    }
}
526
/**
 * This reads the device registers, which are detailed in the NS83820
 * spec sheet.  The PCI-config shadow and MIB counter ranges are
 * handled before the main register switch; only 32-bit accesses to
 * the register file are supported.  Note the read side effects:
 * reading CR clears the RXD/TXD/TXR/RXR bits, reading ISR clears all
 * interrupts, and reading MIBC clears MIBS/ACLR.
 */
Fault
NSGigE::read(MemReqPtr &req, uint8_t *data)
{
    assert(ioEnable);

    //The mask is to give you only the offset into the device register file
    Addr daddr = req->paddr & 0xfff;
    DPRINTF(EthernetPIO, "read da=%#x pa=%#x va=%#x size=%d\n",
            daddr, req->paddr, req->vaddr, req->size);


    // there are some reserved registers, you can see ns_gige_reg.h and
    // the spec sheet for details
    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        // this range shadows PCI config space
        ReadConfig(daddr & 0xff, req->size, data);
        return No_Fault;
    } else if (daddr >= MIB_START && daddr <= MIB_END) {
        // don't implement all the MIB's. hopefully the kernel
        // doesn't actually DEPEND upon their values
        // MIB are just hardware stats keepers
        uint32_t &reg = *(uint32_t *) data;
        reg = 0;
        return No_Fault;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    switch (req->size) {
      case sizeof(uint32_t):
        {
            uint32_t &reg = *(uint32_t *)data;

            switch (daddr) {
              case CR:
                reg = regs.command;
                //these are supposed to be cleared on a read
                reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
                break;

              case CFG:
                reg = regs.config;
                break;

              case MEAR:
                reg = regs.mear;
                break;

              case PTSCR:
                reg = regs.ptscr;
                break;

              case ISR:
                // reading the ISR acknowledges (clears) all interrupts
                reg = regs.isr;
                devIntrClear(ISR_ALL);
                break;

              case IMR:
                reg = regs.imr;
                break;

              case IER:
                reg = regs.ier;
                break;

              case IHR:
                reg = regs.ihr;
                break;

              case TXDP:
                reg = regs.txdp;
                break;

              case TXDP_HI:
                reg = regs.txdp_hi;
                break;

              case TXCFG:
                reg = regs.txcfg;
                break;

              case GPIOR:
                reg = regs.gpior;
                break;

              case RXDP:
                reg = regs.rxdp;
                break;

              case RXDP_HI:
                reg = regs.rxdp_hi;
                break;

              case RXCFG:
                reg = regs.rxcfg;
                break;

              case PQCR:
                reg = regs.pqcr;
                break;

              case WCSR:
                reg = regs.wcsr;
                break;

              case PCR:
                reg = regs.pcr;
                break;

                // see the spec sheet for how RFCR and RFDR work
                // basically, you write to RFCR to tell the machine
                // what you want to do next, then you act upon RFDR,
                // and the device will be prepared b/c of what you
                // wrote to RFCR
              case RFCR:
                reg = regs.rfcr;
                break;

              case RFDR:
                // RFCR_RFADDR selects which 16 bits of the perfect-match
                // ROM come back (little-endian byte pairs of the MAC)
                switch (regs.rfcr & RFCR_RFADDR) {
                  case 0x000:
                    reg = rom.perfectMatch[1];
                    reg = reg << 8;
                    reg += rom.perfectMatch[0];
                    break;
                  case 0x002:
                    reg = rom.perfectMatch[3] << 8;
                    reg += rom.perfectMatch[2];
                    break;
                  case 0x004:
                    reg = rom.perfectMatch[5] << 8;
                    reg += rom.perfectMatch[4];
                    break;
                  default:
                    panic("reading RFDR for something other than PMATCH!\n");
                    // didn't implement other RFDR functionality b/c
                    // driver didn't use it
                }
                break;

              case SRR:
                reg = regs.srr;
                break;

              case MIBC:
                reg = regs.mibc;
                // MIBS/ACLR are cleared on read
                reg &= ~(MIBC_MIBS | MIBC_ACLR);
                break;

              case VRCR:
                reg = regs.vrcr;
                break;

              case VTCR:
                reg = regs.vtcr;
                break;

              case VDR:
                reg = regs.vdr;
                break;

              case CCSR:
                reg = regs.ccsr;
                break;

              case TBICR:
                reg = regs.tbicr;
                break;

              case TBISR:
                reg = regs.tbisr;
                break;

              case TANAR:
                reg = regs.tanar;
                break;

              case TANLPAR:
                reg = regs.tanlpar;
                break;

              case TANER:
                reg = regs.taner;
                break;

              case TESR:
                reg = regs.tesr;
                break;

              default:
                panic("reading unimplemented register: addr=%#x", daddr);
            }

            DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
                    daddr, reg, reg);
        }
        break;

      default:
        panic("accessing register with invalid size: addr=%#x, size=%d",
              daddr, req->size);
    }

    return No_Fault;
}
736
/**
 * Handle a store to the memory-mapped device register file.  As with
 * read(), the PCI-config shadow range is dispatched first and only
 * 32-bit accesses are supported.  Writes to CR drive the tx/rx state
 * machines (enable/disable/reset/kick); many other registers are
 * stored but have little modelled effect, and writes to read-only or
 * unused-by-the-driver registers panic.
 */
Fault
NSGigE::write(MemReqPtr &req, const uint8_t *data)
{
    assert(ioEnable);

    Addr daddr = req->paddr & 0xfff;
    DPRINTF(EthernetPIO, "write da=%#x pa=%#x va=%#x size=%d\n",
            daddr, req->paddr, req->vaddr, req->size);

    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        // this range shadows PCI config space
        WriteConfig(daddr & 0xff, req->size, *(uint32_t *)data);
        return No_Fault;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    if (req->size == sizeof(uint32_t)) {
        uint32_t reg = *(uint32_t *)data;
        DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);

        switch (daddr) {
          case CR:
            regs.command = reg;
            // note: disable (TXD/RXD) takes precedence over enable
            if (reg & CR_TXD) {
                txEnable = false;
            } else if (reg & CR_TXE) {
                txEnable = true;

                // the kernel is enabling the transmit machine
                if (txState == txIdle)
                    txKick();
            }

            if (reg & CR_RXD) {
                rxEnable = false;
            } else if (reg & CR_RXE) {
                rxEnable = true;

                if (rxState == rxIdle)
                    rxKick();
            }

            if (reg & CR_TXR)
                txReset();

            if (reg & CR_RXR)
                rxReset();

            if (reg & CR_SWI)
                devIntrPost(ISR_SWI);

            if (reg & CR_RST) {
                // full chip reset: both state machines plus registers
                txReset();
                rxReset();

                regsReset();
            }
            break;

          case CFG:
            if (reg & CFG_LNKSTS ||
                reg & CFG_SPDSTS ||
                reg & CFG_DUPSTS ||
                reg & CFG_RESERVED ||
                reg & CFG_T64ADDR ||
                reg & CFG_PCI64_DET)
                panic("writing to read-only or reserved CFG bits!\n");

            regs.config |= reg & ~(CFG_LNKSTS | CFG_SPDSTS | CFG_DUPSTS |
                                   CFG_RESERVED | CFG_T64ADDR | CFG_PCI64_DET);

// all these #if 0's are because i don't THINK the kernel needs to
// have these implemented. if there is a problem relating to one of
// these, you may need to add functionality in.
#if 0
            if (reg & CFG_TBI_EN) ;
            if (reg & CFG_MODE_1000) ;
#endif

            if (reg & CFG_AUTO_1000)
                panic("CFG_AUTO_1000 not implemented!\n");

#if 0
            if (reg & CFG_PINT_DUPSTS ||
                reg & CFG_PINT_LNKSTS ||
                reg & CFG_PINT_SPDSTS)
                ;

            if (reg & CFG_TMRTEST) ;
            if (reg & CFG_MRM_DIS) ;
            if (reg & CFG_MWI_DIS) ;

            if (reg & CFG_T64ADDR)
                panic("CFG_T64ADDR is read only register!\n");

            if (reg & CFG_PCI64_DET)
                panic("CFG_PCI64_DET is read only register!\n");

            if (reg & CFG_DATA64_EN) ;
            if (reg & CFG_M64ADDR) ;
            if (reg & CFG_PHY_RST) ;
            if (reg & CFG_PHY_DIS) ;
#endif

            // extended-status descriptors carry checksum-offload info
            if (reg & CFG_EXTSTS_EN)
                extstsEnable = true;
            else
                extstsEnable = false;

#if 0
            if (reg & CFG_REQALG) ;
            if (reg & CFG_SB) ;
            if (reg & CFG_POW) ;
            if (reg & CFG_EXD) ;
            if (reg & CFG_PESEL) ;
            if (reg & CFG_BROM_DIS) ;
            if (reg & CFG_EXT_125) ;
            if (reg & CFG_BEM) ;
#endif
            break;

          case MEAR:
            regs.mear = reg;
            // since phy is completely faked, MEAR_MD* don't matter
            // and since the driver never uses MEAR_EE*, they don't
            // matter
#if 0
            if (reg & MEAR_EEDI) ;
            if (reg & MEAR_EEDO) ; // this one is read only
            if (reg & MEAR_EECLK) ;
            if (reg & MEAR_EESEL) ;
            if (reg & MEAR_MDIO) ;
            if (reg & MEAR_MDDIR) ;
            if (reg & MEAR_MDC) ;
#endif
            break;

          case PTSCR:
            regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
            // these control BISTs for various parts of chip - we
            // don't care or do just fake that the BIST is done
            if (reg & PTSCR_RBIST_EN)
                regs.ptscr |= PTSCR_RBIST_DONE;
            if (reg & PTSCR_EEBIST_EN)
                regs.ptscr &= ~PTSCR_EEBIST_EN;
            if (reg & PTSCR_EELOAD_EN)
                regs.ptscr &= ~PTSCR_EELOAD_EN;
            break;

          case ISR: /* writing to the ISR has no effect */
            panic("ISR is a read only register!\n");

          case IMR:
            // changing the mask may immediately post or clear the
            // CPU interrupt line
            regs.imr = reg;
            devIntrChangeMask();
            break;

          case IER:
            regs.ier = reg;
            break;

          case IHR:
            regs.ihr = reg;
            /* not going to implement real interrupt holdoff */
            break;

          case TXDP:
            // descriptor pointers are dword aligned; a new pointer
            // invalidates the cached tx descriptor
            regs.txdp = (reg & 0xFFFFFFFC);
            assert(txState == txIdle);
            CTDD = false;
            break;

          case TXDP_HI:
            regs.txdp_hi = reg;
            break;

          case TXCFG:
            regs.txcfg = reg;
#if 0
            if (reg & TXCFG_CSI) ;
            if (reg & TXCFG_HBI) ;
            if (reg & TXCFG_MLB) ;
            if (reg & TXCFG_ATP) ;
            if (reg & TXCFG_ECRETRY) {
                /*
                 * this could easily be implemented, but considering
                 * the network is just a fake pipe, wouldn't make
                 * sense to do this
                 */
            }

            if (reg & TXCFG_BRST_DIS) ;
#endif

#if 0
            /* we handle our own DMA, ignore the kernel's exhortations */
            if (reg & TXCFG_MXDMA) ;
#endif

            // also, we currently don't care about fill/drain
            // thresholds though this may change in the future with
            // more realistic networks or a driver which changes it
            // according to feedback

            break;

          case GPIOR:
            regs.gpior = reg;
            /* these just control general purpose i/o pins, don't matter */
            break;

          case RXDP:
            // new rx descriptor pointer invalidates the cached one
            regs.rxdp = reg;
            CRDD = false;
            break;

          case RXDP_HI:
            regs.rxdp_hi = reg;
            break;

          case RXCFG:
            regs.rxcfg = reg;
#if 0
            if (reg & RXCFG_AEP) ;
            if (reg & RXCFG_ARP) ;
            if (reg & RXCFG_STRIPCRC) ;
            if (reg & RXCFG_RX_RD) ;
            if (reg & RXCFG_ALP) ;
            if (reg & RXCFG_AIRL) ;

            /* we handle our own DMA, ignore what kernel says about it */
            if (reg & RXCFG_MXDMA) ;

            //also, we currently don't care about fill/drain thresholds
            //though this may change in the future with more realistic
            //networks or a driver which changes it according to feedback
            if (reg & (RXCFG_DRTH | RXCFG_DRTH0)) ;
#endif
            break;

          case PQCR:
            /* there is no priority queueing used in the linux 2.6 driver */
            regs.pqcr = reg;
            break;

          case WCSR:
            /* not going to implement wake on LAN */
            regs.wcsr = reg;
            break;

          case PCR:
            /* not going to implement pause control */
            regs.pcr = reg;
            break;

          case RFCR:
            // receive-filter control: latch the accept flags used by
            // rxFilter()
            regs.rfcr = reg;

            rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
            acceptBroadcast = (reg & RFCR_AAB) ? true : false;
            acceptMulticast = (reg & RFCR_AAM) ? true : false;
            acceptUnicast = (reg & RFCR_AAU) ? true : false;
            acceptPerfect = (reg & RFCR_APM) ? true : false;
            acceptArp = (reg & RFCR_AARP) ? true : false;

#if 0
            if (reg & RFCR_APAT)
                panic("RFCR_APAT not implemented!\n");
#endif

            if (reg & RFCR_MHEN || reg & RFCR_UHEN)
                panic("hash filtering not implemented!\n");

            if (reg & RFCR_ULM)
                panic("RFCR_ULM not implemented!\n");

            break;

          case RFDR:
            panic("the driver never writes to RFDR, something is wrong!\n");

          case BRAR:
            panic("the driver never uses BRAR, something is wrong!\n");

          case BRDR:
            panic("the driver never uses BRDR, something is wrong!\n");

          case SRR:
            panic("SRR is read only register!\n");

          case MIBC:
            panic("the driver never uses MIBC, something is wrong!\n");

          case VRCR:
            regs.vrcr = reg;
            break;

          case VTCR:
            regs.vtcr = reg;
            break;

          case VDR:
            panic("the driver never uses VDR, something is wrong!\n");
            break;

          case CCSR:
            /* not going to implement clockrun stuff */
            regs.ccsr = reg;
            break;

          case TBICR:
            regs.tbicr = reg;
            if (reg & TBICR_MR_LOOPBACK)
                panic("TBICR_MR_LOOPBACK never used, something wrong!\n");

            if (reg & TBICR_MR_AN_ENABLE) {
                // fake autonegotiation: mirror our advertised
                // abilities as the link partner's and report complete
                regs.tanlpar = regs.tanar;
                regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
            }

#if 0
            if (reg & TBICR_MR_RESTART_AN) ;
#endif

            break;

          case TBISR:
            panic("TBISR is read only register!\n");

          case TANAR:
            regs.tanar = reg;
            if (reg & TANAR_PS2)
                panic("this isn't used in driver, something wrong!\n");

            if (reg & TANAR_PS1)
                panic("this isn't used in driver, something wrong!\n");
            break;

          case TANLPAR:
            panic("this should only be written to by the fake phy!\n");

          case TANER:
            panic("TANER is read only register!\n");

          case TESR:
            regs.tesr = reg;
            break;

          default:
            panic("invalid register access daddr=%#x", daddr);
        }
    } else {
        panic("Invalid Request Size");
    }

    return No_Fault;
}
1095
/**
 * Post (set) device interrupt bits in the ISR.  Unimplemented causes
 * are warned about and stripped.  Per-cause "total" stats are bumped
 * only for causes that are currently unmasked.  If any unmasked
 * interrupt is now pending, a CPU interrupt is scheduled, delayed by
 * intrDelay unless one of the pending unmasked causes is in
 * ISR_NODELAY.
 */
void
NSGigE::devIntrPost(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot set a reserved interrupt");

    if (interrupts & ISR_NOIMPL)
        warn("interrupt not implemented %#x\n", interrupts);

    interrupts &= ~ISR_NOIMPL;
    regs.isr |= interrupts;

    // count each newly-written cause, but only when unmasked
    if (interrupts & regs.imr) {
        if (interrupts & ISR_SWI) {
            totalSwi++;
        }
        if (interrupts & ISR_RXIDLE) {
            totalRxIdle++;
        }
        if (interrupts & ISR_RXOK) {
            totalRxOk++;
        }
        if (interrupts & ISR_RXDESC) {
            totalRxDesc++;
        }
        if (interrupts & ISR_TXOK) {
            totalTxOk++;
        }
        if (interrupts & ISR_TXIDLE) {
            totalTxIdle++;
        }
        if (interrupts & ISR_TXDESC) {
            totalTxDesc++;
        }
        if (interrupts & ISR_RXORN) {
            totalRxOrn++;
        }
    }

    DPRINTF(EthernetIntr,
            "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
            interrupts, regs.isr, regs.imr);

    if ((regs.isr & regs.imr)) {
        Tick when = curTick;
        if (!(regs.isr & regs.imr & ISR_NODELAY))
            when += intrDelay;
        cpuIntrPost(when);
    }
}
1146
1147 /* writing this interrupt counting stats inside this means that this function
1148 is now limited to being used to clear all interrupts upon the kernel
1149 reading isr and servicing. just telling you in case you were thinking
1150 of expanding use.
1151 */
/**
 * Clear device interrupt bits from the ISR.  Before clearing, the
 * per-cause "posted" stats are bumped for every cause that was both
 * set and unmasked (i.e. actually delivered), and postedInterrupts is
 * bumped once if any such cause existed.  If nothing unmasked remains
 * pending afterwards, the CPU interrupt line is deasserted.
 */
void
NSGigE::devIntrClear(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot clear a reserved interrupt");

    if (regs.isr & regs.imr & ISR_SWI) {
        postedSwi++;
    }
    if (regs.isr & regs.imr & ISR_RXIDLE) {
        postedRxIdle++;
    }
    if (regs.isr & regs.imr & ISR_RXOK) {
        postedRxOk++;
    }
    if (regs.isr & regs.imr & ISR_RXDESC) {
        postedRxDesc++;
    }
    if (regs.isr & regs.imr & ISR_TXOK) {
        postedTxOk++;
    }
    if (regs.isr & regs.imr & ISR_TXIDLE) {
        postedTxIdle++;
    }
    if (regs.isr & regs.imr & ISR_TXDESC) {
        postedTxDesc++;
    }
    if (regs.isr & regs.imr & ISR_RXORN) {
        postedRxOrn++;
    }

    if (regs.isr & regs.imr & (ISR_SWI | ISR_RXIDLE | ISR_RXOK | ISR_RXDESC |
                               ISR_TXOK | ISR_TXIDLE | ISR_TXDESC | ISR_RXORN) )
        postedInterrupts++;

    interrupts &= ~ISR_NOIMPL;
    regs.isr &= ~interrupts;

    DPRINTF(EthernetIntr,
            "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
            interrupts, regs.isr, regs.imr);

    if (!(regs.isr & regs.imr))
        cpuIntrClear();
}
1197
1198 void
1199 NSGigE::devIntrChangeMask()
1200 {
1201 DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
1202 regs.isr, regs.imr, regs.isr & regs.imr);
1203
1204 if (regs.isr & regs.imr)
1205 cpuIntrPost(curTick);
1206 else
1207 cpuIntrClear();
1208 }
1209
/**
 * Schedule delivery of the device interrupt to the CPU at tick
 * 'when'.  If an earlier delivery is already scheduled, this request
 * is absorbed into it; otherwise any pending event is squashed and a
 * new one scheduled for the (possibly earlier) tick.
 */
void
NSGigE::cpuIntrPost(Tick when)
{
    // If the interrupt you want to post is later than an interrupt
    // already scheduled, just let it post in the coming one and don't
    // schedule another.
    // HOWEVER, must be sure that the scheduled intrTick is in the
    // future (this was formerly the source of a bug)
    /**
     * @todo this warning should be removed and the intrTick code should
     * be fixed.
     */
    assert(when >= curTick);
    assert(intrTick >= curTick || intrTick == 0);
    if (when > intrTick && intrTick != 0) {
        DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
                intrTick);
        return;
    }

    intrTick = when;
    // defensive clamp: never schedule in the past (see @todo above)
    if (intrTick < curTick) {
        debug_break();
        intrTick = curTick;
    }

    DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
            intrTick);

    // replace any previously scheduled delivery with the new one
    if (intrEvent)
        intrEvent->squash();
    intrEvent = new IntrEvent(this, true);
    intrEvent->schedule(intrTick);
}
1244
1245 void
1246 NSGigE::cpuInterrupt()
1247 {
1248 assert(intrTick == curTick);
1249
1250 // Whether or not there's a pending interrupt, we don't care about
1251 // it anymore
1252 intrEvent = 0;
1253 intrTick = 0;
1254
1255 // Don't send an interrupt if there's already one
1256 if (cpuPendingIntr) {
1257 DPRINTF(EthernetIntr,
1258 "would send an interrupt now, but there's already pending\n");
1259 } else {
1260 // Send interrupt
1261 cpuPendingIntr = true;
1262
1263 DPRINTF(EthernetIntr, "posting interrupt\n");
1264 intrPost();
1265 }
1266 }
1267
1268 void
1269 NSGigE::cpuIntrClear()
1270 {
1271 if (!cpuPendingIntr)
1272 return;
1273
1274 if (intrEvent) {
1275 intrEvent->squash();
1276 intrEvent = 0;
1277 }
1278
1279 intrTick = 0;
1280
1281 cpuPendingIntr = false;
1282
1283 DPRINTF(EthernetIntr, "clearing interrupt\n");
1284 intrClear();
1285 }
1286
1287 bool
1288 NSGigE::cpuIntrPending() const
1289 { return cpuPendingIntr; }
1290
1291 void
1292 NSGigE::txReset()
1293 {
1294
1295 DPRINTF(Ethernet, "transmit reset\n");
1296
1297 CTDD = false;
1298 txEnable = false;;
1299 txFragPtr = 0;
1300 assert(txDescCnt == 0);
1301 txFifo.clear();
1302 txState = txIdle;
1303 assert(txDmaState == dmaIdle);
1304 }
1305
1306 void
1307 NSGigE::rxReset()
1308 {
1309 DPRINTF(Ethernet, "receive reset\n");
1310
1311 CRDD = false;
1312 assert(rxPktBytes == 0);
1313 rxEnable = false;
1314 rxFragPtr = 0;
1315 assert(rxDescCnt == 0);
1316 assert(rxDmaState == dmaIdle);
1317 rxFifo.clear();
1318 rxState = rxIdle;
1319 }
1320
/**
 * Restore the device register file and filter flags to their
 * hardware power-on/reset values.
 */
void
NSGigE::regsReset()
{
    memset(&regs, 0, sizeof(regs));
    regs.config = CFG_LNKSTS;   // report link up
    regs.mear = MEAR_MDDIR | MEAR_EEDO;
    regs.txcfg = 0x120; // set drain threshold to 1024 bytes and
                        // fill threshold to 32 bytes
    regs.rxcfg = 0x4; // set drain threshold to 16 bytes
    regs.srr = 0x0103; // set the silicon revision to rev B or 0x103
    regs.mibc = MIBC_FRZ;
    regs.vdr = 0x81; // set the vlan tag type to 802.1q
    regs.tesr = 0xc000; // TBI capable of both full and half duplex

    // receive filter defaults: accept nothing until the driver
    // programs RFCR
    extstsEnable = false;
    acceptBroadcast = false;
    acceptMulticast = false;
    acceptUnicast = false;
    acceptPerfect = false;
    acceptArp = false;
}
1342
1343 void
1344 NSGigE::rxDmaReadCopy()
1345 {
1346 assert(rxDmaState == dmaReading);
1347
1348 physmem->dma_read((uint8_t *)rxDmaData, rxDmaAddr, rxDmaLen);
1349 rxDmaState = dmaIdle;
1350
1351 DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
1352 rxDmaAddr, rxDmaLen);
1353 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1354 }
1355
bool
NSGigE::doRxDmaRead()
{
    // Start an rx-side DMA read (descriptor or refresh fetch).
    // Returns true when the caller must wait for a completion event,
    // false when the copy completed immediately.
    assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
    rxDmaState = dmaReading;

    // Timing path: if a DMA interface is attached and this transfer
    // isn't marked "free", either queue behind the busy interface or
    // hand the transfer off; rxDmaReadEvent fires on completion.
    if (dmaInterface && !rxDmaFree) {
        if (dmaInterface->busy())
            rxDmaState = dmaReadWaiting;
        else
            dmaInterface->doDMA(Read, rxDmaAddr, rxDmaLen, curTick,
                                &rxDmaReadEvent, true);
        return true;
    }

    // Zero-latency model: do the functional copy right now.
    if (dmaReadDelay == 0 && dmaReadFactor == 0) {
        rxDmaReadCopy();
        return false;
    }

    // Simple latency model: fixed delay plus a per-64-byte-chunk
    // factor (length rounded up to a multiple of 64).
    Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
    Tick start = curTick + dmaReadDelay + factor;
    rxDmaReadEvent.schedule(start);
    return true;
}
1381
void
NSGigE::rxDmaReadDone()
{
    // Completion handler for a delayed rx DMA read.
    assert(rxDmaState == dmaReading);
    rxDmaReadCopy();

    // If the transmit state machine has a pending DMA, let it go first
    if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
        txKick();

    rxKick();
}
1394
1395 void
1396 NSGigE::rxDmaWriteCopy()
1397 {
1398 assert(rxDmaState == dmaWriting);
1399
1400 physmem->dma_write(rxDmaAddr, (uint8_t *)rxDmaData, rxDmaLen);
1401 rxDmaState = dmaIdle;
1402
1403 DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
1404 rxDmaAddr, rxDmaLen);
1405 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1406 }
1407
bool
NSGigE::doRxDmaWrite()
{
    // Start an rx-side DMA write (packet data or descriptor
    // writeback).  Returns true when the caller must wait for a
    // completion event, false when the copy completed immediately.
    assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
    rxDmaState = dmaWriting;

    // Timing path: queue behind a busy DMA interface or hand the
    // transfer off; rxDmaWriteEvent fires on completion.
    if (dmaInterface && !rxDmaFree) {
        if (dmaInterface->busy())
            rxDmaState = dmaWriteWaiting;
        else
            dmaInterface->doDMA(WriteInvalidate, rxDmaAddr, rxDmaLen, curTick,
                                &rxDmaWriteEvent, true);
        return true;
    }

    // Zero-latency model: do the functional copy right now.
    if (dmaWriteDelay == 0 && dmaWriteFactor == 0) {
        rxDmaWriteCopy();
        return false;
    }

    // Simple latency model: fixed delay plus a per-64-byte-chunk
    // factor (length rounded up to a multiple of 64).
    Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
    Tick start = curTick + dmaWriteDelay + factor;
    rxDmaWriteEvent.schedule(start);
    return true;
}
1433
void
NSGigE::rxDmaWriteDone()
{
    // Completion handler for a delayed rx DMA write.
    assert(rxDmaState == dmaWriting);
    rxDmaWriteCopy();

    // If the transmit state machine has a pending DMA, let it go first
    if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
        txKick();

    rxKick();
}
1446
void
NSGigE::rxKick()
{
    // Advance the receive state machine as far as it can go without
    // blocking on a DMA.  States chain via "goto next"; a state that
    // must wait for a DMA completion jumps to "exit", and the DMA
    // completion handler re-enters this function.
    DPRINTF(EthernetSM, "receive kick rxState=%s (rxBuf.size=%d)\n",
            NsRxStateStrings[rxState], rxFifo.size());

    // Honor a throttle point: nothing may run before rxKickTick.
    if (rxKickTick > curTick) {
        DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
                rxKickTick);
        return;
    }

  next:
    // First retry any rx DMA that stalled behind a busy interface.
    switch(rxDmaState) {
      case dmaReadWaiting:
        if (doRxDmaRead())
            goto exit;
        break;
      case dmaWriteWaiting:
        if (doRxDmaWrite())
            goto exit;
        break;
      default:
        break;
    }

    // see state machine from spec for details
    // the way this works is, if you finish work on one state and can
    // go directly to another, you do that through jumping to the
    // label "next". however, if you have intermediate work, like DMA
    // so that you can't go to the next state yet, you go to exit and
    // exit the loop. however, when the DMA is done it will trigger
    // an event and come back to this loop.
    switch (rxState) {
      case rxIdle:
        if (!rxEnable) {
            DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n");
            goto exit;
        }

        if (CRDD) {
            // Current descriptor was already consumed: re-fetch only
            // its link field to see if the driver appended more.
            rxState = rxDescRefr;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            // NOTE(review): this is ns_desc* pointer arithmetic, so
            // the byte offset is scaled by sizeof(ns_desc); it only
            // lands on the right address because offsetof(ns_desc,
            // link) is 0 (link is the first member) -- confirm before
            // reordering ns_desc fields.
            rxDmaData = &rxDescCache + offsetof(ns_desc, link);
            rxDmaLen = sizeof(rxDescCache.link);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        } else {
            // Fetch the full descriptor at regs.rxdp.
            rxState = rxDescRead;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = &rxDescCache;
            rxDmaLen = sizeof(ns_desc);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      case rxDescRefr:
        if (rxDmaState != dmaIdle)
            goto exit;

        rxState = rxAdvance;
        break;

      case rxDescRead:
        if (rxDmaState != dmaIdle)
            goto exit;

        DPRINTF(EthernetDesc,
                "rxDescCache: addr=%08x read descriptor\n",
                regs.rxdp & 0x3fffffff);
        DPRINTF(EthernetDesc,
                "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
                rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts,
                rxDescCache.extsts);

        // OWN set means the device (not the driver) still owns this
        // descriptor's result, i.e. there is no fresh buffer to fill.
        if (rxDescCache.cmdsts & CMDSTS_OWN) {
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            goto exit;
        } else {
            rxState = rxFifoBlock;
            rxFragPtr = rxDescCache.bufptr;
            rxDescCnt = rxDescCache.cmdsts & CMDSTS_LEN_MASK;
        }
        break;

      case rxFifoBlock:
        if (!rxPacket) {
            /**
             * @todo in reality, we should be able to start processing
             * the packet as it arrives, and not have to wait for the
             * full packet ot be in the receive fifo.
             */
            if (rxFifo.empty())
                goto exit;

            DPRINTF(EthernetSM, "****processing receive of new packet****\n");

            // If we don't have a packet, grab a new one from the fifo.
            rxPacket = rxFifo.front();
            rxPktBytes = rxPacket->length;
            rxPacketBufPtr = rxPacket->data;

#if TRACING_ON
            if (DTRACE(Ethernet)) {
                IpPtr ip(rxPacket);
                if (ip) {
                    DPRINTF(Ethernet, "ID is %d\n", ip->id());
                    TcpPtr tcp(ip);
                    if (tcp) {
                        DPRINTF(Ethernet, "Src Port=%d, Dest Port=%d\n",
                                tcp->sport(), tcp->dport());
                    }
                }
            }
#endif

            // sanity check - i think the driver behaves like this
            assert(rxDescCnt >= rxPktBytes);
            rxFifo.pop();
        }


        // dont' need the && rxDescCnt > 0 if driver sanity check
        // above holds
        if (rxPktBytes > 0) {
            // Still packet data to move: DMA it into the descriptor's
            // buffer, then come back through rxFragWrite.
            rxState = rxFragWrite;
            // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
            // check holds
            rxXferLen = rxPktBytes;

            rxDmaAddr = rxFragPtr & 0x3fffffff;
            rxDmaData = rxPacketBufPtr;
            rxDmaLen = rxXferLen;
            rxDmaFree = dmaDataFree;

            if (doRxDmaWrite())
                goto exit;

        } else {
            // Whole packet delivered: build the status words and
            // write cmdsts/extsts back to the descriptor.
            rxState = rxDescWrite;

            //if (rxPktBytes == 0) {  /* packet is done */
            assert(rxPktBytes == 0);
            DPRINTF(EthernetSM, "done with receiving packet\n");

            rxDescCache.cmdsts |= CMDSTS_OWN;
            rxDescCache.cmdsts &= ~CMDSTS_MORE;
            rxDescCache.cmdsts |= CMDSTS_OK;
            rxDescCache.cmdsts &= 0xffff0000;
            rxDescCache.cmdsts += rxPacket->length;   //i.e. set CMDSTS_SIZE

#if 0
            /*
             * all the driver uses these are for its own stats keeping
             * which we don't care about, aren't necessary for
             * functionality and doing this would just slow us down.
             * if they end up using this in a later version for
             * functional purposes, just undef
             */
            if (rxFilterEnable) {
                rxDescCache.cmdsts &= ~CMDSTS_DEST_MASK;
                const EthAddr &dst = rxFifoFront()->dst();
                if (dst->unicast())
                    rxDescCache.cmdsts |= CMDSTS_DEST_SELF;
                if (dst->multicast())
                    rxDescCache.cmdsts |= CMDSTS_DEST_MULTI;
                if (dst->broadcast())
                    rxDescCache.cmdsts |= CMDSTS_DEST_MASK;
            }
#endif

            // Checksum offload status: verify IP/TCP/UDP checksums
            // and record errors in extsts when extsts is enabled.
            IpPtr ip(rxPacket);
            if (extstsEnable && ip) {
                rxDescCache.extsts |= EXTSTS_IPPKT;
                rxIpChecksums++;
                if (cksum(ip) != 0) {
                    DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
                    rxDescCache.extsts |= EXTSTS_IPERR;
                }
                TcpPtr tcp(ip);
                UdpPtr udp(ip);
                if (tcp) {
                    rxDescCache.extsts |= EXTSTS_TCPPKT;
                    rxTcpChecksums++;
                    if (cksum(tcp) != 0) {
                        DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
                        rxDescCache.extsts |= EXTSTS_TCPERR;

                    }
                } else if (udp) {
                    rxDescCache.extsts |= EXTSTS_UDPPKT;
                    rxUdpChecksums++;
                    if (cksum(udp) != 0) {
                        DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
                        rxDescCache.extsts |= EXTSTS_UDPERR;
                    }
                }
            }
            rxPacket = 0;

            /*
             * the driver seems to always receive into desc buffers
             * of size 1514, so you never have a pkt that is split
             * into multiple descriptors on the receive side, so
             * i don't implement that case, hence the assert above.
             */

            DPRINTF(EthernetDesc,
                    "rxDescCache: addr=%08x writeback cmdsts extsts\n",
                    regs.rxdp & 0x3fffffff);
            DPRINTF(EthernetDesc,
                    "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
                    rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts,
                    rxDescCache.extsts);

            // Write back cmdsts and the adjacent extsts in one DMA.
            rxDmaAddr = (regs.rxdp + offsetof(ns_desc, cmdsts)) & 0x3fffffff;
            rxDmaData = &(rxDescCache.cmdsts);
            rxDmaLen = sizeof(rxDescCache.cmdsts) + sizeof(rxDescCache.extsts);
            rxDmaFree = dmaDescFree;

            descDmaWrites++;
            descDmaWrBytes += rxDmaLen;

            if (doRxDmaWrite())
                goto exit;
        }
        break;

      case rxFragWrite:
        if (rxDmaState != dmaIdle)
            goto exit;

        // A fragment landed: advance the packet and buffer cursors.
        rxPacketBufPtr += rxXferLen;
        rxFragPtr += rxXferLen;
        rxPktBytes -= rxXferLen;

        rxState = rxFifoBlock;
        break;

      case rxDescWrite:
        if (rxDmaState != dmaIdle)
            goto exit;

        assert(rxDescCache.cmdsts & CMDSTS_OWN);

        assert(rxPacket == 0);
        devIntrPost(ISR_RXOK);

        // Per-descriptor interrupt, if the driver requested one.
        if (rxDescCache.cmdsts & CMDSTS_INTR)
            devIntrPost(ISR_RXDESC);

        if (!rxEnable) {
            DPRINTF(EthernetSM, "Halting the RX state machine\n");
            rxState = rxIdle;
            goto exit;
        } else
            rxState = rxAdvance;
        break;

      case rxAdvance:
        // Follow the descriptor chain; a null link means the ring is
        // exhausted (remember via CRDD so rxIdle does a refresh).
        if (rxDescCache.link == 0) {
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            CRDD = true;
            goto exit;
        } else {
            rxState = rxDescRead;
            regs.rxdp = rxDescCache.link;
            CRDD = false;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = &rxDescCache;
            rxDmaLen = sizeof(ns_desc);
            rxDmaFree = dmaDescFree;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("Invalid rxState!");
    }

    DPRINTF(EthernetSM, "entering next rxState=%s\n",
            NsRxStateStrings[rxState]);

    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
            NsRxStateStrings[rxState]);
}
1757
void
NSGigE::transmit()
{
    // Try to hand the packet at the head of the tx fifo to the
    // ethernet interface; if the interface can't take it (or more
    // packets remain), a retry is scheduled below.
    if (txFifo.empty()) {
        DPRINTF(Ethernet, "nothing to transmit\n");
        return;
    }

    DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n",
            txFifo.size());
    if (interface->sendPacket(txFifo.front())) {
#if TRACING_ON
        if (DTRACE(Ethernet)) {
            IpPtr ip(txFifo.front());
            if (ip) {
                DPRINTF(Ethernet, "ID is %d\n", ip->id());
                TcpPtr tcp(ip);
                if (tcp) {
                    DPRINTF(Ethernet, "Src Port=%d, Dest Port=%d\n",
                            tcp->sport(), tcp->dport());
                }
            }
        }
#endif

        DDUMP(Ethernet, txFifo.front()->data, txFifo.front()->length);
        // Update stats before popping, while front() is still valid.
        txBytes += txFifo.front()->length;
        txPackets++;

        DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n",
                txFifo.avail());
        txFifo.pop();

        /*
         * normally do a writeback of the descriptor here, and ONLY
         * after that is done, send this interrupt. but since our
         * stuff never actually fails, just do this interrupt here,
         * otherwise the code has to stray from this nice format.
         * besides, it's functionally the same.
         */
        devIntrPost(ISR_TXOK);
    }

    // More data queued: retry 1000 ticks from now (unless a retry is
    // already pending).
    if (!txFifo.empty() && !txEvent.scheduled()) {
        DPRINTF(Ethernet, "reschedule transmit\n");
        txEvent.schedule(curTick + 1000);
    }
}
1806
1807 void
1808 NSGigE::txDmaReadCopy()
1809 {
1810 assert(txDmaState == dmaReading);
1811
1812 physmem->dma_read((uint8_t *)txDmaData, txDmaAddr, txDmaLen);
1813 txDmaState = dmaIdle;
1814
1815 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
1816 txDmaAddr, txDmaLen);
1817 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1818 }
1819
bool
NSGigE::doTxDmaRead()
{
    // Start a tx-side DMA read (descriptor or packet data fetch).
    // Returns true when the caller must wait for a completion event,
    // false when the copy completed immediately.
    assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
    txDmaState = dmaReading;

    // Timing path: queue behind a busy DMA interface or hand the
    // transfer off; txDmaReadEvent fires on completion.
    if (dmaInterface && !txDmaFree) {
        if (dmaInterface->busy())
            txDmaState = dmaReadWaiting;
        else
            dmaInterface->doDMA(Read, txDmaAddr, txDmaLen, curTick,
                                &txDmaReadEvent, true);
        return true;
    }

    // Zero-latency model: do the functional copy right now.
    // (NOTE(review): compares dmaReadFactor against 0.0 where the rx
    // side uses 0 -- presumably equivalent; confirm the field type.)
    if (dmaReadDelay == 0 && dmaReadFactor == 0.0) {
        txDmaReadCopy();
        return false;
    }

    // Simple latency model: fixed delay plus a per-64-byte-chunk
    // factor (length rounded up to a multiple of 64).
    Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
    Tick start = curTick + dmaReadDelay + factor;
    txDmaReadEvent.schedule(start);
    return true;
}
1845
void
NSGigE::txDmaReadDone()
{
    // Completion handler for a delayed tx DMA read.
    assert(txDmaState == dmaReading);
    txDmaReadCopy();

    // If the receive state machine has a pending DMA, let it go first
    if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
        rxKick();

    txKick();
}
1858
1859 void
1860 NSGigE::txDmaWriteCopy()
1861 {
1862 assert(txDmaState == dmaWriting);
1863
1864 physmem->dma_write(txDmaAddr, (uint8_t *)txDmaData, txDmaLen);
1865 txDmaState = dmaIdle;
1866
1867 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
1868 txDmaAddr, txDmaLen);
1869 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1870 }
1871
bool
NSGigE::doTxDmaWrite()
{
    // Start a tx-side DMA write (descriptor writeback).  Returns true
    // when the caller must wait for a completion event, false when
    // the copy completed immediately.
    assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
    txDmaState = dmaWriting;

    // Timing path: queue behind a busy DMA interface or hand the
    // transfer off; txDmaWriteEvent fires on completion.
    if (dmaInterface && !txDmaFree) {
        if (dmaInterface->busy())
            txDmaState = dmaWriteWaiting;
        else
            dmaInterface->doDMA(WriteInvalidate, txDmaAddr, txDmaLen, curTick,
                                &txDmaWriteEvent, true);
        return true;
    }

    // Zero-latency model: do the functional copy right now.
    if (dmaWriteDelay == 0 && dmaWriteFactor == 0.0) {
        txDmaWriteCopy();
        return false;
    }

    // Simple latency model: fixed delay plus a per-64-byte-chunk
    // factor (length rounded up to a multiple of 64).
    Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
    Tick start = curTick + dmaWriteDelay + factor;
    txDmaWriteEvent.schedule(start);
    return true;
}
1897
void
NSGigE::txDmaWriteDone()
{
    // Completion handler for a delayed tx DMA write.
    assert(txDmaState == dmaWriting);
    txDmaWriteCopy();

    // If the receive state machine has a pending DMA, let it go first
    if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
        rxKick();

    txKick();
}
1910
void
NSGigE::txKick()
{
    // Advance the transmit state machine as far as it can go without
    // blocking on a DMA.  Same goto-next/goto-exit structure as
    // rxKick(): a DMA completion event re-enters this function.
    DPRINTF(EthernetSM, "transmit kick txState=%s\n",
            NsTxStateStrings[txState]);

    // Honor a throttle point: nothing may run before txKickTick.
    if (txKickTick > curTick) {
        DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
                txKickTick);

        return;
    }

  next:
    // First retry any tx DMA that stalled behind a busy interface.
    switch(txDmaState) {
      case dmaReadWaiting:
        if (doTxDmaRead())
            goto exit;
        break;
      case dmaWriteWaiting:
        if (doTxDmaWrite())
            goto exit;
        break;
      default:
        break;
    }

    switch (txState) {
      case txIdle:
        if (!txEnable) {
            DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n");
            goto exit;
        }

        if (CTDD) {
            // Current descriptor was already consumed: re-fetch only
            // its link field to see if the driver appended more.
            txState = txDescRefr;

            txDmaAddr = regs.txdp & 0x3fffffff;
            // NOTE(review): ns_desc* pointer arithmetic scales the
            // byte offset by sizeof(ns_desc); only correct because
            // offsetof(ns_desc, link) is 0 -- confirm before
            // reordering ns_desc fields.
            txDmaData = &txDescCache + offsetof(ns_desc, link);
            txDmaLen = sizeof(txDescCache.link);
            txDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += txDmaLen;

            if (doTxDmaRead())
                goto exit;

        } else {
            // Fetch the full descriptor at regs.txdp.
            txState = txDescRead;

            txDmaAddr = regs.txdp & 0x3fffffff;
            txDmaData = &txDescCache;
            txDmaLen = sizeof(ns_desc);
            txDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += txDmaLen;

            if (doTxDmaRead())
                goto exit;
        }
        break;

      case txDescRefr:
        if (txDmaState != dmaIdle)
            goto exit;

        txState = txAdvance;
        break;

      case txDescRead:
        if (txDmaState != dmaIdle)
            goto exit;

        DPRINTF(EthernetDesc,
                "txDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
                txDescCache.link, txDescCache.bufptr, txDescCache.cmdsts,
                txDescCache.extsts);

        // On tx, OWN set means the driver handed us this descriptor
        // to transmit; clear means there's nothing queued.
        if (txDescCache.cmdsts & CMDSTS_OWN) {
            txState = txFifoBlock;
            txFragPtr = txDescCache.bufptr;
            txDescCnt = txDescCache.cmdsts & CMDSTS_LEN_MASK;
        } else {
            devIntrPost(ISR_TXIDLE);
            txState = txIdle;
            goto exit;
        }
        break;

      case txFifoBlock:
        if (!txPacket) {
            DPRINTF(EthernetSM, "****starting the tx of a new packet****\n");
            // 16384-byte staging buffer for assembling the packet
            // from its descriptor fragments.
            txPacket = new PacketData(16384);
            txPacketBufPtr = txPacket->data;
        }

        if (txDescCnt == 0) {
            // Descriptor fully consumed.
            DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n");
            if (txDescCache.cmdsts & CMDSTS_MORE) {
                // Packet continues in another descriptor: release
                // this one (clear OWN) and write cmdsts back.
                DPRINTF(EthernetSM, "there are more descriptors to come\n");
                txState = txDescWrite;

                txDescCache.cmdsts &= ~CMDSTS_OWN;

                txDmaAddr = regs.txdp + offsetof(ns_desc, cmdsts);
                txDmaAddr &= 0x3fffffff;
                txDmaData = &(txDescCache.cmdsts);
                txDmaLen = sizeof(txDescCache.cmdsts);
                txDmaFree = dmaDescFree;

                if (doTxDmaWrite())
                    goto exit;

            } else { /* this packet is totally done */
                DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
                /* deal with the the packet that just finished */
                // Checksum offload: compute and fill in requested
                // UDP/TCP/IP checksums before queueing the packet.
                if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
                    IpPtr ip(txPacket);
                    if (txDescCache.extsts & EXTSTS_UDPPKT) {
                        UdpPtr udp(ip);
                        udp->sum(0);
                        udp->sum(cksum(udp));
                        txUdpChecksums++;
                    } else if (txDescCache.extsts & EXTSTS_TCPPKT) {
                        TcpPtr tcp(ip);
                        tcp->sum(0);
                        tcp->sum(cksum(tcp));
                        txTcpChecksums++;
                    }
                    if (txDescCache.extsts & EXTSTS_IPPKT) {
                        ip->sum(0);
                        ip->sum(cksum(ip));
                        txIpChecksums++;
                    }
                }

                txPacket->length = txPacketBufPtr - txPacket->data;
                // this is just because the receive can't handle a
                // packet bigger want to make sure
                assert(txPacket->length <= 1514);
#ifndef NDEBUG
                bool success =
#endif
                    txFifo.push(txPacket);
                assert(success);

                /*
                 * this following section is not tqo spec, but
                 * functionally shouldn't be any different. normally,
                 * the chip will wait til the transmit has occurred
                 * before writing back the descriptor because it has
                 * to wait to see that it was successfully transmitted
                 * to decide whether to set CMDSTS_OK or not.
                 * however, in the simulator since it is always
                 * successfully transmitted, and writing it exactly to
                 * spec would complicate the code, we just do it here
                 */

                txDescCache.cmdsts &= ~CMDSTS_OWN;
                txDescCache.cmdsts |= CMDSTS_OK;

                DPRINTF(EthernetDesc,
                        "txDesc writeback: cmdsts=%08x extsts=%08x\n",
                        txDescCache.cmdsts, txDescCache.extsts);

                txDmaAddr = regs.txdp + offsetof(ns_desc, cmdsts);
                txDmaAddr &= 0x3fffffff;
                txDmaData = &(txDescCache.cmdsts);
                txDmaLen = sizeof(txDescCache.cmdsts) +
                    sizeof(txDescCache.extsts);
                txDmaFree = dmaDescFree;

                descDmaWrites++;
                descDmaWrBytes += txDmaLen;

                transmit();
                txPacket = 0;

                // NOTE(review): when txEnable is false we exit here
                // WITHOUT issuing the doTxDmaWrite() prepared above,
                // so the descriptor writeback is skipped -- verify
                // this is intentional.
                if (!txEnable) {
                    DPRINTF(EthernetSM, "halting TX state machine\n");
                    txState = txIdle;
                    goto exit;
                } else
                    txState = txAdvance;

                if (doTxDmaWrite())
                    goto exit;
            }
        } else {
            DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
            if (!txFifo.full()) {
                txState = txFragRead;

                /*
                 * The number of bytes transferred is either whatever
                 * is left in the descriptor (txDescCnt), or if there
                 * is not enough room in the fifo, just whatever room
                 * is left in the fifo
                 */
                txXferLen = min<uint32_t>(txDescCnt, txFifo.avail());

                txDmaAddr = txFragPtr & 0x3fffffff;
                txDmaData = txPacketBufPtr;
                txDmaLen = txXferLen;
                txDmaFree = dmaDataFree;

                if (doTxDmaRead())
                    goto exit;
            } else {
                // Fifo full: try to drain it, then wait.
                txState = txFifoBlock;
                transmit();

                goto exit;
            }

        }
        break;

      case txFragRead:
        if (txDmaState != dmaIdle)
            goto exit;

        // A fragment arrived: advance cursors and account for the
        // fifo space it now occupies.
        txPacketBufPtr += txXferLen;
        txFragPtr += txXferLen;
        txDescCnt -= txXferLen;
        txFifo.reserve(txXferLen);

        txState = txFifoBlock;
        break;

      case txDescWrite:
        if (txDmaState != dmaIdle)
            goto exit;

        // Per-descriptor interrupt, if the driver requested one.
        if (txDescCache.cmdsts & CMDSTS_INTR)
            devIntrPost(ISR_TXDESC);

        txState = txAdvance;
        break;

      case txAdvance:
        // Follow the descriptor chain; a null link means the ring is
        // exhausted.
        if (txDescCache.link == 0) {
            devIntrPost(ISR_TXIDLE);
            txState = txIdle;
            goto exit;
        } else {
            txState = txDescRead;
            regs.txdp = txDescCache.link;
            CTDD = false;

            txDmaAddr = txDescCache.link & 0x3fffffff;
            txDmaData = &txDescCache;
            txDmaLen = sizeof(ns_desc);
            txDmaFree = dmaDescFree;

            if (doTxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("invalid state");
    }

    DPRINTF(EthernetSM, "entering next txState=%s\n",
            NsTxStateStrings[txState]);

    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
            NsTxStateStrings[txState]);
}
2189
2190 void
2191 NSGigE::transferDone()
2192 {
2193 if (txFifo.empty()) {
2194 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
2195 return;
2196 }
2197
2198 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
2199
2200 if (txEvent.scheduled())
2201 txEvent.reschedule(curTick + 1);
2202 else
2203 txEvent.schedule(curTick + 1);
2204 }
2205
2206 bool
2207 NSGigE::rxFilter(const PacketPtr &packet)
2208 {
2209 EthPtr eth = packet;
2210 bool drop = true;
2211 string type;
2212
2213 const EthAddr &dst = eth->dst();
2214 if (dst.unicast()) {
2215 // If we're accepting all unicast addresses
2216 if (acceptUnicast)
2217 drop = false;
2218
2219 // If we make a perfect match
2220 if (acceptPerfect && dst == rom.perfectMatch)
2221 drop = false;
2222
2223 if (acceptArp && eth->type() == ETH_TYPE_ARP)
2224 drop = false;
2225
2226 } else if (dst.broadcast()) {
2227 // if we're accepting broadcasts
2228 if (acceptBroadcast)
2229 drop = false;
2230
2231 } else if (dst.multicast()) {
2232 // if we're accepting all multicasts
2233 if (acceptMulticast)
2234 drop = false;
2235
2236 }
2237
2238 if (drop) {
2239 DPRINTF(Ethernet, "rxFilter drop\n");
2240 DDUMP(EthernetData, packet->data, packet->length);
2241 }
2242
2243 return drop;
2244 }
2245
bool
NSGigE::recvPacket(PacketPtr packet)
{
    // Entry point for a packet arriving off the wire.  Returns true
    // when the packet was consumed (including deliberate drops),
    // false only on a fifo overrun so the sender sees a failure.
    rxBytes += packet->length;
    rxPackets++;

    DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
            rxFifo.avail());

    // Receiver disabled: count and drop.  (debug_break() here flags
    // this as unexpected when running under a debugger.)
    if (!rxEnable) {
        DPRINTF(Ethernet, "receive disabled...packet dropped\n");
        debug_break();
        interface->recvDone();
        return true;
    }

    // Apply the hardware receive filter when enabled.
    if (rxFilterEnable && rxFilter(packet)) {
        DPRINTF(Ethernet, "packet filtered...dropped\n");
        interface->recvDone();
        return true;
    }

    // Overrun: no room in the rx fifo; post ISR_RXORN and report the
    // drop to the sender.
    if (rxFifo.avail() < packet->length) {
        DPRINTF(Ethernet,
                "packet will not fit in receive buffer...packet dropped\n");
        droppedPackets++;
        devIntrPost(ISR_RXORN);
        return false;
    }

    rxFifo.push(packet);
    interface->recvDone();

    // Let the receive state machine start consuming the new data.
    rxKick();
    return true;
}
2282
2283 //=====================================================================
2284 //
2285 //
void
NSGigE::serialize(ostream &os)
{
    // Checkpoint the device.  NOTE: the field order below defines the
    // checkpoint format and must stay in lock-step with
    // unserialize().
    // Serialize the PciDev base class
    PciDev::serialize(os);

    /*
     * Finalize any DMA events now.
     */
    // Pending delayed DMAs are completed functionally here so no DMA
    // is in flight inside the checkpoint.
    if (rxDmaReadEvent.scheduled())
        rxDmaReadCopy();
    if (rxDmaWriteEvent.scheduled())
        rxDmaWriteCopy();
    if (txDmaReadEvent.scheduled())
        txDmaReadCopy();
    if (txDmaWriteEvent.scheduled())
        txDmaWriteCopy();

    /*
     * Serialize the device registers
     */
    SERIALIZE_SCALAR(regs.command);
    SERIALIZE_SCALAR(regs.config);
    SERIALIZE_SCALAR(regs.mear);
    SERIALIZE_SCALAR(regs.ptscr);
    SERIALIZE_SCALAR(regs.isr);
    SERIALIZE_SCALAR(regs.imr);
    SERIALIZE_SCALAR(regs.ier);
    SERIALIZE_SCALAR(regs.ihr);
    SERIALIZE_SCALAR(regs.txdp);
    SERIALIZE_SCALAR(regs.txdp_hi);
    SERIALIZE_SCALAR(regs.txcfg);
    SERIALIZE_SCALAR(regs.gpior);
    SERIALIZE_SCALAR(regs.rxdp);
    SERIALIZE_SCALAR(regs.rxdp_hi);
    SERIALIZE_SCALAR(regs.rxcfg);
    SERIALIZE_SCALAR(regs.pqcr);
    SERIALIZE_SCALAR(regs.wcsr);
    SERIALIZE_SCALAR(regs.pcr);
    SERIALIZE_SCALAR(regs.rfcr);
    SERIALIZE_SCALAR(regs.rfdr);
    SERIALIZE_SCALAR(regs.srr);
    SERIALIZE_SCALAR(regs.mibc);
    SERIALIZE_SCALAR(regs.vrcr);
    SERIALIZE_SCALAR(regs.vtcr);
    SERIALIZE_SCALAR(regs.vdr);
    SERIALIZE_SCALAR(regs.ccsr);
    SERIALIZE_SCALAR(regs.tbicr);
    SERIALIZE_SCALAR(regs.tbisr);
    SERIALIZE_SCALAR(regs.tanar);
    SERIALIZE_SCALAR(regs.tanlpar);
    SERIALIZE_SCALAR(regs.taner);
    SERIALIZE_SCALAR(regs.tesr);

    SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);

    SERIALIZE_SCALAR(ioEnable);

    /*
     * Serialize the data Fifos
     */
    rxFifo.serialize("rxFifo", os);
    txFifo.serialize("txFifo", os);

    /*
     * Serialize the various helper variables
     */
    // In-flight packets are saved along with the offset of the
    // buffer cursor within the packet data.
    bool txPacketExists = txPacket;
    SERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        txPacket->serialize("txPacket", os);
        uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
        SERIALIZE_SCALAR(txPktBufPtr);
    }

    bool rxPacketExists = rxPacket;
    SERIALIZE_SCALAR(rxPacketExists);
    if (rxPacketExists) {
        rxPacket->serialize("rxPacket", os);
        uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
        SERIALIZE_SCALAR(rxPktBufPtr);
    }

    SERIALIZE_SCALAR(txXferLen);
    SERIALIZE_SCALAR(rxXferLen);

    /*
     * Serialize DescCaches
     */
    SERIALIZE_SCALAR(txDescCache.link);
    SERIALIZE_SCALAR(txDescCache.bufptr);
    SERIALIZE_SCALAR(txDescCache.cmdsts);
    SERIALIZE_SCALAR(txDescCache.extsts);
    SERIALIZE_SCALAR(rxDescCache.link);
    SERIALIZE_SCALAR(rxDescCache.bufptr);
    SERIALIZE_SCALAR(rxDescCache.cmdsts);
    SERIALIZE_SCALAR(rxDescCache.extsts);

    /*
     * Serialize tx state machine
     */
    // Enums are widened to int for the checkpoint.
    int txState = this->txState;
    SERIALIZE_SCALAR(txState);
    SERIALIZE_SCALAR(txEnable);
    SERIALIZE_SCALAR(CTDD);
    SERIALIZE_SCALAR(txFragPtr);
    SERIALIZE_SCALAR(txDescCnt);
    int txDmaState = this->txDmaState;
    SERIALIZE_SCALAR(txDmaState);

    /*
     * Serialize rx state machine
     */
    int rxState = this->rxState;
    SERIALIZE_SCALAR(rxState);
    SERIALIZE_SCALAR(rxEnable);
    SERIALIZE_SCALAR(CRDD);
    SERIALIZE_SCALAR(rxPktBytes);
    SERIALIZE_SCALAR(rxFragPtr);
    SERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState = this->rxDmaState;
    SERIALIZE_SCALAR(rxDmaState);

    SERIALIZE_SCALAR(extstsEnable);

    /*
     * If there's a pending transmit, store the time so we can
     * reschedule it later
     */
    // Stored relative to curTick so restore can re-offset it.
    Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
    SERIALIZE_SCALAR(transmitTick);

    /*
     * receive address filter settings
     */
    SERIALIZE_SCALAR(rxFilterEnable);
    SERIALIZE_SCALAR(acceptBroadcast);
    SERIALIZE_SCALAR(acceptMulticast);
    SERIALIZE_SCALAR(acceptUnicast);
    SERIALIZE_SCALAR(acceptPerfect);
    SERIALIZE_SCALAR(acceptArp);

    /*
     * Keep track of pending interrupt status.
     */
    // NOTE(review): intrEvent->when() is stored ABSOLUTE here, while
    // transmitTick above is stored relative to curTick -- confirm the
    // asymmetry is intended by the restore path.
    SERIALIZE_SCALAR(intrTick);
    SERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick = 0;
    if (intrEvent)
        intrEventTick = intrEvent->when();
    SERIALIZE_SCALAR(intrEventTick);

}
2439
void
NSGigE::unserialize(Checkpoint *cp, const std::string &section)
{
    // Restore the device from a checkpoint.  Field order mirrors
    // serialize() exactly.
    // Unserialize the PciDev base class
    PciDev::unserialize(cp, section);

    UNSERIALIZE_SCALAR(regs.command);
    UNSERIALIZE_SCALAR(regs.config);
    UNSERIALIZE_SCALAR(regs.mear);
    UNSERIALIZE_SCALAR(regs.ptscr);
    UNSERIALIZE_SCALAR(regs.isr);
    UNSERIALIZE_SCALAR(regs.imr);
    UNSERIALIZE_SCALAR(regs.ier);
    UNSERIALIZE_SCALAR(regs.ihr);
    UNSERIALIZE_SCALAR(regs.txdp);
    UNSERIALIZE_SCALAR(regs.txdp_hi);
    UNSERIALIZE_SCALAR(regs.txcfg);
    UNSERIALIZE_SCALAR(regs.gpior);
    UNSERIALIZE_SCALAR(regs.rxdp);
    UNSERIALIZE_SCALAR(regs.rxdp_hi);
    UNSERIALIZE_SCALAR(regs.rxcfg);
    UNSERIALIZE_SCALAR(regs.pqcr);
    UNSERIALIZE_SCALAR(regs.wcsr);
    UNSERIALIZE_SCALAR(regs.pcr);
    UNSERIALIZE_SCALAR(regs.rfcr);
    UNSERIALIZE_SCALAR(regs.rfdr);
    UNSERIALIZE_SCALAR(regs.srr);
    UNSERIALIZE_SCALAR(regs.mibc);
    UNSERIALIZE_SCALAR(regs.vrcr);
    UNSERIALIZE_SCALAR(regs.vtcr);
    UNSERIALIZE_SCALAR(regs.vdr);
    UNSERIALIZE_SCALAR(regs.ccsr);
    UNSERIALIZE_SCALAR(regs.tbicr);
    UNSERIALIZE_SCALAR(regs.tbisr);
    UNSERIALIZE_SCALAR(regs.tanar);
    UNSERIALIZE_SCALAR(regs.tanlpar);
    UNSERIALIZE_SCALAR(regs.taner);
    UNSERIALIZE_SCALAR(regs.tesr);

    UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);

    UNSERIALIZE_SCALAR(ioEnable);

    /*
     * unserialize the data fifos
     */
    rxFifo.unserialize("rxFifo", cp, section);
    txFifo.unserialize("txFifo", cp, section);

    /*
     * unserialize the various helper variables
     */
    // In-flight packets are restored into fresh 16384-byte staging
    // buffers, with the buffer cursor rebuilt from the saved offset.
    bool txPacketExists;
    UNSERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        txPacket = new PacketData(16384);
        txPacket->unserialize("txPacket", cp, section);
        uint32_t txPktBufPtr;
        UNSERIALIZE_SCALAR(txPktBufPtr);
        txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
    } else
        txPacket = 0;

    bool rxPacketExists;
    UNSERIALIZE_SCALAR(rxPacketExists);
    // (The assignment below is redundant with the else branch; kept
    // as-is.)
    rxPacket = 0;
    if (rxPacketExists) {
        rxPacket = new PacketData(16384);
        rxPacket->unserialize("rxPacket", cp, section);
        uint32_t rxPktBufPtr;
        UNSERIALIZE_SCALAR(rxPktBufPtr);
        rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
    } else
        rxPacket = 0;

    UNSERIALIZE_SCALAR(txXferLen);
    UNSERIALIZE_SCALAR(rxXferLen);

    /*
     * Unserialize DescCaches
     */
    UNSERIALIZE_SCALAR(txDescCache.link);
    UNSERIALIZE_SCALAR(txDescCache.bufptr);
    UNSERIALIZE_SCALAR(txDescCache.cmdsts);
    UNSERIALIZE_SCALAR(txDescCache.extsts);
    UNSERIALIZE_SCALAR(rxDescCache.link);
    UNSERIALIZE_SCALAR(rxDescCache.bufptr);
    UNSERIALIZE_SCALAR(rxDescCache.cmdsts);
    UNSERIALIZE_SCALAR(rxDescCache.extsts);

    /*
     * unserialize tx state machine
     */
    // Enums were checkpointed as ints; cast back.
    int txState;
    UNSERIALIZE_SCALAR(txState);
    this->txState = (TxState) txState;
    UNSERIALIZE_SCALAR(txEnable);
    UNSERIALIZE_SCALAR(CTDD);
    UNSERIALIZE_SCALAR(txFragPtr);
    UNSERIALIZE_SCALAR(txDescCnt);
    int txDmaState;
    UNSERIALIZE_SCALAR(txDmaState);
    this->txDmaState = (DmaState) txDmaState;

    /*
     * unserialize rx state machine
     */
    int rxState;
    UNSERIALIZE_SCALAR(rxState);
    this->rxState = (RxState) rxState;
    UNSERIALIZE_SCALAR(rxEnable);
    UNSERIALIZE_SCALAR(CRDD);
    UNSERIALIZE_SCALAR(rxPktBytes);
    UNSERIALIZE_SCALAR(rxFragPtr);
    UNSERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState;
    UNSERIALIZE_SCALAR(rxDmaState);
    this->rxDmaState = (DmaState) rxDmaState;

    UNSERIALIZE_SCALAR(extstsEnable);

    /*
     * If there's a pending transmit, reschedule it now
     */
    // transmitTick was saved relative to the checkpoint's curTick.
    Tick transmitTick;
    UNSERIALIZE_SCALAR(transmitTick);
    if (transmitTick)
        txEvent.schedule(curTick + transmitTick);

    /*
     * unserialize receive address filter settings
     */
    UNSERIALIZE_SCALAR(rxFilterEnable);
    UNSERIALIZE_SCALAR(acceptBroadcast);
    UNSERIALIZE_SCALAR(acceptMulticast);
    UNSERIALIZE_SCALAR(acceptUnicast);
    UNSERIALIZE_SCALAR(acceptPerfect);
    UNSERIALIZE_SCALAR(acceptArp);

    /*
     * Keep track of pending interrupt status.
     */
    // intrEventTick was saved as an absolute tick; a zero value means
    // no interrupt event was outstanding.
    UNSERIALIZE_SCALAR(intrTick);
    UNSERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick;
    UNSERIALIZE_SCALAR(intrEventTick);
    if (intrEventTick) {
        intrEvent = new IntrEvent(this, true);
        intrEvent->schedule(intrEventTick);
    }

    /*
     * re-add addrRanges to bus bridges
     */
    if (pioInterface) {
        pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));
        pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));
    }
}
2599
2600 Tick
2601 NSGigE::cacheAccess(MemReqPtr &req)
2602 {
2603 DPRINTF(EthernetPIO, "timing access to paddr=%#x (daddr=%#x)\n",
2604 req->paddr, req->paddr - addr);
2605 return curTick + pioLatency;
2606 }
2607
2608 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)
2609
2610 SimObjectParam<EtherInt *> peer;
2611 SimObjectParam<NSGigE *> device;
2612
2613 END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)
2614
2615 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt)
2616
2617 INIT_PARAM_DFLT(peer, "peer interface", NULL),
2618 INIT_PARAM(device, "Ethernet device of this interface")
2619
2620 END_INIT_SIM_OBJECT_PARAMS(NSGigEInt)
2621
2622 CREATE_SIM_OBJECT(NSGigEInt)
2623 {
2624 NSGigEInt *dev_int = new NSGigEInt(getInstanceName(), device);
2625
2626 EtherInt *p = (EtherInt *)peer;
2627 if (p) {
2628 dev_int->setPeer(p);
2629 p->setPeer(dev_int);
2630 }
2631
2632 return dev_int;
2633 }
2634
2635 REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt)
2636
2637
2638 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE)
2639
2640 Param<Tick> tx_delay;
2641 Param<Tick> rx_delay;
2642 Param<Tick> intr_delay;
2643 SimObjectParam<MemoryController *> mmu;
2644 SimObjectParam<PhysicalMemory *> physmem;
2645 Param<bool> rx_filter;
2646 Param<string> hardware_address;
2647 SimObjectParam<Bus*> header_bus;
2648 SimObjectParam<Bus*> payload_bus;
2649 SimObjectParam<HierParams *> hier;
2650 Param<Tick> pio_latency;
2651 Param<bool> dma_desc_free;
2652 Param<bool> dma_data_free;
2653 Param<Tick> dma_read_delay;
2654 Param<Tick> dma_write_delay;
2655 Param<Tick> dma_read_factor;
2656 Param<Tick> dma_write_factor;
2657 SimObjectParam<PciConfigAll *> configspace;
2658 SimObjectParam<PciConfigData *> configdata;
2659 SimObjectParam<Platform *> platform;
2660 Param<uint32_t> pci_bus;
2661 Param<uint32_t> pci_dev;
2662 Param<uint32_t> pci_func;
2663 Param<uint32_t> tx_fifo_size;
2664 Param<uint32_t> rx_fifo_size;
2665
2666 END_DECLARE_SIM_OBJECT_PARAMS(NSGigE)
2667
2668 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE)
2669
2670 INIT_PARAM_DFLT(tx_delay, "Transmit Delay", 1000),
2671 INIT_PARAM_DFLT(rx_delay, "Receive Delay", 1000),
2672 INIT_PARAM_DFLT(intr_delay, "Interrupt Delay in microseconds", 0),
2673 INIT_PARAM(mmu, "Memory Controller"),
2674 INIT_PARAM(physmem, "Physical Memory"),
2675 INIT_PARAM_DFLT(rx_filter, "Enable Receive Filter", true),
2676 INIT_PARAM_DFLT(hardware_address, "Ethernet Hardware Address",
2677 "00:99:00:00:00:01"),
2678 INIT_PARAM_DFLT(header_bus, "The IO Bus to attach to for headers", NULL),
2679 INIT_PARAM_DFLT(payload_bus, "The IO Bus to attach to for payload", NULL),
2680 INIT_PARAM_DFLT(hier, "Hierarchy global variables", &defaultHierParams),
2681 INIT_PARAM_DFLT(pio_latency, "Programmed IO latency in bus cycles", 1),
2682 INIT_PARAM_DFLT(dma_desc_free, "DMA of Descriptors is free", false),
2683 INIT_PARAM_DFLT(dma_data_free, "DMA of Data is free", false),
2684 INIT_PARAM_DFLT(dma_read_delay, "fixed delay for dma reads", 0),
2685 INIT_PARAM_DFLT(dma_write_delay, "fixed delay for dma writes", 0),
2686 INIT_PARAM_DFLT(dma_read_factor, "multiplier for dma reads", 0),
2687 INIT_PARAM_DFLT(dma_write_factor, "multiplier for dma writes", 0),
2688 INIT_PARAM(configspace, "PCI Configspace"),
2689 INIT_PARAM(configdata, "PCI Config data"),
2690 INIT_PARAM(platform, "Platform"),
2691 INIT_PARAM(pci_bus, "PCI bus"),
2692 INIT_PARAM(pci_dev, "PCI device number"),
2693 INIT_PARAM(pci_func, "PCI function code"),
2694 INIT_PARAM_DFLT(tx_fifo_size, "max size in bytes of txFifo", 131072),
2695 INIT_PARAM_DFLT(rx_fifo_size, "max size in bytes of rxFifo", 131072)
2696
2697 END_INIT_SIM_OBJECT_PARAMS(NSGigE)
2698
2699
2700 CREATE_SIM_OBJECT(NSGigE)
2701 {
2702 NSGigE::Params *params = new NSGigE::Params;
2703
2704 params->name = getInstanceName();
2705 params->mmu = mmu;
2706 params->configSpace = configspace;
2707 params->configData = configdata;
2708 params->plat = platform;
2709 params->busNum = pci_bus;
2710 params->deviceNum = pci_dev;
2711 params->functionNum = pci_func;
2712
2713 params->intr_delay = intr_delay;
2714 params->pmem = physmem;
2715 params->tx_delay = tx_delay;
2716 params->rx_delay = rx_delay;
2717 params->hier = hier;
2718 params->header_bus = header_bus;
2719 params->payload_bus = payload_bus;
2720 params->pio_latency = pio_latency;
2721 params->dma_desc_free = dma_desc_free;
2722 params->dma_data_free = dma_data_free;
2723 params->dma_read_delay = dma_read_delay;
2724 params->dma_write_delay = dma_write_delay;
2725 params->dma_read_factor = dma_read_factor;
2726 params->dma_write_factor = dma_write_factor;
2727 params->rx_filter = rx_filter;
2728 params->eaddr = hardware_address;
2729 params->tx_fifo_size = tx_fifo_size;
2730 params->rx_fifo_size = rx_fifo_size;
2731 return new NSGigE(params);
2732 }
2733
2734 REGISTER_SIM_OBJECT("NSGigE", NSGigE)