Merge zizzer.eecs.umich.edu:/z/m5/Bitkeeper/m5
[gem5.git] / dev / ns_gige.cc
1 /*
2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /** @file
30 * Device module for modelling the National Semiconductor
31 * DP83820 ethernet controller. Does not support priority queueing
32 */
33 #include <cstdio>
34 #include <deque>
35 #include <string>
36
37 #include "base/inet.hh"
38 #include "cpu/exec_context.hh"
39 #include "dev/etherlink.hh"
40 #include "dev/ns_gige.hh"
41 #include "dev/pciconfigall.hh"
42 #include "mem/bus/bus.hh"
43 #include "mem/bus/dma_interface.hh"
44 #include "mem/bus/pio_interface.hh"
45 #include "mem/bus/pio_interface_impl.hh"
46 #include "mem/functional/memory_control.hh"
47 #include "mem/functional/physical.hh"
48 #include "sim/builder.hh"
49 #include "sim/debug.hh"
50 #include "sim/host.hh"
51 #include "sim/stats.hh"
52 #include "targetarch/vtophys.hh"
53
// Human-readable names for the receive state machine states (parallel
// to the RxState enum); used only for debug/trace output.
const char *NsRxStateStrings[] =
{
    "rxIdle",
    "rxDescRefr",
    "rxDescRead",
    "rxFifoBlock",
    "rxFragWrite",
    "rxDescWrite",
    "rxAdvance"
};
64
// Human-readable names for the transmit state machine states (parallel
// to the TxState enum); used only for debug/trace output.
const char *NsTxStateStrings[] =
{
    "txIdle",
    "txDescRefr",
    "txDescRead",
    "txFifoBlock",
    "txFragRead",
    "txDescWrite",
    "txAdvance"
};
75
// Human-readable names for the DMA engine states (parallel to the
// DmaState enum); used only for debug/trace output.
const char *NsDmaState[] =
{
    "dmaIdle",
    "dmaReading",
    "dmaWriting",
    "dmaReadWaiting",
    "dmaWriteWaiting"
};
84
85 using namespace std;
86 using namespace Net;
87
88 ///////////////////////////////////////////////////////////////////////
89 //
90 // NSGigE PCI Device
91 //
/**
 * Construct the NIC model: initialize all tx/rx state-machine and DMA
 * bookkeeping from the supplied parameters, wire the device to the
 * memory system buses, reset the device registers, and load the
 * station (MAC) address into the perfect-match ROM.
 *
 * @param p Construction parameters (fifo sizes, delays, bus pointers,
 *          ethernet address, etc.).
 */
NSGigE::NSGigE(Params *p)
    : PciDev(p), ioEnable(false),
      txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size),
      txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
      txXferLen(0), rxXferLen(0), clock(p->clock),
      txState(txIdle), txEnable(false), CTDD(false),
      txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
      rxEnable(false), CRDD(false), rxPktBytes(0),
      rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
      rxDmaReadEvent(this), rxDmaWriteEvent(this),
      txDmaReadEvent(this), txDmaWriteEvent(this),
      dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free),
      txDelay(p->tx_delay), rxDelay(p->rx_delay),
      rxKickTick(0), txKickTick(0),
      txEvent(this), rxFilterEnable(p->rx_filter), acceptBroadcast(false),
      acceptMulticast(false), acceptUnicast(false),
      acceptPerfect(false), acceptArp(false),
      physmem(p->pmem), intrTick(0), cpuPendingIntr(false),
      intrEvent(0), interface(0)
{
    // Bus wiring: if a header bus is given, PIO goes over it, and DMA
    // uses the payload bus when one exists (otherwise the header bus
    // carries DMA traffic too).  With only a payload bus, both PIO and
    // DMA go over that single bus.
    if (p->header_bus) {
        pioInterface = newPioInterface(name(), p->hier,
                                       p->header_bus, this,
                                       &NSGigE::cacheAccess);

        pioLatency = p->pio_latency * p->header_bus->clockRate;

        if (p->payload_bus)
            dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                                 p->header_bus,
                                                 p->payload_bus, 1,
                                                 p->dma_no_allocate);
        else
            dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                                 p->header_bus,
                                                 p->header_bus, 1,
                                                 p->dma_no_allocate);
    } else if (p->payload_bus) {
        pioInterface = newPioInterface(name(), p->hier,
                                       p->payload_bus, this,
                                       &NSGigE::cacheAccess);

        pioLatency = p->pio_latency * p->payload_bus->clockRate;

        dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                             p->payload_bus,
                                             p->payload_bus, 1,
                                             p->dma_no_allocate);
    }


    // Timing knobs for interrupt delivery and modelled DMA latency.
    intrDelay = p->intr_delay;
    dmaReadDelay = p->dma_read_delay;
    dmaWriteDelay = p->dma_write_delay;
    dmaReadFactor = p->dma_read_factor;
    dmaWriteFactor = p->dma_write_factor;

    // Start from a clean register state, then install the MAC address
    // used by the perfect-match receive filter.
    regsReset();
    memcpy(&rom.perfectMatch, p->eaddr.bytes(), ETH_ADDR_LEN);
}
152
// Destructor: no explicit teardown; members release their own resources.
NSGigE::~NSGigE()
{}
155
156 void
157 NSGigE::regStats()
158 {
159 txBytes
160 .name(name() + ".txBytes")
161 .desc("Bytes Transmitted")
162 .prereq(txBytes)
163 ;
164
165 rxBytes
166 .name(name() + ".rxBytes")
167 .desc("Bytes Received")
168 .prereq(rxBytes)
169 ;
170
171 txPackets
172 .name(name() + ".txPackets")
173 .desc("Number of Packets Transmitted")
174 .prereq(txBytes)
175 ;
176
177 rxPackets
178 .name(name() + ".rxPackets")
179 .desc("Number of Packets Received")
180 .prereq(rxBytes)
181 ;
182
183 txIpChecksums
184 .name(name() + ".txIpChecksums")
185 .desc("Number of tx IP Checksums done by device")
186 .precision(0)
187 .prereq(txBytes)
188 ;
189
190 rxIpChecksums
191 .name(name() + ".rxIpChecksums")
192 .desc("Number of rx IP Checksums done by device")
193 .precision(0)
194 .prereq(rxBytes)
195 ;
196
197 txTcpChecksums
198 .name(name() + ".txTcpChecksums")
199 .desc("Number of tx TCP Checksums done by device")
200 .precision(0)
201 .prereq(txBytes)
202 ;
203
204 rxTcpChecksums
205 .name(name() + ".rxTcpChecksums")
206 .desc("Number of rx TCP Checksums done by device")
207 .precision(0)
208 .prereq(rxBytes)
209 ;
210
211 txUdpChecksums
212 .name(name() + ".txUdpChecksums")
213 .desc("Number of tx UDP Checksums done by device")
214 .precision(0)
215 .prereq(txBytes)
216 ;
217
218 rxUdpChecksums
219 .name(name() + ".rxUdpChecksums")
220 .desc("Number of rx UDP Checksums done by device")
221 .precision(0)
222 .prereq(rxBytes)
223 ;
224
225 descDmaReads
226 .name(name() + ".descDMAReads")
227 .desc("Number of descriptors the device read w/ DMA")
228 .precision(0)
229 ;
230
231 descDmaWrites
232 .name(name() + ".descDMAWrites")
233 .desc("Number of descriptors the device wrote w/ DMA")
234 .precision(0)
235 ;
236
237 descDmaRdBytes
238 .name(name() + ".descDmaReadBytes")
239 .desc("number of descriptor bytes read w/ DMA")
240 .precision(0)
241 ;
242
243 descDmaWrBytes
244 .name(name() + ".descDmaWriteBytes")
245 .desc("number of descriptor bytes write w/ DMA")
246 .precision(0)
247 ;
248
249 txBandwidth
250 .name(name() + ".txBandwidth")
251 .desc("Transmit Bandwidth (bits/s)")
252 .precision(0)
253 .prereq(txBytes)
254 ;
255
256 rxBandwidth
257 .name(name() + ".rxBandwidth")
258 .desc("Receive Bandwidth (bits/s)")
259 .precision(0)
260 .prereq(rxBytes)
261 ;
262
263 totBandwidth
264 .name(name() + ".totBandwidth")
265 .desc("Total Bandwidth (bits/s)")
266 .precision(0)
267 .prereq(totBytes)
268 ;
269
270 totPackets
271 .name(name() + ".totPackets")
272 .desc("Total Packets")
273 .precision(0)
274 .prereq(totBytes)
275 ;
276
277 totBytes
278 .name(name() + ".totBytes")
279 .desc("Total Bytes")
280 .precision(0)
281 .prereq(totBytes)
282 ;
283
284 totPacketRate
285 .name(name() + ".totPPS")
286 .desc("Total Tranmission Rate (packets/s)")
287 .precision(0)
288 .prereq(totBytes)
289 ;
290
291 txPacketRate
292 .name(name() + ".txPPS")
293 .desc("Packet Tranmission Rate (packets/s)")
294 .precision(0)
295 .prereq(txBytes)
296 ;
297
298 rxPacketRate
299 .name(name() + ".rxPPS")
300 .desc("Packet Reception Rate (packets/s)")
301 .precision(0)
302 .prereq(rxBytes)
303 ;
304
305 postedSwi
306 .name(name() + ".postedSwi")
307 .desc("number of software interrupts posted to CPU")
308 .precision(0)
309 ;
310
311 totalSwi
312 .name(name() + ".totalSwi")
313 .desc("number of total Swi written to ISR")
314 .precision(0)
315 ;
316
317 coalescedSwi
318 .name(name() + ".coalescedSwi")
319 .desc("average number of Swi's coalesced into each post")
320 .precision(0)
321 ;
322
323 postedRxIdle
324 .name(name() + ".postedRxIdle")
325 .desc("number of rxIdle interrupts posted to CPU")
326 .precision(0)
327 ;
328
329 totalRxIdle
330 .name(name() + ".totalRxIdle")
331 .desc("number of total RxIdle written to ISR")
332 .precision(0)
333 ;
334
335 coalescedRxIdle
336 .name(name() + ".coalescedRxIdle")
337 .desc("average number of RxIdle's coalesced into each post")
338 .precision(0)
339 ;
340
341 postedRxOk
342 .name(name() + ".postedRxOk")
343 .desc("number of RxOk interrupts posted to CPU")
344 .precision(0)
345 ;
346
347 totalRxOk
348 .name(name() + ".totalRxOk")
349 .desc("number of total RxOk written to ISR")
350 .precision(0)
351 ;
352
353 coalescedRxOk
354 .name(name() + ".coalescedRxOk")
355 .desc("average number of RxOk's coalesced into each post")
356 .precision(0)
357 ;
358
359 postedRxDesc
360 .name(name() + ".postedRxDesc")
361 .desc("number of RxDesc interrupts posted to CPU")
362 .precision(0)
363 ;
364
365 totalRxDesc
366 .name(name() + ".totalRxDesc")
367 .desc("number of total RxDesc written to ISR")
368 .precision(0)
369 ;
370
371 coalescedRxDesc
372 .name(name() + ".coalescedRxDesc")
373 .desc("average number of RxDesc's coalesced into each post")
374 .precision(0)
375 ;
376
377 postedTxOk
378 .name(name() + ".postedTxOk")
379 .desc("number of TxOk interrupts posted to CPU")
380 .precision(0)
381 ;
382
383 totalTxOk
384 .name(name() + ".totalTxOk")
385 .desc("number of total TxOk written to ISR")
386 .precision(0)
387 ;
388
389 coalescedTxOk
390 .name(name() + ".coalescedTxOk")
391 .desc("average number of TxOk's coalesced into each post")
392 .precision(0)
393 ;
394
395 postedTxIdle
396 .name(name() + ".postedTxIdle")
397 .desc("number of TxIdle interrupts posted to CPU")
398 .precision(0)
399 ;
400
401 totalTxIdle
402 .name(name() + ".totalTxIdle")
403 .desc("number of total TxIdle written to ISR")
404 .precision(0)
405 ;
406
407 coalescedTxIdle
408 .name(name() + ".coalescedTxIdle")
409 .desc("average number of TxIdle's coalesced into each post")
410 .precision(0)
411 ;
412
413 postedTxDesc
414 .name(name() + ".postedTxDesc")
415 .desc("number of TxDesc interrupts posted to CPU")
416 .precision(0)
417 ;
418
419 totalTxDesc
420 .name(name() + ".totalTxDesc")
421 .desc("number of total TxDesc written to ISR")
422 .precision(0)
423 ;
424
425 coalescedTxDesc
426 .name(name() + ".coalescedTxDesc")
427 .desc("average number of TxDesc's coalesced into each post")
428 .precision(0)
429 ;
430
431 postedRxOrn
432 .name(name() + ".postedRxOrn")
433 .desc("number of RxOrn posted to CPU")
434 .precision(0)
435 ;
436
437 totalRxOrn
438 .name(name() + ".totalRxOrn")
439 .desc("number of total RxOrn written to ISR")
440 .precision(0)
441 ;
442
443 coalescedRxOrn
444 .name(name() + ".coalescedRxOrn")
445 .desc("average number of RxOrn's coalesced into each post")
446 .precision(0)
447 ;
448
449 coalescedTotal
450 .name(name() + ".coalescedTotal")
451 .desc("average number of interrupts coalesced into each post")
452 .precision(0)
453 ;
454
455 postedInterrupts
456 .name(name() + ".postedInterrupts")
457 .desc("number of posts to CPU")
458 .precision(0)
459 ;
460
461 droppedPackets
462 .name(name() + ".droppedPackets")
463 .desc("number of packets dropped")
464 .precision(0)
465 ;
466
467 coalescedSwi = totalSwi / postedInterrupts;
468 coalescedRxIdle = totalRxIdle / postedInterrupts;
469 coalescedRxOk = totalRxOk / postedInterrupts;
470 coalescedRxDesc = totalRxDesc / postedInterrupts;
471 coalescedTxOk = totalTxOk / postedInterrupts;
472 coalescedTxIdle = totalTxIdle / postedInterrupts;
473 coalescedTxDesc = totalTxDesc / postedInterrupts;
474 coalescedRxOrn = totalRxOrn / postedInterrupts;
475
476 coalescedTotal = (totalSwi + totalRxIdle + totalRxOk + totalRxDesc +
477 totalTxOk + totalTxIdle + totalTxDesc +
478 totalRxOrn) / postedInterrupts;
479
480 txBandwidth = txBytes * Stats::constant(8) / simSeconds;
481 rxBandwidth = rxBytes * Stats::constant(8) / simSeconds;
482 totBandwidth = txBandwidth + rxBandwidth;
483 totBytes = txBytes + rxBytes;
484 totPackets = txPackets + rxPackets;
485
486 txPacketRate = txPackets / simSeconds;
487 rxPacketRate = rxPackets / simSeconds;
488 }
489
490 /**
491 * This is to read the PCI general configuration registers
492 */
493 void
494 NSGigE::ReadConfig(int offset, int size, uint8_t *data)
495 {
496 if (offset < PCI_DEVICE_SPECIFIC)
497 PciDev::ReadConfig(offset, size, data);
498 else
499 panic("Device specific PCI config space not implemented!\n");
500 }
501
/**
 * Write to the PCI general configuration registers.  The generic
 * PciDev code handles the standard region; the device-specific region
 * is not modelled.  After the write, COMMAND and BAR updates are
 * inspected so the device's I/O-enable state and the PIO interface's
 * address ranges stay in sync with the config space.
 */
void
NSGigE::WriteConfig(int offset, int size, uint32_t data)
{
    if (offset < PCI_DEVICE_SPECIFIC)
        PciDev::WriteConfig(offset, size, data);
    else
        panic("Device specific PCI config space not implemented!\n");

    // Need to catch writes to BARs to update the PIO interface
    switch (offset) {
        // seems to work fine without all these PCI settings, but i
        // put in the IO to double check, an assertion will fail if we
        // need to properly implement it
      case PCI_COMMAND:
        // Track the I/O Space Enable bit; read()/write() assert on it.
        if (config.data[offset] & PCI_CMD_IOSE)
            ioEnable = true;
        else
            ioEnable = false;

#if 0
        if (config.data[offset] & PCI_CMD_BME) {
            bmEnabled = true;
        }
        else {
            bmEnabled = false;
        }

        if (config.data[offset] & PCI_CMD_MSE) {
            memEnable = true;
        }
        else {
            memEnable = false;
        }
#endif
        break;

      case PCI0_BASE_ADDR0:
        if (BARAddrs[0] != 0) {
            if (pioInterface)
                pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));

            // mask the stored BAR down to a plain physical address
            // (drop the EV5 uncached-space bits)
            BARAddrs[0] &= EV5::PAddrUncachedMask;
        }
        break;
      case PCI0_BASE_ADDR1:
        if (BARAddrs[1] != 0) {
            if (pioInterface)
                pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));

            BARAddrs[1] &= EV5::PAddrUncachedMask;
        }
        break;
    }
}
559
/**
 * Read a device register.  The registers are detailed in the NS83820
 * (DP83820) spec sheet.  Only 32-bit accesses are supported; reading
 * the ISR has the side effect of acknowledging all interrupts.
 *
 * @param req  Memory request; paddr selects the register, size must be 4.
 * @param data Destination buffer for the register value.
 * @return No_Fault on success (illegal accesses panic).
 */
Fault
NSGigE::read(MemReqPtr &req, uint8_t *data)
{
    assert(ioEnable);

    //The mask is to give you only the offset into the device register file
    Addr daddr = req->paddr & 0xfff;
    DPRINTF(EthernetPIO, "read da=%#x pa=%#x va=%#x size=%d\n",
            daddr, req->paddr, req->vaddr, req->size);


    // there are some reserved registers, you can see ns_gige_reg.h and
    // the spec sheet for details
    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        // this range aliases the PCI configuration space
        ReadConfig(daddr & 0xff, req->size, data);
        return No_Fault;
    } else if (daddr >= MIB_START && daddr <= MIB_END) {
        // don't implement all the MIB's.  hopefully the kernel
        // doesn't actually DEPEND upon their values
        // MIB are just hardware stats keepers
        uint32_t &reg = *(uint32_t *) data;
        reg = 0;
        return No_Fault;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    switch (req->size) {
      case sizeof(uint32_t):
        {
            uint32_t &reg = *(uint32_t *)data;

            switch (daddr) {
              case CR:
                reg = regs.command;
                //these are supposed to be cleared on a read
                reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
                break;

              case CFGR:
                reg = regs.config;
                break;

              case MEAR:
                reg = regs.mear;
                break;

              case PTSCR:
                reg = regs.ptscr;
                break;

              case ISR:
                reg = regs.isr;
                // reading ISR acknowledges (clears) all interrupt causes
                devIntrClear(ISR_ALL);
                break;

              case IMR:
                reg = regs.imr;
                break;

              case IER:
                reg = regs.ier;
                break;

              case IHR:
                reg = regs.ihr;
                break;

              case TXDP:
                reg = regs.txdp;
                break;

              case TXDP_HI:
                reg = regs.txdp_hi;
                break;

              case TX_CFG:
                reg = regs.txcfg;
                break;

              case GPIOR:
                reg = regs.gpior;
                break;

              case RXDP:
                reg = regs.rxdp;
                break;

              case RXDP_HI:
                reg = regs.rxdp_hi;
                break;

              case RX_CFG:
                reg = regs.rxcfg;
                break;

              case PQCR:
                reg = regs.pqcr;
                break;

              case WCSR:
                reg = regs.wcsr;
                break;

              case PCR:
                reg = regs.pcr;
                break;

              // see the spec sheet for how RFCR and RFDR work
              // basically, you write to RFCR to tell the machine
              // what you want to do next, then you act upon RFDR,
              // and the device will be prepared b/c of what you
              // wrote to RFCR
              case RFCR:
                reg = regs.rfcr;
                break;

              case RFDR:
                // return the 16-bit perfect-match word selected by RFCR
                switch (regs.rfcr & RFCR_RFADDR) {
                  case 0x000:
                    reg = rom.perfectMatch[1];
                    reg = reg << 8;
                    reg += rom.perfectMatch[0];
                    break;
                  case 0x002:
                    reg = rom.perfectMatch[3] << 8;
                    reg += rom.perfectMatch[2];
                    break;
                  case 0x004:
                    reg = rom.perfectMatch[5] << 8;
                    reg += rom.perfectMatch[4];
                    break;
                  default:
                    panic("reading RFDR for something other than PMATCH!\n");
                    // didn't implement other RFDR functionality b/c
                    // driver didn't use it
                }
                break;

              case SRR:
                reg = regs.srr;
                break;

              case MIBC:
                reg = regs.mibc;
                // these status/clear bits read back as zero
                reg &= ~(MIBC_MIBS | MIBC_ACLR);
                break;

              case VRCR:
                reg = regs.vrcr;
                break;

              case VTCR:
                reg = regs.vtcr;
                break;

              case VDR:
                reg = regs.vdr;
                break;

              case CCSR:
                reg = regs.ccsr;
                break;

              case TBICR:
                reg = regs.tbicr;
                break;

              case TBISR:
                reg = regs.tbisr;
                break;

              case TANAR:
                reg = regs.tanar;
                break;

              case TANLPAR:
                reg = regs.tanlpar;
                break;

              case TANER:
                reg = regs.taner;
                break;

              case TESR:
                reg = regs.tesr;
                break;

              case M5REG:
                // simulator-private register exposing config parameters
                reg = params()->m5reg;
                break;

              default:
                panic("reading unimplemented register: addr=%#x", daddr);
            }

            DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
                    daddr, reg, reg);
        }
        break;

      default:
        panic("accessing register with invalid size: addr=%#x, size=%d",
              daddr, req->size);
    }

    return No_Fault;
}
773
/**
 * Write a device register (see the DP83820 spec sheet for register
 * semantics).  Only 32-bit accesses are supported.  Side effects
 * include kicking the tx/rx state machines (CR), resetting the device,
 * posting a software interrupt, and updating the receive-filter flags.
 *
 * @param req  Memory request; paddr selects the register, size must be 4.
 * @param data Source buffer containing the value to write.
 * @return No_Fault on success (illegal accesses panic).
 */
Fault
NSGigE::write(MemReqPtr &req, const uint8_t *data)
{
    assert(ioEnable);

    Addr daddr = req->paddr & 0xfff;
    DPRINTF(EthernetPIO, "write da=%#x pa=%#x va=%#x size=%d\n",
            daddr, req->paddr, req->vaddr, req->size);

    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        // this range aliases the PCI configuration space
        WriteConfig(daddr & 0xff, req->size, *(uint32_t *)data);
        return No_Fault;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    if (req->size == sizeof(uint32_t)) {
        uint32_t reg = *(uint32_t *)data;
        DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);

        switch (daddr) {
          case CR:
            regs.command = reg;
            // disable takes priority over enable; enabling kicks the
            // corresponding state machine if it is idle
            if (reg & CR_TXD) {
                txEnable = false;
            } else if (reg & CR_TXE) {
                txEnable = true;

                // the kernel is enabling the transmit machine
                if (txState == txIdle)
                    txKick();
            }

            if (reg & CR_RXD) {
                rxEnable = false;
            } else if (reg & CR_RXE) {
                rxEnable = true;

                if (rxState == rxIdle)
                    rxKick();
            }

            if (reg & CR_TXR)
                txReset();

            if (reg & CR_RXR)
                rxReset();

            if (reg & CR_SWI)
                devIntrPost(ISR_SWI);

            if (reg & CR_RST) {
                // full device reset: both machines plus the registers
                txReset();
                rxReset();

                regsReset();
            }
            break;

          case CFGR:
            if (reg & CFGR_LNKSTS ||
                reg & CFGR_SPDSTS ||
                reg & CFGR_DUPSTS ||
                reg & CFGR_RESERVED ||
                reg & CFGR_T64ADDR ||
                reg & CFGR_PCI64_DET)
                panic("writing to read-only or reserved CFGR bits!\n");

            regs.config |= reg & ~(CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
                                   CFGR_RESERVED | CFGR_T64ADDR | CFGR_PCI64_DET);

            // all these #if 0's are because i don't THINK the kernel needs to
            // have these implemented. if there is a problem relating to one of
            // these, you may need to add functionality in.
#if 0
            if (reg & CFGR_TBI_EN) ;
            if (reg & CFGR_MODE_1000) ;
#endif

            if (reg & CFGR_AUTO_1000)
                panic("CFGR_AUTO_1000 not implemented!\n");

#if 0
            if (reg & CFGR_PINT_DUPSTS ||
                reg & CFGR_PINT_LNKSTS ||
                reg & CFGR_PINT_SPDSTS)
                ;

            if (reg & CFGR_TMRTEST) ;
            if (reg & CFGR_MRM_DIS) ;
            if (reg & CFGR_MWI_DIS) ;

            if (reg & CFGR_T64ADDR)
                panic("CFGR_T64ADDR is read only register!\n");

            if (reg & CFGR_PCI64_DET)
                panic("CFGR_PCI64_DET is read only register!\n");

            if (reg & CFGR_DATA64_EN) ;
            if (reg & CFGR_M64ADDR) ;
            if (reg & CFGR_PHY_RST) ;
            if (reg & CFGR_PHY_DIS) ;
#endif

            // extended-status descriptors carry checksum-offload info
            if (reg & CFGR_EXTSTS_EN)
                extstsEnable = true;
            else
                extstsEnable = false;

#if 0
            if (reg & CFGR_REQALG) ;
            if (reg & CFGR_SB) ;
            if (reg & CFGR_POW) ;
            if (reg & CFGR_EXD) ;
            if (reg & CFGR_PESEL) ;
            if (reg & CFGR_BROM_DIS) ;
            if (reg & CFGR_EXT_125) ;
            if (reg & CFGR_BEM) ;
#endif
            break;

          case MEAR:
            regs.mear = reg;
            // since phy is completely faked, MEAR_MD* don't matter
            // and since the driver never uses MEAR_EE*, they don't
            // matter
#if 0
            if (reg & MEAR_EEDI) ;
            if (reg & MEAR_EEDO) ; // this one is read only
            if (reg & MEAR_EECLK) ;
            if (reg & MEAR_EESEL) ;
            if (reg & MEAR_MDIO) ;
            if (reg & MEAR_MDDIR) ;
            if (reg & MEAR_MDC) ;
#endif
            break;

          case PTSCR:
            regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
            // these control BISTs for various parts of chip - we
            // don't care or do just fake that the BIST is done
            if (reg & PTSCR_RBIST_EN)
                regs.ptscr |= PTSCR_RBIST_DONE;
            if (reg & PTSCR_EEBIST_EN)
                regs.ptscr &= ~PTSCR_EEBIST_EN;
            if (reg & PTSCR_EELOAD_EN)
                regs.ptscr &= ~PTSCR_EELOAD_EN;
            break;

          case ISR: /* writing to the ISR has no effect */
            panic("ISR is a read only register!\n");

          case IMR:
            regs.imr = reg;
            // mask change may raise or lower the CPU interrupt line
            devIntrChangeMask();
            break;

          case IER:
            regs.ier = reg;
            break;

          case IHR:
            regs.ihr = reg;
            /* not going to implement real interrupt holdoff */
            break;

          case TXDP:
            // descriptor pointers are dword-aligned; low bits ignored
            regs.txdp = (reg & 0xFFFFFFFC);
            assert(txState == txIdle);
            CTDD = false;
            break;

          case TXDP_HI:
            regs.txdp_hi = reg;
            break;

          case TX_CFG:
            regs.txcfg = reg;
#if 0
            if (reg & TX_CFG_CSI) ;
            if (reg & TX_CFG_HBI) ;
            if (reg & TX_CFG_MLB) ;
            if (reg & TX_CFG_ATP) ;
            if (reg & TX_CFG_ECRETRY) {
                /*
                 * this could easily be implemented, but considering
                 * the network is just a fake pipe, wouldn't make
                 * sense to do this
                 */
            }

            if (reg & TX_CFG_BRST_DIS) ;
#endif

#if 0
            /* we handle our own DMA, ignore the kernel's exhortations */
            if (reg & TX_CFG_MXDMA) ;
#endif

            // also, we currently don't care about fill/drain
            // thresholds though this may change in the future with
            // more realistic networks or a driver which changes it
            // according to feedback

            break;

          case GPIOR:
            regs.gpior = reg;
            /* these just control general purpose i/o pins, don't matter */
            break;

          case RXDP:
            regs.rxdp = reg;
            CRDD = false;
            break;

          case RXDP_HI:
            regs.rxdp_hi = reg;
            break;

          case RX_CFG:
            regs.rxcfg = reg;
#if 0
            if (reg & RX_CFG_AEP) ;
            if (reg & RX_CFG_ARP) ;
            if (reg & RX_CFG_STRIPCRC) ;
            if (reg & RX_CFG_RX_RD) ;
            if (reg & RX_CFG_ALP) ;
            if (reg & RX_CFG_AIRL) ;

            /* we handle our own DMA, ignore what kernel says about it */
            if (reg & RX_CFG_MXDMA) ;

            //also, we currently don't care about fill/drain thresholds
            //though this may change in the future with more realistic
            //networks or a driver which changes it according to feedback
            if (reg & (RX_CFG_DRTH | RX_CFG_DRTH0)) ;
#endif
            break;

          case PQCR:
            /* there is no priority queueing used in the linux 2.6 driver */
            regs.pqcr = reg;
            break;

          case WCSR:
            /* not going to implement wake on LAN */
            regs.wcsr = reg;
            break;

          case PCR:
            /* not going to implement pause control */
            regs.pcr = reg;
            break;

          case RFCR:
            regs.rfcr = reg;

            // latch the receive-filter configuration into the flags
            // consulted by the rx filter
            rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
            acceptBroadcast = (reg & RFCR_AAB) ? true : false;
            acceptMulticast = (reg & RFCR_AAM) ? true : false;
            acceptUnicast = (reg & RFCR_AAU) ? true : false;
            acceptPerfect = (reg & RFCR_APM) ? true : false;
            acceptArp = (reg & RFCR_AARP) ? true : false;

#if 0
            if (reg & RFCR_APAT)
                panic("RFCR_APAT not implemented!\n");
#endif

            if (reg & RFCR_MHEN || reg & RFCR_UHEN)
                panic("hash filtering not implemented!\n");

            if (reg & RFCR_ULM)
                panic("RFCR_ULM not implemented!\n");

            break;

          case RFDR:
            panic("the driver never writes to RFDR, something is wrong!\n");

          case BRAR:
            panic("the driver never uses BRAR, something is wrong!\n");

          case BRDR:
            panic("the driver never uses BRDR, something is wrong!\n");

          case SRR:
            panic("SRR is read only register!\n");

          case MIBC:
            panic("the driver never uses MIBC, something is wrong!\n");

          case VRCR:
            regs.vrcr = reg;
            break;

          case VTCR:
            regs.vtcr = reg;
            break;

          case VDR:
            panic("the driver never uses VDR, something is wrong!\n");
            break;

          case CCSR:
            /* not going to implement clockrun stuff */
            regs.ccsr = reg;
            break;

          case TBICR:
            regs.tbicr = reg;
            if (reg & TBICR_MR_LOOPBACK)
                panic("TBICR_MR_LOOPBACK never used, something wrong!\n");

            if (reg & TBICR_MR_AN_ENABLE) {
                // fake phy: auto-negotiation "completes" immediately
                regs.tanlpar = regs.tanar;
                regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
            }

#if 0
            if (reg & TBICR_MR_RESTART_AN) ;
#endif

            break;

          case TBISR:
            panic("TBISR is read only register!\n");

          case TANAR:
            regs.tanar = reg;
            if (reg & TANAR_PS2)
                panic("this isn't used in driver, something wrong!\n");

            if (reg & TANAR_PS1)
                panic("this isn't used in driver, something wrong!\n");
            break;

          case TANLPAR:
            panic("this should only be written to by the fake phy!\n");

          case TANER:
            panic("TANER is read only register!\n");

          case TESR:
            regs.tesr = reg;
            break;

          default:
            panic("invalid register access daddr=%#x", daddr);
        }
    } else {
        panic("Invalid Request Size");
    }

    return No_Fault;
}
1132
/**
 * Set device interrupt cause bits in the ISR.  Reserved causes may not
 * be posted; unimplemented causes are dropped with a warning.  Each
 * cause that is enabled in the IMR is counted for the coalescing
 * statistics, and if any enabled cause is now pending, an interrupt to
 * the CPU is scheduled (delayed by intrDelay unless a no-delay cause
 * is pending).
 *
 * @param interrupts ISR_* cause bits to set.
 */
void
NSGigE::devIntrPost(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot set a reserved interrupt");

    if (interrupts & ISR_NOIMPL)
        warn("interrupt not implemented %#x\n", interrupts);

    interrupts &= ~ISR_NOIMPL;
    regs.isr |= interrupts;

    // count each masked-in cause toward the "total written" statistics
    if (interrupts & regs.imr) {
        if (interrupts & ISR_SWI) {
            totalSwi++;
        }
        if (interrupts & ISR_RXIDLE) {
            totalRxIdle++;
        }
        if (interrupts & ISR_RXOK) {
            totalRxOk++;
        }
        if (interrupts & ISR_RXDESC) {
            totalRxDesc++;
        }
        if (interrupts & ISR_TXOK) {
            totalTxOk++;
        }
        if (interrupts & ISR_TXIDLE) {
            totalTxIdle++;
        }
        if (interrupts & ISR_TXDESC) {
            totalTxDesc++;
        }
        if (interrupts & ISR_RXORN) {
            totalRxOrn++;
        }
    }

    DPRINTF(EthernetIntr,
            "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
            interrupts, regs.isr, regs.imr);

    if ((regs.isr & regs.imr)) {
        Tick when = curTick;
        if (!(regs.isr & regs.imr & ISR_NODELAY))
            when += intrDelay;
        cpuIntrPost(when);
    }
}
1183
/* Because the interrupt-coalescing statistics are counted inside this
   function, it may only be used to clear all interrupts at the point
   where the kernel reads the ISR and services them.  Keep that
   restriction in mind before reusing it for anything else.
*/
/**
 * Clear device interrupt cause bits from the ISR.  Before clearing,
 * every cause that was actually deliverable (set in the ISR and
 * enabled in the IMR) is credited to the "posted" statistics, and one
 * CPU post is counted if any such cause existed.  If no enabled cause
 * remains afterwards, the CPU interrupt line is deasserted.
 *
 * @param interrupts ISR_* cause bits to clear.
 */
void
NSGigE::devIntrClear(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot clear a reserved interrupt");

    if (regs.isr & regs.imr & ISR_SWI) {
        postedSwi++;
    }
    if (regs.isr & regs.imr & ISR_RXIDLE) {
        postedRxIdle++;
    }
    if (regs.isr & regs.imr & ISR_RXOK) {
        postedRxOk++;
    }
    if (regs.isr & regs.imr & ISR_RXDESC) {
        postedRxDesc++;
    }
    if (regs.isr & regs.imr & ISR_TXOK) {
        postedTxOk++;
    }
    if (regs.isr & regs.imr & ISR_TXIDLE) {
        postedTxIdle++;
    }
    if (regs.isr & regs.imr & ISR_TXDESC) {
        postedTxDesc++;
    }
    if (regs.isr & regs.imr & ISR_RXORN) {
        postedRxOrn++;
    }

    // one actual post to the CPU covered all of the above causes
    if (regs.isr & regs.imr & (ISR_SWI | ISR_RXIDLE | ISR_RXOK | ISR_RXDESC |
                               ISR_TXOK | ISR_TXIDLE | ISR_TXDESC | ISR_RXORN) )
        postedInterrupts++;

    interrupts &= ~ISR_NOIMPL;
    regs.isr &= ~interrupts;

    DPRINTF(EthernetIntr,
            "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
            interrupts, regs.isr, regs.imr);

    if (!(regs.isr & regs.imr))
        cpuIntrClear();
}
1234
1235 void
1236 NSGigE::devIntrChangeMask()
1237 {
1238 DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
1239 regs.isr, regs.imr, regs.isr & regs.imr);
1240
1241 if (regs.isr & regs.imr)
1242 cpuIntrPost(curTick);
1243 else
1244 cpuIntrClear();
1245 }
1246
/**
 * Schedule delivery of an interrupt to the CPU at the given tick,
 * coalescing with any already-scheduled delivery.
 *
 * @param when Tick at which the interrupt should fire (>= curTick).
 */
void
NSGigE::cpuIntrPost(Tick when)
{
    // If the interrupt you want to post is later than an interrupt
    // already scheduled, just let it post in the coming one and don't
    // schedule another.
    // HOWEVER, must be sure that the scheduled intrTick is in the
    // future (this was formerly the source of a bug)
    /**
     * @todo this warning should be removed and the intrTick code should
     * be fixed.
     */
    assert(when >= curTick);
    assert(intrTick >= curTick || intrTick == 0);
    if (when > intrTick && intrTick != 0) {
        DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
                intrTick);
        return;
    }

    intrTick = when;
    if (intrTick < curTick) {
        // should be unreachable given the asserts above; recover by
        // firing immediately
        debug_break();
        intrTick = curTick;
    }

    DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
            intrTick);

    // replace any previously scheduled (later) event with this one
    if (intrEvent)
        intrEvent->squash();
    intrEvent = new IntrEvent(this, true);
    intrEvent->schedule(intrTick);
}
1281
1282 void
1283 NSGigE::cpuInterrupt()
1284 {
1285 assert(intrTick == curTick);
1286
1287 // Whether or not there's a pending interrupt, we don't care about
1288 // it anymore
1289 intrEvent = 0;
1290 intrTick = 0;
1291
1292 // Don't send an interrupt if there's already one
1293 if (cpuPendingIntr) {
1294 DPRINTF(EthernetIntr,
1295 "would send an interrupt now, but there's already pending\n");
1296 } else {
1297 // Send interrupt
1298 cpuPendingIntr = true;
1299
1300 DPRINTF(EthernetIntr, "posting interrupt\n");
1301 intrPost();
1302 }
1303 }
1304
1305 void
1306 NSGigE::cpuIntrClear()
1307 {
1308 if (!cpuPendingIntr)
1309 return;
1310
1311 if (intrEvent) {
1312 intrEvent->squash();
1313 intrEvent = 0;
1314 }
1315
1316 intrTick = 0;
1317
1318 cpuPendingIntr = false;
1319
1320 DPRINTF(EthernetIntr, "clearing interrupt\n");
1321 intrClear();
1322 }
1323
1324 bool
1325 NSGigE::cpuIntrPending() const
1326 { return cpuPendingIntr; }
1327
1328 void
1329 NSGigE::txReset()
1330 {
1331
1332 DPRINTF(Ethernet, "transmit reset\n");
1333
1334 CTDD = false;
1335 txEnable = false;;
1336 txFragPtr = 0;
1337 assert(txDescCnt == 0);
1338 txFifo.clear();
1339 txState = txIdle;
1340 assert(txDmaState == dmaIdle);
1341 }
1342
1343 void
1344 NSGigE::rxReset()
1345 {
1346 DPRINTF(Ethernet, "receive reset\n");
1347
1348 CRDD = false;
1349 assert(rxPktBytes == 0);
1350 rxEnable = false;
1351 rxFragPtr = 0;
1352 assert(rxDescCnt == 0);
1353 assert(rxDmaState == dmaIdle);
1354 rxFifo.clear();
1355 rxState = rxIdle;
1356 }
1357
1358 void
1359 NSGigE::regsReset()
1360 {
1361 memset(&regs, 0, sizeof(regs));
1362 regs.config = CFGR_LNKSTS;
1363 regs.mear = 0x22;
1364 regs.txcfg = 0x120; // set drain threshold to 1024 bytes and
1365 // fill threshold to 32 bytes
1366 regs.rxcfg = 0x4; // set drain threshold to 16 bytes
1367 regs.srr = 0x0103; // set the silicon revision to rev B or 0x103
1368 regs.mibc = MIBC_FRZ;
1369 regs.vdr = 0x81; // set the vlan tag type to 802.1q
1370 regs.tesr = 0xc000; // TBI capable of both full and half duplex
1371
1372 extstsEnable = false;
1373 acceptBroadcast = false;
1374 acceptMulticast = false;
1375 acceptUnicast = false;
1376 acceptPerfect = false;
1377 acceptArp = false;
1378 }
1379
1380 void
1381 NSGigE::rxDmaReadCopy()
1382 {
1383 assert(rxDmaState == dmaReading);
1384
1385 physmem->dma_read((uint8_t *)rxDmaData, rxDmaAddr, rxDmaLen);
1386 rxDmaState = dmaIdle;
1387
1388 DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
1389 rxDmaAddr, rxDmaLen);
1390 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1391 }
1392
1393 bool
1394 NSGigE::doRxDmaRead()
1395 {
1396 assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
1397 rxDmaState = dmaReading;
1398
1399 if (dmaInterface && !rxDmaFree) {
1400 if (dmaInterface->busy())
1401 rxDmaState = dmaReadWaiting;
1402 else
1403 dmaInterface->doDMA(Read, rxDmaAddr, rxDmaLen, curTick,
1404 &rxDmaReadEvent, true);
1405 return true;
1406 }
1407
1408 if (dmaReadDelay == 0 && dmaReadFactor == 0) {
1409 rxDmaReadCopy();
1410 return false;
1411 }
1412
1413 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
1414 Tick start = curTick + dmaReadDelay + factor;
1415 rxDmaReadEvent.schedule(start);
1416 return true;
1417 }
1418
1419 void
1420 NSGigE::rxDmaReadDone()
1421 {
1422 assert(rxDmaState == dmaReading);
1423 rxDmaReadCopy();
1424
1425 // If the transmit state machine has a pending DMA, let it go first
1426 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1427 txKick();
1428
1429 rxKick();
1430 }
1431
1432 void
1433 NSGigE::rxDmaWriteCopy()
1434 {
1435 assert(rxDmaState == dmaWriting);
1436
1437 physmem->dma_write(rxDmaAddr, (uint8_t *)rxDmaData, rxDmaLen);
1438 rxDmaState = dmaIdle;
1439
1440 DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
1441 rxDmaAddr, rxDmaLen);
1442 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1443 }
1444
1445 bool
1446 NSGigE::doRxDmaWrite()
1447 {
1448 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
1449 rxDmaState = dmaWriting;
1450
1451 if (dmaInterface && !rxDmaFree) {
1452 if (dmaInterface->busy())
1453 rxDmaState = dmaWriteWaiting;
1454 else
1455 dmaInterface->doDMA(WriteInvalidate, rxDmaAddr, rxDmaLen, curTick,
1456 &rxDmaWriteEvent, true);
1457 return true;
1458 }
1459
1460 if (dmaWriteDelay == 0 && dmaWriteFactor == 0) {
1461 rxDmaWriteCopy();
1462 return false;
1463 }
1464
1465 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
1466 Tick start = curTick + dmaWriteDelay + factor;
1467 rxDmaWriteEvent.schedule(start);
1468 return true;
1469 }
1470
1471 void
1472 NSGigE::rxDmaWriteDone()
1473 {
1474 assert(rxDmaState == dmaWriting);
1475 rxDmaWriteCopy();
1476
1477 // If the transmit state machine has a pending DMA, let it go first
1478 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1479 txKick();
1480
1481 rxKick();
1482 }
1483
void
NSGigE::rxKick()
{
    // Run the receive state machine until it blocks on a DMA or runs
    // out of work. States follow the DP83820 receive flow: read a
    // descriptor, copy packet fragments from the rx fifo into the
    // descriptor's buffer, write back status, advance to the next
    // descriptor.
    DPRINTF(EthernetSM, "receive kick rxState=%s (rxBuf.size=%d)\n",
            NsRxStateStrings[rxState], rxFifo.size());

    if (rxKickTick > curTick) {
        // Throttled: not allowed to run again until rxKickTick.
        DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
                rxKickTick);
        return;
    }

  next:
    // First retry any receive DMA that was stalled on a busy interface.
    switch(rxDmaState) {
      case dmaReadWaiting:
        if (doRxDmaRead())
            goto exit;
        break;
      case dmaWriteWaiting:
        if (doRxDmaWrite())
            goto exit;
        break;
      default:
        break;
    }

    // see state machine from spec for details
    // the way this works is, if you finish work on one state and can
    // go directly to another, you do that through jumping to the
    // label "next". however, if you have intermediate work, like DMA
    // so that you can't go to the next state yet, you go to exit and
    // exit the loop. however, when the DMA is done it will trigger
    // an event and come back to this loop.
    switch (rxState) {
      case rxIdle:
        if (!rxEnable) {
            DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n");
            goto exit;
        }

        if (CRDD) {
            // Current descriptor was already marked done; just refresh
            // its link field to find the next descriptor.
            rxState = rxDescRefr;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = &rxDescCache + offsetof(ns_desc, link);
            rxDmaLen = sizeof(rxDescCache.link);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        } else {
            // Fetch the full descriptor at regs.rxdp into rxDescCache.
            rxState = rxDescRead;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = &rxDescCache;
            rxDmaLen = sizeof(ns_desc);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      case rxDescRefr:
        // Wait for the link-field DMA read to finish.
        if (rxDmaState != dmaIdle)
            goto exit;

        rxState = rxAdvance;
        break;

      case rxDescRead:
        // Wait for the descriptor DMA read to finish.
        if (rxDmaState != dmaIdle)
            goto exit;

        DPRINTF(EthernetDesc,
                "rxDescCache: addr=%08x read descriptor\n",
                regs.rxdp & 0x3fffffff);
        DPRINTF(EthernetDesc,
                "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
                rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts,
                rxDescCache.extsts);

        if (rxDescCache.cmdsts & CMDSTS_OWN) {
            // Descriptor still owned by the driver: ring is full, idle.
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            goto exit;
        } else {
            rxState = rxFifoBlock;
            rxFragPtr = rxDescCache.bufptr;
            rxDescCnt = rxDescCache.cmdsts & CMDSTS_LEN_MASK;
        }
        break;

      case rxFifoBlock:
        if (!rxPacket) {
            /**
             * @todo in reality, we should be able to start processing
             * the packet as it arrives, and not have to wait for the
             * full packet ot be in the receive fifo.
             */
            if (rxFifo.empty())
                goto exit;

            DPRINTF(EthernetSM, "****processing receive of new packet****\n");

            // If we don't have a packet, grab a new one from the fifo.
            rxPacket = rxFifo.front();
            rxPktBytes = rxPacket->length;
            rxPacketBufPtr = rxPacket->data;

#if TRACING_ON
            if (DTRACE(Ethernet)) {
                IpPtr ip(rxPacket);
                if (ip) {
                    DPRINTF(Ethernet, "ID is %d\n", ip->id());
                    TcpPtr tcp(ip);
                    if (tcp) {
                        DPRINTF(Ethernet,
                                "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
                                tcp->sport(), tcp->dport(), tcp->seq(),
                                tcp->ack());
                    }
                }
            }
#endif

            // sanity check - i think the driver behaves like this
            assert(rxDescCnt >= rxPktBytes);
            rxFifo.pop();
        }


        // dont' need the && rxDescCnt > 0 if driver sanity check
        // above holds
        if (rxPktBytes > 0) {
            // More packet data to copy: DMA the next fragment into the
            // descriptor's buffer.
            rxState = rxFragWrite;
            // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
            // check holds
            rxXferLen = rxPktBytes;

            rxDmaAddr = rxFragPtr & 0x3fffffff;
            rxDmaData = rxPacketBufPtr;
            rxDmaLen = rxXferLen;
            rxDmaFree = dmaDataFree;

            if (doRxDmaWrite())
                goto exit;

        } else {
            // Whole packet copied: build the status writeback.
            rxState = rxDescWrite;

            //if (rxPktBytes == 0) {  /* packet is done */
            assert(rxPktBytes == 0);
            DPRINTF(EthernetSM, "done with receiving packet\n");

            rxDescCache.cmdsts |= CMDSTS_OWN;
            rxDescCache.cmdsts &= ~CMDSTS_MORE;
            rxDescCache.cmdsts |= CMDSTS_OK;
            rxDescCache.cmdsts &= 0xffff0000;
            rxDescCache.cmdsts += rxPacket->length;   //i.e. set CMDSTS_SIZE

#if 0
            /*
             * all the driver uses these are for its own stats keeping
             * which we don't care about, aren't necessary for
             * functionality and doing this would just slow us down.
             * if they end up using this in a later version for
             * functional purposes, just undef
             */
            if (rxFilterEnable) {
                rxDescCache.cmdsts &= ~CMDSTS_DEST_MASK;
                const EthAddr &dst = rxFifoFront()->dst();
                if (dst->unicast())
                    rxDescCache.cmdsts |= CMDSTS_DEST_SELF;
                if (dst->multicast())
                    rxDescCache.cmdsts |= CMDSTS_DEST_MULTI;
                if (dst->broadcast())
                    rxDescCache.cmdsts |= CMDSTS_DEST_MASK;
            }
#endif

            // If extended status is enabled, verify IP/TCP/UDP
            // checksums and record the results in extsts.
            IpPtr ip(rxPacket);
            if (extstsEnable && ip) {
                rxDescCache.extsts |= EXTSTS_IPPKT;
                rxIpChecksums++;
                if (cksum(ip) != 0) {
                    DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
                    rxDescCache.extsts |= EXTSTS_IPERR;
                }
                TcpPtr tcp(ip);
                UdpPtr udp(ip);
                if (tcp) {
                    rxDescCache.extsts |= EXTSTS_TCPPKT;
                    rxTcpChecksums++;
                    if (cksum(tcp) != 0) {
                        DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
                        rxDescCache.extsts |= EXTSTS_TCPERR;

                    }
                } else if (udp) {
                    rxDescCache.extsts |= EXTSTS_UDPPKT;
                    rxUdpChecksums++;
                    if (cksum(udp) != 0) {
                        DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
                        rxDescCache.extsts |= EXTSTS_UDPERR;
                    }
                }
            }
            rxPacket = 0;

            /*
             * the driver seems to always receive into desc buffers
             * of size 1514, so you never have a pkt that is split
             * into multiple descriptors on the receive side, so
             * i don't implement that case, hence the assert above.
             */

            DPRINTF(EthernetDesc,
                    "rxDescCache: addr=%08x writeback cmdsts extsts\n",
                    regs.rxdp & 0x3fffffff);
            DPRINTF(EthernetDesc,
                    "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
                    rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts,
                    rxDescCache.extsts);

            // Write back cmdsts and extsts (adjacent in ns_desc) in
            // one DMA.
            rxDmaAddr = (regs.rxdp + offsetof(ns_desc, cmdsts)) & 0x3fffffff;
            rxDmaData = &(rxDescCache.cmdsts);
            rxDmaLen = sizeof(rxDescCache.cmdsts) + sizeof(rxDescCache.extsts);
            rxDmaFree = dmaDescFree;

            descDmaWrites++;
            descDmaWrBytes += rxDmaLen;

            if (doRxDmaWrite())
                goto exit;
        }
        break;

      case rxFragWrite:
        // Wait for the fragment DMA write to finish, then account for
        // the bytes moved and go back for more.
        if (rxDmaState != dmaIdle)
            goto exit;

        rxPacketBufPtr += rxXferLen;
        rxFragPtr += rxXferLen;
        rxPktBytes -= rxXferLen;

        rxState = rxFifoBlock;
        break;

      case rxDescWrite:
        // Wait for the status writeback to finish, then post the
        // receive-OK (and optionally per-descriptor) interrupts.
        if (rxDmaState != dmaIdle)
            goto exit;

        assert(rxDescCache.cmdsts & CMDSTS_OWN);

        assert(rxPacket == 0);
        devIntrPost(ISR_RXOK);

        if (rxDescCache.cmdsts & CMDSTS_INTR)
            devIntrPost(ISR_RXDESC);

        if (!rxEnable) {
            DPRINTF(EthernetSM, "Halting the RX state machine\n");
            rxState = rxIdle;
            goto exit;
        } else
            rxState = rxAdvance;
        break;

      case rxAdvance:
        if (rxDescCache.link == 0) {
            // End of the descriptor ring: remember we already consumed
            // the current descriptor (CRDD) and go idle.
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            CRDD = true;
            goto exit;
        } else {
            // Follow the link to the next descriptor and fetch it.
            rxState = rxDescRead;
            regs.rxdp = rxDescCache.link;
            CRDD = false;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = &rxDescCache;
            rxDmaLen = sizeof(ns_desc);
            rxDmaFree = dmaDescFree;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("Invalid rxState!");
    }

    DPRINTF(EthernetSM, "entering next rxState=%s\n",
            NsRxStateStrings[rxState]);

    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
            NsRxStateStrings[rxState]);
}
1796
1797 void
1798 NSGigE::transmit()
1799 {
1800 if (txFifo.empty()) {
1801 DPRINTF(Ethernet, "nothing to transmit\n");
1802 return;
1803 }
1804
1805 DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n",
1806 txFifo.size());
1807 if (interface->sendPacket(txFifo.front())) {
1808 #if TRACING_ON
1809 if (DTRACE(Ethernet)) {
1810 IpPtr ip(txFifo.front());
1811 if (ip) {
1812 DPRINTF(Ethernet, "ID is %d\n", ip->id());
1813 TcpPtr tcp(ip);
1814 if (tcp) {
1815 DPRINTF(Ethernet,
1816 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1817 tcp->sport(), tcp->dport(), tcp->seq(), tcp->ack());
1818 }
1819 }
1820 }
1821 #endif
1822
1823 DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length);
1824 txBytes += txFifo.front()->length;
1825 txPackets++;
1826
1827 DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n",
1828 txFifo.avail());
1829 txFifo.pop();
1830
1831 /*
1832 * normally do a writeback of the descriptor here, and ONLY
1833 * after that is done, send this interrupt. but since our
1834 * stuff never actually fails, just do this interrupt here,
1835 * otherwise the code has to stray from this nice format.
1836 * besides, it's functionally the same.
1837 */
1838 devIntrPost(ISR_TXOK);
1839 }
1840
1841 if (!txFifo.empty() && !txEvent.scheduled()) {
1842 DPRINTF(Ethernet, "reschedule transmit\n");
1843 txEvent.schedule(curTick + retryTime);
1844 }
1845 }
1846
1847 void
1848 NSGigE::txDmaReadCopy()
1849 {
1850 assert(txDmaState == dmaReading);
1851
1852 physmem->dma_read((uint8_t *)txDmaData, txDmaAddr, txDmaLen);
1853 txDmaState = dmaIdle;
1854
1855 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
1856 txDmaAddr, txDmaLen);
1857 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1858 }
1859
1860 bool
1861 NSGigE::doTxDmaRead()
1862 {
1863 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
1864 txDmaState = dmaReading;
1865
1866 if (dmaInterface && !txDmaFree) {
1867 if (dmaInterface->busy())
1868 txDmaState = dmaReadWaiting;
1869 else
1870 dmaInterface->doDMA(Read, txDmaAddr, txDmaLen, curTick,
1871 &txDmaReadEvent, true);
1872 return true;
1873 }
1874
1875 if (dmaReadDelay == 0 && dmaReadFactor == 0.0) {
1876 txDmaReadCopy();
1877 return false;
1878 }
1879
1880 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
1881 Tick start = curTick + dmaReadDelay + factor;
1882 txDmaReadEvent.schedule(start);
1883 return true;
1884 }
1885
1886 void
1887 NSGigE::txDmaReadDone()
1888 {
1889 assert(txDmaState == dmaReading);
1890 txDmaReadCopy();
1891
1892 // If the receive state machine has a pending DMA, let it go first
1893 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1894 rxKick();
1895
1896 txKick();
1897 }
1898
1899 void
1900 NSGigE::txDmaWriteCopy()
1901 {
1902 assert(txDmaState == dmaWriting);
1903
1904 physmem->dma_write(txDmaAddr, (uint8_t *)txDmaData, txDmaLen);
1905 txDmaState = dmaIdle;
1906
1907 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
1908 txDmaAddr, txDmaLen);
1909 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1910 }
1911
1912 bool
1913 NSGigE::doTxDmaWrite()
1914 {
1915 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
1916 txDmaState = dmaWriting;
1917
1918 if (dmaInterface && !txDmaFree) {
1919 if (dmaInterface->busy())
1920 txDmaState = dmaWriteWaiting;
1921 else
1922 dmaInterface->doDMA(WriteInvalidate, txDmaAddr, txDmaLen, curTick,
1923 &txDmaWriteEvent, true);
1924 return true;
1925 }
1926
1927 if (dmaWriteDelay == 0 && dmaWriteFactor == 0.0) {
1928 txDmaWriteCopy();
1929 return false;
1930 }
1931
1932 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
1933 Tick start = curTick + dmaWriteDelay + factor;
1934 txDmaWriteEvent.schedule(start);
1935 return true;
1936 }
1937
1938 void
1939 NSGigE::txDmaWriteDone()
1940 {
1941 assert(txDmaState == dmaWriting);
1942 txDmaWriteCopy();
1943
1944 // If the receive state machine has a pending DMA, let it go first
1945 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1946 rxKick();
1947
1948 txKick();
1949 }
1950
void
NSGigE::txKick()
{
    // Run the transmit state machine until it blocks on a DMA or runs
    // out of work. Mirrors rxKick(): jump to "next" to move between
    // states, to "exit" when waiting on a DMA completion event.
    DPRINTF(EthernetSM, "transmit kick txState=%s\n",
            NsTxStateStrings[txState]);

    if (txKickTick > curTick) {
        // Throttled: not allowed to run again until txKickTick.
        DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
                txKickTick);
        return;
    }

  next:
    // First retry any transmit DMA that was stalled on a busy interface.
    switch(txDmaState) {
      case dmaReadWaiting:
        if (doTxDmaRead())
            goto exit;
        break;
      case dmaWriteWaiting:
        if (doTxDmaWrite())
            goto exit;
        break;
      default:
        break;
    }

    switch (txState) {
      case txIdle:
        if (!txEnable) {
            DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n");
            goto exit;
        }

        if (CTDD) {
            // Current descriptor was already consumed; just refresh
            // its link field to find the next descriptor.
            txState = txDescRefr;

            txDmaAddr = regs.txdp & 0x3fffffff;
            txDmaData = &txDescCache + offsetof(ns_desc, link);
            txDmaLen = sizeof(txDescCache.link);
            txDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += txDmaLen;

            if (doTxDmaRead())
                goto exit;

        } else {
            // Fetch the full descriptor at regs.txdp into txDescCache.
            txState = txDescRead;

            txDmaAddr = regs.txdp & 0x3fffffff;
            txDmaData = &txDescCache;
            txDmaLen = sizeof(ns_desc);
            txDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += txDmaLen;

            if (doTxDmaRead())
                goto exit;
        }
        break;

      case txDescRefr:
        // Wait for the link-field DMA read to finish.
        if (txDmaState != dmaIdle)
            goto exit;

        txState = txAdvance;
        break;

      case txDescRead:
        // Wait for the descriptor DMA read to finish.
        if (txDmaState != dmaIdle)
            goto exit;

        DPRINTF(EthernetDesc,
                "txDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
                txDescCache.link, txDescCache.bufptr, txDescCache.cmdsts,
                txDescCache.extsts);

        if (txDescCache.cmdsts & CMDSTS_OWN) {
            // Device owns the descriptor: there is data to transmit.
            txState = txFifoBlock;
            txFragPtr = txDescCache.bufptr;
            txDescCnt = txDescCache.cmdsts & CMDSTS_LEN_MASK;
        } else {
            // Driver still owns it: nothing queued, go idle.
            devIntrPost(ISR_TXIDLE);
            txState = txIdle;
            goto exit;
        }
        break;

      case txFifoBlock:
        if (!txPacket) {
            // Start assembling a new outgoing packet.
            DPRINTF(EthernetSM, "****starting the tx of a new packet****\n");
            txPacket = new PacketData(16384);
            txPacketBufPtr = txPacket->data;
        }

        if (txDescCnt == 0) {
            // Finished pulling all data referenced by this descriptor.
            DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n");
            if (txDescCache.cmdsts & CMDSTS_MORE) {
                // Packet continues in the next descriptor; just write
                // back ownership on this one.
                DPRINTF(EthernetSM, "there are more descriptors to come\n");
                txState = txDescWrite;

                txDescCache.cmdsts &= ~CMDSTS_OWN;

                txDmaAddr = regs.txdp + offsetof(ns_desc, cmdsts);
                txDmaAddr &= 0x3fffffff;
                txDmaData = &(txDescCache.cmdsts);
                txDmaLen = sizeof(txDescCache.cmdsts);
                txDmaFree = dmaDescFree;

                if (doTxDmaWrite())
                    goto exit;

            } else { /* this packet is totally done */
                DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
                /* deal with the the packet that just finished */
                // Optionally fill in IP/TCP/UDP checksums on the way out
                // (checksum offload as configured via VTCR/extsts).
                if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
                    IpPtr ip(txPacket);
                    if (txDescCache.extsts & EXTSTS_UDPPKT) {
                        UdpPtr udp(ip);
                        udp->sum(0);
                        udp->sum(cksum(udp));
                        txUdpChecksums++;
                    } else if (txDescCache.extsts & EXTSTS_TCPPKT) {
                        TcpPtr tcp(ip);
                        tcp->sum(0);
                        tcp->sum(cksum(tcp));
                        txTcpChecksums++;
                    }
                    if (txDescCache.extsts & EXTSTS_IPPKT) {
                        ip->sum(0);
                        ip->sum(cksum(ip));
                        txIpChecksums++;
                    }
                }

                txPacket->length = txPacketBufPtr - txPacket->data;
                // this is just because the receive can't handle a
                // packet bigger want to make sure
                assert(txPacket->length <= 1514);
#ifndef NDEBUG
                bool success =
#endif
                    txFifo.push(txPacket);
                assert(success);

                /*
                 * this following section is not tqo spec, but
                 * functionally shouldn't be any different. normally,
                 * the chip will wait til the transmit has occurred
                 * before writing back the descriptor because it has
                 * to wait to see that it was successfully transmitted
                 * to decide whether to set CMDSTS_OK or not.
                 * however, in the simulator since it is always
                 * successfully transmitted, and writing it exactly to
                 * spec would complicate the code, we just do it here
                 */

                txDescCache.cmdsts &= ~CMDSTS_OWN;
                txDescCache.cmdsts |= CMDSTS_OK;

                DPRINTF(EthernetDesc,
                        "txDesc writeback: cmdsts=%08x extsts=%08x\n",
                        txDescCache.cmdsts, txDescCache.extsts);

                // Write back cmdsts and extsts (adjacent in ns_desc)
                // in one DMA.
                txDmaAddr = regs.txdp + offsetof(ns_desc, cmdsts);
                txDmaAddr &= 0x3fffffff;
                txDmaData = &(txDescCache.cmdsts);
                txDmaLen = sizeof(txDescCache.cmdsts) +
                    sizeof(txDescCache.extsts);
                txDmaFree = dmaDescFree;

                descDmaWrites++;
                descDmaWrBytes += txDmaLen;

                transmit();
                txPacket = 0;

                if (!txEnable) {
                    DPRINTF(EthernetSM, "halting TX state machine\n");
                    txState = txIdle;
                    goto exit;
                } else
                    txState = txAdvance;

                if (doTxDmaWrite())
                    goto exit;
            }
        } else {
            DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
            if (!txFifo.full()) {
                txState = txFragRead;

                /*
                 * The number of bytes transferred is either whatever
                 * is left in the descriptor (txDescCnt), or if there
                 * is not enough room in the fifo, just whatever room
                 * is left in the fifo
                 */
                txXferLen = min<uint32_t>(txDescCnt, txFifo.avail());

                txDmaAddr = txFragPtr & 0x3fffffff;
                txDmaData = txPacketBufPtr;
                txDmaLen = txXferLen;
                txDmaFree = dmaDataFree;

                if (doTxDmaRead())
                    goto exit;
            } else {
                // Fifo is full: drain it by transmitting, then wait.
                txState = txFifoBlock;
                transmit();

                goto exit;
            }

        }
        break;

      case txFragRead:
        // Wait for the fragment DMA read to finish, then account for
        // the bytes moved and go back for more.
        if (txDmaState != dmaIdle)
            goto exit;

        txPacketBufPtr += txXferLen;
        txFragPtr += txXferLen;
        txDescCnt -= txXferLen;
        txFifo.reserve(txXferLen);

        txState = txFifoBlock;
        break;

      case txDescWrite:
        // Wait for the ownership writeback to finish; post the
        // per-descriptor interrupt if requested.
        if (txDmaState != dmaIdle)
            goto exit;

        if (txDescCache.cmdsts & CMDSTS_INTR)
            devIntrPost(ISR_TXDESC);

        txState = txAdvance;
        break;

      case txAdvance:
        if (txDescCache.link == 0) {
            // End of the descriptor ring: go idle.
            devIntrPost(ISR_TXIDLE);
            txState = txIdle;
            goto exit;
        } else {
            // Follow the link to the next descriptor and fetch it.
            txState = txDescRead;
            regs.txdp = txDescCache.link;
            CTDD = false;

            txDmaAddr = txDescCache.link & 0x3fffffff;
            txDmaData = &txDescCache;
            txDmaLen = sizeof(ns_desc);
            txDmaFree = dmaDescFree;

            if (doTxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("invalid state");
    }

    DPRINTF(EthernetSM, "entering next txState=%s\n",
            NsTxStateStrings[txState]);

    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
            NsTxStateStrings[txState]);
}
2228
2229 void
2230 NSGigE::transferDone()
2231 {
2232 if (txFifo.empty()) {
2233 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
2234 return;
2235 }
2236
2237 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
2238
2239 if (txEvent.scheduled())
2240 txEvent.reschedule(curTick + cycles(1));
2241 else
2242 txEvent.schedule(curTick + cycles(1));
2243 }
2244
2245 bool
2246 NSGigE::rxFilter(const PacketPtr &packet)
2247 {
2248 EthPtr eth = packet;
2249 bool drop = true;
2250 string type;
2251
2252 const EthAddr &dst = eth->dst();
2253 if (dst.unicast()) {
2254 // If we're accepting all unicast addresses
2255 if (acceptUnicast)
2256 drop = false;
2257
2258 // If we make a perfect match
2259 if (acceptPerfect && dst == rom.perfectMatch)
2260 drop = false;
2261
2262 if (acceptArp && eth->type() == ETH_TYPE_ARP)
2263 drop = false;
2264
2265 } else if (dst.broadcast()) {
2266 // if we're accepting broadcasts
2267 if (acceptBroadcast)
2268 drop = false;
2269
2270 } else if (dst.multicast()) {
2271 // if we're accepting all multicasts
2272 if (acceptMulticast)
2273 drop = false;
2274
2275 }
2276
2277 if (drop) {
2278 DPRINTF(Ethernet, "rxFilter drop\n");
2279 DDUMP(EthernetData, packet->data, packet->length);
2280 }
2281
2282 return drop;
2283 }
2284
2285 bool
2286 NSGigE::recvPacket(PacketPtr packet)
2287 {
2288 rxBytes += packet->length;
2289 rxPackets++;
2290
2291 DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
2292 rxFifo.avail());
2293
2294 if (!rxEnable) {
2295 DPRINTF(Ethernet, "receive disabled...packet dropped\n");
2296 interface->recvDone();
2297 return true;
2298 }
2299
2300 if (rxFilterEnable && rxFilter(packet)) {
2301 DPRINTF(Ethernet, "packet filtered...dropped\n");
2302 interface->recvDone();
2303 return true;
2304 }
2305
2306 if (rxFifo.avail() < packet->length) {
2307 #if TRACING_ON
2308 IpPtr ip(packet);
2309 TcpPtr tcp(ip);
2310 if (ip) {
2311 DPRINTF(Ethernet,
2312 "packet won't fit in receive buffer...pkt ID %d dropped\n",
2313 ip->id());
2314 if (tcp) {
2315 DPRINTF(Ethernet, "Seq=%d\n", tcp->seq());
2316 }
2317 }
2318 #endif
2319 droppedPackets++;
2320 devIntrPost(ISR_RXORN);
2321 return false;
2322 }
2323
2324 rxFifo.push(packet);
2325 interface->recvDone();
2326
2327 rxKick();
2328 return true;
2329 }
2330
2331 //=====================================================================
2332 //
2333 //
void
NSGigE::serialize(ostream &os)
{
    // Checkpoint the full device state. NOTE: the order and names here
    // define the checkpoint format and must stay in sync with
    // unserialize().
    // Serialize the PciDev base class
    PciDev::serialize(os);

    /*
     * Finalize any DMA events now.
     */
    // Any in-flight DMA copy is completed eagerly so the checkpoint
    // never contains a half-done transfer.
    if (rxDmaReadEvent.scheduled())
        rxDmaReadCopy();
    if (rxDmaWriteEvent.scheduled())
        rxDmaWriteCopy();
    if (txDmaReadEvent.scheduled())
        txDmaReadCopy();
    if (txDmaWriteEvent.scheduled())
        txDmaWriteCopy();

    /*
     * Serialize the device registers
     */
    SERIALIZE_SCALAR(regs.command);
    SERIALIZE_SCALAR(regs.config);
    SERIALIZE_SCALAR(regs.mear);
    SERIALIZE_SCALAR(regs.ptscr);
    SERIALIZE_SCALAR(regs.isr);
    SERIALIZE_SCALAR(regs.imr);
    SERIALIZE_SCALAR(regs.ier);
    SERIALIZE_SCALAR(regs.ihr);
    SERIALIZE_SCALAR(regs.txdp);
    SERIALIZE_SCALAR(regs.txdp_hi);
    SERIALIZE_SCALAR(regs.txcfg);
    SERIALIZE_SCALAR(regs.gpior);
    SERIALIZE_SCALAR(regs.rxdp);
    SERIALIZE_SCALAR(regs.rxdp_hi);
    SERIALIZE_SCALAR(regs.rxcfg);
    SERIALIZE_SCALAR(regs.pqcr);
    SERIALIZE_SCALAR(regs.wcsr);
    SERIALIZE_SCALAR(regs.pcr);
    SERIALIZE_SCALAR(regs.rfcr);
    SERIALIZE_SCALAR(regs.rfdr);
    SERIALIZE_SCALAR(regs.srr);
    SERIALIZE_SCALAR(regs.mibc);
    SERIALIZE_SCALAR(regs.vrcr);
    SERIALIZE_SCALAR(regs.vtcr);
    SERIALIZE_SCALAR(regs.vdr);
    SERIALIZE_SCALAR(regs.ccsr);
    SERIALIZE_SCALAR(regs.tbicr);
    SERIALIZE_SCALAR(regs.tbisr);
    SERIALIZE_SCALAR(regs.tanar);
    SERIALIZE_SCALAR(regs.tanlpar);
    SERIALIZE_SCALAR(regs.taner);
    SERIALIZE_SCALAR(regs.tesr);

    SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);

    SERIALIZE_SCALAR(ioEnable);

    /*
     * Serialize the data Fifos
     */
    rxFifo.serialize("rxFifo", os);
    txFifo.serialize("txFifo", os);

    /*
     * Serialize the various helper variables
     */
    // Packets in flight are optional; store an existence flag, then
    // the packet data and the offset of the current buffer pointer.
    bool txPacketExists = txPacket;
    SERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        txPacket->length = txPacketBufPtr - txPacket->data;
        txPacket->serialize("txPacket", os);
        uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
        SERIALIZE_SCALAR(txPktBufPtr);
    }

    bool rxPacketExists = rxPacket;
    SERIALIZE_SCALAR(rxPacketExists);
    if (rxPacketExists) {
        rxPacket->serialize("rxPacket", os);
        uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
        SERIALIZE_SCALAR(rxPktBufPtr);
    }

    SERIALIZE_SCALAR(txXferLen);
    SERIALIZE_SCALAR(rxXferLen);

    /*
     * Serialize DescCaches
     */
    SERIALIZE_SCALAR(txDescCache.link);
    SERIALIZE_SCALAR(txDescCache.bufptr);
    SERIALIZE_SCALAR(txDescCache.cmdsts);
    SERIALIZE_SCALAR(txDescCache.extsts);
    SERIALIZE_SCALAR(rxDescCache.link);
    SERIALIZE_SCALAR(rxDescCache.bufptr);
    SERIALIZE_SCALAR(rxDescCache.cmdsts);
    SERIALIZE_SCALAR(rxDescCache.extsts);

    /*
     * Serialize tx state machine
     */
    // Enums are stored as ints so the checkpoint format is stable.
    int txState = this->txState;
    SERIALIZE_SCALAR(txState);
    SERIALIZE_SCALAR(txEnable);
    SERIALIZE_SCALAR(CTDD);
    SERIALIZE_SCALAR(txFragPtr);
    SERIALIZE_SCALAR(txDescCnt);
    int txDmaState = this->txDmaState;
    SERIALIZE_SCALAR(txDmaState);

    /*
     * Serialize rx state machine
     */
    int rxState = this->rxState;
    SERIALIZE_SCALAR(rxState);
    SERIALIZE_SCALAR(rxEnable);
    SERIALIZE_SCALAR(CRDD);
    SERIALIZE_SCALAR(rxPktBytes);
    SERIALIZE_SCALAR(rxFragPtr);
    SERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState = this->rxDmaState;
    SERIALIZE_SCALAR(rxDmaState);

    SERIALIZE_SCALAR(extstsEnable);

    /*
     * If there's a pending transmit, store the time so we can
     * reschedule it later
     */
    // Stored as a delta from curTick so it survives restore at a
    // different absolute tick; 0 means no pending transmit.
    Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
    SERIALIZE_SCALAR(transmitTick);

    /*
     * receive address filter settings
     */
    SERIALIZE_SCALAR(rxFilterEnable);
    SERIALIZE_SCALAR(acceptBroadcast);
    SERIALIZE_SCALAR(acceptMulticast);
    SERIALIZE_SCALAR(acceptUnicast);
    SERIALIZE_SCALAR(acceptPerfect);
    SERIALIZE_SCALAR(acceptArp);

    /*
     * Keep track of pending interrupt status.
     */
    SERIALIZE_SCALAR(intrTick);
    SERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick = 0;
    if (intrEvent)
        intrEventTick = intrEvent->when();
    SERIALIZE_SCALAR(intrEventTick);

}
2488
void
NSGigE::unserialize(Checkpoint *cp, const std::string &section)
{
    // Restore device state written by NSGigE::serialize().  Every
    // UNSERIALIZE_* here must pair with a SERIALIZE_* of the same name
    // in serialize(); entries are looked up by name in the checkpoint,
    // but the order below mirrors the write order for readability.

    // Unserialize the PciDev base class
    PciDev::unserialize(cp, section);

    // DP83820 register file
    UNSERIALIZE_SCALAR(regs.command);
    UNSERIALIZE_SCALAR(regs.config);
    UNSERIALIZE_SCALAR(regs.mear);
    UNSERIALIZE_SCALAR(regs.ptscr);
    UNSERIALIZE_SCALAR(regs.isr);
    UNSERIALIZE_SCALAR(regs.imr);
    UNSERIALIZE_SCALAR(regs.ier);
    UNSERIALIZE_SCALAR(regs.ihr);
    UNSERIALIZE_SCALAR(regs.txdp);
    UNSERIALIZE_SCALAR(regs.txdp_hi);
    UNSERIALIZE_SCALAR(regs.txcfg);
    UNSERIALIZE_SCALAR(regs.gpior);
    UNSERIALIZE_SCALAR(regs.rxdp);
    UNSERIALIZE_SCALAR(regs.rxdp_hi);
    UNSERIALIZE_SCALAR(regs.rxcfg);
    UNSERIALIZE_SCALAR(regs.pqcr);
    UNSERIALIZE_SCALAR(regs.wcsr);
    UNSERIALIZE_SCALAR(regs.pcr);
    UNSERIALIZE_SCALAR(regs.rfcr);
    UNSERIALIZE_SCALAR(regs.rfdr);
    UNSERIALIZE_SCALAR(regs.srr);
    UNSERIALIZE_SCALAR(regs.mibc);
    UNSERIALIZE_SCALAR(regs.vrcr);
    UNSERIALIZE_SCALAR(regs.vtcr);
    UNSERIALIZE_SCALAR(regs.vdr);
    UNSERIALIZE_SCALAR(regs.ccsr);
    UNSERIALIZE_SCALAR(regs.tbicr);
    UNSERIALIZE_SCALAR(regs.tbisr);
    UNSERIALIZE_SCALAR(regs.tanar);
    UNSERIALIZE_SCALAR(regs.tanlpar);
    UNSERIALIZE_SCALAR(regs.taner);
    UNSERIALIZE_SCALAR(regs.tesr);

    // Receive-filter perfect-match station address
    UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);

    UNSERIALIZE_SCALAR(ioEnable);

    /*
     * unserialize the data fifos
     */
    rxFifo.unserialize("rxFifo", cp, section);
    txFifo.unserialize("txFifo", cp, section);

    /*
     * unserialize the various helper variables
     */
    // An in-flight tx packet is checkpointed only if one existed; the
    // working buffer pointer was saved as a byte offset into the packet
    // data and is rebuilt here as a raw pointer.
    bool txPacketExists;
    UNSERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        // 16384-byte buffer — presumably matches the allocation used on
        // the transmit path when the packet was first created; TODO confirm.
        txPacket = new PacketData(16384);
        txPacket->unserialize("txPacket", cp, section);
        uint32_t txPktBufPtr;
        UNSERIALIZE_SCALAR(txPktBufPtr);
        txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
    } else
        txPacket = 0;

    // Same offset-to-pointer reconstruction for a partially received packet.
    bool rxPacketExists;
    UNSERIALIZE_SCALAR(rxPacketExists);
    rxPacket = 0;  // NOTE(review): redundant with the else branch below; harmless
    if (rxPacketExists) {
        rxPacket = new PacketData(16384);
        rxPacket->unserialize("rxPacket", cp, section);
        uint32_t rxPktBufPtr;
        UNSERIALIZE_SCALAR(rxPktBufPtr);
        rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
    } else
        rxPacket = 0;

    UNSERIALIZE_SCALAR(txXferLen);
    UNSERIALIZE_SCALAR(rxXferLen);

    /*
     * Unserialize DescCaches (cached copies of the current tx/rx
     * DMA descriptors)
     */
    UNSERIALIZE_SCALAR(txDescCache.link);
    UNSERIALIZE_SCALAR(txDescCache.bufptr);
    UNSERIALIZE_SCALAR(txDescCache.cmdsts);
    UNSERIALIZE_SCALAR(txDescCache.extsts);
    UNSERIALIZE_SCALAR(rxDescCache.link);
    UNSERIALIZE_SCALAR(rxDescCache.bufptr);
    UNSERIALIZE_SCALAR(rxDescCache.cmdsts);
    UNSERIALIZE_SCALAR(rxDescCache.extsts);

    /*
     * unserialize tx state machine
     * (enums are checkpointed as ints and cast back here)
     */
    int txState;
    UNSERIALIZE_SCALAR(txState);
    this->txState = (TxState) txState;
    UNSERIALIZE_SCALAR(txEnable);
    UNSERIALIZE_SCALAR(CTDD);
    UNSERIALIZE_SCALAR(txFragPtr);
    UNSERIALIZE_SCALAR(txDescCnt);
    int txDmaState;
    UNSERIALIZE_SCALAR(txDmaState);
    this->txDmaState = (DmaState) txDmaState;

    /*
     * unserialize rx state machine
     */
    int rxState;
    UNSERIALIZE_SCALAR(rxState);
    this->rxState = (RxState) rxState;
    UNSERIALIZE_SCALAR(rxEnable);
    UNSERIALIZE_SCALAR(CRDD);
    UNSERIALIZE_SCALAR(rxPktBytes);
    UNSERIALIZE_SCALAR(rxFragPtr);
    UNSERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState;
    UNSERIALIZE_SCALAR(rxDmaState);
    this->rxDmaState = (DmaState) rxDmaState;

    UNSERIALIZE_SCALAR(extstsEnable);

    /*
     * If there's a pending transmit, reschedule it now.
     * transmitTick was saved relative to the checkpoint's curTick, so
     * it is re-added to the current tick here.
     */
    Tick transmitTick;
    UNSERIALIZE_SCALAR(transmitTick);
    if (transmitTick)
        txEvent.schedule(curTick + transmitTick);

    /*
     * unserialize receive address filter settings
     */
    UNSERIALIZE_SCALAR(rxFilterEnable);
    UNSERIALIZE_SCALAR(acceptBroadcast);
    UNSERIALIZE_SCALAR(acceptMulticast);
    UNSERIALIZE_SCALAR(acceptUnicast);
    UNSERIALIZE_SCALAR(acceptPerfect);
    UNSERIALIZE_SCALAR(acceptArp);

    /*
     * Keep track of pending interrupt status.
     */
    UNSERIALIZE_SCALAR(intrTick);
    UNSERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick;
    UNSERIALIZE_SCALAR(intrEventTick);
    if (intrEventTick) {
        // NOTE(review): intrEventTick is the *absolute* tick saved by
        // serialize() (intrEvent->when()), unlike transmitTick above
        // which is relative — this assumes the checkpoint is restored
        // at the same curTick it was taken; verify.
        intrEvent = new IntrEvent(this, true);
        intrEvent->schedule(intrEventTick);
    }

    /*
     * re-add addrRanges to bus bridges (bus routing tables are not
     * checkpointed, so the PIO BAR ranges must be re-registered)
     */
    if (pioInterface) {
        pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));
        pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));
    }
}
2648
2649 Tick
2650 NSGigE::cacheAccess(MemReqPtr &req)
2651 {
2652 DPRINTF(EthernetPIO, "timing access to paddr=%#x (daddr=%#x)\n",
2653 req->paddr, req->paddr - addr);
2654 return curTick + pioLatency;
2655 }
2656
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

    // Interface on the other end of the Ethernet link (may be NULL).
    SimObjectParam<EtherInt *> peer;
    // The NSGigE device this interface belongs to.
    SimObjectParam<NSGigE *> device;

END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt)

    INIT_PARAM_DFLT(peer, "peer interface", NULL),
    INIT_PARAM(device, "Ethernet device of this interface")

END_INIT_SIM_OBJECT_PARAMS(NSGigEInt)
2670
2671 CREATE_SIM_OBJECT(NSGigEInt)
2672 {
2673 NSGigEInt *dev_int = new NSGigEInt(getInstanceName(), device);
2674
2675 EtherInt *p = (EtherInt *)peer;
2676 if (p) {
2677 dev_int->setPeer(p);
2678 p->setPeer(dev_int);
2679 }
2680
2681 return dev_int;
2682 }
2683
2684 REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt)
2685
2686
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE)

    // Device placement and state-machine timing
    Param<Addr> addr;
    Param<Tick> clock;
    Param<Tick> tx_delay;
    Param<Tick> rx_delay;
    Param<Tick> intr_delay;

    // Memory-system attachment
    SimObjectParam<MemoryController *> mmu;
    SimObjectParam<PhysicalMemory *> physmem;

    // Receive filtering and MAC address
    Param<bool> rx_filter;
    Param<string> hardware_address;

    // Bus attachments and PIO timing
    SimObjectParam<Bus*> io_bus;
    SimObjectParam<Bus*> payload_bus;
    SimObjectParam<HierParams *> hier;
    Param<Tick> pio_latency;

    // DMA timing model knobs
    Param<bool> dma_desc_free;
    Param<bool> dma_data_free;
    Param<Tick> dma_read_delay;
    Param<Tick> dma_write_delay;
    Param<Tick> dma_read_factor;
    Param<Tick> dma_write_factor;

    // PCI configuration-space plumbing and identity
    SimObjectParam<PciConfigAll *> configspace;
    SimObjectParam<PciConfigData *> configdata;
    SimObjectParam<Platform *> platform;
    Param<uint32_t> pci_bus;
    Param<uint32_t> pci_dev;
    Param<uint32_t> pci_func;

    // FIFO sizing and miscellaneous device controls
    Param<uint32_t> tx_fifo_size;
    Param<uint32_t> rx_fifo_size;
    Param<uint32_t> m5reg;
    Param<bool> dma_no_allocate;

END_DECLARE_SIM_OBJECT_PARAMS(NSGigE)
2720
// Parameter descriptions and defaults; entries must match the
// declarations in BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE) above.
BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE)

    INIT_PARAM(addr, "Device Address"),
    INIT_PARAM(clock, "State machine processor frequency"),
    INIT_PARAM(tx_delay, "Transmit Delay"),
    INIT_PARAM(rx_delay, "Receive Delay"),
    INIT_PARAM(intr_delay, "Interrupt Delay in microseconds"),
    INIT_PARAM(mmu, "Memory Controller"),
    INIT_PARAM(physmem, "Physical Memory"),
    INIT_PARAM_DFLT(rx_filter, "Enable Receive Filter", true),
    INIT_PARAM_DFLT(hardware_address, "Ethernet Hardware Address",
                    "00:99:00:00:00:01"),
    INIT_PARAM_DFLT(io_bus, "The IO Bus to attach to for headers", NULL),
    INIT_PARAM_DFLT(payload_bus, "The IO Bus to attach to for payload", NULL),
    INIT_PARAM_DFLT(hier, "Hierarchy global variables", &defaultHierParams),
    INIT_PARAM_DFLT(pio_latency, "Programmed IO latency in bus cycles", 1),
    INIT_PARAM_DFLT(dma_desc_free, "DMA of Descriptors is free", false),
    INIT_PARAM_DFLT(dma_data_free, "DMA of Data is free", false),
    INIT_PARAM_DFLT(dma_read_delay, "fixed delay for dma reads", 0),
    INIT_PARAM_DFLT(dma_write_delay, "fixed delay for dma writes", 0),
    INIT_PARAM_DFLT(dma_read_factor, "multiplier for dma reads", 0),
    INIT_PARAM_DFLT(dma_write_factor, "multiplier for dma writes", 0),
    INIT_PARAM(configspace, "PCI Configspace"),
    INIT_PARAM(configdata, "PCI Config data"),
    INIT_PARAM(platform, "Platform"),
    INIT_PARAM(pci_bus, "PCI bus"),
    INIT_PARAM(pci_dev, "PCI device number"),
    INIT_PARAM(pci_func, "PCI function code"),
    INIT_PARAM_DFLT(tx_fifo_size, "max size in bytes of txFifo", 131072),
    INIT_PARAM_DFLT(rx_fifo_size, "max size in bytes of rxFifo", 131072),
    INIT_PARAM(m5reg, "m5 register"),
    INIT_PARAM_DFLT(dma_no_allocate, "Should DMA reads allocate cache lines", true)

END_INIT_SIM_OBJECT_PARAMS(NSGigE)
2755
2756
2757 CREATE_SIM_OBJECT(NSGigE)
2758 {
2759 NSGigE::Params *params = new NSGigE::Params;
2760
2761 params->name = getInstanceName();
2762 params->mmu = mmu;
2763 params->configSpace = configspace;
2764 params->configData = configdata;
2765 params->plat = platform;
2766 params->busNum = pci_bus;
2767 params->deviceNum = pci_dev;
2768 params->functionNum = pci_func;
2769
2770 params->clock = clock;
2771 params->intr_delay = intr_delay;
2772 params->pmem = physmem;
2773 params->tx_delay = tx_delay;
2774 params->rx_delay = rx_delay;
2775 params->hier = hier;
2776 params->header_bus = io_bus;
2777 params->payload_bus = payload_bus;
2778 params->pio_latency = pio_latency;
2779 params->dma_desc_free = dma_desc_free;
2780 params->dma_data_free = dma_data_free;
2781 params->dma_read_delay = dma_read_delay;
2782 params->dma_write_delay = dma_write_delay;
2783 params->dma_read_factor = dma_read_factor;
2784 params->dma_write_factor = dma_write_factor;
2785 params->rx_filter = rx_filter;
2786 params->eaddr = hardware_address;
2787 params->tx_fifo_size = tx_fifo_size;
2788 params->rx_fifo_size = rx_fifo_size;
2789 params->m5reg = m5reg;
2790 params->dma_no_allocate = dma_no_allocate;
2791 return new NSGigE(params);
2792 }
2793
2794 REGISTER_SIM_OBJECT("NSGigE", NSGigE)