Standardize clock parameter names to 'clock'.
[gem5.git] / dev / ns_gige.cc
1 /*
2 * Copyright (c) 2004 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
/**
 * @file
 * Device module for modelling the National Semiconductor
 * DP83820 ethernet controller.  Does not support priority queueing.
 */
33 #include <cstdio>
34 #include <deque>
35 #include <string>
36
37 #include "base/inet.hh"
38 #include "cpu/exec_context.hh"
39 #include "dev/dma.hh"
40 #include "dev/etherlink.hh"
41 #include "dev/ns_gige.hh"
42 #include "dev/pciconfigall.hh"
43 #include "mem/bus/bus.hh"
44 #include "mem/bus/dma_interface.hh"
45 #include "mem/bus/pio_interface.hh"
46 #include "mem/bus/pio_interface_impl.hh"
47 #include "mem/functional_mem/memory_control.hh"
48 #include "mem/functional_mem/physical_memory.hh"
49 #include "sim/builder.hh"
50 #include "sim/debug.hh"
51 #include "sim/host.hh"
52 #include "sim/stats.hh"
53 #include "targetarch/vtophys.hh"
54
// Human-readable names for the receive state machine states, indexed by
// the RxState enum value; used only for debug/trace output.
const char *NsRxStateStrings[] =
{
    "rxIdle",
    "rxDescRefr",
    "rxDescRead",
    "rxFifoBlock",
    "rxFragWrite",
    "rxDescWrite",
    "rxAdvance"
};
65
// Human-readable names for the transmit state machine states, indexed by
// the TxState enum value; used only for debug/trace output.
const char *NsTxStateStrings[] =
{
    "txIdle",
    "txDescRefr",
    "txDescRead",
    "txFifoBlock",
    "txFragRead",
    "txDescWrite",
    "txAdvance"
};
76
// Human-readable names for the DMA engine states, indexed by the
// DmaState enum value; used only for debug/trace output.
const char *NsDmaState[] =
{
    "dmaIdle",
    "dmaReading",
    "dmaWriting",
    "dmaReadWaiting",
    "dmaWriteWaiting"
};
85
86 using namespace std;
87 using namespace Net;
88
89 ///////////////////////////////////////////////////////////////////////
90 //
91 // NSGigE PCI Device
92 //
/**
 * Construct the device from its parameter struct: size the tx/rx FIFOs,
 * record delay/clock parameters, wire up the PIO and DMA bus interfaces,
 * reset the register file, and load the perfect-match MAC address.
 */
NSGigE::NSGigE(Params *p)
    : PciDev(p), ioEnable(false),
      txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size),
      txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
      txXferLen(0), rxXferLen(0), clock(p->clock),
      txState(txIdle), txEnable(false), CTDD(false),
      txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
      rxEnable(false), CRDD(false), rxPktBytes(0),
      rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
      rxDmaReadEvent(this), rxDmaWriteEvent(this),
      txDmaReadEvent(this), txDmaWriteEvent(this),
      dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free),
      txDelay(p->tx_delay), rxDelay(p->rx_delay),
      rxKickTick(0), txKickTick(0),
      txEvent(this), rxFilterEnable(p->rx_filter), acceptBroadcast(false),
      acceptMulticast(false), acceptUnicast(false),
      acceptPerfect(false), acceptArp(false),
      physmem(p->pmem), intrTick(0), cpuPendingIntr(false),
      intrEvent(0), interface(0)
{
    // PIO always rides the header bus when one is given; DMA uses the
    // payload bus for data if present, otherwise shares the header bus.
    if (p->header_bus) {
        pioInterface = newPioInterface(name(), p->hier,
                                       p->header_bus, this,
                                       &NSGigE::cacheAccess);

        // pio_latency is expressed in bus cycles; convert to ticks
        pioLatency = p->pio_latency * p->header_bus->clockRate;

        if (p->payload_bus)
            dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                                 p->header_bus,
                                                 p->payload_bus, 1,
                                                 p->dma_no_allocate);
        else
            dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                                 p->header_bus,
                                                 p->header_bus, 1,
                                                 p->dma_no_allocate);
    } else if (p->payload_bus) {
        // no header bus: both PIO and DMA go over the payload bus
        pioInterface = newPioInterface(name(), p->hier,
                                       p->payload_bus, this,
                                       &NSGigE::cacheAccess);

        pioLatency = p->pio_latency * p->payload_bus->clockRate;

        dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                             p->payload_bus,
                                             p->payload_bus, 1,
                                             p->dma_no_allocate);
    }
    // NOTE(review): with neither bus configured, pioInterface/dmaInterface
    // and pioLatency are left unset here — presumably valid in a purely
    // functional configuration; confirm against callers.

    // latency/throughput modelling knobs for interrupts and DMA
    intrDelay = p->intr_delay;
    dmaReadDelay = p->dma_read_delay;
    dmaWriteDelay = p->dma_write_delay;
    dmaReadFactor = p->dma_read_factor;
    dmaWriteFactor = p->dma_write_factor;

    // bring the register file to its power-on state and install the
    // station (perfect-match) MAC address from the parameters
    regsReset();
    memcpy(&rom.perfectMatch, p->eaddr.bytes(), ETH_ADDR_LEN);
}
153
// NOTE(review): pioInterface/dmaInterface allocated in the constructor are
// never freed here — presumably sim objects live for the whole simulation;
// confirm before adding cleanup.
NSGigE::~NSGigE()
{}
156
157 void
158 NSGigE::regStats()
159 {
160 txBytes
161 .name(name() + ".txBytes")
162 .desc("Bytes Transmitted")
163 .prereq(txBytes)
164 ;
165
166 rxBytes
167 .name(name() + ".rxBytes")
168 .desc("Bytes Received")
169 .prereq(rxBytes)
170 ;
171
172 txPackets
173 .name(name() + ".txPackets")
174 .desc("Number of Packets Transmitted")
175 .prereq(txBytes)
176 ;
177
178 rxPackets
179 .name(name() + ".rxPackets")
180 .desc("Number of Packets Received")
181 .prereq(rxBytes)
182 ;
183
184 txIpChecksums
185 .name(name() + ".txIpChecksums")
186 .desc("Number of tx IP Checksums done by device")
187 .precision(0)
188 .prereq(txBytes)
189 ;
190
191 rxIpChecksums
192 .name(name() + ".rxIpChecksums")
193 .desc("Number of rx IP Checksums done by device")
194 .precision(0)
195 .prereq(rxBytes)
196 ;
197
198 txTcpChecksums
199 .name(name() + ".txTcpChecksums")
200 .desc("Number of tx TCP Checksums done by device")
201 .precision(0)
202 .prereq(txBytes)
203 ;
204
205 rxTcpChecksums
206 .name(name() + ".rxTcpChecksums")
207 .desc("Number of rx TCP Checksums done by device")
208 .precision(0)
209 .prereq(rxBytes)
210 ;
211
212 txUdpChecksums
213 .name(name() + ".txUdpChecksums")
214 .desc("Number of tx UDP Checksums done by device")
215 .precision(0)
216 .prereq(txBytes)
217 ;
218
219 rxUdpChecksums
220 .name(name() + ".rxUdpChecksums")
221 .desc("Number of rx UDP Checksums done by device")
222 .precision(0)
223 .prereq(rxBytes)
224 ;
225
226 descDmaReads
227 .name(name() + ".descDMAReads")
228 .desc("Number of descriptors the device read w/ DMA")
229 .precision(0)
230 ;
231
232 descDmaWrites
233 .name(name() + ".descDMAWrites")
234 .desc("Number of descriptors the device wrote w/ DMA")
235 .precision(0)
236 ;
237
238 descDmaRdBytes
239 .name(name() + ".descDmaReadBytes")
240 .desc("number of descriptor bytes read w/ DMA")
241 .precision(0)
242 ;
243
244 descDmaWrBytes
245 .name(name() + ".descDmaWriteBytes")
246 .desc("number of descriptor bytes write w/ DMA")
247 .precision(0)
248 ;
249
250 txBandwidth
251 .name(name() + ".txBandwidth")
252 .desc("Transmit Bandwidth (bits/s)")
253 .precision(0)
254 .prereq(txBytes)
255 ;
256
257 rxBandwidth
258 .name(name() + ".rxBandwidth")
259 .desc("Receive Bandwidth (bits/s)")
260 .precision(0)
261 .prereq(rxBytes)
262 ;
263
264 totBandwidth
265 .name(name() + ".totBandwidth")
266 .desc("Total Bandwidth (bits/s)")
267 .precision(0)
268 .prereq(totBytes)
269 ;
270
271 totPackets
272 .name(name() + ".totPackets")
273 .desc("Total Packets")
274 .precision(0)
275 .prereq(totBytes)
276 ;
277
278 totBytes
279 .name(name() + ".totBytes")
280 .desc("Total Bytes")
281 .precision(0)
282 .prereq(totBytes)
283 ;
284
285 totPacketRate
286 .name(name() + ".totPPS")
287 .desc("Total Tranmission Rate (packets/s)")
288 .precision(0)
289 .prereq(totBytes)
290 ;
291
292 txPacketRate
293 .name(name() + ".txPPS")
294 .desc("Packet Tranmission Rate (packets/s)")
295 .precision(0)
296 .prereq(txBytes)
297 ;
298
299 rxPacketRate
300 .name(name() + ".rxPPS")
301 .desc("Packet Reception Rate (packets/s)")
302 .precision(0)
303 .prereq(rxBytes)
304 ;
305
306 postedSwi
307 .name(name() + ".postedSwi")
308 .desc("number of software interrupts posted to CPU")
309 .precision(0)
310 ;
311
312 totalSwi
313 .name(name() + ".totalSwi")
314 .desc("number of total Swi written to ISR")
315 .precision(0)
316 ;
317
318 coalescedSwi
319 .name(name() + ".coalescedSwi")
320 .desc("average number of Swi's coalesced into each post")
321 .precision(0)
322 ;
323
324 postedRxIdle
325 .name(name() + ".postedRxIdle")
326 .desc("number of rxIdle interrupts posted to CPU")
327 .precision(0)
328 ;
329
330 totalRxIdle
331 .name(name() + ".totalRxIdle")
332 .desc("number of total RxIdle written to ISR")
333 .precision(0)
334 ;
335
336 coalescedRxIdle
337 .name(name() + ".coalescedRxIdle")
338 .desc("average number of RxIdle's coalesced into each post")
339 .precision(0)
340 ;
341
342 postedRxOk
343 .name(name() + ".postedRxOk")
344 .desc("number of RxOk interrupts posted to CPU")
345 .precision(0)
346 ;
347
348 totalRxOk
349 .name(name() + ".totalRxOk")
350 .desc("number of total RxOk written to ISR")
351 .precision(0)
352 ;
353
354 coalescedRxOk
355 .name(name() + ".coalescedRxOk")
356 .desc("average number of RxOk's coalesced into each post")
357 .precision(0)
358 ;
359
360 postedRxDesc
361 .name(name() + ".postedRxDesc")
362 .desc("number of RxDesc interrupts posted to CPU")
363 .precision(0)
364 ;
365
366 totalRxDesc
367 .name(name() + ".totalRxDesc")
368 .desc("number of total RxDesc written to ISR")
369 .precision(0)
370 ;
371
372 coalescedRxDesc
373 .name(name() + ".coalescedRxDesc")
374 .desc("average number of RxDesc's coalesced into each post")
375 .precision(0)
376 ;
377
378 postedTxOk
379 .name(name() + ".postedTxOk")
380 .desc("number of TxOk interrupts posted to CPU")
381 .precision(0)
382 ;
383
384 totalTxOk
385 .name(name() + ".totalTxOk")
386 .desc("number of total TxOk written to ISR")
387 .precision(0)
388 ;
389
390 coalescedTxOk
391 .name(name() + ".coalescedTxOk")
392 .desc("average number of TxOk's coalesced into each post")
393 .precision(0)
394 ;
395
396 postedTxIdle
397 .name(name() + ".postedTxIdle")
398 .desc("number of TxIdle interrupts posted to CPU")
399 .precision(0)
400 ;
401
402 totalTxIdle
403 .name(name() + ".totalTxIdle")
404 .desc("number of total TxIdle written to ISR")
405 .precision(0)
406 ;
407
408 coalescedTxIdle
409 .name(name() + ".coalescedTxIdle")
410 .desc("average number of TxIdle's coalesced into each post")
411 .precision(0)
412 ;
413
414 postedTxDesc
415 .name(name() + ".postedTxDesc")
416 .desc("number of TxDesc interrupts posted to CPU")
417 .precision(0)
418 ;
419
420 totalTxDesc
421 .name(name() + ".totalTxDesc")
422 .desc("number of total TxDesc written to ISR")
423 .precision(0)
424 ;
425
426 coalescedTxDesc
427 .name(name() + ".coalescedTxDesc")
428 .desc("average number of TxDesc's coalesced into each post")
429 .precision(0)
430 ;
431
432 postedRxOrn
433 .name(name() + ".postedRxOrn")
434 .desc("number of RxOrn posted to CPU")
435 .precision(0)
436 ;
437
438 totalRxOrn
439 .name(name() + ".totalRxOrn")
440 .desc("number of total RxOrn written to ISR")
441 .precision(0)
442 ;
443
444 coalescedRxOrn
445 .name(name() + ".coalescedRxOrn")
446 .desc("average number of RxOrn's coalesced into each post")
447 .precision(0)
448 ;
449
450 coalescedTotal
451 .name(name() + ".coalescedTotal")
452 .desc("average number of interrupts coalesced into each post")
453 .precision(0)
454 ;
455
456 postedInterrupts
457 .name(name() + ".postedInterrupts")
458 .desc("number of posts to CPU")
459 .precision(0)
460 ;
461
462 droppedPackets
463 .name(name() + ".droppedPackets")
464 .desc("number of packets dropped")
465 .precision(0)
466 ;
467
468 coalescedSwi = totalSwi / postedInterrupts;
469 coalescedRxIdle = totalRxIdle / postedInterrupts;
470 coalescedRxOk = totalRxOk / postedInterrupts;
471 coalescedRxDesc = totalRxDesc / postedInterrupts;
472 coalescedTxOk = totalTxOk / postedInterrupts;
473 coalescedTxIdle = totalTxIdle / postedInterrupts;
474 coalescedTxDesc = totalTxDesc / postedInterrupts;
475 coalescedRxOrn = totalRxOrn / postedInterrupts;
476
477 coalescedTotal = (totalSwi + totalRxIdle + totalRxOk + totalRxDesc + totalTxOk
478 + totalTxIdle + totalTxDesc + totalRxOrn) / postedInterrupts;
479
480 txBandwidth = txBytes * Stats::constant(8) / simSeconds;
481 rxBandwidth = rxBytes * Stats::constant(8) / simSeconds;
482 totBandwidth = txBandwidth + rxBandwidth;
483 totBytes = txBytes + rxBytes;
484 totPackets = txPackets + rxPackets;
485
486 txPacketRate = txPackets / simSeconds;
487 rxPacketRate = rxPackets / simSeconds;
488 }
489
490 /**
491 * This is to read the PCI general configuration registers
492 */
493 void
494 NSGigE::ReadConfig(int offset, int size, uint8_t *data)
495 {
496 if (offset < PCI_DEVICE_SPECIFIC)
497 PciDev::ReadConfig(offset, size, data);
498 else
499 panic("Device specific PCI config space not implemented!\n");
500 }
501
/**
 * This is to write to the PCI general configuration registers.
 * After the base class applies the write, we snoop the offset to keep
 * ioEnable and the PIO address ranges in sync with the command register
 * and the BARs.
 */
void
NSGigE::WriteConfig(int offset, int size, uint32_t data)
{
    if (offset < PCI_DEVICE_SPECIFIC)
        PciDev::WriteConfig(offset, size, data);
    else
        panic("Device specific PCI config space not implemented!\n");

    // Need to catch writes to BARs to update the PIO interface
    switch (offset) {
        // seems to work fine without all these PCI settings, but i
        // put in the IO to double check, an assertion will fail if we
        // need to properly implement it
      case PCI_COMMAND:
        // mirror the I/O-space-enable bit; read() asserts on it
        if (config.data[offset] & PCI_CMD_IOSE)
            ioEnable = true;
        else
            ioEnable = false;

#if 0
        if (config.data[offset] & PCI_CMD_BME) {
            bmEnabled = true;
        }
        else {
            bmEnabled = false;
        }

        if (config.data[offset] & PCI_CMD_MSE) {
            memEnable = true;
        }
        else {
            memEnable = false;
        }
#endif
        break;

      case PCI0_BASE_ADDR0:
        if (BARAddrs[0] != 0) {
            // register the newly-programmed BAR range with the PIO bus
            if (pioInterface)
                pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));

            // strip the uncached-space bits so the stored base is a
            // plain physical address
            BARAddrs[0] &= EV5::PAddrUncachedMask;
        }
        break;
      case PCI0_BASE_ADDR1:
        if (BARAddrs[1] != 0) {
            if (pioInterface)
                pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));

            BARAddrs[1] &= EV5::PAddrUncachedMask;
        }
        break;
    }
}
559
/**
 * This reads the device registers, which are detailed in the NS83820
 * spec sheet.  Only 32-bit accesses are supported; reads of CR, ISR and
 * MIBC have read-side effects (bits cleared on read).
 */
Fault
NSGigE::read(MemReqPtr &req, uint8_t *data)
{
    assert(ioEnable);

    //The mask is to give you only the offset into the device register file
    Addr daddr = req->paddr & 0xfff;
    DPRINTF(EthernetPIO, "read da=%#x pa=%#x va=%#x size=%d\n",
            daddr, req->paddr, req->vaddr, req->size);


    // there are some reserved registers, you can see ns_gige_reg.h and
    // the spec sheet for details
    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        // this window aliases the PCI configuration space
        ReadConfig(daddr & 0xff, req->size, data);
        return No_Fault;
    } else if (daddr >= MIB_START && daddr <= MIB_END) {
        // don't implement all the MIB's. hopefully the kernel
        // doesn't actually DEPEND upon their values
        // MIB are just hardware stats keepers
        uint32_t &reg = *(uint32_t *) data;
        reg = 0;
        return No_Fault;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    switch (req->size) {
      case sizeof(uint32_t):
        {
            uint32_t &reg = *(uint32_t *)data;

            switch (daddr) {
              case CR:
                reg = regs.command;
                //these are supposed to be cleared on a read
                reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
                break;

              case CFGR:
                reg = regs.config;
                break;

              case MEAR:
                reg = regs.mear;
                break;

              case PTSCR:
                reg = regs.ptscr;
                break;

              case ISR:
                // reading the ISR acknowledges (clears) all interrupts
                reg = regs.isr;
                devIntrClear(ISR_ALL);
                break;

              case IMR:
                reg = regs.imr;
                break;

              case IER:
                reg = regs.ier;
                break;

              case IHR:
                reg = regs.ihr;
                break;

              case TXDP:
                reg = regs.txdp;
                break;

              case TXDP_HI:
                reg = regs.txdp_hi;
                break;

              case TX_CFG:
                reg = regs.txcfg;
                break;

              case GPIOR:
                reg = regs.gpior;
                break;

              case RXDP:
                reg = regs.rxdp;
                break;

              case RXDP_HI:
                reg = regs.rxdp_hi;
                break;

              case RX_CFG:
                reg = regs.rxcfg;
                break;

              case PQCR:
                reg = regs.pqcr;
                break;

              case WCSR:
                reg = regs.wcsr;
                break;

              case PCR:
                reg = regs.pcr;
                break;

              // see the spec sheet for how RFCR and RFDR work
              // basically, you write to RFCR to tell the machine
              // what you want to do next, then you act upon RFDR,
              // and the device will be prepared b/c of what you
              // wrote to RFCR
              case RFCR:
                reg = regs.rfcr;
                break;

              case RFDR:
                // return the 16-bit chunk of the perfect-match MAC
                // address selected by the RFCR address field
                switch (regs.rfcr & RFCR_RFADDR) {
                  case 0x000:
                    reg = rom.perfectMatch[1];
                    reg = reg << 8;
                    reg += rom.perfectMatch[0];
                    break;
                  case 0x002:
                    reg = rom.perfectMatch[3] << 8;
                    reg += rom.perfectMatch[2];
                    break;
                  case 0x004:
                    reg = rom.perfectMatch[5] << 8;
                    reg += rom.perfectMatch[4];
                    break;
                  default:
                    panic("reading RFDR for something other than PMATCH!\n");
                    // didn't implement other RFDR functionality b/c
                    // driver didn't use it
                }
                break;

              case SRR:
                reg = regs.srr;
                break;

              case MIBC:
                reg = regs.mibc;
                // stats-strobe and clear bits read back as zero
                reg &= ~(MIBC_MIBS | MIBC_ACLR);
                break;

              case VRCR:
                reg = regs.vrcr;
                break;

              case VTCR:
                reg = regs.vtcr;
                break;

              case VDR:
                reg = regs.vdr;
                break;

              case CCSR:
                reg = regs.ccsr;
                break;

              case TBICR:
                reg = regs.tbicr;
                break;

              case TBISR:
                reg = regs.tbisr;
                break;

              case TANAR:
                reg = regs.tanar;
                break;

              case TANLPAR:
                reg = regs.tanlpar;
                break;

              case TANER:
                reg = regs.taner;
                break;

              case TESR:
                reg = regs.tesr;
                break;

              case M5REG:
                // simulator-specific register exposing config flags
                reg = params()->m5reg;
                break;

              default:
                panic("reading unimplemented register: addr=%#x", daddr);
            }

            DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
                    daddr, reg, reg);
        }
        break;

      default:
        panic("accessing register with invalid size: addr=%#x, size=%d",
              daddr, req->size);
    }

    return No_Fault;
}
773
/**
 * Write a device register.  Only 32-bit accesses are supported.  Writes
 * to the command register can kick the tx/rx state machines, trigger
 * resets, or post a software interrupt; registers the real driver never
 * writes panic instead of silently accepting the access.
 */
Fault
NSGigE::write(MemReqPtr &req, const uint8_t *data)
{
    assert(ioEnable);

    Addr daddr = req->paddr & 0xfff;
    DPRINTF(EthernetPIO, "write da=%#x pa=%#x va=%#x size=%d\n",
            daddr, req->paddr, req->vaddr, req->size);

    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        // this window aliases the PCI configuration space
        WriteConfig(daddr & 0xff, req->size, *(uint32_t *)data);
        return No_Fault;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    if (req->size == sizeof(uint32_t)) {
        uint32_t reg = *(uint32_t *)data;
        DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);

        switch (daddr) {
          case CR:
            regs.command = reg;
            if (reg & CR_TXD) {
                txEnable = false;
            } else if (reg & CR_TXE) {
                txEnable = true;

                // the kernel is enabling the transmit machine
                if (txState == txIdle)
                    txKick();
            }

            if (reg & CR_RXD) {
                rxEnable = false;
            } else if (reg & CR_RXE) {
                rxEnable = true;

                if (rxState == rxIdle)
                    rxKick();
            }

            if (reg & CR_TXR)
                txReset();

            if (reg & CR_RXR)
                rxReset();

            if (reg & CR_SWI)
                devIntrPost(ISR_SWI);

            if (reg & CR_RST) {
                // full device reset: both machines plus registers
                txReset();
                rxReset();

                regsReset();
            }
            break;

          case CFGR:
            if (reg & CFGR_LNKSTS ||
                reg & CFGR_SPDSTS ||
                reg & CFGR_DUPSTS ||
                reg & CFGR_RESERVED ||
                reg & CFGR_T64ADDR ||
                reg & CFGR_PCI64_DET)
                panic("writing to read-only or reserved CFGR bits!\n");

            // NOTE(review): |= means writable CFGR bits can be set but
            // never cleared by the driver — confirm this is intended
            regs.config |= reg & ~(CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
                                   CFGR_RESERVED | CFGR_T64ADDR | CFGR_PCI64_DET);

            // all these #if 0's are because i don't THINK the kernel needs to
            // have these implemented. if there is a problem relating to one of
            // these, you may need to add functionality in.
#if 0
            if (reg & CFGR_TBI_EN) ;
            if (reg & CFGR_MODE_1000) ;
#endif

            if (reg & CFGR_AUTO_1000)
                panic("CFGR_AUTO_1000 not implemented!\n");

#if 0
            if (reg & CFGR_PINT_DUPSTS ||
                reg & CFGR_PINT_LNKSTS ||
                reg & CFGR_PINT_SPDSTS)
                ;

            if (reg & CFGR_TMRTEST) ;
            if (reg & CFGR_MRM_DIS) ;
            if (reg & CFGR_MWI_DIS) ;

            if (reg & CFGR_T64ADDR)
                panic("CFGR_T64ADDR is read only register!\n");

            if (reg & CFGR_PCI64_DET)
                panic("CFGR_PCI64_DET is read only register!\n");

            if (reg & CFGR_DATA64_EN) ;
            if (reg & CFGR_M64ADDR) ;
            if (reg & CFGR_PHY_RST) ;
            if (reg & CFGR_PHY_DIS) ;
#endif

            // extended status words in descriptors (needed for the
            // device to do checksum offload flags)
            if (reg & CFGR_EXTSTS_EN)
                extstsEnable = true;
            else
                extstsEnable = false;

#if 0
            if (reg & CFGR_REQALG) ;
            if (reg & CFGR_SB) ;
            if (reg & CFGR_POW) ;
            if (reg & CFGR_EXD) ;
            if (reg & CFGR_PESEL) ;
            if (reg & CFGR_BROM_DIS) ;
            if (reg & CFGR_EXT_125) ;
            if (reg & CFGR_BEM) ;
#endif
            break;

          case MEAR:
            regs.mear = reg;
            // since phy is completely faked, MEAR_MD* don't matter
            // and since the driver never uses MEAR_EE*, they don't
            // matter
#if 0
            if (reg & MEAR_EEDI) ;
            if (reg & MEAR_EEDO) ; // this one is read only
            if (reg & MEAR_EECLK) ;
            if (reg & MEAR_EESEL) ;
            if (reg & MEAR_MDIO) ;
            if (reg & MEAR_MDDIR) ;
            if (reg & MEAR_MDC) ;
#endif
            break;

          case PTSCR:
            regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
            // these control BISTs for various parts of chip - we
            // don't care or do just fake that the BIST is done
            if (reg & PTSCR_RBIST_EN)
                regs.ptscr |= PTSCR_RBIST_DONE;
            if (reg & PTSCR_EEBIST_EN)
                regs.ptscr &= ~PTSCR_EEBIST_EN;
            if (reg & PTSCR_EELOAD_EN)
                regs.ptscr &= ~PTSCR_EELOAD_EN;
            break;

          case ISR: /* writing to the ISR has no effect */
            panic("ISR is a read only register!\n");

          case IMR:
            regs.imr = reg;
            // a mask change can immediately raise or drop the CPU intr
            devIntrChangeMask();
            break;

          case IER:
            regs.ier = reg;
            break;

          case IHR:
            regs.ihr = reg;
            /* not going to implement real interrupt holdoff */
            break;

          case TXDP:
            // descriptor pointers are dword-aligned; low bits ignored
            regs.txdp = (reg & 0xFFFFFFFC);
            assert(txState == txIdle);
            CTDD = false;
            break;

          case TXDP_HI:
            regs.txdp_hi = reg;
            break;

          case TX_CFG:
            regs.txcfg = reg;
#if 0
            if (reg & TX_CFG_CSI) ;
            if (reg & TX_CFG_HBI) ;
            if (reg & TX_CFG_MLB) ;
            if (reg & TX_CFG_ATP) ;
            if (reg & TX_CFG_ECRETRY) {
                /*
                 * this could easily be implemented, but considering
                 * the network is just a fake pipe, wouldn't make
                 * sense to do this
                 */
            }

            if (reg & TX_CFG_BRST_DIS) ;
#endif

#if 0
            /* we handle our own DMA, ignore the kernel's exhortations */
            if (reg & TX_CFG_MXDMA) ;
#endif

            // also, we currently don't care about fill/drain
            // thresholds though this may change in the future with
            // more realistic networks or a driver which changes it
            // according to feedback

            break;

          case GPIOR:
            regs.gpior = reg;
            /* these just control general purpose i/o pins, don't matter */
            break;

          case RXDP:
            regs.rxdp = reg;
            CRDD = false;
            break;

          case RXDP_HI:
            regs.rxdp_hi = reg;
            break;

          case RX_CFG:
            regs.rxcfg = reg;
#if 0
            if (reg & RX_CFG_AEP) ;
            if (reg & RX_CFG_ARP) ;
            if (reg & RX_CFG_STRIPCRC) ;
            if (reg & RX_CFG_RX_RD) ;
            if (reg & RX_CFG_ALP) ;
            if (reg & RX_CFG_AIRL) ;

            /* we handle our own DMA, ignore what kernel says about it */
            if (reg & RX_CFG_MXDMA) ;

            //also, we currently don't care about fill/drain thresholds
            //though this may change in the future with more realistic
            //networks or a driver which changes it according to feedback
            if (reg & (RX_CFG_DRTH | RX_CFG_DRTH0)) ;
#endif
            break;

          case PQCR:
            /* there is no priority queueing used in the linux 2.6 driver */
            regs.pqcr = reg;
            break;

          case WCSR:
            /* not going to implement wake on LAN */
            regs.wcsr = reg;
            break;

          case PCR:
            /* not going to implement pause control */
            regs.pcr = reg;
            break;

          case RFCR:
            regs.rfcr = reg;

            // latch the receive-filter mode bits into booleans the
            // rx path consults for every incoming packet
            rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
            acceptBroadcast = (reg & RFCR_AAB) ? true : false;
            acceptMulticast = (reg & RFCR_AAM) ? true : false;
            acceptUnicast = (reg & RFCR_AAU) ? true : false;
            acceptPerfect = (reg & RFCR_APM) ? true : false;
            acceptArp = (reg & RFCR_AARP) ? true : false;

#if 0
            if (reg & RFCR_APAT)
                panic("RFCR_APAT not implemented!\n");
#endif

            if (reg & RFCR_MHEN || reg & RFCR_UHEN)
                panic("hash filtering not implemented!\n");

            if (reg & RFCR_ULM)
                panic("RFCR_ULM not implemented!\n");

            break;

          case RFDR:
            panic("the driver never writes to RFDR, something is wrong!\n");

          case BRAR:
            panic("the driver never uses BRAR, something is wrong!\n");

          case BRDR:
            panic("the driver never uses BRDR, something is wrong!\n");

          case SRR:
            panic("SRR is read only register!\n");

          case MIBC:
            panic("the driver never uses MIBC, something is wrong!\n");

          case VRCR:
            regs.vrcr = reg;
            break;

          case VTCR:
            regs.vtcr = reg;
            break;

          case VDR:
            panic("the driver never uses VDR, something is wrong!\n");
            break;

          case CCSR:
            /* not going to implement clockrun stuff */
            regs.ccsr = reg;
            break;

          case TBICR:
            regs.tbicr = reg;
            if (reg & TBICR_MR_LOOPBACK)
                panic("TBICR_MR_LOOPBACK never used, something wrong!\n");

            if (reg & TBICR_MR_AN_ENABLE) {
                // fake phy: autonegotiation completes instantly
                regs.tanlpar = regs.tanar;
                regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
            }

#if 0
            if (reg & TBICR_MR_RESTART_AN) ;
#endif

            break;

          case TBISR:
            panic("TBISR is read only register!\n");

          case TANAR:
            regs.tanar = reg;
            if (reg & TANAR_PS2)
                panic("this isn't used in driver, something wrong!\n");

            if (reg & TANAR_PS1)
                panic("this isn't used in driver, something wrong!\n");
            break;

          case TANLPAR:
            panic("this should only be written to by the fake phy!\n");

          case TANER:
            panic("TANER is read only register!\n");

          case TESR:
            regs.tesr = reg;
            break;

          default:
            panic("invalid register access daddr=%#x", daddr);
        }
    } else {
        panic("Invalid Request Size");
    }

    return No_Fault;
}
1132
/**
 * Set interrupt bits in the ISR.  Per-source "total" statistics are
 * bumped for every bit that is currently unmasked, and if any unmasked
 * bit is set a CPU interrupt is scheduled (after intrDelay, unless the
 * source is marked no-delay).
 */
void
NSGigE::devIntrPost(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot set a reserved interrupt");

    if (interrupts & ISR_NOIMPL)
        warn("interrupt not implemented %#x\n", interrupts);

    // unimplemented sources are dropped rather than latched
    interrupts &= ~ISR_NOIMPL;
    regs.isr |= interrupts;

    // count each unmasked source written to the ISR; the matching
    // "posted" counters are bumped in devIntrClear()
    if (interrupts & regs.imr) {
        if (interrupts & ISR_SWI) {
            totalSwi++;
        }
        if (interrupts & ISR_RXIDLE) {
            totalRxIdle++;
        }
        if (interrupts & ISR_RXOK) {
            totalRxOk++;
        }
        if (interrupts & ISR_RXDESC) {
            totalRxDesc++;
        }
        if (interrupts & ISR_TXOK) {
            totalTxOk++;
        }
        if (interrupts & ISR_TXIDLE) {
            totalTxIdle++;
        }
        if (interrupts & ISR_TXDESC) {
            totalTxDesc++;
        }
        if (interrupts & ISR_RXORN) {
            totalRxOrn++;
        }
    }

    DPRINTF(EthernetIntr,
            "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
            interrupts, regs.isr, regs.imr);

    if ((regs.isr & regs.imr)) {
        Tick when = curTick;
        // delay the CPU-visible interrupt unless a no-delay source
        // is among the pending unmasked bits
        if (!(regs.isr & regs.imr & ISR_NODELAY))
            when += intrDelay;
        cpuIntrPost(when);
    }
}
1183
/* writing this interrupt counting stats inside this means that this function
   is now limited to being used to clear all interrupts upon the kernel
   reading isr and servicing. just telling you in case you were thinking
   of expanding use.
*/
void
NSGigE::devIntrClear(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot clear a reserved interrupt");

    // Before clearing, record which unmasked sources were pending —
    // these are the ones the kernel is servicing with this read.
    if (regs.isr & regs.imr & ISR_SWI) {
        postedSwi++;
    }
    if (regs.isr & regs.imr & ISR_RXIDLE) {
        postedRxIdle++;
    }
    if (regs.isr & regs.imr & ISR_RXOK) {
        postedRxOk++;
    }
    if (regs.isr & regs.imr & ISR_RXDESC) {
        postedRxDesc++;
    }
    if (regs.isr & regs.imr & ISR_TXOK) {
        postedTxOk++;
    }
    if (regs.isr & regs.imr & ISR_TXIDLE) {
        postedTxIdle++;
    }
    if (regs.isr & regs.imr & ISR_TXDESC) {
        postedTxDesc++;
    }
    if (regs.isr & regs.imr & ISR_RXORN) {
        postedRxOrn++;
    }

    // one "post" event regardless of how many sources were coalesced
    if (regs.isr & regs.imr & (ISR_SWI | ISR_RXIDLE | ISR_RXOK | ISR_RXDESC |
                               ISR_TXOK | ISR_TXIDLE | ISR_TXDESC | ISR_RXORN) )
        postedInterrupts++;

    interrupts &= ~ISR_NOIMPL;
    regs.isr &= ~interrupts;

    DPRINTF(EthernetIntr,
            "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
            interrupts, regs.isr, regs.imr);

    // deassert the CPU interrupt line if nothing unmasked remains
    if (!(regs.isr & regs.imr))
        cpuIntrClear();
}
1234
1235 void
1236 NSGigE::devIntrChangeMask()
1237 {
1238 DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
1239 regs.isr, regs.imr, regs.isr & regs.imr);
1240
1241 if (regs.isr & regs.imr)
1242 cpuIntrPost(curTick);
1243 else
1244 cpuIntrClear();
1245 }
1246
/**
 * Schedule the CPU-visible interrupt for tick @p when, coalescing with
 * any already-scheduled post.
 */
void
NSGigE::cpuIntrPost(Tick when)
{
    // If the interrupt you want to post is later than an interrupt
    // already scheduled, just let it post in the coming one and don't
    // schedule another.
    // HOWEVER, must be sure that the scheduled intrTick is in the
    // future (this was formerly the source of a bug)
    /**
     * @todo this warning should be removed and the intrTick code should
     * be fixed.
     */
    assert(when >= curTick);
    assert(intrTick >= curTick || intrTick == 0);
    if (when > intrTick && intrTick != 0) {
        DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
                intrTick);
        return;
    }

    intrTick = when;
    // defensive clamp (see @todo above): never schedule in the past
    if (intrTick < curTick) {
        debug_break();
        intrTick = curTick;
    }

    DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
            intrTick);

    // replace any earlier pending event with one at the new tick;
    // squash() cancels it without freeing (events are self-deleting)
    if (intrEvent)
        intrEvent->squash();
    intrEvent = new IntrEvent(this, true);
    intrEvent->schedule(intrTick);
}
1281
1282 void
1283 NSGigE::cpuInterrupt()
1284 {
1285 assert(intrTick == curTick);
1286
1287 // Whether or not there's a pending interrupt, we don't care about
1288 // it anymore
1289 intrEvent = 0;
1290 intrTick = 0;
1291
1292 // Don't send an interrupt if there's already one
1293 if (cpuPendingIntr) {
1294 DPRINTF(EthernetIntr,
1295 "would send an interrupt now, but there's already pending\n");
1296 } else {
1297 // Send interrupt
1298 cpuPendingIntr = true;
1299
1300 DPRINTF(EthernetIntr, "posting interrupt\n");
1301 intrPost();
1302 }
1303 }
1304
/**
 * Deassert the platform interrupt line and cancel any interrupt event
 * that is scheduled but has not fired yet.  No-op if nothing is pending.
 */
void
NSGigE::cpuIntrClear()
{
    if (!cpuPendingIntr)
        return;

    // cancel a scheduled-but-unfired post
    if (intrEvent) {
        intrEvent->squash();
        intrEvent = 0;
    }

    intrTick = 0;

    cpuPendingIntr = false;

    DPRINTF(EthernetIntr, "clearing interrupt\n");
    intrClear();
}
1323
1324 bool
1325 NSGigE::cpuIntrPending() const
1326 { return cpuPendingIntr; }
1327
1328 void
1329 NSGigE::txReset()
1330 {
1331
1332 DPRINTF(Ethernet, "transmit reset\n");
1333
1334 CTDD = false;
1335 txEnable = false;;
1336 txFragPtr = 0;
1337 assert(txDescCnt == 0);
1338 txFifo.clear();
1339 txState = txIdle;
1340 assert(txDmaState == dmaIdle);
1341 }
1342
1343 void
1344 NSGigE::rxReset()
1345 {
1346 DPRINTF(Ethernet, "receive reset\n");
1347
1348 CRDD = false;
1349 assert(rxPktBytes == 0);
1350 rxEnable = false;
1351 rxFragPtr = 0;
1352 assert(rxDescCnt == 0);
1353 assert(rxDmaState == dmaIdle);
1354 rxFifo.clear();
1355 rxState = rxIdle;
1356 }
1357
void
NSGigE::regsReset()
{
    // Reset the device register file to its documented power-on values.
    memset(&regs, 0, sizeof(regs));
    regs.config = CFGR_LNKSTS;
    regs.mear = 0x22;
    regs.txcfg = 0x120; // set drain threshold to 1024 bytes and
                        // fill threshold to 32 bytes
    regs.rxcfg = 0x4; // set drain threshold to 16 bytes
    regs.srr = 0x0103; // set the silicon revision to rev B or 0x103
    regs.mibc = MIBC_FRZ;
    regs.vdr = 0x81; // set the vlan tag type to 802.1q
    regs.tesr = 0xc000; // TBI capable of both full and half duplex

    // Reset the cached receive-filter configuration that is normally
    // derived from register writes.
    extstsEnable = false;
    acceptBroadcast = false;
    acceptMulticast = false;
    acceptUnicast = false;
    acceptPerfect = false;
    acceptArp = false;
}
1379
1380 void
1381 NSGigE::rxDmaReadCopy()
1382 {
1383 assert(rxDmaState == dmaReading);
1384
1385 physmem->dma_read((uint8_t *)rxDmaData, rxDmaAddr, rxDmaLen);
1386 rxDmaState = dmaIdle;
1387
1388 DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
1389 rxDmaAddr, rxDmaLen);
1390 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1391 }
1392
1393 bool
1394 NSGigE::doRxDmaRead()
1395 {
1396 assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
1397 rxDmaState = dmaReading;
1398
1399 if (dmaInterface && !rxDmaFree) {
1400 if (dmaInterface->busy())
1401 rxDmaState = dmaReadWaiting;
1402 else
1403 dmaInterface->doDMA(Read, rxDmaAddr, rxDmaLen, curTick,
1404 &rxDmaReadEvent, true);
1405 return true;
1406 }
1407
1408 if (dmaReadDelay == 0 && dmaReadFactor == 0) {
1409 rxDmaReadCopy();
1410 return false;
1411 }
1412
1413 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
1414 Tick start = curTick + dmaReadDelay + factor;
1415 rxDmaReadEvent.schedule(start);
1416 return true;
1417 }
1418
1419 void
1420 NSGigE::rxDmaReadDone()
1421 {
1422 assert(rxDmaState == dmaReading);
1423 rxDmaReadCopy();
1424
1425 // If the transmit state machine has a pending DMA, let it go first
1426 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1427 txKick();
1428
1429 rxKick();
1430 }
1431
1432 void
1433 NSGigE::rxDmaWriteCopy()
1434 {
1435 assert(rxDmaState == dmaWriting);
1436
1437 physmem->dma_write(rxDmaAddr, (uint8_t *)rxDmaData, rxDmaLen);
1438 rxDmaState = dmaIdle;
1439
1440 DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
1441 rxDmaAddr, rxDmaLen);
1442 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1443 }
1444
1445 bool
1446 NSGigE::doRxDmaWrite()
1447 {
1448 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
1449 rxDmaState = dmaWriting;
1450
1451 if (dmaInterface && !rxDmaFree) {
1452 if (dmaInterface->busy())
1453 rxDmaState = dmaWriteWaiting;
1454 else
1455 dmaInterface->doDMA(WriteInvalidate, rxDmaAddr, rxDmaLen, curTick,
1456 &rxDmaWriteEvent, true);
1457 return true;
1458 }
1459
1460 if (dmaWriteDelay == 0 && dmaWriteFactor == 0) {
1461 rxDmaWriteCopy();
1462 return false;
1463 }
1464
1465 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
1466 Tick start = curTick + dmaWriteDelay + factor;
1467 rxDmaWriteEvent.schedule(start);
1468 return true;
1469 }
1470
1471 void
1472 NSGigE::rxDmaWriteDone()
1473 {
1474 assert(rxDmaState == dmaWriting);
1475 rxDmaWriteCopy();
1476
1477 // If the transmit state machine has a pending DMA, let it go first
1478 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1479 txKick();
1480
1481 rxKick();
1482 }
1483
void
NSGigE::rxKick()
{
    // Run the receive state machine until it blocks on a DMA or runs
    // out of work.  Safe to call at any time; re-entered from the DMA
    // completion handlers.
    DPRINTF(EthernetSM, "receive kick rxState=%s (rxBuf.size=%d)\n",
            NsRxStateStrings[rxState], rxFifo.size());

    // Honor any programmed stall of the state machine.
    if (rxKickTick > curTick) {
        DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
                rxKickTick);
        return;
    }

  next:
    // First retry any DMA that was stalled waiting for the bus.
    switch(rxDmaState) {
      case dmaReadWaiting:
        if (doRxDmaRead())
            goto exit;
        break;
      case dmaWriteWaiting:
        if (doRxDmaWrite())
            goto exit;
        break;
      default:
        break;
    }

    // see state machine from spec for details
    // the way this works is, if you finish work on one state and can
    // go directly to another, you do that through jumping to the
    // label "next". however, if you have intermediate work, like DMA
    // so that you can't go to the next state yet, you go to exit and
    // exit the loop. however, when the DMA is done it will trigger
    // an event and come back to this loop.
    switch (rxState) {
      case rxIdle:
        if (!rxEnable) {
            DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n");
            goto exit;
        }

        if (CRDD) {
            // Current descriptor already done: re-fetch only its link
            // field to see whether the driver appended more.
            rxState = rxDescRefr;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = &rxDescCache + offsetof(ns_desc, link);
            rxDmaLen = sizeof(rxDescCache.link);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        } else {
            // Fetch the whole descriptor pointed to by rxdp.
            rxState = rxDescRead;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = &rxDescCache;
            rxDmaLen = sizeof(ns_desc);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      case rxDescRefr:
        // Wait for the link-refresh DMA to complete.
        if (rxDmaState != dmaIdle)
            goto exit;

        rxState = rxAdvance;
        break;

      case rxDescRead:
        // Wait for the descriptor-fetch DMA to complete.
        if (rxDmaState != dmaIdle)
            goto exit;

        DPRINTF(EthernetDesc,
                "rxDescCache: addr=%08x read descriptor\n",
                regs.rxdp & 0x3fffffff);
        DPRINTF(EthernetDesc,
                "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
                rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts,
                rxDescCache.extsts);

        if (rxDescCache.cmdsts & CMDSTS_OWN) {
            // Device already owns this descriptor: nothing usable from
            // the driver yet.
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            goto exit;
        } else {
            rxState = rxFifoBlock;
            rxFragPtr = rxDescCache.bufptr;
            rxDescCnt = rxDescCache.cmdsts & CMDSTS_LEN_MASK;
        }
        break;

      case rxFifoBlock:
        if (!rxPacket) {
            /**
             * @todo in reality, we should be able to start processing
             * the packet as it arrives, and not have to wait for the
             * full packet ot be in the receive fifo.
             */
            if (rxFifo.empty())
                goto exit;

            DPRINTF(EthernetSM, "****processing receive of new packet****\n");

            // If we don't have a packet, grab a new one from the fifo.
            rxPacket = rxFifo.front();
            rxPktBytes = rxPacket->length;
            rxPacketBufPtr = rxPacket->data;

#if TRACING_ON
            if (DTRACE(Ethernet)) {
                IpPtr ip(rxPacket);
                if (ip) {
                    DPRINTF(Ethernet, "ID is %d\n", ip->id());
                    TcpPtr tcp(ip);
                    if (tcp) {
                        DPRINTF(Ethernet,
                                "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
                                tcp->sport(), tcp->dport(), tcp->seq(),
                                tcp->ack());
                    }
                }
            }
#endif

            // sanity check - i think the driver behaves like this
            assert(rxDescCnt >= rxPktBytes);
            rxFifo.pop();
        }


        // dont' need the && rxDescCnt > 0 if driver sanity check
        // above holds
        if (rxPktBytes > 0) {
            // Bytes remain: DMA the next fragment into the descriptor
            // buffer.
            rxState = rxFragWrite;
            // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
            // check holds
            rxXferLen = rxPktBytes;

            rxDmaAddr = rxFragPtr & 0x3fffffff;
            rxDmaData = rxPacketBufPtr;
            rxDmaLen = rxXferLen;
            rxDmaFree = dmaDataFree;

            if (doRxDmaWrite())
                goto exit;

        } else {
            rxState = rxDescWrite;

            //if (rxPktBytes == 0) {  /* packet is done */
            assert(rxPktBytes == 0);
            DPRINTF(EthernetSM, "done with receiving packet\n");

            // Hand the descriptor back to the driver: set OWN/OK,
            // clear MORE, and put the packet length in the size field.
            rxDescCache.cmdsts |= CMDSTS_OWN;
            rxDescCache.cmdsts &= ~CMDSTS_MORE;
            rxDescCache.cmdsts |= CMDSTS_OK;
            rxDescCache.cmdsts &= 0xffff0000;
            rxDescCache.cmdsts += rxPacket->length;   //i.e. set CMDSTS_SIZE

#if 0
            /*
             * all the driver uses these are for its own stats keeping
             * which we don't care about, aren't necessary for
             * functionality and doing this would just slow us down.
             * if they end up using this in a later version for
             * functional purposes, just undef
             */
            if (rxFilterEnable) {
                rxDescCache.cmdsts &= ~CMDSTS_DEST_MASK;
                const EthAddr &dst = rxFifoFront()->dst();
                if (dst->unicast())
                    rxDescCache.cmdsts |= CMDSTS_DEST_SELF;
                if (dst->multicast())
                    rxDescCache.cmdsts |= CMDSTS_DEST_MULTI;
                if (dst->broadcast())
                    rxDescCache.cmdsts |= CMDSTS_DEST_MASK;
            }
#endif

            // Model the receive checksum-offload engine: verify the
            // IP/TCP/UDP checksums and report results via extsts.
            IpPtr ip(rxPacket);
            if (extstsEnable && ip) {
                rxDescCache.extsts |= EXTSTS_IPPKT;
                rxIpChecksums++;
                if (cksum(ip) != 0) {
                    DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
                    rxDescCache.extsts |= EXTSTS_IPERR;
                }
                TcpPtr tcp(ip);
                UdpPtr udp(ip);
                if (tcp) {
                    rxDescCache.extsts |= EXTSTS_TCPPKT;
                    rxTcpChecksums++;
                    if (cksum(tcp) != 0) {
                        DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
                        rxDescCache.extsts |= EXTSTS_TCPERR;

                    }
                } else if (udp) {
                    rxDescCache.extsts |= EXTSTS_UDPPKT;
                    rxUdpChecksums++;
                    if (cksum(udp) != 0) {
                        DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
                        rxDescCache.extsts |= EXTSTS_UDPERR;
                    }
                }
            }
            rxPacket = 0;

            /*
             * the driver seems to always receive into desc buffers
             * of size 1514, so you never have a pkt that is split
             * into multiple descriptors on the receive side, so
             * i don't implement that case, hence the assert above.
             */

            DPRINTF(EthernetDesc,
                    "rxDescCache: addr=%08x writeback cmdsts extsts\n",
                    regs.rxdp & 0x3fffffff);
            DPRINTF(EthernetDesc,
                    "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
                    rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts,
                    rxDescCache.extsts);

            // Write back cmdsts and extsts in a single DMA (they are
            // adjacent in the descriptor).
            rxDmaAddr = (regs.rxdp + offsetof(ns_desc, cmdsts)) & 0x3fffffff;
            rxDmaData = &(rxDescCache.cmdsts);
            rxDmaLen = sizeof(rxDescCache.cmdsts) + sizeof(rxDescCache.extsts);
            rxDmaFree = dmaDescFree;

            descDmaWrites++;
            descDmaWrBytes += rxDmaLen;

            if (doRxDmaWrite())
                goto exit;
        }
        break;

      case rxFragWrite:
        // Wait for the fragment DMA to complete, then account for it.
        if (rxDmaState != dmaIdle)
            goto exit;

        rxPacketBufPtr += rxXferLen;
        rxFragPtr += rxXferLen;
        rxPktBytes -= rxXferLen;

        rxState = rxFifoBlock;
        break;

      case rxDescWrite:
        // Wait for the descriptor writeback DMA to complete.
        if (rxDmaState != dmaIdle)
            goto exit;

        assert(rxDescCache.cmdsts & CMDSTS_OWN);

        assert(rxPacket == 0);
        devIntrPost(ISR_RXOK);

        if (rxDescCache.cmdsts & CMDSTS_INTR)
            devIntrPost(ISR_RXDESC);

        if (!rxEnable) {
            DPRINTF(EthernetSM, "Halting the RX state machine\n");
            rxState = rxIdle;
            goto exit;
        } else
            rxState = rxAdvance;
        break;

      case rxAdvance:
        if (rxDescCache.link == 0) {
            // End of the descriptor chain; remember that the current
            // descriptor is done (CRDD) and wait for the driver.
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            CRDD = true;
            goto exit;
        } else {
            // Follow the link to the next descriptor and fetch it.
            rxState = rxDescRead;
            regs.rxdp = rxDescCache.link;
            CRDD = false;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = &rxDescCache;
            rxDmaLen = sizeof(ns_desc);
            rxDmaFree = dmaDescFree;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("Invalid rxState!");
    }

    DPRINTF(EthernetSM, "entering next rxState=%s\n",
            NsRxStateStrings[rxState]);

    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
            NsRxStateStrings[rxState]);
}
1796
void
NSGigE::transmit()
{
    // Try to hand the packet at the head of the tx fifo to the
    // ethernet interface; on failure, schedule a retry.
    if (txFifo.empty()) {
        DPRINTF(Ethernet, "nothing to transmit\n");
        return;
    }

    DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n",
            txFifo.size());
    if (interface->sendPacket(txFifo.front())) {
#if TRACING_ON
        if (DTRACE(Ethernet)) {
            IpPtr ip(txFifo.front());
            if (ip) {
                DPRINTF(Ethernet, "ID is %d\n", ip->id());
                TcpPtr tcp(ip);
                if (tcp) {
                    DPRINTF(Ethernet,
                            "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
                            tcp->sport(), tcp->dport(), tcp->seq(), tcp->ack());
                }
            }
        }
#endif

        DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length);
        // Update statistics before the packet is popped from the fifo.
        txBytes += txFifo.front()->length;
        txPackets++;

        DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n",
                txFifo.avail());
        txFifo.pop();

        /*
         * normally do a writeback of the descriptor here, and ONLY
         * after that is done, send this interrupt. but since our
         * stuff never actually fails, just do this interrupt here,
         * otherwise the code has to stray from this nice format.
         * besides, it's functionally the same.
         */
        devIntrPost(ISR_TXOK);
    }

    // If the send failed, or more packets remain, retry after the
    // link's retry time.
    if (!txFifo.empty() && !txEvent.scheduled()) {
        DPRINTF(Ethernet, "reschedule transmit\n");
        txEvent.schedule(curTick + retryTime);
    }
}
1846
1847 void
1848 NSGigE::txDmaReadCopy()
1849 {
1850 assert(txDmaState == dmaReading);
1851
1852 physmem->dma_read((uint8_t *)txDmaData, txDmaAddr, txDmaLen);
1853 txDmaState = dmaIdle;
1854
1855 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
1856 txDmaAddr, txDmaLen);
1857 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1858 }
1859
1860 bool
1861 NSGigE::doTxDmaRead()
1862 {
1863 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
1864 txDmaState = dmaReading;
1865
1866 if (dmaInterface && !txDmaFree) {
1867 if (dmaInterface->busy())
1868 txDmaState = dmaReadWaiting;
1869 else
1870 dmaInterface->doDMA(Read, txDmaAddr, txDmaLen, curTick,
1871 &txDmaReadEvent, true);
1872 return true;
1873 }
1874
1875 if (dmaReadDelay == 0 && dmaReadFactor == 0.0) {
1876 txDmaReadCopy();
1877 return false;
1878 }
1879
1880 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
1881 Tick start = curTick + dmaReadDelay + factor;
1882 txDmaReadEvent.schedule(start);
1883 return true;
1884 }
1885
1886 void
1887 NSGigE::txDmaReadDone()
1888 {
1889 assert(txDmaState == dmaReading);
1890 txDmaReadCopy();
1891
1892 // If the receive state machine has a pending DMA, let it go first
1893 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1894 rxKick();
1895
1896 txKick();
1897 }
1898
1899 void
1900 NSGigE::txDmaWriteCopy()
1901 {
1902 assert(txDmaState == dmaWriting);
1903
1904 physmem->dma_write(txDmaAddr, (uint8_t *)txDmaData, txDmaLen);
1905 txDmaState = dmaIdle;
1906
1907 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
1908 txDmaAddr, txDmaLen);
1909 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1910 }
1911
1912 bool
1913 NSGigE::doTxDmaWrite()
1914 {
1915 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
1916 txDmaState = dmaWriting;
1917
1918 if (dmaInterface && !txDmaFree) {
1919 if (dmaInterface->busy())
1920 txDmaState = dmaWriteWaiting;
1921 else
1922 dmaInterface->doDMA(WriteInvalidate, txDmaAddr, txDmaLen, curTick,
1923 &txDmaWriteEvent, true);
1924 return true;
1925 }
1926
1927 if (dmaWriteDelay == 0 && dmaWriteFactor == 0.0) {
1928 txDmaWriteCopy();
1929 return false;
1930 }
1931
1932 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
1933 Tick start = curTick + dmaWriteDelay + factor;
1934 txDmaWriteEvent.schedule(start);
1935 return true;
1936 }
1937
1938 void
1939 NSGigE::txDmaWriteDone()
1940 {
1941 assert(txDmaState == dmaWriting);
1942 txDmaWriteCopy();
1943
1944 // If the receive state machine has a pending DMA, let it go first
1945 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1946 rxKick();
1947
1948 txKick();
1949 }
1950
void
NSGigE::txKick()
{
    // Run the transmit state machine until it blocks on a DMA or runs
    // out of work.  Safe to call at any time; re-entered from the DMA
    // completion handlers.
    DPRINTF(EthernetSM, "transmit kick txState=%s\n",
            NsTxStateStrings[txState]);

    // Honor any programmed stall of the state machine.
    if (txKickTick > curTick) {
        DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
                txKickTick);

        return;
    }

  next:
    // First retry any DMA that was stalled waiting for the bus.
    switch(txDmaState) {
      case dmaReadWaiting:
        if (doTxDmaRead())
            goto exit;
        break;
      case dmaWriteWaiting:
        if (doTxDmaWrite())
            goto exit;
        break;
      default:
        break;
    }

    // Same loop structure as rxKick(): jump to "next" to run the
    // following state immediately, or to "exit" when a DMA must
    // complete before the machine can continue.
    switch (txState) {
      case txIdle:
        if (!txEnable) {
            DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n");
            goto exit;
        }

        if (CTDD) {
            // Current descriptor already done: re-fetch only its link
            // field to see whether the driver appended more.
            txState = txDescRefr;

            txDmaAddr = regs.txdp & 0x3fffffff;
            txDmaData = &txDescCache + offsetof(ns_desc, link);
            txDmaLen = sizeof(txDescCache.link);
            txDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += txDmaLen;

            if (doTxDmaRead())
                goto exit;

        } else {
            // Fetch the whole descriptor pointed to by txdp.
            txState = txDescRead;

            txDmaAddr = regs.txdp & 0x3fffffff;
            txDmaData = &txDescCache;
            txDmaLen = sizeof(ns_desc);
            txDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += txDmaLen;

            if (doTxDmaRead())
                goto exit;
        }
        break;

      case txDescRefr:
        // Wait for the link-refresh DMA to complete.
        if (txDmaState != dmaIdle)
            goto exit;

        txState = txAdvance;
        break;

      case txDescRead:
        // Wait for the descriptor-fetch DMA to complete.
        if (txDmaState != dmaIdle)
            goto exit;

        DPRINTF(EthernetDesc,
                "txDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
                txDescCache.link, txDescCache.bufptr, txDescCache.cmdsts,
                txDescCache.extsts);

        if (txDescCache.cmdsts & CMDSTS_OWN) {
            // Device owns the descriptor: start pulling its data.
            txState = txFifoBlock;
            txFragPtr = txDescCache.bufptr;
            txDescCnt = txDescCache.cmdsts & CMDSTS_LEN_MASK;
        } else {
            devIntrPost(ISR_TXIDLE);
            txState = txIdle;
            goto exit;
        }
        break;

      case txFifoBlock:
        if (!txPacket) {
            // Start assembling a new outgoing packet.
            DPRINTF(EthernetSM, "****starting the tx of a new packet****\n");
            txPacket = new PacketData(16384);
            txPacketBufPtr = txPacket->data;
        }

        if (txDescCnt == 0) {
            DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n");
            if (txDescCache.cmdsts & CMDSTS_MORE) {
                // Packet continues in the next descriptor: write back
                // only cmdsts (with OWN cleared) and move on.
                DPRINTF(EthernetSM, "there are more descriptors to come\n");
                txState = txDescWrite;

                txDescCache.cmdsts &= ~CMDSTS_OWN;

                txDmaAddr = regs.txdp + offsetof(ns_desc, cmdsts);
                txDmaAddr &= 0x3fffffff;
                txDmaData = &(txDescCache.cmdsts);
                txDmaLen = sizeof(txDescCache.cmdsts);
                txDmaFree = dmaDescFree;

                if (doTxDmaWrite())
                    goto exit;

            } else { /* this packet is totally done */
                DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
                // Model the transmit checksum-offload engine before
                // the packet is queued.
                /* deal with the the packet that just finished */
                if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
                    IpPtr ip(txPacket);
                    if (txDescCache.extsts & EXTSTS_UDPPKT) {
                        UdpPtr udp(ip);
                        udp->sum(0);
                        udp->sum(cksum(udp));
                        txUdpChecksums++;
                    } else if (txDescCache.extsts & EXTSTS_TCPPKT) {
                        TcpPtr tcp(ip);
                        tcp->sum(0);
                        tcp->sum(cksum(tcp));
                        txTcpChecksums++;
                    }
                    if (txDescCache.extsts & EXTSTS_IPPKT) {
                        ip->sum(0);
                        ip->sum(cksum(ip));
                        txIpChecksums++;
                    }
                }

                txPacket->length = txPacketBufPtr - txPacket->data;
                // this is just because the receive can't handle a
                // packet bigger want to make sure
                assert(txPacket->length <= 1514);
#ifndef NDEBUG
                bool success =
#endif
                    txFifo.push(txPacket);
                assert(success);

                /*
                 * this following section is not tqo spec, but
                 * functionally shouldn't be any different. normally,
                 * the chip will wait til the transmit has occurred
                 * before writing back the descriptor because it has
                 * to wait to see that it was successfully transmitted
                 * to decide whether to set CMDSTS_OK or not.
                 * however, in the simulator since it is always
                 * successfully transmitted, and writing it exactly to
                 * spec would complicate the code, we just do it here
                 */

                txDescCache.cmdsts &= ~CMDSTS_OWN;
                txDescCache.cmdsts |= CMDSTS_OK;

                DPRINTF(EthernetDesc,
                        "txDesc writeback: cmdsts=%08x extsts=%08x\n",
                        txDescCache.cmdsts, txDescCache.extsts);

                // Write back cmdsts and extsts in a single DMA (they
                // are adjacent in the descriptor).
                txDmaAddr = regs.txdp + offsetof(ns_desc, cmdsts);
                txDmaAddr &= 0x3fffffff;
                txDmaData = &(txDescCache.cmdsts);
                txDmaLen = sizeof(txDescCache.cmdsts) +
                    sizeof(txDescCache.extsts);
                txDmaFree = dmaDescFree;

                descDmaWrites++;
                descDmaWrBytes += txDmaLen;

                transmit();
                txPacket = 0;

                if (!txEnable) {
                    DPRINTF(EthernetSM, "halting TX state machine\n");
                    txState = txIdle;
                    goto exit;
                } else
                    txState = txAdvance;

                if (doTxDmaWrite())
                    goto exit;
            }
        } else {
            DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
            if (!txFifo.full()) {
                txState = txFragRead;

                /*
                 * The number of bytes transferred is either whatever
                 * is left in the descriptor (txDescCnt), or if there
                 * is not enough room in the fifo, just whatever room
                 * is left in the fifo
                 */
                txXferLen = min<uint32_t>(txDescCnt, txFifo.avail());

                txDmaAddr = txFragPtr & 0x3fffffff;
                txDmaData = txPacketBufPtr;
                txDmaLen = txXferLen;
                txDmaFree = dmaDataFree;

                if (doTxDmaRead())
                    goto exit;
            } else {
                // Fifo is full: drain it by transmitting, then wait.
                txState = txFifoBlock;
                transmit();

                goto exit;
            }

        }
        break;

      case txFragRead:
        // Wait for the fragment DMA to complete, then account for it.
        if (txDmaState != dmaIdle)
            goto exit;

        txPacketBufPtr += txXferLen;
        txFragPtr += txXferLen;
        txDescCnt -= txXferLen;
        txFifo.reserve(txXferLen);

        txState = txFifoBlock;
        break;

      case txDescWrite:
        // Wait for the intermediate descriptor writeback to complete.
        if (txDmaState != dmaIdle)
            goto exit;

        if (txDescCache.cmdsts & CMDSTS_INTR)
            devIntrPost(ISR_TXDESC);

        txState = txAdvance;
        break;

      case txAdvance:
        if (txDescCache.link == 0) {
            // End of the descriptor chain; wait for the driver.
            devIntrPost(ISR_TXIDLE);
            txState = txIdle;
            goto exit;
        } else {
            // Follow the link to the next descriptor and fetch it.
            txState = txDescRead;
            regs.txdp = txDescCache.link;
            CTDD = false;

            txDmaAddr = txDescCache.link & 0x3fffffff;
            txDmaData = &txDescCache;
            txDmaLen = sizeof(ns_desc);
            txDmaFree = dmaDescFree;

            if (doTxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("invalid state");
    }

    DPRINTF(EthernetSM, "entering next txState=%s\n",
            NsTxStateStrings[txState]);

    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
            NsTxStateStrings[txState]);
}
2229
2230 void
2231 NSGigE::transferDone()
2232 {
2233 if (txFifo.empty()) {
2234 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
2235 return;
2236 }
2237
2238 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
2239
2240 if (txEvent.scheduled())
2241 txEvent.reschedule(curTick + cycles(1));
2242 else
2243 txEvent.schedule(curTick + cycles(1));
2244 }
2245
2246 bool
2247 NSGigE::rxFilter(const PacketPtr &packet)
2248 {
2249 EthPtr eth = packet;
2250 bool drop = true;
2251 string type;
2252
2253 const EthAddr &dst = eth->dst();
2254 if (dst.unicast()) {
2255 // If we're accepting all unicast addresses
2256 if (acceptUnicast)
2257 drop = false;
2258
2259 // If we make a perfect match
2260 if (acceptPerfect && dst == rom.perfectMatch)
2261 drop = false;
2262
2263 if (acceptArp && eth->type() == ETH_TYPE_ARP)
2264 drop = false;
2265
2266 } else if (dst.broadcast()) {
2267 // if we're accepting broadcasts
2268 if (acceptBroadcast)
2269 drop = false;
2270
2271 } else if (dst.multicast()) {
2272 // if we're accepting all multicasts
2273 if (acceptMulticast)
2274 drop = false;
2275
2276 }
2277
2278 if (drop) {
2279 DPRINTF(Ethernet, "rxFilter drop\n");
2280 DDUMP(EthernetData, packet->data, packet->length);
2281 }
2282
2283 return drop;
2284 }
2285
bool
NSGigE::recvPacket(PacketPtr packet)
{
    // Accept (or drop) a packet arriving from the wire.  Returns false
    // only on fifo overrun, signalling the link layer to retry.
    rxBytes += packet->length;
    rxPackets++;

    DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
            rxFifo.avail());

    if (!rxEnable) {
        // Receiver disabled: count the bytes but silently drop.
        DPRINTF(Ethernet, "receive disabled...packet dropped\n");
        debug_break();
        interface->recvDone();
        return true;
    }

    if (rxFilterEnable && rxFilter(packet)) {
        // Address filter rejected the frame.
        DPRINTF(Ethernet, "packet filtered...dropped\n");
        interface->recvDone();
        return true;
    }

    if (rxFifo.avail() < packet->length) {
#if TRACING_ON
        IpPtr ip(packet);
        TcpPtr tcp(ip);
        if (ip) {
            DPRINTF(Ethernet,
                    "packet won't fit in receive buffer...pkt ID %d dropped\n",
                    ip->id());
            if (tcp) {
                DPRINTF(Ethernet, "Seq=%d\n", tcp->seq());
            }
        }
#endif
        // Overrun: notify the driver via interrupt and refuse the
        // packet so the sender retries.
        droppedPackets++;
        devIntrPost(ISR_RXORN);
        return false;
    }

    rxFifo.push(packet);
    interface->recvDone();

    // Give the receive state machine a chance to consume the packet.
    rxKick();
    return true;
}
2332
//=====================================================================
//
// Serialization / checkpointing
//
void
NSGigE::serialize(ostream &os)
{
    // Checkpoint the full device state so simulation can resume
    // exactly where it left off.  Must be kept in sync with
    // unserialize().
    // Serialize the PciDev base class
    PciDev::serialize(os);

    /*
     * Finalize any DMA events now.
     */
    // Complete in-flight DMA copies immediately so no DMA events need
    // to be checkpointed.
    if (rxDmaReadEvent.scheduled())
        rxDmaReadCopy();
    if (rxDmaWriteEvent.scheduled())
        rxDmaWriteCopy();
    if (txDmaReadEvent.scheduled())
        txDmaReadCopy();
    if (txDmaWriteEvent.scheduled())
        txDmaWriteCopy();

    /*
     * Serialize the device registers
     */
    SERIALIZE_SCALAR(regs.command);
    SERIALIZE_SCALAR(regs.config);
    SERIALIZE_SCALAR(regs.mear);
    SERIALIZE_SCALAR(regs.ptscr);
    SERIALIZE_SCALAR(regs.isr);
    SERIALIZE_SCALAR(regs.imr);
    SERIALIZE_SCALAR(regs.ier);
    SERIALIZE_SCALAR(regs.ihr);
    SERIALIZE_SCALAR(regs.txdp);
    SERIALIZE_SCALAR(regs.txdp_hi);
    SERIALIZE_SCALAR(regs.txcfg);
    SERIALIZE_SCALAR(regs.gpior);
    SERIALIZE_SCALAR(regs.rxdp);
    SERIALIZE_SCALAR(regs.rxdp_hi);
    SERIALIZE_SCALAR(regs.rxcfg);
    SERIALIZE_SCALAR(regs.pqcr);
    SERIALIZE_SCALAR(regs.wcsr);
    SERIALIZE_SCALAR(regs.pcr);
    SERIALIZE_SCALAR(regs.rfcr);
    SERIALIZE_SCALAR(regs.rfdr);
    SERIALIZE_SCALAR(regs.srr);
    SERIALIZE_SCALAR(regs.mibc);
    SERIALIZE_SCALAR(regs.vrcr);
    SERIALIZE_SCALAR(regs.vtcr);
    SERIALIZE_SCALAR(regs.vdr);
    SERIALIZE_SCALAR(regs.ccsr);
    SERIALIZE_SCALAR(regs.tbicr);
    SERIALIZE_SCALAR(regs.tbisr);
    SERIALIZE_SCALAR(regs.tanar);
    SERIALIZE_SCALAR(regs.tanlpar);
    SERIALIZE_SCALAR(regs.taner);
    SERIALIZE_SCALAR(regs.tesr);

    SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);

    SERIALIZE_SCALAR(ioEnable);

    /*
     * Serialize the data Fifos
     */
    rxFifo.serialize("rxFifo", os);
    txFifo.serialize("txFifo", os);

    /*
     * Serialize the various helper variables
     */
    // In-flight packets are checkpointed along with the offset of the
    // current buffer pointer into their data.
    bool txPacketExists = txPacket;
    SERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        txPacket->length = txPacketBufPtr - txPacket->data;
        txPacket->serialize("txPacket", os);
        uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
        SERIALIZE_SCALAR(txPktBufPtr);
    }

    bool rxPacketExists = rxPacket;
    SERIALIZE_SCALAR(rxPacketExists);
    if (rxPacketExists) {
        rxPacket->serialize("rxPacket", os);
        uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
        SERIALIZE_SCALAR(rxPktBufPtr);
    }

    SERIALIZE_SCALAR(txXferLen);
    SERIALIZE_SCALAR(rxXferLen);

    /*
     * Serialize DescCaches
     */
    SERIALIZE_SCALAR(txDescCache.link);
    SERIALIZE_SCALAR(txDescCache.bufptr);
    SERIALIZE_SCALAR(txDescCache.cmdsts);
    SERIALIZE_SCALAR(txDescCache.extsts);
    SERIALIZE_SCALAR(rxDescCache.link);
    SERIALIZE_SCALAR(rxDescCache.bufptr);
    SERIALIZE_SCALAR(rxDescCache.cmdsts);
    SERIALIZE_SCALAR(rxDescCache.extsts);

    /*
     * Serialize tx state machine
     */
    // Enums are checkpointed as plain ints.
    int txState = this->txState;
    SERIALIZE_SCALAR(txState);
    SERIALIZE_SCALAR(txEnable);
    SERIALIZE_SCALAR(CTDD);
    SERIALIZE_SCALAR(txFragPtr);
    SERIALIZE_SCALAR(txDescCnt);
    int txDmaState = this->txDmaState;
    SERIALIZE_SCALAR(txDmaState);

    /*
     * Serialize rx state machine
     */
    int rxState = this->rxState;
    SERIALIZE_SCALAR(rxState);
    SERIALIZE_SCALAR(rxEnable);
    SERIALIZE_SCALAR(CRDD);
    SERIALIZE_SCALAR(rxPktBytes);
    SERIALIZE_SCALAR(rxFragPtr);
    SERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState = this->rxDmaState;
    SERIALIZE_SCALAR(rxDmaState);

    SERIALIZE_SCALAR(extstsEnable);

    /*
     * If there's a pending transmit, store the time so we can
     * reschedule it later
     */
    // Stored as a delta relative to curTick so the checkpoint is
    // independent of absolute simulation time.
    Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
    SERIALIZE_SCALAR(transmitTick);

    /*
     * receive address filter settings
     */
    SERIALIZE_SCALAR(rxFilterEnable);
    SERIALIZE_SCALAR(acceptBroadcast);
    SERIALIZE_SCALAR(acceptMulticast);
    SERIALIZE_SCALAR(acceptUnicast);
    SERIALIZE_SCALAR(acceptPerfect);
    SERIALIZE_SCALAR(acceptArp);

    /*
     * Keep track of pending interrupt status.
     */
    SERIALIZE_SCALAR(intrTick);
    SERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick = 0;
    if (intrEvent)
        intrEventTick = intrEvent->when();
    SERIALIZE_SCALAR(intrEventTick);

}
2490
void
NSGigE::unserialize(Checkpoint *cp, const std::string &section)
{
    // Restore device state from a checkpoint.  The scalars read here must
    // stay in lockstep (by name) with those written by NSGigE::serialize();
    // any field added to one must be added to the other.

    // Unserialize the PciDev base class
    PciDev::unserialize(cp, section);

    // Restore the DP83820 register file.
    UNSERIALIZE_SCALAR(regs.command);
    UNSERIALIZE_SCALAR(regs.config);
    UNSERIALIZE_SCALAR(regs.mear);
    UNSERIALIZE_SCALAR(regs.ptscr);
    UNSERIALIZE_SCALAR(regs.isr);
    UNSERIALIZE_SCALAR(regs.imr);
    UNSERIALIZE_SCALAR(regs.ier);
    UNSERIALIZE_SCALAR(regs.ihr);
    UNSERIALIZE_SCALAR(regs.txdp);
    UNSERIALIZE_SCALAR(regs.txdp_hi);
    UNSERIALIZE_SCALAR(regs.txcfg);
    UNSERIALIZE_SCALAR(regs.gpior);
    UNSERIALIZE_SCALAR(regs.rxdp);
    UNSERIALIZE_SCALAR(regs.rxdp_hi);
    UNSERIALIZE_SCALAR(regs.rxcfg);
    UNSERIALIZE_SCALAR(regs.pqcr);
    UNSERIALIZE_SCALAR(regs.wcsr);
    UNSERIALIZE_SCALAR(regs.pcr);
    UNSERIALIZE_SCALAR(regs.rfcr);
    UNSERIALIZE_SCALAR(regs.rfdr);
    UNSERIALIZE_SCALAR(regs.srr);
    UNSERIALIZE_SCALAR(regs.mibc);
    UNSERIALIZE_SCALAR(regs.vrcr);
    UNSERIALIZE_SCALAR(regs.vtcr);
    UNSERIALIZE_SCALAR(regs.vdr);
    UNSERIALIZE_SCALAR(regs.ccsr);
    UNSERIALIZE_SCALAR(regs.tbicr);
    UNSERIALIZE_SCALAR(regs.tbisr);
    UNSERIALIZE_SCALAR(regs.tanar);
    UNSERIALIZE_SCALAR(regs.tanlpar);
    UNSERIALIZE_SCALAR(regs.taner);
    UNSERIALIZE_SCALAR(regs.tesr);

    // Receive-filter perfect-match MAC address.
    UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);

    UNSERIALIZE_SCALAR(ioEnable);

    /*
     * unserialize the data fifos
     */
    rxFifo.unserialize("rxFifo", cp, section);
    txFifo.unserialize("txFifo", cp, section);

    /*
     * unserialize the various helper variables
     */
    // In-flight packets are only present in the checkpoint when a
    // corresponding "exists" flag was serialized as true.
    bool txPacketExists;
    UNSERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        // 16384 is the reconstructed packet buffer capacity; presumably it
        // must be at least the serialized packet length -- TODO confirm
        // against PacketData::unserialize.
        txPacket = new PacketData(16384);
        txPacket->unserialize("txPacket", cp, section);
        // The buffer pointer is checkpointed as an offset; rebase it onto
        // the freshly allocated packet data.
        uint32_t txPktBufPtr;
        UNSERIALIZE_SCALAR(txPktBufPtr);
        txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
    } else
        txPacket = 0;

    bool rxPacketExists;
    UNSERIALIZE_SCALAR(rxPacketExists);
    rxPacket = 0;
    if (rxPacketExists) {
        rxPacket = new PacketData(16384);
        rxPacket->unserialize("rxPacket", cp, section);
        // Rebase the rx buffer pointer from its checkpointed offset.
        uint32_t rxPktBufPtr;
        UNSERIALIZE_SCALAR(rxPktBufPtr);
        rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
    } else
        rxPacket = 0;

    UNSERIALIZE_SCALAR(txXferLen);
    UNSERIALIZE_SCALAR(rxXferLen);

    /*
     * Unserialize DescCaches
     */
    UNSERIALIZE_SCALAR(txDescCache.link);
    UNSERIALIZE_SCALAR(txDescCache.bufptr);
    UNSERIALIZE_SCALAR(txDescCache.cmdsts);
    UNSERIALIZE_SCALAR(txDescCache.extsts);
    UNSERIALIZE_SCALAR(rxDescCache.link);
    UNSERIALIZE_SCALAR(rxDescCache.bufptr);
    UNSERIALIZE_SCALAR(rxDescCache.cmdsts);
    UNSERIALIZE_SCALAR(rxDescCache.extsts);

    /*
     * unserialize tx state machine
     */
    // Enums are checkpointed as ints; read into a temporary and cast back.
    int txState;
    UNSERIALIZE_SCALAR(txState);
    this->txState = (TxState) txState;
    UNSERIALIZE_SCALAR(txEnable);
    UNSERIALIZE_SCALAR(CTDD);
    UNSERIALIZE_SCALAR(txFragPtr);
    UNSERIALIZE_SCALAR(txDescCnt);
    int txDmaState;
    UNSERIALIZE_SCALAR(txDmaState);
    this->txDmaState = (DmaState) txDmaState;

    /*
     * unserialize rx state machine
     */
    int rxState;
    UNSERIALIZE_SCALAR(rxState);
    this->rxState = (RxState) rxState;
    UNSERIALIZE_SCALAR(rxEnable);
    UNSERIALIZE_SCALAR(CRDD);
    UNSERIALIZE_SCALAR(rxPktBytes);
    UNSERIALIZE_SCALAR(rxFragPtr);
    UNSERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState;
    UNSERIALIZE_SCALAR(rxDmaState);
    this->rxDmaState = (DmaState) rxDmaState;

    UNSERIALIZE_SCALAR(extstsEnable);

    /*
     * If there's a pending transmit, reschedule it now
     */
    // NOTE(review): transmitTick is applied relative to curTick here, while
    // intrEventTick below is scheduled as an absolute tick -- this looks
    // deliberate but should match what serialize() stored; verify.
    Tick transmitTick;
    UNSERIALIZE_SCALAR(transmitTick);
    if (transmitTick)
        txEvent.schedule(curTick + transmitTick);

    /*
     * unserialize receive address filter settings
     */
    UNSERIALIZE_SCALAR(rxFilterEnable);
    UNSERIALIZE_SCALAR(acceptBroadcast);
    UNSERIALIZE_SCALAR(acceptMulticast);
    UNSERIALIZE_SCALAR(acceptUnicast);
    UNSERIALIZE_SCALAR(acceptPerfect);
    UNSERIALIZE_SCALAR(acceptArp);

    /*
     * Keep track of pending interrupt status.
     */
    UNSERIALIZE_SCALAR(intrTick);
    UNSERIALIZE_SCALAR(cpuPendingIntr);
    // A zero tick means no interrupt event was pending at checkpoint time
    // (serialize() writes 0 when intrEvent is null).
    Tick intrEventTick;
    UNSERIALIZE_SCALAR(intrEventTick);
    if (intrEventTick) {
        intrEvent = new IntrEvent(this, true);
        intrEvent->schedule(intrEventTick);
    }

    /*
     * re-add addrRanges to bus bridges
     */
    if (pioInterface) {
        pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));
        pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));
    }
}
2650
2651 Tick
2652 NSGigE::cacheAccess(MemReqPtr &req)
2653 {
2654 DPRINTF(EthernetPIO, "timing access to paddr=%#x (daddr=%#x)\n",
2655 req->paddr, req->paddr - addr);
2656 return curTick + pioLatency;
2657 }
2658
// Configuration parameters for the NSGigEInt ethernet interface object.
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

    SimObjectParam<EtherInt *> peer;   // other end of the ethernet link (may be null)
    SimObjectParam<NSGigE *> device;   // NIC this interface belongs to

END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)
2665
// Descriptions and defaults for the NSGigEInt parameters declared above.
BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt)

    INIT_PARAM_DFLT(peer, "peer interface", NULL),
    INIT_PARAM(device, "Ethernet device of this interface")

END_INIT_SIM_OBJECT_PARAMS(NSGigEInt)
2672
2673 CREATE_SIM_OBJECT(NSGigEInt)
2674 {
2675 NSGigEInt *dev_int = new NSGigEInt(getInstanceName(), device);
2676
2677 EtherInt *p = (EtherInt *)peer;
2678 if (p) {
2679 dev_int->setPeer(p);
2680 p->setPeer(dev_int);
2681 }
2682
2683 return dev_int;
2684 }
2685
2686 REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt)
2687
2688
// Configuration parameters for the NSGigE (DP83820) device model.
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE)

    // Device placement and timing.
    Param<Addr> addr;
    Param<Tick> clock;
    Param<Tick> tx_delay;
    Param<Tick> rx_delay;
    Param<Tick> intr_delay;

    // Memory system attachment.
    SimObjectParam<MemoryController *> mmu;
    SimObjectParam<PhysicalMemory *> physmem;

    // Receive filtering and MAC address.
    Param<bool> rx_filter;
    Param<string> hardware_address;

    // Bus attachments and PIO timing.
    SimObjectParam<Bus*> io_bus;
    SimObjectParam<Bus*> payload_bus;
    SimObjectParam<HierParams *> hier;
    Param<Tick> pio_latency;

    // DMA cost model knobs.
    Param<bool> dma_desc_free;
    Param<bool> dma_data_free;
    Param<Tick> dma_read_delay;
    Param<Tick> dma_write_delay;
    Param<Tick> dma_read_factor;
    Param<Tick> dma_write_factor;

    // PCI configuration-space plumbing and device identity.
    SimObjectParam<PciConfigAll *> configspace;
    SimObjectParam<PciConfigData *> configdata;
    SimObjectParam<Platform *> platform;
    Param<uint32_t> pci_bus;
    Param<uint32_t> pci_dev;
    Param<uint32_t> pci_func;

    // FIFO sizing and miscellaneous device options.
    Param<uint32_t> tx_fifo_size;
    Param<uint32_t> rx_fifo_size;
    Param<uint32_t> m5reg;
    Param<bool> dma_no_allocate;

END_DECLARE_SIM_OBJECT_PARAMS(NSGigE)
2722
// Descriptions and defaults for the NSGigE parameters declared above.
// Parameters without a _DFLT are mandatory in the configuration.
BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE)

    INIT_PARAM(addr, "Device Address"),
    INIT_PARAM(clock, "State machine processor frequency"),
    INIT_PARAM(tx_delay, "Transmit Delay"),
    INIT_PARAM(rx_delay, "Receive Delay"),
    INIT_PARAM(intr_delay, "Interrupt Delay in microseconds"),
    INIT_PARAM(mmu, "Memory Controller"),
    INIT_PARAM(physmem, "Physical Memory"),
    INIT_PARAM_DFLT(rx_filter, "Enable Receive Filter", true),
    INIT_PARAM_DFLT(hardware_address, "Ethernet Hardware Address",
                    "00:99:00:00:00:01"),
    INIT_PARAM_DFLT(io_bus, "The IO Bus to attach to for headers", NULL),
    INIT_PARAM_DFLT(payload_bus, "The IO Bus to attach to for payload", NULL),
    INIT_PARAM_DFLT(hier, "Hierarchy global variables", &defaultHierParams),
    INIT_PARAM_DFLT(pio_latency, "Programmed IO latency in bus cycles", 1),
    INIT_PARAM_DFLT(dma_desc_free, "DMA of Descriptors is free", false),
    INIT_PARAM_DFLT(dma_data_free, "DMA of Data is free", false),
    INIT_PARAM_DFLT(dma_read_delay, "fixed delay for dma reads", 0),
    INIT_PARAM_DFLT(dma_write_delay, "fixed delay for dma writes", 0),
    INIT_PARAM_DFLT(dma_read_factor, "multiplier for dma reads", 0),
    INIT_PARAM_DFLT(dma_write_factor, "multiplier for dma writes", 0),
    INIT_PARAM(configspace, "PCI Configspace"),
    INIT_PARAM(configdata, "PCI Config data"),
    INIT_PARAM(platform, "Platform"),
    INIT_PARAM(pci_bus, "PCI bus"),
    INIT_PARAM(pci_dev, "PCI device number"),
    INIT_PARAM(pci_func, "PCI function code"),
    INIT_PARAM_DFLT(tx_fifo_size, "max size in bytes of txFifo", 131072),
    INIT_PARAM_DFLT(rx_fifo_size, "max size in bytes of rxFifo", 131072),
    INIT_PARAM(m5reg, "m5 register"),
    INIT_PARAM_DFLT(dma_no_allocate, "Should DMA reads allocate cache lines", true)

END_INIT_SIM_OBJECT_PARAMS(NSGigE)
2757
2758
2759 CREATE_SIM_OBJECT(NSGigE)
2760 {
2761 NSGigE::Params *params = new NSGigE::Params;
2762
2763 params->name = getInstanceName();
2764 params->mmu = mmu;
2765 params->configSpace = configspace;
2766 params->configData = configdata;
2767 params->plat = platform;
2768 params->busNum = pci_bus;
2769 params->deviceNum = pci_dev;
2770 params->functionNum = pci_func;
2771
2772 params->clock = clock;
2773 params->intr_delay = intr_delay;
2774 params->pmem = physmem;
2775 params->tx_delay = tx_delay;
2776 params->rx_delay = rx_delay;
2777 params->hier = hier;
2778 params->header_bus = io_bus;
2779 params->payload_bus = payload_bus;
2780 params->pio_latency = pio_latency;
2781 params->dma_desc_free = dma_desc_free;
2782 params->dma_data_free = dma_data_free;
2783 params->dma_read_delay = dma_read_delay;
2784 params->dma_write_delay = dma_write_delay;
2785 params->dma_read_factor = dma_read_factor;
2786 params->dma_write_factor = dma_write_factor;
2787 params->rx_filter = rx_filter;
2788 params->eaddr = hardware_address;
2789 params->tx_fifo_size = tx_fifo_size;
2790 params->rx_fifo_size = rx_fifo_size;
2791 params->m5reg = m5reg;
2792 params->dma_no_allocate = dma_no_allocate;
2793 return new NSGigE(params);
2794 }
2795
2796 REGISTER_SIM_OBJECT("NSGigE", NSGigE)