d67c6cf95c13f3013c35a958b766d1eaaf30e5f0
[gem5.git] / dev / ns_gige.cc
1 /*
2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /** @file
30 * Device module for modelling the National Semiconductor
31 * DP83820 ethernet controller. Does not support priority queueing
32 */
33 #include <cstdio>
34 #include <deque>
35 #include <string>
36
37 #include "base/inet.hh"
38 #include "cpu/exec_context.hh"
39 #include "dev/etherlink.hh"
40 #include "dev/ns_gige.hh"
41 #include "dev/pciconfigall.hh"
42 #include "mem/bus/bus.hh"
43 #include "mem/bus/dma_interface.hh"
44 #include "mem/bus/pio_interface.hh"
45 #include "mem/bus/pio_interface_impl.hh"
46 #include "mem/functional/memory_control.hh"
47 #include "mem/functional/physical.hh"
48 #include "sim/builder.hh"
49 #include "sim/debug.hh"
50 #include "sim/host.hh"
51 #include "sim/stats.hh"
52 #include "targetarch/vtophys.hh"
53
// Printable names for the receive-side state machine, indexed by the
// RxState enum values (see ns_gige.hh); used for tracing.
const char *NsRxStateStrings[] = {
    "rxIdle",      "rxDescRefr",  "rxDescRead", "rxFifoBlock",
    "rxFragWrite", "rxDescWrite", "rxAdvance"
};
64
// Printable names for the transmit-side state machine, indexed by the
// TxState enum values (see ns_gige.hh); used for tracing.
const char *NsTxStateStrings[] = {
    "txIdle",     "txDescRefr", "txDescRead", "txFifoBlock",
    "txFragRead", "txDescWrite", "txAdvance"
};
75
// Printable names for the DMA engine states; used for tracing.
const char *NsDmaState[] = {
    "dmaIdle", "dmaReading", "dmaWriting", "dmaReadWaiting",
    "dmaWriteWaiting"
};
84
85 using namespace std;
86 using namespace Net;
87
///////////////////////////////////////////////////////////////////////
//
// NSGigE PCI Device
//
// Constructor: wires the device into the memory system.  If a header
// bus is configured, PIO and DMA interfaces are created on it, with DMA
// data optionally routed over a separate payload bus; if only a payload
// bus exists, both PIO and DMA use that bus.  Device registers are
// reset, the perfect-match ROM is seeded with the configured MAC
// address, and the cached descriptor copies are zeroed.
NSGigE::NSGigE(Params *p)
    : PciDev(p), ioEnable(false),
      txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size),
      txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
      txXferLen(0), rxXferLen(0), clock(p->clock),
      txState(txIdle), txEnable(false), CTDD(false),
      txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
      rxEnable(false), CRDD(false), rxPktBytes(0),
      rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
      eepromState(eepromStart), rxDmaReadEvent(this), rxDmaWriteEvent(this),
      txDmaReadEvent(this), txDmaWriteEvent(this),
      dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free),
      txDelay(p->tx_delay), rxDelay(p->rx_delay),
      rxKickTick(0), rxKickEvent(this), txKickTick(0), txKickEvent(this),
      txEvent(this), rxFilterEnable(p->rx_filter), acceptBroadcast(false),
      acceptMulticast(false), acceptUnicast(false),
      acceptPerfect(false), acceptArp(false), multicastHashEnable(false),
      physmem(p->pmem), intrTick(0), cpuPendingIntr(false),
      intrEvent(0), interface(0)
{
    if (p->header_bus) {
        // PIO accesses always come over the header bus
        pioInterface = newPioInterface(name() + ".pio", p->hier,
                                       p->header_bus, this,
                                       &NSGigE::cacheAccess);

        pioLatency = p->pio_latency * p->header_bus->clockRate;

        // DMA uses a separate payload bus when one is configured,
        // otherwise it shares the header bus
        if (p->payload_bus)
            dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                                 p->header_bus,
                                                 p->payload_bus, 1,
                                                 p->dma_no_allocate);
        else
            dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                                 p->header_bus,
                                                 p->header_bus, 1,
                                                 p->dma_no_allocate);
    } else if (p->payload_bus) {
        // No header bus: route both PIO and DMA over the payload bus
        pioInterface = newPioInterface(name() + ".pio2", p->hier,
                                       p->payload_bus, this,
                                       &NSGigE::cacheAccess);

        pioLatency = p->pio_latency * p->payload_bus->clockRate;

        dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                             p->payload_bus,
                                             p->payload_bus, 1,
                                             p->dma_no_allocate);
    }


    intrDelay = p->intr_delay;
    dmaReadDelay = p->dma_read_delay;
    dmaWriteDelay = p->dma_write_delay;
    dmaReadFactor = p->dma_read_factor;
    dmaWriteFactor = p->dma_write_factor;

    regsReset();
    // seed the perfect-match filter ROM with the configured MAC address
    memcpy(&rom.perfectMatch, p->eaddr.bytes(), ETH_ADDR_LEN);

    // clear the cached copies of the rx/tx descriptors (32- and 64-bit forms)
    memset(&rxDesc32, 0, sizeof(rxDesc32));
    memset(&txDesc32, 0, sizeof(txDesc32));
    memset(&rxDesc64, 0, sizeof(rxDesc64));
    memset(&txDesc64, 0, sizeof(txDesc64));
}
157
// NOTE(review): pioInterface/dmaInterface allocated in the constructor are
// never freed here — presumably intentional since devices live for the whole
// simulation; confirm before adding cleanup.
NSGigE::~NSGigE()
{}
160
161 void
162 NSGigE::regStats()
163 {
164 txBytes
165 .name(name() + ".txBytes")
166 .desc("Bytes Transmitted")
167 .prereq(txBytes)
168 ;
169
170 rxBytes
171 .name(name() + ".rxBytes")
172 .desc("Bytes Received")
173 .prereq(rxBytes)
174 ;
175
176 txPackets
177 .name(name() + ".txPackets")
178 .desc("Number of Packets Transmitted")
179 .prereq(txBytes)
180 ;
181
182 rxPackets
183 .name(name() + ".rxPackets")
184 .desc("Number of Packets Received")
185 .prereq(rxBytes)
186 ;
187
188 txIpChecksums
189 .name(name() + ".txIpChecksums")
190 .desc("Number of tx IP Checksums done by device")
191 .precision(0)
192 .prereq(txBytes)
193 ;
194
195 rxIpChecksums
196 .name(name() + ".rxIpChecksums")
197 .desc("Number of rx IP Checksums done by device")
198 .precision(0)
199 .prereq(rxBytes)
200 ;
201
202 txTcpChecksums
203 .name(name() + ".txTcpChecksums")
204 .desc("Number of tx TCP Checksums done by device")
205 .precision(0)
206 .prereq(txBytes)
207 ;
208
209 rxTcpChecksums
210 .name(name() + ".rxTcpChecksums")
211 .desc("Number of rx TCP Checksums done by device")
212 .precision(0)
213 .prereq(rxBytes)
214 ;
215
216 txUdpChecksums
217 .name(name() + ".txUdpChecksums")
218 .desc("Number of tx UDP Checksums done by device")
219 .precision(0)
220 .prereq(txBytes)
221 ;
222
223 rxUdpChecksums
224 .name(name() + ".rxUdpChecksums")
225 .desc("Number of rx UDP Checksums done by device")
226 .precision(0)
227 .prereq(rxBytes)
228 ;
229
230 descDmaReads
231 .name(name() + ".descDMAReads")
232 .desc("Number of descriptors the device read w/ DMA")
233 .precision(0)
234 ;
235
236 descDmaWrites
237 .name(name() + ".descDMAWrites")
238 .desc("Number of descriptors the device wrote w/ DMA")
239 .precision(0)
240 ;
241
242 descDmaRdBytes
243 .name(name() + ".descDmaReadBytes")
244 .desc("number of descriptor bytes read w/ DMA")
245 .precision(0)
246 ;
247
248 descDmaWrBytes
249 .name(name() + ".descDmaWriteBytes")
250 .desc("number of descriptor bytes write w/ DMA")
251 .precision(0)
252 ;
253
254 txBandwidth
255 .name(name() + ".txBandwidth")
256 .desc("Transmit Bandwidth (bits/s)")
257 .precision(0)
258 .prereq(txBytes)
259 ;
260
261 rxBandwidth
262 .name(name() + ".rxBandwidth")
263 .desc("Receive Bandwidth (bits/s)")
264 .precision(0)
265 .prereq(rxBytes)
266 ;
267
268 totBandwidth
269 .name(name() + ".totBandwidth")
270 .desc("Total Bandwidth (bits/s)")
271 .precision(0)
272 .prereq(totBytes)
273 ;
274
275 totPackets
276 .name(name() + ".totPackets")
277 .desc("Total Packets")
278 .precision(0)
279 .prereq(totBytes)
280 ;
281
282 totBytes
283 .name(name() + ".totBytes")
284 .desc("Total Bytes")
285 .precision(0)
286 .prereq(totBytes)
287 ;
288
289 totPacketRate
290 .name(name() + ".totPPS")
291 .desc("Total Tranmission Rate (packets/s)")
292 .precision(0)
293 .prereq(totBytes)
294 ;
295
296 txPacketRate
297 .name(name() + ".txPPS")
298 .desc("Packet Tranmission Rate (packets/s)")
299 .precision(0)
300 .prereq(txBytes)
301 ;
302
303 rxPacketRate
304 .name(name() + ".rxPPS")
305 .desc("Packet Reception Rate (packets/s)")
306 .precision(0)
307 .prereq(rxBytes)
308 ;
309
310 postedSwi
311 .name(name() + ".postedSwi")
312 .desc("number of software interrupts posted to CPU")
313 .precision(0)
314 ;
315
316 totalSwi
317 .name(name() + ".totalSwi")
318 .desc("total number of Swi written to ISR")
319 .precision(0)
320 ;
321
322 coalescedSwi
323 .name(name() + ".coalescedSwi")
324 .desc("average number of Swi's coalesced into each post")
325 .precision(0)
326 ;
327
328 postedRxIdle
329 .name(name() + ".postedRxIdle")
330 .desc("number of rxIdle interrupts posted to CPU")
331 .precision(0)
332 ;
333
334 totalRxIdle
335 .name(name() + ".totalRxIdle")
336 .desc("total number of RxIdle written to ISR")
337 .precision(0)
338 ;
339
340 coalescedRxIdle
341 .name(name() + ".coalescedRxIdle")
342 .desc("average number of RxIdle's coalesced into each post")
343 .precision(0)
344 ;
345
346 postedRxOk
347 .name(name() + ".postedRxOk")
348 .desc("number of RxOk interrupts posted to CPU")
349 .precision(0)
350 ;
351
352 totalRxOk
353 .name(name() + ".totalRxOk")
354 .desc("total number of RxOk written to ISR")
355 .precision(0)
356 ;
357
358 coalescedRxOk
359 .name(name() + ".coalescedRxOk")
360 .desc("average number of RxOk's coalesced into each post")
361 .precision(0)
362 ;
363
364 postedRxDesc
365 .name(name() + ".postedRxDesc")
366 .desc("number of RxDesc interrupts posted to CPU")
367 .precision(0)
368 ;
369
370 totalRxDesc
371 .name(name() + ".totalRxDesc")
372 .desc("total number of RxDesc written to ISR")
373 .precision(0)
374 ;
375
376 coalescedRxDesc
377 .name(name() + ".coalescedRxDesc")
378 .desc("average number of RxDesc's coalesced into each post")
379 .precision(0)
380 ;
381
382 postedTxOk
383 .name(name() + ".postedTxOk")
384 .desc("number of TxOk interrupts posted to CPU")
385 .precision(0)
386 ;
387
388 totalTxOk
389 .name(name() + ".totalTxOk")
390 .desc("total number of TxOk written to ISR")
391 .precision(0)
392 ;
393
394 coalescedTxOk
395 .name(name() + ".coalescedTxOk")
396 .desc("average number of TxOk's coalesced into each post")
397 .precision(0)
398 ;
399
400 postedTxIdle
401 .name(name() + ".postedTxIdle")
402 .desc("number of TxIdle interrupts posted to CPU")
403 .precision(0)
404 ;
405
406 totalTxIdle
407 .name(name() + ".totalTxIdle")
408 .desc("total number of TxIdle written to ISR")
409 .precision(0)
410 ;
411
412 coalescedTxIdle
413 .name(name() + ".coalescedTxIdle")
414 .desc("average number of TxIdle's coalesced into each post")
415 .precision(0)
416 ;
417
418 postedTxDesc
419 .name(name() + ".postedTxDesc")
420 .desc("number of TxDesc interrupts posted to CPU")
421 .precision(0)
422 ;
423
424 totalTxDesc
425 .name(name() + ".totalTxDesc")
426 .desc("total number of TxDesc written to ISR")
427 .precision(0)
428 ;
429
430 coalescedTxDesc
431 .name(name() + ".coalescedTxDesc")
432 .desc("average number of TxDesc's coalesced into each post")
433 .precision(0)
434 ;
435
436 postedRxOrn
437 .name(name() + ".postedRxOrn")
438 .desc("number of RxOrn posted to CPU")
439 .precision(0)
440 ;
441
442 totalRxOrn
443 .name(name() + ".totalRxOrn")
444 .desc("total number of RxOrn written to ISR")
445 .precision(0)
446 ;
447
448 coalescedRxOrn
449 .name(name() + ".coalescedRxOrn")
450 .desc("average number of RxOrn's coalesced into each post")
451 .precision(0)
452 ;
453
454 coalescedTotal
455 .name(name() + ".coalescedTotal")
456 .desc("average number of interrupts coalesced into each post")
457 .precision(0)
458 ;
459
460 postedInterrupts
461 .name(name() + ".postedInterrupts")
462 .desc("number of posts to CPU")
463 .precision(0)
464 ;
465
466 droppedPackets
467 .name(name() + ".droppedPackets")
468 .desc("number of packets dropped")
469 .precision(0)
470 ;
471
472 coalescedSwi = totalSwi / postedInterrupts;
473 coalescedRxIdle = totalRxIdle / postedInterrupts;
474 coalescedRxOk = totalRxOk / postedInterrupts;
475 coalescedRxDesc = totalRxDesc / postedInterrupts;
476 coalescedTxOk = totalTxOk / postedInterrupts;
477 coalescedTxIdle = totalTxIdle / postedInterrupts;
478 coalescedTxDesc = totalTxDesc / postedInterrupts;
479 coalescedRxOrn = totalRxOrn / postedInterrupts;
480
481 coalescedTotal = (totalSwi + totalRxIdle + totalRxOk + totalRxDesc +
482 totalTxOk + totalTxIdle + totalTxDesc +
483 totalRxOrn) / postedInterrupts;
484
485 txBandwidth = txBytes * Stats::constant(8) / simSeconds;
486 rxBandwidth = rxBytes * Stats::constant(8) / simSeconds;
487 totBandwidth = txBandwidth + rxBandwidth;
488 totBytes = txBytes + rxBytes;
489 totPackets = txPackets + rxPackets;
490
491 txPacketRate = txPackets / simSeconds;
492 rxPacketRate = rxPackets / simSeconds;
493 }
494
495 /**
496 * This is to read the PCI general configuration registers
497 */
498 void
499 NSGigE::readConfig(int offset, int size, uint8_t *data)
500 {
501 if (offset < PCI_DEVICE_SPECIFIC)
502 PciDev::readConfig(offset, size, data);
503 else
504 panic("Device specific PCI config space not implemented!\n");
505 }
506
/**
 * This is to write to the PCI general configuration registers.
 * After forwarding the write to the generic PCI device, intercepts
 * writes to the command register (to track I/O-space enable) and to
 * the BARs (to register the new address range with the PIO interface).
 */
void
NSGigE::writeConfig(int offset, int size, const uint8_t* data)
{
    if (offset < PCI_DEVICE_SPECIFIC)
        PciDev::writeConfig(offset, size, data);
    else
        panic("Device specific PCI config space not implemented!\n");

    // Need to catch writes to BARs to update the PIO interface
    switch (offset) {
        // seems to work fine without all these PCI settings, but i
        // put in the IO to double check, an assertion will fail if we
        // need to properly implement it
      case PCI_COMMAND:
        if (config.data[offset] & PCI_CMD_IOSE)
            ioEnable = true;
        else
            ioEnable = false;

#if 0
        if (config.data[offset] & PCI_CMD_BME) {
            bmEnabled = true;
        }
        else {
            bmEnabled = false;
        }

        if (config.data[offset] & PCI_CMD_MSE) {
            memEnable = true;
        }
        else {
            memEnable = false;
        }
#endif
        break;

      case PCI0_BASE_ADDR0:
        if (BARAddrs[0] != 0) {
            if (pioInterface)
                pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));

            // NOTE(review): masks with the EV5 uncached-address mask —
            // presumably so later range checks match physical addresses;
            // confirm (this hard-codes Alpha/EV5 behavior).
            BARAddrs[0] &= EV5::PAddrUncachedMask;
        }
        break;
      case PCI0_BASE_ADDR1:
        if (BARAddrs[1] != 0) {
            if (pioInterface)
                pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));

            BARAddrs[1] &= EV5::PAddrUncachedMask;
        }
        break;
    }
}
564
/**
 * This reads the device registers, which are detailed in the NS83820
 * spec sheet.
 *
 * Address decode (offset into the 4KB register window):
 *   - reserved region (LAST..RESERVED]      -> panic
 *   - (RESERVED..0x3FC]                     -> aliased PCI config read
 *   - [MIB_START..MIB_END]                  -> MIB counters, always read 0
 *   - otherwise a 32-bit register read decoded by the switch below.
 * Only 32-bit accesses to the register file are supported.
 */
Fault
NSGigE::read(MemReqPtr &req, uint8_t *data)
{
    assert(ioEnable);

    //The mask is to give you only the offset into the device register file
    Addr daddr = req->paddr & 0xfff;
    DPRINTF(EthernetPIO, "read da=%#x pa=%#x va=%#x size=%d\n",
            daddr, req->paddr, req->vaddr, req->size);


    // there are some reserved registers, you can see ns_gige_reg.h and
    // the spec sheet for details
    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        readConfig(daddr & 0xff, req->size, data);
        return No_Fault;
    } else if (daddr >= MIB_START && daddr <= MIB_END) {
        // don't implement all the MIB's.  hopefully the kernel
        // doesn't actually DEPEND upon their values
        // MIB are just hardware stats keepers
        uint32_t &reg = *(uint32_t *) data;
        reg = 0;
        return No_Fault;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    switch (req->size) {
      case sizeof(uint32_t):
        {
            uint32_t &reg = *(uint32_t *)data;
            uint16_t rfaddr;

            switch (daddr) {
              case CR:
                reg = regs.command;
                //these are supposed to be cleared on a read
                reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
                break;

              case CFGR:
                reg = regs.config;
                break;

              case MEAR:
                reg = regs.mear;
                break;

              case PTSCR:
                reg = regs.ptscr;
                break;

              case ISR:
                reg = regs.isr;
                // reading ISR acknowledges (clears) all interrupts
                devIntrClear(ISR_ALL);
                break;

              case IMR:
                reg = regs.imr;
                break;

              case IER:
                reg = regs.ier;
                break;

              case IHR:
                reg = regs.ihr;
                break;

              case TXDP:
                reg = regs.txdp;
                break;

              case TXDP_HI:
                reg = regs.txdp_hi;
                break;

              case TX_CFG:
                reg = regs.txcfg;
                break;

              case GPIOR:
                reg = regs.gpior;
                break;

              case RXDP:
                reg = regs.rxdp;
                break;

              case RXDP_HI:
                reg = regs.rxdp_hi;
                break;

              case RX_CFG:
                reg = regs.rxcfg;
                break;

              case PQCR:
                reg = regs.pqcr;
                break;

              case WCSR:
                reg = regs.wcsr;
                break;

              case PCR:
                reg = regs.pcr;
                break;

                // see the spec sheet for how RFCR and RFDR work
                // basically, you write to RFCR to tell the machine
                // what you want to do next, then you act upon RFDR,
                // and the device will be prepared b/c of what you
                // wrote to RFCR
              case RFCR:
                reg = regs.rfcr;
                break;

              case RFDR:
                // RFCR_RFADDR selects which filter word RFDR exposes
                rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
                switch (rfaddr) {
                  // Read from perfect match ROM octets
                  case 0x000:
                    reg = rom.perfectMatch[1];
                    reg = reg << 8;
                    reg += rom.perfectMatch[0];
                    break;
                  case 0x002:
                    reg = rom.perfectMatch[3] << 8;
                    reg += rom.perfectMatch[2];
                    break;
                  case 0x004:
                    reg = rom.perfectMatch[5] << 8;
                    reg += rom.perfectMatch[4];
                    break;
                  default:
                    // Read filter hash table
                    if (rfaddr >= FHASH_ADDR &&
                        rfaddr < FHASH_ADDR + FHASH_SIZE) {

                        // Only word-aligned reads supported
                        if (rfaddr % 2)
                            panic("unaligned read from filter hash table!");

                        reg = rom.filterHash[rfaddr - FHASH_ADDR + 1] << 8;
                        reg += rom.filterHash[rfaddr - FHASH_ADDR];
                        break;
                    }

                    panic("reading RFDR for something other than pattern"
                          " matching or hashing! %#x\n", rfaddr);
                }
                break;

              case SRR:
                reg = regs.srr;
                break;

              case MIBC:
                reg = regs.mibc;
                // MIBS/ACLR are self-clearing command bits
                reg &= ~(MIBC_MIBS | MIBC_ACLR);
                break;

              case VRCR:
                reg = regs.vrcr;
                break;

              case VTCR:
                reg = regs.vtcr;
                break;

              case VDR:
                reg = regs.vdr;
                break;

              case CCSR:
                reg = regs.ccsr;
                break;

              case TBICR:
                reg = regs.tbicr;
                break;

              case TBISR:
                reg = regs.tbisr;
                break;

              case TANAR:
                reg = regs.tanar;
                break;

              case TANLPAR:
                reg = regs.tanlpar;
                break;

              case TANER:
                reg = regs.taner;
                break;

              case TESR:
                reg = regs.tesr;
                break;

              case M5REG:
                // simulator-private register exposing config to the driver
                reg = params()->m5reg;
                break;

              default:
                panic("reading unimplemented register: addr=%#x", daddr);
            }

            DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
                    daddr, reg, reg);
        }
        break;

      default:
        panic("accessing register with invalid size: addr=%#x, size=%d",
              daddr, req->size);
    }

    return No_Fault;
}
793
794 Fault
795 NSGigE::write(MemReqPtr &req, const uint8_t *data)
796 {
797 assert(ioEnable);
798
799 Addr daddr = req->paddr & 0xfff;
800 DPRINTF(EthernetPIO, "write da=%#x pa=%#x va=%#x size=%d\n",
801 daddr, req->paddr, req->vaddr, req->size);
802
803 if (daddr > LAST && daddr <= RESERVED) {
804 panic("Accessing reserved register");
805 } else if (daddr > RESERVED && daddr <= 0x3FC) {
806 writeConfig(daddr & 0xff, req->size, data);
807 return No_Fault;
808 } else if (daddr > 0x3FC)
809 panic("Something is messed up!\n");
810
811 if (req->size == sizeof(uint32_t)) {
812 uint32_t reg = *(uint32_t *)data;
813 uint16_t rfaddr;
814
815 DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);
816
817 switch (daddr) {
818 case CR:
819 regs.command = reg;
820 if (reg & CR_TXD) {
821 txEnable = false;
822 } else if (reg & CR_TXE) {
823 txEnable = true;
824
825 // the kernel is enabling the transmit machine
826 if (txState == txIdle)
827 txKick();
828 }
829
830 if (reg & CR_RXD) {
831 rxEnable = false;
832 } else if (reg & CR_RXE) {
833 rxEnable = true;
834
835 if (rxState == rxIdle)
836 rxKick();
837 }
838
839 if (reg & CR_TXR)
840 txReset();
841
842 if (reg & CR_RXR)
843 rxReset();
844
845 if (reg & CR_SWI)
846 devIntrPost(ISR_SWI);
847
848 if (reg & CR_RST) {
849 txReset();
850 rxReset();
851
852 regsReset();
853 }
854 break;
855
856 case CFGR:
857 if (reg & CFGR_LNKSTS ||
858 reg & CFGR_SPDSTS ||
859 reg & CFGR_DUPSTS ||
860 reg & CFGR_RESERVED ||
861 reg & CFGR_T64ADDR ||
862 reg & CFGR_PCI64_DET)
863
864 // First clear all writable bits
865 regs.config &= CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
866 CFGR_RESERVED | CFGR_T64ADDR |
867 CFGR_PCI64_DET;
868 // Now set the appropriate writable bits
869 regs.config |= reg & ~(CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
870 CFGR_RESERVED | CFGR_T64ADDR |
871 CFGR_PCI64_DET);
872
873 // all these #if 0's are because i don't THINK the kernel needs to
874 // have these implemented. if there is a problem relating to one of
875 // these, you may need to add functionality in.
876 if (reg & CFGR_TBI_EN) ;
877 if (reg & CFGR_MODE_1000) ;
878
879 if (reg & CFGR_AUTO_1000)
880 panic("CFGR_AUTO_1000 not implemented!\n");
881
882 if (reg & CFGR_PINT_DUPSTS ||
883 reg & CFGR_PINT_LNKSTS ||
884 reg & CFGR_PINT_SPDSTS)
885 ;
886
887 if (reg & CFGR_TMRTEST) ;
888 if (reg & CFGR_MRM_DIS) ;
889 if (reg & CFGR_MWI_DIS) ;
890
891 if (reg & CFGR_T64ADDR) ;
892 // panic("CFGR_T64ADDR is read only register!\n");
893
894 if (reg & CFGR_PCI64_DET)
895 panic("CFGR_PCI64_DET is read only register!\n");
896
897 if (reg & CFGR_DATA64_EN) ;
898 if (reg & CFGR_M64ADDR) ;
899 if (reg & CFGR_PHY_RST) ;
900 if (reg & CFGR_PHY_DIS) ;
901
902 if (reg & CFGR_EXTSTS_EN)
903 extstsEnable = true;
904 else
905 extstsEnable = false;
906
907 if (reg & CFGR_REQALG) ;
908 if (reg & CFGR_SB) ;
909 if (reg & CFGR_POW) ;
910 if (reg & CFGR_EXD) ;
911 if (reg & CFGR_PESEL) ;
912 if (reg & CFGR_BROM_DIS) ;
913 if (reg & CFGR_EXT_125) ;
914 if (reg & CFGR_BEM) ;
915 break;
916
917 case MEAR:
918 // Clear writable bits
919 regs.mear &= MEAR_EEDO;
920 // Set appropriate writable bits
921 regs.mear |= reg & ~MEAR_EEDO;
922
923 // FreeBSD uses the EEPROM to read PMATCH (for the MAC address)
924 // even though it could get it through RFDR
925 if (reg & MEAR_EESEL) {
926 // Rising edge of clock
927 if (reg & MEAR_EECLK && !eepromClk)
928 eepromKick();
929 }
930 else {
931 eepromState = eepromStart;
932 regs.mear &= ~MEAR_EEDI;
933 }
934
935 eepromClk = reg & MEAR_EECLK;
936
937 // since phy is completely faked, MEAR_MD* don't matter
938 if (reg & MEAR_MDIO) ;
939 if (reg & MEAR_MDDIR) ;
940 if (reg & MEAR_MDC) ;
941 break;
942
943 case PTSCR:
944 regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
945 // these control BISTs for various parts of chip - we
946 // don't care or do just fake that the BIST is done
947 if (reg & PTSCR_RBIST_EN)
948 regs.ptscr |= PTSCR_RBIST_DONE;
949 if (reg & PTSCR_EEBIST_EN)
950 regs.ptscr &= ~PTSCR_EEBIST_EN;
951 if (reg & PTSCR_EELOAD_EN)
952 regs.ptscr &= ~PTSCR_EELOAD_EN;
953 break;
954
955 case ISR: /* writing to the ISR has no effect */
956 panic("ISR is a read only register!\n");
957
958 case IMR:
959 regs.imr = reg;
960 devIntrChangeMask();
961 break;
962
963 case IER:
964 regs.ier = reg;
965 break;
966
967 case IHR:
968 regs.ihr = reg;
969 /* not going to implement real interrupt holdoff */
970 break;
971
972 case TXDP:
973 regs.txdp = (reg & 0xFFFFFFFC);
974 assert(txState == txIdle);
975 CTDD = false;
976 break;
977
978 case TXDP_HI:
979 regs.txdp_hi = reg;
980 break;
981
982 case TX_CFG:
983 regs.txcfg = reg;
984 #if 0
985 if (reg & TX_CFG_CSI) ;
986 if (reg & TX_CFG_HBI) ;
987 if (reg & TX_CFG_MLB) ;
988 if (reg & TX_CFG_ATP) ;
989 if (reg & TX_CFG_ECRETRY) {
990 /*
991 * this could easily be implemented, but considering
992 * the network is just a fake pipe, wouldn't make
993 * sense to do this
994 */
995 }
996
997 if (reg & TX_CFG_BRST_DIS) ;
998 #endif
999
1000 #if 0
1001 /* we handle our own DMA, ignore the kernel's exhortations */
1002 if (reg & TX_CFG_MXDMA) ;
1003 #endif
1004
1005 // also, we currently don't care about fill/drain
1006 // thresholds though this may change in the future with
1007 // more realistic networks or a driver which changes it
1008 // according to feedback
1009
1010 break;
1011
1012 case GPIOR:
1013 // Only write writable bits
1014 regs.gpior &= GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
1015 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN;
1016 regs.gpior |= reg & ~(GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
1017 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN);
1018 /* these just control general purpose i/o pins, don't matter */
1019 break;
1020
1021 case RXDP:
1022 regs.rxdp = reg;
1023 CRDD = false;
1024 break;
1025
1026 case RXDP_HI:
1027 regs.rxdp_hi = reg;
1028 break;
1029
1030 case RX_CFG:
1031 regs.rxcfg = reg;
1032 #if 0
1033 if (reg & RX_CFG_AEP) ;
1034 if (reg & RX_CFG_ARP) ;
1035 if (reg & RX_CFG_STRIPCRC) ;
1036 if (reg & RX_CFG_RX_RD) ;
1037 if (reg & RX_CFG_ALP) ;
1038 if (reg & RX_CFG_AIRL) ;
1039
1040 /* we handle our own DMA, ignore what kernel says about it */
1041 if (reg & RX_CFG_MXDMA) ;
1042
1043 //also, we currently don't care about fill/drain thresholds
1044 //though this may change in the future with more realistic
1045 //networks or a driver which changes it according to feedback
1046 if (reg & (RX_CFG_DRTH | RX_CFG_DRTH0)) ;
1047 #endif
1048 break;
1049
1050 case PQCR:
1051 /* there is no priority queueing used in the linux 2.6 driver */
1052 regs.pqcr = reg;
1053 break;
1054
1055 case WCSR:
1056 /* not going to implement wake on LAN */
1057 regs.wcsr = reg;
1058 break;
1059
1060 case PCR:
1061 /* not going to implement pause control */
1062 regs.pcr = reg;
1063 break;
1064
1065 case RFCR:
1066 regs.rfcr = reg;
1067
1068 rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
1069 acceptBroadcast = (reg & RFCR_AAB) ? true : false;
1070 acceptMulticast = (reg & RFCR_AAM) ? true : false;
1071 acceptUnicast = (reg & RFCR_AAU) ? true : false;
1072 acceptPerfect = (reg & RFCR_APM) ? true : false;
1073 acceptArp = (reg & RFCR_AARP) ? true : false;
1074 multicastHashEnable = (reg & RFCR_MHEN) ? true : false;
1075
1076 #if 0
1077 if (reg & RFCR_APAT)
1078 panic("RFCR_APAT not implemented!\n");
1079 #endif
1080 if (reg & RFCR_UHEN)
1081 panic("Unicast hash filtering not used by drivers!\n");
1082
1083 if (reg & RFCR_ULM)
1084 panic("RFCR_ULM not implemented!\n");
1085
1086 break;
1087
1088 case RFDR:
1089 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
1090 switch (rfaddr) {
1091 case 0x000:
1092 rom.perfectMatch[0] = (uint8_t)reg;
1093 rom.perfectMatch[1] = (uint8_t)(reg >> 8);
1094 break;
1095 case 0x002:
1096 rom.perfectMatch[2] = (uint8_t)reg;
1097 rom.perfectMatch[3] = (uint8_t)(reg >> 8);
1098 break;
1099 case 0x004:
1100 rom.perfectMatch[4] = (uint8_t)reg;
1101 rom.perfectMatch[5] = (uint8_t)(reg >> 8);
1102 break;
1103 default:
1104
1105 if (rfaddr >= FHASH_ADDR &&
1106 rfaddr < FHASH_ADDR + FHASH_SIZE) {
1107
1108 // Only word-aligned writes supported
1109 if (rfaddr % 2)
1110 panic("unaligned write to filter hash table!");
1111
1112 rom.filterHash[rfaddr - FHASH_ADDR] = (uint8_t)reg;
1113 rom.filterHash[rfaddr - FHASH_ADDR + 1]
1114 = (uint8_t)(reg >> 8);
1115 break;
1116 }
1117 panic("writing RFDR for something other than pattern matching\
1118 or hashing! %#x\n", rfaddr);
1119 }
1120
1121 case BRAR:
1122 regs.brar = reg;
1123 break;
1124
1125 case BRDR:
1126 panic("the driver never uses BRDR, something is wrong!\n");
1127
1128 case SRR:
1129 panic("SRR is read only register!\n");
1130
1131 case MIBC:
1132 panic("the driver never uses MIBC, something is wrong!\n");
1133
1134 case VRCR:
1135 regs.vrcr = reg;
1136 break;
1137
1138 case VTCR:
1139 regs.vtcr = reg;
1140 break;
1141
1142 case VDR:
1143 panic("the driver never uses VDR, something is wrong!\n");
1144
1145 case CCSR:
1146 /* not going to implement clockrun stuff */
1147 regs.ccsr = reg;
1148 break;
1149
1150 case TBICR:
1151 regs.tbicr = reg;
1152 if (reg & TBICR_MR_LOOPBACK)
1153 panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
1154
1155 if (reg & TBICR_MR_AN_ENABLE) {
1156 regs.tanlpar = regs.tanar;
1157 regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
1158 }
1159
1160 #if 0
1161 if (reg & TBICR_MR_RESTART_AN) ;
1162 #endif
1163
1164 break;
1165
1166 case TBISR:
1167 panic("TBISR is read only register!\n");
1168
1169 case TANAR:
1170 // Only write the writable bits
1171 regs.tanar &= TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED;
1172 regs.tanar |= reg & ~(TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED);
1173
1174 // Pause capability unimplemented
1175 #if 0
1176 if (reg & TANAR_PS2) ;
1177 if (reg & TANAR_PS1) ;
1178 #endif
1179
1180 break;
1181
1182 case TANLPAR:
1183 panic("this should only be written to by the fake phy!\n");
1184
1185 case TANER:
1186 panic("TANER is read only register!\n");
1187
1188 case TESR:
1189 regs.tesr = reg;
1190 break;
1191
1192 default:
1193 panic("invalid register access daddr=%#x", daddr);
1194 }
1195 } else {
1196 panic("Invalid Request Size");
1197 }
1198
1199 return No_Fault;
1200 }
1201
/**
 * Record interrupt source bits in the ISR and, if any are unmasked,
 * schedule an interrupt post to the CPU.  Reserved bits panic,
 * unimplemented bits only warn and are then masked off.  Per-source
 * "total written to ISR" statistics are bumped only for unmasked bits.
 */
void
NSGigE::devIntrPost(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot set a reserved interrupt");

    if (interrupts & ISR_NOIMPL)
        warn("interrupt not implemented %#x\n", interrupts);

    interrupts &= ISR_IMPL;
    regs.isr |= interrupts;

    // count each unmasked source for the coalescing statistics
    if (interrupts & regs.imr) {
        if (interrupts & ISR_SWI) {
            totalSwi++;
        }
        if (interrupts & ISR_RXIDLE) {
            totalRxIdle++;
        }
        if (interrupts & ISR_RXOK) {
            totalRxOk++;
        }
        if (interrupts & ISR_RXDESC) {
            totalRxDesc++;
        }
        if (interrupts & ISR_TXOK) {
            totalTxOk++;
        }
        if (interrupts & ISR_TXIDLE) {
            totalTxIdle++;
        }
        if (interrupts & ISR_TXDESC) {
            totalTxDesc++;
        }
        if (interrupts & ISR_RXORN) {
            totalRxOrn++;
        }
    }

    DPRINTF(EthernetIntr,
            "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
            interrupts, regs.isr, regs.imr);

    if ((regs.isr & regs.imr)) {
        // ISR_NODELAY sources bypass the configured interrupt delay
        Tick when = curTick;
        if ((regs.isr & regs.imr & ISR_NODELAY) == 0)
            when += intrDelay;
        cpuIntrPost(when);
    }
}
1252
/* writing this interrupt counting stats inside this means that this function
   is now limited to being used to clear all interrupts upon the kernel
   reading isr and servicing.  just telling you in case you were thinking
   of expanding use.
*/
// Clear interrupt bits from the ISR (called when the kernel reads ISR).
// Before clearing, bump the per-source "posted to CPU" statistics for
// every unmasked bit still pending, then deassert the CPU interrupt line
// if nothing unmasked remains.
void
NSGigE::devIntrClear(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot clear a reserved interrupt");

    if (regs.isr & regs.imr & ISR_SWI) {
        postedSwi++;
    }
    if (regs.isr & regs.imr & ISR_RXIDLE) {
        postedRxIdle++;
    }
    if (regs.isr & regs.imr & ISR_RXOK) {
        postedRxOk++;
    }
    if (regs.isr & regs.imr & ISR_RXDESC) {
        postedRxDesc++;
    }
    if (regs.isr & regs.imr & ISR_TXOK) {
        postedTxOk++;
    }
    if (regs.isr & regs.imr & ISR_TXIDLE) {
        postedTxIdle++;
    }
    if (regs.isr & regs.imr & ISR_TXDESC) {
        postedTxDesc++;
    }
    if (regs.isr & regs.imr & ISR_RXORN) {
        postedRxOrn++;
    }

    if (regs.isr & regs.imr & ISR_IMPL)
        postedInterrupts++;

    // never clear the (always-zero) unimplemented bits
    interrupts &= ~ISR_NOIMPL;
    regs.isr &= ~interrupts;

    DPRINTF(EthernetIntr,
            "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
            interrupts, regs.isr, regs.imr);

    if (!(regs.isr & regs.imr))
        cpuIntrClear();
}
1302
1303 void
1304 NSGigE::devIntrChangeMask()
1305 {
1306 DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
1307 regs.isr, regs.imr, regs.isr & regs.imr);
1308
1309 if (regs.isr & regs.imr)
1310 cpuIntrPost(curTick);
1311 else
1312 cpuIntrClear();
1313 }
1314
/*
 * Schedule the CPU-visible interrupt to be asserted at tick 'when'.
 * If an earlier interrupt event is already scheduled, the new cause is
 * folded into it instead of scheduling a second event.
 */
void
NSGigE::cpuIntrPost(Tick when)
{
    // If the interrupt you want to post is later than an interrupt
    // already scheduled, just let it post in the coming one and don't
    // schedule another.
    // HOWEVER, must be sure that the scheduled intrTick is in the
    // future (this was formerly the source of a bug)
    /**
     * @todo this warning should be removed and the intrTick code should
     * be fixed.
     */
    assert(when >= curTick);
    assert(intrTick >= curTick || intrTick == 0);  // 0 means "none scheduled"
    if (when > intrTick && intrTick != 0) {
        DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
                intrTick);
        return;
    }

    intrTick = when;
    if (intrTick < curTick) {
        // Should be unreachable given the asserts above; drop into the
        // debugger and clamp so the event isn't scheduled in the past.
        debug_break();
        intrTick = curTick;
    }

    DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
            intrTick);

    // Replace any previously scheduled (now stale) interrupt event.
    if (intrEvent)
        intrEvent->squash();
    intrEvent = new IntrEvent(this, true);
    intrEvent->schedule(intrTick);
}
1349
1350 void
1351 NSGigE::cpuInterrupt()
1352 {
1353 assert(intrTick == curTick);
1354
1355 // Whether or not there's a pending interrupt, we don't care about
1356 // it anymore
1357 intrEvent = 0;
1358 intrTick = 0;
1359
1360 // Don't send an interrupt if there's already one
1361 if (cpuPendingIntr) {
1362 DPRINTF(EthernetIntr,
1363 "would send an interrupt now, but there's already pending\n");
1364 } else {
1365 // Send interrupt
1366 cpuPendingIntr = true;
1367
1368 DPRINTF(EthernetIntr, "posting interrupt\n");
1369 intrPost();
1370 }
1371 }
1372
1373 void
1374 NSGigE::cpuIntrClear()
1375 {
1376 if (!cpuPendingIntr)
1377 return;
1378
1379 if (intrEvent) {
1380 intrEvent->squash();
1381 intrEvent = 0;
1382 }
1383
1384 intrTick = 0;
1385
1386 cpuPendingIntr = false;
1387
1388 DPRINTF(EthernetIntr, "clearing interrupt\n");
1389 intrClear();
1390 }
1391
1392 bool
1393 NSGigE::cpuIntrPending() const
1394 { return cpuPendingIntr; }
1395
1396 void
1397 NSGigE::txReset()
1398 {
1399
1400 DPRINTF(Ethernet, "transmit reset\n");
1401
1402 CTDD = false;
1403 txEnable = false;;
1404 txFragPtr = 0;
1405 assert(txDescCnt == 0);
1406 txFifo.clear();
1407 txState = txIdle;
1408 assert(txDmaState == dmaIdle);
1409 }
1410
1411 void
1412 NSGigE::rxReset()
1413 {
1414 DPRINTF(Ethernet, "receive reset\n");
1415
1416 CRDD = false;
1417 assert(rxPktBytes == 0);
1418 rxEnable = false;
1419 rxFragPtr = 0;
1420 assert(rxDescCnt == 0);
1421 assert(rxDmaState == dmaIdle);
1422 rxFifo.clear();
1423 rxState = rxIdle;
1424 }
1425
1426 void
1427 NSGigE::regsReset()
1428 {
1429 memset(&regs, 0, sizeof(regs));
1430 regs.config = (CFGR_LNKSTS | CFGR_TBI_EN | CFGR_MODE_1000);
1431 regs.mear = 0x12;
1432 regs.txcfg = 0x120; // set drain threshold to 1024 bytes and
1433 // fill threshold to 32 bytes
1434 regs.rxcfg = 0x4; // set drain threshold to 16 bytes
1435 regs.srr = 0x0103; // set the silicon revision to rev B or 0x103
1436 regs.mibc = MIBC_FRZ;
1437 regs.vdr = 0x81; // set the vlan tag type to 802.1q
1438 regs.tesr = 0xc000; // TBI capable of both full and half duplex
1439 regs.brar = 0xffffffff;
1440
1441 extstsEnable = false;
1442 acceptBroadcast = false;
1443 acceptMulticast = false;
1444 acceptUnicast = false;
1445 acceptPerfect = false;
1446 acceptArp = false;
1447 }
1448
1449 void
1450 NSGigE::rxDmaReadCopy()
1451 {
1452 assert(rxDmaState == dmaReading);
1453
1454 physmem->dma_read((uint8_t *)rxDmaData, rxDmaAddr, rxDmaLen);
1455 rxDmaState = dmaIdle;
1456
1457 DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
1458 rxDmaAddr, rxDmaLen);
1459 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1460 }
1461
1462 bool
1463 NSGigE::doRxDmaRead()
1464 {
1465 assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
1466 rxDmaState = dmaReading;
1467
1468 if (dmaInterface && !rxDmaFree) {
1469 if (dmaInterface->busy())
1470 rxDmaState = dmaReadWaiting;
1471 else
1472 dmaInterface->doDMA(Read, rxDmaAddr, rxDmaLen, curTick,
1473 &rxDmaReadEvent, true);
1474 return true;
1475 }
1476
1477 if (dmaReadDelay == 0 && dmaReadFactor == 0) {
1478 rxDmaReadCopy();
1479 return false;
1480 }
1481
1482 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
1483 Tick start = curTick + dmaReadDelay + factor;
1484 rxDmaReadEvent.schedule(start);
1485 return true;
1486 }
1487
1488 void
1489 NSGigE::rxDmaReadDone()
1490 {
1491 assert(rxDmaState == dmaReading);
1492 rxDmaReadCopy();
1493
1494 // If the transmit state machine has a pending DMA, let it go first
1495 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1496 txKick();
1497
1498 rxKick();
1499 }
1500
1501 void
1502 NSGigE::rxDmaWriteCopy()
1503 {
1504 assert(rxDmaState == dmaWriting);
1505
1506 physmem->dma_write(rxDmaAddr, (uint8_t *)rxDmaData, rxDmaLen);
1507 rxDmaState = dmaIdle;
1508
1509 DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
1510 rxDmaAddr, rxDmaLen);
1511 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1512 }
1513
1514 bool
1515 NSGigE::doRxDmaWrite()
1516 {
1517 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
1518 rxDmaState = dmaWriting;
1519
1520 if (dmaInterface && !rxDmaFree) {
1521 if (dmaInterface->busy())
1522 rxDmaState = dmaWriteWaiting;
1523 else
1524 dmaInterface->doDMA(WriteInvalidate, rxDmaAddr, rxDmaLen, curTick,
1525 &rxDmaWriteEvent, true);
1526 return true;
1527 }
1528
1529 if (dmaWriteDelay == 0 && dmaWriteFactor == 0) {
1530 rxDmaWriteCopy();
1531 return false;
1532 }
1533
1534 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
1535 Tick start = curTick + dmaWriteDelay + factor;
1536 rxDmaWriteEvent.schedule(start);
1537 return true;
1538 }
1539
1540 void
1541 NSGigE::rxDmaWriteDone()
1542 {
1543 assert(rxDmaState == dmaWriting);
1544 rxDmaWriteCopy();
1545
1546 // If the transmit state machine has a pending DMA, let it go first
1547 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1548 txKick();
1549
1550 rxKick();
1551 }
1552
/*
 * Receive-side state machine.  Runs until it must wait for a DMA to
 * complete, runs out of work, or (when a clock is modelled) uses up the
 * current cycle.  Transitions that can proceed immediately jump back to
 * the "next" label; transitions that must wait for DMA jump to "exit",
 * and the machine is re-entered by the DMA completion events.
 */
void
NSGigE::rxKick()
{
    bool is64bit = (bool)(regs.config & CFGR_M64ADDR);

    DPRINTF(EthernetSM,
            "receive kick rxState=%s (rxBuf.size=%d) %d-bit\n",
            NsRxStateStrings[rxState], rxFifo.size(), is64bit ? 64 : 32);

    Addr link, bufptr;
    // Aliases into whichever descriptor format (32/64-bit) is active.
    uint32_t &cmdsts = is64bit ? rxDesc64.cmdsts : rxDesc32.cmdsts;
    uint32_t &extsts = is64bit ? rxDesc64.extsts : rxDesc32.extsts;

 next:
    if (clock) {
        if (rxKickTick > curTick) {
            DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
                    rxKickTick);

            goto exit;
        }

        // Go to the next state machine clock tick.
        rxKickTick = curTick + cycles(1);
    }

    // Retry any DMA that previously stalled on a busy DMA interface.
    switch(rxDmaState) {
      case dmaReadWaiting:
        if (doRxDmaRead())
            goto exit;
        break;
      case dmaWriteWaiting:
        if (doRxDmaWrite())
            goto exit;
        break;
      default:
        break;
    }

    link = is64bit ? (Addr)rxDesc64.link : (Addr)rxDesc32.link;
    bufptr = is64bit ? (Addr)rxDesc64.bufptr : (Addr)rxDesc32.bufptr;

    // see state machine from spec for details
    // the way this works is, if you finish work on one state and can
    // go directly to another, you do that through jumping to the
    // label "next". however, if you have intermediate work, like DMA
    // so that you can't go to the next state yet, you go to exit and
    // exit the loop. however, when the DMA is done it will trigger
    // an event and come back to this loop.
    switch (rxState) {
      case rxIdle:
        if (!rxEnable) {
            DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n");
            goto exit;
        }

        if (CRDD) {
            // Current descriptor done: re-fetch just its link field to
            // see whether the driver has appended more descriptors.
            rxState = rxDescRefr;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData =
                is64bit ? (void *)&rxDesc64.link : (void *)&rxDesc32.link;
            rxDmaLen = is64bit ? sizeof(rxDesc64.link) : sizeof(rxDesc32.link);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        } else {
            // Fetch the full descriptor at RXDP.
            rxState = rxDescRead;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
            rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      case rxDescRefr:
        // Wait for the link-field refresh DMA to land.
        if (rxDmaState != dmaIdle)
            goto exit;

        rxState = rxAdvance;
        break;

      case rxDescRead:
        // Wait for the descriptor-fetch DMA to land.
        if (rxDmaState != dmaIdle)
            goto exit;

        DPRINTF(EthernetDesc, "rxDesc: addr=%08x read descriptor\n",
                regs.rxdp & 0x3fffffff);
        DPRINTF(EthernetDesc,
                "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
                link, bufptr, cmdsts, extsts);

        if (cmdsts & CMDSTS_OWN) {
            // Descriptor still owned by the driver: nothing to fill.
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            goto exit;
        } else {
            rxState = rxFifoBlock;
            rxFragPtr = bufptr;
            rxDescCnt = cmdsts & CMDSTS_LEN_MASK;
        }
        break;

      case rxFifoBlock:
        if (!rxPacket) {
            /**
             * @todo in reality, we should be able to start processing
             * the packet as it arrives, and not have to wait for the
             * full packet ot be in the receive fifo.
             */
            if (rxFifo.empty())
                goto exit;

            DPRINTF(EthernetSM, "****processing receive of new packet****\n");

            // If we don't have a packet, grab a new one from the fifo.
            rxPacket = rxFifo.front();
            rxPktBytes = rxPacket->length;
            rxPacketBufPtr = rxPacket->data;

#if TRACING_ON
            if (DTRACE(Ethernet)) {
                IpPtr ip(rxPacket);
                if (ip) {
                    DPRINTF(Ethernet, "ID is %d\n", ip->id());
                    TcpPtr tcp(ip);
                    if (tcp) {
                        DPRINTF(Ethernet,
                                "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
                                tcp->sport(), tcp->dport(), tcp->seq(),
                                tcp->ack());
                    }
                }
            }
#endif

            // sanity check - i think the driver behaves like this
            assert(rxDescCnt >= rxPktBytes);
            rxFifo.pop();
        }


        // dont' need the && rxDescCnt > 0 if driver sanity check
        // above holds
        if (rxPktBytes > 0) {
            // Packet data remains: DMA the next fragment out to memory.
            rxState = rxFragWrite;
            // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
            // check holds
            rxXferLen = rxPktBytes;

            rxDmaAddr = rxFragPtr & 0x3fffffff;
            rxDmaData = rxPacketBufPtr;
            rxDmaLen = rxXferLen;
            rxDmaFree = dmaDataFree;

            if (doRxDmaWrite())
                goto exit;

        } else {
            // Whole packet delivered: write back descriptor status.
            rxState = rxDescWrite;

            //if (rxPktBytes == 0) {  /* packet is done */
            assert(rxPktBytes == 0);
            DPRINTF(EthernetSM, "done with receiving packet\n");

            cmdsts |= CMDSTS_OWN;
            cmdsts &= ~CMDSTS_MORE;
            cmdsts |= CMDSTS_OK;
            cmdsts &= 0xffff0000;
            cmdsts += rxPacket->length;   //i.e. set CMDSTS_SIZE

#if 0
            /*
             * all the driver uses these are for its own stats keeping
             * which we don't care about, aren't necessary for
             * functionality and doing this would just slow us down.
             * if they end up using this in a later version for
             * functional purposes, just undef
             */
            if (rxFilterEnable) {
                cmdsts &= ~CMDSTS_DEST_MASK;
                const EthAddr &dst = rxFifoFront()->dst();
                if (dst->unicast())
                    cmdsts |= CMDSTS_DEST_SELF;
                if (dst->multicast())
                    cmdsts |= CMDSTS_DEST_MULTI;
                if (dst->broadcast())
                    cmdsts |= CMDSTS_DEST_MASK;
            }
#endif

            // Fill in EXTSTS checksum-offload status bits for the driver.
            IpPtr ip(rxPacket);
            if (extstsEnable && ip) {
                extsts |= EXTSTS_IPPKT;
                rxIpChecksums++;
                if (cksum(ip) != 0) {
                    DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
                    extsts |= EXTSTS_IPERR;
                }
                TcpPtr tcp(ip);
                UdpPtr udp(ip);
                if (tcp) {
                    extsts |= EXTSTS_TCPPKT;
                    rxTcpChecksums++;
                    if (cksum(tcp) != 0) {
                        DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
                        extsts |= EXTSTS_TCPERR;

                    }
                } else if (udp) {
                    extsts |= EXTSTS_UDPPKT;
                    rxUdpChecksums++;
                    if (cksum(udp) != 0) {
                        DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
                        extsts |= EXTSTS_UDPERR;
                    }
                }
            }
            rxPacket = 0;

            /*
             * the driver seems to always receive into desc buffers
             * of size 1514, so you never have a pkt that is split
             * into multiple descriptors on the receive side, so
             * i don't implement that case, hence the assert above.
             */

            DPRINTF(EthernetDesc,
                    "rxDesc: addr=%08x writeback cmdsts extsts\n",
                    regs.rxdp & 0x3fffffff);
            DPRINTF(EthernetDesc,
                    "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
                    link, bufptr, cmdsts, extsts);

            // Write back only the cmdsts+extsts words (adjacent fields).
            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = &cmdsts;
            if (is64bit) {
                rxDmaAddr += offsetof(ns_desc64, cmdsts);
                rxDmaLen = sizeof(rxDesc64.cmdsts) + sizeof(rxDesc64.extsts);
            } else {
                rxDmaAddr += offsetof(ns_desc32, cmdsts);
                rxDmaLen = sizeof(rxDesc32.cmdsts) + sizeof(rxDesc32.extsts);
            }
            rxDmaFree = dmaDescFree;

            descDmaWrites++;
            descDmaWrBytes += rxDmaLen;

            if (doRxDmaWrite())
                goto exit;
        }
        break;

      case rxFragWrite:
        // Wait for the fragment DMA, then account for the bytes moved.
        if (rxDmaState != dmaIdle)
            goto exit;

        rxPacketBufPtr += rxXferLen;
        rxFragPtr += rxXferLen;
        rxPktBytes -= rxXferLen;

        rxState = rxFifoBlock;
        break;

      case rxDescWrite:
        // Wait for the status writeback, then raise completion interrupts.
        if (rxDmaState != dmaIdle)
            goto exit;

        assert(cmdsts & CMDSTS_OWN);

        assert(rxPacket == 0);
        devIntrPost(ISR_RXOK);

        if (cmdsts & CMDSTS_INTR)
            devIntrPost(ISR_RXDESC);

        if (!rxEnable) {
            DPRINTF(EthernetSM, "Halting the RX state machine\n");
            rxState = rxIdle;
            goto exit;
        } else
            rxState = rxAdvance;
        break;

      case rxAdvance:
        // Follow the link pointer to the next descriptor, if any.
        if (link == 0) {
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            CRDD = true;
            goto exit;
        } else {
            if (rxDmaState != dmaIdle)
                goto exit;
            rxState = rxDescRead;
            regs.rxdp = link;
            CRDD = false;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
            rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
            rxDmaFree = dmaDescFree;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("Invalid rxState!");
    }

    DPRINTF(EthernetSM, "entering next rxState=%s\n",
            NsRxStateStrings[rxState]);
    goto next;

 exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
            NsRxStateStrings[rxState]);

    if (clock && !rxKickEvent.scheduled())
        rxKickEvent.schedule(rxKickTick);
}
1889
1890 void
1891 NSGigE::transmit()
1892 {
1893 if (txFifo.empty()) {
1894 DPRINTF(Ethernet, "nothing to transmit\n");
1895 return;
1896 }
1897
1898 DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n",
1899 txFifo.size());
1900 if (interface->sendPacket(txFifo.front())) {
1901 #if TRACING_ON
1902 if (DTRACE(Ethernet)) {
1903 IpPtr ip(txFifo.front());
1904 if (ip) {
1905 DPRINTF(Ethernet, "ID is %d\n", ip->id());
1906 TcpPtr tcp(ip);
1907 if (tcp) {
1908 DPRINTF(Ethernet,
1909 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1910 tcp->sport(), tcp->dport(), tcp->seq(),
1911 tcp->ack());
1912 }
1913 }
1914 }
1915 #endif
1916
1917 DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length);
1918 txBytes += txFifo.front()->length;
1919 txPackets++;
1920
1921 DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n",
1922 txFifo.avail());
1923 txFifo.pop();
1924
1925 /*
1926 * normally do a writeback of the descriptor here, and ONLY
1927 * after that is done, send this interrupt. but since our
1928 * stuff never actually fails, just do this interrupt here,
1929 * otherwise the code has to stray from this nice format.
1930 * besides, it's functionally the same.
1931 */
1932 devIntrPost(ISR_TXOK);
1933 }
1934
1935 if (!txFifo.empty() && !txEvent.scheduled()) {
1936 DPRINTF(Ethernet, "reschedule transmit\n");
1937 txEvent.schedule(curTick + retryTime);
1938 }
1939 }
1940
1941 void
1942 NSGigE::txDmaReadCopy()
1943 {
1944 assert(txDmaState == dmaReading);
1945
1946 physmem->dma_read((uint8_t *)txDmaData, txDmaAddr, txDmaLen);
1947 txDmaState = dmaIdle;
1948
1949 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
1950 txDmaAddr, txDmaLen);
1951 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1952 }
1953
1954 bool
1955 NSGigE::doTxDmaRead()
1956 {
1957 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
1958 txDmaState = dmaReading;
1959
1960 if (dmaInterface && !txDmaFree) {
1961 if (dmaInterface->busy())
1962 txDmaState = dmaReadWaiting;
1963 else
1964 dmaInterface->doDMA(Read, txDmaAddr, txDmaLen, curTick,
1965 &txDmaReadEvent, true);
1966 return true;
1967 }
1968
1969 if (dmaReadDelay == 0 && dmaReadFactor == 0.0) {
1970 txDmaReadCopy();
1971 return false;
1972 }
1973
1974 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
1975 Tick start = curTick + dmaReadDelay + factor;
1976 txDmaReadEvent.schedule(start);
1977 return true;
1978 }
1979
1980 void
1981 NSGigE::txDmaReadDone()
1982 {
1983 assert(txDmaState == dmaReading);
1984 txDmaReadCopy();
1985
1986 // If the receive state machine has a pending DMA, let it go first
1987 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1988 rxKick();
1989
1990 txKick();
1991 }
1992
1993 void
1994 NSGigE::txDmaWriteCopy()
1995 {
1996 assert(txDmaState == dmaWriting);
1997
1998 physmem->dma_write(txDmaAddr, (uint8_t *)txDmaData, txDmaLen);
1999 txDmaState = dmaIdle;
2000
2001 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
2002 txDmaAddr, txDmaLen);
2003 DDUMP(EthernetDMA, txDmaData, txDmaLen);
2004 }
2005
2006 bool
2007 NSGigE::doTxDmaWrite()
2008 {
2009 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
2010 txDmaState = dmaWriting;
2011
2012 if (dmaInterface && !txDmaFree) {
2013 if (dmaInterface->busy())
2014 txDmaState = dmaWriteWaiting;
2015 else
2016 dmaInterface->doDMA(WriteInvalidate, txDmaAddr, txDmaLen, curTick,
2017 &txDmaWriteEvent, true);
2018 return true;
2019 }
2020
2021 if (dmaWriteDelay == 0 && dmaWriteFactor == 0.0) {
2022 txDmaWriteCopy();
2023 return false;
2024 }
2025
2026 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
2027 Tick start = curTick + dmaWriteDelay + factor;
2028 txDmaWriteEvent.schedule(start);
2029 return true;
2030 }
2031
2032 void
2033 NSGigE::txDmaWriteDone()
2034 {
2035 assert(txDmaState == dmaWriting);
2036 txDmaWriteCopy();
2037
2038 // If the receive state machine has a pending DMA, let it go first
2039 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
2040 rxKick();
2041
2042 txKick();
2043 }
2044
2045 void
2046 NSGigE::txKick()
2047 {
2048 bool is64bit = (bool)(regs.config & CFGR_M64ADDR);
2049
2050 DPRINTF(EthernetSM, "transmit kick txState=%s %d-bit\n",
2051 NsTxStateStrings[txState], is64bit ? 64 : 32);
2052
2053 Addr link, bufptr;
2054 uint32_t &cmdsts = is64bit ? txDesc64.cmdsts : txDesc32.cmdsts;
2055 uint32_t &extsts = is64bit ? txDesc64.extsts : txDesc32.extsts;
2056
2057 next:
2058 if (clock) {
2059 if (txKickTick > curTick) {
2060 DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
2061 txKickTick);
2062 goto exit;
2063 }
2064
2065 // Go to the next state machine clock tick.
2066 txKickTick = curTick + cycles(1);
2067 }
2068
2069 switch(txDmaState) {
2070 case dmaReadWaiting:
2071 if (doTxDmaRead())
2072 goto exit;
2073 break;
2074 case dmaWriteWaiting:
2075 if (doTxDmaWrite())
2076 goto exit;
2077 break;
2078 default:
2079 break;
2080 }
2081
2082 link = is64bit ? (Addr)txDesc64.link : (Addr)txDesc32.link;
2083 bufptr = is64bit ? (Addr)txDesc64.bufptr : (Addr)txDesc32.bufptr;
2084 switch (txState) {
2085 case txIdle:
2086 if (!txEnable) {
2087 DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n");
2088 goto exit;
2089 }
2090
2091 if (CTDD) {
2092 txState = txDescRefr;
2093
2094 txDmaAddr = regs.txdp & 0x3fffffff;
2095 txDmaData =
2096 is64bit ? (void *)&txDesc64.link : (void *)&txDesc32.link;
2097 txDmaLen = is64bit ? sizeof(txDesc64.link) : sizeof(txDesc32.link);
2098 txDmaFree = dmaDescFree;
2099
2100 descDmaReads++;
2101 descDmaRdBytes += txDmaLen;
2102
2103 if (doTxDmaRead())
2104 goto exit;
2105
2106 } else {
2107 txState = txDescRead;
2108
2109 txDmaAddr = regs.txdp & 0x3fffffff;
2110 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
2111 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
2112 txDmaFree = dmaDescFree;
2113
2114 descDmaReads++;
2115 descDmaRdBytes += txDmaLen;
2116
2117 if (doTxDmaRead())
2118 goto exit;
2119 }
2120 break;
2121
2122 case txDescRefr:
2123 if (txDmaState != dmaIdle)
2124 goto exit;
2125
2126 txState = txAdvance;
2127 break;
2128
2129 case txDescRead:
2130 if (txDmaState != dmaIdle)
2131 goto exit;
2132
2133 DPRINTF(EthernetDesc, "txDesc: addr=%08x read descriptor\n",
2134 regs.txdp & 0x3fffffff);
2135 DPRINTF(EthernetDesc,
2136 "txDesc: link=%#x bufptr=%#x cmdsts=%#08x extsts=%#08x\n",
2137 link, bufptr, cmdsts, extsts);
2138
2139 if (cmdsts & CMDSTS_OWN) {
2140 txState = txFifoBlock;
2141 txFragPtr = bufptr;
2142 txDescCnt = cmdsts & CMDSTS_LEN_MASK;
2143 } else {
2144 devIntrPost(ISR_TXIDLE);
2145 txState = txIdle;
2146 goto exit;
2147 }
2148 break;
2149
2150 case txFifoBlock:
2151 if (!txPacket) {
2152 DPRINTF(EthernetSM, "****starting the tx of a new packet****\n");
2153 txPacket = new PacketData(16384);
2154 txPacketBufPtr = txPacket->data;
2155 }
2156
2157 if (txDescCnt == 0) {
2158 DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n");
2159 if (cmdsts & CMDSTS_MORE) {
2160 DPRINTF(EthernetSM, "there are more descriptors to come\n");
2161 txState = txDescWrite;
2162
2163 cmdsts &= ~CMDSTS_OWN;
2164
2165 txDmaAddr = regs.txdp & 0x3fffffff;
2166 txDmaData = &cmdsts;
2167 if (is64bit) {
2168 txDmaAddr += offsetof(ns_desc64, cmdsts);
2169 txDmaLen = sizeof(txDesc64.cmdsts);
2170 } else {
2171 txDmaAddr += offsetof(ns_desc32, cmdsts);
2172 txDmaLen = sizeof(txDesc32.cmdsts);
2173 }
2174 txDmaFree = dmaDescFree;
2175
2176 if (doTxDmaWrite())
2177 goto exit;
2178
2179 } else { /* this packet is totally done */
2180 DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
2181 /* deal with the the packet that just finished */
2182 if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
2183 IpPtr ip(txPacket);
2184 if (extsts & EXTSTS_UDPPKT) {
2185 UdpPtr udp(ip);
2186 udp->sum(0);
2187 udp->sum(cksum(udp));
2188 txUdpChecksums++;
2189 } else if (extsts & EXTSTS_TCPPKT) {
2190 TcpPtr tcp(ip);
2191 tcp->sum(0);
2192 tcp->sum(cksum(tcp));
2193 txTcpChecksums++;
2194 }
2195 if (extsts & EXTSTS_IPPKT) {
2196 ip->sum(0);
2197 ip->sum(cksum(ip));
2198 txIpChecksums++;
2199 }
2200 }
2201
2202 txPacket->length = txPacketBufPtr - txPacket->data;
2203 // this is just because the receive can't handle a
2204 // packet bigger want to make sure
2205 if (txPacket->length > 1514)
2206 panic("transmit packet too large, %s > 1514\n",
2207 txPacket->length);
2208
2209 #ifndef NDEBUG
2210 bool success =
2211 #endif
2212 txFifo.push(txPacket);
2213 assert(success);
2214
2215 /*
2216 * this following section is not tqo spec, but
2217 * functionally shouldn't be any different. normally,
2218 * the chip will wait til the transmit has occurred
2219 * before writing back the descriptor because it has
2220 * to wait to see that it was successfully transmitted
2221 * to decide whether to set CMDSTS_OK or not.
2222 * however, in the simulator since it is always
2223 * successfully transmitted, and writing it exactly to
2224 * spec would complicate the code, we just do it here
2225 */
2226
2227 cmdsts &= ~CMDSTS_OWN;
2228 cmdsts |= CMDSTS_OK;
2229
2230 DPRINTF(EthernetDesc,
2231 "txDesc writeback: cmdsts=%08x extsts=%08x\n",
2232 cmdsts, extsts);
2233
2234 txDmaFree = dmaDescFree;
2235 txDmaAddr = regs.txdp & 0x3fffffff;
2236 txDmaData = &cmdsts;
2237 if (is64bit) {
2238 txDmaAddr += offsetof(ns_desc64, cmdsts);
2239 txDmaLen =
2240 sizeof(txDesc64.cmdsts) + sizeof(txDesc64.extsts);
2241 } else {
2242 txDmaAddr += offsetof(ns_desc32, cmdsts);
2243 txDmaLen =
2244 sizeof(txDesc32.cmdsts) + sizeof(txDesc32.extsts);
2245 }
2246
2247 descDmaWrites++;
2248 descDmaWrBytes += txDmaLen;
2249
2250 transmit();
2251 txPacket = 0;
2252
2253 if (!txEnable) {
2254 DPRINTF(EthernetSM, "halting TX state machine\n");
2255 txState = txIdle;
2256 goto exit;
2257 } else
2258 txState = txAdvance;
2259
2260 if (doTxDmaWrite())
2261 goto exit;
2262 }
2263 } else {
2264 DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
2265 if (!txFifo.full()) {
2266 txState = txFragRead;
2267
2268 /*
2269 * The number of bytes transferred is either whatever
2270 * is left in the descriptor (txDescCnt), or if there
2271 * is not enough room in the fifo, just whatever room
2272 * is left in the fifo
2273 */
2274 txXferLen = min<uint32_t>(txDescCnt, txFifo.avail());
2275
2276 txDmaAddr = txFragPtr & 0x3fffffff;
2277 txDmaData = txPacketBufPtr;
2278 txDmaLen = txXferLen;
2279 txDmaFree = dmaDataFree;
2280
2281 if (doTxDmaRead())
2282 goto exit;
2283 } else {
2284 txState = txFifoBlock;
2285 transmit();
2286
2287 goto exit;
2288 }
2289
2290 }
2291 break;
2292
2293 case txFragRead:
2294 if (txDmaState != dmaIdle)
2295 goto exit;
2296
2297 txPacketBufPtr += txXferLen;
2298 txFragPtr += txXferLen;
2299 txDescCnt -= txXferLen;
2300 txFifo.reserve(txXferLen);
2301
2302 txState = txFifoBlock;
2303 break;
2304
2305 case txDescWrite:
2306 if (txDmaState != dmaIdle)
2307 goto exit;
2308
2309 if (cmdsts & CMDSTS_INTR)
2310 devIntrPost(ISR_TXDESC);
2311
2312 if (!txEnable) {
2313 DPRINTF(EthernetSM, "halting TX state machine\n");
2314 txState = txIdle;
2315 goto exit;
2316 } else
2317 txState = txAdvance;
2318 break;
2319
2320 case txAdvance:
2321 if (link == 0) {
2322 devIntrPost(ISR_TXIDLE);
2323 txState = txIdle;
2324 goto exit;
2325 } else {
2326 if (txDmaState != dmaIdle)
2327 goto exit;
2328 txState = txDescRead;
2329 regs.txdp = link;
2330 CTDD = false;
2331
2332 txDmaAddr = link & 0x3fffffff;
2333 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
2334 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
2335 txDmaFree = dmaDescFree;
2336
2337 if (doTxDmaRead())
2338 goto exit;
2339 }
2340 break;
2341
2342 default:
2343 panic("invalid state");
2344 }
2345
2346 DPRINTF(EthernetSM, "entering next txState=%s\n",
2347 NsTxStateStrings[txState]);
2348 goto next;
2349
2350 exit:
2351 /**
2352 * @todo do we want to schedule a future kick?
2353 */
2354 DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
2355 NsTxStateStrings[txState]);
2356
2357 if (clock && !txKickEvent.scheduled())
2358 txKickEvent.schedule(txKickTick);
2359 }
2360
2361 /**
2362 * Advance the EEPROM state machine
2363 * Called on rising edge of EEPROM clock bit in MEAR
2364 */
void
NSGigE::eepromKick()
{
    switch (eepromState) {

      case eepromStart:

        // Wait for start bit
        if (regs.mear & MEAR_EEDI) {
            // Set up to get 2 opcode bits
            eepromState = eepromGetOpcode;
            eepromBitsToRx = 2;
            eepromOpcode = 0;
        }
        break;

      case eepromGetOpcode:
        // Shift opcode bits in MSB-first from MEAR's data-in bit.
        eepromOpcode <<= 1;
        eepromOpcode += (regs.mear & MEAR_EEDI) ? 1 : 0;
        --eepromBitsToRx;

        // Done getting opcode
        if (eepromBitsToRx == 0) {
            if (eepromOpcode != EEPROM_READ)
                panic("only EEPROM reads are implemented!");

            // Set up to get address
            eepromState = eepromGetAddress;
            eepromBitsToRx = 6;
            eepromAddress = 0;
        }
        break;

      case eepromGetAddress:
        // Shift address bits in MSB-first, same as the opcode.
        eepromAddress <<= 1;
        eepromAddress += (regs.mear & MEAR_EEDI) ? 1 : 0;
        --eepromBitsToRx;

        // Done getting address
        if (eepromBitsToRx == 0) {

            if (eepromAddress >= EEPROM_SIZE)
                panic("EEPROM read access out of range!");

            // Only the three perfect-match-address words are backed by
            // model state; each word is two bytes, low byte last.
            switch (eepromAddress) {

              case EEPROM_PMATCH2_ADDR:
                eepromData = rom.perfectMatch[5];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[4];
                break;

              case EEPROM_PMATCH1_ADDR:
                eepromData = rom.perfectMatch[3];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[2];
                break;

              case EEPROM_PMATCH0_ADDR:
                eepromData = rom.perfectMatch[1];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[0];
                break;

              default:
                panic("FreeBSD driver only uses EEPROM to read PMATCH!");
            }
            // Set up to read data
            eepromState = eepromRead;
            eepromBitsToRx = 16;

            // Clear data in bit
            regs.mear &= ~MEAR_EEDI;
        }
        break;

      case eepromRead:
        // Shift the 16-bit word out MSB-first on MEAR's data-out bit.
        // Clear Data Out bit
        regs.mear &= ~MEAR_EEDO;
        // Set bit to value of current EEPROM bit
        regs.mear |= (eepromData & 0x8000) ? MEAR_EEDO : 0x0;

        eepromData <<= 1;
        --eepromBitsToRx;

        // All done
        if (eepromBitsToRx == 0) {
            eepromState = eepromStart;
        }
        break;

      default:
        panic("invalid EEPROM state");
    }

}
2461
2462 void
2463 NSGigE::transferDone()
2464 {
2465 if (txFifo.empty()) {
2466 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
2467 return;
2468 }
2469
2470 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
2471
2472 if (txEvent.scheduled())
2473 txEvent.reschedule(curTick + cycles(1));
2474 else
2475 txEvent.schedule(curTick + cycles(1));
2476 }
2477
2478 bool
2479 NSGigE::rxFilter(const PacketPtr &packet)
2480 {
2481 EthPtr eth = packet;
2482 bool drop = true;
2483 string type;
2484
2485 const EthAddr &dst = eth->dst();
2486 if (dst.unicast()) {
2487 // If we're accepting all unicast addresses
2488 if (acceptUnicast)
2489 drop = false;
2490
2491 // If we make a perfect match
2492 if (acceptPerfect && dst == rom.perfectMatch)
2493 drop = false;
2494
2495 if (acceptArp && eth->type() == ETH_TYPE_ARP)
2496 drop = false;
2497
2498 } else if (dst.broadcast()) {
2499 // if we're accepting broadcasts
2500 if (acceptBroadcast)
2501 drop = false;
2502
2503 } else if (dst.multicast()) {
2504 // if we're accepting all multicasts
2505 if (acceptMulticast)
2506 drop = false;
2507
2508 // Multicast hashing faked - all packets accepted
2509 if (multicastHashEnable)
2510 drop = false;
2511 }
2512
2513 if (drop) {
2514 DPRINTF(Ethernet, "rxFilter drop\n");
2515 DDUMP(EthernetData, packet->data, packet->length);
2516 }
2517
2518 return drop;
2519 }
2520
/*
 * Accept a packet arriving from the wire.  The packet is dropped (but
 * acknowledged) if receive is disabled or filtered out; it is refused
 * (return false, no recvDone) only when the receive fifo is full, which
 * also raises an RX overrun interrupt.
 *
 * @param packet the packet delivered by the link
 * @return true if the link may consider the packet delivered
 */
bool
NSGigE::recvPacket(PacketPtr packet)
{
    rxBytes += packet->length;
    rxPackets++;

    DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
            rxFifo.avail());

    if (!rxEnable) {
        DPRINTF(Ethernet, "receive disabled...packet dropped\n");
        interface->recvDone();
        return true;
    }

    // NOTE(review): filtering disabled appears to mean "drop everything"
    // here, not "accept everything" -- confirm against driver expectations.
    if (!rxFilterEnable) {
        DPRINTF(Ethernet,
                "receive packet filtering disabled . . . packet dropped\n");
        interface->recvDone();
        return true;
    }

    if (rxFilter(packet)) {
        DPRINTF(Ethernet, "packet filtered...dropped\n");
        interface->recvDone();
        return true;
    }

    // Fifo overflow: refuse delivery and signal an RX overrun.
    if (rxFifo.avail() < packet->length) {
#if TRACING_ON
        IpPtr ip(packet);
        TcpPtr tcp(ip);
        if (ip) {
            DPRINTF(Ethernet,
                    "packet won't fit in receive buffer...pkt ID %d dropped\n",
                    ip->id());
            if (tcp) {
                DPRINTF(Ethernet, "Seq=%d\n", tcp->seq());
            }
        }
#endif
        droppedPackets++;
        devIntrPost(ISR_RXORN);
        return false;
    }

    rxFifo.push(packet);
    interface->recvDone();

    // Let the receive state machine start consuming the new packet.
    rxKick();
    return true;
}
2573
2574 //=====================================================================
2575 //
2576 //
/**
 * Write the device state to a checkpoint stream.
 *
 * NOTE: the set, names, and order of the fields written here must be
 * kept in sync with NSGigE::unserialize(), which reads them back.
 */
void
NSGigE::serialize(ostream &os)
{
    // Serialize the PciDev base class
    PciDev::serialize(os);

    /*
     * Finalize any DMA events now.
     *
     * Completing in-flight DMA copies up front means no DMA event has
     * to be checkpointed in a half-finished state.
     */
    if (rxDmaReadEvent.scheduled())
        rxDmaReadCopy();
    if (rxDmaWriteEvent.scheduled())
        rxDmaWriteCopy();
    if (txDmaReadEvent.scheduled())
        txDmaReadCopy();
    if (txDmaWriteEvent.scheduled())
        txDmaWriteCopy();

    /*
     * Serialize the device registers
     */
    SERIALIZE_SCALAR(regs.command);
    SERIALIZE_SCALAR(regs.config);
    SERIALIZE_SCALAR(regs.mear);
    SERIALIZE_SCALAR(regs.ptscr);
    SERIALIZE_SCALAR(regs.isr);
    SERIALIZE_SCALAR(regs.imr);
    SERIALIZE_SCALAR(regs.ier);
    SERIALIZE_SCALAR(regs.ihr);
    SERIALIZE_SCALAR(regs.txdp);
    SERIALIZE_SCALAR(regs.txdp_hi);
    SERIALIZE_SCALAR(regs.txcfg);
    SERIALIZE_SCALAR(regs.gpior);
    SERIALIZE_SCALAR(regs.rxdp);
    SERIALIZE_SCALAR(regs.rxdp_hi);
    SERIALIZE_SCALAR(regs.rxcfg);
    SERIALIZE_SCALAR(regs.pqcr);
    SERIALIZE_SCALAR(regs.wcsr);
    SERIALIZE_SCALAR(regs.pcr);
    SERIALIZE_SCALAR(regs.rfcr);
    SERIALIZE_SCALAR(regs.rfdr);
    SERIALIZE_SCALAR(regs.brar);
    SERIALIZE_SCALAR(regs.brdr);
    SERIALIZE_SCALAR(regs.srr);
    SERIALIZE_SCALAR(regs.mibc);
    SERIALIZE_SCALAR(regs.vrcr);
    SERIALIZE_SCALAR(regs.vtcr);
    SERIALIZE_SCALAR(regs.vdr);
    SERIALIZE_SCALAR(regs.ccsr);
    SERIALIZE_SCALAR(regs.tbicr);
    SERIALIZE_SCALAR(regs.tbisr);
    SERIALIZE_SCALAR(regs.tanar);
    SERIALIZE_SCALAR(regs.tanlpar);
    SERIALIZE_SCALAR(regs.taner);
    SERIALIZE_SCALAR(regs.tesr);

    // Receive filter ROM: perfect-match address and filter hash table
    SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
    SERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);

    SERIALIZE_SCALAR(ioEnable);

    /*
     * Serialize the data Fifos
     */
    rxFifo.serialize("rxFifo", os);
    txFifo.serialize("txFifo", os);

    /*
     * Serialize the various helper variables
     */
    bool txPacketExists = txPacket;
    SERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        // The tx packet may be only partially assembled; record how far
        // the fill pointer has advanced as an offset into the buffer so
        // it can be reconstructed on restore.
        txPacket->length = txPacketBufPtr - txPacket->data;
        txPacket->serialize("txPacket", os);
        uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
        SERIALIZE_SCALAR(txPktBufPtr);
    }

    bool rxPacketExists = rxPacket;
    SERIALIZE_SCALAR(rxPacketExists);
    if (rxPacketExists) {
        // Save the rx packet and the offset of the drain pointer into it.
        rxPacket->serialize("rxPacket", os);
        uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
        SERIALIZE_SCALAR(rxPktBufPtr);
    }

    SERIALIZE_SCALAR(txXferLen);
    SERIALIZE_SCALAR(rxXferLen);

    /*
     * Serialize Cached Descriptors
     */
    SERIALIZE_SCALAR(rxDesc64.link);
    SERIALIZE_SCALAR(rxDesc64.bufptr);
    SERIALIZE_SCALAR(rxDesc64.cmdsts);
    SERIALIZE_SCALAR(rxDesc64.extsts);
    SERIALIZE_SCALAR(txDesc64.link);
    SERIALIZE_SCALAR(txDesc64.bufptr);
    SERIALIZE_SCALAR(txDesc64.cmdsts);
    SERIALIZE_SCALAR(txDesc64.extsts);
    SERIALIZE_SCALAR(rxDesc32.link);
    SERIALIZE_SCALAR(rxDesc32.bufptr);
    SERIALIZE_SCALAR(rxDesc32.cmdsts);
    SERIALIZE_SCALAR(rxDesc32.extsts);
    SERIALIZE_SCALAR(txDesc32.link);
    SERIALIZE_SCALAR(txDesc32.bufptr);
    SERIALIZE_SCALAR(txDesc32.cmdsts);
    SERIALIZE_SCALAR(txDesc32.extsts);
    SERIALIZE_SCALAR(extstsEnable);

    /*
     * Serialize tx state machine
     */
    // Enum-typed state is widened to a plain int for the checkpoint.
    int txState = this->txState;
    SERIALIZE_SCALAR(txState);
    SERIALIZE_SCALAR(txEnable);
    SERIALIZE_SCALAR(CTDD);
    SERIALIZE_SCALAR(txFragPtr);
    SERIALIZE_SCALAR(txDescCnt);
    int txDmaState = this->txDmaState;
    SERIALIZE_SCALAR(txDmaState);
    SERIALIZE_SCALAR(txKickTick);

    /*
     * Serialize rx state machine
     */
    int rxState = this->rxState;
    SERIALIZE_SCALAR(rxState);
    SERIALIZE_SCALAR(rxEnable);
    SERIALIZE_SCALAR(CRDD);
    SERIALIZE_SCALAR(rxPktBytes);
    SERIALIZE_SCALAR(rxFragPtr);
    SERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState = this->rxDmaState;
    SERIALIZE_SCALAR(rxDmaState);
    SERIALIZE_SCALAR(rxKickTick);

    /*
     * Serialize EEPROM state machine
     */
    int eepromState = this->eepromState;
    SERIALIZE_SCALAR(eepromState);
    SERIALIZE_SCALAR(eepromClk);
    SERIALIZE_SCALAR(eepromBitsToRx);
    SERIALIZE_SCALAR(eepromOpcode);
    SERIALIZE_SCALAR(eepromAddress);
    SERIALIZE_SCALAR(eepromData);

    /*
     * If there's a pending transmit, store the time so we can
     * reschedule it later.  Saved relative to curTick (0 means no
     * transmit is pending).
     */
    Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
    SERIALIZE_SCALAR(transmitTick);

    /*
     * receive address filter settings
     */
    SERIALIZE_SCALAR(rxFilterEnable);
    SERIALIZE_SCALAR(acceptBroadcast);
    SERIALIZE_SCALAR(acceptMulticast);
    SERIALIZE_SCALAR(acceptUnicast);
    SERIALIZE_SCALAR(acceptPerfect);
    SERIALIZE_SCALAR(acceptArp);
    SERIALIZE_SCALAR(multicastHashEnable);

    /*
     * Keep track of pending interrupt status.
     */
    SERIALIZE_SCALAR(intrTick);
    SERIALIZE_SCALAR(cpuPendingIntr);
    // Record when a posted-but-undelivered interrupt fires (0 if none).
    Tick intrEventTick = 0;
    if (intrEvent)
        intrEventTick = intrEvent->when();
    SERIALIZE_SCALAR(intrEventTick);

}
2755
/**
 * Restore the device state from a checkpoint.
 *
 * NOTE: fields must be read in exactly the order NSGigE::serialize()
 * wrote them.
 */
void
NSGigE::unserialize(Checkpoint *cp, const std::string &section)
{
    // Unserialize the PciDev base class
    PciDev::unserialize(cp, section);

    UNSERIALIZE_SCALAR(regs.command);
    UNSERIALIZE_SCALAR(regs.config);
    UNSERIALIZE_SCALAR(regs.mear);
    UNSERIALIZE_SCALAR(regs.ptscr);
    UNSERIALIZE_SCALAR(regs.isr);
    UNSERIALIZE_SCALAR(regs.imr);
    UNSERIALIZE_SCALAR(regs.ier);
    UNSERIALIZE_SCALAR(regs.ihr);
    UNSERIALIZE_SCALAR(regs.txdp);
    UNSERIALIZE_SCALAR(regs.txdp_hi);
    UNSERIALIZE_SCALAR(regs.txcfg);
    UNSERIALIZE_SCALAR(regs.gpior);
    UNSERIALIZE_SCALAR(regs.rxdp);
    UNSERIALIZE_SCALAR(regs.rxdp_hi);
    UNSERIALIZE_SCALAR(regs.rxcfg);
    UNSERIALIZE_SCALAR(regs.pqcr);
    UNSERIALIZE_SCALAR(regs.wcsr);
    UNSERIALIZE_SCALAR(regs.pcr);
    UNSERIALIZE_SCALAR(regs.rfcr);
    UNSERIALIZE_SCALAR(regs.rfdr);
    UNSERIALIZE_SCALAR(regs.brar);
    UNSERIALIZE_SCALAR(regs.brdr);
    UNSERIALIZE_SCALAR(regs.srr);
    UNSERIALIZE_SCALAR(regs.mibc);
    UNSERIALIZE_SCALAR(regs.vrcr);
    UNSERIALIZE_SCALAR(regs.vtcr);
    UNSERIALIZE_SCALAR(regs.vdr);
    UNSERIALIZE_SCALAR(regs.ccsr);
    UNSERIALIZE_SCALAR(regs.tbicr);
    UNSERIALIZE_SCALAR(regs.tbisr);
    UNSERIALIZE_SCALAR(regs.tanar);
    UNSERIALIZE_SCALAR(regs.tanlpar);
    UNSERIALIZE_SCALAR(regs.taner);
    UNSERIALIZE_SCALAR(regs.tesr);

    UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
    UNSERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);

    UNSERIALIZE_SCALAR(ioEnable);

    /*
     * unserialize the data fifos
     */
    rxFifo.unserialize("rxFifo", cp, section);
    txFifo.unserialize("txFifo", cp, section);

    /*
     * unserialize the various helper variables
     */
    bool txPacketExists;
    UNSERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        // Rebuild the in-progress tx packet and restore the fill
        // pointer to the saved offset within its data buffer.
        txPacket = new PacketData(16384);
        txPacket->unserialize("txPacket", cp, section);
        uint32_t txPktBufPtr;
        UNSERIALIZE_SCALAR(txPktBufPtr);
        txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
    } else
        txPacket = 0;

    bool rxPacketExists;
    UNSERIALIZE_SCALAR(rxPacketExists);
    // NOTE(review): this pre-clear is redundant -- both branches of the
    // if/else below assign rxPacket (cf. the txPacket handling above,
    // which has no equivalent line).
    rxPacket = 0;
    if (rxPacketExists) {
        rxPacket = new PacketData(16384);
        rxPacket->unserialize("rxPacket", cp, section);
        uint32_t rxPktBufPtr;
        UNSERIALIZE_SCALAR(rxPktBufPtr);
        rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
    } else
        rxPacket = 0;

    UNSERIALIZE_SCALAR(txXferLen);
    UNSERIALIZE_SCALAR(rxXferLen);

    /*
     * Unserialize Cached Descriptors
     */
    UNSERIALIZE_SCALAR(rxDesc64.link);
    UNSERIALIZE_SCALAR(rxDesc64.bufptr);
    UNSERIALIZE_SCALAR(rxDesc64.cmdsts);
    UNSERIALIZE_SCALAR(rxDesc64.extsts);
    UNSERIALIZE_SCALAR(txDesc64.link);
    UNSERIALIZE_SCALAR(txDesc64.bufptr);
    UNSERIALIZE_SCALAR(txDesc64.cmdsts);
    UNSERIALIZE_SCALAR(txDesc64.extsts);
    UNSERIALIZE_SCALAR(rxDesc32.link);
    UNSERIALIZE_SCALAR(rxDesc32.bufptr);
    UNSERIALIZE_SCALAR(rxDesc32.cmdsts);
    UNSERIALIZE_SCALAR(rxDesc32.extsts);
    UNSERIALIZE_SCALAR(txDesc32.link);
    UNSERIALIZE_SCALAR(txDesc32.bufptr);
    UNSERIALIZE_SCALAR(txDesc32.cmdsts);
    UNSERIALIZE_SCALAR(txDesc32.extsts);
    UNSERIALIZE_SCALAR(extstsEnable);

    /*
     * unserialize tx state machine
     */
    // State machine enums were checkpointed as plain ints; cast back.
    int txState;
    UNSERIALIZE_SCALAR(txState);
    this->txState = (TxState) txState;
    UNSERIALIZE_SCALAR(txEnable);
    UNSERIALIZE_SCALAR(CTDD);
    UNSERIALIZE_SCALAR(txFragPtr);
    UNSERIALIZE_SCALAR(txDescCnt);
    int txDmaState;
    UNSERIALIZE_SCALAR(txDmaState);
    this->txDmaState = (DmaState) txDmaState;
    UNSERIALIZE_SCALAR(txKickTick);
    // A nonzero kick tick means the state machine was active; restart it.
    if (txKickTick)
        txKickEvent.schedule(txKickTick);

    /*
     * unserialize rx state machine
     */
    int rxState;
    UNSERIALIZE_SCALAR(rxState);
    this->rxState = (RxState) rxState;
    UNSERIALIZE_SCALAR(rxEnable);
    UNSERIALIZE_SCALAR(CRDD);
    UNSERIALIZE_SCALAR(rxPktBytes);
    UNSERIALIZE_SCALAR(rxFragPtr);
    UNSERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState;
    UNSERIALIZE_SCALAR(rxDmaState);
    this->rxDmaState = (DmaState) rxDmaState;
    UNSERIALIZE_SCALAR(rxKickTick);
    if (rxKickTick)
        rxKickEvent.schedule(rxKickTick);

    /*
     * Unserialize EEPROM state machine
     */
    int eepromState;
    UNSERIALIZE_SCALAR(eepromState);
    this->eepromState = (EEPROMState) eepromState;
    UNSERIALIZE_SCALAR(eepromClk);
    UNSERIALIZE_SCALAR(eepromBitsToRx);
    UNSERIALIZE_SCALAR(eepromOpcode);
    UNSERIALIZE_SCALAR(eepromAddress);
    UNSERIALIZE_SCALAR(eepromData);

    /*
     * If there's a pending transmit, reschedule it now
     */
    // transmitTick was saved relative to the checkpoint's curTick.
    Tick transmitTick;
    UNSERIALIZE_SCALAR(transmitTick);
    if (transmitTick)
        txEvent.schedule(curTick + transmitTick);

    /*
     * unserialize receive address filter settings
     */
    UNSERIALIZE_SCALAR(rxFilterEnable);
    UNSERIALIZE_SCALAR(acceptBroadcast);
    UNSERIALIZE_SCALAR(acceptMulticast);
    UNSERIALIZE_SCALAR(acceptUnicast);
    UNSERIALIZE_SCALAR(acceptPerfect);
    UNSERIALIZE_SCALAR(acceptArp);
    UNSERIALIZE_SCALAR(multicastHashEnable);

    /*
     * Keep track of pending interrupt status.
     */
    UNSERIALIZE_SCALAR(intrTick);
    UNSERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick;
    UNSERIALIZE_SCALAR(intrEventTick);
    if (intrEventTick) {
        // Recreate the pending interrupt delivery event.
        intrEvent = new IntrEvent(this, true);
        intrEvent->schedule(intrEventTick);
    }

    /*
     * re-add addrRanges to bus bridges
     */
    if (pioInterface) {
        pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));
        pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));
    }
}
2944
2945 Tick
2946 NSGigE::cacheAccess(MemReqPtr &req)
2947 {
2948 DPRINTF(EthernetPIO, "timing access to paddr=%#x (daddr=%#x)\n",
2949 req->paddr, req->paddr - addr);
2950 return curTick + pioLatency;
2951 }
2952
//
// Simulator configuration parameters for the NSGigEInt interface object.
//
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

    SimObjectParam<EtherInt *> peer;    // other end of the link (may be NULL)
    SimObjectParam<NSGigE *> device;    // device this interface belongs to

END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt)

    INIT_PARAM_DFLT(peer, "peer interface", NULL),
    INIT_PARAM(device, "Ethernet device of this interface")

END_INIT_SIM_OBJECT_PARAMS(NSGigEInt)
2966
2967 CREATE_SIM_OBJECT(NSGigEInt)
2968 {
2969 NSGigEInt *dev_int = new NSGigEInt(getInstanceName(), device);
2970
2971 EtherInt *p = (EtherInt *)peer;
2972 if (p) {
2973 dev_int->setPeer(p);
2974 p->setPeer(dev_int);
2975 }
2976
2977 return dev_int;
2978 }
2979
2980 REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt)
2981
2982
//
// Simulator configuration parameters for the NSGigE device model.
//
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE)

    // Device address and timing
    Param<Addr> addr;
    Param<Tick> clock;
    Param<Tick> tx_delay;
    Param<Tick> rx_delay;
    Param<Tick> intr_delay;
    // Memory system hookup
    SimObjectParam<MemoryController *> mmu;
    SimObjectParam<PhysicalMemory *> physmem;
    // Receive filtering and MAC address
    Param<bool> rx_filter;
    Param<string> hardware_address;
    // Bus attachment, PIO and DMA behavior
    SimObjectParam<Bus*> io_bus;
    SimObjectParam<Bus*> payload_bus;
    SimObjectParam<HierParams *> hier;
    Param<Tick> pio_latency;
    Param<bool> dma_desc_free;
    Param<bool> dma_data_free;
    Param<Tick> dma_read_delay;
    Param<Tick> dma_write_delay;
    Param<Tick> dma_read_factor;
    Param<Tick> dma_write_factor;
    // PCI configuration and identity
    SimObjectParam<PciConfigAll *> configspace;
    SimObjectParam<PciConfigData *> configdata;
    SimObjectParam<Platform *> platform;
    Param<uint32_t> pci_bus;
    Param<uint32_t> pci_dev;
    Param<uint32_t> pci_func;
    // Fifo sizing and miscellaneous
    Param<uint32_t> tx_fifo_size;
    Param<uint32_t> rx_fifo_size;
    Param<uint32_t> m5reg;
    Param<bool> dma_no_allocate;

END_DECLARE_SIM_OBJECT_PARAMS(NSGigE)
3016
//
// Descriptions and defaults for the NSGigE parameters declared above
// (same order as the declaration block).
//
BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE)

    INIT_PARAM(addr, "Device Address"),
    INIT_PARAM(clock, "State machine processor frequency"),
    INIT_PARAM(tx_delay, "Transmit Delay"),
    INIT_PARAM(rx_delay, "Receive Delay"),
    INIT_PARAM(intr_delay, "Interrupt Delay in microseconds"),
    INIT_PARAM(mmu, "Memory Controller"),
    INIT_PARAM(physmem, "Physical Memory"),
    INIT_PARAM_DFLT(rx_filter, "Enable Receive Filter", true),
    INIT_PARAM(hardware_address, "Ethernet Hardware Address"),
    INIT_PARAM_DFLT(io_bus, "The IO Bus to attach to for headers", NULL),
    INIT_PARAM_DFLT(payload_bus, "The IO Bus to attach to for payload", NULL),
    INIT_PARAM_DFLT(hier, "Hierarchy global variables", &defaultHierParams),
    INIT_PARAM_DFLT(pio_latency, "Programmed IO latency in bus cycles", 1),
    INIT_PARAM_DFLT(dma_desc_free, "DMA of Descriptors is free", false),
    INIT_PARAM_DFLT(dma_data_free, "DMA of Data is free", false),
    INIT_PARAM_DFLT(dma_read_delay, "fixed delay for dma reads", 0),
    INIT_PARAM_DFLT(dma_write_delay, "fixed delay for dma writes", 0),
    INIT_PARAM_DFLT(dma_read_factor, "multiplier for dma reads", 0),
    INIT_PARAM_DFLT(dma_write_factor, "multiplier for dma writes", 0),
    INIT_PARAM(configspace, "PCI Configspace"),
    INIT_PARAM(configdata, "PCI Config data"),
    INIT_PARAM(platform, "Platform"),
    INIT_PARAM(pci_bus, "PCI bus"),
    INIT_PARAM(pci_dev, "PCI device number"),
    INIT_PARAM(pci_func, "PCI function code"),
    INIT_PARAM_DFLT(tx_fifo_size, "max size in bytes of txFifo", 131072),
    INIT_PARAM_DFLT(rx_fifo_size, "max size in bytes of rxFifo", 131072),
    INIT_PARAM(m5reg, "m5 register"),
    INIT_PARAM_DFLT(dma_no_allocate, "Should DMA reads allocate cache lines", true)

END_INIT_SIM_OBJECT_PARAMS(NSGigE)
3050
3051
3052 CREATE_SIM_OBJECT(NSGigE)
3053 {
3054 NSGigE::Params *params = new NSGigE::Params;
3055
3056 params->name = getInstanceName();
3057 params->mmu = mmu;
3058 params->configSpace = configspace;
3059 params->configData = configdata;
3060 params->plat = platform;
3061 params->busNum = pci_bus;
3062 params->deviceNum = pci_dev;
3063 params->functionNum = pci_func;
3064
3065 params->clock = clock;
3066 params->intr_delay = intr_delay;
3067 params->pmem = physmem;
3068 params->tx_delay = tx_delay;
3069 params->rx_delay = rx_delay;
3070 params->hier = hier;
3071 params->header_bus = io_bus;
3072 params->payload_bus = payload_bus;
3073 params->pio_latency = pio_latency;
3074 params->dma_desc_free = dma_desc_free;
3075 params->dma_data_free = dma_data_free;
3076 params->dma_read_delay = dma_read_delay;
3077 params->dma_write_delay = dma_write_delay;
3078 params->dma_read_factor = dma_read_factor;
3079 params->dma_write_factor = dma_write_factor;
3080 params->rx_filter = rx_filter;
3081 params->eaddr = hardware_address;
3082 params->tx_fifo_size = tx_fifo_size;
3083 params->rx_fifo_size = rx_fifo_size;
3084 params->m5reg = m5reg;
3085 params->dma_no_allocate = dma_no_allocate;
3086 return new NSGigE(params);
3087 }
3088
3089 REGISTER_SIM_OBJECT("NSGigE", NSGigE)