9010850ab8a67cc1e09c497d768cbf1540cc0022
[gem5.git] / dev / ns_gige.cc
1 /*
2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /** @file
30 * Device module for modelling the National Semiconductor
31 * DP83820 ethernet controller. Does not support priority queueing
32 */
33 #include <cstdio>
34 #include <deque>
35 #include <string>
36
37 #include "base/inet.hh"
38 #include "cpu/exec_context.hh"
39 #include "dev/etherlink.hh"
40 #include "dev/ns_gige.hh"
41 #include "dev/pciconfigall.hh"
42 #include "mem/bus/bus.hh"
43 #include "mem/bus/dma_interface.hh"
44 #include "mem/bus/pio_interface.hh"
45 #include "mem/bus/pio_interface_impl.hh"
46 #include "mem/functional/memory_control.hh"
47 #include "mem/functional/physical.hh"
48 #include "sim/builder.hh"
49 #include "sim/debug.hh"
50 #include "sim/host.hh"
51 #include "sim/stats.hh"
52 #include "targetarch/vtophys.hh"
53
// Printable names for the receive state machine, used in debug traces.
// Indexed by rx state value — presumably must stay in the same order as
// the RxState enum declared in ns_gige.hh; verify before reordering.
const char *NsRxStateStrings[] =
{
    "rxIdle",
    "rxDescRefr",
    "rxDescRead",
    "rxFifoBlock",
    "rxFragWrite",
    "rxDescWrite",
    "rxAdvance"
};
64
// Printable names for the transmit state machine, used in debug traces.
// Indexed by tx state value — presumably must stay in the same order as
// the TxState enum declared in ns_gige.hh; verify before reordering.
const char *NsTxStateStrings[] =
{
    "txIdle",
    "txDescRefr",
    "txDescRead",
    "txFifoBlock",
    "txFragRead",
    "txDescWrite",
    "txAdvance"
};
75
// Printable names for the DMA engine states, used in debug traces.
// Indexed by dma state value — presumably must stay in the same order as
// the DmaState enum declared in ns_gige.hh; verify before reordering.
const char *NsDmaState[] =
{
    "dmaIdle",
    "dmaReading",
    "dmaWriting",
    "dmaReadWaiting",
    "dmaWriteWaiting"
};
84
85 using namespace std;
86 using namespace Net;
87
88 ///////////////////////////////////////////////////////////////////////
89 //
90 // NSGigE PCI Device
91 //
/**
 * Construct the DP83820 device model.  The initializer list zeroes all
 * packet/descriptor bookkeeping and binds the various DMA/kick/interrupt
 * events back to this device; the body wires the device to the memory
 * system (PIO and DMA interfaces), latches the timing parameters, resets
 * the register file, and seeds the perfect-match ROM with the configured
 * MAC address.
 */
NSGigE::NSGigE(Params *p)
    : PciDev(p), ioEnable(false),
      txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size),
      txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
      txXferLen(0), rxXferLen(0), clock(p->clock),
      txState(txIdle), txEnable(false), CTDD(false),
      txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
      rxEnable(false), CRDD(false), rxPktBytes(0),
      rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
      eepromState(eepromStart), rxDmaReadEvent(this), rxDmaWriteEvent(this),
      txDmaReadEvent(this), txDmaWriteEvent(this),
      dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free),
      txDelay(p->tx_delay), rxDelay(p->rx_delay),
      rxKickTick(0), rxKickEvent(this), txKickTick(0), txKickEvent(this),
      txEvent(this), rxFilterEnable(p->rx_filter), acceptBroadcast(false),
      acceptMulticast(false), acceptUnicast(false),
      acceptPerfect(false), acceptArp(false), multicastHashEnable(false),
      physmem(p->pmem), intrTick(0), cpuPendingIntr(false),
      intrEvent(0), interface(0)
{
    // Optional programmed-I/O interface: register accesses arrive through
    // cacheAccess() on the configured bus.
    if (p->pio_bus) {
        pioInterface = newPioInterface(name() + ".pio", p->hier,
                                       p->pio_bus, this,
                                       &NSGigE::cacheAccess);
        pioLatency = p->pio_latency * p->pio_bus->clockRate;
    }

    // DMA interface: descriptors go over the header bus; payload goes over
    // a separate payload bus when one is configured, otherwise both share
    // the header bus.  A payload bus alone is a configuration error.
    if (p->header_bus) {
        if (p->payload_bus)
            dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                                 p->header_bus,
                                                 p->payload_bus, 1,
                                                 p->dma_no_allocate);
        else
            dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                                 p->header_bus,
                                                 p->header_bus, 1,
                                                 p->dma_no_allocate);
    } else if (p->payload_bus)
        panic("Must define a header bus if defining a payload bus");

    // Delayed PIO writes only make sense when a PIO interface exists.
    pioDelayWrite = p->pio_delay_write && pioInterface;

    // Latch timing knobs for interrupt posting and DMA modelling.
    intrDelay = p->intr_delay;
    dmaReadDelay = p->dma_read_delay;
    dmaWriteDelay = p->dma_write_delay;
    dmaReadFactor = p->dma_read_factor;
    dmaWriteFactor = p->dma_write_factor;

    regsReset();
    // Seed the perfect-match filter ROM with the device's MAC address.
    memcpy(&rom.perfectMatch, p->eaddr.bytes(), ETH_ADDR_LEN);

    // Clear the cached 32- and 64-bit descriptor images.
    memset(&rxDesc32, 0, sizeof(rxDesc32));
    memset(&txDesc32, 0, sizeof(txDesc32));
    memset(&rxDesc64, 0, sizeof(rxDesc64));
    memset(&txDesc64, 0, sizeof(txDesc64));
}
149
// No explicit teardown; heap-allocated interfaces created in the
// constructor are not freed here — presumably they live for the whole
// simulation (TODO confirm ownership).
NSGigE::~NSGigE()
{}
152
153 void
154 NSGigE::regStats()
155 {
156 txBytes
157 .name(name() + ".txBytes")
158 .desc("Bytes Transmitted")
159 .prereq(txBytes)
160 ;
161
162 rxBytes
163 .name(name() + ".rxBytes")
164 .desc("Bytes Received")
165 .prereq(rxBytes)
166 ;
167
168 txPackets
169 .name(name() + ".txPackets")
170 .desc("Number of Packets Transmitted")
171 .prereq(txBytes)
172 ;
173
174 rxPackets
175 .name(name() + ".rxPackets")
176 .desc("Number of Packets Received")
177 .prereq(rxBytes)
178 ;
179
180 txIpChecksums
181 .name(name() + ".txIpChecksums")
182 .desc("Number of tx IP Checksums done by device")
183 .precision(0)
184 .prereq(txBytes)
185 ;
186
187 rxIpChecksums
188 .name(name() + ".rxIpChecksums")
189 .desc("Number of rx IP Checksums done by device")
190 .precision(0)
191 .prereq(rxBytes)
192 ;
193
194 txTcpChecksums
195 .name(name() + ".txTcpChecksums")
196 .desc("Number of tx TCP Checksums done by device")
197 .precision(0)
198 .prereq(txBytes)
199 ;
200
201 rxTcpChecksums
202 .name(name() + ".rxTcpChecksums")
203 .desc("Number of rx TCP Checksums done by device")
204 .precision(0)
205 .prereq(rxBytes)
206 ;
207
208 txUdpChecksums
209 .name(name() + ".txUdpChecksums")
210 .desc("Number of tx UDP Checksums done by device")
211 .precision(0)
212 .prereq(txBytes)
213 ;
214
215 rxUdpChecksums
216 .name(name() + ".rxUdpChecksums")
217 .desc("Number of rx UDP Checksums done by device")
218 .precision(0)
219 .prereq(rxBytes)
220 ;
221
222 descDmaReads
223 .name(name() + ".descDMAReads")
224 .desc("Number of descriptors the device read w/ DMA")
225 .precision(0)
226 ;
227
228 descDmaWrites
229 .name(name() + ".descDMAWrites")
230 .desc("Number of descriptors the device wrote w/ DMA")
231 .precision(0)
232 ;
233
234 descDmaRdBytes
235 .name(name() + ".descDmaReadBytes")
236 .desc("number of descriptor bytes read w/ DMA")
237 .precision(0)
238 ;
239
240 descDmaWrBytes
241 .name(name() + ".descDmaWriteBytes")
242 .desc("number of descriptor bytes write w/ DMA")
243 .precision(0)
244 ;
245
246 txBandwidth
247 .name(name() + ".txBandwidth")
248 .desc("Transmit Bandwidth (bits/s)")
249 .precision(0)
250 .prereq(txBytes)
251 ;
252
253 rxBandwidth
254 .name(name() + ".rxBandwidth")
255 .desc("Receive Bandwidth (bits/s)")
256 .precision(0)
257 .prereq(rxBytes)
258 ;
259
260 totBandwidth
261 .name(name() + ".totBandwidth")
262 .desc("Total Bandwidth (bits/s)")
263 .precision(0)
264 .prereq(totBytes)
265 ;
266
267 totPackets
268 .name(name() + ".totPackets")
269 .desc("Total Packets")
270 .precision(0)
271 .prereq(totBytes)
272 ;
273
274 totBytes
275 .name(name() + ".totBytes")
276 .desc("Total Bytes")
277 .precision(0)
278 .prereq(totBytes)
279 ;
280
281 totPacketRate
282 .name(name() + ".totPPS")
283 .desc("Total Tranmission Rate (packets/s)")
284 .precision(0)
285 .prereq(totBytes)
286 ;
287
288 txPacketRate
289 .name(name() + ".txPPS")
290 .desc("Packet Tranmission Rate (packets/s)")
291 .precision(0)
292 .prereq(txBytes)
293 ;
294
295 rxPacketRate
296 .name(name() + ".rxPPS")
297 .desc("Packet Reception Rate (packets/s)")
298 .precision(0)
299 .prereq(rxBytes)
300 ;
301
302 postedSwi
303 .name(name() + ".postedSwi")
304 .desc("number of software interrupts posted to CPU")
305 .precision(0)
306 ;
307
308 totalSwi
309 .name(name() + ".totalSwi")
310 .desc("total number of Swi written to ISR")
311 .precision(0)
312 ;
313
314 coalescedSwi
315 .name(name() + ".coalescedSwi")
316 .desc("average number of Swi's coalesced into each post")
317 .precision(0)
318 ;
319
320 postedRxIdle
321 .name(name() + ".postedRxIdle")
322 .desc("number of rxIdle interrupts posted to CPU")
323 .precision(0)
324 ;
325
326 totalRxIdle
327 .name(name() + ".totalRxIdle")
328 .desc("total number of RxIdle written to ISR")
329 .precision(0)
330 ;
331
332 coalescedRxIdle
333 .name(name() + ".coalescedRxIdle")
334 .desc("average number of RxIdle's coalesced into each post")
335 .precision(0)
336 ;
337
338 postedRxOk
339 .name(name() + ".postedRxOk")
340 .desc("number of RxOk interrupts posted to CPU")
341 .precision(0)
342 ;
343
344 totalRxOk
345 .name(name() + ".totalRxOk")
346 .desc("total number of RxOk written to ISR")
347 .precision(0)
348 ;
349
350 coalescedRxOk
351 .name(name() + ".coalescedRxOk")
352 .desc("average number of RxOk's coalesced into each post")
353 .precision(0)
354 ;
355
356 postedRxDesc
357 .name(name() + ".postedRxDesc")
358 .desc("number of RxDesc interrupts posted to CPU")
359 .precision(0)
360 ;
361
362 totalRxDesc
363 .name(name() + ".totalRxDesc")
364 .desc("total number of RxDesc written to ISR")
365 .precision(0)
366 ;
367
368 coalescedRxDesc
369 .name(name() + ".coalescedRxDesc")
370 .desc("average number of RxDesc's coalesced into each post")
371 .precision(0)
372 ;
373
374 postedTxOk
375 .name(name() + ".postedTxOk")
376 .desc("number of TxOk interrupts posted to CPU")
377 .precision(0)
378 ;
379
380 totalTxOk
381 .name(name() + ".totalTxOk")
382 .desc("total number of TxOk written to ISR")
383 .precision(0)
384 ;
385
386 coalescedTxOk
387 .name(name() + ".coalescedTxOk")
388 .desc("average number of TxOk's coalesced into each post")
389 .precision(0)
390 ;
391
392 postedTxIdle
393 .name(name() + ".postedTxIdle")
394 .desc("number of TxIdle interrupts posted to CPU")
395 .precision(0)
396 ;
397
398 totalTxIdle
399 .name(name() + ".totalTxIdle")
400 .desc("total number of TxIdle written to ISR")
401 .precision(0)
402 ;
403
404 coalescedTxIdle
405 .name(name() + ".coalescedTxIdle")
406 .desc("average number of TxIdle's coalesced into each post")
407 .precision(0)
408 ;
409
410 postedTxDesc
411 .name(name() + ".postedTxDesc")
412 .desc("number of TxDesc interrupts posted to CPU")
413 .precision(0)
414 ;
415
416 totalTxDesc
417 .name(name() + ".totalTxDesc")
418 .desc("total number of TxDesc written to ISR")
419 .precision(0)
420 ;
421
422 coalescedTxDesc
423 .name(name() + ".coalescedTxDesc")
424 .desc("average number of TxDesc's coalesced into each post")
425 .precision(0)
426 ;
427
428 postedRxOrn
429 .name(name() + ".postedRxOrn")
430 .desc("number of RxOrn posted to CPU")
431 .precision(0)
432 ;
433
434 totalRxOrn
435 .name(name() + ".totalRxOrn")
436 .desc("total number of RxOrn written to ISR")
437 .precision(0)
438 ;
439
440 coalescedRxOrn
441 .name(name() + ".coalescedRxOrn")
442 .desc("average number of RxOrn's coalesced into each post")
443 .precision(0)
444 ;
445
446 coalescedTotal
447 .name(name() + ".coalescedTotal")
448 .desc("average number of interrupts coalesced into each post")
449 .precision(0)
450 ;
451
452 postedInterrupts
453 .name(name() + ".postedInterrupts")
454 .desc("number of posts to CPU")
455 .precision(0)
456 ;
457
458 droppedPackets
459 .name(name() + ".droppedPackets")
460 .desc("number of packets dropped")
461 .precision(0)
462 ;
463
464 coalescedSwi = totalSwi / postedInterrupts;
465 coalescedRxIdle = totalRxIdle / postedInterrupts;
466 coalescedRxOk = totalRxOk / postedInterrupts;
467 coalescedRxDesc = totalRxDesc / postedInterrupts;
468 coalescedTxOk = totalTxOk / postedInterrupts;
469 coalescedTxIdle = totalTxIdle / postedInterrupts;
470 coalescedTxDesc = totalTxDesc / postedInterrupts;
471 coalescedRxOrn = totalRxOrn / postedInterrupts;
472
473 coalescedTotal = (totalSwi + totalRxIdle + totalRxOk + totalRxDesc +
474 totalTxOk + totalTxIdle + totalTxDesc +
475 totalRxOrn) / postedInterrupts;
476
477 txBandwidth = txBytes * Stats::constant(8) / simSeconds;
478 rxBandwidth = rxBytes * Stats::constant(8) / simSeconds;
479 totBandwidth = txBandwidth + rxBandwidth;
480 totBytes = txBytes + rxBytes;
481 totPackets = txPackets + rxPackets;
482
483 txPacketRate = txPackets / simSeconds;
484 rxPacketRate = rxPackets / simSeconds;
485 }
486
487 /**
488 * This is to read the PCI general configuration registers
489 */
490 void
491 NSGigE::readConfig(int offset, int size, uint8_t *data)
492 {
493 if (offset < PCI_DEVICE_SPECIFIC)
494 PciDev::readConfig(offset, size, data);
495 else
496 panic("Device specific PCI config space not implemented!\n");
497 }
498
499 /**
500 * This is to write to the PCI general configuration registers
501 */
502 void
503 NSGigE::writeConfig(int offset, int size, const uint8_t* data)
504 {
505 if (offset < PCI_DEVICE_SPECIFIC)
506 PciDev::writeConfig(offset, size, data);
507 else
508 panic("Device specific PCI config space not implemented!\n");
509
510 // Need to catch writes to BARs to update the PIO interface
511 switch (offset) {
512 // seems to work fine without all these PCI settings, but i
513 // put in the IO to double check, an assertion will fail if we
514 // need to properly implement it
515 case PCI_COMMAND:
516 if (config.data[offset] & PCI_CMD_IOSE)
517 ioEnable = true;
518 else
519 ioEnable = false;
520
521 #if 0
522 if (config.data[offset] & PCI_CMD_BME) {
523 bmEnabled = true;
524 }
525 else {
526 bmEnabled = false;
527 }
528
529 if (config.data[offset] & PCI_CMD_MSE) {
530 memEnable = true;
531 }
532 else {
533 memEnable = false;
534 }
535 #endif
536 break;
537
538 case PCI0_BASE_ADDR0:
539 if (BARAddrs[0] != 0) {
540 if (pioInterface)
541 pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));
542
543 BARAddrs[0] &= EV5::PAddrUncachedMask;
544 }
545 break;
546 case PCI0_BASE_ADDR1:
547 if (BARAddrs[1] != 0) {
548 if (pioInterface)
549 pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));
550
551 BARAddrs[1] &= EV5::PAddrUncachedMask;
552 }
553 break;
554 }
555 }
556
557 /**
558 * This reads the device registers, which are detailed in the NS83820
559 * spec sheet
560 */
561 Fault
562 NSGigE::read(MemReqPtr &req, uint8_t *data)
563 {
564 assert(ioEnable);
565
566 //The mask is to give you only the offset into the device register file
567 Addr daddr = req->paddr & 0xfff;
568 DPRINTF(EthernetPIO, "read da=%#x pa=%#x va=%#x size=%d\n",
569 daddr, req->paddr, req->vaddr, req->size);
570
571
572 // there are some reserved registers, you can see ns_gige_reg.h and
573 // the spec sheet for details
574 if (daddr > LAST && daddr <= RESERVED) {
575 panic("Accessing reserved register");
576 } else if (daddr > RESERVED && daddr <= 0x3FC) {
577 readConfig(daddr & 0xff, req->size, data);
578 return No_Fault;
579 } else if (daddr >= MIB_START && daddr <= MIB_END) {
580 // don't implement all the MIB's. hopefully the kernel
581 // doesn't actually DEPEND upon their values
582 // MIB are just hardware stats keepers
583 uint32_t &reg = *(uint32_t *) data;
584 reg = 0;
585 return No_Fault;
586 } else if (daddr > 0x3FC)
587 panic("Something is messed up!\n");
588
589 switch (req->size) {
590 case sizeof(uint32_t):
591 {
592 uint32_t &reg = *(uint32_t *)data;
593 uint16_t rfaddr;
594
595 switch (daddr) {
596 case CR:
597 reg = regs.command;
598 //these are supposed to be cleared on a read
599 reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
600 break;
601
602 case CFGR:
603 reg = regs.config;
604 break;
605
606 case MEAR:
607 reg = regs.mear;
608 break;
609
610 case PTSCR:
611 reg = regs.ptscr;
612 break;
613
614 case ISR:
615 reg = regs.isr;
616 devIntrClear(ISR_ALL);
617 break;
618
619 case IMR:
620 reg = regs.imr;
621 break;
622
623 case IER:
624 reg = regs.ier;
625 break;
626
627 case IHR:
628 reg = regs.ihr;
629 break;
630
631 case TXDP:
632 reg = regs.txdp;
633 break;
634
635 case TXDP_HI:
636 reg = regs.txdp_hi;
637 break;
638
639 case TX_CFG:
640 reg = regs.txcfg;
641 break;
642
643 case GPIOR:
644 reg = regs.gpior;
645 break;
646
647 case RXDP:
648 reg = regs.rxdp;
649 break;
650
651 case RXDP_HI:
652 reg = regs.rxdp_hi;
653 break;
654
655 case RX_CFG:
656 reg = regs.rxcfg;
657 break;
658
659 case PQCR:
660 reg = regs.pqcr;
661 break;
662
663 case WCSR:
664 reg = regs.wcsr;
665 break;
666
667 case PCR:
668 reg = regs.pcr;
669 break;
670
671 // see the spec sheet for how RFCR and RFDR work
672 // basically, you write to RFCR to tell the machine
673 // what you want to do next, then you act upon RFDR,
674 // and the device will be prepared b/c of what you
675 // wrote to RFCR
676 case RFCR:
677 reg = regs.rfcr;
678 break;
679
680 case RFDR:
681 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
682 switch (rfaddr) {
683 // Read from perfect match ROM octets
684 case 0x000:
685 reg = rom.perfectMatch[1];
686 reg = reg << 8;
687 reg += rom.perfectMatch[0];
688 break;
689 case 0x002:
690 reg = rom.perfectMatch[3] << 8;
691 reg += rom.perfectMatch[2];
692 break;
693 case 0x004:
694 reg = rom.perfectMatch[5] << 8;
695 reg += rom.perfectMatch[4];
696 break;
697 default:
698 // Read filter hash table
699 if (rfaddr >= FHASH_ADDR &&
700 rfaddr < FHASH_ADDR + FHASH_SIZE) {
701
702 // Only word-aligned reads supported
703 if (rfaddr % 2)
704 panic("unaligned read from filter hash table!");
705
706 reg = rom.filterHash[rfaddr - FHASH_ADDR + 1] << 8;
707 reg += rom.filterHash[rfaddr - FHASH_ADDR];
708 break;
709 }
710
711 panic("reading RFDR for something other than pattern"
712 " matching or hashing! %#x\n", rfaddr);
713 }
714 break;
715
716 case SRR:
717 reg = regs.srr;
718 break;
719
720 case MIBC:
721 reg = regs.mibc;
722 reg &= ~(MIBC_MIBS | MIBC_ACLR);
723 break;
724
725 case VRCR:
726 reg = regs.vrcr;
727 break;
728
729 case VTCR:
730 reg = regs.vtcr;
731 break;
732
733 case VDR:
734 reg = regs.vdr;
735 break;
736
737 case CCSR:
738 reg = regs.ccsr;
739 break;
740
741 case TBICR:
742 reg = regs.tbicr;
743 break;
744
745 case TBISR:
746 reg = regs.tbisr;
747 break;
748
749 case TANAR:
750 reg = regs.tanar;
751 break;
752
753 case TANLPAR:
754 reg = regs.tanlpar;
755 break;
756
757 case TANER:
758 reg = regs.taner;
759 break;
760
761 case TESR:
762 reg = regs.tesr;
763 break;
764
765 case M5REG:
766 reg = 0;
767 if (params()->rx_thread)
768 reg |= M5REG_RX_THREAD;
769 if (params()->tx_thread)
770 reg |= M5REG_TX_THREAD;
771 break;
772
773 default:
774 panic("reading unimplemented register: addr=%#x", daddr);
775 }
776
777 DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
778 daddr, reg, reg);
779 }
780 break;
781
782 default:
783 panic("accessing register with invalid size: addr=%#x, size=%d",
784 daddr, req->size);
785 }
786
787 return No_Fault;
788 }
789
790 Fault
791 NSGigE::write(MemReqPtr &req, const uint8_t *data)
792 {
793 assert(ioEnable);
794
795 Addr daddr = req->paddr & 0xfff;
796 DPRINTF(EthernetPIO, "write da=%#x pa=%#x va=%#x size=%d\n",
797 daddr, req->paddr, req->vaddr, req->size);
798
799 if (daddr > LAST && daddr <= RESERVED) {
800 panic("Accessing reserved register");
801 } else if (daddr > RESERVED && daddr <= 0x3FC) {
802 writeConfig(daddr & 0xff, req->size, data);
803 return No_Fault;
804 } else if (daddr > 0x3FC)
805 panic("Something is messed up!\n");
806
807 if (pioDelayWrite) {
808 int cpu = (req->xc->regs.ipr[TheISA::IPR_PALtemp16] >> 8) & 0xff;
809 if (cpu >= writeQueue.size())
810 writeQueue.resize(cpu + 1);
811 writeQueue[cpu].push_back(RegWriteData(daddr, *(uint32_t *)data));
812 }
813
814 if (req->size == sizeof(uint32_t)) {
815 uint32_t reg = *(uint32_t *)data;
816 uint16_t rfaddr;
817
818 DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);
819
820 switch (daddr) {
821 case CR:
822 regs.command = reg;
823 if (reg & CR_TXD) {
824 txEnable = false;
825 } else if (reg & CR_TXE) {
826 if (!pioDelayWrite) {
827 txEnable = true;
828
829 // the kernel is enabling the transmit machine
830 if (txState == txIdle)
831 txKick();
832 }
833 }
834
835 if (reg & CR_RXD) {
836 rxEnable = false;
837 } else if (reg & CR_RXE) {
838 if (!pioDelayWrite) {
839 rxEnable = true;
840
841 if (rxState == rxIdle)
842 rxKick();
843 }
844 }
845
846 if (reg & CR_TXR)
847 txReset();
848
849 if (reg & CR_RXR)
850 rxReset();
851
852 if (reg & CR_SWI)
853 devIntrPost(ISR_SWI);
854
855 if (reg & CR_RST) {
856 txReset();
857 rxReset();
858
859 regsReset();
860 }
861 break;
862
863 case CFGR:
864 if (reg & CFGR_LNKSTS ||
865 reg & CFGR_SPDSTS ||
866 reg & CFGR_DUPSTS ||
867 reg & CFGR_RESERVED ||
868 reg & CFGR_T64ADDR ||
869 reg & CFGR_PCI64_DET)
870
871 // First clear all writable bits
872 regs.config &= CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
873 CFGR_RESERVED | CFGR_T64ADDR |
874 CFGR_PCI64_DET;
875 // Now set the appropriate writable bits
876 regs.config |= reg & ~(CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
877 CFGR_RESERVED | CFGR_T64ADDR |
878 CFGR_PCI64_DET);
879
880 // all these #if 0's are because i don't THINK the kernel needs to
881 // have these implemented. if there is a problem relating to one of
882 // these, you may need to add functionality in.
883 if (reg & CFGR_TBI_EN) ;
884 if (reg & CFGR_MODE_1000) ;
885
886 if (reg & CFGR_AUTO_1000)
887 panic("CFGR_AUTO_1000 not implemented!\n");
888
889 if (reg & CFGR_PINT_DUPSTS ||
890 reg & CFGR_PINT_LNKSTS ||
891 reg & CFGR_PINT_SPDSTS)
892 ;
893
894 if (reg & CFGR_TMRTEST) ;
895 if (reg & CFGR_MRM_DIS) ;
896 if (reg & CFGR_MWI_DIS) ;
897
898 if (reg & CFGR_T64ADDR) ;
899 // panic("CFGR_T64ADDR is read only register!\n");
900
901 if (reg & CFGR_PCI64_DET)
902 panic("CFGR_PCI64_DET is read only register!\n");
903
904 if (reg & CFGR_DATA64_EN) ;
905 if (reg & CFGR_M64ADDR) ;
906 if (reg & CFGR_PHY_RST) ;
907 if (reg & CFGR_PHY_DIS) ;
908
909 if (reg & CFGR_EXTSTS_EN)
910 extstsEnable = true;
911 else
912 extstsEnable = false;
913
914 if (reg & CFGR_REQALG) ;
915 if (reg & CFGR_SB) ;
916 if (reg & CFGR_POW) ;
917 if (reg & CFGR_EXD) ;
918 if (reg & CFGR_PESEL) ;
919 if (reg & CFGR_BROM_DIS) ;
920 if (reg & CFGR_EXT_125) ;
921 if (reg & CFGR_BEM) ;
922 break;
923
924 case MEAR:
925 // Clear writable bits
926 regs.mear &= MEAR_EEDO;
927 // Set appropriate writable bits
928 regs.mear |= reg & ~MEAR_EEDO;
929
930 // FreeBSD uses the EEPROM to read PMATCH (for the MAC address)
931 // even though it could get it through RFDR
932 if (reg & MEAR_EESEL) {
933 // Rising edge of clock
934 if (reg & MEAR_EECLK && !eepromClk)
935 eepromKick();
936 }
937 else {
938 eepromState = eepromStart;
939 regs.mear &= ~MEAR_EEDI;
940 }
941
942 eepromClk = reg & MEAR_EECLK;
943
944 // since phy is completely faked, MEAR_MD* don't matter
945 if (reg & MEAR_MDIO) ;
946 if (reg & MEAR_MDDIR) ;
947 if (reg & MEAR_MDC) ;
948 break;
949
950 case PTSCR:
951 regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
952 // these control BISTs for various parts of chip - we
953 // don't care or do just fake that the BIST is done
954 if (reg & PTSCR_RBIST_EN)
955 regs.ptscr |= PTSCR_RBIST_DONE;
956 if (reg & PTSCR_EEBIST_EN)
957 regs.ptscr &= ~PTSCR_EEBIST_EN;
958 if (reg & PTSCR_EELOAD_EN)
959 regs.ptscr &= ~PTSCR_EELOAD_EN;
960 break;
961
962 case ISR: /* writing to the ISR has no effect */
963 panic("ISR is a read only register!\n");
964
965 case IMR:
966 regs.imr = reg;
967 devIntrChangeMask();
968 break;
969
970 case IER:
971 regs.ier = reg;
972 break;
973
974 case IHR:
975 regs.ihr = reg;
976 /* not going to implement real interrupt holdoff */
977 break;
978
979 case TXDP:
980 regs.txdp = (reg & 0xFFFFFFFC);
981 assert(txState == txIdle);
982 CTDD = false;
983 break;
984
985 case TXDP_HI:
986 regs.txdp_hi = reg;
987 break;
988
989 case TX_CFG:
990 regs.txcfg = reg;
991 #if 0
992 if (reg & TX_CFG_CSI) ;
993 if (reg & TX_CFG_HBI) ;
994 if (reg & TX_CFG_MLB) ;
995 if (reg & TX_CFG_ATP) ;
996 if (reg & TX_CFG_ECRETRY) {
997 /*
998 * this could easily be implemented, but considering
999 * the network is just a fake pipe, wouldn't make
1000 * sense to do this
1001 */
1002 }
1003
1004 if (reg & TX_CFG_BRST_DIS) ;
1005 #endif
1006
1007 #if 0
1008 /* we handle our own DMA, ignore the kernel's exhortations */
1009 if (reg & TX_CFG_MXDMA) ;
1010 #endif
1011
1012 // also, we currently don't care about fill/drain
1013 // thresholds though this may change in the future with
1014 // more realistic networks or a driver which changes it
1015 // according to feedback
1016
1017 break;
1018
1019 case GPIOR:
1020 // Only write writable bits
1021 regs.gpior &= GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
1022 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN;
1023 regs.gpior |= reg & ~(GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
1024 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN);
1025 /* these just control general purpose i/o pins, don't matter */
1026 break;
1027
1028 case RXDP:
1029 regs.rxdp = reg;
1030 CRDD = false;
1031 break;
1032
1033 case RXDP_HI:
1034 regs.rxdp_hi = reg;
1035 break;
1036
1037 case RX_CFG:
1038 regs.rxcfg = reg;
1039 #if 0
1040 if (reg & RX_CFG_AEP) ;
1041 if (reg & RX_CFG_ARP) ;
1042 if (reg & RX_CFG_STRIPCRC) ;
1043 if (reg & RX_CFG_RX_RD) ;
1044 if (reg & RX_CFG_ALP) ;
1045 if (reg & RX_CFG_AIRL) ;
1046
1047 /* we handle our own DMA, ignore what kernel says about it */
1048 if (reg & RX_CFG_MXDMA) ;
1049
1050 //also, we currently don't care about fill/drain thresholds
1051 //though this may change in the future with more realistic
1052 //networks or a driver which changes it according to feedback
1053 if (reg & (RX_CFG_DRTH | RX_CFG_DRTH0)) ;
1054 #endif
1055 break;
1056
1057 case PQCR:
1058 /* there is no priority queueing used in the linux 2.6 driver */
1059 regs.pqcr = reg;
1060 break;
1061
1062 case WCSR:
1063 /* not going to implement wake on LAN */
1064 regs.wcsr = reg;
1065 break;
1066
1067 case PCR:
1068 /* not going to implement pause control */
1069 regs.pcr = reg;
1070 break;
1071
1072 case RFCR:
1073 regs.rfcr = reg;
1074
1075 rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
1076 acceptBroadcast = (reg & RFCR_AAB) ? true : false;
1077 acceptMulticast = (reg & RFCR_AAM) ? true : false;
1078 acceptUnicast = (reg & RFCR_AAU) ? true : false;
1079 acceptPerfect = (reg & RFCR_APM) ? true : false;
1080 acceptArp = (reg & RFCR_AARP) ? true : false;
1081 multicastHashEnable = (reg & RFCR_MHEN) ? true : false;
1082
1083 #if 0
1084 if (reg & RFCR_APAT)
1085 panic("RFCR_APAT not implemented!\n");
1086 #endif
1087 if (reg & RFCR_UHEN)
1088 panic("Unicast hash filtering not used by drivers!\n");
1089
1090 if (reg & RFCR_ULM)
1091 panic("RFCR_ULM not implemented!\n");
1092
1093 break;
1094
1095 case RFDR:
1096 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
1097 switch (rfaddr) {
1098 case 0x000:
1099 rom.perfectMatch[0] = (uint8_t)reg;
1100 rom.perfectMatch[1] = (uint8_t)(reg >> 8);
1101 break;
1102 case 0x002:
1103 rom.perfectMatch[2] = (uint8_t)reg;
1104 rom.perfectMatch[3] = (uint8_t)(reg >> 8);
1105 break;
1106 case 0x004:
1107 rom.perfectMatch[4] = (uint8_t)reg;
1108 rom.perfectMatch[5] = (uint8_t)(reg >> 8);
1109 break;
1110 default:
1111
1112 if (rfaddr >= FHASH_ADDR &&
1113 rfaddr < FHASH_ADDR + FHASH_SIZE) {
1114
1115 // Only word-aligned writes supported
1116 if (rfaddr % 2)
1117 panic("unaligned write to filter hash table!");
1118
1119 rom.filterHash[rfaddr - FHASH_ADDR] = (uint8_t)reg;
1120 rom.filterHash[rfaddr - FHASH_ADDR + 1]
1121 = (uint8_t)(reg >> 8);
1122 break;
1123 }
1124 panic("writing RFDR for something other than pattern matching\
1125 or hashing! %#x\n", rfaddr);
1126 }
1127
1128 case BRAR:
1129 regs.brar = reg;
1130 break;
1131
1132 case BRDR:
1133 panic("the driver never uses BRDR, something is wrong!\n");
1134
1135 case SRR:
1136 panic("SRR is read only register!\n");
1137
1138 case MIBC:
1139 panic("the driver never uses MIBC, something is wrong!\n");
1140
1141 case VRCR:
1142 regs.vrcr = reg;
1143 break;
1144
1145 case VTCR:
1146 regs.vtcr = reg;
1147 break;
1148
1149 case VDR:
1150 panic("the driver never uses VDR, something is wrong!\n");
1151
1152 case CCSR:
1153 /* not going to implement clockrun stuff */
1154 regs.ccsr = reg;
1155 break;
1156
1157 case TBICR:
1158 regs.tbicr = reg;
1159 if (reg & TBICR_MR_LOOPBACK)
1160 panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
1161
1162 if (reg & TBICR_MR_AN_ENABLE) {
1163 regs.tanlpar = regs.tanar;
1164 regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
1165 }
1166
1167 #if 0
1168 if (reg & TBICR_MR_RESTART_AN) ;
1169 #endif
1170
1171 break;
1172
1173 case TBISR:
1174 panic("TBISR is read only register!\n");
1175
1176 case TANAR:
1177 // Only write the writable bits
1178 regs.tanar &= TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED;
1179 regs.tanar |= reg & ~(TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED);
1180
1181 // Pause capability unimplemented
1182 #if 0
1183 if (reg & TANAR_PS2) ;
1184 if (reg & TANAR_PS1) ;
1185 #endif
1186
1187 break;
1188
1189 case TANLPAR:
1190 panic("this should only be written to by the fake phy!\n");
1191
1192 case TANER:
1193 panic("TANER is read only register!\n");
1194
1195 case TESR:
1196 regs.tesr = reg;
1197 break;
1198
1199 default:
1200 panic("invalid register access daddr=%#x", daddr);
1201 }
1202 } else {
1203 panic("Invalid Request Size");
1204 }
1205
1206 return No_Fault;
1207 }
1208
1209 void
1210 NSGigE::devIntrPost(uint32_t interrupts)
1211 {
1212 if (interrupts & ISR_RESERVE)
1213 panic("Cannot set a reserved interrupt");
1214
1215 if (interrupts & ISR_NOIMPL)
1216 warn("interrupt not implemented %#x\n", interrupts);
1217
1218 interrupts &= ISR_IMPL;
1219 regs.isr |= interrupts;
1220
1221 if (interrupts & regs.imr) {
1222 if (interrupts & ISR_SWI) {
1223 totalSwi++;
1224 }
1225 if (interrupts & ISR_RXIDLE) {
1226 totalRxIdle++;
1227 }
1228 if (interrupts & ISR_RXOK) {
1229 totalRxOk++;
1230 }
1231 if (interrupts & ISR_RXDESC) {
1232 totalRxDesc++;
1233 }
1234 if (interrupts & ISR_TXOK) {
1235 totalTxOk++;
1236 }
1237 if (interrupts & ISR_TXIDLE) {
1238 totalTxIdle++;
1239 }
1240 if (interrupts & ISR_TXDESC) {
1241 totalTxDesc++;
1242 }
1243 if (interrupts & ISR_RXORN) {
1244 totalRxOrn++;
1245 }
1246 }
1247
1248 DPRINTF(EthernetIntr,
1249 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
1250 interrupts, regs.isr, regs.imr);
1251
1252 if ((regs.isr & regs.imr)) {
1253 Tick when = curTick;
1254 if ((regs.isr & regs.imr & ISR_NODELAY) == 0)
1255 when += intrDelay;
1256 cpuIntrPost(when);
1257 }
1258 }
1259
1260 /* writing this interrupt counting stats inside this means that this function
1261 is now limited to being used to clear all interrupts upon the kernel
1262 reading isr and servicing. just telling you in case you were thinking
1263 of expanding use.
1264 */
1265 void
1266 NSGigE::devIntrClear(uint32_t interrupts)
1267 {
1268 if (interrupts & ISR_RESERVE)
1269 panic("Cannot clear a reserved interrupt");
1270
1271 if (regs.isr & regs.imr & ISR_SWI) {
1272 postedSwi++;
1273 }
1274 if (regs.isr & regs.imr & ISR_RXIDLE) {
1275 postedRxIdle++;
1276 }
1277 if (regs.isr & regs.imr & ISR_RXOK) {
1278 postedRxOk++;
1279 }
1280 if (regs.isr & regs.imr & ISR_RXDESC) {
1281 postedRxDesc++;
1282 }
1283 if (regs.isr & regs.imr & ISR_TXOK) {
1284 postedTxOk++;
1285 }
1286 if (regs.isr & regs.imr & ISR_TXIDLE) {
1287 postedTxIdle++;
1288 }
1289 if (regs.isr & regs.imr & ISR_TXDESC) {
1290 postedTxDesc++;
1291 }
1292 if (regs.isr & regs.imr & ISR_RXORN) {
1293 postedRxOrn++;
1294 }
1295
1296 if (regs.isr & regs.imr & ISR_IMPL)
1297 postedInterrupts++;
1298
1299 interrupts &= ~ISR_NOIMPL;
1300 regs.isr &= ~interrupts;
1301
1302 DPRINTF(EthernetIntr,
1303 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
1304 interrupts, regs.isr, regs.imr);
1305
1306 if (!(regs.isr & regs.imr))
1307 cpuIntrClear();
1308 }
1309
1310 void
1311 NSGigE::devIntrChangeMask()
1312 {
1313 DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
1314 regs.isr, regs.imr, regs.isr & regs.imr);
1315
1316 if (regs.isr & regs.imr)
1317 cpuIntrPost(curTick);
1318 else
1319 cpuIntrClear();
1320 }
1321
/**
 * Schedule delivery of the CPU interrupt at tick @p when.  If an
 * earlier (or equal) interrupt is already scheduled, that one is kept
 * and this request is folded into it; a later pending interrupt is
 * squashed and replaced by this earlier one.
 *
 * @param when absolute tick at which the interrupt should fire;
 *             must not be in the past.
 */
void
NSGigE::cpuIntrPost(Tick when)
{
    // If the interrupt you want to post is later than an interrupt
    // already scheduled, just let it post in the coming one and don't
    // schedule another.
    // HOWEVER, must be sure that the scheduled intrTick is in the
    // future (this was formerly the source of a bug)
    /**
     * @todo this warning should be removed and the intrTick code should
     * be fixed.
     */
    assert(when >= curTick);
    assert(intrTick >= curTick || intrTick == 0);
    // intrTick == 0 means "no interrupt currently scheduled".
    if (when > intrTick && intrTick != 0) {
        DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
                intrTick);
        return;
    }

    intrTick = when;
    // Defensive clamp: should be unreachable given the asserts above,
    // but break into the debugger if it ever happens.
    if (intrTick < curTick) {
        debug_break();
        intrTick = curTick;
    }

    DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
            intrTick);

    // Replace any previously scheduled (later) interrupt event with a
    // fresh one at the new, earlier tick.
    if (intrEvent)
        intrEvent->squash();
    intrEvent = new IntrEvent(this, true);
    intrEvent->schedule(intrTick);
}
1356
1357 void
1358 NSGigE::cpuInterrupt()
1359 {
1360 assert(intrTick == curTick);
1361
1362 // Whether or not there's a pending interrupt, we don't care about
1363 // it anymore
1364 intrEvent = 0;
1365 intrTick = 0;
1366
1367 // Don't send an interrupt if there's already one
1368 if (cpuPendingIntr) {
1369 DPRINTF(EthernetIntr,
1370 "would send an interrupt now, but there's already pending\n");
1371 } else {
1372 // Send interrupt
1373 cpuPendingIntr = true;
1374
1375 DPRINTF(EthernetIntr, "posting interrupt\n");
1376 intrPost();
1377 }
1378 }
1379
1380 void
1381 NSGigE::cpuIntrClear()
1382 {
1383 if (!cpuPendingIntr)
1384 return;
1385
1386 if (intrEvent) {
1387 intrEvent->squash();
1388 intrEvent = 0;
1389 }
1390
1391 intrTick = 0;
1392
1393 cpuPendingIntr = false;
1394
1395 DPRINTF(EthernetIntr, "clearing interrupt\n");
1396 intrClear();
1397 }
1398
1399 bool
1400 NSGigE::cpuIntrPending() const
1401 { return cpuPendingIntr; }
1402
1403 void
1404 NSGigE::txReset()
1405 {
1406
1407 DPRINTF(Ethernet, "transmit reset\n");
1408
1409 CTDD = false;
1410 txEnable = false;;
1411 txFragPtr = 0;
1412 assert(txDescCnt == 0);
1413 txFifo.clear();
1414 txState = txIdle;
1415 assert(txDmaState == dmaIdle);
1416 }
1417
1418 void
1419 NSGigE::rxReset()
1420 {
1421 DPRINTF(Ethernet, "receive reset\n");
1422
1423 CRDD = false;
1424 assert(rxPktBytes == 0);
1425 rxEnable = false;
1426 rxFragPtr = 0;
1427 assert(rxDescCnt == 0);
1428 assert(rxDmaState == dmaIdle);
1429 rxFifo.clear();
1430 rxState = rxIdle;
1431 }
1432
/**
 * Restore the device register file to its power-on defaults and
 * clear all cached receive-filter configuration flags.
 */
void
NSGigE::regsReset()
{
    memset(&regs, 0, sizeof(regs));
    regs.config = (CFGR_LNKSTS | CFGR_TBI_EN | CFGR_MODE_1000);
    regs.mear = 0x12;
    regs.txcfg = 0x120; // set drain threshold to 1024 bytes and
                        // fill threshold to 32 bytes
    regs.rxcfg = 0x4; // set drain threshold to 16 bytes
    regs.srr = 0x0103; // set the silicon revision to rev B or 0x103
    regs.mibc = MIBC_FRZ;
    regs.vdr = 0x81; // set the vlan tag type to 802.1q
    regs.tesr = 0xc000; // TBI capable of both full and half duplex
    regs.brar = 0xffffffff;

    // Cached filter state derived from registers; all off after reset.
    extstsEnable = false;
    acceptBroadcast = false;
    acceptMulticast = false;
    acceptUnicast = false;
    acceptPerfect = false;
    acceptArp = false;
}
1455
/**
 * Functionally copy rxDmaLen bytes from physical memory at rxDmaAddr
 * into the buffer at rxDmaData, then mark the RX DMA engine idle.
 */
void
NSGigE::rxDmaReadCopy()
{
    assert(rxDmaState == dmaReading);

    physmem->dma_read((uint8_t *)rxDmaData, rxDmaAddr, rxDmaLen);
    rxDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
            rxDmaAddr, rxDmaLen);
    DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
}
1468
1469 bool
1470 NSGigE::doRxDmaRead()
1471 {
1472 assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
1473 rxDmaState = dmaReading;
1474
1475 if (dmaInterface && !rxDmaFree) {
1476 if (dmaInterface->busy())
1477 rxDmaState = dmaReadWaiting;
1478 else
1479 dmaInterface->doDMA(Read, rxDmaAddr, rxDmaLen, curTick,
1480 &rxDmaReadEvent, true);
1481 return true;
1482 }
1483
1484 if (dmaReadDelay == 0 && dmaReadFactor == 0) {
1485 rxDmaReadCopy();
1486 return false;
1487 }
1488
1489 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
1490 Tick start = curTick + dmaReadDelay + factor;
1491 rxDmaReadEvent.schedule(start);
1492 return true;
1493 }
1494
1495 void
1496 NSGigE::rxDmaReadDone()
1497 {
1498 assert(rxDmaState == dmaReading);
1499 rxDmaReadCopy();
1500
1501 // If the transmit state machine has a pending DMA, let it go first
1502 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1503 txKick();
1504
1505 rxKick();
1506 }
1507
/**
 * Functionally copy rxDmaLen bytes from the buffer at rxDmaData into
 * physical memory at rxDmaAddr, then mark the RX DMA engine idle.
 */
void
NSGigE::rxDmaWriteCopy()
{
    assert(rxDmaState == dmaWriting);

    physmem->dma_write(rxDmaAddr, (uint8_t *)rxDmaData, rxDmaLen);
    rxDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
            rxDmaAddr, rxDmaLen);
    DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
}
1520
1521 bool
1522 NSGigE::doRxDmaWrite()
1523 {
1524 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
1525 rxDmaState = dmaWriting;
1526
1527 if (dmaInterface && !rxDmaFree) {
1528 if (dmaInterface->busy())
1529 rxDmaState = dmaWriteWaiting;
1530 else
1531 dmaInterface->doDMA(WriteInvalidate, rxDmaAddr, rxDmaLen, curTick,
1532 &rxDmaWriteEvent, true);
1533 return true;
1534 }
1535
1536 if (dmaWriteDelay == 0 && dmaWriteFactor == 0) {
1537 rxDmaWriteCopy();
1538 return false;
1539 }
1540
1541 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
1542 Tick start = curTick + dmaWriteDelay + factor;
1543 rxDmaWriteEvent.schedule(start);
1544 return true;
1545 }
1546
1547 void
1548 NSGigE::rxDmaWriteDone()
1549 {
1550 assert(rxDmaState == dmaWriting);
1551 rxDmaWriteCopy();
1552
1553 // If the transmit state machine has a pending DMA, let it go first
1554 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1555 txKick();
1556
1557 rxKick();
1558 }
1559
/**
 * Run the receive-side state machine until it can make no more
 * forward progress.  States chain through "goto next"; "goto exit"
 * leaves the loop whenever intermediate work (typically a DMA) is
 * outstanding -- the DMA-completion event re-enters this function.
 * Descriptor layout (32- vs 64-bit) is selected by CFGR_M64ADDR.
 */
void
NSGigE::rxKick()
{
    bool is64bit = (bool)(regs.config & CFGR_M64ADDR);

    DPRINTF(EthernetSM,
            "receive kick rxState=%s (rxBuf.size=%d) %d-bit\n",
            NsRxStateStrings[rxState], rxFifo.size(), is64bit ? 64 : 32);

    Addr link, bufptr;
    // References into whichever descriptor image matches the mode.
    uint32_t &cmdsts = is64bit ? rxDesc64.cmdsts : rxDesc32.cmdsts;
    uint32_t &extsts = is64bit ? rxDesc64.extsts : rxDesc32.extsts;

  next:
    if (clock) {
        // When clocked, run at most one state transition per cycle.
        if (rxKickTick > curTick) {
            DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
                    rxKickTick);

            goto exit;
        }

        // Go to the next state machine clock tick.
        rxKickTick = curTick + cycles(1);
    }

    // Retry any DMA that was stalled behind a busy DMA interface.
    switch(rxDmaState) {
      case dmaReadWaiting:
        if (doRxDmaRead())
            goto exit;
        break;
      case dmaWriteWaiting:
        if (doRxDmaWrite())
            goto exit;
        break;
      default:
        break;
    }

    link = is64bit ? (Addr)rxDesc64.link : (Addr)rxDesc32.link;
    bufptr = is64bit ? (Addr)rxDesc64.bufptr : (Addr)rxDesc32.bufptr;

    // see state machine from spec for details
    // the way this works is, if you finish work on one state and can
    // go directly to another, you do that through jumping to the
    // label "next". however, if you have intermediate work, like DMA
    // so that you can't go to the next state yet, you go to exit and
    // exit the loop. however, when the DMA is done it will trigger
    // an event and come back to this loop.
    switch (rxState) {
      case rxIdle:
        if (!rxEnable) {
            DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n");
            goto exit;
        }

        if (CRDD) {
            // Current descriptor already done: refresh only its link
            // field to find the next descriptor.
            rxState = rxDescRefr;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData =
                is64bit ? (void *)&rxDesc64.link : (void *)&rxDesc32.link;
            rxDmaLen = is64bit ? sizeof(rxDesc64.link) : sizeof(rxDesc32.link);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        } else {
            // Fetch the whole descriptor at rxdp.
            rxState = rxDescRead;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
            rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      case rxDescRefr:
        // Wait for the link-field refresh DMA to finish.
        if (rxDmaState != dmaIdle)
            goto exit;

        rxState = rxAdvance;
        break;

      case rxDescRead:
        // Wait for the descriptor-fetch DMA to finish.
        if (rxDmaState != dmaIdle)
            goto exit;

        DPRINTF(EthernetDesc, "rxDesc: addr=%08x read descriptor\n",
                regs.rxdp & 0x3fffffff);
        DPRINTF(EthernetDesc,
                "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
                link, bufptr, cmdsts, extsts);

        if (cmdsts & CMDSTS_OWN) {
            // Descriptor still owned by the driver: nothing to fill.
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            goto exit;
        } else {
            rxState = rxFifoBlock;
            rxFragPtr = bufptr;
            rxDescCnt = cmdsts & CMDSTS_LEN_MASK;
        }
        break;

      case rxFifoBlock:
        if (!rxPacket) {
            /**
             * @todo in reality, we should be able to start processing
             * the packet as it arrives, and not have to wait for the
             * full packet ot be in the receive fifo.
             */
            if (rxFifo.empty())
                goto exit;

            DPRINTF(EthernetSM, "****processing receive of new packet****\n");

            // If we don't have a packet, grab a new one from the fifo.
            rxPacket = rxFifo.front();
            rxPktBytes = rxPacket->length;
            rxPacketBufPtr = rxPacket->data;

#if TRACING_ON
            if (DTRACE(Ethernet)) {
                IpPtr ip(rxPacket);
                if (ip) {
                    DPRINTF(Ethernet, "ID is %d\n", ip->id());
                    TcpPtr tcp(ip);
                    if (tcp) {
                        DPRINTF(Ethernet,
                                "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
                                tcp->sport(), tcp->dport(), tcp->seq(),
                                tcp->ack());
                    }
                }
            }
#endif

            // sanity check - i think the driver behaves like this
            assert(rxDescCnt >= rxPktBytes);
            rxFifo.pop();
        }


        // dont' need the && rxDescCnt > 0 if driver sanity check
        // above holds
        if (rxPktBytes > 0) {
            rxState = rxFragWrite;
            // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
            // check holds
            rxXferLen = rxPktBytes;

            rxDmaAddr = rxFragPtr & 0x3fffffff;
            rxDmaData = rxPacketBufPtr;
            rxDmaLen = rxXferLen;
            rxDmaFree = dmaDataFree;

            if (doRxDmaWrite())
                goto exit;

        } else {
            // Whole packet copied out; write back descriptor status.
            rxState = rxDescWrite;

            //if (rxPktBytes == 0) {  /* packet is done */
            assert(rxPktBytes == 0);
            DPRINTF(EthernetSM, "done with receiving packet\n");

            cmdsts |= CMDSTS_OWN;
            cmdsts &= ~CMDSTS_MORE;
            cmdsts |= CMDSTS_OK;
            cmdsts &= 0xffff0000;
            cmdsts += rxPacket->length;   //i.e. set CMDSTS_SIZE

#if 0
            /*
             * all the driver uses these are for its own stats keeping
             * which we don't care about, aren't necessary for
             * functionality and doing this would just slow us down.
             * if they end up using this in a later version for
             * functional purposes, just undef
             */
            if (rxFilterEnable) {
                cmdsts &= ~CMDSTS_DEST_MASK;
                const EthAddr &dst = rxFifoFront()->dst();
                if (dst->unicast())
                    cmdsts |= CMDSTS_DEST_SELF;
                if (dst->multicast())
                    cmdsts |= CMDSTS_DEST_MULTI;
                if (dst->broadcast())
                    cmdsts |= CMDSTS_DEST_MASK;
            }
#endif

            // Verify checksums and record the packet type in EXTSTS.
            IpPtr ip(rxPacket);
            if (extstsEnable && ip) {
                extsts |= EXTSTS_IPPKT;
                rxIpChecksums++;
                if (cksum(ip) != 0) {
                    DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
                    extsts |= EXTSTS_IPERR;
                }
                TcpPtr tcp(ip);
                UdpPtr udp(ip);
                if (tcp) {
                    extsts |= EXTSTS_TCPPKT;
                    rxTcpChecksums++;
                    if (cksum(tcp) != 0) {
                        DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
                        extsts |= EXTSTS_TCPERR;

                    }
                } else if (udp) {
                    extsts |= EXTSTS_UDPPKT;
                    rxUdpChecksums++;
                    if (cksum(udp) != 0) {
                        DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
                        extsts |= EXTSTS_UDPERR;
                    }
                }
            }
            rxPacket = 0;

            /*
             * the driver seems to always receive into desc buffers
             * of size 1514, so you never have a pkt that is split
             * into multiple descriptors on the receive side, so
             * i don't implement that case, hence the assert above.
             */

            DPRINTF(EthernetDesc,
                    "rxDesc: addr=%08x writeback cmdsts extsts\n",
                    regs.rxdp & 0x3fffffff);
            DPRINTF(EthernetDesc,
                    "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
                    link, bufptr, cmdsts, extsts);

            // Write back the adjacent cmdsts+extsts pair in one DMA.
            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = &cmdsts;
            if (is64bit) {
                rxDmaAddr += offsetof(ns_desc64, cmdsts);
                rxDmaLen = sizeof(rxDesc64.cmdsts) + sizeof(rxDesc64.extsts);
            } else {
                rxDmaAddr += offsetof(ns_desc32, cmdsts);
                rxDmaLen = sizeof(rxDesc32.cmdsts) + sizeof(rxDesc32.extsts);
            }
            rxDmaFree = dmaDescFree;

            descDmaWrites++;
            descDmaWrBytes += rxDmaLen;

            if (doRxDmaWrite())
                goto exit;
        }
        break;

      case rxFragWrite:
        // Wait for the data-fragment DMA write to finish.
        if (rxDmaState != dmaIdle)
            goto exit;

        rxPacketBufPtr += rxXferLen;
        rxFragPtr += rxXferLen;
        rxPktBytes -= rxXferLen;

        rxState = rxFifoBlock;
        break;

      case rxDescWrite:
        // Wait for the status writeback DMA to finish.
        if (rxDmaState != dmaIdle)
            goto exit;

        assert(cmdsts & CMDSTS_OWN);

        assert(rxPacket == 0);
        devIntrPost(ISR_RXOK);

        if (cmdsts & CMDSTS_INTR)
            devIntrPost(ISR_RXDESC);

        if (!rxEnable) {
            DPRINTF(EthernetSM, "Halting the RX state machine\n");
            rxState = rxIdle;
            goto exit;
        } else
            rxState = rxAdvance;
        break;

      case rxAdvance:
        if (link == 0) {
            // End of the descriptor ring; remember the current
            // descriptor is done so rxIdle refreshes only its link.
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            CRDD = true;
            goto exit;
        } else {
            if (rxDmaState != dmaIdle)
                goto exit;
            rxState = rxDescRead;
            regs.rxdp = link;
            CRDD = false;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
            rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
            rxDmaFree = dmaDescFree;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("Invalid rxState!");
    }

    DPRINTF(EthernetSM, "entering next rxState=%s\n",
            NsRxStateStrings[rxState]);
    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
            NsRxStateStrings[rxState]);

    if (clock && !rxKickEvent.scheduled())
        rxKickEvent.schedule(rxKickTick);
}
1896
1897 void
1898 NSGigE::transmit()
1899 {
1900 if (txFifo.empty()) {
1901 DPRINTF(Ethernet, "nothing to transmit\n");
1902 return;
1903 }
1904
1905 DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n",
1906 txFifo.size());
1907 if (interface->sendPacket(txFifo.front())) {
1908 #if TRACING_ON
1909 if (DTRACE(Ethernet)) {
1910 IpPtr ip(txFifo.front());
1911 if (ip) {
1912 DPRINTF(Ethernet, "ID is %d\n", ip->id());
1913 TcpPtr tcp(ip);
1914 if (tcp) {
1915 DPRINTF(Ethernet,
1916 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1917 tcp->sport(), tcp->dport(), tcp->seq(),
1918 tcp->ack());
1919 }
1920 }
1921 }
1922 #endif
1923
1924 DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length);
1925 txBytes += txFifo.front()->length;
1926 txPackets++;
1927
1928 DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n",
1929 txFifo.avail());
1930 txFifo.pop();
1931
1932 /*
1933 * normally do a writeback of the descriptor here, and ONLY
1934 * after that is done, send this interrupt. but since our
1935 * stuff never actually fails, just do this interrupt here,
1936 * otherwise the code has to stray from this nice format.
1937 * besides, it's functionally the same.
1938 */
1939 devIntrPost(ISR_TXOK);
1940 }
1941
1942 if (!txFifo.empty() && !txEvent.scheduled()) {
1943 DPRINTF(Ethernet, "reschedule transmit\n");
1944 txEvent.schedule(curTick + retryTime);
1945 }
1946 }
1947
/**
 * Functionally copy txDmaLen bytes from physical memory at txDmaAddr
 * into the buffer at txDmaData, then mark the TX DMA engine idle.
 */
void
NSGigE::txDmaReadCopy()
{
    assert(txDmaState == dmaReading);

    physmem->dma_read((uint8_t *)txDmaData, txDmaAddr, txDmaLen);
    txDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
            txDmaAddr, txDmaLen);
    DDUMP(EthernetDMA, txDmaData, txDmaLen);
}
1960
1961 bool
1962 NSGigE::doTxDmaRead()
1963 {
1964 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
1965 txDmaState = dmaReading;
1966
1967 if (dmaInterface && !txDmaFree) {
1968 if (dmaInterface->busy())
1969 txDmaState = dmaReadWaiting;
1970 else
1971 dmaInterface->doDMA(Read, txDmaAddr, txDmaLen, curTick,
1972 &txDmaReadEvent, true);
1973 return true;
1974 }
1975
1976 if (dmaReadDelay == 0 && dmaReadFactor == 0.0) {
1977 txDmaReadCopy();
1978 return false;
1979 }
1980
1981 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
1982 Tick start = curTick + dmaReadDelay + factor;
1983 txDmaReadEvent.schedule(start);
1984 return true;
1985 }
1986
1987 void
1988 NSGigE::txDmaReadDone()
1989 {
1990 assert(txDmaState == dmaReading);
1991 txDmaReadCopy();
1992
1993 // If the receive state machine has a pending DMA, let it go first
1994 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1995 rxKick();
1996
1997 txKick();
1998 }
1999
/**
 * Functionally copy txDmaLen bytes from the buffer at txDmaData into
 * physical memory at txDmaAddr, then mark the TX DMA engine idle.
 */
void
NSGigE::txDmaWriteCopy()
{
    assert(txDmaState == dmaWriting);

    physmem->dma_write(txDmaAddr, (uint8_t *)txDmaData, txDmaLen);
    txDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
            txDmaAddr, txDmaLen);
    DDUMP(EthernetDMA, txDmaData, txDmaLen);
}
2012
2013 bool
2014 NSGigE::doTxDmaWrite()
2015 {
2016 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
2017 txDmaState = dmaWriting;
2018
2019 if (dmaInterface && !txDmaFree) {
2020 if (dmaInterface->busy())
2021 txDmaState = dmaWriteWaiting;
2022 else
2023 dmaInterface->doDMA(WriteInvalidate, txDmaAddr, txDmaLen, curTick,
2024 &txDmaWriteEvent, true);
2025 return true;
2026 }
2027
2028 if (dmaWriteDelay == 0 && dmaWriteFactor == 0.0) {
2029 txDmaWriteCopy();
2030 return false;
2031 }
2032
2033 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
2034 Tick start = curTick + dmaWriteDelay + factor;
2035 txDmaWriteEvent.schedule(start);
2036 return true;
2037 }
2038
2039 void
2040 NSGigE::txDmaWriteDone()
2041 {
2042 assert(txDmaState == dmaWriting);
2043 txDmaWriteCopy();
2044
2045 // If the receive state machine has a pending DMA, let it go first
2046 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
2047 rxKick();
2048
2049 txKick();
2050 }
2051
/**
 * Run the transmit-side state machine until it can make no more
 * forward progress.  States chain through "goto next"; "goto exit"
 * leaves the loop whenever intermediate work (typically a DMA) is
 * outstanding -- the DMA-completion event re-enters this function.
 * Descriptor layout (32- vs 64-bit) is selected by CFGR_M64ADDR.
 */
void
NSGigE::txKick()
{
    bool is64bit = (bool)(regs.config & CFGR_M64ADDR);

    DPRINTF(EthernetSM, "transmit kick txState=%s %d-bit\n",
            NsTxStateStrings[txState], is64bit ? 64 : 32);

    Addr link, bufptr;
    // References into whichever descriptor image matches the mode.
    uint32_t &cmdsts = is64bit ? txDesc64.cmdsts : txDesc32.cmdsts;
    uint32_t &extsts = is64bit ? txDesc64.extsts : txDesc32.extsts;

  next:
    if (clock) {
        // When clocked, run at most one state transition per cycle.
        if (txKickTick > curTick) {
            DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
                    txKickTick);
            goto exit;
        }

        // Go to the next state machine clock tick.
        txKickTick = curTick + cycles(1);
    }

    // Retry any DMA that was stalled behind a busy DMA interface.
    switch(txDmaState) {
      case dmaReadWaiting:
        if (doTxDmaRead())
            goto exit;
        break;
      case dmaWriteWaiting:
        if (doTxDmaWrite())
            goto exit;
        break;
      default:
        break;
    }

    link = is64bit ? (Addr)txDesc64.link : (Addr)txDesc32.link;
    bufptr = is64bit ? (Addr)txDesc64.bufptr : (Addr)txDesc32.bufptr;
    switch (txState) {
      case txIdle:
        if (!txEnable) {
            DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n");
            goto exit;
        }

        if (CTDD) {
            // Current descriptor already done: refresh only its link
            // field to find the next descriptor.
            txState = txDescRefr;

            txDmaAddr = regs.txdp & 0x3fffffff;
            txDmaData =
                is64bit ? (void *)&txDesc64.link : (void *)&txDesc32.link;
            txDmaLen = is64bit ? sizeof(txDesc64.link) : sizeof(txDesc32.link);
            txDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += txDmaLen;

            if (doTxDmaRead())
                goto exit;

        } else {
            // Fetch the whole descriptor at txdp.
            txState = txDescRead;

            txDmaAddr = regs.txdp & 0x3fffffff;
            txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
            txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
            txDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += txDmaLen;

            if (doTxDmaRead())
                goto exit;
        }
        break;

      case txDescRefr:
        // Wait for the link-field refresh DMA to finish.
        if (txDmaState != dmaIdle)
            goto exit;

        txState = txAdvance;
        break;

      case txDescRead:
        // Wait for the descriptor-fetch DMA to finish.
        if (txDmaState != dmaIdle)
            goto exit;

        DPRINTF(EthernetDesc, "txDesc: addr=%08x read descriptor\n",
                regs.txdp & 0x3fffffff);
        DPRINTF(EthernetDesc,
                "txDesc: link=%#x bufptr=%#x cmdsts=%#08x extsts=%#08x\n",
                link, bufptr, cmdsts, extsts);

        if (cmdsts & CMDSTS_OWN) {
            // Device owns the descriptor: there is data to transmit.
            txState = txFifoBlock;
            txFragPtr = bufptr;
            txDescCnt = cmdsts & CMDSTS_LEN_MASK;
        } else {
            devIntrPost(ISR_TXIDLE);
            txState = txIdle;
            goto exit;
        }
        break;

      case txFifoBlock:
        if (!txPacket) {
            // Start assembling a new outgoing packet.
            DPRINTF(EthernetSM, "****starting the tx of a new packet****\n");
            txPacket = new PacketData(16384);
            txPacketBufPtr = txPacket->data;
        }

        if (txDescCnt == 0) {
            DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n");
            if (cmdsts & CMDSTS_MORE) {
                // Packet continues in another descriptor: hand this
                // one back (cmdsts only) and move on.
                DPRINTF(EthernetSM, "there are more descriptors to come\n");
                txState = txDescWrite;

                cmdsts &= ~CMDSTS_OWN;

                txDmaAddr = regs.txdp & 0x3fffffff;
                txDmaData = &cmdsts;
                if (is64bit) {
                    txDmaAddr += offsetof(ns_desc64, cmdsts);
                    txDmaLen = sizeof(txDesc64.cmdsts);
                } else {
                    txDmaAddr += offsetof(ns_desc32, cmdsts);
                    txDmaLen = sizeof(txDesc32.cmdsts);
                }
                txDmaFree = dmaDescFree;

                if (doTxDmaWrite())
                    goto exit;

            } else { /* this packet is totally done */
                DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
                /* deal with the the packet that just finished */
                // Optionally fill in the IP/TCP/UDP checksums the
                // driver asked the hardware to compute.
                if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
                    IpPtr ip(txPacket);
                    if (extsts & EXTSTS_UDPPKT) {
                        UdpPtr udp(ip);
                        udp->sum(0);
                        udp->sum(cksum(udp));
                        txUdpChecksums++;
                    } else if (extsts & EXTSTS_TCPPKT) {
                        TcpPtr tcp(ip);
                        tcp->sum(0);
                        tcp->sum(cksum(tcp));
                        txTcpChecksums++;
                    }
                    if (extsts & EXTSTS_IPPKT) {
                        ip->sum(0);
                        ip->sum(cksum(ip));
                        txIpChecksums++;
                    }
                }

                txPacket->length = txPacketBufPtr - txPacket->data;
                // this is just because the receive can't handle a
                // packet bigger want to make sure
                // NOTE(review): "%s" formats an integer length here;
                // gem5's cprintf formats by argument type so it still
                // prints, but "%d" would be the clearer specifier --
                // confirm before changing.
                if (txPacket->length > 1514)
                    panic("transmit packet too large, %s > 1514\n",
                          txPacket->length);

#ifndef NDEBUG
                bool success =
#endif
                    txFifo.push(txPacket);
                assert(success);

                /*
                 * this following section is not tqo spec, but
                 * functionally shouldn't be any different.  normally,
                 * the chip will wait til the transmit has occurred
                 * before writing back the descriptor because it has
                 * to wait to see that it was successfully transmitted
                 * to decide whether to set CMDSTS_OK or not.
                 * however, in the simulator since it is always
                 * successfully transmitted, and writing it exactly to
                 * spec would complicate the code, we just do it here
                 */

                cmdsts &= ~CMDSTS_OWN;
                cmdsts |= CMDSTS_OK;

                DPRINTF(EthernetDesc,
                        "txDesc writeback: cmdsts=%08x extsts=%08x\n",
                        cmdsts, extsts);

                // Write back the adjacent cmdsts+extsts pair in one DMA.
                txDmaFree = dmaDescFree;
                txDmaAddr = regs.txdp & 0x3fffffff;
                txDmaData = &cmdsts;
                if (is64bit) {
                    txDmaAddr += offsetof(ns_desc64, cmdsts);
                    txDmaLen =
                        sizeof(txDesc64.cmdsts) + sizeof(txDesc64.extsts);
                } else {
                    txDmaAddr += offsetof(ns_desc32, cmdsts);
                    txDmaLen =
                        sizeof(txDesc32.cmdsts) + sizeof(txDesc32.extsts);
                }

                descDmaWrites++;
                descDmaWrBytes += txDmaLen;

                transmit();
                txPacket = 0;

                if (!txEnable) {
                    DPRINTF(EthernetSM, "halting TX state machine\n");
                    txState = txIdle;
                    goto exit;
                } else
                    txState = txAdvance;

                if (doTxDmaWrite())
                    goto exit;
            }
        } else {
            DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
            if (!txFifo.full()) {
                txState = txFragRead;

                /*
                 * The number of bytes transferred is either whatever
                 * is left in the descriptor (txDescCnt), or if there
                 * is not enough room in the fifo, just whatever room
                 * is left in the fifo
                 */
                txXferLen = min<uint32_t>(txDescCnt, txFifo.avail());

                txDmaAddr = txFragPtr & 0x3fffffff;
                txDmaData = txPacketBufPtr;
                txDmaLen = txXferLen;
                txDmaFree = dmaDataFree;

                if (doTxDmaRead())
                    goto exit;
            } else {
                // FIFO full: drain it onto the wire, then retry.
                txState = txFifoBlock;
                transmit();

                goto exit;
            }

        }
        break;

      case txFragRead:
        // Wait for the data-fragment DMA read to finish.
        if (txDmaState != dmaIdle)
            goto exit;

        txPacketBufPtr += txXferLen;
        txFragPtr += txXferLen;
        txDescCnt -= txXferLen;
        txFifo.reserve(txXferLen);

        txState = txFifoBlock;
        break;

      case txDescWrite:
        // Wait for the cmdsts writeback DMA to finish.
        if (txDmaState != dmaIdle)
            goto exit;

        if (cmdsts & CMDSTS_INTR)
            devIntrPost(ISR_TXDESC);

        if (!txEnable) {
            DPRINTF(EthernetSM, "halting TX state machine\n");
            txState = txIdle;
            goto exit;
        } else
            txState = txAdvance;
        break;

      case txAdvance:
        if (link == 0) {
            // End of the descriptor ring.
            devIntrPost(ISR_TXIDLE);
            txState = txIdle;
            goto exit;
        } else {
            if (txDmaState != dmaIdle)
                goto exit;
            txState = txDescRead;
            regs.txdp = link;
            CTDD = false;

            txDmaAddr = link & 0x3fffffff;
            txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
            txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
            txDmaFree = dmaDescFree;

            if (doTxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("invalid state");
    }

    DPRINTF(EthernetSM, "entering next txState=%s\n",
            NsTxStateStrings[txState]);
    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
            NsTxStateStrings[txState]);

    if (clock && !txKickEvent.scheduled())
        txKickEvent.schedule(txKickTick);
}
2367
/**
 * Advance the serial EEPROM state machine by one bit.
 * Called on each rising edge of the EEPROM clock bit in MEAR.
 * Input bits arrive via MEAR_EEDI; output bits are driven on
 * MEAR_EEDO.  Only the READ opcode is supported, and only the
 * perfect-match address words are backed by data.
 */
void
NSGigE::eepromKick()
{
    switch (eepromState) {

      case eepromStart:

        // Wait for start bit
        if (regs.mear & MEAR_EEDI) {
            // Set up to get 2 opcode bits
            eepromState = eepromGetOpcode;
            eepromBitsToRx = 2;
            eepromOpcode = 0;
        }
        break;

      case eepromGetOpcode:
        // Shift the opcode in MSB-first, one bit per clock.
        eepromOpcode <<= 1;
        eepromOpcode += (regs.mear & MEAR_EEDI) ? 1 : 0;
        --eepromBitsToRx;

        // Done getting opcode
        if (eepromBitsToRx == 0) {
            if (eepromOpcode != EEPROM_READ)
                panic("only EEPROM reads are implemented!");

            // Set up to get address
            eepromState = eepromGetAddress;
            eepromBitsToRx = 6;
            eepromAddress = 0;
        }
        break;

      case eepromGetAddress:
        // Shift the 6-bit word address in MSB-first.
        eepromAddress <<= 1;
        eepromAddress += (regs.mear & MEAR_EEDI) ? 1 : 0;
        --eepromBitsToRx;

        // Done getting address
        if (eepromBitsToRx == 0) {

            if (eepromAddress >= EEPROM_SIZE)
                panic("EEPROM read access out of range!");

            // Serve the requested 16-bit word from the station
            // address (perfect-match) bytes in the ROM.
            switch (eepromAddress) {

              case EEPROM_PMATCH2_ADDR:
                eepromData = rom.perfectMatch[5];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[4];
                break;

              case EEPROM_PMATCH1_ADDR:
                eepromData = rom.perfectMatch[3];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[2];
                break;

              case EEPROM_PMATCH0_ADDR:
                eepromData = rom.perfectMatch[1];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[0];
                break;

              default:
                panic("FreeBSD driver only uses EEPROM to read PMATCH!");
            }
            // Set up to read data
            eepromState = eepromRead;
            eepromBitsToRx = 16;

            // Clear data in bit
            regs.mear &= ~MEAR_EEDI;
        }
        break;

      case eepromRead:
        // Clear Data Out bit
        regs.mear &= ~MEAR_EEDO;
        // Set bit to value of current EEPROM bit
        regs.mear |= (eepromData & 0x8000) ? MEAR_EEDO : 0x0;

        // Shift the word out MSB-first, one bit per clock.
        eepromData <<= 1;
        --eepromBitsToRx;

        // All done
        if (eepromBitsToRx == 0) {
            eepromState = eepromStart;
        }
        break;

      default:
        panic("invalid EEPROM state");
    }

}
2468
2469 void
2470 NSGigE::transferDone()
2471 {
2472 if (txFifo.empty()) {
2473 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
2474 return;
2475 }
2476
2477 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
2478
2479 if (txEvent.scheduled())
2480 txEvent.reschedule(curTick + cycles(1));
2481 else
2482 txEvent.schedule(curTick + cycles(1));
2483 }
2484
2485 bool
2486 NSGigE::rxFilter(const PacketPtr &packet)
2487 {
2488 EthPtr eth = packet;
2489 bool drop = true;
2490 string type;
2491
2492 const EthAddr &dst = eth->dst();
2493 if (dst.unicast()) {
2494 // If we're accepting all unicast addresses
2495 if (acceptUnicast)
2496 drop = false;
2497
2498 // If we make a perfect match
2499 if (acceptPerfect && dst == rom.perfectMatch)
2500 drop = false;
2501
2502 if (acceptArp && eth->type() == ETH_TYPE_ARP)
2503 drop = false;
2504
2505 } else if (dst.broadcast()) {
2506 // if we're accepting broadcasts
2507 if (acceptBroadcast)
2508 drop = false;
2509
2510 } else if (dst.multicast()) {
2511 // if we're accepting all multicasts
2512 if (acceptMulticast)
2513 drop = false;
2514
2515 // Multicast hashing faked - all packets accepted
2516 if (multicastHashEnable)
2517 drop = false;
2518 }
2519
2520 if (drop) {
2521 DPRINTF(Ethernet, "rxFilter drop\n");
2522 DDUMP(EthernetData, packet->data, packet->length);
2523 }
2524
2525 return drop;
2526 }
2527
/**
 * Accept a packet arriving from the wire.
 *
 * Return semantics: true means the packet was consumed (either queued
 * into rxFifo or deliberately dropped); false is returned only when
 * the receive FIFO has insufficient space, after posting an ISR_RXORN
 * (overrun) interrupt — presumably signalling the link to retry later;
 * confirm against the EtherLink caller.
 *
 * Note: rxBytes/rxPackets are incremented before any drop checks, so
 * the statistics count every packet seen on the wire, including ones
 * that are subsequently dropped.
 */
bool
NSGigE::recvPacket(PacketPtr packet)
{
    rxBytes += packet->length;
    rxPackets++;

    DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
            rxFifo.avail());

    // Receiver disabled: silently drop (still counts as consumed).
    if (!rxEnable) {
        DPRINTF(Ethernet, "receive disabled...packet dropped\n");
        return true;
    }

    // NOTE(review): filtering *disabled* drops the packet here, which
    // looks inverted — confirm this matches the intended rxFilterEnable
    // semantics elsewhere in the model.
    if (!rxFilterEnable) {
        DPRINTF(Ethernet,
            "receive packet filtering disabled . . . packet dropped\n");
        return true;
    }

    // Address filter rejected the packet: consumed, not queued.
    if (rxFilter(packet)) {
        DPRINTF(Ethernet, "packet filtered...dropped\n");
        return true;
    }

    // FIFO overflow: post an overrun interrupt and report failure so
    // the sender knows the packet was not taken.
    if (rxFifo.avail() < packet->length) {
#if TRACING_ON
        // Tracing-only peek at IP/TCP headers to identify the victim.
        IpPtr ip(packet);
        TcpPtr tcp(ip);
        if (ip) {
            DPRINTF(Ethernet,
                "packet won't fit in receive buffer...pkt ID %d dropped\n",
                    ip->id());
            if (tcp) {
                DPRINTF(Ethernet, "Seq=%d\n", tcp->seq());
            }
        }
#endif
        droppedPackets++;
        devIntrPost(ISR_RXORN);
        return false;
    }

    rxFifo.push(packet);

    // Kick the receive state machine to start processing the new data.
    rxKick();
    return true;
}
2576
2577 //=====================================================================
2578 //
2579 //
/**
 * Serialize the device's state into a checkpoint stream.
 *
 * Mirrors NSGigE::unserialize(); any field added here must be read
 * back there in the same order.  In-flight DMA events are completed
 * in place first so that no pending event needs to be checkpointed.
 */
void
NSGigE::serialize(ostream &os)
{
    // Serialize the PciDev base class
    PciDev::serialize(os);

    /*
     * Finalize any DMA events now.
     */
    // Completing the copies here means the DMA state machines are in a
    // stable, event-free state when their scalars are written below.
    if (rxDmaReadEvent.scheduled())
        rxDmaReadCopy();
    if (rxDmaWriteEvent.scheduled())
        rxDmaWriteCopy();
    if (txDmaReadEvent.scheduled())
        txDmaReadCopy();
    if (txDmaWriteEvent.scheduled())
        txDmaWriteCopy();

    /*
     * Serialize the device registers
     */
    SERIALIZE_SCALAR(regs.command);
    SERIALIZE_SCALAR(regs.config);
    SERIALIZE_SCALAR(regs.mear);
    SERIALIZE_SCALAR(regs.ptscr);
    SERIALIZE_SCALAR(regs.isr);
    SERIALIZE_SCALAR(regs.imr);
    SERIALIZE_SCALAR(regs.ier);
    SERIALIZE_SCALAR(regs.ihr);
    SERIALIZE_SCALAR(regs.txdp);
    SERIALIZE_SCALAR(regs.txdp_hi);
    SERIALIZE_SCALAR(regs.txcfg);
    SERIALIZE_SCALAR(regs.gpior);
    SERIALIZE_SCALAR(regs.rxdp);
    SERIALIZE_SCALAR(regs.rxdp_hi);
    SERIALIZE_SCALAR(regs.rxcfg);
    SERIALIZE_SCALAR(regs.pqcr);
    SERIALIZE_SCALAR(regs.wcsr);
    SERIALIZE_SCALAR(regs.pcr);
    SERIALIZE_SCALAR(regs.rfcr);
    SERIALIZE_SCALAR(regs.rfdr);
    SERIALIZE_SCALAR(regs.brar);
    SERIALIZE_SCALAR(regs.brdr);
    SERIALIZE_SCALAR(regs.srr);
    SERIALIZE_SCALAR(regs.mibc);
    SERIALIZE_SCALAR(regs.vrcr);
    SERIALIZE_SCALAR(regs.vtcr);
    SERIALIZE_SCALAR(regs.vdr);
    SERIALIZE_SCALAR(regs.ccsr);
    SERIALIZE_SCALAR(regs.tbicr);
    SERIALIZE_SCALAR(regs.tbisr);
    SERIALIZE_SCALAR(regs.tanar);
    SERIALIZE_SCALAR(regs.tanlpar);
    SERIALIZE_SCALAR(regs.taner);
    SERIALIZE_SCALAR(regs.tesr);

    SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
    SERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);

    SERIALIZE_SCALAR(ioEnable);

    /*
     * Serialize the data Fifos
     */
    rxFifo.serialize("rxFifo", os);
    txFifo.serialize("txFifo", os);

    /*
     * Serialize the various helper variables
     */
    bool txPacketExists = txPacket;
    SERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        // NOTE: this mutates txPacket->length to the number of bytes
        // written so far so the serialized packet has a valid length.
        txPacket->length = txPacketBufPtr - txPacket->data;
        txPacket->serialize("txPacket", os);
        // Buffer cursor stored as an offset so it can be rebased on load.
        uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
        SERIALIZE_SCALAR(txPktBufPtr);
    }

    bool rxPacketExists = rxPacket;
    SERIALIZE_SCALAR(rxPacketExists);
    if (rxPacketExists) {
        rxPacket->serialize("rxPacket", os);
        uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
        SERIALIZE_SCALAR(rxPktBufPtr);
    }

    SERIALIZE_SCALAR(txXferLen);
    SERIALIZE_SCALAR(rxXferLen);

    /*
     * Serialize Cached Descriptors
     */
    SERIALIZE_SCALAR(rxDesc64.link);
    SERIALIZE_SCALAR(rxDesc64.bufptr);
    SERIALIZE_SCALAR(rxDesc64.cmdsts);
    SERIALIZE_SCALAR(rxDesc64.extsts);
    SERIALIZE_SCALAR(txDesc64.link);
    SERIALIZE_SCALAR(txDesc64.bufptr);
    SERIALIZE_SCALAR(txDesc64.cmdsts);
    SERIALIZE_SCALAR(txDesc64.extsts);
    SERIALIZE_SCALAR(rxDesc32.link);
    SERIALIZE_SCALAR(rxDesc32.bufptr);
    SERIALIZE_SCALAR(rxDesc32.cmdsts);
    SERIALIZE_SCALAR(rxDesc32.extsts);
    SERIALIZE_SCALAR(txDesc32.link);
    SERIALIZE_SCALAR(txDesc32.bufptr);
    SERIALIZE_SCALAR(txDesc32.cmdsts);
    SERIALIZE_SCALAR(txDesc32.extsts);
    SERIALIZE_SCALAR(extstsEnable);

    /*
     * Serialize tx state machine
     */
    // Enum states are widened to int so the checkpoint format does not
    // depend on the enum's underlying type.
    int txState = this->txState;
    SERIALIZE_SCALAR(txState);
    SERIALIZE_SCALAR(txEnable);
    SERIALIZE_SCALAR(CTDD);
    SERIALIZE_SCALAR(txFragPtr);
    SERIALIZE_SCALAR(txDescCnt);
    int txDmaState = this->txDmaState;
    SERIALIZE_SCALAR(txDmaState);
    SERIALIZE_SCALAR(txKickTick);

    /*
     * Serialize rx state machine
     */
    int rxState = this->rxState;
    SERIALIZE_SCALAR(rxState);
    SERIALIZE_SCALAR(rxEnable);
    SERIALIZE_SCALAR(CRDD);
    SERIALIZE_SCALAR(rxPktBytes);
    SERIALIZE_SCALAR(rxFragPtr);
    SERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState = this->rxDmaState;
    SERIALIZE_SCALAR(rxDmaState);
    SERIALIZE_SCALAR(rxKickTick);

    /*
     * Serialize EEPROM state machine
     */
    int eepromState = this->eepromState;
    SERIALIZE_SCALAR(eepromState);
    SERIALIZE_SCALAR(eepromClk);
    SERIALIZE_SCALAR(eepromBitsToRx);
    SERIALIZE_SCALAR(eepromOpcode);
    SERIALIZE_SCALAR(eepromAddress);
    SERIALIZE_SCALAR(eepromData);

    /*
     * If there's a pending transmit, store the time so we can
     * reschedule it later
     */
    // Stored relative to curTick (0 == no pending transmit).
    Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
    SERIALIZE_SCALAR(transmitTick);

    /*
     * receive address filter settings
     */
    SERIALIZE_SCALAR(rxFilterEnable);
    SERIALIZE_SCALAR(acceptBroadcast);
    SERIALIZE_SCALAR(acceptMulticast);
    SERIALIZE_SCALAR(acceptUnicast);
    SERIALIZE_SCALAR(acceptPerfect);
    SERIALIZE_SCALAR(acceptArp);
    SERIALIZE_SCALAR(multicastHashEnable);

    /*
     * Keep track of pending interrupt status.
     */
    // NOTE(review): intrEventTick is stored as an *absolute* tick,
    // unlike transmitTick above which is relative — confirm that
    // checkpoint restore preserves curTick so the absolute value is
    // still meaningful.
    SERIALIZE_SCALAR(intrTick);
    SERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick = 0;
    if (intrEvent)
        intrEventTick = intrEvent->when();
    SERIALIZE_SCALAR(intrEventTick);

}
2758
2759 void
2760 NSGigE::unserialize(Checkpoint *cp, const std::string &section)
2761 {
2762 // Unserialize the PciDev base class
2763 PciDev::unserialize(cp, section);
2764
2765 UNSERIALIZE_SCALAR(regs.command);
2766 UNSERIALIZE_SCALAR(regs.config);
2767 UNSERIALIZE_SCALAR(regs.mear);
2768 UNSERIALIZE_SCALAR(regs.ptscr);
2769 UNSERIALIZE_SCALAR(regs.isr);
2770 UNSERIALIZE_SCALAR(regs.imr);
2771 UNSERIALIZE_SCALAR(regs.ier);
2772 UNSERIALIZE_SCALAR(regs.ihr);
2773 UNSERIALIZE_SCALAR(regs.txdp);
2774 UNSERIALIZE_SCALAR(regs.txdp_hi);
2775 UNSERIALIZE_SCALAR(regs.txcfg);
2776 UNSERIALIZE_SCALAR(regs.gpior);
2777 UNSERIALIZE_SCALAR(regs.rxdp);
2778 UNSERIALIZE_SCALAR(regs.rxdp_hi);
2779 UNSERIALIZE_SCALAR(regs.rxcfg);
2780 UNSERIALIZE_SCALAR(regs.pqcr);
2781 UNSERIALIZE_SCALAR(regs.wcsr);
2782 UNSERIALIZE_SCALAR(regs.pcr);
2783 UNSERIALIZE_SCALAR(regs.rfcr);
2784 UNSERIALIZE_SCALAR(regs.rfdr);
2785 UNSERIALIZE_SCALAR(regs.brar);
2786 UNSERIALIZE_SCALAR(regs.brdr);
2787 UNSERIALIZE_SCALAR(regs.srr);
2788 UNSERIALIZE_SCALAR(regs.mibc);
2789 UNSERIALIZE_SCALAR(regs.vrcr);
2790 UNSERIALIZE_SCALAR(regs.vtcr);
2791 UNSERIALIZE_SCALAR(regs.vdr);
2792 UNSERIALIZE_SCALAR(regs.ccsr);
2793 UNSERIALIZE_SCALAR(regs.tbicr);
2794 UNSERIALIZE_SCALAR(regs.tbisr);
2795 UNSERIALIZE_SCALAR(regs.tanar);
2796 UNSERIALIZE_SCALAR(regs.tanlpar);
2797 UNSERIALIZE_SCALAR(regs.taner);
2798 UNSERIALIZE_SCALAR(regs.tesr);
2799
2800 UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
2801 UNSERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);
2802
2803 UNSERIALIZE_SCALAR(ioEnable);
2804
2805 /*
2806 * unserialize the data fifos
2807 */
2808 rxFifo.unserialize("rxFifo", cp, section);
2809 txFifo.unserialize("txFifo", cp, section);
2810
2811 /*
2812 * unserialize the various helper variables
2813 */
2814 bool txPacketExists;
2815 UNSERIALIZE_SCALAR(txPacketExists);
2816 if (txPacketExists) {
2817 txPacket = new PacketData(16384);
2818 txPacket->unserialize("txPacket", cp, section);
2819 uint32_t txPktBufPtr;
2820 UNSERIALIZE_SCALAR(txPktBufPtr);
2821 txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
2822 } else
2823 txPacket = 0;
2824
2825 bool rxPacketExists;
2826 UNSERIALIZE_SCALAR(rxPacketExists);
2827 rxPacket = 0;
2828 if (rxPacketExists) {
2829 rxPacket = new PacketData(16384);
2830 rxPacket->unserialize("rxPacket", cp, section);
2831 uint32_t rxPktBufPtr;
2832 UNSERIALIZE_SCALAR(rxPktBufPtr);
2833 rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
2834 } else
2835 rxPacket = 0;
2836
2837 UNSERIALIZE_SCALAR(txXferLen);
2838 UNSERIALIZE_SCALAR(rxXferLen);
2839
2840 /*
2841 * Unserialize Cached Descriptors
2842 */
2843 UNSERIALIZE_SCALAR(rxDesc64.link);
2844 UNSERIALIZE_SCALAR(rxDesc64.bufptr);
2845 UNSERIALIZE_SCALAR(rxDesc64.cmdsts);
2846 UNSERIALIZE_SCALAR(rxDesc64.extsts);
2847 UNSERIALIZE_SCALAR(txDesc64.link);
2848 UNSERIALIZE_SCALAR(txDesc64.bufptr);
2849 UNSERIALIZE_SCALAR(txDesc64.cmdsts);
2850 UNSERIALIZE_SCALAR(txDesc64.extsts);
2851 UNSERIALIZE_SCALAR(rxDesc32.link);
2852 UNSERIALIZE_SCALAR(rxDesc32.bufptr);
2853 UNSERIALIZE_SCALAR(rxDesc32.cmdsts);
2854 UNSERIALIZE_SCALAR(rxDesc32.extsts);
2855 UNSERIALIZE_SCALAR(txDesc32.link);
2856 UNSERIALIZE_SCALAR(txDesc32.bufptr);
2857 UNSERIALIZE_SCALAR(txDesc32.cmdsts);
2858 UNSERIALIZE_SCALAR(txDesc32.extsts);
2859 UNSERIALIZE_SCALAR(extstsEnable);
2860
2861 /*
2862 * unserialize tx state machine
2863 */
2864 int txState;
2865 UNSERIALIZE_SCALAR(txState);
2866 this->txState = (TxState) txState;
2867 UNSERIALIZE_SCALAR(txEnable);
2868 UNSERIALIZE_SCALAR(CTDD);
2869 UNSERIALIZE_SCALAR(txFragPtr);
2870 UNSERIALIZE_SCALAR(txDescCnt);
2871 int txDmaState;
2872 UNSERIALIZE_SCALAR(txDmaState);
2873 this->txDmaState = (DmaState) txDmaState;
2874 UNSERIALIZE_SCALAR(txKickTick);
2875 if (txKickTick)
2876 txKickEvent.schedule(txKickTick);
2877
2878 /*
2879 * unserialize rx state machine
2880 */
2881 int rxState;
2882 UNSERIALIZE_SCALAR(rxState);
2883 this->rxState = (RxState) rxState;
2884 UNSERIALIZE_SCALAR(rxEnable);
2885 UNSERIALIZE_SCALAR(CRDD);
2886 UNSERIALIZE_SCALAR(rxPktBytes);
2887 UNSERIALIZE_SCALAR(rxFragPtr);
2888 UNSERIALIZE_SCALAR(rxDescCnt);
2889 int rxDmaState;
2890 UNSERIALIZE_SCALAR(rxDmaState);
2891 this->rxDmaState = (DmaState) rxDmaState;
2892 UNSERIALIZE_SCALAR(rxKickTick);
2893 if (rxKickTick)
2894 rxKickEvent.schedule(rxKickTick);
2895
2896 /*
2897 * Unserialize EEPROM state machine
2898 */
2899 int eepromState;
2900 UNSERIALIZE_SCALAR(eepromState);
2901 this->eepromState = (EEPROMState) eepromState;
2902 UNSERIALIZE_SCALAR(eepromClk);
2903 UNSERIALIZE_SCALAR(eepromBitsToRx);
2904 UNSERIALIZE_SCALAR(eepromOpcode);
2905 UNSERIALIZE_SCALAR(eepromAddress);
2906 UNSERIALIZE_SCALAR(eepromData);
2907
2908 /*
2909 * If there's a pending transmit, reschedule it now
2910 */
2911 Tick transmitTick;
2912 UNSERIALIZE_SCALAR(transmitTick);
2913 if (transmitTick)
2914 txEvent.schedule(curTick + transmitTick);
2915
2916 /*
2917 * unserialize receive address filter settings
2918 */
2919 UNSERIALIZE_SCALAR(rxFilterEnable);
2920 UNSERIALIZE_SCALAR(acceptBroadcast);
2921 UNSERIALIZE_SCALAR(acceptMulticast);
2922 UNSERIALIZE_SCALAR(acceptUnicast);
2923 UNSERIALIZE_SCALAR(acceptPerfect);
2924 UNSERIALIZE_SCALAR(acceptArp);
2925 UNSERIALIZE_SCALAR(multicastHashEnable);
2926
2927 /*
2928 * Keep track of pending interrupt status.
2929 */
2930 UNSERIALIZE_SCALAR(intrTick);
2931 UNSERIALIZE_SCALAR(cpuPendingIntr);
2932 Tick intrEventTick;
2933 UNSERIALIZE_SCALAR(intrEventTick);
2934 if (intrEventTick) {
2935 intrEvent = new IntrEvent(this, true);
2936 intrEvent->schedule(intrEventTick);
2937 }
2938
2939 /*
2940 * re-add addrRanges to bus bridges
2941 */
2942 if (pioInterface) {
2943 pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));
2944 pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));
2945 }
2946 }
2947
2948 Tick
2949 NSGigE::cacheAccess(MemReqPtr &req)
2950 {
2951 Addr daddr = req->paddr & 0xfff;
2952 DPRINTF(EthernetPIO, "timing access to paddr=%#x (daddr=%#x)\n",
2953 req->paddr, daddr);
2954
2955 if (!pioDelayWrite || !req->cmd.isWrite())
2956 return curTick + pioLatency;
2957
2958 int cpu = (req->xc->regs.ipr[TheISA::IPR_PALtemp16] >> 8) & 0xff;
2959 std::list<RegWriteData> &wq = writeQueue[cpu];
2960 if (wq.empty())
2961 panic("WriteQueue for cpu %d empty timing daddr=%#x", cpu, daddr);
2962
2963 const RegWriteData &data = wq.front();
2964 if (data.daddr != daddr)
2965 panic("read mismatch on cpu %d, daddr functional=%#x timing=%#x",
2966 cpu, data.daddr, daddr);
2967
2968 if (daddr == CR) {
2969 if ((data.value & (CR_TXD | CR_TXE)) == CR_TXE) {
2970 txEnable = true;
2971 if (txState == txIdle)
2972 txKick();
2973 }
2974
2975 if ((data.value & (CR_RXD | CR_RXE)) == CR_RXE) {
2976 rxEnable = true;
2977 if (rxState == rxIdle)
2978 rxKick();
2979 }
2980 }
2981
2982 wq.pop_front();
2983 return curTick + pioLatency;
2984 }
2985
// Configuration parameters for the NSGigEInt ethernet interface object.
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

    SimObjectParam<EtherInt *> peer;    // other end of the link (may be NULL)
    SimObjectParam<NSGigE *> device;    // owning NIC device

END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt)

    INIT_PARAM_DFLT(peer, "peer interface", NULL),
    INIT_PARAM(device, "Ethernet device of this interface")

END_INIT_SIM_OBJECT_PARAMS(NSGigEInt)
2999
3000 CREATE_SIM_OBJECT(NSGigEInt)
3001 {
3002 NSGigEInt *dev_int = new NSGigEInt(getInstanceName(), device);
3003
3004 EtherInt *p = (EtherInt *)peer;
3005 if (p) {
3006 dev_int->setPeer(p);
3007 p->setPeer(dev_int);
3008 }
3009
3010 return dev_int;
3011 }
3012
3013 REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt)
3014
3015
// Configuration parameters for the NSGigE device model.
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE)

    // State machine clock
    Param<Tick> clock;

    // PCI device identity and platform plumbing
    Param<Addr> addr;
    SimObjectParam<MemoryController *> mmu;
    SimObjectParam<PhysicalMemory *> physmem;
    SimObjectParam<PciConfigAll *> configspace;
    SimObjectParam<PciConfigData *> configdata;
    SimObjectParam<Platform *> platform;
    Param<uint32_t> pci_bus;
    Param<uint32_t> pci_dev;
    Param<uint32_t> pci_func;

    // Bus attachment and DMA/PIO timing knobs
    SimObjectParam<HierParams *> hier;
    SimObjectParam<Bus*> pio_bus;
    SimObjectParam<Bus*> dma_bus;
    SimObjectParam<Bus*> payload_bus;
    Param<bool> dma_desc_free;
    Param<bool> dma_data_free;
    Param<Tick> dma_read_delay;
    Param<Tick> dma_write_delay;
    Param<Tick> dma_read_factor;
    Param<Tick> dma_write_factor;
    Param<bool> dma_no_allocate;
    Param<Tick> pio_latency;
    Param<bool> pio_delay_write;
    Param<Tick> intr_delay;

    // Link delays and FIFO sizing
    Param<Tick> rx_delay;
    Param<Tick> tx_delay;
    Param<uint32_t> rx_fifo_size;
    Param<uint32_t> tx_fifo_size;

    // Receive filtering and MAC address
    Param<bool> rx_filter;
    Param<string> hardware_address;
    Param<bool> rx_thread;
    Param<bool> tx_thread;

END_DECLARE_SIM_OBJECT_PARAMS(NSGigE)
3056
// Parameter descriptions / defaults for NSGigE.
// NOTE(review): several parameters (pio_bus, dma_bus, pio_delay_write,
// rx_thread, tx_thread) have empty help strings — consider filling them
// in once their exact semantics are confirmed.
BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE)

    INIT_PARAM(clock, "State machine processor frequency"),

    INIT_PARAM(addr, "Device Address"),
    INIT_PARAM(mmu, "Memory Controller"),
    INIT_PARAM(physmem, "Physical Memory"),
    INIT_PARAM(configspace, "PCI Configspace"),
    INIT_PARAM(configdata, "PCI Config data"),
    INIT_PARAM(platform, "Platform"),
    INIT_PARAM(pci_bus, "PCI bus"),
    INIT_PARAM(pci_dev, "PCI device number"),
    INIT_PARAM(pci_func, "PCI function code"),

    INIT_PARAM(hier, "Hierarchy global variables"),
    INIT_PARAM(pio_bus, ""),
    INIT_PARAM(dma_bus, ""),
    INIT_PARAM(payload_bus, "The IO Bus to attach to for payload"),
    INIT_PARAM(dma_desc_free, "DMA of Descriptors is free"),
    INIT_PARAM(dma_data_free, "DMA of Data is free"),
    INIT_PARAM(dma_read_delay, "fixed delay for dma reads"),
    INIT_PARAM(dma_write_delay, "fixed delay for dma writes"),
    INIT_PARAM(dma_read_factor, "multiplier for dma reads"),
    INIT_PARAM(dma_write_factor, "multiplier for dma writes"),
    INIT_PARAM(dma_no_allocate, "Should DMA reads allocate cache lines"),
    INIT_PARAM(pio_latency, "Programmed IO latency in bus cycles"),
    INIT_PARAM(pio_delay_write, ""),
    INIT_PARAM(intr_delay, "Interrupt Delay in microseconds"),

    INIT_PARAM(rx_delay, "Receive Delay"),
    INIT_PARAM(tx_delay, "Transmit Delay"),
    INIT_PARAM(rx_fifo_size, "max size in bytes of rxFifo"),
    INIT_PARAM(tx_fifo_size, "max size in bytes of txFifo"),

    INIT_PARAM(rx_filter, "Enable Receive Filter"),
    INIT_PARAM(hardware_address, "Ethernet Hardware Address"),
    INIT_PARAM(rx_thread, ""),
    INIT_PARAM(tx_thread, "")

END_INIT_SIM_OBJECT_PARAMS(NSGigE)
3097
3098
3099 CREATE_SIM_OBJECT(NSGigE)
3100 {
3101 NSGigE::Params *params = new NSGigE::Params;
3102
3103 params->name = getInstanceName();
3104
3105 params->clock = clock;
3106
3107 params->mmu = mmu;
3108 params->pmem = physmem;
3109 params->configSpace = configspace;
3110 params->configData = configdata;
3111 params->plat = platform;
3112 params->busNum = pci_bus;
3113 params->deviceNum = pci_dev;
3114 params->functionNum = pci_func;
3115
3116 params->hier = hier;
3117 params->pio_bus = pio_bus;
3118 params->header_bus = dma_bus;
3119 params->payload_bus = payload_bus;
3120 params->dma_desc_free = dma_desc_free;
3121 params->dma_data_free = dma_data_free;
3122 params->dma_read_delay = dma_read_delay;
3123 params->dma_write_delay = dma_write_delay;
3124 params->dma_read_factor = dma_read_factor;
3125 params->dma_write_factor = dma_write_factor;
3126 params->dma_no_allocate = dma_no_allocate;
3127 params->pio_latency = pio_latency;
3128 params->pio_delay_write = pio_delay_write;
3129 params->intr_delay = intr_delay;
3130
3131 params->rx_delay = rx_delay;
3132 params->tx_delay = tx_delay;
3133 params->rx_fifo_size = rx_fifo_size;
3134 params->tx_fifo_size = tx_fifo_size;
3135
3136 params->rx_filter = rx_filter;
3137 params->eaddr = hardware_address;
3138 params->rx_thread = rx_thread;
3139 params->tx_thread = tx_thread;
3140
3141 return new NSGigE(params);
3142 }
3143
3144 REGISTER_SIM_OBJECT("NSGigE", NSGigE)