Changes to untemplate StaticInst and StaticInstPtr, change the isa to a namespace...
[gem5.git] / dev / ns_gige.cc
1 /*
2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /** @file
30 * Device module for modelling the National Semiconductor
31 * DP83820 ethernet controller. Does not support priority queueing
32 */
33 #include <cstdio>
34 #include <deque>
35 #include <string>
36
37 #include "base/inet.hh"
38 #include "cpu/exec_context.hh"
39 #include "dev/etherlink.hh"
40 #include "dev/ns_gige.hh"
41 #include "dev/pciconfigall.hh"
42 #include "mem/bus/bus.hh"
43 #include "mem/bus/dma_interface.hh"
44 #include "mem/bus/pio_interface.hh"
45 #include "mem/bus/pio_interface_impl.hh"
46 #include "mem/functional/memory_control.hh"
47 #include "mem/functional/physical.hh"
48 #include "sim/builder.hh"
49 #include "sim/debug.hh"
50 #include "sim/host.hh"
51 #include "sim/stats.hh"
52 #include "targetarch/vtophys.hh"
53
/**
 * Printable names for the receive-side state machine states, used for
 * debug output.  NOTE(review): presumably indexed by the rx state enum
 * declared in ns_gige.hh — the entry order must match that enum; confirm
 * before reordering.
 */
const char *NsRxStateStrings[] =
{
    "rxIdle",
    "rxDescRefr",
    "rxDescRead",
    "rxFifoBlock",
    "rxFragWrite",
    "rxDescWrite",
    "rxAdvance"
};
64
/**
 * Printable names for the transmit-side state machine states, used for
 * debug output.  NOTE(review): presumably indexed by the tx state enum
 * declared in ns_gige.hh — the entry order must match that enum; confirm
 * before reordering.
 */
const char *NsTxStateStrings[] =
{
    "txIdle",
    "txDescRefr",
    "txDescRead",
    "txFifoBlock",
    "txFragRead",
    "txDescWrite",
    "txAdvance"
};
75
/**
 * Printable names for the DMA engine states (shared by the rx and tx
 * DMA state members, e.g. rxDmaState/txDmaState), used for debug
 * output.  NOTE(review): entry order presumably matches the DMA state
 * enum in ns_gige.hh — confirm before reordering.
 */
const char *NsDmaState[] =
{
    "dmaIdle",
    "dmaReading",
    "dmaWriting",
    "dmaReadWaiting",
    "dmaWriteWaiting"
};
84
85 using namespace std;
86 using namespace Net;
87 using namespace TheISA;
88
89 ///////////////////////////////////////////////////////////////////////
90 //
91 // NSGigE PCI Device
92 //
/**
 * Construct the DP83820 device model.  Most members are initialized
 * directly from the Params object; the PIO and DMA bus interfaces are
 * only created when the corresponding bus parameters are supplied.
 */
NSGigE::NSGigE(Params *p)
    : PciDev(p), ioEnable(false),
      txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size),
      txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
      txXferLen(0), rxXferLen(0), clock(p->clock),
      txState(txIdle), txEnable(false), CTDD(false),
      txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
      rxEnable(false), CRDD(false), rxPktBytes(0),
      rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
      eepromState(eepromStart), rxDmaReadEvent(this), rxDmaWriteEvent(this),
      txDmaReadEvent(this), txDmaWriteEvent(this),
      dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free),
      txDelay(p->tx_delay), rxDelay(p->rx_delay),
      rxKickTick(0), rxKickEvent(this), txKickTick(0), txKickEvent(this),
      txEvent(this), rxFilterEnable(p->rx_filter), acceptBroadcast(false),
      acceptMulticast(false), acceptUnicast(false),
      acceptPerfect(false), acceptArp(false), multicastHashEnable(false),
      physmem(p->pmem), intrTick(0), cpuPendingIntr(false),
      intrEvent(0), interface(0)
{
    // Optional programmed-I/O interface: only built when a PIO bus is given.
    if (p->pio_bus) {
        pioInterface = newPioInterface(name() + ".pio", p->hier,
                                       p->pio_bus, this,
                                       &NSGigE::cacheAccess);
        pioLatency = p->pio_latency * p->pio_bus->clockRate;
    }

    // DMA interface: a payload bus requires a header bus; when only a
    // header bus is configured it carries both header and payload traffic.
    if (p->header_bus) {
        if (p->payload_bus)
            dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                                 p->header_bus,
                                                 p->payload_bus, 1,
                                                 p->dma_no_allocate);
        else
            dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                                 p->header_bus,
                                                 p->header_bus, 1,
                                                 p->dma_no_allocate);
    } else if (p->payload_bus)
        panic("Must define a header bus if defining a payload bus");

    // Delayed PIO writes only make sense when a PIO interface exists.
    pioDelayWrite = p->pio_delay_write && pioInterface;

    intrDelay = p->intr_delay;
    dmaReadDelay = p->dma_read_delay;
    dmaWriteDelay = p->dma_write_delay;
    dmaReadFactor = p->dma_read_factor;
    dmaWriteFactor = p->dma_write_factor;

    regsReset();
    // Seed the perfect-match filter ROM with the configured MAC address.
    memcpy(&rom.perfectMatch, p->eaddr.bytes(), ETH_ADDR_LEN);

    memset(&rxDesc32, 0, sizeof(rxDesc32));
    memset(&txDesc32, 0, sizeof(txDesc32));
    memset(&rxDesc64, 0, sizeof(rxDesc64));
    memset(&txDesc64, 0, sizeof(txDesc64));
}
150
/** Destructor; no explicit cleanup is performed here. */
NSGigE::~NSGigE()
{}
153
154 void
155 NSGigE::regStats()
156 {
157 txBytes
158 .name(name() + ".txBytes")
159 .desc("Bytes Transmitted")
160 .prereq(txBytes)
161 ;
162
163 rxBytes
164 .name(name() + ".rxBytes")
165 .desc("Bytes Received")
166 .prereq(rxBytes)
167 ;
168
169 txPackets
170 .name(name() + ".txPackets")
171 .desc("Number of Packets Transmitted")
172 .prereq(txBytes)
173 ;
174
175 rxPackets
176 .name(name() + ".rxPackets")
177 .desc("Number of Packets Received")
178 .prereq(rxBytes)
179 ;
180
181 txIpChecksums
182 .name(name() + ".txIpChecksums")
183 .desc("Number of tx IP Checksums done by device")
184 .precision(0)
185 .prereq(txBytes)
186 ;
187
188 rxIpChecksums
189 .name(name() + ".rxIpChecksums")
190 .desc("Number of rx IP Checksums done by device")
191 .precision(0)
192 .prereq(rxBytes)
193 ;
194
195 txTcpChecksums
196 .name(name() + ".txTcpChecksums")
197 .desc("Number of tx TCP Checksums done by device")
198 .precision(0)
199 .prereq(txBytes)
200 ;
201
202 rxTcpChecksums
203 .name(name() + ".rxTcpChecksums")
204 .desc("Number of rx TCP Checksums done by device")
205 .precision(0)
206 .prereq(rxBytes)
207 ;
208
209 txUdpChecksums
210 .name(name() + ".txUdpChecksums")
211 .desc("Number of tx UDP Checksums done by device")
212 .precision(0)
213 .prereq(txBytes)
214 ;
215
216 rxUdpChecksums
217 .name(name() + ".rxUdpChecksums")
218 .desc("Number of rx UDP Checksums done by device")
219 .precision(0)
220 .prereq(rxBytes)
221 ;
222
223 descDmaReads
224 .name(name() + ".descDMAReads")
225 .desc("Number of descriptors the device read w/ DMA")
226 .precision(0)
227 ;
228
229 descDmaWrites
230 .name(name() + ".descDMAWrites")
231 .desc("Number of descriptors the device wrote w/ DMA")
232 .precision(0)
233 ;
234
235 descDmaRdBytes
236 .name(name() + ".descDmaReadBytes")
237 .desc("number of descriptor bytes read w/ DMA")
238 .precision(0)
239 ;
240
241 descDmaWrBytes
242 .name(name() + ".descDmaWriteBytes")
243 .desc("number of descriptor bytes write w/ DMA")
244 .precision(0)
245 ;
246
247 txBandwidth
248 .name(name() + ".txBandwidth")
249 .desc("Transmit Bandwidth (bits/s)")
250 .precision(0)
251 .prereq(txBytes)
252 ;
253
254 rxBandwidth
255 .name(name() + ".rxBandwidth")
256 .desc("Receive Bandwidth (bits/s)")
257 .precision(0)
258 .prereq(rxBytes)
259 ;
260
261 totBandwidth
262 .name(name() + ".totBandwidth")
263 .desc("Total Bandwidth (bits/s)")
264 .precision(0)
265 .prereq(totBytes)
266 ;
267
268 totPackets
269 .name(name() + ".totPackets")
270 .desc("Total Packets")
271 .precision(0)
272 .prereq(totBytes)
273 ;
274
275 totBytes
276 .name(name() + ".totBytes")
277 .desc("Total Bytes")
278 .precision(0)
279 .prereq(totBytes)
280 ;
281
282 totPacketRate
283 .name(name() + ".totPPS")
284 .desc("Total Tranmission Rate (packets/s)")
285 .precision(0)
286 .prereq(totBytes)
287 ;
288
289 txPacketRate
290 .name(name() + ".txPPS")
291 .desc("Packet Tranmission Rate (packets/s)")
292 .precision(0)
293 .prereq(txBytes)
294 ;
295
296 rxPacketRate
297 .name(name() + ".rxPPS")
298 .desc("Packet Reception Rate (packets/s)")
299 .precision(0)
300 .prereq(rxBytes)
301 ;
302
303 postedSwi
304 .name(name() + ".postedSwi")
305 .desc("number of software interrupts posted to CPU")
306 .precision(0)
307 ;
308
309 totalSwi
310 .name(name() + ".totalSwi")
311 .desc("total number of Swi written to ISR")
312 .precision(0)
313 ;
314
315 coalescedSwi
316 .name(name() + ".coalescedSwi")
317 .desc("average number of Swi's coalesced into each post")
318 .precision(0)
319 ;
320
321 postedRxIdle
322 .name(name() + ".postedRxIdle")
323 .desc("number of rxIdle interrupts posted to CPU")
324 .precision(0)
325 ;
326
327 totalRxIdle
328 .name(name() + ".totalRxIdle")
329 .desc("total number of RxIdle written to ISR")
330 .precision(0)
331 ;
332
333 coalescedRxIdle
334 .name(name() + ".coalescedRxIdle")
335 .desc("average number of RxIdle's coalesced into each post")
336 .precision(0)
337 ;
338
339 postedRxOk
340 .name(name() + ".postedRxOk")
341 .desc("number of RxOk interrupts posted to CPU")
342 .precision(0)
343 ;
344
345 totalRxOk
346 .name(name() + ".totalRxOk")
347 .desc("total number of RxOk written to ISR")
348 .precision(0)
349 ;
350
351 coalescedRxOk
352 .name(name() + ".coalescedRxOk")
353 .desc("average number of RxOk's coalesced into each post")
354 .precision(0)
355 ;
356
357 postedRxDesc
358 .name(name() + ".postedRxDesc")
359 .desc("number of RxDesc interrupts posted to CPU")
360 .precision(0)
361 ;
362
363 totalRxDesc
364 .name(name() + ".totalRxDesc")
365 .desc("total number of RxDesc written to ISR")
366 .precision(0)
367 ;
368
369 coalescedRxDesc
370 .name(name() + ".coalescedRxDesc")
371 .desc("average number of RxDesc's coalesced into each post")
372 .precision(0)
373 ;
374
375 postedTxOk
376 .name(name() + ".postedTxOk")
377 .desc("number of TxOk interrupts posted to CPU")
378 .precision(0)
379 ;
380
381 totalTxOk
382 .name(name() + ".totalTxOk")
383 .desc("total number of TxOk written to ISR")
384 .precision(0)
385 ;
386
387 coalescedTxOk
388 .name(name() + ".coalescedTxOk")
389 .desc("average number of TxOk's coalesced into each post")
390 .precision(0)
391 ;
392
393 postedTxIdle
394 .name(name() + ".postedTxIdle")
395 .desc("number of TxIdle interrupts posted to CPU")
396 .precision(0)
397 ;
398
399 totalTxIdle
400 .name(name() + ".totalTxIdle")
401 .desc("total number of TxIdle written to ISR")
402 .precision(0)
403 ;
404
405 coalescedTxIdle
406 .name(name() + ".coalescedTxIdle")
407 .desc("average number of TxIdle's coalesced into each post")
408 .precision(0)
409 ;
410
411 postedTxDesc
412 .name(name() + ".postedTxDesc")
413 .desc("number of TxDesc interrupts posted to CPU")
414 .precision(0)
415 ;
416
417 totalTxDesc
418 .name(name() + ".totalTxDesc")
419 .desc("total number of TxDesc written to ISR")
420 .precision(0)
421 ;
422
423 coalescedTxDesc
424 .name(name() + ".coalescedTxDesc")
425 .desc("average number of TxDesc's coalesced into each post")
426 .precision(0)
427 ;
428
429 postedRxOrn
430 .name(name() + ".postedRxOrn")
431 .desc("number of RxOrn posted to CPU")
432 .precision(0)
433 ;
434
435 totalRxOrn
436 .name(name() + ".totalRxOrn")
437 .desc("total number of RxOrn written to ISR")
438 .precision(0)
439 ;
440
441 coalescedRxOrn
442 .name(name() + ".coalescedRxOrn")
443 .desc("average number of RxOrn's coalesced into each post")
444 .precision(0)
445 ;
446
447 coalescedTotal
448 .name(name() + ".coalescedTotal")
449 .desc("average number of interrupts coalesced into each post")
450 .precision(0)
451 ;
452
453 postedInterrupts
454 .name(name() + ".postedInterrupts")
455 .desc("number of posts to CPU")
456 .precision(0)
457 ;
458
459 droppedPackets
460 .name(name() + ".droppedPackets")
461 .desc("number of packets dropped")
462 .precision(0)
463 ;
464
465 coalescedSwi = totalSwi / postedInterrupts;
466 coalescedRxIdle = totalRxIdle / postedInterrupts;
467 coalescedRxOk = totalRxOk / postedInterrupts;
468 coalescedRxDesc = totalRxDesc / postedInterrupts;
469 coalescedTxOk = totalTxOk / postedInterrupts;
470 coalescedTxIdle = totalTxIdle / postedInterrupts;
471 coalescedTxDesc = totalTxDesc / postedInterrupts;
472 coalescedRxOrn = totalRxOrn / postedInterrupts;
473
474 coalescedTotal = (totalSwi + totalRxIdle + totalRxOk + totalRxDesc +
475 totalTxOk + totalTxIdle + totalTxDesc +
476 totalRxOrn) / postedInterrupts;
477
478 txBandwidth = txBytes * Stats::constant(8) / simSeconds;
479 rxBandwidth = rxBytes * Stats::constant(8) / simSeconds;
480 totBandwidth = txBandwidth + rxBandwidth;
481 totBytes = txBytes + rxBytes;
482 totPackets = txPackets + rxPackets;
483
484 txPacketRate = txPackets / simSeconds;
485 rxPacketRate = rxPackets / simSeconds;
486 }
487
488 /**
489 * This is to read the PCI general configuration registers
490 */
491 void
492 NSGigE::readConfig(int offset, int size, uint8_t *data)
493 {
494 if (offset < PCI_DEVICE_SPECIFIC)
495 PciDev::readConfig(offset, size, data);
496 else
497 panic("Device specific PCI config space not implemented!\n");
498 }
499
/**
 * This is to write to the PCI general configuration registers
 *
 * Generic writes are delegated to PciDev; afterwards, writes to the
 * command register and the BARs are snooped so device-local state
 * (ioEnable, PIO address ranges) stays in sync.
 */
void
NSGigE::writeConfig(int offset, int size, const uint8_t* data)
{
    if (offset < PCI_DEVICE_SPECIFIC)
        PciDev::writeConfig(offset, size, data);
    else
        panic("Device specific PCI config space not implemented!\n");

    // Need to catch writes to BARs to update the PIO interface
    switch (offset) {
        // seems to work fine without all these PCI settings, but i
        // put in the IO to double check, an assertion will fail if we
        // need to properly implement it
      case PCI_COMMAND:
        // Track the I/O-space-enable bit; the PIO read()/write()
        // handlers assert on ioEnable before touching registers.
        if (config.data[offset] & PCI_CMD_IOSE)
            ioEnable = true;
        else
            ioEnable = false;

#if 0
        if (config.data[offset] & PCI_CMD_BME) {
            bmEnabled = true;
        }
        else {
            bmEnabled = false;
        }

        if (config.data[offset] & PCI_CMD_MSE) {
            memEnable = true;
        }
        else {
            memEnable = false;
        }
#endif
        break;

      case PCI0_BASE_ADDR0:
        if (BARAddrs[0] != 0) {
            // Register the newly programmed BAR window with the PIO bus.
            if (pioInterface)
                pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));

            // NOTE(review): masks the BAR with the EV5 uncached-space
            // mask — presumably to keep the raw physical address;
            // confirm against the platform address map.
            BARAddrs[0] &= EV5::PAddrUncachedMask;
        }
        break;
      case PCI0_BASE_ADDR1:
        if (BARAddrs[1] != 0) {
            if (pioInterface)
                pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));

            BARAddrs[1] &= EV5::PAddrUncachedMask;
        }
        break;
    }
}
557
/**
 * This reads the device registers, which are detailed in the NS83820
 * spec sheet
 *
 * The physical address is masked down to an offset into the register
 * file.  Only 32-bit register reads are supported; MIB counters are
 * not modelled and always read as zero.
 */
Fault *
NSGigE::read(MemReqPtr &req, uint8_t *data)
{
    assert(ioEnable);

    //The mask is to give you only the offset into the device register file
    Addr daddr = req->paddr & 0xfff;
    DPRINTF(EthernetPIO, "read da=%#x pa=%#x va=%#x size=%d\n",
            daddr, req->paddr, req->vaddr, req->size);


    // there are some reserved registers, you can see ns_gige_reg.h and
    // the spec sheet for details
    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        // this range is serviced from PCI configuration space
        readConfig(daddr & 0xff, req->size, data);
        return NoFault;
    } else if (daddr >= MIB_START && daddr <= MIB_END) {
        // don't implement all the MIB's.  hopefully the kernel
        // doesn't actually DEPEND upon their values
        // MIB are just hardware stats keepers
        uint32_t &reg = *(uint32_t *) data;
        reg = 0;
        return NoFault;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    switch (req->size) {
      case sizeof(uint32_t):
        {
            uint32_t &reg = *(uint32_t *)data;
            uint16_t rfaddr;

            switch (daddr) {
              case CR:
                reg = regs.command;
                //these are supposed to be cleared on a read
                reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
                break;

              case CFGR:
                reg = regs.config;
                break;

              case MEAR:
                reg = regs.mear;
                break;

              case PTSCR:
                reg = regs.ptscr;
                break;

              case ISR:
                // reading the ISR clears all implemented interrupt bits
                reg = regs.isr;
                devIntrClear(ISR_ALL);
                break;

              case IMR:
                reg = regs.imr;
                break;

              case IER:
                reg = regs.ier;
                break;

              case IHR:
                reg = regs.ihr;
                break;

              case TXDP:
                reg = regs.txdp;
                break;

              case TXDP_HI:
                reg = regs.txdp_hi;
                break;

              case TX_CFG:
                reg = regs.txcfg;
                break;

              case GPIOR:
                reg = regs.gpior;
                break;

              case RXDP:
                reg = regs.rxdp;
                break;

              case RXDP_HI:
                reg = regs.rxdp_hi;
                break;

              case RX_CFG:
                reg = regs.rxcfg;
                break;

              case PQCR:
                reg = regs.pqcr;
                break;

              case WCSR:
                reg = regs.wcsr;
                break;

              case PCR:
                reg = regs.pcr;
                break;

              // see the spec sheet for how RFCR and RFDR work
              // basically, you write to RFCR to tell the machine
              // what you want to do next, then you act upon RFDR,
              // and the device will be prepared b/c of what you
              // wrote to RFCR
              case RFCR:
                reg = regs.rfcr;
                break;

              case RFDR:
                rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
                switch (rfaddr) {
                  // Read from perfect match ROM octets
                  case 0x000:
                    reg = rom.perfectMatch[1];
                    reg = reg << 8;
                    reg += rom.perfectMatch[0];
                    break;
                  case 0x002:
                    reg = rom.perfectMatch[3] << 8;
                    reg += rom.perfectMatch[2];
                    break;
                  case 0x004:
                    reg = rom.perfectMatch[5] << 8;
                    reg += rom.perfectMatch[4];
                    break;
                  default:
                    // Read filter hash table
                    if (rfaddr >= FHASH_ADDR &&
                        rfaddr < FHASH_ADDR + FHASH_SIZE) {

                        // Only word-aligned reads supported
                        if (rfaddr % 2)
                            panic("unaligned read from filter hash table!");

                        reg = rom.filterHash[rfaddr - FHASH_ADDR + 1] << 8;
                        reg += rom.filterHash[rfaddr - FHASH_ADDR];
                        break;
                    }

                    panic("reading RFDR for something other than pattern"
                          " matching or hashing! %#x\n", rfaddr);
                }
                break;

              case SRR:
                reg = regs.srr;
                break;

              case MIBC:
                reg = regs.mibc;
                // these control bits read back as cleared
                reg &= ~(MIBC_MIBS | MIBC_ACLR);
                break;

              case VRCR:
                reg = regs.vrcr;
                break;

              case VTCR:
                reg = regs.vtcr;
                break;

              case VDR:
                reg = regs.vdr;
                break;

              case CCSR:
                reg = regs.ccsr;
                break;

              case TBICR:
                reg = regs.tbicr;
                break;

              case TBISR:
                reg = regs.tbisr;
                break;

              case TANAR:
                reg = regs.tanar;
                break;

              case TANLPAR:
                reg = regs.tanlpar;
                break;

              case TANER:
                reg = regs.taner;
                break;

              case TESR:
                reg = regs.tesr;
                break;

              case M5REG:
                // simulator-private register exposing configured
                // rx/tx thread flags to the guest
                reg = 0;
                if (params()->rx_thread)
                    reg |= M5REG_RX_THREAD;
                if (params()->tx_thread)
                    reg |= M5REG_TX_THREAD;
                break;

              default:
                panic("reading unimplemented register: addr=%#x", daddr);
            }

            DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
                    daddr, reg, reg);
        }
        break;

      default:
        panic("accessing register with invalid size: addr=%#x, size=%d",
              daddr, req->size);
    }

    return NoFault;
}
790
791 Fault *
792 NSGigE::write(MemReqPtr &req, const uint8_t *data)
793 {
794 assert(ioEnable);
795
796 Addr daddr = req->paddr & 0xfff;
797 DPRINTF(EthernetPIO, "write da=%#x pa=%#x va=%#x size=%d\n",
798 daddr, req->paddr, req->vaddr, req->size);
799
800 if (daddr > LAST && daddr <= RESERVED) {
801 panic("Accessing reserved register");
802 } else if (daddr > RESERVED && daddr <= 0x3FC) {
803 writeConfig(daddr & 0xff, req->size, data);
804 return NoFault;
805 } else if (daddr > 0x3FC)
806 panic("Something is messed up!\n");
807
808 if (pioDelayWrite) {
809 int cpu = (req->xc->regs.ipr[TheISA::IPR_PALtemp16] >> 8) & 0xff;
810 if (cpu >= writeQueue.size())
811 writeQueue.resize(cpu + 1);
812 writeQueue[cpu].push_back(RegWriteData(daddr, *(uint32_t *)data));
813 }
814
815 if (req->size == sizeof(uint32_t)) {
816 uint32_t reg = *(uint32_t *)data;
817 uint16_t rfaddr;
818
819 DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);
820
821 switch (daddr) {
822 case CR:
823 regs.command = reg;
824 if (reg & CR_TXD) {
825 txEnable = false;
826 } else if (reg & CR_TXE) {
827 if (!pioDelayWrite) {
828 txEnable = true;
829
830 // the kernel is enabling the transmit machine
831 if (txState == txIdle)
832 txKick();
833 }
834 }
835
836 if (reg & CR_RXD) {
837 rxEnable = false;
838 } else if (reg & CR_RXE) {
839 if (!pioDelayWrite) {
840 rxEnable = true;
841
842 if (rxState == rxIdle)
843 rxKick();
844 }
845 }
846
847 if (reg & CR_TXR)
848 txReset();
849
850 if (reg & CR_RXR)
851 rxReset();
852
853 if (reg & CR_SWI)
854 devIntrPost(ISR_SWI);
855
856 if (reg & CR_RST) {
857 txReset();
858 rxReset();
859
860 regsReset();
861 }
862 break;
863
864 case CFGR:
865 if (reg & CFGR_LNKSTS ||
866 reg & CFGR_SPDSTS ||
867 reg & CFGR_DUPSTS ||
868 reg & CFGR_RESERVED ||
869 reg & CFGR_T64ADDR ||
870 reg & CFGR_PCI64_DET)
871
872 // First clear all writable bits
873 regs.config &= CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
874 CFGR_RESERVED | CFGR_T64ADDR |
875 CFGR_PCI64_DET;
876 // Now set the appropriate writable bits
877 regs.config |= reg & ~(CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
878 CFGR_RESERVED | CFGR_T64ADDR |
879 CFGR_PCI64_DET);
880
881 // all these #if 0's are because i don't THINK the kernel needs to
882 // have these implemented. if there is a problem relating to one of
883 // these, you may need to add functionality in.
884 if (reg & CFGR_TBI_EN) ;
885 if (reg & CFGR_MODE_1000) ;
886
887 if (reg & CFGR_AUTO_1000)
888 panic("CFGR_AUTO_1000 not implemented!\n");
889
890 if (reg & CFGR_PINT_DUPSTS ||
891 reg & CFGR_PINT_LNKSTS ||
892 reg & CFGR_PINT_SPDSTS)
893 ;
894
895 if (reg & CFGR_TMRTEST) ;
896 if (reg & CFGR_MRM_DIS) ;
897 if (reg & CFGR_MWI_DIS) ;
898
899 if (reg & CFGR_T64ADDR) ;
900 // panic("CFGR_T64ADDR is read only register!\n");
901
902 if (reg & CFGR_PCI64_DET)
903 panic("CFGR_PCI64_DET is read only register!\n");
904
905 if (reg & CFGR_DATA64_EN) ;
906 if (reg & CFGR_M64ADDR) ;
907 if (reg & CFGR_PHY_RST) ;
908 if (reg & CFGR_PHY_DIS) ;
909
910 if (reg & CFGR_EXTSTS_EN)
911 extstsEnable = true;
912 else
913 extstsEnable = false;
914
915 if (reg & CFGR_REQALG) ;
916 if (reg & CFGR_SB) ;
917 if (reg & CFGR_POW) ;
918 if (reg & CFGR_EXD) ;
919 if (reg & CFGR_PESEL) ;
920 if (reg & CFGR_BROM_DIS) ;
921 if (reg & CFGR_EXT_125) ;
922 if (reg & CFGR_BEM) ;
923 break;
924
925 case MEAR:
926 // Clear writable bits
927 regs.mear &= MEAR_EEDO;
928 // Set appropriate writable bits
929 regs.mear |= reg & ~MEAR_EEDO;
930
931 // FreeBSD uses the EEPROM to read PMATCH (for the MAC address)
932 // even though it could get it through RFDR
933 if (reg & MEAR_EESEL) {
934 // Rising edge of clock
935 if (reg & MEAR_EECLK && !eepromClk)
936 eepromKick();
937 }
938 else {
939 eepromState = eepromStart;
940 regs.mear &= ~MEAR_EEDI;
941 }
942
943 eepromClk = reg & MEAR_EECLK;
944
945 // since phy is completely faked, MEAR_MD* don't matter
946 if (reg & MEAR_MDIO) ;
947 if (reg & MEAR_MDDIR) ;
948 if (reg & MEAR_MDC) ;
949 break;
950
951 case PTSCR:
952 regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
953 // these control BISTs for various parts of chip - we
954 // don't care or do just fake that the BIST is done
955 if (reg & PTSCR_RBIST_EN)
956 regs.ptscr |= PTSCR_RBIST_DONE;
957 if (reg & PTSCR_EEBIST_EN)
958 regs.ptscr &= ~PTSCR_EEBIST_EN;
959 if (reg & PTSCR_EELOAD_EN)
960 regs.ptscr &= ~PTSCR_EELOAD_EN;
961 break;
962
963 case ISR: /* writing to the ISR has no effect */
964 panic("ISR is a read only register!\n");
965
966 case IMR:
967 regs.imr = reg;
968 devIntrChangeMask();
969 break;
970
971 case IER:
972 regs.ier = reg;
973 break;
974
975 case IHR:
976 regs.ihr = reg;
977 /* not going to implement real interrupt holdoff */
978 break;
979
980 case TXDP:
981 regs.txdp = (reg & 0xFFFFFFFC);
982 assert(txState == txIdle);
983 CTDD = false;
984 break;
985
986 case TXDP_HI:
987 regs.txdp_hi = reg;
988 break;
989
990 case TX_CFG:
991 regs.txcfg = reg;
992 #if 0
993 if (reg & TX_CFG_CSI) ;
994 if (reg & TX_CFG_HBI) ;
995 if (reg & TX_CFG_MLB) ;
996 if (reg & TX_CFG_ATP) ;
997 if (reg & TX_CFG_ECRETRY) {
998 /*
999 * this could easily be implemented, but considering
1000 * the network is just a fake pipe, wouldn't make
1001 * sense to do this
1002 */
1003 }
1004
1005 if (reg & TX_CFG_BRST_DIS) ;
1006 #endif
1007
1008 #if 0
1009 /* we handle our own DMA, ignore the kernel's exhortations */
1010 if (reg & TX_CFG_MXDMA) ;
1011 #endif
1012
1013 // also, we currently don't care about fill/drain
1014 // thresholds though this may change in the future with
1015 // more realistic networks or a driver which changes it
1016 // according to feedback
1017
1018 break;
1019
1020 case GPIOR:
1021 // Only write writable bits
1022 regs.gpior &= GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
1023 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN;
1024 regs.gpior |= reg & ~(GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
1025 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN);
1026 /* these just control general purpose i/o pins, don't matter */
1027 break;
1028
1029 case RXDP:
1030 regs.rxdp = reg;
1031 CRDD = false;
1032 break;
1033
1034 case RXDP_HI:
1035 regs.rxdp_hi = reg;
1036 break;
1037
1038 case RX_CFG:
1039 regs.rxcfg = reg;
1040 #if 0
1041 if (reg & RX_CFG_AEP) ;
1042 if (reg & RX_CFG_ARP) ;
1043 if (reg & RX_CFG_STRIPCRC) ;
1044 if (reg & RX_CFG_RX_RD) ;
1045 if (reg & RX_CFG_ALP) ;
1046 if (reg & RX_CFG_AIRL) ;
1047
1048 /* we handle our own DMA, ignore what kernel says about it */
1049 if (reg & RX_CFG_MXDMA) ;
1050
1051 //also, we currently don't care about fill/drain thresholds
1052 //though this may change in the future with more realistic
1053 //networks or a driver which changes it according to feedback
1054 if (reg & (RX_CFG_DRTH | RX_CFG_DRTH0)) ;
1055 #endif
1056 break;
1057
1058 case PQCR:
1059 /* there is no priority queueing used in the linux 2.6 driver */
1060 regs.pqcr = reg;
1061 break;
1062
1063 case WCSR:
1064 /* not going to implement wake on LAN */
1065 regs.wcsr = reg;
1066 break;
1067
1068 case PCR:
1069 /* not going to implement pause control */
1070 regs.pcr = reg;
1071 break;
1072
1073 case RFCR:
1074 regs.rfcr = reg;
1075
1076 rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
1077 acceptBroadcast = (reg & RFCR_AAB) ? true : false;
1078 acceptMulticast = (reg & RFCR_AAM) ? true : false;
1079 acceptUnicast = (reg & RFCR_AAU) ? true : false;
1080 acceptPerfect = (reg & RFCR_APM) ? true : false;
1081 acceptArp = (reg & RFCR_AARP) ? true : false;
1082 multicastHashEnable = (reg & RFCR_MHEN) ? true : false;
1083
1084 #if 0
1085 if (reg & RFCR_APAT)
1086 panic("RFCR_APAT not implemented!\n");
1087 #endif
1088 if (reg & RFCR_UHEN)
1089 panic("Unicast hash filtering not used by drivers!\n");
1090
1091 if (reg & RFCR_ULM)
1092 panic("RFCR_ULM not implemented!\n");
1093
1094 break;
1095
1096 case RFDR:
1097 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
1098 switch (rfaddr) {
1099 case 0x000:
1100 rom.perfectMatch[0] = (uint8_t)reg;
1101 rom.perfectMatch[1] = (uint8_t)(reg >> 8);
1102 break;
1103 case 0x002:
1104 rom.perfectMatch[2] = (uint8_t)reg;
1105 rom.perfectMatch[3] = (uint8_t)(reg >> 8);
1106 break;
1107 case 0x004:
1108 rom.perfectMatch[4] = (uint8_t)reg;
1109 rom.perfectMatch[5] = (uint8_t)(reg >> 8);
1110 break;
1111 default:
1112
1113 if (rfaddr >= FHASH_ADDR &&
1114 rfaddr < FHASH_ADDR + FHASH_SIZE) {
1115
1116 // Only word-aligned writes supported
1117 if (rfaddr % 2)
1118 panic("unaligned write to filter hash table!");
1119
1120 rom.filterHash[rfaddr - FHASH_ADDR] = (uint8_t)reg;
1121 rom.filterHash[rfaddr - FHASH_ADDR + 1]
1122 = (uint8_t)(reg >> 8);
1123 break;
1124 }
1125 panic("writing RFDR for something other than pattern matching\
1126 or hashing! %#x\n", rfaddr);
1127 }
1128
1129 case BRAR:
1130 regs.brar = reg;
1131 break;
1132
1133 case BRDR:
1134 panic("the driver never uses BRDR, something is wrong!\n");
1135
1136 case SRR:
1137 panic("SRR is read only register!\n");
1138
1139 case MIBC:
1140 panic("the driver never uses MIBC, something is wrong!\n");
1141
1142 case VRCR:
1143 regs.vrcr = reg;
1144 break;
1145
1146 case VTCR:
1147 regs.vtcr = reg;
1148 break;
1149
1150 case VDR:
1151 panic("the driver never uses VDR, something is wrong!\n");
1152
1153 case CCSR:
1154 /* not going to implement clockrun stuff */
1155 regs.ccsr = reg;
1156 break;
1157
1158 case TBICR:
1159 regs.tbicr = reg;
1160 if (reg & TBICR_MR_LOOPBACK)
1161 panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
1162
1163 if (reg & TBICR_MR_AN_ENABLE) {
1164 regs.tanlpar = regs.tanar;
1165 regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
1166 }
1167
1168 #if 0
1169 if (reg & TBICR_MR_RESTART_AN) ;
1170 #endif
1171
1172 break;
1173
1174 case TBISR:
1175 panic("TBISR is read only register!\n");
1176
1177 case TANAR:
1178 // Only write the writable bits
1179 regs.tanar &= TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED;
1180 regs.tanar |= reg & ~(TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED);
1181
1182 // Pause capability unimplemented
1183 #if 0
1184 if (reg & TANAR_PS2) ;
1185 if (reg & TANAR_PS1) ;
1186 #endif
1187
1188 break;
1189
1190 case TANLPAR:
1191 panic("this should only be written to by the fake phy!\n");
1192
1193 case TANER:
1194 panic("TANER is read only register!\n");
1195
1196 case TESR:
1197 regs.tesr = reg;
1198 break;
1199
1200 default:
1201 panic("invalid register access daddr=%#x", daddr);
1202 }
1203 } else {
1204 panic("Invalid Request Size");
1205 }
1206
1207 return NoFault;
1208 }
1209
1210 void
1211 NSGigE::devIntrPost(uint32_t interrupts)
1212 {
1213 if (interrupts & ISR_RESERVE)
1214 panic("Cannot set a reserved interrupt");
1215
1216 if (interrupts & ISR_NOIMPL)
1217 warn("interrupt not implemented %#x\n", interrupts);
1218
1219 interrupts &= ISR_IMPL;
1220 regs.isr |= interrupts;
1221
1222 if (interrupts & regs.imr) {
1223 if (interrupts & ISR_SWI) {
1224 totalSwi++;
1225 }
1226 if (interrupts & ISR_RXIDLE) {
1227 totalRxIdle++;
1228 }
1229 if (interrupts & ISR_RXOK) {
1230 totalRxOk++;
1231 }
1232 if (interrupts & ISR_RXDESC) {
1233 totalRxDesc++;
1234 }
1235 if (interrupts & ISR_TXOK) {
1236 totalTxOk++;
1237 }
1238 if (interrupts & ISR_TXIDLE) {
1239 totalTxIdle++;
1240 }
1241 if (interrupts & ISR_TXDESC) {
1242 totalTxDesc++;
1243 }
1244 if (interrupts & ISR_RXORN) {
1245 totalRxOrn++;
1246 }
1247 }
1248
1249 DPRINTF(EthernetIntr,
1250 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
1251 interrupts, regs.isr, regs.imr);
1252
1253 if ((regs.isr & regs.imr)) {
1254 Tick when = curTick;
1255 if ((regs.isr & regs.imr & ISR_NODELAY) == 0)
1256 when += intrDelay;
1257 cpuIntrPost(when);
1258 }
1259 }
1260
1261 /* writing this interrupt counting stats inside this means that this function
1262 is now limited to being used to clear all interrupts upon the kernel
1263 reading isr and servicing. just telling you in case you were thinking
1264 of expanding use.
1265 */
1266 void
1267 NSGigE::devIntrClear(uint32_t interrupts)
1268 {
1269 if (interrupts & ISR_RESERVE)
1270 panic("Cannot clear a reserved interrupt");
1271
1272 if (regs.isr & regs.imr & ISR_SWI) {
1273 postedSwi++;
1274 }
1275 if (regs.isr & regs.imr & ISR_RXIDLE) {
1276 postedRxIdle++;
1277 }
1278 if (regs.isr & regs.imr & ISR_RXOK) {
1279 postedRxOk++;
1280 }
1281 if (regs.isr & regs.imr & ISR_RXDESC) {
1282 postedRxDesc++;
1283 }
1284 if (regs.isr & regs.imr & ISR_TXOK) {
1285 postedTxOk++;
1286 }
1287 if (regs.isr & regs.imr & ISR_TXIDLE) {
1288 postedTxIdle++;
1289 }
1290 if (regs.isr & regs.imr & ISR_TXDESC) {
1291 postedTxDesc++;
1292 }
1293 if (regs.isr & regs.imr & ISR_RXORN) {
1294 postedRxOrn++;
1295 }
1296
1297 if (regs.isr & regs.imr & ISR_IMPL)
1298 postedInterrupts++;
1299
1300 interrupts &= ~ISR_NOIMPL;
1301 regs.isr &= ~interrupts;
1302
1303 DPRINTF(EthernetIntr,
1304 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
1305 interrupts, regs.isr, regs.imr);
1306
1307 if (!(regs.isr & regs.imr))
1308 cpuIntrClear();
1309 }
1310
1311 void
1312 NSGigE::devIntrChangeMask()
1313 {
1314 DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
1315 regs.isr, regs.imr, regs.isr & regs.imr);
1316
1317 if (regs.isr & regs.imr)
1318 cpuIntrPost(curTick);
1319 else
1320 cpuIntrClear();
1321 }
1322
void
NSGigE::cpuIntrPost(Tick when)
{
    // If the interrupt you want to post is later than an interrupt
    // already scheduled, just let it post in the coming one and don't
    // schedule another.
    // HOWEVER, must be sure that the scheduled intrTick is in the
    // future (this was formerly the source of a bug)
    /**
     * @todo this warning should be removed and the intrTick code should
     * be fixed.
     */
    assert(when >= curTick);
    assert(intrTick >= curTick || intrTick == 0);
    if (when > intrTick && intrTick != 0) {
        DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
                intrTick);
        return;
    }

    intrTick = when;
    // Defensive clamp: intrTick must never end up in the past.  The
    // debug_break() lets an attached debugger catch this (believed
    // impossible) case before it is papered over.
    if (intrTick < curTick) {
        debug_break();
        intrTick = curTick;
    }

    DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
            intrTick);

    // Replace any previously scheduled interrupt event with one for the
    // new (earlier) time.  NOTE(review): the squashed event appears to
    // rely on the 'true' ctor flag for cleanup -- confirm IntrEvent's
    // second argument marks it auto-delete.
    if (intrEvent)
        intrEvent->squash();
    intrEvent = new IntrEvent(this, true);
    intrEvent->schedule(intrTick);
}
1357
1358 void
1359 NSGigE::cpuInterrupt()
1360 {
1361 assert(intrTick == curTick);
1362
1363 // Whether or not there's a pending interrupt, we don't care about
1364 // it anymore
1365 intrEvent = 0;
1366 intrTick = 0;
1367
1368 // Don't send an interrupt if there's already one
1369 if (cpuPendingIntr) {
1370 DPRINTF(EthernetIntr,
1371 "would send an interrupt now, but there's already pending\n");
1372 } else {
1373 // Send interrupt
1374 cpuPendingIntr = true;
1375
1376 DPRINTF(EthernetIntr, "posting interrupt\n");
1377 intrPost();
1378 }
1379 }
1380
1381 void
1382 NSGigE::cpuIntrClear()
1383 {
1384 if (!cpuPendingIntr)
1385 return;
1386
1387 if (intrEvent) {
1388 intrEvent->squash();
1389 intrEvent = 0;
1390 }
1391
1392 intrTick = 0;
1393
1394 cpuPendingIntr = false;
1395
1396 DPRINTF(EthernetIntr, "clearing interrupt\n");
1397 intrClear();
1398 }
1399
// Report whether the device is currently asserting its CPU interrupt line.
bool
NSGigE::cpuIntrPending() const
{ return cpuPendingIntr; }
1403
1404 void
1405 NSGigE::txReset()
1406 {
1407
1408 DPRINTF(Ethernet, "transmit reset\n");
1409
1410 CTDD = false;
1411 txEnable = false;;
1412 txFragPtr = 0;
1413 assert(txDescCnt == 0);
1414 txFifo.clear();
1415 txState = txIdle;
1416 assert(txDmaState == dmaIdle);
1417 }
1418
1419 void
1420 NSGigE::rxReset()
1421 {
1422 DPRINTF(Ethernet, "receive reset\n");
1423
1424 CRDD = false;
1425 assert(rxPktBytes == 0);
1426 rxEnable = false;
1427 rxFragPtr = 0;
1428 assert(rxDescCnt == 0);
1429 assert(rxDmaState == dmaIdle);
1430 rxFifo.clear();
1431 rxState = rxIdle;
1432 }
1433
void
NSGigE::regsReset()
{
    // Reset every device register to zero, then apply the non-zero
    // power-on defaults, and finally clear the receive-filter flags.
    memset(&regs, 0, sizeof(regs));
    regs.config = (CFGR_LNKSTS | CFGR_TBI_EN | CFGR_MODE_1000);
    regs.mear = 0x12;
    regs.txcfg = 0x120; // set drain threshold to 1024 bytes and
                        // fill threshold to 32 bytes
    regs.rxcfg = 0x4; // set drain threshold to 16 bytes
    regs.srr = 0x0103; // set the silicon revision to rev B or 0x103
    regs.mibc = MIBC_FRZ;
    regs.vdr = 0x81; // set the vlan tag type to 802.1q
    regs.tesr = 0xc000; // TBI capable of both full and half duplex
    regs.brar = 0xffffffff;

    // Receive filter: reject everything until the driver programs it.
    extstsEnable = false;
    acceptBroadcast = false;
    acceptMulticast = false;
    acceptUnicast = false;
    acceptPerfect = false;
    acceptArp = false;
}
1456
void
NSGigE::rxDmaReadCopy()
{
    // Complete the pending RX DMA read: copy from simulated physical
    // memory into the staging buffer and return the engine to idle.
    assert(rxDmaState == dmaReading);

    physmem->dma_read((uint8_t *)rxDmaData, rxDmaAddr, rxDmaLen);
    rxDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
            rxDmaAddr, rxDmaLen);
    DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
}
1469
1470 bool
1471 NSGigE::doRxDmaRead()
1472 {
1473 assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
1474 rxDmaState = dmaReading;
1475
1476 if (dmaInterface && !rxDmaFree) {
1477 if (dmaInterface->busy())
1478 rxDmaState = dmaReadWaiting;
1479 else
1480 dmaInterface->doDMA(Read, rxDmaAddr, rxDmaLen, curTick,
1481 &rxDmaReadEvent, true);
1482 return true;
1483 }
1484
1485 if (dmaReadDelay == 0 && dmaReadFactor == 0) {
1486 rxDmaReadCopy();
1487 return false;
1488 }
1489
1490 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
1491 Tick start = curTick + dmaReadDelay + factor;
1492 rxDmaReadEvent.schedule(start);
1493 return true;
1494 }
1495
void
NSGigE::rxDmaReadDone()
{
    // Event callback for a delayed RX DMA read: perform the copy, then
    // restart the state machines.
    assert(rxDmaState == dmaReading);
    rxDmaReadCopy();

    // If the transmit state machine has a pending DMA, let it go first
    if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
        txKick();

    rxKick();
}
1508
void
NSGigE::rxDmaWriteCopy()
{
    // Complete the pending RX DMA write: copy the staged data into
    // simulated physical memory and return the engine to idle.
    assert(rxDmaState == dmaWriting);

    physmem->dma_write(rxDmaAddr, (uint8_t *)rxDmaData, rxDmaLen);
    rxDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
            rxDmaAddr, rxDmaLen);
    DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
}
1521
1522 bool
1523 NSGigE::doRxDmaWrite()
1524 {
1525 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
1526 rxDmaState = dmaWriting;
1527
1528 if (dmaInterface && !rxDmaFree) {
1529 if (dmaInterface->busy())
1530 rxDmaState = dmaWriteWaiting;
1531 else
1532 dmaInterface->doDMA(WriteInvalidate, rxDmaAddr, rxDmaLen, curTick,
1533 &rxDmaWriteEvent, true);
1534 return true;
1535 }
1536
1537 if (dmaWriteDelay == 0 && dmaWriteFactor == 0) {
1538 rxDmaWriteCopy();
1539 return false;
1540 }
1541
1542 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
1543 Tick start = curTick + dmaWriteDelay + factor;
1544 rxDmaWriteEvent.schedule(start);
1545 return true;
1546 }
1547
void
NSGigE::rxDmaWriteDone()
{
    // Event callback for a delayed RX DMA write: perform the copy, then
    // restart the state machines.
    assert(rxDmaState == dmaWriting);
    rxDmaWriteCopy();

    // If the transmit state machine has a pending DMA, let it go first
    if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
        txKick();

    rxKick();
}
1560
/**
 * Run the receive-side state machine until it blocks on a pending DMA,
 * an empty receive FIFO, or a disabled receiver.  Jumping to "next"
 * advances to the following state; jumping to "exit" leaves the loop
 * until a DMA-completion event (or the kick event) re-enters it.
 */
void
NSGigE::rxKick()
{
    bool is64bit = (bool)(regs.config & CFGR_M64ADDR);

    DPRINTF(EthernetSM,
            "receive kick rxState=%s (rxBuf.size=%d) %d-bit\n",
            NsRxStateStrings[rxState], rxFifo.size(), is64bit ? 64 : 32);

    // References pick the 64- or 32-bit descriptor image so the state
    // machine below can be written once for both modes.
    Addr link, bufptr;
    uint32_t &cmdsts = is64bit ? rxDesc64.cmdsts : rxDesc32.cmdsts;
    uint32_t &extsts = is64bit ? rxDesc64.extsts : rxDesc32.extsts;

  next:
    if (clock) {
        if (rxKickTick > curTick) {
            DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
                    rxKickTick);

            goto exit;
        }

        // Go to the next state machine clock tick.
        rxKickTick = curTick + cycles(1);
    }

    // Retry any DMA that was previously blocked on a busy interface.
    switch(rxDmaState) {
      case dmaReadWaiting:
        if (doRxDmaRead())
            goto exit;
        break;
      case dmaWriteWaiting:
        if (doRxDmaWrite())
            goto exit;
        break;
      default:
        break;
    }

    link = is64bit ? (Addr)rxDesc64.link : (Addr)rxDesc32.link;
    bufptr = is64bit ? (Addr)rxDesc64.bufptr : (Addr)rxDesc32.bufptr;

    // see state machine from spec for details
    // the way this works is, if you finish work on one state and can
    // go directly to another, you do that through jumping to the
    // label "next". however, if you have intermediate work, like DMA
    // so that you can't go to the next state yet, you go to exit and
    // exit the loop. however, when the DMA is done it will trigger
    // an event and come back to this loop.
    switch (rxState) {
      case rxIdle:
        if (!rxEnable) {
            DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n");
            goto exit;
        }

        if (CRDD) {
            // Current descriptor was already consumed: refresh only its
            // link field to see if the driver appended more descriptors.
            rxState = rxDescRefr;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData =
                is64bit ? (void *)&rxDesc64.link : (void *)&rxDesc32.link;
            rxDmaLen = is64bit ? sizeof(rxDesc64.link) : sizeof(rxDesc32.link);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        } else {
            // Fetch the whole descriptor at rxdp.
            rxState = rxDescRead;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
            rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      case rxDescRefr:
        if (rxDmaState != dmaIdle)
            goto exit;

        rxState = rxAdvance;
        break;

      case rxDescRead:
        if (rxDmaState != dmaIdle)
            goto exit;

        DPRINTF(EthernetDesc, "rxDesc: addr=%08x read descriptor\n",
                regs.rxdp & 0x3fffffff);
        DPRINTF(EthernetDesc,
                "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
                link, bufptr, cmdsts, extsts);

        // OWN set means the device (not the driver) already owns this
        // descriptor's completion; nothing for us to fill in yet.
        if (cmdsts & CMDSTS_OWN) {
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            goto exit;
        } else {
            rxState = rxFifoBlock;
            rxFragPtr = bufptr;
            rxDescCnt = cmdsts & CMDSTS_LEN_MASK;
        }
        break;

      case rxFifoBlock:
        if (!rxPacket) {
            /**
             * @todo in reality, we should be able to start processing
             * the packet as it arrives, and not have to wait for the
             * full packet ot be in the receive fifo.
             */
            if (rxFifo.empty())
                goto exit;

            DPRINTF(EthernetSM, "****processing receive of new packet****\n");

            // If we don't have a packet, grab a new one from the fifo.
            rxPacket = rxFifo.front();
            rxPktBytes = rxPacket->length;
            rxPacketBufPtr = rxPacket->data;

#if TRACING_ON
            if (DTRACE(Ethernet)) {
                IpPtr ip(rxPacket);
                if (ip) {
                    DPRINTF(Ethernet, "ID is %d\n", ip->id());
                    TcpPtr tcp(ip);
                    if (tcp) {
                        DPRINTF(Ethernet,
                                "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
                                tcp->sport(), tcp->dport(), tcp->seq(),
                                tcp->ack());
                    }
                }
            }
#endif

            // sanity check - i think the driver behaves like this
            assert(rxDescCnt >= rxPktBytes);
            rxFifo.pop();
        }


        // dont' need the && rxDescCnt > 0 if driver sanity check
        // above holds
        if (rxPktBytes > 0) {
            rxState = rxFragWrite;
            // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
            // check holds
            rxXferLen = rxPktBytes;

            rxDmaAddr = rxFragPtr & 0x3fffffff;
            rxDmaData = rxPacketBufPtr;
            rxDmaLen = rxXferLen;
            rxDmaFree = dmaDataFree;

            if (doRxDmaWrite())
                goto exit;

        } else {
            rxState = rxDescWrite;

            //if (rxPktBytes == 0) { /* packet is done */
            assert(rxPktBytes == 0);
            DPRINTF(EthernetSM, "done with receiving packet\n");

            // Build the completion status: device done with descriptor,
            // packet OK, size in the low 16 bits.
            cmdsts |= CMDSTS_OWN;
            cmdsts &= ~CMDSTS_MORE;
            cmdsts |= CMDSTS_OK;
            cmdsts &= 0xffff0000;
            cmdsts += rxPacket->length; //i.e. set CMDSTS_SIZE

#if 0
            /*
             * all the driver uses these are for its own stats keeping
             * which we don't care about, aren't necessary for
             * functionality and doing this would just slow us down.
             * if they end up using this in a later version for
             * functional purposes, just undef
             */
            if (rxFilterEnable) {
                cmdsts &= ~CMDSTS_DEST_MASK;
                const EthAddr &dst = rxFifoFront()->dst();
                if (dst->unicast())
                    cmdsts |= CMDSTS_DEST_SELF;
                if (dst->multicast())
                    cmdsts |= CMDSTS_DEST_MULTI;
                if (dst->broadcast())
                    cmdsts |= CMDSTS_DEST_MASK;
            }
#endif

            // Verify IP/TCP/UDP checksums of the received packet and
            // record any errors in the extended status word.
            IpPtr ip(rxPacket);
            if (extstsEnable && ip) {
                extsts |= EXTSTS_IPPKT;
                rxIpChecksums++;
                if (cksum(ip) != 0) {
                    DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
                    extsts |= EXTSTS_IPERR;
                }
                TcpPtr tcp(ip);
                UdpPtr udp(ip);
                if (tcp) {
                    extsts |= EXTSTS_TCPPKT;
                    rxTcpChecksums++;
                    if (cksum(tcp) != 0) {
                        DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
                        extsts |= EXTSTS_TCPERR;

                    }
                } else if (udp) {
                    extsts |= EXTSTS_UDPPKT;
                    rxUdpChecksums++;
                    if (cksum(udp) != 0) {
                        DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
                        extsts |= EXTSTS_UDPERR;
                    }
                }
            }
            rxPacket = 0;

            /*
             * the driver seems to always receive into desc buffers
             * of size 1514, so you never have a pkt that is split
             * into multiple descriptors on the receive side, so
             * i don't implement that case, hence the assert above.
             */

            DPRINTF(EthernetDesc,
                    "rxDesc: addr=%08x writeback cmdsts extsts\n",
                    regs.rxdp & 0x3fffffff);
            DPRINTF(EthernetDesc,
                    "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
                    link, bufptr, cmdsts, extsts);

            // Write cmdsts and extsts (adjacent in both descriptor
            // layouts) back to the in-memory descriptor in one DMA.
            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = &cmdsts;
            if (is64bit) {
                rxDmaAddr += offsetof(ns_desc64, cmdsts);
                rxDmaLen = sizeof(rxDesc64.cmdsts) + sizeof(rxDesc64.extsts);
            } else {
                rxDmaAddr += offsetof(ns_desc32, cmdsts);
                rxDmaLen = sizeof(rxDesc32.cmdsts) + sizeof(rxDesc32.extsts);
            }
            rxDmaFree = dmaDescFree;

            descDmaWrites++;
            descDmaWrBytes += rxDmaLen;

            if (doRxDmaWrite())
                goto exit;
        }
        break;

      case rxFragWrite:
        if (rxDmaState != dmaIdle)
            goto exit;

        // Advance past the fragment that was just written out.
        rxPacketBufPtr += rxXferLen;
        rxFragPtr += rxXferLen;
        rxPktBytes -= rxXferLen;

        rxState = rxFifoBlock;
        break;

      case rxDescWrite:
        if (rxDmaState != dmaIdle)
            goto exit;

        assert(cmdsts & CMDSTS_OWN);

        assert(rxPacket == 0);
        devIntrPost(ISR_RXOK);

        if (cmdsts & CMDSTS_INTR)
            devIntrPost(ISR_RXDESC);

        if (!rxEnable) {
            DPRINTF(EthernetSM, "Halting the RX state machine\n");
            rxState = rxIdle;
            goto exit;
        } else
            rxState = rxAdvance;
        break;

      case rxAdvance:
        if (link == 0) {
            // End of the descriptor chain: go idle and remember (CRDD)
            // that the current descriptor has been consumed.
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            CRDD = true;
            goto exit;
        } else {
            if (rxDmaState != dmaIdle)
                goto exit;
            rxState = rxDescRead;
            regs.rxdp = link;
            CRDD = false;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
            rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
            rxDmaFree = dmaDescFree;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("Invalid rxState!");
    }

    DPRINTF(EthernetSM, "entering next rxState=%s\n",
            NsRxStateStrings[rxState]);
    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
            NsRxStateStrings[rxState]);

    if (clock && !rxKickEvent.scheduled())
        rxKickEvent.schedule(rxKickTick);
}
1897
1898 void
1899 NSGigE::transmit()
1900 {
1901 if (txFifo.empty()) {
1902 DPRINTF(Ethernet, "nothing to transmit\n");
1903 return;
1904 }
1905
1906 DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n",
1907 txFifo.size());
1908 if (interface->sendPacket(txFifo.front())) {
1909 #if TRACING_ON
1910 if (DTRACE(Ethernet)) {
1911 IpPtr ip(txFifo.front());
1912 if (ip) {
1913 DPRINTF(Ethernet, "ID is %d\n", ip->id());
1914 TcpPtr tcp(ip);
1915 if (tcp) {
1916 DPRINTF(Ethernet,
1917 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1918 tcp->sport(), tcp->dport(), tcp->seq(),
1919 tcp->ack());
1920 }
1921 }
1922 }
1923 #endif
1924
1925 DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length);
1926 txBytes += txFifo.front()->length;
1927 txPackets++;
1928
1929 DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n",
1930 txFifo.avail());
1931 txFifo.pop();
1932
1933 /*
1934 * normally do a writeback of the descriptor here, and ONLY
1935 * after that is done, send this interrupt. but since our
1936 * stuff never actually fails, just do this interrupt here,
1937 * otherwise the code has to stray from this nice format.
1938 * besides, it's functionally the same.
1939 */
1940 devIntrPost(ISR_TXOK);
1941 }
1942
1943 if (!txFifo.empty() && !txEvent.scheduled()) {
1944 DPRINTF(Ethernet, "reschedule transmit\n");
1945 txEvent.schedule(curTick + retryTime);
1946 }
1947 }
1948
void
NSGigE::txDmaReadCopy()
{
    // Complete the pending TX DMA read: copy from simulated physical
    // memory into the staging buffer and return the engine to idle.
    assert(txDmaState == dmaReading);

    physmem->dma_read((uint8_t *)txDmaData, txDmaAddr, txDmaLen);
    txDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
            txDmaAddr, txDmaLen);
    DDUMP(EthernetDMA, txDmaData, txDmaLen);
}
1961
1962 bool
1963 NSGigE::doTxDmaRead()
1964 {
1965 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
1966 txDmaState = dmaReading;
1967
1968 if (dmaInterface && !txDmaFree) {
1969 if (dmaInterface->busy())
1970 txDmaState = dmaReadWaiting;
1971 else
1972 dmaInterface->doDMA(Read, txDmaAddr, txDmaLen, curTick,
1973 &txDmaReadEvent, true);
1974 return true;
1975 }
1976
1977 if (dmaReadDelay == 0 && dmaReadFactor == 0.0) {
1978 txDmaReadCopy();
1979 return false;
1980 }
1981
1982 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
1983 Tick start = curTick + dmaReadDelay + factor;
1984 txDmaReadEvent.schedule(start);
1985 return true;
1986 }
1987
void
NSGigE::txDmaReadDone()
{
    // Event callback for a delayed TX DMA read: perform the copy, then
    // restart the state machines.
    assert(txDmaState == dmaReading);
    txDmaReadCopy();

    // If the receive state machine has a pending DMA, let it go first
    if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
        rxKick();

    txKick();
}
2000
void
NSGigE::txDmaWriteCopy()
{
    // Complete the pending TX DMA write: copy the staged data into
    // simulated physical memory and return the engine to idle.
    assert(txDmaState == dmaWriting);

    physmem->dma_write(txDmaAddr, (uint8_t *)txDmaData, txDmaLen);
    txDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
            txDmaAddr, txDmaLen);
    DDUMP(EthernetDMA, txDmaData, txDmaLen);
}
2013
2014 bool
2015 NSGigE::doTxDmaWrite()
2016 {
2017 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
2018 txDmaState = dmaWriting;
2019
2020 if (dmaInterface && !txDmaFree) {
2021 if (dmaInterface->busy())
2022 txDmaState = dmaWriteWaiting;
2023 else
2024 dmaInterface->doDMA(WriteInvalidate, txDmaAddr, txDmaLen, curTick,
2025 &txDmaWriteEvent, true);
2026 return true;
2027 }
2028
2029 if (dmaWriteDelay == 0 && dmaWriteFactor == 0.0) {
2030 txDmaWriteCopy();
2031 return false;
2032 }
2033
2034 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
2035 Tick start = curTick + dmaWriteDelay + factor;
2036 txDmaWriteEvent.schedule(start);
2037 return true;
2038 }
2039
void
NSGigE::txDmaWriteDone()
{
    // Event callback for a delayed TX DMA write: perform the copy, then
    // restart the state machines.
    assert(txDmaState == dmaWriting);
    txDmaWriteCopy();

    // If the receive state machine has a pending DMA, let it go first
    if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
        rxKick();

    txKick();
}
2052
2053 void
2054 NSGigE::txKick()
2055 {
2056 bool is64bit = (bool)(regs.config & CFGR_M64ADDR);
2057
2058 DPRINTF(EthernetSM, "transmit kick txState=%s %d-bit\n",
2059 NsTxStateStrings[txState], is64bit ? 64 : 32);
2060
2061 Addr link, bufptr;
2062 uint32_t &cmdsts = is64bit ? txDesc64.cmdsts : txDesc32.cmdsts;
2063 uint32_t &extsts = is64bit ? txDesc64.extsts : txDesc32.extsts;
2064
2065 next:
2066 if (clock) {
2067 if (txKickTick > curTick) {
2068 DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
2069 txKickTick);
2070 goto exit;
2071 }
2072
2073 // Go to the next state machine clock tick.
2074 txKickTick = curTick + cycles(1);
2075 }
2076
2077 switch(txDmaState) {
2078 case dmaReadWaiting:
2079 if (doTxDmaRead())
2080 goto exit;
2081 break;
2082 case dmaWriteWaiting:
2083 if (doTxDmaWrite())
2084 goto exit;
2085 break;
2086 default:
2087 break;
2088 }
2089
2090 link = is64bit ? (Addr)txDesc64.link : (Addr)txDesc32.link;
2091 bufptr = is64bit ? (Addr)txDesc64.bufptr : (Addr)txDesc32.bufptr;
2092 switch (txState) {
2093 case txIdle:
2094 if (!txEnable) {
2095 DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n");
2096 goto exit;
2097 }
2098
2099 if (CTDD) {
2100 txState = txDescRefr;
2101
2102 txDmaAddr = regs.txdp & 0x3fffffff;
2103 txDmaData =
2104 is64bit ? (void *)&txDesc64.link : (void *)&txDesc32.link;
2105 txDmaLen = is64bit ? sizeof(txDesc64.link) : sizeof(txDesc32.link);
2106 txDmaFree = dmaDescFree;
2107
2108 descDmaReads++;
2109 descDmaRdBytes += txDmaLen;
2110
2111 if (doTxDmaRead())
2112 goto exit;
2113
2114 } else {
2115 txState = txDescRead;
2116
2117 txDmaAddr = regs.txdp & 0x3fffffff;
2118 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
2119 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
2120 txDmaFree = dmaDescFree;
2121
2122 descDmaReads++;
2123 descDmaRdBytes += txDmaLen;
2124
2125 if (doTxDmaRead())
2126 goto exit;
2127 }
2128 break;
2129
2130 case txDescRefr:
2131 if (txDmaState != dmaIdle)
2132 goto exit;
2133
2134 txState = txAdvance;
2135 break;
2136
2137 case txDescRead:
2138 if (txDmaState != dmaIdle)
2139 goto exit;
2140
2141 DPRINTF(EthernetDesc, "txDesc: addr=%08x read descriptor\n",
2142 regs.txdp & 0x3fffffff);
2143 DPRINTF(EthernetDesc,
2144 "txDesc: link=%#x bufptr=%#x cmdsts=%#08x extsts=%#08x\n",
2145 link, bufptr, cmdsts, extsts);
2146
2147 if (cmdsts & CMDSTS_OWN) {
2148 txState = txFifoBlock;
2149 txFragPtr = bufptr;
2150 txDescCnt = cmdsts & CMDSTS_LEN_MASK;
2151 } else {
2152 devIntrPost(ISR_TXIDLE);
2153 txState = txIdle;
2154 goto exit;
2155 }
2156 break;
2157
2158 case txFifoBlock:
2159 if (!txPacket) {
2160 DPRINTF(EthernetSM, "****starting the tx of a new packet****\n");
2161 txPacket = new PacketData(16384);
2162 txPacketBufPtr = txPacket->data;
2163 }
2164
2165 if (txDescCnt == 0) {
2166 DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n");
2167 if (cmdsts & CMDSTS_MORE) {
2168 DPRINTF(EthernetSM, "there are more descriptors to come\n");
2169 txState = txDescWrite;
2170
2171 cmdsts &= ~CMDSTS_OWN;
2172
2173 txDmaAddr = regs.txdp & 0x3fffffff;
2174 txDmaData = &cmdsts;
2175 if (is64bit) {
2176 txDmaAddr += offsetof(ns_desc64, cmdsts);
2177 txDmaLen = sizeof(txDesc64.cmdsts);
2178 } else {
2179 txDmaAddr += offsetof(ns_desc32, cmdsts);
2180 txDmaLen = sizeof(txDesc32.cmdsts);
2181 }
2182 txDmaFree = dmaDescFree;
2183
2184 if (doTxDmaWrite())
2185 goto exit;
2186
2187 } else { /* this packet is totally done */
2188 DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
2189 /* deal with the the packet that just finished */
2190 if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
2191 IpPtr ip(txPacket);
2192 if (extsts & EXTSTS_UDPPKT) {
2193 UdpPtr udp(ip);
2194 udp->sum(0);
2195 udp->sum(cksum(udp));
2196 txUdpChecksums++;
2197 } else if (extsts & EXTSTS_TCPPKT) {
2198 TcpPtr tcp(ip);
2199 tcp->sum(0);
2200 tcp->sum(cksum(tcp));
2201 txTcpChecksums++;
2202 }
2203 if (extsts & EXTSTS_IPPKT) {
2204 ip->sum(0);
2205 ip->sum(cksum(ip));
2206 txIpChecksums++;
2207 }
2208 }
2209
2210 txPacket->length = txPacketBufPtr - txPacket->data;
2211 // this is just because the receive can't handle a
2212 // packet bigger want to make sure
2213 if (txPacket->length > 1514)
2214 panic("transmit packet too large, %s > 1514\n",
2215 txPacket->length);
2216
2217 #ifndef NDEBUG
2218 bool success =
2219 #endif
2220 txFifo.push(txPacket);
2221 assert(success);
2222
2223 /*
2224 * this following section is not tqo spec, but
2225 * functionally shouldn't be any different. normally,
2226 * the chip will wait til the transmit has occurred
2227 * before writing back the descriptor because it has
2228 * to wait to see that it was successfully transmitted
2229 * to decide whether to set CMDSTS_OK or not.
2230 * however, in the simulator since it is always
2231 * successfully transmitted, and writing it exactly to
2232 * spec would complicate the code, we just do it here
2233 */
2234
2235 cmdsts &= ~CMDSTS_OWN;
2236 cmdsts |= CMDSTS_OK;
2237
2238 DPRINTF(EthernetDesc,
2239 "txDesc writeback: cmdsts=%08x extsts=%08x\n",
2240 cmdsts, extsts);
2241
2242 txDmaFree = dmaDescFree;
2243 txDmaAddr = regs.txdp & 0x3fffffff;
2244 txDmaData = &cmdsts;
2245 if (is64bit) {
2246 txDmaAddr += offsetof(ns_desc64, cmdsts);
2247 txDmaLen =
2248 sizeof(txDesc64.cmdsts) + sizeof(txDesc64.extsts);
2249 } else {
2250 txDmaAddr += offsetof(ns_desc32, cmdsts);
2251 txDmaLen =
2252 sizeof(txDesc32.cmdsts) + sizeof(txDesc32.extsts);
2253 }
2254
2255 descDmaWrites++;
2256 descDmaWrBytes += txDmaLen;
2257
2258 transmit();
2259 txPacket = 0;
2260
2261 if (!txEnable) {
2262 DPRINTF(EthernetSM, "halting TX state machine\n");
2263 txState = txIdle;
2264 goto exit;
2265 } else
2266 txState = txAdvance;
2267
2268 if (doTxDmaWrite())
2269 goto exit;
2270 }
2271 } else {
2272 DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
2273 if (!txFifo.full()) {
2274 txState = txFragRead;
2275
2276 /*
2277 * The number of bytes transferred is either whatever
2278 * is left in the descriptor (txDescCnt), or if there
2279 * is not enough room in the fifo, just whatever room
2280 * is left in the fifo
2281 */
2282 txXferLen = min<uint32_t>(txDescCnt, txFifo.avail());
2283
2284 txDmaAddr = txFragPtr & 0x3fffffff;
2285 txDmaData = txPacketBufPtr;
2286 txDmaLen = txXferLen;
2287 txDmaFree = dmaDataFree;
2288
2289 if (doTxDmaRead())
2290 goto exit;
2291 } else {
2292 txState = txFifoBlock;
2293 transmit();
2294
2295 goto exit;
2296 }
2297
2298 }
2299 break;
2300
2301 case txFragRead:
2302 if (txDmaState != dmaIdle)
2303 goto exit;
2304
2305 txPacketBufPtr += txXferLen;
2306 txFragPtr += txXferLen;
2307 txDescCnt -= txXferLen;
2308 txFifo.reserve(txXferLen);
2309
2310 txState = txFifoBlock;
2311 break;
2312
2313 case txDescWrite:
2314 if (txDmaState != dmaIdle)
2315 goto exit;
2316
2317 if (cmdsts & CMDSTS_INTR)
2318 devIntrPost(ISR_TXDESC);
2319
2320 if (!txEnable) {
2321 DPRINTF(EthernetSM, "halting TX state machine\n");
2322 txState = txIdle;
2323 goto exit;
2324 } else
2325 txState = txAdvance;
2326 break;
2327
2328 case txAdvance:
2329 if (link == 0) {
2330 devIntrPost(ISR_TXIDLE);
2331 txState = txIdle;
2332 goto exit;
2333 } else {
2334 if (txDmaState != dmaIdle)
2335 goto exit;
2336 txState = txDescRead;
2337 regs.txdp = link;
2338 CTDD = false;
2339
2340 txDmaAddr = link & 0x3fffffff;
2341 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
2342 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
2343 txDmaFree = dmaDescFree;
2344
2345 if (doTxDmaRead())
2346 goto exit;
2347 }
2348 break;
2349
2350 default:
2351 panic("invalid state");
2352 }
2353
2354 DPRINTF(EthernetSM, "entering next txState=%s\n",
2355 NsTxStateStrings[txState]);
2356 goto next;
2357
2358 exit:
2359 /**
2360 * @todo do we want to schedule a future kick?
2361 */
2362 DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
2363 NsTxStateStrings[txState]);
2364
2365 if (clock && !txKickEvent.scheduled())
2366 txKickEvent.schedule(txKickTick);
2367 }
2368
2369 /**
2370 * Advance the EEPROM state machine
2371 * Called on rising edge of EEPROM clock bit in MEAR
2372 */
2373 void
2374 NSGigE::eepromKick()
2375 {
2376 switch (eepromState) {
2377
2378 case eepromStart:
2379
2380 // Wait for start bit
2381 if (regs.mear & MEAR_EEDI) {
2382 // Set up to get 2 opcode bits
2383 eepromState = eepromGetOpcode;
2384 eepromBitsToRx = 2;
2385 eepromOpcode = 0;
2386 }
2387 break;
2388
2389 case eepromGetOpcode:
2390 eepromOpcode <<= 1;
2391 eepromOpcode += (regs.mear & MEAR_EEDI) ? 1 : 0;
2392 --eepromBitsToRx;
2393
2394 // Done getting opcode
2395 if (eepromBitsToRx == 0) {
2396 if (eepromOpcode != EEPROM_READ)
2397 panic("only EEPROM reads are implemented!");
2398
2399 // Set up to get address
2400 eepromState = eepromGetAddress;
2401 eepromBitsToRx = 6;
2402 eepromAddress = 0;
2403 }
2404 break;
2405
2406 case eepromGetAddress:
2407 eepromAddress <<= 1;
2408 eepromAddress += (regs.mear & MEAR_EEDI) ? 1 : 0;
2409 --eepromBitsToRx;
2410
2411 // Done getting address
2412 if (eepromBitsToRx == 0) {
2413
2414 if (eepromAddress >= EEPROM_SIZE)
2415 panic("EEPROM read access out of range!");
2416
2417 switch (eepromAddress) {
2418
2419 case EEPROM_PMATCH2_ADDR:
2420 eepromData = rom.perfectMatch[5];
2421 eepromData <<= 8;
2422 eepromData += rom.perfectMatch[4];
2423 break;
2424
2425 case EEPROM_PMATCH1_ADDR:
2426 eepromData = rom.perfectMatch[3];
2427 eepromData <<= 8;
2428 eepromData += rom.perfectMatch[2];
2429 break;
2430
2431 case EEPROM_PMATCH0_ADDR:
2432 eepromData = rom.perfectMatch[1];
2433 eepromData <<= 8;
2434 eepromData += rom.perfectMatch[0];
2435 break;
2436
2437 default:
2438 panic("FreeBSD driver only uses EEPROM to read PMATCH!");
2439 }
2440 // Set up to read data
2441 eepromState = eepromRead;
2442 eepromBitsToRx = 16;
2443
2444 // Clear data in bit
2445 regs.mear &= ~MEAR_EEDI;
2446 }
2447 break;
2448
2449 case eepromRead:
2450 // Clear Data Out bit
2451 regs.mear &= ~MEAR_EEDO;
2452 // Set bit to value of current EEPROM bit
2453 regs.mear |= (eepromData & 0x8000) ? MEAR_EEDO : 0x0;
2454
2455 eepromData <<= 1;
2456 --eepromBitsToRx;
2457
2458 // All done
2459 if (eepromBitsToRx == 0) {
2460 eepromState = eepromStart;
2461 }
2462 break;
2463
2464 default:
2465 panic("invalid EEPROM state");
2466 }
2467
2468 }
2469
2470 void
2471 NSGigE::transferDone()
2472 {
2473 if (txFifo.empty()) {
2474 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
2475 return;
2476 }
2477
2478 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
2479
2480 if (txEvent.scheduled())
2481 txEvent.reschedule(curTick + cycles(1));
2482 else
2483 txEvent.schedule(curTick + cycles(1));
2484 }
2485
2486 bool
2487 NSGigE::rxFilter(const PacketPtr &packet)
2488 {
2489 EthPtr eth = packet;
2490 bool drop = true;
2491 string type;
2492
2493 const EthAddr &dst = eth->dst();
2494 if (dst.unicast()) {
2495 // If we're accepting all unicast addresses
2496 if (acceptUnicast)
2497 drop = false;
2498
2499 // If we make a perfect match
2500 if (acceptPerfect && dst == rom.perfectMatch)
2501 drop = false;
2502
2503 if (acceptArp && eth->type() == ETH_TYPE_ARP)
2504 drop = false;
2505
2506 } else if (dst.broadcast()) {
2507 // if we're accepting broadcasts
2508 if (acceptBroadcast)
2509 drop = false;
2510
2511 } else if (dst.multicast()) {
2512 // if we're accepting all multicasts
2513 if (acceptMulticast)
2514 drop = false;
2515
2516 // Multicast hashing faked - all packets accepted
2517 if (multicastHashEnable)
2518 drop = false;
2519 }
2520
2521 if (drop) {
2522 DPRINTF(Ethernet, "rxFilter drop\n");
2523 DDUMP(EthernetData, packet->data, packet->length);
2524 }
2525
2526 return drop;
2527 }
2528
bool
NSGigE::recvPacket(PacketPtr packet)
{
    // Note: rx byte/packet stats are counted even for packets that end
    // up dropped below.
    rxBytes += packet->length;
    rxPackets++;

    DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
            rxFifo.avail());

    // Receiver disabled: drop silently.  Returning true presumably
    // tells the link the packet was consumed -- confirm against the
    // EtherInt interface contract.
    if (!rxEnable) {
        DPRINTF(Ethernet, "receive disabled...packet dropped\n");
        return true;
    }

    if (!rxFilterEnable) {
        DPRINTF(Ethernet,
                "receive packet filtering disabled . . . packet dropped\n");
        return true;
    }

    // Consult the receive filter (unicast/broadcast/multicast/ARP rules).
    if (rxFilter(packet)) {
        DPRINTF(Ethernet, "packet filtered...dropped\n");
        return true;
    }

    // FIFO overflow: count the drop and raise the RX-overrun interrupt;
    // false is the only path that reports a refusal to the caller.
    if (rxFifo.avail() < packet->length) {
#if TRACING_ON
        IpPtr ip(packet);
        TcpPtr tcp(ip);
        if (ip) {
            DPRINTF(Ethernet,
                    "packet won't fit in receive buffer...pkt ID %d dropped\n",
                    ip->id());
            if (tcp) {
                DPRINTF(Ethernet, "Seq=%d\n", tcp->seq());
            }
        }
#endif
        droppedPackets++;
        devIntrPost(ISR_RXORN);
        return false;
    }

    // Accept the packet and kick the receive state machine.
    rxFifo.push(packet);

    rxKick();
    return true;
}
2577
//=====================================================================
// Checkpointing support
//
/**
 * Write the complete device state to a checkpoint stream.  The field
 * order here must match NSGigE::unserialize() exactly.
 */
void
NSGigE::serialize(ostream &os)
{
    // Serialize the PciDev base class
    PciDev::serialize(os);

    /*
     * Finalize any DMA events now: complete pending copies immediately
     * so that no in-flight DMA needs to be represented in the checkpoint.
     */
    if (rxDmaReadEvent.scheduled())
        rxDmaReadCopy();
    if (rxDmaWriteEvent.scheduled())
        rxDmaWriteCopy();
    if (txDmaReadEvent.scheduled())
        txDmaReadCopy();
    if (txDmaWriteEvent.scheduled())
        txDmaWriteCopy();

    /*
     * Serialize the device registers
     */
    SERIALIZE_SCALAR(regs.command);
    SERIALIZE_SCALAR(regs.config);
    SERIALIZE_SCALAR(regs.mear);
    SERIALIZE_SCALAR(regs.ptscr);
    SERIALIZE_SCALAR(regs.isr);
    SERIALIZE_SCALAR(regs.imr);
    SERIALIZE_SCALAR(regs.ier);
    SERIALIZE_SCALAR(regs.ihr);
    SERIALIZE_SCALAR(regs.txdp);
    SERIALIZE_SCALAR(regs.txdp_hi);
    SERIALIZE_SCALAR(regs.txcfg);
    SERIALIZE_SCALAR(regs.gpior);
    SERIALIZE_SCALAR(regs.rxdp);
    SERIALIZE_SCALAR(regs.rxdp_hi);
    SERIALIZE_SCALAR(regs.rxcfg);
    SERIALIZE_SCALAR(regs.pqcr);
    SERIALIZE_SCALAR(regs.wcsr);
    SERIALIZE_SCALAR(regs.pcr);
    SERIALIZE_SCALAR(regs.rfcr);
    SERIALIZE_SCALAR(regs.rfdr);
    SERIALIZE_SCALAR(regs.brar);
    SERIALIZE_SCALAR(regs.brdr);
    SERIALIZE_SCALAR(regs.srr);
    SERIALIZE_SCALAR(regs.mibc);
    SERIALIZE_SCALAR(regs.vrcr);
    SERIALIZE_SCALAR(regs.vtcr);
    SERIALIZE_SCALAR(regs.vdr);
    SERIALIZE_SCALAR(regs.ccsr);
    SERIALIZE_SCALAR(regs.tbicr);
    SERIALIZE_SCALAR(regs.tbisr);
    SERIALIZE_SCALAR(regs.tanar);
    SERIALIZE_SCALAR(regs.tanlpar);
    SERIALIZE_SCALAR(regs.taner);
    SERIALIZE_SCALAR(regs.tesr);

    SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
    SERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);

    SERIALIZE_SCALAR(ioEnable);

    /*
     * Serialize the data Fifos
     */
    rxFifo.serialize("rxFifo", os);
    txFifo.serialize("txFifo", os);

    /*
     * Serialize the various helper variables.  Packets in flight are
     * only written if they exist; the buffer cursor is stored as an
     * offset from the packet data so it can be rebuilt on restore.
     */
    bool txPacketExists = txPacket;
    SERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        txPacket->length = txPacketBufPtr - txPacket->data;
        txPacket->serialize("txPacket", os);
        uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
        SERIALIZE_SCALAR(txPktBufPtr);
    }

    bool rxPacketExists = rxPacket;
    SERIALIZE_SCALAR(rxPacketExists);
    if (rxPacketExists) {
        rxPacket->serialize("rxPacket", os);
        uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
        SERIALIZE_SCALAR(rxPktBufPtr);
    }

    SERIALIZE_SCALAR(txXferLen);
    SERIALIZE_SCALAR(rxXferLen);

    /*
     * Serialize Cached Descriptors
     */
    SERIALIZE_SCALAR(rxDesc64.link);
    SERIALIZE_SCALAR(rxDesc64.bufptr);
    SERIALIZE_SCALAR(rxDesc64.cmdsts);
    SERIALIZE_SCALAR(rxDesc64.extsts);
    SERIALIZE_SCALAR(txDesc64.link);
    SERIALIZE_SCALAR(txDesc64.bufptr);
    SERIALIZE_SCALAR(txDesc64.cmdsts);
    SERIALIZE_SCALAR(txDesc64.extsts);
    SERIALIZE_SCALAR(rxDesc32.link);
    SERIALIZE_SCALAR(rxDesc32.bufptr);
    SERIALIZE_SCALAR(rxDesc32.cmdsts);
    SERIALIZE_SCALAR(rxDesc32.extsts);
    SERIALIZE_SCALAR(txDesc32.link);
    SERIALIZE_SCALAR(txDesc32.bufptr);
    SERIALIZE_SCALAR(txDesc32.cmdsts);
    SERIALIZE_SCALAR(txDesc32.extsts);
    SERIALIZE_SCALAR(extstsEnable);

    /*
     * Serialize tx state machine (enums are widened to int)
     */
    int txState = this->txState;
    SERIALIZE_SCALAR(txState);
    SERIALIZE_SCALAR(txEnable);
    SERIALIZE_SCALAR(CTDD);
    SERIALIZE_SCALAR(txFragPtr);
    SERIALIZE_SCALAR(txDescCnt);
    int txDmaState = this->txDmaState;
    SERIALIZE_SCALAR(txDmaState);
    SERIALIZE_SCALAR(txKickTick);

    /*
     * Serialize rx state machine
     */
    int rxState = this->rxState;
    SERIALIZE_SCALAR(rxState);
    SERIALIZE_SCALAR(rxEnable);
    SERIALIZE_SCALAR(CRDD);
    SERIALIZE_SCALAR(rxPktBytes);
    SERIALIZE_SCALAR(rxFragPtr);
    SERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState = this->rxDmaState;
    SERIALIZE_SCALAR(rxDmaState);
    SERIALIZE_SCALAR(rxKickTick);

    /*
     * Serialize EEPROM state machine
     */
    int eepromState = this->eepromState;
    SERIALIZE_SCALAR(eepromState);
    SERIALIZE_SCALAR(eepromClk);
    SERIALIZE_SCALAR(eepromBitsToRx);
    SERIALIZE_SCALAR(eepromOpcode);
    SERIALIZE_SCALAR(eepromAddress);
    SERIALIZE_SCALAR(eepromData);

    /*
     * If there's a pending transmit, store the time so we can
     * reschedule it later.  Stored relative to curTick (0 == no event).
     */
    Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
    SERIALIZE_SCALAR(transmitTick);

    /*
     * receive address filter settings
     */
    SERIALIZE_SCALAR(rxFilterEnable);
    SERIALIZE_SCALAR(acceptBroadcast);
    SERIALIZE_SCALAR(acceptMulticast);
    SERIALIZE_SCALAR(acceptUnicast);
    SERIALIZE_SCALAR(acceptPerfect);
    SERIALIZE_SCALAR(acceptArp);
    SERIALIZE_SCALAR(multicastHashEnable);

    /*
     * Keep track of pending interrupt status.
     * NOTE(review): intrEventTick is stored as an ABSOLUTE tick while
     * transmitTick above is relative -- unserialize() matches this, but
     * it is inconsistent; confirm intended.
     */
    SERIALIZE_SCALAR(intrTick);
    SERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick = 0;
    if (intrEvent)
        intrEventTick = intrEvent->when();
    SERIALIZE_SCALAR(intrEventTick);

}
2759
/**
 * Restore the complete device state from a checkpoint.  Fields are
 * read in exactly the order NSGigE::serialize() wrote them; pending
 * events (kick, transmit, interrupt) are rescheduled at the end.
 */
void
NSGigE::unserialize(Checkpoint *cp, const std::string &section)
{
    // Unserialize the PciDev base class
    PciDev::unserialize(cp, section);

    UNSERIALIZE_SCALAR(regs.command);
    UNSERIALIZE_SCALAR(regs.config);
    UNSERIALIZE_SCALAR(regs.mear);
    UNSERIALIZE_SCALAR(regs.ptscr);
    UNSERIALIZE_SCALAR(regs.isr);
    UNSERIALIZE_SCALAR(regs.imr);
    UNSERIALIZE_SCALAR(regs.ier);
    UNSERIALIZE_SCALAR(regs.ihr);
    UNSERIALIZE_SCALAR(regs.txdp);
    UNSERIALIZE_SCALAR(regs.txdp_hi);
    UNSERIALIZE_SCALAR(regs.txcfg);
    UNSERIALIZE_SCALAR(regs.gpior);
    UNSERIALIZE_SCALAR(regs.rxdp);
    UNSERIALIZE_SCALAR(regs.rxdp_hi);
    UNSERIALIZE_SCALAR(regs.rxcfg);
    UNSERIALIZE_SCALAR(regs.pqcr);
    UNSERIALIZE_SCALAR(regs.wcsr);
    UNSERIALIZE_SCALAR(regs.pcr);
    UNSERIALIZE_SCALAR(regs.rfcr);
    UNSERIALIZE_SCALAR(regs.rfdr);
    UNSERIALIZE_SCALAR(regs.brar);
    UNSERIALIZE_SCALAR(regs.brdr);
    UNSERIALIZE_SCALAR(regs.srr);
    UNSERIALIZE_SCALAR(regs.mibc);
    UNSERIALIZE_SCALAR(regs.vrcr);
    UNSERIALIZE_SCALAR(regs.vtcr);
    UNSERIALIZE_SCALAR(regs.vdr);
    UNSERIALIZE_SCALAR(regs.ccsr);
    UNSERIALIZE_SCALAR(regs.tbicr);
    UNSERIALIZE_SCALAR(regs.tbisr);
    UNSERIALIZE_SCALAR(regs.tanar);
    UNSERIALIZE_SCALAR(regs.tanlpar);
    UNSERIALIZE_SCALAR(regs.taner);
    UNSERIALIZE_SCALAR(regs.tesr);

    UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
    UNSERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);

    UNSERIALIZE_SCALAR(ioEnable);

    /*
     * unserialize the data fifos
     */
    rxFifo.unserialize("rxFifo", cp, section);
    txFifo.unserialize("txFifo", cp, section);

    /*
     * unserialize the various helper variables.  In-flight packets are
     * rebuilt from the checkpoint and the buffer cursor is recomputed
     * from the serialized offset.
     */
    bool txPacketExists;
    UNSERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        // 16KB staging buffer -- presumably matches the allocation used
        // on the normal tx path; TODO confirm against txKick().
        txPacket = new PacketData(16384);
        txPacket->unserialize("txPacket", cp, section);
        uint32_t txPktBufPtr;
        UNSERIALIZE_SCALAR(txPktBufPtr);
        txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
    } else
        txPacket = 0;

    bool rxPacketExists;
    UNSERIALIZE_SCALAR(rxPacketExists);
    // NOTE(review): this pre-clear is redundant with the else branch
    // below (and the tx path above has no such pre-clear); harmless.
    rxPacket = 0;
    if (rxPacketExists) {
        rxPacket = new PacketData(16384);
        rxPacket->unserialize("rxPacket", cp, section);
        uint32_t rxPktBufPtr;
        UNSERIALIZE_SCALAR(rxPktBufPtr);
        rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
    } else
        rxPacket = 0;

    UNSERIALIZE_SCALAR(txXferLen);
    UNSERIALIZE_SCALAR(rxXferLen);

    /*
     * Unserialize Cached Descriptors
     */
    UNSERIALIZE_SCALAR(rxDesc64.link);
    UNSERIALIZE_SCALAR(rxDesc64.bufptr);
    UNSERIALIZE_SCALAR(rxDesc64.cmdsts);
    UNSERIALIZE_SCALAR(rxDesc64.extsts);
    UNSERIALIZE_SCALAR(txDesc64.link);
    UNSERIALIZE_SCALAR(txDesc64.bufptr);
    UNSERIALIZE_SCALAR(txDesc64.cmdsts);
    UNSERIALIZE_SCALAR(txDesc64.extsts);
    UNSERIALIZE_SCALAR(rxDesc32.link);
    UNSERIALIZE_SCALAR(rxDesc32.bufptr);
    UNSERIALIZE_SCALAR(rxDesc32.cmdsts);
    UNSERIALIZE_SCALAR(rxDesc32.extsts);
    UNSERIALIZE_SCALAR(txDesc32.link);
    UNSERIALIZE_SCALAR(txDesc32.bufptr);
    UNSERIALIZE_SCALAR(txDesc32.cmdsts);
    UNSERIALIZE_SCALAR(txDesc32.extsts);
    UNSERIALIZE_SCALAR(extstsEnable);

    /*
     * unserialize tx state machine (stored as int, cast back to enum)
     */
    int txState;
    UNSERIALIZE_SCALAR(txState);
    this->txState = (TxState) txState;
    UNSERIALIZE_SCALAR(txEnable);
    UNSERIALIZE_SCALAR(CTDD);
    UNSERIALIZE_SCALAR(txFragPtr);
    UNSERIALIZE_SCALAR(txDescCnt);
    int txDmaState;
    UNSERIALIZE_SCALAR(txDmaState);
    this->txDmaState = (DmaState) txDmaState;
    UNSERIALIZE_SCALAR(txKickTick);
    // A nonzero kick tick means the state machine was active; restart it.
    if (txKickTick)
        txKickEvent.schedule(txKickTick);

    /*
     * unserialize rx state machine
     */
    int rxState;
    UNSERIALIZE_SCALAR(rxState);
    this->rxState = (RxState) rxState;
    UNSERIALIZE_SCALAR(rxEnable);
    UNSERIALIZE_SCALAR(CRDD);
    UNSERIALIZE_SCALAR(rxPktBytes);
    UNSERIALIZE_SCALAR(rxFragPtr);
    UNSERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState;
    UNSERIALIZE_SCALAR(rxDmaState);
    this->rxDmaState = (DmaState) rxDmaState;
    UNSERIALIZE_SCALAR(rxKickTick);
    if (rxKickTick)
        rxKickEvent.schedule(rxKickTick);

    /*
     * Unserialize EEPROM state machine
     */
    int eepromState;
    UNSERIALIZE_SCALAR(eepromState);
    this->eepromState = (EEPROMState) eepromState;
    UNSERIALIZE_SCALAR(eepromClk);
    UNSERIALIZE_SCALAR(eepromBitsToRx);
    UNSERIALIZE_SCALAR(eepromOpcode);
    UNSERIALIZE_SCALAR(eepromAddress);
    UNSERIALIZE_SCALAR(eepromData);

    /*
     * If there's a pending transmit, reschedule it now.
     * transmitTick was serialized relative to curTick.
     */
    Tick transmitTick;
    UNSERIALIZE_SCALAR(transmitTick);
    if (transmitTick)
        txEvent.schedule(curTick + transmitTick);

    /*
     * unserialize receive address filter settings
     */
    UNSERIALIZE_SCALAR(rxFilterEnable);
    UNSERIALIZE_SCALAR(acceptBroadcast);
    UNSERIALIZE_SCALAR(acceptMulticast);
    UNSERIALIZE_SCALAR(acceptUnicast);
    UNSERIALIZE_SCALAR(acceptPerfect);
    UNSERIALIZE_SCALAR(acceptArp);
    UNSERIALIZE_SCALAR(multicastHashEnable);

    /*
     * Keep track of pending interrupt status.  intrEventTick was saved
     * as an ABSOLUTE tick (unlike transmitTick above).
     */
    UNSERIALIZE_SCALAR(intrTick);
    UNSERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick;
    UNSERIALIZE_SCALAR(intrEventTick);
    if (intrEventTick) {
        intrEvent = new IntrEvent(this, true);
        intrEvent->schedule(intrEventTick);
    }

    /*
     * re-add addrRanges to bus bridges
     */
    if (pioInterface) {
        pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));
        pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));
    }
}
2948
/**
 * Compute the completion time of a timing-mode PIO access and, for
 * delayed writes, consume the matching entry from the per-CPU write
 * queue (applying command-register side effects).
 *
 * @param req the memory request being timed
 * @return tick at which the access completes (curTick + pioLatency)
 */
Tick
NSGigE::cacheAccess(MemReqPtr &req)
{
    // Register offset within the device's 4KB BAR window.
    Addr daddr = req->paddr & 0xfff;
    DPRINTF(EthernetPIO, "timing access to paddr=%#x (daddr=%#x)\n",
            req->paddr, daddr);

    // Reads (and writes when delayed-write mode is off) have no queued
    // work to apply -- just charge the PIO latency.
    if (!pioDelayWrite || !req->cmd.isWrite())
        return curTick + pioLatency;

    // Identify the issuing CPU.  NOTE(review): assumes PAL code keeps
    // the CPU id in bits 15:8 of IPR_PALtemp16 -- confirm against PAL.
    int cpu = (req->xc->regs.ipr[TheISA::IPR_PALtemp16] >> 8) & 0xff;
    std::list<RegWriteData> &wq = writeQueue[cpu];
    if (wq.empty())
        panic("WriteQueue for cpu %d empty timing daddr=%#x", cpu, daddr);

    // The timing access must match the write that the earlier
    // functional access recorded at the head of the queue.
    const RegWriteData &data = wq.front();
    if (data.daddr != daddr)
        panic("read mismatch on cpu %d, daddr functional=%#x timing=%#x",
              cpu, data.daddr, daddr);

    // Writes to the command register may enable tx/rx; kick the idle
    // state machine only now that the timing access has arrived.
    if (daddr == CR) {
        if ((data.value & (CR_TXD | CR_TXE)) == CR_TXE) {
            txEnable = true;
            if (txState == txIdle)
                txKick();
        }

        if ((data.value & (CR_RXD | CR_RXE)) == CR_RXE) {
            rxEnable = true;
            if (rxState == rxIdle)
                rxKick();
        }
    }

    wq.pop_front();
    return curTick + pioLatency;
}
2986
// Configuration parameters for the NSGigEInt ethernet interface object.
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

    SimObjectParam<EtherInt *> peer;    // interface at the other end of the link
    SimObjectParam<NSGigE *> device;    // device this interface belongs to

END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt)

    INIT_PARAM_DFLT(peer, "peer interface", NULL),
    INIT_PARAM(device, "Ethernet device of this interface")

END_INIT_SIM_OBJECT_PARAMS(NSGigEInt)
3000
3001 CREATE_SIM_OBJECT(NSGigEInt)
3002 {
3003 NSGigEInt *dev_int = new NSGigEInt(getInstanceName(), device);
3004
3005 EtherInt *p = (EtherInt *)peer;
3006 if (p) {
3007 dev_int->setPeer(p);
3008 p->setPeer(dev_int);
3009 }
3010
3011 return dev_int;
3012 }
3013
3014 REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt)
3015
3016
// Configuration parameters exported by the NSGigE device model.
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE)

    Param<Tick> clock;                          // state machine clock

    // PCI device plumbing
    Param<Addr> addr;
    SimObjectParam<MemoryController *> mmu;
    SimObjectParam<PhysicalMemory *> physmem;
    SimObjectParam<PciConfigAll *> configspace;
    SimObjectParam<PciConfigData *> configdata;
    SimObjectParam<Platform *> platform;
    Param<uint32_t> pci_bus;
    Param<uint32_t> pci_dev;
    Param<uint32_t> pci_func;

    // Bus attachment and DMA/PIO timing knobs
    SimObjectParam<HierParams *> hier;
    SimObjectParam<Bus*> pio_bus;
    SimObjectParam<Bus*> dma_bus;
    SimObjectParam<Bus*> payload_bus;
    Param<bool> dma_desc_free;
    Param<bool> dma_data_free;
    Param<Tick> dma_read_delay;
    Param<Tick> dma_write_delay;
    Param<Tick> dma_read_factor;
    Param<Tick> dma_write_factor;
    Param<bool> dma_no_allocate;
    Param<Tick> pio_latency;
    Param<bool> pio_delay_write;
    Param<Tick> intr_delay;

    // Link delays and FIFO sizing
    Param<Tick> rx_delay;
    Param<Tick> tx_delay;
    Param<uint32_t> rx_fifo_size;
    Param<uint32_t> tx_fifo_size;

    // Receive filtering, MAC address, threading
    Param<bool> rx_filter;
    Param<string> hardware_address;
    Param<bool> rx_thread;
    Param<bool> tx_thread;

END_DECLARE_SIM_OBJECT_PARAMS(NSGigE)

BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE)

    INIT_PARAM(clock, "State machine processor frequency"),

    INIT_PARAM(addr, "Device Address"),
    INIT_PARAM(mmu, "Memory Controller"),
    INIT_PARAM(physmem, "Physical Memory"),
    INIT_PARAM(configspace, "PCI Configspace"),
    INIT_PARAM(configdata, "PCI Config data"),
    INIT_PARAM(platform, "Platform"),
    INIT_PARAM(pci_bus, "PCI bus"),
    INIT_PARAM(pci_dev, "PCI device number"),
    INIT_PARAM(pci_func, "PCI function code"),

    INIT_PARAM(hier, "Hierarchy global variables"),
    INIT_PARAM(pio_bus, ""),
    INIT_PARAM(dma_bus, ""),
    INIT_PARAM(payload_bus, "The IO Bus to attach to for payload"),
    INIT_PARAM(dma_desc_free, "DMA of Descriptors is free"),
    INIT_PARAM(dma_data_free, "DMA of Data is free"),
    INIT_PARAM(dma_read_delay, "fixed delay for dma reads"),
    INIT_PARAM(dma_write_delay, "fixed delay for dma writes"),
    INIT_PARAM(dma_read_factor, "multiplier for dma reads"),
    INIT_PARAM(dma_write_factor, "multiplier for dma writes"),
    INIT_PARAM(dma_no_allocate, "Should DMA reads allocate cache lines"),
    INIT_PARAM(pio_latency, "Programmed IO latency in bus cycles"),
    INIT_PARAM(pio_delay_write, ""),
    INIT_PARAM(intr_delay, "Interrupt Delay in microseconds"),

    INIT_PARAM(rx_delay, "Receive Delay"),
    INIT_PARAM(tx_delay, "Transmit Delay"),
    INIT_PARAM(rx_fifo_size, "max size in bytes of rxFifo"),
    INIT_PARAM(tx_fifo_size, "max size in bytes of txFifo"),

    INIT_PARAM(rx_filter, "Enable Receive Filter"),
    INIT_PARAM(hardware_address, "Ethernet Hardware Address"),
    INIT_PARAM(rx_thread, ""),
    INIT_PARAM(tx_thread, "")

END_INIT_SIM_OBJECT_PARAMS(NSGigE)
3098
3099
3100 CREATE_SIM_OBJECT(NSGigE)
3101 {
3102 NSGigE::Params *params = new NSGigE::Params;
3103
3104 params->name = getInstanceName();
3105
3106 params->clock = clock;
3107
3108 params->mmu = mmu;
3109 params->pmem = physmem;
3110 params->configSpace = configspace;
3111 params->configData = configdata;
3112 params->plat = platform;
3113 params->busNum = pci_bus;
3114 params->deviceNum = pci_dev;
3115 params->functionNum = pci_func;
3116
3117 params->hier = hier;
3118 params->pio_bus = pio_bus;
3119 params->header_bus = dma_bus;
3120 params->payload_bus = payload_bus;
3121 params->dma_desc_free = dma_desc_free;
3122 params->dma_data_free = dma_data_free;
3123 params->dma_read_delay = dma_read_delay;
3124 params->dma_write_delay = dma_write_delay;
3125 params->dma_read_factor = dma_read_factor;
3126 params->dma_write_factor = dma_write_factor;
3127 params->dma_no_allocate = dma_no_allocate;
3128 params->pio_latency = pio_latency;
3129 params->pio_delay_write = pio_delay_write;
3130 params->intr_delay = intr_delay;
3131
3132 params->rx_delay = rx_delay;
3133 params->tx_delay = tx_delay;
3134 params->rx_fifo_size = rx_fifo_size;
3135 params->tx_fifo_size = tx_fifo_size;
3136
3137 params->rx_filter = rx_filter;
3138 params->eaddr = hardware_address;
3139 params->rx_thread = rx_thread;
3140 params->tx_thread = tx_thread;
3141
3142 return new NSGigE(params);
3143 }
3144
3145 REGISTER_SIM_OBJECT("NSGigE", NSGigE)