src/dev/ns_gige.cc
1 /*
2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Nathan Binkert
29 * Lisa Hsu
30 */
31
32 /** @file
33 * Device module for modelling the National Semiconductor
34 * DP83820 ethernet controller. Does not support priority queueing
35 */
36 #include <deque>
37 #include <string>
38
39 #include "base/inet.hh"
40 #include "cpu/thread_context.hh"
41 #include "dev/etherlink.hh"
42 #include "dev/ns_gige.hh"
43 #include "dev/pciconfigall.hh"
44 #include "mem/packet.hh"
45 #include "mem/packet_access.hh"
46 #include "params/NSGigE.hh"
47 #include "sim/debug.hh"
48 #include "sim/host.hh"
49 #include "sim/stats.hh"
50 #include "sim/system.hh"
51
52 const char *NsRxStateStrings[] =
53 {
54 "rxIdle",
55 "rxDescRefr",
56 "rxDescRead",
57 "rxFifoBlock",
58 "rxFragWrite",
59 "rxDescWrite",
60 "rxAdvance"
61 };
62
63 const char *NsTxStateStrings[] =
64 {
65 "txIdle",
66 "txDescRefr",
67 "txDescRead",
68 "txFifoBlock",
69 "txFragRead",
70 "txDescWrite",
71 "txAdvance"
72 };
73
74 const char *NsDmaState[] =
75 {
76 "dmaIdle",
77 "dmaReading",
78 "dmaWriting",
79 "dmaReadWaiting",
80 "dmaWriteWaiting"
81 };
82
83 using namespace std;
84 using namespace Net;
85 using namespace TheISA;
86
87 ///////////////////////////////////////////////////////////////////////
88 //
89 // NSGigE PCI Device
90 //
91 NSGigE::NSGigE(Params *p)
92 : EtherDevice(p), ioEnable(false),
93 txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size),
94 txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
95 txXferLen(0), rxXferLen(0), rxDmaFree(false), txDmaFree(false),
96 clock(p->clock),
97 txState(txIdle), txEnable(false), CTDD(false), txHalt(false),
98 txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
99 rxEnable(false), CRDD(false), rxPktBytes(0), rxHalt(false),
100 rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
101 eepromState(eepromStart), eepromClk(false), eepromBitsToRx(0),
102 eepromOpcode(0), eepromAddress(0), eepromData(0),
103 dmaReadDelay(p->dma_read_delay), dmaWriteDelay(p->dma_write_delay),
104 dmaReadFactor(p->dma_read_factor), dmaWriteFactor(p->dma_write_factor),
105 rxDmaData(NULL), rxDmaAddr(0), rxDmaLen(0),
106 txDmaData(NULL), txDmaAddr(0), txDmaLen(0),
107 rxDmaReadEvent(this), rxDmaWriteEvent(this),
108 txDmaReadEvent(this), txDmaWriteEvent(this),
109 dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free),
110 txDelay(p->tx_delay), rxDelay(p->rx_delay),
111 rxKickTick(0), rxKickEvent(this), txKickTick(0), txKickEvent(this),
112 txEvent(this), rxFilterEnable(p->rx_filter),
113 acceptBroadcast(false), acceptMulticast(false), acceptUnicast(false),
114 acceptPerfect(false), acceptArp(false), multicastHashEnable(false),
115 intrDelay(p->intr_delay), intrTick(0), cpuPendingIntr(false),
116 intrEvent(0), interface(0)
117 {
118
119
120 interface = new NSGigEInt(name() + ".int0", this);
121
122 regsReset();
123 memcpy(&rom.perfectMatch, p->hardware_address.bytes(), ETH_ADDR_LEN);
124
125 memset(&rxDesc32, 0, sizeof(rxDesc32));
126 memset(&txDesc32, 0, sizeof(txDesc32));
127 memset(&rxDesc64, 0, sizeof(rxDesc64));
128 memset(&txDesc64, 0, sizeof(txDesc64));
129 }
130
131 NSGigE::~NSGigE()
132 {}
133
134 void
135 NSGigE::regStats()
136 {
137 txBytes
138 .name(name() + ".txBytes")
139 .desc("Bytes Transmitted")
140 .prereq(txBytes)
141 ;
142
143 rxBytes
144 .name(name() + ".rxBytes")
145 .desc("Bytes Received")
146 .prereq(rxBytes)
147 ;
148
149 txPackets
150 .name(name() + ".txPackets")
151 .desc("Number of Packets Transmitted")
152 .prereq(txBytes)
153 ;
154
155 rxPackets
156 .name(name() + ".rxPackets")
157 .desc("Number of Packets Received")
158 .prereq(rxBytes)
159 ;
160
161 txIpChecksums
162 .name(name() + ".txIpChecksums")
163 .desc("Number of tx IP Checksums done by device")
164 .precision(0)
165 .prereq(txBytes)
166 ;
167
168 rxIpChecksums
169 .name(name() + ".rxIpChecksums")
170 .desc("Number of rx IP Checksums done by device")
171 .precision(0)
172 .prereq(rxBytes)
173 ;
174
175 txTcpChecksums
176 .name(name() + ".txTcpChecksums")
177 .desc("Number of tx TCP Checksums done by device")
178 .precision(0)
179 .prereq(txBytes)
180 ;
181
182 rxTcpChecksums
183 .name(name() + ".rxTcpChecksums")
184 .desc("Number of rx TCP Checksums done by device")
185 .precision(0)
186 .prereq(rxBytes)
187 ;
188
189 txUdpChecksums
190 .name(name() + ".txUdpChecksums")
191 .desc("Number of tx UDP Checksums done by device")
192 .precision(0)
193 .prereq(txBytes)
194 ;
195
196 rxUdpChecksums
197 .name(name() + ".rxUdpChecksums")
198 .desc("Number of rx UDP Checksums done by device")
199 .precision(0)
200 .prereq(rxBytes)
201 ;
202
203 descDmaReads
204 .name(name() + ".descDMAReads")
205 .desc("Number of descriptors the device read w/ DMA")
206 .precision(0)
207 ;
208
209 descDmaWrites
210 .name(name() + ".descDMAWrites")
211 .desc("Number of descriptors the device wrote w/ DMA")
212 .precision(0)
213 ;
214
215 descDmaRdBytes
216 .name(name() + ".descDmaReadBytes")
217 .desc("number of descriptor bytes read w/ DMA")
218 .precision(0)
219 ;
220
221 descDmaWrBytes
222 .name(name() + ".descDmaWriteBytes")
223 .desc("number of descriptor bytes written w/ DMA")
224 .precision(0)
225 ;
226
227 txBandwidth
228 .name(name() + ".txBandwidth")
229 .desc("Transmit Bandwidth (bits/s)")
230 .precision(0)
231 .prereq(txBytes)
232 ;
233
234 rxBandwidth
235 .name(name() + ".rxBandwidth")
236 .desc("Receive Bandwidth (bits/s)")
237 .precision(0)
238 .prereq(rxBytes)
239 ;
240
241 totBandwidth
242 .name(name() + ".totBandwidth")
243 .desc("Total Bandwidth (bits/s)")
244 .precision(0)
245 .prereq(totBytes)
246 ;
247
248 totPackets
249 .name(name() + ".totPackets")
250 .desc("Total Packets")
251 .precision(0)
252 .prereq(totBytes)
253 ;
254
255 totBytes
256 .name(name() + ".totBytes")
257 .desc("Total Bytes")
258 .precision(0)
259 .prereq(totBytes)
260 ;
261
262 totPacketRate
263 .name(name() + ".totPPS")
264 .desc("Total Transmission Rate (packets/s)")
265 .precision(0)
266 .prereq(totBytes)
267 ;
268
269 txPacketRate
270 .name(name() + ".txPPS")
271 .desc("Packet Transmission Rate (packets/s)")
272 .precision(0)
273 .prereq(txBytes)
274 ;
275
276 rxPacketRate
277 .name(name() + ".rxPPS")
278 .desc("Packet Reception Rate (packets/s)")
279 .precision(0)
280 .prereq(rxBytes)
281 ;
282
283 postedSwi
284 .name(name() + ".postedSwi")
285 .desc("number of software interrupts posted to CPU")
286 .precision(0)
287 ;
288
289 totalSwi
290 .name(name() + ".totalSwi")
291 .desc("total number of Swi written to ISR")
292 .precision(0)
293 ;
294
295 coalescedSwi
296 .name(name() + ".coalescedSwi")
297 .desc("average number of Swi's coalesced into each post")
298 .precision(0)
299 ;
300
301 postedRxIdle
302 .name(name() + ".postedRxIdle")
303 .desc("number of rxIdle interrupts posted to CPU")
304 .precision(0)
305 ;
306
307 totalRxIdle
308 .name(name() + ".totalRxIdle")
309 .desc("total number of RxIdle written to ISR")
310 .precision(0)
311 ;
312
313 coalescedRxIdle
314 .name(name() + ".coalescedRxIdle")
315 .desc("average number of RxIdle's coalesced into each post")
316 .precision(0)
317 ;
318
319 postedRxOk
320 .name(name() + ".postedRxOk")
321 .desc("number of RxOk interrupts posted to CPU")
322 .precision(0)
323 ;
324
325 totalRxOk
326 .name(name() + ".totalRxOk")
327 .desc("total number of RxOk written to ISR")
328 .precision(0)
329 ;
330
331 coalescedRxOk
332 .name(name() + ".coalescedRxOk")
333 .desc("average number of RxOk's coalesced into each post")
334 .precision(0)
335 ;
336
337 postedRxDesc
338 .name(name() + ".postedRxDesc")
339 .desc("number of RxDesc interrupts posted to CPU")
340 .precision(0)
341 ;
342
343 totalRxDesc
344 .name(name() + ".totalRxDesc")
345 .desc("total number of RxDesc written to ISR")
346 .precision(0)
347 ;
348
349 coalescedRxDesc
350 .name(name() + ".coalescedRxDesc")
351 .desc("average number of RxDesc's coalesced into each post")
352 .precision(0)
353 ;
354
355 postedTxOk
356 .name(name() + ".postedTxOk")
357 .desc("number of TxOk interrupts posted to CPU")
358 .precision(0)
359 ;
360
361 totalTxOk
362 .name(name() + ".totalTxOk")
363 .desc("total number of TxOk written to ISR")
364 .precision(0)
365 ;
366
367 coalescedTxOk
368 .name(name() + ".coalescedTxOk")
369 .desc("average number of TxOk's coalesced into each post")
370 .precision(0)
371 ;
372
373 postedTxIdle
374 .name(name() + ".postedTxIdle")
375 .desc("number of TxIdle interrupts posted to CPU")
376 .precision(0)
377 ;
378
379 totalTxIdle
380 .name(name() + ".totalTxIdle")
381 .desc("total number of TxIdle written to ISR")
382 .precision(0)
383 ;
384
385 coalescedTxIdle
386 .name(name() + ".coalescedTxIdle")
387 .desc("average number of TxIdle's coalesced into each post")
388 .precision(0)
389 ;
390
391 postedTxDesc
392 .name(name() + ".postedTxDesc")
393 .desc("number of TxDesc interrupts posted to CPU")
394 .precision(0)
395 ;
396
397 totalTxDesc
398 .name(name() + ".totalTxDesc")
399 .desc("total number of TxDesc written to ISR")
400 .precision(0)
401 ;
402
403 coalescedTxDesc
404 .name(name() + ".coalescedTxDesc")
405 .desc("average number of TxDesc's coalesced into each post")
406 .precision(0)
407 ;
408
409 postedRxOrn
410 .name(name() + ".postedRxOrn")
411 .desc("number of RxOrn posted to CPU")
412 .precision(0)
413 ;
414
415 totalRxOrn
416 .name(name() + ".totalRxOrn")
417 .desc("total number of RxOrn written to ISR")
418 .precision(0)
419 ;
420
421 coalescedRxOrn
422 .name(name() + ".coalescedRxOrn")
423 .desc("average number of RxOrn's coalesced into each post")
424 .precision(0)
425 ;
426
427 coalescedTotal
428 .name(name() + ".coalescedTotal")
429 .desc("average number of interrupts coalesced into each post")
430 .precision(0)
431 ;
432
433 postedInterrupts
434 .name(name() + ".postedInterrupts")
435 .desc("number of posts to CPU")
436 .precision(0)
437 ;
438
439 droppedPackets
440 .name(name() + ".droppedPackets")
441 .desc("number of packets dropped")
442 .precision(0)
443 ;
444
445 coalescedSwi = totalSwi / postedInterrupts;
446 coalescedRxIdle = totalRxIdle / postedInterrupts;
447 coalescedRxOk = totalRxOk / postedInterrupts;
448 coalescedRxDesc = totalRxDesc / postedInterrupts;
449 coalescedTxOk = totalTxOk / postedInterrupts;
450 coalescedTxIdle = totalTxIdle / postedInterrupts;
451 coalescedTxDesc = totalTxDesc / postedInterrupts;
452 coalescedRxOrn = totalRxOrn / postedInterrupts;
453
454 coalescedTotal = (totalSwi + totalRxIdle + totalRxOk + totalRxDesc +
455 totalTxOk + totalTxIdle + totalTxDesc +
456 totalRxOrn) / postedInterrupts;
457
458 txBandwidth = txBytes * Stats::constant(8) / simSeconds;
459 rxBandwidth = rxBytes * Stats::constant(8) / simSeconds;
460 totBandwidth = txBandwidth + rxBandwidth;
461 totBytes = txBytes + rxBytes;
462 totPackets = txPackets + rxPackets;
463
464 txPacketRate = txPackets / simSeconds;
465 rxPacketRate = rxPackets / simSeconds;
466 }
467
468
469 /**
470 * This is to write to the PCI general configuration registers
471 */
472 Tick
473 NSGigE::writeConfig(PacketPtr pkt)
474 {
475 int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
476 if (offset < PCI_DEVICE_SPECIFIC)
477 PciDev::writeConfig(pkt);
478 else
479 panic("Device specific PCI config space not implemented!\n");
480
481 switch (offset) {
482 // seems to work fine without all these PCI settings, but I
483 // track the IO enable bit anyway; an assertion will fail if we
484 // ever need to implement it properly
485 case PCI_COMMAND:
486 if (config.data[offset] & PCI_CMD_IOSE)
487 ioEnable = true;
488 else
489 ioEnable = false;
490 break;
491 }
492
493 return configDelay;
494 }
495
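/**
 * Return the device's Ethernet port. Only the name "interface" is
 * valid; it panics if that port is already connected and returns
 * NULL for any other name.
 */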
496 EtherInt*
497 NSGigE::getEthPort(const std::string &if_name, int idx)
498 {
499 if (if_name == "interface") {
500 if (interface->getPeer())
501 panic("interface already connected\n");
502 return interface;
503 }
504 return NULL;
505 }
506
507 /**
508 * This reads the device registers, which are detailed in the NS83820
509 * spec sheet
510 */
511 Tick
512 NSGigE::read(PacketPtr pkt)
513 {
514 assert(ioEnable);
515
516 pkt->allocate();
517
518 //The mask is to give you only the offset into the device register file
519 Addr daddr = pkt->getAddr() & 0xfff;
520 DPRINTF(EthernetPIO, "read da=%#x pa=%#x size=%d\n",
521 daddr, pkt->getAddr(), pkt->getSize());
522
523
524 // there are some reserved registers, you can see ns_gige_reg.h and
525 // the spec sheet for details
526 if (daddr > LAST && daddr <= RESERVED) {
527 panic("Accessing reserved register");
528 } else if (daddr > RESERVED && daddr <= 0x3FC) {
529 return readConfig(pkt);
530 } else if (daddr >= MIB_START && daddr <= MIB_END) {
531 // We don't implement all the MIBs; hopefully the kernel doesn't
532 // actually depend upon their values.
533 // The MIBs are just hardware statistics counters.
534 pkt->set<uint32_t>(0);
535 pkt->makeAtomicResponse();
536 return pioDelay;
537 } else if (daddr > 0x3FC)
538 panic("Something is messed up!\n");
539
540 assert(pkt->getSize() == sizeof(uint32_t));
541 uint32_t &reg = *pkt->getPtr<uint32_t>();
542 uint16_t rfaddr;
543
544 switch (daddr) {
545 case CR:
546 reg = regs.command;
547 //these are supposed to be cleared on a read
548 reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
549 break;
550
551 case CFGR:
552 reg = regs.config;
553 break;
554
555 case MEAR:
556 reg = regs.mear;
557 break;
558
559 case PTSCR:
560 reg = regs.ptscr;
561 break;
562
563 case ISR:
564 reg = regs.isr;
565 devIntrClear(ISR_ALL);
566 break;
567
568 case IMR:
569 reg = regs.imr;
570 break;
571
572 case IER:
573 reg = regs.ier;
574 break;
575
576 case IHR:
577 reg = regs.ihr;
578 break;
579
580 case TXDP:
581 reg = regs.txdp;
582 break;
583
584 case TXDP_HI:
585 reg = regs.txdp_hi;
586 break;
587
588 case TX_CFG:
589 reg = regs.txcfg;
590 break;
591
592 case GPIOR:
593 reg = regs.gpior;
594 break;
595
596 case RXDP:
597 reg = regs.rxdp;
598 break;
599
600 case RXDP_HI:
601 reg = regs.rxdp_hi;
602 break;
603
604 case RX_CFG:
605 reg = regs.rxcfg;
606 break;
607
608 case PQCR:
609 reg = regs.pqcr;
610 break;
611
612 case WCSR:
613 reg = regs.wcsr;
614 break;
615
616 case PCR:
617 reg = regs.pcr;
618 break;
619
620 // see the spec sheet for how RFCR and RFDR work
621 // basically, you write to RFCR to tell the machine
622 // what you want to do next, then you act upon RFDR,
623 // and the device will be prepared b/c of what you
624 // wrote to RFCR
625 case RFCR:
626 reg = regs.rfcr;
627 break;
628
629 case RFDR:
630 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
631 switch (rfaddr) {
632 // Read from perfect match ROM octets
633 case 0x000:
634 reg = rom.perfectMatch[1];
635 reg = reg << 8;
636 reg += rom.perfectMatch[0];
637 break;
638 case 0x002:
639 reg = rom.perfectMatch[3] << 8;
640 reg += rom.perfectMatch[2];
641 break;
642 case 0x004:
643 reg = rom.perfectMatch[5] << 8;
644 reg += rom.perfectMatch[4];
645 break;
646 default:
647 // Read filter hash table
648 if (rfaddr >= FHASH_ADDR &&
649 rfaddr < FHASH_ADDR + FHASH_SIZE) {
650
651 // Only word-aligned reads supported
652 if (rfaddr % 2)
653 panic("unaligned read from filter hash table!");
654
655 reg = rom.filterHash[rfaddr - FHASH_ADDR + 1] << 8;
656 reg += rom.filterHash[rfaddr - FHASH_ADDR];
657 break;
658 }
659
660 panic("reading RFDR for something other than pattern"
661 " matching or hashing! %#x\n", rfaddr);
662 }
663 break;
664
665 case SRR:
666 reg = regs.srr;
667 break;
668
669 case MIBC:
670 reg = regs.mibc;
671 reg &= ~(MIBC_MIBS | MIBC_ACLR);
672 break;
673
674 case VRCR:
675 reg = regs.vrcr;
676 break;
677
678 case VTCR:
679 reg = regs.vtcr;
680 break;
681
682 case VDR:
683 reg = regs.vdr;
684 break;
685
686 case CCSR:
687 reg = regs.ccsr;
688 break;
689
690 case TBICR:
691 reg = regs.tbicr;
692 break;
693
694 case TBISR:
695 reg = regs.tbisr;
696 break;
697
698 case TANAR:
699 reg = regs.tanar;
700 break;
701
702 case TANLPAR:
703 reg = regs.tanlpar;
704 break;
705
706 case TANER:
707 reg = regs.taner;
708 break;
709
710 case TESR:
711 reg = regs.tesr;
712 break;
713
714 case M5REG:
715 reg = 0;
716 if (params()->rx_thread)
717 reg |= M5REG_RX_THREAD;
718 if (params()->tx_thread)
719 reg |= M5REG_TX_THREAD;
720 if (params()->rss)
721 reg |= M5REG_RSS;
722 break;
723
724 default:
725 panic("reading unimplemented register: addr=%#x", daddr);
726 }
727
728 DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
729 daddr, reg, reg);
730
731 pkt->makeAtomicResponse();
732 return pioDelay;
733 }
734
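/**
 * This writes the device registers, which are detailed in the NS83820
 * spec sheet, and handles the side effects of each register write
 * (kicking the tx/rx state machines, resets, interrupt posting, etc.)
 */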
735 Tick
736 NSGigE::write(PacketPtr pkt)
737 {
738 assert(ioEnable);
739
740 Addr daddr = pkt->getAddr() & 0xfff;
741 DPRINTF(EthernetPIO, "write da=%#x pa=%#x size=%d\n",
742 daddr, pkt->getAddr(), pkt->getSize());
743
744 if (daddr > LAST && daddr <= RESERVED) {
745 panic("Accessing reserved register");
746 } else if (daddr > RESERVED && daddr <= 0x3FC) {
747 return writeConfig(pkt);
748 } else if (daddr > 0x3FC)
749 panic("Something is messed up!\n");
750
751 if (pkt->getSize() == sizeof(uint32_t)) {
752 uint32_t reg = pkt->get<uint32_t>();
753 uint16_t rfaddr;
754
755 DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);
756
757 switch (daddr) {
758 case CR:
759 regs.command = reg;
760 if (reg & CR_TXD) {
761 txEnable = false;
762 } else if (reg & CR_TXE) {
763 txEnable = true;
764
765 // the kernel is enabling the transmit machine
766 if (txState == txIdle)
767 txKick();
768 }
769
770 if (reg & CR_RXD) {
771 rxEnable = false;
772 } else if (reg & CR_RXE) {
773 rxEnable = true;
774
775 if (rxState == rxIdle)
776 rxKick();
777 }
778
779 if (reg & CR_TXR)
780 txReset();
781
782 if (reg & CR_RXR)
783 rxReset();
784
785 if (reg & CR_SWI)
786 devIntrPost(ISR_SWI);
787
788 if (reg & CR_RST) {
789 txReset();
790 rxReset();
791
792 regsReset();
793 }
794 break;
795
796 case CFGR:
797 if (reg & CFGR_LNKSTS ||
798 reg & CFGR_SPDSTS ||
799 reg & CFGR_DUPSTS ||
800 reg & CFGR_RESERVED ||
801 reg & CFGR_T64ADDR ||
802 reg & CFGR_PCI64_DET) { /* read-only/status bits in the write are simply ignored */ }
803
804 // First clear all writable bits
805 regs.config &= CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
806 CFGR_RESERVED | CFGR_T64ADDR |
807 CFGR_PCI64_DET;
808 // Now set the appropriate writable bits
809 regs.config |= reg & ~(CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
810 CFGR_RESERVED | CFGR_T64ADDR |
811 CFGR_PCI64_DET);
812
813 // All these #if 0's are here because the kernel doesn't appear to
814 // need these bits implemented. If a problem shows up relating to
815 // one of them, functionality may need to be added.
816 if (reg & CFGR_TBI_EN) ;
817 if (reg & CFGR_MODE_1000) ;
818
819 if (reg & CFGR_AUTO_1000)
820 panic("CFGR_AUTO_1000 not implemented!\n");
821
822 if (reg & CFGR_PINT_DUPSTS ||
823 reg & CFGR_PINT_LNKSTS ||
824 reg & CFGR_PINT_SPDSTS)
825 ;
826
827 if (reg & CFGR_TMRTEST) ;
828 if (reg & CFGR_MRM_DIS) ;
829 if (reg & CFGR_MWI_DIS) ;
830
831 if (reg & CFGR_T64ADDR) ;
832 // panic("CFGR_T64ADDR is read only register!\n");
833
834 if (reg & CFGR_PCI64_DET)
835 panic("CFGR_PCI64_DET is read only register!\n");
836
837 if (reg & CFGR_DATA64_EN) ;
838 if (reg & CFGR_M64ADDR) ;
839 if (reg & CFGR_PHY_RST) ;
840 if (reg & CFGR_PHY_DIS) ;
841
842 if (reg & CFGR_EXTSTS_EN)
843 extstsEnable = true;
844 else
845 extstsEnable = false;
846
847 if (reg & CFGR_REQALG) ;
848 if (reg & CFGR_SB) ;
849 if (reg & CFGR_POW) ;
850 if (reg & CFGR_EXD) ;
851 if (reg & CFGR_PESEL) ;
852 if (reg & CFGR_BROM_DIS) ;
853 if (reg & CFGR_EXT_125) ;
854 if (reg & CFGR_BEM) ;
855 break;
856
857 case MEAR:
858 // Clear writable bits
859 regs.mear &= MEAR_EEDO;
860 // Set appropriate writable bits
861 regs.mear |= reg & ~MEAR_EEDO;
862
863 // FreeBSD uses the EEPROM to read PMATCH (for the MAC address)
864 // even though it could get it through RFDR
865 if (reg & MEAR_EESEL) {
866 // Rising edge of clock
867 if (reg & MEAR_EECLK && !eepromClk)
868 eepromKick();
869 }
870 else {
871 eepromState = eepromStart;
872 regs.mear &= ~MEAR_EEDI;
873 }
874
875 eepromClk = reg & MEAR_EECLK;
876
877 // since phy is completely faked, MEAR_MD* don't matter
878 if (reg & MEAR_MDIO) ;
879 if (reg & MEAR_MDDIR) ;
880 if (reg & MEAR_MDC) ;
881 break;
882
883 case PTSCR:
884 regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
885 // these control BISTs for various parts of chip - we
886 // don't care or do just fake that the BIST is done
887 if (reg & PTSCR_RBIST_EN)
888 regs.ptscr |= PTSCR_RBIST_DONE;
889 if (reg & PTSCR_EEBIST_EN)
890 regs.ptscr &= ~PTSCR_EEBIST_EN;
891 if (reg & PTSCR_EELOAD_EN)
892 regs.ptscr &= ~PTSCR_EELOAD_EN;
893 break;
894
895 case ISR: /* writing to the ISR has no effect */
896 panic("ISR is a read only register!\n");
897
898 case IMR:
899 regs.imr = reg;
900 devIntrChangeMask();
901 break;
902
903 case IER:
904 regs.ier = reg;
905 break;
906
907 case IHR:
908 regs.ihr = reg;
909 /* not going to implement real interrupt holdoff */
910 break;
911
912 case TXDP:
913 regs.txdp = (reg & 0xFFFFFFFC);
914 assert(txState == txIdle);
915 CTDD = false;
916 break;
917
918 case TXDP_HI:
919 regs.txdp_hi = reg;
920 break;
921
922 case TX_CFG:
923 regs.txcfg = reg;
924 #if 0
925 if (reg & TX_CFG_CSI) ;
926 if (reg & TX_CFG_HBI) ;
927 if (reg & TX_CFG_MLB) ;
928 if (reg & TX_CFG_ATP) ;
929 if (reg & TX_CFG_ECRETRY) {
930 /*
931 * this could easily be implemented, but considering
932 * the network is just a fake pipe, wouldn't make
933 * sense to do this
934 */
935 }
936
937 if (reg & TX_CFG_BRST_DIS) ;
938 #endif
939
940 #if 0
941 /* we handle our own DMA, ignore the kernel's exhortations */
942 if (reg & TX_CFG_MXDMA) ;
943 #endif
944
945 // also, we currently don't care about fill/drain
946 // thresholds though this may change in the future with
947 // more realistic networks or a driver which changes it
948 // according to feedback
949
950 break;
951
952 case GPIOR:
953 // Only write writable bits
954 regs.gpior &= GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
955 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN;
956 regs.gpior |= reg & ~(GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
957 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN);
958 /* these just control general purpose i/o pins, don't matter */
959 break;
960
961 case RXDP:
962 regs.rxdp = reg;
963 CRDD = false;
964 break;
965
966 case RXDP_HI:
967 regs.rxdp_hi = reg;
968 break;
969
970 case RX_CFG:
971 regs.rxcfg = reg;
972 #if 0
973 if (reg & RX_CFG_AEP) ;
974 if (reg & RX_CFG_ARP) ;
975 if (reg & RX_CFG_STRIPCRC) ;
976 if (reg & RX_CFG_RX_RD) ;
977 if (reg & RX_CFG_ALP) ;
978 if (reg & RX_CFG_AIRL) ;
979
980 /* we handle our own DMA, ignore what kernel says about it */
981 if (reg & RX_CFG_MXDMA) ;
982
983 //also, we currently don't care about fill/drain thresholds
984 //though this may change in the future with more realistic
985 //networks or a driver which changes it according to feedback
986 if (reg & (RX_CFG_DRTH | RX_CFG_DRTH0)) ;
987 #endif
988 break;
989
990 case PQCR:
991 /* there is no priority queueing used in the linux 2.6 driver */
992 regs.pqcr = reg;
993 break;
994
995 case WCSR:
996 /* not going to implement wake on LAN */
997 regs.wcsr = reg;
998 break;
999
1000 case PCR:
1001 /* not going to implement pause control */
1002 regs.pcr = reg;
1003 break;
1004
1005 case RFCR:
1006 regs.rfcr = reg;
1007
1008 rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
1009 acceptBroadcast = (reg & RFCR_AAB) ? true : false;
1010 acceptMulticast = (reg & RFCR_AAM) ? true : false;
1011 acceptUnicast = (reg & RFCR_AAU) ? true : false;
1012 acceptPerfect = (reg & RFCR_APM) ? true : false;
1013 acceptArp = (reg & RFCR_AARP) ? true : false;
1014 multicastHashEnable = (reg & RFCR_MHEN) ? true : false;
1015
1016 #if 0
1017 if (reg & RFCR_APAT)
1018 panic("RFCR_APAT not implemented!\n");
1019 #endif
1020 if (reg & RFCR_UHEN)
1021 panic("Unicast hash filtering not used by drivers!\n");
1022
1023 if (reg & RFCR_ULM)
1024 panic("RFCR_ULM not implemented!\n");
1025
1026 break;
1027
1028 case RFDR:
1029 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
1030 switch (rfaddr) {
1031 case 0x000:
1032 rom.perfectMatch[0] = (uint8_t)reg;
1033 rom.perfectMatch[1] = (uint8_t)(reg >> 8);
1034 break;
1035 case 0x002:
1036 rom.perfectMatch[2] = (uint8_t)reg;
1037 rom.perfectMatch[3] = (uint8_t)(reg >> 8);
1038 break;
1039 case 0x004:
1040 rom.perfectMatch[4] = (uint8_t)reg;
1041 rom.perfectMatch[5] = (uint8_t)(reg >> 8);
1042 break;
1043 default:
1044
1045 if (rfaddr >= FHASH_ADDR &&
1046 rfaddr < FHASH_ADDR + FHASH_SIZE) {
1047
1048 // Only word-aligned writes supported
1049 if (rfaddr % 2)
1050 panic("unaligned write to filter hash table!");
1051
1052 rom.filterHash[rfaddr - FHASH_ADDR] = (uint8_t)reg;
1053 rom.filterHash[rfaddr - FHASH_ADDR + 1]
1054 = (uint8_t)(reg >> 8);
1055 break;
1056 }
1057 panic("writing RFDR for something other than pattern"
1058 " matching or hashing! %#x\n", rfaddr);
1059 }
break;
1060
1061 case BRAR:
1062 regs.brar = reg;
1063 break;
1064
1065 case BRDR:
1066 panic("the driver never uses BRDR, something is wrong!\n");
1067
1068 case SRR:
1069 panic("SRR is read only register!\n");
1070
1071 case MIBC:
1072 panic("the driver never uses MIBC, something is wrong!\n");
1073
1074 case VRCR:
1075 regs.vrcr = reg;
1076 break;
1077
1078 case VTCR:
1079 regs.vtcr = reg;
1080 break;
1081
1082 case VDR:
1083 panic("the driver never uses VDR, something is wrong!\n");
1084
1085 case CCSR:
1086 /* not going to implement clockrun stuff */
1087 regs.ccsr = reg;
1088 break;
1089
1090 case TBICR:
1091 regs.tbicr = reg;
1092 if (reg & TBICR_MR_LOOPBACK)
1093 panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
1094
1095 if (reg & TBICR_MR_AN_ENABLE) {
1096 regs.tanlpar = regs.tanar;
1097 regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
1098 }
1099
1100 #if 0
1101 if (reg & TBICR_MR_RESTART_AN) ;
1102 #endif
1103
1104 break;
1105
1106 case TBISR:
1107 panic("TBISR is read only register!\n");
1108
1109 case TANAR:
1110 // Only write the writable bits
1111 regs.tanar &= TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED;
1112 regs.tanar |= reg & ~(TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED);
1113
1114 // Pause capability unimplemented
1115 #if 0
1116 if (reg & TANAR_PS2) ;
1117 if (reg & TANAR_PS1) ;
1118 #endif
1119
1120 break;
1121
1122 case TANLPAR:
1123 panic("this should only be written to by the fake phy!\n");
1124
1125 case TANER:
1126 panic("TANER is read only register!\n");
1127
1128 case TESR:
1129 regs.tesr = reg;
1130 break;
1131
1132 default:
1133 panic("invalid register access daddr=%#x", daddr);
1134 }
1135 } else {
1136 panic("Invalid Request Size");
1137 }
1138 pkt->makeAtomicResponse();
1139 return pioDelay;
1140 }
1141
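/**
 * Post device-level interrupts: set the given bits in the ISR, count
 * per-source statistics for sources enabled in the IMR, and, if any
 * enabled interrupt is now pending, schedule a CPU interrupt intrDelay
 * ticks in the future (immediately for ISR_NODELAY sources).
 */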
1142 void
1143 NSGigE::devIntrPost(uint32_t interrupts)
1144 {
1145 if (interrupts & ISR_RESERVE)
1146 panic("Cannot set a reserved interrupt");
1147
1148 if (interrupts & ISR_NOIMPL)
1149 warn("interrupt not implemented %#x\n", interrupts);
1150
1151 interrupts &= ISR_IMPL;
1152 regs.isr |= interrupts;
1153
1154 if (interrupts & regs.imr) {
1155 if (interrupts & ISR_SWI) {
1156 totalSwi++;
1157 }
1158 if (interrupts & ISR_RXIDLE) {
1159 totalRxIdle++;
1160 }
1161 if (interrupts & ISR_RXOK) {
1162 totalRxOk++;
1163 }
1164 if (interrupts & ISR_RXDESC) {
1165 totalRxDesc++;
1166 }
1167 if (interrupts & ISR_TXOK) {
1168 totalTxOk++;
1169 }
1170 if (interrupts & ISR_TXIDLE) {
1171 totalTxIdle++;
1172 }
1173 if (interrupts & ISR_TXDESC) {
1174 totalTxDesc++;
1175 }
1176 if (interrupts & ISR_RXORN) {
1177 totalRxOrn++;
1178 }
1179 }
1180
1181 DPRINTF(EthernetIntr,
1182 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
1183 interrupts, regs.isr, regs.imr);
1184
1185 if ((regs.isr & regs.imr)) {
1186 Tick when = curTick;
1187 if ((regs.isr & regs.imr & ISR_NODELAY) == 0)
1188 when += intrDelay;
1189 cpuIntrPost(when);
1190 }
1191 }
1192
1193 /* Putting the interrupt counting stats inside this function means that it
1194 is now limited to being used to clear all interrupts when the kernel
1195 reads and services the ISR. Just a note in case you were thinking
1196 of expanding its use.
1197 */
1198 void
1199 NSGigE::devIntrClear(uint32_t interrupts)
1200 {
1201 if (interrupts & ISR_RESERVE)
1202 panic("Cannot clear a reserved interrupt");
1203
1204 if (regs.isr & regs.imr & ISR_SWI) {
1205 postedSwi++;
1206 }
1207 if (regs.isr & regs.imr & ISR_RXIDLE) {
1208 postedRxIdle++;
1209 }
1210 if (regs.isr & regs.imr & ISR_RXOK) {
1211 postedRxOk++;
1212 }
1213 if (regs.isr & regs.imr & ISR_RXDESC) {
1214 postedRxDesc++;
1215 }
1216 if (regs.isr & regs.imr & ISR_TXOK) {
1217 postedTxOk++;
1218 }
1219 if (regs.isr & regs.imr & ISR_TXIDLE) {
1220 postedTxIdle++;
1221 }
1222 if (regs.isr & regs.imr & ISR_TXDESC) {
1223 postedTxDesc++;
1224 }
1225 if (regs.isr & regs.imr & ISR_RXORN) {
1226 postedRxOrn++;
1227 }
1228
1229 if (regs.isr & regs.imr & ISR_IMPL)
1230 postedInterrupts++;
1231
1232 interrupts &= ~ISR_NOIMPL;
1233 regs.isr &= ~interrupts;
1234
1235 DPRINTF(EthernetIntr,
1236 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
1237 interrupts, regs.isr, regs.imr);
1238
1239 if (!(regs.isr & regs.imr))
1240 cpuIntrClear();
1241 }
1242
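/**
 * Called after the IMR changes: re-evaluate the masked ISR and either
 * post a CPU interrupt right away or clear any pending one.
 */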
1243 void
1244 NSGigE::devIntrChangeMask()
1245 {
1246 DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
1247 regs.isr, regs.imr, regs.isr & regs.imr);
1248
1249 if (regs.isr & regs.imr)
1250 cpuIntrPost(curTick);
1251 else
1252 cpuIntrClear();
1253 }
1254
1255 void
1256 NSGigE::cpuIntrPost(Tick when)
1257 {
1258 // If the interrupt you want to post is later than an interrupt
1259 // already scheduled, just let it post in the coming one and don't
1260 // schedule another.
1261 // HOWEVER, must be sure that the scheduled intrTick is in the
1262 // future (this was formerly the source of a bug)
1263 /**
1264 * @todo this warning should be removed and the intrTick code should
1265 * be fixed.
1266 */
1267 assert(when >= curTick);
1268 assert(intrTick >= curTick || intrTick == 0);
1269 if (when > intrTick && intrTick != 0) {
1270 DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
1271 intrTick);
1272 return;
1273 }
1274
1275 intrTick = when;
1276 if (intrTick < curTick) {
1277 debug_break();
1278 intrTick = curTick;
1279 }
1280
1281 DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
1282 intrTick);
1283
1284 if (intrEvent)
1285 intrEvent->squash();
1286 intrEvent = new IntrEvent(this, intrTick, true);
1287 }
1288
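/**
 * Fired by the interrupt event when intrTick is reached: clear the
 * bookkeeping for the scheduled event and assert the interrupt to the
 * CPU unless one is already pending.
 */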
1289 void
1290 NSGigE::cpuInterrupt()
1291 {
1292 assert(intrTick == curTick);
1293
1294 // Whether or not there's a pending interrupt, we don't care about
1295 // it anymore
1296 intrEvent = 0;
1297 intrTick = 0;
1298
1299 // Don't send an interrupt if there's already one
1300 if (cpuPendingIntr) {
1301 DPRINTF(EthernetIntr,
1302 "would send an interrupt now, but there's already pending\n");
1303 } else {
1304 // Send interrupt
1305 cpuPendingIntr = true;
1306
1307 DPRINTF(EthernetIntr, "posting interrupt\n");
1308 intrPost();
1309 }
1310 }
1311
1312 void
1313 NSGigE::cpuIntrClear()
1314 {
1315 if (!cpuPendingIntr)
1316 return;
1317
1318 if (intrEvent) {
1319 intrEvent->squash();
1320 intrEvent = 0;
1321 }
1322
1323 intrTick = 0;
1324
1325 cpuPendingIntr = false;
1326
1327 DPRINTF(EthernetIntr, "clearing interrupt\n");
1328 intrClear();
1329 }
1330
1331 bool
1332 NSGigE::cpuIntrPending() const
1333 { return cpuPendingIntr; }
1334
1335 void
1336 NSGigE::txReset()
1337 {
1338
1339 DPRINTF(Ethernet, "transmit reset\n");
1340
1341 CTDD = false;
1342 txEnable = false;
1343 txFragPtr = 0;
1344 assert(txDescCnt == 0);
1345 txFifo.clear();
1346 txState = txIdle;
1347 assert(txDmaState == dmaIdle);
1348 }
1349
1350 void
1351 NSGigE::rxReset()
1352 {
1353 DPRINTF(Ethernet, "receive reset\n");
1354
1355 CRDD = false;
1356 assert(rxPktBytes == 0);
1357 rxEnable = false;
1358 rxFragPtr = 0;
1359 assert(rxDescCnt == 0);
1360 assert(rxDmaState == dmaIdle);
1361 rxFifo.clear();
1362 rxState = rxIdle;
1363 }
1364
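/**
 * Reset the device registers and receive filter flags to their
 * power-on defaults; also invoked when the driver writes CR_RST.
 */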
1365 void
1366 NSGigE::regsReset()
1367 {
1368 memset(&regs, 0, sizeof(regs));
1369 regs.config = (CFGR_LNKSTS | CFGR_TBI_EN | CFGR_MODE_1000);
1370 regs.mear = 0x12;
1371 regs.txcfg = 0x120; // set drain threshold to 1024 bytes and
1372 // fill threshold to 32 bytes
1373 regs.rxcfg = 0x4; // set drain threshold to 16 bytes
1374 regs.srr = 0x0103; // set the silicon revision to rev B or 0x103
1375 regs.mibc = MIBC_FRZ;
1376 regs.vdr = 0x81; // set the vlan tag type to 802.1q
1377 regs.tesr = 0xc000; // TBI capable of both full and half duplex
1378 regs.brar = 0xffffffff;
1379
1380 extstsEnable = false;
1381 acceptBroadcast = false;
1382 acceptMulticast = false;
1383 acceptUnicast = false;
1384 acceptPerfect = false;
1385 acceptArp = false;
1386 }
1387
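/**
 * Start the receive-side DMA read described by rxDmaAddr/rxDmaLen/
 * rxDmaData. If another DMA is outstanding or the device is not
 * Running, the read is marked waiting so rxKick() retries it later.
 * Always returns true, telling the RX state machine to exit and wait
 * for rxDmaReadDone().
 */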
1388 bool
1389 NSGigE::doRxDmaRead()
1390 {
1391 assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
1392 rxDmaState = dmaReading;
1393
1394 if (dmaPending() || getState() != Running)
1395 rxDmaState = dmaReadWaiting;
1396 else
1397 dmaRead(rxDmaAddr, rxDmaLen, &rxDmaReadEvent, (uint8_t*)rxDmaData);
1398
1399 return true;
1400 }
1401
1402 void
1403 NSGigE::rxDmaReadDone()
1404 {
1405 assert(rxDmaState == dmaReading);
1406 rxDmaState = dmaIdle;
1407
1408 DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
1409 rxDmaAddr, rxDmaLen);
1410 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1411
1412 // If the transmit state machine has a pending DMA, let it go first
1413 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1414 txKick();
1415
1416 rxKick();
1417 }
1418
1419 bool
1420 NSGigE::doRxDmaWrite()
1421 {
1422 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
1423 rxDmaState = dmaWriting;
1424
1425 if (dmaPending() || getState() != Running)
1426 rxDmaState = dmaWriteWaiting;
1427 else
1428 dmaWrite(rxDmaAddr, rxDmaLen, &rxDmaWriteEvent, (uint8_t*)rxDmaData);
1429 return true;
1430 }
1431
1432 void
1433 NSGigE::rxDmaWriteDone()
1434 {
1435 assert(rxDmaState == dmaWriting);
1436 rxDmaState = dmaIdle;
1437
1438 DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
1439 rxDmaAddr, rxDmaLen);
1440 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1441
1442 // If the transmit state machine has a pending DMA, let it go first
1443 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1444 txKick();
1445
1446 rxKick();
1447 }
1448
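/**
 * Run the receive state machine (states named in NsRxStateStrings).
 * It loops via the "next" label while work can proceed and leaves via
 * "exit" when it must wait for a DMA completion, an empty rx FIFO, or
 * the next state machine clock tick; DMA callbacks and recvPacket()
 * re-enter it.
 */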
1449 void
1450 NSGigE::rxKick()
1451 {
1452 bool is64bit = (bool)(regs.config & CFGR_M64ADDR);
1453
1454 DPRINTF(EthernetSM,
1455 "receive kick rxState=%s (rxBuf.size=%d) %d-bit\n",
1456 NsRxStateStrings[rxState], rxFifo.size(), is64bit ? 64 : 32);
1457
1458 Addr link, bufptr;
1459 uint32_t &cmdsts = is64bit ? rxDesc64.cmdsts : rxDesc32.cmdsts;
1460 uint32_t &extsts = is64bit ? rxDesc64.extsts : rxDesc32.extsts;
1461
1462 next:
1463 if (clock) {
1464 if (rxKickTick > curTick) {
1465 DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
1466 rxKickTick);
1467
1468 goto exit;
1469 }
1470
1471 // Go to the next state machine clock tick.
1472 rxKickTick = curTick + cycles(1);
1473 }
1474
1475 switch(rxDmaState) {
1476 case dmaReadWaiting:
1477 if (doRxDmaRead())
1478 goto exit;
1479 break;
1480 case dmaWriteWaiting:
1481 if (doRxDmaWrite())
1482 goto exit;
1483 break;
1484 default:
1485 break;
1486 }
1487
1488 link = is64bit ? (Addr)rxDesc64.link : (Addr)rxDesc32.link;
1489 bufptr = is64bit ? (Addr)rxDesc64.bufptr : (Addr)rxDesc32.bufptr;
1490
1491 // see state machine from spec for details
1492 // the way this works is, if you finish work on one state and can
1493 // go directly to another, you do that through jumping to the
1494 // label "next". however, if you have intermediate work, like DMA
1495 // so that you can't go to the next state yet, you go to exit and
1496 // exit the loop. however, when the DMA is done it will trigger
1497 // an event and come back to this loop.
1498 switch (rxState) {
1499 case rxIdle:
1500 if (!rxEnable) {
1501 DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n");
1502 goto exit;
1503 }
1504
1505 if (CRDD) {
1506 rxState = rxDescRefr;
1507
1508 rxDmaAddr = regs.rxdp & 0x3fffffff;
1509 rxDmaData =
1510 is64bit ? (void *)&rxDesc64.link : (void *)&rxDesc32.link;
1511 rxDmaLen = is64bit ? sizeof(rxDesc64.link) : sizeof(rxDesc32.link);
1512 rxDmaFree = dmaDescFree;
1513
1514 descDmaReads++;
1515 descDmaRdBytes += rxDmaLen;
1516
1517 if (doRxDmaRead())
1518 goto exit;
1519 } else {
1520 rxState = rxDescRead;
1521
1522 rxDmaAddr = regs.rxdp & 0x3fffffff;
1523 rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
1524 rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
1525 rxDmaFree = dmaDescFree;
1526
1527 descDmaReads++;
1528 descDmaRdBytes += rxDmaLen;
1529
1530 if (doRxDmaRead())
1531 goto exit;
1532 }
1533 break;
1534
1535 case rxDescRefr:
1536 if (rxDmaState != dmaIdle)
1537 goto exit;
1538
1539 rxState = rxAdvance;
1540 break;
1541
1542 case rxDescRead:
1543 if (rxDmaState != dmaIdle)
1544 goto exit;
1545
1546 DPRINTF(EthernetDesc, "rxDesc: addr=%08x read descriptor\n",
1547 regs.rxdp & 0x3fffffff);
1548 DPRINTF(EthernetDesc,
1549 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
1550 link, bufptr, cmdsts, extsts);
1551
1552 if (cmdsts & CMDSTS_OWN) {
1553 devIntrPost(ISR_RXIDLE);
1554 rxState = rxIdle;
1555 goto exit;
1556 } else {
1557 rxState = rxFifoBlock;
1558 rxFragPtr = bufptr;
1559 rxDescCnt = cmdsts & CMDSTS_LEN_MASK;
1560 }
1561 break;
1562
1563 case rxFifoBlock:
1564 if (!rxPacket) {
1565 /**
1566 * @todo in reality, we should be able to start processing
1567 * the packet as it arrives, and not have to wait for the
1568 * full packet to be in the receive fifo.
1569 */
1570 if (rxFifo.empty())
1571 goto exit;
1572
1573 DPRINTF(EthernetSM, "****processing receive of new packet****\n");
1574
1575 // If we don't have a packet, grab a new one from the fifo.
1576 rxPacket = rxFifo.front();
1577 rxPktBytes = rxPacket->length;
1578 rxPacketBufPtr = rxPacket->data;
1579
1580 #if TRACING_ON
1581 if (DTRACE(Ethernet)) {
1582 IpPtr ip(rxPacket);
1583 if (ip) {
1584 DPRINTF(Ethernet, "ID is %d\n", ip->id());
1585 TcpPtr tcp(ip);
1586 if (tcp) {
1587 DPRINTF(Ethernet,
1588 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1589 tcp->sport(), tcp->dport(), tcp->seq(),
1590 tcp->ack());
1591 }
1592 }
1593 }
1594 #endif
1595
1596 // sanity check - i think the driver behaves like this
1597 assert(rxDescCnt >= rxPktBytes);
1598 rxFifo.pop();
1599 }
1600
1601
1602 // don't need the && rxDescCnt > 0 if driver sanity check
1603 // above holds
1604 if (rxPktBytes > 0) {
1605 rxState = rxFragWrite;
1606 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
1607 // check holds
1608 rxXferLen = rxPktBytes;
1609
1610 rxDmaAddr = rxFragPtr & 0x3fffffff;
1611 rxDmaData = rxPacketBufPtr;
1612 rxDmaLen = rxXferLen;
1613 rxDmaFree = dmaDataFree;
1614
1615 if (doRxDmaWrite())
1616 goto exit;
1617
1618 } else {
1619 rxState = rxDescWrite;
1620
1621 //if (rxPktBytes == 0) { /* packet is done */
1622 assert(rxPktBytes == 0);
1623 DPRINTF(EthernetSM, "done with receiving packet\n");
1624
1625 cmdsts |= CMDSTS_OWN;
1626 cmdsts &= ~CMDSTS_MORE;
1627 cmdsts |= CMDSTS_OK;
1628 cmdsts &= 0xffff0000;
1629 cmdsts += rxPacket->length; //i.e. set CMDSTS_SIZE
1630
1631 #if 0
1632 /*
1633 * the driver only uses these for its own stats keeping,
1634 * which we don't care about; they aren't necessary for
1635 * functionality and computing them would just slow us down.
1636 * if a later driver version ends up using them for
1637 * functional purposes, just re-enable this block
1638 */
1639 if (rxFilterEnable) {
1640 cmdsts &= ~CMDSTS_DEST_MASK;
1641 const EthAddr &dst = rxFifoFront()->dst();
1642 if (dst->unicast())
1643 cmdsts |= CMDSTS_DEST_SELF;
1644 if (dst->multicast())
1645 cmdsts |= CMDSTS_DEST_MULTI;
1646 if (dst->broadcast())
1647 cmdsts |= CMDSTS_DEST_MASK;
1648 }
1649 #endif
1650
1651 IpPtr ip(rxPacket);
1652 if (extstsEnable && ip) {
1653 extsts |= EXTSTS_IPPKT;
1654 rxIpChecksums++;
1655 if (cksum(ip) != 0) {
1656 DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
1657 extsts |= EXTSTS_IPERR;
1658 }
1659 TcpPtr tcp(ip);
1660 UdpPtr udp(ip);
1661 if (tcp) {
1662 extsts |= EXTSTS_TCPPKT;
1663 rxTcpChecksums++;
1664 if (cksum(tcp) != 0) {
1665 DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
1666 extsts |= EXTSTS_TCPERR;
1667
1668 }
1669 } else if (udp) {
1670 extsts |= EXTSTS_UDPPKT;
1671 rxUdpChecksums++;
1672 if (cksum(udp) != 0) {
1673 DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
1674 extsts |= EXTSTS_UDPERR;
1675 }
1676 }
1677 }
1678 rxPacket = 0;
1679
1680 /*
1681 * the driver seems to always receive into desc buffers
1682 * of size 1514, so you never have a pkt that is split
1683 * into multiple descriptors on the receive side, so
1684 * i don't implement that case, hence the assert above.
1685 */
1686
1687 DPRINTF(EthernetDesc,
1688 "rxDesc: addr=%08x writeback cmdsts extsts\n",
1689 regs.rxdp & 0x3fffffff);
1690 DPRINTF(EthernetDesc,
1691 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
1692 link, bufptr, cmdsts, extsts);
1693
1694 rxDmaAddr = regs.rxdp & 0x3fffffff;
1695 rxDmaData = &cmdsts;
1696 if (is64bit) {
1697 rxDmaAddr += offsetof(ns_desc64, cmdsts);
1698 rxDmaLen = sizeof(rxDesc64.cmdsts) + sizeof(rxDesc64.extsts);
1699 } else {
1700 rxDmaAddr += offsetof(ns_desc32, cmdsts);
1701 rxDmaLen = sizeof(rxDesc32.cmdsts) + sizeof(rxDesc32.extsts);
1702 }
1703 rxDmaFree = dmaDescFree;
1704
1705 descDmaWrites++;
1706 descDmaWrBytes += rxDmaLen;
1707
1708 if (doRxDmaWrite())
1709 goto exit;
1710 }
1711 break;
1712
1713 case rxFragWrite:
1714 if (rxDmaState != dmaIdle)
1715 goto exit;
1716
1717 rxPacketBufPtr += rxXferLen;
1718 rxFragPtr += rxXferLen;
1719 rxPktBytes -= rxXferLen;
1720
1721 rxState = rxFifoBlock;
1722 break;
1723
1724 case rxDescWrite:
1725 if (rxDmaState != dmaIdle)
1726 goto exit;
1727
1728 assert(cmdsts & CMDSTS_OWN);
1729
1730 assert(rxPacket == 0);
1731 devIntrPost(ISR_RXOK);
1732
1733 if (cmdsts & CMDSTS_INTR)
1734 devIntrPost(ISR_RXDESC);
1735
1736 if (!rxEnable) {
1737 DPRINTF(EthernetSM, "Halting the RX state machine\n");
1738 rxState = rxIdle;
1739 goto exit;
1740 } else
1741 rxState = rxAdvance;
1742 break;
1743
1744 case rxAdvance:
1745 if (link == 0) {
1746 devIntrPost(ISR_RXIDLE);
1747 rxState = rxIdle;
1748 CRDD = true;
1749 goto exit;
1750 } else {
1751 if (rxDmaState != dmaIdle)
1752 goto exit;
1753 rxState = rxDescRead;
1754 regs.rxdp = link;
1755 CRDD = false;
1756
1757 rxDmaAddr = regs.rxdp & 0x3fffffff;
1758 rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
1759 rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
1760 rxDmaFree = dmaDescFree;
1761
1762 if (doRxDmaRead())
1763 goto exit;
1764 }
1765 break;
1766
1767 default:
1768 panic("Invalid rxState!");
1769 }
1770
1771 DPRINTF(EthernetSM, "entering next rxState=%s\n",
1772 NsRxStateStrings[rxState]);
1773 goto next;
1774
1775 exit:
1776 /**
1777 * @todo do we want to schedule a future kick?
1778 */
1779 DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
1780 NsRxStateStrings[rxState]);
1781
1782 if (clock && !rxKickEvent.scheduled())
1783 rxKickEvent.schedule(rxKickTick);
1784 }
1785
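/**
 * Try to send the packet at the head of txFifo out the interface. On
 * success, update the transmit stats, pop the FIFO, and post ISR_TXOK;
 * if packets remain in the FIFO, reschedule txEvent retryTime ticks
 * later.
 */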
1786 void
1787 NSGigE::transmit()
1788 {
1789 if (txFifo.empty()) {
1790 DPRINTF(Ethernet, "nothing to transmit\n");
1791 return;
1792 }
1793
1794 DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n",
1795 txFifo.size());
1796 if (interface->sendPacket(txFifo.front())) {
1797 #if TRACING_ON
1798 if (DTRACE(Ethernet)) {
1799 IpPtr ip(txFifo.front());
1800 if (ip) {
1801 DPRINTF(Ethernet, "ID is %d\n", ip->id());
1802 TcpPtr tcp(ip);
1803 if (tcp) {
1804 DPRINTF(Ethernet,
1805 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1806 tcp->sport(), tcp->dport(), tcp->seq(),
1807 tcp->ack());
1808 }
1809 }
1810 }
1811 #endif
1812
1813 DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length);
1814 txBytes += txFifo.front()->length;
1815 txPackets++;
1816
1817 DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n",
1818 txFifo.avail());
1819 txFifo.pop();
1820
1821 /*
1822 * normally do a writeback of the descriptor here, and ONLY
1823 * after that is done, send this interrupt. but since our
1824 * stuff never actually fails, just do this interrupt here,
1825 * otherwise the code has to stray from this nice format.
1826 * besides, it's functionally the same.
1827 */
1828 devIntrPost(ISR_TXOK);
1829 }
1830
1831 if (!txFifo.empty() && !txEvent.scheduled()) {
1832 DPRINTF(Ethernet, "reschedule transmit\n");
1833 txEvent.schedule(curTick + retryTime);
1834 }
1835 }
1836
1837 bool
1838 NSGigE::doTxDmaRead()
1839 {
1840 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
1841 txDmaState = dmaReading;
1842
1843 if (dmaPending() || getState() != Running)
1844 txDmaState = dmaReadWaiting;
1845 else
1846 dmaRead(txDmaAddr, txDmaLen, &txDmaReadEvent, (uint8_t*)txDmaData);
1847
1848 return true;
1849 }
1850
1851 void
1852 NSGigE::txDmaReadDone()
1853 {
1854 assert(txDmaState == dmaReading);
1855 txDmaState = dmaIdle;
1856
1857 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
1858 txDmaAddr, txDmaLen);
1859 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1860
1861 // If the receive state machine has a pending DMA, let it go first
1862 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1863 rxKick();
1864
1865 txKick();
1866 }
1867
1868 bool
1869 NSGigE::doTxDmaWrite()
1870 {
1871 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
1872 txDmaState = dmaWriting;
1873
1874 if (dmaPending() || getState() != Running)
1875 txDmaState = dmaWriteWaiting;
1876 else
1877 dmaWrite(txDmaAddr, txDmaLen, &txDmaWriteEvent, (uint8_t*)txDmaData);
1878 return true;
1879 }
1880
1881 void
1882 NSGigE::txDmaWriteDone()
1883 {
1884 assert(txDmaState == dmaWriting);
1885 txDmaState = dmaIdle;
1886
1887 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
1888 txDmaAddr, txDmaLen);
1889 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1890
1891 // If the receive state machine has a pending DMA, let it go first
1892 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1893 rxKick();
1894
1895 txKick();
1896 }
1897
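/**
 * Run the transmit state machine (states named in NsTxStateStrings).
 * Structured like rxKick(): it loops via "next" while work can proceed
 * and leaves via "exit" whenever it must wait for a DMA completion or
 * the next state machine clock tick.
 */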
1898 void
1899 NSGigE::txKick()
1900 {
1901 bool is64bit = (bool)(regs.config & CFGR_M64ADDR);
1902
1903 DPRINTF(EthernetSM, "transmit kick txState=%s %d-bit\n",
1904 NsTxStateStrings[txState], is64bit ? 64 : 32);
1905
1906 Addr link, bufptr;
1907 uint32_t &cmdsts = is64bit ? txDesc64.cmdsts : txDesc32.cmdsts;
1908 uint32_t &extsts = is64bit ? txDesc64.extsts : txDesc32.extsts;
1909
1910 next:
1911 if (clock) {
1912 if (txKickTick > curTick) {
1913 DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
1914 txKickTick);
1915 goto exit;
1916 }
1917
1918 // Go to the next state machine clock tick.
1919 txKickTick = curTick + cycles(1);
1920 }
1921
1922 switch(txDmaState) {
1923 case dmaReadWaiting:
1924 if (doTxDmaRead())
1925 goto exit;
1926 break;
1927 case dmaWriteWaiting:
1928 if (doTxDmaWrite())
1929 goto exit;
1930 break;
1931 default:
1932 break;
1933 }
1934
1935 link = is64bit ? (Addr)txDesc64.link : (Addr)txDesc32.link;
1936 bufptr = is64bit ? (Addr)txDesc64.bufptr : (Addr)txDesc32.bufptr;
1937 switch (txState) {
1938 case txIdle:
1939 if (!txEnable) {
1940 DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n");
1941 goto exit;
1942 }
1943
1944 if (CTDD) {
1945 txState = txDescRefr;
1946
1947 txDmaAddr = regs.txdp & 0x3fffffff;
1948 txDmaData =
1949 is64bit ? (void *)&txDesc64.link : (void *)&txDesc32.link;
1950 txDmaLen = is64bit ? sizeof(txDesc64.link) : sizeof(txDesc32.link);
1951 txDmaFree = dmaDescFree;
1952
1953 descDmaReads++;
1954 descDmaRdBytes += txDmaLen;
1955
1956 if (doTxDmaRead())
1957 goto exit;
1958
1959 } else {
1960 txState = txDescRead;
1961
1962 txDmaAddr = regs.txdp & 0x3fffffff;
1963 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
1964 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
1965 txDmaFree = dmaDescFree;
1966
1967 descDmaReads++;
1968 descDmaRdBytes += txDmaLen;
1969
1970 if (doTxDmaRead())
1971 goto exit;
1972 }
1973 break;
1974
1975 case txDescRefr:
1976 if (txDmaState != dmaIdle)
1977 goto exit;
1978
1979 txState = txAdvance;
1980 break;
1981
1982 case txDescRead:
1983 if (txDmaState != dmaIdle)
1984 goto exit;
1985
1986 DPRINTF(EthernetDesc, "txDesc: addr=%08x read descriptor\n",
1987 regs.txdp & 0x3fffffff);
1988 DPRINTF(EthernetDesc,
1989 "txDesc: link=%#x bufptr=%#x cmdsts=%#08x extsts=%#08x\n",
1990 link, bufptr, cmdsts, extsts);
1991
1992 if (cmdsts & CMDSTS_OWN) {
1993 txState = txFifoBlock;
1994 txFragPtr = bufptr;
1995 txDescCnt = cmdsts & CMDSTS_LEN_MASK;
1996 } else {
1997 devIntrPost(ISR_TXIDLE);
1998 txState = txIdle;
1999 goto exit;
2000 }
2001 break;
2002
2003 case txFifoBlock:
2004 if (!txPacket) {
2005 DPRINTF(EthernetSM, "****starting the tx of a new packet****\n");
2006 txPacket = new EthPacketData(16384);
2007 txPacketBufPtr = txPacket->data;
2008 }
2009
2010 if (txDescCnt == 0) {
2011 DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n");
2012 if (cmdsts & CMDSTS_MORE) {
2013 DPRINTF(EthernetSM, "there are more descriptors to come\n");
2014 txState = txDescWrite;
2015
2016 cmdsts &= ~CMDSTS_OWN;
2017
2018 txDmaAddr = regs.txdp & 0x3fffffff;
2019 txDmaData = &cmdsts;
2020 if (is64bit) {
2021 txDmaAddr += offsetof(ns_desc64, cmdsts);
2022 txDmaLen = sizeof(txDesc64.cmdsts);
2023 } else {
2024 txDmaAddr += offsetof(ns_desc32, cmdsts);
2025 txDmaLen = sizeof(txDesc32.cmdsts);
2026 }
2027 txDmaFree = dmaDescFree;
2028
2029 if (doTxDmaWrite())
2030 goto exit;
2031
2032 } else { /* this packet is totally done */
2033 DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
2034 /* deal with the packet that just finished */
2035 if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
2036 IpPtr ip(txPacket);
2037 if (extsts & EXTSTS_UDPPKT) {
2038 UdpPtr udp(ip);
2039 udp->sum(0);
2040 udp->sum(cksum(udp));
2041 txUdpChecksums++;
2042 } else if (extsts & EXTSTS_TCPPKT) {
2043 TcpPtr tcp(ip);
2044 tcp->sum(0);
2045 tcp->sum(cksum(tcp));
2046 txTcpChecksums++;
2047 }
2048 if (extsts & EXTSTS_IPPKT) {
2049 ip->sum(0);
2050 ip->sum(cksum(ip));
2051 txIpChecksums++;
2052 }
2053 }
2054
2055 txPacket->length = txPacketBufPtr - txPacket->data;
2056 // this is just because the receive side can't handle a
2057 // packet bigger than 1514 bytes, so make sure we don't send one
2058 if (txPacket->length > 1514)
2059 panic("transmit packet too large, %d > 1514\n",
2060 txPacket->length);
2061
2062 #ifndef NDEBUG
2063 bool success =
2064 #endif
2065 txFifo.push(txPacket);
2066 assert(success);
2067
2068 /*
2069 * the following section is not to spec, but
2070 * functionally shouldn't be any different. normally,
2071 * the chip will wait til the transmit has occurred
2072 * before writing back the descriptor because it has
2073 * to wait to see that it was successfully transmitted
2074 * to decide whether to set CMDSTS_OK or not.
2075 * however, in the simulator since it is always
2076 * successfully transmitted, and writing it exactly to
2077 * spec would complicate the code, we just do it here
2078 */
2079
2080 cmdsts &= ~CMDSTS_OWN;
2081 cmdsts |= CMDSTS_OK;
2082
2083 DPRINTF(EthernetDesc,
2084 "txDesc writeback: cmdsts=%08x extsts=%08x\n",
2085 cmdsts, extsts);
2086
2087 txDmaFree = dmaDescFree;
2088 txDmaAddr = regs.txdp & 0x3fffffff;
2089 txDmaData = &cmdsts;
2090 if (is64bit) {
2091 txDmaAddr += offsetof(ns_desc64, cmdsts);
2092 txDmaLen =
2093 sizeof(txDesc64.cmdsts) + sizeof(txDesc64.extsts);
2094 } else {
2095 txDmaAddr += offsetof(ns_desc32, cmdsts);
2096 txDmaLen =
2097 sizeof(txDesc32.cmdsts) + sizeof(txDesc32.extsts);
2098 }
2099
2100 descDmaWrites++;
2101 descDmaWrBytes += txDmaLen;
2102
2103 transmit();
2104 txPacket = 0;
2105
2106 if (!txEnable) {
2107 DPRINTF(EthernetSM, "halting TX state machine\n");
2108 txState = txIdle;
2109 goto exit;
2110 } else
2111 txState = txAdvance;
2112
2113 if (doTxDmaWrite())
2114 goto exit;
2115 }
2116 } else {
2117 DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
2118 if (!txFifo.full()) {
2119 txState = txFragRead;
2120
2121 /*
2122 * The number of bytes transferred is either whatever
2123 * is left in the descriptor (txDescCnt), or if there
2124 * is not enough room in the fifo, just whatever room
2125 * is left in the fifo
2126 */
2127 txXferLen = min<uint32_t>(txDescCnt, txFifo.avail());
2128
2129 txDmaAddr = txFragPtr & 0x3fffffff;
2130 txDmaData = txPacketBufPtr;
2131 txDmaLen = txXferLen;
2132 txDmaFree = dmaDataFree;
2133
2134 if (doTxDmaRead())
2135 goto exit;
2136 } else {
2137 txState = txFifoBlock;
2138 transmit();
2139
2140 goto exit;
2141 }
2142
2143 }
2144 break;
2145
2146 case txFragRead:
2147 if (txDmaState != dmaIdle)
2148 goto exit;
2149
2150 txPacketBufPtr += txXferLen;
2151 txFragPtr += txXferLen;
2152 txDescCnt -= txXferLen;
2153 txFifo.reserve(txXferLen);
2154
2155 txState = txFifoBlock;
2156 break;
2157
2158 case txDescWrite:
2159 if (txDmaState != dmaIdle)
2160 goto exit;
2161
2162 if (cmdsts & CMDSTS_INTR)
2163 devIntrPost(ISR_TXDESC);
2164
2165 if (!txEnable) {
2166 DPRINTF(EthernetSM, "halting TX state machine\n");
2167 txState = txIdle;
2168 goto exit;
2169 } else
2170 txState = txAdvance;
2171 break;
2172
2173 case txAdvance:
2174 if (link == 0) {
2175 devIntrPost(ISR_TXIDLE);
2176 txState = txIdle;
2177 goto exit;
2178 } else {
2179 if (txDmaState != dmaIdle)
2180 goto exit;
2181 txState = txDescRead;
2182 regs.txdp = link;
2183 CTDD = false;
2184
2185 txDmaAddr = link & 0x3fffffff;
2186 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
2187 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
2188 txDmaFree = dmaDescFree;
2189
2190 if (doTxDmaRead())
2191 goto exit;
2192 }
2193 break;
2194
2195 default:
2196 panic("invalid state");
2197 }
2198
2199 DPRINTF(EthernetSM, "entering next txState=%s\n",
2200 NsTxStateStrings[txState]);
2201 goto next;
2202
2203 exit:
2204 /**
2205 * @todo do we want to schedule a future kick?
2206 */
2207 DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
2208 NsTxStateStrings[txState]);
2209
2210 if (clock && !txKickEvent.scheduled())
2211 txKickEvent.schedule(txKickTick);
2212 }
2213
2214 /**
2215 * Advance the EEPROM state machine
2216 * Called on rising edge of EEPROM clock bit in MEAR
2217 */
2218 void
2219 NSGigE::eepromKick()
2220 {
2221 switch (eepromState) {
2222
2223 case eepromStart:
2224
2225 // Wait for start bit
2226 if (regs.mear & MEAR_EEDI) {
2227 // Set up to get 2 opcode bits
2228 eepromState = eepromGetOpcode;
2229 eepromBitsToRx = 2;
2230 eepromOpcode = 0;
2231 }
2232 break;
2233
2234 case eepromGetOpcode:
2235 eepromOpcode <<= 1;
2236 eepromOpcode += (regs.mear & MEAR_EEDI) ? 1 : 0;
2237 --eepromBitsToRx;
2238
2239 // Done getting opcode
2240 if (eepromBitsToRx == 0) {
2241 if (eepromOpcode != EEPROM_READ)
2242 panic("only EEPROM reads are implemented!");
2243
2244 // Set up to get address
2245 eepromState = eepromGetAddress;
2246 eepromBitsToRx = 6;
2247 eepromAddress = 0;
2248 }
2249 break;
2250
2251 case eepromGetAddress:
2252 eepromAddress <<= 1;
2253 eepromAddress += (regs.mear & MEAR_EEDI) ? 1 : 0;
2254 --eepromBitsToRx;
2255
2256 // Done getting address
2257 if (eepromBitsToRx == 0) {
2258
2259 if (eepromAddress >= EEPROM_SIZE)
2260 panic("EEPROM read access out of range!");
2261
2262 switch (eepromAddress) {
2263
2264 case EEPROM_PMATCH2_ADDR:
2265 eepromData = rom.perfectMatch[5];
2266 eepromData <<= 8;
2267 eepromData += rom.perfectMatch[4];
2268 break;
2269
2270 case EEPROM_PMATCH1_ADDR:
2271 eepromData = rom.perfectMatch[3];
2272 eepromData <<= 8;
2273 eepromData += rom.perfectMatch[2];
2274 break;
2275
2276 case EEPROM_PMATCH0_ADDR:
2277 eepromData = rom.perfectMatch[1];
2278 eepromData <<= 8;
2279 eepromData += rom.perfectMatch[0];
2280 break;
2281
2282 default:
2283 panic("FreeBSD driver only uses EEPROM to read PMATCH!");
2284 }
2285 // Set up to read data
2286 eepromState = eepromRead;
2287 eepromBitsToRx = 16;
2288
2289 // Clear data in bit
2290 regs.mear &= ~MEAR_EEDI;
2291 }
2292 break;
2293
2294 case eepromRead:
2295 // Clear Data Out bit
2296 regs.mear &= ~MEAR_EEDO;
2297 // Set bit to value of current EEPROM bit
2298 regs.mear |= (eepromData & 0x8000) ? MEAR_EEDO : 0x0;
2299
2300 eepromData <<= 1;
2301 --eepromBitsToRx;
2302
2303 // All done
2304 if (eepromBitsToRx == 0) {
2305 eepromState = eepromStart;
2306 }
2307 break;
2308
2309 default:
2310 panic("invalid EEPROM state");
2311 }
2312
2313 }
2314
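/*
 * Kick the wire-transmit side: if txFifo has data waiting, (re)schedule
 * the transmit event for the next cycle; with an empty fifo there is
 * nothing to do.
 */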
2315 void
2316 NSGigE::transferDone()
2317 {
2318 if (txFifo.empty()) {
2319 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
2320 return;
2321 }
2322
2323 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
2324
2325 txEvent.reschedule(curTick + cycles(1), true);
2326 }
2327
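/*
 * Decide whether an incoming packet should be dropped by the receive
 * address filter; returns true to drop.  Unicast destinations pass if
 * all-unicast, perfect-match, or ARP acceptance is enabled; broadcast
 * and multicast destinations pass according to their acceptance bits.
 * Multicast hashing is not modeled, so enabling it accepts everything.
 */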
2328 bool
2329 NSGigE::rxFilter(const EthPacketPtr &packet)
2330 {
2331 EthPtr eth = packet;
2332 bool drop = true;
2333 string type;
2334
2335 const EthAddr &dst = eth->dst();
2336 if (dst.unicast()) {
2337 // If we're accepting all unicast addresses
2338 if (acceptUnicast)
2339 drop = false;
2340
2341 // If we make a perfect match
2342 if (acceptPerfect && dst == rom.perfectMatch)
2343 drop = false;
2344
2345 if (acceptArp && eth->type() == ETH_TYPE_ARP)
2346 drop = false;
2347
2348 } else if (dst.broadcast()) {
2349 // if we're accepting broadcasts
2350 if (acceptBroadcast)
2351 drop = false;
2352
2353 } else if (dst.multicast()) {
2354 // if we're accepting all multicasts
2355 if (acceptMulticast)
2356 drop = false;
2357
2358 // Multicast hashing faked - all packets accepted
2359 if (multicastHashEnable)
2360 drop = false;
2361 }
2362
2363 if (drop) {
2364 DPRINTF(Ethernet, "rxFilter drop\n");
2365 DDUMP(EthernetData, packet->data, packet->length);
2366 }
2367
2368 return drop;
2369 }
2370
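/*
 * Entry point for packets arriving from the link.  Returns true when the
 * packet has been consumed (including the cases where it is silently
 * dropped because reception is disabled or the filter rejects it), and
 * false when the rxFifo has no room, in which case the drop is counted
 * and an ISR_RXORN interrupt is posted.
 */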
2371 bool
2372 NSGigE::recvPacket(EthPacketPtr packet)
2373 {
2374 rxBytes += packet->length;
2375 rxPackets++;
2376
2377 DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
2378 rxFifo.avail());
2379
2380 if (!rxEnable) {
2381 DPRINTF(Ethernet, "receive disabled...packet dropped\n");
2382 return true;
2383 }
2384
2385 if (!rxFilterEnable) {
2386 DPRINTF(Ethernet,
2387 "receive packet filtering disabled . . . packet dropped\n");
2388 return true;
2389 }
2390
2391 if (rxFilter(packet)) {
2392 DPRINTF(Ethernet, "packet filtered...dropped\n");
2393 return true;
2394 }
2395
2396 if (rxFifo.avail() < packet->length) {
2397 #if TRACING_ON
2398 IpPtr ip(packet);
2399 TcpPtr tcp(ip);
2400 if (ip) {
2401 DPRINTF(Ethernet,
2402 "packet won't fit in receive buffer...pkt ID %d dropped\n",
2403 ip->id());
2404 if (tcp) {
2405 DPRINTF(Ethernet, "Seq=%d\n", tcp->seq());
2406 }
2407 }
2408 #endif
2409 droppedPackets++;
2410 devIntrPost(ISR_RXORN);
2411 return false;
2412 }
2413
2414 rxFifo.push(packet);
2415
2416 rxKick();
2417 return true;
2418 }
2419
2420
2421 void
2422 NSGigE::resume()
2423 {
2424 SimObject::resume();
2425
2426     // During drain we could have left the state machines in a waiting state,
2427     // and they wouldn't get out until some other event occurred to kick them.
2428     // Kicking them here gets them out immediately.
2429 txKick();
2430 rxKick();
2431 }
2432
2433
2434 //=====================================================================
2435 //
2436 // Serialization / unserialization support
2437 void
2438 NSGigE::serialize(ostream &os)
2439 {
2440 // Serialize the PciDev base class
2441 PciDev::serialize(os);
2442
2443 /*
2444 * Finalize any DMA events now.
2445 */
2446 // @todo will mem system save pending dma?
2447
2448 /*
2449 * Serialize the device registers
2450 */
2451 SERIALIZE_SCALAR(regs.command);
2452 SERIALIZE_SCALAR(regs.config);
2453 SERIALIZE_SCALAR(regs.mear);
2454 SERIALIZE_SCALAR(regs.ptscr);
2455 SERIALIZE_SCALAR(regs.isr);
2456 SERIALIZE_SCALAR(regs.imr);
2457 SERIALIZE_SCALAR(regs.ier);
2458 SERIALIZE_SCALAR(regs.ihr);
2459 SERIALIZE_SCALAR(regs.txdp);
2460 SERIALIZE_SCALAR(regs.txdp_hi);
2461 SERIALIZE_SCALAR(regs.txcfg);
2462 SERIALIZE_SCALAR(regs.gpior);
2463 SERIALIZE_SCALAR(regs.rxdp);
2464 SERIALIZE_SCALAR(regs.rxdp_hi);
2465 SERIALIZE_SCALAR(regs.rxcfg);
2466 SERIALIZE_SCALAR(regs.pqcr);
2467 SERIALIZE_SCALAR(regs.wcsr);
2468 SERIALIZE_SCALAR(regs.pcr);
2469 SERIALIZE_SCALAR(regs.rfcr);
2470 SERIALIZE_SCALAR(regs.rfdr);
2471 SERIALIZE_SCALAR(regs.brar);
2472 SERIALIZE_SCALAR(regs.brdr);
2473 SERIALIZE_SCALAR(regs.srr);
2474 SERIALIZE_SCALAR(regs.mibc);
2475 SERIALIZE_SCALAR(regs.vrcr);
2476 SERIALIZE_SCALAR(regs.vtcr);
2477 SERIALIZE_SCALAR(regs.vdr);
2478 SERIALIZE_SCALAR(regs.ccsr);
2479 SERIALIZE_SCALAR(regs.tbicr);
2480 SERIALIZE_SCALAR(regs.tbisr);
2481 SERIALIZE_SCALAR(regs.tanar);
2482 SERIALIZE_SCALAR(regs.tanlpar);
2483 SERIALIZE_SCALAR(regs.taner);
2484 SERIALIZE_SCALAR(regs.tesr);
2485
2486 SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
2487 SERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);
2488
2489 SERIALIZE_SCALAR(ioEnable);
2490
2491 /*
2492 * Serialize the data Fifos
2493 */
2494 rxFifo.serialize("rxFifo", os);
2495 txFifo.serialize("txFifo", os);
2496
2497 /*
2498 * Serialize the various helper variables
2499 */
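    // txPacketBufPtr and rxPacketBufPtr are raw pointers into the packet
    // data, so checkpoint them as byte offsets from the start of the
    // packet and rebuild the pointers on restore.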
2500 bool txPacketExists = txPacket;
2501 SERIALIZE_SCALAR(txPacketExists);
2502 if (txPacketExists) {
2503 txPacket->length = txPacketBufPtr - txPacket->data;
2504 txPacket->serialize("txPacket", os);
2505 uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
2506 SERIALIZE_SCALAR(txPktBufPtr);
2507 }
2508
2509 bool rxPacketExists = rxPacket;
2510 SERIALIZE_SCALAR(rxPacketExists);
2511 if (rxPacketExists) {
2512 rxPacket->serialize("rxPacket", os);
2513 uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
2514 SERIALIZE_SCALAR(rxPktBufPtr);
2515 }
2516
2517 SERIALIZE_SCALAR(txXferLen);
2518 SERIALIZE_SCALAR(rxXferLen);
2519
2520 /*
2521 * Serialize Cached Descriptors
2522 */
2523 SERIALIZE_SCALAR(rxDesc64.link);
2524 SERIALIZE_SCALAR(rxDesc64.bufptr);
2525 SERIALIZE_SCALAR(rxDesc64.cmdsts);
2526 SERIALIZE_SCALAR(rxDesc64.extsts);
2527 SERIALIZE_SCALAR(txDesc64.link);
2528 SERIALIZE_SCALAR(txDesc64.bufptr);
2529 SERIALIZE_SCALAR(txDesc64.cmdsts);
2530 SERIALIZE_SCALAR(txDesc64.extsts);
2531 SERIALIZE_SCALAR(rxDesc32.link);
2532 SERIALIZE_SCALAR(rxDesc32.bufptr);
2533 SERIALIZE_SCALAR(rxDesc32.cmdsts);
2534 SERIALIZE_SCALAR(rxDesc32.extsts);
2535 SERIALIZE_SCALAR(txDesc32.link);
2536 SERIALIZE_SCALAR(txDesc32.bufptr);
2537 SERIALIZE_SCALAR(txDesc32.cmdsts);
2538 SERIALIZE_SCALAR(txDesc32.extsts);
2539 SERIALIZE_SCALAR(extstsEnable);
2540
2541 /*
2542 * Serialize tx state machine
2543 */
2544 int txState = this->txState;
2545 SERIALIZE_SCALAR(txState);
2546 SERIALIZE_SCALAR(txEnable);
2547 SERIALIZE_SCALAR(CTDD);
2548 SERIALIZE_SCALAR(txFragPtr);
2549 SERIALIZE_SCALAR(txDescCnt);
2550 int txDmaState = this->txDmaState;
2551 SERIALIZE_SCALAR(txDmaState);
2552 SERIALIZE_SCALAR(txKickTick);
2553
2554 /*
2555 * Serialize rx state machine
2556 */
2557 int rxState = this->rxState;
2558 SERIALIZE_SCALAR(rxState);
2559 SERIALIZE_SCALAR(rxEnable);
2560 SERIALIZE_SCALAR(CRDD);
2561 SERIALIZE_SCALAR(rxPktBytes);
2562 SERIALIZE_SCALAR(rxFragPtr);
2563 SERIALIZE_SCALAR(rxDescCnt);
2564 int rxDmaState = this->rxDmaState;
2565 SERIALIZE_SCALAR(rxDmaState);
2566 SERIALIZE_SCALAR(rxKickTick);
2567
2568 /*
2569 * Serialize EEPROM state machine
2570 */
2571 int eepromState = this->eepromState;
2572 SERIALIZE_SCALAR(eepromState);
2573 SERIALIZE_SCALAR(eepromClk);
2574 SERIALIZE_SCALAR(eepromBitsToRx);
2575 SERIALIZE_SCALAR(eepromOpcode);
2576 SERIALIZE_SCALAR(eepromAddress);
2577 SERIALIZE_SCALAR(eepromData);
2578
2579 /*
2580 * If there's a pending transmit, store the time so we can
2581 * reschedule it later
2582 */
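    // Store the delta from curTick rather than an absolute tick so the
    // event can be rescheduled correctly relative to the restore time.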
2583 Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
2584 SERIALIZE_SCALAR(transmitTick);
2585
2586 /*
2587 * receive address filter settings
2588 */
2589 SERIALIZE_SCALAR(rxFilterEnable);
2590 SERIALIZE_SCALAR(acceptBroadcast);
2591 SERIALIZE_SCALAR(acceptMulticast);
2592 SERIALIZE_SCALAR(acceptUnicast);
2593 SERIALIZE_SCALAR(acceptPerfect);
2594 SERIALIZE_SCALAR(acceptArp);
2595 SERIALIZE_SCALAR(multicastHashEnable);
2596
2597 /*
2598 * Keep track of pending interrupt status.
2599 */
2600 SERIALIZE_SCALAR(intrTick);
2601 SERIALIZE_SCALAR(cpuPendingIntr);
2602 Tick intrEventTick = 0;
2603 if (intrEvent)
2604 intrEventTick = intrEvent->when();
2605 SERIALIZE_SCALAR(intrEventTick);
2606
2607 }
2608
2609 void
2610 NSGigE::unserialize(Checkpoint *cp, const std::string &section)
2611 {
2612 // Unserialize the PciDev base class
2613 PciDev::unserialize(cp, section);
2614
2615 UNSERIALIZE_SCALAR(regs.command);
2616 UNSERIALIZE_SCALAR(regs.config);
2617 UNSERIALIZE_SCALAR(regs.mear);
2618 UNSERIALIZE_SCALAR(regs.ptscr);
2619 UNSERIALIZE_SCALAR(regs.isr);
2620 UNSERIALIZE_SCALAR(regs.imr);
2621 UNSERIALIZE_SCALAR(regs.ier);
2622 UNSERIALIZE_SCALAR(regs.ihr);
2623 UNSERIALIZE_SCALAR(regs.txdp);
2624 UNSERIALIZE_SCALAR(regs.txdp_hi);
2625 UNSERIALIZE_SCALAR(regs.txcfg);
2626 UNSERIALIZE_SCALAR(regs.gpior);
2627 UNSERIALIZE_SCALAR(regs.rxdp);
2628 UNSERIALIZE_SCALAR(regs.rxdp_hi);
2629 UNSERIALIZE_SCALAR(regs.rxcfg);
2630 UNSERIALIZE_SCALAR(regs.pqcr);
2631 UNSERIALIZE_SCALAR(regs.wcsr);
2632 UNSERIALIZE_SCALAR(regs.pcr);
2633 UNSERIALIZE_SCALAR(regs.rfcr);
2634 UNSERIALIZE_SCALAR(regs.rfdr);
2635 UNSERIALIZE_SCALAR(regs.brar);
2636 UNSERIALIZE_SCALAR(regs.brdr);
2637 UNSERIALIZE_SCALAR(regs.srr);
2638 UNSERIALIZE_SCALAR(regs.mibc);
2639 UNSERIALIZE_SCALAR(regs.vrcr);
2640 UNSERIALIZE_SCALAR(regs.vtcr);
2641 UNSERIALIZE_SCALAR(regs.vdr);
2642 UNSERIALIZE_SCALAR(regs.ccsr);
2643 UNSERIALIZE_SCALAR(regs.tbicr);
2644 UNSERIALIZE_SCALAR(regs.tbisr);
2645 UNSERIALIZE_SCALAR(regs.tanar);
2646 UNSERIALIZE_SCALAR(regs.tanlpar);
2647 UNSERIALIZE_SCALAR(regs.taner);
2648 UNSERIALIZE_SCALAR(regs.tesr);
2649
2650 UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
2651 UNSERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);
2652
2653 UNSERIALIZE_SCALAR(ioEnable);
2654
2655 /*
2656 * unserialize the data fifos
2657 */
2658 rxFifo.unserialize("rxFifo", cp, section);
2659 txFifo.unserialize("txFifo", cp, section);
2660
2661 /*
2662 * unserialize the various helper variables
2663 */
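    // Packet buffers are reallocated here, and the checkpointed byte
    // offsets are used to rebuild the raw buffer pointers.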
2664 bool txPacketExists;
2665 UNSERIALIZE_SCALAR(txPacketExists);
2666 if (txPacketExists) {
2667 txPacket = new EthPacketData(16384);
2668 txPacket->unserialize("txPacket", cp, section);
2669 uint32_t txPktBufPtr;
2670 UNSERIALIZE_SCALAR(txPktBufPtr);
2671 txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
2672 } else
2673 txPacket = 0;
2674
2675 bool rxPacketExists;
2676 UNSERIALIZE_SCALAR(rxPacketExists);
2677 rxPacket = 0;
2678 if (rxPacketExists) {
2679 rxPacket = new EthPacketData(16384);
2680 rxPacket->unserialize("rxPacket", cp, section);
2681 uint32_t rxPktBufPtr;
2682 UNSERIALIZE_SCALAR(rxPktBufPtr);
2683 rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
2684 } else
2685 rxPacket = 0;
2686
2687 UNSERIALIZE_SCALAR(txXferLen);
2688 UNSERIALIZE_SCALAR(rxXferLen);
2689
2690 /*
2691 * Unserialize Cached Descriptors
2692 */
2693 UNSERIALIZE_SCALAR(rxDesc64.link);
2694 UNSERIALIZE_SCALAR(rxDesc64.bufptr);
2695 UNSERIALIZE_SCALAR(rxDesc64.cmdsts);
2696 UNSERIALIZE_SCALAR(rxDesc64.extsts);
2697 UNSERIALIZE_SCALAR(txDesc64.link);
2698 UNSERIALIZE_SCALAR(txDesc64.bufptr);
2699 UNSERIALIZE_SCALAR(txDesc64.cmdsts);
2700 UNSERIALIZE_SCALAR(txDesc64.extsts);
2701 UNSERIALIZE_SCALAR(rxDesc32.link);
2702 UNSERIALIZE_SCALAR(rxDesc32.bufptr);
2703 UNSERIALIZE_SCALAR(rxDesc32.cmdsts);
2704 UNSERIALIZE_SCALAR(rxDesc32.extsts);
2705 UNSERIALIZE_SCALAR(txDesc32.link);
2706 UNSERIALIZE_SCALAR(txDesc32.bufptr);
2707 UNSERIALIZE_SCALAR(txDesc32.cmdsts);
2708 UNSERIALIZE_SCALAR(txDesc32.extsts);
2709 UNSERIALIZE_SCALAR(extstsEnable);
2710
2711 /*
2712 * unserialize tx state machine
2713 */
2714 int txState;
2715 UNSERIALIZE_SCALAR(txState);
2716 this->txState = (TxState) txState;
2717 UNSERIALIZE_SCALAR(txEnable);
2718 UNSERIALIZE_SCALAR(CTDD);
2719 UNSERIALIZE_SCALAR(txFragPtr);
2720 UNSERIALIZE_SCALAR(txDescCnt);
2721 int txDmaState;
2722 UNSERIALIZE_SCALAR(txDmaState);
2723 this->txDmaState = (DmaState) txDmaState;
2724 UNSERIALIZE_SCALAR(txKickTick);
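    // Reschedule the tx kick event if a kick tick was recorded, so the
    // tx state machine resumes after restore.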
2725 if (txKickTick)
2726 txKickEvent.schedule(txKickTick);
2727
2728 /*
2729 * unserialize rx state machine
2730 */
2731 int rxState;
2732 UNSERIALIZE_SCALAR(rxState);
2733 this->rxState = (RxState) rxState;
2734 UNSERIALIZE_SCALAR(rxEnable);
2735 UNSERIALIZE_SCALAR(CRDD);
2736 UNSERIALIZE_SCALAR(rxPktBytes);
2737 UNSERIALIZE_SCALAR(rxFragPtr);
2738 UNSERIALIZE_SCALAR(rxDescCnt);
2739 int rxDmaState;
2740 UNSERIALIZE_SCALAR(rxDmaState);
2741 this->rxDmaState = (DmaState) rxDmaState;
2742 UNSERIALIZE_SCALAR(rxKickTick);
2743 if (rxKickTick)
2744 rxKickEvent.schedule(rxKickTick);
2745
2746 /*
2747 * Unserialize EEPROM state machine
2748 */
2749 int eepromState;
2750 UNSERIALIZE_SCALAR(eepromState);
2751 this->eepromState = (EEPROMState) eepromState;
2752 UNSERIALIZE_SCALAR(eepromClk);
2753 UNSERIALIZE_SCALAR(eepromBitsToRx);
2754 UNSERIALIZE_SCALAR(eepromOpcode);
2755 UNSERIALIZE_SCALAR(eepromAddress);
2756 UNSERIALIZE_SCALAR(eepromData);
2757
2758 /*
2759 * If there's a pending transmit, reschedule it now
2760 */
2761 Tick transmitTick;
2762 UNSERIALIZE_SCALAR(transmitTick);
2763 if (transmitTick)
2764 txEvent.schedule(curTick + transmitTick);
2765
2766 /*
2767 * unserialize receive address filter settings
2768 */
2769 UNSERIALIZE_SCALAR(rxFilterEnable);
2770 UNSERIALIZE_SCALAR(acceptBroadcast);
2771 UNSERIALIZE_SCALAR(acceptMulticast);
2772 UNSERIALIZE_SCALAR(acceptUnicast);
2773 UNSERIALIZE_SCALAR(acceptPerfect);
2774 UNSERIALIZE_SCALAR(acceptArp);
2775 UNSERIALIZE_SCALAR(multicastHashEnable);
2776
2777 /*
2778 * Keep track of pending interrupt status.
2779 */
2780 UNSERIALIZE_SCALAR(intrTick);
2781 UNSERIALIZE_SCALAR(cpuPendingIntr);
2782 Tick intrEventTick;
2783 UNSERIALIZE_SCALAR(intrEventTick);
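    // If an interrupt event was outstanding when the checkpoint was
    // taken, recreate it at the recorded tick.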
2784 if (intrEventTick) {
2785 intrEvent = new IntrEvent(this, intrEventTick, true);
2786 }
2787 }
2788
2789 NSGigE *
2790 NSGigEParams::create()
2791 {
2792 return new NSGigE(this);
2793 }