1a5adb2754c8ba2fbbf3441484561c9bd8b155ba
[gem5.git] / src / dev / net / ns_gige.cc
1 /*
2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Nathan Binkert
29 * Lisa Hsu
30 */
31
32 /** @file
33 * Device module for modelling the National Semiconductor
34 * DP83820 ethernet controller. Does not support priority queueing
35 */
36
37 #include "dev/net/ns_gige.hh"
38
39 #include <deque>
40 #include <memory>
41 #include <string>
42
43 #include "base/debug.hh"
44 #include "base/inet.hh"
45 #include "base/types.hh"
46 #include "config/the_isa.hh"
47 #include "debug/EthernetAll.hh"
48 #include "dev/net/etherlink.hh"
49 #include "mem/packet.hh"
50 #include "mem/packet_access.hh"
51 #include "params/NSGigE.hh"
52 #include "sim/system.hh"
53
54 // clang complains about std::set being overloaded with Packet::set if
55 // we open up the entire namespace std
56 using std::make_shared;
57 using std::min;
58 using std::ostream;
59 using std::string;
60
// Human-readable names for the receive state machine states; indexed
// by the RxState enum value and used only in debug (DPRINTF) output.
const char *NsRxStateStrings[] =
{
    "rxIdle",
    "rxDescRefr",
    "rxDescRead",
    "rxFifoBlock",
    "rxFragWrite",
    "rxDescWrite",
    "rxAdvance"
};
71
// Human-readable names for the transmit state machine states; indexed
// by the TxState enum value and used only in debug (DPRINTF) output.
const char *NsTxStateStrings[] =
{
    "txIdle",
    "txDescRefr",
    "txDescRead",
    "txFifoBlock",
    "txFragRead",
    "txDescWrite",
    "txAdvance"
};
82
// Human-readable names for the DMA engine states (rx/tx share the same
// enum); used only in debug output.
const char *NsDmaState[] =
{
    "dmaIdle",
    "dmaReading",
    "dmaWriting",
    "dmaReadWaiting",
    "dmaWriteWaiting"
};
91
92 using namespace Net;
93 using namespace TheISA;
94
///////////////////////////////////////////////////////////////////////
//
// NSGigE PCI Device
//
/**
 * Construct the device: initialize all register/state-machine members
 * from the Python-side parameters, create the single ethernet
 * interface, reset the device registers, and seed the perfect-match
 * filter ROM with the configured MAC address.
 */
NSGigE::NSGigE(Params *p)
    : EtherDevBase(p), ioEnable(false),
      txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size),
      txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
      txXferLen(0), rxXferLen(0), rxDmaFree(false), txDmaFree(false),
      txState(txIdle), txEnable(false), CTDD(false), txHalt(false),
      txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
      rxEnable(false), CRDD(false), rxPktBytes(0), rxHalt(false),
      rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
      eepromState(eepromStart), eepromClk(false), eepromBitsToRx(0),
      eepromOpcode(0), eepromAddress(0), eepromData(0),
      dmaReadDelay(p->dma_read_delay), dmaWriteDelay(p->dma_write_delay),
      dmaReadFactor(p->dma_read_factor), dmaWriteFactor(p->dma_write_factor),
      rxDmaData(NULL), rxDmaAddr(0), rxDmaLen(0),
      txDmaData(NULL), txDmaAddr(0), txDmaLen(0),
      rxDmaReadEvent([this]{ rxDmaReadDone(); }, name()),
      rxDmaWriteEvent([this]{ rxDmaWriteDone(); }, name()),
      txDmaReadEvent([this]{ txDmaReadDone(); }, name()),
      txDmaWriteEvent([this]{ txDmaWriteDone(); }, name()),
      dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free),
      txDelay(p->tx_delay), rxDelay(p->rx_delay),
      rxKickTick(0),
      rxKickEvent([this]{ rxKick(); }, name()),
      txKickTick(0),
      txKickEvent([this]{ txKick(); }, name()),
      txEvent([this]{ txEventTransmit(); }, name()),
      rxFilterEnable(p->rx_filter),
      acceptBroadcast(false), acceptMulticast(false), acceptUnicast(false),
      acceptPerfect(false), acceptArp(false), multicastHashEnable(false),
      intrDelay(p->intr_delay), intrTick(0), cpuPendingIntr(false),
      intrEvent(0), interface(0)
{


    interface = new NSGigEInt(name() + ".int0", this);

    regsReset();
    // Perfect-match filter ROM starts out holding this NIC's MAC address.
    memcpy(&rom.perfectMatch, p->hardware_address.bytes(), ETH_ADDR_LEN);

    // Clear both 32- and 64-bit descriptor caches; which one is used
    // depends on CFGR_M64ADDR at runtime.
    memset(&rxDesc32, 0, sizeof(rxDesc32));
    memset(&txDesc32, 0, sizeof(txDesc32));
    memset(&rxDesc64, 0, sizeof(rxDesc64));
    memset(&txDesc64, 0, sizeof(txDesc64));
}
143
/** Destructor: the device owns its ethernet interface object. */
NSGigE::~NSGigE()
{
    delete interface;
}
148
/**
 * This is to write to the PCI general configuration registers.
 * Standard config space is delegated to PciDevice; device-specific
 * config space is unimplemented.  We additionally snoop PCI_COMMAND
 * writes to track whether I/O space access is enabled.
 */
Tick
NSGigE::writeConfig(PacketPtr pkt)
{
    // NOTE(review): masks with PCI_CONFIG_SIZE itself rather than
    // SIZE - 1; assumes the constant is an all-ones mask -- confirm.
    int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
    if (offset < PCI_DEVICE_SPECIFIC)
        PciDevice::writeConfig(pkt);
    else
        panic("Device specific PCI config space not implemented!\n");

    switch (offset) {
        // seems to work fine without all these PCI settings, but i
        // put in the IO to double check, an assertion will fail if we
        // need to properly implement it
      case PCI_COMMAND:
        if (config.data[offset] & PCI_CMD_IOSE)
            ioEnable = true;
        else
            ioEnable = false;
        break;
    }

    return configDelay;
}
175
176 EtherInt*
177 NSGigE::getEthPort(const std::string &if_name, int idx)
178 {
179 if (if_name == "interface") {
180 if (interface->getPeer())
181 panic("interface already connected to\n");
182 return interface;
183 }
184 return NULL;
185 }
186
/**
 * This reads the device registers, which are detailed in the NS83820
 * spec sheet.  Most registers simply return their stored value; a few
 * (CR, ISR, MIBC) have read side effects mandated by the hardware.
 *
 * @param pkt PIO read request; the result is written into its buffer.
 * @return the PIO latency for this access.
 */
Tick
NSGigE::read(PacketPtr pkt)
{
    assert(ioEnable);

    //The mask is to give you only the offset into the device register file
    Addr daddr = pkt->getAddr() & 0xfff;
    DPRINTF(EthernetPIO, "read da=%#x pa=%#x size=%d\n",
            daddr, pkt->getAddr(), pkt->getSize());


    // there are some reserved registers, you can see ns_gige_reg.h and
    // the spec sheet for details
    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        return readConfig(pkt);
    } else if (daddr >= MIB_START && daddr <= MIB_END) {
        // don't implement all the MIB's. hopefully the kernel
        // doesn't actually DEPEND upon their values
        // MIB are just hardware stats keepers
        pkt->setLE<uint32_t>(0);
        pkt->makeAtomicResponse();
        return pioDelay;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    assert(pkt->getSize() == sizeof(uint32_t));
    // Alias directly into the packet's data buffer so each case below
    // writes its result straight into the response.
    uint32_t &reg = *pkt->getPtr<uint32_t>();
    uint16_t rfaddr;

    switch (daddr) {
      case CR:
        reg = regs.command;
        //these are supposed to be cleared on a read
        reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
        break;

      case CFGR:
        reg = regs.config;
        break;

      case MEAR:
        reg = regs.mear;
        break;

      case PTSCR:
        reg = regs.ptscr;
        break;

      case ISR:
        reg = regs.isr;
        // Reading ISR acknowledges (clears) all implemented interrupts.
        devIntrClear(ISR_ALL);
        break;

      case IMR:
        reg = regs.imr;
        break;

      case IER:
        reg = regs.ier;
        break;

      case IHR:
        reg = regs.ihr;
        break;

      case TXDP:
        reg = regs.txdp;
        break;

      case TXDP_HI:
        reg = regs.txdp_hi;
        break;

      case TX_CFG:
        reg = regs.txcfg;
        break;

      case GPIOR:
        reg = regs.gpior;
        break;

      case RXDP:
        reg = regs.rxdp;
        break;

      case RXDP_HI:
        reg = regs.rxdp_hi;
        break;

      case RX_CFG:
        reg = regs.rxcfg;
        break;

      case PQCR:
        reg = regs.pqcr;
        break;

      case WCSR:
        reg = regs.wcsr;
        break;

      case PCR:
        reg = regs.pcr;
        break;

        // see the spec sheet for how RFCR and RFDR work
        // basically, you write to RFCR to tell the machine
        // what you want to do next, then you act upon RFDR,
        // and the device will be prepared b/c of what you
        // wrote to RFCR
      case RFCR:
        reg = regs.rfcr;
        break;

      case RFDR:
        rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
        switch (rfaddr) {
            // Read from perfect match ROM octets
          case 0x000:
            reg = rom.perfectMatch[1];
            reg = reg << 8;
            reg += rom.perfectMatch[0];
            break;
          case 0x002:
            reg = rom.perfectMatch[3] << 8;
            reg += rom.perfectMatch[2];
            break;
          case 0x004:
            reg = rom.perfectMatch[5] << 8;
            reg += rom.perfectMatch[4];
            break;
          default:
            // Read filter hash table
            if (rfaddr >= FHASH_ADDR &&
                rfaddr < FHASH_ADDR + FHASH_SIZE) {

                // Only word-aligned reads supported
                if (rfaddr % 2)
                    panic("unaligned read from filter hash table!");

                reg = rom.filterHash[rfaddr - FHASH_ADDR + 1] << 8;
                reg += rom.filterHash[rfaddr - FHASH_ADDR];
                break;
            }

            panic("reading RFDR for something other than pattern"
                  " matching or hashing! %#x\n", rfaddr);
        }
        break;

      case SRR:
        reg = regs.srr;
        break;

      case MIBC:
        reg = regs.mibc;
        // MIBS (strobe) and ACLR (clear-on-read) self-clear on read.
        reg &= ~(MIBC_MIBS | MIBC_ACLR);
        break;

      case VRCR:
        reg = regs.vrcr;
        break;

      case VTCR:
        reg = regs.vtcr;
        break;

      case VDR:
        reg = regs.vdr;
        break;

      case CCSR:
        reg = regs.ccsr;
        break;

      case TBICR:
        reg = regs.tbicr;
        break;

      case TBISR:
        reg = regs.tbisr;
        break;

      case TANAR:
        reg = regs.tanar;
        break;

      case TANLPAR:
        reg = regs.tanlpar;
        break;

      case TANER:
        reg = regs.taner;
        break;

      case TESR:
        reg = regs.tesr;
        break;

      case M5REG:
        // Simulator-only register advertising gem5 feature flags to the
        // guest driver (not part of the real DP83820).
        reg = 0;
        if (params()->rx_thread)
            reg |= M5REG_RX_THREAD;
        if (params()->tx_thread)
            reg |= M5REG_TX_THREAD;
        if (params()->rss)
            reg |= M5REG_RSS;
        break;

      default:
        panic("reading unimplemented register: addr=%#x", daddr);
    }

    DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
            daddr, reg, reg);

    pkt->makeAtomicResponse();
    return pioDelay;
}
412
/**
 * Handle a PIO write to the device register file (see the NS83820
 * spec sheet).  Only 32-bit accesses are supported.  Several writes
 * have side effects: CR kicks/resets the rx/tx state machines, MEAR
 * clocks the emulated EEPROM, RFCR latches the receive-filter
 * configuration, and RFDR writes the filter ROM/hash table selected
 * by RFCR.
 *
 * @param pkt PIO write request.
 * @return the PIO latency for this access.
 */
Tick
NSGigE::write(PacketPtr pkt)
{
    assert(ioEnable);

    Addr daddr = pkt->getAddr() & 0xfff;
    DPRINTF(EthernetPIO, "write da=%#x pa=%#x size=%d\n",
            daddr, pkt->getAddr(), pkt->getSize());

    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        return writeConfig(pkt);
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    if (pkt->getSize() == sizeof(uint32_t)) {
        uint32_t reg = pkt->getLE<uint32_t>();
        uint16_t rfaddr;

        DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);

        switch (daddr) {
          case CR:
            regs.command = reg;
            if (reg & CR_TXD) {
                txEnable = false;
            } else if (reg & CR_TXE) {
                txEnable = true;

                // the kernel is enabling the transmit machine
                if (txState == txIdle)
                    txKick();
            }

            if (reg & CR_RXD) {
                rxEnable = false;
            } else if (reg & CR_RXE) {
                rxEnable = true;

                if (rxState == rxIdle)
                    rxKick();
            }

            if (reg & CR_TXR)
                txReset();

            if (reg & CR_RXR)
                rxReset();

            if (reg & CR_SWI)
                devIntrPost(ISR_SWI);

            if (reg & CR_RST) {
                txReset();
                rxReset();

                regsReset();
            }
            break;

          case CFGR:
            // The listed bits are read-only status; preserve them and
            // update only the writable portion of the register.
            if (reg & CFGR_LNKSTS ||
                reg & CFGR_SPDSTS ||
                reg & CFGR_DUPSTS ||
                reg & CFGR_RESERVED ||
                reg & CFGR_T64ADDR ||
                reg & CFGR_PCI64_DET) {
                // First clear all writable bits
                regs.config &= CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
                    CFGR_RESERVED | CFGR_T64ADDR |
                    CFGR_PCI64_DET;
                // Now set the appropriate writable bits
                regs.config |= reg & ~(CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
                                       CFGR_RESERVED | CFGR_T64ADDR |
                                       CFGR_PCI64_DET);
            }

            // all these #if 0's are because i don't THINK the kernel needs to
            // have these implemented. if there is a problem relating to one of
            // these, you may need to add functionality in.

            // grouped together and #if 0'ed to avoid empty if body and make clang happy
#if 0
            if (reg & CFGR_TBI_EN) ;
            if (reg & CFGR_MODE_1000) ;

            if (reg & CFGR_PINT_DUPSTS ||
                reg & CFGR_PINT_LNKSTS ||
                reg & CFGR_PINT_SPDSTS)
                ;

            if (reg & CFGR_TMRTEST) ;
            if (reg & CFGR_MRM_DIS) ;
            if (reg & CFGR_MWI_DIS) ;

            if (reg & CFGR_DATA64_EN) ;
            if (reg & CFGR_M64ADDR) ;
            if (reg & CFGR_PHY_RST) ;
            if (reg & CFGR_PHY_DIS) ;

            if (reg & CFGR_REQALG) ;
            if (reg & CFGR_SB) ;
            if (reg & CFGR_POW) ;
            if (reg & CFGR_EXD) ;
            if (reg & CFGR_PESEL) ;
            if (reg & CFGR_BROM_DIS) ;
            if (reg & CFGR_EXT_125) ;
            if (reg & CFGR_BEM) ;

            if (reg & CFGR_T64ADDR) ;
            // panic("CFGR_T64ADDR is read only register!\n");
#endif
            if (reg & CFGR_AUTO_1000)
                panic("CFGR_AUTO_1000 not implemented!\n");

            if (reg & CFGR_PCI64_DET)
                panic("CFGR_PCI64_DET is read only register!\n");

            if (reg & CFGR_EXTSTS_EN)
                extstsEnable = true;
            else
                extstsEnable = false;
            break;

          case MEAR:
            // Clear writable bits
            regs.mear &= MEAR_EEDO;
            // Set appropriate writable bits
            regs.mear |= reg & ~MEAR_EEDO;

            // FreeBSD uses the EEPROM to read PMATCH (for the MAC address)
            // even though it could get it through RFDR
            if (reg & MEAR_EESEL) {
                // Rising edge of clock
                if (reg & MEAR_EECLK && !eepromClk)
                    eepromKick();
            }
            else {
                // Deselecting the EEPROM resets its bit-serial protocol.
                eepromState = eepromStart;
                regs.mear &= ~MEAR_EEDI;
            }

            eepromClk = reg & MEAR_EECLK;

            // since phy is completely faked, MEAR_MD* don't matter

            // grouped together and #if 0'ed to avoid empty if body and make clang happy
#if 0
            if (reg & MEAR_MDIO) ;
            if (reg & MEAR_MDDIR) ;
            if (reg & MEAR_MDC) ;
#endif
            break;

          case PTSCR:
            regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
            // these control BISTs for various parts of chip - we
            // don't care or do just fake that the BIST is done
            if (reg & PTSCR_RBIST_EN)
                regs.ptscr |= PTSCR_RBIST_DONE;
            if (reg & PTSCR_EEBIST_EN)
                regs.ptscr &= ~PTSCR_EEBIST_EN;
            if (reg & PTSCR_EELOAD_EN)
                regs.ptscr &= ~PTSCR_EELOAD_EN;
            break;

          case ISR: /* writing to the ISR has no effect */
            panic("ISR is a read only register!\n");

          case IMR:
            regs.imr = reg;
            devIntrChangeMask();
            break;

          case IER:
            regs.ier = reg;
            break;

          case IHR:
            regs.ihr = reg;
            /* not going to implement real interrupt holdoff */
            break;

          case TXDP:
            // Descriptor pointers are dword-aligned; low bits ignored.
            regs.txdp = (reg & 0xFFFFFFFC);
            assert(txState == txIdle);
            CTDD = false;
            break;

          case TXDP_HI:
            regs.txdp_hi = reg;
            break;

          case TX_CFG:
            regs.txcfg = reg;
#if 0
            if (reg & TX_CFG_CSI) ;
            if (reg & TX_CFG_HBI) ;
            if (reg & TX_CFG_MLB) ;
            if (reg & TX_CFG_ATP) ;
            if (reg & TX_CFG_ECRETRY) {
                /*
                 * this could easily be implemented, but considering
                 * the network is just a fake pipe, wouldn't make
                 * sense to do this
                 */
            }

            if (reg & TX_CFG_BRST_DIS) ;
#endif

#if 0
            /* we handle our own DMA, ignore the kernel's exhortations */
            if (reg & TX_CFG_MXDMA) ;
#endif

            // also, we currently don't care about fill/drain
            // thresholds though this may change in the future with
            // more realistic networks or a driver which changes it
            // according to feedback

            break;

          case GPIOR:
            // Only write writable bits
            regs.gpior &= GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
                | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN;
            regs.gpior |= reg & ~(GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
                                  | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN);
            /* these just control general purpose i/o pins, don't matter */
            break;

          case RXDP:
            regs.rxdp = reg;
            CRDD = false;
            break;

          case RXDP_HI:
            regs.rxdp_hi = reg;
            break;

          case RX_CFG:
            regs.rxcfg = reg;
#if 0
            if (reg & RX_CFG_AEP) ;
            if (reg & RX_CFG_ARP) ;
            if (reg & RX_CFG_STRIPCRC) ;
            if (reg & RX_CFG_RX_RD) ;
            if (reg & RX_CFG_ALP) ;
            if (reg & RX_CFG_AIRL) ;

            /* we handle our own DMA, ignore what kernel says about it */
            if (reg & RX_CFG_MXDMA) ;

            //also, we currently don't care about fill/drain thresholds
            //though this may change in the future with more realistic
            //networks or a driver which changes it according to feedback
            if (reg & (RX_CFG_DRTH | RX_CFG_DRTH0)) ;
#endif
            break;

          case PQCR:
            /* there is no priority queueing used in the linux 2.6 driver */
            regs.pqcr = reg;
            break;

          case WCSR:
            /* not going to implement wake on LAN */
            regs.wcsr = reg;
            break;

          case PCR:
            /* not going to implement pause control */
            regs.pcr = reg;
            break;

          case RFCR:
            regs.rfcr = reg;

            // Latch the receive-filter mode bits into cached booleans
            // used by the rx filter on every incoming packet.
            rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
            acceptBroadcast = (reg & RFCR_AAB) ? true : false;
            acceptMulticast = (reg & RFCR_AAM) ? true : false;
            acceptUnicast = (reg & RFCR_AAU) ? true : false;
            acceptPerfect = (reg & RFCR_APM) ? true : false;
            acceptArp = (reg & RFCR_AARP) ? true : false;
            multicastHashEnable = (reg & RFCR_MHEN) ? true : false;

#if 0
            if (reg & RFCR_APAT)
                panic("RFCR_APAT not implemented!\n");
#endif
            if (reg & RFCR_UHEN)
                panic("Unicast hash filtering not used by drivers!\n");

            if (reg & RFCR_ULM)
                panic("RFCR_ULM not implemented!\n");

            break;

          case RFDR:
            // The RFCR address field selects which filter word this
            // write targets (perfect-match ROM or hash table).
            rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
            switch (rfaddr) {
              case 0x000:
                rom.perfectMatch[0] = (uint8_t)reg;
                rom.perfectMatch[1] = (uint8_t)(reg >> 8);
                break;
              case 0x002:
                rom.perfectMatch[2] = (uint8_t)reg;
                rom.perfectMatch[3] = (uint8_t)(reg >> 8);
                break;
              case 0x004:
                rom.perfectMatch[4] = (uint8_t)reg;
                rom.perfectMatch[5] = (uint8_t)(reg >> 8);
                break;
              default:

                if (rfaddr >= FHASH_ADDR &&
                    rfaddr < FHASH_ADDR + FHASH_SIZE) {

                    // Only word-aligned writes supported
                    if (rfaddr % 2)
                        panic("unaligned write to filter hash table!");

                    rom.filterHash[rfaddr - FHASH_ADDR] = (uint8_t)reg;
                    rom.filterHash[rfaddr - FHASH_ADDR + 1]
                        = (uint8_t)(reg >> 8);
                    break;
                }
                panic("writing RFDR for something other than pattern matching "
                      "or hashing! %#x\n", rfaddr);
            }
            break;

          case BRAR:
            regs.brar = reg;
            break;

          case BRDR:
            panic("the driver never uses BRDR, something is wrong!\n");

          case SRR:
            panic("SRR is read only register!\n");

          case MIBC:
            panic("the driver never uses MIBC, something is wrong!\n");

          case VRCR:
            regs.vrcr = reg;
            break;

          case VTCR:
            regs.vtcr = reg;
            break;

          case VDR:
            panic("the driver never uses VDR, something is wrong!\n");

          case CCSR:
            /* not going to implement clockrun stuff */
            regs.ccsr = reg;
            break;

          case TBICR:
            regs.tbicr = reg;
            if (reg & TBICR_MR_LOOPBACK)
                panic("TBICR_MR_LOOPBACK never used, something wrong!\n");

            if (reg & TBICR_MR_AN_ENABLE) {
                // Fake instant auto-negotiation: echo our advertised
                // abilities as the link partner's and report done/up.
                regs.tanlpar = regs.tanar;
                regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
            }

#if 0
            if (reg & TBICR_MR_RESTART_AN) ;
#endif

            break;

          case TBISR:
            panic("TBISR is read only register!\n");

          case TANAR:
            // Only write the writable bits
            regs.tanar &= TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED;
            regs.tanar |= reg & ~(TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED);

            // Pause capability unimplemented
#if 0
            if (reg & TANAR_PS2) ;
            if (reg & TANAR_PS1) ;
#endif

            break;

          case TANLPAR:
            panic("this should only be written to by the fake phy!\n");

          case TANER:
            panic("TANER is read only register!\n");

          case TESR:
            regs.tesr = reg;
            break;

          default:
            panic("invalid register access daddr=%#x", daddr);
        }
    } else {
        panic("Invalid Request Size");
    }
    pkt->makeAtomicResponse();
    return pioDelay;
}
827
/**
 * Raise device-level interrupts: set the given bits in ISR, bump the
 * per-cause statistics for newly-posted unmasked interrupts, and if
 * anything unmasked is now pending, schedule a CPU interrupt (after
 * intrDelay unless the cause is in ISR_NODELAY).
 */
void
NSGigE::devIntrPost(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot set a reserved interrupt");

    if (interrupts & ISR_NOIMPL)
        warn("interrupt not implemented %#x\n", interrupts);

    interrupts &= ISR_IMPL;
    regs.isr |= interrupts;

    // Count only interrupts that are actually enabled in the mask.
    if (interrupts & regs.imr) {
        if (interrupts & ISR_SWI) {
            totalSwi++;
        }
        if (interrupts & ISR_RXIDLE) {
            totalRxIdle++;
        }
        if (interrupts & ISR_RXOK) {
            totalRxOk++;
        }
        if (interrupts & ISR_RXDESC) {
            totalRxDesc++;
        }
        if (interrupts & ISR_TXOK) {
            totalTxOk++;
        }
        if (interrupts & ISR_TXIDLE) {
            totalTxIdle++;
        }
        if (interrupts & ISR_TXDESC) {
            totalTxDesc++;
        }
        if (interrupts & ISR_RXORN) {
            totalRxOrn++;
        }
    }

    DPRINTF(EthernetIntr,
            "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
            interrupts, regs.isr, regs.imr);

    if ((regs.isr & regs.imr)) {
        Tick when = curTick();
        if ((regs.isr & regs.imr & ISR_NODELAY) == 0)
            when += intrDelay;
        postedInterrupts++;
        cpuIntrPost(when);
    }
}
879
/* writing this interrupt counting stats inside this means that this function
   is now limited to being used to clear all interrupts upon the kernel
   reading isr and servicing. just telling you in case you were thinking
   of expanding use.
*/
/**
 * Clear device-level interrupts: bump the "posted" statistics for any
 * unmasked interrupts currently pending, clear the requested ISR bits,
 * and deassert the CPU interrupt line if nothing unmasked remains.
 */
void
NSGigE::devIntrClear(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot clear a reserved interrupt");

    if (regs.isr & regs.imr & ISR_SWI) {
        postedSwi++;
    }
    if (regs.isr & regs.imr & ISR_RXIDLE) {
        postedRxIdle++;
    }
    if (regs.isr & regs.imr & ISR_RXOK) {
        postedRxOk++;
    }
    if (regs.isr & regs.imr & ISR_RXDESC) {
        postedRxDesc++;
    }
    if (regs.isr & regs.imr & ISR_TXOK) {
        postedTxOk++;
    }
    if (regs.isr & regs.imr & ISR_TXIDLE) {
        postedTxIdle++;
    }
    if (regs.isr & regs.imr & ISR_TXDESC) {
        postedTxDesc++;
    }
    if (regs.isr & regs.imr & ISR_RXORN) {
        postedRxOrn++;
    }

    interrupts &= ~ISR_NOIMPL;
    regs.isr &= ~interrupts;

    DPRINTF(EthernetIntr,
            "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
            interrupts, regs.isr, regs.imr);

    if (!(regs.isr & regs.imr))
        cpuIntrClear();
}
926
927 void
928 NSGigE::devIntrChangeMask()
929 {
930 DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
931 regs.isr, regs.imr, regs.isr & regs.imr);
932
933 if (regs.isr & regs.imr)
934 cpuIntrPost(curTick());
935 else
936 cpuIntrClear();
937 }
938
/**
 * Schedule delivery of an interrupt to the CPU at the given tick.
 * If an earlier (or equal) interrupt is already scheduled, this one
 * will be subsumed by it; otherwise any later pending event is
 * squashed and replaced.
 */
void
NSGigE::cpuIntrPost(Tick when)
{
    // If the interrupt you want to post is later than an interrupt
    // already scheduled, just let it post in the coming one and don't
    // schedule another.
    // HOWEVER, must be sure that the scheduled intrTick is in the
    // future (this was formerly the source of a bug)
    /**
     * @todo this warning should be removed and the intrTick code should
     * be fixed.
     */
    assert(when >= curTick());
    assert(intrTick >= curTick() || intrTick == 0);
    if (when > intrTick && intrTick != 0) {
        DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
                intrTick);
        return;
    }

    intrTick = when;
    if (intrTick < curTick()) {
        intrTick = curTick();
    }

    DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
            intrTick);

    if (intrEvent)
        intrEvent->squash();

    // Event auto-deletes itself after firing (third ctor arg), so we
    // only track it via intrEvent for potential squashing.
    intrEvent = new EventFunctionWrapper([this]{ cpuInterrupt(); },
                                         name(), true);
    schedule(intrEvent, intrTick);
}
974
/**
 * Fire the scheduled interrupt event: actually assert the interrupt
 * line to the CPU unless one is already pending.
 */
void
NSGigE::cpuInterrupt()
{
    assert(intrTick == curTick());

    // Whether or not there's a pending interrupt, we don't care about
    // it anymore
    intrEvent = 0;
    intrTick = 0;

    // Don't send an interrupt if there's already one
    if (cpuPendingIntr) {
        DPRINTF(EthernetIntr,
                "would send an interrupt now, but there's already pending\n");
    } else {
        // Send interrupt
        cpuPendingIntr = true;

        DPRINTF(EthernetIntr, "posting interrupt\n");
        intrPost();
    }
}
997
/**
 * Deassert the CPU interrupt line, cancelling any interrupt event
 * still scheduled.  No-op if no interrupt is pending.
 */
void
NSGigE::cpuIntrClear()
{
    if (!cpuPendingIntr)
        return;

    if (intrEvent) {
        intrEvent->squash();
        intrEvent = 0;
    }

    intrTick = 0;

    cpuPendingIntr = false;

    DPRINTF(EthernetIntr, "clearing interrupt\n");
    intrClear();
}
1016
1017 bool
1018 NSGigE::cpuIntrPending() const
1019 { return cpuPendingIntr; }
1020
1021 void
1022 NSGigE::txReset()
1023 {
1024
1025 DPRINTF(Ethernet, "transmit reset\n");
1026
1027 CTDD = false;
1028 txEnable = false;;
1029 txFragPtr = 0;
1030 assert(txDescCnt == 0);
1031 txFifo.clear();
1032 txState = txIdle;
1033 assert(txDmaState == dmaIdle);
1034 }
1035
/**
 * Reset the receive side of the device: disable receive, drop any
 * fragment pointer state, drain the rx fifo, and return the state
 * machine to rxIdle.  Asserts that no packet is mid-receive and no
 * DMA is in flight when the reset happens.
 */
void
NSGigE::rxReset()
{
    DPRINTF(Ethernet, "receive reset\n");

    CRDD = false;
    assert(rxPktBytes == 0);
    rxEnable = false;
    rxFragPtr = 0;
    assert(rxDescCnt == 0);
    assert(rxDmaState == dmaIdle);
    rxFifo.clear();
    rxState = rxIdle;
}
1050
/**
 * Restore the device register file and cached filter flags to their
 * hardware power-on/reset values (per the DP83820 spec sheet).
 */
void
NSGigE::regsReset()
{
    memset(&regs, 0, sizeof(regs));
    regs.config = (CFGR_LNKSTS | CFGR_TBI_EN | CFGR_MODE_1000);
    regs.mear = 0x12;
    regs.txcfg = 0x120; // set drain threshold to 1024 bytes and
                        // fill threshold to 32 bytes
    regs.rxcfg = 0x4; // set drain threshold to 16 bytes
    regs.srr = 0x0103; // set the silicon revision to rev B or 0x103
    regs.mibc = MIBC_FRZ;
    regs.vdr = 0x81; // set the vlan tag type to 802.1q
    regs.tesr = 0xc000; // TBI capable of both full and half duplex
    regs.brar = 0xffffffff;

    extstsEnable = false;
    acceptBroadcast = false;
    acceptMulticast = false;
    acceptUnicast = false;
    acceptPerfect = false;
    acceptArp = false;
}
1073
1074 bool
1075 NSGigE::doRxDmaRead()
1076 {
1077 assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
1078 rxDmaState = dmaReading;
1079
1080 if (dmaPending() || drainState() != DrainState::Running)
1081 rxDmaState = dmaReadWaiting;
1082 else
1083 dmaRead(rxDmaAddr, rxDmaLen, &rxDmaReadEvent, (uint8_t*)rxDmaData);
1084
1085 return true;
1086 }
1087
/**
 * Completion callback for a receive-side DMA read: return the DMA
 * engine to idle and re-kick the state machines.
 */
void
NSGigE::rxDmaReadDone()
{
    assert(rxDmaState == dmaReading);
    rxDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
            rxDmaAddr, rxDmaLen);
    DDUMP(EthernetDMA, rxDmaData, rxDmaLen);

    // If the transmit state machine has a pending DMA, let it go first
    if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
        txKick();

    rxKick();
}
1104
1105 bool
1106 NSGigE::doRxDmaWrite()
1107 {
1108 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
1109 rxDmaState = dmaWriting;
1110
1111 if (dmaPending() || drainState() != DrainState::Running)
1112 rxDmaState = dmaWriteWaiting;
1113 else
1114 dmaWrite(rxDmaAddr, rxDmaLen, &rxDmaWriteEvent, (uint8_t*)rxDmaData);
1115 return true;
1116 }
1117
/**
 * Completion callback for a receive-side DMA write: return the DMA
 * engine to idle and re-kick the state machines.
 */
void
NSGigE::rxDmaWriteDone()
{
    assert(rxDmaState == dmaWriting);
    rxDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
            rxDmaAddr, rxDmaLen);
    DDUMP(EthernetDMA, rxDmaData, rxDmaLen);

    // If the transmit state machine has a pending DMA, let it go first
    if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
        txKick();

    rxKick();
}
1134
1135 void
1136 NSGigE::rxKick()
1137 {
1138 bool is64bit = (bool)(regs.config & CFGR_M64ADDR);
1139
1140 DPRINTF(EthernetSM,
1141 "receive kick rxState=%s (rxBuf.size=%d) %d-bit\n",
1142 NsRxStateStrings[rxState], rxFifo.size(), is64bit ? 64 : 32);
1143
1144 Addr link, bufptr;
1145 uint32_t &cmdsts = is64bit ? rxDesc64.cmdsts : rxDesc32.cmdsts;
1146 uint32_t &extsts = is64bit ? rxDesc64.extsts : rxDesc32.extsts;
1147
1148 next:
1149 if (rxKickTick > curTick()) {
1150 DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
1151 rxKickTick);
1152
1153 goto exit;
1154 }
1155
1156 // Go to the next state machine clock tick.
1157 rxKickTick = clockEdge(Cycles(1));
1158
1159 switch(rxDmaState) {
1160 case dmaReadWaiting:
1161 if (doRxDmaRead())
1162 goto exit;
1163 break;
1164 case dmaWriteWaiting:
1165 if (doRxDmaWrite())
1166 goto exit;
1167 break;
1168 default:
1169 break;
1170 }
1171
1172 link = is64bit ? (Addr)rxDesc64.link : (Addr)rxDesc32.link;
1173 bufptr = is64bit ? (Addr)rxDesc64.bufptr : (Addr)rxDesc32.bufptr;
1174
1175 // see state machine from spec for details
1176 // the way this works is, if you finish work on one state and can
1177 // go directly to another, you do that through jumping to the
1178 // label "next". however, if you have intermediate work, like DMA
1179 // so that you can't go to the next state yet, you go to exit and
1180 // exit the loop. however, when the DMA is done it will trigger
1181 // an event and come back to this loop.
1182 switch (rxState) {
1183 case rxIdle:
1184 if (!rxEnable) {
1185 DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n");
1186 goto exit;
1187 }
1188
1189 if (CRDD) {
1190 rxState = rxDescRefr;
1191
1192 rxDmaAddr = regs.rxdp & 0x3fffffff;
1193 rxDmaData =
1194 is64bit ? (void *)&rxDesc64.link : (void *)&rxDesc32.link;
1195 rxDmaLen = is64bit ? sizeof(rxDesc64.link) : sizeof(rxDesc32.link);
1196 rxDmaFree = dmaDescFree;
1197
1198 descDmaReads++;
1199 descDmaRdBytes += rxDmaLen;
1200
1201 if (doRxDmaRead())
1202 goto exit;
1203 } else {
1204 rxState = rxDescRead;
1205
1206 rxDmaAddr = regs.rxdp & 0x3fffffff;
1207 rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
1208 rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
1209 rxDmaFree = dmaDescFree;
1210
1211 descDmaReads++;
1212 descDmaRdBytes += rxDmaLen;
1213
1214 if (doRxDmaRead())
1215 goto exit;
1216 }
1217 break;
1218
1219 case rxDescRefr:
1220 if (rxDmaState != dmaIdle)
1221 goto exit;
1222
1223 rxState = rxAdvance;
1224 break;
1225
1226 case rxDescRead:
1227 if (rxDmaState != dmaIdle)
1228 goto exit;
1229
1230 DPRINTF(EthernetDesc, "rxDesc: addr=%08x read descriptor\n",
1231 regs.rxdp & 0x3fffffff);
1232 DPRINTF(EthernetDesc,
1233 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
1234 link, bufptr, cmdsts, extsts);
1235
1236 if (cmdsts & CMDSTS_OWN) {
1237 devIntrPost(ISR_RXIDLE);
1238 rxState = rxIdle;
1239 goto exit;
1240 } else {
1241 rxState = rxFifoBlock;
1242 rxFragPtr = bufptr;
1243 rxDescCnt = cmdsts & CMDSTS_LEN_MASK;
1244 }
1245 break;
1246
1247 case rxFifoBlock:
1248 if (!rxPacket) {
1249 /**
1250 * @todo in reality, we should be able to start processing
1251 * the packet as it arrives, and not have to wait for the
1252 * full packet ot be in the receive fifo.
1253 */
1254 if (rxFifo.empty())
1255 goto exit;
1256
1257 DPRINTF(EthernetSM, "****processing receive of new packet****\n");
1258
1259 // If we don't have a packet, grab a new one from the fifo.
1260 rxPacket = rxFifo.front();
1261 rxPktBytes = rxPacket->length;
1262 rxPacketBufPtr = rxPacket->data;
1263
1264 #if TRACING_ON
1265 if (DTRACE(Ethernet)) {
1266 IpPtr ip(rxPacket);
1267 if (ip) {
1268 DPRINTF(Ethernet, "ID is %d\n", ip->id());
1269 TcpPtr tcp(ip);
1270 if (tcp) {
1271 DPRINTF(Ethernet,
1272 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1273 tcp->sport(), tcp->dport(), tcp->seq(),
1274 tcp->ack());
1275 }
1276 }
1277 }
1278 #endif
1279
1280 // sanity check - i think the driver behaves like this
1281 assert(rxDescCnt >= rxPktBytes);
1282 rxFifo.pop();
1283 }
1284
1285
1286 // dont' need the && rxDescCnt > 0 if driver sanity check
1287 // above holds
1288 if (rxPktBytes > 0) {
1289 rxState = rxFragWrite;
1290 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
1291 // check holds
1292 rxXferLen = rxPktBytes;
1293
1294 rxDmaAddr = rxFragPtr & 0x3fffffff;
1295 rxDmaData = rxPacketBufPtr;
1296 rxDmaLen = rxXferLen;
1297 rxDmaFree = dmaDataFree;
1298
1299 if (doRxDmaWrite())
1300 goto exit;
1301
1302 } else {
1303 rxState = rxDescWrite;
1304
1305 //if (rxPktBytes == 0) { /* packet is done */
1306 assert(rxPktBytes == 0);
1307 DPRINTF(EthernetSM, "done with receiving packet\n");
1308
1309 cmdsts |= CMDSTS_OWN;
1310 cmdsts &= ~CMDSTS_MORE;
1311 cmdsts |= CMDSTS_OK;
1312 cmdsts &= 0xffff0000;
1313 cmdsts += rxPacket->length; //i.e. set CMDSTS_SIZE
1314
1315 #if 0
1316 /*
1317 * all the driver uses these are for its own stats keeping
1318 * which we don't care about, aren't necessary for
1319 * functionality and doing this would just slow us down.
1320 * if they end up using this in a later version for
1321 * functional purposes, just undef
1322 */
1323 if (rxFilterEnable) {
1324 cmdsts &= ~CMDSTS_DEST_MASK;
1325 const EthAddr &dst = rxFifoFront()->dst();
1326 if (dst->unicast())
1327 cmdsts |= CMDSTS_DEST_SELF;
1328 if (dst->multicast())
1329 cmdsts |= CMDSTS_DEST_MULTI;
1330 if (dst->broadcast())
1331 cmdsts |= CMDSTS_DEST_MASK;
1332 }
1333 #endif
1334
1335 IpPtr ip(rxPacket);
1336 if (extstsEnable && ip) {
1337 extsts |= EXTSTS_IPPKT;
1338 rxIpChecksums++;
1339 if (cksum(ip) != 0) {
1340 DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
1341 extsts |= EXTSTS_IPERR;
1342 }
1343 TcpPtr tcp(ip);
1344 UdpPtr udp(ip);
1345 if (tcp) {
1346 extsts |= EXTSTS_TCPPKT;
1347 rxTcpChecksums++;
1348 if (cksum(tcp) != 0) {
1349 DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
1350 extsts |= EXTSTS_TCPERR;
1351
1352 }
1353 } else if (udp) {
1354 extsts |= EXTSTS_UDPPKT;
1355 rxUdpChecksums++;
1356 if (cksum(udp) != 0) {
1357 DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
1358 extsts |= EXTSTS_UDPERR;
1359 }
1360 }
1361 }
1362 rxPacket = 0;
1363
1364 /*
1365 * the driver seems to always receive into desc buffers
1366 * of size 1514, so you never have a pkt that is split
1367 * into multiple descriptors on the receive side, so
1368 * i don't implement that case, hence the assert above.
1369 */
1370
1371 DPRINTF(EthernetDesc,
1372 "rxDesc: addr=%08x writeback cmdsts extsts\n",
1373 regs.rxdp & 0x3fffffff);
1374 DPRINTF(EthernetDesc,
1375 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
1376 link, bufptr, cmdsts, extsts);
1377
1378 rxDmaAddr = regs.rxdp & 0x3fffffff;
1379 rxDmaData = &cmdsts;
1380 if (is64bit) {
1381 rxDmaAddr += offsetof(ns_desc64, cmdsts);
1382 rxDmaLen = sizeof(rxDesc64.cmdsts) + sizeof(rxDesc64.extsts);
1383 } else {
1384 rxDmaAddr += offsetof(ns_desc32, cmdsts);
1385 rxDmaLen = sizeof(rxDesc32.cmdsts) + sizeof(rxDesc32.extsts);
1386 }
1387 rxDmaFree = dmaDescFree;
1388
1389 descDmaWrites++;
1390 descDmaWrBytes += rxDmaLen;
1391
1392 if (doRxDmaWrite())
1393 goto exit;
1394 }
1395 break;
1396
1397 case rxFragWrite:
1398 if (rxDmaState != dmaIdle)
1399 goto exit;
1400
1401 rxPacketBufPtr += rxXferLen;
1402 rxFragPtr += rxXferLen;
1403 rxPktBytes -= rxXferLen;
1404
1405 rxState = rxFifoBlock;
1406 break;
1407
1408 case rxDescWrite:
1409 if (rxDmaState != dmaIdle)
1410 goto exit;
1411
1412 assert(cmdsts & CMDSTS_OWN);
1413
1414 assert(rxPacket == 0);
1415 devIntrPost(ISR_RXOK);
1416
1417 if (cmdsts & CMDSTS_INTR)
1418 devIntrPost(ISR_RXDESC);
1419
1420 if (!rxEnable) {
1421 DPRINTF(EthernetSM, "Halting the RX state machine\n");
1422 rxState = rxIdle;
1423 goto exit;
1424 } else
1425 rxState = rxAdvance;
1426 break;
1427
1428 case rxAdvance:
1429 if (link == 0) {
1430 devIntrPost(ISR_RXIDLE);
1431 rxState = rxIdle;
1432 CRDD = true;
1433 goto exit;
1434 } else {
1435 if (rxDmaState != dmaIdle)
1436 goto exit;
1437 rxState = rxDescRead;
1438 regs.rxdp = link;
1439 CRDD = false;
1440
1441 rxDmaAddr = regs.rxdp & 0x3fffffff;
1442 rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
1443 rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
1444 rxDmaFree = dmaDescFree;
1445
1446 if (doRxDmaRead())
1447 goto exit;
1448 }
1449 break;
1450
1451 default:
1452 panic("Invalid rxState!");
1453 }
1454
1455 DPRINTF(EthernetSM, "entering next rxState=%s\n",
1456 NsRxStateStrings[rxState]);
1457 goto next;
1458
1459 exit:
1460 /**
1461 * @todo do we want to schedule a future kick?
1462 */
1463 DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
1464 NsRxStateStrings[rxState]);
1465
1466 if (!rxKickEvent.scheduled())
1467 schedule(rxKickEvent, rxKickTick);
1468 }
1469
/**
 * Try to transmit the packet at the head of txFifo via the attached
 * ethernet interface.  On success, update transmit statistics, pop the
 * packet, and post the TXOK interrupt.  If the interface refuses the
 * packet (link busy) or more packets remain, a retry is scheduled
 * retryTime ticks from now.
 */
void
NSGigE::transmit()
{
    if (txFifo.empty()) {
        DPRINTF(Ethernet, "nothing to transmit\n");
        return;
    }

    DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n",
            txFifo.size());
    if (interface->sendPacket(txFifo.front())) {
#if TRACING_ON
        if (DTRACE(Ethernet)) {
            IpPtr ip(txFifo.front());
            if (ip) {
                DPRINTF(Ethernet, "ID is %d\n", ip->id());
                TcpPtr tcp(ip);
                if (tcp) {
                    DPRINTF(Ethernet,
                            "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
                            tcp->sport(), tcp->dport(), tcp->seq(),
                            tcp->ack());
                }
            }
        }
#endif

        DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length);
        // Statistics are only charged for packets actually accepted
        // by the interface.
        txBytes += txFifo.front()->length;
        txPackets++;

        DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n",
                txFifo.avail());
        txFifo.pop();

        /*
         * normally do a writeback of the descriptor here, and ONLY
         * after that is done, send this interrupt. but since our
         * stuff never actually fails, just do this interrupt here,
         * otherwise the code has to stray from this nice format.
         * besides, it's functionally the same.
         */
        devIntrPost(ISR_TXOK);
    }

    // Either the send failed or more packets are queued: retry later.
    if (!txFifo.empty() && !txEvent.scheduled()) {
        DPRINTF(Ethernet, "reschedule transmit\n");
        schedule(txEvent, curTick() + retryTime);
    }
}
1520
1521 bool
1522 NSGigE::doTxDmaRead()
1523 {
1524 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
1525 txDmaState = dmaReading;
1526
1527 if (dmaPending() || drainState() != DrainState::Running)
1528 txDmaState = dmaReadWaiting;
1529 else
1530 dmaRead(txDmaAddr, txDmaLen, &txDmaReadEvent, (uint8_t*)txDmaData);
1531
1532 return true;
1533 }
1534
1535 void
1536 NSGigE::txDmaReadDone()
1537 {
1538 assert(txDmaState == dmaReading);
1539 txDmaState = dmaIdle;
1540
1541 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
1542 txDmaAddr, txDmaLen);
1543 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1544
1545 // If the receive state machine has a pending DMA, let it go first
1546 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1547 rxKick();
1548
1549 txKick();
1550 }
1551
1552 bool
1553 NSGigE::doTxDmaWrite()
1554 {
1555 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
1556 txDmaState = dmaWriting;
1557
1558 if (dmaPending() || drainState() != DrainState::Running)
1559 txDmaState = dmaWriteWaiting;
1560 else
1561 dmaWrite(txDmaAddr, txDmaLen, &txDmaWriteEvent, (uint8_t*)txDmaData);
1562 return true;
1563 }
1564
1565 void
1566 NSGigE::txDmaWriteDone()
1567 {
1568 assert(txDmaState == dmaWriting);
1569 txDmaState = dmaIdle;
1570
1571 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
1572 txDmaAddr, txDmaLen);
1573 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1574
1575 // If the receive state machine has a pending DMA, let it go first
1576 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1577 rxKick();
1578
1579 txKick();
1580 }
1581
1582 void
1583 NSGigE::txKick()
1584 {
1585 bool is64bit = (bool)(regs.config & CFGR_M64ADDR);
1586
1587 DPRINTF(EthernetSM, "transmit kick txState=%s %d-bit\n",
1588 NsTxStateStrings[txState], is64bit ? 64 : 32);
1589
1590 Addr link, bufptr;
1591 uint32_t &cmdsts = is64bit ? txDesc64.cmdsts : txDesc32.cmdsts;
1592 uint32_t &extsts = is64bit ? txDesc64.extsts : txDesc32.extsts;
1593
1594 next:
1595 if (txKickTick > curTick()) {
1596 DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
1597 txKickTick);
1598 goto exit;
1599 }
1600
1601 // Go to the next state machine clock tick.
1602 txKickTick = clockEdge(Cycles(1));
1603
1604 switch(txDmaState) {
1605 case dmaReadWaiting:
1606 if (doTxDmaRead())
1607 goto exit;
1608 break;
1609 case dmaWriteWaiting:
1610 if (doTxDmaWrite())
1611 goto exit;
1612 break;
1613 default:
1614 break;
1615 }
1616
1617 link = is64bit ? (Addr)txDesc64.link : (Addr)txDesc32.link;
1618 bufptr = is64bit ? (Addr)txDesc64.bufptr : (Addr)txDesc32.bufptr;
1619 switch (txState) {
1620 case txIdle:
1621 if (!txEnable) {
1622 DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n");
1623 goto exit;
1624 }
1625
1626 if (CTDD) {
1627 txState = txDescRefr;
1628
1629 txDmaAddr = regs.txdp & 0x3fffffff;
1630 txDmaData =
1631 is64bit ? (void *)&txDesc64.link : (void *)&txDesc32.link;
1632 txDmaLen = is64bit ? sizeof(txDesc64.link) : sizeof(txDesc32.link);
1633 txDmaFree = dmaDescFree;
1634
1635 descDmaReads++;
1636 descDmaRdBytes += txDmaLen;
1637
1638 if (doTxDmaRead())
1639 goto exit;
1640
1641 } else {
1642 txState = txDescRead;
1643
1644 txDmaAddr = regs.txdp & 0x3fffffff;
1645 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
1646 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
1647 txDmaFree = dmaDescFree;
1648
1649 descDmaReads++;
1650 descDmaRdBytes += txDmaLen;
1651
1652 if (doTxDmaRead())
1653 goto exit;
1654 }
1655 break;
1656
1657 case txDescRefr:
1658 if (txDmaState != dmaIdle)
1659 goto exit;
1660
1661 txState = txAdvance;
1662 break;
1663
1664 case txDescRead:
1665 if (txDmaState != dmaIdle)
1666 goto exit;
1667
1668 DPRINTF(EthernetDesc, "txDesc: addr=%08x read descriptor\n",
1669 regs.txdp & 0x3fffffff);
1670 DPRINTF(EthernetDesc,
1671 "txDesc: link=%#x bufptr=%#x cmdsts=%#08x extsts=%#08x\n",
1672 link, bufptr, cmdsts, extsts);
1673
1674 if (cmdsts & CMDSTS_OWN) {
1675 txState = txFifoBlock;
1676 txFragPtr = bufptr;
1677 txDescCnt = cmdsts & CMDSTS_LEN_MASK;
1678 } else {
1679 devIntrPost(ISR_TXIDLE);
1680 txState = txIdle;
1681 goto exit;
1682 }
1683 break;
1684
1685 case txFifoBlock:
1686 if (!txPacket) {
1687 DPRINTF(EthernetSM, "****starting the tx of a new packet****\n");
1688 txPacket = make_shared<EthPacketData>(16384);
1689 txPacketBufPtr = txPacket->data;
1690 }
1691
1692 if (txDescCnt == 0) {
1693 DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n");
1694 if (cmdsts & CMDSTS_MORE) {
1695 DPRINTF(EthernetSM, "there are more descriptors to come\n");
1696 txState = txDescWrite;
1697
1698 cmdsts &= ~CMDSTS_OWN;
1699
1700 txDmaAddr = regs.txdp & 0x3fffffff;
1701 txDmaData = &cmdsts;
1702 if (is64bit) {
1703 txDmaAddr += offsetof(ns_desc64, cmdsts);
1704 txDmaLen = sizeof(txDesc64.cmdsts);
1705 } else {
1706 txDmaAddr += offsetof(ns_desc32, cmdsts);
1707 txDmaLen = sizeof(txDesc32.cmdsts);
1708 }
1709 txDmaFree = dmaDescFree;
1710
1711 if (doTxDmaWrite())
1712 goto exit;
1713
1714 } else { /* this packet is totally done */
1715 DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
1716 /* deal with the the packet that just finished */
1717 if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
1718 IpPtr ip(txPacket);
1719 if (extsts & EXTSTS_UDPPKT) {
1720 UdpPtr udp(ip);
1721 if (udp) {
1722 udp->sum(0);
1723 udp->sum(cksum(udp));
1724 txUdpChecksums++;
1725 } else {
1726 Debug::breakpoint();
1727 warn_once("UDPPKT set, but not UDP!\n");
1728 }
1729 } else if (extsts & EXTSTS_TCPPKT) {
1730 TcpPtr tcp(ip);
1731 if (tcp) {
1732 tcp->sum(0);
1733 tcp->sum(cksum(tcp));
1734 txTcpChecksums++;
1735 } else {
1736 warn_once("TCPPKT set, but not UDP!\n");
1737 }
1738 }
1739 if (extsts & EXTSTS_IPPKT) {
1740 if (ip) {
1741 ip->sum(0);
1742 ip->sum(cksum(ip));
1743 txIpChecksums++;
1744 } else {
1745 warn_once("IPPKT set, but not UDP!\n");
1746 }
1747 }
1748 }
1749
1750 txPacket->simLength = txPacketBufPtr - txPacket->data;
1751 txPacket->length = txPacketBufPtr - txPacket->data;
1752 // this is just because the receive can't handle a
1753 // packet bigger want to make sure
1754 if (txPacket->length > 1514)
1755 panic("transmit packet too large, %s > 1514\n",
1756 txPacket->length);
1757
1758 #ifndef NDEBUG
1759 bool success =
1760 #endif
1761 txFifo.push(txPacket);
1762 assert(success);
1763
1764 /*
1765 * this following section is not tqo spec, but
1766 * functionally shouldn't be any different. normally,
1767 * the chip will wait til the transmit has occurred
1768 * before writing back the descriptor because it has
1769 * to wait to see that it was successfully transmitted
1770 * to decide whether to set CMDSTS_OK or not.
1771 * however, in the simulator since it is always
1772 * successfully transmitted, and writing it exactly to
1773 * spec would complicate the code, we just do it here
1774 */
1775
1776 cmdsts &= ~CMDSTS_OWN;
1777 cmdsts |= CMDSTS_OK;
1778
1779 DPRINTF(EthernetDesc,
1780 "txDesc writeback: cmdsts=%08x extsts=%08x\n",
1781 cmdsts, extsts);
1782
1783 txDmaFree = dmaDescFree;
1784 txDmaAddr = regs.txdp & 0x3fffffff;
1785 txDmaData = &cmdsts;
1786 if (is64bit) {
1787 txDmaAddr += offsetof(ns_desc64, cmdsts);
1788 txDmaLen =
1789 sizeof(txDesc64.cmdsts) + sizeof(txDesc64.extsts);
1790 } else {
1791 txDmaAddr += offsetof(ns_desc32, cmdsts);
1792 txDmaLen =
1793 sizeof(txDesc32.cmdsts) + sizeof(txDesc32.extsts);
1794 }
1795
1796 descDmaWrites++;
1797 descDmaWrBytes += txDmaLen;
1798
1799 transmit();
1800 txPacket = 0;
1801
1802 if (!txEnable) {
1803 DPRINTF(EthernetSM, "halting TX state machine\n");
1804 txState = txIdle;
1805 goto exit;
1806 } else
1807 txState = txAdvance;
1808
1809 if (doTxDmaWrite())
1810 goto exit;
1811 }
1812 } else {
1813 DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
1814 if (!txFifo.full()) {
1815 txState = txFragRead;
1816
1817 /*
1818 * The number of bytes transferred is either whatever
1819 * is left in the descriptor (txDescCnt), or if there
1820 * is not enough room in the fifo, just whatever room
1821 * is left in the fifo
1822 */
1823 txXferLen = min<uint32_t>(txDescCnt, txFifo.avail());
1824
1825 txDmaAddr = txFragPtr & 0x3fffffff;
1826 txDmaData = txPacketBufPtr;
1827 txDmaLen = txXferLen;
1828 txDmaFree = dmaDataFree;
1829
1830 if (doTxDmaRead())
1831 goto exit;
1832 } else {
1833 txState = txFifoBlock;
1834 transmit();
1835
1836 goto exit;
1837 }
1838
1839 }
1840 break;
1841
1842 case txFragRead:
1843 if (txDmaState != dmaIdle)
1844 goto exit;
1845
1846 txPacketBufPtr += txXferLen;
1847 txFragPtr += txXferLen;
1848 txDescCnt -= txXferLen;
1849 txFifo.reserve(txXferLen);
1850
1851 txState = txFifoBlock;
1852 break;
1853
1854 case txDescWrite:
1855 if (txDmaState != dmaIdle)
1856 goto exit;
1857
1858 if (cmdsts & CMDSTS_INTR)
1859 devIntrPost(ISR_TXDESC);
1860
1861 if (!txEnable) {
1862 DPRINTF(EthernetSM, "halting TX state machine\n");
1863 txState = txIdle;
1864 goto exit;
1865 } else
1866 txState = txAdvance;
1867 break;
1868
1869 case txAdvance:
1870 if (link == 0) {
1871 devIntrPost(ISR_TXIDLE);
1872 txState = txIdle;
1873 goto exit;
1874 } else {
1875 if (txDmaState != dmaIdle)
1876 goto exit;
1877 txState = txDescRead;
1878 regs.txdp = link;
1879 CTDD = false;
1880
1881 txDmaAddr = link & 0x3fffffff;
1882 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
1883 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
1884 txDmaFree = dmaDescFree;
1885
1886 if (doTxDmaRead())
1887 goto exit;
1888 }
1889 break;
1890
1891 default:
1892 panic("invalid state");
1893 }
1894
1895 DPRINTF(EthernetSM, "entering next txState=%s\n",
1896 NsTxStateStrings[txState]);
1897 goto next;
1898
1899 exit:
1900 /**
1901 * @todo do we want to schedule a future kick?
1902 */
1903 DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
1904 NsTxStateStrings[txState]);
1905
1906 if (!txKickEvent.scheduled())
1907 schedule(txKickEvent, txKickTick);
1908 }
1909
/**
 * Advance the EEPROM state machine one bit.
 * Called on the rising edge of the EEPROM clock bit in MEAR.
 * The protocol is bit-serial, MSB first: start bit, 2-bit opcode,
 * 6-bit address, then 16 data bits shifted out on MEAR_EEDO.
 * Only the READ opcode is implemented, and only the three perfect-match
 * word addresses used by the FreeBSD driver are supported.
 */
void
NSGigE::eepromKick()
{
    switch (eepromState) {

      case eepromStart:

        // Wait for start bit
        if (regs.mear & MEAR_EEDI) {
            // Set up to get 2 opcode bits
            eepromState = eepromGetOpcode;
            eepromBitsToRx = 2;
            eepromOpcode = 0;
        }
        break;

      case eepromGetOpcode:
        // Shift the opcode in MSB first from the data-in bit.
        eepromOpcode <<= 1;
        eepromOpcode += (regs.mear & MEAR_EEDI) ? 1 : 0;
        --eepromBitsToRx;

        // Done getting opcode
        if (eepromBitsToRx == 0) {
            if (eepromOpcode != EEPROM_READ)
                panic("only EEPROM reads are implemented!");

            // Set up to get address
            eepromState = eepromGetAddress;
            eepromBitsToRx = 6;
            eepromAddress = 0;
        }
        break;

      case eepromGetAddress:
        // Shift the 6-bit word address in MSB first.
        eepromAddress <<= 1;
        eepromAddress += (regs.mear & MEAR_EEDI) ? 1 : 0;
        --eepromBitsToRx;

        // Done getting address
        if (eepromBitsToRx == 0) {

            if (eepromAddress >= EEPROM_SIZE)
                panic("EEPROM read access out of range!");

            // Each EEPROM word packs two bytes of the station address
            // from the perfect-match ROM, high byte first.
            switch (eepromAddress) {

              case EEPROM_PMATCH2_ADDR:
                eepromData = rom.perfectMatch[5];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[4];
                break;

              case EEPROM_PMATCH1_ADDR:
                eepromData = rom.perfectMatch[3];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[2];
                break;

              case EEPROM_PMATCH0_ADDR:
                eepromData = rom.perfectMatch[1];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[0];
                break;

              default:
                panic("FreeBSD driver only uses EEPROM to read PMATCH!");
            }
            // Set up to read data
            eepromState = eepromRead;
            eepromBitsToRx = 16;

            // Clear data in bit
            regs.mear &= ~MEAR_EEDI;
        }
        break;

      case eepromRead:
        // Clear Data Out bit
        regs.mear &= ~MEAR_EEDO;
        // Set bit to value of current EEPROM bit (MSB first)
        regs.mear |= (eepromData & 0x8000) ? MEAR_EEDO : 0x0;

        eepromData <<= 1;
        --eepromBitsToRx;

        // All done
        if (eepromBitsToRx == 0) {
            eepromState = eepromStart;
        }
        break;

      default:
        panic("invalid EEPROM state");
    }

}
2010
2011 void
2012 NSGigE::transferDone()
2013 {
2014 if (txFifo.empty()) {
2015 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
2016 return;
2017 }
2018
2019 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
2020
2021 reschedule(txEvent, clockEdge(Cycles(1)), true);
2022 }
2023
2024 bool
2025 NSGigE::rxFilter(const EthPacketPtr &packet)
2026 {
2027 EthPtr eth = packet;
2028 bool drop = true;
2029 string type;
2030
2031 const EthAddr &dst = eth->dst();
2032 if (dst.unicast()) {
2033 // If we're accepting all unicast addresses
2034 if (acceptUnicast)
2035 drop = false;
2036
2037 // If we make a perfect match
2038 if (acceptPerfect && dst == rom.perfectMatch)
2039 drop = false;
2040
2041 if (acceptArp && eth->type() == ETH_TYPE_ARP)
2042 drop = false;
2043
2044 } else if (dst.broadcast()) {
2045 // if we're accepting broadcasts
2046 if (acceptBroadcast)
2047 drop = false;
2048
2049 } else if (dst.multicast()) {
2050 // if we're accepting all multicasts
2051 if (acceptMulticast)
2052 drop = false;
2053
2054 // Multicast hashing faked - all packets accepted
2055 if (multicastHashEnable)
2056 drop = false;
2057 }
2058
2059 if (drop) {
2060 DPRINTF(Ethernet, "rxFilter drop\n");
2061 DDUMP(EthernetData, packet->data, packet->length);
2062 }
2063
2064 return drop;
2065 }
2066
/**
 * Accept a packet arriving from the wire.
 * Note that rxBytes/rxPackets are counted for every arriving packet,
 * even ones subsequently dropped.
 *
 * @return true if the packet was consumed (accepted into rxFifo, or
 *         deliberately dropped); false only when rxFifo lacks room,
 *         in which case ISR_RXORN (overrun) is posted.
 */
bool
NSGigE::recvPacket(EthPacketPtr packet)
{
    rxBytes += packet->length;
    rxPackets++;

    DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
            rxFifo.avail());

    if (!rxEnable) {
        DPRINTF(Ethernet, "receive disabled...packet dropped\n");
        return true;
    }

    // NOTE(review): when filtering is *disabled* the packet is dropped,
    // not accepted unconditionally. Presumably the drivers this model
    // targets always enable the filter -- confirm before relying on it.
    if (!rxFilterEnable) {
        DPRINTF(Ethernet,
            "receive packet filtering disabled . . . packet dropped\n");
        return true;
    }

    if (rxFilter(packet)) {
        DPRINTF(Ethernet, "packet filtered...dropped\n");
        return true;
    }

    if (rxFifo.avail() < packet->length) {
#if TRACING_ON
        IpPtr ip(packet);
        TcpPtr tcp(ip);
        if (ip) {
            DPRINTF(Ethernet,
                    "packet won't fit in receive buffer...pkt ID %d dropped\n",
                    ip->id());
            if (tcp) {
                DPRINTF(Ethernet, "Seq=%d\n", tcp->seq());
            }
        }
#endif
        droppedPackets++;
        devIntrPost(ISR_RXORN);
        return false;
    }

    rxFifo.push(packet);

    // Wake the receive state machine to start consuming the packet.
    rxKick();
    return true;
}
2115
2116
2117 void
2118 NSGigE::drainResume()
2119 {
2120 Drainable::drainResume();
2121
2122 // During drain we could have left the state machines in a waiting state and
2123 // they wouldn't get out until some other event occured to kick them.
2124 // This way they'll get out immediately
2125 txKick();
2126 rxKick();
2127 }
2128
2129
2130 //=====================================================================
2131 //
2132 //
/**
 * Checkpoint the device: PCI base class, device registers, filter ROM,
 * data fifos, in-flight packets (with buffer offsets), cached
 * descriptors, the tx/rx/EEPROM state machines, and relative times of
 * any pending transmit/interrupt events.  unserialize() must restore
 * these in exactly the same order.
 */
void
NSGigE::serialize(CheckpointOut &cp) const
{
    // Serialize the PciDevice base class
    PciDevice::serialize(cp);

    /*
     * Finalize any DMA events now.
     */
    // @todo will mem system save pending dma?

    /*
     * Serialize the device registers
     */
    SERIALIZE_SCALAR(regs.command);
    SERIALIZE_SCALAR(regs.config);
    SERIALIZE_SCALAR(regs.mear);
    SERIALIZE_SCALAR(regs.ptscr);
    SERIALIZE_SCALAR(regs.isr);
    SERIALIZE_SCALAR(regs.imr);
    SERIALIZE_SCALAR(regs.ier);
    SERIALIZE_SCALAR(regs.ihr);
    SERIALIZE_SCALAR(regs.txdp);
    SERIALIZE_SCALAR(regs.txdp_hi);
    SERIALIZE_SCALAR(regs.txcfg);
    SERIALIZE_SCALAR(regs.gpior);
    SERIALIZE_SCALAR(regs.rxdp);
    SERIALIZE_SCALAR(regs.rxdp_hi);
    SERIALIZE_SCALAR(regs.rxcfg);
    SERIALIZE_SCALAR(regs.pqcr);
    SERIALIZE_SCALAR(regs.wcsr);
    SERIALIZE_SCALAR(regs.pcr);
    SERIALIZE_SCALAR(regs.rfcr);
    SERIALIZE_SCALAR(regs.rfdr);
    SERIALIZE_SCALAR(regs.brar);
    SERIALIZE_SCALAR(regs.brdr);
    SERIALIZE_SCALAR(regs.srr);
    SERIALIZE_SCALAR(regs.mibc);
    SERIALIZE_SCALAR(regs.vrcr);
    SERIALIZE_SCALAR(regs.vtcr);
    SERIALIZE_SCALAR(regs.vdr);
    SERIALIZE_SCALAR(regs.ccsr);
    SERIALIZE_SCALAR(regs.tbicr);
    SERIALIZE_SCALAR(regs.tbisr);
    SERIALIZE_SCALAR(regs.tanar);
    SERIALIZE_SCALAR(regs.tanlpar);
    SERIALIZE_SCALAR(regs.taner);
    SERIALIZE_SCALAR(regs.tesr);

    // EEPROM-backed perfect-match address and multicast filter hash.
    SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
    SERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);

    SERIALIZE_SCALAR(ioEnable);

    /*
     * Serialize the data Fifos
     */
    rxFifo.serialize("rxFifo", cp);
    txFifo.serialize("txFifo", cp);

    /*
     * Serialize the various helper variables
     */
    // In-flight packets are stored with the current buffer cursor as an
    // offset so the raw pointer can be reconstructed on restore.
    bool txPacketExists = txPacket != nullptr;
    SERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        txPacket->simLength = txPacketBufPtr - txPacket->data;
        txPacket->length = txPacketBufPtr - txPacket->data;
        txPacket->serialize("txPacket", cp);
        uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
        SERIALIZE_SCALAR(txPktBufPtr);
    }

    bool rxPacketExists = rxPacket != nullptr;
    SERIALIZE_SCALAR(rxPacketExists);
    if (rxPacketExists) {
        rxPacket->serialize("rxPacket", cp);
        uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
        SERIALIZE_SCALAR(rxPktBufPtr);
    }

    SERIALIZE_SCALAR(txXferLen);
    SERIALIZE_SCALAR(rxXferLen);

    /*
     * Serialize Cached Descriptors
     */
    SERIALIZE_SCALAR(rxDesc64.link);
    SERIALIZE_SCALAR(rxDesc64.bufptr);
    SERIALIZE_SCALAR(rxDesc64.cmdsts);
    SERIALIZE_SCALAR(rxDesc64.extsts);
    SERIALIZE_SCALAR(txDesc64.link);
    SERIALIZE_SCALAR(txDesc64.bufptr);
    SERIALIZE_SCALAR(txDesc64.cmdsts);
    SERIALIZE_SCALAR(txDesc64.extsts);
    SERIALIZE_SCALAR(rxDesc32.link);
    SERIALIZE_SCALAR(rxDesc32.bufptr);
    SERIALIZE_SCALAR(rxDesc32.cmdsts);
    SERIALIZE_SCALAR(rxDesc32.extsts);
    SERIALIZE_SCALAR(txDesc32.link);
    SERIALIZE_SCALAR(txDesc32.bufptr);
    SERIALIZE_SCALAR(txDesc32.cmdsts);
    SERIALIZE_SCALAR(txDesc32.extsts);
    SERIALIZE_SCALAR(extstsEnable);

    /*
     * Serialize tx state machine
     */
    // Enums are stored as plain ints for checkpoint portability.
    int txState = this->txState;
    SERIALIZE_SCALAR(txState);
    SERIALIZE_SCALAR(txEnable);
    SERIALIZE_SCALAR(CTDD);
    SERIALIZE_SCALAR(txFragPtr);
    SERIALIZE_SCALAR(txDescCnt);
    int txDmaState = this->txDmaState;
    SERIALIZE_SCALAR(txDmaState);
    SERIALIZE_SCALAR(txKickTick);

    /*
     * Serialize rx state machine
     */
    int rxState = this->rxState;
    SERIALIZE_SCALAR(rxState);
    SERIALIZE_SCALAR(rxEnable);
    SERIALIZE_SCALAR(CRDD);
    SERIALIZE_SCALAR(rxPktBytes);
    SERIALIZE_SCALAR(rxFragPtr);
    SERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState = this->rxDmaState;
    SERIALIZE_SCALAR(rxDmaState);
    SERIALIZE_SCALAR(rxKickTick);

    /*
     * Serialize EEPROM state machine
     */
    int eepromState = this->eepromState;
    SERIALIZE_SCALAR(eepromState);
    SERIALIZE_SCALAR(eepromClk);
    SERIALIZE_SCALAR(eepromBitsToRx);
    SERIALIZE_SCALAR(eepromOpcode);
    SERIALIZE_SCALAR(eepromAddress);
    SERIALIZE_SCALAR(eepromData);

    /*
     * If there's a pending transmit, store the time so we can
     * reschedule it later
     */
    Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick() : 0;
    SERIALIZE_SCALAR(transmitTick);

    /*
     * receive address filter settings
     */
    SERIALIZE_SCALAR(rxFilterEnable);
    SERIALIZE_SCALAR(acceptBroadcast);
    SERIALIZE_SCALAR(acceptMulticast);
    SERIALIZE_SCALAR(acceptUnicast);
    SERIALIZE_SCALAR(acceptPerfect);
    SERIALIZE_SCALAR(acceptArp);
    SERIALIZE_SCALAR(multicastHashEnable);

    /*
     * Keep track of pending interrupt status.
     */
    SERIALIZE_SCALAR(intrTick);
    SERIALIZE_SCALAR(cpuPendingIntr);
    // Interrupt event time is absolute (unlike transmitTick above).
    Tick intrEventTick = 0;
    if (intrEvent)
        intrEventTick = intrEvent->when();
    SERIALIZE_SCALAR(intrEventTick);

}
2305
/**
 * Restore the device from a checkpoint, in exactly the order written by
 * serialize(): PCI base class, registers, filter ROM, fifos, in-flight
 * packets, cached descriptors, the tx/rx/EEPROM state machines, and
 * finally any pending transmit/interrupt events, which are rescheduled.
 */
void
NSGigE::unserialize(CheckpointIn &cp)
{
    // Unserialize the PciDevice base class
    PciDevice::unserialize(cp);

    UNSERIALIZE_SCALAR(regs.command);
    UNSERIALIZE_SCALAR(regs.config);
    UNSERIALIZE_SCALAR(regs.mear);
    UNSERIALIZE_SCALAR(regs.ptscr);
    UNSERIALIZE_SCALAR(regs.isr);
    UNSERIALIZE_SCALAR(regs.imr);
    UNSERIALIZE_SCALAR(regs.ier);
    UNSERIALIZE_SCALAR(regs.ihr);
    UNSERIALIZE_SCALAR(regs.txdp);
    UNSERIALIZE_SCALAR(regs.txdp_hi);
    UNSERIALIZE_SCALAR(regs.txcfg);
    UNSERIALIZE_SCALAR(regs.gpior);
    UNSERIALIZE_SCALAR(regs.rxdp);
    UNSERIALIZE_SCALAR(regs.rxdp_hi);
    UNSERIALIZE_SCALAR(regs.rxcfg);
    UNSERIALIZE_SCALAR(regs.pqcr);
    UNSERIALIZE_SCALAR(regs.wcsr);
    UNSERIALIZE_SCALAR(regs.pcr);
    UNSERIALIZE_SCALAR(regs.rfcr);
    UNSERIALIZE_SCALAR(regs.rfdr);
    UNSERIALIZE_SCALAR(regs.brar);
    UNSERIALIZE_SCALAR(regs.brdr);
    UNSERIALIZE_SCALAR(regs.srr);
    UNSERIALIZE_SCALAR(regs.mibc);
    UNSERIALIZE_SCALAR(regs.vrcr);
    UNSERIALIZE_SCALAR(regs.vtcr);
    UNSERIALIZE_SCALAR(regs.vdr);
    UNSERIALIZE_SCALAR(regs.ccsr);
    UNSERIALIZE_SCALAR(regs.tbicr);
    UNSERIALIZE_SCALAR(regs.tbisr);
    UNSERIALIZE_SCALAR(regs.tanar);
    UNSERIALIZE_SCALAR(regs.tanlpar);
    UNSERIALIZE_SCALAR(regs.taner);
    UNSERIALIZE_SCALAR(regs.tesr);

    UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
    UNSERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);

    UNSERIALIZE_SCALAR(ioEnable);

    /*
     * unserialize the data fifos
     */
    rxFifo.unserialize("rxFifo", cp);
    txFifo.unserialize("txFifo", cp);

    /*
     * unserialize the various helper variables
     */
    // The buffer cursors were saved as offsets; rebuild the pointers.
    bool txPacketExists;
    UNSERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        txPacket = make_shared<EthPacketData>(16384);
        txPacket->unserialize("txPacket", cp);
        uint32_t txPktBufPtr;
        UNSERIALIZE_SCALAR(txPktBufPtr);
        txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
    } else
        txPacket = 0;

    bool rxPacketExists;
    UNSERIALIZE_SCALAR(rxPacketExists);
    rxPacket = 0;
    if (rxPacketExists) {
        // NOTE(review): constructed without an explicit buffer size,
        // unlike txPacket above (16384); presumably
        // EthPacketData::unserialize allocates the buffer itself --
        // confirm against EthPacketData.
        rxPacket = make_shared<EthPacketData>();
        rxPacket->unserialize("rxPacket", cp);
        uint32_t rxPktBufPtr;
        UNSERIALIZE_SCALAR(rxPktBufPtr);
        rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
    } else
        rxPacket = 0;

    UNSERIALIZE_SCALAR(txXferLen);
    UNSERIALIZE_SCALAR(rxXferLen);

    /*
     * Unserialize Cached Descriptors
     */
    UNSERIALIZE_SCALAR(rxDesc64.link);
    UNSERIALIZE_SCALAR(rxDesc64.bufptr);
    UNSERIALIZE_SCALAR(rxDesc64.cmdsts);
    UNSERIALIZE_SCALAR(rxDesc64.extsts);
    UNSERIALIZE_SCALAR(txDesc64.link);
    UNSERIALIZE_SCALAR(txDesc64.bufptr);
    UNSERIALIZE_SCALAR(txDesc64.cmdsts);
    UNSERIALIZE_SCALAR(txDesc64.extsts);
    UNSERIALIZE_SCALAR(rxDesc32.link);
    UNSERIALIZE_SCALAR(rxDesc32.bufptr);
    UNSERIALIZE_SCALAR(rxDesc32.cmdsts);
    UNSERIALIZE_SCALAR(rxDesc32.extsts);
    UNSERIALIZE_SCALAR(txDesc32.link);
    UNSERIALIZE_SCALAR(txDesc32.bufptr);
    UNSERIALIZE_SCALAR(txDesc32.cmdsts);
    UNSERIALIZE_SCALAR(txDesc32.extsts);
    UNSERIALIZE_SCALAR(extstsEnable);

    /*
     * unserialize tx state machine
     */
    // State machine enums were checkpointed as ints; cast them back.
    int txState;
    UNSERIALIZE_SCALAR(txState);
    this->txState = (TxState) txState;
    UNSERIALIZE_SCALAR(txEnable);
    UNSERIALIZE_SCALAR(CTDD);
    UNSERIALIZE_SCALAR(txFragPtr);
    UNSERIALIZE_SCALAR(txDescCnt);
    int txDmaState;
    UNSERIALIZE_SCALAR(txDmaState);
    this->txDmaState = (DmaState) txDmaState;
    UNSERIALIZE_SCALAR(txKickTick);
    // A nonzero kick tick means the state machine was mid-run.
    if (txKickTick)
        schedule(txKickEvent, txKickTick);

    /*
     * unserialize rx state machine
     */
    int rxState;
    UNSERIALIZE_SCALAR(rxState);
    this->rxState = (RxState) rxState;
    UNSERIALIZE_SCALAR(rxEnable);
    UNSERIALIZE_SCALAR(CRDD);
    UNSERIALIZE_SCALAR(rxPktBytes);
    UNSERIALIZE_SCALAR(rxFragPtr);
    UNSERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState;
    UNSERIALIZE_SCALAR(rxDmaState);
    this->rxDmaState = (DmaState) rxDmaState;
    UNSERIALIZE_SCALAR(rxKickTick);
    if (rxKickTick)
        schedule(rxKickEvent, rxKickTick);

    /*
     * Unserialize EEPROM state machine
     */
    int eepromState;
    UNSERIALIZE_SCALAR(eepromState);
    this->eepromState = (EEPROMState) eepromState;
    UNSERIALIZE_SCALAR(eepromClk);
    UNSERIALIZE_SCALAR(eepromBitsToRx);
    UNSERIALIZE_SCALAR(eepromOpcode);
    UNSERIALIZE_SCALAR(eepromAddress);
    UNSERIALIZE_SCALAR(eepromData);

    /*
     * If there's a pending transmit, reschedule it now
     */
    // transmitTick was saved relative to curTick() at checkpoint time.
    Tick transmitTick;
    UNSERIALIZE_SCALAR(transmitTick);
    if (transmitTick)
        schedule(txEvent, curTick() + transmitTick);

    /*
     * unserialize receive address filter settings
     */
    UNSERIALIZE_SCALAR(rxFilterEnable);
    UNSERIALIZE_SCALAR(acceptBroadcast);
    UNSERIALIZE_SCALAR(acceptMulticast);
    UNSERIALIZE_SCALAR(acceptUnicast);
    UNSERIALIZE_SCALAR(acceptPerfect);
    UNSERIALIZE_SCALAR(acceptArp);
    UNSERIALIZE_SCALAR(multicastHashEnable);

    /*
     * Keep track of pending interrupt status.
     */
    UNSERIALIZE_SCALAR(intrTick);
    UNSERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick;
    UNSERIALIZE_SCALAR(intrEventTick);
    // intrEventTick is absolute; recreate and reschedule the event.
    if (intrEventTick) {
        intrEvent = new EventFunctionWrapper([this]{ cpuInterrupt(); },
                                             name(), true);
        schedule(intrEvent, intrEventTick);
    }
}
2487
/**
 * Factory method invoked by the gem5 parameter/config system to
 * instantiate the device from its Python-generated parameter object.
 */
NSGigE *
NSGigEParams::create()
{
    return new NSGigE(this);
}