SimObjects: Clean up handling of C++ namespaces.
[gem5.git] / src / dev / ns_gige.cc
1 /*
2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Nathan Binkert
29 * Lisa Hsu
30 */
31
32 /** @file
33 * Device module for modelling the National Semiconductor
34 * DP83820 ethernet controller. Does not support priority queueing
35 */
36 #include <deque>
37 #include <string>
38
39 #include "base/inet.hh"
40 #include "cpu/thread_context.hh"
41 #include "dev/etherlink.hh"
42 #include "dev/ns_gige.hh"
43 #include "dev/pciconfigall.hh"
44 #include "mem/packet.hh"
45 #include "mem/packet_access.hh"
46 #include "params/NSGigE.hh"
47 #include "sim/debug.hh"
48 #include "sim/host.hh"
49 #include "sim/system.hh"
50
51 const char *NsRxStateStrings[] =
52 {
53 "rxIdle",
54 "rxDescRefr",
55 "rxDescRead",
56 "rxFifoBlock",
57 "rxFragWrite",
58 "rxDescWrite",
59 "rxAdvance"
60 };
61
62 const char *NsTxStateStrings[] =
63 {
64 "txIdle",
65 "txDescRefr",
66 "txDescRead",
67 "txFifoBlock",
68 "txFragRead",
69 "txDescWrite",
70 "txAdvance"
71 };
72
73 const char *NsDmaState[] =
74 {
75 "dmaIdle",
76 "dmaReading",
77 "dmaWriting",
78 "dmaReadWaiting",
79 "dmaWriteWaiting"
80 };
81
82 using namespace std;
83 using namespace Net;
84 using namespace TheISA;
85
86 ///////////////////////////////////////////////////////////////////////
87 //
88 // NSGigE PCI Device
89 //
90 NSGigE::NSGigE(Params *p)
91 : EtherDevice(p), ioEnable(false),
92 txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size),
93 txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
94 txXferLen(0), rxXferLen(0), rxDmaFree(false), txDmaFree(false),
95 clock(p->clock),
96 txState(txIdle), txEnable(false), CTDD(false), txHalt(false),
97 txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
98 rxEnable(false), CRDD(false), rxPktBytes(0), rxHalt(false),
99 rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
100 eepromState(eepromStart), eepromClk(false), eepromBitsToRx(0),
101 eepromOpcode(0), eepromAddress(0), eepromData(0),
102 dmaReadDelay(p->dma_read_delay), dmaWriteDelay(p->dma_write_delay),
103 dmaReadFactor(p->dma_read_factor), dmaWriteFactor(p->dma_write_factor),
104 rxDmaData(NULL), rxDmaAddr(0), rxDmaLen(0),
105 txDmaData(NULL), txDmaAddr(0), txDmaLen(0),
106 rxDmaReadEvent(this), rxDmaWriteEvent(this),
107 txDmaReadEvent(this), txDmaWriteEvent(this),
108 dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free),
109 txDelay(p->tx_delay), rxDelay(p->rx_delay),
110 rxKickTick(0), rxKickEvent(this), txKickTick(0), txKickEvent(this),
111 txEvent(this), rxFilterEnable(p->rx_filter),
112 acceptBroadcast(false), acceptMulticast(false), acceptUnicast(false),
113 acceptPerfect(false), acceptArp(false), multicastHashEnable(false),
114 intrDelay(p->intr_delay), intrTick(0), cpuPendingIntr(false),
115 intrEvent(0), interface(0)
116 {
117
118
119 interface = new NSGigEInt(name() + ".int0", this);
120
121 regsReset();
122 memcpy(&rom.perfectMatch, p->hardware_address.bytes(), ETH_ADDR_LEN);
123
124 memset(&rxDesc32, 0, sizeof(rxDesc32));
125 memset(&txDesc32, 0, sizeof(txDesc32));
126 memset(&rxDesc64, 0, sizeof(rxDesc64));
127 memset(&txDesc64, 0, sizeof(txDesc64));
128 }
129
130 NSGigE::~NSGigE()
131 {}
132
133 /**
134 * This is to write to the PCI general configuration registers
135 */
136 Tick
137 NSGigE::writeConfig(PacketPtr pkt)
138 {
139 int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
140 if (offset < PCI_DEVICE_SPECIFIC)
141 PciDev::writeConfig(pkt);
142 else
143 panic("Device specific PCI config space not implemented!\n");
144
145 switch (offset) {
146         // seems to work fine without all these PCI settings, but the
147         // IO enable bit is tracked here as a double check; an assertion
148         // will fail if we ever need to implement this properly
149 case PCI_COMMAND:
150 if (config.data[offset] & PCI_CMD_IOSE)
151 ioEnable = true;
152 else
153 ioEnable = false;
154 break;
155 }
156
157 return configDelay;
158 }
159
160 EtherInt*
161 NSGigE::getEthPort(const std::string &if_name, int idx)
162 {
163 if (if_name == "interface") {
164 if (interface->getPeer())
165             panic("interface already connected\n");
166 return interface;
167 }
168 return NULL;
169 }
170
171 /**
172 * This reads the device registers, which are detailed in the NS83820
173 * spec sheet
174 */
175 Tick
176 NSGigE::read(PacketPtr pkt)
177 {
178 assert(ioEnable);
179
180 pkt->allocate();
181
182 //The mask is to give you only the offset into the device register file
183 Addr daddr = pkt->getAddr() & 0xfff;
184 DPRINTF(EthernetPIO, "read da=%#x pa=%#x size=%d\n",
185 daddr, pkt->getAddr(), pkt->getSize());
186
187
188 // there are some reserved registers, you can see ns_gige_reg.h and
189 // the spec sheet for details
190 if (daddr > LAST && daddr <= RESERVED) {
191 panic("Accessing reserved register");
192 } else if (daddr > RESERVED && daddr <= 0x3FC) {
193 return readConfig(pkt);
194 } else if (daddr >= MIB_START && daddr <= MIB_END) {
195         // we don't implement all the MIBs; hopefully the kernel
196         // doesn't actually depend upon their values.
197         // The MIBs are just hardware stats keepers.
198 pkt->set<uint32_t>(0);
199 pkt->makeAtomicResponse();
200 return pioDelay;
201 } else if (daddr > 0x3FC)
202 panic("Something is messed up!\n");
203
204 assert(pkt->getSize() == sizeof(uint32_t));
205 uint32_t &reg = *pkt->getPtr<uint32_t>();
206 uint16_t rfaddr;
207
208 switch (daddr) {
209 case CR:
210 reg = regs.command;
211 //these are supposed to be cleared on a read
212 reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
213 break;
214
215 case CFGR:
216 reg = regs.config;
217 break;
218
219 case MEAR:
220 reg = regs.mear;
221 break;
222
223 case PTSCR:
224 reg = regs.ptscr;
225 break;
226
227 case ISR:
228 reg = regs.isr;
229 devIntrClear(ISR_ALL);
230 break;
231
232 case IMR:
233 reg = regs.imr;
234 break;
235
236 case IER:
237 reg = regs.ier;
238 break;
239
240 case IHR:
241 reg = regs.ihr;
242 break;
243
244 case TXDP:
245 reg = regs.txdp;
246 break;
247
248 case TXDP_HI:
249 reg = regs.txdp_hi;
250 break;
251
252 case TX_CFG:
253 reg = regs.txcfg;
254 break;
255
256 case GPIOR:
257 reg = regs.gpior;
258 break;
259
260 case RXDP:
261 reg = regs.rxdp;
262 break;
263
264 case RXDP_HI:
265 reg = regs.rxdp_hi;
266 break;
267
268 case RX_CFG:
269 reg = regs.rxcfg;
270 break;
271
272 case PQCR:
273 reg = regs.pqcr;
274 break;
275
276 case WCSR:
277 reg = regs.wcsr;
278 break;
279
280 case PCR:
281 reg = regs.pcr;
282 break;
283
284     // See the spec sheet for how RFCR and RFDR work together:
285     // software first writes RFCR to select which part of the
286     // receive filter it wants to access, and the following access
287     // to RFDR then reads or writes the location selected by what
288     // was written to RFCR.
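    // For example (matching the RFDR cases below): the driver writes
    // RFCR with RFCR_RFADDR = 0x000 and then reads RFDR to get the
    // first two bytes of the perfect-match MAC address; 0x002 and
    // 0x004 return the remaining bytes, and addresses in the
    // FHASH_ADDR range access the filter hash table.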
289 case RFCR:
290 reg = regs.rfcr;
291 break;
292
293 case RFDR:
294 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
295 switch (rfaddr) {
296 // Read from perfect match ROM octets
297 case 0x000:
298 reg = rom.perfectMatch[1];
299 reg = reg << 8;
300 reg += rom.perfectMatch[0];
301 break;
302 case 0x002:
303 reg = rom.perfectMatch[3] << 8;
304 reg += rom.perfectMatch[2];
305 break;
306 case 0x004:
307 reg = rom.perfectMatch[5] << 8;
308 reg += rom.perfectMatch[4];
309 break;
310 default:
311 // Read filter hash table
312 if (rfaddr >= FHASH_ADDR &&
313 rfaddr < FHASH_ADDR + FHASH_SIZE) {
314
315 // Only word-aligned reads supported
316 if (rfaddr % 2)
317 panic("unaligned read from filter hash table!");
318
319 reg = rom.filterHash[rfaddr - FHASH_ADDR + 1] << 8;
320 reg += rom.filterHash[rfaddr - FHASH_ADDR];
321 break;
322 }
323
324 panic("reading RFDR for something other than pattern"
325 " matching or hashing! %#x\n", rfaddr);
326 }
327 break;
328
329 case SRR:
330 reg = regs.srr;
331 break;
332
333 case MIBC:
334 reg = regs.mibc;
335 reg &= ~(MIBC_MIBS | MIBC_ACLR);
336 break;
337
338 case VRCR:
339 reg = regs.vrcr;
340 break;
341
342 case VTCR:
343 reg = regs.vtcr;
344 break;
345
346 case VDR:
347 reg = regs.vdr;
348 break;
349
350 case CCSR:
351 reg = regs.ccsr;
352 break;
353
354 case TBICR:
355 reg = regs.tbicr;
356 break;
357
358 case TBISR:
359 reg = regs.tbisr;
360 break;
361
362 case TANAR:
363 reg = regs.tanar;
364 break;
365
366 case TANLPAR:
367 reg = regs.tanlpar;
368 break;
369
370 case TANER:
371 reg = regs.taner;
372 break;
373
374 case TESR:
375 reg = regs.tesr;
376 break;
377
378 case M5REG:
379 reg = 0;
380 if (params()->rx_thread)
381 reg |= M5REG_RX_THREAD;
382 if (params()->tx_thread)
383 reg |= M5REG_TX_THREAD;
384 if (params()->rss)
385 reg |= M5REG_RSS;
386 break;
387
388 default:
389 panic("reading unimplemented register: addr=%#x", daddr);
390 }
391
392 DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
393 daddr, reg, reg);
394
395 pkt->makeAtomicResponse();
396 return pioDelay;
397 }
398
399 Tick
400 NSGigE::write(PacketPtr pkt)
401 {
402 assert(ioEnable);
403
404 Addr daddr = pkt->getAddr() & 0xfff;
405 DPRINTF(EthernetPIO, "write da=%#x pa=%#x size=%d\n",
406 daddr, pkt->getAddr(), pkt->getSize());
407
408 if (daddr > LAST && daddr <= RESERVED) {
409 panic("Accessing reserved register");
410 } else if (daddr > RESERVED && daddr <= 0x3FC) {
411 return writeConfig(pkt);
412 } else if (daddr > 0x3FC)
413 panic("Something is messed up!\n");
414
415 if (pkt->getSize() == sizeof(uint32_t)) {
416 uint32_t reg = pkt->get<uint32_t>();
417 uint16_t rfaddr;
418
419 DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);
420
421 switch (daddr) {
422 case CR:
423 regs.command = reg;
424 if (reg & CR_TXD) {
425 txEnable = false;
426 } else if (reg & CR_TXE) {
427 txEnable = true;
428
429 // the kernel is enabling the transmit machine
430 if (txState == txIdle)
431 txKick();
432 }
433
434 if (reg & CR_RXD) {
435 rxEnable = false;
436 } else if (reg & CR_RXE) {
437 rxEnable = true;
438
439 if (rxState == rxIdle)
440 rxKick();
441 }
442
443 if (reg & CR_TXR)
444 txReset();
445
446 if (reg & CR_RXR)
447 rxReset();
448
449 if (reg & CR_SWI)
450 devIntrPost(ISR_SWI);
451
452 if (reg & CR_RST) {
453 txReset();
454 rxReset();
455
456 regsReset();
457 }
458 break;
459
460 case CFGR:
461 if (reg & CFGR_LNKSTS ||
462 reg & CFGR_SPDSTS ||
463 reg & CFGR_DUPSTS ||
464 reg & CFGR_RESERVED ||
465 reg & CFGR_T64ADDR ||
466 reg & CFGR_PCI64_DET)
467
468 // First clear all writable bits
469 regs.config &= CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
470 CFGR_RESERVED | CFGR_T64ADDR |
471 CFGR_PCI64_DET;
472 // Now set the appropriate writable bits
473 regs.config |= reg & ~(CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
474 CFGR_RESERVED | CFGR_T64ADDR |
475 CFGR_PCI64_DET);
476
477             // All of these #if 0 blocks exist because the kernel doesn't
478             // appear to need these features implemented. If a problem
479             // relating to one of them comes up, the missing functionality
480             // may need to be added.
480 if (reg & CFGR_TBI_EN) ;
481 if (reg & CFGR_MODE_1000) ;
482
483 if (reg & CFGR_AUTO_1000)
484 panic("CFGR_AUTO_1000 not implemented!\n");
485
486 if (reg & CFGR_PINT_DUPSTS ||
487 reg & CFGR_PINT_LNKSTS ||
488 reg & CFGR_PINT_SPDSTS)
489 ;
490
491 if (reg & CFGR_TMRTEST) ;
492 if (reg & CFGR_MRM_DIS) ;
493 if (reg & CFGR_MWI_DIS) ;
494
495 if (reg & CFGR_T64ADDR) ;
496 // panic("CFGR_T64ADDR is read only register!\n");
497
498 if (reg & CFGR_PCI64_DET)
499                 panic("CFGR_PCI64_DET is a read only register!\n");
500
501 if (reg & CFGR_DATA64_EN) ;
502 if (reg & CFGR_M64ADDR) ;
503 if (reg & CFGR_PHY_RST) ;
504 if (reg & CFGR_PHY_DIS) ;
505
506 if (reg & CFGR_EXTSTS_EN)
507 extstsEnable = true;
508 else
509 extstsEnable = false;
510
511 if (reg & CFGR_REQALG) ;
512 if (reg & CFGR_SB) ;
513 if (reg & CFGR_POW) ;
514 if (reg & CFGR_EXD) ;
515 if (reg & CFGR_PESEL) ;
516 if (reg & CFGR_BROM_DIS) ;
517 if (reg & CFGR_EXT_125) ;
518 if (reg & CFGR_BEM) ;
519 break;
520
521 case MEAR:
522 // Clear writable bits
523 regs.mear &= MEAR_EEDO;
524 // Set appropriate writable bits
525 regs.mear |= reg & ~MEAR_EEDO;
526
527 // FreeBSD uses the EEPROM to read PMATCH (for the MAC address)
528 // even though it could get it through RFDR
529 if (reg & MEAR_EESEL) {
530 // Rising edge of clock
531 if (reg & MEAR_EECLK && !eepromClk)
532 eepromKick();
533 }
534 else {
535 eepromState = eepromStart;
536 regs.mear &= ~MEAR_EEDI;
537 }
538
539 eepromClk = reg & MEAR_EECLK;
540
541 // since phy is completely faked, MEAR_MD* don't matter
542 if (reg & MEAR_MDIO) ;
543 if (reg & MEAR_MDDIR) ;
544 if (reg & MEAR_MDC) ;
545 break;
546
547 case PTSCR:
548 regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
549             // these bits control BISTs for various parts of the chip;
550             // we don't model them, so just fake that the BIST is done
551 if (reg & PTSCR_RBIST_EN)
552 regs.ptscr |= PTSCR_RBIST_DONE;
553 if (reg & PTSCR_EEBIST_EN)
554 regs.ptscr &= ~PTSCR_EEBIST_EN;
555 if (reg & PTSCR_EELOAD_EN)
556 regs.ptscr &= ~PTSCR_EELOAD_EN;
557 break;
558
559 case ISR: /* writing to the ISR has no effect */
560 panic("ISR is a read only register!\n");
561
562 case IMR:
563 regs.imr = reg;
564 devIntrChangeMask();
565 break;
566
567 case IER:
568 regs.ier = reg;
569 break;
570
571 case IHR:
572 regs.ihr = reg;
573 /* not going to implement real interrupt holdoff */
574 break;
575
576 case TXDP:
577 regs.txdp = (reg & 0xFFFFFFFC);
578 assert(txState == txIdle);
579 CTDD = false;
580 break;
581
582 case TXDP_HI:
583 regs.txdp_hi = reg;
584 break;
585
586 case TX_CFG:
587 regs.txcfg = reg;
588 #if 0
589 if (reg & TX_CFG_CSI) ;
590 if (reg & TX_CFG_HBI) ;
591 if (reg & TX_CFG_MLB) ;
592 if (reg & TX_CFG_ATP) ;
593 if (reg & TX_CFG_ECRETRY) {
594 /*
595 * this could easily be implemented, but considering
596 * the network is just a fake pipe, wouldn't make
597 * sense to do this
598 */
599 }
600
601 if (reg & TX_CFG_BRST_DIS) ;
602 #endif
603
604 #if 0
605 /* we handle our own DMA, ignore the kernel's exhortations */
606 if (reg & TX_CFG_MXDMA) ;
607 #endif
608
609 // also, we currently don't care about fill/drain
610 // thresholds though this may change in the future with
611 // more realistic networks or a driver which changes it
612 // according to feedback
613
614 break;
615
616 case GPIOR:
617 // Only write writable bits
618 regs.gpior &= GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
619 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN;
620 regs.gpior |= reg & ~(GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
621 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN);
622 /* these just control general purpose i/o pins, don't matter */
623 break;
624
625 case RXDP:
626 regs.rxdp = reg;
627 CRDD = false;
628 break;
629
630 case RXDP_HI:
631 regs.rxdp_hi = reg;
632 break;
633
634 case RX_CFG:
635 regs.rxcfg = reg;
636 #if 0
637 if (reg & RX_CFG_AEP) ;
638 if (reg & RX_CFG_ARP) ;
639 if (reg & RX_CFG_STRIPCRC) ;
640 if (reg & RX_CFG_RX_RD) ;
641 if (reg & RX_CFG_ALP) ;
642 if (reg & RX_CFG_AIRL) ;
643
644 /* we handle our own DMA, ignore what kernel says about it */
645 if (reg & RX_CFG_MXDMA) ;
646
647 //also, we currently don't care about fill/drain thresholds
648 //though this may change in the future with more realistic
649 //networks or a driver which changes it according to feedback
650 if (reg & (RX_CFG_DRTH | RX_CFG_DRTH0)) ;
651 #endif
652 break;
653
654 case PQCR:
655 /* there is no priority queueing used in the linux 2.6 driver */
656 regs.pqcr = reg;
657 break;
658
659 case WCSR:
660 /* not going to implement wake on LAN */
661 regs.wcsr = reg;
662 break;
663
664 case PCR:
665 /* not going to implement pause control */
666 regs.pcr = reg;
667 break;
668
669 case RFCR:
670 regs.rfcr = reg;
671
672 rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
673 acceptBroadcast = (reg & RFCR_AAB) ? true : false;
674 acceptMulticast = (reg & RFCR_AAM) ? true : false;
675 acceptUnicast = (reg & RFCR_AAU) ? true : false;
676 acceptPerfect = (reg & RFCR_APM) ? true : false;
677 acceptArp = (reg & RFCR_AARP) ? true : false;
678 multicastHashEnable = (reg & RFCR_MHEN) ? true : false;
679
680 #if 0
681 if (reg & RFCR_APAT)
682 panic("RFCR_APAT not implemented!\n");
683 #endif
684 if (reg & RFCR_UHEN)
685 panic("Unicast hash filtering not used by drivers!\n");
686
687 if (reg & RFCR_ULM)
688 panic("RFCR_ULM not implemented!\n");
689
690 break;
691
692 case RFDR:
693 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
694 switch (rfaddr) {
695 case 0x000:
696 rom.perfectMatch[0] = (uint8_t)reg;
697 rom.perfectMatch[1] = (uint8_t)(reg >> 8);
698 break;
699 case 0x002:
700 rom.perfectMatch[2] = (uint8_t)reg;
701 rom.perfectMatch[3] = (uint8_t)(reg >> 8);
702 break;
703 case 0x004:
704 rom.perfectMatch[4] = (uint8_t)reg;
705 rom.perfectMatch[5] = (uint8_t)(reg >> 8);
706 break;
707 default:
708
709 if (rfaddr >= FHASH_ADDR &&
710 rfaddr < FHASH_ADDR + FHASH_SIZE) {
711
712 // Only word-aligned writes supported
713 if (rfaddr % 2)
714 panic("unaligned write to filter hash table!");
715
716 rom.filterHash[rfaddr - FHASH_ADDR] = (uint8_t)reg;
717 rom.filterHash[rfaddr - FHASH_ADDR + 1]
718 = (uint8_t)(reg >> 8);
719 break;
720 }
721                 panic("writing RFDR for something other than pattern"
722                       " matching or hashing! %#x\n", rfaddr);
723             }
724             break;

725 case BRAR:
726 regs.brar = reg;
727 break;
728
729 case BRDR:
730 panic("the driver never uses BRDR, something is wrong!\n");
731
732 case SRR:
733         panic("SRR is a read only register!\n");
734
735 case MIBC:
736 panic("the driver never uses MIBC, something is wrong!\n");
737
738 case VRCR:
739 regs.vrcr = reg;
740 break;
741
742 case VTCR:
743 regs.vtcr = reg;
744 break;
745
746 case VDR:
747 panic("the driver never uses VDR, something is wrong!\n");
748
749 case CCSR:
750 /* not going to implement clockrun stuff */
751 regs.ccsr = reg;
752 break;
753
754 case TBICR:
755 regs.tbicr = reg;
756 if (reg & TBICR_MR_LOOPBACK)
757 panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
758
759 if (reg & TBICR_MR_AN_ENABLE) {
760 regs.tanlpar = regs.tanar;
761 regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
762 }
763
764 #if 0
765 if (reg & TBICR_MR_RESTART_AN) ;
766 #endif
767
768 break;
769
770 case TBISR:
771         panic("TBISR is a read only register!\n");
772
773 case TANAR:
774 // Only write the writable bits
775 regs.tanar &= TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED;
776 regs.tanar |= reg & ~(TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED);
777
778 // Pause capability unimplemented
779 #if 0
780 if (reg & TANAR_PS2) ;
781 if (reg & TANAR_PS1) ;
782 #endif
783
784 break;
785
786 case TANLPAR:
787 panic("this should only be written to by the fake phy!\n");
788
789 case TANER:
790         panic("TANER is a read only register!\n");
791
792 case TESR:
793 regs.tesr = reg;
794 break;
795
796 default:
797 panic("invalid register access daddr=%#x", daddr);
798 }
799 } else {
800 panic("Invalid Request Size");
801 }
802 pkt->makeAtomicResponse();
803 return pioDelay;
804 }
805
806 void
807 NSGigE::devIntrPost(uint32_t interrupts)
808 {
809 if (interrupts & ISR_RESERVE)
810 panic("Cannot set a reserved interrupt");
811
812 if (interrupts & ISR_NOIMPL)
813 warn("interrupt not implemented %#x\n", interrupts);
814
815 interrupts &= ISR_IMPL;
816 regs.isr |= interrupts;
817
818 if (interrupts & regs.imr) {
819 if (interrupts & ISR_SWI) {
820 totalSwi++;
821 }
822 if (interrupts & ISR_RXIDLE) {
823 totalRxIdle++;
824 }
825 if (interrupts & ISR_RXOK) {
826 totalRxOk++;
827 }
828 if (interrupts & ISR_RXDESC) {
829 totalRxDesc++;
830 }
831 if (interrupts & ISR_TXOK) {
832 totalTxOk++;
833 }
834 if (interrupts & ISR_TXIDLE) {
835 totalTxIdle++;
836 }
837 if (interrupts & ISR_TXDESC) {
838 totalTxDesc++;
839 }
840 if (interrupts & ISR_RXORN) {
841 totalRxOrn++;
842 }
843 }
844
845 DPRINTF(EthernetIntr,
846 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
847 interrupts, regs.isr, regs.imr);
848
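    // If any enabled interrupt cause is now pending, post the CPU
    // interrupt. The post is delayed by intrDelay to model interrupt
    // holdoff unless one of the ISR_NODELAY causes is set, in which
    // case it is posted for the current tick.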
849 if ((regs.isr & regs.imr)) {
850 Tick when = curTick;
851 if ((regs.isr & regs.imr & ISR_NODELAY) == 0)
852 when += intrDelay;
853 postedInterrupts++;
854 cpuIntrPost(when);
855 }
856 }
857
858 /* Writing the interrupt counting stats inside this function means that it
859    is now limited to being used to clear all interrupts when the kernel
860    reads the ISR and services it. This is just a note in case you were
861    thinking of expanding its use.
862  */
863 void
864 NSGigE::devIntrClear(uint32_t interrupts)
865 {
866 if (interrupts & ISR_RESERVE)
867 panic("Cannot clear a reserved interrupt");
868
869 if (regs.isr & regs.imr & ISR_SWI) {
870 postedSwi++;
871 }
872 if (regs.isr & regs.imr & ISR_RXIDLE) {
873 postedRxIdle++;
874 }
875 if (regs.isr & regs.imr & ISR_RXOK) {
876 postedRxOk++;
877 }
878 if (regs.isr & regs.imr & ISR_RXDESC) {
879 postedRxDesc++;
880 }
881 if (regs.isr & regs.imr & ISR_TXOK) {
882 postedTxOk++;
883 }
884 if (regs.isr & regs.imr & ISR_TXIDLE) {
885 postedTxIdle++;
886 }
887 if (regs.isr & regs.imr & ISR_TXDESC) {
888 postedTxDesc++;
889 }
890 if (regs.isr & regs.imr & ISR_RXORN) {
891 postedRxOrn++;
892 }
893
894 interrupts &= ~ISR_NOIMPL;
895 regs.isr &= ~interrupts;
896
897 DPRINTF(EthernetIntr,
898 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
899 interrupts, regs.isr, regs.imr);
900
901 if (!(regs.isr & regs.imr))
902 cpuIntrClear();
903 }
904
905 void
906 NSGigE::devIntrChangeMask()
907 {
908 DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
909 regs.isr, regs.imr, regs.isr & regs.imr);
910
911 if (regs.isr & regs.imr)
912 cpuIntrPost(curTick);
913 else
914 cpuIntrClear();
915 }
916
917 void
918 NSGigE::cpuIntrPost(Tick when)
919 {
920 // If the interrupt you want to post is later than an interrupt
921 // already scheduled, just let it post in the coming one and don't
922 // schedule another.
923 // HOWEVER, must be sure that the scheduled intrTick is in the
924 // future (this was formerly the source of a bug)
925 /**
926 * @todo this warning should be removed and the intrTick code should
927 * be fixed.
928 */
929 assert(when >= curTick);
930 assert(intrTick >= curTick || intrTick == 0);
931 if (when > intrTick && intrTick != 0) {
932 DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
933 intrTick);
934 return;
935 }
936
937 intrTick = when;
938 if (intrTick < curTick) {
939 debug_break();
940 intrTick = curTick;
941 }
942
943 DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
944 intrTick);
945
946 if (intrEvent)
947 intrEvent->squash();
948 intrEvent = new IntrEvent(this, true);
949 schedule(intrEvent, intrTick);
950 }
951
952 void
953 NSGigE::cpuInterrupt()
954 {
955 assert(intrTick == curTick);
956
957 // Whether or not there's a pending interrupt, we don't care about
958 // it anymore
959 intrEvent = 0;
960 intrTick = 0;
961
962 // Don't send an interrupt if there's already one
963 if (cpuPendingIntr) {
964 DPRINTF(EthernetIntr,
965 "would send an interrupt now, but there's already pending\n");
966 } else {
967 // Send interrupt
968 cpuPendingIntr = true;
969
970 DPRINTF(EthernetIntr, "posting interrupt\n");
971 intrPost();
972 }
973 }
974
975 void
976 NSGigE::cpuIntrClear()
977 {
978 if (!cpuPendingIntr)
979 return;
980
981 if (intrEvent) {
982 intrEvent->squash();
983 intrEvent = 0;
984 }
985
986 intrTick = 0;
987
988 cpuPendingIntr = false;
989
990 DPRINTF(EthernetIntr, "clearing interrupt\n");
991 intrClear();
992 }
993
994 bool
995 NSGigE::cpuIntrPending() const
996 { return cpuPendingIntr; }
997
998 void
999 NSGigE::txReset()
1000 {
1001
1002 DPRINTF(Ethernet, "transmit reset\n");
1003
1004 CTDD = false;
1005     txEnable = false;
1006 txFragPtr = 0;
1007 assert(txDescCnt == 0);
1008 txFifo.clear();
1009 txState = txIdle;
1010 assert(txDmaState == dmaIdle);
1011 }
1012
1013 void
1014 NSGigE::rxReset()
1015 {
1016 DPRINTF(Ethernet, "receive reset\n");
1017
1018 CRDD = false;
1019 assert(rxPktBytes == 0);
1020 rxEnable = false;
1021 rxFragPtr = 0;
1022 assert(rxDescCnt == 0);
1023 assert(rxDmaState == dmaIdle);
1024 rxFifo.clear();
1025 rxState = rxIdle;
1026 }
1027
1028 void
1029 NSGigE::regsReset()
1030 {
1031 memset(&regs, 0, sizeof(regs));
1032 regs.config = (CFGR_LNKSTS | CFGR_TBI_EN | CFGR_MODE_1000);
1033 regs.mear = 0x12;
1034 regs.txcfg = 0x120; // set drain threshold to 1024 bytes and
1035 // fill threshold to 32 bytes
1036 regs.rxcfg = 0x4; // set drain threshold to 16 bytes
1037 regs.srr = 0x0103; // set the silicon revision to rev B or 0x103
1038 regs.mibc = MIBC_FRZ;
1039 regs.vdr = 0x81; // set the vlan tag type to 802.1q
1040 regs.tesr = 0xc000; // TBI capable of both full and half duplex
1041 regs.brar = 0xffffffff;
1042
1043 extstsEnable = false;
1044 acceptBroadcast = false;
1045 acceptMulticast = false;
1046 acceptUnicast = false;
1047 acceptPerfect = false;
1048 acceptArp = false;
1049 }
1050
1051 bool
1052 NSGigE::doRxDmaRead()
1053 {
1054 assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
1055 rxDmaState = dmaReading;
1056
1057 if (dmaPending() || getState() != Running)
1058 rxDmaState = dmaReadWaiting;
1059 else
1060 dmaRead(rxDmaAddr, rxDmaLen, &rxDmaReadEvent, (uint8_t*)rxDmaData);
1061
1062 return true;
1063 }
1064
1065 void
1066 NSGigE::rxDmaReadDone()
1067 {
1068 assert(rxDmaState == dmaReading);
1069 rxDmaState = dmaIdle;
1070
1071 DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
1072 rxDmaAddr, rxDmaLen);
1073 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1074
1075 // If the transmit state machine has a pending DMA, let it go first
1076 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1077 txKick();
1078
1079 rxKick();
1080 }
1081
1082 bool
1083 NSGigE::doRxDmaWrite()
1084 {
1085 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
1086 rxDmaState = dmaWriting;
1087
1088 if (dmaPending() || getState() != Running)
1089 rxDmaState = dmaWriteWaiting;
1090 else
1091 dmaWrite(rxDmaAddr, rxDmaLen, &rxDmaWriteEvent, (uint8_t*)rxDmaData);
1092 return true;
1093 }
1094
1095 void
1096 NSGigE::rxDmaWriteDone()
1097 {
1098 assert(rxDmaState == dmaWriting);
1099 rxDmaState = dmaIdle;
1100
1101 DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
1102 rxDmaAddr, rxDmaLen);
1103 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1104
1105 // If the transmit state machine has a pending DMA, let it go first
1106 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1107 txKick();
1108
1109 rxKick();
1110 }
1111
1112 void
1113 NSGigE::rxKick()
1114 {
1115 bool is64bit = (bool)(regs.config & CFGR_M64ADDR);
1116
1117 DPRINTF(EthernetSM,
1118 "receive kick rxState=%s (rxBuf.size=%d) %d-bit\n",
1119 NsRxStateStrings[rxState], rxFifo.size(), is64bit ? 64 : 32);
1120
1121 Addr link, bufptr;
1122 uint32_t &cmdsts = is64bit ? rxDesc64.cmdsts : rxDesc32.cmdsts;
1123 uint32_t &extsts = is64bit ? rxDesc64.extsts : rxDesc32.extsts;
1124
1125 next:
1126 if (clock) {
1127 if (rxKickTick > curTick) {
1128 DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
1129 rxKickTick);
1130
1131 goto exit;
1132 }
1133
1134 // Go to the next state machine clock tick.
1135 rxKickTick = curTick + ticks(1);
1136 }
1137
1138 switch(rxDmaState) {
1139 case dmaReadWaiting:
1140 if (doRxDmaRead())
1141 goto exit;
1142 break;
1143 case dmaWriteWaiting:
1144 if (doRxDmaWrite())
1145 goto exit;
1146 break;
1147 default:
1148 break;
1149 }
1150
1151 link = is64bit ? (Addr)rxDesc64.link : (Addr)rxDesc32.link;
1152 bufptr = is64bit ? (Addr)rxDesc64.bufptr : (Addr)rxDesc32.bufptr;
1153
1154     // See the state machine in the spec for details.
1155     // The way this works: if the work in one state finishes and the
1156     // machine can go directly to another state, it jumps to the label
1157     // "next". If there is intermediate work, such as a DMA, that keeps
1158     // it from reaching the next state yet, it jumps to "exit" and
1159     // leaves the loop; when the DMA completes, the completion event
1160     // re-enters this loop.
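    // For example, rxDescRead can fall straight through to rxFifoBlock
    // within a single call via "next", while a descriptor fetch that is
    // still waiting on DMA forces "exit" until rxDmaReadDone() calls
    // rxKick() again. txKick() below follows the same convention.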
1161 switch (rxState) {
1162 case rxIdle:
1163 if (!rxEnable) {
1164 DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n");
1165 goto exit;
1166 }
1167
1168 if (CRDD) {
1169 rxState = rxDescRefr;
1170
1171 rxDmaAddr = regs.rxdp & 0x3fffffff;
1172 rxDmaData =
1173 is64bit ? (void *)&rxDesc64.link : (void *)&rxDesc32.link;
1174 rxDmaLen = is64bit ? sizeof(rxDesc64.link) : sizeof(rxDesc32.link);
1175 rxDmaFree = dmaDescFree;
1176
1177 descDmaReads++;
1178 descDmaRdBytes += rxDmaLen;
1179
1180 if (doRxDmaRead())
1181 goto exit;
1182 } else {
1183 rxState = rxDescRead;
1184
1185 rxDmaAddr = regs.rxdp & 0x3fffffff;
1186 rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
1187 rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
1188 rxDmaFree = dmaDescFree;
1189
1190 descDmaReads++;
1191 descDmaRdBytes += rxDmaLen;
1192
1193 if (doRxDmaRead())
1194 goto exit;
1195 }
1196 break;
1197
1198 case rxDescRefr:
1199 if (rxDmaState != dmaIdle)
1200 goto exit;
1201
1202 rxState = rxAdvance;
1203 break;
1204
1205 case rxDescRead:
1206 if (rxDmaState != dmaIdle)
1207 goto exit;
1208
1209 DPRINTF(EthernetDesc, "rxDesc: addr=%08x read descriptor\n",
1210 regs.rxdp & 0x3fffffff);
1211 DPRINTF(EthernetDesc,
1212 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
1213 link, bufptr, cmdsts, extsts);
1214
1215 if (cmdsts & CMDSTS_OWN) {
1216 devIntrPost(ISR_RXIDLE);
1217 rxState = rxIdle;
1218 goto exit;
1219 } else {
1220 rxState = rxFifoBlock;
1221 rxFragPtr = bufptr;
1222 rxDescCnt = cmdsts & CMDSTS_LEN_MASK;
1223 }
1224 break;
1225
1226 case rxFifoBlock:
1227 if (!rxPacket) {
1228 /**
1229 * @todo in reality, we should be able to start processing
1230 * the packet as it arrives, and not have to wait for the
1231              * full packet to be in the receive fifo.
1232 */
1233 if (rxFifo.empty())
1234 goto exit;
1235
1236 DPRINTF(EthernetSM, "****processing receive of new packet****\n");
1237
1238 // If we don't have a packet, grab a new one from the fifo.
1239 rxPacket = rxFifo.front();
1240 rxPktBytes = rxPacket->length;
1241 rxPacketBufPtr = rxPacket->data;
1242
1243 #if TRACING_ON
1244 if (DTRACE(Ethernet)) {
1245 IpPtr ip(rxPacket);
1246 if (ip) {
1247 DPRINTF(Ethernet, "ID is %d\n", ip->id());
1248 TcpPtr tcp(ip);
1249 if (tcp) {
1250 DPRINTF(Ethernet,
1251 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1252 tcp->sport(), tcp->dport(), tcp->seq(),
1253 tcp->ack());
1254 }
1255 }
1256 }
1257 #endif
1258
1259             // sanity check - I think the driver behaves like this
1260 assert(rxDescCnt >= rxPktBytes);
1261 rxFifo.pop();
1262 }
1263
1264
1265         // don't need the && rxDescCnt > 0 if the driver sanity check
1266         // above holds
1267 if (rxPktBytes > 0) {
1268 rxState = rxFragWrite;
1269 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
1270 // check holds
1271 rxXferLen = rxPktBytes;
1272
1273 rxDmaAddr = rxFragPtr & 0x3fffffff;
1274 rxDmaData = rxPacketBufPtr;
1275 rxDmaLen = rxXferLen;
1276 rxDmaFree = dmaDataFree;
1277
1278 if (doRxDmaWrite())
1279 goto exit;
1280
1281 } else {
1282 rxState = rxDescWrite;
1283
1284 //if (rxPktBytes == 0) { /* packet is done */
1285 assert(rxPktBytes == 0);
1286 DPRINTF(EthernetSM, "done with receiving packet\n");
1287
1288 cmdsts |= CMDSTS_OWN;
1289 cmdsts &= ~CMDSTS_MORE;
1290 cmdsts |= CMDSTS_OK;
1291 cmdsts &= 0xffff0000;
1292 cmdsts += rxPacket->length; //i.e. set CMDSTS_SIZE
1293
1294 #if 0
1295 /*
1296              * All the driver uses these for is its own stats keeping,
1297              * which we don't care about; they aren't necessary for
1298              * functionality, and doing this would just slow us down.
1299              * If a later driver version uses them for functional
1300              * purposes, just re-enable this code.
1301 */
1302 if (rxFilterEnable) {
1303 cmdsts &= ~CMDSTS_DEST_MASK;
1304 const EthAddr &dst = rxFifoFront()->dst();
1305 if (dst->unicast())
1306 cmdsts |= CMDSTS_DEST_SELF;
1307 if (dst->multicast())
1308 cmdsts |= CMDSTS_DEST_MULTI;
1309 if (dst->broadcast())
1310 cmdsts |= CMDSTS_DEST_MASK;
1311 }
1312 #endif
1313
1314 IpPtr ip(rxPacket);
1315 if (extstsEnable && ip) {
1316 extsts |= EXTSTS_IPPKT;
1317 rxIpChecksums++;
1318 if (cksum(ip) != 0) {
1319 DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
1320 extsts |= EXTSTS_IPERR;
1321 }
1322 TcpPtr tcp(ip);
1323 UdpPtr udp(ip);
1324 if (tcp) {
1325 extsts |= EXTSTS_TCPPKT;
1326 rxTcpChecksums++;
1327 if (cksum(tcp) != 0) {
1328 DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
1329 extsts |= EXTSTS_TCPERR;
1330
1331 }
1332 } else if (udp) {
1333 extsts |= EXTSTS_UDPPKT;
1334 rxUdpChecksums++;
1335 if (cksum(udp) != 0) {
1336 DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
1337 extsts |= EXTSTS_UDPERR;
1338 }
1339 }
1340 }
1341 rxPacket = 0;
1342
1343 /*
1344              * The driver seems to always receive into descriptor buffers
1345              * of size 1514, so a packet is never split across multiple
1346              * descriptors on the receive side; that case is therefore
1347              * not implemented, hence the assert above.
1348 */
1349
1350 DPRINTF(EthernetDesc,
1351 "rxDesc: addr=%08x writeback cmdsts extsts\n",
1352 regs.rxdp & 0x3fffffff);
1353 DPRINTF(EthernetDesc,
1354 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
1355 link, bufptr, cmdsts, extsts);
1356
1357 rxDmaAddr = regs.rxdp & 0x3fffffff;
1358 rxDmaData = &cmdsts;
1359 if (is64bit) {
1360 rxDmaAddr += offsetof(ns_desc64, cmdsts);
1361 rxDmaLen = sizeof(rxDesc64.cmdsts) + sizeof(rxDesc64.extsts);
1362 } else {
1363 rxDmaAddr += offsetof(ns_desc32, cmdsts);
1364 rxDmaLen = sizeof(rxDesc32.cmdsts) + sizeof(rxDesc32.extsts);
1365 }
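            // cmdsts and extsts are adjacent in both descriptor layouts,
            // which is what lets a single DMA starting at the cmdsts
            // offset write back both fields at once.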
1366 rxDmaFree = dmaDescFree;
1367
1368 descDmaWrites++;
1369 descDmaWrBytes += rxDmaLen;
1370
1371 if (doRxDmaWrite())
1372 goto exit;
1373 }
1374 break;
1375
1376 case rxFragWrite:
1377 if (rxDmaState != dmaIdle)
1378 goto exit;
1379
1380 rxPacketBufPtr += rxXferLen;
1381 rxFragPtr += rxXferLen;
1382 rxPktBytes -= rxXferLen;
1383
1384 rxState = rxFifoBlock;
1385 break;
1386
1387 case rxDescWrite:
1388 if (rxDmaState != dmaIdle)
1389 goto exit;
1390
1391 assert(cmdsts & CMDSTS_OWN);
1392
1393 assert(rxPacket == 0);
1394 devIntrPost(ISR_RXOK);
1395
1396 if (cmdsts & CMDSTS_INTR)
1397 devIntrPost(ISR_RXDESC);
1398
1399 if (!rxEnable) {
1400 DPRINTF(EthernetSM, "Halting the RX state machine\n");
1401 rxState = rxIdle;
1402 goto exit;
1403 } else
1404 rxState = rxAdvance;
1405 break;
1406
1407 case rxAdvance:
1408 if (link == 0) {
1409 devIntrPost(ISR_RXIDLE);
1410 rxState = rxIdle;
1411 CRDD = true;
1412 goto exit;
1413 } else {
1414 if (rxDmaState != dmaIdle)
1415 goto exit;
1416 rxState = rxDescRead;
1417 regs.rxdp = link;
1418 CRDD = false;
1419
1420 rxDmaAddr = regs.rxdp & 0x3fffffff;
1421 rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
1422 rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
1423 rxDmaFree = dmaDescFree;
1424
1425 if (doRxDmaRead())
1426 goto exit;
1427 }
1428 break;
1429
1430 default:
1431 panic("Invalid rxState!");
1432 }
1433
1434 DPRINTF(EthernetSM, "entering next rxState=%s\n",
1435 NsRxStateStrings[rxState]);
1436 goto next;
1437
1438 exit:
1439 /**
1440 * @todo do we want to schedule a future kick?
1441 */
1442 DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
1443 NsRxStateStrings[rxState]);
1444
1445 if (clock && !rxKickEvent.scheduled())
1446 schedule(rxKickEvent, rxKickTick);
1447 }
1448
1449 void
1450 NSGigE::transmit()
1451 {
1452 if (txFifo.empty()) {
1453 DPRINTF(Ethernet, "nothing to transmit\n");
1454 return;
1455 }
1456
1457 DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n",
1458 txFifo.size());
1459 if (interface->sendPacket(txFifo.front())) {
1460 #if TRACING_ON
1461 if (DTRACE(Ethernet)) {
1462 IpPtr ip(txFifo.front());
1463 if (ip) {
1464 DPRINTF(Ethernet, "ID is %d\n", ip->id());
1465 TcpPtr tcp(ip);
1466 if (tcp) {
1467 DPRINTF(Ethernet,
1468 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1469 tcp->sport(), tcp->dport(), tcp->seq(),
1470 tcp->ack());
1471 }
1472 }
1473 }
1474 #endif
1475
1476 DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length);
1477 txBytes += txFifo.front()->length;
1478 txPackets++;
1479
1480 DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n",
1481 txFifo.avail());
1482 txFifo.pop();
1483
1484 /*
1485 * normally do a writeback of the descriptor here, and ONLY
1486 * after that is done, send this interrupt. but since our
1487 * stuff never actually fails, just do this interrupt here,
1488 * otherwise the code has to stray from this nice format.
1489 * besides, it's functionally the same.
1490 */
1491 devIntrPost(ISR_TXOK);
1492 }
1493
1494 if (!txFifo.empty() && !txEvent.scheduled()) {
1495 DPRINTF(Ethernet, "reschedule transmit\n");
1496 schedule(txEvent, curTick + retryTime);
1497 }
1498 }
1499
1500 bool
1501 NSGigE::doTxDmaRead()
1502 {
1503 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
1504 txDmaState = dmaReading;
1505
1506 if (dmaPending() || getState() != Running)
1507 txDmaState = dmaReadWaiting;
1508 else
1509 dmaRead(txDmaAddr, txDmaLen, &txDmaReadEvent, (uint8_t*)txDmaData);
1510
1511 return true;
1512 }
1513
1514 void
1515 NSGigE::txDmaReadDone()
1516 {
1517 assert(txDmaState == dmaReading);
1518 txDmaState = dmaIdle;
1519
1520 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
1521 txDmaAddr, txDmaLen);
1522 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1523
1524 // If the receive state machine has a pending DMA, let it go first
1525 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1526 rxKick();
1527
1528 txKick();
1529 }
1530
1531 bool
1532 NSGigE::doTxDmaWrite()
1533 {
1534 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
1535 txDmaState = dmaWriting;
1536
1537 if (dmaPending() || getState() != Running)
1538 txDmaState = dmaWriteWaiting;
1539 else
1540 dmaWrite(txDmaAddr, txDmaLen, &txDmaWriteEvent, (uint8_t*)txDmaData);
1541 return true;
1542 }
1543
1544 void
1545 NSGigE::txDmaWriteDone()
1546 {
1547 assert(txDmaState == dmaWriting);
1548 txDmaState = dmaIdle;
1549
1550 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
1551 txDmaAddr, txDmaLen);
1552 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1553
1554 // If the receive state machine has a pending DMA, let it go first
1555 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1556 rxKick();
1557
1558 txKick();
1559 }
1560
1561 void
1562 NSGigE::txKick()
1563 {
1564 bool is64bit = (bool)(regs.config & CFGR_M64ADDR);
1565
1566 DPRINTF(EthernetSM, "transmit kick txState=%s %d-bit\n",
1567 NsTxStateStrings[txState], is64bit ? 64 : 32);
1568
1569 Addr link, bufptr;
1570 uint32_t &cmdsts = is64bit ? txDesc64.cmdsts : txDesc32.cmdsts;
1571 uint32_t &extsts = is64bit ? txDesc64.extsts : txDesc32.extsts;
1572
1573 next:
1574 if (clock) {
1575 if (txKickTick > curTick) {
1576 DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
1577 txKickTick);
1578 goto exit;
1579 }
1580
1581 // Go to the next state machine clock tick.
1582 txKickTick = curTick + ticks(1);
1583 }
1584
1585 switch(txDmaState) {
1586 case dmaReadWaiting:
1587 if (doTxDmaRead())
1588 goto exit;
1589 break;
1590 case dmaWriteWaiting:
1591 if (doTxDmaWrite())
1592 goto exit;
1593 break;
1594 default:
1595 break;
1596 }
1597
1598 link = is64bit ? (Addr)txDesc64.link : (Addr)txDesc32.link;
1599 bufptr = is64bit ? (Addr)txDesc64.bufptr : (Addr)txDesc32.bufptr;
1600 switch (txState) {
1601 case txIdle:
1602 if (!txEnable) {
1603 DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n");
1604 goto exit;
1605 }
1606
1607 if (CTDD) {
1608 txState = txDescRefr;
1609
1610 txDmaAddr = regs.txdp & 0x3fffffff;
1611 txDmaData =
1612 is64bit ? (void *)&txDesc64.link : (void *)&txDesc32.link;
1613 txDmaLen = is64bit ? sizeof(txDesc64.link) : sizeof(txDesc32.link);
1614 txDmaFree = dmaDescFree;
1615
1616 descDmaReads++;
1617 descDmaRdBytes += txDmaLen;
1618
1619 if (doTxDmaRead())
1620 goto exit;
1621
1622 } else {
1623 txState = txDescRead;
1624
1625 txDmaAddr = regs.txdp & 0x3fffffff;
1626 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
1627 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
1628 txDmaFree = dmaDescFree;
1629
1630 descDmaReads++;
1631 descDmaRdBytes += txDmaLen;
1632
1633 if (doTxDmaRead())
1634 goto exit;
1635 }
1636 break;
1637
1638 case txDescRefr:
1639 if (txDmaState != dmaIdle)
1640 goto exit;
1641
1642 txState = txAdvance;
1643 break;
1644
1645 case txDescRead:
1646 if (txDmaState != dmaIdle)
1647 goto exit;
1648
1649 DPRINTF(EthernetDesc, "txDesc: addr=%08x read descriptor\n",
1650 regs.txdp & 0x3fffffff);
1651 DPRINTF(EthernetDesc,
1652 "txDesc: link=%#x bufptr=%#x cmdsts=%#08x extsts=%#08x\n",
1653 link, bufptr, cmdsts, extsts);
1654
1655 if (cmdsts & CMDSTS_OWN) {
1656 txState = txFifoBlock;
1657 txFragPtr = bufptr;
1658 txDescCnt = cmdsts & CMDSTS_LEN_MASK;
1659 } else {
1660 devIntrPost(ISR_TXIDLE);
1661 txState = txIdle;
1662 goto exit;
1663 }
1664 break;
1665
1666 case txFifoBlock:
1667 if (!txPacket) {
1668 DPRINTF(EthernetSM, "****starting the tx of a new packet****\n");
1669 txPacket = new EthPacketData(16384);
1670 txPacketBufPtr = txPacket->data;
1671 }
1672
1673 if (txDescCnt == 0) {
1674 DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n");
1675 if (cmdsts & CMDSTS_MORE) {
1676 DPRINTF(EthernetSM, "there are more descriptors to come\n");
1677 txState = txDescWrite;
1678
1679 cmdsts &= ~CMDSTS_OWN;
1680
1681 txDmaAddr = regs.txdp & 0x3fffffff;
1682 txDmaData = &cmdsts;
1683 if (is64bit) {
1684 txDmaAddr += offsetof(ns_desc64, cmdsts);
1685 txDmaLen = sizeof(txDesc64.cmdsts);
1686 } else {
1687 txDmaAddr += offsetof(ns_desc32, cmdsts);
1688 txDmaLen = sizeof(txDesc32.cmdsts);
1689 }
1690 txDmaFree = dmaDescFree;
1691
1692 if (doTxDmaWrite())
1693 goto exit;
1694
1695 } else { /* this packet is totally done */
1696 DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
1697                 /* deal with the packet that just finished */
1698 if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
1699 IpPtr ip(txPacket);
1700 if (extsts & EXTSTS_UDPPKT) {
1701 UdpPtr udp(ip);
1702 if (udp) {
1703 udp->sum(0);
1704 udp->sum(cksum(udp));
1705 txUdpChecksums++;
1706 } else {
1707 debug_break();
1708 warn_once("UDPPKT set, but not UDP!\n");
1709 }
1710 } else if (extsts & EXTSTS_TCPPKT) {
1711 TcpPtr tcp(ip);
1712 if (tcp) {
1713 tcp->sum(0);
1714 tcp->sum(cksum(tcp));
1715 txTcpChecksums++;
1716 } else {
1717 debug_break();
1718                             warn_once("TCPPKT set, but not TCP!\n");
1719 }
1720 }
1721 if (extsts & EXTSTS_IPPKT) {
1722 if (ip) {
1723 ip->sum(0);
1724 ip->sum(cksum(ip));
1725 txIpChecksums++;
1726 } else {
1727 debug_break();
1728                             warn_once("IPPKT set, but not IP!\n");
1729 }
1730 }
1731 }
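                // The block above models the chip's transmit checksum
                // offload: with VTCR_PPCHK set by the driver and the
                // extended status words enabled, the IP/TCP/UDP checksums
                // are computed and inserted here before the packet is
                // handed to the txFifo.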
1732
1733 txPacket->length = txPacketBufPtr - txPacket->data;
1734                     // this is just because the receive side can't handle a
1735                     // packet bigger than 1514, so make sure we never send one
1736                     if (txPacket->length > 1514)
1737                         panic("transmit packet too large, %d > 1514\n",
1738                             txPacket->length);
1739
1740 #ifndef NDEBUG
1741 bool success =
1742 #endif
1743 txFifo.push(txPacket);
1744 assert(success);
1745
1746 /*
1747                      * the following section is not to spec, but
1748 * functionally shouldn't be any different. normally,
1749 * the chip will wait til the transmit has occurred
1750 * before writing back the descriptor because it has
1751 * to wait to see that it was successfully transmitted
1752 * to decide whether to set CMDSTS_OK or not.
1753 * however, in the simulator since it is always
1754 * successfully transmitted, and writing it exactly to
1755 * spec would complicate the code, we just do it here
1756 */
1757
1758 cmdsts &= ~CMDSTS_OWN;
1759 cmdsts |= CMDSTS_OK;
1760
1761 DPRINTF(EthernetDesc,
1762 "txDesc writeback: cmdsts=%08x extsts=%08x\n",
1763 cmdsts, extsts);
1764
1765 txDmaFree = dmaDescFree;
1766 txDmaAddr = regs.txdp & 0x3fffffff;
1767 txDmaData = &cmdsts;
1768 if (is64bit) {
1769 txDmaAddr += offsetof(ns_desc64, cmdsts);
1770 txDmaLen =
1771 sizeof(txDesc64.cmdsts) + sizeof(txDesc64.extsts);
1772 } else {
1773 txDmaAddr += offsetof(ns_desc32, cmdsts);
1774 txDmaLen =
1775 sizeof(txDesc32.cmdsts) + sizeof(txDesc32.extsts);
1776 }
1777
1778 descDmaWrites++;
1779 descDmaWrBytes += txDmaLen;
1780
1781 transmit();
1782 txPacket = 0;
1783
1784 if (!txEnable) {
1785 DPRINTF(EthernetSM, "halting TX state machine\n");
1786 txState = txIdle;
1787 goto exit;
1788 } else
1789 txState = txAdvance;
1790
1791 if (doTxDmaWrite())
1792 goto exit;
1793 }
1794 } else {
1795 DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
1796 if (!txFifo.full()) {
1797 txState = txFragRead;
1798
1799 /*
1800 * The number of bytes transferred is either whatever
1801 * is left in the descriptor (txDescCnt), or if there
1802 * is not enough room in the fifo, just whatever room
1803 * is left in the fifo
1804 */
1805 txXferLen = min<uint32_t>(txDescCnt, txFifo.avail());
1806
1807 txDmaAddr = txFragPtr & 0x3fffffff;
1808 txDmaData = txPacketBufPtr;
1809 txDmaLen = txXferLen;
1810 txDmaFree = dmaDataFree;
1811
1812 if (doTxDmaRead())
1813 goto exit;
1814 } else {
1815 txState = txFifoBlock;
1816 transmit();
1817
1818 goto exit;
1819 }
1820
1821 }
1822 break;
1823
1824 case txFragRead:
1825 if (txDmaState != dmaIdle)
1826 goto exit;
1827
1828 txPacketBufPtr += txXferLen;
1829 txFragPtr += txXferLen;
1830 txDescCnt -= txXferLen;
1831 txFifo.reserve(txXferLen);
1832
1833 txState = txFifoBlock;
1834 break;
1835
1836 case txDescWrite:
1837 if (txDmaState != dmaIdle)
1838 goto exit;
1839
1840 if (cmdsts & CMDSTS_INTR)
1841 devIntrPost(ISR_TXDESC);
1842
1843 if (!txEnable) {
1844 DPRINTF(EthernetSM, "halting TX state machine\n");
1845 txState = txIdle;
1846 goto exit;
1847 } else
1848 txState = txAdvance;
1849 break;
1850
1851 case txAdvance:
1852 if (link == 0) {
1853 devIntrPost(ISR_TXIDLE);
1854 txState = txIdle;
1855 goto exit;
1856 } else {
1857 if (txDmaState != dmaIdle)
1858 goto exit;
1859 txState = txDescRead;
1860 regs.txdp = link;
1861 CTDD = false;
1862
1863 txDmaAddr = link & 0x3fffffff;
1864 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
1865 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
1866 txDmaFree = dmaDescFree;
1867
1868 if (doTxDmaRead())
1869 goto exit;
1870 }
1871 break;
1872
1873 default:
1874 panic("invalid state");
1875 }
1876
1877 DPRINTF(EthernetSM, "entering next txState=%s\n",
1878 NsTxStateStrings[txState]);
1879 goto next;
1880
1881 exit:
1882 /**
1883 * @todo do we want to schedule a future kick?
1884 */
1885 DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
1886 NsTxStateStrings[txState]);
1887
1888 if (clock && !txKickEvent.scheduled())
1889 schedule(txKickEvent, txKickTick);
1890 }
1891
1892 /**
1893 * Advance the EEPROM state machine
1894 * Called on rising edge of EEPROM clock bit in MEAR
1895 */
1896 void
1897 NSGigE::eepromKick()
1898 {
1899 switch (eepromState) {
1900
1901 case eepromStart:
1902
1903 // Wait for start bit
1904 if (regs.mear & MEAR_EEDI) {
1905 // Set up to get 2 opcode bits
1906 eepromState = eepromGetOpcode;
1907 eepromBitsToRx = 2;
1908 eepromOpcode = 0;
1909 }
1910 break;
1911
1912 case eepromGetOpcode:
1913 eepromOpcode <<= 1;
1914 eepromOpcode += (regs.mear & MEAR_EEDI) ? 1 : 0;
1915 --eepromBitsToRx;
1916
1917 // Done getting opcode
1918 if (eepromBitsToRx == 0) {
1919 if (eepromOpcode != EEPROM_READ)
1920 panic("only EEPROM reads are implemented!");
1921
1922 // Set up to get address
1923 eepromState = eepromGetAddress;
1924 eepromBitsToRx = 6;
1925 eepromAddress = 0;
1926 }
1927 break;
1928
1929 case eepromGetAddress:
1930 eepromAddress <<= 1;
1931 eepromAddress += (regs.mear & MEAR_EEDI) ? 1 : 0;
1932 --eepromBitsToRx;
1933
1934 // Done getting address
1935 if (eepromBitsToRx == 0) {
1936
1937 if (eepromAddress >= EEPROM_SIZE)
1938 panic("EEPROM read access out of range!");
1939
1940 switch (eepromAddress) {
1941
1942 case EEPROM_PMATCH2_ADDR:
1943 eepromData = rom.perfectMatch[5];
1944 eepromData <<= 8;
1945 eepromData += rom.perfectMatch[4];
1946 break;
1947
1948 case EEPROM_PMATCH1_ADDR:
1949 eepromData = rom.perfectMatch[3];
1950 eepromData <<= 8;
1951 eepromData += rom.perfectMatch[2];
1952 break;
1953
1954 case EEPROM_PMATCH0_ADDR:
1955 eepromData = rom.perfectMatch[1];
1956 eepromData <<= 8;
1957 eepromData += rom.perfectMatch[0];
1958 break;
1959
1960 default:
1961 panic("FreeBSD driver only uses EEPROM to read PMATCH!");
1962 }
1963 // Set up to read data
1964 eepromState = eepromRead;
1965 eepromBitsToRx = 16;
1966
1967 // Clear data in bit
1968 regs.mear &= ~MEAR_EEDI;
1969 }
1970 break;
1971
1972 case eepromRead:
1973 // Clear Data Out bit
1974 regs.mear &= ~MEAR_EEDO;
1975 // Set bit to value of current EEPROM bit
1976 regs.mear |= (eepromData & 0x8000) ? MEAR_EEDO : 0x0;
1977
1978 eepromData <<= 1;
1979 --eepromBitsToRx;
1980
1981 // All done
1982 if (eepromBitsToRx == 0) {
1983 eepromState = eepromStart;
1984 }
1985 break;
1986
1987 default:
1988 panic("invalid EEPROM state");
1989 }
1990
1991 }
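
/*
 * A complete EEPROM read, as modeled above, therefore looks like this on
 * the MEAR serial interface (one eepromKick() call per rising EECLK edge):
 *
 *   1 start bit (EEDI high)
 *   2 opcode bits (must be EEPROM_READ)
 *   6 address bits (must select one of the PMATCH words)
 *   16 data bits shifted out MSB-first on EEDO
 *
 * Any other opcode or address panics, since the FreeBSD driver only uses
 * the EEPROM to read the perfect-match MAC address.
 */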
1992
1993 void
1994 NSGigE::transferDone()
1995 {
1996 if (txFifo.empty()) {
1997 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
1998 return;
1999 }
2000
2001 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
2002
2003 reschedule(txEvent, curTick + ticks(1), true);
2004 }
2005
2006 bool
2007 NSGigE::rxFilter(const EthPacketPtr &packet)
2008 {
2009 EthPtr eth = packet;
2010 bool drop = true;
2011 string type;
2012
2013 const EthAddr &dst = eth->dst();
2014 if (dst.unicast()) {
2015 // If we're accepting all unicast addresses
2016 if (acceptUnicast)
2017 drop = false;
2018
2019 // If we make a perfect match
2020 if (acceptPerfect && dst == rom.perfectMatch)
2021 drop = false;
2022
2023 if (acceptArp && eth->type() == ETH_TYPE_ARP)
2024 drop = false;
2025
2026 } else if (dst.broadcast()) {
2027 // if we're accepting broadcasts
2028 if (acceptBroadcast)
2029 drop = false;
2030
2031 } else if (dst.multicast()) {
2032 // if we're accepting all multicasts
2033 if (acceptMulticast)
2034 drop = false;
2035
2036 // Multicast hashing faked - all packets accepted
2037 if (multicastHashEnable)
2038 drop = false;
2039 }
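
    /*
     * Note: the multicast hash path above is faked. On the real part the
     * filter hash table would be indexed by bits derived from a CRC of the
     * destination address; a rough sketch (assuming a hypothetical crc32()
     * helper, which this model does not provide) would be:
     *
     *     uint32_t crc = crc32(dst.bytes(), ETH_ADDR_LEN);
     *     unsigned bit = crc & (FHASH_SIZE * 8 - 1);
     *     if (multicastHashEnable &&
     *         (rom.filterHash[bit / 8] & (1 << (bit % 8))))
     *         drop = false;
     */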
2040
2041 if (drop) {
2042 DPRINTF(Ethernet, "rxFilter drop\n");
2043 DDUMP(EthernetData, packet->data, packet->length);
2044 }
2045
2046 return drop;
2047 }
2048
2049 bool
2050 NSGigE::recvPacket(EthPacketPtr packet)
2051 {
2052 rxBytes += packet->length;
2053 rxPackets++;
2054
2055 DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
2056 rxFifo.avail());
2057
2058 if (!rxEnable) {
2059 DPRINTF(Ethernet, "receive disabled...packet dropped\n");
2060 return true;
2061 }
2062
2063 if (!rxFilterEnable) {
2064 DPRINTF(Ethernet,
2065 "receive packet filtering disabled . . . packet dropped\n");
2066 return true;
2067 }
2068
2069 if (rxFilter(packet)) {
2070 DPRINTF(Ethernet, "packet filtered...dropped\n");
2071 return true;
2072 }
2073
2074 if (rxFifo.avail() < packet->length) {
2075 #if TRACING_ON
2076 IpPtr ip(packet);
2077 TcpPtr tcp(ip);
2078 if (ip) {
2079 DPRINTF(Ethernet,
2080 "packet won't fit in receive buffer...pkt ID %d dropped\n",
2081 ip->id());
2082 if (tcp) {
2083 DPRINTF(Ethernet, "Seq=%d\n", tcp->seq());
2084 }
2085 }
2086 #endif
2087 droppedPackets++;
2088 devIntrPost(ISR_RXORN);
2089 return false;
2090 }
2091
2092 rxFifo.push(packet);
2093
2094 rxKick();
2095 return true;
2096 }
2097
2098
2099 void
2100 NSGigE::resume()
2101 {
2102 SimObject::resume();
2103
2104 // During drain we could have left the state machines in a waiting state and
2105     // they wouldn't get out until some other event occurred to kick them.
2106     // This way they'll get out immediately.
2107 txKick();
2108 rxKick();
2109 }
2110
2111
2112 //=====================================================================
2113 //
2114 //
2115 void
2116 NSGigE::serialize(ostream &os)
2117 {
2118 // Serialize the PciDev base class
2119 PciDev::serialize(os);
2120
2121 /*
2122 * Finalize any DMA events now.
2123 */
2124 // @todo will mem system save pending dma?
2125
2126 /*
2127 * Serialize the device registers
2128 */
2129 SERIALIZE_SCALAR(regs.command);
2130 SERIALIZE_SCALAR(regs.config);
2131 SERIALIZE_SCALAR(regs.mear);
2132 SERIALIZE_SCALAR(regs.ptscr);
2133 SERIALIZE_SCALAR(regs.isr);
2134 SERIALIZE_SCALAR(regs.imr);
2135 SERIALIZE_SCALAR(regs.ier);
2136 SERIALIZE_SCALAR(regs.ihr);
2137 SERIALIZE_SCALAR(regs.txdp);
2138 SERIALIZE_SCALAR(regs.txdp_hi);
2139 SERIALIZE_SCALAR(regs.txcfg);
2140 SERIALIZE_SCALAR(regs.gpior);
2141 SERIALIZE_SCALAR(regs.rxdp);
2142 SERIALIZE_SCALAR(regs.rxdp_hi);
2143 SERIALIZE_SCALAR(regs.rxcfg);
2144 SERIALIZE_SCALAR(regs.pqcr);
2145 SERIALIZE_SCALAR(regs.wcsr);
2146 SERIALIZE_SCALAR(regs.pcr);
2147 SERIALIZE_SCALAR(regs.rfcr);
2148 SERIALIZE_SCALAR(regs.rfdr);
2149 SERIALIZE_SCALAR(regs.brar);
2150 SERIALIZE_SCALAR(regs.brdr);
2151 SERIALIZE_SCALAR(regs.srr);
2152 SERIALIZE_SCALAR(regs.mibc);
2153 SERIALIZE_SCALAR(regs.vrcr);
2154 SERIALIZE_SCALAR(regs.vtcr);
2155 SERIALIZE_SCALAR(regs.vdr);
2156 SERIALIZE_SCALAR(regs.ccsr);
2157 SERIALIZE_SCALAR(regs.tbicr);
2158 SERIALIZE_SCALAR(regs.tbisr);
2159 SERIALIZE_SCALAR(regs.tanar);
2160 SERIALIZE_SCALAR(regs.tanlpar);
2161 SERIALIZE_SCALAR(regs.taner);
2162 SERIALIZE_SCALAR(regs.tesr);
2163
2164 SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
2165 SERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);
2166
2167 SERIALIZE_SCALAR(ioEnable);
2168
2169 /*
2170 * Serialize the data Fifos
2171 */
2172 rxFifo.serialize("rxFifo", os);
2173 txFifo.serialize("txFifo", os);
2174
2175 /*
2176 * Serialize the various helper variables
2177 */
2178 bool txPacketExists = txPacket;
2179 SERIALIZE_SCALAR(txPacketExists);
2180 if (txPacketExists) {
2181 txPacket->length = txPacketBufPtr - txPacket->data;
2182 txPacket->serialize("txPacket", os);
2183 uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
2184 SERIALIZE_SCALAR(txPktBufPtr);
2185 }
2186
2187 bool rxPacketExists = rxPacket;
2188 SERIALIZE_SCALAR(rxPacketExists);
2189 if (rxPacketExists) {
2190 rxPacket->serialize("rxPacket", os);
2191 uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
2192 SERIALIZE_SCALAR(rxPktBufPtr);
2193 }
2194
2195 SERIALIZE_SCALAR(txXferLen);
2196 SERIALIZE_SCALAR(rxXferLen);
2197
2198 /*
2199 * Serialize Cached Descriptors
2200 */
2201 SERIALIZE_SCALAR(rxDesc64.link);
2202 SERIALIZE_SCALAR(rxDesc64.bufptr);
2203 SERIALIZE_SCALAR(rxDesc64.cmdsts);
2204 SERIALIZE_SCALAR(rxDesc64.extsts);
2205 SERIALIZE_SCALAR(txDesc64.link);
2206 SERIALIZE_SCALAR(txDesc64.bufptr);
2207 SERIALIZE_SCALAR(txDesc64.cmdsts);
2208 SERIALIZE_SCALAR(txDesc64.extsts);
2209 SERIALIZE_SCALAR(rxDesc32.link);
2210 SERIALIZE_SCALAR(rxDesc32.bufptr);
2211 SERIALIZE_SCALAR(rxDesc32.cmdsts);
2212 SERIALIZE_SCALAR(rxDesc32.extsts);
2213 SERIALIZE_SCALAR(txDesc32.link);
2214 SERIALIZE_SCALAR(txDesc32.bufptr);
2215 SERIALIZE_SCALAR(txDesc32.cmdsts);
2216 SERIALIZE_SCALAR(txDesc32.extsts);
2217 SERIALIZE_SCALAR(extstsEnable);
2218
2219 /*
2220 * Serialize tx state machine
2221 */
2222 int txState = this->txState;
2223 SERIALIZE_SCALAR(txState);
2224 SERIALIZE_SCALAR(txEnable);
2225 SERIALIZE_SCALAR(CTDD);
2226 SERIALIZE_SCALAR(txFragPtr);
2227 SERIALIZE_SCALAR(txDescCnt);
2228 int txDmaState = this->txDmaState;
2229 SERIALIZE_SCALAR(txDmaState);
2230 SERIALIZE_SCALAR(txKickTick);
2231
2232 /*
2233 * Serialize rx state machine
2234 */
2235 int rxState = this->rxState;
2236 SERIALIZE_SCALAR(rxState);
2237 SERIALIZE_SCALAR(rxEnable);
2238 SERIALIZE_SCALAR(CRDD);
2239 SERIALIZE_SCALAR(rxPktBytes);
2240 SERIALIZE_SCALAR(rxFragPtr);
2241 SERIALIZE_SCALAR(rxDescCnt);
2242 int rxDmaState = this->rxDmaState;
2243 SERIALIZE_SCALAR(rxDmaState);
2244 SERIALIZE_SCALAR(rxKickTick);
2245
2246 /*
2247 * Serialize EEPROM state machine
2248 */
2249 int eepromState = this->eepromState;
2250 SERIALIZE_SCALAR(eepromState);
2251 SERIALIZE_SCALAR(eepromClk);
2252 SERIALIZE_SCALAR(eepromBitsToRx);
2253 SERIALIZE_SCALAR(eepromOpcode);
2254 SERIALIZE_SCALAR(eepromAddress);
2255 SERIALIZE_SCALAR(eepromData);
2256
2257 /*
2258 * If there's a pending transmit, store the time so we can
2259 * reschedule it later
2260 */
2261 Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
2262 SERIALIZE_SCALAR(transmitTick);
2263
2264 /*
2265 * receive address filter settings
2266 */
2267 SERIALIZE_SCALAR(rxFilterEnable);
2268 SERIALIZE_SCALAR(acceptBroadcast);
2269 SERIALIZE_SCALAR(acceptMulticast);
2270 SERIALIZE_SCALAR(acceptUnicast);
2271 SERIALIZE_SCALAR(acceptPerfect);
2272 SERIALIZE_SCALAR(acceptArp);
2273 SERIALIZE_SCALAR(multicastHashEnable);
2274
2275 /*
2276 * Keep track of pending interrupt status.
2277 */
2278 SERIALIZE_SCALAR(intrTick);
2279 SERIALIZE_SCALAR(cpuPendingIntr);
2280 Tick intrEventTick = 0;
2281 if (intrEvent)
2282 intrEventTick = intrEvent->when();
2283 SERIALIZE_SCALAR(intrEventTick);
2284
2285 }
2286
2287 void
2288 NSGigE::unserialize(Checkpoint *cp, const std::string &section)
2289 {
2290 // Unserialize the PciDev base class
2291 PciDev::unserialize(cp, section);
2292
2293 UNSERIALIZE_SCALAR(regs.command);
2294 UNSERIALIZE_SCALAR(regs.config);
2295 UNSERIALIZE_SCALAR(regs.mear);
2296 UNSERIALIZE_SCALAR(regs.ptscr);
2297 UNSERIALIZE_SCALAR(regs.isr);
2298 UNSERIALIZE_SCALAR(regs.imr);
2299 UNSERIALIZE_SCALAR(regs.ier);
2300 UNSERIALIZE_SCALAR(regs.ihr);
2301 UNSERIALIZE_SCALAR(regs.txdp);
2302 UNSERIALIZE_SCALAR(regs.txdp_hi);
2303 UNSERIALIZE_SCALAR(regs.txcfg);
2304 UNSERIALIZE_SCALAR(regs.gpior);
2305 UNSERIALIZE_SCALAR(regs.rxdp);
2306 UNSERIALIZE_SCALAR(regs.rxdp_hi);
2307 UNSERIALIZE_SCALAR(regs.rxcfg);
2308 UNSERIALIZE_SCALAR(regs.pqcr);
2309 UNSERIALIZE_SCALAR(regs.wcsr);
2310 UNSERIALIZE_SCALAR(regs.pcr);
2311 UNSERIALIZE_SCALAR(regs.rfcr);
2312 UNSERIALIZE_SCALAR(regs.rfdr);
2313 UNSERIALIZE_SCALAR(regs.brar);
2314 UNSERIALIZE_SCALAR(regs.brdr);
2315 UNSERIALIZE_SCALAR(regs.srr);
2316 UNSERIALIZE_SCALAR(regs.mibc);
2317 UNSERIALIZE_SCALAR(regs.vrcr);
2318 UNSERIALIZE_SCALAR(regs.vtcr);
2319 UNSERIALIZE_SCALAR(regs.vdr);
2320 UNSERIALIZE_SCALAR(regs.ccsr);
2321 UNSERIALIZE_SCALAR(regs.tbicr);
2322 UNSERIALIZE_SCALAR(regs.tbisr);
2323 UNSERIALIZE_SCALAR(regs.tanar);
2324 UNSERIALIZE_SCALAR(regs.tanlpar);
2325 UNSERIALIZE_SCALAR(regs.taner);
2326 UNSERIALIZE_SCALAR(regs.tesr);
2327
2328 UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
2329 UNSERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);
2330
2331 UNSERIALIZE_SCALAR(ioEnable);
2332
2333 /*
2334 * unserialize the data fifos
2335 */
2336 rxFifo.unserialize("rxFifo", cp, section);
2337 txFifo.unserialize("txFifo", cp, section);
2338
2339 /*
2340 * unserialize the various helper variables
2341 */
2342 bool txPacketExists;
2343 UNSERIALIZE_SCALAR(txPacketExists);
2344 if (txPacketExists) {
2345 txPacket = new EthPacketData(16384);
2346 txPacket->unserialize("txPacket", cp, section);
2347 uint32_t txPktBufPtr;
2348 UNSERIALIZE_SCALAR(txPktBufPtr);
2349 txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
2350 } else
2351 txPacket = 0;
2352
2353 bool rxPacketExists;
2354 UNSERIALIZE_SCALAR(rxPacketExists);
2355 rxPacket = 0;
2356 if (rxPacketExists) {
2357 rxPacket = new EthPacketData(16384);
2358 rxPacket->unserialize("rxPacket", cp, section);
2359 uint32_t rxPktBufPtr;
2360 UNSERIALIZE_SCALAR(rxPktBufPtr);
2361 rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
2362 } else
2363 rxPacket = 0;
2364
2365 UNSERIALIZE_SCALAR(txXferLen);
2366 UNSERIALIZE_SCALAR(rxXferLen);
2367
2368 /*
2369 * Unserialize Cached Descriptors
2370 */
2371 UNSERIALIZE_SCALAR(rxDesc64.link);
2372 UNSERIALIZE_SCALAR(rxDesc64.bufptr);
2373 UNSERIALIZE_SCALAR(rxDesc64.cmdsts);
2374 UNSERIALIZE_SCALAR(rxDesc64.extsts);
2375 UNSERIALIZE_SCALAR(txDesc64.link);
2376 UNSERIALIZE_SCALAR(txDesc64.bufptr);
2377 UNSERIALIZE_SCALAR(txDesc64.cmdsts);
2378 UNSERIALIZE_SCALAR(txDesc64.extsts);
2379 UNSERIALIZE_SCALAR(rxDesc32.link);
2380 UNSERIALIZE_SCALAR(rxDesc32.bufptr);
2381 UNSERIALIZE_SCALAR(rxDesc32.cmdsts);
2382 UNSERIALIZE_SCALAR(rxDesc32.extsts);
2383 UNSERIALIZE_SCALAR(txDesc32.link);
2384 UNSERIALIZE_SCALAR(txDesc32.bufptr);
2385 UNSERIALIZE_SCALAR(txDesc32.cmdsts);
2386 UNSERIALIZE_SCALAR(txDesc32.extsts);
2387 UNSERIALIZE_SCALAR(extstsEnable);
2388
2389 /*
2390 * unserialize tx state machine
2391 */
2392 int txState;
2393 UNSERIALIZE_SCALAR(txState);
2394 this->txState = (TxState) txState;
2395 UNSERIALIZE_SCALAR(txEnable);
2396 UNSERIALIZE_SCALAR(CTDD);
2397 UNSERIALIZE_SCALAR(txFragPtr);
2398 UNSERIALIZE_SCALAR(txDescCnt);
2399 int txDmaState;
2400 UNSERIALIZE_SCALAR(txDmaState);
2401 this->txDmaState = (DmaState) txDmaState;
2402 UNSERIALIZE_SCALAR(txKickTick);
2403 if (txKickTick)
2404 schedule(txKickEvent, txKickTick);
2405
2406 /*
2407 * unserialize rx state machine
2408 */
2409 int rxState;
2410 UNSERIALIZE_SCALAR(rxState);
2411 this->rxState = (RxState) rxState;
2412 UNSERIALIZE_SCALAR(rxEnable);
2413 UNSERIALIZE_SCALAR(CRDD);
2414 UNSERIALIZE_SCALAR(rxPktBytes);
2415 UNSERIALIZE_SCALAR(rxFragPtr);
2416 UNSERIALIZE_SCALAR(rxDescCnt);
2417 int rxDmaState;
2418 UNSERIALIZE_SCALAR(rxDmaState);
2419 this->rxDmaState = (DmaState) rxDmaState;
2420 UNSERIALIZE_SCALAR(rxKickTick);
2421 if (rxKickTick)
2422 schedule(rxKickEvent, rxKickTick);
2423
2424 /*
2425 * Unserialize EEPROM state machine
2426 */
2427 int eepromState;
2428 UNSERIALIZE_SCALAR(eepromState);
2429 this->eepromState = (EEPROMState) eepromState;
2430 UNSERIALIZE_SCALAR(eepromClk);
2431 UNSERIALIZE_SCALAR(eepromBitsToRx);
2432 UNSERIALIZE_SCALAR(eepromOpcode);
2433 UNSERIALIZE_SCALAR(eepromAddress);
2434 UNSERIALIZE_SCALAR(eepromData);
2435
2436 /*
2437 * If there's a pending transmit, reschedule it now
2438 */
2439 Tick transmitTick;
2440 UNSERIALIZE_SCALAR(transmitTick);
2441 if (transmitTick)
2442 schedule(txEvent, curTick + transmitTick);
2443
2444 /*
2445 * unserialize receive address filter settings
2446 */
2447 UNSERIALIZE_SCALAR(rxFilterEnable);
2448 UNSERIALIZE_SCALAR(acceptBroadcast);
2449 UNSERIALIZE_SCALAR(acceptMulticast);
2450 UNSERIALIZE_SCALAR(acceptUnicast);
2451 UNSERIALIZE_SCALAR(acceptPerfect);
2452 UNSERIALIZE_SCALAR(acceptArp);
2453 UNSERIALIZE_SCALAR(multicastHashEnable);
2454
2455 /*
2456 * Keep track of pending interrupt status.
2457 */
2458 UNSERIALIZE_SCALAR(intrTick);
2459 UNSERIALIZE_SCALAR(cpuPendingIntr);
2460 Tick intrEventTick;
2461 UNSERIALIZE_SCALAR(intrEventTick);
2462 if (intrEventTick) {
2463 intrEvent = new IntrEvent(this, true);
2464 schedule(intrEvent, intrEventTick);
2465 }
2466 }
2467
2468 NSGigE *
2469 NSGigEParams::create()
2470 {
2471 return new NSGigE(this);
2472 }