[gem5.git] / src / dev / ns_gige.cc
1 /*
2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Nathan Binkert
29 * Lisa Hsu
30 */
31
32 /** @file
33 * Device module for modelling the National Semiconductor
34 * DP83820 ethernet controller. Does not support priority queueing
35 */
36 #include <deque>
37 #include <string>
38
39 #include "base/debug.hh"
40 #include "base/inet.hh"
41 #include "base/types.hh"
42 #include "config/the_isa.hh"
43 #include "cpu/thread_context.hh"
44 #include "debug/EthernetAll.hh"
45 #include "dev/etherlink.hh"
46 #include "dev/ns_gige.hh"
47 #include "dev/pciconfigall.hh"
48 #include "mem/packet.hh"
49 #include "mem/packet_access.hh"
50 #include "params/NSGigE.hh"
51 #include "sim/system.hh"
52
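// Human-readable names for the receive, transmit, and DMA state machine
// enums below; used only for DPRINTF/debug trace output.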
53 const char *NsRxStateStrings[] =
54 {
55 "rxIdle",
56 "rxDescRefr",
57 "rxDescRead",
58 "rxFifoBlock",
59 "rxFragWrite",
60 "rxDescWrite",
61 "rxAdvance"
62 };
63
64 const char *NsTxStateStrings[] =
65 {
66 "txIdle",
67 "txDescRefr",
68 "txDescRead",
69 "txFifoBlock",
70 "txFragRead",
71 "txDescWrite",
72 "txAdvance"
73 };
74
75 const char *NsDmaState[] =
76 {
77 "dmaIdle",
78 "dmaReading",
79 "dmaWriting",
80 "dmaReadWaiting",
81 "dmaWriteWaiting"
82 };
83
84 using namespace std;
85 using namespace Net;
86 using namespace TheISA;
87
88 ///////////////////////////////////////////////////////////////////////
89 //
90 // NSGigE PCI Device
91 //
92 NSGigE::NSGigE(Params *p)
93 : EtherDevice(p), ioEnable(false),
94 txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size),
95 txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
96 txXferLen(0), rxXferLen(0), rxDmaFree(false), txDmaFree(false),
97 clock(p->clock),
98 txState(txIdle), txEnable(false), CTDD(false), txHalt(false),
99 txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
100 rxEnable(false), CRDD(false), rxPktBytes(0), rxHalt(false),
101 rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
102 eepromState(eepromStart), eepromClk(false), eepromBitsToRx(0),
103 eepromOpcode(0), eepromAddress(0), eepromData(0),
104 dmaReadDelay(p->dma_read_delay), dmaWriteDelay(p->dma_write_delay),
105 dmaReadFactor(p->dma_read_factor), dmaWriteFactor(p->dma_write_factor),
106 rxDmaData(NULL), rxDmaAddr(0), rxDmaLen(0),
107 txDmaData(NULL), txDmaAddr(0), txDmaLen(0),
108 rxDmaReadEvent(this), rxDmaWriteEvent(this),
109 txDmaReadEvent(this), txDmaWriteEvent(this),
110 dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free),
111 txDelay(p->tx_delay), rxDelay(p->rx_delay),
112 rxKickTick(0), rxKickEvent(this), txKickTick(0), txKickEvent(this),
113 txEvent(this), rxFilterEnable(p->rx_filter),
114 acceptBroadcast(false), acceptMulticast(false), acceptUnicast(false),
115 acceptPerfect(false), acceptArp(false), multicastHashEnable(false),
116 intrDelay(p->intr_delay), intrTick(0), cpuPendingIntr(false),
117 intrEvent(0), interface(0)
118 {
119
120
121 interface = new NSGigEInt(name() + ".int0", this);
122
123 regsReset();
124 memcpy(&rom.perfectMatch, p->hardware_address.bytes(), ETH_ADDR_LEN);
125
126 memset(&rxDesc32, 0, sizeof(rxDesc32));
127 memset(&txDesc32, 0, sizeof(txDesc32));
128 memset(&rxDesc64, 0, sizeof(rxDesc64));
129 memset(&txDesc64, 0, sizeof(txDesc64));
130 }
131
132 NSGigE::~NSGigE()
133 {}
134
135 /**
136 * This is to write to the PCI general configuration registers
137 */
138 Tick
139 NSGigE::writeConfig(PacketPtr pkt)
140 {
141 int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
142 if (offset < PCI_DEVICE_SPECIFIC)
143 PciDev::writeConfig(pkt);
144 else
145 panic("Device specific PCI config space not implemented!\n");
146
147 switch (offset) {
148 // Things seem to work fine without all of these PCI settings, but
149 // the I/O enable is tracked here to double-check; an assertion
150 // will fail if it ever needs to be properly implemented.
151 case PCI_COMMAND:
152 if (config.data[offset] & PCI_CMD_IOSE)
153 ioEnable = true;
154 else
155 ioEnable = false;
156 break;
157 }
158
159 return configDelay;
160 }
161
162 EtherInt*
163 NSGigE::getEthPort(const std::string &if_name, int idx)
164 {
165 if (if_name == "interface") {
166 if (interface->getPeer())
167 panic("interface already connected to a peer\n");
168 return interface;
169 }
170 return NULL;
171 }
172
173 /**
174 * This reads the device registers, which are detailed in the NS83820
175 * spec sheet
176 */
177 Tick
178 NSGigE::read(PacketPtr pkt)
179 {
180 assert(ioEnable);
181
182 pkt->allocate();
183
184 //The mask is to give you only the offset into the device register file
185 Addr daddr = pkt->getAddr() & 0xfff;
186 DPRINTF(EthernetPIO, "read da=%#x pa=%#x size=%d\n",
187 daddr, pkt->getAddr(), pkt->getSize());
188
189
190 // There are some reserved registers; see ns_gige_reg.h and
191 // the spec sheet for details
192 if (daddr > LAST && daddr <= RESERVED) {
193 panic("Accessing reserved register");
194 } else if (daddr > RESERVED && daddr <= 0x3FC) {
195 return readConfig(pkt);
196 } else if (daddr >= MIB_START && daddr <= MIB_END) {
197 // We don't implement the MIBs; hopefully the kernel doesn't
198 // actually depend upon their values. The MIBs are just
199 // hardware statistics counters.
200 pkt->set<uint32_t>(0);
201 pkt->makeAtomicResponse();
202 return pioDelay;
203 } else if (daddr > 0x3FC)
204 panic("Something is messed up!\n");
205
206 assert(pkt->getSize() == sizeof(uint32_t));
207 uint32_t &reg = *pkt->getPtr<uint32_t>();
208 uint16_t rfaddr;
209
210 switch (daddr) {
211 case CR:
212 reg = regs.command;
213 //these are supposed to be cleared on a read
214 reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
215 break;
216
217 case CFGR:
218 reg = regs.config;
219 break;
220
221 case MEAR:
222 reg = regs.mear;
223 break;
224
225 case PTSCR:
226 reg = regs.ptscr;
227 break;
228
229 case ISR:
230 reg = regs.isr;
231 devIntrClear(ISR_ALL);
232 break;
233
234 case IMR:
235 reg = regs.imr;
236 break;
237
238 case IER:
239 reg = regs.ier;
240 break;
241
242 case IHR:
243 reg = regs.ihr;
244 break;
245
246 case TXDP:
247 reg = regs.txdp;
248 break;
249
250 case TXDP_HI:
251 reg = regs.txdp_hi;
252 break;
253
254 case TX_CFG:
255 reg = regs.txcfg;
256 break;
257
258 case GPIOR:
259 reg = regs.gpior;
260 break;
261
262 case RXDP:
263 reg = regs.rxdp;
264 break;
265
266 case RXDP_HI:
267 reg = regs.rxdp_hi;
268 break;
269
270 case RX_CFG:
271 reg = regs.rxcfg;
272 break;
273
274 case PQCR:
275 reg = regs.pqcr;
276 break;
277
278 case WCSR:
279 reg = regs.wcsr;
280 break;
281
282 case PCR:
283 reg = regs.pcr;
284 break;
285
286 // See the spec sheet for how RFCR and RFDR work.
287 // Basically, you write to RFCR to tell the device
288 // what you want to do next, then you access RFDR,
289 // and the device responds according to what you
290 // wrote to RFCR.
291 case RFCR:
292 reg = regs.rfcr;
293 break;
294
295 case RFDR:
296 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
297 switch (rfaddr) {
298 // Read from perfect match ROM octets
299 case 0x000:
300 reg = rom.perfectMatch[1];
301 reg = reg << 8;
302 reg += rom.perfectMatch[0];
303 break;
304 case 0x002:
305 reg = rom.perfectMatch[3] << 8;
306 reg += rom.perfectMatch[2];
307 break;
308 case 0x004:
309 reg = rom.perfectMatch[5] << 8;
310 reg += rom.perfectMatch[4];
311 break;
312 default:
313 // Read filter hash table
314 if (rfaddr >= FHASH_ADDR &&
315 rfaddr < FHASH_ADDR + FHASH_SIZE) {
316
317 // Only word-aligned reads supported
318 if (rfaddr % 2)
319 panic("unaligned read from filter hash table!");
320
321 reg = rom.filterHash[rfaddr - FHASH_ADDR + 1] << 8;
322 reg += rom.filterHash[rfaddr - FHASH_ADDR];
323 break;
324 }
325
326 panic("reading RFDR for something other than pattern"
327 " matching or hashing! %#x\n", rfaddr);
328 }
329 break;
330
331 case SRR:
332 reg = regs.srr;
333 break;
334
335 case MIBC:
336 reg = regs.mibc;
337 reg &= ~(MIBC_MIBS | MIBC_ACLR);
338 break;
339
340 case VRCR:
341 reg = regs.vrcr;
342 break;
343
344 case VTCR:
345 reg = regs.vtcr;
346 break;
347
348 case VDR:
349 reg = regs.vdr;
350 break;
351
352 case CCSR:
353 reg = regs.ccsr;
354 break;
355
356 case TBICR:
357 reg = regs.tbicr;
358 break;
359
360 case TBISR:
361 reg = regs.tbisr;
362 break;
363
364 case TANAR:
365 reg = regs.tanar;
366 break;
367
368 case TANLPAR:
369 reg = regs.tanlpar;
370 break;
371
372 case TANER:
373 reg = regs.taner;
374 break;
375
376 case TESR:
377 reg = regs.tesr;
378 break;
379
380 case M5REG:
381 reg = 0;
382 if (params()->rx_thread)
383 reg |= M5REG_RX_THREAD;
384 if (params()->tx_thread)
385 reg |= M5REG_TX_THREAD;
386 if (params()->rss)
387 reg |= M5REG_RSS;
388 break;
389
390 default:
391 panic("reading unimplemented register: addr=%#x", daddr);
392 }
393
394 DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
395 daddr, reg, reg);
396
397 pkt->makeAtomicResponse();
398 return pioDelay;
399 }
400
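/**
 * This writes the device registers, which are detailed in the NS83820
 * spec sheet
 */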
401 Tick
402 NSGigE::write(PacketPtr pkt)
403 {
404 assert(ioEnable);
405
406 Addr daddr = pkt->getAddr() & 0xfff;
407 DPRINTF(EthernetPIO, "write da=%#x pa=%#x size=%d\n",
408 daddr, pkt->getAddr(), pkt->getSize());
409
410 if (daddr > LAST && daddr <= RESERVED) {
411 panic("Accessing reserved register");
412 } else if (daddr > RESERVED && daddr <= 0x3FC) {
413 return writeConfig(pkt);
414 } else if (daddr > 0x3FC)
415 panic("Something is messed up!\n");
416
417 if (pkt->getSize() == sizeof(uint32_t)) {
418 uint32_t reg = pkt->get<uint32_t>();
419 uint16_t rfaddr;
420
421 DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);
422
423 switch (daddr) {
424 case CR:
425 regs.command = reg;
426 if (reg & CR_TXD) {
427 txEnable = false;
428 } else if (reg & CR_TXE) {
429 txEnable = true;
430
431 // the kernel is enabling the transmit machine
432 if (txState == txIdle)
433 txKick();
434 }
435
436 if (reg & CR_RXD) {
437 rxEnable = false;
438 } else if (reg & CR_RXE) {
439 rxEnable = true;
440
441 if (rxState == rxIdle)
442 rxKick();
443 }
444
445 if (reg & CR_TXR)
446 txReset();
447
448 if (reg & CR_RXR)
449 rxReset();
450
451 if (reg & CR_SWI)
452 devIntrPost(ISR_SWI);
453
454 if (reg & CR_RST) {
455 txReset();
456 rxReset();
457
458 regsReset();
459 }
460 break;
461
462 case CFGR:
463 if (reg & CFGR_LNKSTS ||
464 reg & CFGR_SPDSTS ||
465 reg & CFGR_DUPSTS ||
466 reg & CFGR_RESERVED ||
467 reg & CFGR_T64ADDR ||
468 reg & CFGR_PCI64_DET)
469
470 // First clear all writable bits
471 regs.config &= CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
472 CFGR_RESERVED | CFGR_T64ADDR |
473 CFGR_PCI64_DET;
474 // Now set the appropriate writable bits
475 regs.config |= reg & ~(CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
476 CFGR_RESERVED | CFGR_T64ADDR |
477 CFGR_PCI64_DET);
478
479 // All of these empty checks and #if 0 blocks exist because we don't
480 // think the kernel needs these features implemented; if a problem
481 // relating to one of them shows up, functionality may need to be added.
482 if (reg & CFGR_TBI_EN) ;
483 if (reg & CFGR_MODE_1000) ;
484
485 if (reg & CFGR_AUTO_1000)
486 panic("CFGR_AUTO_1000 not implemented!\n");
487
488 if (reg & CFGR_PINT_DUPSTS ||
489 reg & CFGR_PINT_LNKSTS ||
490 reg & CFGR_PINT_SPDSTS)
491 ;
492
493 if (reg & CFGR_TMRTEST) ;
494 if (reg & CFGR_MRM_DIS) ;
495 if (reg & CFGR_MWI_DIS) ;
496
497 if (reg & CFGR_T64ADDR) ;
498 // panic("CFGR_T64ADDR is read only register!\n");
499
500 if (reg & CFGR_PCI64_DET)
501 panic("CFGR_PCI64_DET is a read-only register!\n");
502
503 if (reg & CFGR_DATA64_EN) ;
504 if (reg & CFGR_M64ADDR) ;
505 if (reg & CFGR_PHY_RST) ;
506 if (reg & CFGR_PHY_DIS) ;
507
508 if (reg & CFGR_EXTSTS_EN)
509 extstsEnable = true;
510 else
511 extstsEnable = false;
512
513 if (reg & CFGR_REQALG) ;
514 if (reg & CFGR_SB) ;
515 if (reg & CFGR_POW) ;
516 if (reg & CFGR_EXD) ;
517 if (reg & CFGR_PESEL) ;
518 if (reg & CFGR_BROM_DIS) ;
519 if (reg & CFGR_EXT_125) ;
520 if (reg & CFGR_BEM) ;
521 break;
522
523 case MEAR:
524 // Clear writable bits
525 regs.mear &= MEAR_EEDO;
526 // Set appropriate writable bits
527 regs.mear |= reg & ~MEAR_EEDO;
528
529 // FreeBSD uses the EEPROM to read PMATCH (for the MAC address)
530 // even though it could get it through RFDR
531 if (reg & MEAR_EESEL) {
532 // Rising edge of clock
533 if (reg & MEAR_EECLK && !eepromClk)
534 eepromKick();
535 }
536 else {
537 eepromState = eepromStart;
538 regs.mear &= ~MEAR_EEDI;
539 }
540
541 eepromClk = reg & MEAR_EECLK;
542
543 // since phy is completely faked, MEAR_MD* don't matter
544 if (reg & MEAR_MDIO) ;
545 if (reg & MEAR_MDDIR) ;
546 if (reg & MEAR_MDC) ;
547 break;
548
549 case PTSCR:
550 regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
551 // these control BISTs for various parts of the chip - we
552 // don't care, so just fake that the BIST is done
553 if (reg & PTSCR_RBIST_EN)
554 regs.ptscr |= PTSCR_RBIST_DONE;
555 if (reg & PTSCR_EEBIST_EN)
556 regs.ptscr &= ~PTSCR_EEBIST_EN;
557 if (reg & PTSCR_EELOAD_EN)
558 regs.ptscr &= ~PTSCR_EELOAD_EN;
559 break;
560
561 case ISR: /* writing to the ISR has no effect */
562 panic("ISR is a read only register!\n");
563
564 case IMR:
565 regs.imr = reg;
566 devIntrChangeMask();
567 break;
568
569 case IER:
570 regs.ier = reg;
571 break;
572
573 case IHR:
574 regs.ihr = reg;
575 /* not going to implement real interrupt holdoff */
576 break;
577
578 case TXDP:
579 regs.txdp = (reg & 0xFFFFFFFC);
580 assert(txState == txIdle);
581 CTDD = false;
582 break;
583
584 case TXDP_HI:
585 regs.txdp_hi = reg;
586 break;
587
588 case TX_CFG:
589 regs.txcfg = reg;
590 #if 0
591 if (reg & TX_CFG_CSI) ;
592 if (reg & TX_CFG_HBI) ;
593 if (reg & TX_CFG_MLB) ;
594 if (reg & TX_CFG_ATP) ;
595 if (reg & TX_CFG_ECRETRY) {
596 /*
597 * this could easily be implemented, but considering
598 * the network is just a fake pipe, wouldn't make
599 * sense to do this
600 */
601 }
602
603 if (reg & TX_CFG_BRST_DIS) ;
604 #endif
605
606 #if 0
607 /* we handle our own DMA, ignore the kernel's exhortations */
608 if (reg & TX_CFG_MXDMA) ;
609 #endif
610
611 // also, we currently don't care about fill/drain
612 // thresholds though this may change in the future with
613 // more realistic networks or a driver which changes it
614 // according to feedback
615
616 break;
617
618 case GPIOR:
619 // Only write writable bits
620 regs.gpior &= GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
621 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN;
622 regs.gpior |= reg & ~(GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
623 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN);
624 /* these just control general purpose i/o pins, don't matter */
625 break;
626
627 case RXDP:
628 regs.rxdp = reg;
629 CRDD = false;
630 break;
631
632 case RXDP_HI:
633 regs.rxdp_hi = reg;
634 break;
635
636 case RX_CFG:
637 regs.rxcfg = reg;
638 #if 0
639 if (reg & RX_CFG_AEP) ;
640 if (reg & RX_CFG_ARP) ;
641 if (reg & RX_CFG_STRIPCRC) ;
642 if (reg & RX_CFG_RX_RD) ;
643 if (reg & RX_CFG_ALP) ;
644 if (reg & RX_CFG_AIRL) ;
645
646 /* we handle our own DMA, ignore what kernel says about it */
647 if (reg & RX_CFG_MXDMA) ;
648
649 //also, we currently don't care about fill/drain thresholds
650 //though this may change in the future with more realistic
651 //networks or a driver which changes it according to feedback
652 if (reg & (RX_CFG_DRTH | RX_CFG_DRTH0)) ;
653 #endif
654 break;
655
656 case PQCR:
657 /* there is no priority queueing used in the linux 2.6 driver */
658 regs.pqcr = reg;
659 break;
660
661 case WCSR:
662 /* not going to implement wake on LAN */
663 regs.wcsr = reg;
664 break;
665
666 case PCR:
667 /* not going to implement pause control */
668 regs.pcr = reg;
669 break;
670
671 case RFCR:
672 regs.rfcr = reg;
673
674 rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
675 acceptBroadcast = (reg & RFCR_AAB) ? true : false;
676 acceptMulticast = (reg & RFCR_AAM) ? true : false;
677 acceptUnicast = (reg & RFCR_AAU) ? true : false;
678 acceptPerfect = (reg & RFCR_APM) ? true : false;
679 acceptArp = (reg & RFCR_AARP) ? true : false;
680 multicastHashEnable = (reg & RFCR_MHEN) ? true : false;
681
682 #if 0
683 if (reg & RFCR_APAT)
684 panic("RFCR_APAT not implemented!\n");
685 #endif
686 if (reg & RFCR_UHEN)
687 panic("Unicast hash filtering not used by drivers!\n");
688
689 if (reg & RFCR_ULM)
690 panic("RFCR_ULM not implemented!\n");
691
692 break;
693
694 case RFDR:
695 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
696 switch (rfaddr) {
697 case 0x000:
698 rom.perfectMatch[0] = (uint8_t)reg;
699 rom.perfectMatch[1] = (uint8_t)(reg >> 8);
700 break;
701 case 0x002:
702 rom.perfectMatch[2] = (uint8_t)reg;
703 rom.perfectMatch[3] = (uint8_t)(reg >> 8);
704 break;
705 case 0x004:
706 rom.perfectMatch[4] = (uint8_t)reg;
707 rom.perfectMatch[5] = (uint8_t)(reg >> 8);
708 break;
709 default:
710
711 if (rfaddr >= FHASH_ADDR &&
712 rfaddr < FHASH_ADDR + FHASH_SIZE) {
713
714 // Only word-aligned writes supported
715 if (rfaddr % 2)
716 panic("unaligned write to filter hash table!");
717
718 rom.filterHash[rfaddr - FHASH_ADDR] = (uint8_t)reg;
719 rom.filterHash[rfaddr - FHASH_ADDR + 1]
720 = (uint8_t)(reg >> 8);
721 break;
722 }
723 panic("writing RFDR for something other than pattern"
724 " matching or hashing! %#x\n", rfaddr);
725 }
726 break; // don't fall through into the BRAR case below
727 case BRAR:
728 regs.brar = reg;
729 break;
730
731 case BRDR:
732 panic("the driver never uses BRDR, something is wrong!\n");
733
734 case SRR:
735 panic("SRR is a read-only register!\n");
736
737 case MIBC:
738 panic("the driver never uses MIBC, something is wrong!\n");
739
740 case VRCR:
741 regs.vrcr = reg;
742 break;
743
744 case VTCR:
745 regs.vtcr = reg;
746 break;
747
748 case VDR:
749 panic("the driver never uses VDR, something is wrong!\n");
750
751 case CCSR:
752 /* not going to implement clockrun stuff */
753 regs.ccsr = reg;
754 break;
755
756 case TBICR:
757 regs.tbicr = reg;
758 if (reg & TBICR_MR_LOOPBACK)
759 panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
760
761 if (reg & TBICR_MR_AN_ENABLE) {
762 regs.tanlpar = regs.tanar;
763 regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
764 }
765
766 #if 0
767 if (reg & TBICR_MR_RESTART_AN) ;
768 #endif
769
770 break;
771
772 case TBISR:
773 panic("TBISR is a read-only register!\n");
774
775 case TANAR:
776 // Only write the writable bits
777 regs.tanar &= TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED;
778 regs.tanar |= reg & ~(TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED);
779
780 // Pause capability unimplemented
781 #if 0
782 if (reg & TANAR_PS2) ;
783 if (reg & TANAR_PS1) ;
784 #endif
785
786 break;
787
788 case TANLPAR:
789 panic("this should only be written to by the fake phy!\n");
790
791 case TANER:
792 panic("TANER is a read-only register!\n");
793
794 case TESR:
795 regs.tesr = reg;
796 break;
797
798 default:
799 panic("invalid register access daddr=%#x", daddr);
800 }
801 } else {
802 panic("Invalid Request Size");
803 }
804 pkt->makeAtomicResponse();
805 return pioDelay;
806 }
807
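/**
 * Post device-level interrupts: set the given bits in the ISR, update
 * the interrupt statistics, and, if any unmasked interrupts are now
 * pending, schedule a CPU interrupt after intrDelay (immediately for
 * ISR_NODELAY interrupts).
 */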
808 void
809 NSGigE::devIntrPost(uint32_t interrupts)
810 {
811 if (interrupts & ISR_RESERVE)
812 panic("Cannot set a reserved interrupt");
813
814 if (interrupts & ISR_NOIMPL)
815 warn("interrupt not implemented %#x\n", interrupts);
816
817 interrupts &= ISR_IMPL;
818 regs.isr |= interrupts;
819
820 if (interrupts & regs.imr) {
821 if (interrupts & ISR_SWI) {
822 totalSwi++;
823 }
824 if (interrupts & ISR_RXIDLE) {
825 totalRxIdle++;
826 }
827 if (interrupts & ISR_RXOK) {
828 totalRxOk++;
829 }
830 if (interrupts & ISR_RXDESC) {
831 totalRxDesc++;
832 }
833 if (interrupts & ISR_TXOK) {
834 totalTxOk++;
835 }
836 if (interrupts & ISR_TXIDLE) {
837 totalTxIdle++;
838 }
839 if (interrupts & ISR_TXDESC) {
840 totalTxDesc++;
841 }
842 if (interrupts & ISR_RXORN) {
843 totalRxOrn++;
844 }
845 }
846
847 DPRINTF(EthernetIntr,
848 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
849 interrupts, regs.isr, regs.imr);
850
851 if ((regs.isr & regs.imr)) {
852 Tick when = curTick();
853 if ((regs.isr & regs.imr & ISR_NODELAY) == 0)
854 when += intrDelay;
855 postedInterrupts++;
856 cpuIntrPost(when);
857 }
858 }
859
860 /* Keeping the interrupt-counting stats inside this function means that
861 it can now only be used to clear all interrupts when the kernel reads
862 and services the ISR; keep that in mind if you are thinking of
863 expanding its use.
864 */
865 void
866 NSGigE::devIntrClear(uint32_t interrupts)
867 {
868 if (interrupts & ISR_RESERVE)
869 panic("Cannot clear a reserved interrupt");
870
871 if (regs.isr & regs.imr & ISR_SWI) {
872 postedSwi++;
873 }
874 if (regs.isr & regs.imr & ISR_RXIDLE) {
875 postedRxIdle++;
876 }
877 if (regs.isr & regs.imr & ISR_RXOK) {
878 postedRxOk++;
879 }
880 if (regs.isr & regs.imr & ISR_RXDESC) {
881 postedRxDesc++;
882 }
883 if (regs.isr & regs.imr & ISR_TXOK) {
884 postedTxOk++;
885 }
886 if (regs.isr & regs.imr & ISR_TXIDLE) {
887 postedTxIdle++;
888 }
889 if (regs.isr & regs.imr & ISR_TXDESC) {
890 postedTxDesc++;
891 }
892 if (regs.isr & regs.imr & ISR_RXORN) {
893 postedRxOrn++;
894 }
895
896 interrupts &= ~ISR_NOIMPL;
897 regs.isr &= ~interrupts;
898
899 DPRINTF(EthernetIntr,
900 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
901 interrupts, regs.isr, regs.imr);
902
903 if (!(regs.isr & regs.imr))
904 cpuIntrClear();
905 }
906
907 void
908 NSGigE::devIntrChangeMask()
909 {
910 DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
911 regs.isr, regs.imr, regs.isr & regs.imr);
912
913 if (regs.isr & regs.imr)
914 cpuIntrPost(curTick());
915 else
916 cpuIntrClear();
917 }
918
919 void
920 NSGigE::cpuIntrPost(Tick when)
921 {
922 // If the interrupt you want to post is later than an interrupt
923 // already scheduled, just let it post with the one that is coming
924 // and don't schedule another.
925 // However, we must be sure that the scheduled intrTick is in the
926 // future (this was formerly the source of a bug).
927 /**
928 * @todo this warning should be removed and the intrTick code should
929 * be fixed.
930 */
931 assert(when >= curTick());
932 assert(intrTick >= curTick() || intrTick == 0);
933 if (when > intrTick && intrTick != 0) {
934 DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
935 intrTick);
936 return;
937 }
938
939 intrTick = when;
940 if (intrTick < curTick()) {
941 Debug::breakpoint();
942 intrTick = curTick();
943 }
944
945 DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
946 intrTick);
947
948 if (intrEvent)
949 intrEvent->squash();
950 intrEvent = new IntrEvent(this, true);
951 schedule(intrEvent, intrTick);
952 }
953
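/**
 * Called when the scheduled interrupt event fires: assert the device
 * interrupt via intrPost() unless one is already pending at the CPU.
 */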
954 void
955 NSGigE::cpuInterrupt()
956 {
957 assert(intrTick == curTick());
958
959 // Whether or not there's a pending interrupt, we don't care about
960 // it anymore
961 intrEvent = 0;
962 intrTick = 0;
963
964 // Don't send an interrupt if there's already one pending
965 if (cpuPendingIntr) {
966 DPRINTF(EthernetIntr,
967 "would send an interrupt now, but one is already pending\n");
968 } else {
969 // Send interrupt
970 cpuPendingIntr = true;
971
972 DPRINTF(EthernetIntr, "posting interrupt\n");
973 intrPost();
974 }
975 }
976
977 void
978 NSGigE::cpuIntrClear()
979 {
980 if (!cpuPendingIntr)
981 return;
982
983 if (intrEvent) {
984 intrEvent->squash();
985 intrEvent = 0;
986 }
987
988 intrTick = 0;
989
990 cpuPendingIntr = false;
991
992 DPRINTF(EthernetIntr, "clearing interrupt\n");
993 intrClear();
994 }
995
996 bool
997 NSGigE::cpuIntrPending() const
998 { return cpuPendingIntr; }
999
1000 void
1001 NSGigE::txReset()
1002 {
1003
1004 DPRINTF(Ethernet, "transmit reset\n");
1005
1006 CTDD = false;
1007 txEnable = false;
1008 txFragPtr = 0;
1009 assert(txDescCnt == 0);
1010 txFifo.clear();
1011 txState = txIdle;
1012 assert(txDmaState == dmaIdle);
1013 }
1014
1015 void
1016 NSGigE::rxReset()
1017 {
1018 DPRINTF(Ethernet, "receive reset\n");
1019
1020 CRDD = false;
1021 assert(rxPktBytes == 0);
1022 rxEnable = false;
1023 rxFragPtr = 0;
1024 assert(rxDescCnt == 0);
1025 assert(rxDmaState == dmaIdle);
1026 rxFifo.clear();
1027 rxState = rxIdle;
1028 }
1029
1030 void
1031 NSGigE::regsReset()
1032 {
1033 memset(&regs, 0, sizeof(regs));
1034 regs.config = (CFGR_LNKSTS | CFGR_TBI_EN | CFGR_MODE_1000);
1035 regs.mear = 0x12;
1036 regs.txcfg = 0x120; // set drain threshold to 1024 bytes and
1037 // fill threshold to 32 bytes
1038 regs.rxcfg = 0x4; // set drain threshold to 16 bytes
1039 regs.srr = 0x0103; // set the silicon revision to rev B or 0x103
1040 regs.mibc = MIBC_FRZ;
1041 regs.vdr = 0x81; // set the vlan tag type to 802.1q
1042 regs.tesr = 0xc000; // TBI capable of both full and half duplex
1043 regs.brar = 0xffffffff;
1044
1045 extstsEnable = false;
1046 acceptBroadcast = false;
1047 acceptMulticast = false;
1048 acceptUnicast = false;
1049 acceptPerfect = false;
1050 acceptArp = false;
1051 }
1052
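/**
 * Start a DMA read for the receive engine, or mark it as waiting if
 * another DMA is outstanding or the device is not in the Running state
 * (e.g. while draining). Always returns true, so the caller exits its
 * state machine and resumes from the rxDmaReadEvent completion callback
 * or a later kick.
 */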
1053 bool
1054 NSGigE::doRxDmaRead()
1055 {
1056 assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
1057 rxDmaState = dmaReading;
1058
1059 if (dmaPending() || getState() != Running)
1060 rxDmaState = dmaReadWaiting;
1061 else
1062 dmaRead(rxDmaAddr, rxDmaLen, &rxDmaReadEvent, (uint8_t*)rxDmaData);
1063
1064 return true;
1065 }
1066
1067 void
1068 NSGigE::rxDmaReadDone()
1069 {
1070 assert(rxDmaState == dmaReading);
1071 rxDmaState = dmaIdle;
1072
1073 DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
1074 rxDmaAddr, rxDmaLen);
1075 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1076
1077 // If the transmit state machine has a pending DMA, let it go first
1078 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1079 txKick();
1080
1081 rxKick();
1082 }
1083
1084 bool
1085 NSGigE::doRxDmaWrite()
1086 {
1087 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
1088 rxDmaState = dmaWriting;
1089
1090 if (dmaPending() || getState() != Running)
1091 rxDmaState = dmaWriteWaiting;
1092 else
1093 dmaWrite(rxDmaAddr, rxDmaLen, &rxDmaWriteEvent, (uint8_t*)rxDmaData);
1094 return true;
1095 }
1096
1097 void
1098 NSGigE::rxDmaWriteDone()
1099 {
1100 assert(rxDmaState == dmaWriting);
1101 rxDmaState = dmaIdle;
1102
1103 DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
1104 rxDmaAddr, rxDmaLen);
1105 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1106
1107 // If the transmit state machine has a pending DMA, let it go first
1108 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1109 txKick();
1110
1111 rxKick();
1112 }
1113
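/**
 * Main driver for the receive state machine. Each call advances the
 * machine as far as it can; it exits when it must wait for a DMA to
 * complete, for data to arrive in the rxFifo, or for the next
 * state-machine clock tick.
 */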
1114 void
1115 NSGigE::rxKick()
1116 {
1117 bool is64bit = (bool)(regs.config & CFGR_M64ADDR);
1118
1119 DPRINTF(EthernetSM,
1120 "receive kick rxState=%s (rxBuf.size=%d) %d-bit\n",
1121 NsRxStateStrings[rxState], rxFifo.size(), is64bit ? 64 : 32);
1122
1123 Addr link, bufptr;
1124 uint32_t &cmdsts = is64bit ? rxDesc64.cmdsts : rxDesc32.cmdsts;
1125 uint32_t &extsts = is64bit ? rxDesc64.extsts : rxDesc32.extsts;
1126
1127 next:
1128 if (clock) {
1129 if (rxKickTick > curTick()) {
1130 DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
1131 rxKickTick);
1132
1133 goto exit;
1134 }
1135
1136 // Go to the next state machine clock tick.
1137 rxKickTick = curTick() + ticks(1);
1138 }
1139
1140 switch(rxDmaState) {
1141 case dmaReadWaiting:
1142 if (doRxDmaRead())
1143 goto exit;
1144 break;
1145 case dmaWriteWaiting:
1146 if (doRxDmaWrite())
1147 goto exit;
1148 break;
1149 default:
1150 break;
1151 }
1152
1153 link = is64bit ? (Addr)rxDesc64.link : (Addr)rxDesc32.link;
1154 bufptr = is64bit ? (Addr)rxDesc64.bufptr : (Addr)rxDesc32.bufptr;
1155
1156 // see state machine from spec for details
1157 // The way this works: if you finish work in one state and can
1158 // go directly to another, you do that by jumping to the label
1159 // "next". However, if you have intermediate work, such as a DMA,
1160 // that prevents you from going to the next state yet, you jump
1161 // to "exit" and leave the loop; when the DMA is done it will
1162 // trigger an event and come back to this loop.
1163 switch (rxState) {
1164 case rxIdle:
1165 if (!rxEnable) {
1166 DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n");
1167 goto exit;
1168 }
1169
1170 if (CRDD) {
1171 rxState = rxDescRefr;
1172
1173 rxDmaAddr = regs.rxdp & 0x3fffffff;
1174 rxDmaData =
1175 is64bit ? (void *)&rxDesc64.link : (void *)&rxDesc32.link;
1176 rxDmaLen = is64bit ? sizeof(rxDesc64.link) : sizeof(rxDesc32.link);
1177 rxDmaFree = dmaDescFree;
1178
1179 descDmaReads++;
1180 descDmaRdBytes += rxDmaLen;
1181
1182 if (doRxDmaRead())
1183 goto exit;
1184 } else {
1185 rxState = rxDescRead;
1186
1187 rxDmaAddr = regs.rxdp & 0x3fffffff;
1188 rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
1189 rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
1190 rxDmaFree = dmaDescFree;
1191
1192 descDmaReads++;
1193 descDmaRdBytes += rxDmaLen;
1194
1195 if (doRxDmaRead())
1196 goto exit;
1197 }
1198 break;
1199
1200 case rxDescRefr:
1201 if (rxDmaState != dmaIdle)
1202 goto exit;
1203
1204 rxState = rxAdvance;
1205 break;
1206
1207 case rxDescRead:
1208 if (rxDmaState != dmaIdle)
1209 goto exit;
1210
1211 DPRINTF(EthernetDesc, "rxDesc: addr=%08x read descriptor\n",
1212 regs.rxdp & 0x3fffffff);
1213 DPRINTF(EthernetDesc,
1214 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
1215 link, bufptr, cmdsts, extsts);
1216
1217 if (cmdsts & CMDSTS_OWN) {
1218 devIntrPost(ISR_RXIDLE);
1219 rxState = rxIdle;
1220 goto exit;
1221 } else {
1222 rxState = rxFifoBlock;
1223 rxFragPtr = bufptr;
1224 rxDescCnt = cmdsts & CMDSTS_LEN_MASK;
1225 }
1226 break;
1227
1228 case rxFifoBlock:
1229 if (!rxPacket) {
1230 /**
1231 * @todo in reality, we should be able to start processing
1232 * the packet as it arrives, and not have to wait for the
1233 * full packet to be in the receive fifo.
1234 */
1235 if (rxFifo.empty())
1236 goto exit;
1237
1238 DPRINTF(EthernetSM, "****processing receive of new packet****\n");
1239
1240 // If we don't have a packet, grab a new one from the fifo.
1241 rxPacket = rxFifo.front();
1242 rxPktBytes = rxPacket->length;
1243 rxPacketBufPtr = rxPacket->data;
1244
1245 #if TRACING_ON
1246 if (DTRACE(Ethernet)) {
1247 IpPtr ip(rxPacket);
1248 if (ip) {
1249 DPRINTF(Ethernet, "ID is %d\n", ip->id());
1250 TcpPtr tcp(ip);
1251 if (tcp) {
1252 DPRINTF(Ethernet,
1253 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1254 tcp->sport(), tcp->dport(), tcp->seq(),
1255 tcp->ack());
1256 }
1257 }
1258 }
1259 #endif
1260
1261 // sanity check - I think the driver behaves like this
1262 assert(rxDescCnt >= rxPktBytes);
1263 rxFifo.pop();
1264 }
1265
1266
1267 // don't need the && rxDescCnt > 0 if the driver sanity check
1268 // above holds
1269 if (rxPktBytes > 0) {
1270 rxState = rxFragWrite;
1271 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
1272 // check holds
1273 rxXferLen = rxPktBytes;
1274
1275 rxDmaAddr = rxFragPtr & 0x3fffffff;
1276 rxDmaData = rxPacketBufPtr;
1277 rxDmaLen = rxXferLen;
1278 rxDmaFree = dmaDataFree;
1279
1280 if (doRxDmaWrite())
1281 goto exit;
1282
1283 } else {
1284 rxState = rxDescWrite;
1285
1286 //if (rxPktBytes == 0) { /* packet is done */
1287 assert(rxPktBytes == 0);
1288 DPRINTF(EthernetSM, "done with receiving packet\n");
1289
1290 cmdsts |= CMDSTS_OWN;
1291 cmdsts &= ~CMDSTS_MORE;
1292 cmdsts |= CMDSTS_OK;
1293 cmdsts &= 0xffff0000;
1294 cmdsts += rxPacket->length; //i.e. set CMDSTS_SIZE
1295
1296 #if 0
1297 /*
1298 * The driver only uses these for its own stats keeping, which
1299 * we don't care about; they aren't necessary for functionality,
1300 * and computing them would just slow us down. If a later driver
1301 * version uses them for functional purposes, just re-enable
1302 * this block.
1303 */
1304 if (rxFilterEnable) {
1305 cmdsts &= ~CMDSTS_DEST_MASK;
1306 const EthAddr &dst = rxFifoFront()->dst();
1307 if (dst->unicast())
1308 cmdsts |= CMDSTS_DEST_SELF;
1309 if (dst->multicast())
1310 cmdsts |= CMDSTS_DEST_MULTI;
1311 if (dst->broadcast())
1312 cmdsts |= CMDSTS_DEST_MASK;
1313 }
1314 #endif
1315
1316 IpPtr ip(rxPacket);
1317 if (extstsEnable && ip) {
1318 extsts |= EXTSTS_IPPKT;
1319 rxIpChecksums++;
1320 if (cksum(ip) != 0) {
1321 DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
1322 extsts |= EXTSTS_IPERR;
1323 }
1324 TcpPtr tcp(ip);
1325 UdpPtr udp(ip);
1326 if (tcp) {
1327 extsts |= EXTSTS_TCPPKT;
1328 rxTcpChecksums++;
1329 if (cksum(tcp) != 0) {
1330 DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
1331 extsts |= EXTSTS_TCPERR;
1332
1333 }
1334 } else if (udp) {
1335 extsts |= EXTSTS_UDPPKT;
1336 rxUdpChecksums++;
1337 if (cksum(udp) != 0) {
1338 DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
1339 extsts |= EXTSTS_UDPERR;
1340 }
1341 }
1342 }
1343 rxPacket = 0;
1344
1345 /*
1346 * The driver seems to always receive into descriptor buffers
1347 * of size 1514, so a packet is never split across multiple
1348 * descriptors on the receive side; that case is therefore
1349 * not implemented, hence the assert above.
1350 */
1351
1352 DPRINTF(EthernetDesc,
1353 "rxDesc: addr=%08x writeback cmdsts extsts\n",
1354 regs.rxdp & 0x3fffffff);
1355 DPRINTF(EthernetDesc,
1356 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
1357 link, bufptr, cmdsts, extsts);
1358
1359 rxDmaAddr = regs.rxdp & 0x3fffffff;
1360 rxDmaData = &cmdsts;
1361 if (is64bit) {
1362 rxDmaAddr += offsetof(ns_desc64, cmdsts);
1363 rxDmaLen = sizeof(rxDesc64.cmdsts) + sizeof(rxDesc64.extsts);
1364 } else {
1365 rxDmaAddr += offsetof(ns_desc32, cmdsts);
1366 rxDmaLen = sizeof(rxDesc32.cmdsts) + sizeof(rxDesc32.extsts);
1367 }
1368 rxDmaFree = dmaDescFree;
1369
1370 descDmaWrites++;
1371 descDmaWrBytes += rxDmaLen;
1372
1373 if (doRxDmaWrite())
1374 goto exit;
1375 }
1376 break;
1377
1378 case rxFragWrite:
1379 if (rxDmaState != dmaIdle)
1380 goto exit;
1381
1382 rxPacketBufPtr += rxXferLen;
1383 rxFragPtr += rxXferLen;
1384 rxPktBytes -= rxXferLen;
1385
1386 rxState = rxFifoBlock;
1387 break;
1388
1389 case rxDescWrite:
1390 if (rxDmaState != dmaIdle)
1391 goto exit;
1392
1393 assert(cmdsts & CMDSTS_OWN);
1394
1395 assert(rxPacket == 0);
1396 devIntrPost(ISR_RXOK);
1397
1398 if (cmdsts & CMDSTS_INTR)
1399 devIntrPost(ISR_RXDESC);
1400
1401 if (!rxEnable) {
1402 DPRINTF(EthernetSM, "Halting the RX state machine\n");
1403 rxState = rxIdle;
1404 goto exit;
1405 } else
1406 rxState = rxAdvance;
1407 break;
1408
1409 case rxAdvance:
1410 if (link == 0) {
1411 devIntrPost(ISR_RXIDLE);
1412 rxState = rxIdle;
1413 CRDD = true;
1414 goto exit;
1415 } else {
1416 if (rxDmaState != dmaIdle)
1417 goto exit;
1418 rxState = rxDescRead;
1419 regs.rxdp = link;
1420 CRDD = false;
1421
1422 rxDmaAddr = regs.rxdp & 0x3fffffff;
1423 rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
1424 rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
1425 rxDmaFree = dmaDescFree;
1426
1427 if (doRxDmaRead())
1428 goto exit;
1429 }
1430 break;
1431
1432 default:
1433 panic("Invalid rxState!");
1434 }
1435
1436 DPRINTF(EthernetSM, "entering next rxState=%s\n",
1437 NsRxStateStrings[rxState]);
1438 goto next;
1439
1440 exit:
1441 /**
1442 * @todo do we want to schedule a future kick?
1443 */
1444 DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
1445 NsRxStateStrings[rxState]);
1446
1447 if (clock && !rxKickEvent.scheduled())
1448 schedule(rxKickEvent, rxKickTick);
1449 }
1450
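/**
 * Try to send the packet at the head of txFifo out over the link. On
 * success the packet is popped, the transmit stats are updated, and
 * ISR_TXOK is posted; if the fifo is still non-empty the txEvent is
 * rescheduled to try again after retryTime.
 */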
1451 void
1452 NSGigE::transmit()
1453 {
1454 if (txFifo.empty()) {
1455 DPRINTF(Ethernet, "nothing to transmit\n");
1456 return;
1457 }
1458
1459 DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n",
1460 txFifo.size());
1461 if (interface->sendPacket(txFifo.front())) {
1462 #if TRACING_ON
1463 if (DTRACE(Ethernet)) {
1464 IpPtr ip(txFifo.front());
1465 if (ip) {
1466 DPRINTF(Ethernet, "ID is %d\n", ip->id());
1467 TcpPtr tcp(ip);
1468 if (tcp) {
1469 DPRINTF(Ethernet,
1470 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1471 tcp->sport(), tcp->dport(), tcp->seq(),
1472 tcp->ack());
1473 }
1474 }
1475 }
1476 #endif
1477
1478 DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length);
1479 txBytes += txFifo.front()->length;
1480 txPackets++;
1481
1482 DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n",
1483 txFifo.avail());
1484 txFifo.pop();
1485
1486 /*
1487 * Normally we would write back the descriptor here, and ONLY
1488 * after that is done, send this interrupt. But since our
1489 * transmits never actually fail, just post the interrupt here;
1490 * otherwise the code has to stray from this nice format.
1491 * Besides, it's functionally the same.
1492 */
1493 devIntrPost(ISR_TXOK);
1494 }
1495
1496 if (!txFifo.empty() && !txEvent.scheduled()) {
1497 DPRINTF(Ethernet, "reschedule transmit\n");
1498 schedule(txEvent, curTick() + retryTime);
1499 }
1500 }
1501
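/**
 * Start a DMA read for the transmit engine, or mark it as waiting if
 * another DMA is outstanding or the device is not running; mirrors
 * doRxDmaRead() above.
 */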
1502 bool
1503 NSGigE::doTxDmaRead()
1504 {
1505 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
1506 txDmaState = dmaReading;
1507
1508 if (dmaPending() || getState() != Running)
1509 txDmaState = dmaReadWaiting;
1510 else
1511 dmaRead(txDmaAddr, txDmaLen, &txDmaReadEvent, (uint8_t*)txDmaData);
1512
1513 return true;
1514 }
1515
1516 void
1517 NSGigE::txDmaReadDone()
1518 {
1519 assert(txDmaState == dmaReading);
1520 txDmaState = dmaIdle;
1521
1522 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
1523 txDmaAddr, txDmaLen);
1524 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1525
1526 // If the receive state machine has a pending DMA, let it go first
1527 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1528 rxKick();
1529
1530 txKick();
1531 }
1532
1533 bool
1534 NSGigE::doTxDmaWrite()
1535 {
1536 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
1537 txDmaState = dmaWriting;
1538
1539 if (dmaPending() || getState() != Running)
1540 txDmaState = dmaWriteWaiting;
1541 else
1542 dmaWrite(txDmaAddr, txDmaLen, &txDmaWriteEvent, (uint8_t*)txDmaData);
1543 return true;
1544 }
1545
1546 void
1547 NSGigE::txDmaWriteDone()
1548 {
1549 assert(txDmaState == dmaWriting);
1550 txDmaState = dmaIdle;
1551
1552 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
1553 txDmaAddr, txDmaLen);
1554 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1555
1556 // If the receive state machine has a pending DMA, let it go first
1557 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1558 rxKick();
1559
1560 txKick();
1561 }
1562
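/**
 * Main driver for the transmit state machine; structured like rxKick(),
 * advancing through descriptor reads, fragment reads into the txFifo,
 * and descriptor writebacks until it has to wait for a DMA or the next
 * state-machine clock tick.
 */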
1563 void
1564 NSGigE::txKick()
1565 {
1566 bool is64bit = (bool)(regs.config & CFGR_M64ADDR);
1567
1568 DPRINTF(EthernetSM, "transmit kick txState=%s %d-bit\n",
1569 NsTxStateStrings[txState], is64bit ? 64 : 32);
1570
1571 Addr link, bufptr;
1572 uint32_t &cmdsts = is64bit ? txDesc64.cmdsts : txDesc32.cmdsts;
1573 uint32_t &extsts = is64bit ? txDesc64.extsts : txDesc32.extsts;
1574
1575 next:
1576 if (clock) {
1577 if (txKickTick > curTick()) {
1578 DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
1579 txKickTick);
1580 goto exit;
1581 }
1582
1583 // Go to the next state machine clock tick.
1584 txKickTick = curTick() + ticks(1);
1585 }
1586
1587 switch(txDmaState) {
1588 case dmaReadWaiting:
1589 if (doTxDmaRead())
1590 goto exit;
1591 break;
1592 case dmaWriteWaiting:
1593 if (doTxDmaWrite())
1594 goto exit;
1595 break;
1596 default:
1597 break;
1598 }
1599
1600 link = is64bit ? (Addr)txDesc64.link : (Addr)txDesc32.link;
1601 bufptr = is64bit ? (Addr)txDesc64.bufptr : (Addr)txDesc32.bufptr;
1602 switch (txState) {
1603 case txIdle:
1604 if (!txEnable) {
1605 DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n");
1606 goto exit;
1607 }
1608
1609 if (CTDD) {
1610 txState = txDescRefr;
1611
1612 txDmaAddr = regs.txdp & 0x3fffffff;
1613 txDmaData =
1614 is64bit ? (void *)&txDesc64.link : (void *)&txDesc32.link;
1615 txDmaLen = is64bit ? sizeof(txDesc64.link) : sizeof(txDesc32.link);
1616 txDmaFree = dmaDescFree;
1617
1618 descDmaReads++;
1619 descDmaRdBytes += txDmaLen;
1620
1621 if (doTxDmaRead())
1622 goto exit;
1623
1624 } else {
1625 txState = txDescRead;
1626
1627 txDmaAddr = regs.txdp & 0x3fffffff;
1628 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
1629 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
1630 txDmaFree = dmaDescFree;
1631
1632 descDmaReads++;
1633 descDmaRdBytes += txDmaLen;
1634
1635 if (doTxDmaRead())
1636 goto exit;
1637 }
1638 break;
1639
1640 case txDescRefr:
1641 if (txDmaState != dmaIdle)
1642 goto exit;
1643
1644 txState = txAdvance;
1645 break;
1646
1647 case txDescRead:
1648 if (txDmaState != dmaIdle)
1649 goto exit;
1650
1651 DPRINTF(EthernetDesc, "txDesc: addr=%08x read descriptor\n",
1652 regs.txdp & 0x3fffffff);
1653 DPRINTF(EthernetDesc,
1654 "txDesc: link=%#x bufptr=%#x cmdsts=%#08x extsts=%#08x\n",
1655 link, bufptr, cmdsts, extsts);
1656
1657 if (cmdsts & CMDSTS_OWN) {
1658 txState = txFifoBlock;
1659 txFragPtr = bufptr;
1660 txDescCnt = cmdsts & CMDSTS_LEN_MASK;
1661 } else {
1662 devIntrPost(ISR_TXIDLE);
1663 txState = txIdle;
1664 goto exit;
1665 }
1666 break;
1667
1668 case txFifoBlock:
1669 if (!txPacket) {
1670 DPRINTF(EthernetSM, "****starting the tx of a new packet****\n");
1671 txPacket = new EthPacketData(16384);
1672 txPacketBufPtr = txPacket->data;
1673 }
1674
1675 if (txDescCnt == 0) {
1676 DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n");
1677 if (cmdsts & CMDSTS_MORE) {
1678 DPRINTF(EthernetSM, "there are more descriptors to come\n");
1679 txState = txDescWrite;
1680
1681 cmdsts &= ~CMDSTS_OWN;
1682
1683 txDmaAddr = regs.txdp & 0x3fffffff;
1684 txDmaData = &cmdsts;
1685 if (is64bit) {
1686 txDmaAddr += offsetof(ns_desc64, cmdsts);
1687 txDmaLen = sizeof(txDesc64.cmdsts);
1688 } else {
1689 txDmaAddr += offsetof(ns_desc32, cmdsts);
1690 txDmaLen = sizeof(txDesc32.cmdsts);
1691 }
1692 txDmaFree = dmaDescFree;
1693
1694 if (doTxDmaWrite())
1695 goto exit;
1696
1697 } else { /* this packet is totally done */
1698 DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
1699 /* deal with the packet that just finished */
1700 if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
1701 IpPtr ip(txPacket);
1702 if (extsts & EXTSTS_UDPPKT) {
1703 UdpPtr udp(ip);
1704 if (udp) {
1705 udp->sum(0);
1706 udp->sum(cksum(udp));
1707 txUdpChecksums++;
1708 } else {
1709 Debug::breakpoint();
1710 warn_once("UDPPKT set, but not UDP!\n");
1711 }
1712 } else if (extsts & EXTSTS_TCPPKT) {
1713 TcpPtr tcp(ip);
1714 if (tcp) {
1715 tcp->sum(0);
1716 tcp->sum(cksum(tcp));
1717 txTcpChecksums++;
1718 } else {
1719 Debug::breakpoint();
1720 warn_once("TCPPKT set, but not TCP!\n");
1721 }
1722 }
1723 if (extsts & EXTSTS_IPPKT) {
1724 if (ip) {
1725 ip->sum(0);
1726 ip->sum(cksum(ip));
1727 txIpChecksums++;
1728 } else {
1729 Debug::breakpoint();
1730 warn_once("IPPKT set, but not IP!\n");
1731 }
1732 }
1733 }
1734
1735 txPacket->length = txPacketBufPtr - txPacket->data;
1736 // this is just because the receive side can't handle a
1737 // packet any bigger, so make sure we never send one
1738 if (txPacket->length > 1514)
1739 panic("transmit packet too large, %d > 1514\n",
1740 txPacket->length);
1741
1742 #ifndef NDEBUG
1743 bool success =
1744 #endif
1745 txFifo.push(txPacket);
1746 assert(success);
1747
1748 /*
1749 * The following section is not to spec, but functionally
1750 * it shouldn't be any different. Normally, the chip waits
1751 * until the transmit has occurred before writing back the
1752 * descriptor, because it has to see whether the packet was
1753 * successfully transmitted in order to decide whether to
1754 * set CMDSTS_OK or not.
1755 * However, in the simulator the transmit always succeeds,
1756 * and writing it exactly to spec would complicate the code,
1757 * so we just do it here.
1758 */
1759
1760 cmdsts &= ~CMDSTS_OWN;
1761 cmdsts |= CMDSTS_OK;
1762
1763 DPRINTF(EthernetDesc,
1764 "txDesc writeback: cmdsts=%08x extsts=%08x\n",
1765 cmdsts, extsts);
1766
1767 txDmaFree = dmaDescFree;
1768 txDmaAddr = regs.txdp & 0x3fffffff;
1769 txDmaData = &cmdsts;
1770 if (is64bit) {
1771 txDmaAddr += offsetof(ns_desc64, cmdsts);
1772 txDmaLen =
1773 sizeof(txDesc64.cmdsts) + sizeof(txDesc64.extsts);
1774 } else {
1775 txDmaAddr += offsetof(ns_desc32, cmdsts);
1776 txDmaLen =
1777 sizeof(txDesc32.cmdsts) + sizeof(txDesc32.extsts);
1778 }
1779
1780 descDmaWrites++;
1781 descDmaWrBytes += txDmaLen;
1782
1783 transmit();
1784 txPacket = 0;
1785
1786 if (!txEnable) {
1787 DPRINTF(EthernetSM, "halting TX state machine\n");
1788 txState = txIdle;
1789 goto exit;
1790 } else
1791 txState = txAdvance;
1792
1793 if (doTxDmaWrite())
1794 goto exit;
1795 }
1796 } else {
1797 DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
1798 if (!txFifo.full()) {
1799 txState = txFragRead;
1800
1801 /*
1802 * The number of bytes transferred is either whatever
1803 * is left in the descriptor (txDescCnt), or if there
1804 * is not enough room in the fifo, just whatever room
1805 * is left in the fifo
1806 */
1807 txXferLen = min<uint32_t>(txDescCnt, txFifo.avail());
1808
1809 txDmaAddr = txFragPtr & 0x3fffffff;
1810 txDmaData = txPacketBufPtr;
1811 txDmaLen = txXferLen;
1812 txDmaFree = dmaDataFree;
1813
1814 if (doTxDmaRead())
1815 goto exit;
1816 } else {
1817 txState = txFifoBlock;
1818 transmit();
1819
1820 goto exit;
1821 }
1822
1823 }
1824 break;
1825
1826 case txFragRead:
1827 if (txDmaState != dmaIdle)
1828 goto exit;
1829
1830 txPacketBufPtr += txXferLen;
1831 txFragPtr += txXferLen;
1832 txDescCnt -= txXferLen;
1833 txFifo.reserve(txXferLen);
1834
1835 txState = txFifoBlock;
1836 break;
1837
1838 case txDescWrite:
1839 if (txDmaState != dmaIdle)
1840 goto exit;
1841
1842 if (cmdsts & CMDSTS_INTR)
1843 devIntrPost(ISR_TXDESC);
1844
1845 if (!txEnable) {
1846 DPRINTF(EthernetSM, "halting TX state machine\n");
1847 txState = txIdle;
1848 goto exit;
1849 } else
1850 txState = txAdvance;
1851 break;
1852
1853 case txAdvance:
1854 if (link == 0) {
1855 devIntrPost(ISR_TXIDLE);
1856 txState = txIdle;
1857 goto exit;
1858 } else {
1859 if (txDmaState != dmaIdle)
1860 goto exit;
1861 txState = txDescRead;
1862 regs.txdp = link;
1863 CTDD = false;
1864
1865 txDmaAddr = link & 0x3fffffff;
1866 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
1867 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
1868 txDmaFree = dmaDescFree;
1869
1870 if (doTxDmaRead())
1871 goto exit;
1872 }
1873 break;
1874
1875 default:
1876 panic("invalid state");
1877 }
1878
1879 DPRINTF(EthernetSM, "entering next txState=%s\n",
1880 NsTxStateStrings[txState]);
1881 goto next;
1882
1883 exit:
1884 /**
1885 * @todo do we want to schedule a future kick?
1886 */
1887 DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
1888 NsTxStateStrings[txState]);
1889
1890 if (clock && !txKickEvent.scheduled())
1891 schedule(txKickEvent, txKickTick);
1892 }
1893
1894 /**
1895 * Advance the EEPROM state machine
1896 * Called on rising edge of EEPROM clock bit in MEAR
1897 */
1898 void
1899 NSGigE::eepromKick()
1900 {
1901 switch (eepromState) {
1902
1903 case eepromStart:
1904
1905 // Wait for start bit
1906 if (regs.mear & MEAR_EEDI) {
1907 // Set up to get 2 opcode bits
1908 eepromState = eepromGetOpcode;
1909 eepromBitsToRx = 2;
1910 eepromOpcode = 0;
1911 }
1912 break;
1913
1914 case eepromGetOpcode:
1915 eepromOpcode <<= 1;
1916 eepromOpcode += (regs.mear & MEAR_EEDI) ? 1 : 0;
1917 --eepromBitsToRx;
1918
1919 // Done getting opcode
1920 if (eepromBitsToRx == 0) {
1921 if (eepromOpcode != EEPROM_READ)
1922 panic("only EEPROM reads are implemented!");
1923
1924 // Set up to get address
1925 eepromState = eepromGetAddress;
1926 eepromBitsToRx = 6;
1927 eepromAddress = 0;
1928 }
1929 break;
1930
1931 case eepromGetAddress:
1932 eepromAddress <<= 1;
1933 eepromAddress += (regs.mear & MEAR_EEDI) ? 1 : 0;
1934 --eepromBitsToRx;
1935
1936 // Done getting address
1937 if (eepromBitsToRx == 0) {
1938
1939 if (eepromAddress >= EEPROM_SIZE)
1940 panic("EEPROM read access out of range!");
1941
1942 switch (eepromAddress) {
1943
1944 case EEPROM_PMATCH2_ADDR:
1945 eepromData = rom.perfectMatch[5];
1946 eepromData <<= 8;
1947 eepromData += rom.perfectMatch[4];
1948 break;
1949
1950 case EEPROM_PMATCH1_ADDR:
1951 eepromData = rom.perfectMatch[3];
1952 eepromData <<= 8;
1953 eepromData += rom.perfectMatch[2];
1954 break;
1955
1956 case EEPROM_PMATCH0_ADDR:
1957 eepromData = rom.perfectMatch[1];
1958 eepromData <<= 8;
1959 eepromData += rom.perfectMatch[0];
1960 break;
1961
1962 default:
1963 panic("FreeBSD driver only uses EEPROM to read PMATCH!");
1964 }
1965 // Set up to read data
1966 eepromState = eepromRead;
1967 eepromBitsToRx = 16;
1968
1969 // Clear data in bit
1970 regs.mear &= ~MEAR_EEDI;
1971 }
1972 break;
1973
1974 case eepromRead:
1975 // Clear Data Out bit
1976 regs.mear &= ~MEAR_EEDO;
1977 // Set bit to value of current EEPROM bit
1978 regs.mear |= (eepromData & 0x8000) ? MEAR_EEDO : 0x0;
1979
1980 eepromData <<= 1;
1981 --eepromBitsToRx;
1982
1983 // All done
1984 if (eepromBitsToRx == 0) {
1985 eepromState = eepromStart;
1986 }
1987 break;
1988
1989 default:
1990 panic("invalid EEPROM state");
1991 }
1992
1993 }
1994
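/**
 * Called when an outgoing packet transfer completes; if more data is
 * waiting in the txFifo, schedule the next transmit for the next tick.
 */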
1995 void
1996 NSGigE::transferDone()
1997 {
1998 if (txFifo.empty()) {
1999 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
2000 return;
2001 }
2002
2003 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
2004
2005 reschedule(txEvent, curTick() + ticks(1), true);
2006 }
2007
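/**
 * Destination-address receive filter. Returns true if the packet should
 * be dropped given the current accept* settings; note that multicast
 * hash filtering is faked and accepts every multicast packet.
 */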
2008 bool
2009 NSGigE::rxFilter(const EthPacketPtr &packet)
2010 {
2011 EthPtr eth = packet;
2012 bool drop = true;
2013 string type;
2014
2015 const EthAddr &dst = eth->dst();
2016 if (dst.unicast()) {
2017 // If we're accepting all unicast addresses
2018 if (acceptUnicast)
2019 drop = false;
2020
2021 // If we make a perfect match
2022 if (acceptPerfect && dst == rom.perfectMatch)
2023 drop = false;
2024
2025 if (acceptArp && eth->type() == ETH_TYPE_ARP)
2026 drop = false;
2027
2028 } else if (dst.broadcast()) {
2029 // if we're accepting broadcasts
2030 if (acceptBroadcast)
2031 drop = false;
2032
2033 } else if (dst.multicast()) {
2034 // if we're accepting all multicasts
2035 if (acceptMulticast)
2036 drop = false;
2037
2038 // Multicast hashing faked - all packets accepted
2039 if (multicastHashEnable)
2040 drop = false;
2041 }
2042
2043 if (drop) {
2044 DPRINTF(Ethernet, "rxFilter drop\n");
2045 DDUMP(EthernetData, packet->data, packet->length);
2046 }
2047
2048 return drop;
2049 }
2050
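/**
 * Entry point for packets arriving from the link. Packets are counted,
 * then dropped (but acknowledged) if receive is disabled, filtering is
 * disabled, or the filter rejects them; if the rxFifo is full the packet
 * is refused (return false) and ISR_RXORN is posted, otherwise it is
 * queued and the RX state machine is kicked.
 */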
2051 bool
2052 NSGigE::recvPacket(EthPacketPtr packet)
2053 {
2054 rxBytes += packet->length;
2055 rxPackets++;
2056
2057 DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
2058 rxFifo.avail());
2059
2060 if (!rxEnable) {
2061 DPRINTF(Ethernet, "receive disabled...packet dropped\n");
2062 return true;
2063 }
2064
2065 if (!rxFilterEnable) {
2066 DPRINTF(Ethernet,
2067 "receive packet filtering disabled . . . packet dropped\n");
2068 return true;
2069 }
2070
2071 if (rxFilter(packet)) {
2072 DPRINTF(Ethernet, "packet filtered...dropped\n");
2073 return true;
2074 }
2075
2076 if (rxFifo.avail() < packet->length) {
2077 #if TRACING_ON
2078 IpPtr ip(packet);
2079 TcpPtr tcp(ip);
2080 if (ip) {
2081 DPRINTF(Ethernet,
2082 "packet won't fit in receive buffer...pkt ID %d dropped\n",
2083 ip->id());
2084 if (tcp) {
2085 DPRINTF(Ethernet, "Seq=%d\n", tcp->seq());
2086 }
2087 }
2088 #endif
2089 droppedPackets++;
2090 devIntrPost(ISR_RXORN);
2091 return false;
2092 }
2093
2094 rxFifo.push(packet);
2095
2096 rxKick();
2097 return true;
2098 }
2099
2100
2101 void
2102 NSGigE::resume()
2103 {
2104 SimObject::resume();
2105
2106 // During drain we could have left the state machines in a waiting state and
2107 // they wouldn't get out until some other event occurred to kick them.
2108 // This way they'll get out immediately.
2109 txKick();
2110 rxKick();
2111 }
2112
2113
2114 //=====================================================================
2115 //
2116 // Serialization (checkpointing) support
2117 void
2118 NSGigE::serialize(ostream &os)
2119 {
2120 // Serialize the PciDev base class
2121 PciDev::serialize(os);
2122
2123 /*
2124 * Finalize any DMA events now.
2125 */
2126 // @todo will mem system save pending dma?
2127
2128 /*
2129 * Serialize the device registers
2130 */
2131 SERIALIZE_SCALAR(regs.command);
2132 SERIALIZE_SCALAR(regs.config);
2133 SERIALIZE_SCALAR(regs.mear);
2134 SERIALIZE_SCALAR(regs.ptscr);
2135 SERIALIZE_SCALAR(regs.isr);
2136 SERIALIZE_SCALAR(regs.imr);
2137 SERIALIZE_SCALAR(regs.ier);
2138 SERIALIZE_SCALAR(regs.ihr);
2139 SERIALIZE_SCALAR(regs.txdp);
2140 SERIALIZE_SCALAR(regs.txdp_hi);
2141 SERIALIZE_SCALAR(regs.txcfg);
2142 SERIALIZE_SCALAR(regs.gpior);
2143 SERIALIZE_SCALAR(regs.rxdp);
2144 SERIALIZE_SCALAR(regs.rxdp_hi);
2145 SERIALIZE_SCALAR(regs.rxcfg);
2146 SERIALIZE_SCALAR(regs.pqcr);
2147 SERIALIZE_SCALAR(regs.wcsr);
2148 SERIALIZE_SCALAR(regs.pcr);
2149 SERIALIZE_SCALAR(regs.rfcr);
2150 SERIALIZE_SCALAR(regs.rfdr);
2151 SERIALIZE_SCALAR(regs.brar);
2152 SERIALIZE_SCALAR(regs.brdr);
2153 SERIALIZE_SCALAR(regs.srr);
2154 SERIALIZE_SCALAR(regs.mibc);
2155 SERIALIZE_SCALAR(regs.vrcr);
2156 SERIALIZE_SCALAR(regs.vtcr);
2157 SERIALIZE_SCALAR(regs.vdr);
2158 SERIALIZE_SCALAR(regs.ccsr);
2159 SERIALIZE_SCALAR(regs.tbicr);
2160 SERIALIZE_SCALAR(regs.tbisr);
2161 SERIALIZE_SCALAR(regs.tanar);
2162 SERIALIZE_SCALAR(regs.tanlpar);
2163 SERIALIZE_SCALAR(regs.taner);
2164 SERIALIZE_SCALAR(regs.tesr);
2165
2166 SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
2167 SERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);
2168
2169 SERIALIZE_SCALAR(ioEnable);
2170
2171 /*
2172 * Serialize the data Fifos
2173 */
2174 rxFifo.serialize("rxFifo", os);
2175 txFifo.serialize("txFifo", os);
2176
2177 /*
2178 * Serialize the various helper variables
2179 */
2180 bool txPacketExists = txPacket;
2181 SERIALIZE_SCALAR(txPacketExists);
2182 if (txPacketExists) {
2183 txPacket->length = txPacketBufPtr - txPacket->data;
2184 txPacket->serialize("txPacket", os);
2185 uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
2186 SERIALIZE_SCALAR(txPktBufPtr);
2187 }
2188
2189 bool rxPacketExists = rxPacket;
2190 SERIALIZE_SCALAR(rxPacketExists);
2191 if (rxPacketExists) {
2192 rxPacket->serialize("rxPacket", os);
2193 uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
2194 SERIALIZE_SCALAR(rxPktBufPtr);
2195 }
2196
2197 SERIALIZE_SCALAR(txXferLen);
2198 SERIALIZE_SCALAR(rxXferLen);
2199
2200 /*
2201 * Serialize Cached Descriptors
2202 */
2203 SERIALIZE_SCALAR(rxDesc64.link);
2204 SERIALIZE_SCALAR(rxDesc64.bufptr);
2205 SERIALIZE_SCALAR(rxDesc64.cmdsts);
2206 SERIALIZE_SCALAR(rxDesc64.extsts);
2207 SERIALIZE_SCALAR(txDesc64.link);
2208 SERIALIZE_SCALAR(txDesc64.bufptr);
2209 SERIALIZE_SCALAR(txDesc64.cmdsts);
2210 SERIALIZE_SCALAR(txDesc64.extsts);
2211 SERIALIZE_SCALAR(rxDesc32.link);
2212 SERIALIZE_SCALAR(rxDesc32.bufptr);
2213 SERIALIZE_SCALAR(rxDesc32.cmdsts);
2214 SERIALIZE_SCALAR(rxDesc32.extsts);
2215 SERIALIZE_SCALAR(txDesc32.link);
2216 SERIALIZE_SCALAR(txDesc32.bufptr);
2217 SERIALIZE_SCALAR(txDesc32.cmdsts);
2218 SERIALIZE_SCALAR(txDesc32.extsts);
2219 SERIALIZE_SCALAR(extstsEnable);
2220
2221 /*
2222 * Serialize tx state machine
2223 */
2224 int txState = this->txState;
2225 SERIALIZE_SCALAR(txState);
2226 SERIALIZE_SCALAR(txEnable);
2227 SERIALIZE_SCALAR(CTDD);
2228 SERIALIZE_SCALAR(txFragPtr);
2229 SERIALIZE_SCALAR(txDescCnt);
2230 int txDmaState = this->txDmaState;
2231 SERIALIZE_SCALAR(txDmaState);
2232 SERIALIZE_SCALAR(txKickTick);
2233
2234 /*
2235 * Serialize rx state machine
2236 */
2237 int rxState = this->rxState;
2238 SERIALIZE_SCALAR(rxState);
2239 SERIALIZE_SCALAR(rxEnable);
2240 SERIALIZE_SCALAR(CRDD);
2241 SERIALIZE_SCALAR(rxPktBytes);
2242 SERIALIZE_SCALAR(rxFragPtr);
2243 SERIALIZE_SCALAR(rxDescCnt);
2244 int rxDmaState = this->rxDmaState;
2245 SERIALIZE_SCALAR(rxDmaState);
2246 SERIALIZE_SCALAR(rxKickTick);
2247
2248 /*
2249 * Serialize EEPROM state machine
2250 */
2251 int eepromState = this->eepromState;
2252 SERIALIZE_SCALAR(eepromState);
2253 SERIALIZE_SCALAR(eepromClk);
2254 SERIALIZE_SCALAR(eepromBitsToRx);
2255 SERIALIZE_SCALAR(eepromOpcode);
2256 SERIALIZE_SCALAR(eepromAddress);
2257 SERIALIZE_SCALAR(eepromData);
2258
2259 /*
2260 * If there's a pending transmit, store the time so we can
2261 * reschedule it later
2262 */
2263 Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick() : 0;
2264 SERIALIZE_SCALAR(transmitTick);
2265
2266 /*
2267 * receive address filter settings
2268 */
2269 SERIALIZE_SCALAR(rxFilterEnable);
2270 SERIALIZE_SCALAR(acceptBroadcast);
2271 SERIALIZE_SCALAR(acceptMulticast);
2272 SERIALIZE_SCALAR(acceptUnicast);
2273 SERIALIZE_SCALAR(acceptPerfect);
2274 SERIALIZE_SCALAR(acceptArp);
2275 SERIALIZE_SCALAR(multicastHashEnable);
2276
2277 /*
2278 * Keep track of pending interrupt status.
2279 */
2280 SERIALIZE_SCALAR(intrTick);
2281 SERIALIZE_SCALAR(cpuPendingIntr);
2282 Tick intrEventTick = 0;
2283 if (intrEvent)
2284 intrEventTick = intrEvent->when();
2285 SERIALIZE_SCALAR(intrEventTick);
2286
2287 }
2288
2289 void
2290 NSGigE::unserialize(Checkpoint *cp, const std::string &section)
2291 {
2292 // Unserialize the PciDev base class
2293 PciDev::unserialize(cp, section);
2294
2295 UNSERIALIZE_SCALAR(regs.command);
2296 UNSERIALIZE_SCALAR(regs.config);
2297 UNSERIALIZE_SCALAR(regs.mear);
2298 UNSERIALIZE_SCALAR(regs.ptscr);
2299 UNSERIALIZE_SCALAR(regs.isr);
2300 UNSERIALIZE_SCALAR(regs.imr);
2301 UNSERIALIZE_SCALAR(regs.ier);
2302 UNSERIALIZE_SCALAR(regs.ihr);
2303 UNSERIALIZE_SCALAR(regs.txdp);
2304 UNSERIALIZE_SCALAR(regs.txdp_hi);
2305 UNSERIALIZE_SCALAR(regs.txcfg);
2306 UNSERIALIZE_SCALAR(regs.gpior);
2307 UNSERIALIZE_SCALAR(regs.rxdp);
2308 UNSERIALIZE_SCALAR(regs.rxdp_hi);
2309 UNSERIALIZE_SCALAR(regs.rxcfg);
2310 UNSERIALIZE_SCALAR(regs.pqcr);
2311 UNSERIALIZE_SCALAR(regs.wcsr);
2312 UNSERIALIZE_SCALAR(regs.pcr);
2313 UNSERIALIZE_SCALAR(regs.rfcr);
2314 UNSERIALIZE_SCALAR(regs.rfdr);
2315 UNSERIALIZE_SCALAR(regs.brar);
2316 UNSERIALIZE_SCALAR(regs.brdr);
2317 UNSERIALIZE_SCALAR(regs.srr);
2318 UNSERIALIZE_SCALAR(regs.mibc);
2319 UNSERIALIZE_SCALAR(regs.vrcr);
2320 UNSERIALIZE_SCALAR(regs.vtcr);
2321 UNSERIALIZE_SCALAR(regs.vdr);
2322 UNSERIALIZE_SCALAR(regs.ccsr);
2323 UNSERIALIZE_SCALAR(regs.tbicr);
2324 UNSERIALIZE_SCALAR(regs.tbisr);
2325 UNSERIALIZE_SCALAR(regs.tanar);
2326 UNSERIALIZE_SCALAR(regs.tanlpar);
2327 UNSERIALIZE_SCALAR(regs.taner);
2328 UNSERIALIZE_SCALAR(regs.tesr);
2329
2330 UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
2331 UNSERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);
2332
2333 UNSERIALIZE_SCALAR(ioEnable);
2334
2335 /*
2336 * unserialize the data fifos
2337 */
2338 rxFifo.unserialize("rxFifo", cp, section);
2339 txFifo.unserialize("txFifo", cp, section);
2340
2341 /*
2342 * unserialize the various helper variables
2343 */
2344 bool txPacketExists;
2345 UNSERIALIZE_SCALAR(txPacketExists);
2346 if (txPacketExists) {
2347 txPacket = new EthPacketData(16384);
2348 txPacket->unserialize("txPacket", cp, section);
2349 uint32_t txPktBufPtr;
2350 UNSERIALIZE_SCALAR(txPktBufPtr);
2351 txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
2352 } else
2353 txPacket = 0;
2354
2355 bool rxPacketExists;
2356 UNSERIALIZE_SCALAR(rxPacketExists);
2357 rxPacket = 0;
2358 if (rxPacketExists) {
2359 rxPacket = new EthPacketData(16384);
2360 rxPacket->unserialize("rxPacket", cp, section);
2361 uint32_t rxPktBufPtr;
2362 UNSERIALIZE_SCALAR(rxPktBufPtr);
2363 rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
2364 } else
2365 rxPacket = 0;
2366
2367 UNSERIALIZE_SCALAR(txXferLen);
2368 UNSERIALIZE_SCALAR(rxXferLen);
2369
2370 /*
2371 * Unserialize Cached Descriptors
2372 */
2373 UNSERIALIZE_SCALAR(rxDesc64.link);
2374 UNSERIALIZE_SCALAR(rxDesc64.bufptr);
2375 UNSERIALIZE_SCALAR(rxDesc64.cmdsts);
2376 UNSERIALIZE_SCALAR(rxDesc64.extsts);
2377 UNSERIALIZE_SCALAR(txDesc64.link);
2378 UNSERIALIZE_SCALAR(txDesc64.bufptr);
2379 UNSERIALIZE_SCALAR(txDesc64.cmdsts);
2380 UNSERIALIZE_SCALAR(txDesc64.extsts);
2381 UNSERIALIZE_SCALAR(rxDesc32.link);
2382 UNSERIALIZE_SCALAR(rxDesc32.bufptr);
2383 UNSERIALIZE_SCALAR(rxDesc32.cmdsts);
2384 UNSERIALIZE_SCALAR(rxDesc32.extsts);
2385 UNSERIALIZE_SCALAR(txDesc32.link);
2386 UNSERIALIZE_SCALAR(txDesc32.bufptr);
2387 UNSERIALIZE_SCALAR(txDesc32.cmdsts);
2388 UNSERIALIZE_SCALAR(txDesc32.extsts);
2389 UNSERIALIZE_SCALAR(extstsEnable);
2390
2391 /*
2392 * unserialize tx state machine
2393 */
2394 int txState;
2395 UNSERIALIZE_SCALAR(txState);
2396 this->txState = (TxState) txState;
2397 UNSERIALIZE_SCALAR(txEnable);
2398 UNSERIALIZE_SCALAR(CTDD);
2399 UNSERIALIZE_SCALAR(txFragPtr);
2400 UNSERIALIZE_SCALAR(txDescCnt);
2401 int txDmaState;
2402 UNSERIALIZE_SCALAR(txDmaState);
2403 this->txDmaState = (DmaState) txDmaState;
2404 UNSERIALIZE_SCALAR(txKickTick);
2405 if (txKickTick)
2406 schedule(txKickEvent, txKickTick);
2407
2408 /*
2409 * unserialize rx state machine
2410 */
2411 int rxState;
2412 UNSERIALIZE_SCALAR(rxState);
2413 this->rxState = (RxState) rxState;
2414 UNSERIALIZE_SCALAR(rxEnable);
2415 UNSERIALIZE_SCALAR(CRDD);
2416 UNSERIALIZE_SCALAR(rxPktBytes);
2417 UNSERIALIZE_SCALAR(rxFragPtr);
2418 UNSERIALIZE_SCALAR(rxDescCnt);
2419 int rxDmaState;
2420 UNSERIALIZE_SCALAR(rxDmaState);
2421 this->rxDmaState = (DmaState) rxDmaState;
2422 UNSERIALIZE_SCALAR(rxKickTick);
2423 if (rxKickTick)
2424 schedule(rxKickEvent, rxKickTick);
2425
2426 /*
2427 * Unserialize EEPROM state machine
2428 */
2429 int eepromState;
2430 UNSERIALIZE_SCALAR(eepromState);
2431 this->eepromState = (EEPROMState) eepromState;
2432 UNSERIALIZE_SCALAR(eepromClk);
2433 UNSERIALIZE_SCALAR(eepromBitsToRx);
2434 UNSERIALIZE_SCALAR(eepromOpcode);
2435 UNSERIALIZE_SCALAR(eepromAddress);
2436 UNSERIALIZE_SCALAR(eepromData);
2437
2438 /*
2439 * If there's a pending transmit, reschedule it now
2440 */
2441 Tick transmitTick;
2442 UNSERIALIZE_SCALAR(transmitTick);
2443 if (transmitTick)
2444 schedule(txEvent, curTick() + transmitTick);
2445
2446 /*
2447 * unserialize receive address filter settings
2448 */
2449 UNSERIALIZE_SCALAR(rxFilterEnable);
2450 UNSERIALIZE_SCALAR(acceptBroadcast);
2451 UNSERIALIZE_SCALAR(acceptMulticast);
2452 UNSERIALIZE_SCALAR(acceptUnicast);
2453 UNSERIALIZE_SCALAR(acceptPerfect);
2454 UNSERIALIZE_SCALAR(acceptArp);
2455 UNSERIALIZE_SCALAR(multicastHashEnable);
2456
2457 /*
2458 * Keep track of pending interrupt status.
2459 */
2460 UNSERIALIZE_SCALAR(intrTick);
2461 UNSERIALIZE_SCALAR(cpuPendingIntr);
2462 Tick intrEventTick;
2463 UNSERIALIZE_SCALAR(intrEventTick);
2464 if (intrEventTick) {
2465 intrEvent = new IntrEvent(this, true);
2466 schedule(intrEvent, intrEventTick);
2467 }
2468 }
2469
2470 NSGigE *
2471 NSGigEParams::create()
2472 {
2473 return new NSGigE(this);
2474 }