x86: changes to apic, keyboard
[gem5.git] / src / dev / ns_gige.cc
1 /*
2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Nathan Binkert
29 * Lisa Hsu
30 */
31
32 /** @file
33 * Device module for modelling the National Semiconductor
34 * DP83820 ethernet controller. Does not support priority queueing
35 */
36 #include <deque>
37 #include <string>
38
39 #include "base/debug.hh"
40 #include "base/inet.hh"
41 #include "base/types.hh"
42 #include "config/the_isa.hh"
43 #include "cpu/thread_context.hh"
44 #include "debug/EthernetAll.hh"
45 #include "dev/etherlink.hh"
46 #include "dev/ns_gige.hh"
47 #include "dev/pciconfigall.hh"
48 #include "mem/packet.hh"
49 #include "mem/packet_access.hh"
50 #include "params/NSGigE.hh"
51 #include "sim/system.hh"
52
53 // clang complains about std::set being overloaded with Packet::set if
54 // we open up the entire namespace std
55 using std::min;
56 using std::ostream;
57 using std::string;
58
// Human-readable names for the receive state machine, indexed by the
// rxState enum value (used in EthernetSM DPRINTFs, e.g. in rxKick()).
// Order must match the RxState enum declared in ns_gige.hh.
const char *NsRxStateStrings[] =
{
    "rxIdle",
    "rxDescRefr",
    "rxDescRead",
    "rxFifoBlock",
    "rxFragWrite",
    "rxDescWrite",
    "rxAdvance"
};
69
// Human-readable names for the transmit state machine, indexed by the
// txState enum value; mirrors NsRxStateStrings above.
// Order must match the TxState enum declared in ns_gige.hh.
const char *NsTxStateStrings[] =
{
    "txIdle",
    "txDescRefr",
    "txDescRead",
    "txFifoBlock",
    "txFragRead",
    "txDescWrite",
    "txAdvance"
};
80
// Human-readable names for the rx/tx DMA sub-state machines
// (dmaIdle .. dmaWriteWaiting) for debug output.
// Order must match the DmaState enum declared in ns_gige.hh.
const char *NsDmaState[] =
{
    "dmaIdle",
    "dmaReading",
    "dmaWriting",
    "dmaReadWaiting",
    "dmaWriteWaiting"
};
89
90 using namespace Net;
91 using namespace TheISA;
92
93 ///////////////////////////////////////////////////////////////////////
94 //
95 // NSGigE PCI Device
96 //
/**
 * Construct the device model.  All device state (fifos, state machines,
 * DMA bookkeeping, filter flags, interrupt state) starts in its reset
 * state; tunables (fifo sizes, DMA delays/factors, tx/rx/intr delays)
 * come from the Python-side NSGigE params object.
 *
 * NOTE: the member-initializer list must stay in declaration order of
 * the members in ns_gige.hh — do not reorder casually.
 */
NSGigE::NSGigE(Params *p)
    : EtherDevBase(p), ioEnable(false),
      txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size),
      txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
      txXferLen(0), rxXferLen(0), rxDmaFree(false), txDmaFree(false),
      txState(txIdle), txEnable(false), CTDD(false), txHalt(false),
      txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
      rxEnable(false), CRDD(false), rxPktBytes(0), rxHalt(false),
      rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
      eepromState(eepromStart), eepromClk(false), eepromBitsToRx(0),
      eepromOpcode(0), eepromAddress(0), eepromData(0),
      dmaReadDelay(p->dma_read_delay), dmaWriteDelay(p->dma_write_delay),
      dmaReadFactor(p->dma_read_factor), dmaWriteFactor(p->dma_write_factor),
      rxDmaData(NULL), rxDmaAddr(0), rxDmaLen(0),
      txDmaData(NULL), txDmaAddr(0), txDmaLen(0),
      rxDmaReadEvent(this), rxDmaWriteEvent(this),
      txDmaReadEvent(this), txDmaWriteEvent(this),
      dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free),
      txDelay(p->tx_delay), rxDelay(p->rx_delay),
      rxKickTick(0), rxKickEvent(this), txKickTick(0), txKickEvent(this),
      txEvent(this), rxFilterEnable(p->rx_filter),
      acceptBroadcast(false), acceptMulticast(false), acceptUnicast(false),
      acceptPerfect(false), acceptArp(false), multicastHashEnable(false),
      intrDelay(p->intr_delay), intrTick(0), cpuPendingIntr(false),
      intrEvent(0), interface(0)
{

    // Single ethernet port; owned by this device (freed in ~NSGigE).
    interface = new NSGigEInt(name() + ".int0", this);

    // Put the register file in its documented reset state, then seed the
    // perfect-match filter ROM with the configured MAC address.
    regsReset();
    memcpy(&rom.perfectMatch, p->hardware_address.bytes(), ETH_ADDR_LEN);

    // Zero both 32- and 64-bit descriptor caches so stale stack garbage
    // can never leak into descriptor writebacks.
    memset(&rxDesc32, 0, sizeof(rxDesc32));
    memset(&txDesc32, 0, sizeof(txDesc32));
    memset(&rxDesc64, 0, sizeof(rxDesc64));
    memset(&txDesc64, 0, sizeof(txDesc64));
}
135
// Destructor: release the ethernet port allocated in the constructor.
NSGigE::~NSGigE()
{
    delete interface;
}
140
/**
 * This is to write to the PCI general configuration registers.
 * Standard-header offsets are forwarded to the generic PciDev handler;
 * device-specific config space is not modelled.  The only side effect
 * tracked here is the IO-space-enable bit, which gates all PIO access
 * (see the assert(ioEnable) in read()/write()).
 *
 * @param pkt the config-space write request
 * @return the configured config access latency
 */
Tick
NSGigE::writeConfig(PacketPtr pkt)
{
    // NOTE(review): PCI_CONFIG_SIZE is used as an AND-mask here, which is
    // only correct if it is of the form 2^n - 1 (e.g. 0xff) — confirm
    // against its definition in pcireg.h.
    int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
    if (offset < PCI_DEVICE_SPECIFIC)
        PciDev::writeConfig(pkt);
    else
        panic("Device specific PCI config space not implemented!\n");

    switch (offset) {
        // seems to work fine without all these PCI settings, but i
        // put in the IO to double check, an assertion will fail if we
        // need to properly implement it
      case PCI_COMMAND:
        if (config.data[offset] & PCI_CMD_IOSE)
            ioEnable = true;
        else
            ioEnable = false;
        break;
    }

    return configDelay;
}
167
168 EtherInt*
169 NSGigE::getEthPort(const std::string &if_name, int idx)
170 {
171 if (if_name == "interface") {
172 if (interface->getPeer())
173 panic("interface already connected to\n");
174 return interface;
175 }
176 return NULL;
177 }
178
/**
 * This reads the device registers, which are detailed in the NS83820
 * spec sheet.
 *
 * Accesses must be 32 bits wide (except MIB range, which is stubbed to
 * zero).  Reads of CR and MIBC have read-side effects (self-clearing
 * bits); reading ISR clears all implemented interrupt causes.
 *
 * @param pkt the PIO read request; its payload receives the register value
 * @return the configured PIO latency
 */
Tick
NSGigE::read(PacketPtr pkt)
{
    assert(ioEnable);

    pkt->allocate();

    //The mask is to give you only the offset into the device register file
    Addr daddr = pkt->getAddr() & 0xfff;
    DPRINTF(EthernetPIO, "read da=%#x pa=%#x size=%d\n",
            daddr, pkt->getAddr(), pkt->getSize());


    // there are some reserved registers, you can see ns_gige_reg.h and
    // the spec sheet for details
    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        return readConfig(pkt);
    } else if (daddr >= MIB_START && daddr <= MIB_END) {
        // don't implement all the MIB's. hopefully the kernel
        // doesn't actually DEPEND upon their values
        // MIB are just hardware stats keepers
        pkt->set<uint32_t>(0);
        pkt->makeAtomicResponse();
        return pioDelay;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    // reg aliases the packet payload, so each case below stores its
    // result straight into the response data.
    assert(pkt->getSize() == sizeof(uint32_t));
    uint32_t &reg = *pkt->getPtr<uint32_t>();
    uint16_t rfaddr;

    switch (daddr) {
      case CR:
        reg = regs.command;
        //these are supposed to be cleared on a read
        reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
        break;

      case CFGR:
        reg = regs.config;
        break;

      case MEAR:
        reg = regs.mear;
        break;

      case PTSCR:
        reg = regs.ptscr;
        break;

      case ISR:
        // reading the interrupt status register acknowledges (clears)
        // every implemented interrupt cause
        reg = regs.isr;
        devIntrClear(ISR_ALL);
        break;

      case IMR:
        reg = regs.imr;
        break;

      case IER:
        reg = regs.ier;
        break;

      case IHR:
        reg = regs.ihr;
        break;

      case TXDP:
        reg = regs.txdp;
        break;

      case TXDP_HI:
        reg = regs.txdp_hi;
        break;

      case TX_CFG:
        reg = regs.txcfg;
        break;

      case GPIOR:
        reg = regs.gpior;
        break;

      case RXDP:
        reg = regs.rxdp;
        break;

      case RXDP_HI:
        reg = regs.rxdp_hi;
        break;

      case RX_CFG:
        reg = regs.rxcfg;
        break;

      case PQCR:
        reg = regs.pqcr;
        break;

      case WCSR:
        reg = regs.wcsr;
        break;

      case PCR:
        reg = regs.pcr;
        break;

        // see the spec sheet for how RFCR and RFDR work
        // basically, you write to RFCR to tell the machine
        // what you want to do next, then you act upon RFDR,
        // and the device will be prepared b/c of what you
        // wrote to RFCR
      case RFCR:
        reg = regs.rfcr;
        break;

      case RFDR:
        // RFCR_RFADDR selects which filter word RFDR exposes:
        // 0x000/0x002/0x004 are the perfect-match MAC octet pairs
        // (little-endian: low octet in the low byte), anything in
        // [FHASH_ADDR, FHASH_ADDR+FHASH_SIZE) is the multicast hash table.
        rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
        switch (rfaddr) {
          // Read from perfect match ROM octets
          case 0x000:
            reg = rom.perfectMatch[1];
            reg = reg << 8;
            reg += rom.perfectMatch[0];
            break;
          case 0x002:
            reg = rom.perfectMatch[3] << 8;
            reg += rom.perfectMatch[2];
            break;
          case 0x004:
            reg = rom.perfectMatch[5] << 8;
            reg += rom.perfectMatch[4];
            break;
          default:
            // Read filter hash table
            if (rfaddr >= FHASH_ADDR &&
                rfaddr < FHASH_ADDR + FHASH_SIZE) {

                // Only word-aligned reads supported
                if (rfaddr % 2)
                    panic("unaligned read from filter hash table!");

                reg = rom.filterHash[rfaddr - FHASH_ADDR + 1] << 8;
                reg += rom.filterHash[rfaddr - FHASH_ADDR];
                break;
            }

            panic("reading RFDR for something other than pattern"
                  " matching or hashing! %#x\n", rfaddr);
        }
        break;

      case SRR:
        reg = regs.srr;
        break;

      case MIBC:
        reg = regs.mibc;
        // MIBS (stats strobe) and ACLR (clear) are self-clearing on read
        reg &= ~(MIBC_MIBS | MIBC_ACLR);
        break;

      case VRCR:
        reg = regs.vrcr;
        break;

      case VTCR:
        reg = regs.vtcr;
        break;

      case VDR:
        reg = regs.vdr;
        break;

      case CCSR:
        reg = regs.ccsr;
        break;

      case TBICR:
        reg = regs.tbicr;
        break;

      case TBISR:
        reg = regs.tbisr;
        break;

      case TANAR:
        reg = regs.tanar;
        break;

      case TANLPAR:
        reg = regs.tanlpar;
        break;

      case TANER:
        reg = regs.taner;
        break;

      case TESR:
        reg = regs.tesr;
        break;

      case M5REG:
        // m5-specific register: advertises the simulator-side feature
        // flags (rx/tx kernel-thread binding, RSS) to the guest driver
        reg = 0;
        if (params()->rx_thread)
            reg |= M5REG_RX_THREAD;
        if (params()->tx_thread)
            reg |= M5REG_TX_THREAD;
        if (params()->rss)
            reg |= M5REG_RSS;
        break;

      default:
        panic("reading unimplemented register: addr=%#x", daddr);
    }

    DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
            daddr, reg, reg);

    pkt->makeAtomicResponse();
    return pioDelay;
}
406
407 Tick
408 NSGigE::write(PacketPtr pkt)
409 {
410 assert(ioEnable);
411
412 Addr daddr = pkt->getAddr() & 0xfff;
413 DPRINTF(EthernetPIO, "write da=%#x pa=%#x size=%d\n",
414 daddr, pkt->getAddr(), pkt->getSize());
415
416 if (daddr > LAST && daddr <= RESERVED) {
417 panic("Accessing reserved register");
418 } else if (daddr > RESERVED && daddr <= 0x3FC) {
419 return writeConfig(pkt);
420 } else if (daddr > 0x3FC)
421 panic("Something is messed up!\n");
422
423 if (pkt->getSize() == sizeof(uint32_t)) {
424 uint32_t reg = pkt->get<uint32_t>();
425 uint16_t rfaddr;
426
427 DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);
428
429 switch (daddr) {
430 case CR:
431 regs.command = reg;
432 if (reg & CR_TXD) {
433 txEnable = false;
434 } else if (reg & CR_TXE) {
435 txEnable = true;
436
437 // the kernel is enabling the transmit machine
438 if (txState == txIdle)
439 txKick();
440 }
441
442 if (reg & CR_RXD) {
443 rxEnable = false;
444 } else if (reg & CR_RXE) {
445 rxEnable = true;
446
447 if (rxState == rxIdle)
448 rxKick();
449 }
450
451 if (reg & CR_TXR)
452 txReset();
453
454 if (reg & CR_RXR)
455 rxReset();
456
457 if (reg & CR_SWI)
458 devIntrPost(ISR_SWI);
459
460 if (reg & CR_RST) {
461 txReset();
462 rxReset();
463
464 regsReset();
465 }
466 break;
467
468 case CFGR:
469 if (reg & CFGR_LNKSTS ||
470 reg & CFGR_SPDSTS ||
471 reg & CFGR_DUPSTS ||
472 reg & CFGR_RESERVED ||
473 reg & CFGR_T64ADDR ||
474 reg & CFGR_PCI64_DET) {
475 // First clear all writable bits
476 regs.config &= CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
477 CFGR_RESERVED | CFGR_T64ADDR |
478 CFGR_PCI64_DET;
479 // Now set the appropriate writable bits
480 regs.config |= reg & ~(CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
481 CFGR_RESERVED | CFGR_T64ADDR |
482 CFGR_PCI64_DET);
483 }
484
485 // all these #if 0's are because i don't THINK the kernel needs to
486 // have these implemented. if there is a problem relating to one of
487 // these, you may need to add functionality in.
488
489 // grouped together and #if 0'ed to avoid empty if body and make clang happy
490 #if 0
491 if (reg & CFGR_TBI_EN) ;
492 if (reg & CFGR_MODE_1000) ;
493
494 if (reg & CFGR_PINT_DUPSTS ||
495 reg & CFGR_PINT_LNKSTS ||
496 reg & CFGR_PINT_SPDSTS)
497 ;
498
499 if (reg & CFGR_TMRTEST) ;
500 if (reg & CFGR_MRM_DIS) ;
501 if (reg & CFGR_MWI_DIS) ;
502
503 if (reg & CFGR_DATA64_EN) ;
504 if (reg & CFGR_M64ADDR) ;
505 if (reg & CFGR_PHY_RST) ;
506 if (reg & CFGR_PHY_DIS) ;
507
508 if (reg & CFGR_REQALG) ;
509 if (reg & CFGR_SB) ;
510 if (reg & CFGR_POW) ;
511 if (reg & CFGR_EXD) ;
512 if (reg & CFGR_PESEL) ;
513 if (reg & CFGR_BROM_DIS) ;
514 if (reg & CFGR_EXT_125) ;
515 if (reg & CFGR_BEM) ;
516
517 if (reg & CFGR_T64ADDR) ;
518 // panic("CFGR_T64ADDR is read only register!\n");
519 #endif
520 if (reg & CFGR_AUTO_1000)
521 panic("CFGR_AUTO_1000 not implemented!\n");
522
523 if (reg & CFGR_PCI64_DET)
524 panic("CFGR_PCI64_DET is read only register!\n");
525
526 if (reg & CFGR_EXTSTS_EN)
527 extstsEnable = true;
528 else
529 extstsEnable = false;
530 break;
531
532 case MEAR:
533 // Clear writable bits
534 regs.mear &= MEAR_EEDO;
535 // Set appropriate writable bits
536 regs.mear |= reg & ~MEAR_EEDO;
537
538 // FreeBSD uses the EEPROM to read PMATCH (for the MAC address)
539 // even though it could get it through RFDR
540 if (reg & MEAR_EESEL) {
541 // Rising edge of clock
542 if (reg & MEAR_EECLK && !eepromClk)
543 eepromKick();
544 }
545 else {
546 eepromState = eepromStart;
547 regs.mear &= ~MEAR_EEDI;
548 }
549
550 eepromClk = reg & MEAR_EECLK;
551
552 // since phy is completely faked, MEAR_MD* don't matter
553
554 // grouped together and #if 0'ed to avoid empty if body and make clang happy
555 #if 0
556 if (reg & MEAR_MDIO) ;
557 if (reg & MEAR_MDDIR) ;
558 if (reg & MEAR_MDC) ;
559 #endif
560 break;
561
562 case PTSCR:
563 regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
564 // these control BISTs for various parts of chip - we
565 // don't care or do just fake that the BIST is done
566 if (reg & PTSCR_RBIST_EN)
567 regs.ptscr |= PTSCR_RBIST_DONE;
568 if (reg & PTSCR_EEBIST_EN)
569 regs.ptscr &= ~PTSCR_EEBIST_EN;
570 if (reg & PTSCR_EELOAD_EN)
571 regs.ptscr &= ~PTSCR_EELOAD_EN;
572 break;
573
574 case ISR: /* writing to the ISR has no effect */
575 panic("ISR is a read only register!\n");
576
577 case IMR:
578 regs.imr = reg;
579 devIntrChangeMask();
580 break;
581
582 case IER:
583 regs.ier = reg;
584 break;
585
586 case IHR:
587 regs.ihr = reg;
588 /* not going to implement real interrupt holdoff */
589 break;
590
591 case TXDP:
592 regs.txdp = (reg & 0xFFFFFFFC);
593 assert(txState == txIdle);
594 CTDD = false;
595 break;
596
597 case TXDP_HI:
598 regs.txdp_hi = reg;
599 break;
600
601 case TX_CFG:
602 regs.txcfg = reg;
603 #if 0
604 if (reg & TX_CFG_CSI) ;
605 if (reg & TX_CFG_HBI) ;
606 if (reg & TX_CFG_MLB) ;
607 if (reg & TX_CFG_ATP) ;
608 if (reg & TX_CFG_ECRETRY) {
609 /*
610 * this could easily be implemented, but considering
611 * the network is just a fake pipe, wouldn't make
612 * sense to do this
613 */
614 }
615
616 if (reg & TX_CFG_BRST_DIS) ;
617 #endif
618
619 #if 0
620 /* we handle our own DMA, ignore the kernel's exhortations */
621 if (reg & TX_CFG_MXDMA) ;
622 #endif
623
624 // also, we currently don't care about fill/drain
625 // thresholds though this may change in the future with
626 // more realistic networks or a driver which changes it
627 // according to feedback
628
629 break;
630
631 case GPIOR:
632 // Only write writable bits
633 regs.gpior &= GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
634 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN;
635 regs.gpior |= reg & ~(GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
636 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN);
637 /* these just control general purpose i/o pins, don't matter */
638 break;
639
640 case RXDP:
641 regs.rxdp = reg;
642 CRDD = false;
643 break;
644
645 case RXDP_HI:
646 regs.rxdp_hi = reg;
647 break;
648
649 case RX_CFG:
650 regs.rxcfg = reg;
651 #if 0
652 if (reg & RX_CFG_AEP) ;
653 if (reg & RX_CFG_ARP) ;
654 if (reg & RX_CFG_STRIPCRC) ;
655 if (reg & RX_CFG_RX_RD) ;
656 if (reg & RX_CFG_ALP) ;
657 if (reg & RX_CFG_AIRL) ;
658
659 /* we handle our own DMA, ignore what kernel says about it */
660 if (reg & RX_CFG_MXDMA) ;
661
662 //also, we currently don't care about fill/drain thresholds
663 //though this may change in the future with more realistic
664 //networks or a driver which changes it according to feedback
665 if (reg & (RX_CFG_DRTH | RX_CFG_DRTH0)) ;
666 #endif
667 break;
668
669 case PQCR:
670 /* there is no priority queueing used in the linux 2.6 driver */
671 regs.pqcr = reg;
672 break;
673
674 case WCSR:
675 /* not going to implement wake on LAN */
676 regs.wcsr = reg;
677 break;
678
679 case PCR:
680 /* not going to implement pause control */
681 regs.pcr = reg;
682 break;
683
684 case RFCR:
685 regs.rfcr = reg;
686
687 rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
688 acceptBroadcast = (reg & RFCR_AAB) ? true : false;
689 acceptMulticast = (reg & RFCR_AAM) ? true : false;
690 acceptUnicast = (reg & RFCR_AAU) ? true : false;
691 acceptPerfect = (reg & RFCR_APM) ? true : false;
692 acceptArp = (reg & RFCR_AARP) ? true : false;
693 multicastHashEnable = (reg & RFCR_MHEN) ? true : false;
694
695 #if 0
696 if (reg & RFCR_APAT)
697 panic("RFCR_APAT not implemented!\n");
698 #endif
699 if (reg & RFCR_UHEN)
700 panic("Unicast hash filtering not used by drivers!\n");
701
702 if (reg & RFCR_ULM)
703 panic("RFCR_ULM not implemented!\n");
704
705 break;
706
707 case RFDR:
708 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
709 switch (rfaddr) {
710 case 0x000:
711 rom.perfectMatch[0] = (uint8_t)reg;
712 rom.perfectMatch[1] = (uint8_t)(reg >> 8);
713 break;
714 case 0x002:
715 rom.perfectMatch[2] = (uint8_t)reg;
716 rom.perfectMatch[3] = (uint8_t)(reg >> 8);
717 break;
718 case 0x004:
719 rom.perfectMatch[4] = (uint8_t)reg;
720 rom.perfectMatch[5] = (uint8_t)(reg >> 8);
721 break;
722 default:
723
724 if (rfaddr >= FHASH_ADDR &&
725 rfaddr < FHASH_ADDR + FHASH_SIZE) {
726
727 // Only word-aligned writes supported
728 if (rfaddr % 2)
729 panic("unaligned write to filter hash table!");
730
731 rom.filterHash[rfaddr - FHASH_ADDR] = (uint8_t)reg;
732 rom.filterHash[rfaddr - FHASH_ADDR + 1]
733 = (uint8_t)(reg >> 8);
734 break;
735 }
736 panic("writing RFDR for something other than pattern matching\
737 or hashing! %#x\n", rfaddr);
738 }
739
740 case BRAR:
741 regs.brar = reg;
742 break;
743
744 case BRDR:
745 panic("the driver never uses BRDR, something is wrong!\n");
746
747 case SRR:
748 panic("SRR is read only register!\n");
749
750 case MIBC:
751 panic("the driver never uses MIBC, something is wrong!\n");
752
753 case VRCR:
754 regs.vrcr = reg;
755 break;
756
757 case VTCR:
758 regs.vtcr = reg;
759 break;
760
761 case VDR:
762 panic("the driver never uses VDR, something is wrong!\n");
763
764 case CCSR:
765 /* not going to implement clockrun stuff */
766 regs.ccsr = reg;
767 break;
768
769 case TBICR:
770 regs.tbicr = reg;
771 if (reg & TBICR_MR_LOOPBACK)
772 panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
773
774 if (reg & TBICR_MR_AN_ENABLE) {
775 regs.tanlpar = regs.tanar;
776 regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
777 }
778
779 #if 0
780 if (reg & TBICR_MR_RESTART_AN) ;
781 #endif
782
783 break;
784
785 case TBISR:
786 panic("TBISR is read only register!\n");
787
788 case TANAR:
789 // Only write the writable bits
790 regs.tanar &= TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED;
791 regs.tanar |= reg & ~(TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED);
792
793 // Pause capability unimplemented
794 #if 0
795 if (reg & TANAR_PS2) ;
796 if (reg & TANAR_PS1) ;
797 #endif
798
799 break;
800
801 case TANLPAR:
802 panic("this should only be written to by the fake phy!\n");
803
804 case TANER:
805 panic("TANER is read only register!\n");
806
807 case TESR:
808 regs.tesr = reg;
809 break;
810
811 default:
812 panic("invalid register access daddr=%#x", daddr);
813 }
814 } else {
815 panic("Invalid Request Size");
816 }
817 pkt->makeAtomicResponse();
818 return pioDelay;
819 }
820
/**
 * Raise device-level interrupt causes.  Implemented causes are OR'ed
 * into regs.isr; if any newly-posted cause is unmasked, the per-cause
 * "total" stats are bumped and a CPU interrupt is scheduled (after
 * intrDelay unless the cause is in ISR_NODELAY).
 *
 * @param interrupts bitmask of ISR_* causes to post
 */
void
NSGigE::devIntrPost(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot set a reserved interrupt");

    if (interrupts & ISR_NOIMPL)
        warn("interrupt not implemented %#x\n", interrupts);

    // only implemented causes are latched into the status register
    interrupts &= ISR_IMPL;
    regs.isr |= interrupts;

    // count each unmasked cause being posted
    if (interrupts & regs.imr) {
        if (interrupts & ISR_SWI) {
            totalSwi++;
        }
        if (interrupts & ISR_RXIDLE) {
            totalRxIdle++;
        }
        if (interrupts & ISR_RXOK) {
            totalRxOk++;
        }
        if (interrupts & ISR_RXDESC) {
            totalRxDesc++;
        }
        if (interrupts & ISR_TXOK) {
            totalTxOk++;
        }
        if (interrupts & ISR_TXIDLE) {
            totalTxIdle++;
        }
        if (interrupts & ISR_TXDESC) {
            totalTxDesc++;
        }
        if (interrupts & ISR_RXORN) {
            totalRxOrn++;
        }
    }

    DPRINTF(EthernetIntr,
            "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
            interrupts, regs.isr, regs.imr);

    if ((regs.isr & regs.imr)) {
        Tick when = curTick();
        // ISR_NODELAY causes bypass the interrupt coalescing delay
        if ((regs.isr & regs.imr & ISR_NODELAY) == 0)
            when += intrDelay;
        postedInterrupts++;
        cpuIntrPost(when);
    }
}
872
/* writing this interrupt counting stats inside this means that this function
   is now limited to being used to clear all interrupts upon the kernel
   reading isr and servicing. just telling you in case you were thinking
   of expanding use.
*/
/**
 * Acknowledge device-level interrupt causes (called from the ISR-read
 * path in read()).  The per-cause "posted" stats are bumped for every
 * cause that was pending and unmasked at clear time; then the given
 * causes are removed from regs.isr and the CPU line is dropped if
 * nothing unmasked remains.
 *
 * @param interrupts bitmask of ISR_* causes to clear
 */
void
NSGigE::devIntrClear(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot clear a reserved interrupt");

    if (regs.isr & regs.imr & ISR_SWI) {
        postedSwi++;
    }
    if (regs.isr & regs.imr & ISR_RXIDLE) {
        postedRxIdle++;
    }
    if (regs.isr & regs.imr & ISR_RXOK) {
        postedRxOk++;
    }
    if (regs.isr & regs.imr & ISR_RXDESC) {
        postedRxDesc++;
    }
    if (regs.isr & regs.imr & ISR_TXOK) {
        postedTxOk++;
    }
    if (regs.isr & regs.imr & ISR_TXIDLE) {
        postedTxIdle++;
    }
    if (regs.isr & regs.imr & ISR_TXDESC) {
        postedTxDesc++;
    }
    if (regs.isr & regs.imr & ISR_RXORN) {
        postedRxOrn++;
    }

    interrupts &= ~ISR_NOIMPL;
    regs.isr &= ~interrupts;

    DPRINTF(EthernetIntr,
            "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
            interrupts, regs.isr, regs.imr);

    if (!(regs.isr & regs.imr))
        cpuIntrClear();
}
919
920 void
921 NSGigE::devIntrChangeMask()
922 {
923 DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
924 regs.isr, regs.imr, regs.isr & regs.imr);
925
926 if (regs.isr & regs.imr)
927 cpuIntrPost(curTick());
928 else
929 cpuIntrClear();
930 }
931
/**
 * Schedule delivery of the CPU interrupt at time @p when.  If an
 * earlier delivery is already scheduled, this request is folded into
 * it; a later pending one is squashed and rescheduled.
 *
 * @param when absolute tick at which the interrupt should fire
 */
void
NSGigE::cpuIntrPost(Tick when)
{
    // If the interrupt you want to post is later than an interrupt
    // already scheduled, just let it post in the coming one and don't
    // schedule another.
    // HOWEVER, must be sure that the scheduled intrTick is in the
    // future (this was formerly the source of a bug)
    /**
     * @todo this warning should be removed and the intrTick code should
     * be fixed.
     */
    assert(when >= curTick());
    assert(intrTick >= curTick() || intrTick == 0);
    if (when > intrTick && intrTick != 0) {
        DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
                intrTick);
        return;
    }

    intrTick = when;
    // defensive clamp: never schedule in the past (debug-break first so
    // the condition can be investigated under a debugger)
    if (intrTick < curTick()) {
        Debug::breakpoint();
        intrTick = curTick();
    }

    DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
            intrTick);

    // squash any previously scheduled delivery and schedule a fresh one
    // (the 'true' flag presumably makes the event self-deleting after it
    // fires/squashes -- confirm against IntrEvent's declaration)
    if (intrEvent)
        intrEvent->squash();
    intrEvent = new IntrEvent(this, true);
    schedule(intrEvent, intrTick);
}
966
967 void
968 NSGigE::cpuInterrupt()
969 {
970 assert(intrTick == curTick());
971
972 // Whether or not there's a pending interrupt, we don't care about
973 // it anymore
974 intrEvent = 0;
975 intrTick = 0;
976
977 // Don't send an interrupt if there's already one
978 if (cpuPendingIntr) {
979 DPRINTF(EthernetIntr,
980 "would send an interrupt now, but there's already pending\n");
981 } else {
982 // Send interrupt
983 cpuPendingIntr = true;
984
985 DPRINTF(EthernetIntr, "posting interrupt\n");
986 intrPost();
987 }
988 }
989
990 void
991 NSGigE::cpuIntrClear()
992 {
993 if (!cpuPendingIntr)
994 return;
995
996 if (intrEvent) {
997 intrEvent->squash();
998 intrEvent = 0;
999 }
1000
1001 intrTick = 0;
1002
1003 cpuPendingIntr = false;
1004
1005 DPRINTF(EthernetIntr, "clearing interrupt\n");
1006 intrClear();
1007 }
1008
// True while the CPU interrupt line is asserted by this device.
bool
NSGigE::cpuIntrPending() const
{ return cpuPendingIntr; }
1012
1013 void
1014 NSGigE::txReset()
1015 {
1016
1017 DPRINTF(Ethernet, "transmit reset\n");
1018
1019 CTDD = false;
1020 txEnable = false;;
1021 txFragPtr = 0;
1022 assert(txDescCnt == 0);
1023 txFifo.clear();
1024 txState = txIdle;
1025 assert(txDmaState == dmaIdle);
1026 }
1027
1028 void
1029 NSGigE::rxReset()
1030 {
1031 DPRINTF(Ethernet, "receive reset\n");
1032
1033 CRDD = false;
1034 assert(rxPktBytes == 0);
1035 rxEnable = false;
1036 rxFragPtr = 0;
1037 assert(rxDescCnt == 0);
1038 assert(rxDmaState == dmaIdle);
1039 rxFifo.clear();
1040 rxState = rxIdle;
1041 }
1042
/**
 * Restore the register file (and the filter-accept flags derived from
 * it) to the documented power-on/reset values.
 */
void
NSGigE::regsReset()
{
    memset(&regs, 0, sizeof(regs));
    // link up, TBI enabled, gigabit mode
    regs.config = (CFGR_LNKSTS | CFGR_TBI_EN | CFGR_MODE_1000);
    regs.mear = 0x12;
    regs.txcfg = 0x120; // set drain threshold to 1024 bytes and
                        // fill threshold to 32 bytes
    regs.rxcfg = 0x4; // set drain threshold to 16 bytes
    regs.srr = 0x0103; // set the silicon revision to rev B or 0x103
    regs.mibc = MIBC_FRZ;
    regs.vdr = 0x81; // set the vlan tag type to 802.1q
    regs.tesr = 0xc000; // TBI capable of both full and half duplex
    regs.brar = 0xffffffff;

    // derived filter state follows the zeroed RFCR
    extstsEnable = false;
    acceptBroadcast = false;
    acceptMulticast = false;
    acceptUnicast = false;
    acceptPerfect = false;
    acceptArp = false;
}
1065
/**
 * Start (or defer) the receive-side DMA read described by rxDmaAddr /
 * rxDmaLen / rxDmaData.  If another DMA is pending or the device is
 * draining, the read is parked in dmaReadWaiting to be retried by
 * rxKick().
 *
 * @return always true, so callers unconditionally exit the rx state
 *         machine until the DMA completes (rxDmaReadDone re-kicks it)
 */
bool
NSGigE::doRxDmaRead()
{
    assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
    rxDmaState = dmaReading;

    if (dmaPending() || getDrainState() != Drainable::Running)
        rxDmaState = dmaReadWaiting;
    else
        dmaRead(rxDmaAddr, rxDmaLen, &rxDmaReadEvent, (uint8_t*)rxDmaData);

    return true;
}
1079
/**
 * Completion callback for a receive-side DMA read: returns the DMA
 * sub-state machine to idle and re-kicks the state machines (tx first,
 * if it was blocked waiting on a DMA slot).
 */
void
NSGigE::rxDmaReadDone()
{
    assert(rxDmaState == dmaReading);
    rxDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
            rxDmaAddr, rxDmaLen);
    DDUMP(EthernetDMA, rxDmaData, rxDmaLen);

    // If the transmit state machine has a pending DMA, let it go first
    if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
        txKick();

    rxKick();
}
1096
1097 bool
1098 NSGigE::doRxDmaWrite()
1099 {
1100 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
1101 rxDmaState = dmaWriting;
1102
1103 if (dmaPending() || getDrainState() != Running)
1104 rxDmaState = dmaWriteWaiting;
1105 else
1106 dmaWrite(rxDmaAddr, rxDmaLen, &rxDmaWriteEvent, (uint8_t*)rxDmaData);
1107 return true;
1108 }
1109
/**
 * Completion callback for a receive-side DMA write: returns the DMA
 * sub-state machine to idle and re-kicks the state machines (tx first,
 * if it was blocked waiting on a DMA slot).
 */
void
NSGigE::rxDmaWriteDone()
{
    assert(rxDmaState == dmaWriting);
    rxDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
            rxDmaAddr, rxDmaLen);
    DDUMP(EthernetDMA, rxDmaData, rxDmaLen);

    // If the transmit state machine has a pending DMA, let it go first
    if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
        txKick();

    rxKick();
}
1126
1127 void
1128 NSGigE::rxKick()
1129 {
1130 bool is64bit = (bool)(regs.config & CFGR_M64ADDR);
1131
1132 DPRINTF(EthernetSM,
1133 "receive kick rxState=%s (rxBuf.size=%d) %d-bit\n",
1134 NsRxStateStrings[rxState], rxFifo.size(), is64bit ? 64 : 32);
1135
1136 Addr link, bufptr;
1137 uint32_t &cmdsts = is64bit ? rxDesc64.cmdsts : rxDesc32.cmdsts;
1138 uint32_t &extsts = is64bit ? rxDesc64.extsts : rxDesc32.extsts;
1139
1140 next:
1141 if (rxKickTick > curTick()) {
1142 DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
1143 rxKickTick);
1144
1145 goto exit;
1146 }
1147
1148 // Go to the next state machine clock tick.
1149 rxKickTick = clockEdge(Cycles(1));
1150
1151 switch(rxDmaState) {
1152 case dmaReadWaiting:
1153 if (doRxDmaRead())
1154 goto exit;
1155 break;
1156 case dmaWriteWaiting:
1157 if (doRxDmaWrite())
1158 goto exit;
1159 break;
1160 default:
1161 break;
1162 }
1163
1164 link = is64bit ? (Addr)rxDesc64.link : (Addr)rxDesc32.link;
1165 bufptr = is64bit ? (Addr)rxDesc64.bufptr : (Addr)rxDesc32.bufptr;
1166
1167 // see state machine from spec for details
1168 // the way this works is, if you finish work on one state and can
1169 // go directly to another, you do that through jumping to the
1170 // label "next". however, if you have intermediate work, like DMA
1171 // so that you can't go to the next state yet, you go to exit and
1172 // exit the loop. however, when the DMA is done it will trigger
1173 // an event and come back to this loop.
1174 switch (rxState) {
1175 case rxIdle:
1176 if (!rxEnable) {
1177 DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n");
1178 goto exit;
1179 }
1180
1181 if (CRDD) {
1182 rxState = rxDescRefr;
1183
1184 rxDmaAddr = regs.rxdp & 0x3fffffff;
1185 rxDmaData =
1186 is64bit ? (void *)&rxDesc64.link : (void *)&rxDesc32.link;
1187 rxDmaLen = is64bit ? sizeof(rxDesc64.link) : sizeof(rxDesc32.link);
1188 rxDmaFree = dmaDescFree;
1189
1190 descDmaReads++;
1191 descDmaRdBytes += rxDmaLen;
1192
1193 if (doRxDmaRead())
1194 goto exit;
1195 } else {
1196 rxState = rxDescRead;
1197
1198 rxDmaAddr = regs.rxdp & 0x3fffffff;
1199 rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
1200 rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
1201 rxDmaFree = dmaDescFree;
1202
1203 descDmaReads++;
1204 descDmaRdBytes += rxDmaLen;
1205
1206 if (doRxDmaRead())
1207 goto exit;
1208 }
1209 break;
1210
1211 case rxDescRefr:
1212 if (rxDmaState != dmaIdle)
1213 goto exit;
1214
1215 rxState = rxAdvance;
1216 break;
1217
1218 case rxDescRead:
1219 if (rxDmaState != dmaIdle)
1220 goto exit;
1221
1222 DPRINTF(EthernetDesc, "rxDesc: addr=%08x read descriptor\n",
1223 regs.rxdp & 0x3fffffff);
1224 DPRINTF(EthernetDesc,
1225 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
1226 link, bufptr, cmdsts, extsts);
1227
1228 if (cmdsts & CMDSTS_OWN) {
1229 devIntrPost(ISR_RXIDLE);
1230 rxState = rxIdle;
1231 goto exit;
1232 } else {
1233 rxState = rxFifoBlock;
1234 rxFragPtr = bufptr;
1235 rxDescCnt = cmdsts & CMDSTS_LEN_MASK;
1236 }
1237 break;
1238
1239 case rxFifoBlock:
1240 if (!rxPacket) {
1241 /**
1242 * @todo in reality, we should be able to start processing
1243 * the packet as it arrives, and not have to wait for the
1244 * full packet ot be in the receive fifo.
1245 */
1246 if (rxFifo.empty())
1247 goto exit;
1248
1249 DPRINTF(EthernetSM, "****processing receive of new packet****\n");
1250
1251 // If we don't have a packet, grab a new one from the fifo.
1252 rxPacket = rxFifo.front();
1253 rxPktBytes = rxPacket->length;
1254 rxPacketBufPtr = rxPacket->data;
1255
1256 #if TRACING_ON
1257 if (DTRACE(Ethernet)) {
1258 IpPtr ip(rxPacket);
1259 if (ip) {
1260 DPRINTF(Ethernet, "ID is %d\n", ip->id());
1261 TcpPtr tcp(ip);
1262 if (tcp) {
1263 DPRINTF(Ethernet,
1264 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1265 tcp->sport(), tcp->dport(), tcp->seq(),
1266 tcp->ack());
1267 }
1268 }
1269 }
1270 #endif
1271
1272 // sanity check - i think the driver behaves like this
1273 assert(rxDescCnt >= rxPktBytes);
1274 rxFifo.pop();
1275 }
1276
1277
1278 // dont' need the && rxDescCnt > 0 if driver sanity check
1279 // above holds
1280 if (rxPktBytes > 0) {
1281 rxState = rxFragWrite;
1282 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
1283 // check holds
1284 rxXferLen = rxPktBytes;
1285
1286 rxDmaAddr = rxFragPtr & 0x3fffffff;
1287 rxDmaData = rxPacketBufPtr;
1288 rxDmaLen = rxXferLen;
1289 rxDmaFree = dmaDataFree;
1290
1291 if (doRxDmaWrite())
1292 goto exit;
1293
1294 } else {
1295 rxState = rxDescWrite;
1296
1297 //if (rxPktBytes == 0) { /* packet is done */
1298 assert(rxPktBytes == 0);
1299 DPRINTF(EthernetSM, "done with receiving packet\n");
1300
1301 cmdsts |= CMDSTS_OWN;
1302 cmdsts &= ~CMDSTS_MORE;
1303 cmdsts |= CMDSTS_OK;
1304 cmdsts &= 0xffff0000;
1305 cmdsts += rxPacket->length; //i.e. set CMDSTS_SIZE
1306
1307 #if 0
1308 /*
1309 * all the driver uses these are for its own stats keeping
1310 * which we don't care about, aren't necessary for
1311 * functionality and doing this would just slow us down.
1312 * if they end up using this in a later version for
1313 * functional purposes, just undef
1314 */
1315 if (rxFilterEnable) {
1316 cmdsts &= ~CMDSTS_DEST_MASK;
1317 const EthAddr &dst = rxFifoFront()->dst();
1318 if (dst->unicast())
1319 cmdsts |= CMDSTS_DEST_SELF;
1320 if (dst->multicast())
1321 cmdsts |= CMDSTS_DEST_MULTI;
1322 if (dst->broadcast())
1323 cmdsts |= CMDSTS_DEST_MASK;
1324 }
1325 #endif
1326
1327 IpPtr ip(rxPacket);
1328 if (extstsEnable && ip) {
1329 extsts |= EXTSTS_IPPKT;
1330 rxIpChecksums++;
1331 if (cksum(ip) != 0) {
1332 DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
1333 extsts |= EXTSTS_IPERR;
1334 }
1335 TcpPtr tcp(ip);
1336 UdpPtr udp(ip);
1337 if (tcp) {
1338 extsts |= EXTSTS_TCPPKT;
1339 rxTcpChecksums++;
1340 if (cksum(tcp) != 0) {
1341 DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
1342 extsts |= EXTSTS_TCPERR;
1343
1344 }
1345 } else if (udp) {
1346 extsts |= EXTSTS_UDPPKT;
1347 rxUdpChecksums++;
1348 if (cksum(udp) != 0) {
1349 DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
1350 extsts |= EXTSTS_UDPERR;
1351 }
1352 }
1353 }
1354 rxPacket = 0;
1355
1356 /*
1357 * the driver seems to always receive into desc buffers
1358 * of size 1514, so you never have a pkt that is split
1359 * into multiple descriptors on the receive side, so
1360 * i don't implement that case, hence the assert above.
1361 */
1362
1363 DPRINTF(EthernetDesc,
1364 "rxDesc: addr=%08x writeback cmdsts extsts\n",
1365 regs.rxdp & 0x3fffffff);
1366 DPRINTF(EthernetDesc,
1367 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
1368 link, bufptr, cmdsts, extsts);
1369
1370 rxDmaAddr = regs.rxdp & 0x3fffffff;
1371 rxDmaData = &cmdsts;
1372 if (is64bit) {
1373 rxDmaAddr += offsetof(ns_desc64, cmdsts);
1374 rxDmaLen = sizeof(rxDesc64.cmdsts) + sizeof(rxDesc64.extsts);
1375 } else {
1376 rxDmaAddr += offsetof(ns_desc32, cmdsts);
1377 rxDmaLen = sizeof(rxDesc32.cmdsts) + sizeof(rxDesc32.extsts);
1378 }
1379 rxDmaFree = dmaDescFree;
1380
1381 descDmaWrites++;
1382 descDmaWrBytes += rxDmaLen;
1383
1384 if (doRxDmaWrite())
1385 goto exit;
1386 }
1387 break;
1388
1389 case rxFragWrite:
1390 if (rxDmaState != dmaIdle)
1391 goto exit;
1392
1393 rxPacketBufPtr += rxXferLen;
1394 rxFragPtr += rxXferLen;
1395 rxPktBytes -= rxXferLen;
1396
1397 rxState = rxFifoBlock;
1398 break;
1399
1400 case rxDescWrite:
1401 if (rxDmaState != dmaIdle)
1402 goto exit;
1403
1404 assert(cmdsts & CMDSTS_OWN);
1405
1406 assert(rxPacket == 0);
1407 devIntrPost(ISR_RXOK);
1408
1409 if (cmdsts & CMDSTS_INTR)
1410 devIntrPost(ISR_RXDESC);
1411
1412 if (!rxEnable) {
1413 DPRINTF(EthernetSM, "Halting the RX state machine\n");
1414 rxState = rxIdle;
1415 goto exit;
1416 } else
1417 rxState = rxAdvance;
1418 break;
1419
1420 case rxAdvance:
1421 if (link == 0) {
1422 devIntrPost(ISR_RXIDLE);
1423 rxState = rxIdle;
1424 CRDD = true;
1425 goto exit;
1426 } else {
1427 if (rxDmaState != dmaIdle)
1428 goto exit;
1429 rxState = rxDescRead;
1430 regs.rxdp = link;
1431 CRDD = false;
1432
1433 rxDmaAddr = regs.rxdp & 0x3fffffff;
1434 rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
1435 rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
1436 rxDmaFree = dmaDescFree;
1437
1438 if (doRxDmaRead())
1439 goto exit;
1440 }
1441 break;
1442
1443 default:
1444 panic("Invalid rxState!");
1445 }
1446
1447 DPRINTF(EthernetSM, "entering next rxState=%s\n",
1448 NsRxStateStrings[rxState]);
1449 goto next;
1450
1451 exit:
1452 /**
1453 * @todo do we want to schedule a future kick?
1454 */
1455 DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
1456 NsRxStateStrings[rxState]);
1457
1458 if (!rxKickEvent.scheduled())
1459 schedule(rxKickEvent, rxKickTick);
1460 }
1461
1462 void
1463 NSGigE::transmit()
1464 {
1465 if (txFifo.empty()) {
1466 DPRINTF(Ethernet, "nothing to transmit\n");
1467 return;
1468 }
1469
1470 DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n",
1471 txFifo.size());
1472 if (interface->sendPacket(txFifo.front())) {
1473 #if TRACING_ON
1474 if (DTRACE(Ethernet)) {
1475 IpPtr ip(txFifo.front());
1476 if (ip) {
1477 DPRINTF(Ethernet, "ID is %d\n", ip->id());
1478 TcpPtr tcp(ip);
1479 if (tcp) {
1480 DPRINTF(Ethernet,
1481 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1482 tcp->sport(), tcp->dport(), tcp->seq(),
1483 tcp->ack());
1484 }
1485 }
1486 }
1487 #endif
1488
1489 DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length);
1490 txBytes += txFifo.front()->length;
1491 txPackets++;
1492
1493 DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n",
1494 txFifo.avail());
1495 txFifo.pop();
1496
1497 /*
1498 * normally do a writeback of the descriptor here, and ONLY
1499 * after that is done, send this interrupt. but since our
1500 * stuff never actually fails, just do this interrupt here,
1501 * otherwise the code has to stray from this nice format.
1502 * besides, it's functionally the same.
1503 */
1504 devIntrPost(ISR_TXOK);
1505 }
1506
1507 if (!txFifo.empty() && !txEvent.scheduled()) {
1508 DPRINTF(Ethernet, "reschedule transmit\n");
1509 schedule(txEvent, curTick() + retryTime);
1510 }
1511 }
1512
1513 bool
1514 NSGigE::doTxDmaRead()
1515 {
1516 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
1517 txDmaState = dmaReading;
1518
1519 if (dmaPending() || getDrainState() != Running)
1520 txDmaState = dmaReadWaiting;
1521 else
1522 dmaRead(txDmaAddr, txDmaLen, &txDmaReadEvent, (uint8_t*)txDmaData);
1523
1524 return true;
1525 }
1526
1527 void
1528 NSGigE::txDmaReadDone()
1529 {
1530 assert(txDmaState == dmaReading);
1531 txDmaState = dmaIdle;
1532
1533 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
1534 txDmaAddr, txDmaLen);
1535 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1536
1537 // If the receive state machine has a pending DMA, let it go first
1538 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1539 rxKick();
1540
1541 txKick();
1542 }
1543
1544 bool
1545 NSGigE::doTxDmaWrite()
1546 {
1547 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
1548 txDmaState = dmaWriting;
1549
1550 if (dmaPending() || getDrainState() != Running)
1551 txDmaState = dmaWriteWaiting;
1552 else
1553 dmaWrite(txDmaAddr, txDmaLen, &txDmaWriteEvent, (uint8_t*)txDmaData);
1554 return true;
1555 }
1556
1557 void
1558 NSGigE::txDmaWriteDone()
1559 {
1560 assert(txDmaState == dmaWriting);
1561 txDmaState = dmaIdle;
1562
1563 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
1564 txDmaAddr, txDmaLen);
1565 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1566
1567 // If the receive state machine has a pending DMA, let it go first
1568 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1569 rxKick();
1570
1571 txKick();
1572 }
1573
1574 void
1575 NSGigE::txKick()
1576 {
1577 bool is64bit = (bool)(regs.config & CFGR_M64ADDR);
1578
1579 DPRINTF(EthernetSM, "transmit kick txState=%s %d-bit\n",
1580 NsTxStateStrings[txState], is64bit ? 64 : 32);
1581
1582 Addr link, bufptr;
1583 uint32_t &cmdsts = is64bit ? txDesc64.cmdsts : txDesc32.cmdsts;
1584 uint32_t &extsts = is64bit ? txDesc64.extsts : txDesc32.extsts;
1585
1586 next:
1587 if (txKickTick > curTick()) {
1588 DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
1589 txKickTick);
1590 goto exit;
1591 }
1592
1593 // Go to the next state machine clock tick.
1594 txKickTick = clockEdge(Cycles(1));
1595
1596 switch(txDmaState) {
1597 case dmaReadWaiting:
1598 if (doTxDmaRead())
1599 goto exit;
1600 break;
1601 case dmaWriteWaiting:
1602 if (doTxDmaWrite())
1603 goto exit;
1604 break;
1605 default:
1606 break;
1607 }
1608
1609 link = is64bit ? (Addr)txDesc64.link : (Addr)txDesc32.link;
1610 bufptr = is64bit ? (Addr)txDesc64.bufptr : (Addr)txDesc32.bufptr;
1611 switch (txState) {
1612 case txIdle:
1613 if (!txEnable) {
1614 DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n");
1615 goto exit;
1616 }
1617
1618 if (CTDD) {
1619 txState = txDescRefr;
1620
1621 txDmaAddr = regs.txdp & 0x3fffffff;
1622 txDmaData =
1623 is64bit ? (void *)&txDesc64.link : (void *)&txDesc32.link;
1624 txDmaLen = is64bit ? sizeof(txDesc64.link) : sizeof(txDesc32.link);
1625 txDmaFree = dmaDescFree;
1626
1627 descDmaReads++;
1628 descDmaRdBytes += txDmaLen;
1629
1630 if (doTxDmaRead())
1631 goto exit;
1632
1633 } else {
1634 txState = txDescRead;
1635
1636 txDmaAddr = regs.txdp & 0x3fffffff;
1637 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
1638 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
1639 txDmaFree = dmaDescFree;
1640
1641 descDmaReads++;
1642 descDmaRdBytes += txDmaLen;
1643
1644 if (doTxDmaRead())
1645 goto exit;
1646 }
1647 break;
1648
1649 case txDescRefr:
1650 if (txDmaState != dmaIdle)
1651 goto exit;
1652
1653 txState = txAdvance;
1654 break;
1655
1656 case txDescRead:
1657 if (txDmaState != dmaIdle)
1658 goto exit;
1659
1660 DPRINTF(EthernetDesc, "txDesc: addr=%08x read descriptor\n",
1661 regs.txdp & 0x3fffffff);
1662 DPRINTF(EthernetDesc,
1663 "txDesc: link=%#x bufptr=%#x cmdsts=%#08x extsts=%#08x\n",
1664 link, bufptr, cmdsts, extsts);
1665
1666 if (cmdsts & CMDSTS_OWN) {
1667 txState = txFifoBlock;
1668 txFragPtr = bufptr;
1669 txDescCnt = cmdsts & CMDSTS_LEN_MASK;
1670 } else {
1671 devIntrPost(ISR_TXIDLE);
1672 txState = txIdle;
1673 goto exit;
1674 }
1675 break;
1676
1677 case txFifoBlock:
1678 if (!txPacket) {
1679 DPRINTF(EthernetSM, "****starting the tx of a new packet****\n");
1680 txPacket = new EthPacketData(16384);
1681 txPacketBufPtr = txPacket->data;
1682 }
1683
1684 if (txDescCnt == 0) {
1685 DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n");
1686 if (cmdsts & CMDSTS_MORE) {
1687 DPRINTF(EthernetSM, "there are more descriptors to come\n");
1688 txState = txDescWrite;
1689
1690 cmdsts &= ~CMDSTS_OWN;
1691
1692 txDmaAddr = regs.txdp & 0x3fffffff;
1693 txDmaData = &cmdsts;
1694 if (is64bit) {
1695 txDmaAddr += offsetof(ns_desc64, cmdsts);
1696 txDmaLen = sizeof(txDesc64.cmdsts);
1697 } else {
1698 txDmaAddr += offsetof(ns_desc32, cmdsts);
1699 txDmaLen = sizeof(txDesc32.cmdsts);
1700 }
1701 txDmaFree = dmaDescFree;
1702
1703 if (doTxDmaWrite())
1704 goto exit;
1705
1706 } else { /* this packet is totally done */
1707 DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
1708 /* deal with the the packet that just finished */
1709 if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
1710 IpPtr ip(txPacket);
1711 if (extsts & EXTSTS_UDPPKT) {
1712 UdpPtr udp(ip);
1713 if (udp) {
1714 udp->sum(0);
1715 udp->sum(cksum(udp));
1716 txUdpChecksums++;
1717 } else {
1718 Debug::breakpoint();
1719 warn_once("UDPPKT set, but not UDP!\n");
1720 }
1721 } else if (extsts & EXTSTS_TCPPKT) {
1722 TcpPtr tcp(ip);
1723 if (tcp) {
1724 tcp->sum(0);
1725 tcp->sum(cksum(tcp));
1726 txTcpChecksums++;
1727 } else {
1728 Debug::breakpoint();
1729 warn_once("TCPPKT set, but not UDP!\n");
1730 }
1731 }
1732 if (extsts & EXTSTS_IPPKT) {
1733 if (ip) {
1734 ip->sum(0);
1735 ip->sum(cksum(ip));
1736 txIpChecksums++;
1737 } else {
1738 Debug::breakpoint();
1739 warn_once("IPPKT set, but not UDP!\n");
1740 }
1741 }
1742 }
1743
1744 txPacket->length = txPacketBufPtr - txPacket->data;
1745 // this is just because the receive can't handle a
1746 // packet bigger want to make sure
1747 if (txPacket->length > 1514)
1748 panic("transmit packet too large, %s > 1514\n",
1749 txPacket->length);
1750
1751 #ifndef NDEBUG
1752 bool success =
1753 #endif
1754 txFifo.push(txPacket);
1755 assert(success);
1756
1757 /*
1758 * this following section is not tqo spec, but
1759 * functionally shouldn't be any different. normally,
1760 * the chip will wait til the transmit has occurred
1761 * before writing back the descriptor because it has
1762 * to wait to see that it was successfully transmitted
1763 * to decide whether to set CMDSTS_OK or not.
1764 * however, in the simulator since it is always
1765 * successfully transmitted, and writing it exactly to
1766 * spec would complicate the code, we just do it here
1767 */
1768
1769 cmdsts &= ~CMDSTS_OWN;
1770 cmdsts |= CMDSTS_OK;
1771
1772 DPRINTF(EthernetDesc,
1773 "txDesc writeback: cmdsts=%08x extsts=%08x\n",
1774 cmdsts, extsts);
1775
1776 txDmaFree = dmaDescFree;
1777 txDmaAddr = regs.txdp & 0x3fffffff;
1778 txDmaData = &cmdsts;
1779 if (is64bit) {
1780 txDmaAddr += offsetof(ns_desc64, cmdsts);
1781 txDmaLen =
1782 sizeof(txDesc64.cmdsts) + sizeof(txDesc64.extsts);
1783 } else {
1784 txDmaAddr += offsetof(ns_desc32, cmdsts);
1785 txDmaLen =
1786 sizeof(txDesc32.cmdsts) + sizeof(txDesc32.extsts);
1787 }
1788
1789 descDmaWrites++;
1790 descDmaWrBytes += txDmaLen;
1791
1792 transmit();
1793 txPacket = 0;
1794
1795 if (!txEnable) {
1796 DPRINTF(EthernetSM, "halting TX state machine\n");
1797 txState = txIdle;
1798 goto exit;
1799 } else
1800 txState = txAdvance;
1801
1802 if (doTxDmaWrite())
1803 goto exit;
1804 }
1805 } else {
1806 DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
1807 if (!txFifo.full()) {
1808 txState = txFragRead;
1809
1810 /*
1811 * The number of bytes transferred is either whatever
1812 * is left in the descriptor (txDescCnt), or if there
1813 * is not enough room in the fifo, just whatever room
1814 * is left in the fifo
1815 */
1816 txXferLen = min<uint32_t>(txDescCnt, txFifo.avail());
1817
1818 txDmaAddr = txFragPtr & 0x3fffffff;
1819 txDmaData = txPacketBufPtr;
1820 txDmaLen = txXferLen;
1821 txDmaFree = dmaDataFree;
1822
1823 if (doTxDmaRead())
1824 goto exit;
1825 } else {
1826 txState = txFifoBlock;
1827 transmit();
1828
1829 goto exit;
1830 }
1831
1832 }
1833 break;
1834
1835 case txFragRead:
1836 if (txDmaState != dmaIdle)
1837 goto exit;
1838
1839 txPacketBufPtr += txXferLen;
1840 txFragPtr += txXferLen;
1841 txDescCnt -= txXferLen;
1842 txFifo.reserve(txXferLen);
1843
1844 txState = txFifoBlock;
1845 break;
1846
1847 case txDescWrite:
1848 if (txDmaState != dmaIdle)
1849 goto exit;
1850
1851 if (cmdsts & CMDSTS_INTR)
1852 devIntrPost(ISR_TXDESC);
1853
1854 if (!txEnable) {
1855 DPRINTF(EthernetSM, "halting TX state machine\n");
1856 txState = txIdle;
1857 goto exit;
1858 } else
1859 txState = txAdvance;
1860 break;
1861
1862 case txAdvance:
1863 if (link == 0) {
1864 devIntrPost(ISR_TXIDLE);
1865 txState = txIdle;
1866 goto exit;
1867 } else {
1868 if (txDmaState != dmaIdle)
1869 goto exit;
1870 txState = txDescRead;
1871 regs.txdp = link;
1872 CTDD = false;
1873
1874 txDmaAddr = link & 0x3fffffff;
1875 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
1876 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
1877 txDmaFree = dmaDescFree;
1878
1879 if (doTxDmaRead())
1880 goto exit;
1881 }
1882 break;
1883
1884 default:
1885 panic("invalid state");
1886 }
1887
1888 DPRINTF(EthernetSM, "entering next txState=%s\n",
1889 NsTxStateStrings[txState]);
1890 goto next;
1891
1892 exit:
1893 /**
1894 * @todo do we want to schedule a future kick?
1895 */
1896 DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
1897 NsTxStateStrings[txState]);
1898
1899 if (!txKickEvent.scheduled())
1900 schedule(txKickEvent, txKickTick);
1901 }
1902
/**
 * Advance the EEPROM state machine
 * Called on rising edge of EEPROM clock bit in MEAR
 *
 * Bits are shifted in/out serially through MEAR_EEDI / MEAR_EEDO, one
 * bit per clock edge: a start bit, a 2-bit opcode (reads only), a
 * 6-bit address, then 16 data bits shifted out MSB first.  Only the
 * perfect-match MAC address words are backed by data (what the
 * FreeBSD driver reads).
 */
void
NSGigE::eepromKick()
{
    switch (eepromState) {

      case eepromStart:

        // Wait for start bit
        if (regs.mear & MEAR_EEDI) {
            // Set up to get 2 opcode bits
            eepromState = eepromGetOpcode;
            eepromBitsToRx = 2;
            eepromOpcode = 0;
        }
        break;

      case eepromGetOpcode:
        // Shift in the next opcode bit (MSB first).
        eepromOpcode <<= 1;
        eepromOpcode += (regs.mear & MEAR_EEDI) ? 1 : 0;
        --eepromBitsToRx;

        // Done getting opcode
        if (eepromBitsToRx == 0) {
            if (eepromOpcode != EEPROM_READ)
                panic("only EEPROM reads are implemented!");

            // Set up to get address
            eepromState = eepromGetAddress;
            eepromBitsToRx = 6;
            eepromAddress = 0;
        }
        break;

      case eepromGetAddress:
        // Shift in the next address bit (MSB first).
        eepromAddress <<= 1;
        eepromAddress += (regs.mear & MEAR_EEDI) ? 1 : 0;
        --eepromBitsToRx;

        // Done getting address
        if (eepromBitsToRx == 0) {

            if (eepromAddress >= EEPROM_SIZE)
                panic("EEPROM read access out of range!");

            // Each EEPROM word holds two bytes of the perfect-match
            // MAC address, high byte first.
            switch (eepromAddress) {

              case EEPROM_PMATCH2_ADDR:
                eepromData = rom.perfectMatch[5];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[4];
                break;

              case EEPROM_PMATCH1_ADDR:
                eepromData = rom.perfectMatch[3];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[2];
                break;

              case EEPROM_PMATCH0_ADDR:
                eepromData = rom.perfectMatch[1];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[0];
                break;

              default:
                panic("FreeBSD driver only uses EEPROM to read PMATCH!");
            }
            // Set up to read data
            eepromState = eepromRead;
            eepromBitsToRx = 16;

            // Clear data in bit
            regs.mear &= ~MEAR_EEDI;
        }
        break;

      case eepromRead:
        // Clear Data Out bit
        regs.mear &= ~MEAR_EEDO;
        // Set bit to value of current EEPROM bit
        regs.mear |= (eepromData & 0x8000) ? MEAR_EEDO : 0x0;

        // Shift the next data bit into position for the next clock.
        eepromData <<= 1;
        --eepromBitsToRx;

        // All done
        if (eepromBitsToRx == 0) {
            eepromState = eepromStart;
        }
        break;

      default:
        panic("invalid EEPROM state");
    }

}
2003
2004 void
2005 NSGigE::transferDone()
2006 {
2007 if (txFifo.empty()) {
2008 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
2009 return;
2010 }
2011
2012 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
2013
2014 reschedule(txEvent, clockEdge(Cycles(1)), true);
2015 }
2016
2017 bool
2018 NSGigE::rxFilter(const EthPacketPtr &packet)
2019 {
2020 EthPtr eth = packet;
2021 bool drop = true;
2022 string type;
2023
2024 const EthAddr &dst = eth->dst();
2025 if (dst.unicast()) {
2026 // If we're accepting all unicast addresses
2027 if (acceptUnicast)
2028 drop = false;
2029
2030 // If we make a perfect match
2031 if (acceptPerfect && dst == rom.perfectMatch)
2032 drop = false;
2033
2034 if (acceptArp && eth->type() == ETH_TYPE_ARP)
2035 drop = false;
2036
2037 } else if (dst.broadcast()) {
2038 // if we're accepting broadcasts
2039 if (acceptBroadcast)
2040 drop = false;
2041
2042 } else if (dst.multicast()) {
2043 // if we're accepting all multicasts
2044 if (acceptMulticast)
2045 drop = false;
2046
2047 // Multicast hashing faked - all packets accepted
2048 if (multicastHashEnable)
2049 drop = false;
2050 }
2051
2052 if (drop) {
2053 DPRINTF(Ethernet, "rxFilter drop\n");
2054 DDUMP(EthernetData, packet->data, packet->length);
2055 }
2056
2057 return drop;
2058 }
2059
/**
 * Accept a packet arriving from the wire.
 *
 * @param packet the incoming packet
 * @return true if the packet was consumed (queued or deliberately
 *         dropped); false only when the rx fifo has no room, in which
 *         case ISR_RXORN (overrun) is posted and the caller may retry
 */
bool
NSGigE::recvPacket(EthPacketPtr packet)
{
    // Stats count every packet seen on the wire, even ones we drop.
    rxBytes += packet->length;
    rxPackets++;

    DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
            rxFifo.avail());

    if (!rxEnable) {
        DPRINTF(Ethernet, "receive disabled...packet dropped\n");
        return true;
    }

    // NOTE(review): filtering *disabled* also drops the packet here —
    // looks intentional for this model, but verify against the driver.
    if (!rxFilterEnable) {
        DPRINTF(Ethernet,
                "receive packet filtering disabled . . . packet dropped\n");
        return true;
    }

    if (rxFilter(packet)) {
        DPRINTF(Ethernet, "packet filtered...dropped\n");
        return true;
    }

    // Not enough fifo space: signal overrun and refuse the packet.
    if (rxFifo.avail() < packet->length) {
#if TRACING_ON
        IpPtr ip(packet);
        TcpPtr tcp(ip);
        if (ip) {
            DPRINTF(Ethernet,
                    "packet won't fit in receive buffer...pkt ID %d dropped\n",
                    ip->id());
            if (tcp) {
                DPRINTF(Ethernet, "Seq=%d\n", tcp->seq());
            }
        }
#endif
        droppedPackets++;
        devIntrPost(ISR_RXORN);
        return false;
    }

    rxFifo.push(packet);

    // Wake the receive state machine to start consuming the packet.
    rxKick();
    return true;
}
2108
2109
/**
 * Resume after a drain: kick both state machines in case they were
 * parked in a waiting state with no event pending to wake them.
 */
void
NSGigE::drainResume()
{
    Drainable::drainResume();

    // During drain we could have left the state machines in a waiting state and
    // they wouldn't get out until some other event occurred to kick them.
    // This way they'll get out immediately
    txKick();
    rxKick();
}
2121
2122
2123 //=====================================================================
2124 //
2125 //
/**
 * Write the device model's state to a checkpoint.  The field order
 * here must stay in lockstep with NSGigE::unserialize().
 *
 * @param os checkpoint output stream
 */
void
NSGigE::serialize(ostream &os)
{
    // Serialize the PciDev base class
    PciDev::serialize(os);

    /*
     * Finalize any DMA events now.
     */
    // @todo will mem system save pending dma?

    /*
     * Serialize the device registers
     */
    SERIALIZE_SCALAR(regs.command);
    SERIALIZE_SCALAR(regs.config);
    SERIALIZE_SCALAR(regs.mear);
    SERIALIZE_SCALAR(regs.ptscr);
    SERIALIZE_SCALAR(regs.isr);
    SERIALIZE_SCALAR(regs.imr);
    SERIALIZE_SCALAR(regs.ier);
    SERIALIZE_SCALAR(regs.ihr);
    SERIALIZE_SCALAR(regs.txdp);
    SERIALIZE_SCALAR(regs.txdp_hi);
    SERIALIZE_SCALAR(regs.txcfg);
    SERIALIZE_SCALAR(regs.gpior);
    SERIALIZE_SCALAR(regs.rxdp);
    SERIALIZE_SCALAR(regs.rxdp_hi);
    SERIALIZE_SCALAR(regs.rxcfg);
    SERIALIZE_SCALAR(regs.pqcr);
    SERIALIZE_SCALAR(regs.wcsr);
    SERIALIZE_SCALAR(regs.pcr);
    SERIALIZE_SCALAR(regs.rfcr);
    SERIALIZE_SCALAR(regs.rfdr);
    SERIALIZE_SCALAR(regs.brar);
    SERIALIZE_SCALAR(regs.brdr);
    SERIALIZE_SCALAR(regs.srr);
    SERIALIZE_SCALAR(regs.mibc);
    SERIALIZE_SCALAR(regs.vrcr);
    SERIALIZE_SCALAR(regs.vtcr);
    SERIALIZE_SCALAR(regs.vdr);
    SERIALIZE_SCALAR(regs.ccsr);
    SERIALIZE_SCALAR(regs.tbicr);
    SERIALIZE_SCALAR(regs.tbisr);
    SERIALIZE_SCALAR(regs.tanar);
    SERIALIZE_SCALAR(regs.tanlpar);
    SERIALIZE_SCALAR(regs.taner);
    SERIALIZE_SCALAR(regs.tesr);

    SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
    SERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);

    SERIALIZE_SCALAR(ioEnable);

    /*
     * Serialize the data Fifos
     */
    rxFifo.serialize("rxFifo", os);
    txFifo.serialize("txFifo", os);

    /*
     * Serialize the various helper variables
     */
    bool txPacketExists = txPacket;
    SERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        // Length is derived from how far the fill pointer has advanced.
        txPacket->length = txPacketBufPtr - txPacket->data;
        txPacket->serialize("txPacket", os);
        // Store the fill pointer as an offset into the packet buffer;
        // raw pointers don't survive a checkpoint.
        uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
        SERIALIZE_SCALAR(txPktBufPtr);
    }

    bool rxPacketExists = rxPacket;
    SERIALIZE_SCALAR(rxPacketExists);
    if (rxPacketExists) {
        rxPacket->serialize("rxPacket", os);
        uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
        SERIALIZE_SCALAR(rxPktBufPtr);
    }

    SERIALIZE_SCALAR(txXferLen);
    SERIALIZE_SCALAR(rxXferLen);

    /*
     * Serialize Cached Descriptors
     */
    SERIALIZE_SCALAR(rxDesc64.link);
    SERIALIZE_SCALAR(rxDesc64.bufptr);
    SERIALIZE_SCALAR(rxDesc64.cmdsts);
    SERIALIZE_SCALAR(rxDesc64.extsts);
    SERIALIZE_SCALAR(txDesc64.link);
    SERIALIZE_SCALAR(txDesc64.bufptr);
    SERIALIZE_SCALAR(txDesc64.cmdsts);
    SERIALIZE_SCALAR(txDesc64.extsts);
    SERIALIZE_SCALAR(rxDesc32.link);
    SERIALIZE_SCALAR(rxDesc32.bufptr);
    SERIALIZE_SCALAR(rxDesc32.cmdsts);
    SERIALIZE_SCALAR(rxDesc32.extsts);
    SERIALIZE_SCALAR(txDesc32.link);
    SERIALIZE_SCALAR(txDesc32.bufptr);
    SERIALIZE_SCALAR(txDesc32.cmdsts);
    SERIALIZE_SCALAR(txDesc32.extsts);
    SERIALIZE_SCALAR(extstsEnable);

    /*
     * Serialize tx state machine
     */
    // Enum values are stored as plain ints in the checkpoint.
    int txState = this->txState;
    SERIALIZE_SCALAR(txState);
    SERIALIZE_SCALAR(txEnable);
    SERIALIZE_SCALAR(CTDD);
    SERIALIZE_SCALAR(txFragPtr);
    SERIALIZE_SCALAR(txDescCnt);
    int txDmaState = this->txDmaState;
    SERIALIZE_SCALAR(txDmaState);
    SERIALIZE_SCALAR(txKickTick);

    /*
     * Serialize rx state machine
     */
    int rxState = this->rxState;
    SERIALIZE_SCALAR(rxState);
    SERIALIZE_SCALAR(rxEnable);
    SERIALIZE_SCALAR(CRDD);
    SERIALIZE_SCALAR(rxPktBytes);
    SERIALIZE_SCALAR(rxFragPtr);
    SERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState = this->rxDmaState;
    SERIALIZE_SCALAR(rxDmaState);
    SERIALIZE_SCALAR(rxKickTick);

    /*
     * Serialize EEPROM state machine
     */
    int eepromState = this->eepromState;
    SERIALIZE_SCALAR(eepromState);
    SERIALIZE_SCALAR(eepromClk);
    SERIALIZE_SCALAR(eepromBitsToRx);
    SERIALIZE_SCALAR(eepromOpcode);
    SERIALIZE_SCALAR(eepromAddress);
    SERIALIZE_SCALAR(eepromData);

    /*
     * If there's a pending transmit, store the time so we can
     * reschedule it later
     */
    // Stored relative to curTick(); unserialize adds curTick() back.
    Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick() : 0;
    SERIALIZE_SCALAR(transmitTick);

    /*
     * receive address filter settings
     */
    SERIALIZE_SCALAR(rxFilterEnable);
    SERIALIZE_SCALAR(acceptBroadcast);
    SERIALIZE_SCALAR(acceptMulticast);
    SERIALIZE_SCALAR(acceptUnicast);
    SERIALIZE_SCALAR(acceptPerfect);
    SERIALIZE_SCALAR(acceptArp);
    SERIALIZE_SCALAR(multicastHashEnable);

    /*
     * Keep track of pending interrupt status.
     */
    SERIALIZE_SCALAR(intrTick);
    SERIALIZE_SCALAR(cpuPendingIntr);
    // NOTE(review): stored as an absolute tick (unlike transmitTick,
    // which is relative) — unserialize schedules it absolutely.
    Tick intrEventTick = 0;
    if (intrEvent)
        intrEventTick = intrEvent->when();
    SERIALIZE_SCALAR(intrEventTick);

}
2297
2298 void
2299 NSGigE::unserialize(Checkpoint *cp, const std::string &section)
2300 {
2301 // Unserialize the PciDev base class
2302 PciDev::unserialize(cp, section);
2303
2304 UNSERIALIZE_SCALAR(regs.command);
2305 UNSERIALIZE_SCALAR(regs.config);
2306 UNSERIALIZE_SCALAR(regs.mear);
2307 UNSERIALIZE_SCALAR(regs.ptscr);
2308 UNSERIALIZE_SCALAR(regs.isr);
2309 UNSERIALIZE_SCALAR(regs.imr);
2310 UNSERIALIZE_SCALAR(regs.ier);
2311 UNSERIALIZE_SCALAR(regs.ihr);
2312 UNSERIALIZE_SCALAR(regs.txdp);
2313 UNSERIALIZE_SCALAR(regs.txdp_hi);
2314 UNSERIALIZE_SCALAR(regs.txcfg);
2315 UNSERIALIZE_SCALAR(regs.gpior);
2316 UNSERIALIZE_SCALAR(regs.rxdp);
2317 UNSERIALIZE_SCALAR(regs.rxdp_hi);
2318 UNSERIALIZE_SCALAR(regs.rxcfg);
2319 UNSERIALIZE_SCALAR(regs.pqcr);
2320 UNSERIALIZE_SCALAR(regs.wcsr);
2321 UNSERIALIZE_SCALAR(regs.pcr);
2322 UNSERIALIZE_SCALAR(regs.rfcr);
2323 UNSERIALIZE_SCALAR(regs.rfdr);
2324 UNSERIALIZE_SCALAR(regs.brar);
2325 UNSERIALIZE_SCALAR(regs.brdr);
2326 UNSERIALIZE_SCALAR(regs.srr);
2327 UNSERIALIZE_SCALAR(regs.mibc);
2328 UNSERIALIZE_SCALAR(regs.vrcr);
2329 UNSERIALIZE_SCALAR(regs.vtcr);
2330 UNSERIALIZE_SCALAR(regs.vdr);
2331 UNSERIALIZE_SCALAR(regs.ccsr);
2332 UNSERIALIZE_SCALAR(regs.tbicr);
2333 UNSERIALIZE_SCALAR(regs.tbisr);
2334 UNSERIALIZE_SCALAR(regs.tanar);
2335 UNSERIALIZE_SCALAR(regs.tanlpar);
2336 UNSERIALIZE_SCALAR(regs.taner);
2337 UNSERIALIZE_SCALAR(regs.tesr);
2338
2339 UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
2340 UNSERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);
2341
2342 UNSERIALIZE_SCALAR(ioEnable);
2343
2344 /*
2345 * unserialize the data fifos
2346 */
2347 rxFifo.unserialize("rxFifo", cp, section);
2348 txFifo.unserialize("txFifo", cp, section);
2349
2350 /*
2351 * unserialize the various helper variables
2352 */
2353 bool txPacketExists;
2354 UNSERIALIZE_SCALAR(txPacketExists);
2355 if (txPacketExists) {
2356 txPacket = new EthPacketData(16384);
2357 txPacket->unserialize("txPacket", cp, section);
2358 uint32_t txPktBufPtr;
2359 UNSERIALIZE_SCALAR(txPktBufPtr);
2360 txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
2361 } else
2362 txPacket = 0;
2363
2364 bool rxPacketExists;
2365 UNSERIALIZE_SCALAR(rxPacketExists);
2366 rxPacket = 0;
2367 if (rxPacketExists) {
2368 rxPacket = new EthPacketData(16384);
2369 rxPacket->unserialize("rxPacket", cp, section);
2370 uint32_t rxPktBufPtr;
2371 UNSERIALIZE_SCALAR(rxPktBufPtr);
2372 rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
2373 } else
2374 rxPacket = 0;
2375
2376 UNSERIALIZE_SCALAR(txXferLen);
2377 UNSERIALIZE_SCALAR(rxXferLen);
2378
2379 /*
2380 * Unserialize Cached Descriptors
2381 */
2382 UNSERIALIZE_SCALAR(rxDesc64.link);
2383 UNSERIALIZE_SCALAR(rxDesc64.bufptr);
2384 UNSERIALIZE_SCALAR(rxDesc64.cmdsts);
2385 UNSERIALIZE_SCALAR(rxDesc64.extsts);
2386 UNSERIALIZE_SCALAR(txDesc64.link);
2387 UNSERIALIZE_SCALAR(txDesc64.bufptr);
2388 UNSERIALIZE_SCALAR(txDesc64.cmdsts);
2389 UNSERIALIZE_SCALAR(txDesc64.extsts);
2390 UNSERIALIZE_SCALAR(rxDesc32.link);
2391 UNSERIALIZE_SCALAR(rxDesc32.bufptr);
2392 UNSERIALIZE_SCALAR(rxDesc32.cmdsts);
2393 UNSERIALIZE_SCALAR(rxDesc32.extsts);
2394 UNSERIALIZE_SCALAR(txDesc32.link);
2395 UNSERIALIZE_SCALAR(txDesc32.bufptr);
2396 UNSERIALIZE_SCALAR(txDesc32.cmdsts);
2397 UNSERIALIZE_SCALAR(txDesc32.extsts);
2398 UNSERIALIZE_SCALAR(extstsEnable);
2399
2400 /*
2401 * unserialize tx state machine
2402 */
2403 int txState;
2404 UNSERIALIZE_SCALAR(txState);
2405 this->txState = (TxState) txState;
2406 UNSERIALIZE_SCALAR(txEnable);
2407 UNSERIALIZE_SCALAR(CTDD);
2408 UNSERIALIZE_SCALAR(txFragPtr);
2409 UNSERIALIZE_SCALAR(txDescCnt);
2410 int txDmaState;
2411 UNSERIALIZE_SCALAR(txDmaState);
2412 this->txDmaState = (DmaState) txDmaState;
2413 UNSERIALIZE_SCALAR(txKickTick);
2414 if (txKickTick)
2415 schedule(txKickEvent, txKickTick);
2416
2417 /*
2418 * unserialize rx state machine
2419 */
2420 int rxState;
2421 UNSERIALIZE_SCALAR(rxState);
2422 this->rxState = (RxState) rxState;
2423 UNSERIALIZE_SCALAR(rxEnable);
2424 UNSERIALIZE_SCALAR(CRDD);
2425 UNSERIALIZE_SCALAR(rxPktBytes);
2426 UNSERIALIZE_SCALAR(rxFragPtr);
2427 UNSERIALIZE_SCALAR(rxDescCnt);
2428 int rxDmaState;
2429 UNSERIALIZE_SCALAR(rxDmaState);
2430 this->rxDmaState = (DmaState) rxDmaState;
2431 UNSERIALIZE_SCALAR(rxKickTick);
2432 if (rxKickTick)
2433 schedule(rxKickEvent, rxKickTick);
2434
2435 /*
2436 * Unserialize EEPROM state machine
2437 */
2438 int eepromState;
2439 UNSERIALIZE_SCALAR(eepromState);
2440 this->eepromState = (EEPROMState) eepromState;
2441 UNSERIALIZE_SCALAR(eepromClk);
2442 UNSERIALIZE_SCALAR(eepromBitsToRx);
2443 UNSERIALIZE_SCALAR(eepromOpcode);
2444 UNSERIALIZE_SCALAR(eepromAddress);
2445 UNSERIALIZE_SCALAR(eepromData);
2446
2447 /*
2448 * If there's a pending transmit, reschedule it now
2449 */
2450 Tick transmitTick;
2451 UNSERIALIZE_SCALAR(transmitTick);
2452 if (transmitTick)
2453 schedule(txEvent, curTick() + transmitTick);
2454
2455 /*
2456 * unserialize receive address filter settings
2457 */
2458 UNSERIALIZE_SCALAR(rxFilterEnable);
2459 UNSERIALIZE_SCALAR(acceptBroadcast);
2460 UNSERIALIZE_SCALAR(acceptMulticast);
2461 UNSERIALIZE_SCALAR(acceptUnicast);
2462 UNSERIALIZE_SCALAR(acceptPerfect);
2463 UNSERIALIZE_SCALAR(acceptArp);
2464 UNSERIALIZE_SCALAR(multicastHashEnable);
2465
2466 /*
2467 * Keep track of pending interrupt status.
2468 */
2469 UNSERIALIZE_SCALAR(intrTick);
2470 UNSERIALIZE_SCALAR(cpuPendingIntr);
2471 Tick intrEventTick;
2472 UNSERIALIZE_SCALAR(intrEventTick);
2473 if (intrEventTick) {
2474 intrEvent = new IntrEvent(this, true);
2475 schedule(intrEvent, intrEventTick);
2476 }
2477 }
2478
// Factory hook called by the generated parameter code to instantiate
// the device model from its configuration parameters.
NSGigE *
NSGigEParams::create()
{
    return new NSGigE(this);
}