sim,dev: Get rid of the global retryTime constant.
[gem5.git] / src / dev / net / ns_gige.cc
1 /*
2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /** @file
30 * Device module for modelling the National Semiconductor
31 * DP83820 ethernet controller. Does not support priority queueing
32 */
33
34 #include "dev/net/ns_gige.hh"
35
36 #include <deque>
37 #include <memory>
38 #include <string>
39
40 #include "base/debug.hh"
41 #include "base/inet.hh"
42 #include "base/types.hh"
43 #include "debug/EthernetAll.hh"
44 #include "dev/net/etherlink.hh"
45 #include "mem/packet.hh"
46 #include "mem/packet_access.hh"
47 #include "params/NSGigE.hh"
48 #include "sim/system.hh"
49
50 // clang complains about std::set being overloaded with Packet::set if
51 // we open up the entire namespace std
52 using std::make_shared;
53 using std::min;
54 using std::ostream;
55 using std::string;
56
57 const char *NsRxStateStrings[] =
58 {
59 "rxIdle",
60 "rxDescRefr",
61 "rxDescRead",
62 "rxFifoBlock",
63 "rxFragWrite",
64 "rxDescWrite",
65 "rxAdvance"
66 };
67
68 const char *NsTxStateStrings[] =
69 {
70 "txIdle",
71 "txDescRefr",
72 "txDescRead",
73 "txFifoBlock",
74 "txFragRead",
75 "txDescWrite",
76 "txAdvance"
77 };
78
79 const char *NsDmaState[] =
80 {
81 "dmaIdle",
82 "dmaReading",
83 "dmaWriting",
84 "dmaReadWaiting",
85 "dmaWriteWaiting"
86 };
87
88 using namespace Net;
89
90 ///////////////////////////////////////////////////////////////////////
91 //
92 // NSGigE PCI Device
93 //
94 NSGigE::NSGigE(Params *p)
95 : EtherDevBase(p), ioEnable(false),
96 txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size),
97 txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
98 txXferLen(0), rxXferLen(0), rxDmaFree(false), txDmaFree(false),
99 txState(txIdle), txEnable(false), CTDD(false), txHalt(false),
100 txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
101 rxEnable(false), CRDD(false), rxPktBytes(0), rxHalt(false),
102 rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
103 eepromState(eepromStart), eepromClk(false), eepromBitsToRx(0),
104 eepromOpcode(0), eepromAddress(0), eepromData(0),
105 dmaReadDelay(p->dma_read_delay), dmaWriteDelay(p->dma_write_delay),
106 dmaReadFactor(p->dma_read_factor), dmaWriteFactor(p->dma_write_factor),
107 rxDmaData(NULL), rxDmaAddr(0), rxDmaLen(0),
108 txDmaData(NULL), txDmaAddr(0), txDmaLen(0),
109 rxDmaReadEvent([this]{ rxDmaReadDone(); }, name()),
110 rxDmaWriteEvent([this]{ rxDmaWriteDone(); }, name()),
111 txDmaReadEvent([this]{ txDmaReadDone(); }, name()),
112 txDmaWriteEvent([this]{ txDmaWriteDone(); }, name()),
113 dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free),
114 txDelay(p->tx_delay), rxDelay(p->rx_delay),
115 rxKickTick(0),
116 rxKickEvent([this]{ rxKick(); }, name()),
117 txKickTick(0),
118 txKickEvent([this]{ txKick(); }, name()),
119 txEvent([this]{ txEventTransmit(); }, name()),
120 rxFilterEnable(p->rx_filter),
121 acceptBroadcast(false), acceptMulticast(false), acceptUnicast(false),
122 acceptPerfect(false), acceptArp(false), multicastHashEnable(false),
123 intrDelay(p->intr_delay), intrTick(0), cpuPendingIntr(false),
124 intrEvent(0), interface(0)
125 {
126
127
128 interface = new NSGigEInt(name() + ".int0", this);
129
130 regsReset();
131 memcpy(&rom.perfectMatch, p->hardware_address.bytes(), ETH_ADDR_LEN);
132
133 memset(&rxDesc32, 0, sizeof(rxDesc32));
134 memset(&txDesc32, 0, sizeof(txDesc32));
135 memset(&rxDesc64, 0, sizeof(rxDesc64));
136 memset(&txDesc64, 0, sizeof(txDesc64));
137 }
138
139 NSGigE::~NSGigE()
140 {
141 delete interface;
142 }
143
144 /**
145 * This is to write to the PCI general configuration registers
146 */
147 Tick
148 NSGigE::writeConfig(PacketPtr pkt)
149 {
150 int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
151 if (offset < PCI_DEVICE_SPECIFIC)
152 PciDevice::writeConfig(pkt);
153 else
154 panic("Device specific PCI config space not implemented!\n");
155
156 switch (offset) {
157           // seems to work fine without honoring all these PCI settings,
158           // but track the I/O enable bit to double check; an assertion
159           // will fail if we ever need to properly implement it
160 case PCI_COMMAND:
161 if (config.data[offset] & PCI_CMD_IOSE)
162 ioEnable = true;
163 else
164 ioEnable = false;
165 break;
166 }
167
168 return configDelay;
169 }
170
171 Port &
172 NSGigE::getPort(const std::string &if_name, PortID idx)
173 {
174 if (if_name == "interface")
175 return *interface;
176 return EtherDevBase::getPort(if_name, idx);
177 }
178
179 /**
180 * This reads the device registers, which are detailed in the NS83820
181 * spec sheet
182 */
183 Tick
184 NSGigE::read(PacketPtr pkt)
185 {
186 assert(ioEnable);
187
188 //The mask is to give you only the offset into the device register file
189 Addr daddr = pkt->getAddr() & 0xfff;
190 DPRINTF(EthernetPIO, "read da=%#x pa=%#x size=%d\n",
191 daddr, pkt->getAddr(), pkt->getSize());
192
193
194 // there are some reserved registers, you can see ns_gige_reg.h and
195 // the spec sheet for details
196 if (daddr > LAST && daddr <= RESERVED) {
197 panic("Accessing reserved register");
198 } else if (daddr > RESERVED && daddr <= 0x3FC) {
199 return readConfig(pkt);
200 } else if (daddr >= MIB_START && daddr <= MIB_END) {
201         // We don't implement the MIB counters; they are just hardware
202         // statistics registers, and hopefully the kernel doesn't
203         // actually depend upon their values
204 pkt->setLE<uint32_t>(0);
205 pkt->makeAtomicResponse();
206 return pioDelay;
207 } else if (daddr > 0x3FC)
208 panic("Something is messed up!\n");
209
210 assert(pkt->getSize() == sizeof(uint32_t));
211 uint32_t &reg = *pkt->getPtr<uint32_t>();
212 uint16_t rfaddr;
213
214 switch (daddr) {
215 case CR:
216 reg = regs.command;
217 //these are supposed to be cleared on a read
218 reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
219 break;
220
221 case CFGR:
222 reg = regs.config;
223 break;
224
225 case MEAR:
226 reg = regs.mear;
227 break;
228
229 case PTSCR:
230 reg = regs.ptscr;
231 break;
232
233 case ISR:
234 reg = regs.isr;
235 devIntrClear(ISR_ALL);
236 break;
237
238 case IMR:
239 reg = regs.imr;
240 break;
241
242 case IER:
243 reg = regs.ier;
244 break;
245
246 case IHR:
247 reg = regs.ihr;
248 break;
249
250 case TXDP:
251 reg = regs.txdp;
252 break;
253
254 case TXDP_HI:
255 reg = regs.txdp_hi;
256 break;
257
258 case TX_CFG:
259 reg = regs.txcfg;
260 break;
261
262 case GPIOR:
263 reg = regs.gpior;
264 break;
265
266 case RXDP:
267 reg = regs.rxdp;
268 break;
269
270 case RXDP_HI:
271 reg = regs.rxdp_hi;
272 break;
273
274 case RX_CFG:
275 reg = regs.rxcfg;
276 break;
277
278 case PQCR:
279 reg = regs.pqcr;
280 break;
281
282 case WCSR:
283 reg = regs.wcsr;
284 break;
285
286 case PCR:
287 reg = regs.pcr;
288 break;
289
290 // see the spec sheet for how RFCR and RFDR work
291 // basically, you write to RFCR to tell the machine
292 // what you want to do next, then you act upon RFDR,
293       // and the device will already be set up because of what you
294       // wrote to RFCR
295 case RFCR:
296 reg = regs.rfcr;
297 break;
298
299 case RFDR:
300 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
301 switch (rfaddr) {
302 // Read from perfect match ROM octets
303 case 0x000:
304 reg = rom.perfectMatch[1];
305 reg = reg << 8;
306 reg += rom.perfectMatch[0];
307 break;
308 case 0x002:
309 reg = rom.perfectMatch[3] << 8;
310 reg += rom.perfectMatch[2];
311 break;
312 case 0x004:
313 reg = rom.perfectMatch[5] << 8;
314 reg += rom.perfectMatch[4];
315 break;
316 default:
317 // Read filter hash table
318 if (rfaddr >= FHASH_ADDR &&
319 rfaddr < FHASH_ADDR + FHASH_SIZE) {
320
321 // Only word-aligned reads supported
322 if (rfaddr % 2)
323 panic("unaligned read from filter hash table!");
324
325 reg = rom.filterHash[rfaddr - FHASH_ADDR + 1] << 8;
326 reg += rom.filterHash[rfaddr - FHASH_ADDR];
327 break;
328 }
329
330 panic("reading RFDR for something other than pattern"
331 " matching or hashing! %#x\n", rfaddr);
332 }
333 break;
334
335 case SRR:
336 reg = regs.srr;
337 break;
338
339 case MIBC:
340 reg = regs.mibc;
341 reg &= ~(MIBC_MIBS | MIBC_ACLR);
342 break;
343
344 case VRCR:
345 reg = regs.vrcr;
346 break;
347
348 case VTCR:
349 reg = regs.vtcr;
350 break;
351
352 case VDR:
353 reg = regs.vdr;
354 break;
355
356 case CCSR:
357 reg = regs.ccsr;
358 break;
359
360 case TBICR:
361 reg = regs.tbicr;
362 break;
363
364 case TBISR:
365 reg = regs.tbisr;
366 break;
367
368 case TANAR:
369 reg = regs.tanar;
370 break;
371
372 case TANLPAR:
373 reg = regs.tanlpar;
374 break;
375
376 case TANER:
377 reg = regs.taner;
378 break;
379
380 case TESR:
381 reg = regs.tesr;
382 break;
383
384 case M5REG:
385 reg = 0;
386 if (params()->rx_thread)
387 reg |= M5REG_RX_THREAD;
388 if (params()->tx_thread)
389 reg |= M5REG_TX_THREAD;
390 if (params()->rss)
391 reg |= M5REG_RSS;
392 break;
393
394 default:
395 panic("reading unimplemented register: addr=%#x", daddr);
396 }
397
398 DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
399 daddr, reg, reg);
400
401 pkt->makeAtomicResponse();
402 return pioDelay;
403 }
404
405 Tick
406 NSGigE::write(PacketPtr pkt)
407 {
408 assert(ioEnable);
409
410 Addr daddr = pkt->getAddr() & 0xfff;
411 DPRINTF(EthernetPIO, "write da=%#x pa=%#x size=%d\n",
412 daddr, pkt->getAddr(), pkt->getSize());
413
414 if (daddr > LAST && daddr <= RESERVED) {
415 panic("Accessing reserved register");
416 } else if (daddr > RESERVED && daddr <= 0x3FC) {
417 return writeConfig(pkt);
418 } else if (daddr > 0x3FC)
419 panic("Something is messed up!\n");
420
421 if (pkt->getSize() == sizeof(uint32_t)) {
422 uint32_t reg = pkt->getLE<uint32_t>();
423 uint16_t rfaddr;
424
425 DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);
426
427 switch (daddr) {
428 case CR:
429 regs.command = reg;
430 if (reg & CR_TXD) {
431 txEnable = false;
432 } else if (reg & CR_TXE) {
433 txEnable = true;
434
435 // the kernel is enabling the transmit machine
436 if (txState == txIdle)
437 txKick();
438 }
439
440 if (reg & CR_RXD) {
441 rxEnable = false;
442 } else if (reg & CR_RXE) {
443 rxEnable = true;
444
445 if (rxState == rxIdle)
446 rxKick();
447 }
448
449 if (reg & CR_TXR)
450 txReset();
451
452 if (reg & CR_RXR)
453 rxReset();
454
455 if (reg & CR_SWI)
456 devIntrPost(ISR_SWI);
457
458 if (reg & CR_RST) {
459 txReset();
460 rxReset();
461
462 regsReset();
463 }
464 break;
465
466 case CFGR:
467 if (reg & CFGR_LNKSTS ||
468 reg & CFGR_SPDSTS ||
469 reg & CFGR_DUPSTS ||
470 reg & CFGR_RESERVED ||
471 reg & CFGR_T64ADDR ||
472 reg & CFGR_PCI64_DET) {
473 // First clear all writable bits
474 regs.config &= CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
475 CFGR_RESERVED | CFGR_T64ADDR |
476 CFGR_PCI64_DET;
477 // Now set the appropriate writable bits
478 regs.config |= reg & ~(CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
479 CFGR_RESERVED | CFGR_T64ADDR |
480 CFGR_PCI64_DET);
481 }
482
483 if (reg & CFGR_AUTO_1000)
484 panic("CFGR_AUTO_1000 not implemented!\n");
485
486 if (reg & CFGR_PCI64_DET)
487 panic("CFGR_PCI64_DET is read only register!\n");
488
489 if (reg & CFGR_EXTSTS_EN)
490 extstsEnable = true;
491 else
492 extstsEnable = false;
493 break;
494
495 case MEAR:
496 // Clear writable bits
497 regs.mear &= MEAR_EEDO;
498 // Set appropriate writable bits
499 regs.mear |= reg & ~MEAR_EEDO;
500
501 // FreeBSD uses the EEPROM to read PMATCH (for the MAC address)
502 // even though it could get it through RFDR
503 if (reg & MEAR_EESEL) {
504 // Rising edge of clock
505 if (reg & MEAR_EECLK && !eepromClk)
506 eepromKick();
507 }
508 else {
509 eepromState = eepromStart;
510 regs.mear &= ~MEAR_EEDI;
511 }
512
513 eepromClk = reg & MEAR_EECLK;
514
515 // since phy is completely faked, MEAR_MD* don't matter
516 break;
517
518 case PTSCR:
519 regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
520         // these control BISTs for various parts of the chip - we
521         // don't care, so just fake that the BIST is done
522 if (reg & PTSCR_RBIST_EN)
523 regs.ptscr |= PTSCR_RBIST_DONE;
524 if (reg & PTSCR_EEBIST_EN)
525 regs.ptscr &= ~PTSCR_EEBIST_EN;
526 if (reg & PTSCR_EELOAD_EN)
527 regs.ptscr &= ~PTSCR_EELOAD_EN;
528 break;
529
530 case ISR: /* writing to the ISR has no effect */
531 panic("ISR is a read only register!\n");
532
533 case IMR:
534 regs.imr = reg;
535 devIntrChangeMask();
536 break;
537
538 case IER:
539 regs.ier = reg;
540 break;
541
542 case IHR:
543 regs.ihr = reg;
544 /* not going to implement real interrupt holdoff */
545 break;
546
547 case TXDP:
548 regs.txdp = (reg & 0xFFFFFFFC);
549 assert(txState == txIdle);
550 CTDD = false;
551 break;
552
553 case TXDP_HI:
554 regs.txdp_hi = reg;
555 break;
556
557 case TX_CFG:
558 regs.txcfg = reg;
559
560           // we currently don't model the fill/drain thresholds,
561           // though this may change in the future with more realistic
562           // networks or a driver which tunes them
563           // according to feedback
564
565 break;
566
567 case GPIOR:
568 // Only write writable bits
569 regs.gpior &= GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
570 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN;
571 regs.gpior |= reg & ~(GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
572 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN);
573 /* these just control general purpose i/o pins, don't matter */
574 break;
575
576 case RXDP:
577 regs.rxdp = reg;
578 CRDD = false;
579 break;
580
581 case RXDP_HI:
582 regs.rxdp_hi = reg;
583 break;
584
585 case RX_CFG:
586 regs.rxcfg = reg;
587 break;
588
589 case PQCR:
590             /* there is no priority queueing used in the Linux 2.6 driver */
591 regs.pqcr = reg;
592 break;
593
594 case WCSR:
595 /* not going to implement wake on LAN */
596 regs.wcsr = reg;
597 break;
598
599 case PCR:
600 /* not going to implement pause control */
601 regs.pcr = reg;
602 break;
603
604 case RFCR:
605 regs.rfcr = reg;
606
607 rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
608 acceptBroadcast = (reg & RFCR_AAB) ? true : false;
609 acceptMulticast = (reg & RFCR_AAM) ? true : false;
610 acceptUnicast = (reg & RFCR_AAU) ? true : false;
611 acceptPerfect = (reg & RFCR_APM) ? true : false;
612 acceptArp = (reg & RFCR_AARP) ? true : false;
613 multicastHashEnable = (reg & RFCR_MHEN) ? true : false;
614
615 if (reg & RFCR_UHEN)
616 panic("Unicast hash filtering not used by drivers!\n");
617
618 if (reg & RFCR_ULM)
619 panic("RFCR_ULM not implemented!\n");
620
621 break;
622
623 case RFDR:
624 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
625 switch (rfaddr) {
626 case 0x000:
627 rom.perfectMatch[0] = (uint8_t)reg;
628 rom.perfectMatch[1] = (uint8_t)(reg >> 8);
629 break;
630 case 0x002:
631 rom.perfectMatch[2] = (uint8_t)reg;
632 rom.perfectMatch[3] = (uint8_t)(reg >> 8);
633 break;
634 case 0x004:
635 rom.perfectMatch[4] = (uint8_t)reg;
636 rom.perfectMatch[5] = (uint8_t)(reg >> 8);
637 break;
638 default:
639
640 if (rfaddr >= FHASH_ADDR &&
641 rfaddr < FHASH_ADDR + FHASH_SIZE) {
642
643 // Only word-aligned writes supported
644 if (rfaddr % 2)
645 panic("unaligned write to filter hash table!");
646
647 rom.filterHash[rfaddr - FHASH_ADDR] = (uint8_t)reg;
648 rom.filterHash[rfaddr - FHASH_ADDR + 1]
649 = (uint8_t)(reg >> 8);
650 break;
651 }
652 panic("writing RFDR for something other than pattern matching "
653 "or hashing! %#x\n", rfaddr);
654 }
655 break;
656
657 case BRAR:
658 regs.brar = reg;
659 break;
660
661 case BRDR:
662 panic("the driver never uses BRDR, something is wrong!\n");
663
664 case SRR:
665 panic("SRR is read only register!\n");
666
667 case MIBC:
668 panic("the driver never uses MIBC, something is wrong!\n");
669
670 case VRCR:
671 regs.vrcr = reg;
672 break;
673
674 case VTCR:
675 regs.vtcr = reg;
676 break;
677
678 case VDR:
679 panic("the driver never uses VDR, something is wrong!\n");
680
681 case CCSR:
682 /* not going to implement clockrun stuff */
683 regs.ccsr = reg;
684 break;
685
686 case TBICR:
687 regs.tbicr = reg;
688 if (reg & TBICR_MR_LOOPBACK)
689 panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
690
691 if (reg & TBICR_MR_AN_ENABLE) {
692 regs.tanlpar = regs.tanar;
693 regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
694 }
695
696 break;
697
698 case TBISR:
699 panic("TBISR is read only register!\n");
700
701 case TANAR:
702 // Only write the writable bits
703 regs.tanar &= TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED;
704 regs.tanar |= reg & ~(TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED);
705
706 // Pause capability unimplemented
707 break;
708
709 case TANLPAR:
710 panic("this should only be written to by the fake phy!\n");
711
712 case TANER:
713 panic("TANER is read only register!\n");
714
715 case TESR:
716 regs.tesr = reg;
717 break;
718
719 default:
720 panic("invalid register access daddr=%#x", daddr);
721 }
722 } else {
723 panic("Invalid Request Size");
724 }
725 pkt->makeAtomicResponse();
726 return pioDelay;
727 }
728
729 void
730 NSGigE::devIntrPost(uint32_t interrupts)
731 {
732 if (interrupts & ISR_RESERVE)
733 panic("Cannot set a reserved interrupt");
734
735 if (interrupts & ISR_NOIMPL)
736 warn("interrupt not implemented %#x\n", interrupts);
737
738 interrupts &= ISR_IMPL;
739 regs.isr |= interrupts;
740
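         // Only bump the per-cause interrupt statistics for causes that are
         // enabled in IMR and will therefore actually raise an interrupt.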
741 if (interrupts & regs.imr) {
742 if (interrupts & ISR_SWI) {
743 totalSwi++;
744 }
745 if (interrupts & ISR_RXIDLE) {
746 totalRxIdle++;
747 }
748 if (interrupts & ISR_RXOK) {
749 totalRxOk++;
750 }
751 if (interrupts & ISR_RXDESC) {
752 totalRxDesc++;
753 }
754 if (interrupts & ISR_TXOK) {
755 totalTxOk++;
756 }
757 if (interrupts & ISR_TXIDLE) {
758 totalTxIdle++;
759 }
760 if (interrupts & ISR_TXDESC) {
761 totalTxDesc++;
762 }
763 if (interrupts & ISR_RXORN) {
764 totalRxOrn++;
765 }
766 }
767
768 DPRINTF(EthernetIntr,
769 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
770 interrupts, regs.isr, regs.imr);
771
772 if ((regs.isr & regs.imr)) {
773 Tick when = curTick();
774 if ((regs.isr & regs.imr & ISR_NODELAY) == 0)
775 when += intrDelay;
776 postedInterrupts++;
777 cpuIntrPost(when);
778 }
779 }
780
781 /* Keeping the interrupt counting stats inside this function means it is
782    now limited to clearing all interrupts when the kernel reads and
783    services the ISR. Keep that in mind if you are thinking of
784    expanding its use.
785 */
786 void
787 NSGigE::devIntrClear(uint32_t interrupts)
788 {
789 if (interrupts & ISR_RESERVE)
790 panic("Cannot clear a reserved interrupt");
791
792 if (regs.isr & regs.imr & ISR_SWI) {
793 postedSwi++;
794 }
795 if (regs.isr & regs.imr & ISR_RXIDLE) {
796 postedRxIdle++;
797 }
798 if (regs.isr & regs.imr & ISR_RXOK) {
799 postedRxOk++;
800 }
801 if (regs.isr & regs.imr & ISR_RXDESC) {
802 postedRxDesc++;
803 }
804 if (regs.isr & regs.imr & ISR_TXOK) {
805 postedTxOk++;
806 }
807 if (regs.isr & regs.imr & ISR_TXIDLE) {
808 postedTxIdle++;
809 }
810 if (regs.isr & regs.imr & ISR_TXDESC) {
811 postedTxDesc++;
812 }
813 if (regs.isr & regs.imr & ISR_RXORN) {
814 postedRxOrn++;
815 }
816
817 interrupts &= ~ISR_NOIMPL;
818 regs.isr &= ~interrupts;
819
820 DPRINTF(EthernetIntr,
821 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
822 interrupts, regs.isr, regs.imr);
823
824 if (!(regs.isr & regs.imr))
825 cpuIntrClear();
826 }
827
828 void
829 NSGigE::devIntrChangeMask()
830 {
831 DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
832 regs.isr, regs.imr, regs.isr & regs.imr);
833
834 if (regs.isr & regs.imr)
835 cpuIntrPost(curTick());
836 else
837 cpuIntrClear();
838 }
839
840 void
841 NSGigE::cpuIntrPost(Tick when)
842 {
843 // If the interrupt you want to post is later than an interrupt
844 // already scheduled, just let it post in the coming one and don't
845 // schedule another.
846 // HOWEVER, must be sure that the scheduled intrTick is in the
847 // future (this was formerly the source of a bug)
848 /**
849 * @todo this warning should be removed and the intrTick code should
850 * be fixed.
851 */
852 assert(when >= curTick());
853 assert(intrTick >= curTick() || intrTick == 0);
854 if (when > intrTick && intrTick != 0) {
855 DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
856 intrTick);
857 return;
858 }
859
860 intrTick = when;
861 if (intrTick < curTick()) {
862 intrTick = curTick();
863 }
864
865 DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
866 intrTick);
867
868 if (intrEvent)
869 intrEvent->squash();
870
871 intrEvent = new EventFunctionWrapper([this]{ cpuInterrupt(); },
872 name(), true);
873 schedule(intrEvent, intrTick);
874 }
875
876 void
877 NSGigE::cpuInterrupt()
878 {
879 assert(intrTick == curTick());
880
881 // Whether or not there's a pending interrupt, we don't care about
882 // it anymore
883 intrEvent = 0;
884 intrTick = 0;
885
886 // Don't send an interrupt if there's already one
887 if (cpuPendingIntr) {
888 DPRINTF(EthernetIntr,
889 "would send an interrupt now, but there's already pending\n");
890 } else {
891 // Send interrupt
892 cpuPendingIntr = true;
893
894 DPRINTF(EthernetIntr, "posting interrupt\n");
895 intrPost();
896 }
897 }
898
899 void
900 NSGigE::cpuIntrClear()
901 {
902 if (!cpuPendingIntr)
903 return;
904
905 if (intrEvent) {
906 intrEvent->squash();
907 intrEvent = 0;
908 }
909
910 intrTick = 0;
911
912 cpuPendingIntr = false;
913
914 DPRINTF(EthernetIntr, "clearing interrupt\n");
915 intrClear();
916 }
917
918 bool
919 NSGigE::cpuIntrPending() const
920 { return cpuPendingIntr; }
921
922 void
923 NSGigE::txReset()
924 {
925
926 DPRINTF(Ethernet, "transmit reset\n");
927
928 CTDD = false;
929     txEnable = false;
930 txFragPtr = 0;
931 assert(txDescCnt == 0);
932 txFifo.clear();
933 txState = txIdle;
934 assert(txDmaState == dmaIdle);
935 }
936
937 void
938 NSGigE::rxReset()
939 {
940 DPRINTF(Ethernet, "receive reset\n");
941
942 CRDD = false;
943 assert(rxPktBytes == 0);
944 rxEnable = false;
945 rxFragPtr = 0;
946 assert(rxDescCnt == 0);
947 assert(rxDmaState == dmaIdle);
948 rxFifo.clear();
949 rxState = rxIdle;
950 }
951
952 void
953 NSGigE::regsReset()
954 {
955 memset(&regs, 0, sizeof(regs));
956 regs.config = (CFGR_LNKSTS | CFGR_TBI_EN | CFGR_MODE_1000);
957 regs.mear = 0x12;
958 regs.txcfg = 0x120; // set drain threshold to 1024 bytes and
959 // fill threshold to 32 bytes
960 regs.rxcfg = 0x4; // set drain threshold to 16 bytes
961 regs.srr = 0x0103; // set the silicon revision to rev B or 0x103
962 regs.mibc = MIBC_FRZ;
963 regs.vdr = 0x81; // set the vlan tag type to 802.1q
964 regs.tesr = 0xc000; // TBI capable of both full and half duplex
965 regs.brar = 0xffffffff;
966
967 extstsEnable = false;
968 acceptBroadcast = false;
969 acceptMulticast = false;
970 acceptUnicast = false;
971 acceptPerfect = false;
972 acceptArp = false;
973 }
974
975 bool
976 NSGigE::doRxDmaRead()
977 {
978 assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
979 rxDmaState = dmaReading;
980
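         // If another DMA is already outstanding, or the device is draining,
         // park the state machine in the waiting state; the kick routines
         // will retry the DMA once the blocking condition clears.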
981 if (dmaPending() || drainState() != DrainState::Running)
982 rxDmaState = dmaReadWaiting;
983 else
984 dmaRead(rxDmaAddr, rxDmaLen, &rxDmaReadEvent, (uint8_t*)rxDmaData);
985
986 return true;
987 }
988
989 void
990 NSGigE::rxDmaReadDone()
991 {
992 assert(rxDmaState == dmaReading);
993 rxDmaState = dmaIdle;
994
995 DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
996 rxDmaAddr, rxDmaLen);
997 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
998
999 // If the transmit state machine has a pending DMA, let it go first
1000 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1001 txKick();
1002
1003 rxKick();
1004 }
1005
1006 bool
1007 NSGigE::doRxDmaWrite()
1008 {
1009 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
1010 rxDmaState = dmaWriting;
1011
1012 if (dmaPending() || drainState() != DrainState::Running)
1013 rxDmaState = dmaWriteWaiting;
1014 else
1015 dmaWrite(rxDmaAddr, rxDmaLen, &rxDmaWriteEvent, (uint8_t*)rxDmaData);
1016 return true;
1017 }
1018
1019 void
1020 NSGigE::rxDmaWriteDone()
1021 {
1022 assert(rxDmaState == dmaWriting);
1023 rxDmaState = dmaIdle;
1024
1025 DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
1026 rxDmaAddr, rxDmaLen);
1027 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1028
1029 // If the transmit state machine has a pending DMA, let it go first
1030 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1031 txKick();
1032
1033 rxKick();
1034 }
1035
1036 void
1037 NSGigE::rxKick()
1038 {
1039 bool is64bit = (bool)(regs.config & CFGR_M64ADDR);
1040
1041 DPRINTF(EthernetSM,
1042 "receive kick rxState=%s (rxBuf.size=%d) %d-bit\n",
1043 NsRxStateStrings[rxState], rxFifo.size(), is64bit ? 64 : 32);
1044
1045 Addr link, bufptr;
1046 uint32_t &cmdsts = is64bit ? rxDesc64.cmdsts : rxDesc32.cmdsts;
1047 uint32_t &extsts = is64bit ? rxDesc64.extsts : rxDesc32.extsts;
1048
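         // rxKickTick throttles the state machine to one transition per clock
         // edge: if we are kicked before that time we just exit and let the
         // kick event (scheduled at rxKickTick on exit) restart the machine.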
1049 next:
1050 if (rxKickTick > curTick()) {
1051 DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
1052 rxKickTick);
1053
1054 goto exit;
1055 }
1056
1057 // Go to the next state machine clock tick.
1058 rxKickTick = clockEdge(Cycles(1));
1059
1060 switch(rxDmaState) {
1061 case dmaReadWaiting:
1062 if (doRxDmaRead())
1063 goto exit;
1064 break;
1065 case dmaWriteWaiting:
1066 if (doRxDmaWrite())
1067 goto exit;
1068 break;
1069 default:
1070 break;
1071 }
1072
1073 link = is64bit ? (Addr)rxDesc64.link : (Addr)rxDesc32.link;
1074 bufptr = is64bit ? (Addr)rxDesc64.bufptr : (Addr)rxDesc32.bufptr;
1075
1076 // see state machine from spec for details
1077 // the way this works is, if you finish work on one state and can
1078 // go directly to another, you do that through jumping to the
1079 // label "next". however, if you have intermediate work, like DMA
1080 // so that you can't go to the next state yet, you go to exit and
1081 // exit the loop. however, when the DMA is done it will trigger
1082 // an event and come back to this loop.
1083 switch (rxState) {
1084 case rxIdle:
1085 if (!rxEnable) {
1086 DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n");
1087 goto exit;
1088 }
1089
1090 if (CRDD) {
1091 rxState = rxDescRefr;
1092
1093 rxDmaAddr = regs.rxdp & 0x3fffffff;
1094 rxDmaData =
1095 is64bit ? (void *)&rxDesc64.link : (void *)&rxDesc32.link;
1096 rxDmaLen = is64bit ? sizeof(rxDesc64.link) : sizeof(rxDesc32.link);
1097 rxDmaFree = dmaDescFree;
1098
1099 descDmaReads++;
1100 descDmaRdBytes += rxDmaLen;
1101
1102 if (doRxDmaRead())
1103 goto exit;
1104 } else {
1105 rxState = rxDescRead;
1106
1107 rxDmaAddr = regs.rxdp & 0x3fffffff;
1108 rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
1109 rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
1110 rxDmaFree = dmaDescFree;
1111
1112 descDmaReads++;
1113 descDmaRdBytes += rxDmaLen;
1114
1115 if (doRxDmaRead())
1116 goto exit;
1117 }
1118 break;
1119
1120 case rxDescRefr:
1121 if (rxDmaState != dmaIdle)
1122 goto exit;
1123
1124 rxState = rxAdvance;
1125 break;
1126
1127 case rxDescRead:
1128 if (rxDmaState != dmaIdle)
1129 goto exit;
1130
1131 DPRINTF(EthernetDesc, "rxDesc: addr=%08x read descriptor\n",
1132 regs.rxdp & 0x3fffffff);
1133 DPRINTF(EthernetDesc,
1134 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
1135 link, bufptr, cmdsts, extsts);
1136
1137 if (cmdsts & CMDSTS_OWN) {
1138 devIntrPost(ISR_RXIDLE);
1139 rxState = rxIdle;
1140 goto exit;
1141 } else {
1142 rxState = rxFifoBlock;
1143 rxFragPtr = bufptr;
1144 rxDescCnt = cmdsts & CMDSTS_LEN_MASK;
1145 }
1146 break;
1147
1148 case rxFifoBlock:
1149 if (!rxPacket) {
1150 /**
1151 * @todo in reality, we should be able to start processing
1152 * the packet as it arrives, and not have to wait for the
1153          * full packet to be in the receive fifo.
1154 */
1155 if (rxFifo.empty())
1156 goto exit;
1157
1158 DPRINTF(EthernetSM, "****processing receive of new packet****\n");
1159
1160 // If we don't have a packet, grab a new one from the fifo.
1161 rxPacket = rxFifo.front();
1162 rxPktBytes = rxPacket->length;
1163 rxPacketBufPtr = rxPacket->data;
1164
1165 #if TRACING_ON
1166 if (DTRACE(Ethernet)) {
1167 IpPtr ip(rxPacket);
1168 if (ip) {
1169 DPRINTF(Ethernet, "ID is %d\n", ip->id());
1170 TcpPtr tcp(ip);
1171 if (tcp) {
1172 DPRINTF(Ethernet,
1173 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1174 tcp->sport(), tcp->dport(), tcp->seq(),
1175 tcp->ack());
1176 }
1177 }
1178 }
1179 #endif
1180
1181             // sanity check - I think the driver behaves like this
1182 assert(rxDescCnt >= rxPktBytes);
1183 rxFifo.pop();
1184 }
1185
1186
1187         // don't need the && rxDescCnt > 0 if the driver sanity check
1188         // above holds
1189 if (rxPktBytes > 0) {
1190 rxState = rxFragWrite;
1191 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
1192 // check holds
1193 rxXferLen = rxPktBytes;
1194
1195 rxDmaAddr = rxFragPtr & 0x3fffffff;
1196 rxDmaData = rxPacketBufPtr;
1197 rxDmaLen = rxXferLen;
1198 rxDmaFree = dmaDataFree;
1199
1200 if (doRxDmaWrite())
1201 goto exit;
1202
1203 } else {
1204 rxState = rxDescWrite;
1205
1206 //if (rxPktBytes == 0) { /* packet is done */
1207 assert(rxPktBytes == 0);
1208 DPRINTF(EthernetSM, "done with receiving packet\n");
1209
1210 cmdsts |= CMDSTS_OWN;
1211 cmdsts &= ~CMDSTS_MORE;
1212 cmdsts |= CMDSTS_OK;
1213 cmdsts &= 0xffff0000;
1214 cmdsts += rxPacket->length; //i.e. set CMDSTS_SIZE
1215
1216 IpPtr ip(rxPacket);
1217 if (extstsEnable && ip) {
1218 extsts |= EXTSTS_IPPKT;
1219 rxIpChecksums++;
1220 if (cksum(ip) != 0) {
1221 DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
1222 extsts |= EXTSTS_IPERR;
1223 }
1224 TcpPtr tcp(ip);
1225 UdpPtr udp(ip);
1226 if (tcp) {
1227 extsts |= EXTSTS_TCPPKT;
1228 rxTcpChecksums++;
1229 if (cksum(tcp) != 0) {
1230 DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
1231 extsts |= EXTSTS_TCPERR;
1232
1233 }
1234 } else if (udp) {
1235 extsts |= EXTSTS_UDPPKT;
1236 rxUdpChecksums++;
1237 if (cksum(udp) != 0) {
1238 DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
1239 extsts |= EXTSTS_UDPERR;
1240 }
1241 }
1242 }
1243 rxPacket = 0;
1244
1245 /*
1246 * the driver seems to always receive into desc buffers
1247 * of size 1514, so you never have a pkt that is split
1248 * into multiple descriptors on the receive side, so
1249          * I don't implement that case, hence the assert above.
1250 */
1251
1252 DPRINTF(EthernetDesc,
1253 "rxDesc: addr=%08x writeback cmdsts extsts\n",
1254 regs.rxdp & 0x3fffffff);
1255 DPRINTF(EthernetDesc,
1256 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
1257 link, bufptr, cmdsts, extsts);
1258
1259 rxDmaAddr = regs.rxdp & 0x3fffffff;
1260 rxDmaData = &cmdsts;
1261 if (is64bit) {
1262 rxDmaAddr += offsetof(ns_desc64, cmdsts);
1263 rxDmaLen = sizeof(rxDesc64.cmdsts) + sizeof(rxDesc64.extsts);
1264 } else {
1265 rxDmaAddr += offsetof(ns_desc32, cmdsts);
1266 rxDmaLen = sizeof(rxDesc32.cmdsts) + sizeof(rxDesc32.extsts);
1267 }
1268 rxDmaFree = dmaDescFree;
1269
1270 descDmaWrites++;
1271 descDmaWrBytes += rxDmaLen;
1272
1273 if (doRxDmaWrite())
1274 goto exit;
1275 }
1276 break;
1277
1278 case rxFragWrite:
1279 if (rxDmaState != dmaIdle)
1280 goto exit;
1281
1282 rxPacketBufPtr += rxXferLen;
1283 rxFragPtr += rxXferLen;
1284 rxPktBytes -= rxXferLen;
1285
1286 rxState = rxFifoBlock;
1287 break;
1288
1289 case rxDescWrite:
1290 if (rxDmaState != dmaIdle)
1291 goto exit;
1292
1293 assert(cmdsts & CMDSTS_OWN);
1294
1295 assert(rxPacket == 0);
1296 devIntrPost(ISR_RXOK);
1297
1298 if (cmdsts & CMDSTS_INTR)
1299 devIntrPost(ISR_RXDESC);
1300
1301 if (!rxEnable) {
1302 DPRINTF(EthernetSM, "Halting the RX state machine\n");
1303 rxState = rxIdle;
1304 goto exit;
1305 } else
1306 rxState = rxAdvance;
1307 break;
1308
1309 case rxAdvance:
1310 if (link == 0) {
1311 devIntrPost(ISR_RXIDLE);
1312 rxState = rxIdle;
1313 CRDD = true;
1314 goto exit;
1315 } else {
1316 if (rxDmaState != dmaIdle)
1317 goto exit;
1318 rxState = rxDescRead;
1319 regs.rxdp = link;
1320 CRDD = false;
1321
1322 rxDmaAddr = regs.rxdp & 0x3fffffff;
1323 rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
1324 rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
1325 rxDmaFree = dmaDescFree;
1326
1327 if (doRxDmaRead())
1328 goto exit;
1329 }
1330 break;
1331
1332 default:
1333 panic("Invalid rxState!");
1334 }
1335
1336 DPRINTF(EthernetSM, "entering next rxState=%s\n",
1337 NsRxStateStrings[rxState]);
1338 goto next;
1339
1340 exit:
1341 /**
1342 * @todo do we want to schedule a future kick?
1343 */
1344 DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
1345 NsRxStateStrings[rxState]);
1346
1347 if (!rxKickEvent.scheduled())
1348 schedule(rxKickEvent, rxKickTick);
1349 }
1350
1351 void
1352 NSGigE::transmit()
1353 {
1354 if (txFifo.empty()) {
1355 DPRINTF(Ethernet, "nothing to transmit\n");
1356 return;
1357 }
1358
1359 DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n",
1360 txFifo.size());
1361 if (interface->sendPacket(txFifo.front())) {
1362 #if TRACING_ON
1363 if (DTRACE(Ethernet)) {
1364 IpPtr ip(txFifo.front());
1365 if (ip) {
1366 DPRINTF(Ethernet, "ID is %d\n", ip->id());
1367 TcpPtr tcp(ip);
1368 if (tcp) {
1369 DPRINTF(Ethernet,
1370 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1371 tcp->sport(), tcp->dport(), tcp->seq(),
1372 tcp->ack());
1373 }
1374 }
1375 }
1376 #endif
1377
1378 DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length);
1379 txBytes += txFifo.front()->length;
1380 txPackets++;
1381
1382 DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n",
1383 txFifo.avail());
1384 txFifo.pop();
1385
1386 /*
1387          * Normally the descriptor would be written back here, and ONLY
1388          * after that is done would this interrupt be sent. But since
1389          * our transmits never actually fail, just post the interrupt
1390          * here; otherwise the code has to stray from this nice format,
1391          * and besides, it's functionally the same.
1392 */
1393 devIntrPost(ISR_TXOK);
1394 }
1395
1396 if (!txFifo.empty() && !txEvent.scheduled()) {
1397 DPRINTF(Ethernet, "reschedule transmit\n");
1398 schedule(txEvent, curTick() + SimClock::Int::ns);
1399 }
1400 }
1401
1402 bool
1403 NSGigE::doTxDmaRead()
1404 {
1405 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
1406 txDmaState = dmaReading;
1407
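         // Same deferral policy as doRxDmaRead(): wait if a DMA is already
         // pending or the device is draining, and retry from the kick.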
1408 if (dmaPending() || drainState() != DrainState::Running)
1409 txDmaState = dmaReadWaiting;
1410 else
1411 dmaRead(txDmaAddr, txDmaLen, &txDmaReadEvent, (uint8_t*)txDmaData);
1412
1413 return true;
1414 }
1415
1416 void
1417 NSGigE::txDmaReadDone()
1418 {
1419 assert(txDmaState == dmaReading);
1420 txDmaState = dmaIdle;
1421
1422 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
1423 txDmaAddr, txDmaLen);
1424 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1425
1426 // If the receive state machine has a pending DMA, let it go first
1427 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1428 rxKick();
1429
1430 txKick();
1431 }
1432
1433 bool
1434 NSGigE::doTxDmaWrite()
1435 {
1436 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
1437 txDmaState = dmaWriting;
1438
1439 if (dmaPending() || drainState() != DrainState::Running)
1440 txDmaState = dmaWriteWaiting;
1441 else
1442 dmaWrite(txDmaAddr, txDmaLen, &txDmaWriteEvent, (uint8_t*)txDmaData);
1443 return true;
1444 }
1445
1446 void
1447 NSGigE::txDmaWriteDone()
1448 {
1449 assert(txDmaState == dmaWriting);
1450 txDmaState = dmaIdle;
1451
1452 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
1453 txDmaAddr, txDmaLen);
1454 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1455
1456 // If the receive state machine has a pending DMA, let it go first
1457 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1458 rxKick();
1459
1460 txKick();
1461 }
1462
1463 void
1464 NSGigE::txKick()
1465 {
1466 bool is64bit = (bool)(regs.config & CFGR_M64ADDR);
1467
1468 DPRINTF(EthernetSM, "transmit kick txState=%s %d-bit\n",
1469 NsTxStateStrings[txState], is64bit ? 64 : 32);
1470
1471 Addr link, bufptr;
1472 uint32_t &cmdsts = is64bit ? txDesc64.cmdsts : txDesc32.cmdsts;
1473 uint32_t &extsts = is64bit ? txDesc64.extsts : txDesc32.extsts;
1474
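         // Same structure as rxKick(): one state transition per clock edge,
         // exiting whenever the machine must wait on a DMA or the FIFO.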
1475 next:
1476 if (txKickTick > curTick()) {
1477 DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
1478 txKickTick);
1479 goto exit;
1480 }
1481
1482 // Go to the next state machine clock tick.
1483 txKickTick = clockEdge(Cycles(1));
1484
1485 switch(txDmaState) {
1486 case dmaReadWaiting:
1487 if (doTxDmaRead())
1488 goto exit;
1489 break;
1490 case dmaWriteWaiting:
1491 if (doTxDmaWrite())
1492 goto exit;
1493 break;
1494 default:
1495 break;
1496 }
1497
1498 link = is64bit ? (Addr)txDesc64.link : (Addr)txDesc32.link;
1499 bufptr = is64bit ? (Addr)txDesc64.bufptr : (Addr)txDesc32.bufptr;
1500 switch (txState) {
1501 case txIdle:
1502 if (!txEnable) {
1503 DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n");
1504 goto exit;
1505 }
1506
1507 if (CTDD) {
1508 txState = txDescRefr;
1509
1510 txDmaAddr = regs.txdp & 0x3fffffff;
1511 txDmaData =
1512 is64bit ? (void *)&txDesc64.link : (void *)&txDesc32.link;
1513 txDmaLen = is64bit ? sizeof(txDesc64.link) : sizeof(txDesc32.link);
1514 txDmaFree = dmaDescFree;
1515
1516 descDmaReads++;
1517 descDmaRdBytes += txDmaLen;
1518
1519 if (doTxDmaRead())
1520 goto exit;
1521
1522 } else {
1523 txState = txDescRead;
1524
1525 txDmaAddr = regs.txdp & 0x3fffffff;
1526 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
1527 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
1528 txDmaFree = dmaDescFree;
1529
1530 descDmaReads++;
1531 descDmaRdBytes += txDmaLen;
1532
1533 if (doTxDmaRead())
1534 goto exit;
1535 }
1536 break;
1537
1538 case txDescRefr:
1539 if (txDmaState != dmaIdle)
1540 goto exit;
1541
1542 txState = txAdvance;
1543 break;
1544
1545 case txDescRead:
1546 if (txDmaState != dmaIdle)
1547 goto exit;
1548
1549 DPRINTF(EthernetDesc, "txDesc: addr=%08x read descriptor\n",
1550 regs.txdp & 0x3fffffff);
1551 DPRINTF(EthernetDesc,
1552 "txDesc: link=%#x bufptr=%#x cmdsts=%#08x extsts=%#08x\n",
1553 link, bufptr, cmdsts, extsts);
1554
1555 if (cmdsts & CMDSTS_OWN) {
1556 txState = txFifoBlock;
1557 txFragPtr = bufptr;
1558 txDescCnt = cmdsts & CMDSTS_LEN_MASK;
1559 } else {
1560 devIntrPost(ISR_TXIDLE);
1561 txState = txIdle;
1562 goto exit;
1563 }
1564 break;
1565
1566 case txFifoBlock:
1567 if (!txPacket) {
1568 DPRINTF(EthernetSM, "****starting the tx of a new packet****\n");
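             // Allocate a generously sized buffer up front; the real length
             // is filled in once all fragments have been DMAed into it.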
1569 txPacket = make_shared<EthPacketData>(16384);
1570 txPacketBufPtr = txPacket->data;
1571 }
1572
1573 if (txDescCnt == 0) {
1574 DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n");
1575 if (cmdsts & CMDSTS_MORE) {
1576 DPRINTF(EthernetSM, "there are more descriptors to come\n");
1577 txState = txDescWrite;
1578
1579 cmdsts &= ~CMDSTS_OWN;
1580
1581 txDmaAddr = regs.txdp & 0x3fffffff;
1582 txDmaData = &cmdsts;
1583 if (is64bit) {
1584 txDmaAddr += offsetof(ns_desc64, cmdsts);
1585 txDmaLen = sizeof(txDesc64.cmdsts);
1586 } else {
1587 txDmaAddr += offsetof(ns_desc32, cmdsts);
1588 txDmaLen = sizeof(txDesc32.cmdsts);
1589 }
1590 txDmaFree = dmaDescFree;
1591
1592 if (doTxDmaWrite())
1593 goto exit;
1594
1595 } else { /* this packet is totally done */
1596 DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
1597             /* deal with the packet that just finished */
1598 if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
1599 IpPtr ip(txPacket);
1600 if (extsts & EXTSTS_UDPPKT) {
1601 UdpPtr udp(ip);
1602 if (udp) {
1603 udp->sum(0);
1604 udp->sum(cksum(udp));
1605 txUdpChecksums++;
1606 } else {
1607 Debug::breakpoint();
1608 warn_once("UDPPKT set, but not UDP!\n");
1609 }
1610 } else if (extsts & EXTSTS_TCPPKT) {
1611 TcpPtr tcp(ip);
1612 if (tcp) {
1613 tcp->sum(0);
1614 tcp->sum(cksum(tcp));
1615 txTcpChecksums++;
1616 } else {
1617                             warn_once("TCPPKT set, but not TCP!\n");
1618 }
1619 }
1620 if (extsts & EXTSTS_IPPKT) {
1621 if (ip) {
1622 ip->sum(0);
1623 ip->sum(cksum(ip));
1624 txIpChecksums++;
1625 } else {
1626                             warn_once("IPPKT set, but not IP!\n");
1627 }
1628 }
1629 }
1630
1631 txPacket->simLength = txPacketBufPtr - txPacket->data;
1632 txPacket->length = txPacketBufPtr - txPacket->data;
1633                 // the receive side can't handle a packet bigger than
1634                 // 1514 bytes, so make sure we never send one
1635 if (txPacket->length > 1514)
1636                     panic("transmit packet too large, %d > 1514\n",
1637 txPacket->length);
1638
1639 #ifndef NDEBUG
1640 bool success =
1641 #endif
1642 txFifo.push(txPacket);
1643 assert(success);
1644
1645 /*
1646                  * the following section is not to spec, but
1647                  * functionally shouldn't be any different. normally,
1648                  * the chip will wait until the transmit has occurred
1649 * before writing back the descriptor because it has
1650 * to wait to see that it was successfully transmitted
1651 * to decide whether to set CMDSTS_OK or not.
1652 * however, in the simulator since it is always
1653 * successfully transmitted, and writing it exactly to
1654 * spec would complicate the code, we just do it here
1655 */
1656
1657 cmdsts &= ~CMDSTS_OWN;
1658 cmdsts |= CMDSTS_OK;
1659
1660 DPRINTF(EthernetDesc,
1661 "txDesc writeback: cmdsts=%08x extsts=%08x\n",
1662 cmdsts, extsts);
1663
1664 txDmaFree = dmaDescFree;
1665 txDmaAddr = regs.txdp & 0x3fffffff;
1666 txDmaData = &cmdsts;
1667 if (is64bit) {
1668 txDmaAddr += offsetof(ns_desc64, cmdsts);
1669 txDmaLen =
1670 sizeof(txDesc64.cmdsts) + sizeof(txDesc64.extsts);
1671 } else {
1672 txDmaAddr += offsetof(ns_desc32, cmdsts);
1673 txDmaLen =
1674 sizeof(txDesc32.cmdsts) + sizeof(txDesc32.extsts);
1675 }
1676
1677 descDmaWrites++;
1678 descDmaWrBytes += txDmaLen;
1679
1680 transmit();
1681 txPacket = 0;
1682
1683 if (!txEnable) {
1684 DPRINTF(EthernetSM, "halting TX state machine\n");
1685 txState = txIdle;
1686 goto exit;
1687 } else
1688 txState = txAdvance;
1689
1690 if (doTxDmaWrite())
1691 goto exit;
1692 }
1693 } else {
1694 DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
1695 if (!txFifo.full()) {
1696 txState = txFragRead;
1697
1698 /*
1699 * The number of bytes transferred is either whatever
1700 * is left in the descriptor (txDescCnt), or if there
1701 * is not enough room in the fifo, just whatever room
1702 * is left in the fifo
1703 */
1704 txXferLen = min<uint32_t>(txDescCnt, txFifo.avail());
1705
1706 txDmaAddr = txFragPtr & 0x3fffffff;
1707 txDmaData = txPacketBufPtr;
1708 txDmaLen = txXferLen;
1709 txDmaFree = dmaDataFree;
1710
1711 if (doTxDmaRead())
1712 goto exit;
1713 } else {
1714 txState = txFifoBlock;
1715 transmit();
1716
1717 goto exit;
1718 }
1719
1720 }
1721 break;
1722
1723 case txFragRead:
1724 if (txDmaState != dmaIdle)
1725 goto exit;
1726
1727 txPacketBufPtr += txXferLen;
1728 txFragPtr += txXferLen;
1729 txDescCnt -= txXferLen;
1730 txFifo.reserve(txXferLen);
1731
1732 txState = txFifoBlock;
1733 break;
1734
1735 case txDescWrite:
1736 if (txDmaState != dmaIdle)
1737 goto exit;
1738
1739 if (cmdsts & CMDSTS_INTR)
1740 devIntrPost(ISR_TXDESC);
1741
1742 if (!txEnable) {
1743 DPRINTF(EthernetSM, "halting TX state machine\n");
1744 txState = txIdle;
1745 goto exit;
1746 } else
1747 txState = txAdvance;
1748 break;
1749
1750 case txAdvance:
1751 if (link == 0) {
1752 devIntrPost(ISR_TXIDLE);
1753 txState = txIdle;
1754 goto exit;
1755 } else {
1756 if (txDmaState != dmaIdle)
1757 goto exit;
1758 txState = txDescRead;
1759 regs.txdp = link;
1760 CTDD = false;
1761
1762 txDmaAddr = link & 0x3fffffff;
1763 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
1764 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
1765 txDmaFree = dmaDescFree;
1766
1767 if (doTxDmaRead())
1768 goto exit;
1769 }
1770 break;
1771
1772 default:
1773 panic("invalid state");
1774 }
1775
1776 DPRINTF(EthernetSM, "entering next txState=%s\n",
1777 NsTxStateStrings[txState]);
1778 goto next;
1779
1780 exit:
1781 /**
1782 * @todo do we want to schedule a future kick?
1783 */
1784 DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
1785 NsTxStateStrings[txState]);
1786
1787 if (!txKickEvent.scheduled())
1788 schedule(txKickEvent, txKickTick);
1789 }
1790
1791 /**
1792 * Advance the EEPROM state machine
1793 * Called on rising edge of EEPROM clock bit in MEAR
1794 */
1795 void
1796 NSGigE::eepromKick()
1797 {
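         // A serial EEPROM read transaction: wait for the start bit, shift in
         // a 2-bit opcode (only READ is implemented) and a 6-bit address,
         // then shift the 16 data bits out MSB first through MEAR_EEDO.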
1798 switch (eepromState) {
1799
1800 case eepromStart:
1801
1802 // Wait for start bit
1803 if (regs.mear & MEAR_EEDI) {
1804 // Set up to get 2 opcode bits
1805 eepromState = eepromGetOpcode;
1806 eepromBitsToRx = 2;
1807 eepromOpcode = 0;
1808 }
1809 break;
1810
1811 case eepromGetOpcode:
1812 eepromOpcode <<= 1;
1813 eepromOpcode += (regs.mear & MEAR_EEDI) ? 1 : 0;
1814 --eepromBitsToRx;
1815
1816 // Done getting opcode
1817 if (eepromBitsToRx == 0) {
1818 if (eepromOpcode != EEPROM_READ)
1819 panic("only EEPROM reads are implemented!");
1820
1821 // Set up to get address
1822 eepromState = eepromGetAddress;
1823 eepromBitsToRx = 6;
1824 eepromAddress = 0;
1825 }
1826 break;
1827
1828 case eepromGetAddress:
1829 eepromAddress <<= 1;
1830 eepromAddress += (regs.mear & MEAR_EEDI) ? 1 : 0;
1831 --eepromBitsToRx;
1832
1833 // Done getting address
1834 if (eepromBitsToRx == 0) {
1835
1836 if (eepromAddress >= EEPROM_SIZE)
1837 panic("EEPROM read access out of range!");
1838
1839 switch (eepromAddress) {
1840
1841 case EEPROM_PMATCH2_ADDR:
1842 eepromData = rom.perfectMatch[5];
1843 eepromData <<= 8;
1844 eepromData += rom.perfectMatch[4];
1845 break;
1846
1847 case EEPROM_PMATCH1_ADDR:
1848 eepromData = rom.perfectMatch[3];
1849 eepromData <<= 8;
1850 eepromData += rom.perfectMatch[2];
1851 break;
1852
1853 case EEPROM_PMATCH0_ADDR:
1854 eepromData = rom.perfectMatch[1];
1855 eepromData <<= 8;
1856 eepromData += rom.perfectMatch[0];
1857 break;
1858
1859 default:
1860 panic("FreeBSD driver only uses EEPROM to read PMATCH!");
1861 }
1862 // Set up to read data
1863 eepromState = eepromRead;
1864 eepromBitsToRx = 16;
1865
1866 // Clear data in bit
1867 regs.mear &= ~MEAR_EEDI;
1868 }
1869 break;
1870
1871 case eepromRead:
1872 // Clear Data Out bit
1873 regs.mear &= ~MEAR_EEDO;
1874 // Set bit to value of current EEPROM bit
1875 regs.mear |= (eepromData & 0x8000) ? MEAR_EEDO : 0x0;
1876
1877 eepromData <<= 1;
1878 --eepromBitsToRx;
1879
1880 // All done
1881 if (eepromBitsToRx == 0) {
1882 eepromState = eepromStart;
1883 }
1884 break;
1885
1886 default:
1887 panic("invalid EEPROM state");
1888 }
1889
1890 }
1891
1892 void
1893 NSGigE::transferDone()
1894 {
1895 if (txFifo.empty()) {
1896 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
1897 return;
1898 }
1899
1900 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
1901
1902 reschedule(txEvent, clockEdge(Cycles(1)), true);
1903 }
1904
1905 bool
1906 NSGigE::rxFilter(const EthPacketPtr &packet)
1907 {
1908 EthPtr eth = packet;
1909 bool drop = true;
1910 string type;
1911
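         // Drop by default; any accept rule below that matches clears the flag.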
1912 const EthAddr &dst = eth->dst();
1913 if (dst.unicast()) {
1914 // If we're accepting all unicast addresses
1915 if (acceptUnicast)
1916 drop = false;
1917
1918 // If we make a perfect match
1919 if (acceptPerfect && dst == rom.perfectMatch)
1920 drop = false;
1921
1922 if (acceptArp && eth->type() == ETH_TYPE_ARP)
1923 drop = false;
1924
1925 } else if (dst.broadcast()) {
1926 // if we're accepting broadcasts
1927 if (acceptBroadcast)
1928 drop = false;
1929
1930 } else if (dst.multicast()) {
1931 // if we're accepting all multicasts
1932 if (acceptMulticast)
1933 drop = false;
1934
1935 // Multicast hashing faked - all packets accepted
1936 if (multicastHashEnable)
1937 drop = false;
1938 }
1939
1940 if (drop) {
1941 DPRINTF(Ethernet, "rxFilter drop\n");
1942 DDUMP(EthernetData, packet->data, packet->length);
1943 }
1944
1945 return drop;
1946 }
1947
1948 bool
1949 NSGigE::recvPacket(EthPacketPtr packet)
1950 {
1951 rxBytes += packet->length;
1952 rxPackets++;
1953
1954 DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
1955 rxFifo.avail());
1956
1957 if (!rxEnable) {
1958 DPRINTF(Ethernet, "receive disabled...packet dropped\n");
1959 return true;
1960 }
1961
1962 if (!rxFilterEnable) {
1963 DPRINTF(Ethernet,
1964 "receive packet filtering disabled . . . packet dropped\n");
1965 return true;
1966 }
1967
1968 if (rxFilter(packet)) {
1969 DPRINTF(Ethernet, "packet filtered...dropped\n");
1970 return true;
1971 }
1972
1973 if (rxFifo.avail() < packet->length) {
1974 #if TRACING_ON
1975 IpPtr ip(packet);
1976 TcpPtr tcp(ip);
1977 if (ip) {
1978 DPRINTF(Ethernet,
1979 "packet won't fit in receive buffer...pkt ID %d dropped\n",
1980 ip->id());
1981 if (tcp) {
1982 DPRINTF(Ethernet, "Seq=%d\n", tcp->seq());
1983 }
1984 }
1985 #endif
1986 droppedPackets++;
1987 devIntrPost(ISR_RXORN);
1988 return false;
1989 }
1990
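         // The packet fits in the FIFO: queue it and kick the receive state
         // machine so it can start filling descriptors.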
1991 rxFifo.push(packet);
1992
1993 rxKick();
1994 return true;
1995 }
1996
1997
1998 void
1999 NSGigE::drainResume()
2000 {
2001 Drainable::drainResume();
2002
2003     // During drain we could have left the state machines in a waiting state
2004     // and they wouldn't get out until some other event occurred to kick
2005     // them. This way they'll get out immediately.
2006 txKick();
2007 rxKick();
2008 }
2009
2010
2011 //=====================================================================
2012 //
2013 //
2014 void
2015 NSGigE::serialize(CheckpointOut &cp) const
2016 {
2017 // Serialize the PciDevice base class
2018 PciDevice::serialize(cp);
2019
2020 /*
2021 * Finalize any DMA events now.
2022 */
2023 // @todo will mem system save pending dma?
2024
2025 /*
2026 * Serialize the device registers
2027 */
2028 SERIALIZE_SCALAR(regs.command);
2029 SERIALIZE_SCALAR(regs.config);
2030 SERIALIZE_SCALAR(regs.mear);
2031 SERIALIZE_SCALAR(regs.ptscr);
2032 SERIALIZE_SCALAR(regs.isr);
2033 SERIALIZE_SCALAR(regs.imr);
2034 SERIALIZE_SCALAR(regs.ier);
2035 SERIALIZE_SCALAR(regs.ihr);
2036 SERIALIZE_SCALAR(regs.txdp);
2037 SERIALIZE_SCALAR(regs.txdp_hi);
2038 SERIALIZE_SCALAR(regs.txcfg);
2039 SERIALIZE_SCALAR(regs.gpior);
2040 SERIALIZE_SCALAR(regs.rxdp);
2041 SERIALIZE_SCALAR(regs.rxdp_hi);
2042 SERIALIZE_SCALAR(regs.rxcfg);
2043 SERIALIZE_SCALAR(regs.pqcr);
2044 SERIALIZE_SCALAR(regs.wcsr);
2045 SERIALIZE_SCALAR(regs.pcr);
2046 SERIALIZE_SCALAR(regs.rfcr);
2047 SERIALIZE_SCALAR(regs.rfdr);
2048 SERIALIZE_SCALAR(regs.brar);
2049 SERIALIZE_SCALAR(regs.brdr);
2050 SERIALIZE_SCALAR(regs.srr);
2051 SERIALIZE_SCALAR(regs.mibc);
2052 SERIALIZE_SCALAR(regs.vrcr);
2053 SERIALIZE_SCALAR(regs.vtcr);
2054 SERIALIZE_SCALAR(regs.vdr);
2055 SERIALIZE_SCALAR(regs.ccsr);
2056 SERIALIZE_SCALAR(regs.tbicr);
2057 SERIALIZE_SCALAR(regs.tbisr);
2058 SERIALIZE_SCALAR(regs.tanar);
2059 SERIALIZE_SCALAR(regs.tanlpar);
2060 SERIALIZE_SCALAR(regs.taner);
2061 SERIALIZE_SCALAR(regs.tesr);
2062
2063 SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
2064 SERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);
2065
2066 SERIALIZE_SCALAR(ioEnable);
2067
2068 /*
2069 * Serialize the data Fifos
2070 */
2071 rxFifo.serialize("rxFifo", cp);
2072 txFifo.serialize("txFifo", cp);
2073
2074 /*
2075 * Serialize the various helper variables
2076 */
2077 bool txPacketExists = txPacket != nullptr;
2078 SERIALIZE_SCALAR(txPacketExists);
2079 if (txPacketExists) {
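             // Recompute the packet lengths from the current buffer pointer so
             // a partially assembled packet is checkpointed with the bytes
             // accumulated so far.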
2080 txPacket->simLength = txPacketBufPtr - txPacket->data;
2081 txPacket->length = txPacketBufPtr - txPacket->data;
2082 txPacket->serialize("txPacket", cp);
2083 uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
2084 SERIALIZE_SCALAR(txPktBufPtr);
2085 }
2086
2087 bool rxPacketExists = rxPacket != nullptr;
2088 SERIALIZE_SCALAR(rxPacketExists);
2089 if (rxPacketExists) {
2090 rxPacket->serialize("rxPacket", cp);
2091 uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
2092 SERIALIZE_SCALAR(rxPktBufPtr);
2093 }
2094
2095 SERIALIZE_SCALAR(txXferLen);
2096 SERIALIZE_SCALAR(rxXferLen);
2097
2098 /*
2099 * Serialize Cached Descriptors
2100 */
2101 SERIALIZE_SCALAR(rxDesc64.link);
2102 SERIALIZE_SCALAR(rxDesc64.bufptr);
2103 SERIALIZE_SCALAR(rxDesc64.cmdsts);
2104 SERIALIZE_SCALAR(rxDesc64.extsts);
2105 SERIALIZE_SCALAR(txDesc64.link);
2106 SERIALIZE_SCALAR(txDesc64.bufptr);
2107 SERIALIZE_SCALAR(txDesc64.cmdsts);
2108 SERIALIZE_SCALAR(txDesc64.extsts);
2109 SERIALIZE_SCALAR(rxDesc32.link);
2110 SERIALIZE_SCALAR(rxDesc32.bufptr);
2111 SERIALIZE_SCALAR(rxDesc32.cmdsts);
2112 SERIALIZE_SCALAR(rxDesc32.extsts);
2113 SERIALIZE_SCALAR(txDesc32.link);
2114 SERIALIZE_SCALAR(txDesc32.bufptr);
2115 SERIALIZE_SCALAR(txDesc32.cmdsts);
2116 SERIALIZE_SCALAR(txDesc32.extsts);
2117 SERIALIZE_SCALAR(extstsEnable);
2118
2119 /*
2120 * Serialize tx state machine
2121 */
2122 int txState = this->txState;
2123 SERIALIZE_SCALAR(txState);
2124 SERIALIZE_SCALAR(txEnable);
2125 SERIALIZE_SCALAR(CTDD);
2126 SERIALIZE_SCALAR(txFragPtr);
2127 SERIALIZE_SCALAR(txDescCnt);
2128 int txDmaState = this->txDmaState;
2129 SERIALIZE_SCALAR(txDmaState);
2130 SERIALIZE_SCALAR(txKickTick);
2131
2132 /*
2133 * Serialize rx state machine
2134 */
2135 int rxState = this->rxState;
2136 SERIALIZE_SCALAR(rxState);
2137 SERIALIZE_SCALAR(rxEnable);
2138 SERIALIZE_SCALAR(CRDD);
2139 SERIALIZE_SCALAR(rxPktBytes);
2140 SERIALIZE_SCALAR(rxFragPtr);
2141 SERIALIZE_SCALAR(rxDescCnt);
2142 int rxDmaState = this->rxDmaState;
2143 SERIALIZE_SCALAR(rxDmaState);
2144 SERIALIZE_SCALAR(rxKickTick);
2145
2146 /*
2147 * Serialize EEPROM state machine
2148 */
2149 int eepromState = this->eepromState;
2150 SERIALIZE_SCALAR(eepromState);
2151 SERIALIZE_SCALAR(eepromClk);
2152 SERIALIZE_SCALAR(eepromBitsToRx);
2153 SERIALIZE_SCALAR(eepromOpcode);
2154 SERIALIZE_SCALAR(eepromAddress);
2155 SERIALIZE_SCALAR(eepromData);
2156
2157 /*
2158 * If there's a pending transmit, store the time so we can
2159 * reschedule it later
2160 */
2161 Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick() : 0;
2162 SERIALIZE_SCALAR(transmitTick);
2163
2164 /*
2165 * receive address filter settings
2166 */
2167 SERIALIZE_SCALAR(rxFilterEnable);
2168 SERIALIZE_SCALAR(acceptBroadcast);
2169 SERIALIZE_SCALAR(acceptMulticast);
2170 SERIALIZE_SCALAR(acceptUnicast);
2171 SERIALIZE_SCALAR(acceptPerfect);
2172 SERIALIZE_SCALAR(acceptArp);
2173 SERIALIZE_SCALAR(multicastHashEnable);
2174
2175 /*
2176 * Keep track of pending interrupt status.
2177 */
2178 SERIALIZE_SCALAR(intrTick);
2179 SERIALIZE_SCALAR(cpuPendingIntr);
2180 Tick intrEventTick = 0;
2181 if (intrEvent)
2182 intrEventTick = intrEvent->when();
2183 SERIALIZE_SCALAR(intrEventTick);
2184
2185 }
2186
2187 void
2188 NSGigE::unserialize(CheckpointIn &cp)
2189 {
2190 // Unserialize the PciDevice base class
2191 PciDevice::unserialize(cp);
2192
2193 UNSERIALIZE_SCALAR(regs.command);
2194 UNSERIALIZE_SCALAR(regs.config);
2195 UNSERIALIZE_SCALAR(regs.mear);
2196 UNSERIALIZE_SCALAR(regs.ptscr);
2197 UNSERIALIZE_SCALAR(regs.isr);
2198 UNSERIALIZE_SCALAR(regs.imr);
2199 UNSERIALIZE_SCALAR(regs.ier);
2200 UNSERIALIZE_SCALAR(regs.ihr);
2201 UNSERIALIZE_SCALAR(regs.txdp);
2202 UNSERIALIZE_SCALAR(regs.txdp_hi);
2203 UNSERIALIZE_SCALAR(regs.txcfg);
2204 UNSERIALIZE_SCALAR(regs.gpior);
2205 UNSERIALIZE_SCALAR(regs.rxdp);
2206 UNSERIALIZE_SCALAR(regs.rxdp_hi);
2207 UNSERIALIZE_SCALAR(regs.rxcfg);
2208 UNSERIALIZE_SCALAR(regs.pqcr);
2209 UNSERIALIZE_SCALAR(regs.wcsr);
2210 UNSERIALIZE_SCALAR(regs.pcr);
2211 UNSERIALIZE_SCALAR(regs.rfcr);
2212 UNSERIALIZE_SCALAR(regs.rfdr);
2213 UNSERIALIZE_SCALAR(regs.brar);
2214 UNSERIALIZE_SCALAR(regs.brdr);
2215 UNSERIALIZE_SCALAR(regs.srr);
2216 UNSERIALIZE_SCALAR(regs.mibc);
2217 UNSERIALIZE_SCALAR(regs.vrcr);
2218 UNSERIALIZE_SCALAR(regs.vtcr);
2219 UNSERIALIZE_SCALAR(regs.vdr);
2220 UNSERIALIZE_SCALAR(regs.ccsr);
2221 UNSERIALIZE_SCALAR(regs.tbicr);
2222 UNSERIALIZE_SCALAR(regs.tbisr);
2223 UNSERIALIZE_SCALAR(regs.tanar);
2224 UNSERIALIZE_SCALAR(regs.tanlpar);
2225 UNSERIALIZE_SCALAR(regs.taner);
2226 UNSERIALIZE_SCALAR(regs.tesr);
2227
2228 UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
2229 UNSERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);
2230
2231 UNSERIALIZE_SCALAR(ioEnable);
2232
2233 /*
2234 * unserialize the data fifos
2235 */
2236 rxFifo.unserialize("rxFifo", cp);
2237 txFifo.unserialize("txFifo", cp);
2238
2239 /*
2240 * unserialize the various helper variables
2241 */
2242 bool txPacketExists;
2243 UNSERIALIZE_SCALAR(txPacketExists);
2244 if (txPacketExists) {
2245 txPacket = make_shared<EthPacketData>(16384);
2246 txPacket->unserialize("txPacket", cp);
2247 uint32_t txPktBufPtr;
2248 UNSERIALIZE_SCALAR(txPktBufPtr);
2249 txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
2250 } else
2251 txPacket = 0;
2252
2253 bool rxPacketExists;
2254 UNSERIALIZE_SCALAR(rxPacketExists);
2255 rxPacket = 0;
2256 if (rxPacketExists) {
2257 rxPacket = make_shared<EthPacketData>();
2258 rxPacket->unserialize("rxPacket", cp);
2259 uint32_t rxPktBufPtr;
2260 UNSERIALIZE_SCALAR(rxPktBufPtr);
2261 rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
2262 } else
2263 rxPacket = 0;
2264
2265 UNSERIALIZE_SCALAR(txXferLen);
2266 UNSERIALIZE_SCALAR(rxXferLen);
2267
2268 /*
2269 * Unserialize Cached Descriptors
2270 */
2271 UNSERIALIZE_SCALAR(rxDesc64.link);
2272 UNSERIALIZE_SCALAR(rxDesc64.bufptr);
2273 UNSERIALIZE_SCALAR(rxDesc64.cmdsts);
2274 UNSERIALIZE_SCALAR(rxDesc64.extsts);
2275 UNSERIALIZE_SCALAR(txDesc64.link);
2276 UNSERIALIZE_SCALAR(txDesc64.bufptr);
2277 UNSERIALIZE_SCALAR(txDesc64.cmdsts);
2278 UNSERIALIZE_SCALAR(txDesc64.extsts);
2279 UNSERIALIZE_SCALAR(rxDesc32.link);
2280 UNSERIALIZE_SCALAR(rxDesc32.bufptr);
2281 UNSERIALIZE_SCALAR(rxDesc32.cmdsts);
2282 UNSERIALIZE_SCALAR(rxDesc32.extsts);
2283 UNSERIALIZE_SCALAR(txDesc32.link);
2284 UNSERIALIZE_SCALAR(txDesc32.bufptr);
2285 UNSERIALIZE_SCALAR(txDesc32.cmdsts);
2286 UNSERIALIZE_SCALAR(txDesc32.extsts);
2287 UNSERIALIZE_SCALAR(extstsEnable);
2288
2289 /*
2290 * unserialize tx state machine
2291 */
2292 int txState;
2293 UNSERIALIZE_SCALAR(txState);
2294 this->txState = (TxState) txState;
2295 UNSERIALIZE_SCALAR(txEnable);
2296 UNSERIALIZE_SCALAR(CTDD);
2297 UNSERIALIZE_SCALAR(txFragPtr);
2298 UNSERIALIZE_SCALAR(txDescCnt);
2299 int txDmaState;
2300 UNSERIALIZE_SCALAR(txDmaState);
2301 this->txDmaState = (DmaState) txDmaState;
2302 UNSERIALIZE_SCALAR(txKickTick);
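         // Restart the transmit state machine clock if it was running when
         // the checkpoint was taken.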
2303 if (txKickTick)
2304 schedule(txKickEvent, txKickTick);
2305
2306 /*
2307 * unserialize rx state machine
2308 */
2309 int rxState;
2310 UNSERIALIZE_SCALAR(rxState);
2311 this->rxState = (RxState) rxState;
2312 UNSERIALIZE_SCALAR(rxEnable);
2313 UNSERIALIZE_SCALAR(CRDD);
2314 UNSERIALIZE_SCALAR(rxPktBytes);
2315 UNSERIALIZE_SCALAR(rxFragPtr);
2316 UNSERIALIZE_SCALAR(rxDescCnt);
2317 int rxDmaState;
2318 UNSERIALIZE_SCALAR(rxDmaState);
2319 this->rxDmaState = (DmaState) rxDmaState;
2320 UNSERIALIZE_SCALAR(rxKickTick);
2321 if (rxKickTick)
2322 schedule(rxKickEvent, rxKickTick);
2323
2324 /*
2325 * Unserialize EEPROM state machine
2326 */
2327 int eepromState;
2328 UNSERIALIZE_SCALAR(eepromState);
2329 this->eepromState = (EEPROMState) eepromState;
2330 UNSERIALIZE_SCALAR(eepromClk);
2331 UNSERIALIZE_SCALAR(eepromBitsToRx);
2332 UNSERIALIZE_SCALAR(eepromOpcode);
2333 UNSERIALIZE_SCALAR(eepromAddress);
2334 UNSERIALIZE_SCALAR(eepromData);
2335
2336 /*
2337 * If there's a pending transmit, reschedule it now
2338 */
2339 Tick transmitTick;
2340 UNSERIALIZE_SCALAR(transmitTick);
2341 if (transmitTick)
2342 schedule(txEvent, curTick() + transmitTick);
2343
2344 /*
2345 * unserialize receive address filter settings
2346 */
2347 UNSERIALIZE_SCALAR(rxFilterEnable);
2348 UNSERIALIZE_SCALAR(acceptBroadcast);
2349 UNSERIALIZE_SCALAR(acceptMulticast);
2350 UNSERIALIZE_SCALAR(acceptUnicast);
2351 UNSERIALIZE_SCALAR(acceptPerfect);
2352 UNSERIALIZE_SCALAR(acceptArp);
2353 UNSERIALIZE_SCALAR(multicastHashEnable);
2354
2355 /*
2356 * Keep track of pending interrupt status.
2357 */
2358 UNSERIALIZE_SCALAR(intrTick);
2359 UNSERIALIZE_SCALAR(cpuPendingIntr);
2360 Tick intrEventTick;
2361 UNSERIALIZE_SCALAR(intrEventTick);
2362 if (intrEventTick) {
2363 intrEvent = new EventFunctionWrapper([this]{ cpuInterrupt(); },
2364 name(), true);
2365 schedule(intrEvent, intrEventTick);
2366 }
2367 }
2368
2369 NSGigE *
2370 NSGigEParams::create()
2371 {
2372 return new NSGigE(this);
2373 }