1 /*
2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Nathan Binkert
29 * Lisa Hsu
30 */
31
32 /** @file
33 * Device module for modelling the National Semiconductor
34 * DP83820 ethernet controller. Does not support priority queueing
35 */
36
37 #include "dev/net/ns_gige.hh"
38
39 #include <deque>
40 #include <memory>
41 #include <string>
42
43 #include "base/debug.hh"
44 #include "base/inet.hh"
45 #include "base/types.hh"
46 #include "debug/EthernetAll.hh"
47 #include "dev/net/etherlink.hh"
48 #include "mem/packet.hh"
49 #include "mem/packet_access.hh"
50 #include "params/NSGigE.hh"
51 #include "sim/system.hh"
52
53 // clang complains about std::set being overloaded with Packet::set if
54 // we open up the entire namespace std
55 using std::make_shared;
56 using std::min;
57 using std::ostream;
58 using std::string;
59
60 const char *NsRxStateStrings[] =
61 {
62 "rxIdle",
63 "rxDescRefr",
64 "rxDescRead",
65 "rxFifoBlock",
66 "rxFragWrite",
67 "rxDescWrite",
68 "rxAdvance"
69 };
70
71 const char *NsTxStateStrings[] =
72 {
73 "txIdle",
74 "txDescRefr",
75 "txDescRead",
76 "txFifoBlock",
77 "txFragRead",
78 "txDescWrite",
79 "txAdvance"
80 };
81
82 const char *NsDmaState[] =
83 {
84 "dmaIdle",
85 "dmaReading",
86 "dmaWriting",
87 "dmaReadWaiting",
88 "dmaWriteWaiting"
89 };
90
91 using namespace Net;
92
93 ///////////////////////////////////////////////////////////////////////
94 //
95 // NSGigE PCI Device
96 //
97 NSGigE::NSGigE(Params *p)
98 : EtherDevBase(p), ioEnable(false),
99 txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size),
100 txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
101 txXferLen(0), rxXferLen(0), rxDmaFree(false), txDmaFree(false),
102 txState(txIdle), txEnable(false), CTDD(false), txHalt(false),
103 txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
104 rxEnable(false), CRDD(false), rxPktBytes(0), rxHalt(false),
105 rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
106 eepromState(eepromStart), eepromClk(false), eepromBitsToRx(0),
107 eepromOpcode(0), eepromAddress(0), eepromData(0),
108 dmaReadDelay(p->dma_read_delay), dmaWriteDelay(p->dma_write_delay),
109 dmaReadFactor(p->dma_read_factor), dmaWriteFactor(p->dma_write_factor),
110 rxDmaData(NULL), rxDmaAddr(0), rxDmaLen(0),
111 txDmaData(NULL), txDmaAddr(0), txDmaLen(0),
112 rxDmaReadEvent([this]{ rxDmaReadDone(); }, name()),
113 rxDmaWriteEvent([this]{ rxDmaWriteDone(); }, name()),
114 txDmaReadEvent([this]{ txDmaReadDone(); }, name()),
115 txDmaWriteEvent([this]{ txDmaWriteDone(); }, name()),
116 dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free),
117 txDelay(p->tx_delay), rxDelay(p->rx_delay),
118 rxKickTick(0),
119 rxKickEvent([this]{ rxKick(); }, name()),
120 txKickTick(0),
121 txKickEvent([this]{ txKick(); }, name()),
122 txEvent([this]{ txEventTransmit(); }, name()),
123 rxFilterEnable(p->rx_filter),
124 acceptBroadcast(false), acceptMulticast(false), acceptUnicast(false),
125 acceptPerfect(false), acceptArp(false), multicastHashEnable(false),
126 intrDelay(p->intr_delay), intrTick(0), cpuPendingIntr(false),
127 intrEvent(0), interface(0)
128 {
129
130
131 interface = new NSGigEInt(name() + ".int0", this);
132
133 regsReset();
134 memcpy(&rom.perfectMatch, p->hardware_address.bytes(), ETH_ADDR_LEN);
135
136 memset(&rxDesc32, 0, sizeof(rxDesc32));
137 memset(&txDesc32, 0, sizeof(txDesc32));
138 memset(&rxDesc64, 0, sizeof(rxDesc64));
139 memset(&txDesc64, 0, sizeof(txDesc64));
140 }
141
142 NSGigE::~NSGigE()
143 {
144 delete interface;
145 }
146
147 /**
148 * This is to write to the PCI general configuration registers
149 */
150 Tick
151 NSGigE::writeConfig(PacketPtr pkt)
152 {
153 int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
154 if (offset < PCI_DEVICE_SPECIFIC)
155 PciDevice::writeConfig(pkt);
156 else
157 panic("Device specific PCI config space not implemented!\n");
158
159 switch (offset) {
160 // the device seems to work fine without all these PCI settings, but I
161 // track the IO enable bit to double check; an assertion will fail if
162 // we ever need to implement this properly
163 case PCI_COMMAND:
164 if (config.data[offset] & PCI_CMD_IOSE)
165 ioEnable = true;
166 else
167 ioEnable = false;
168 break;
169 }
170
171 return configDelay;
172 }
173
174 Port &
175 NSGigE::getPort(const std::string &if_name, PortID idx)
176 {
177 if (if_name == "interface")
178 return *interface;
179 return EtherDevBase::getPort(if_name, idx);
180 }
181
182 /**
183 * This reads the device registers, which are detailed in the NS83820
184 * spec sheet
185 */
186 Tick
187 NSGigE::read(PacketPtr pkt)
188 {
189 assert(ioEnable);
190
191 //The mask is to give you only the offset into the device register file
192 Addr daddr = pkt->getAddr() & 0xfff;
193 DPRINTF(EthernetPIO, "read da=%#x pa=%#x size=%d\n",
194 daddr, pkt->getAddr(), pkt->getSize());
195
196
197 // there are some reserved registers, you can see ns_gige_reg.h and
198 // the spec sheet for details
199 if (daddr > LAST && daddr <= RESERVED) {
200 panic("Accessing reserved register");
201 } else if (daddr > RESERVED && daddr <= 0x3FC) {
202 return readConfig(pkt);
203 } else if (daddr >= MIB_START && daddr <= MIB_END) {
204 // We don't implement all the MIB counters; hopefully the kernel
205 // doesn't actually depend on their values. The MIB registers are
206 // just hardware statistics counters.
207 pkt->setLE<uint32_t>(0);
208 pkt->makeAtomicResponse();
209 return pioDelay;
210 } else if (daddr > 0x3FC)
211 panic("Something is messed up!\n");
212
213 assert(pkt->getSize() == sizeof(uint32_t));
214 uint32_t &reg = *pkt->getPtr<uint32_t>();
215 uint16_t rfaddr;
216
217 switch (daddr) {
218 case CR:
219 reg = regs.command;
220 //these are supposed to be cleared on a read
221 reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
222 break;
223
224 case CFGR:
225 reg = regs.config;
226 break;
227
228 case MEAR:
229 reg = regs.mear;
230 break;
231
232 case PTSCR:
233 reg = regs.ptscr;
234 break;
235
236 case ISR:
237 reg = regs.isr;
238 devIntrClear(ISR_ALL);
239 break;
240
241 case IMR:
242 reg = regs.imr;
243 break;
244
245 case IER:
246 reg = regs.ier;
247 break;
248
249 case IHR:
250 reg = regs.ihr;
251 break;
252
253 case TXDP:
254 reg = regs.txdp;
255 break;
256
257 case TXDP_HI:
258 reg = regs.txdp_hi;
259 break;
260
261 case TX_CFG:
262 reg = regs.txcfg;
263 break;
264
265 case GPIOR:
266 reg = regs.gpior;
267 break;
268
269 case RXDP:
270 reg = regs.rxdp;
271 break;
272
273 case RXDP_HI:
274 reg = regs.rxdp_hi;
275 break;
276
277 case RX_CFG:
278 reg = regs.rxcfg;
279 break;
280
281 case PQCR:
282 reg = regs.pqcr;
283 break;
284
285 case WCSR:
286 reg = regs.wcsr;
287 break;
288
289 case PCR:
290 reg = regs.pcr;
291 break;
292
293 // See the spec sheet for how RFCR and RFDR work. Basically, you
294 // write an address to RFCR to tell the device which receive-filter
295 // entry you want to access next, and then the subsequent read or
296 // write of RFDR operates on the entry selected by what you wrote
297 // to RFCR.
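// Illustrative sketch (not from the spec or a real driver): to read the
// first two octets of the perfect-match MAC address, a driver would do
// something like
//     write(RFCR, 0x000);          // select PMATCH octets 0 and 1
//     uint16_t lo = read(RFDR);    // perfectMatch[1] << 8 | perfectMatch[0]
// where write()/read() are stand-ins for the PIO register accesses that
// end up in NSGigE::read()/NSGigE::write().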
298 case RFCR:
299 reg = regs.rfcr;
300 break;
301
302 case RFDR:
303 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
304 switch (rfaddr) {
305 // Read from perfect match ROM octets
306 case 0x000:
307 reg = rom.perfectMatch[1];
308 reg = reg << 8;
309 reg += rom.perfectMatch[0];
310 break;
311 case 0x002:
312 reg = rom.perfectMatch[3] << 8;
313 reg += rom.perfectMatch[2];
314 break;
315 case 0x004:
316 reg = rom.perfectMatch[5] << 8;
317 reg += rom.perfectMatch[4];
318 break;
319 default:
320 // Read filter hash table
321 if (rfaddr >= FHASH_ADDR &&
322 rfaddr < FHASH_ADDR + FHASH_SIZE) {
323
324 // Only word-aligned reads supported
325 if (rfaddr % 2)
326 panic("unaligned read from filter hash table!");
327
328 reg = rom.filterHash[rfaddr - FHASH_ADDR + 1] << 8;
329 reg += rom.filterHash[rfaddr - FHASH_ADDR];
330 break;
331 }
332
333 panic("reading RFDR for something other than pattern"
334 " matching or hashing! %#x\n", rfaddr);
335 }
336 break;
337
338 case SRR:
339 reg = regs.srr;
340 break;
341
342 case MIBC:
343 reg = regs.mibc;
344 reg &= ~(MIBC_MIBS | MIBC_ACLR);
345 break;
346
347 case VRCR:
348 reg = regs.vrcr;
349 break;
350
351 case VTCR:
352 reg = regs.vtcr;
353 break;
354
355 case VDR:
356 reg = regs.vdr;
357 break;
358
359 case CCSR:
360 reg = regs.ccsr;
361 break;
362
363 case TBICR:
364 reg = regs.tbicr;
365 break;
366
367 case TBISR:
368 reg = regs.tbisr;
369 break;
370
371 case TANAR:
372 reg = regs.tanar;
373 break;
374
375 case TANLPAR:
376 reg = regs.tanlpar;
377 break;
378
379 case TANER:
380 reg = regs.taner;
381 break;
382
383 case TESR:
384 reg = regs.tesr;
385 break;
386
387 case M5REG:
388 reg = 0;
389 if (params()->rx_thread)
390 reg |= M5REG_RX_THREAD;
391 if (params()->tx_thread)
392 reg |= M5REG_TX_THREAD;
393 if (params()->rss)
394 reg |= M5REG_RSS;
395 break;
396
397 default:
398 panic("reading unimplemented register: addr=%#x", daddr);
399 }
400
401 DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
402 daddr, reg, reg);
403
404 pkt->makeAtomicResponse();
405 return pioDelay;
406 }
407
408 Tick
409 NSGigE::write(PacketPtr pkt)
410 {
411 assert(ioEnable);
412
413 Addr daddr = pkt->getAddr() & 0xfff;
414 DPRINTF(EthernetPIO, "write da=%#x pa=%#x size=%d\n",
415 daddr, pkt->getAddr(), pkt->getSize());
416
417 if (daddr > LAST && daddr <= RESERVED) {
418 panic("Accessing reserved register");
419 } else if (daddr > RESERVED && daddr <= 0x3FC) {
420 return writeConfig(pkt);
421 } else if (daddr > 0x3FC)
422 panic("Something is messed up!\n");
423
424 if (pkt->getSize() == sizeof(uint32_t)) {
425 uint32_t reg = pkt->getLE<uint32_t>();
426 uint16_t rfaddr;
427
428 DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);
429
430 switch (daddr) {
431 case CR:
432 regs.command = reg;
433 if (reg & CR_TXD) {
434 txEnable = false;
435 } else if (reg & CR_TXE) {
436 txEnable = true;
437
438 // the kernel is enabling the transmit machine
439 if (txState == txIdle)
440 txKick();
441 }
442
443 if (reg & CR_RXD) {
444 rxEnable = false;
445 } else if (reg & CR_RXE) {
446 rxEnable = true;
447
448 if (rxState == rxIdle)
449 rxKick();
450 }
451
452 if (reg & CR_TXR)
453 txReset();
454
455 if (reg & CR_RXR)
456 rxReset();
457
458 if (reg & CR_SWI)
459 devIntrPost(ISR_SWI);
460
461 if (reg & CR_RST) {
462 txReset();
463 rxReset();
464
465 regsReset();
466 }
467 break;
468
469 case CFGR:
470 if (reg & CFGR_LNKSTS ||
471 reg & CFGR_SPDSTS ||
472 reg & CFGR_DUPSTS ||
473 reg & CFGR_RESERVED ||
474 reg & CFGR_T64ADDR ||
475 reg & CFGR_PCI64_DET) {
476 // First clear all writable bits
477 regs.config &= CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
478 CFGR_RESERVED | CFGR_T64ADDR |
479 CFGR_PCI64_DET;
480 // Now set the appropriate writable bits
481 regs.config |= reg & ~(CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
482 CFGR_RESERVED | CFGR_T64ADDR |
483 CFGR_PCI64_DET);
484 }
485
486 if (reg & CFGR_AUTO_1000)
487 panic("CFGR_AUTO_1000 not implemented!\n");
488
489 if (reg & CFGR_PCI64_DET)
490 panic("CFGR_PCI64_DET is read only register!\n");
491
492 if (reg & CFGR_EXTSTS_EN)
493 extstsEnable = true;
494 else
495 extstsEnable = false;
496 break;
497
498 case MEAR:
499 // Clear writable bits
500 regs.mear &= MEAR_EEDO;
501 // Set appropriate writable bits
502 regs.mear |= reg & ~MEAR_EEDO;
503
504 // FreeBSD uses the EEPROM to read PMATCH (for the MAC address)
505 // even though it could get it through RFDR
506 if (reg & MEAR_EESEL) {
507 // Rising edge of clock
508 if (reg & MEAR_EECLK && !eepromClk)
509 eepromKick();
510 }
511 else {
512 eepromState = eepromStart;
513 regs.mear &= ~MEAR_EEDI;
514 }
515
516 eepromClk = reg & MEAR_EECLK;
517
518 // since phy is completely faked, MEAR_MD* don't matter
519 break;
520
521 case PTSCR:
522 regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
523 // these control BISTs for various parts of the chip; we don't
524 // model them, so we just fake that each BIST is done
525 if (reg & PTSCR_RBIST_EN)
526 regs.ptscr |= PTSCR_RBIST_DONE;
527 if (reg & PTSCR_EEBIST_EN)
528 regs.ptscr &= ~PTSCR_EEBIST_EN;
529 if (reg & PTSCR_EELOAD_EN)
530 regs.ptscr &= ~PTSCR_EELOAD_EN;
531 break;
532
533 case ISR: /* writing to the ISR has no effect */
534 panic("ISR is a read only register!\n");
535
536 case IMR:
537 regs.imr = reg;
538 devIntrChangeMask();
539 break;
540
541 case IER:
542 regs.ier = reg;
543 break;
544
545 case IHR:
546 regs.ihr = reg;
547 /* not going to implement real interrupt holdoff */
548 break;
549
550 case TXDP:
551 regs.txdp = (reg & 0xFFFFFFFC);
552 assert(txState == txIdle);
553 CTDD = false;
554 break;
555
556 case TXDP_HI:
557 regs.txdp_hi = reg;
558 break;
559
560 case TX_CFG:
561 regs.txcfg = reg;
562
563 // also, we currently don't care about fill/drain
564 // thresholds, though this may change in the future with
565 // more realistic networks or a driver that adjusts them
566 // according to feedback
567
568 break;
569
570 case GPIOR:
571 // Only write writable bits
572 regs.gpior &= GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
573 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN;
574 regs.gpior |= reg & ~(GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
575 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN);
576 /* these just control general purpose i/o pins, don't matter */
577 break;
578
579 case RXDP:
580 regs.rxdp = reg;
581 CRDD = false;
582 break;
583
584 case RXDP_HI:
585 regs.rxdp_hi = reg;
586 break;
587
588 case RX_CFG:
589 regs.rxcfg = reg;
590 break;
591
592 case PQCR:
593 /* there is no priority queueing used in the linux 2.6 driver */
594 regs.pqcr = reg;
595 break;
596
597 case WCSR:
598 /* not going to implement wake on LAN */
599 regs.wcsr = reg;
600 break;
601
602 case PCR:
603 /* not going to implement pause control */
604 regs.pcr = reg;
605 break;
606
607 case RFCR:
608 regs.rfcr = reg;
609
610 rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
611 acceptBroadcast = (reg & RFCR_AAB) ? true : false;
612 acceptMulticast = (reg & RFCR_AAM) ? true : false;
613 acceptUnicast = (reg & RFCR_AAU) ? true : false;
614 acceptPerfect = (reg & RFCR_APM) ? true : false;
615 acceptArp = (reg & RFCR_AARP) ? true : false;
616 multicastHashEnable = (reg & RFCR_MHEN) ? true : false;
617
618 if (reg & RFCR_UHEN)
619 panic("Unicast hash filtering not used by drivers!\n");
620
621 if (reg & RFCR_ULM)
622 panic("RFCR_ULM not implemented!\n");
623
624 break;
625
626 case RFDR:
627 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
628 switch (rfaddr) {
629 case 0x000:
630 rom.perfectMatch[0] = (uint8_t)reg;
631 rom.perfectMatch[1] = (uint8_t)(reg >> 8);
632 break;
633 case 0x002:
634 rom.perfectMatch[2] = (uint8_t)reg;
635 rom.perfectMatch[3] = (uint8_t)(reg >> 8);
636 break;
637 case 0x004:
638 rom.perfectMatch[4] = (uint8_t)reg;
639 rom.perfectMatch[5] = (uint8_t)(reg >> 8);
640 break;
641 default:
642
643 if (rfaddr >= FHASH_ADDR &&
644 rfaddr < FHASH_ADDR + FHASH_SIZE) {
645
646 // Only word-aligned writes supported
647 if (rfaddr % 2)
648 panic("unaligned write to filter hash table!");
649
650 rom.filterHash[rfaddr - FHASH_ADDR] = (uint8_t)reg;
651 rom.filterHash[rfaddr - FHASH_ADDR + 1]
652 = (uint8_t)(reg >> 8);
653 break;
654 }
655 panic("writing RFDR for something other than pattern matching "
656 "or hashing! %#x\n", rfaddr);
657 }
658 break;
659
660 case BRAR:
661 regs.brar = reg;
662 break;
663
664 case BRDR:
665 panic("the driver never uses BRDR, something is wrong!\n");
666
667 case SRR:
668 panic("SRR is read only register!\n");
669
670 case MIBC:
671 panic("the driver never uses MIBC, something is wrong!\n");
672
673 case VRCR:
674 regs.vrcr = reg;
675 break;
676
677 case VTCR:
678 regs.vtcr = reg;
679 break;
680
681 case VDR:
682 panic("the driver never uses VDR, something is wrong!\n");
683
684 case CCSR:
685 /* not going to implement clockrun stuff */
686 regs.ccsr = reg;
687 break;
688
689 case TBICR:
690 regs.tbicr = reg;
691 if (reg & TBICR_MR_LOOPBACK)
692 panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
693
694 if (reg & TBICR_MR_AN_ENABLE) {
695 regs.tanlpar = regs.tanar;
696 regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
697 }
698
699 break;
700
701 case TBISR:
702 panic("TBISR is read only register!\n");
703
704 case TANAR:
705 // Only write the writable bits
706 regs.tanar &= TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED;
707 regs.tanar |= reg & ~(TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED);
708
709 // Pause capability unimplemented
710 break;
711
712 case TANLPAR:
713 panic("this should only be written to by the fake phy!\n");
714
715 case TANER:
716 panic("TANER is read only register!\n");
717
718 case TESR:
719 regs.tesr = reg;
720 break;
721
722 default:
723 panic("invalid register access daddr=%#x", daddr);
724 }
725 } else {
726 panic("Invalid Request Size");
727 }
728 pkt->makeAtomicResponse();
729 return pioDelay;
730 }
731
732 void
733 NSGigE::devIntrPost(uint32_t interrupts)
734 {
735 if (interrupts & ISR_RESERVE)
736 panic("Cannot set a reserved interrupt");
737
738 if (interrupts & ISR_NOIMPL)
739 warn("interrupt not implemented %#x\n", interrupts);
740
741 interrupts &= ISR_IMPL;
742 regs.isr |= interrupts;
743
744 if (interrupts & regs.imr) {
745 if (interrupts & ISR_SWI) {
746 totalSwi++;
747 }
748 if (interrupts & ISR_RXIDLE) {
749 totalRxIdle++;
750 }
751 if (interrupts & ISR_RXOK) {
752 totalRxOk++;
753 }
754 if (interrupts & ISR_RXDESC) {
755 totalRxDesc++;
756 }
757 if (interrupts & ISR_TXOK) {
758 totalTxOk++;
759 }
760 if (interrupts & ISR_TXIDLE) {
761 totalTxIdle++;
762 }
763 if (interrupts & ISR_TXDESC) {
764 totalTxDesc++;
765 }
766 if (interrupts & ISR_RXORN) {
767 totalRxOrn++;
768 }
769 }
770
771 DPRINTF(EthernetIntr,
772 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
773 interrupts, regs.isr, regs.imr);
774
775 if ((regs.isr & regs.imr)) {
776 Tick when = curTick();
777 if ((regs.isr & regs.imr & ISR_NODELAY) == 0)
778 when += intrDelay;
779 postedInterrupts++;
780 cpuIntrPost(when);
781 }
782 }
783
784 /* Putting the interrupt-counting stats inside this function means that it
785 is now limited to being used to clear all interrupts when the kernel
786 reads the ISR and services them. Keep this in mind if you were thinking
787 of expanding its use.
788 */
789 void
790 NSGigE::devIntrClear(uint32_t interrupts)
791 {
792 if (interrupts & ISR_RESERVE)
793 panic("Cannot clear a reserved interrupt");
794
795 if (regs.isr & regs.imr & ISR_SWI) {
796 postedSwi++;
797 }
798 if (regs.isr & regs.imr & ISR_RXIDLE) {
799 postedRxIdle++;
800 }
801 if (regs.isr & regs.imr & ISR_RXOK) {
802 postedRxOk++;
803 }
804 if (regs.isr & regs.imr & ISR_RXDESC) {
805 postedRxDesc++;
806 }
807 if (regs.isr & regs.imr & ISR_TXOK) {
808 postedTxOk++;
809 }
810 if (regs.isr & regs.imr & ISR_TXIDLE) {
811 postedTxIdle++;
812 }
813 if (regs.isr & regs.imr & ISR_TXDESC) {
814 postedTxDesc++;
815 }
816 if (regs.isr & regs.imr & ISR_RXORN) {
817 postedRxOrn++;
818 }
819
820 interrupts &= ~ISR_NOIMPL;
821 regs.isr &= ~interrupts;
822
823 DPRINTF(EthernetIntr,
824 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
825 interrupts, regs.isr, regs.imr);
826
827 if (!(regs.isr & regs.imr))
828 cpuIntrClear();
829 }
830
831 void
832 NSGigE::devIntrChangeMask()
833 {
834 DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
835 regs.isr, regs.imr, regs.isr & regs.imr);
836
837 if (regs.isr & regs.imr)
838 cpuIntrPost(curTick());
839 else
840 cpuIntrClear();
841 }
842
843 void
844 NSGigE::cpuIntrPost(Tick when)
845 {
846 // If the interrupt you want to post is later than an interrupt
847 // already scheduled, just let it be covered by the upcoming one and
848 // don't schedule another.
849 // However, we must be sure that the scheduled intrTick is in the
850 // future (this was formerly the source of a bug).
851 /**
852 * @todo this warning should be removed and the intrTick code should
853 * be fixed.
854 */
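// Worked example (illustrative): if an interrupt is already scheduled for
// intrTick = 1000 and a new post asks for when = 1500, we simply return and
// let the tick-1000 interrupt cover it; if instead the new request is for
// when = 800, intrTick moves earlier to 800, and the pending event is
// squashed and rescheduled below.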
855 assert(when >= curTick());
856 assert(intrTick >= curTick() || intrTick == 0);
857 if (when > intrTick && intrTick != 0) {
858 DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
859 intrTick);
860 return;
861 }
862
863 intrTick = when;
864 if (intrTick < curTick()) {
865 intrTick = curTick();
866 }
867
868 DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
869 intrTick);
870
871 if (intrEvent)
872 intrEvent->squash();
873
874 intrEvent = new EventFunctionWrapper([this]{ cpuInterrupt(); },
875 name(), true);
876 schedule(intrEvent, intrTick);
877 }
878
879 void
880 NSGigE::cpuInterrupt()
881 {
882 assert(intrTick == curTick());
883
884 // Whether or not there's a pending interrupt, we don't care about
885 // it anymore
886 intrEvent = 0;
887 intrTick = 0;
888
889 // Don't send an interrupt if there's already one
890 if (cpuPendingIntr) {
891 DPRINTF(EthernetIntr,
892 "would send an interrupt now, but there's already pending\n");
893 } else {
894 // Send interrupt
895 cpuPendingIntr = true;
896
897 DPRINTF(EthernetIntr, "posting interrupt\n");
898 intrPost();
899 }
900 }
901
902 void
903 NSGigE::cpuIntrClear()
904 {
905 if (!cpuPendingIntr)
906 return;
907
908 if (intrEvent) {
909 intrEvent->squash();
910 intrEvent = 0;
911 }
912
913 intrTick = 0;
914
915 cpuPendingIntr = false;
916
917 DPRINTF(EthernetIntr, "clearing interrupt\n");
918 intrClear();
919 }
920
921 bool
922 NSGigE::cpuIntrPending() const
923 { return cpuPendingIntr; }
924
925 void
926 NSGigE::txReset()
927 {
928
929 DPRINTF(Ethernet, "transmit reset\n");
930
931 CTDD = false;
932 txEnable = false;
933 txFragPtr = 0;
934 assert(txDescCnt == 0);
935 txFifo.clear();
936 txState = txIdle;
937 assert(txDmaState == dmaIdle);
938 }
939
940 void
941 NSGigE::rxReset()
942 {
943 DPRINTF(Ethernet, "receive reset\n");
944
945 CRDD = false;
946 assert(rxPktBytes == 0);
947 rxEnable = false;
948 rxFragPtr = 0;
949 assert(rxDescCnt == 0);
950 assert(rxDmaState == dmaIdle);
951 rxFifo.clear();
952 rxState = rxIdle;
953 }
954
955 void
956 NSGigE::regsReset()
957 {
958 memset(&regs, 0, sizeof(regs));
959 regs.config = (CFGR_LNKSTS | CFGR_TBI_EN | CFGR_MODE_1000);
960 regs.mear = 0x12;
961 regs.txcfg = 0x120; // set drain threshold to 1024 bytes and
962 // fill threshold to 32 bytes
963 regs.rxcfg = 0x4; // set drain threshold to 16 bytes
964 regs.srr = 0x0103; // set the silicon revision to rev B or 0x103
965 regs.mibc = MIBC_FRZ;
966 regs.vdr = 0x81; // set the vlan tag type to 802.1q
967 regs.tesr = 0xc000; // TBI capable of both full and half duplex
968 regs.brar = 0xffffffff;
969
970 extstsEnable = false;
971 acceptBroadcast = false;
972 acceptMulticast = false;
973 acceptUnicast = false;
974 acceptPerfect = false;
975 acceptArp = false;
976 }
977
978 bool
979 NSGigE::doRxDmaRead()
980 {
981 assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
982 rxDmaState = dmaReading;
983
984 if (dmaPending() || drainState() != DrainState::Running)
985 rxDmaState = dmaReadWaiting;
986 else
987 dmaRead(rxDmaAddr, rxDmaLen, &rxDmaReadEvent, (uint8_t*)rxDmaData);
988
989 return true;
990 }
991
992 void
993 NSGigE::rxDmaReadDone()
994 {
995 assert(rxDmaState == dmaReading);
996 rxDmaState = dmaIdle;
997
998 DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
999 rxDmaAddr, rxDmaLen);
1000 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1001
1002 // If the transmit state machine has a pending DMA, let it go first
1003 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1004 txKick();
1005
1006 rxKick();
1007 }
1008
1009 bool
1010 NSGigE::doRxDmaWrite()
1011 {
1012 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
1013 rxDmaState = dmaWriting;
1014
1015 if (dmaPending() || drainState() != DrainState::Running)
1016 rxDmaState = dmaWriteWaiting;
1017 else
1018 dmaWrite(rxDmaAddr, rxDmaLen, &rxDmaWriteEvent, (uint8_t*)rxDmaData);
1019 return true;
1020 }
1021
1022 void
1023 NSGigE::rxDmaWriteDone()
1024 {
1025 assert(rxDmaState == dmaWriting);
1026 rxDmaState = dmaIdle;
1027
1028 DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
1029 rxDmaAddr, rxDmaLen);
1030 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1031
1032 // If the transmit state machine has a pending DMA, let it go first
1033 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1034 txKick();
1035
1036 rxKick();
1037 }
1038
1039 void
1040 NSGigE::rxKick()
1041 {
1042 bool is64bit = (bool)(regs.config & CFGR_M64ADDR);
1043
1044 DPRINTF(EthernetSM,
1045 "receive kick rxState=%s (rxBuf.size=%d) %d-bit\n",
1046 NsRxStateStrings[rxState], rxFifo.size(), is64bit ? 64 : 32);
1047
1048 Addr link, bufptr;
1049 uint32_t &cmdsts = is64bit ? rxDesc64.cmdsts : rxDesc32.cmdsts;
1050 uint32_t &extsts = is64bit ? rxDesc64.extsts : rxDesc32.extsts;
1051
1052 next:
1053 if (rxKickTick > curTick()) {
1054 DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
1055 rxKickTick);
1056
1057 goto exit;
1058 }
1059
1060 // Go to the next state machine clock tick.
1061 rxKickTick = clockEdge(Cycles(1));
1062
1063 switch(rxDmaState) {
1064 case dmaReadWaiting:
1065 if (doRxDmaRead())
1066 goto exit;
1067 break;
1068 case dmaWriteWaiting:
1069 if (doRxDmaWrite())
1070 goto exit;
1071 break;
1072 default:
1073 break;
1074 }
1075
1076 link = is64bit ? (Addr)rxDesc64.link : (Addr)rxDesc32.link;
1077 bufptr = is64bit ? (Addr)rxDesc64.bufptr : (Addr)rxDesc32.bufptr;
1078
1079 // See the state machine in the spec sheet for details. The way this
1080 // works is: if you finish the work in one state and can go directly
1081 // to another, you do that by jumping to the label "next". However,
1082 // if there is intermediate work, such as a DMA, so that you can't go
1083 // to the next state yet, you jump to "exit" and leave the loop. When
1084 // the DMA completes, it triggers an event that calls rxKick() again
1085 // and re-enters this loop.
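// Control-flow sketch of what follows (illustrative only):
//
//   next:
//     if a DMA is still outstanding   -> goto exit (resumed by rxDma*Done)
//     do the work for the current rxState
//     choose the following state      -> goto next
//   exit:
//     schedule rxKickEvent at rxKickTick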
1086 switch (rxState) {
1087 case rxIdle:
1088 if (!rxEnable) {
1089 DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n");
1090 goto exit;
1091 }
1092
1093 if (CRDD) {
1094 rxState = rxDescRefr;
1095
1096 rxDmaAddr = regs.rxdp & 0x3fffffff;
1097 rxDmaData =
1098 is64bit ? (void *)&rxDesc64.link : (void *)&rxDesc32.link;
1099 rxDmaLen = is64bit ? sizeof(rxDesc64.link) : sizeof(rxDesc32.link);
1100 rxDmaFree = dmaDescFree;
1101
1102 descDmaReads++;
1103 descDmaRdBytes += rxDmaLen;
1104
1105 if (doRxDmaRead())
1106 goto exit;
1107 } else {
1108 rxState = rxDescRead;
1109
1110 rxDmaAddr = regs.rxdp & 0x3fffffff;
1111 rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
1112 rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
1113 rxDmaFree = dmaDescFree;
1114
1115 descDmaReads++;
1116 descDmaRdBytes += rxDmaLen;
1117
1118 if (doRxDmaRead())
1119 goto exit;
1120 }
1121 break;
1122
1123 case rxDescRefr:
1124 if (rxDmaState != dmaIdle)
1125 goto exit;
1126
1127 rxState = rxAdvance;
1128 break;
1129
1130 case rxDescRead:
1131 if (rxDmaState != dmaIdle)
1132 goto exit;
1133
1134 DPRINTF(EthernetDesc, "rxDesc: addr=%08x read descriptor\n",
1135 regs.rxdp & 0x3fffffff);
1136 DPRINTF(EthernetDesc,
1137 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
1138 link, bufptr, cmdsts, extsts);
1139
1140 if (cmdsts & CMDSTS_OWN) {
1141 devIntrPost(ISR_RXIDLE);
1142 rxState = rxIdle;
1143 goto exit;
1144 } else {
1145 rxState = rxFifoBlock;
1146 rxFragPtr = bufptr;
1147 rxDescCnt = cmdsts & CMDSTS_LEN_MASK;
1148 }
1149 break;
1150
1151 case rxFifoBlock:
1152 if (!rxPacket) {
1153 /**
1154 * @todo in reality, we should be able to start processing
1155 * the packet as it arrives, and not have to wait for the
1156 * full packet to be in the receive fifo.
1157 */
1158 if (rxFifo.empty())
1159 goto exit;
1160
1161 DPRINTF(EthernetSM, "****processing receive of new packet****\n");
1162
1163 // If we don't have a packet, grab a new one from the fifo.
1164 rxPacket = rxFifo.front();
1165 rxPktBytes = rxPacket->length;
1166 rxPacketBufPtr = rxPacket->data;
1167
1168 #if TRACING_ON
1169 if (DTRACE(Ethernet)) {
1170 IpPtr ip(rxPacket);
1171 if (ip) {
1172 DPRINTF(Ethernet, "ID is %d\n", ip->id());
1173 TcpPtr tcp(ip);
1174 if (tcp) {
1175 DPRINTF(Ethernet,
1176 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1177 tcp->sport(), tcp->dport(), tcp->seq(),
1178 tcp->ack());
1179 }
1180 }
1181 }
1182 #endif
1183
1184 // sanity check - I think the driver behaves like this
1185 assert(rxDescCnt >= rxPktBytes);
1186 rxFifo.pop();
1187 }
1188
1189
1190 // don't need the && rxDescCnt > 0 if the driver sanity check
1191 // above holds
1192 if (rxPktBytes > 0) {
1193 rxState = rxFragWrite;
1194 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
1195 // check holds
1196 rxXferLen = rxPktBytes;
1197
1198 rxDmaAddr = rxFragPtr & 0x3fffffff;
1199 rxDmaData = rxPacketBufPtr;
1200 rxDmaLen = rxXferLen;
1201 rxDmaFree = dmaDataFree;
1202
1203 if (doRxDmaWrite())
1204 goto exit;
1205
1206 } else {
1207 rxState = rxDescWrite;
1208
1209 //if (rxPktBytes == 0) { /* packet is done */
1210 assert(rxPktBytes == 0);
1211 DPRINTF(EthernetSM, "done with receiving packet\n");
1212
1213 cmdsts |= CMDSTS_OWN;
1214 cmdsts &= ~CMDSTS_MORE;
1215 cmdsts |= CMDSTS_OK;
1216 cmdsts &= 0xffff0000;
1217 cmdsts += rxPacket->length; //i.e. set CMDSTS_SIZE
1218
1219 IpPtr ip(rxPacket);
1220 if (extstsEnable && ip) {
1221 extsts |= EXTSTS_IPPKT;
1222 rxIpChecksums++;
1223 if (cksum(ip) != 0) {
1224 DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
1225 extsts |= EXTSTS_IPERR;
1226 }
1227 TcpPtr tcp(ip);
1228 UdpPtr udp(ip);
1229 if (tcp) {
1230 extsts |= EXTSTS_TCPPKT;
1231 rxTcpChecksums++;
1232 if (cksum(tcp) != 0) {
1233 DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
1234 extsts |= EXTSTS_TCPERR;
1235
1236 }
1237 } else if (udp) {
1238 extsts |= EXTSTS_UDPPKT;
1239 rxUdpChecksums++;
1240 if (cksum(udp) != 0) {
1241 DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
1242 extsts |= EXTSTS_UDPERR;
1243 }
1244 }
1245 }
1246 rxPacket = 0;
1247
1248 /*
1249 * The driver seems to always receive into descriptor buffers
1250 * of size 1514, so a packet is never split across multiple
1251 * descriptors on the receive side; we don't implement that
1252 * case, hence the assert above.
1253 */
1254
1255 DPRINTF(EthernetDesc,
1256 "rxDesc: addr=%08x writeback cmdsts extsts\n",
1257 regs.rxdp & 0x3fffffff);
1258 DPRINTF(EthernetDesc,
1259 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
1260 link, bufptr, cmdsts, extsts);
1261
1262 rxDmaAddr = regs.rxdp & 0x3fffffff;
1263 rxDmaData = &cmdsts;
1264 if (is64bit) {
1265 rxDmaAddr += offsetof(ns_desc64, cmdsts);
1266 rxDmaLen = sizeof(rxDesc64.cmdsts) + sizeof(rxDesc64.extsts);
1267 } else {
1268 rxDmaAddr += offsetof(ns_desc32, cmdsts);
1269 rxDmaLen = sizeof(rxDesc32.cmdsts) + sizeof(rxDesc32.extsts);
1270 }
1271 rxDmaFree = dmaDescFree;
1272
1273 descDmaWrites++;
1274 descDmaWrBytes += rxDmaLen;
1275
1276 if (doRxDmaWrite())
1277 goto exit;
1278 }
1279 break;
1280
1281 case rxFragWrite:
1282 if (rxDmaState != dmaIdle)
1283 goto exit;
1284
1285 rxPacketBufPtr += rxXferLen;
1286 rxFragPtr += rxXferLen;
1287 rxPktBytes -= rxXferLen;
1288
1289 rxState = rxFifoBlock;
1290 break;
1291
1292 case rxDescWrite:
1293 if (rxDmaState != dmaIdle)
1294 goto exit;
1295
1296 assert(cmdsts & CMDSTS_OWN);
1297
1298 assert(rxPacket == 0);
1299 devIntrPost(ISR_RXOK);
1300
1301 if (cmdsts & CMDSTS_INTR)
1302 devIntrPost(ISR_RXDESC);
1303
1304 if (!rxEnable) {
1305 DPRINTF(EthernetSM, "Halting the RX state machine\n");
1306 rxState = rxIdle;
1307 goto exit;
1308 } else
1309 rxState = rxAdvance;
1310 break;
1311
1312 case rxAdvance:
1313 if (link == 0) {
1314 devIntrPost(ISR_RXIDLE);
1315 rxState = rxIdle;
1316 CRDD = true;
1317 goto exit;
1318 } else {
1319 if (rxDmaState != dmaIdle)
1320 goto exit;
1321 rxState = rxDescRead;
1322 regs.rxdp = link;
1323 CRDD = false;
1324
1325 rxDmaAddr = regs.rxdp & 0x3fffffff;
1326 rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
1327 rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
1328 rxDmaFree = dmaDescFree;
1329
1330 if (doRxDmaRead())
1331 goto exit;
1332 }
1333 break;
1334
1335 default:
1336 panic("Invalid rxState!");
1337 }
1338
1339 DPRINTF(EthernetSM, "entering next rxState=%s\n",
1340 NsRxStateStrings[rxState]);
1341 goto next;
1342
1343 exit:
1344 /**
1345 * @todo do we want to schedule a future kick?
1346 */
1347 DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
1348 NsRxStateStrings[rxState]);
1349
1350 if (!rxKickEvent.scheduled())
1351 schedule(rxKickEvent, rxKickTick);
1352 }
1353
1354 void
1355 NSGigE::transmit()
1356 {
1357 if (txFifo.empty()) {
1358 DPRINTF(Ethernet, "nothing to transmit\n");
1359 return;
1360 }
1361
1362 DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n",
1363 txFifo.size());
1364 if (interface->sendPacket(txFifo.front())) {
1365 #if TRACING_ON
1366 if (DTRACE(Ethernet)) {
1367 IpPtr ip(txFifo.front());
1368 if (ip) {
1369 DPRINTF(Ethernet, "ID is %d\n", ip->id());
1370 TcpPtr tcp(ip);
1371 if (tcp) {
1372 DPRINTF(Ethernet,
1373 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1374 tcp->sport(), tcp->dport(), tcp->seq(),
1375 tcp->ack());
1376 }
1377 }
1378 }
1379 #endif
1380
1381 DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length);
1382 txBytes += txFifo.front()->length;
1383 txPackets++;
1384
1385 DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n",
1386 txFifo.avail());
1387 txFifo.pop();
1388
1389 /*
1390 * Normally we would write back the descriptor here, and ONLY
1391 * after that is done, send this interrupt. But since our
1392 * transmits never actually fail, we just post the interrupt
1393 * here; otherwise the code would have to stray from this nice
1394 * format. Besides, it's functionally the same.
1395 */
1396 devIntrPost(ISR_TXOK);
1397 }
1398
1399 if (!txFifo.empty() && !txEvent.scheduled()) {
1400 DPRINTF(Ethernet, "reschedule transmit\n");
1401 schedule(txEvent, curTick() + retryTime);
1402 }
1403 }
1404
1405 bool
1406 NSGigE::doTxDmaRead()
1407 {
1408 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
1409 txDmaState = dmaReading;
1410
1411 if (dmaPending() || drainState() != DrainState::Running)
1412 txDmaState = dmaReadWaiting;
1413 else
1414 dmaRead(txDmaAddr, txDmaLen, &txDmaReadEvent, (uint8_t*)txDmaData);
1415
1416 return true;
1417 }
1418
1419 void
1420 NSGigE::txDmaReadDone()
1421 {
1422 assert(txDmaState == dmaReading);
1423 txDmaState = dmaIdle;
1424
1425 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
1426 txDmaAddr, txDmaLen);
1427 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1428
1429 // If the receive state machine has a pending DMA, let it go first
1430 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1431 rxKick();
1432
1433 txKick();
1434 }
1435
1436 bool
1437 NSGigE::doTxDmaWrite()
1438 {
1439 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
1440 txDmaState = dmaWriting;
1441
1442 if (dmaPending() || drainState() != DrainState::Running)
1443 txDmaState = dmaWriteWaiting;
1444 else
1445 dmaWrite(txDmaAddr, txDmaLen, &txDmaWriteEvent, (uint8_t*)txDmaData);
1446 return true;
1447 }
1448
1449 void
1450 NSGigE::txDmaWriteDone()
1451 {
1452 assert(txDmaState == dmaWriting);
1453 txDmaState = dmaIdle;
1454
1455 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
1456 txDmaAddr, txDmaLen);
1457 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1458
1459 // If the receive state machine has a pending DMA, let it go first
1460 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1461 rxKick();
1462
1463 txKick();
1464 }
1465
1466 void
1467 NSGigE::txKick()
1468 {
1469 bool is64bit = (bool)(regs.config & CFGR_M64ADDR);
1470
1471 DPRINTF(EthernetSM, "transmit kick txState=%s %d-bit\n",
1472 NsTxStateStrings[txState], is64bit ? 64 : 32);
1473
1474 Addr link, bufptr;
1475 uint32_t &cmdsts = is64bit ? txDesc64.cmdsts : txDesc32.cmdsts;
1476 uint32_t &extsts = is64bit ? txDesc64.extsts : txDesc32.extsts;
1477
1478 next:
1479 if (txKickTick > curTick()) {
1480 DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
1481 txKickTick);
1482 goto exit;
1483 }
1484
1485 // Go to the next state machine clock tick.
1486 txKickTick = clockEdge(Cycles(1));
1487
1488 switch(txDmaState) {
1489 case dmaReadWaiting:
1490 if (doTxDmaRead())
1491 goto exit;
1492 break;
1493 case dmaWriteWaiting:
1494 if (doTxDmaWrite())
1495 goto exit;
1496 break;
1497 default:
1498 break;
1499 }
1500
1501 link = is64bit ? (Addr)txDesc64.link : (Addr)txDesc32.link;
1502 bufptr = is64bit ? (Addr)txDesc64.bufptr : (Addr)txDesc32.bufptr;
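// The transmit state machine below follows the same next/exit pattern as
// rxKick() above: jump to "next" when a state finishes and the following
// state can run immediately, and jump to "exit" while a DMA is
// outstanding; txDmaReadDone()/txDmaWriteDone() re-enter via txKick().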
1503 switch (txState) {
1504 case txIdle:
1505 if (!txEnable) {
1506 DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n");
1507 goto exit;
1508 }
1509
1510 if (CTDD) {
1511 txState = txDescRefr;
1512
1513 txDmaAddr = regs.txdp & 0x3fffffff;
1514 txDmaData =
1515 is64bit ? (void *)&txDesc64.link : (void *)&txDesc32.link;
1516 txDmaLen = is64bit ? sizeof(txDesc64.link) : sizeof(txDesc32.link);
1517 txDmaFree = dmaDescFree;
1518
1519 descDmaReads++;
1520 descDmaRdBytes += txDmaLen;
1521
1522 if (doTxDmaRead())
1523 goto exit;
1524
1525 } else {
1526 txState = txDescRead;
1527
1528 txDmaAddr = regs.txdp & 0x3fffffff;
1529 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
1530 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
1531 txDmaFree = dmaDescFree;
1532
1533 descDmaReads++;
1534 descDmaRdBytes += txDmaLen;
1535
1536 if (doTxDmaRead())
1537 goto exit;
1538 }
1539 break;
1540
1541 case txDescRefr:
1542 if (txDmaState != dmaIdle)
1543 goto exit;
1544
1545 txState = txAdvance;
1546 break;
1547
1548 case txDescRead:
1549 if (txDmaState != dmaIdle)
1550 goto exit;
1551
1552 DPRINTF(EthernetDesc, "txDesc: addr=%08x read descriptor\n",
1553 regs.txdp & 0x3fffffff);
1554 DPRINTF(EthernetDesc,
1555 "txDesc: link=%#x bufptr=%#x cmdsts=%#08x extsts=%#08x\n",
1556 link, bufptr, cmdsts, extsts);
1557
1558 if (cmdsts & CMDSTS_OWN) {
1559 txState = txFifoBlock;
1560 txFragPtr = bufptr;
1561 txDescCnt = cmdsts & CMDSTS_LEN_MASK;
1562 } else {
1563 devIntrPost(ISR_TXIDLE);
1564 txState = txIdle;
1565 goto exit;
1566 }
1567 break;
1568
1569 case txFifoBlock:
1570 if (!txPacket) {
1571 DPRINTF(EthernetSM, "****starting the tx of a new packet****\n");
1572 txPacket = make_shared<EthPacketData>(16384);
1573 txPacketBufPtr = txPacket->data;
1574 }
1575
1576 if (txDescCnt == 0) {
1577 DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n");
1578 if (cmdsts & CMDSTS_MORE) {
1579 DPRINTF(EthernetSM, "there are more descriptors to come\n");
1580 txState = txDescWrite;
1581
1582 cmdsts &= ~CMDSTS_OWN;
1583
1584 txDmaAddr = regs.txdp & 0x3fffffff;
1585 txDmaData = &cmdsts;
1586 if (is64bit) {
1587 txDmaAddr += offsetof(ns_desc64, cmdsts);
1588 txDmaLen = sizeof(txDesc64.cmdsts);
1589 } else {
1590 txDmaAddr += offsetof(ns_desc32, cmdsts);
1591 txDmaLen = sizeof(txDesc32.cmdsts);
1592 }
1593 txDmaFree = dmaDescFree;
1594
1595 if (doTxDmaWrite())
1596 goto exit;
1597
1598 } else { /* this packet is totally done */
1599 DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
1600 /* deal with the packet that just finished */
1601 if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
1602 IpPtr ip(txPacket);
1603 if (extsts & EXTSTS_UDPPKT) {
1604 UdpPtr udp(ip);
1605 if (udp) {
1606 udp->sum(0);
1607 udp->sum(cksum(udp));
1608 txUdpChecksums++;
1609 } else {
1610 Debug::breakpoint();
1611 warn_once("UDPPKT set, but not UDP!\n");
1612 }
1613 } else if (extsts & EXTSTS_TCPPKT) {
1614 TcpPtr tcp(ip);
1615 if (tcp) {
1616 tcp->sum(0);
1617 tcp->sum(cksum(tcp));
1618 txTcpChecksums++;
1619 } else {
1620 warn_once("TCPPKT set, but not UDP!\n");
1621 }
1622 }
1623 if (extsts & EXTSTS_IPPKT) {
1624 if (ip) {
1625 ip->sum(0);
1626 ip->sum(cksum(ip));
1627 txIpChecksums++;
1628 } else {
1629 warn_once("IPPKT set, but not UDP!\n");
1630 }
1631 }
1632 }
1633
1634 txPacket->simLength = txPacketBufPtr - txPacket->data;
1635 txPacket->length = txPacketBufPtr - txPacket->data;
1636 // this is just because the receive side can't handle a packet
1637 // bigger than 1514 bytes, so we want to make sure
1638 if (txPacket->length > 1514)
1639 panic("transmit packet too large, %d > 1514\n",
1640 txPacket->length);
1641
1642 #ifndef NDEBUG
1643 bool success =
1644 #endif
1645 txFifo.push(txPacket);
1646 assert(success);
1647
1648 /*
1649 * The following section is not to spec, but functionally it
1650 * shouldn't be any different. Normally the chip waits until
1651 * the transmit has occurred before writing back the
1652 * descriptor, because it has to see that the packet was
1653 * successfully transmitted in order to decide whether to set
1654 * CMDSTS_OK or not.
1655 * However, in the simulator the transmit always succeeds,
1656 * and writing this exactly to spec would complicate the
1657 * code, so we just do the writeback here.
1658 */
1659
1660 cmdsts &= ~CMDSTS_OWN;
1661 cmdsts |= CMDSTS_OK;
1662
1663 DPRINTF(EthernetDesc,
1664 "txDesc writeback: cmdsts=%08x extsts=%08x\n",
1665 cmdsts, extsts);
1666
1667 txDmaFree = dmaDescFree;
1668 txDmaAddr = regs.txdp & 0x3fffffff;
1669 txDmaData = &cmdsts;
1670 if (is64bit) {
1671 txDmaAddr += offsetof(ns_desc64, cmdsts);
1672 txDmaLen =
1673 sizeof(txDesc64.cmdsts) + sizeof(txDesc64.extsts);
1674 } else {
1675 txDmaAddr += offsetof(ns_desc32, cmdsts);
1676 txDmaLen =
1677 sizeof(txDesc32.cmdsts) + sizeof(txDesc32.extsts);
1678 }
1679
1680 descDmaWrites++;
1681 descDmaWrBytes += txDmaLen;
1682
1683 transmit();
1684 txPacket = 0;
1685
1686 if (!txEnable) {
1687 DPRINTF(EthernetSM, "halting TX state machine\n");
1688 txState = txIdle;
1689 goto exit;
1690 } else
1691 txState = txAdvance;
1692
1693 if (doTxDmaWrite())
1694 goto exit;
1695 }
1696 } else {
1697 DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
1698 if (!txFifo.full()) {
1699 txState = txFragRead;
1700
1701 /*
1702 * The number of bytes transferred is either whatever
1703 * is left in the descriptor (txDescCnt), or if there
1704 * is not enough room in the fifo, just whatever room
1705 * is left in the fifo
1706 */
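// Worked example with illustrative numbers: if the descriptor still has
// txDescCnt = 1514 bytes left but only txFifo.avail() = 512 bytes of fifo
// space remain, txXferLen becomes 512 and the rest of the fragment is
// picked up on a later pass through txFragRead/txFifoBlock.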
1707 txXferLen = min<uint32_t>(txDescCnt, txFifo.avail());
1708
1709 txDmaAddr = txFragPtr & 0x3fffffff;
1710 txDmaData = txPacketBufPtr;
1711 txDmaLen = txXferLen;
1712 txDmaFree = dmaDataFree;
1713
1714 if (doTxDmaRead())
1715 goto exit;
1716 } else {
1717 txState = txFifoBlock;
1718 transmit();
1719
1720 goto exit;
1721 }
1722
1723 }
1724 break;
1725
1726 case txFragRead:
1727 if (txDmaState != dmaIdle)
1728 goto exit;
1729
1730 txPacketBufPtr += txXferLen;
1731 txFragPtr += txXferLen;
1732 txDescCnt -= txXferLen;
1733 txFifo.reserve(txXferLen);
1734
1735 txState = txFifoBlock;
1736 break;
1737
1738 case txDescWrite:
1739 if (txDmaState != dmaIdle)
1740 goto exit;
1741
1742 if (cmdsts & CMDSTS_INTR)
1743 devIntrPost(ISR_TXDESC);
1744
1745 if (!txEnable) {
1746 DPRINTF(EthernetSM, "halting TX state machine\n");
1747 txState = txIdle;
1748 goto exit;
1749 } else
1750 txState = txAdvance;
1751 break;
1752
1753 case txAdvance:
1754 if (link == 0) {
1755 devIntrPost(ISR_TXIDLE);
1756 txState = txIdle;
1757 goto exit;
1758 } else {
1759 if (txDmaState != dmaIdle)
1760 goto exit;
1761 txState = txDescRead;
1762 regs.txdp = link;
1763 CTDD = false;
1764
1765 txDmaAddr = link & 0x3fffffff;
1766 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
1767 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
1768 txDmaFree = dmaDescFree;
1769
1770 if (doTxDmaRead())
1771 goto exit;
1772 }
1773 break;
1774
1775 default:
1776 panic("invalid state");
1777 }
1778
1779 DPRINTF(EthernetSM, "entering next txState=%s\n",
1780 NsTxStateStrings[txState]);
1781 goto next;
1782
1783 exit:
1784 /**
1785 * @todo do we want to schedule a future kick?
1786 */
1787 DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
1788 NsTxStateStrings[txState]);
1789
1790 if (!txKickEvent.scheduled())
1791 schedule(txKickEvent, txKickTick);
1792 }
1793
1794 /**
1795 * Advance the EEPROM state machine
1796 * Called on rising edge of EEPROM clock bit in MEAR
1797 */
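// Bit-level sketch of the serial protocol as modeled below (illustrative
// summary; the switch statement is the authoritative flow):
//
//   1 start bit   (EEDI high while EESEL is asserted)
//   2 opcode bits (only EEPROM_READ is implemented)
//   6 address bits (must select one of the PMATCH words)
//  16 data bits shifted out MSB-first on EEDO
//
// Each bit is sampled on a rising edge of MEAR_EECLK, which is what causes
// the MEAR write handler to call this function.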
1798 void
1799 NSGigE::eepromKick()
1800 {
1801 switch (eepromState) {
1802
1803 case eepromStart:
1804
1805 // Wait for start bit
1806 if (regs.mear & MEAR_EEDI) {
1807 // Set up to get 2 opcode bits
1808 eepromState = eepromGetOpcode;
1809 eepromBitsToRx = 2;
1810 eepromOpcode = 0;
1811 }
1812 break;
1813
1814 case eepromGetOpcode:
1815 eepromOpcode <<= 1;
1816 eepromOpcode += (regs.mear & MEAR_EEDI) ? 1 : 0;
1817 --eepromBitsToRx;
1818
1819 // Done getting opcode
1820 if (eepromBitsToRx == 0) {
1821 if (eepromOpcode != EEPROM_READ)
1822 panic("only EEPROM reads are implemented!");
1823
1824 // Set up to get address
1825 eepromState = eepromGetAddress;
1826 eepromBitsToRx = 6;
1827 eepromAddress = 0;
1828 }
1829 break;
1830
1831 case eepromGetAddress:
1832 eepromAddress <<= 1;
1833 eepromAddress += (regs.mear & MEAR_EEDI) ? 1 : 0;
1834 --eepromBitsToRx;
1835
1836 // Done getting address
1837 if (eepromBitsToRx == 0) {
1838
1839 if (eepromAddress >= EEPROM_SIZE)
1840 panic("EEPROM read access out of range!");
1841
1842 switch (eepromAddress) {
1843
1844 case EEPROM_PMATCH2_ADDR:
1845 eepromData = rom.perfectMatch[5];
1846 eepromData <<= 8;
1847 eepromData += rom.perfectMatch[4];
1848 break;
1849
1850 case EEPROM_PMATCH1_ADDR:
1851 eepromData = rom.perfectMatch[3];
1852 eepromData <<= 8;
1853 eepromData += rom.perfectMatch[2];
1854 break;
1855
1856 case EEPROM_PMATCH0_ADDR:
1857 eepromData = rom.perfectMatch[1];
1858 eepromData <<= 8;
1859 eepromData += rom.perfectMatch[0];
1860 break;
1861
1862 default:
1863 panic("FreeBSD driver only uses EEPROM to read PMATCH!");
1864 }
1865 // Set up to read data
1866 eepromState = eepromRead;
1867 eepromBitsToRx = 16;
1868
1869 // Clear data in bit
1870 regs.mear &= ~MEAR_EEDI;
1871 }
1872 break;
1873
1874 case eepromRead:
1875 // Clear Data Out bit
1876 regs.mear &= ~MEAR_EEDO;
1877 // Set bit to value of current EEPROM bit
1878 regs.mear |= (eepromData & 0x8000) ? MEAR_EEDO : 0x0;
1879
1880 eepromData <<= 1;
1881 --eepromBitsToRx;
1882
1883 // All done
1884 if (eepromBitsToRx == 0) {
1885 eepromState = eepromStart;
1886 }
1887 break;
1888
1889 default:
1890 panic("invalid EEPROM state");
1891 }
1892
1893 }
1894
1895 void
1896 NSGigE::transferDone()
1897 {
1898 if (txFifo.empty()) {
1899 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
1900 return;
1901 }
1902
1903 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
1904
1905 reschedule(txEvent, clockEdge(Cycles(1)), true);
1906 }
1907
1908 bool
1909 NSGigE::rxFilter(const EthPacketPtr &packet)
1910 {
1911 EthPtr eth = packet;
1912 bool drop = true;
1913 string type;
1914
1915 const EthAddr &dst = eth->dst();
1916 if (dst.unicast()) {
1917 // If we're accepting all unicast addresses
1918 if (acceptUnicast)
1919 drop = false;
1920
1921 // If we make a perfect match
1922 if (acceptPerfect && dst == rom.perfectMatch)
1923 drop = false;
1924
1925 if (acceptArp && eth->type() == ETH_TYPE_ARP)
1926 drop = false;
1927
1928 } else if (dst.broadcast()) {
1929 // if we're accepting broadcasts
1930 if (acceptBroadcast)
1931 drop = false;
1932
1933 } else if (dst.multicast()) {
1934 // if we're accepting all multicasts
1935 if (acceptMulticast)
1936 drop = false;
1937
1938 // Multicast hashing faked - all packets accepted
1939 if (multicastHashEnable)
1940 drop = false;
1941 }
1942
1943 if (drop) {
1944 DPRINTF(Ethernet, "rxFilter drop\n");
1945 DDUMP(EthernetData, packet->data, packet->length);
1946 }
1947
1948 return drop;
1949 }
1950
1951 bool
1952 NSGigE::recvPacket(EthPacketPtr packet)
1953 {
1954 rxBytes += packet->length;
1955 rxPackets++;
1956
1957 DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
1958 rxFifo.avail());
1959
1960 if (!rxEnable) {
1961 DPRINTF(Ethernet, "receive disabled...packet dropped\n");
1962 return true;
1963 }
1964
1965 if (!rxFilterEnable) {
1966 DPRINTF(Ethernet,
1967 "receive packet filtering disabled . . . packet dropped\n");
1968 return true;
1969 }
1970
1971 if (rxFilter(packet)) {
1972 DPRINTF(Ethernet, "packet filtered...dropped\n");
1973 return true;
1974 }
1975
1976 if (rxFifo.avail() < packet->length) {
1977 #if TRACING_ON
1978 IpPtr ip(packet);
1979 TcpPtr tcp(ip);
1980 if (ip) {
1981 DPRINTF(Ethernet,
1982 "packet won't fit in receive buffer...pkt ID %d dropped\n",
1983 ip->id());
1984 if (tcp) {
1985 DPRINTF(Ethernet, "Seq=%d\n", tcp->seq());
1986 }
1987 }
1988 #endif
1989 droppedPackets++;
1990 devIntrPost(ISR_RXORN);
1991 return false;
1992 }
1993
1994 rxFifo.push(packet);
1995
1996 rxKick();
1997 return true;
1998 }
1999
2000
2001 void
2002 NSGigE::drainResume()
2003 {
2004 Drainable::drainResume();
2005
2006 // During drain we could have left the state machines in a waiting state,
2007 // and they wouldn't get out until some other event occurred to kick them.
2008 // This way they'll get out immediately.
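// For example (based on the DMA helpers above), a doRxDmaRead() or
// doTxDmaRead() issued while drainState() != Running only parks the engine
// in its dma*Waiting state; the kicks below retry it now that the
// simulation is running again.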
2009 txKick();
2010 rxKick();
2011 }
2012
2013
2014 //=====================================================================
2015 //
2016 //
2017 void
2018 NSGigE::serialize(CheckpointOut &cp) const
2019 {
2020 // Serialize the PciDevice base class
2021 PciDevice::serialize(cp);
2022
2023 /*
2024 * Finalize any DMA events now.
2025 */
2026 // @todo will mem system save pending dma?
2027
2028 /*
2029 * Serialize the device registers
2030 */
2031 SERIALIZE_SCALAR(regs.command);
2032 SERIALIZE_SCALAR(regs.config);
2033 SERIALIZE_SCALAR(regs.mear);
2034 SERIALIZE_SCALAR(regs.ptscr);
2035 SERIALIZE_SCALAR(regs.isr);
2036 SERIALIZE_SCALAR(regs.imr);
2037 SERIALIZE_SCALAR(regs.ier);
2038 SERIALIZE_SCALAR(regs.ihr);
2039 SERIALIZE_SCALAR(regs.txdp);
2040 SERIALIZE_SCALAR(regs.txdp_hi);
2041 SERIALIZE_SCALAR(regs.txcfg);
2042 SERIALIZE_SCALAR(regs.gpior);
2043 SERIALIZE_SCALAR(regs.rxdp);
2044 SERIALIZE_SCALAR(regs.rxdp_hi);
2045 SERIALIZE_SCALAR(regs.rxcfg);
2046 SERIALIZE_SCALAR(regs.pqcr);
2047 SERIALIZE_SCALAR(regs.wcsr);
2048 SERIALIZE_SCALAR(regs.pcr);
2049 SERIALIZE_SCALAR(regs.rfcr);
2050 SERIALIZE_SCALAR(regs.rfdr);
2051 SERIALIZE_SCALAR(regs.brar);
2052 SERIALIZE_SCALAR(regs.brdr);
2053 SERIALIZE_SCALAR(regs.srr);
2054 SERIALIZE_SCALAR(regs.mibc);
2055 SERIALIZE_SCALAR(regs.vrcr);
2056 SERIALIZE_SCALAR(regs.vtcr);
2057 SERIALIZE_SCALAR(regs.vdr);
2058 SERIALIZE_SCALAR(regs.ccsr);
2059 SERIALIZE_SCALAR(regs.tbicr);
2060 SERIALIZE_SCALAR(regs.tbisr);
2061 SERIALIZE_SCALAR(regs.tanar);
2062 SERIALIZE_SCALAR(regs.tanlpar);
2063 SERIALIZE_SCALAR(regs.taner);
2064 SERIALIZE_SCALAR(regs.tesr);
2065
2066 SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
2067 SERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);
2068
2069 SERIALIZE_SCALAR(ioEnable);
2070
2071 /*
2072 * Serialize the data Fifos
2073 */
2074 rxFifo.serialize("rxFifo", cp);
2075 txFifo.serialize("txFifo", cp);
2076
2077 /*
2078 * Serialize the various helper variables
2079 */
2080 bool txPacketExists = txPacket != nullptr;
2081 SERIALIZE_SCALAR(txPacketExists);
2082 if (txPacketExists) {
2083 txPacket->simLength = txPacketBufPtr - txPacket->data;
2084 txPacket->length = txPacketBufPtr - txPacket->data;
2085 txPacket->serialize("txPacket", cp);
2086 uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
2087 SERIALIZE_SCALAR(txPktBufPtr);
2088 }
2089
2090 bool rxPacketExists = rxPacket != nullptr;
2091 SERIALIZE_SCALAR(rxPacketExists);
2092 if (rxPacketExists) {
2093 rxPacket->serialize("rxPacket", cp);
2094 uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
2095 SERIALIZE_SCALAR(rxPktBufPtr);
2096 }
2097
2098 SERIALIZE_SCALAR(txXferLen);
2099 SERIALIZE_SCALAR(rxXferLen);
2100
2101 /*
2102 * Serialize Cached Descriptors
2103 */
2104 SERIALIZE_SCALAR(rxDesc64.link);
2105 SERIALIZE_SCALAR(rxDesc64.bufptr);
2106 SERIALIZE_SCALAR(rxDesc64.cmdsts);
2107 SERIALIZE_SCALAR(rxDesc64.extsts);
2108 SERIALIZE_SCALAR(txDesc64.link);
2109 SERIALIZE_SCALAR(txDesc64.bufptr);
2110 SERIALIZE_SCALAR(txDesc64.cmdsts);
2111 SERIALIZE_SCALAR(txDesc64.extsts);
2112 SERIALIZE_SCALAR(rxDesc32.link);
2113 SERIALIZE_SCALAR(rxDesc32.bufptr);
2114 SERIALIZE_SCALAR(rxDesc32.cmdsts);
2115 SERIALIZE_SCALAR(rxDesc32.extsts);
2116 SERIALIZE_SCALAR(txDesc32.link);
2117 SERIALIZE_SCALAR(txDesc32.bufptr);
2118 SERIALIZE_SCALAR(txDesc32.cmdsts);
2119 SERIALIZE_SCALAR(txDesc32.extsts);
2120 SERIALIZE_SCALAR(extstsEnable);
2121
2122 /*
2123 * Serialize tx state machine
2124 */
2125 int txState = this->txState;
2126 SERIALIZE_SCALAR(txState);
2127 SERIALIZE_SCALAR(txEnable);
2128 SERIALIZE_SCALAR(CTDD);
2129 SERIALIZE_SCALAR(txFragPtr);
2130 SERIALIZE_SCALAR(txDescCnt);
2131 int txDmaState = this->txDmaState;
2132 SERIALIZE_SCALAR(txDmaState);
2133 SERIALIZE_SCALAR(txKickTick);
2134
2135 /*
2136 * Serialize rx state machine
2137 */
2138 int rxState = this->rxState;
2139 SERIALIZE_SCALAR(rxState);
2140 SERIALIZE_SCALAR(rxEnable);
2141 SERIALIZE_SCALAR(CRDD);
2142 SERIALIZE_SCALAR(rxPktBytes);
2143 SERIALIZE_SCALAR(rxFragPtr);
2144 SERIALIZE_SCALAR(rxDescCnt);
2145 int rxDmaState = this->rxDmaState;
2146 SERIALIZE_SCALAR(rxDmaState);
2147 SERIALIZE_SCALAR(rxKickTick);
2148
2149 /*
2150 * Serialize EEPROM state machine
2151 */
2152 int eepromState = this->eepromState;
2153 SERIALIZE_SCALAR(eepromState);
2154 SERIALIZE_SCALAR(eepromClk);
2155 SERIALIZE_SCALAR(eepromBitsToRx);
2156 SERIALIZE_SCALAR(eepromOpcode);
2157 SERIALIZE_SCALAR(eepromAddress);
2158 SERIALIZE_SCALAR(eepromData);
2159
2160 /*
2161 * If there's a pending transmit, store the time so we can
2162 * reschedule it later
2163 */
2164 Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick() : 0;
2165 SERIALIZE_SCALAR(transmitTick);
2166
2167 /*
2168 * receive address filter settings
2169 */
2170 SERIALIZE_SCALAR(rxFilterEnable);
2171 SERIALIZE_SCALAR(acceptBroadcast);
2172 SERIALIZE_SCALAR(acceptMulticast);
2173 SERIALIZE_SCALAR(acceptUnicast);
2174 SERIALIZE_SCALAR(acceptPerfect);
2175 SERIALIZE_SCALAR(acceptArp);
2176 SERIALIZE_SCALAR(multicastHashEnable);
2177
2178 /*
2179 * Keep track of pending interrupt status.
2180 */
2181 SERIALIZE_SCALAR(intrTick);
2182 SERIALIZE_SCALAR(cpuPendingIntr);
2183 Tick intrEventTick = 0;
2184 if (intrEvent)
2185 intrEventTick = intrEvent->when();
2186 SERIALIZE_SCALAR(intrEventTick);
2187
2188 }
2189
2190 void
2191 NSGigE::unserialize(CheckpointIn &cp)
2192 {
2193 // Unserialize the PciDevice base class
2194 PciDevice::unserialize(cp);
2195
2196 UNSERIALIZE_SCALAR(regs.command);
2197 UNSERIALIZE_SCALAR(regs.config);
2198 UNSERIALIZE_SCALAR(regs.mear);
2199 UNSERIALIZE_SCALAR(regs.ptscr);
2200 UNSERIALIZE_SCALAR(regs.isr);
2201 UNSERIALIZE_SCALAR(regs.imr);
2202 UNSERIALIZE_SCALAR(regs.ier);
2203 UNSERIALIZE_SCALAR(regs.ihr);
2204 UNSERIALIZE_SCALAR(regs.txdp);
2205 UNSERIALIZE_SCALAR(regs.txdp_hi);
2206 UNSERIALIZE_SCALAR(regs.txcfg);
2207 UNSERIALIZE_SCALAR(regs.gpior);
2208 UNSERIALIZE_SCALAR(regs.rxdp);
2209 UNSERIALIZE_SCALAR(regs.rxdp_hi);
2210 UNSERIALIZE_SCALAR(regs.rxcfg);
2211 UNSERIALIZE_SCALAR(regs.pqcr);
2212 UNSERIALIZE_SCALAR(regs.wcsr);
2213 UNSERIALIZE_SCALAR(regs.pcr);
2214 UNSERIALIZE_SCALAR(regs.rfcr);
2215 UNSERIALIZE_SCALAR(regs.rfdr);
2216 UNSERIALIZE_SCALAR(regs.brar);
2217 UNSERIALIZE_SCALAR(regs.brdr);
2218 UNSERIALIZE_SCALAR(regs.srr);
2219 UNSERIALIZE_SCALAR(regs.mibc);
2220 UNSERIALIZE_SCALAR(regs.vrcr);
2221 UNSERIALIZE_SCALAR(regs.vtcr);
2222 UNSERIALIZE_SCALAR(regs.vdr);
2223 UNSERIALIZE_SCALAR(regs.ccsr);
2224 UNSERIALIZE_SCALAR(regs.tbicr);
2225 UNSERIALIZE_SCALAR(regs.tbisr);
2226 UNSERIALIZE_SCALAR(regs.tanar);
2227 UNSERIALIZE_SCALAR(regs.tanlpar);
2228 UNSERIALIZE_SCALAR(regs.taner);
2229 UNSERIALIZE_SCALAR(regs.tesr);
2230
2231 UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
2232 UNSERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);
2233
2234 UNSERIALIZE_SCALAR(ioEnable);
2235
2236 /*
2237 * unserialize the data fifos
2238 */
2239 rxFifo.unserialize("rxFifo", cp);
2240 txFifo.unserialize("txFifo", cp);
2241
2242 /*
2243 * unserialize the various helper variables
2244 */
2245 bool txPacketExists;
2246 UNSERIALIZE_SCALAR(txPacketExists);
2247 if (txPacketExists) {
2248 txPacket = make_shared<EthPacketData>(16384);
2249 txPacket->unserialize("txPacket", cp);
2250 uint32_t txPktBufPtr;
2251 UNSERIALIZE_SCALAR(txPktBufPtr);
2252 txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
2253 } else
2254 txPacket = 0;
2255
2256 bool rxPacketExists;
2257 UNSERIALIZE_SCALAR(rxPacketExists);
2258 rxPacket = 0;
2259 if (rxPacketExists) {
2260 rxPacket = make_shared<EthPacketData>();
2261 rxPacket->unserialize("rxPacket", cp);
2262 uint32_t rxPktBufPtr;
2263 UNSERIALIZE_SCALAR(rxPktBufPtr);
2264 rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
2265 } else
2266 rxPacket = 0;
2267
2268 UNSERIALIZE_SCALAR(txXferLen);
2269 UNSERIALIZE_SCALAR(rxXferLen);
2270
2271 /*
2272 * Unserialize Cached Descriptors
2273 */
2274 UNSERIALIZE_SCALAR(rxDesc64.link);
2275 UNSERIALIZE_SCALAR(rxDesc64.bufptr);
2276 UNSERIALIZE_SCALAR(rxDesc64.cmdsts);
2277 UNSERIALIZE_SCALAR(rxDesc64.extsts);
2278 UNSERIALIZE_SCALAR(txDesc64.link);
2279 UNSERIALIZE_SCALAR(txDesc64.bufptr);
2280 UNSERIALIZE_SCALAR(txDesc64.cmdsts);
2281 UNSERIALIZE_SCALAR(txDesc64.extsts);
2282 UNSERIALIZE_SCALAR(rxDesc32.link);
2283 UNSERIALIZE_SCALAR(rxDesc32.bufptr);
2284 UNSERIALIZE_SCALAR(rxDesc32.cmdsts);
2285 UNSERIALIZE_SCALAR(rxDesc32.extsts);
2286 UNSERIALIZE_SCALAR(txDesc32.link);
2287 UNSERIALIZE_SCALAR(txDesc32.bufptr);
2288 UNSERIALIZE_SCALAR(txDesc32.cmdsts);
2289 UNSERIALIZE_SCALAR(txDesc32.extsts);
2290 UNSERIALIZE_SCALAR(extstsEnable);
2291
2292 /*
2293 * unserialize tx state machine
2294 */
2295 int txState;
2296 UNSERIALIZE_SCALAR(txState);
2297 this->txState = (TxState) txState;
2298 UNSERIALIZE_SCALAR(txEnable);
2299 UNSERIALIZE_SCALAR(CTDD);
2300 UNSERIALIZE_SCALAR(txFragPtr);
2301 UNSERIALIZE_SCALAR(txDescCnt);
2302 int txDmaState;
2303 UNSERIALIZE_SCALAR(txDmaState);
2304 this->txDmaState = (DmaState) txDmaState;
2305 UNSERIALIZE_SCALAR(txKickTick);
2306 if (txKickTick)
2307 schedule(txKickEvent, txKickTick);
2308
2309 /*
2310 * unserialize rx state machine
2311 */
2312 int rxState;
2313 UNSERIALIZE_SCALAR(rxState);
2314 this->rxState = (RxState) rxState;
2315 UNSERIALIZE_SCALAR(rxEnable);
2316 UNSERIALIZE_SCALAR(CRDD);
2317 UNSERIALIZE_SCALAR(rxPktBytes);
2318 UNSERIALIZE_SCALAR(rxFragPtr);
2319 UNSERIALIZE_SCALAR(rxDescCnt);
2320 int rxDmaState;
2321 UNSERIALIZE_SCALAR(rxDmaState);
2322 this->rxDmaState = (DmaState) rxDmaState;
2323 UNSERIALIZE_SCALAR(rxKickTick);
2324 if (rxKickTick)
2325 schedule(rxKickEvent, rxKickTick);
2326
2327 /*
2328 * Unserialize EEPROM state machine
2329 */
2330 int eepromState;
2331 UNSERIALIZE_SCALAR(eepromState);
2332 this->eepromState = (EEPROMState) eepromState;
2333 UNSERIALIZE_SCALAR(eepromClk);
2334 UNSERIALIZE_SCALAR(eepromBitsToRx);
2335 UNSERIALIZE_SCALAR(eepromOpcode);
2336 UNSERIALIZE_SCALAR(eepromAddress);
2337 UNSERIALIZE_SCALAR(eepromData);
2338
2339 /*
2340 * If there's a pending transmit, reschedule it now
2341 */
2342 Tick transmitTick;
2343 UNSERIALIZE_SCALAR(transmitTick);
2344 if (transmitTick)
2345 schedule(txEvent, curTick() + transmitTick);
2346
2347 /*
2348 * unserialize receive address filter settings
2349 */
2350 UNSERIALIZE_SCALAR(rxFilterEnable);
2351 UNSERIALIZE_SCALAR(acceptBroadcast);
2352 UNSERIALIZE_SCALAR(acceptMulticast);
2353 UNSERIALIZE_SCALAR(acceptUnicast);
2354 UNSERIALIZE_SCALAR(acceptPerfect);
2355 UNSERIALIZE_SCALAR(acceptArp);
2356 UNSERIALIZE_SCALAR(multicastHashEnable);
2357
2358 /*
2359 * Keep track of pending interrupt status.
2360 */
2361 UNSERIALIZE_SCALAR(intrTick);
2362 UNSERIALIZE_SCALAR(cpuPendingIntr);
2363 Tick intrEventTick;
2364 UNSERIALIZE_SCALAR(intrEventTick);
2365 if (intrEventTick) {
2366 intrEvent = new EventFunctionWrapper([this]{ cpuInterrupt(); },
2367 name(), true);
2368 schedule(intrEvent, intrEventTick);
2369 }
2370 }
2371
2372 NSGigE *
2373 NSGigEParams::create()
2374 {
2375 return new NSGigE(this);
2376 }