7260ecde49698ba32c2f0feb14987034ffc04bcf
[gem5.git] / dev / ns_gige.cc
1 /*
2 * Copyright (c) 2004 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /* @file
30 * Device module for modelling the National Semiconductor
31 * DP83820 ethernet controller. Does not support priority queueing
32 */
33 #include <cstdio>
34 #include <deque>
35 #include <string>
36
37 #include "base/inet.hh"
38 #include "cpu/exec_context.hh"
39 #include "cpu/intr_control.hh"
40 #include "dev/dma.hh"
41 #include "dev/etherlink.hh"
42 #include "dev/ns_gige.hh"
43 #include "dev/pciconfigall.hh"
44 #include "dev/tsunami_cchip.hh"
45 #include "mem/bus/bus.hh"
46 #include "mem/bus/dma_interface.hh"
47 #include "mem/bus/pio_interface.hh"
48 #include "mem/bus/pio_interface_impl.hh"
49 #include "mem/functional_mem/memory_control.hh"
50 #include "mem/functional_mem/physical_memory.hh"
51 #include "sim/builder.hh"
52 #include "sim/debug.hh"
53 #include "sim/host.hh"
54 #include "sim/sim_stats.hh"
55 #include "targetarch/vtophys.hh"
56
// Human-readable names for the receive state machine states; indexed by
// the rxState enum value (used by DPRINTF trace output in rxKick()).
const char *NsRxStateStrings[] =
{
    "rxIdle",
    "rxDescRefr",
    "rxDescRead",
    "rxFifoBlock",
    "rxFragWrite",
    "rxDescWrite",
    "rxAdvance"
};

// Human-readable names for the transmit state machine states; indexed by
// the txState enum value.
const char *NsTxStateStrings[] =
{
    "txIdle",
    "txDescRefr",
    "txDescRead",
    "txFifoBlock",
    "txFragRead",
    "txDescWrite",
    "txAdvance"
};

// Human-readable names for the DMA engine states (rxDmaState/txDmaState).
const char *NsDmaState[] =
{
    "dmaIdle",
    "dmaReading",
    "dmaWriting",
    "dmaReadWaiting",
    "dmaWriteWaiting"
};
87
88 using namespace std;
89
90
91 ///////////////////////////////////////////////////////////////////////
92 //
93 // NSGigE PCI Device
94 //
/**
 * Construct the NS DP83820 device model.
 *
 * Wires the device into the PCI hierarchy (via PciDev), sets up the PIO
 * and DMA bus interfaces depending on which buses were supplied, converts
 * the interrupt delay to ticks, resets the register file, and programs
 * the EEPROM perfect-match filter with the configured MAC address.
 *
 * Note: most state is initialized in the (long, order-sensitive) member
 * initializer list; only bus plumbing and register setup happen in the body.
 */
NSGigE::NSGigE(const std::string &name, IntrControl *i, Tick intr_delay,
               PhysicalMemory *pmem, Tick tx_delay, Tick rx_delay,
               MemoryController *mmu, HierParams *hier, Bus *header_bus,
               Bus *payload_bus, Tick pio_latency, bool dma_desc_free,
               bool dma_data_free, Tick dma_read_delay, Tick dma_write_delay,
               Tick dma_read_factor, Tick dma_write_factor, PciConfigAll *cf,
               PciConfigData *cd, Tsunami *t, uint32_t bus, uint32_t dev,
               uint32_t func, bool rx_filter, const int eaddr[6],
               uint32_t tx_fifo_size, uint32_t rx_fifo_size)
    : PciDev(name, mmu, cf, cd, bus, dev, func), tsunami(t), ioEnable(false),
      maxTxFifoSize(tx_fifo_size), maxRxFifoSize(rx_fifo_size),
      txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
      txXferLen(0), rxXferLen(0), txState(txIdle), txEnable(false),
      CTDD(false), txFifoAvail(tx_fifo_size),
      txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
      rxEnable(false), CRDD(false), rxPktBytes(0), rxFifoCnt(0),
      rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
      rxDmaReadEvent(this), rxDmaWriteEvent(this),
      txDmaReadEvent(this), txDmaWriteEvent(this),
      dmaDescFree(dma_desc_free), dmaDataFree(dma_data_free),
      txDelay(tx_delay), rxDelay(rx_delay), rxKickTick(0), txKickTick(0),
      txEvent(this), rxFilterEnable(rx_filter), acceptBroadcast(false),
      acceptMulticast(false), acceptUnicast(false),
      acceptPerfect(false), acceptArp(false),
      physmem(pmem), intctrl(i), intrTick(0), cpuPendingIntr(false),
      intrEvent(0), interface(0)
{
    // Let the Tsunami chipset know about this ethernet device so it can
    // route interrupts back to us.
    tsunami->ethernet = this;

    // Bus plumbing: prefer the header bus for PIO if present; DMA uses the
    // payload bus for data when one is supplied, otherwise everything goes
    // over a single bus.
    if (header_bus) {
        pioInterface = newPioInterface(name, hier, header_bus, this,
                                       &NSGigE::cacheAccess);

        pioLatency = pio_latency * header_bus->clockRatio;

        if (payload_bus)
            dmaInterface = new DMAInterface<Bus>(name + ".dma",
                                                 header_bus, payload_bus, 1);
        else
            dmaInterface = new DMAInterface<Bus>(name + ".dma",
                                                 header_bus, header_bus, 1);
    } else if (payload_bus) {
        pioInterface = newPioInterface(name, hier, payload_bus, this,
                                       &NSGigE::cacheAccess);

        pioLatency = pio_latency * payload_bus->clockRatio;

        dmaInterface = new DMAInterface<Bus>(name + ".dma", payload_bus,
                                             payload_bus, 1);
    }
    // NOTE(review): if neither bus is supplied, pioInterface/dmaInterface
    // are presumably left null and accesses go straight to physmem —
    // confirm against the class's member initialization.


    // Interrupt delay is configured in microseconds; everything else in
    // this model works in ticks.
    intrDelay = US2Ticks(intr_delay);
    dmaReadDelay = dma_read_delay;
    dmaWriteDelay = dma_write_delay;
    dmaReadFactor = dma_read_factor;
    dmaWriteFactor = dma_write_factor;

    regsReset();

    // Seed the receive-filter perfect-match registers with the MAC address.
    rom.perfectMatch[0] = eaddr[0];
    rom.perfectMatch[1] = eaddr[1];
    rom.perfectMatch[2] = eaddr[2];
    rom.perfectMatch[3] = eaddr[3];
    rom.perfectMatch[4] = eaddr[4];
    rom.perfectMatch[5] = eaddr[5];
}
161
// Empty destructor; no cleanup is performed here.
NSGigE::~NSGigE()
{}
164
165 void
166 NSGigE::regStats()
167 {
168 txBytes
169 .name(name() + ".txBytes")
170 .desc("Bytes Transmitted")
171 .prereq(txBytes)
172 ;
173
174 rxBytes
175 .name(name() + ".rxBytes")
176 .desc("Bytes Received")
177 .prereq(rxBytes)
178 ;
179
180 txPackets
181 .name(name() + ".txPackets")
182 .desc("Number of Packets Transmitted")
183 .prereq(txBytes)
184 ;
185
186 rxPackets
187 .name(name() + ".rxPackets")
188 .desc("Number of Packets Received")
189 .prereq(rxBytes)
190 ;
191
192 txIpChecksums
193 .name(name() + ".txIpChecksums")
194 .desc("Number of tx IP Checksums done by device")
195 .precision(0)
196 .prereq(txBytes)
197 ;
198
199 rxIpChecksums
200 .name(name() + ".rxIpChecksums")
201 .desc("Number of rx IP Checksums done by device")
202 .precision(0)
203 .prereq(rxBytes)
204 ;
205
206 txTcpChecksums
207 .name(name() + ".txTcpChecksums")
208 .desc("Number of tx TCP Checksums done by device")
209 .precision(0)
210 .prereq(txBytes)
211 ;
212
213 rxTcpChecksums
214 .name(name() + ".rxTcpChecksums")
215 .desc("Number of rx TCP Checksums done by device")
216 .precision(0)
217 .prereq(rxBytes)
218 ;
219
220 txUdpChecksums
221 .name(name() + ".txUdpChecksums")
222 .desc("Number of tx UDP Checksums done by device")
223 .precision(0)
224 .prereq(txBytes)
225 ;
226
227 rxUdpChecksums
228 .name(name() + ".rxUdpChecksums")
229 .desc("Number of rx UDP Checksums done by device")
230 .precision(0)
231 .prereq(rxBytes)
232 ;
233
234 descDmaReads
235 .name(name() + ".descDMAReads")
236 .desc("Number of descriptors the device read w/ DMA")
237 .precision(0)
238 ;
239
240 descDmaWrites
241 .name(name() + ".descDMAWrites")
242 .desc("Number of descriptors the device wrote w/ DMA")
243 .precision(0)
244 ;
245
246 descDmaRdBytes
247 .name(name() + ".descDmaReadBytes")
248 .desc("number of descriptor bytes read w/ DMA")
249 .precision(0)
250 ;
251
252 descDmaWrBytes
253 .name(name() + ".descDmaWriteBytes")
254 .desc("number of descriptor bytes write w/ DMA")
255 .precision(0)
256 ;
257
258
259 txBandwidth
260 .name(name() + ".txBandwidth")
261 .desc("Transmit Bandwidth (bits/s)")
262 .precision(0)
263 .prereq(txBytes)
264 ;
265
266 rxBandwidth
267 .name(name() + ".rxBandwidth")
268 .desc("Receive Bandwidth (bits/s)")
269 .precision(0)
270 .prereq(rxBytes)
271 ;
272
273 txPacketRate
274 .name(name() + ".txPPS")
275 .desc("Packet Tranmission Rate (packets/s)")
276 .precision(0)
277 .prereq(txBytes)
278 ;
279
280 rxPacketRate
281 .name(name() + ".rxPPS")
282 .desc("Packet Reception Rate (packets/s)")
283 .precision(0)
284 .prereq(rxBytes)
285 ;
286
287 txBandwidth = txBytes * Stats::constant(8) / simSeconds;
288 rxBandwidth = rxBytes * Stats::constant(8) / simSeconds;
289 txPacketRate = txPackets / simSeconds;
290 rxPacketRate = rxPackets / simSeconds;
291 }
292
293 /**
294 * This is to read the PCI general configuration registers
295 */
296 void
297 NSGigE::ReadConfig(int offset, int size, uint8_t *data)
298 {
299 if (offset < PCI_DEVICE_SPECIFIC)
300 PciDev::ReadConfig(offset, size, data);
301 else
302 panic("Device specific PCI config space not implemented!\n");
303 }
304
/**
 * This is to write to the PCI general configuration registers
 *
 * After delegating the write to the generic PCI device, we snoop the
 * offset to mirror side effects into the model: PCI_COMMAND toggles
 * ioEnable, and BAR writes update the PIO interface's address range.
 */
void
NSGigE::WriteConfig(int offset, int size, uint32_t data)
{
    if (offset < PCI_DEVICE_SPECIFIC)
        PciDev::WriteConfig(offset, size, data);
    else
        panic("Device specific PCI config space not implemented!\n");

    // Need to catch writes to BARs to update the PIO interface
    switch (offset) {
        // seems to work fine without all these PCI settings, but i
        // put in the IO to double check, an assertion will fail if we
        // need to properly implement it
      case PCI_COMMAND:
        // Track I/O space enable; read() / write() assert on this flag.
        if (config.data[offset] & PCI_CMD_IOSE)
            ioEnable = true;
        else
            ioEnable = false;

#if 0
        if (config.data[offset] & PCI_CMD_BME) {
            bmEnabled = true;
        }
        else {
            bmEnabled = false;
        }

        if (config.data[offset] & PCI_CMD_MSE) {
            memEnable = true;
        }
        else {
            memEnable = false;
        }
#endif
        break;

      case PCI0_BASE_ADDR0:
        if (BARAddrs[0] != 0) {
            if (pioInterface)
                pioInterface->addAddrRange(BARAddrs[0],
                                           BARAddrs[0] + BARSize[0] - 1);

            // Strip the uncached-space bits so the stored address is a
            // plain physical address.
            BARAddrs[0] &= PA_UNCACHED_MASK;
        }
        break;
      case PCI0_BASE_ADDR1:
        if (BARAddrs[1] != 0) {
            if (pioInterface)
                pioInterface->addAddrRange(BARAddrs[1],
                                           BARAddrs[1] + BARSize[1] - 1);

            BARAddrs[1] &= PA_UNCACHED_MASK;
        }
        break;
    }
}
364
/**
 * This reads the device registers, which are detailed in the NS83820
 * spec sheet
 *
 * Reads outside the implemented register file either panic (reserved
 * space), are forwarded to PCI config space, or return 0 (MIB counters,
 * which are not modeled).  Only 32-bit register reads are supported.
 * Some reads have side effects: CR clears its one-shot command bits,
 * ISR clears all pending interrupts, MIBC clears its status bits.
 */
Fault
NSGigE::read(MemReqPtr &req, uint8_t *data)
{
    assert(ioEnable);

    //The mask is to give you only the offset into the device register file
    Addr daddr = req->paddr & 0xfff;
    DPRINTF(EthernetPIO, "read da=%#x pa=%#x va=%#x size=%d\n",
            daddr, req->paddr, req->vaddr, req->size);


    // there are some reserved registers, you can see ns_gige_reg.h and
    // the spec sheet for details
    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        ReadConfig(daddr & 0xff, req->size, data);
        return No_Fault;
    } else if (daddr >= MIB_START && daddr <= MIB_END) {
        // don't implement all the MIB's.  hopefully the kernel
        // doesn't actually DEPEND upon their values
        // MIB are just hardware stats keepers
        uint32_t &reg = *(uint32_t *) data;
        reg = 0;
        return No_Fault;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    switch (req->size) {
      case sizeof(uint32_t):
        {
            uint32_t &reg = *(uint32_t *)data;

            switch (daddr) {
              case CR:
                reg = regs.command;
                //these are supposed to be cleared on a read
                reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
                break;

              case CFG:
                reg = regs.config;
                break;

              case MEAR:
                reg = regs.mear;
                break;

              case PTSCR:
                reg = regs.ptscr;
                break;

              case ISR:
                reg = regs.isr;
                // reading ISR acknowledges (clears) all interrupts
                devIntrClear(ISR_ALL);
                break;

              case IMR:
                reg = regs.imr;
                break;

              case IER:
                reg = regs.ier;
                break;

              case IHR:
                reg = regs.ihr;
                break;

              case TXDP:
                reg = regs.txdp;
                break;

              case TXDP_HI:
                reg = regs.txdp_hi;
                break;

              case TXCFG:
                reg = regs.txcfg;
                break;

              case GPIOR:
                reg = regs.gpior;
                break;

              case RXDP:
                reg = regs.rxdp;
                break;

              case RXDP_HI:
                reg = regs.rxdp_hi;
                break;

              case RXCFG:
                reg = regs.rxcfg;
                break;

              case PQCR:
                reg = regs.pqcr;
                break;

              case WCSR:
                reg = regs.wcsr;
                break;

              case PCR:
                reg = regs.pcr;
                break;

                // see the spec sheet for how RFCR and RFDR work
                // basically, you write to RFCR to tell the machine
                // what you want to do next, then you act upon RFDR,
                // and the device will be prepared b/c of what you
                // wrote to RFCR
              case RFCR:
                reg = regs.rfcr;
                break;

              case RFDR:
                // RFCR_RFADDR selects which pair of perfect-match MAC
                // bytes is returned (little-endian: byte N+1 in the
                // high half, byte N in the low half).
                switch (regs.rfcr & RFCR_RFADDR) {
                  case 0x000:
                    reg = rom.perfectMatch[1];
                    reg = reg << 8;
                    reg += rom.perfectMatch[0];
                    break;
                  case 0x002:
                    reg = rom.perfectMatch[3] << 8;
                    reg += rom.perfectMatch[2];
                    break;
                  case 0x004:
                    reg = rom.perfectMatch[5] << 8;
                    reg += rom.perfectMatch[4];
                    break;
                  default:
                    panic("reading RFDR for something other than PMATCH!\n");
                    // didn't implement other RFDR functionality b/c
                    // driver didn't use it
                }
                break;

              case SRR:
                reg = regs.srr;
                break;

              case MIBC:
                reg = regs.mibc;
                // status bits clear on read
                reg &= ~(MIBC_MIBS | MIBC_ACLR);
                break;

              case VRCR:
                reg = regs.vrcr;
                break;

              case VTCR:
                reg = regs.vtcr;
                break;

              case VDR:
                reg = regs.vdr;
                break;

              case CCSR:
                reg = regs.ccsr;
                break;

              case TBICR:
                reg = regs.tbicr;
                break;

              case TBISR:
                reg = regs.tbisr;
                break;

              case TANAR:
                reg = regs.tanar;
                break;

              case TANLPAR:
                reg = regs.tanlpar;
                break;

              case TANER:
                reg = regs.taner;
                break;

              case TESR:
                reg = regs.tesr;
                break;

              default:
                panic("reading unimplemented register: addr=%#x", daddr);
            }

            DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
                    daddr, reg, reg);
        }
        break;

      default:
        panic("accessing register with invalid size: addr=%#x, size=%d",
              daddr, req->size);
    }

    return No_Fault;
}
574
/**
 * Handle a PIO write to the device register file; register semantics are
 * detailed in the NS83820 spec sheet.
 *
 * Only 32-bit writes are supported.  Writes to read-only registers, or
 * to registers the Linux driver is never expected to touch, panic so
 * that any missing functionality shows up immediately rather than as
 * silent misbehavior.  Side effects of note: CR can kick/reset the
 * tx/rx state machines, IMR recomputes the CPU interrupt line, and RFCR
 * reprograms the receive filter flags.
 */
Fault
NSGigE::write(MemReqPtr &req, const uint8_t *data)
{
    assert(ioEnable);

    Addr daddr = req->paddr & 0xfff;
    DPRINTF(EthernetPIO, "write da=%#x pa=%#x va=%#x size=%d\n",
            daddr, req->paddr, req->vaddr, req->size);

    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        WriteConfig(daddr & 0xff, req->size, *(uint32_t *)data);
        return No_Fault;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    if (req->size == sizeof(uint32_t)) {
        uint32_t reg = *(uint32_t *)data;
        DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);

        switch (daddr) {
          case CR:
            regs.command = reg;
            // Disable takes priority over enable; enabling an idle
            // state machine kicks it into action.
            if (reg & CR_TXD) {
                txEnable = false;
            } else if (reg & CR_TXE) {
                txEnable = true;

                // the kernel is enabling the transmit machine
                if (txState == txIdle)
                    txKick();
            }

            if (reg & CR_RXD) {
                rxEnable = false;
            } else if (reg & CR_RXE) {
                rxEnable = true;

                if (rxState == rxIdle)
                    rxKick();
            }

            if (reg & CR_TXR)
                txReset();

            if (reg & CR_RXR)
                rxReset();

            if (reg & CR_SWI)
                devIntrPost(ISR_SWI);

            if (reg & CR_RST) {
                txReset();
                rxReset();

                regsReset();
            }
            break;

          case CFG:
            if (reg & CFG_LNKSTS ||
                reg & CFG_SPDSTS ||
                reg & CFG_DUPSTS ||
                reg & CFG_RESERVED ||
                reg & CFG_T64ADDR ||
                reg & CFG_PCI64_DET)
                panic("writing to read-only or reserved CFG bits!\n");

            regs.config |= reg & ~(CFG_LNKSTS | CFG_SPDSTS | CFG_DUPSTS |
                                   CFG_RESERVED | CFG_T64ADDR | CFG_PCI64_DET);

            // all these #if 0's are because i don't THINK the kernel needs to
            // have these implemented. if there is a problem relating to one of
            // these, you may need to add functionality in.
#if 0
            if (reg & CFG_TBI_EN) ;
            if (reg & CFG_MODE_1000) ;
#endif

            if (reg & CFG_AUTO_1000)
                panic("CFG_AUTO_1000 not implemented!\n");

#if 0
            if (reg & CFG_PINT_DUPSTS ||
                reg & CFG_PINT_LNKSTS ||
                reg & CFG_PINT_SPDSTS)
                ;

            if (reg & CFG_TMRTEST) ;
            if (reg & CFG_MRM_DIS) ;
            if (reg & CFG_MWI_DIS) ;

            if (reg & CFG_T64ADDR)
                panic("CFG_T64ADDR is read only register!\n");

            if (reg & CFG_PCI64_DET)
                panic("CFG_PCI64_DET is read only register!\n");

            if (reg & CFG_DATA64_EN) ;
            if (reg & CFG_M64ADDR) ;
            if (reg & CFG_PHY_RST) ;
            if (reg & CFG_PHY_DIS) ;
#endif

            // extended status enable controls whether descriptors carry
            // the extsts field (checksum offload info)
            if (reg & CFG_EXTSTS_EN)
                extstsEnable = true;
            else
                extstsEnable = false;

#if 0
            if (reg & CFG_REQALG) ;
            if (reg & CFG_SB) ;
            if (reg & CFG_POW) ;
            if (reg & CFG_EXD) ;
            if (reg & CFG_PESEL) ;
            if (reg & CFG_BROM_DIS) ;
            if (reg & CFG_EXT_125) ;
            if (reg & CFG_BEM) ;
#endif
            break;

          case MEAR:
            regs.mear = reg;
            // since phy is completely faked, MEAR_MD* don't matter
            // and since the driver never uses MEAR_EE*, they don't
            // matter
#if 0
            if (reg & MEAR_EEDI) ;
            if (reg & MEAR_EEDO) ; // this one is read only
            if (reg & MEAR_EECLK) ;
            if (reg & MEAR_EESEL) ;
            if (reg & MEAR_MDIO) ;
            if (reg & MEAR_MDDIR) ;
            if (reg & MEAR_MDC) ;
#endif
            break;

          case PTSCR:
            regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
            // these control BISTs for various parts of chip - we
            // don't care or do just fake that the BIST is done
            if (reg & PTSCR_RBIST_EN)
                regs.ptscr |= PTSCR_RBIST_DONE;
            if (reg & PTSCR_EEBIST_EN)
                regs.ptscr &= ~PTSCR_EEBIST_EN;
            if (reg & PTSCR_EELOAD_EN)
                regs.ptscr &= ~PTSCR_EELOAD_EN;
            break;

          case ISR: /* writing to the ISR has no effect */
            panic("ISR is a read only register!\n");

          case IMR:
            regs.imr = reg;
            // changing the mask can immediately raise or drop the CPU
            // interrupt line
            devIntrChangeMask();
            break;

          case IER:
            regs.ier = reg;
            break;

          case IHR:
            regs.ihr = reg;
            /* not going to implement real interrupt holdoff */
            break;

          case TXDP:
            // descriptor pointers are 4-byte aligned
            regs.txdp = (reg & 0xFFFFFFFC);
            assert(txState == txIdle);
            CTDD = false;
            break;

          case TXDP_HI:
            regs.txdp_hi = reg;
            break;

          case TXCFG:
            regs.txcfg = reg;
#if 0
            if (reg & TXCFG_CSI) ;
            if (reg & TXCFG_HBI) ;
            if (reg & TXCFG_MLB) ;
            if (reg & TXCFG_ATP) ;
            if (reg & TXCFG_ECRETRY) {
                /*
                 * this could easily be implemented, but considering
                 * the network is just a fake pipe, wouldn't make
                 * sense to do this
                 */
            }

            if (reg & TXCFG_BRST_DIS) ;
#endif

#if 0
            /* we handle our own DMA, ignore the kernel's exhortations */
            if (reg & TXCFG_MXDMA) ;
#endif

            // also, we currently don't care about fill/drain
            // thresholds though this may change in the future with
            // more realistic networks or a driver which changes it
            // according to feedback

            break;

          case GPIOR:
            regs.gpior = reg;
            /* these just control general purpose i/o pins, don't matter */
            break;

          case RXDP:
            regs.rxdp = reg;
            CRDD = false;
            break;

          case RXDP_HI:
            regs.rxdp_hi = reg;
            break;

          case RXCFG:
            regs.rxcfg = reg;
#if 0
            if (reg & RXCFG_AEP) ;
            if (reg & RXCFG_ARP) ;
            if (reg & RXCFG_STRIPCRC) ;
            if (reg & RXCFG_RX_RD) ;
            if (reg & RXCFG_ALP) ;
            if (reg & RXCFG_AIRL) ;

            /* we handle our own DMA, ignore what kernel says about it */
            if (reg & RXCFG_MXDMA) ;

            //also, we currently don't care about fill/drain thresholds
            //though this may change in the future with more realistic
            //networks or a driver which changes it according to feedback
            if (reg & (RXCFG_DRTH | RXCFG_DRTH0)) ;
#endif
            break;

          case PQCR:
            /* there is no priority queueing used in the linux 2.6 driver */
            regs.pqcr = reg;
            break;

          case WCSR:
            /* not going to implement wake on LAN */
            regs.wcsr = reg;
            break;

          case PCR:
            /* not going to implement pause control */
            regs.pcr = reg;
            break;

          case RFCR:
            regs.rfcr = reg;

            // mirror the filter-control bits into booleans used by the
            // receive path
            rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
            acceptBroadcast = (reg & RFCR_AAB) ? true : false;
            acceptMulticast = (reg & RFCR_AAM) ? true : false;
            acceptUnicast = (reg & RFCR_AAU) ? true : false;
            acceptPerfect = (reg & RFCR_APM) ? true : false;
            acceptArp = (reg & RFCR_AARP) ? true : false;

#if 0
            if (reg & RFCR_APAT)
                panic("RFCR_APAT not implemented!\n");
#endif

            if (reg & RFCR_MHEN || reg & RFCR_UHEN)
                panic("hash filtering not implemented!\n");

            if (reg & RFCR_ULM)
                panic("RFCR_ULM not implemented!\n");

            break;

          case RFDR:
            panic("the driver never writes to RFDR, something is wrong!\n");

          case BRAR:
            panic("the driver never uses BRAR, something is wrong!\n");

          case BRDR:
            panic("the driver never uses BRDR, something is wrong!\n");

          case SRR:
            panic("SRR is read only register!\n");

          case MIBC:
            panic("the driver never uses MIBC, something is wrong!\n");

          case VRCR:
            regs.vrcr = reg;
            break;

          case VTCR:
            regs.vtcr = reg;
            break;

          case VDR:
            panic("the driver never uses VDR, something is wrong!\n");
            break;

          case CCSR:
            /* not going to implement clockrun stuff */
            regs.ccsr = reg;
            break;

          case TBICR:
            regs.tbicr = reg;
            if (reg & TBICR_MR_LOOPBACK)
                panic("TBICR_MR_LOOPBACK never used, something wrong!\n");

            if (reg & TBICR_MR_AN_ENABLE) {
                // fake instantaneous autonegotiation: echo our
                // advertised abilities as the link partner's and report
                // completion + link up
                regs.tanlpar = regs.tanar;
                regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
            }

#if 0
            if (reg & TBICR_MR_RESTART_AN) ;
#endif

            break;

          case TBISR:
            panic("TBISR is read only register!\n");

          case TANAR:
            regs.tanar = reg;
            if (reg & TANAR_PS2)
                panic("this isn't used in driver, something wrong!\n");

            if (reg & TANAR_PS1)
                panic("this isn't used in driver, something wrong!\n");
            break;

          case TANLPAR:
            panic("this should only be written to by the fake phy!\n");

          case TANER:
            panic("TANER is read only register!\n");

          case TESR:
            regs.tesr = reg;
            break;

          default:
            panic("invalid register access daddr=%#x", daddr);
        }
    } else {
        panic("Invalid Request Size");
    }

    return No_Fault;
}
933
/**
 * Post device-level interrupts into the ISR and, if any posted bit is
 * unmasked, schedule a CPU interrupt.
 *
 * @param interrupts ISR bits to assert.  Reserved bits panic;
 *                   unimplemented bits only warn and are dropped.
 */
void
NSGigE::devIntrPost(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot set a reserved interrupt");

    if (interrupts & ISR_NOIMPL)
        warn("interrupt not implemented %#x\n", interrupts);

    interrupts &= ~ISR_NOIMPL;
    regs.isr |= interrupts;

    DPRINTF(EthernetIntr,
            "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
            interrupts, regs.isr, regs.imr);

    if ((regs.isr & regs.imr)) {
        Tick when = curTick;
        // NODELAY interrupts bypass the configured interrupt delay
        if (!(regs.isr & regs.imr & ISR_NODELAY))
            when += intrDelay;
        cpuIntrPost(when);
    }
}
957
958 void
959 NSGigE::devIntrClear(uint32_t interrupts)
960 {
961 if (interrupts & ISR_RESERVE)
962 panic("Cannot clear a reserved interrupt");
963
964 interrupts &= ~ISR_NOIMPL;
965 regs.isr &= ~interrupts;
966
967 DPRINTF(EthernetIntr,
968 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
969 interrupts, regs.isr, regs.imr);
970
971 if (!(regs.isr & regs.imr))
972 cpuIntrClear();
973 }
974
975 void
976 NSGigE::devIntrChangeMask()
977 {
978 DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
979 regs.isr, regs.imr, regs.isr & regs.imr);
980
981 if (regs.isr & regs.imr)
982 cpuIntrPost(curTick);
983 else
984 cpuIntrClear();
985 }
986
/**
 * Schedule the CPU-visible interrupt for tick @p when, coalescing with
 * any interrupt already scheduled earlier.
 *
 * @param when Tick at which the interrupt should fire; must not be in
 *             the past.
 */
void
NSGigE::cpuIntrPost(Tick when)
{
    // If the interrupt you want to post is later than an interrupt
    // already scheduled, just let it post in the coming one and don't
    // schedule another.
    // HOWEVER, must be sure that the scheduled intrTick is in the
    // future (this was formerly the source of a bug)
    /**
     * @todo this warning should be removed and the intrTick code should
     * be fixed.
     */
    assert(when >= curTick);
    assert(intrTick >= curTick || intrTick == 0);
    if (when > intrTick && intrTick != 0) {
        DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
                intrTick);
        return;
    }

    intrTick = when;
    // Defensive clamp (see @todo above): never schedule in the past.
    if (intrTick < curTick) {
        debug_break();
        intrTick = curTick;
    }

    DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
            intrTick);

    // Replace any previously scheduled (now superseded) interrupt event.
    if (intrEvent)
        intrEvent->squash();
    intrEvent = new IntrEvent(this, true);
    intrEvent->schedule(intrTick);
}
1021
/**
 * Fire the scheduled interrupt: post the PCI interrupt to the chipset
 * unless one is already pending at the CPU.  Called from IntrEvent at
 * the tick chosen by cpuIntrPost().
 */
void
NSGigE::cpuInterrupt()
{
    assert(intrTick == curTick);

    // Whether or not there's a pending interrupt, we don't care about
    // it anymore
    intrEvent = 0;
    intrTick = 0;

    // Don't send an interrupt if there's already one
    if (cpuPendingIntr) {
        DPRINTF(EthernetIntr,
                "would send an interrupt now, but there's already pending\n");
    } else {
        // Send interrupt
        cpuPendingIntr = true;

        DPRINTF(EthernetIntr, "posting cchip interrupt\n");
        tsunami->postPciInt(configData->config.hdr.pci0.interruptLine);
    }
}
1044
1045 void
1046 NSGigE::cpuIntrClear()
1047 {
1048 if (!cpuPendingIntr)
1049 return;
1050
1051 if (intrEvent) {
1052 intrEvent->squash();
1053 intrEvent = 0;
1054 }
1055
1056 intrTick = 0;
1057
1058 cpuPendingIntr = false;
1059
1060 DPRINTF(EthernetIntr, "clearing cchip interrupt\n");
1061 tsunami->clearPciInt(configData->config.hdr.pci0.interruptLine);
1062 }
1063
1064 bool
1065 NSGigE::cpuIntrPending() const
1066 { return cpuPendingIntr; }
1067
1068 void
1069 NSGigE::txReset()
1070 {
1071
1072 DPRINTF(Ethernet, "transmit reset\n");
1073
1074 CTDD = false;
1075 txFifoAvail = maxTxFifoSize;
1076 txEnable = false;;
1077 txFragPtr = 0;
1078 assert(txDescCnt == 0);
1079 txFifo.clear();
1080 txState = txIdle;
1081 assert(txDmaState == dmaIdle);
1082 }
1083
1084 void
1085 NSGigE::rxReset()
1086 {
1087 DPRINTF(Ethernet, "receive reset\n");
1088
1089 CRDD = false;
1090 assert(rxPktBytes == 0);
1091 rxFifoCnt = 0;
1092 rxEnable = false;
1093 rxFragPtr = 0;
1094 assert(rxDescCnt == 0);
1095 assert(rxDmaState == dmaIdle);
1096 rxFifo.clear();
1097 rxState = rxIdle;
1098 }
1099
/**
 * Restore the register file and filter flags to their power-on/reset
 * values.  Non-zero reset values come from the NS83820 spec sheet.
 */
void
NSGigE::regsReset()
{
    memset(&regs, 0, sizeof(regs));
    regs.config = CFG_LNKSTS;
    regs.mear = MEAR_MDDIR | MEAR_EEDO;
    regs.txcfg = 0x120; // set drain threshold to 1024 bytes and
                        // fill threshold to 32 bytes
    regs.rxcfg = 0x4; // set drain threshold to 16 bytes
    regs.srr = 0x0103; // set the silicon revision to rev B or 0x103
    regs.mibc = MIBC_FRZ;
    regs.vdr = 0x81; // set the vlan tag type to 802.1q
    regs.tesr = 0xc000; // TBI capable of both full and half duplex

    // receive-filter booleans mirror RFCR, which memset just zeroed
    extstsEnable = false;
    acceptBroadcast = false;
    acceptMulticast = false;
    acceptUnicast = false;
    acceptPerfect = false;
    acceptArp = false;
}
1121
/**
 * Perform the actual functional copy for a pending rx DMA read
 * (rxDmaAddr/rxDmaData/rxDmaLen) and return the DMA engine to idle.
 */
void
NSGigE::rxDmaReadCopy()
{
    assert(rxDmaState == dmaReading);

    physmem->dma_read((uint8_t *)rxDmaData, rxDmaAddr, rxDmaLen);
    rxDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
            rxDmaAddr, rxDmaLen);
    DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
}
1134
/**
 * Start (or complete) the rx DMA read described by rxDmaAddr/rxDmaLen.
 *
 * @return true if the caller must wait for a completion event
 *         (rxDmaReadEvent or the DMA interface callback); false if the
 *         copy finished synchronously and the caller may proceed.
 */
bool
NSGigE::doRxDmaRead()
{
    assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
    rxDmaState = dmaReading;

    // Timed path through the modeled DMA interface (unless this
    // transfer is configured as "free", i.e. zero-cost).
    if (dmaInterface && !rxDmaFree) {
        if (dmaInterface->busy())
            rxDmaState = dmaReadWaiting;
        else
            dmaInterface->doDMA(Read, rxDmaAddr, rxDmaLen, curTick,
                                &rxDmaReadEvent, true);
        return true;
    }

    // No modeled latency at all: do the copy right now.
    if (dmaReadDelay == 0 && dmaReadFactor == 0) {
        rxDmaReadCopy();
        return false;
    }

    // Fixed delay plus a per-64-byte-cacheline cost.
    Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
    Tick start = curTick + dmaReadDelay + factor;
    rxDmaReadEvent.schedule(start);
    return true;
}
1160
1161 void
1162 NSGigE::rxDmaReadDone()
1163 {
1164 assert(rxDmaState == dmaReading);
1165 rxDmaReadCopy();
1166
1167 // If the transmit state machine has a pending DMA, let it go first
1168 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1169 txKick();
1170
1171 rxKick();
1172 }
1173
/**
 * Perform the actual functional copy for a pending rx DMA write
 * (rxDmaAddr/rxDmaData/rxDmaLen) and return the DMA engine to idle.
 */
void
NSGigE::rxDmaWriteCopy()
{
    assert(rxDmaState == dmaWriting);

    physmem->dma_write(rxDmaAddr, (uint8_t *)rxDmaData, rxDmaLen);
    rxDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
            rxDmaAddr, rxDmaLen);
    DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
}
1186
/**
 * Start (or complete) the rx DMA write described by rxDmaAddr/rxDmaLen.
 * Mirrors doRxDmaRead(); uses WriteInvalidate on the modeled bus.
 *
 * @return true if the caller must wait for a completion event; false if
 *         the copy finished synchronously.
 */
bool
NSGigE::doRxDmaWrite()
{
    assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
    rxDmaState = dmaWriting;

    // Timed path through the modeled DMA interface (unless "free").
    if (dmaInterface && !rxDmaFree) {
        if (dmaInterface->busy())
            rxDmaState = dmaWriteWaiting;
        else
            dmaInterface->doDMA(WriteInvalidate, rxDmaAddr, rxDmaLen, curTick,
                                &rxDmaWriteEvent, true);
        return true;
    }

    // No modeled latency: copy immediately.
    if (dmaWriteDelay == 0 && dmaWriteFactor == 0) {
        rxDmaWriteCopy();
        return false;
    }

    // Fixed delay plus a per-64-byte-cacheline cost.
    Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
    Tick start = curTick + dmaWriteDelay + factor;
    rxDmaWriteEvent.schedule(start);
    return true;
}
1212
1213 void
1214 NSGigE::rxDmaWriteDone()
1215 {
1216 assert(rxDmaState == dmaWriting);
1217 rxDmaWriteCopy();
1218
1219 // If the transmit state machine has a pending DMA, let it go first
1220 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1221 txKick();
1222
1223 rxKick();
1224 }
1225
1226 void
1227 NSGigE::rxKick()
1228 {
1229 DPRINTF(EthernetSM, "receive kick rxState=%s (rxBuf.size=%d)\n",
1230 NsRxStateStrings[rxState], rxFifo.size());
1231
1232 if (rxKickTick > curTick) {
1233 DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
1234 rxKickTick);
1235 return;
1236 }
1237
1238 next:
1239 switch(rxDmaState) {
1240 case dmaReadWaiting:
1241 if (doRxDmaRead())
1242 goto exit;
1243 break;
1244 case dmaWriteWaiting:
1245 if (doRxDmaWrite())
1246 goto exit;
1247 break;
1248 default:
1249 break;
1250 }
1251
1252 // see state machine from spec for details
1253 // the way this works is, if you finish work on one state and can
1254 // go directly to another, you do that through jumping to the
1255 // label "next". however, if you have intermediate work, like DMA
1256 // so that you can't go to the next state yet, you go to exit and
1257 // exit the loop. however, when the DMA is done it will trigger
1258 // an event and come back to this loop.
1259 switch (rxState) {
1260 case rxIdle:
1261 if (!rxEnable) {
1262 DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n");
1263 goto exit;
1264 }
1265
1266 if (CRDD) {
1267 rxState = rxDescRefr;
1268
1269 rxDmaAddr = regs.rxdp & 0x3fffffff;
1270 rxDmaData = &rxDescCache + offsetof(ns_desc, link);
1271 rxDmaLen = sizeof(rxDescCache.link);
1272 rxDmaFree = dmaDescFree;
1273
1274 descDmaReads++;
1275 descDmaRdBytes += rxDmaLen;
1276
1277 if (doRxDmaRead())
1278 goto exit;
1279 } else {
1280 rxState = rxDescRead;
1281
1282 rxDmaAddr = regs.rxdp & 0x3fffffff;
1283 rxDmaData = &rxDescCache;
1284 rxDmaLen = sizeof(ns_desc);
1285 rxDmaFree = dmaDescFree;
1286
1287 descDmaReads++;
1288 descDmaRdBytes += rxDmaLen;
1289
1290 if (doRxDmaRead())
1291 goto exit;
1292 }
1293 break;
1294
1295 case rxDescRefr:
1296 if (rxDmaState != dmaIdle)
1297 goto exit;
1298
1299 rxState = rxAdvance;
1300 break;
1301
1302 case rxDescRead:
1303 if (rxDmaState != dmaIdle)
1304 goto exit;
1305
1306 DPRINTF(EthernetDesc,
1307 "rxDescCache: addr=%08x read descriptor\n",
1308 regs.rxdp & 0x3fffffff);
1309 DPRINTF(EthernetDesc,
1310 "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
1311 rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts,
1312 rxDescCache.extsts);
1313
1314 if (rxDescCache.cmdsts & CMDSTS_OWN) {
1315 devIntrPost(ISR_RXIDLE);
1316 rxState = rxIdle;
1317 goto exit;
1318 } else {
1319 rxState = rxFifoBlock;
1320 rxFragPtr = rxDescCache.bufptr;
1321 rxDescCnt = rxDescCache.cmdsts & CMDSTS_LEN_MASK;
1322 }
1323 break;
1324
1325 case rxFifoBlock:
1326 if (!rxPacket) {
1327 /**
1328 * @todo in reality, we should be able to start processing
1329 * the packet as it arrives, and not have to wait for the
1330 * full packet ot be in the receive fifo.
1331 */
1332 if (rxFifo.empty())
1333 goto exit;
1334
1335 DPRINTF(EthernetSM, "****processing receive of new packet****\n");
1336
1337 // If we don't have a packet, grab a new one from the fifo.
1338 rxPacket = rxFifo.front();
1339 rxPktBytes = rxPacket->length;
1340 rxPacketBufPtr = rxPacket->data;
1341
1342 #if TRACING_ON
1343 if (DTRACE(Ethernet)) {
1344 const IpHdr *ip = rxPacket->ip();
1345 if (ip) {
1346 DPRINTF(Ethernet, "ID is %d\n", ip->id());
1347 const TcpHdr *tcp = rxPacket->tcp();
1348 if (tcp) {
1349 DPRINTF(Ethernet, "Src Port=%d, Dest Port=%d\n",
1350 tcp->sport(), tcp->dport());
1351 }
1352 }
1353 }
1354 #endif
1355
1356 // sanity check - i think the driver behaves like this
1357 assert(rxDescCnt >= rxPktBytes);
1358
1359 // Must clear the value before popping to decrement the
1360 // reference count
1361 rxFifo.front() = NULL;
1362 rxFifo.pop_front();
1363 rxFifoCnt -= rxPacket->length;
1364 }
1365
1366
1367 // dont' need the && rxDescCnt > 0 if driver sanity check
1368 // above holds
1369 if (rxPktBytes > 0) {
1370 rxState = rxFragWrite;
1371 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
1372 // check holds
1373 rxXferLen = rxPktBytes;
1374
1375 rxDmaAddr = rxFragPtr & 0x3fffffff;
1376 rxDmaData = rxPacketBufPtr;
1377 rxDmaLen = rxXferLen;
1378 rxDmaFree = dmaDataFree;
1379
1380 if (doRxDmaWrite())
1381 goto exit;
1382
1383 } else {
1384 rxState = rxDescWrite;
1385
1386 //if (rxPktBytes == 0) { /* packet is done */
1387 assert(rxPktBytes == 0);
1388 DPRINTF(EthernetSM, "done with receiving packet\n");
1389
1390 rxDescCache.cmdsts |= CMDSTS_OWN;
1391 rxDescCache.cmdsts &= ~CMDSTS_MORE;
1392 rxDescCache.cmdsts |= CMDSTS_OK;
1393 rxDescCache.cmdsts &= 0xffff0000;
1394 rxDescCache.cmdsts += rxPacket->length; //i.e. set CMDSTS_SIZE
1395
1396 #if 0
1397 /*
1398 * all the driver uses these are for its own stats keeping
1399 * which we don't care about, aren't necessary for
1400 * functionality and doing this would just slow us down.
1401 * if they end up using this in a later version for
1402 * functional purposes, just undef
1403 */
1404 if (rxFilterEnable) {
1405 rxDescCache.cmdsts &= ~CMDSTS_DEST_MASK;
1406 EthHdr *eth = rxFifoFront()->eth();
1407 if (eth->unicast())
1408 rxDescCache.cmdsts |= CMDSTS_DEST_SELF;
1409 if (eth->multicast())
1410 rxDescCache.cmdsts |= CMDSTS_DEST_MULTI;
1411 if (eth->broadcast())
1412 rxDescCache.cmdsts |= CMDSTS_DEST_MASK;
1413 }
1414 #endif
1415
1416 if (extstsEnable && rxPacket->ip()) {
1417 rxDescCache.extsts |= EXTSTS_IPPKT;
1418 rxIpChecksums++;
1419 IpHdr *ip = rxPacket->ip();
1420 if (ip->ip_cksum() != 0) {
1421 DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
1422 rxDescCache.extsts |= EXTSTS_IPERR;
1423 }
1424 if (rxPacket->tcp()) {
1425 rxDescCache.extsts |= EXTSTS_TCPPKT;
1426 rxTcpChecksums++;
1427 if (ip->tu_cksum() != 0) {
1428 DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
1429 rxDescCache.extsts |= EXTSTS_TCPERR;
1430
1431 }
1432 } else if (rxPacket->udp()) {
1433 rxDescCache.extsts |= EXTSTS_UDPPKT;
1434 rxUdpChecksums++;
1435 if (ip->tu_cksum() != 0) {
1436 DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
1437 rxDescCache.extsts |= EXTSTS_UDPERR;
1438 }
1439 }
1440 }
1441 rxPacket = 0;
1442
1443 /*
1444 * the driver seems to always receive into desc buffers
1445 * of size 1514, so you never have a pkt that is split
1446 * into multiple descriptors on the receive side, so
1447 * i don't implement that case, hence the assert above.
1448 */
1449
1450 DPRINTF(EthernetDesc,
1451 "rxDescCache: addr=%08x writeback cmdsts extsts\n",
1452 regs.rxdp & 0x3fffffff);
1453 DPRINTF(EthernetDesc,
1454 "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
1455 rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts,
1456 rxDescCache.extsts);
1457
1458 rxDmaAddr = (regs.rxdp + offsetof(ns_desc, cmdsts)) & 0x3fffffff;
1459 rxDmaData = &(rxDescCache.cmdsts);
1460 rxDmaLen = sizeof(rxDescCache.cmdsts) + sizeof(rxDescCache.extsts);
1461 rxDmaFree = dmaDescFree;
1462
1463 descDmaWrites++;
1464 descDmaWrBytes += rxDmaLen;
1465
1466 if (doRxDmaWrite())
1467 goto exit;
1468 }
1469 break;
1470
1471 case rxFragWrite:
1472 if (rxDmaState != dmaIdle)
1473 goto exit;
1474
1475 rxPacketBufPtr += rxXferLen;
1476 rxFragPtr += rxXferLen;
1477 rxPktBytes -= rxXferLen;
1478
1479 rxState = rxFifoBlock;
1480 break;
1481
1482 case rxDescWrite:
1483 if (rxDmaState != dmaIdle)
1484 goto exit;
1485
1486 assert(rxDescCache.cmdsts & CMDSTS_OWN);
1487
1488 assert(rxPacket == 0);
1489 devIntrPost(ISR_RXOK);
1490
1491 if (rxDescCache.cmdsts & CMDSTS_INTR)
1492 devIntrPost(ISR_RXDESC);
1493
1494 if (!rxEnable) {
1495 DPRINTF(EthernetSM, "Halting the RX state machine\n");
1496 rxState = rxIdle;
1497 goto exit;
1498 } else
1499 rxState = rxAdvance;
1500 break;
1501
1502 case rxAdvance:
1503 if (rxDescCache.link == 0) {
1504 devIntrPost(ISR_RXIDLE);
1505 rxState = rxIdle;
1506 CRDD = true;
1507 goto exit;
1508 } else {
1509 rxState = rxDescRead;
1510 regs.rxdp = rxDescCache.link;
1511 CRDD = false;
1512
1513 rxDmaAddr = regs.rxdp & 0x3fffffff;
1514 rxDmaData = &rxDescCache;
1515 rxDmaLen = sizeof(ns_desc);
1516 rxDmaFree = dmaDescFree;
1517
1518 if (doRxDmaRead())
1519 goto exit;
1520 }
1521 break;
1522
1523 default:
1524 panic("Invalid rxState!");
1525 }
1526
1527 DPRINTF(EthernetSM, "entering next rxState=%s\n",
1528 NsRxStateStrings[rxState]);
1529
1530 goto next;
1531
1532 exit:
1533 /**
1534 * @todo do we want to schedule a future kick?
1535 */
1536 DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
1537 NsRxStateStrings[rxState]);
1538 }
1539
// Try to hand the packet at the head of txFifo to the attached link.
// On success, updates stats, returns the packet's bytes to the FIFO
// budget and posts ISR_TXOK; on failure (or if packets remain) a retry
// event is scheduled.
void
NSGigE::transmit()
{
    if (txFifo.empty()) {
        DPRINTF(Ethernet, "nothing to transmit\n");
        return;
    }

    DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n",
            maxTxFifoSize - txFifoAvail);
    if (interface->sendPacket(txFifo.front())) {
#if TRACING_ON
        if (DTRACE(Ethernet)) {
            const IpHdr *ip = txFifo.front()->ip();
            if (ip) {
                DPRINTF(Ethernet, "ID is %d\n", ip->id());
                const TcpHdr *tcp = txFifo.front()->tcp();
                if (tcp) {
                    DPRINTF(Ethernet, "Src Port=%d, Dest Port=%d\n",
                            tcp->sport(), tcp->dport());
                }
            }
        }
#endif

        DDUMP(Ethernet, txFifo.front()->data, txFifo.front()->length);
        txBytes += txFifo.front()->length;
        txPackets++;

        // The packet has left the device: give its bytes back to the
        // FIFO's free space.
        txFifoAvail += txFifo.front()->length;

        DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n",
                txFifoAvail);
        // Clear the slot before popping so the refcounted packet is
        // released (same idiom as the receive side).
        txFifo.front() = NULL;
        txFifo.pop_front();

        /*
         * normally do a writeback of the descriptor here, and ONLY
         * after that is done, send this interrupt. but since our
         * stuff never actually fails, just do this interrupt here,
         * otherwise the code has to stray from this nice format.
         * besides, it's functionally the same.
         */
        devIntrPost(ISR_TXOK);
    } else {
        DPRINTF(Ethernet,
                "May need to rethink always sending the descriptors back?\n");
    }

    // Retry later if the link rejected the packet or more are queued.
    if (!txFifo.empty() && !txEvent.scheduled()) {
        DPRINTF(Ethernet, "reschedule transmit\n");
        txEvent.schedule(curTick + 1000);
    }
}
1594
1595 void
1596 NSGigE::txDmaReadCopy()
1597 {
1598 assert(txDmaState == dmaReading);
1599
1600 physmem->dma_read((uint8_t *)txDmaData, txDmaAddr, txDmaLen);
1601 txDmaState = dmaIdle;
1602
1603 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
1604 txDmaAddr, txDmaLen);
1605 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1606 }
1607
1608 bool
1609 NSGigE::doTxDmaRead()
1610 {
1611 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
1612 txDmaState = dmaReading;
1613
1614 if (dmaInterface && !txDmaFree) {
1615 if (dmaInterface->busy())
1616 txDmaState = dmaReadWaiting;
1617 else
1618 dmaInterface->doDMA(Read, txDmaAddr, txDmaLen, curTick,
1619 &txDmaReadEvent, true);
1620 return true;
1621 }
1622
1623 if (dmaReadDelay == 0 && dmaReadFactor == 0.0) {
1624 txDmaReadCopy();
1625 return false;
1626 }
1627
1628 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
1629 Tick start = curTick + dmaReadDelay + factor;
1630 txDmaReadEvent.schedule(start);
1631 return true;
1632 }
1633
1634 void
1635 NSGigE::txDmaReadDone()
1636 {
1637 assert(txDmaState == dmaReading);
1638 txDmaReadCopy();
1639
1640 // If the receive state machine has a pending DMA, let it go first
1641 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1642 rxKick();
1643
1644 txKick();
1645 }
1646
1647 void
1648 NSGigE::txDmaWriteCopy()
1649 {
1650 assert(txDmaState == dmaWriting);
1651
1652 physmem->dma_write(txDmaAddr, (uint8_t *)txDmaData, txDmaLen);
1653 txDmaState = dmaIdle;
1654
1655 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
1656 txDmaAddr, txDmaLen);
1657 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1658 }
1659
1660 bool
1661 NSGigE::doTxDmaWrite()
1662 {
1663 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
1664 txDmaState = dmaWriting;
1665
1666 if (dmaInterface && !txDmaFree) {
1667 if (dmaInterface->busy())
1668 txDmaState = dmaWriteWaiting;
1669 else
1670 dmaInterface->doDMA(WriteInvalidate, txDmaAddr, txDmaLen, curTick,
1671 &txDmaWriteEvent, true);
1672 return true;
1673 }
1674
1675 if (dmaWriteDelay == 0 && dmaWriteFactor == 0.0) {
1676 txDmaWriteCopy();
1677 return false;
1678 }
1679
1680 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
1681 Tick start = curTick + dmaWriteDelay + factor;
1682 txDmaWriteEvent.schedule(start);
1683 return true;
1684 }
1685
1686 void
1687 NSGigE::txDmaWriteDone()
1688 {
1689 assert(txDmaState == dmaWriting);
1690 txDmaWriteCopy();
1691
1692 // If the receive state machine has a pending DMA, let it go first
1693 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1694 rxKick();
1695
1696 txKick();
1697 }
1698
1699 void
1700 NSGigE::txKick()
1701 {
1702 DPRINTF(EthernetSM, "transmit kick txState=%s\n",
1703 NsTxStateStrings[txState]);
1704
1705 if (txKickTick > curTick) {
1706 DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
1707 txKickTick);
1708
1709 return;
1710 }
1711
1712 next:
1713 switch(txDmaState) {
1714 case dmaReadWaiting:
1715 if (doTxDmaRead())
1716 goto exit;
1717 break;
1718 case dmaWriteWaiting:
1719 if (doTxDmaWrite())
1720 goto exit;
1721 break;
1722 default:
1723 break;
1724 }
1725
1726 switch (txState) {
1727 case txIdle:
1728 if (!txEnable) {
1729 DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n");
1730 goto exit;
1731 }
1732
1733 if (CTDD) {
1734 txState = txDescRefr;
1735
1736 txDmaAddr = regs.txdp & 0x3fffffff;
1737 txDmaData = &txDescCache + offsetof(ns_desc, link);
1738 txDmaLen = sizeof(txDescCache.link);
1739 txDmaFree = dmaDescFree;
1740
1741 descDmaReads++;
1742 descDmaRdBytes += txDmaLen;
1743
1744 if (doTxDmaRead())
1745 goto exit;
1746
1747 } else {
1748 txState = txDescRead;
1749
1750 txDmaAddr = regs.txdp & 0x3fffffff;
1751 txDmaData = &txDescCache;
1752 txDmaLen = sizeof(ns_desc);
1753 txDmaFree = dmaDescFree;
1754
1755 descDmaReads++;
1756 descDmaRdBytes += txDmaLen;
1757
1758 if (doTxDmaRead())
1759 goto exit;
1760 }
1761 break;
1762
1763 case txDescRefr:
1764 if (txDmaState != dmaIdle)
1765 goto exit;
1766
1767 txState = txAdvance;
1768 break;
1769
1770 case txDescRead:
1771 if (txDmaState != dmaIdle)
1772 goto exit;
1773
1774 DPRINTF(EthernetDesc,
1775 "txDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
1776 txDescCache.link, txDescCache.bufptr, txDescCache.cmdsts,
1777 txDescCache.extsts);
1778
1779 if (txDescCache.cmdsts & CMDSTS_OWN) {
1780 txState = txFifoBlock;
1781 txFragPtr = txDescCache.bufptr;
1782 txDescCnt = txDescCache.cmdsts & CMDSTS_LEN_MASK;
1783 } else {
1784 devIntrPost(ISR_TXIDLE);
1785 txState = txIdle;
1786 goto exit;
1787 }
1788 break;
1789
1790 case txFifoBlock:
1791 if (!txPacket) {
1792 DPRINTF(EthernetSM, "****starting the tx of a new packet****\n");
1793 txPacket = new PacketData;
1794 txPacket->data = new uint8_t[16384];
1795 txPacketBufPtr = txPacket->data;
1796 }
1797
1798 if (txDescCnt == 0) {
1799 DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n");
1800 if (txDescCache.cmdsts & CMDSTS_MORE) {
1801 DPRINTF(EthernetSM, "there are more descriptors to come\n");
1802 txState = txDescWrite;
1803
1804 txDescCache.cmdsts &= ~CMDSTS_OWN;
1805
1806 txDmaAddr = regs.txdp + offsetof(ns_desc, cmdsts);
1807 txDmaAddr &= 0x3fffffff;
1808 txDmaData = &(txDescCache.cmdsts);
1809 txDmaLen = sizeof(txDescCache.cmdsts);
1810 txDmaFree = dmaDescFree;
1811
1812 if (doTxDmaWrite())
1813 goto exit;
1814
1815 } else { /* this packet is totally done */
1816 DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
1817 /* deal with the the packet that just finished */
1818 if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
1819 IpHdr *ip = txPacket->ip();
1820 if (txDescCache.extsts & EXTSTS_UDPPKT) {
1821 UdpHdr *udp = txPacket->udp();
1822 udp->sum(0);
1823 udp->sum(ip->tu_cksum());
1824 txUdpChecksums++;
1825 } else if (txDescCache.extsts & EXTSTS_TCPPKT) {
1826 TcpHdr *tcp = txPacket->tcp();
1827 tcp->sum(0);
1828 tcp->sum(ip->tu_cksum());
1829 txTcpChecksums++;
1830 }
1831 if (txDescCache.extsts & EXTSTS_IPPKT) {
1832 ip->sum(0);
1833 ip->sum(ip->ip_cksum());
1834 txIpChecksums++;
1835 }
1836 }
1837
1838 txPacket->length = txPacketBufPtr - txPacket->data;
1839 // this is just because the receive can't handle a
1840 // packet bigger want to make sure
1841 assert(txPacket->length <= 1514);
1842 txFifo.push_back(txPacket);
1843
1844 /*
1845 * this following section is not tqo spec, but
1846 * functionally shouldn't be any different. normally,
1847 * the chip will wait til the transmit has occurred
1848 * before writing back the descriptor because it has
1849 * to wait to see that it was successfully transmitted
1850 * to decide whether to set CMDSTS_OK or not.
1851 * however, in the simulator since it is always
1852 * successfully transmitted, and writing it exactly to
1853 * spec would complicate the code, we just do it here
1854 */
1855
1856 txDescCache.cmdsts &= ~CMDSTS_OWN;
1857 txDescCache.cmdsts |= CMDSTS_OK;
1858
1859 DPRINTF(EthernetDesc,
1860 "txDesc writeback: cmdsts=%08x extsts=%08x\n",
1861 txDescCache.cmdsts, txDescCache.extsts);
1862
1863 txDmaAddr = regs.txdp + offsetof(ns_desc, cmdsts);
1864 txDmaAddr &= 0x3fffffff;
1865 txDmaData = &(txDescCache.cmdsts);
1866 txDmaLen = sizeof(txDescCache.cmdsts) +
1867 sizeof(txDescCache.extsts);
1868 txDmaFree = dmaDescFree;
1869
1870 descDmaWrites++;
1871 descDmaWrBytes += txDmaLen;
1872
1873 transmit();
1874 txPacket = 0;
1875
1876 if (!txEnable) {
1877 DPRINTF(EthernetSM, "halting TX state machine\n");
1878 txState = txIdle;
1879 goto exit;
1880 } else
1881 txState = txAdvance;
1882
1883 if (doTxDmaWrite())
1884 goto exit;
1885 }
1886 } else {
1887 DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
1888 if (txFifoAvail) {
1889 txState = txFragRead;
1890
1891 /*
1892 * The number of bytes transferred is either whatever
1893 * is left in the descriptor (txDescCnt), or if there
1894 * is not enough room in the fifo, just whatever room
1895 * is left in the fifo
1896 */
1897 txXferLen = min<uint32_t>(txDescCnt, txFifoAvail);
1898
1899 txDmaAddr = txFragPtr & 0x3fffffff;
1900 txDmaData = txPacketBufPtr;
1901 txDmaLen = txXferLen;
1902 txDmaFree = dmaDataFree;
1903
1904 if (doTxDmaRead())
1905 goto exit;
1906 } else {
1907 txState = txFifoBlock;
1908 transmit();
1909
1910 goto exit;
1911 }
1912
1913 }
1914 break;
1915
1916 case txFragRead:
1917 if (txDmaState != dmaIdle)
1918 goto exit;
1919
1920 txPacketBufPtr += txXferLen;
1921 txFragPtr += txXferLen;
1922 txDescCnt -= txXferLen;
1923 txFifoAvail -= txXferLen;
1924
1925 txState = txFifoBlock;
1926 break;
1927
1928 case txDescWrite:
1929 if (txDmaState != dmaIdle)
1930 goto exit;
1931
1932 if (txDescCache.cmdsts & CMDSTS_INTR)
1933 devIntrPost(ISR_TXDESC);
1934
1935 txState = txAdvance;
1936 break;
1937
1938 case txAdvance:
1939 if (txDescCache.link == 0) {
1940 devIntrPost(ISR_TXIDLE);
1941 txState = txIdle;
1942 goto exit;
1943 } else {
1944 txState = txDescRead;
1945 regs.txdp = txDescCache.link;
1946 CTDD = false;
1947
1948 txDmaAddr = txDescCache.link & 0x3fffffff;
1949 txDmaData = &txDescCache;
1950 txDmaLen = sizeof(ns_desc);
1951 txDmaFree = dmaDescFree;
1952
1953 if (doTxDmaRead())
1954 goto exit;
1955 }
1956 break;
1957
1958 default:
1959 panic("invalid state");
1960 }
1961
1962 DPRINTF(EthernetSM, "entering next txState=%s\n",
1963 NsTxStateStrings[txState]);
1964
1965 goto next;
1966
1967 exit:
1968 /**
1969 * @todo do we want to schedule a future kick?
1970 */
1971 DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
1972 NsTxStateStrings[txState]);
1973 }
1974
1975 void
1976 NSGigE::transferDone()
1977 {
1978 if (txFifo.empty()) {
1979 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
1980 return;
1981 }
1982
1983 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
1984
1985 if (txEvent.scheduled())
1986 txEvent.reschedule(curTick + 1);
1987 else
1988 txEvent.schedule(curTick + 1);
1989 }
1990
1991 bool
1992 NSGigE::rxFilter(PacketPtr packet)
1993 {
1994 bool drop = true;
1995 string type;
1996
1997 EthHdr *eth = packet->eth();
1998 if (eth->unicast()) {
1999 // If we're accepting all unicast addresses
2000 if (acceptUnicast)
2001 drop = false;
2002
2003 // If we make a perfect match
2004 if (acceptPerfect &&
2005 memcmp(rom.perfectMatch, packet->data, EADDR_LEN) == 0)
2006 drop = false;
2007
2008 if (acceptArp && eth->type() == ETH_TYPE_ARP)
2009 drop = false;
2010
2011 } else if (eth->broadcast()) {
2012 // if we're accepting broadcasts
2013 if (acceptBroadcast)
2014 drop = false;
2015
2016 } else if (eth->multicast()) {
2017 // if we're accepting all multicasts
2018 if (acceptMulticast)
2019 drop = false;
2020
2021 }
2022
2023 if (drop) {
2024 DPRINTF(Ethernet, "rxFilter drop\n");
2025 DDUMP(EthernetData, packet->data, packet->length);
2026 }
2027
2028 return drop;
2029 }
2030
// Link-layer entry point: a packet has arrived from the wire.  Returns
// true if the packet was consumed (accepted, filtered, or dropped
// because RX is disabled), false only when the receive FIFO overflows.
bool
NSGigE::recvPacket(PacketPtr packet)
{
    rxBytes += packet->length;
    rxPackets++;

    DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
            maxRxFifoSize - rxFifoCnt);

    if (!rxEnable) {
        DPRINTF(Ethernet, "receive disabled...packet dropped\n");
        // NOTE(review): drops into the debugger whenever a packet
        // arrives while RX is disabled -- presumably a development
        // aid; confirm it is intentional to leave this enabled.
        debug_break();
        interface->recvDone();
        return true;
    }

    if (rxFilterEnable && rxFilter(packet)) {
        DPRINTF(Ethernet, "packet filtered...dropped\n");
        interface->recvDone();
        return true;
    }

    // NOTE(review): ">=" also rejects a packet that would exactly fill
    // the FIFO; confirm whether ">" was intended.
    if ((rxFifoCnt + packet->length) >= maxRxFifoSize) {
        DPRINTF(Ethernet,
                "packet will not fit in receive buffer...packet dropped\n");
        devIntrPost(ISR_RXORN);
        // false tells the link layer we did NOT consume the packet.
        return false;
    }

    rxFifo.push_back(packet);
    rxFifoCnt += packet->length;
    interface->recvDone();

    // Poke the receive state machine in case it was waiting for data.
    rxKick();
    return true;
}
2067
2068 //=====================================================================
2069 //
2070 //
// Write the complete device state to a checkpoint stream.  Must be
// kept field-for-field in sync with unserialize() below.
void
NSGigE::serialize(ostream &os)
{
    // Serialize the PciDev base class
    PciDev::serialize(os);

    /*
     * Finalize any DMA events now.
     */
    // NOTE(review): the scheduled copy events are executed early here
    // so no DMA is "in flight" in the checkpoint, but the events are
    // not descheduled -- presumably serialization only happens when
    // the simulator is draining; confirm.
    if (rxDmaReadEvent.scheduled())
        rxDmaReadCopy();
    if (rxDmaWriteEvent.scheduled())
        rxDmaWriteCopy();
    if (txDmaReadEvent.scheduled())
        txDmaReadCopy();
    if (txDmaWriteEvent.scheduled())
        txDmaWriteCopy();

    /*
     * Serialize the device registers
     */
    SERIALIZE_SCALAR(regs.command);
    SERIALIZE_SCALAR(regs.config);
    SERIALIZE_SCALAR(regs.mear);
    SERIALIZE_SCALAR(regs.ptscr);
    SERIALIZE_SCALAR(regs.isr);
    SERIALIZE_SCALAR(regs.imr);
    SERIALIZE_SCALAR(regs.ier);
    SERIALIZE_SCALAR(regs.ihr);
    SERIALIZE_SCALAR(regs.txdp);
    SERIALIZE_SCALAR(regs.txdp_hi);
    SERIALIZE_SCALAR(regs.txcfg);
    SERIALIZE_SCALAR(regs.gpior);
    SERIALIZE_SCALAR(regs.rxdp);
    SERIALIZE_SCALAR(regs.rxdp_hi);
    SERIALIZE_SCALAR(regs.rxcfg);
    SERIALIZE_SCALAR(regs.pqcr);
    SERIALIZE_SCALAR(regs.wcsr);
    SERIALIZE_SCALAR(regs.pcr);
    SERIALIZE_SCALAR(regs.rfcr);
    SERIALIZE_SCALAR(regs.rfdr);
    SERIALIZE_SCALAR(regs.srr);
    SERIALIZE_SCALAR(regs.mibc);
    SERIALIZE_SCALAR(regs.vrcr);
    SERIALIZE_SCALAR(regs.vtcr);
    SERIALIZE_SCALAR(regs.vdr);
    SERIALIZE_SCALAR(regs.ccsr);
    SERIALIZE_SCALAR(regs.tbicr);
    SERIALIZE_SCALAR(regs.tbisr);
    SERIALIZE_SCALAR(regs.tanar);
    SERIALIZE_SCALAR(regs.tanlpar);
    SERIALIZE_SCALAR(regs.taner);
    SERIALIZE_SCALAR(regs.tesr);

    SERIALIZE_ARRAY(rom.perfectMatch, EADDR_LEN);

    SERIALIZE_SCALAR(ioEnable);

    /*
     * Serialize the data Fifos
     */
    // Each packet gets its own checkpoint section named
    // "<device>.txFifo<i>" / "<device>.rxFifo<i>".
    int txNumPkts = txFifo.size();
    SERIALIZE_SCALAR(txNumPkts);
    int i = 0;
    pktiter_t end = txFifo.end();
    for (pktiter_t p = txFifo.begin(); p != end; ++p) {
        nameOut(os, csprintf("%s.txFifo%d", name(), i++));
        (*p)->serialize(os);
    }

    int rxNumPkts = rxFifo.size();
    SERIALIZE_SCALAR(rxNumPkts);
    i = 0;
    end = rxFifo.end();
    for (pktiter_t p = rxFifo.begin(); p != end; ++p) {
        nameOut(os, csprintf("%s.rxFifo%d", name(), i++));
        (*p)->serialize(os);
    }

    /*
     * Serialize the various helper variables
     */
    // In-flight packets are stored together with the byte offset of
    // the buffer cursor so the raw pointer can be rebuilt on restore.
    bool txPacketExists = txPacket;
    SERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        nameOut(os, csprintf("%s.txPacket", name()));
        txPacket->serialize(os);
        uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
        SERIALIZE_SCALAR(txPktBufPtr);
    }

    bool rxPacketExists = rxPacket;
    SERIALIZE_SCALAR(rxPacketExists);
    if (rxPacketExists) {
        nameOut(os, csprintf("%s.rxPacket", name()));
        rxPacket->serialize(os);
        uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
        SERIALIZE_SCALAR(rxPktBufPtr);
    }

    SERIALIZE_SCALAR(txXferLen);
    SERIALIZE_SCALAR(rxXferLen);

    /*
     * Serialize DescCaches
     */
    SERIALIZE_SCALAR(txDescCache.link);
    SERIALIZE_SCALAR(txDescCache.bufptr);
    SERIALIZE_SCALAR(txDescCache.cmdsts);
    SERIALIZE_SCALAR(txDescCache.extsts);
    SERIALIZE_SCALAR(rxDescCache.link);
    SERIALIZE_SCALAR(rxDescCache.bufptr);
    SERIALIZE_SCALAR(rxDescCache.cmdsts);
    SERIALIZE_SCALAR(rxDescCache.extsts);

    /*
     * Serialize tx state machine
     */
    // Enums are stored as plain ints for portability of checkpoints.
    int txState = this->txState;
    SERIALIZE_SCALAR(txState);
    SERIALIZE_SCALAR(txEnable);
    SERIALIZE_SCALAR(CTDD);
    SERIALIZE_SCALAR(txFifoAvail);
    SERIALIZE_SCALAR(txFragPtr);
    SERIALIZE_SCALAR(txDescCnt);
    int txDmaState = this->txDmaState;
    SERIALIZE_SCALAR(txDmaState);

    /*
     * Serialize rx state machine
     */
    int rxState = this->rxState;
    SERIALIZE_SCALAR(rxState);
    SERIALIZE_SCALAR(rxEnable);
    SERIALIZE_SCALAR(CRDD);
    SERIALIZE_SCALAR(rxPktBytes);
    SERIALIZE_SCALAR(rxFifoCnt);
    SERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState = this->rxDmaState;
    SERIALIZE_SCALAR(rxDmaState);

    SERIALIZE_SCALAR(extstsEnable);

    /*
     * If there's a pending transmit, store the time so we can
     * reschedule it later
     */
    // Stored relative to curTick so it can be re-added on restore.
    Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
    SERIALIZE_SCALAR(transmitTick);

    /*
     * receive address filter settings
     */
    SERIALIZE_SCALAR(rxFilterEnable);
    SERIALIZE_SCALAR(acceptBroadcast);
    SERIALIZE_SCALAR(acceptMulticast);
    SERIALIZE_SCALAR(acceptUnicast);
    SERIALIZE_SCALAR(acceptPerfect);
    SERIALIZE_SCALAR(acceptArp);

    /*
     * Keep track of pending interrupt status.
     */
    SERIALIZE_SCALAR(intrTick);
    SERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick = 0;
    if (intrEvent)
        intrEventTick = intrEvent->when();
    SERIALIZE_SCALAR(intrEventTick);

}
2242
2243 void
2244 NSGigE::unserialize(Checkpoint *cp, const std::string &section)
2245 {
2246 // Unserialize the PciDev base class
2247 PciDev::unserialize(cp, section);
2248
2249 UNSERIALIZE_SCALAR(regs.command);
2250 UNSERIALIZE_SCALAR(regs.config);
2251 UNSERIALIZE_SCALAR(regs.mear);
2252 UNSERIALIZE_SCALAR(regs.ptscr);
2253 UNSERIALIZE_SCALAR(regs.isr);
2254 UNSERIALIZE_SCALAR(regs.imr);
2255 UNSERIALIZE_SCALAR(regs.ier);
2256 UNSERIALIZE_SCALAR(regs.ihr);
2257 UNSERIALIZE_SCALAR(regs.txdp);
2258 UNSERIALIZE_SCALAR(regs.txdp_hi);
2259 UNSERIALIZE_SCALAR(regs.txcfg);
2260 UNSERIALIZE_SCALAR(regs.gpior);
2261 UNSERIALIZE_SCALAR(regs.rxdp);
2262 UNSERIALIZE_SCALAR(regs.rxdp_hi);
2263 UNSERIALIZE_SCALAR(regs.rxcfg);
2264 UNSERIALIZE_SCALAR(regs.pqcr);
2265 UNSERIALIZE_SCALAR(regs.wcsr);
2266 UNSERIALIZE_SCALAR(regs.pcr);
2267 UNSERIALIZE_SCALAR(regs.rfcr);
2268 UNSERIALIZE_SCALAR(regs.rfdr);
2269 UNSERIALIZE_SCALAR(regs.srr);
2270 UNSERIALIZE_SCALAR(regs.mibc);
2271 UNSERIALIZE_SCALAR(regs.vrcr);
2272 UNSERIALIZE_SCALAR(regs.vtcr);
2273 UNSERIALIZE_SCALAR(regs.vdr);
2274 UNSERIALIZE_SCALAR(regs.ccsr);
2275 UNSERIALIZE_SCALAR(regs.tbicr);
2276 UNSERIALIZE_SCALAR(regs.tbisr);
2277 UNSERIALIZE_SCALAR(regs.tanar);
2278 UNSERIALIZE_SCALAR(regs.tanlpar);
2279 UNSERIALIZE_SCALAR(regs.taner);
2280 UNSERIALIZE_SCALAR(regs.tesr);
2281
2282 UNSERIALIZE_ARRAY(rom.perfectMatch, EADDR_LEN);
2283
2284 UNSERIALIZE_SCALAR(ioEnable);
2285
2286 /*
2287 * unserialize the data fifos
2288 */
2289 int txNumPkts;
2290 UNSERIALIZE_SCALAR(txNumPkts);
2291 int i;
2292 for (i = 0; i < txNumPkts; ++i) {
2293 PacketPtr p = new PacketData;
2294 p->unserialize(cp, csprintf("%s.rxFifo%d", section, i));
2295 txFifo.push_back(p);
2296 }
2297
2298 int rxNumPkts;
2299 UNSERIALIZE_SCALAR(rxNumPkts);
2300 for (i = 0; i < rxNumPkts; ++i) {
2301 PacketPtr p = new PacketData;
2302 p->unserialize(cp, csprintf("%s.rxFifo%d", section, i));
2303 rxFifo.push_back(p);
2304 }
2305
2306 /*
2307 * unserialize the various helper variables
2308 */
2309 bool txPacketExists;
2310 UNSERIALIZE_SCALAR(txPacketExists);
2311 if (txPacketExists) {
2312 txPacket = new PacketData;
2313 txPacket->unserialize(cp, csprintf("%s.txPacket", section));
2314 uint32_t txPktBufPtr;
2315 UNSERIALIZE_SCALAR(txPktBufPtr);
2316 txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
2317 } else
2318 txPacket = 0;
2319
2320 bool rxPacketExists;
2321 UNSERIALIZE_SCALAR(rxPacketExists);
2322 rxPacket = 0;
2323 if (rxPacketExists) {
2324 rxPacket = new PacketData;
2325 rxPacket->unserialize(cp, csprintf("%s.rxPacket", section));
2326 uint32_t rxPktBufPtr;
2327 UNSERIALIZE_SCALAR(rxPktBufPtr);
2328 rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
2329 } else
2330 rxPacket = 0;
2331
2332 UNSERIALIZE_SCALAR(txXferLen);
2333 UNSERIALIZE_SCALAR(rxXferLen);
2334
2335 /*
2336 * Unserialize DescCaches
2337 */
2338 UNSERIALIZE_SCALAR(txDescCache.link);
2339 UNSERIALIZE_SCALAR(txDescCache.bufptr);
2340 UNSERIALIZE_SCALAR(txDescCache.cmdsts);
2341 UNSERIALIZE_SCALAR(txDescCache.extsts);
2342 UNSERIALIZE_SCALAR(rxDescCache.link);
2343 UNSERIALIZE_SCALAR(rxDescCache.bufptr);
2344 UNSERIALIZE_SCALAR(rxDescCache.cmdsts);
2345 UNSERIALIZE_SCALAR(rxDescCache.extsts);
2346
2347 /*
2348 * unserialize tx state machine
2349 */
2350 int txState;
2351 UNSERIALIZE_SCALAR(txState);
2352 this->txState = (TxState) txState;
2353 UNSERIALIZE_SCALAR(txEnable);
2354 UNSERIALIZE_SCALAR(CTDD);
2355 UNSERIALIZE_SCALAR(txFifoAvail);
2356 UNSERIALIZE_SCALAR(txFragPtr);
2357 UNSERIALIZE_SCALAR(txDescCnt);
2358 int txDmaState;
2359 UNSERIALIZE_SCALAR(txDmaState);
2360 this->txDmaState = (DmaState) txDmaState;
2361
2362 /*
2363 * unserialize rx state machine
2364 */
2365 int rxState;
2366 UNSERIALIZE_SCALAR(rxState);
2367 this->rxState = (RxState) rxState;
2368 UNSERIALIZE_SCALAR(rxEnable);
2369 UNSERIALIZE_SCALAR(CRDD);
2370 UNSERIALIZE_SCALAR(rxPktBytes);
2371 UNSERIALIZE_SCALAR(rxFifoCnt);
2372 UNSERIALIZE_SCALAR(rxDescCnt);
2373 int rxDmaState;
2374 UNSERIALIZE_SCALAR(rxDmaState);
2375 this->rxDmaState = (DmaState) rxDmaState;
2376
2377 UNSERIALIZE_SCALAR(extstsEnable);
2378
2379 /*
2380 * If there's a pending transmit, reschedule it now
2381 */
2382 Tick transmitTick;
2383 UNSERIALIZE_SCALAR(transmitTick);
2384 if (transmitTick)
2385 txEvent.schedule(curTick + transmitTick);
2386
2387 /*
2388 * unserialize receive address filter settings
2389 */
2390 UNSERIALIZE_SCALAR(rxFilterEnable);
2391 UNSERIALIZE_SCALAR(acceptBroadcast);
2392 UNSERIALIZE_SCALAR(acceptMulticast);
2393 UNSERIALIZE_SCALAR(acceptUnicast);
2394 UNSERIALIZE_SCALAR(acceptPerfect);
2395 UNSERIALIZE_SCALAR(acceptArp);
2396
2397 /*
2398 * Keep track of pending interrupt status.
2399 */
2400 UNSERIALIZE_SCALAR(intrTick);
2401 UNSERIALIZE_SCALAR(cpuPendingIntr);
2402 Tick intrEventTick;
2403 UNSERIALIZE_SCALAR(intrEventTick);
2404 if (intrEventTick) {
2405 intrEvent = new IntrEvent(this, true);
2406 intrEvent->schedule(intrEventTick);
2407 }
2408
2409 /*
2410 * re-add addrRanges to bus bridges
2411 */
2412 if (pioInterface) {
2413 pioInterface->addAddrRange(BARAddrs[0], BARAddrs[0] + BARSize[0] - 1);
2414 pioInterface->addAddrRange(BARAddrs[1], BARAddrs[1] + BARSize[1] - 1);
2415 }
2416 }
2417
2418 Tick
2419 NSGigE::cacheAccess(MemReqPtr &req)
2420 {
2421 DPRINTF(EthernetPIO, "timing access to paddr=%#x (daddr=%#x)\n",
2422 req->paddr, req->paddr - addr);
2423 return curTick + pioLatency;
2424 }
2425
// Simulation-object glue for NSGigEInt, the device's ethernet port
// endpoint: parameter declarations, parameter initialization, factory,
// and registration.
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

    SimObjectParam<EtherInt *> peer;
    SimObjectParam<NSGigE *> device;

END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt)

    INIT_PARAM_DFLT(peer, "peer interface", NULL),
    INIT_PARAM(device, "Ethernet device of this interface")

END_INIT_SIM_OBJECT_PARAMS(NSGigEInt)

// Factory: build the interface and, when a peer was supplied, link the
// two endpoints to each other in both directions.
CREATE_SIM_OBJECT(NSGigEInt)
{
    NSGigEInt *dev_int = new NSGigEInt(getInstanceName(), device);

    EtherInt *p = (EtherInt *)peer;
    if (p) {
        dev_int->setPeer(p);
        p->setPeer(dev_int);
    }

    return dev_int;
}

REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt)
2454
2455
// Configuration parameters for the NSGigE (DP83820) device model.
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE)

    // Link-level timing.
    Param<Tick> tx_delay;
    Param<Tick> rx_delay;
    // Interrupt delivery.
    SimObjectParam<IntrControl *> intr_ctrl;
    Param<Tick> intr_delay;
    // Memory system attachment.
    SimObjectParam<MemoryController *> mmu;
    SimObjectParam<PhysicalMemory *> physmem;
    // Receive filtering and the device's MAC address.
    Param<bool> rx_filter;
    Param<string> hardware_address;
    // Buses used for header/payload DMA and the bus hierarchy.
    SimObjectParam<Bus*> header_bus;
    SimObjectParam<Bus*> payload_bus;
    SimObjectParam<HierParams *> hier;
    Param<Tick> pio_latency;
    // DMA timing model knobs (fixed delays plus per-byte factors).
    Param<bool> dma_desc_free;
    Param<bool> dma_data_free;
    Param<Tick> dma_read_delay;
    Param<Tick> dma_write_delay;
    Param<Tick> dma_read_factor;
    Param<Tick> dma_write_factor;
    // PCI configuration-space plumbing and device location.
    SimObjectParam<PciConfigAll *> configspace;
    SimObjectParam<PciConfigData *> configdata;
    SimObjectParam<Tsunami *> tsunami;
    Param<uint32_t> pci_bus;
    Param<uint32_t> pci_dev;
    Param<uint32_t> pci_func;
    // FIFO capacities in bytes.
    Param<uint32_t> tx_fifo_size;
    Param<uint32_t> rx_fifo_size;

END_DECLARE_SIM_OBJECT_PARAMS(NSGigE)
2486
// Descriptions and defaults for the NSGigE parameters declared above.
// Ordering here must match the declaration order.
BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE)

    INIT_PARAM_DFLT(tx_delay, "Transmit Delay", 1000),
    INIT_PARAM_DFLT(rx_delay, "Receive Delay", 1000),
    INIT_PARAM(intr_ctrl, "Interrupt Controller"),
    INIT_PARAM_DFLT(intr_delay, "Interrupt Delay in microseconds", 0),
    INIT_PARAM(mmu, "Memory Controller"),
    INIT_PARAM(physmem, "Physical Memory"),
    INIT_PARAM_DFLT(rx_filter, "Enable Receive Filter", true),
    INIT_PARAM_DFLT(hardware_address, "Ethernet Hardware Address",
                    "00:99:00:00:00:01"),
    INIT_PARAM_DFLT(header_bus, "The IO Bus to attach to for headers", NULL),
    INIT_PARAM_DFLT(payload_bus, "The IO Bus to attach to for payload", NULL),
    INIT_PARAM_DFLT(hier, "Hierarchy global variables", &defaultHierParams),
    INIT_PARAM_DFLT(pio_latency, "Programmed IO latency in bus cycles", 1),
    INIT_PARAM_DFLT(dma_desc_free, "DMA of Descriptors is free", false),
    INIT_PARAM_DFLT(dma_data_free, "DMA of Data is free", false),
    INIT_PARAM_DFLT(dma_read_delay, "fixed delay for dma reads", 0),
    INIT_PARAM_DFLT(dma_write_delay, "fixed delay for dma writes", 0),
    INIT_PARAM_DFLT(dma_read_factor, "multiplier for dma reads", 0),
    INIT_PARAM_DFLT(dma_write_factor, "multiplier for dma writes", 0),
    INIT_PARAM(configspace, "PCI Configspace"),
    INIT_PARAM(configdata, "PCI Config data"),
    INIT_PARAM(tsunami, "Tsunami"),
    INIT_PARAM(pci_bus, "PCI bus"),
    INIT_PARAM(pci_dev, "PCI device number"),
    INIT_PARAM(pci_func, "PCI function code"),
    INIT_PARAM_DFLT(tx_fifo_size, "max size in bytes of txFifo", 131072),
    INIT_PARAM_DFLT(rx_fifo_size, "max size in bytes of rxFifo", 131072)

END_INIT_SIM_OBJECT_PARAMS(NSGigE)
2518
2519
2520 CREATE_SIM_OBJECT(NSGigE)
2521 {
2522 int eaddr[6];
2523 sscanf(((string)hardware_address).c_str(), "%x:%x:%x:%x:%x:%x",
2524 &eaddr[0], &eaddr[1], &eaddr[2], &eaddr[3], &eaddr[4], &eaddr[5]);
2525
2526 return new NSGigE(getInstanceName(), intr_ctrl, intr_delay,
2527 physmem, tx_delay, rx_delay, mmu, hier, header_bus,
2528 payload_bus, pio_latency, dma_desc_free, dma_data_free,
2529 dma_read_delay, dma_write_delay, dma_read_factor,
2530 dma_write_factor, configspace, configdata,
2531 tsunami, pci_bus, pci_dev, pci_func, rx_filter, eaddr,
2532 tx_fifo_size, rx_fifo_size);
2533 }
2534
// Register the NSGigE device type with the simulator's object factory so
// it can be instantiated from configuration by name.
REGISTER_SIM_OBJECT("NSGigE", NSGigE)