Very minor formatting glitches.
[gem5.git] / dev / ns_gige.cc
1 /*
2 * Copyright (c) 2004 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /* @file
30 * Device module for modelling the National Semiconductor
31 * DP83820 ethernet controller. Does not support priority queueing
32 */
33 #include <cstdio>
34 #include <deque>
35 #include <string>
36
37 #include "base/inet.hh"
38 #include "cpu/exec_context.hh"
39 #include "cpu/intr_control.hh"
40 #include "dev/dma.hh"
41 #include "dev/etherlink.hh"
42 #include "dev/ns_gige.hh"
43 #include "dev/pciconfigall.hh"
44 #include "dev/tsunami_cchip.hh"
45 #include "mem/bus/bus.hh"
46 #include "mem/bus/dma_interface.hh"
47 #include "mem/bus/pio_interface.hh"
48 #include "mem/bus/pio_interface_impl.hh"
49 #include "mem/functional_mem/memory_control.hh"
50 #include "mem/functional_mem/physical_memory.hh"
51 #include "sim/builder.hh"
52 #include "sim/debug.hh"
53 #include "sim/host.hh"
54 #include "sim/sim_stats.hh"
55 #include "targetarch/vtophys.hh"
56
// Human-readable names for the receive state machine states, used by
// DPRINTF tracing in rxKick().  Order must match the rx state enum.
const char *NsRxStateStrings[] =
{
    "rxIdle",
    "rxDescRefr",
    "rxDescRead",
    "rxFifoBlock",
    "rxFragWrite",
    "rxDescWrite",
    "rxAdvance"
};

// Human-readable names for the transmit state machine states, used by
// DPRINTF tracing in txKick().  Order must match the tx state enum.
const char *NsTxStateStrings[] =
{
    "txIdle",
    "txDescRefr",
    "txDescRead",
    "txFifoBlock",
    "txFragRead",
    "txDescWrite",
    "txAdvance"
};

// Human-readable names for the DMA engine states.  Order must match
// the dma state enum (dmaIdle, dmaReading, ...).
const char *NsDmaState[] =
{
    "dmaIdle",
    "dmaReading",
    "dmaWriting",
    "dmaReadWaiting",
    "dmaWriteWaiting"
};
87
88 using namespace std;
89
90
///////////////////////////////////////////////////////////////////////
//
// NSGigE PCI Device
//
// Constructor: wires the device into the memory system (PIO and DMA
// bus interfaces), records DMA timing parameters, resets the register
// file, and installs the EEPROM perfect-match MAC address.
//
NSGigE::NSGigE(const std::string &name, IntrControl *i, Tick intr_delay,
               PhysicalMemory *pmem, Tick tx_delay, Tick rx_delay,
               MemoryController *mmu, HierParams *hier, Bus *header_bus,
               Bus *payload_bus, Tick pio_latency, bool dma_desc_free,
               bool dma_data_free, Tick dma_read_delay, Tick dma_write_delay,
               Tick dma_read_factor, Tick dma_write_factor, PciConfigAll *cf,
               PciConfigData *cd, Tsunami *t, uint32_t bus, uint32_t dev,
               uint32_t func, bool rx_filter, const int eaddr[6],
               uint32_t tx_fifo_size, uint32_t rx_fifo_size)
    : PciDev(name, mmu, cf, cd, bus, dev, func), tsunami(t), ioEnable(false),
      maxTxFifoSize(tx_fifo_size), maxRxFifoSize(rx_fifo_size),
      txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
      txXferLen(0), rxXferLen(0), txState(txIdle), txEnable(false),
      CTDD(false), txFifoAvail(tx_fifo_size),
      txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
      rxEnable(false), CRDD(false), rxPktBytes(0), rxFifoCnt(0),
      rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
      rxDmaReadEvent(this), rxDmaWriteEvent(this),
      txDmaReadEvent(this), txDmaWriteEvent(this),
      dmaDescFree(dma_desc_free), dmaDataFree(dma_data_free),
      txDelay(tx_delay), rxDelay(rx_delay), rxKickTick(0), txKickTick(0),
      txEvent(this), rxFilterEnable(rx_filter), acceptBroadcast(false),
      acceptMulticast(false), acceptUnicast(false),
      acceptPerfect(false), acceptArp(false),
      physmem(pmem), intctrl(i), intrTick(0), cpuPendingIntr(false),
      intrEvent(0), interface(0)
{
    // register ourselves with the chipset so it can route our interrupts
    tsunami->ethernet = this;

    if (header_bus) {
        // PIO accesses arrive over the header bus
        pioInterface = newPioInterface(name, hier, header_bus, this,
                                       &NSGigE::cacheAccess);

        pioLatency = pio_latency * header_bus->clockRatio;

        // DMA payload traffic goes over the payload bus if one exists,
        // otherwise it shares the header bus
        if (payload_bus)
            dmaInterface = new DMAInterface<Bus>(name + ".dma",
                                                 header_bus, payload_bus, 1);
        else
            dmaInterface = new DMAInterface<Bus>(name + ".dma",
                                                 header_bus, header_bus, 1);
    } else if (payload_bus) {
        // no header bus: both PIO and DMA use the payload bus
        pioInterface = newPioInterface(name, hier, payload_bus, this,
                                       &NSGigE::cacheAccess);

        pioLatency = pio_latency * payload_bus->clockRatio;

        dmaInterface = new DMAInterface<Bus>(name + ".dma", payload_bus,
                                             payload_bus, 1);
    }

    intrDelay = US2Ticks(intr_delay);
    dmaReadDelay = dma_read_delay;
    dmaWriteDelay = dma_write_delay;
    dmaReadFactor = dma_read_factor;
    dmaWriteFactor = dma_write_factor;

    regsReset();
    // seed the receive filter's perfect-match entry with our MAC address
    rom.perfectMatch[0] = eaddr[0];
    rom.perfectMatch[1] = eaddr[1];
    rom.perfectMatch[2] = eaddr[2];
    rom.perfectMatch[3] = eaddr[3];
    rom.perfectMatch[4] = eaddr[4];
    rom.perfectMatch[5] = eaddr[5];
}
161
162 NSGigE::~NSGigE()
163 {}
164
165 void
166 NSGigE::regStats()
167 {
168 txBytes
169 .name(name() + ".txBytes")
170 .desc("Bytes Transmitted")
171 .prereq(txBytes)
172 ;
173
174 rxBytes
175 .name(name() + ".rxBytes")
176 .desc("Bytes Received")
177 .prereq(rxBytes)
178 ;
179
180 txPackets
181 .name(name() + ".txPackets")
182 .desc("Number of Packets Transmitted")
183 .prereq(txBytes)
184 ;
185
186 rxPackets
187 .name(name() + ".rxPackets")
188 .desc("Number of Packets Received")
189 .prereq(rxBytes)
190 ;
191
192 txIpChecksums
193 .name(name() + ".txIpChecksums")
194 .desc("Number of tx IP Checksums done by device")
195 .precision(0)
196 .prereq(txBytes)
197 ;
198
199 rxIpChecksums
200 .name(name() + ".rxIpChecksums")
201 .desc("Number of rx IP Checksums done by device")
202 .precision(0)
203 .prereq(rxBytes)
204 ;
205
206 txTcpChecksums
207 .name(name() + ".txTcpChecksums")
208 .desc("Number of tx TCP Checksums done by device")
209 .precision(0)
210 .prereq(txBytes)
211 ;
212
213 rxTcpChecksums
214 .name(name() + ".rxTcpChecksums")
215 .desc("Number of rx TCP Checksums done by device")
216 .precision(0)
217 .prereq(rxBytes)
218 ;
219
220 txUdpChecksums
221 .name(name() + ".txUdpChecksums")
222 .desc("Number of tx UDP Checksums done by device")
223 .precision(0)
224 .prereq(txBytes)
225 ;
226
227 rxUdpChecksums
228 .name(name() + ".rxUdpChecksums")
229 .desc("Number of rx UDP Checksums done by device")
230 .precision(0)
231 .prereq(rxBytes)
232 ;
233
234 descDmaReads
235 .name(name() + ".descDMAReads")
236 .desc("Number of descriptors the device read w/ DMA")
237 .precision(0)
238 ;
239
240 descDmaWrites
241 .name(name() + ".descDMAWrites")
242 .desc("Number of descriptors the device wrote w/ DMA")
243 .precision(0)
244 ;
245
246 descDmaRdBytes
247 .name(name() + ".descDmaReadBytes")
248 .desc("number of descriptor bytes read w/ DMA")
249 .precision(0)
250 ;
251
252 descDmaWrBytes
253 .name(name() + ".descDmaWriteBytes")
254 .desc("number of descriptor bytes write w/ DMA")
255 .precision(0)
256 ;
257
258
259 txBandwidth
260 .name(name() + ".txBandwidth")
261 .desc("Transmit Bandwidth (bits/s)")
262 .precision(0)
263 .prereq(txBytes)
264 ;
265
266 rxBandwidth
267 .name(name() + ".rxBandwidth")
268 .desc("Receive Bandwidth (bits/s)")
269 .precision(0)
270 .prereq(rxBytes)
271 ;
272
273 txPacketRate
274 .name(name() + ".txPPS")
275 .desc("Packet Tranmission Rate (packets/s)")
276 .precision(0)
277 .prereq(txBytes)
278 ;
279
280 rxPacketRate
281 .name(name() + ".rxPPS")
282 .desc("Packet Reception Rate (packets/s)")
283 .precision(0)
284 .prereq(rxBytes)
285 ;
286
287 txBandwidth = txBytes * Stats::constant(8) / simSeconds;
288 rxBandwidth = rxBytes * Stats::constant(8) / simSeconds;
289 txPacketRate = txPackets / simSeconds;
290 rxPacketRate = rxPackets / simSeconds;
291 }
292
293 /**
294 * This is to read the PCI general configuration registers
295 */
296 void
297 NSGigE::ReadConfig(int offset, int size, uint8_t *data)
298 {
299 if (offset < PCI_DEVICE_SPECIFIC)
300 PciDev::ReadConfig(offset, size, data);
301 else
302 panic("Device specific PCI config space not implemented!\n");
303 }
304
/**
 * This is to write to the PCI general configuration registers.
 * After delegating the actual write to the base class, we snoop the
 * offset to track side effects: the command register's IOSE bit
 * gates PIO access, and writes to the BARs must be reflected into
 * the PIO interface's address ranges.
 */
void
NSGigE::WriteConfig(int offset, int size, uint32_t data)
{
    if (offset < PCI_DEVICE_SPECIFIC)
        PciDev::WriteConfig(offset, size, data);
    else
        panic("Device specific PCI config space not implemented!\n");

    // Need to catch writes to BARs to update the PIO interface
    switch (offset) {
        // seems to work fine without all these PCI settings, but i
        // put in the IO to double check, an assertion will fail if we
        // need to properly implement it
      case PCI_COMMAND:
        if (config.data[offset] & PCI_CMD_IOSE)
            ioEnable = true;
        else
            ioEnable = false;

#if 0
        if (config.data[offset] & PCI_CMD_BME) {
            bmEnabled = true;
        }
        else {
            bmEnabled = false;
        }

        if (config.data[offset] & PCI_CMD_MSE) {
            memEnable = true;
        }
        else {
            memEnable = false;
        }
#endif
        break;

      case PCI0_BASE_ADDR0:
        if (BARAddrs[0] != 0) {
            if (pioInterface)
                pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));

            // strip the uncached-space bits so the stored BAR is a
            // plain physical address
            BARAddrs[0] &= PA_UNCACHED_MASK;
        }
        break;
      case PCI0_BASE_ADDR1:
        if (BARAddrs[1] != 0) {
            if (pioInterface)
                pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));

            BARAddrs[1] &= PA_UNCACHED_MASK;
        }
        break;
    }
}
362
/**
 * This reads the device registers, which are detailed in the NS83820
 * spec sheet.  Only 32-bit reads are supported; anything else is
 * fatal.  Some registers have read side effects: reading CR clears
 * its self-clearing command bits, and reading ISR acknowledges all
 * pending interrupts.
 */
Fault
NSGigE::read(MemReqPtr &req, uint8_t *data)
{
    assert(ioEnable);

    //The mask is to give you only the offset into the device register file
    Addr daddr = req->paddr & 0xfff;
    DPRINTF(EthernetPIO, "read da=%#x pa=%#x va=%#x size=%d\n",
            daddr, req->paddr, req->vaddr, req->size);

    // there are some reserved registers, you can see ns_gige_reg.h and
    // the spec sheet for details
    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        // this window aliases the PCI config space
        ReadConfig(daddr & 0xff, req->size, data);
        return No_Fault;
    } else if (daddr >= MIB_START && daddr <= MIB_END) {
        // don't implement all the MIB's.  hopefully the kernel
        // doesn't actually DEPEND upon their values
        // MIB are just hardware stats keepers
        uint32_t &reg = *(uint32_t *) data;
        reg = 0;
        return No_Fault;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    switch (req->size) {
      case sizeof(uint32_t):
        {
            uint32_t &reg = *(uint32_t *)data;

            switch (daddr) {
              case CR:
                reg = regs.command;
                //these are supposed to be cleared on a read
                reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
                break;

              case CFG:
                reg = regs.config;
                break;

              case MEAR:
                reg = regs.mear;
                break;

              case PTSCR:
                reg = regs.ptscr;
                break;

              case ISR:
                // reading ISR acknowledges (clears) all interrupts
                reg = regs.isr;
                devIntrClear(ISR_ALL);
                break;

              case IMR:
                reg = regs.imr;
                break;

              case IER:
                reg = regs.ier;
                break;

              case IHR:
                reg = regs.ihr;
                break;

              case TXDP:
                reg = regs.txdp;
                break;

              case TXDP_HI:
                reg = regs.txdp_hi;
                break;

              case TXCFG:
                reg = regs.txcfg;
                break;

              case GPIOR:
                reg = regs.gpior;
                break;

              case RXDP:
                reg = regs.rxdp;
                break;

              case RXDP_HI:
                reg = regs.rxdp_hi;
                break;

              case RXCFG:
                reg = regs.rxcfg;
                break;

              case PQCR:
                reg = regs.pqcr;
                break;

              case WCSR:
                reg = regs.wcsr;
                break;

              case PCR:
                reg = regs.pcr;
                break;

                // see the spec sheet for how RFCR and RFDR work
                // basically, you write to RFCR to tell the machine
                // what you want to do next, then you act upon RFDR,
                // and the device will be prepared b/c of what you
                // wrote to RFCR
              case RFCR:
                reg = regs.rfcr;
                break;

              case RFDR:
                // return the 16-bit slice of the perfect-match MAC
                // address selected by RFCR's address field
                switch (regs.rfcr & RFCR_RFADDR) {
                  case 0x000:
                    reg = rom.perfectMatch[1];
                    reg = reg << 8;
                    reg += rom.perfectMatch[0];
                    break;
                  case 0x002:
                    reg = rom.perfectMatch[3] << 8;
                    reg += rom.perfectMatch[2];
                    break;
                  case 0x004:
                    reg = rom.perfectMatch[5] << 8;
                    reg += rom.perfectMatch[4];
                    break;
                  default:
                    panic("reading RFDR for something other than PMATCH!\n");
                    // didn't implement other RFDR functionality b/c
                    // driver didn't use it
                }
                break;

              case SRR:
                reg = regs.srr;
                break;

              case MIBC:
                reg = regs.mibc;
                reg &= ~(MIBC_MIBS | MIBC_ACLR);
                break;

              case VRCR:
                reg = regs.vrcr;
                break;

              case VTCR:
                reg = regs.vtcr;
                break;

              case VDR:
                reg = regs.vdr;
                break;

              case CCSR:
                reg = regs.ccsr;
                break;

              case TBICR:
                reg = regs.tbicr;
                break;

              case TBISR:
                reg = regs.tbisr;
                break;

              case TANAR:
                reg = regs.tanar;
                break;

              case TANLPAR:
                reg = regs.tanlpar;
                break;

              case TANER:
                reg = regs.taner;
                break;

              case TESR:
                reg = regs.tesr;
                break;

              default:
                panic("reading unimplemented register: addr=%#x", daddr);
            }

            DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
                    daddr, reg, reg);
        }
        break;

      default:
        panic("accessing register with invalid size: addr=%#x, size=%d",
              daddr, req->size);
    }

    return No_Fault;
}
572
/**
 * Handle a PIO write to one of the device registers; register
 * semantics follow the NS83820 spec sheet.  Only 32-bit writes are
 * supported.  Several writes have side effects: CR kicks or resets
 * the tx/rx state machines, IMR re-evaluates the interrupt line,
 * and RFCR latches the receive-filter configuration.
 */
Fault
NSGigE::write(MemReqPtr &req, const uint8_t *data)
{
    assert(ioEnable);

    Addr daddr = req->paddr & 0xfff;
    DPRINTF(EthernetPIO, "write da=%#x pa=%#x va=%#x size=%d\n",
            daddr, req->paddr, req->vaddr, req->size);

    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        // this window aliases the PCI config space
        WriteConfig(daddr & 0xff, req->size, *(uint32_t *)data);
        return No_Fault;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    if (req->size == sizeof(uint32_t)) {
        uint32_t reg = *(uint32_t *)data;
        DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);

        switch (daddr) {
          case CR:
            regs.command = reg;
            if (reg & CR_TXD) {
                txEnable = false;
            } else if (reg & CR_TXE) {
                txEnable = true;

                // the kernel is enabling the transmit machine
                if (txState == txIdle)
                    txKick();
            }

            if (reg & CR_RXD) {
                rxEnable = false;
            } else if (reg & CR_RXE) {
                rxEnable = true;

                if (rxState == rxIdle)
                    rxKick();
            }

            if (reg & CR_TXR)
                txReset();

            if (reg & CR_RXR)
                rxReset();

            if (reg & CR_SWI)
                devIntrPost(ISR_SWI);

            if (reg & CR_RST) {
                // full device reset: both state machines and registers
                txReset();
                rxReset();

                regsReset();
            }
            break;

          case CFG:
            if (reg & CFG_LNKSTS ||
                reg & CFG_SPDSTS ||
                reg & CFG_DUPSTS ||
                reg & CFG_RESERVED ||
                reg & CFG_T64ADDR ||
                reg & CFG_PCI64_DET)
                panic("writing to read-only or reserved CFG bits!\n");

            // merge in everything except the read-only status bits
            regs.config |= reg & ~(CFG_LNKSTS | CFG_SPDSTS | CFG_DUPSTS |
                                   CFG_RESERVED | CFG_T64ADDR | CFG_PCI64_DET);

            // all these #if 0's are because i don't THINK the kernel needs to
            // have these implemented. if there is a problem relating to one of
            // these, you may need to add functionality in.
#if 0
            if (reg & CFG_TBI_EN) ;
            if (reg & CFG_MODE_1000) ;
#endif

            if (reg & CFG_AUTO_1000)
                panic("CFG_AUTO_1000 not implemented!\n");

#if 0
            if (reg & CFG_PINT_DUPSTS ||
                reg & CFG_PINT_LNKSTS ||
                reg & CFG_PINT_SPDSTS)
                ;

            if (reg & CFG_TMRTEST) ;
            if (reg & CFG_MRM_DIS) ;
            if (reg & CFG_MWI_DIS) ;

            if (reg & CFG_T64ADDR)
                panic("CFG_T64ADDR is read only register!\n");

            if (reg & CFG_PCI64_DET)
                panic("CFG_PCI64_DET is read only register!\n");

            if (reg & CFG_DATA64_EN) ;
            if (reg & CFG_M64ADDR) ;
            if (reg & CFG_PHY_RST) ;
            if (reg & CFG_PHY_DIS) ;
#endif

            if (reg & CFG_EXTSTS_EN)
                extstsEnable = true;
            else
                extstsEnable = false;

#if 0
            if (reg & CFG_REQALG) ;
            if (reg & CFG_SB) ;
            if (reg & CFG_POW) ;
            if (reg & CFG_EXD) ;
            if (reg & CFG_PESEL) ;
            if (reg & CFG_BROM_DIS) ;
            if (reg & CFG_EXT_125) ;
            if (reg & CFG_BEM) ;
#endif
            break;

          case MEAR:
            regs.mear = reg;
            // since phy is completely faked, MEAR_MD* don't matter
            // and since the driver never uses MEAR_EE*, they don't
            // matter
#if 0
            if (reg & MEAR_EEDI) ;
            if (reg & MEAR_EEDO) ; // this one is read only
            if (reg & MEAR_EECLK) ;
            if (reg & MEAR_EESEL) ;
            if (reg & MEAR_MDIO) ;
            if (reg & MEAR_MDDIR) ;
            if (reg & MEAR_MDC) ;
#endif
            break;

          case PTSCR:
            regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
            // these control BISTs for various parts of chip - we
            // don't care or do just fake that the BIST is done
            if (reg & PTSCR_RBIST_EN)
                regs.ptscr |= PTSCR_RBIST_DONE;
            if (reg & PTSCR_EEBIST_EN)
                regs.ptscr &= ~PTSCR_EEBIST_EN;
            if (reg & PTSCR_EELOAD_EN)
                regs.ptscr &= ~PTSCR_EELOAD_EN;
            break;

          case ISR: /* writing to the ISR has no effect */
            panic("ISR is a read only register!\n");

          case IMR:
            regs.imr = reg;
            // the new mask may expose or hide pending interrupts
            devIntrChangeMask();
            break;

          case IER:
            regs.ier = reg;
            break;

          case IHR:
            regs.ihr = reg;
            /* not going to implement real interrupt holdoff */
            break;

          case TXDP:
            // descriptor pointers must be dword aligned
            regs.txdp = (reg & 0xFFFFFFFC);
            assert(txState == txIdle);
            CTDD = false;
            break;

          case TXDP_HI:
            regs.txdp_hi = reg;
            break;

          case TXCFG:
            regs.txcfg = reg;
#if 0
            if (reg & TXCFG_CSI) ;
            if (reg & TXCFG_HBI) ;
            if (reg & TXCFG_MLB) ;
            if (reg & TXCFG_ATP) ;
            if (reg & TXCFG_ECRETRY) {
                /*
                 * this could easily be implemented, but considering
                 * the network is just a fake pipe, wouldn't make
                 * sense to do this
                 */
            }

            if (reg & TXCFG_BRST_DIS) ;
#endif

#if 0
            /* we handle our own DMA, ignore the kernel's exhortations */
            if (reg & TXCFG_MXDMA) ;
#endif

            // also, we currently don't care about fill/drain
            // thresholds though this may change in the future with
            // more realistic networks or a driver which changes it
            // according to feedback

            break;

          case GPIOR:
            regs.gpior = reg;
            /* these just control general purpose i/o pins, don't matter */
            break;

          case RXDP:
            regs.rxdp = reg;
            CRDD = false;
            break;

          case RXDP_HI:
            regs.rxdp_hi = reg;
            break;

          case RXCFG:
            regs.rxcfg = reg;
#if 0
            if (reg & RXCFG_AEP) ;
            if (reg & RXCFG_ARP) ;
            if (reg & RXCFG_STRIPCRC) ;
            if (reg & RXCFG_RX_RD) ;
            if (reg & RXCFG_ALP) ;
            if (reg & RXCFG_AIRL) ;

            /* we handle our own DMA, ignore what kernel says about it */
            if (reg & RXCFG_MXDMA) ;

            //also, we currently don't care about fill/drain thresholds
            //though this may change in the future with more realistic
            //networks or a driver which changes it according to feedback
            if (reg & (RXCFG_DRTH | RXCFG_DRTH0)) ;
#endif
            break;

          case PQCR:
            /* there is no priority queueing used in the linux 2.6 driver */
            regs.pqcr = reg;
            break;

          case WCSR:
            /* not going to implement wake on LAN */
            regs.wcsr = reg;
            break;

          case PCR:
            /* not going to implement pause control */
            regs.pcr = reg;
            break;

          case RFCR:
            regs.rfcr = reg;

            // latch the receive-filter control bits into the flags
            // the rx path consults for every packet
            rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
            acceptBroadcast = (reg & RFCR_AAB) ? true : false;
            acceptMulticast = (reg & RFCR_AAM) ? true : false;
            acceptUnicast = (reg & RFCR_AAU) ? true : false;
            acceptPerfect = (reg & RFCR_APM) ? true : false;
            acceptArp = (reg & RFCR_AARP) ? true : false;

#if 0
            if (reg & RFCR_APAT)
                panic("RFCR_APAT not implemented!\n");
#endif

            if (reg & RFCR_MHEN || reg & RFCR_UHEN)
                panic("hash filtering not implemented!\n");

            if (reg & RFCR_ULM)
                panic("RFCR_ULM not implemented!\n");

            break;

          case RFDR:
            panic("the driver never writes to RFDR, something is wrong!\n");

          case BRAR:
            panic("the driver never uses BRAR, something is wrong!\n");

          case BRDR:
            panic("the driver never uses BRDR, something is wrong!\n");

          case SRR:
            panic("SRR is read only register!\n");

          case MIBC:
            panic("the driver never uses MIBC, something is wrong!\n");

          case VRCR:
            regs.vrcr = reg;
            break;

          case VTCR:
            regs.vtcr = reg;
            break;

          case VDR:
            panic("the driver never uses VDR, something is wrong!\n");
            break;

          case CCSR:
            /* not going to implement clockrun stuff */
            regs.ccsr = reg;
            break;

          case TBICR:
            regs.tbicr = reg;
            if (reg & TBICR_MR_LOOPBACK)
                panic("TBICR_MR_LOOPBACK never used, something wrong!\n");

            if (reg & TBICR_MR_AN_ENABLE) {
                // fake instant autonegotiation: echo our advertised
                // abilities as the link partner's and report completion
                regs.tanlpar = regs.tanar;
                regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
            }

#if 0
            if (reg & TBICR_MR_RESTART_AN) ;
#endif

            break;

          case TBISR:
            panic("TBISR is read only register!\n");

          case TANAR:
            regs.tanar = reg;
            if (reg & TANAR_PS2)
                panic("this isn't used in driver, something wrong!\n");

            if (reg & TANAR_PS1)
                panic("this isn't used in driver, something wrong!\n");
            break;

          case TANLPAR:
            panic("this should only be written to by the fake phy!\n");

          case TANER:
            panic("TANER is read only register!\n");

          case TESR:
            regs.tesr = reg;
            break;

          default:
            panic("invalid register access daddr=%#x", daddr);
        }
    } else {
        panic("Invalid Request Size");
    }

    return No_Fault;
}
931
/**
 * Set the given bits in the interrupt status register (ISR) and, if
 * any now-pending interrupt is enabled by the mask (IMR), schedule a
 * CPU interrupt.  Posting a reserved bit is fatal; unimplemented
 * bits are warned about and dropped.
 *
 * @param interrupts ISR_* bit mask of interrupts to post.
 */
void
NSGigE::devIntrPost(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot set a reserved interrupt");

    if (interrupts & ISR_NOIMPL)
        warn("interrupt not implemented %#x\n", interrupts);

    interrupts &= ~ISR_NOIMPL;
    regs.isr |= interrupts;

    DPRINTF(EthernetIntr,
            "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
            interrupts, regs.isr, regs.imr);

    if ((regs.isr & regs.imr)) {
        Tick when = curTick;
        // ISR_NODELAY interrupts bypass the configured interrupt delay
        if (!(regs.isr & regs.imr & ISR_NODELAY))
            when += intrDelay;
        cpuIntrPost(when);
    }
}
955
956 void
957 NSGigE::devIntrClear(uint32_t interrupts)
958 {
959 if (interrupts & ISR_RESERVE)
960 panic("Cannot clear a reserved interrupt");
961
962 interrupts &= ~ISR_NOIMPL;
963 regs.isr &= ~interrupts;
964
965 DPRINTF(EthernetIntr,
966 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
967 interrupts, regs.isr, regs.imr);
968
969 if (!(regs.isr & regs.imr))
970 cpuIntrClear();
971 }
972
973 void
974 NSGigE::devIntrChangeMask()
975 {
976 DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
977 regs.isr, regs.imr, regs.isr & regs.imr);
978
979 if (regs.isr & regs.imr)
980 cpuIntrPost(curTick);
981 else
982 cpuIntrClear();
983 }
984
/**
 * Schedule (or re-schedule) the event that actually asserts the CPU
 * interrupt line.
 *
 * @param when absolute tick at which the interrupt should be raised.
 */
void
NSGigE::cpuIntrPost(Tick when)
{
    // If the interrupt you want to post is later than an interrupt
    // already scheduled, just let it post in the coming one and don't
    // schedule another.
    // HOWEVER, must be sure that the scheduled intrTick is in the
    // future (this was formerly the source of a bug)
    /**
     * @todo this warning should be removed and the intrTick code should
     * be fixed.
     */
    assert(when >= curTick);
    assert(intrTick >= curTick || intrTick == 0);
    if (when > intrTick && intrTick != 0) {
        DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
                intrTick);
        return;
    }

    intrTick = when;
    // clamp to the present if the tick somehow slipped into the past
    // (see the @todo above); drop into the debugger so it's noticed
    if (intrTick < curTick) {
        debug_break();
        intrTick = curTick;
    }

    DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
            intrTick);

    // replace any already-scheduled posting event with one for the
    // (earlier) new tick
    if (intrEvent)
        intrEvent->squash();
    intrEvent = new IntrEvent(this, true);
    intrEvent->schedule(intrTick);
}
1019
1020 void
1021 NSGigE::cpuInterrupt()
1022 {
1023 assert(intrTick == curTick);
1024
1025 // Whether or not there's a pending interrupt, we don't care about
1026 // it anymore
1027 intrEvent = 0;
1028 intrTick = 0;
1029
1030 // Don't send an interrupt if there's already one
1031 if (cpuPendingIntr) {
1032 DPRINTF(EthernetIntr,
1033 "would send an interrupt now, but there's already pending\n");
1034 } else {
1035 // Send interrupt
1036 cpuPendingIntr = true;
1037
1038 DPRINTF(EthernetIntr, "posting cchip interrupt\n");
1039 tsunami->cchip->postDRIR(configData->config.hdr.pci0.interruptLine);
1040 }
1041 }
1042
1043 void
1044 NSGigE::cpuIntrClear()
1045 {
1046 if (!cpuPendingIntr)
1047 return;
1048
1049 if (intrEvent) {
1050 intrEvent->squash();
1051 intrEvent = 0;
1052 }
1053
1054 intrTick = 0;
1055
1056 cpuPendingIntr = false;
1057
1058 DPRINTF(EthernetIntr, "clearing cchip interrupt\n");
1059 tsunami->cchip->clearDRIR(configData->config.hdr.pci0.interruptLine);
1060 }
1061
1062 bool
1063 NSGigE::cpuIntrPending() const
1064 { return cpuPendingIntr; }
1065
1066 void
1067 NSGigE::txReset()
1068 {
1069
1070 DPRINTF(Ethernet, "transmit reset\n");
1071
1072 CTDD = false;
1073 txFifoAvail = maxTxFifoSize;
1074 txEnable = false;;
1075 txFragPtr = 0;
1076 assert(txDescCnt == 0);
1077 txFifo.clear();
1078 txState = txIdle;
1079 assert(txDmaState == dmaIdle);
1080 }
1081
1082 void
1083 NSGigE::rxReset()
1084 {
1085 DPRINTF(Ethernet, "receive reset\n");
1086
1087 CRDD = false;
1088 assert(rxPktBytes == 0);
1089 rxFifoCnt = 0;
1090 rxEnable = false;
1091 rxFragPtr = 0;
1092 assert(rxDescCnt == 0);
1093 assert(rxDmaState == dmaIdle);
1094 rxFifo.clear();
1095 rxState = rxIdle;
1096 }
1097
/**
 * Restore the device register file to its hardware power-on/reset
 * defaults and clear the software flags that are derived from it
 * (extsts enable and the receive-filter accept flags).
 */
void
NSGigE::regsReset()
{
    memset(&regs, 0, sizeof(regs));
    regs.config = CFG_LNKSTS;   // link status bit set out of reset
    regs.mear = MEAR_MDDIR | MEAR_EEDO;
    regs.txcfg = 0x120; // set drain threshold to 1024 bytes and
                        // fill threshold to 32 bytes
    regs.rxcfg = 0x4; // set drain threshold to 16 bytes
    regs.srr = 0x0103; // set the silicon revision to rev B or 0x103
    regs.mibc = MIBC_FRZ;
    regs.vdr = 0x81; // set the vlan tag type to 802.1q
    regs.tesr = 0xc000; // TBI capable of both full and half duplex

    extstsEnable = false;
    acceptBroadcast = false;
    acceptMulticast = false;
    acceptUnicast = false;
    acceptPerfect = false;
    acceptArp = false;
}
1119
/**
 * Perform the pending RX DMA read (rxDmaAddr/rxDmaData/rxDmaLen)
 * directly from functional physical memory and mark the DMA engine
 * idle again.
 */
void
NSGigE::rxDmaReadCopy()
{
    assert(rxDmaState == dmaReading);

    physmem->dma_read((uint8_t *)rxDmaData, rxDmaAddr, rxDmaLen);
    rxDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
            rxDmaAddr, rxDmaLen);
    DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
}
1132
/**
 * Start the RX DMA read described by rxDmaAddr/rxDmaData/rxDmaLen.
 *
 * @return true if the caller must wait for rxDmaReadDone to fire
 *         (bus busy or latency modelled), false if the copy already
 *         completed inline.
 */
bool
NSGigE::doRxDmaRead()
{
    assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
    rxDmaState = dmaReading;

    // go through the bus model unless this transfer is marked free
    if (dmaInterface && !rxDmaFree) {
        if (dmaInterface->busy())
            rxDmaState = dmaReadWaiting;    // retried when the bus frees up
        else
            dmaInterface->doDMA(Read, rxDmaAddr, rxDmaLen, curTick,
                                &rxDmaReadEvent, true);
        return true;
    }

    // no latency modelled at all: do the copy right now
    if (dmaReadDelay == 0 && dmaReadFactor == 0) {
        rxDmaReadCopy();
        return false;
    }

    // fixed delay plus a per-64-byte-chunk factor
    Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
    Tick start = curTick + dmaReadDelay + factor;
    rxDmaReadEvent.schedule(start);
    return true;
}
1158
1159 void
1160 NSGigE::rxDmaReadDone()
1161 {
1162 assert(rxDmaState == dmaReading);
1163 rxDmaReadCopy();
1164
1165 // If the transmit state machine has a pending DMA, let it go first
1166 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1167 txKick();
1168
1169 rxKick();
1170 }
1171
/**
 * Perform the pending RX DMA write (rxDmaAddr/rxDmaData/rxDmaLen)
 * directly into functional physical memory and mark the DMA engine
 * idle again.
 */
void
NSGigE::rxDmaWriteCopy()
{
    assert(rxDmaState == dmaWriting);

    physmem->dma_write(rxDmaAddr, (uint8_t *)rxDmaData, rxDmaLen);
    rxDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
            rxDmaAddr, rxDmaLen);
    DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
}
1184
/**
 * Start the RX DMA write described by rxDmaAddr/rxDmaData/rxDmaLen.
 *
 * @return true if the caller must wait for rxDmaWriteDone to fire
 *         (bus busy or latency modelled), false if the copy already
 *         completed inline.
 */
bool
NSGigE::doRxDmaWrite()
{
    assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
    rxDmaState = dmaWriting;

    // go through the bus model unless this transfer is marked free
    if (dmaInterface && !rxDmaFree) {
        if (dmaInterface->busy())
            rxDmaState = dmaWriteWaiting;   // retried when the bus frees up
        else
            dmaInterface->doDMA(WriteInvalidate, rxDmaAddr, rxDmaLen, curTick,
                                &rxDmaWriteEvent, true);
        return true;
    }

    // no latency modelled at all: do the copy right now
    if (dmaWriteDelay == 0 && dmaWriteFactor == 0) {
        rxDmaWriteCopy();
        return false;
    }

    // fixed delay plus a per-64-byte-chunk factor
    Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
    Tick start = curTick + dmaWriteDelay + factor;
    rxDmaWriteEvent.schedule(start);
    return true;
}
1210
1211 void
1212 NSGigE::rxDmaWriteDone()
1213 {
1214 assert(rxDmaState == dmaWriting);
1215 rxDmaWriteCopy();
1216
1217 // If the transmit state machine has a pending DMA, let it go first
1218 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1219 txKick();
1220
1221 rxKick();
1222 }
1223
/**
 * Drive the receive-side state machine as far as it can go.
 *
 * Control flow convention (see the comment ahead of the state switch):
 * finishing a state jumps back to the "next" label; a state that must
 * wait for a DMA jumps to "exit", and the DMA-completion event re-enters
 * this function later.  No return value; all progress is recorded in the
 * rxState / rxDma* member variables.
 */
void
NSGigE::rxKick()
{
    DPRINTF(EthernetSM, "receive kick rxState=%s (rxBuf.size=%d)\n",
            NsRxStateStrings[rxState], rxFifo.size());

    // Honor any throttle deadline; a later event will kick us again.
    if (rxKickTick > curTick) {
        DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
                rxKickTick);
        return;
    }

  next:
    // Retry any receive DMA that was previously blocked on a busy bus.
    switch(rxDmaState) {
      case dmaReadWaiting:
        if (doRxDmaRead())
            goto exit;
        break;
      case dmaWriteWaiting:
        if (doRxDmaWrite())
            goto exit;
        break;
      default:
        break;
    }

    // see state machine from spec for details
    // the way this works is, if you finish work on one state and can
    // go directly to another, you do that through jumping to the
    // label "next". however, if you have intermediate work, like DMA
    // so that you can't go to the next state yet, you go to exit and
    // exit the loop. however, when the DMA is done it will trigger
    // an event and come back to this loop.
    switch (rxState) {
      case rxIdle:
        if (!rxEnable) {
            DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n");
            goto exit;
        }

        if (CRDD) {
            // Current descriptor already done: refresh only its link field.
            rxState = rxDescRefr;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            // NOTE(review): '&rxDescCache + offsetof(ns_desc, link)' does
            // ns_desc-sized pointer arithmetic, not a byte offset; looks
            // like it should be '(uint8_t *)&rxDescCache + offsetof(...)'
            // — verify against the rxDmaData declaration.
            rxDmaData = &rxDescCache + offsetof(ns_desc, link);
            rxDmaLen = sizeof(rxDescCache.link);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        } else {
            // Fetch the whole descriptor at rxdp into the cache.
            rxState = rxDescRead;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = &rxDescCache;
            rxDmaLen = sizeof(ns_desc);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      case rxDescRefr:
        if (rxDmaState != dmaIdle)
            goto exit;

        rxState = rxAdvance;
        break;

      case rxDescRead:
        if (rxDmaState != dmaIdle)
            goto exit;

        DPRINTF(EthernetDesc,
                "rxDescCache: addr=%08x read descriptor\n",
                regs.rxdp & 0x3fffffff);
        DPRINTF(EthernetDesc,
                "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
                rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts,
                rxDescCache.extsts);

        // OWN set means the device (not the driver) still owns it, so
        // there is no buffer ready for us — go idle.
        if (rxDescCache.cmdsts & CMDSTS_OWN) {
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            goto exit;
        } else {
            rxState = rxFifoBlock;
            rxFragPtr = rxDescCache.bufptr;
            rxDescCnt = rxDescCache.cmdsts & CMDSTS_LEN_MASK;
        }
        break;

      case rxFifoBlock:
        if (!rxPacket) {
            /**
             * @todo in reality, we should be able to start processing
             * the packet as it arrives, and not have to wait for the
             * full packet ot be in the receive fifo.
             */
            if (rxFifo.empty())
                goto exit;

            DPRINTF(EthernetSM, "****processing receive of new packet****\n");

            // If we don't have a packet, grab a new one from the fifo.
            rxPacket = rxFifo.front();
            rxPktBytes = rxPacket->length;
            rxPacketBufPtr = rxPacket->data;

#if TRACING_ON
            if (DTRACE(Ethernet)) {
                const IpHdr *ip = rxPacket->ip();
                if (ip) {
                    DPRINTF(Ethernet, "ID is %d\n", ip->id());
                    const TcpHdr *tcp = rxPacket->tcp();
                    if (tcp) {
                        DPRINTF(Ethernet, "Src Port=%d, Dest Port=%d\n",
                                tcp->sport(), tcp->dport());
                    }
                }
            }
#endif

            // sanity check - i think the driver behaves like this
            assert(rxDescCnt >= rxPktBytes);

            // Must clear the value before popping to decrement the
            // reference count
            rxFifo.front() = NULL;
            rxFifo.pop_front();
            rxFifoCnt -= rxPacket->length;
        }


        // dont' need the && rxDescCnt > 0 if driver sanity check
        // above holds
        if (rxPktBytes > 0) {
            rxState = rxFragWrite;
            // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
            // check holds
            rxXferLen = rxPktBytes;

            rxDmaAddr = rxFragPtr & 0x3fffffff;
            rxDmaData = rxPacketBufPtr;
            rxDmaLen = rxXferLen;
            rxDmaFree = dmaDataFree;

            if (doRxDmaWrite())
                goto exit;

        } else {
            rxState = rxDescWrite;

            //if (rxPktBytes == 0) {  /* packet is done */
            assert(rxPktBytes == 0);
            DPRINTF(EthernetSM, "done with receiving packet\n");

            // Hand the descriptor back: OWN + OK, clear MORE, and put
            // the received byte count into the low 16 bits (CMDSTS_SIZE).
            rxDescCache.cmdsts |= CMDSTS_OWN;
            rxDescCache.cmdsts &= ~CMDSTS_MORE;
            rxDescCache.cmdsts |= CMDSTS_OK;
            rxDescCache.cmdsts &= 0xffff0000;
            rxDescCache.cmdsts += rxPacket->length;   //i.e. set CMDSTS_SIZE

#if 0
            /*
             * all the driver uses these are for its own stats keeping
             * which we don't care about, aren't necessary for
             * functionality and doing this would just slow us down.
             * if they end up using this in a later version for
             * functional purposes, just undef
             */
            if (rxFilterEnable) {
                rxDescCache.cmdsts &= ~CMDSTS_DEST_MASK;
                EthHdr *eth = rxFifoFront()->eth();
                if (eth->unicast())
                    rxDescCache.cmdsts |= CMDSTS_DEST_SELF;
                if (eth->multicast())
                    rxDescCache.cmdsts |= CMDSTS_DEST_MULTI;
                if (eth->broadcast())
                    rxDescCache.cmdsts |= CMDSTS_DEST_MASK;
            }
#endif

            // Receive-side checksum offload status reporting.
            if (extstsEnable && rxPacket->ip()) {
                rxDescCache.extsts |= EXTSTS_IPPKT;
                rxIpChecksums++;
                IpHdr *ip = rxPacket->ip();
                if (ip->ip_cksum() != 0) {
                    DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
                    rxDescCache.extsts |= EXTSTS_IPERR;
                }
                if (rxPacket->tcp()) {
                    rxDescCache.extsts |= EXTSTS_TCPPKT;
                    rxTcpChecksums++;
                    if (ip->tu_cksum() != 0) {
                        DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
                        rxDescCache.extsts |= EXTSTS_TCPERR;

                    }
                } else if (rxPacket->udp()) {
                    rxDescCache.extsts |= EXTSTS_UDPPKT;
                    rxUdpChecksums++;
                    if (ip->tu_cksum() != 0) {
                        DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
                        rxDescCache.extsts |= EXTSTS_UDPERR;
                    }
                }
            }
            rxPacket = 0;

            /*
             * the driver seems to always receive into desc buffers
             * of size 1514, so you never have a pkt that is split
             * into multiple descriptors on the receive side, so
             * i don't implement that case, hence the assert above.
             */

            DPRINTF(EthernetDesc,
                    "rxDescCache: addr=%08x writeback cmdsts extsts\n",
                    regs.rxdp & 0x3fffffff);
            DPRINTF(EthernetDesc,
                    "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
                    rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts,
                    rxDescCache.extsts);

            // Write back only the cmdsts+extsts pair of the descriptor.
            rxDmaAddr = (regs.rxdp + offsetof(ns_desc, cmdsts)) & 0x3fffffff;
            rxDmaData = &(rxDescCache.cmdsts);
            rxDmaLen = sizeof(rxDescCache.cmdsts) + sizeof(rxDescCache.extsts);
            rxDmaFree = dmaDescFree;

            descDmaWrites++;
            descDmaWrBytes += rxDmaLen;

            if (doRxDmaWrite())
                goto exit;
        }
        break;

      case rxFragWrite:
        if (rxDmaState != dmaIdle)
            goto exit;

        // Advance past the fragment just DMAed into memory.
        rxPacketBufPtr += rxXferLen;
        rxFragPtr += rxXferLen;
        rxPktBytes -= rxXferLen;

        rxState = rxFifoBlock;
        break;

      case rxDescWrite:
        if (rxDmaState != dmaIdle)
            goto exit;

        assert(rxDescCache.cmdsts & CMDSTS_OWN);

        assert(rxPacket == 0);
        devIntrPost(ISR_RXOK);

        if (rxDescCache.cmdsts & CMDSTS_INTR)
            devIntrPost(ISR_RXDESC);

        if (!rxEnable) {
            DPRINTF(EthernetSM, "Halting the RX state machine\n");
            rxState = rxIdle;
            goto exit;
        } else
            rxState = rxAdvance;
        break;

      case rxAdvance:
        // A zero link terminates the descriptor ring.
        if (rxDescCache.link == 0) {
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            CRDD = true;
            goto exit;
        } else {
            rxState = rxDescRead;
            regs.rxdp = rxDescCache.link;
            CRDD = false;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = &rxDescCache;
            rxDmaLen = sizeof(ns_desc);
            rxDmaFree = dmaDescFree;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("Invalid rxState!");
    }

    DPRINTF(EthernetSM, "entering next rxState=%s\n",
            NsRxStateStrings[rxState]);

    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
            NsRxStateStrings[rxState]);
}
1537
1538 void
1539 NSGigE::transmit()
1540 {
1541 if (txFifo.empty()) {
1542 DPRINTF(Ethernet, "nothing to transmit\n");
1543 return;
1544 }
1545
1546 DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n",
1547 maxTxFifoSize - txFifoAvail);
1548 if (interface->sendPacket(txFifo.front())) {
1549 #if TRACING_ON
1550 if (DTRACE(Ethernet)) {
1551 const IpHdr *ip = txFifo.front()->ip();
1552 if (ip) {
1553 DPRINTF(Ethernet, "ID is %d\n", ip->id());
1554 const TcpHdr *tcp = txFifo.front()->tcp();
1555 if (tcp) {
1556 DPRINTF(Ethernet, "Src Port=%d, Dest Port=%d\n",
1557 tcp->sport(), tcp->dport());
1558 }
1559 }
1560 }
1561 #endif
1562
1563 DDUMP(Ethernet, txFifo.front()->data, txFifo.front()->length);
1564 txBytes += txFifo.front()->length;
1565 txPackets++;
1566
1567 txFifoAvail += txFifo.front()->length;
1568
1569 DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n",
1570 txFifoAvail);
1571 txFifo.front() = NULL;
1572 txFifo.pop_front();
1573
1574 /*
1575 * normally do a writeback of the descriptor here, and ONLY
1576 * after that is done, send this interrupt. but since our
1577 * stuff never actually fails, just do this interrupt here,
1578 * otherwise the code has to stray from this nice format.
1579 * besides, it's functionally the same.
1580 */
1581 devIntrPost(ISR_TXOK);
1582 } else {
1583 DPRINTF(Ethernet,
1584 "May need to rethink always sending the descriptors back?\n");
1585 }
1586
1587 if (!txFifo.empty() && !txEvent.scheduled()) {
1588 DPRINTF(Ethernet, "reschedule transmit\n");
1589 txEvent.schedule(curTick + 1000);
1590 }
1591 }
1592
1593 void
1594 NSGigE::txDmaReadCopy()
1595 {
1596 assert(txDmaState == dmaReading);
1597
1598 physmem->dma_read((uint8_t *)txDmaData, txDmaAddr, txDmaLen);
1599 txDmaState = dmaIdle;
1600
1601 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
1602 txDmaAddr, txDmaLen);
1603 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1604 }
1605
1606 bool
1607 NSGigE::doTxDmaRead()
1608 {
1609 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
1610 txDmaState = dmaReading;
1611
1612 if (dmaInterface && !txDmaFree) {
1613 if (dmaInterface->busy())
1614 txDmaState = dmaReadWaiting;
1615 else
1616 dmaInterface->doDMA(Read, txDmaAddr, txDmaLen, curTick,
1617 &txDmaReadEvent, true);
1618 return true;
1619 }
1620
1621 if (dmaReadDelay == 0 && dmaReadFactor == 0.0) {
1622 txDmaReadCopy();
1623 return false;
1624 }
1625
1626 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
1627 Tick start = curTick + dmaReadDelay + factor;
1628 txDmaReadEvent.schedule(start);
1629 return true;
1630 }
1631
1632 void
1633 NSGigE::txDmaReadDone()
1634 {
1635 assert(txDmaState == dmaReading);
1636 txDmaReadCopy();
1637
1638 // If the receive state machine has a pending DMA, let it go first
1639 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1640 rxKick();
1641
1642 txKick();
1643 }
1644
1645 void
1646 NSGigE::txDmaWriteCopy()
1647 {
1648 assert(txDmaState == dmaWriting);
1649
1650 physmem->dma_write(txDmaAddr, (uint8_t *)txDmaData, txDmaLen);
1651 txDmaState = dmaIdle;
1652
1653 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
1654 txDmaAddr, txDmaLen);
1655 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1656 }
1657
1658 bool
1659 NSGigE::doTxDmaWrite()
1660 {
1661 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
1662 txDmaState = dmaWriting;
1663
1664 if (dmaInterface && !txDmaFree) {
1665 if (dmaInterface->busy())
1666 txDmaState = dmaWriteWaiting;
1667 else
1668 dmaInterface->doDMA(WriteInvalidate, txDmaAddr, txDmaLen, curTick,
1669 &txDmaWriteEvent, true);
1670 return true;
1671 }
1672
1673 if (dmaWriteDelay == 0 && dmaWriteFactor == 0.0) {
1674 txDmaWriteCopy();
1675 return false;
1676 }
1677
1678 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
1679 Tick start = curTick + dmaWriteDelay + factor;
1680 txDmaWriteEvent.schedule(start);
1681 return true;
1682 }
1683
1684 void
1685 NSGigE::txDmaWriteDone()
1686 {
1687 assert(txDmaState == dmaWriting);
1688 txDmaWriteCopy();
1689
1690 // If the receive state machine has a pending DMA, let it go first
1691 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1692 rxKick();
1693
1694 txKick();
1695 }
1696
/**
 * Drive the transmit-side state machine as far as it can go.
 *
 * Same control-flow convention as rxKick(): finishing a state jumps to
 * "next"; a state waiting on DMA jumps to "exit" and the DMA event
 * re-enters later.  Progress lives in txState / txDma* members.
 */
void
NSGigE::txKick()
{
    DPRINTF(EthernetSM, "transmit kick txState=%s\n",
            NsTxStateStrings[txState]);

    // Honor any throttle deadline; a later event will kick us again.
    if (txKickTick > curTick) {
        DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
                txKickTick);

        return;
    }

  next:
    // Retry any transmit DMA that was previously blocked on a busy bus.
    switch(txDmaState) {
      case dmaReadWaiting:
        if (doTxDmaRead())
            goto exit;
        break;
      case dmaWriteWaiting:
        if (doTxDmaWrite())
            goto exit;
        break;
      default:
        break;
    }

    switch (txState) {
      case txIdle:
        if (!txEnable) {
            DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n");
            goto exit;
        }

        if (CTDD) {
            // Current descriptor already done: refresh only its link field.
            txState = txDescRefr;

            txDmaAddr = regs.txdp & 0x3fffffff;
            // NOTE(review): '&txDescCache + offsetof(ns_desc, link)' does
            // ns_desc-sized pointer arithmetic, not a byte offset — verify
            // against the txDmaData declaration (same pattern in rxKick).
            txDmaData = &txDescCache + offsetof(ns_desc, link);
            txDmaLen = sizeof(txDescCache.link);
            txDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += txDmaLen;

            if (doTxDmaRead())
                goto exit;

        } else {
            // Fetch the whole descriptor at txdp into the cache.
            txState = txDescRead;

            txDmaAddr = regs.txdp & 0x3fffffff;
            txDmaData = &txDescCache;
            txDmaLen = sizeof(ns_desc);
            txDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += txDmaLen;

            if (doTxDmaRead())
                goto exit;
        }
        break;

      case txDescRefr:
        if (txDmaState != dmaIdle)
            goto exit;

        txState = txAdvance;
        break;

      case txDescRead:
        if (txDmaState != dmaIdle)
            goto exit;

        DPRINTF(EthernetDesc,
                "txDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
                txDescCache.link, txDescCache.bufptr, txDescCache.cmdsts,
                txDescCache.extsts);

        // On the tx side OWN set means the driver has handed us work.
        if (txDescCache.cmdsts & CMDSTS_OWN) {
            txState = txFifoBlock;
            txFragPtr = txDescCache.bufptr;
            txDescCnt = txDescCache.cmdsts & CMDSTS_LEN_MASK;
        } else {
            devIntrPost(ISR_TXIDLE);
            txState = txIdle;
            goto exit;
        }
        break;

      case txFifoBlock:
        // Allocate an assembly buffer for the packet if we don't have one.
        if (!txPacket) {
            DPRINTF(EthernetSM, "****starting the tx of a new packet****\n");
            txPacket = new PacketData;
            txPacket->data = new uint8_t[16384];
            txPacketBufPtr = txPacket->data;
        }

        if (txDescCnt == 0) {
            DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n");
            if (txDescCache.cmdsts & CMDSTS_MORE) {
                DPRINTF(EthernetSM, "there are more descriptors to come\n");
                txState = txDescWrite;

                txDescCache.cmdsts &= ~CMDSTS_OWN;

                // Write back only the cmdsts word of the descriptor.
                txDmaAddr = regs.txdp + offsetof(ns_desc, cmdsts);
                txDmaAddr &= 0x3fffffff;
                txDmaData = &(txDescCache.cmdsts);
                txDmaLen = sizeof(txDescCache.cmdsts);
                txDmaFree = dmaDescFree;

                if (doTxDmaWrite())
                    goto exit;

            } else { /* this packet is totally done */
                DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
                /* deal with the the packet that just finished */
                // Transmit-side checksum offload, if the driver asked.
                if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
                    IpHdr *ip = txPacket->ip();
                    if (txDescCache.extsts & EXTSTS_UDPPKT) {
                        UdpHdr *udp = txPacket->udp();
                        udp->sum(0);
                        udp->sum(ip->tu_cksum());
                        txUdpChecksums++;
                    } else if (txDescCache.extsts & EXTSTS_TCPPKT) {
                        TcpHdr *tcp = txPacket->tcp();
                        tcp->sum(0);
                        tcp->sum(ip->tu_cksum());
                        txTcpChecksums++;
                    }
                    if (txDescCache.extsts & EXTSTS_IPPKT) {
                        ip->sum(0);
                        ip->sum(ip->ip_cksum());
                        txIpChecksums++;
                    }
                }

                txPacket->length = txPacketBufPtr - txPacket->data;
                // this is just because the receive can't handle a
                // packet bigger want to make sure
                assert(txPacket->length <= 1514);
                txFifo.push_back(txPacket);

                /*
                 * this following section is not tqo spec, but
                 * functionally shouldn't be any different. normally,
                 * the chip will wait til the transmit has occurred
                 * before writing back the descriptor because it has
                 * to wait to see that it was successfully transmitted
                 * to decide whether to set CMDSTS_OK or not.
                 * however, in the simulator since it is always
                 * successfully transmitted, and writing it exactly to
                 * spec would complicate the code, we just do it here
                 */

                txDescCache.cmdsts &= ~CMDSTS_OWN;
                txDescCache.cmdsts |= CMDSTS_OK;

                DPRINTF(EthernetDesc,
                        "txDesc writeback: cmdsts=%08x extsts=%08x\n",
                        txDescCache.cmdsts, txDescCache.extsts);

                // Write back the cmdsts+extsts pair of the descriptor.
                txDmaAddr = regs.txdp + offsetof(ns_desc, cmdsts);
                txDmaAddr &= 0x3fffffff;
                txDmaData = &(txDescCache.cmdsts);
                txDmaLen = sizeof(txDescCache.cmdsts) +
                    sizeof(txDescCache.extsts);
                txDmaFree = dmaDescFree;

                descDmaWrites++;
                descDmaWrBytes += txDmaLen;

                transmit();
                txPacket = 0;

                if (!txEnable) {
                    DPRINTF(EthernetSM, "halting TX state machine\n");
                    txState = txIdle;
                    goto exit;
                } else
                    txState = txAdvance;

                if (doTxDmaWrite())
                    goto exit;
            }
        } else {
            DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
            if (txFifoAvail) {
                txState = txFragRead;

                /*
                 * The number of bytes transferred is either whatever
                 * is left in the descriptor (txDescCnt), or if there
                 * is not enough room in the fifo, just whatever room
                 * is left in the fifo
                 */
                txXferLen = min<uint32_t>(txDescCnt, txFifoAvail);

                txDmaAddr = txFragPtr & 0x3fffffff;
                txDmaData = txPacketBufPtr;
                txDmaLen = txXferLen;
                txDmaFree = dmaDataFree;

                if (doTxDmaRead())
                    goto exit;
            } else {
                // Fifo full: drain it by transmitting, then wait.
                txState = txFifoBlock;
                transmit();

                goto exit;
            }

        }
        break;

      case txFragRead:
        if (txDmaState != dmaIdle)
            goto exit;

        // Advance past the fragment just DMAed into the packet buffer.
        txPacketBufPtr += txXferLen;
        txFragPtr += txXferLen;
        txDescCnt -= txXferLen;
        txFifoAvail -= txXferLen;

        txState = txFifoBlock;
        break;

      case txDescWrite:
        if (txDmaState != dmaIdle)
            goto exit;

        if (txDescCache.cmdsts & CMDSTS_INTR)
            devIntrPost(ISR_TXDESC);

        txState = txAdvance;
        break;

      case txAdvance:
        // A zero link terminates the descriptor ring.
        if (txDescCache.link == 0) {
            devIntrPost(ISR_TXIDLE);
            txState = txIdle;
            goto exit;
        } else {
            txState = txDescRead;
            regs.txdp = txDescCache.link;
            CTDD = false;

            txDmaAddr = txDescCache.link & 0x3fffffff;
            txDmaData = &txDescCache;
            txDmaLen = sizeof(ns_desc);
            txDmaFree = dmaDescFree;

            if (doTxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("invalid state");
    }

    DPRINTF(EthernetSM, "entering next txState=%s\n",
            NsTxStateStrings[txState]);

    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
            NsTxStateStrings[txState]);
}
1972
1973 void
1974 NSGigE::transferDone()
1975 {
1976 if (txFifo.empty()) {
1977 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
1978 return;
1979 }
1980
1981 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
1982
1983 if (txEvent.scheduled())
1984 txEvent.reschedule(curTick + 1);
1985 else
1986 txEvent.schedule(curTick + 1);
1987 }
1988
1989 bool
1990 NSGigE::rxFilter(PacketPtr packet)
1991 {
1992 bool drop = true;
1993 string type;
1994
1995 EthHdr *eth = packet->eth();
1996 if (eth->unicast()) {
1997 // If we're accepting all unicast addresses
1998 if (acceptUnicast)
1999 drop = false;
2000
2001 // If we make a perfect match
2002 if (acceptPerfect &&
2003 memcmp(rom.perfectMatch, packet->data, EADDR_LEN) == 0)
2004 drop = false;
2005
2006 if (acceptArp && eth->type() == ETH_TYPE_ARP)
2007 drop = false;
2008
2009 } else if (eth->broadcast()) {
2010 // if we're accepting broadcasts
2011 if (acceptBroadcast)
2012 drop = false;
2013
2014 } else if (eth->multicast()) {
2015 // if we're accepting all multicasts
2016 if (acceptMulticast)
2017 drop = false;
2018
2019 }
2020
2021 if (drop) {
2022 DPRINTF(Ethernet, "rxFilter drop\n");
2023 DDUMP(EthernetData, packet->data, packet->length);
2024 }
2025
2026 return drop;
2027 }
2028
2029 bool
2030 NSGigE::recvPacket(PacketPtr packet)
2031 {
2032 rxBytes += packet->length;
2033 rxPackets++;
2034
2035 DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
2036 maxRxFifoSize - rxFifoCnt);
2037
2038 if (!rxEnable) {
2039 DPRINTF(Ethernet, "receive disabled...packet dropped\n");
2040 debug_break();
2041 interface->recvDone();
2042 return true;
2043 }
2044
2045 if (rxFilterEnable && rxFilter(packet)) {
2046 DPRINTF(Ethernet, "packet filtered...dropped\n");
2047 interface->recvDone();
2048 return true;
2049 }
2050
2051 if ((rxFifoCnt + packet->length) >= maxRxFifoSize) {
2052 DPRINTF(Ethernet,
2053 "packet will not fit in receive buffer...packet dropped\n");
2054 devIntrPost(ISR_RXORN);
2055 return false;
2056 }
2057
2058 rxFifo.push_back(packet);
2059 rxFifoCnt += packet->length;
2060 interface->recvDone();
2061
2062 rxKick();
2063 return true;
2064 }
2065
2066 //=====================================================================
2067 //
2068 //
/**
 * Checkpoint the device: flush any in-flight DMA functionally, then
 * write out registers, fifos, descriptor caches, both state machines,
 * and pending event times.  Must stay in lock-step with unserialize().
 */
void
NSGigE::serialize(ostream &os)
{
    // Serialize the PciDev base class
    PciDev::serialize(os);

    /*
     * Finalize any DMA events now.
     */
    // Completing the copies here means the checkpoint never has to
    // represent a partially-transferred DMA.
    if (rxDmaReadEvent.scheduled())
        rxDmaReadCopy();
    if (rxDmaWriteEvent.scheduled())
        rxDmaWriteCopy();
    if (txDmaReadEvent.scheduled())
        txDmaReadCopy();
    if (txDmaWriteEvent.scheduled())
        txDmaWriteCopy();

    /*
     * Serialize the device registers
     */
    SERIALIZE_SCALAR(regs.command);
    SERIALIZE_SCALAR(regs.config);
    SERIALIZE_SCALAR(regs.mear);
    SERIALIZE_SCALAR(regs.ptscr);
    SERIALIZE_SCALAR(regs.isr);
    SERIALIZE_SCALAR(regs.imr);
    SERIALIZE_SCALAR(regs.ier);
    SERIALIZE_SCALAR(regs.ihr);
    SERIALIZE_SCALAR(regs.txdp);
    SERIALIZE_SCALAR(regs.txdp_hi);
    SERIALIZE_SCALAR(regs.txcfg);
    SERIALIZE_SCALAR(regs.gpior);
    SERIALIZE_SCALAR(regs.rxdp);
    SERIALIZE_SCALAR(regs.rxdp_hi);
    SERIALIZE_SCALAR(regs.rxcfg);
    SERIALIZE_SCALAR(regs.pqcr);
    SERIALIZE_SCALAR(regs.wcsr);
    SERIALIZE_SCALAR(regs.pcr);
    SERIALIZE_SCALAR(regs.rfcr);
    SERIALIZE_SCALAR(regs.rfdr);
    SERIALIZE_SCALAR(regs.srr);
    SERIALIZE_SCALAR(regs.mibc);
    SERIALIZE_SCALAR(regs.vrcr);
    SERIALIZE_SCALAR(regs.vtcr);
    SERIALIZE_SCALAR(regs.vdr);
    SERIALIZE_SCALAR(regs.ccsr);
    SERIALIZE_SCALAR(regs.tbicr);
    SERIALIZE_SCALAR(regs.tbisr);
    SERIALIZE_SCALAR(regs.tanar);
    SERIALIZE_SCALAR(regs.tanlpar);
    SERIALIZE_SCALAR(regs.taner);
    SERIALIZE_SCALAR(regs.tesr);

    SERIALIZE_ARRAY(rom.perfectMatch, EADDR_LEN);

    SERIALIZE_SCALAR(ioEnable);

    /*
     * Serialize the data Fifos
     */
    // Each queued packet gets its own checkpoint section, named
    // "<name>.txFifo<i>" / "<name>.rxFifo<i>".
    int txNumPkts = txFifo.size();
    SERIALIZE_SCALAR(txNumPkts);
    int i = 0;
    pktiter_t end = txFifo.end();
    for (pktiter_t p = txFifo.begin(); p != end; ++p) {
        nameOut(os, csprintf("%s.txFifo%d", name(), i++));
        (*p)->serialize(os);
    }

    int rxNumPkts = rxFifo.size();
    SERIALIZE_SCALAR(rxNumPkts);
    i = 0;
    end = rxFifo.end();
    for (pktiter_t p = rxFifo.begin(); p != end; ++p) {
        nameOut(os, csprintf("%s.rxFifo%d", name(), i++));
        (*p)->serialize(os);
    }

    /*
     * Serialize the various helper variables
     */
    // In-progress packets are stored as the packet plus the offset of
    // the working buffer pointer into its data.
    bool txPacketExists = txPacket;
    SERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        nameOut(os, csprintf("%s.txPacket", name()));
        txPacket->serialize(os);
        uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
        SERIALIZE_SCALAR(txPktBufPtr);
    }

    bool rxPacketExists = rxPacket;
    SERIALIZE_SCALAR(rxPacketExists);
    if (rxPacketExists) {
        nameOut(os, csprintf("%s.rxPacket", name()));
        rxPacket->serialize(os);
        uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
        SERIALIZE_SCALAR(rxPktBufPtr);
    }

    SERIALIZE_SCALAR(txXferLen);
    SERIALIZE_SCALAR(rxXferLen);

    /*
     * Serialize DescCaches
     */
    SERIALIZE_SCALAR(txDescCache.link);
    SERIALIZE_SCALAR(txDescCache.bufptr);
    SERIALIZE_SCALAR(txDescCache.cmdsts);
    SERIALIZE_SCALAR(txDescCache.extsts);
    SERIALIZE_SCALAR(rxDescCache.link);
    SERIALIZE_SCALAR(rxDescCache.bufptr);
    SERIALIZE_SCALAR(rxDescCache.cmdsts);
    SERIALIZE_SCALAR(rxDescCache.extsts);

    /*
     * Serialize tx state machine
     */
    // Enums are written as plain ints so the checkpoint format is stable.
    int txState = this->txState;
    SERIALIZE_SCALAR(txState);
    SERIALIZE_SCALAR(txEnable);
    SERIALIZE_SCALAR(CTDD);
    SERIALIZE_SCALAR(txFifoAvail);
    SERIALIZE_SCALAR(txFragPtr);
    SERIALIZE_SCALAR(txDescCnt);
    int txDmaState = this->txDmaState;
    SERIALIZE_SCALAR(txDmaState);

    /*
     * Serialize rx state machine
     */
    int rxState = this->rxState;
    SERIALIZE_SCALAR(rxState);
    SERIALIZE_SCALAR(rxEnable);
    SERIALIZE_SCALAR(CRDD);
    SERIALIZE_SCALAR(rxPktBytes);
    SERIALIZE_SCALAR(rxFifoCnt);
    SERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState = this->rxDmaState;
    SERIALIZE_SCALAR(rxDmaState);

    SERIALIZE_SCALAR(extstsEnable);

    /*
     * If there's a pending transmit, store the time so we can
     * reschedule it later
     */
    Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
    SERIALIZE_SCALAR(transmitTick);

    /*
     * receive address filter settings
     */
    SERIALIZE_SCALAR(rxFilterEnable);
    SERIALIZE_SCALAR(acceptBroadcast);
    SERIALIZE_SCALAR(acceptMulticast);
    SERIALIZE_SCALAR(acceptUnicast);
    SERIALIZE_SCALAR(acceptPerfect);
    SERIALIZE_SCALAR(acceptArp);

    /*
     * Keep track of pending interrupt status.
     */
    SERIALIZE_SCALAR(intrTick);
    SERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick = 0;
    if (intrEvent)
        intrEventTick = intrEvent->when();
    SERIALIZE_SCALAR(intrEventTick);

}
2240
2241 void
2242 NSGigE::unserialize(Checkpoint *cp, const std::string &section)
2243 {
2244 // Unserialize the PciDev base class
2245 PciDev::unserialize(cp, section);
2246
2247 UNSERIALIZE_SCALAR(regs.command);
2248 UNSERIALIZE_SCALAR(regs.config);
2249 UNSERIALIZE_SCALAR(regs.mear);
2250 UNSERIALIZE_SCALAR(regs.ptscr);
2251 UNSERIALIZE_SCALAR(regs.isr);
2252 UNSERIALIZE_SCALAR(regs.imr);
2253 UNSERIALIZE_SCALAR(regs.ier);
2254 UNSERIALIZE_SCALAR(regs.ihr);
2255 UNSERIALIZE_SCALAR(regs.txdp);
2256 UNSERIALIZE_SCALAR(regs.txdp_hi);
2257 UNSERIALIZE_SCALAR(regs.txcfg);
2258 UNSERIALIZE_SCALAR(regs.gpior);
2259 UNSERIALIZE_SCALAR(regs.rxdp);
2260 UNSERIALIZE_SCALAR(regs.rxdp_hi);
2261 UNSERIALIZE_SCALAR(regs.rxcfg);
2262 UNSERIALIZE_SCALAR(regs.pqcr);
2263 UNSERIALIZE_SCALAR(regs.wcsr);
2264 UNSERIALIZE_SCALAR(regs.pcr);
2265 UNSERIALIZE_SCALAR(regs.rfcr);
2266 UNSERIALIZE_SCALAR(regs.rfdr);
2267 UNSERIALIZE_SCALAR(regs.srr);
2268 UNSERIALIZE_SCALAR(regs.mibc);
2269 UNSERIALIZE_SCALAR(regs.vrcr);
2270 UNSERIALIZE_SCALAR(regs.vtcr);
2271 UNSERIALIZE_SCALAR(regs.vdr);
2272 UNSERIALIZE_SCALAR(regs.ccsr);
2273 UNSERIALIZE_SCALAR(regs.tbicr);
2274 UNSERIALIZE_SCALAR(regs.tbisr);
2275 UNSERIALIZE_SCALAR(regs.tanar);
2276 UNSERIALIZE_SCALAR(regs.tanlpar);
2277 UNSERIALIZE_SCALAR(regs.taner);
2278 UNSERIALIZE_SCALAR(regs.tesr);
2279
2280 UNSERIALIZE_ARRAY(rom.perfectMatch, EADDR_LEN);
2281
2282 UNSERIALIZE_SCALAR(ioEnable);
2283
2284 /*
2285 * unserialize the data fifos
2286 */
2287 int txNumPkts;
2288 UNSERIALIZE_SCALAR(txNumPkts);
2289 int i;
2290 for (i = 0; i < txNumPkts; ++i) {
2291 PacketPtr p = new PacketData;
2292 p->unserialize(cp, csprintf("%s.rxFifo%d", section, i));
2293 txFifo.push_back(p);
2294 }
2295
2296 int rxNumPkts;
2297 UNSERIALIZE_SCALAR(rxNumPkts);
2298 for (i = 0; i < rxNumPkts; ++i) {
2299 PacketPtr p = new PacketData;
2300 p->unserialize(cp, csprintf("%s.rxFifo%d", section, i));
2301 rxFifo.push_back(p);
2302 }
2303
2304 /*
2305 * unserialize the various helper variables
2306 */
2307 bool txPacketExists;
2308 UNSERIALIZE_SCALAR(txPacketExists);
2309 if (txPacketExists) {
2310 txPacket = new PacketData;
2311 txPacket->unserialize(cp, csprintf("%s.txPacket", section));
2312 uint32_t txPktBufPtr;
2313 UNSERIALIZE_SCALAR(txPktBufPtr);
2314 txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
2315 } else
2316 txPacket = 0;
2317
2318 bool rxPacketExists;
2319 UNSERIALIZE_SCALAR(rxPacketExists);
2320 rxPacket = 0;
2321 if (rxPacketExists) {
2322 rxPacket = new PacketData;
2323 rxPacket->unserialize(cp, csprintf("%s.rxPacket", section));
2324 uint32_t rxPktBufPtr;
2325 UNSERIALIZE_SCALAR(rxPktBufPtr);
2326 rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
2327 } else
2328 rxPacket = 0;
2329
2330 UNSERIALIZE_SCALAR(txXferLen);
2331 UNSERIALIZE_SCALAR(rxXferLen);
2332
2333 /*
2334 * Unserialize DescCaches
2335 */
2336 UNSERIALIZE_SCALAR(txDescCache.link);
2337 UNSERIALIZE_SCALAR(txDescCache.bufptr);
2338 UNSERIALIZE_SCALAR(txDescCache.cmdsts);
2339 UNSERIALIZE_SCALAR(txDescCache.extsts);
2340 UNSERIALIZE_SCALAR(rxDescCache.link);
2341 UNSERIALIZE_SCALAR(rxDescCache.bufptr);
2342 UNSERIALIZE_SCALAR(rxDescCache.cmdsts);
2343 UNSERIALIZE_SCALAR(rxDescCache.extsts);
2344
2345 /*
2346 * unserialize tx state machine
2347 */
2348 int txState;
2349 UNSERIALIZE_SCALAR(txState);
2350 this->txState = (TxState) txState;
2351 UNSERIALIZE_SCALAR(txEnable);
2352 UNSERIALIZE_SCALAR(CTDD);
2353 UNSERIALIZE_SCALAR(txFifoAvail);
2354 UNSERIALIZE_SCALAR(txFragPtr);
2355 UNSERIALIZE_SCALAR(txDescCnt);
2356 int txDmaState;
2357 UNSERIALIZE_SCALAR(txDmaState);
2358 this->txDmaState = (DmaState) txDmaState;
2359
2360 /*
2361 * unserialize rx state machine
2362 */
2363 int rxState;
2364 UNSERIALIZE_SCALAR(rxState);
2365 this->rxState = (RxState) rxState;
2366 UNSERIALIZE_SCALAR(rxEnable);
2367 UNSERIALIZE_SCALAR(CRDD);
2368 UNSERIALIZE_SCALAR(rxPktBytes);
2369 UNSERIALIZE_SCALAR(rxFifoCnt);
2370 UNSERIALIZE_SCALAR(rxDescCnt);
2371 int rxDmaState;
2372 UNSERIALIZE_SCALAR(rxDmaState);
2373 this->rxDmaState = (DmaState) rxDmaState;
2374
2375 UNSERIALIZE_SCALAR(extstsEnable);
2376
2377 /*
2378 * If there's a pending transmit, reschedule it now
2379 */
2380 Tick transmitTick;
2381 UNSERIALIZE_SCALAR(transmitTick);
2382 if (transmitTick)
2383 txEvent.schedule(curTick + transmitTick);
2384
2385 /*
2386 * unserialize receive address filter settings
2387 */
2388 UNSERIALIZE_SCALAR(rxFilterEnable);
2389 UNSERIALIZE_SCALAR(acceptBroadcast);
2390 UNSERIALIZE_SCALAR(acceptMulticast);
2391 UNSERIALIZE_SCALAR(acceptUnicast);
2392 UNSERIALIZE_SCALAR(acceptPerfect);
2393 UNSERIALIZE_SCALAR(acceptArp);
2394
2395 /*
2396 * Keep track of pending interrupt status.
2397 */
2398 UNSERIALIZE_SCALAR(intrTick);
2399 UNSERIALIZE_SCALAR(cpuPendingIntr);
2400 Tick intrEventTick;
2401 UNSERIALIZE_SCALAR(intrEventTick);
2402 if (intrEventTick) {
2403 intrEvent = new IntrEvent(this, true);
2404 intrEvent->schedule(intrEventTick);
2405 }
2406
2407 /*
2408 * re-add addrRanges to bus bridges
2409 */
2410 if (pioInterface) {
2411 pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));
2412 pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));
2413 }
2414 }
2415
2416 Tick
2417 NSGigE::cacheAccess(MemReqPtr &req)
2418 {
2419 DPRINTF(EthernetPIO, "timing access to paddr=%#x (daddr=%#x)\n",
2420 req->paddr, req->paddr - addr);
2421 return curTick + pioLatency;
2422 }
2423
//
// Simulator configuration glue for the NSGigEInt Ethernet interface
// object: parameter declarations, defaults, factory, and registration.
//
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

    SimObjectParam<EtherInt *> peer;    // far end of the link (may be NULL)
    SimObjectParam<NSGigE *> device;    // NIC this interface belongs to

END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt)

    INIT_PARAM_DFLT(peer, "peer interface", NULL),
    INIT_PARAM(device, "Ethernet device of this interface")

END_INIT_SIM_OBJECT_PARAMS(NSGigEInt)

// Factory: build the interface and, if a peer was configured, wire the
// two ends of the link to each other.
CREATE_SIM_OBJECT(NSGigEInt)
{
    NSGigEInt *dev_int = new NSGigEInt(getInstanceName(), device);

    EtherInt *p = (EtherInt *)peer;
    if (p) {
        dev_int->setPeer(p);
        p->setPeer(dev_int);
    }

    return dev_int;
}

REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt)
2452
2453
// Configuration parameters for the NSGigE (DP83820) device model.
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE)

    // Delays applied to transmitted and received packets.
    Param<Tick> tx_delay;
    Param<Tick> rx_delay;
    // Platform interrupt controller and the delay before posting interrupts.
    SimObjectParam<IntrControl *> intr_ctrl;
    Param<Tick> intr_delay;
    // Memory-system connections.
    SimObjectParam<MemoryController *> mmu;
    SimObjectParam<PhysicalMemory *> physmem;
    // Receive filtering and the device's Ethernet hardware (MAC) address.
    Param<bool> rx_filter;
    Param<string> hardware_address;
    // Buses used for header and payload DMA traffic, plus hierarchy/PIO setup.
    SimObjectParam<Bus*> header_bus;
    SimObjectParam<Bus*> payload_bus;
    SimObjectParam<HierParams *> hier;
    Param<Tick> pio_latency;
    // DMA timing knobs: zero-cost flags, fixed delays, and multipliers
    // for descriptor/data reads and writes.
    Param<bool> dma_desc_free;
    Param<bool> dma_data_free;
    Param<Tick> dma_read_delay;
    Param<Tick> dma_write_delay;
    Param<Tick> dma_read_factor;
    Param<Tick> dma_write_factor;
    // PCI configuration-space hookup and the device's bus/dev/func location.
    SimObjectParam<PciConfigAll *> configspace;
    SimObjectParam<PciConfigData *> configdata;
    SimObjectParam<Tsunami *> tsunami;
    Param<uint32_t> pci_bus;
    Param<uint32_t> pci_dev;
    Param<uint32_t> pci_func;
    // Maximum sizes, in bytes, of the transmit and receive FIFOs.
    Param<uint32_t> tx_fifo_size;
    Param<uint32_t> rx_fifo_size;

END_DECLARE_SIM_OBJECT_PARAMS(NSGigE)
2484
// Descriptions and default values for the NSGigE parameters declared above.
BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE)

    INIT_PARAM_DFLT(tx_delay, "Transmit Delay", 1000),
    INIT_PARAM_DFLT(rx_delay, "Receive Delay", 1000),
    INIT_PARAM(intr_ctrl, "Interrupt Controller"),
    INIT_PARAM_DFLT(intr_delay, "Interrupt Delay in microseconds", 0),
    INIT_PARAM(mmu, "Memory Controller"),
    INIT_PARAM(physmem, "Physical Memory"),
    INIT_PARAM_DFLT(rx_filter, "Enable Receive Filter", true),
    INIT_PARAM_DFLT(hardware_address, "Ethernet Hardware Address",
                    "00:99:00:00:00:01"),
    INIT_PARAM_DFLT(header_bus, "The IO Bus to attach to for headers", NULL),
    INIT_PARAM_DFLT(payload_bus, "The IO Bus to attach to for payload", NULL),
    INIT_PARAM_DFLT(hier, "Hierarchy global variables", &defaultHierParams),
    INIT_PARAM_DFLT(pio_latency, "Programmed IO latency in bus cycles", 1),
    INIT_PARAM_DFLT(dma_desc_free, "DMA of Descriptors is free", false),
    INIT_PARAM_DFLT(dma_data_free, "DMA of Data is free", false),
    INIT_PARAM_DFLT(dma_read_delay, "fixed delay for dma reads", 0),
    INIT_PARAM_DFLT(dma_write_delay, "fixed delay for dma writes", 0),
    INIT_PARAM_DFLT(dma_read_factor, "multiplier for dma reads", 0),
    INIT_PARAM_DFLT(dma_write_factor, "multiplier for dma writes", 0),
    INIT_PARAM(configspace, "PCI Configspace"),
    INIT_PARAM(configdata, "PCI Config data"),
    INIT_PARAM(tsunami, "Tsunami"),
    INIT_PARAM(pci_bus, "PCI bus"),
    INIT_PARAM(pci_dev, "PCI device number"),
    INIT_PARAM(pci_func, "PCI function code"),
    INIT_PARAM_DFLT(tx_fifo_size, "max size in bytes of txFifo", 131072),
    INIT_PARAM_DFLT(rx_fifo_size, "max size in bytes of rxFifo", 131072)

END_INIT_SIM_OBJECT_PARAMS(NSGigE)
2516
2517
2518 CREATE_SIM_OBJECT(NSGigE)
2519 {
2520 int eaddr[6];
2521 sscanf(((string)hardware_address).c_str(), "%x:%x:%x:%x:%x:%x",
2522 &eaddr[0], &eaddr[1], &eaddr[2], &eaddr[3], &eaddr[4], &eaddr[5]);
2523
2524 return new NSGigE(getInstanceName(), intr_ctrl, intr_delay,
2525 physmem, tx_delay, rx_delay, mmu, hier, header_bus,
2526 payload_bus, pio_latency, dma_desc_free, dma_data_free,
2527 dma_read_delay, dma_write_delay, dma_read_factor,
2528 dma_write_factor, configspace, configdata,
2529 tsunami, pci_bus, pci_dev, pci_func, rx_filter, eaddr,
2530 tx_fifo_size, rx_fifo_size);
2531 }
2532
2533 REGISTER_SIM_OBJECT("NSGigE", NSGigE)