1 /*
2 * Copyright (c) 2004 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /** @file
30 * Device module for modelling the National Semiconductor
31 * DP83820 ethernet controller. Does not support priority queueing.
32 */
33 #include <cstdio>
34 #include <deque>
35 #include <string>
36
37 #include "base/inet.hh"
38 #include "cpu/exec_context.hh"
39 #include "cpu/intr_control.hh"
40 #include "dev/dma.hh"
41 #include "dev/ns_gige.hh"
42 #include "dev/etherlink.hh"
43 #include "mem/bus/bus.hh"
44 #include "mem/bus/dma_interface.hh"
45 #include "mem/bus/pio_interface.hh"
46 #include "mem/bus/pio_interface_impl.hh"
47 #include "mem/functional_mem/memory_control.hh"
48 #include "mem/functional_mem/physical_memory.hh"
49 #include "sim/builder.hh"
50 #include "sim/host.hh"
51 #include "sim/sim_stats.hh"
52 #include "targetarch/vtophys.hh"
53 #include "dev/pciconfigall.hh"
54 #include "dev/tsunami_cchip.hh"
55
56 const char *NsRxStateStrings[] =
57 {
58 "rxIdle",
59 "rxDescRefr",
60 "rxDescRead",
61 "rxFifoBlock",
62 "rxFragWrite",
63 "rxDescWrite",
64 "rxAdvance"
65 };
66
67 const char *NsTxStateStrings[] =
68 {
69 "txIdle",
70 "txDescRefr",
71 "txDescRead",
72 "txFifoBlock",
73 "txFragRead",
74 "txDescWrite",
75 "txAdvance"
76 };
77
78 const char *NsDmaState[] =
79 {
80 "dmaIdle",
81 "dmaReading",
82 "dmaWriting",
83 "dmaReadWaiting",
84 "dmaWriteWaiting"
85 };
86
87 using namespace std;
88
89 ///////////////////////////////////////////////////////////////////////
90 //
91 // NSGigE PCI Device
92 //
93 NSGigE::NSGigE(const std::string &name, IntrControl *i, Tick intr_delay,
94 PhysicalMemory *pmem, Tick tx_delay, Tick rx_delay,
95 MemoryController *mmu, HierParams *hier, Bus *header_bus,
96 Bus *payload_bus, Tick pio_latency, bool dma_desc_free,
97 bool dma_data_free, Tick dma_read_delay, Tick dma_write_delay,
98 Tick dma_read_factor, Tick dma_write_factor, PciConfigAll *cf,
99 PciConfigData *cd, Tsunami *t, uint32_t bus, uint32_t dev,
100 uint32_t func, bool rx_filter, const int eaddr[6])
101 : PciDev(name, mmu, cf, cd, bus, dev, func), tsunami(t), io_enable(false),
102 txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
103 txXferLen(0), rxXferLen(0), txPktXmitted(0), txState(txIdle), CTDD(false),
104 txFifoCnt(0), txFifoAvail(MAX_TX_FIFO_SIZE), txHalt(false),
105 txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
106 CRDD(false), rxPktBytes(0), rxFifoCnt(0), rxHalt(false),
107 rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
108 rxDmaReadEvent(this), rxDmaWriteEvent(this),
109 txDmaReadEvent(this), txDmaWriteEvent(this),
110 dmaDescFree(dma_desc_free), dmaDataFree(dma_data_free),
111 txDelay(tx_delay), rxDelay(rx_delay), rxKickTick(0), txKickTick(0),
112 txEvent(this), rxFilterEnable(rx_filter), acceptBroadcast(false),
113 acceptMulticast(false), acceptUnicast(false),
114 acceptPerfect(false), acceptArp(false),
115 physmem(pmem), intctrl(i), intrTick(0),
116 cpuPendingIntr(false), intrEvent(0), interface(0), pioLatency(pio_latency)
117 {
118 tsunami->ethernet = this;
119
120 if (header_bus) {
121 pioInterface = newPioInterface(name, hier, header_bus, this,
122 &NSGigE::cacheAccess);
123
124 if (payload_bus)
125 dmaInterface = new DMAInterface<Bus>(name + ".dma",
126 header_bus, payload_bus, 1);
127 else
128 dmaInterface = new DMAInterface<Bus>(name + ".dma",
129 header_bus, header_bus, 1);
130 } else if (payload_bus) {
131 pioInterface = newPioInterface(name, hier, payload_bus, this,
132 &NSGigE::cacheAccess);
133
134 dmaInterface = new DMAInterface<Bus>(name + ".dma", payload_bus,
135 payload_bus, 1);
136
137 }
138
139
140 intrDelay = US2Ticks(intr_delay);
141 dmaReadDelay = dma_read_delay;
142 dmaWriteDelay = dma_write_delay;
143 dmaReadFactor = dma_read_factor;
144 dmaWriteFactor = dma_write_factor;
145
146 memset(&regs, 0, sizeof(regs));
147 regsReset();
148 rom.perfectMatch[0] = eaddr[0];
149 rom.perfectMatch[1] = eaddr[1];
150 rom.perfectMatch[2] = eaddr[2];
151 rom.perfectMatch[3] = eaddr[3];
152 rom.perfectMatch[4] = eaddr[4];
153 rom.perfectMatch[5] = eaddr[5];
154 }
155
156 NSGigE::~NSGigE()
157 {}
158
159 void
160 NSGigE::regStats()
161 {
162 txBytes
163 .name(name() + ".txBytes")
164 .desc("Bytes Transmitted")
165 .prereq(txBytes)
166 ;
167
168 rxBytes
169 .name(name() + ".rxBytes")
170 .desc("Bytes Received")
171 .prereq(rxBytes)
172 ;
173
174 txPackets
175 .name(name() + ".txPackets")
176 .desc("Number of Packets Transmitted")
177 .prereq(txBytes)
178 ;
179
180 rxPackets
181 .name(name() + ".rxPackets")
182 .desc("Number of Packets Received")
183 .prereq(rxBytes)
184 ;
185
186 txBandwidth
187 .name(name() + ".txBandwidth")
188 .desc("Transmit Bandwidth (bits/s)")
189 .precision(0)
190 .prereq(txBytes)
191 ;
192
193 rxBandwidth
194 .name(name() + ".rxBandwidth")
195 .desc("Receive Bandwidth (bits/s)")
196 .precision(0)
197 .prereq(rxBytes)
198 ;
199
200 txPacketRate
201 .name(name() + ".txPPS")
202 .desc("Packet Transmission Rate (packets/s)")
203 .precision(0)
204 .prereq(txBytes)
205 ;
206
207 rxPacketRate
208 .name(name() + ".rxPPS")
209 .desc("Packet Reception Rate (packets/s)")
210 .precision(0)
211 .prereq(rxBytes)
212 ;
213
214 txBandwidth = txBytes * Stats::constant(8) / simSeconds;
215 rxBandwidth = rxBytes * Stats::constant(8) / simSeconds;
216 txPacketRate = txPackets / simSeconds;
217 rxPacketRate = rxPackets / simSeconds;
218 }
219
220 /**
221 * This is to read the PCI general configuration registers
222 */
223 void
224 NSGigE::ReadConfig(int offset, int size, uint8_t *data)
225 {
226 if (offset < PCI_DEVICE_SPECIFIC)
227 PciDev::ReadConfig(offset, size, data);
228 else
229 panic("Device specific PCI config space not implemented!\n");
230 }
231
232 /**
233 * This is to write to the PCI general configuration registers
234 */
235 void
236 NSGigE::WriteConfig(int offset, int size, uint32_t data)
237 {
238 if (offset < PCI_DEVICE_SPECIFIC)
239 PciDev::WriteConfig(offset, size, data);
240 else
241 panic("Device specific PCI config space not implemented!\n");
242
243 // Need to catch writes to BARs to update the PIO interface
244 switch (offset) {
245 //seems to work fine without handling all of these, but the I/O enable
246 //check was put in as a double check; an assertion will fail if we ever
247 //need to implement this properly
248 case PCI_COMMAND:
249 if (config.data[offset] & PCI_CMD_IOSE)
250 io_enable = true;
251 else
252 io_enable = false;
253 #if 0
254 if (config.data[offset] & PCI_CMD_BME)
255 bm_enabled = true;
256 else
257 bm_enabled = false;
258 break;
259
260 if (config.data[offset] & PCI_CMD_MSE)
261 mem_enable = true;
262 else
263 mem_enable = false;
264 break;
265 #endif
266
267 case PCI0_BASE_ADDR0:
268 if (BARAddrs[0] != 0) {
269
270 if (pioInterface)
271 pioInterface->addAddrRange(BARAddrs[0], BARAddrs[0] + BARSize[0] - 1);
272
273 BARAddrs[0] &= PA_UNCACHED_MASK;
274
275 }
276 break;
277 case PCI0_BASE_ADDR1:
278 if (BARAddrs[1] != 0) {
279
280 if (pioInterface)
281 pioInterface->addAddrRange(BARAddrs[1], BARAddrs[1] + BARSize[1] - 1);
282
283 BARAddrs[1] &= PA_UNCACHED_MASK;
284
285 }
286 break;
287 }
288 }
289
290 /**
291 * This reads the device registers, which are detailed in the NS83820
292 * spec sheet
293 */
294 Fault
295 NSGigE::read(MemReqPtr &req, uint8_t *data)
296 {
297 assert(io_enable);
298
299 //The mask is to give you only the offset into the device register file
300 Addr daddr = req->paddr & 0xfff;
301 DPRINTF(EthernetPIO, "read da=%#x pa=%#x va=%#x size=%d\n",
302 daddr, req->paddr, req->vaddr, req->size);
303
304
305 //there are some reserved registers, you can see ns_gige_reg.h and
306 //the spec sheet for details
307 if (daddr > LAST && daddr <= RESERVED) {
308 panic("Accessing reserved register");
309 } else if (daddr > RESERVED && daddr <= 0x3FC) {
310 ReadConfig(daddr & 0xff, req->size, data);
311 return No_Fault;
312 } else if (daddr >= MIB_START && daddr <= MIB_END) {
313 // the MIB registers are just hardware statistics counters; they are
314 // not implemented here, on the assumption that the kernel doesn't
315 // actually depend on their values
316 uint32_t &reg = *(uint32_t *) data;
317 reg = 0;
318 return No_Fault;
319 } else if (daddr > 0x3FC)
320 panic("Something is messed up!\n");
321
322 switch (req->size) {
323 case sizeof(uint32_t):
324 {
325 uint32_t &reg = *(uint32_t *)data;
326
327 switch (daddr) {
328 case CR:
329 reg = regs.command;
330 //these are supposed to be cleared on a read
331 reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
332 break;
333
334 case CFG:
335 reg = regs.config;
336 break;
337
338 case MEAR:
339 reg = regs.mear;
340 break;
341
342 case PTSCR:
343 reg = regs.ptscr;
344 break;
345
346 case ISR:
347 reg = regs.isr;
348 devIntrClear(ISR_ALL);
349 break;
350
351 case IMR:
352 reg = regs.imr;
353 break;
354
355 case IER:
356 reg = regs.ier;
357 break;
358
359 case IHR:
360 reg = regs.ihr;
361 break;
362
363 case TXDP:
364 reg = regs.txdp;
365 break;
366
367 case TXDP_HI:
368 reg = regs.txdp_hi;
369 break;
370
371 case TXCFG:
372 reg = regs.txcfg;
373 break;
374
375 case GPIOR:
376 reg = regs.gpior;
377 break;
378
379 case RXDP:
380 reg = regs.rxdp;
381 break;
382
383 case RXDP_HI:
384 reg = regs.rxdp_hi;
385 break;
386
387 case RXCFG:
388 reg = regs.rxcfg;
389 break;
390
391 case PQCR:
392 reg = regs.pqcr;
393 break;
394
395 case WCSR:
396 reg = regs.wcsr;
397 break;
398
399 case PCR:
400 reg = regs.pcr;
401 break;
402
403 //see the spec sheet for how RFCR and RFDR work together:
404 //a write to RFCR selects which filter location to access, and the
405 //subsequent access to RFDR then operates on whatever location
406 //RFCR selected
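// For example, the only case handled below is reading back the perfect-match
// (station MAC) address: the driver writes offsets 0x000, 0x002, and 0x004 into
// RFCR_RFADDR and then reads two bytes at a time from RFDR.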
407 case RFCR:
408 reg = regs.rfcr;
409 break;
410
411 case RFDR:
412 switch (regs.rfcr & RFCR_RFADDR) {
413 case 0x000:
414 reg = rom.perfectMatch[1];
415 reg = reg << 8;
416 reg += rom.perfectMatch[0];
417 break;
418 case 0x002:
419 reg = rom.perfectMatch[3] << 8;
420 reg += rom.perfectMatch[2];
421 break;
422 case 0x004:
423 reg = rom.perfectMatch[5] << 8;
424 reg += rom.perfectMatch[4];
425 break;
426 default:
427 panic("reading from RFDR for something other than PMATCH!\n");
428 //other RFDR functionality isn't implemented because the driver doesn't use it
429 }
430 break;
431
432 case SRR:
433 reg = regs.srr;
434 break;
435
436 case MIBC:
437 reg = regs.mibc;
438 reg &= ~(MIBC_MIBS | MIBC_ACLR);
439 break;
440
441 case VRCR:
442 reg = regs.vrcr;
443 break;
444
445 case VTCR:
446 reg = regs.vtcr;
447 break;
448
449 case VDR:
450 reg = regs.vdr;
451 break;
452
453 case CCSR:
454 reg = regs.ccsr;
455 break;
456
457 case TBICR:
458 reg = regs.tbicr;
459 break;
460
461 case TBISR:
462 reg = regs.tbisr;
463 break;
464
465 case TANAR:
466 reg = regs.tanar;
467 break;
468
469 case TANLPAR:
470 reg = regs.tanlpar;
471 break;
472
473 case TANER:
474 reg = regs.taner;
475 break;
476
477 case TESR:
478 reg = regs.tesr;
479 break;
480
481 default:
482 panic("reading unimplemented register: addr = %#x", daddr);
483 }
484
485 DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
486 daddr, reg, reg);
487 }
488 break;
489
490 default:
491 panic("accessing register with invalid size: addr=%#x, size=%d",
492 daddr, req->size);
493 }
494
495 return No_Fault;
496 }
497
498 Fault
499 NSGigE::write(MemReqPtr &req, const uint8_t *data)
500 {
501 assert(io_enable);
502
503 Addr daddr = req->paddr & 0xfff;
504 DPRINTF(EthernetPIO, "write da=%#x pa=%#x va=%#x size=%d\n",
505 daddr, req->paddr, req->vaddr, req->size);
506
507 if (daddr > LAST && daddr <= RESERVED) {
508 panic("Accessing reserved register");
509 } else if (daddr > RESERVED && daddr <= 0x3FC) {
510 WriteConfig(daddr & 0xff, req->size, *(uint32_t *)data);
511 return No_Fault;
512 } else if (daddr > 0x3FC)
513 panic("Something is messed up!\n");
514
515 if (req->size == sizeof(uint32_t)) {
516 uint32_t reg = *(uint32_t *)data;
517 DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);
518
519 switch (daddr) {
520 case CR:
521 regs.command = reg;
522 if ((reg & (CR_TXE | CR_TXD)) == (CR_TXE | CR_TXD)) {
523 txHalt = true;
524 } else if (reg & CR_TXE) {
525 //the kernel is enabling the transmit machine
526 if (txState == txIdle)
527 txKick();
528 } else if (reg & CR_TXD) {
529 txHalt = true;
530 }
531
532 if ((reg & (CR_RXE | CR_RXD)) == (CR_RXE | CR_RXD)) {
533 rxHalt = true;
534 } else if (reg & CR_RXE) {
535 if (rxState == rxIdle) {
536 rxKick();
537 }
538 } else if (reg & CR_RXD) {
539 rxHalt = true;
540 }
541
542 if (reg & CR_TXR)
543 txReset();
544
545 if (reg & CR_RXR)
546 rxReset();
547
548 if (reg & CR_SWI)
549 devIntrPost(ISR_SWI);
550
551 if (reg & CR_RST) {
552 txReset();
553 rxReset();
554
555 regsReset();
556 }
557 break;
558
559 case CFG:
560 if (reg & CFG_LNKSTS || reg & CFG_SPDSTS || reg & CFG_DUPSTS
561 || reg & CFG_RESERVED || reg & CFG_T64ADDR
562 || reg & CFG_PCI64_DET)
563 panic("writing to read-only or reserved CFG bits!\n");
564
565 regs.config |= reg & ~(CFG_LNKSTS | CFG_SPDSTS | CFG_DUPSTS | CFG_RESERVED |
566 CFG_T64ADDR | CFG_PCI64_DET);
567
568 // all of these #if 0 blocks cover bits the kernel does not appear to need;
569 // if a problem turns out to involve one of them, the corresponding functionality may need to be added here
570 #if 0
571 if (reg & CFG_TBI_EN) ;
572 if (reg & CFG_MODE_1000) ;
573 #endif
574
575 if (reg & CFG_AUTO_1000)
576 panic("CFG_AUTO_1000 not implemented!\n");
577
578 #if 0
579 if (reg & CFG_PINT_DUPSTS || reg & CFG_PINT_LNKSTS || reg & CFG_PINT_SPDSTS) ;
580 if (reg & CFG_TMRTEST) ;
581 if (reg & CFG_MRM_DIS) ;
582 if (reg & CFG_MWI_DIS) ;
583
584 if (reg & CFG_T64ADDR)
585 panic("CFG_T64ADDR is read only register!\n");
586
587 if (reg & CFG_PCI64_DET)
588 panic("CFG_PCI64_DET is read only register!\n");
589
590 if (reg & CFG_DATA64_EN) ;
591 if (reg & CFG_M64ADDR) ;
592 if (reg & CFG_PHY_RST) ;
593 if (reg & CFG_PHY_DIS) ;
594 #endif
595
596 if (reg & CFG_EXTSTS_EN)
597 extstsEnable = true;
598 else
599 extstsEnable = false;
600
601 #if 0
602 if (reg & CFG_REQALG) ;
603 if (reg & CFG_SB) ;
604 if (reg & CFG_POW) ;
605 if (reg & CFG_EXD) ;
606 if (reg & CFG_PESEL) ;
607 if (reg & CFG_BROM_DIS) ;
608 if (reg & CFG_EXT_125) ;
609 if (reg & CFG_BEM) ;
610 #endif
611 break;
612
613 case MEAR:
614 regs.mear = reg;
615 /* since phy is completely faked, MEAR_MD* don't matter
616 and since the driver never uses MEAR_EE*, they don't matter */
617 #if 0
618 if (reg & MEAR_EEDI) ;
619 if (reg & MEAR_EEDO) ; //this one is read only
620 if (reg & MEAR_EECLK) ;
621 if (reg & MEAR_EESEL) ;
622 if (reg & MEAR_MDIO) ;
623 if (reg & MEAR_MDDIR) ;
624 if (reg & MEAR_MDC) ;
625 #endif
626 break;
627
628 case PTSCR:
629 regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
630 /* these bits control BISTs for various parts of the chip - we don't model
631 them, we just fake that the BIST is already done */
632 if (reg & PTSCR_RBIST_EN)
633 regs.ptscr |= PTSCR_RBIST_DONE;
634 if (reg & PTSCR_EEBIST_EN)
635 regs.ptscr &= ~PTSCR_EEBIST_EN;
636 if (reg & PTSCR_EELOAD_EN)
637 regs.ptscr &= ~PTSCR_EELOAD_EN;
638 break;
639
640 case ISR: /* writing to the ISR has no effect */
641 panic("ISR is a read only register!\n");
642
643 case IMR:
644 regs.imr = reg;
645 devIntrChangeMask();
646 break;
647
648 case IER:
649 regs.ier = reg;
650 break;
651
652 case IHR:
653 regs.ihr = reg;
654 /* not going to implement real interrupt holdoff */
655 break;
656
657 case TXDP:
658 regs.txdp = (reg & 0xFFFFFFFC);
659 assert(txState == txIdle);
660 CTDD = false;
661 break;
662
663 case TXDP_HI:
664 regs.txdp_hi = reg;
665 break;
666
667 case TXCFG:
668 regs.txcfg = reg;
669 #if 0
670 if (reg & TXCFG_CSI) ;
671 if (reg & TXCFG_HBI) ;
672 if (reg & TXCFG_MLB) ;
673 if (reg & TXCFG_ATP) ;
674 if (reg & TXCFG_ECRETRY) ; /* this could easily be implemented, but
675 considering the network is just a fake
676 pipe, wouldn't make sense to do this */
677
678 if (reg & TXCFG_BRST_DIS) ;
679 #endif
680
681
682 /* we handle our own DMA, ignore the kernel's exhortations */
683 if (reg & TXCFG_MXDMA) ;
684
685 break;
686
687 case GPIOR:
688 regs.gpior = reg;
689 /* these just control general purpose i/o pins, don't matter */
690 break;
691
692 case RXDP:
693 regs.rxdp = reg;
694 break;
695
696 case RXDP_HI:
697 regs.rxdp_hi = reg;
698 break;
699
700 case RXCFG:
701 regs.rxcfg = reg;
702 #if 0
703 if (reg & RXCFG_AEP) ;
704 if (reg & RXCFG_ARP) ;
705 if (reg & RXCFG_STRIPCRC) ;
706 if (reg & RXCFG_RX_RD) ;
707 if (reg & RXCFG_ALP) ;
708 if (reg & RXCFG_AIRL) ;
709 #endif
710
711 /* we handle our own DMA, ignore what kernel says about it */
712 if (reg & RXCFG_MXDMA) ;
713
714 #if 0
715 if (reg & (RXCFG_DRTH | RXCFG_DRTH0)) ;
716 #endif
717 break;
718
719 case PQCR:
720 /* there is no priority queueing used in the linux 2.6 driver */
721 regs.pqcr = reg;
722 break;
723
724 case WCSR:
725 /* not going to implement wake on LAN */
726 regs.wcsr = reg;
727 break;
728
729 case PCR:
730 /* not going to implement pause control */
731 regs.pcr = reg;
732 break;
733
734 case RFCR:
735 regs.rfcr = reg;
736
737 rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
738
739 acceptBroadcast = (reg & RFCR_AAB) ? true : false;
740
741 acceptMulticast = (reg & RFCR_AAM) ? true : false;
742
743 acceptUnicast = (reg & RFCR_AAU) ? true : false;
744
745 acceptPerfect = (reg & RFCR_APM) ? true : false;
746
747 acceptArp = (reg & RFCR_AARP) ? true : false;
748
749 if (reg & RFCR_APAT) ;
750 // panic("RFCR_APAT not implemented!\n");
751
752 if (reg & RFCR_MHEN || reg & RFCR_UHEN)
753 panic("hash filtering not implemented!\n");
754
755 if (reg & RFCR_ULM)
756 panic("RFCR_ULM not implemented!\n");
757
758 break;
759
760 case RFDR:
761 panic("the driver never writes to RFDR, something is wrong!\n");
762
763 case BRAR:
764 panic("the driver never uses BRAR, something is wrong!\n");
765
766 case BRDR:
767 panic("the driver never uses BRDR, something is wrong!\n");
768
769 case SRR:
770 panic("SRR is read only register!\n");
771
772 case MIBC:
773 panic("the driver never uses MIBC, something is wrong!\n");
774
775 case VRCR:
776 regs.vrcr = reg;
777 break;
778
779 case VTCR:
780 regs.vtcr = reg;
781 break;
782
783 case VDR:
784 panic("the driver never uses VDR, something is wrong!\n");
785 break;
786
787 case CCSR:
788 /* not going to implement clockrun stuff */
789 regs.ccsr = reg;
790 break;
791
792 case TBICR:
793 regs.tbicr = reg;
794 if (reg & TBICR_MR_LOOPBACK)
795 panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
796
797 if (reg & TBICR_MR_AN_ENABLE) {
798 regs.tanlpar = regs.tanar;
799 regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
800 }
801
802 #if 0
803 if (reg & TBICR_MR_RESTART_AN) ;
804 #endif
805
806 break;
807
808 case TBISR:
809 panic("TBISR is read only register!\n");
810
811 case TANAR:
812 regs.tanar = reg;
813 if (reg & TANAR_PS2)
814 panic("this isn't used in driver, something wrong!\n");
815
816 if (reg & TANAR_PS1)
817 panic("this isn't used in driver, something wrong!\n");
818 break;
819
820 case TANLPAR:
821 panic("this should only be written to by the fake phy!\n");
822
823 case TANER:
824 panic("TANER is read only register!\n");
825
826 case TESR:
827 regs.tesr = reg;
828 break;
829
830 default:
831 panic("writing to unimplemented register: addr=%#x",
832 daddr);
833 }
834 } else
835 panic("Invalid Request Size");
836
837 return No_Fault;
838 }
839
840 void
841 NSGigE::devIntrPost(uint32_t interrupts)
842 {
843 bool delay = false;
844
845 if (interrupts & ISR_RESERVE)
846 panic("Cannot set a reserved interrupt");
847
848 if (interrupts & ISR_TXRCMP)
849 regs.isr |= ISR_TXRCMP;
850
851 if (interrupts & ISR_RXRCMP)
852 regs.isr |= ISR_RXRCMP;
853
854 //ISR_DPERR not implemented
855 //ISR_SSERR not implemented
856 //ISR_RMABT not implemented
857 //ISR_RXSOVR not implemented
858 //ISR_HIBINT not implemented
859 //ISR_PHY not implemented
860 //ISR_PME not implemented
861
862 if (interrupts & ISR_SWI)
863 regs.isr |= ISR_SWI;
864
865 //ISR_MIB not implemented
866 //ISR_TXURN not implemented
867
868 if (interrupts & ISR_TXIDLE)
869 regs.isr |= ISR_TXIDLE;
870
871 if (interrupts & ISR_TXERR)
872 regs.isr |= ISR_TXERR;
873
874 if (interrupts & ISR_TXDESC)
875 regs.isr |= ISR_TXDESC;
876
877 if (interrupts & ISR_TXOK) {
878 regs.isr |= ISR_TXOK;
879 delay = true;
880 }
881
882 if (interrupts & ISR_RXORN)
883 regs.isr |= ISR_RXORN;
884
885 if (interrupts & ISR_RXIDLE)
886 regs.isr |= ISR_RXIDLE;
887
888 //ISR_RXEARLY not implemented
889
890 if (interrupts & ISR_RXERR)
891 regs.isr |= ISR_RXERR;
892
893 if (interrupts & ISR_RXDESC)
894 regs.isr |= ISR_RXDESC;
895
896 if (interrupts & ISR_RXOK) {
897 delay = true;
898 regs.isr |= ISR_RXOK;
899 }
900
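// Raise a CPU interrupt only if some pending status bit is also enabled in the
// interrupt mask; TXOK and RXOK are additionally delayed by intrDelay, which
// presumably models the chip's interrupt holdoff behavior.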
901 if ((regs.isr & regs.imr)) {
902 Tick when = curTick;
903 if (delay)
904 when += intrDelay;
905 cpuIntrPost(when);
906 }
907
908 DPRINTF(Ethernet, "interrupt posted intr=%#x isr=%#x imr=%#x\n",
909 interrupts, regs.isr, regs.imr);
910 }
911
912 void
913 NSGigE::devIntrClear(uint32_t interrupts)
914 {
915 if (interrupts & ISR_RESERVE)
916 panic("Cannot clear a reserved interrupt");
917
918 if (interrupts & ISR_TXRCMP)
919 regs.isr &= ~ISR_TXRCMP;
920
921 if (interrupts & ISR_RXRCMP)
922 regs.isr &= ~ISR_RXRCMP;
923
924 //ISR_DPERR not implemented
925 //ISR_SSERR not implemented
926 //ISR_RMABT not implemented
927 //ISR_RXSOVR not implemented
928 //ISR_HIBINT not implemented
929 //ISR_PHY not implemented
930 //ISR_PME not implemented
931
932 if (interrupts & ISR_SWI)
933 regs.isr &= ~ISR_SWI;
934
935 //ISR_MIB not implemented
936 //ISR_TXURN not implemented
937
938 if (interrupts & ISR_TXIDLE)
939 regs.isr &= ~ISR_TXIDLE;
940
941 if (interrupts & ISR_TXERR)
942 regs.isr &= ~ISR_TXERR;
943
944 if (interrupts & ISR_TXDESC)
945 regs.isr &= ~ISR_TXDESC;
946
947 if (interrupts & ISR_TXOK)
948 regs.isr &= ~ISR_TXOK;
949
950 if (interrupts & ISR_RXORN)
951 regs.isr &= ~ISR_RXORN;
952
953 if (interrupts & ISR_RXIDLE)
954 regs.isr &= ~ISR_RXIDLE;
955
956 //ISR_RXEARLY not implemented
957
958 if (interrupts & ISR_RXERR)
959 regs.isr &= ~ISR_RXERR;
960
961 if (interrupts & ISR_RXDESC)
962 regs.isr &= ~ISR_RXDESC;
963
964 if (interrupts & ISR_RXOK)
965 regs.isr &= ~ISR_RXOK;
966
967 if (!(regs.isr & regs.imr))
968 cpuIntrClear();
969
970 DPRINTF(Ethernet, "interrupt cleared intr=%x isr=%x imr=%x\n",
971 interrupts, regs.isr, regs.imr);
972 }
973
974 void
975 NSGigE::devIntrChangeMask()
976 {
977 DPRINTF(Ethernet, "interrupt mask changed\n");
978
979 if (regs.isr & regs.imr)
980 cpuIntrPost(curTick);
981 else
982 cpuIntrClear();
983 }
984
985 void
986 NSGigE::cpuIntrPost(Tick when)
987 {
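// Keep only the earliest requested interrupt time: a request later than an
// already pending one is ignored; otherwise any previously scheduled event is
// squashed and the interrupt is (re)scheduled for the new time.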
988 if (when > intrTick && intrTick != 0)
989 return;
990
991 intrTick = when;
992
993 if (intrEvent) {
994 intrEvent->squash();
995 intrEvent = 0;
996 }
997
998 if (when < curTick) {
999 cpuInterrupt();
1000 } else {
1001 intrEvent = new IntrEvent(this, true);
1002 intrEvent->schedule(intrTick);
1003 }
1004 }
1005
1006 void
1007 NSGigE::cpuInterrupt()
1008 {
1009 // Don't send an interrupt if there's already one
1010 if (cpuPendingIntr)
1011 return;
1012
1013 // Don't send an interrupt if it's supposed to be delayed
1014 if (intrTick > curTick)
1015 return;
1016
1017 // Whether or not there's a pending interrupt, we don't care about
1018 // it anymore
1019 intrEvent = 0;
1020 intrTick = 0;
1021
1022 // Send interrupt
1023 cpuPendingIntr = true;
1024 /** @todo rework the intctrl to be tsunami ok */
1025 //intctrl->post(TheISA::INTLEVEL_IRQ1, TheISA::INTINDEX_ETHERNET);
1026 tsunami->cchip->postDRIR(configData->config.hdr.pci0.interruptLine);
1027 }
1028
1029 void
1030 NSGigE::cpuIntrClear()
1031 {
1032 if (cpuPendingIntr) {
1033 cpuPendingIntr = false;
1034 /** @todo rework the intctrl to be tsunami ok */
1035 //intctrl->clear(TheISA::INTLEVEL_IRQ1, TheISA::INTINDEX_ETHERNET);
1036 tsunami->cchip->clearDRIR(configData->config.hdr.pci0.interruptLine);
1037 }
1038 }
1039
1040 bool
1041 NSGigE::cpuIntrPending() const
1042 { return cpuPendingIntr; }
1043
1044 void
1045 NSGigE::txReset()
1046 {
1047
1048 DPRINTF(Ethernet, "transmit reset\n");
1049
1050 CTDD = false;
1051 txFifoCnt = 0;
1052 txFifoAvail = MAX_TX_FIFO_SIZE;
1053 txHalt = false;
1054 txFragPtr = 0;
1055 assert(txDescCnt == 0);
1056 txFifo.clear();
1057 regs.command &= ~CR_TXE;
1058 txState = txIdle;
1059 assert(txDmaState == dmaIdle);
1060 }
1061
1062 void
1063 NSGigE::rxReset()
1064 {
1065 DPRINTF(Ethernet, "receive reset\n");
1066
1067 CRDD = false;
1068 assert(rxPktBytes == 0);
1069 rxFifoCnt = 0;
1070 rxHalt = false;
1071 rxFragPtr = 0;
1072 assert(rxDescCnt == 0);
1073 assert(rxDmaState == dmaIdle);
1074 rxFifo.clear();
1075 regs.command &= ~CR_RXE;
1076 rxState = rxIdle;
1077 }
1078
1079 void NSGigE::regsReset()
1080 {
1081 memset(&regs, 0, sizeof(regs));
1082 regs.config = 0x80000000;
1083 regs.mear = 0x12;
1084 regs.isr = 0x00608000;
1085 regs.txcfg = 0x120;
1086 regs.rxcfg = 0x4;
1087 regs.srr = 0x0103;
1088 regs.mibc = 0x2;
1089 regs.vdr = 0x81;
1090 regs.tesr = 0xc000;
1091 }
1092
1093 void
1094 NSGigE::rxDmaReadCopy()
1095 {
1096 assert(rxDmaState == dmaReading);
1097
1098 memcpy(rxDmaData, physmem->dma_addr(rxDmaAddr, rxDmaLen), rxDmaLen);
1099 rxDmaState = dmaIdle;
1100
1101 DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
1102 rxDmaAddr, rxDmaLen);
1103 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1104 }
1105
1106 bool
1107 NSGigE::doRxDmaRead()
1108 {
1109 assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
1110 rxDmaState = dmaReading;
1111
1112 if (dmaInterface && !rxDmaFree) {
1113 if (dmaInterface->busy())
1114 rxDmaState = dmaReadWaiting;
1115 else
1116 dmaInterface->doDMA(Read, rxDmaAddr, rxDmaLen, curTick,
1117 &rxDmaReadEvent);
1118 return true;
1119 }
1120
1121 if (dmaReadDelay == 0 && dmaReadFactor == 0) {
1122 rxDmaReadCopy();
1123 return false;
1124 }
1125
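// Otherwise model the DMA latency as a fixed delay plus a per-64-byte-block
// factor; (len + 63) >> 6 rounds the transfer up to whole 64-byte blocks.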
1126 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
1127 Tick start = curTick + dmaReadDelay + factor;
1128 rxDmaReadEvent.schedule(start);
1129 return true;
1130 }
1131
1132 void
1133 NSGigE::rxDmaReadDone()
1134 {
1135 assert(rxDmaState == dmaReading);
1136 rxDmaReadCopy();
1137
1138 // If the transmit state machine has a pending DMA, let it go first
1139 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1140 txKick();
1141
1142 rxKick();
1143 }
1144
1145 void
1146 NSGigE::rxDmaWriteCopy()
1147 {
1148 assert(rxDmaState == dmaWriting);
1149
1150 memcpy(physmem->dma_addr(rxDmaAddr, rxDmaLen), rxDmaData, rxDmaLen);
1151 rxDmaState = dmaIdle;
1152
1153 DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
1154 rxDmaAddr, rxDmaLen);
1155 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1156 }
1157
1158 bool
1159 NSGigE::doRxDmaWrite()
1160 {
1161 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
1162 rxDmaState = dmaWriting;
1163
1164 if (dmaInterface && !rxDmaFree) {
1165 if (dmaInterface->busy())
1166 rxDmaState = dmaWriteWaiting;
1167 else
1168 dmaInterface->doDMA(WriteInvalidate, rxDmaAddr, rxDmaLen, curTick,
1169 &rxDmaWriteEvent);
1170 return true;
1171 }
1172
1173 if (dmaWriteDelay == 0 && dmaWriteFactor == 0) {
1174 rxDmaWriteCopy();
1175 return false;
1176 }
1177
1178 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
1179 Tick start = curTick + dmaWriteDelay + factor;
1180 rxDmaWriteEvent.schedule(start);
1181 return true;
1182 }
1183
1184 void
1185 NSGigE::rxDmaWriteDone()
1186 {
1187 assert(rxDmaState == dmaWriting);
1188 rxDmaWriteCopy();
1189
1190 // If the transmit state machine has a pending DMA, let it go first
1191 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1192 txKick();
1193
1194 rxKick();
1195 }
1196
1197 void
1198 NSGigE::rxKick()
1199 {
1200 DPRINTF(Ethernet, "receive kick state=%s (rxBuf.size=%d)\n",
1201 NsRxStateStrings[rxState], rxFifo.size());
1202
1203 if (rxKickTick > curTick) {
1204 DPRINTF(Ethernet, "receive kick exiting, can't run till %d\n",
1205 rxKickTick);
1206 return;
1207 }
1208
1209 next:
1210 switch(rxDmaState) {
1211 case dmaReadWaiting:
1212 if (doRxDmaRead())
1213 goto exit;
1214 break;
1215 case dmaWriteWaiting:
1216 if (doRxDmaWrite())
1217 goto exit;
1218 break;
1219 default:
1220 break;
1221 }
1222
1223 // see the state machine in the spec sheet for details. the way this works:
1224 // when the work for one state is finished and the machine can move directly
1225 // to another state, it jumps to the label "next". when there is intermediate
1226 // work, such as a DMA, that prevents moving to the next state yet, it jumps
1227 // to "exit" and leaves the loop; when the DMA completes, its event handler
1228 // re-enters this loop.
1229 switch (rxState) {
1230 case rxIdle:
1231 if (!(regs.command & CR_RXE)) {
1232 DPRINTF(Ethernet, "Receive Disabled! Nothing to do.\n");
1233 goto exit;
1234 }
1235
1236 if (CRDD) {
1237 rxState = rxDescRefr;
1238
1239 rxDmaAddr = regs.rxdp & 0x3fffffff;
1240 rxDmaData = &rxDescCache + offsetof(ns_desc, link);
1241 rxDmaLen = sizeof(rxDescCache.link);
1242 rxDmaFree = dmaDescFree;
1243
1244 if (doRxDmaRead())
1245 goto exit;
1246 } else {
1247 rxState = rxDescRead;
1248
1249 rxDmaAddr = regs.rxdp & 0x3fffffff;
1250 rxDmaData = &rxDescCache;
1251 rxDmaLen = sizeof(ns_desc);
1252 rxDmaFree = dmaDescFree;
1253
1254 if (doRxDmaRead())
1255 goto exit;
1256 }
1257 break;
1258
1259 case rxDescRefr:
1260 if (rxDmaState != dmaIdle)
1261 goto exit;
1262
1263 rxState = rxAdvance;
1264 break;
1265
1266 case rxDescRead:
1267 if (rxDmaState != dmaIdle)
1268 goto exit;
1269
1270 DPRINTF(Ethernet,
1271 "rxDescCache:\n\tlink=%#x\n\tbufptr=%#x\n\tcmdsts=%#x\n\textsts=%#x\n"
1272 ,rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts,
1273 rxDescCache.extsts);
1274
1275 if (rxDescCache.cmdsts & CMDSTS_OWN) {
1276 rxState = rxIdle;
1277 } else {
1278 rxState = rxFifoBlock;
1279 rxFragPtr = rxDescCache.bufptr;
1280 rxDescCnt = rxDescCache.cmdsts & CMDSTS_LEN_MASK;
1281 }
1282 break;
1283
1284 case rxFifoBlock:
1285 if (!rxPacket) {
1286 /**
1287 * @todo in reality, we should be able to start processing
1288 * the packet as it arrives, and not have to wait for the
1289 * full packet to be in the receive fifo.
1290 */
1291 if (rxFifo.empty())
1292 goto exit;
1293
1294 // If we don't have a packet, grab a new one from the fifo.
1295 rxPacket = rxFifo.front();
1296 rxPktBytes = rxPacket->length;
1297 rxPacketBufPtr = rxPacket->data;
1298
1299 // sanity check - the driver appears to always post a receive buffer at least this large
1300 assert(rxDescCnt >= rxPktBytes);
1301
1302 // Must clear the value before popping to decrement the
1303 // reference count
1304 rxFifo.front() = NULL;
1305 rxFifo.pop_front();
1306 }
1307
1308
1309 // don't need an additional "&& rxDescCnt > 0" here if the driver sanity check above holds
1310 if (rxPktBytes > 0) {
1311 rxState = rxFragWrite;
1312 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity check holds
1313 rxXferLen = rxPktBytes;
1314
1315 rxDmaAddr = rxFragPtr & 0x3fffffff;
1316 rxDmaData = rxPacketBufPtr;
1317 rxDmaLen = rxXferLen;
1318 rxDmaFree = dmaDataFree;
1319
1320 if (doRxDmaWrite())
1321 goto exit;
1322
1323 } else {
1324 rxState = rxDescWrite;
1325
1326 //if (rxPktBytes == 0) { /* packet is done */
1327 assert(rxPktBytes == 0);
1328
1329 rxDescCache.cmdsts |= CMDSTS_OWN;
1330 rxDescCache.cmdsts &= ~CMDSTS_MORE;
1331 rxDescCache.cmdsts |= CMDSTS_OK;
1332 rxDescCache.cmdsts &= 0xffff0000;
1333 rxDescCache.cmdsts += rxPacket->length; //i.e. set CMDSTS_SIZE
1334
1335 #if 0
1336 /* the driver only uses these bits for its own statistics keeping,
1337 which we don't care about and which isn't needed for functionality;
1338 setting them would just slow us down. if a later driver version
1339 uses them for functional purposes, re-enable this block.
1340 */
1341 if (rxFilterEnable) {
1342 rxDescCache.cmdsts &= ~CMDSTS_DEST_MASK;
1343 if (rxFifo.front()->IsUnicast())
1344 rxDescCache.cmdsts |= CMDSTS_DEST_SELF;
1345 if (rxFifo.front()->IsMulticast())
1346 rxDescCache.cmdsts |= CMDSTS_DEST_MULTI;
1347 if (rxFifo.front()->IsBroadcast())
1348 rxDescCache.cmdsts |= CMDSTS_DEST_MASK;
1349 }
1350 #endif
1351
1352 if (rxPacket->isIpPkt() && extstsEnable) {
rxDescCache.extsts |= EXTSTS_IPPKT;
1353 if (!ipChecksum(rxPacket, false))
1354 rxDescCache.extsts |= EXTSTS_IPERR;
1355
1356 if (rxPacket->isTcpPkt()) {
1357 rxDescCache.extsts |= EXTSTS_TCPPKT;
1358 if (!tcpChecksum(rxPacket, false))
1359 rxDescCache.extsts |= EXTSTS_TCPERR;
1360 } else if (rxPacket->isUdpPkt()) {
1361 rxDescCache.extsts |= EXTSTS_UDPPKT;
1362 if (!udpChecksum(rxPacket, false))
1363 rxDescCache.extsts |= EXTSTS_UDPERR;
1364 }
1365 }
1366
1367 rxFifoCnt -= rxPacket->length;
1368 rxPacket = 0;
1369
1370 /* the driver always seems to receive into descriptor buffers of
1371 size 1514, so a packet is never split across multiple descriptors
1372 on the receive side; that case isn't implemented, hence the
1373 assert above.
1374 */
1375
1376 DPRINTF(Ethernet, "rxDesc writeback:\n\tcmdsts=%#x\n\textsts=%#x\n",
1377 rxDescCache.cmdsts, rxDescCache.extsts);
1378
1379 rxDmaAddr = (regs.rxdp + offsetof(ns_desc, cmdsts)) & 0x3fffffff;
1380 rxDmaData = &(rxDescCache.cmdsts);
1381 rxDmaLen = sizeof(rxDescCache.cmdsts) + sizeof(rxDescCache.extsts);
1382 rxDmaFree = dmaDescFree;
1383
1384 if (doRxDmaWrite())
1385 goto exit;
1386 }
1387 break;
1388
1389 case rxFragWrite:
1390 if (rxDmaState != dmaIdle)
1391 goto exit;
1392
1393 rxPacketBufPtr += rxXferLen;
1394 rxFragPtr += rxXferLen;
1395 rxPktBytes -= rxXferLen;
1396
1397 rxState = rxFifoBlock;
1398 break;
1399
1400 case rxDescWrite:
1401 if (rxDmaState != dmaIdle)
1402 goto exit;
1403
1404 assert(rxDescCache.cmdsts & CMDSTS_OWN);
1405
1406 assert(rxPacket == 0);
1407 devIntrPost(ISR_RXOK);
1408
1409 if (rxDescCache.cmdsts & CMDSTS_INTR)
1410 devIntrPost(ISR_RXDESC);
1411
1412 if (rxHalt) {
1413 rxState = rxIdle;
1414 rxHalt = false;
1415 } else
1416 rxState = rxAdvance;
1417 break;
1418
1419 case rxAdvance:
1420 if (rxDescCache.link == 0) {
1421 rxState = rxIdle;
1422 return;
1423 } else {
1424 rxState = rxDescRead;
1425 regs.rxdp = rxDescCache.link;
1426 CRDD = false;
1427
1428 rxDmaAddr = regs.rxdp & 0x3fffffff;
1429 rxDmaData = &rxDescCache;
1430 rxDmaLen = sizeof(ns_desc);
1431 rxDmaFree = dmaDescFree;
1432
1433 if (doRxDmaRead())
1434 goto exit;
1435 }
1436 break;
1437
1438 default:
1439 panic("Invalid rxState!");
1440 }
1441
1442
1443 DPRINTF(Ethernet, "entering next rx state = %s\n",
1444 NsRxStateStrings[rxState]);
1445
1446 if (rxState == rxIdle) {
1447 regs.command &= ~CR_RXE;
1448 devIntrPost(ISR_RXIDLE);
1449 return;
1450 }
1451
1452 goto next;
1453
1454 exit:
1455 /**
1456 * @todo do we want to schedule a future kick?
1457 */
1458 DPRINTF(Ethernet, "rx state machine exited state=%s\n",
1459 NsRxStateStrings[rxState]);
1460 }
1461
1462 void
1463 NSGigE::transmit()
1464 {
1465 if (txFifo.empty()) {
1466 DPRINTF(Ethernet, "nothing to transmit\n");
1467 return;
1468 }
1469
1470 if (interface->sendPacket(txFifo.front())) {
1471 DPRINTF(Ethernet, "transmit packet\n");
1472 DDUMP(Ethernet, txFifo.front()->data, txFifo.front()->length);
1473 txBytes += txFifo.front()->length;
1474 txPackets++;
1475
1476 txFifoCnt -= (txFifo.front()->length - txPktXmitted);
1477 txPktXmitted = 0;
1478 txFifo.front() = NULL;
1479 txFifo.pop_front();
1480
1481 /* normally the descriptor writeback would happen here, and ONLY after it
1482 completes would this interrupt be sent. since transmission in the
1483 simulator never actually fails, the interrupt is just posted here; doing
1484 it exactly to spec would complicate the code, and this is functionally
1485 the same. */
1486 devIntrPost(ISR_TXOK);
1487 }
1488
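// If packets remain (e.g. the interface could not take this one yet) and no
// transmit event is pending, retry later; the 1000-tick delay appears to be an
// arbitrary backoff rather than anything taken from the spec.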
1489 if (!txFifo.empty() && !txEvent.scheduled()) {
1490 DPRINTF(Ethernet, "reschedule transmit\n");
1491 txEvent.schedule(curTick + 1000);
1492 }
1493 }
1494
1495 void
1496 NSGigE::txDmaReadCopy()
1497 {
1498 assert(txDmaState == dmaReading);
1499
1500 memcpy(txDmaData, physmem->dma_addr(txDmaAddr, txDmaLen), txDmaLen);
1501 txDmaState = dmaIdle;
1502
1503 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
1504 txDmaAddr, txDmaLen);
1505 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1506 }
1507
1508 bool
1509 NSGigE::doTxDmaRead()
1510 {
1511 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
1512 txDmaState = dmaReading;
1513
1514 if (dmaInterface && !txDmaFree) {
1515 if (dmaInterface->busy())
1516 txDmaState = dmaReadWaiting;
1517 else
1518 dmaInterface->doDMA(Read, txDmaAddr, txDmaLen, curTick,
1519 &txDmaReadEvent);
1520 return true;
1521 }
1522
1523 if (dmaReadDelay == 0 && dmaReadFactor == 0.0) {
1524 txDmaReadCopy();
1525 return false;
1526 }
1527
1528 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
1529 Tick start = curTick + dmaReadDelay + factor;
1530 txDmaReadEvent.schedule(start);
1531 return true;
1532 }
1533
1534 void
1535 NSGigE::txDmaReadDone()
1536 {
1537 assert(txDmaState == dmaReading);
1538 txDmaReadCopy();
1539
1540 // If the receive state machine has a pending DMA, let it go first
1541 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1542 rxKick();
1543
1544 txKick();
1545 }
1546
1547 void
1548 NSGigE::txDmaWriteCopy()
1549 {
1550 assert(txDmaState == dmaWriting);
1551
1552 memcpy(physmem->dma_addr(txDmaAddr, txDmaLen), txDmaData, txDmaLen);
1553 txDmaState = dmaIdle;
1554
1555 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
1556 txDmaAddr, txDmaLen);
1557 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1558 }
1559
1560 bool
1561 NSGigE::doTxDmaWrite()
1562 {
1563 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
1564 txDmaState = dmaWriting;
1565
1566 if (dmaInterface && !txDmaFree) {
1567 if (dmaInterface->busy())
1568 txDmaState = dmaWriteWaiting;
1569 else
1570 dmaInterface->doDMA(WriteInvalidate, txDmaAddr, txDmaLen, curTick,
1571 &txDmaWriteEvent);
1572 return true;
1573 }
1574
1575 if (dmaWriteDelay == 0 && dmaWriteFactor == 0.0) {
1576 txDmaWriteCopy();
1577 return false;
1578 }
1579
1580 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
1581 Tick start = curTick + dmaWriteDelay + factor;
1582 txDmaWriteEvent.schedule(start);
1583 return true;
1584 }
1585
1586 void
1587 NSGigE::txDmaWriteDone()
1588 {
1589 assert(txDmaState == dmaWriting);
1590 txDmaWriteCopy();
1591
1592 // If the receive state machine has a pending DMA, let it go first
1593 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1594 rxKick();
1595
1596 txKick();
1597 }
1598
1599 void
1600 NSGigE::txKick()
1601 {
1602 DPRINTF(Ethernet, "transmit kick state=%s\n", NsTxStateStrings[txState]);
1603
1604 if (txKickTick > curTick) {
1605 DPRINTF(Ethernet, "transmit kick exiting, can't run till %d\n",
1606 txKickTick);
1607
1608 return;
1609 }
1610
1611 next:
1612 switch(txDmaState) {
1613 case dmaReadWaiting:
1614 if (doTxDmaRead())
1615 goto exit;
1616 break;
1617 case dmaWriteWaiting:
1618 if (doTxDmaWrite())
1619 goto exit;
1620 break;
1621 default:
1622 break;
1623 }
1624
1625 switch (txState) {
1626 case txIdle:
1627 if (!(regs.command & CR_TXE)) {
1628 DPRINTF(Ethernet, "Transmit disabled. Nothing to do.\n");
1629 goto exit;
1630 }
1631
1632 if (CTDD) {
1633 txState = txDescRefr;
1634
1635 txDmaAddr = regs.txdp & 0x3fffffff;
1636 txDmaData = &txDescCache + offsetof(ns_desc, link);
1637 txDmaLen = sizeof(txDescCache.link);
1638 txDmaFree = dmaDescFree;
1639
1640 if (doTxDmaRead())
1641 goto exit;
1642
1643 } else {
1644 txState = txDescRead;
1645
1646 txDmaAddr = regs.txdp & 0x3fffffff;
1647 txDmaData = &txDescCache;
1648 txDmaLen = sizeof(ns_desc);
1649 txDmaFree = dmaDescFree;
1650
1651 if (doTxDmaRead())
1652 goto exit;
1653 }
1654 break;
1655
1656 case txDescRefr:
1657 if (txDmaState != dmaIdle)
1658 goto exit;
1659
1660 txState = txAdvance;
1661 break;
1662
1663 case txDescRead:
1664 if (txDmaState != dmaIdle)
1665 goto exit;
1666
1667 DPRINTF(Ethernet,
1668 "txDescCache data:\n\tlink=%#x\n\tbufptr=%#x\n\tcmdsts=%#x\n\textsts=%#x\n"
1669 ,txDescCache.link, txDescCache.bufptr, txDescCache.cmdsts,
1670 txDescCache.extsts);
1671
1672 if (txDescCache.cmdsts & CMDSTS_OWN) {
1673 txState = txFifoBlock;
1674 txFragPtr = txDescCache.bufptr;
1675 txDescCnt = txDescCache.cmdsts & CMDSTS_LEN_MASK;
1676 } else {
1677 txState = txIdle;
1678 }
1679 break;
1680
1681 case txFifoBlock:
1682 if (!txPacket) {
1683 DPRINTF(Ethernet, "starting the tx of a new packet\n");
1684 txPacket = new EtherPacket;
1685 txPacket->data = new uint8_t[16384];
1686 txPacketBufPtr = txPacket->data;
1687 }
1688
1689 if (txDescCnt == 0) {
1690 DPRINTF(Ethernet, "the txDescCnt == 0, done with descriptor\n");
1691 if (txDescCache.cmdsts & CMDSTS_MORE) {
1692 DPRINTF(Ethernet, "there are more descriptors to come\n");
1693 txState = txDescWrite;
1694
1695 txDescCache.cmdsts &= ~CMDSTS_OWN;
1696
1697 txDmaAddr = (regs.txdp + offsetof(ns_desc, cmdsts)) & 0x3fffffff;
1698 txDmaData = &(txDescCache.cmdsts);
1699 txDmaLen = sizeof(txDescCache.cmdsts);
1700 txDmaFree = dmaDescFree;
1701
1702 if (doTxDmaWrite())
1703 goto exit;
1704
1705 } else { /* this packet is totally done */
1706 DPRINTF(Ethernet, "This packet is done, let's wrap it up\n");
1707 /* deal with the packet that just finished */
1708 if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
1709 if (txDescCache.extsts & EXTSTS_UDPPKT) {
1710 udpChecksum(txPacket, true);
1711 } else if (txDescCache.extsts & EXTSTS_TCPPKT) {
1712 tcpChecksum(txPacket, true);
1713 } else if (txDescCache.extsts & EXTSTS_IPPKT) {
1714 ipChecksum(txPacket, true);
1715 }
1716 }
1717
1718 txPacket->length = txPacketBufPtr - txPacket->data;
1719 /* the receive side can't handle a packet bigger than 1514 bytes,
1720 so make sure we never queue one */
1721 assert(txPacket->length <= 1514);
1722 txFifo.push_back(txPacket);
1723
1724
1725 /* the following section is not to spec, but it shouldn't make a
1726 functional difference. normally the chip waits until the transmit
1727 has actually occurred before writing back the descriptor, because
1728 it needs to see whether the packet went out successfully to decide
1729 whether to set CMDSTS_OK. in the simulator the transmit always
1730 succeeds, and writing this exactly to spec would complicate the
1731 code, so the writeback is simply done here.
1732 */
1733 txDescCache.cmdsts &= ~CMDSTS_OWN;
1734 txDescCache.cmdsts |= CMDSTS_OK;
1735
1736 DPRINTF(Ethernet,
1737 "txDesc writeback:\n\tcmdsts=%#x\n\textsts=%#x\n",
1738 txDescCache.cmdsts, txDescCache.extsts);
1739
1740 txDmaAddr = (regs.txdp + offsetof(ns_desc, cmdsts)) & 0x3fffffff;
1741 txDmaData = &(txDescCache.cmdsts);
1742 txDmaLen = sizeof(txDescCache.cmdsts) + sizeof(txDescCache.extsts);
1743 txDmaFree = dmaDescFree;
1744
1745 if (doTxDmaWrite())
1746 goto exit;
1747
1748 txPacket = 0;
1749 transmit();
1750
1751 if (txHalt) {
1752 txState = txIdle;
1753 txHalt = false;
1754 } else
1755 txState = txAdvance;
1756 }
1757 } else {
1758 DPRINTF(Ethernet, "this descriptor isn't done yet\n");
1759 /* the fill threshold field is in units of 32 bytes: shift right by 8
1760 to extract the field, then left by 5 for bytes (a net shift of 3) */
1761 if (txFifoAvail < ((regs.txcfg & TXCFG_FLTH_MASK) >> 3)) {
1762 DPRINTF(Ethernet, "txFifoAvail=%d, regs.txcfg & TXCFG_FLTH_MASK = %#x\n",
1763 txFifoAvail, regs.txcfg & TXCFG_FLTH_MASK);
1764 goto exit;
1765 }
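// Worked example with a hypothetical txcfg value: if the fill-threshold field
// (which starts at bit 8) holds 4, so regs.txcfg & TXCFG_FLTH_MASK is 0x400,
// the threshold is 4 * 32 = 128 bytes -- the same as 0x400 >> 3.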
1766
1767 txState = txFragRead;
1768
1769 /* The number of bytes transferred is either whatever is left
1770 in the descriptor (txDescCnt), or if there is not enough
1771 room in the fifo, just whatever room is left in the fifo
1772 */
1773 txXferLen = min<uint32_t>(txDescCnt, txFifoAvail);
1774
1775 txDmaAddr = txFragPtr & 0x3fffffff;
1776 txDmaData = txPacketBufPtr;
1777 txDmaLen = txXferLen;
1778 txDmaFree = dmaDataFree;
1779
1780 if (doTxDmaRead())
1781 goto exit;
1782 }
1783 break;
1784
1785 case txFragRead:
1786 if (txDmaState != dmaIdle)
1787 goto exit;
1788
1789 txPacketBufPtr += txXferLen;
1790 txFragPtr += txXferLen;
1791 txFifoCnt += txXferLen;
1792 txDescCnt -= txXferLen;
1793
1794 txState = txFifoBlock;
1795 break;
1796
1797 case txDescWrite:
1798 if (txDmaState != dmaIdle)
1799 goto exit;
1800
1801 if (txFifoCnt >= ((regs.txcfg & TXCFG_DRTH_MASK) << 5)) {
1802 if (txFifo.empty()) {
1803 uint32_t xmitted = (uint32_t) (txPacketBufPtr - txPacket->data - txPktXmitted);
1804 txFifoCnt -= xmitted;
1805 txPktXmitted += xmitted;
1806 } else {
1807 transmit();
1808 }
1809 }
1810
1811 if (txDescCache.cmdsts & CMDSTS_INTR) {
1812 devIntrPost(ISR_TXDESC);
1813 }
1814
1815 txState = txAdvance;
1816 break;
1817
1818 case txAdvance:
1819 if (txDescCache.link == 0) {
1820 txState = txIdle;
1821 } else {
1822 txState = txDescRead;
1823 regs.txdp = txDescCache.link;
1824 CTDD = false;
1825
1826 txDmaAddr = txDescCache.link & 0x3fffffff;
1827 txDmaData = &txDescCache;
1828 txDmaLen = sizeof(ns_desc);
1829 txDmaFree = dmaDescFree;
1830
1831 if (doTxDmaRead())
1832 goto exit;
1833 }
1834 break;
1835
1836 default:
1837 panic("invalid state");
1838 }
1839
1840 DPRINTF(Ethernet, "entering next tx state=%s\n",
1841 NsTxStateStrings[txState]);
1842
1843 if (txState == txIdle) {
1844 regs.command &= ~CR_TXE;
1845 devIntrPost(ISR_TXIDLE);
1846 return;
1847 }
1848
1849 goto next;
1850
1851 exit:
1852 /**
1853 * @todo do we want to schedule a future kick?
1854 */
1855 DPRINTF(Ethernet, "tx state machine exited state=%s\n",
1856 NsTxStateStrings[txState]);
1857 }
1858
1859 void
1860 NSGigE::transferDone()
1861 {
1862 if (txFifo.empty())
1863 return;
1864
1865 DPRINTF(Ethernet, "schedule transmit\n");
1866
1867 if (txEvent.scheduled())
1868 txEvent.reschedule(curTick + 1);
1869 else
1870 txEvent.schedule(curTick + 1);
1871 }
1872
1873 bool
1874 NSGigE::rxFilter(PacketPtr packet)
1875 {
1876 bool drop = true;
1877 string type;
1878
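// Packets are dropped by default; each matching accept rule below clears the flag.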
1879 if (packet->IsUnicast()) {
1880 type = "unicast";
1881
1882 // If we're accepting all unicast addresses
1883 if (acceptUnicast)
1884 drop = false;
1885
1886 // If we make a perfect match
1887 if ((acceptPerfect)
1888 && (memcmp(rom.perfectMatch, packet->data, sizeof(rom.perfectMatch)) == 0))
1889 drop = false;
1890
1891 eth_header *eth = (eth_header *) packet->data;
1892 if ((acceptArp) && (eth->type == 0x806))
1893 drop = false;
1894
1895 } else if (packet->IsBroadcast()) {
1896 type = "broadcast";
1897
1898 // if we're accepting broadcasts
1899 if (acceptBroadcast)
1900 drop = false;
1901
1902 } else if (packet->IsMulticast()) {
1903 type = "multicast";
1904
1905 // if we're accepting all multicasts
1906 if (acceptMulticast)
1907 drop = false;
1908
1909 } else {
1910 type = "unknown";
1911
1912 // oh well, punt on this one
1913 }
1914
1915 if (drop) {
1916 DPRINTF(Ethernet, "rxFilter drop\n");
1917 DDUMP(EthernetData, packet->data, packet->length);
1918 }
1919
1920 return drop;
1921 }
1922
1923 bool
1924 NSGigE::recvPacket(PacketPtr packet)
1925 {
1926 rxBytes += packet->length;
1927 rxPackets++;
1928
1929 if (rxState == rxIdle) {
1930 DPRINTF(Ethernet, "receive disabled...packet dropped\n");
1931 interface->recvDone();
1932 return true;
1933 }
1934
1935 if (rxFilterEnable && rxFilter(packet)) {
1936 DPRINTF(Ethernet, "packet filtered...dropped\n");
1937 interface->recvDone();
1938 return true;
1939 }
1940
1941 if (rxFifoCnt + packet->length >= MAX_RX_FIFO_SIZE) {
1942 DPRINTF(Ethernet,
1943 "packet will not fit in receive buffer...packet dropped\n");
1944 devIntrPost(ISR_RXORN);
1945 return false;
1946 }
1947
1948 rxFifo.push_back(packet);
1949 rxFifoCnt += packet->length;
1950 interface->recvDone();
1951
1952 rxKick();
1953 return true;
1954 }
1955
1956 /**
1957 * Computes a UDP checksum. If gen is true, the result is written into the
1958 * header; otherwise it is checked against the checksum already in the packet.
1959 */
1960 bool
1961 NSGigE::udpChecksum(PacketPtr packet, bool gen)
1962 {
1963 ip_header *ip = packet->getIpHdr();
1964 udp_header *hdr = packet->getUdpHdr(ip);
1965
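// Build the IP pseudo-header (source/destination address, protocol, and UDP
// length) that is included in the UDP checksum per RFC 768.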
1966 pseudo_header *pseudo = new pseudo_header;
1967
1968 pseudo->src_ip_addr = ip->src_ip_addr;
1969 pseudo->dest_ip_addr = ip->dest_ip_addr;
1970 pseudo->protocol = ip->protocol;
1971 pseudo->len = hdr->len;
1972
1973 uint16_t cksum = checksumCalc((uint16_t *) pseudo, (uint16_t *) hdr,
1974 (uint32_t) hdr->len);
1975
1976 delete pseudo;
1977 if (gen)
1978 hdr->chksum = cksum;
1979 else
1980 if (cksum != 0)
1981 return false;
1982
1983 return true;
1984 }
1985
1986 bool
1987 NSGigE::tcpChecksum(PacketPtr packet, bool gen)
1988 {
1989 ip_header *ip = packet->getIpHdr();
1990 tcp_header *hdr = packet->getTcpHdr(ip);
1991
1992 pseudo_header *pseudo = new pseudo_header;
1993
1994 pseudo->src_ip_addr = ip->src_ip_addr;
1995 pseudo->dest_ip_addr = ip->dest_ip_addr;
1996 pseudo->protocol = ip->protocol;
1997 pseudo->len = ip->dgram_len - (ip->vers_len & 0xf);
1998
1999 uint16_t cksum = checksumCalc((uint16_t *) pseudo, (uint16_t *) hdr,
2000 (uint32_t) pseudo->len);
2001
2002 delete pseudo;
2003 if (gen)
2004 hdr->chksum = cksum;
2005 else
2006 if (cksum != 0)
2007 return false;
2008
2009 return true;
2010 }
2011
2012 bool
2013 NSGigE::ipChecksum(PacketPtr packet, bool gen)
2014 {
2015 ip_header *hdr = packet->getIpHdr();
2016
2017 uint16_t cksum = checksumCalc(NULL, (uint16_t *) hdr, (hdr->vers_len & 0xf));
2018
2019 if (gen)
2020 hdr->hdr_chksum = cksum;
2021 else
2022 if (cksum != 0)
2023 return false;
2024
2025 return true;
2026 }
2027
2028 uint16_t
2029 NSGigE::checksumCalc(uint16_t *pseudo, uint16_t *buf, uint32_t len)
2030 {
2031 uint32_t sum = 0;
2032
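// If the length is odd, fold in the final byte (zero-padded to 16 bits) before
// summing the remaining full 16-bit words.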
2033 uint16_t last_pad = 0;
2034 if (len & 1) {
2035 last_pad = buf[len/2] & 0xff;
2036 len--;
2037 sum += last_pad;
2038 }
2039
2040 if (pseudo) {
2041 sum = pseudo[0] + pseudo[1] + pseudo[2] +
2042 pseudo[3] + pseudo[4] + pseudo[5];
2043 }
2044
2045 for (int i=0; i < (len/2); ++i) {
2046 sum += buf[i];
2047 }
2048
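// Fold any carries out of the low 16 bits back in until the sum fits in 16
// bits, then return its one's complement -- the standard Internet checksum
// (RFC 1071) final step.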
2049 while (sum >> 16)
2050 sum = (sum >> 16) + (sum & 0xffff);
2051
2052 return ~sum;
2053 }
2054
2055 //=====================================================================
2056 //
2057 //
2058 void
2059 NSGigE::serialize(ostream &os)
2060 {
2061 // Serialize the PciDev base class
2062 PciDev::serialize(os);
2063
2064 /*
2065 * Finalize any DMA events now.
2066 */
2067 if (rxDmaReadEvent.scheduled())
2068 rxDmaReadCopy();
2069 if (rxDmaWriteEvent.scheduled())
2070 rxDmaWriteCopy();
2071 if (txDmaReadEvent.scheduled())
2072 txDmaReadCopy();
2073 if (txDmaWriteEvent.scheduled())
2074 txDmaWriteCopy();
2075
2076 /*
2077 * Serialize the device registers
2078 */
2079 SERIALIZE_SCALAR(regs.command);
2080 SERIALIZE_SCALAR(regs.config);
2081 SERIALIZE_SCALAR(regs.mear);
2082 SERIALIZE_SCALAR(regs.ptscr);
2083 SERIALIZE_SCALAR(regs.isr);
2084 SERIALIZE_SCALAR(regs.imr);
2085 SERIALIZE_SCALAR(regs.ier);
2086 SERIALIZE_SCALAR(regs.ihr);
2087 SERIALIZE_SCALAR(regs.txdp);
2088 SERIALIZE_SCALAR(regs.txdp_hi);
2089 SERIALIZE_SCALAR(regs.txcfg);
2090 SERIALIZE_SCALAR(regs.gpior);
2091 SERIALIZE_SCALAR(regs.rxdp);
2092 SERIALIZE_SCALAR(regs.rxdp_hi);
2093 SERIALIZE_SCALAR(regs.rxcfg);
2094 SERIALIZE_SCALAR(regs.pqcr);
2095 SERIALIZE_SCALAR(regs.wcsr);
2096 SERIALIZE_SCALAR(regs.pcr);
2097 SERIALIZE_SCALAR(regs.rfcr);
2098 SERIALIZE_SCALAR(regs.rfdr);
2099 SERIALIZE_SCALAR(regs.srr);
2100 SERIALIZE_SCALAR(regs.mibc);
2101 SERIALIZE_SCALAR(regs.vrcr);
2102 SERIALIZE_SCALAR(regs.vtcr);
2103 SERIALIZE_SCALAR(regs.vdr);
2104 SERIALIZE_SCALAR(regs.ccsr);
2105 SERIALIZE_SCALAR(regs.tbicr);
2106 SERIALIZE_SCALAR(regs.tbisr);
2107 SERIALIZE_SCALAR(regs.tanar);
2108 SERIALIZE_SCALAR(regs.tanlpar);
2109 SERIALIZE_SCALAR(regs.taner);
2110 SERIALIZE_SCALAR(regs.tesr);
2111
2112 SERIALIZE_ARRAY(rom.perfectMatch, EADDR_LEN);
2113
2114 SERIALIZE_SCALAR(io_enable);
2115
2116 /*
2117 * Serialize the data Fifos
2118 */
2119 int txNumPkts = txFifo.size();
2120 SERIALIZE_SCALAR(txNumPkts);
2121 int i = 0;
2122 pktiter_t end = txFifo.end();
2123 for (pktiter_t p = txFifo.begin(); p != end; ++p) {
2124 nameOut(os, csprintf("%s.txFifo%d", name(), i++));
2125 (*p)->serialize(os);
2126 }
2127
2128 int rxNumPkts = rxFifo.size();
2129 SERIALIZE_SCALAR(rxNumPkts);
2130 i = 0;
2131 end = rxFifo.end();
2132 for (pktiter_t p = rxFifo.begin(); p != end; ++p) {
2133 nameOut(os, csprintf("%s.rxFifo%d", name(), i++));
2134 (*p)->serialize(os);
2135 }
2136
2137 /*
2138 * Serialize the various helper variables
2139 */
2140 bool txPacketExists = txPacket;
2141 SERIALIZE_SCALAR(txPacketExists);
2142 if (txPacketExists) {
2143 nameOut(os, csprintf("%s.txPacket", name()));
2144 txPacket->serialize(os);
2145 uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
2146 SERIALIZE_SCALAR(txPktBufPtr);
2147 }
2148
2149 bool rxPacketExists = rxPacket;
2150 SERIALIZE_SCALAR(rxPacketExists);
2151 if (rxPacketExists) {
2152 nameOut(os, csprintf("%s.rxPacket", name()));
2153 rxPacket->serialize(os);
2154 uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
2155 SERIALIZE_SCALAR(rxPktBufPtr);
2156 }
2157
2158 SERIALIZE_SCALAR(txXferLen);
2159 SERIALIZE_SCALAR(rxXferLen);
2160 SERIALIZE_SCALAR(txPktXmitted);
2161
2162 /*
2163 * Serialize DescCaches
2164 */
2165 SERIALIZE_SCALAR(txDescCache.link);
2166 SERIALIZE_SCALAR(txDescCache.bufptr);
2167 SERIALIZE_SCALAR(txDescCache.cmdsts);
2168 SERIALIZE_SCALAR(txDescCache.extsts);
2169 SERIALIZE_SCALAR(rxDescCache.link);
2170 SERIALIZE_SCALAR(rxDescCache.bufptr);
2171 SERIALIZE_SCALAR(rxDescCache.cmdsts);
2172 SERIALIZE_SCALAR(rxDescCache.extsts);
2173
2174 /*
2175 * Serialize tx state machine
2176 */
2177 int txState = this->txState;
2178 SERIALIZE_SCALAR(txState);
2179 SERIALIZE_SCALAR(CTDD);
2180 SERIALIZE_SCALAR(txFifoCnt);
2181 SERIALIZE_SCALAR(txFifoAvail);
2182 SERIALIZE_SCALAR(txHalt);
2183 SERIALIZE_SCALAR(txFragPtr);
2184 SERIALIZE_SCALAR(txDescCnt);
2185 int txDmaState = this->txDmaState;
2186 SERIALIZE_SCALAR(txDmaState);
2187
2188 /*
2189 * Serialize rx state machine
2190 */
2191 int rxState = this->rxState;
2192 SERIALIZE_SCALAR(rxState);
2193 SERIALIZE_SCALAR(CRDD);
2194 SERIALIZE_SCALAR(rxPktBytes);
2195 SERIALIZE_SCALAR(rxFifoCnt);
2196 SERIALIZE_SCALAR(rxHalt);
2197 SERIALIZE_SCALAR(rxDescCnt);
2198 int rxDmaState = this->rxDmaState;
2199 SERIALIZE_SCALAR(rxDmaState);
2200
2201 SERIALIZE_SCALAR(extstsEnable);
2202
2203 /*
2204 * If there's a pending transmit, store the time so we can
2205 * reschedule it later
2206 */
2207 Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
2208 SERIALIZE_SCALAR(transmitTick);
2209
2210 /*
2211 * receive address filter settings
2212 */
2213 SERIALIZE_SCALAR(rxFilterEnable);
2214 SERIALIZE_SCALAR(acceptBroadcast);
2215 SERIALIZE_SCALAR(acceptMulticast);
2216 SERIALIZE_SCALAR(acceptUnicast);
2217 SERIALIZE_SCALAR(acceptPerfect);
2218 SERIALIZE_SCALAR(acceptArp);
2219
2220 /*
2221 * Keep track of pending interrupt status.
2222 */
2223 SERIALIZE_SCALAR(intrTick);
2224 SERIALIZE_SCALAR(cpuPendingIntr);
2225 Tick intrEventTick = 0;
2226 if (intrEvent)
2227 intrEventTick = intrEvent->when();
2228 SERIALIZE_SCALAR(intrEventTick);
2229
2230 }
2231
2232 void
2233 NSGigE::unserialize(Checkpoint *cp, const std::string &section)
2234 {
2235 // Unserialize the PciDev base class
2236 PciDev::unserialize(cp, section);
2237
2238 UNSERIALIZE_SCALAR(regs.command);
2239 UNSERIALIZE_SCALAR(regs.config);
2240 UNSERIALIZE_SCALAR(regs.mear);
2241 UNSERIALIZE_SCALAR(regs.ptscr);
2242 UNSERIALIZE_SCALAR(regs.isr);
2243 UNSERIALIZE_SCALAR(regs.imr);
2244 UNSERIALIZE_SCALAR(regs.ier);
2245 UNSERIALIZE_SCALAR(regs.ihr);
2246 UNSERIALIZE_SCALAR(regs.txdp);
2247 UNSERIALIZE_SCALAR(regs.txdp_hi);
2248 UNSERIALIZE_SCALAR(regs.txcfg);
2249 UNSERIALIZE_SCALAR(regs.gpior);
2250 UNSERIALIZE_SCALAR(regs.rxdp);
2251 UNSERIALIZE_SCALAR(regs.rxdp_hi);
2252 UNSERIALIZE_SCALAR(regs.rxcfg);
2253 UNSERIALIZE_SCALAR(regs.pqcr);
2254 UNSERIALIZE_SCALAR(regs.wcsr);
2255 UNSERIALIZE_SCALAR(regs.pcr);
2256 UNSERIALIZE_SCALAR(regs.rfcr);
2257 UNSERIALIZE_SCALAR(regs.rfdr);
2258 UNSERIALIZE_SCALAR(regs.srr);
2259 UNSERIALIZE_SCALAR(regs.mibc);
2260 UNSERIALIZE_SCALAR(regs.vrcr);
2261 UNSERIALIZE_SCALAR(regs.vtcr);
2262 UNSERIALIZE_SCALAR(regs.vdr);
2263 UNSERIALIZE_SCALAR(regs.ccsr);
2264 UNSERIALIZE_SCALAR(regs.tbicr);
2265 UNSERIALIZE_SCALAR(regs.tbisr);
2266 UNSERIALIZE_SCALAR(regs.tanar);
2267 UNSERIALIZE_SCALAR(regs.tanlpar);
2268 UNSERIALIZE_SCALAR(regs.taner);
2269 UNSERIALIZE_SCALAR(regs.tesr);
2270
2271 UNSERIALIZE_ARRAY(rom.perfectMatch, EADDR_LEN);
2272
2273 UNSERIALIZE_SCALAR(io_enable);
2274
2275 /*
2276 * unserialize the data fifos
2277 */
2278 int txNumPkts;
2279 UNSERIALIZE_SCALAR(txNumPkts);
2280 int i;
2281 for (i = 0; i < txNumPkts; ++i) {
2282 PacketPtr p = new EtherPacket;
2283 p->unserialize(cp, csprintf("%s.txFifo%d", section, i));
2284 txFifo.push_back(p);
2285 }
2286
2287 int rxNumPkts;
2288 UNSERIALIZE_SCALAR(rxNumPkts);
2289 for (i = 0; i < rxNumPkts; ++i) {
2290 PacketPtr p = new EtherPacket;
2291 p->unserialize(cp, csprintf("%s.rxFifo%d", section, i));
2292 rxFifo.push_back(p);
2293 }
2294
2295 /*
2296 * unserialize the various helper variables
2297 */
2298 bool txPacketExists;
2299 UNSERIALIZE_SCALAR(txPacketExists);
2300 if (txPacketExists) {
2301 txPacket = new EtherPacket;
2302 txPacket->unserialize(cp, csprintf("%s.txPacket", section));
2303 uint32_t txPktBufPtr;
2304 UNSERIALIZE_SCALAR(txPktBufPtr);
2305 txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
2306 } else
2307 txPacket = 0;
2308
2309 bool rxPacketExists;
2310 UNSERIALIZE_SCALAR(rxPacketExists);
2312 if (rxPacketExists) {
2313 rxPacket = new EtherPacket;
2314 rxPacket->unserialize(cp, csprintf("%s.rxPacket", section));
2315 uint32_t rxPktBufPtr;
2316 UNSERIALIZE_SCALAR(rxPktBufPtr);
2317 rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
2318 } else
2319 rxPacket = 0;
2320
2321 UNSERIALIZE_SCALAR(txXferLen);
2322 UNSERIALIZE_SCALAR(rxXferLen);
2323 UNSERIALIZE_SCALAR(txPktXmitted);
2324
2325 /*
2326 * Unserialize DescCaches
2327 */
2328 UNSERIALIZE_SCALAR(txDescCache.link);
2329 UNSERIALIZE_SCALAR(txDescCache.bufptr);
2330 UNSERIALIZE_SCALAR(txDescCache.cmdsts);
2331 UNSERIALIZE_SCALAR(txDescCache.extsts);
2332 UNSERIALIZE_SCALAR(rxDescCache.link);
2333 UNSERIALIZE_SCALAR(rxDescCache.bufptr);
2334 UNSERIALIZE_SCALAR(rxDescCache.cmdsts);
2335 UNSERIALIZE_SCALAR(rxDescCache.extsts);
2336
2337 /*
2338 * unserialize tx state machine
2339 */
2340 int txState;
2341 UNSERIALIZE_SCALAR(txState);
2342 this->txState = (TxState) txState;
2343 UNSERIALIZE_SCALAR(CTDD);
2344 UNSERIALIZE_SCALAR(txFifoCnt);
2345 UNSERIALIZE_SCALAR(txFifoAvail);
2346 UNSERIALIZE_SCALAR(txHalt);
2347 UNSERIALIZE_SCALAR(txFragPtr);
2348 UNSERIALIZE_SCALAR(txDescCnt);
2349 int txDmaState;
2350 UNSERIALIZE_SCALAR(txDmaState);
2351 this->txDmaState = (DmaState) txDmaState;
2352
2353 /*
2354 * unserialize rx state machine
2355 */
2356 int rxState;
2357 UNSERIALIZE_SCALAR(rxState);
2358 this->rxState = (RxState) rxState;
2359 UNSERIALIZE_SCALAR(CRDD);
2360 UNSERIALIZE_SCALAR(rxPktBytes);
2361 UNSERIALIZE_SCALAR(rxFifoCnt);
2362 UNSERIALIZE_SCALAR(rxHalt);
2363 UNSERIALIZE_SCALAR(rxDescCnt);
2364 int rxDmaState;
2365 UNSERIALIZE_SCALAR(rxDmaState);
2366 this->rxDmaState = (DmaState) rxDmaState;
2367
2368 UNSERIALIZE_SCALAR(extstsEnable);
2369
2370 /*
2371 * If there's a pending transmit, reschedule it now
2372 */
2373 Tick transmitTick;
2374 UNSERIALIZE_SCALAR(transmitTick);
2375 if (transmitTick)
2376 txEvent.schedule(curTick + transmitTick);
2377
2378 /*
2379 * unserialize receive address filter settings
2380 */
2381 UNSERIALIZE_SCALAR(rxFilterEnable);
2382 UNSERIALIZE_SCALAR(acceptBroadcast);
2383 UNSERIALIZE_SCALAR(acceptMulticast);
2384 UNSERIALIZE_SCALAR(acceptUnicast);
2385 UNSERIALIZE_SCALAR(acceptPerfect);
2386 UNSERIALIZE_SCALAR(acceptArp);
2387
2388 /*
2389 * Keep track of pending interrupt status.
2390 */
2391 UNSERIALIZE_SCALAR(intrTick);
2392 UNSERIALIZE_SCALAR(cpuPendingIntr);
2393 Tick intrEventTick;
2394 UNSERIALIZE_SCALAR(intrEventTick);
2395 if (intrEventTick) {
2396 intrEvent = new IntrEvent(this, true);
2397 intrEvent->schedule(intrEventTick);
2398 }
2399
2400 /*
2401 * re-add addrRanges to bus bridges
2402 */
2403 if (pioInterface) {
2404 pioInterface->addAddrRange(BARAddrs[0], BARAddrs[0] + BARSize[0] - 1);
2405 pioInterface->addAddrRange(BARAddrs[1], BARAddrs[1] + BARSize[1] - 1);
2406 }
2407 }
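/*
 * A minimal sketch (not part of the device model) of how additional state
 * would be checkpointed: every field written in serialize() needs a
 * matching read in unserialize() under the same name.  `newField` below is
 * a hypothetical member used only for illustration.
 *
 *     // in NSGigE::serialize()
 *     SERIALIZE_SCALAR(newField);
 *
 *     // in NSGigE::unserialize()
 *     UNSERIALIZE_SCALAR(newField);
 */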
2408
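/*
 * Timing hook for programmed I/O: a register access to this device
 * completes pioLatency ticks after the current tick.
 */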
2409 Tick
2410 NSGigE::cacheAccess(MemReqPtr &req)
2411 {
2412 DPRINTF(EthernetPIO, "timing access to paddr=%#x (daddr=%#x)\n",
2413 req->paddr, req->paddr - addr);
2414 return curTick + pioLatency;
2415 }
2416 //=====================================================================
2417
2418
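/*
 * Simulator configuration glue for NSGigEInt, the Ethernet port object:
 * it binds an NSGigE device to an optional EtherInt peer.
 */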
2419 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)
2420
2421 SimObjectParam<EtherInt *> peer;
2422 SimObjectParam<NSGigE *> device;
2423
2424 END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)
2425
2426 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt)
2427
2428 INIT_PARAM_DFLT(peer, "peer interface", NULL),
2429 INIT_PARAM(device, "Ethernet device of this interface")
2430
2431 END_INIT_SIM_OBJECT_PARAMS(NSGigEInt)
2432
2433 CREATE_SIM_OBJECT(NSGigEInt)
2434 {
2435 NSGigEInt *dev_int = new NSGigEInt(getInstanceName(), device);
2436
2437 EtherInt *p = (EtherInt *)peer;
2438 if (p) {
2439 dev_int->setPeer(p);
2440 p->setPeer(dev_int);
2441 }
2442
2443 return dev_int;
2444 }
2445
2446 REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt)
2447
2448
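/*
 * Simulator configuration glue for the NSGigE device itself.  The
 * parameters declared below are forwarded to the NSGigE constructor by
 * CREATE_SIM_OBJECT(NSGigE) at the end of this file.
 */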
2449 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE)
2450
2451 Param<Tick> tx_delay;
2452 Param<Tick> rx_delay;
2453 SimObjectParam<IntrControl *> intr_ctrl;
2454 Param<Tick> intr_delay;
2455 SimObjectParam<MemoryController *> mmu;
2456 SimObjectParam<PhysicalMemory *> physmem;
2457 Param<bool> rx_filter;
2458 Param<string> hardware_address;
2459 SimObjectParam<Bus*> header_bus;
2460 SimObjectParam<Bus*> payload_bus;
2461 SimObjectParam<HierParams *> hier;
2462 Param<Tick> pio_latency;
2463 Param<bool> dma_desc_free;
2464 Param<bool> dma_data_free;
2465 Param<Tick> dma_read_delay;
2466 Param<Tick> dma_write_delay;
2467 Param<Tick> dma_read_factor;
2468 Param<Tick> dma_write_factor;
2469 SimObjectParam<PciConfigAll *> configspace;
2470 SimObjectParam<PciConfigData *> configdata;
2471 SimObjectParam<Tsunami *> tsunami;
2472 Param<uint32_t> pci_bus;
2473 Param<uint32_t> pci_dev;
2474 Param<uint32_t> pci_func;
2475
2476 END_DECLARE_SIM_OBJECT_PARAMS(NSGigE)
2477
2478 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE)
2479
2480 INIT_PARAM_DFLT(tx_delay, "Transmit Delay", 1000),
2481 INIT_PARAM_DFLT(rx_delay, "Receive Delay", 1000),
2482 INIT_PARAM(intr_ctrl, "Interrupt Controller"),
2483 INIT_PARAM_DFLT(intr_delay, "Interrupt Delay in microseconds", 0),
2484 INIT_PARAM(mmu, "Memory Controller"),
2485 INIT_PARAM(physmem, "Physical Memory"),
2486 INIT_PARAM_DFLT(rx_filter, "Enable Receive Filter", true),
2487 INIT_PARAM_DFLT(hardware_address, "Ethernet Hardware Address",
2488 "00:99:00:00:00:01"),
2489 INIT_PARAM_DFLT(header_bus, "The IO Bus to attach to for headers", NULL),
2490 INIT_PARAM_DFLT(payload_bus, "The IO Bus to attach to for payload", NULL),
2491 INIT_PARAM_DFLT(hier, "Hierarchy global variables", &defaultHierParams),
2492 INIT_PARAM_DFLT(pio_latency, "Programmed IO latency", 1000),
2493 INIT_PARAM_DFLT(dma_desc_free, "DMA of Descriptors is free", false),
2494 INIT_PARAM_DFLT(dma_data_free, "DMA of Data is free", false),
2495 INIT_PARAM_DFLT(dma_read_delay, "fixed delay for dma reads", 0),
2496 INIT_PARAM_DFLT(dma_write_delay, "fixed delay for dma writes", 0),
2497 INIT_PARAM_DFLT(dma_read_factor, "multiplier for dma reads", 0),
2498 INIT_PARAM_DFLT(dma_write_factor, "multiplier for dma writes", 0),
2499 INIT_PARAM(configspace, "PCI Configspace"),
2500 INIT_PARAM(configdata, "PCI Config data"),
2501 INIT_PARAM(tsunami, "Tsunami"),
2502 INIT_PARAM(pci_bus, "PCI bus"),
2503 INIT_PARAM(pci_dev, "PCI device number"),
2504 INIT_PARAM(pci_func, "PCI function code")
2505
2506 END_INIT_SIM_OBJECT_PARAMS(NSGigE)
2507
2508
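/*
 * Factory: parse the colon-separated hardware_address string into six
 * byte values and construct the device with the configured parameters.
 */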
2509 CREATE_SIM_OBJECT(NSGigE)
2510 {
2511 int eaddr[6];
2512 sscanf(((string)hardware_address).c_str(), "%x:%x:%x:%x:%x:%x",
2513 &eaddr[0], &eaddr[1], &eaddr[2], &eaddr[3], &eaddr[4], &eaddr[5]);
2514
2515 return new NSGigE(getInstanceName(), intr_ctrl, intr_delay,
2516 physmem, tx_delay, rx_delay, mmu, hier, header_bus,
2517 payload_bus, pio_latency, dma_desc_free, dma_data_free,
2518 dma_read_delay, dma_write_delay, dma_read_factor,
2519 dma_write_factor, configspace, configdata,
2520 tsunami, pci_bus, pci_dev, pci_func, rx_filter, eaddr);
2521 }
2522
2523 REGISTER_SIM_OBJECT("NSGigE", NSGigE)