Fix transmit-side checksum offloading so that it does not generate a pseudo header.
[gem5.git] / dev / ns_gige.cc
1 /*
2 * Copyright (c) 2004 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /* @file
30 * Device module for modelling the National Semiconductor
31 * DP83820 ethernet controller. Does not support priority queueing
32 */
33 #include <cstdio>
34 #include <deque>
35 #include <string>
36
37 #include "base/inet.hh"
38 #include "cpu/exec_context.hh"
39 #include "cpu/intr_control.hh"
40 #include "dev/dma.hh"
41 #include "dev/ns_gige.hh"
42 #include "dev/etherlink.hh"
43 #include "mem/bus/bus.hh"
44 #include "mem/bus/dma_interface.hh"
45 #include "mem/bus/pio_interface.hh"
46 #include "mem/bus/pio_interface_impl.hh"
47 #include "mem/functional_mem/memory_control.hh"
48 #include "mem/functional_mem/physical_memory.hh"
49 #include "sim/builder.hh"
50 #include "sim/host.hh"
51 #include "sim/sim_stats.hh"
52 #include "targetarch/vtophys.hh"
53 #include "dev/pciconfigall.hh"
54 #include "dev/tsunami_cchip.hh"
55
// Printable names for the receive state machine, indexed by rxState;
// used for DPRINTF tracing (see rxKick).
const char *NsRxStateStrings[] =
{
    "rxIdle",
    "rxDescRefr",
    "rxDescRead",
    "rxFifoBlock",
    "rxFragWrite",
    "rxDescWrite",
    "rxAdvance"
};
66
// Printable names for the transmit state machine, indexed by txState;
// used for DPRINTF tracing.
const char *NsTxStateStrings[] =
{
    "txIdle",
    "txDescRefr",
    "txDescRead",
    "txFifoBlock",
    "txFragRead",
    "txDescWrite",
    "txAdvance"
};
77
// Printable names for the DMA engine states (rxDmaState/txDmaState),
// indexed by the DmaState enum values.
const char *NsDmaState[] =
{
    "dmaIdle",
    "dmaReading",
    "dmaWriting",
    "dmaReadWaiting",
    "dmaWriteWaiting"
};
86
87 using namespace std;
88
89 //helper function declarations
90 //These functions reverse Endianness so we can evaluate network data correctly
91 uint16_t reverseEnd16(uint16_t);
92 uint32_t reverseEnd32(uint32_t);
93
///////////////////////////////////////////////////////////////////////
//
// NSGigE PCI Device
//
// Constructor: registers the device with the chipset, sets up the
// optional PIO and DMA bus interfaces, converts the interrupt delay
// into ticks, resets the register file, and seeds the receive filter's
// perfect-match entry with the configured MAC address.
//
NSGigE::NSGigE(const std::string &name, IntrControl *i, Tick intr_delay,
               PhysicalMemory *pmem, Tick tx_delay, Tick rx_delay,
               MemoryController *mmu, HierParams *hier, Bus *header_bus,
               Bus *payload_bus, Tick pio_latency, bool dma_desc_free,
               bool dma_data_free, Tick dma_read_delay, Tick dma_write_delay,
               Tick dma_read_factor, Tick dma_write_factor, PciConfigAll *cf,
               PciConfigData *cd, Tsunami *t, uint32_t bus, uint32_t dev,
               uint32_t func, bool rx_filter, const int eaddr[6])
    : PciDev(name, mmu, cf, cd, bus, dev, func), tsunami(t), ioEnable(false),
      txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
      txXferLen(0), rxXferLen(0), txState(txIdle), CTDD(false),
      txFifoAvail(MAX_TX_FIFO_SIZE), txHalt(false),
      txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
      CRDD(false), rxPktBytes(0), rxFifoCnt(0), rxHalt(false),
      rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
      rxDmaReadEvent(this), rxDmaWriteEvent(this),
      txDmaReadEvent(this), txDmaWriteEvent(this),
      dmaDescFree(dma_desc_free), dmaDataFree(dma_data_free),
      txDelay(tx_delay), rxDelay(rx_delay), rxKickTick(0), txKickTick(0),
      txEvent(this), rxFilterEnable(rx_filter), acceptBroadcast(false),
      acceptMulticast(false), acceptUnicast(false),
      acceptPerfect(false), acceptArp(false),
      physmem(pmem), intctrl(i), intrTick(0), cpuPendingIntr(false),
      intrEvent(0), interface(0), pioLatency(pio_latency)
{
    // let the Tsunami chipset find this device for interrupt delivery
    tsunami->ethernet = this;

    // PIO traffic goes on the header bus when one is present; DMA is
    // split between the header bus (first argument) and payload bus
    // (second argument) if both exist, otherwise everything shares the
    // single bus that was provided.  If neither bus is given, no
    // PIO/DMA interfaces are created.
    if (header_bus) {
        pioInterface = newPioInterface(name, hier, header_bus, this,
                                       &NSGigE::cacheAccess);

        if (payload_bus)
            dmaInterface = new DMAInterface<Bus>(name + ".dma",
                                                 header_bus, payload_bus, 1);
        else
            dmaInterface = new DMAInterface<Bus>(name + ".dma",
                                                 header_bus, header_bus, 1);
    } else if (payload_bus) {
        pioInterface = newPioInterface(name, hier, payload_bus, this,
                                       &NSGigE::cacheAccess);

        dmaInterface = new DMAInterface<Bus>(name + ".dma", payload_bus,
                                             payload_bus, 1);

    }


    intrDelay = US2Ticks(intr_delay);  // intr_delay parameter is in us
    dmaReadDelay = dma_read_delay;
    dmaWriteDelay = dma_write_delay;
    dmaReadFactor = dma_read_factor;
    dmaWriteFactor = dma_write_factor;

    regsReset();
    // seed the receive filter's perfect-match registers with our MAC
    rom.perfectMatch[0] = eaddr[0];
    rom.perfectMatch[1] = eaddr[1];
    rom.perfectMatch[2] = eaddr[2];
    rom.perfectMatch[3] = eaddr[3];
    rom.perfectMatch[4] = eaddr[4];
    rom.perfectMatch[5] = eaddr[5];
}
159
// Destructor: no explicit teardown is performed here.
NSGigE::~NSGigE()
{}
162
/**
 * Register this device's statistics with the stats framework.  The
 * byte/packet counters are registered first; the bandwidth and rate
 * statistics are then defined as formulas over them and simulated
 * seconds, so they must be assigned after their operands are named.
 */
void
NSGigE::regStats()
{
    txBytes
        .name(name() + ".txBytes")
        .desc("Bytes Transmitted")
        .prereq(txBytes)
        ;

    rxBytes
        .name(name() + ".rxBytes")
        .desc("Bytes Received")
        .prereq(rxBytes)
        ;

    txPackets
        .name(name() + ".txPackets")
        .desc("Number of Packets Transmitted")
        .prereq(txBytes)
        ;

    rxPackets
        .name(name() + ".rxPackets")
        .desc("Number of Packets Received")
        .prereq(rxBytes)
        ;

    txBandwidth
        .name(name() + ".txBandwidth")
        .desc("Transmit Bandwidth (bits/s)")
        .precision(0)
        .prereq(txBytes)
        ;

    rxBandwidth
        .name(name() + ".rxBandwidth")
        .desc("Receive Bandwidth (bits/s)")
        .precision(0)
        .prereq(rxBytes)
        ;

    txPacketRate
        .name(name() + ".txPPS")
        .desc("Packet Tranmission Rate (packets/s)")
        .precision(0)
        .prereq(txBytes)
        ;

    rxPacketRate
        .name(name() + ".rxPPS")
        .desc("Packet Reception Rate (packets/s)")
        .precision(0)
        .prereq(rxBytes)
        ;

    // formula statistics, derived from the counters registered above
    txBandwidth = txBytes * Stats::constant(8) / simSeconds;
    rxBandwidth = rxBytes * Stats::constant(8) / simSeconds;
    txPacketRate = txPackets / simSeconds;
    rxPacketRate = rxPackets / simSeconds;
}
223
224 /**
225 * This is to read the PCI general configuration registers
226 */
227 void
228 NSGigE::ReadConfig(int offset, int size, uint8_t *data)
229 {
230 if (offset < PCI_DEVICE_SPECIFIC)
231 PciDev::ReadConfig(offset, size, data);
232 else
233 panic("Device specific PCI config space not implemented!\n");
234 }
235
/**
 * Write to the PCI general configuration registers.  After the base
 * class updates config space, writes to the command register and to
 * the BARs are intercepted to keep ioEnable and the PIO address
 * ranges in sync with the new configuration.
 */
void
NSGigE::WriteConfig(int offset, int size, uint32_t data)
{
    if (offset < PCI_DEVICE_SPECIFIC)
        PciDev::WriteConfig(offset, size, data);
    else
        panic("Device specific PCI config space not implemented!\n");

    // Need to catch writes to BARs to update the PIO interface
    switch (offset) {
        // seems to work fine without all these PCI settings, but IO is
        // tracked here to double check; an assertion will fail if we
        // need to properly implement any of the others
      case PCI_COMMAND:
        // mirror the I/O Space Enable bit into ioEnable, which gates
        // register accesses in read()/write()
        if (config.data[offset] & PCI_CMD_IOSE)
            ioEnable = true;
        else
            ioEnable = false;

#if 0
        if (config.data[offset] & PCI_CMD_BME) {
            bmEnabled = true;
        }
        else {
            bmEnabled = false;
        }

        if (config.data[offset] & PCI_CMD_MSE) {
            memEnable = true;
        }
        else {
            memEnable = false;
        }
#endif
        break;

      case PCI0_BASE_ADDR0:
        // a non-zero BAR write maps the register file: publish the new
        // address range to the PIO interface, then strip the uncached
        // address bits for internal use
        if (BARAddrs[0] != 0) {

            if (pioInterface)
                pioInterface->addAddrRange(BARAddrs[0], BARAddrs[0] + BARSize[0] - 1);

            BARAddrs[0] &= PA_UNCACHED_MASK;

        }
        break;
      case PCI0_BASE_ADDR1:
        if (BARAddrs[1] != 0) {

            if (pioInterface)
                pioInterface->addAddrRange(BARAddrs[1], BARAddrs[1] + BARSize[1] - 1);

            BARAddrs[1] &= PA_UNCACHED_MASK;

        }
        break;
    }
}
297
/**
 * Read a device register.  The register map is detailed in the NS83820
 * spec sheet; offsets and bit masks live in ns_gige_reg.h.  Only
 * 32-bit accesses are supported.
 */
Fault
NSGigE::read(MemReqPtr &req, uint8_t *data)
{
    assert(ioEnable);

    // the mask yields only the offset into the device register file
    Addr daddr = req->paddr & 0xfff;
    DPRINTF(EthernetPIO, "read da=%#x pa=%#x va=%#x size=%d\n",
            daddr, req->paddr, req->vaddr, req->size);


    // there are some reserved registers; see ns_gige_reg.h and the
    // spec sheet for details
    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        // this window aliases the PCI configuration space
        ReadConfig(daddr & 0xff, req->size, data);
        return No_Fault;
    } else if (daddr >= MIB_START && daddr <= MIB_END) {
        // the MIBs (hardware statistics keepers) are not implemented;
        // every read returns zero.  hopefully the kernel doesn't
        // actually DEPEND upon their values
        uint32_t &reg = *(uint32_t *) data;
        reg = 0;
        return No_Fault;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    switch (req->size) {
      case sizeof(uint32_t):
        {
            uint32_t &reg = *(uint32_t *)data;

            switch (daddr) {
              case CR:
                reg = regs.command;
                // these bits are supposed to be cleared on a read
                reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
                break;

              case CFG:
                reg = regs.config;
                break;

              case MEAR:
                reg = regs.mear;
                break;

              case PTSCR:
                reg = regs.ptscr;
                break;

              case ISR:
                reg = regs.isr;
                // reading the ISR acknowledges all pending interrupts
                devIntrClear(ISR_ALL);
                break;

              case IMR:
                reg = regs.imr;
                break;

              case IER:
                reg = regs.ier;
                break;

              case IHR:
                reg = regs.ihr;
                break;

              case TXDP:
                reg = regs.txdp;
                break;

              case TXDP_HI:
                reg = regs.txdp_hi;
                break;

              case TXCFG:
                reg = regs.txcfg;
                break;

              case GPIOR:
                reg = regs.gpior;
                break;

              case RXDP:
                reg = regs.rxdp;
                break;

              case RXDP_HI:
                reg = regs.rxdp_hi;
                break;

              case RXCFG:
                reg = regs.rxcfg;
                break;

              case PQCR:
                reg = regs.pqcr;
                break;

              case WCSR:
                reg = regs.wcsr;
                break;

              case PCR:
                reg = regs.pcr;
                break;

                // see the spec sheet for how RFCR and RFDR work:
                // software first writes RFCR to select what it wants
                // to access, then the access through RFDR acts on
                // whatever RFCR selected
              case RFCR:
                reg = regs.rfcr;
                break;

              case RFDR:
                // only the perfect-match (MAC address) entries are
                // implemented, returned two bytes at a time in
                // little-endian order
                switch (regs.rfcr & RFCR_RFADDR) {
                  case 0x000:
                    reg = rom.perfectMatch[1];
                    reg = reg << 8;
                    reg += rom.perfectMatch[0];
                    break;
                  case 0x002:
                    reg = rom.perfectMatch[3] << 8;
                    reg += rom.perfectMatch[2];
                    break;
                  case 0x004:
                    reg = rom.perfectMatch[5] << 8;
                    reg += rom.perfectMatch[4];
                    break;
                  default:
                    panic("reading from RFDR for something for other than PMATCH!\n");
                    // other RFDR functionality not implemented because
                    // the driver doesn't use it
                }
                break;

              case SRR:
                reg = regs.srr;
                break;

              case MIBC:
                reg = regs.mibc;
                // these control bits read back as cleared
                reg &= ~(MIBC_MIBS | MIBC_ACLR);
                break;

              case VRCR:
                reg = regs.vrcr;
                break;

              case VTCR:
                reg = regs.vtcr;
                break;

              case VDR:
                reg = regs.vdr;
                break;

              case CCSR:
                reg = regs.ccsr;
                break;

              case TBICR:
                reg = regs.tbicr;
                break;

              case TBISR:
                reg = regs.tbisr;
                break;

              case TANAR:
                reg = regs.tanar;
                break;

              case TANLPAR:
                reg = regs.tanlpar;
                break;

              case TANER:
                reg = regs.taner;
                break;

              case TESR:
                reg = regs.tesr;
                break;

              default:
                panic("reading unimplemented register: addr = %#x", daddr);
            }

            DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
                    daddr, reg, reg);
        }
        break;

      default:
        panic("accessing register with invalid size: addr=%#x, size=%d",
              daddr, req->size);
    }

    return No_Fault;
}
505
/**
 * Write a device register.  See the NS83820 spec sheet and
 * ns_gige_reg.h for the register map.  Only 32-bit accesses are
 * supported; read-only or never-driver-written registers panic.
 */
Fault
NSGigE::write(MemReqPtr &req, const uint8_t *data)
{
    assert(ioEnable);

    // the mask yields only the offset into the device register file
    Addr daddr = req->paddr & 0xfff;
    DPRINTF(EthernetPIO, "write da=%#x pa=%#x va=%#x size=%d\n",
            daddr, req->paddr, req->vaddr, req->size);

    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        // this window aliases the PCI configuration space
        WriteConfig(daddr & 0xff, req->size, *(uint32_t *)data);
        return No_Fault;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    if (req->size == sizeof(uint32_t)) {
        uint32_t reg = *(uint32_t *)data;
        DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);

        switch (daddr) {
          case CR:
            regs.command = reg;
            // enable+disable set together is treated as a halt request
            if ((reg & (CR_TXE | CR_TXD)) == (CR_TXE | CR_TXD)) {
                txHalt = true;
            } else if (reg & CR_TXE) {
                // the kernel is enabling the transmit machine
                if (txState == txIdle)
                    txKick();
            } else if (reg & CR_TXD) {
                txHalt = true;
            }

            if ((reg & (CR_RXE | CR_RXD)) == (CR_RXE | CR_RXD)) {
                rxHalt = true;
            } else if (reg & CR_RXE) {
                if (rxState == rxIdle) {
                    rxKick();
                }
            } else if (reg & CR_RXD) {
                rxHalt = true;
            }

            if (reg & CR_TXR)
                txReset();

            if (reg & CR_RXR)
                rxReset();

            // software-requested interrupt
            if (reg & CR_SWI)
                devIntrPost(ISR_SWI);

            // full chip reset: both state machines plus the registers
            if (reg & CR_RST) {
                txReset();
                rxReset();

                regsReset();
            }
            break;

          case CFG:
            if (reg & CFG_LNKSTS || reg & CFG_SPDSTS || reg & CFG_DUPSTS
                || reg & CFG_RESERVED || reg & CFG_T64ADDR
                || reg & CFG_PCI64_DET)
                panic("writing to read-only or reserved CFG bits!\n");

            // NOTE: bits are only ever OR'd in here, never cleared, so
            // a CFG write cannot turn a previously-set option off
            regs.config |= reg & ~(CFG_LNKSTS | CFG_SPDSTS | CFG_DUPSTS | CFG_RESERVED |
                                   CFG_T64ADDR | CFG_PCI64_DET);

            // all these #if 0's are because i don't THINK the kernel needs to have these implemented
            // if there is a problem relating to one of these, you may need to add functionality in
#if 0
            if (reg & CFG_TBI_EN) ;
            if (reg & CFG_MODE_1000) ;
#endif

            if (reg & CFG_AUTO_1000)
                panic("CFG_AUTO_1000 not implemented!\n");

#if 0
            if (reg & CFG_PINT_DUPSTS || reg & CFG_PINT_LNKSTS || reg & CFG_PINT_SPDSTS) ;
            if (reg & CFG_TMRTEST) ;
            if (reg & CFG_MRM_DIS) ;
            if (reg & CFG_MWI_DIS) ;

            if (reg & CFG_T64ADDR)
                panic("CFG_T64ADDR is read only register!\n");

            if (reg & CFG_PCI64_DET)
                panic("CFG_PCI64_DET is read only register!\n");

            if (reg & CFG_DATA64_EN) ;
            if (reg & CFG_M64ADDR) ;
            if (reg & CFG_PHY_RST) ;
            if (reg & CFG_PHY_DIS) ;
#endif

            // extended status (checksum/VLAN info) in descriptors
            if (reg & CFG_EXTSTS_EN)
                extstsEnable = true;
            else
                extstsEnable = false;

#if 0
            if (reg & CFG_REQALG) ;
            if (reg & CFG_SB) ;
            if (reg & CFG_POW) ;
            if (reg & CFG_EXD) ;
            if (reg & CFG_PESEL) ;
            if (reg & CFG_BROM_DIS) ;
            if (reg & CFG_EXT_125) ;
            if (reg & CFG_BEM) ;
#endif
            break;

          case MEAR:
            regs.mear = reg;
            /* since phy is completely faked, MEAR_MD* don't matter
               and since the driver never uses MEAR_EE*, they don't matter */
#if 0
            if (reg & MEAR_EEDI) ;
            if (reg & MEAR_EEDO) ; //this one is read only
            if (reg & MEAR_EECLK) ;
            if (reg & MEAR_EESEL) ;
            if (reg & MEAR_MDIO) ;
            if (reg & MEAR_MDDIR) ;
            if (reg & MEAR_MDC) ;
#endif
            break;

          case PTSCR:
            regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
            /* these control BISTs for various parts of chip - we don't
               care about them, so just fake that the BIST is done */
            if (reg & PTSCR_RBIST_EN)
                regs.ptscr |= PTSCR_RBIST_DONE;
            if (reg & PTSCR_EEBIST_EN)
                regs.ptscr &= ~PTSCR_EEBIST_EN;
            if (reg & PTSCR_EELOAD_EN)
                regs.ptscr &= ~PTSCR_EELOAD_EN;
            break;

          case ISR: /* writing to the ISR has no effect */
            panic("ISR is a read only register!\n");

          case IMR:
            regs.imr = reg;
            // the mask change may raise or drop the CPU interrupt line
            devIntrChangeMask();
            break;

          case IER:
            regs.ier = reg;
            break;

          case IHR:
            regs.ihr = reg;
            /* not going to implement real interrupt holdoff */
            break;

          case TXDP:
            // descriptor pointers are 4-byte aligned
            regs.txdp = (reg & 0xFFFFFFFC);
            assert(txState == txIdle);
            CTDD = false;
            break;

          case TXDP_HI:
            regs.txdp_hi = reg;
            break;

          case TXCFG:
            regs.txcfg = reg;
#if 0
            if (reg & TXCFG_CSI) ;
            if (reg & TXCFG_HBI) ;
            if (reg & TXCFG_MLB) ;
            if (reg & TXCFG_ATP) ;
            if (reg & TXCFG_ECRETRY) ; /* this could easily be implemented, but
                                          considering the network is just a fake
                                          pipe, wouldn't make sense to do this */

            if (reg & TXCFG_BRST_DIS) ;
#endif


            /* we handle our own DMA, ignore the kernel's exhortations */
            //if (reg & TXCFG_MXDMA) ;

            // also, we currently don't care about fill/drain thresholds
            // though this may change in the future with more realistic
            // networks or a driver which changes it according to feedback

            break;

          case GPIOR:
            regs.gpior = reg;
            /* these just control general purpose i/o pins, don't matter */
            break;

          case RXDP:
            regs.rxdp = reg;
            break;

          case RXDP_HI:
            regs.rxdp_hi = reg;
            break;

          case RXCFG:
            regs.rxcfg = reg;
#if 0
            if (reg & RXCFG_AEP) ;
            if (reg & RXCFG_ARP) ;
            if (reg & RXCFG_STRIPCRC) ;
            if (reg & RXCFG_RX_RD) ;
            if (reg & RXCFG_ALP) ;
            if (reg & RXCFG_AIRL) ;
#endif

            /* we handle our own DMA, ignore what kernel says about it */
            //if (reg & RXCFG_MXDMA) ;

#if 0
            // also, we currently don't care about fill/drain thresholds
            // though this may change in the future with more realistic
            // networks or a driver which changes it according to feedback
            if (reg & (RXCFG_DRTH | RXCFG_DRTH0)) ;
#endif
            break;

          case PQCR:
            /* there is no priority queueing used in the linux 2.6 driver */
            regs.pqcr = reg;
            break;

          case WCSR:
            /* not going to implement wake on LAN */
            regs.wcsr = reg;
            break;

          case PCR:
            /* not going to implement pause control */
            regs.pcr = reg;
            break;

          case RFCR:
            regs.rfcr = reg;

            // latch the receive-filter configuration into booleans the
            // receive path consults on every packet
            rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
            acceptBroadcast = (reg & RFCR_AAB) ? true : false;
            acceptMulticast = (reg & RFCR_AAM) ? true : false;
            acceptUnicast = (reg & RFCR_AAU) ? true : false;
            acceptPerfect = (reg & RFCR_APM) ? true : false;
            acceptArp = (reg & RFCR_AARP) ? true : false;

            if (reg & RFCR_APAT) ;
            //  panic("RFCR_APAT not implemented!\n");

            if (reg & RFCR_MHEN || reg & RFCR_UHEN)
                panic("hash filtering not implemented!\n");

            if (reg & RFCR_ULM)
                panic("RFCR_ULM not implemented!\n");

            break;

          case RFDR:
            panic("the driver never writes to RFDR, something is wrong!\n");

          case BRAR:
            panic("the driver never uses BRAR, something is wrong!\n");

          case BRDR:
            panic("the driver never uses BRDR, something is wrong!\n");

          case SRR:
            panic("SRR is read only register!\n");

          case MIBC:
            panic("the driver never uses MIBC, something is wrong!\n");

          case VRCR:
            regs.vrcr = reg;
            break;

          case VTCR:
            regs.vtcr = reg;
            break;

          case VDR:
            panic("the driver never uses VDR, something is wrong!\n");
            break;

          case CCSR:
            /* not going to implement clockrun stuff */
            regs.ccsr = reg;
            break;

          case TBICR:
            regs.tbicr = reg;
            if (reg & TBICR_MR_LOOPBACK)
                panic("TBICR_MR_LOOPBACK never used, something wrong!\n");

            // fake autonegotiation: instantly report completion with
            // the link partner echoing our advertised abilities
            if (reg & TBICR_MR_AN_ENABLE) {
                regs.tanlpar = regs.tanar;
                regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
            }

#if 0
            if (reg & TBICR_MR_RESTART_AN) ;
#endif

            break;

          case TBISR:
            panic("TBISR is read only register!\n");

          case TANAR:
            regs.tanar = reg;
            if (reg & TANAR_PS2)
                panic("this isn't used in driver, something wrong!\n");

            if (reg & TANAR_PS1)
                panic("this isn't used in driver, something wrong!\n");
            break;

          case TANLPAR:
            panic("this should only be written to by the fake phy!\n");

          case TANER:
            panic("TANER is read only register!\n");

          case TESR:
            regs.tesr = reg;
            break;

          default:
            panic("thought i covered all the register, what is this? addr=%#x",
                  daddr);
        }
    } else
        panic("Invalid Request Size");

    return No_Fault;
}
849
850 void
851 NSGigE::devIntrPost(uint32_t interrupts)
852 {
853 bool delay = false;
854
855 if (interrupts & ISR_RESERVE)
856 panic("Cannot set a reserved interrupt");
857
858 if (interrupts & ISR_TXRCMP)
859 regs.isr |= ISR_TXRCMP;
860
861 if (interrupts & ISR_RXRCMP)
862 regs.isr |= ISR_RXRCMP;
863
864 //ISR_DPERR not implemented
865 //ISR_SSERR not implemented
866 //ISR_RMABT not implemented
867 //ISR_RXSOVR not implemented
868 //ISR_HIBINT not implemented
869 //ISR_PHY not implemented
870 //ISR_PME not implemented
871
872 if (interrupts & ISR_SWI)
873 regs.isr |= ISR_SWI;
874
875 //ISR_MIB not implemented
876 //ISR_TXURN not implemented
877
878 if (interrupts & ISR_TXIDLE)
879 regs.isr |= ISR_TXIDLE;
880
881 if (interrupts & ISR_TXERR)
882 regs.isr |= ISR_TXERR;
883
884 if (interrupts & ISR_TXDESC)
885 regs.isr |= ISR_TXDESC;
886
887 if (interrupts & ISR_TXOK) {
888 regs.isr |= ISR_TXOK;
889 delay = true;
890 }
891
892 if (interrupts & ISR_RXORN)
893 regs.isr |= ISR_RXORN;
894
895 if (interrupts & ISR_RXIDLE)
896 regs.isr |= ISR_RXIDLE;
897
898 //ISR_RXEARLY not implemented
899
900 if (interrupts & ISR_RXERR)
901 regs.isr |= ISR_RXERR;
902
903 if (interrupts & ISR_RXDESC)
904 regs.isr |= ISR_RXDESC;
905
906 if (interrupts & ISR_RXOK) {
907 delay = true;
908 regs.isr |= ISR_RXOK;
909 }
910
911 if ((regs.isr & regs.imr)) {
912 Tick when = curTick;
913 if (delay)
914 when += intrDelay;
915 cpuIntrPost(when);
916 }
917
918 DPRINTF(EthernetIntr, "**interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
919 interrupts, regs.isr, regs.imr);
920 }
921
922 void
923 NSGigE::devIntrClear(uint32_t interrupts)
924 {
925 if (interrupts & ISR_RESERVE)
926 panic("Cannot clear a reserved interrupt");
927
928 if (interrupts & ISR_TXRCMP)
929 regs.isr &= ~ISR_TXRCMP;
930
931 if (interrupts & ISR_RXRCMP)
932 regs.isr &= ~ISR_RXRCMP;
933
934 //ISR_DPERR not implemented
935 //ISR_SSERR not implemented
936 //ISR_RMABT not implemented
937 //ISR_RXSOVR not implemented
938 //ISR_HIBINT not implemented
939 //ISR_PHY not implemented
940 //ISR_PME not implemented
941
942 if (interrupts & ISR_SWI)
943 regs.isr &= ~ISR_SWI;
944
945 //ISR_MIB not implemented
946 //ISR_TXURN not implemented
947
948 if (interrupts & ISR_TXIDLE)
949 regs.isr &= ~ISR_TXIDLE;
950
951 if (interrupts & ISR_TXERR)
952 regs.isr &= ~ISR_TXERR;
953
954 if (interrupts & ISR_TXDESC)
955 regs.isr &= ~ISR_TXDESC;
956
957 if (interrupts & ISR_TXOK)
958 regs.isr &= ~ISR_TXOK;
959
960 if (interrupts & ISR_RXORN)
961 regs.isr &= ~ISR_RXORN;
962
963 if (interrupts & ISR_RXIDLE)
964 regs.isr &= ~ISR_RXIDLE;
965
966 //ISR_RXEARLY not implemented
967
968 if (interrupts & ISR_RXERR)
969 regs.isr &= ~ISR_RXERR;
970
971 if (interrupts & ISR_RXDESC)
972 regs.isr &= ~ISR_RXDESC;
973
974 if (interrupts & ISR_RXOK)
975 regs.isr &= ~ISR_RXOK;
976
977 if (!(regs.isr & regs.imr))
978 cpuIntrClear();
979
980 DPRINTF(EthernetIntr, "**interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
981 interrupts, regs.isr, regs.imr);
982 }
983
984 void
985 NSGigE::devIntrChangeMask()
986 {
987 DPRINTF(EthernetIntr, "interrupt mask changed\n");
988
989 if (regs.isr & regs.imr)
990 cpuIntrPost(curTick);
991 else
992 cpuIntrClear();
993 }
994
/**
 * Schedule delivery of an interrupt to the CPU at tick @p when.
 * A request later than an already-scheduled interrupt is absorbed by
 * it; an earlier request replaces the scheduled event.
 */
void
NSGigE::cpuIntrPost(Tick when)
{
    // If the interrupt you want to post is later than an interrupt
    // already scheduled, just let it post in the coming one and don't
    // schedule another.
    // HOWEVER, must be sure that the scheduled intrTick is in the
    // future (this was formerly the source of a bug)
    assert((intrTick >= curTick) || (intrTick == 0));
    if (when > intrTick && intrTick != 0)
        return;

    intrTick = when;

    // squash any in-flight event; it is replaced below
    if (intrEvent) {
        intrEvent->squash();
        intrEvent = 0;
    }

    if (when < curTick) {
        // requested tick already passed: deliver right now
        cpuInterrupt();
    } else {
        DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
                intrTick);
        intrEvent = new IntrEvent(this, true);
        intrEvent->schedule(intrTick);
    }
}
1023
/**
 * Actually assert the interrupt to the CPU (via the Tsunami cchip),
 * unless one is already pending or the scheduled time has not yet
 * arrived.
 */
void
NSGigE::cpuInterrupt()
{
    // Don't send an interrupt if there's already one
    if (cpuPendingIntr) {
        DPRINTF(EthernetIntr,
                "would send an interrupt now, but there's already pending\n");
        intrTick = 0;
        return;
    }
    // Don't send an interrupt if it's supposed to be delayed
    if (intrTick > curTick) {
        DPRINTF(EthernetIntr, "an interrupt is scheduled for %d, wait til then\n",
                intrTick);
        return;
    }

    // Whether or not there's a pending interrupt, we don't care about
    // it anymore
    intrEvent = 0;
    intrTick = 0;

    // Send interrupt
    cpuPendingIntr = true;
    /** @todo rework the intctrl to be tsunami ok */
    //intctrl->post(TheISA::INTLEVEL_IRQ1, TheISA::INTINDEX_ETHERNET);
    DPRINTF(EthernetIntr, "Posting interrupts to cchip!\n");
    tsunami->cchip->postDRIR(configData->config.hdr.pci0.interruptLine);
}
1053
1054 void
1055 NSGigE::cpuIntrClear()
1056 {
1057 if (cpuPendingIntr) {
1058 cpuPendingIntr = false;
1059 /** @todo rework the intctrl to be tsunami ok */
1060 //intctrl->clear(TheISA::INTLEVEL_IRQ1, TheISA::INTINDEX_ETHERNET);
1061 DPRINTF(EthernetIntr, "clearing all interrupts from cchip\n");
1062 tsunami->cchip->clearDRIR(configData->config.hdr.pci0.interruptLine);
1063 }
1064 }
1065
// True while an interrupt is asserted to the CPU and not yet cleared.
bool
NSGigE::cpuIntrPending() const
{ return cpuPendingIntr; }
1069
/**
 * Return the transmit side to its post-reset state: empty FIFO, idle
 * state machine, transmitter disabled.  Asserts that no descriptor
 * processing or DMA is in flight.
 */
void
NSGigE::txReset()
{

    DPRINTF(Ethernet, "transmit reset\n");

    CTDD = false;
    txFifoAvail = MAX_TX_FIFO_SIZE;
    txHalt = false;
    txFragPtr = 0;
    assert(txDescCnt == 0);
    txFifo.clear();
    regs.command &= ~CR_TXE;  // transmitter disabled after reset
    txState = txIdle;
    assert(txDmaState == dmaIdle);
}
1086
/**
 * Return the receive side to its post-reset state: empty FIFO, idle
 * state machine, receiver disabled.  Asserts that no packet or DMA is
 * in flight.
 */
void
NSGigE::rxReset()
{
    DPRINTF(Ethernet, "receive reset\n");

    CRDD = false;
    assert(rxPktBytes == 0);
    rxFifoCnt = 0;
    rxHalt = false;
    rxFragPtr = 0;
    assert(rxDescCnt == 0);
    assert(rxDmaState == dmaIdle);
    rxFifo.clear();
    regs.command &= ~CR_RXE;  // receiver disabled after reset
    rxState = rxIdle;
}
1103
/**
 * Reset the register file to its power-on values and drop all
 * receive-filter state.  The non-zero values are presumably the
 * hardware defaults from the DP83820 spec sheet — TODO confirm.
 */
void NSGigE::regsReset()
{
    memset(&regs, 0, sizeof(regs));
    regs.config = 0x80000000;
    regs.mear = 0x12;
    regs.isr = 0x00608000;
    regs.txcfg = 0x120;
    regs.rxcfg = 0x4;
    regs.srr = 0x0103;
    regs.mibc = 0x2;
    regs.vdr = 0x81;
    regs.tesr = 0xc000;

    // receive filter defaults: everything off
    extstsEnable = false;
    acceptBroadcast = false;
    acceptMulticast = false;
    acceptUnicast = false;
    acceptPerfect = false;
    acceptArp = false;
}
1124
/**
 * Perform the pending rx DMA read immediately: copy rxDmaLen bytes
 * from physical memory at rxDmaAddr into rxDmaData, then mark the
 * rx DMA engine idle.
 */
void
NSGigE::rxDmaReadCopy()
{
    assert(rxDmaState == dmaReading);

    memcpy(rxDmaData, physmem->dma_addr(rxDmaAddr, rxDmaLen), rxDmaLen);
    rxDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
            rxDmaAddr, rxDmaLen);
    DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
}
1137
/**
 * Start the rx DMA read described by rxDmaAddr/rxDmaData/rxDmaLen.
 *
 * @return true if the caller must wait for completion (either the bus
 *         interface or rxDmaReadEvent will finish the transfer later),
 *         false if the copy already completed inline.
 */
bool
NSGigE::doRxDmaRead()
{
    assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
    rxDmaState = dmaReading;

    // model bus contention unless this transfer is marked free
    if (dmaInterface && !rxDmaFree) {
        if (dmaInterface->busy())
            rxDmaState = dmaReadWaiting;  // retried when the bus frees
        else
            dmaInterface->doDMA(Read, rxDmaAddr, rxDmaLen, curTick,
                                &rxDmaReadEvent);
        return true;
    }

    // no modelled latency: complete the copy right now
    if (dmaReadDelay == 0 && dmaReadFactor == 0) {
        rxDmaReadCopy();
        return false;
    }

    // fixed delay plus a per-64-byte-chunk factor
    Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
    Tick start = curTick + dmaReadDelay + factor;
    rxDmaReadEvent.schedule(start);
    return true;
}
1163
1164 void
1165 NSGigE::rxDmaReadDone()
1166 {
1167 assert(rxDmaState == dmaReading);
1168 rxDmaReadCopy();
1169
1170 // If the transmit state machine has a pending DMA, let it go first
1171 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1172 txKick();
1173
1174 rxKick();
1175 }
1176
/**
 * Perform the pending rx DMA write immediately: copy rxDmaLen bytes
 * from rxDmaData into physical memory at rxDmaAddr, then mark the
 * rx DMA engine idle.
 */
void
NSGigE::rxDmaWriteCopy()
{
    assert(rxDmaState == dmaWriting);

    memcpy(physmem->dma_addr(rxDmaAddr, rxDmaLen), rxDmaData, rxDmaLen);
    rxDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
            rxDmaAddr, rxDmaLen);
    DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
}
1189
/**
 * Start the rx DMA write described by rxDmaAddr/rxDmaData/rxDmaLen.
 *
 * @return true if the caller must wait for completion (bus interface
 *         or rxDmaWriteEvent), false if the copy completed inline.
 */
bool
NSGigE::doRxDmaWrite()
{
    assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
    rxDmaState = dmaWriting;

    // model bus contention unless this transfer is marked free
    if (dmaInterface && !rxDmaFree) {
        if (dmaInterface->busy())
            rxDmaState = dmaWriteWaiting;  // retried when the bus frees
        else
            dmaInterface->doDMA(WriteInvalidate, rxDmaAddr, rxDmaLen, curTick,
                                &rxDmaWriteEvent);
        return true;
    }

    // no modelled latency: complete the copy right now
    if (dmaWriteDelay == 0 && dmaWriteFactor == 0) {
        rxDmaWriteCopy();
        return false;
    }

    // fixed delay plus a per-64-byte-chunk factor
    Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
    Tick start = curTick + dmaWriteDelay + factor;
    rxDmaWriteEvent.schedule(start);
    return true;
}
1215
1216 void
1217 NSGigE::rxDmaWriteDone()
1218 {
1219 assert(rxDmaState == dmaWriting);
1220 rxDmaWriteCopy();
1221
1222 // If the transmit state machine has a pending DMA, let it go first
1223 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1224 txKick();
1225
1226 rxKick();
1227 }
1228
1229 void
1230 NSGigE::rxKick()
1231 {
1232 DPRINTF(EthernetSM, "receive kick state=%s (rxBuf.size=%d)\n",
1233 NsRxStateStrings[rxState], rxFifo.size());
1234
1235 if (rxKickTick > curTick) {
1236 DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
1237 rxKickTick);
1238 return;
1239 }
1240
1241 next:
1242 switch(rxDmaState) {
1243 case dmaReadWaiting:
1244 if (doRxDmaRead())
1245 goto exit;
1246 break;
1247 case dmaWriteWaiting:
1248 if (doRxDmaWrite())
1249 goto exit;
1250 break;
1251 default:
1252 break;
1253 }
1254
1255 // see state machine from spec for details
1256 // the way this works is, if you finish work on one state and can go directly to
1257 // another, you do that through jumping to the label "next". however, if you have
1258 // intermediate work, like DMA so that you can't go to the next state yet, you go to
1259 // exit and exit the loop. however, when the DMA is done it will trigger an
1260 // event and come back to this loop.
1261 switch (rxState) {
1262 case rxIdle:
1263 if (!regs.command & CR_RXE) {
1264 DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n");
1265 goto exit;
1266 }
1267
1268 if (CRDD) {
1269 rxState = rxDescRefr;
1270
1271 rxDmaAddr = regs.rxdp & 0x3fffffff;
1272 rxDmaData = &rxDescCache + offsetof(ns_desc, link);
1273 rxDmaLen = sizeof(rxDescCache.link);
1274 rxDmaFree = dmaDescFree;
1275
1276 if (doRxDmaRead())
1277 goto exit;
1278 } else {
1279 rxState = rxDescRead;
1280
1281 rxDmaAddr = regs.rxdp & 0x3fffffff;
1282 rxDmaData = &rxDescCache;
1283 rxDmaLen = sizeof(ns_desc);
1284 rxDmaFree = dmaDescFree;
1285
1286 if (doRxDmaRead())
1287 goto exit;
1288 }
1289 break;
1290
1291 case rxDescRefr:
1292 if (rxDmaState != dmaIdle)
1293 goto exit;
1294
1295 rxState = rxAdvance;
1296 break;
1297
1298 case rxDescRead:
1299 if (rxDmaState != dmaIdle)
1300 goto exit;
1301
1302 DPRINTF(EthernetDesc,
1303 "rxDescCache:\n\tlink=%08x\n\tbufptr=%08x\n\tcmdsts=%08x\n\textsts=%08x\n"
1304 ,rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts,
1305 rxDescCache.extsts);
1306
1307 if (rxDescCache.cmdsts & CMDSTS_OWN) {
1308 rxState = rxIdle;
1309 } else {
1310 rxState = rxFifoBlock;
1311 rxFragPtr = rxDescCache.bufptr;
1312 rxDescCnt = rxDescCache.cmdsts & CMDSTS_LEN_MASK;
1313 }
1314 break;
1315
1316 case rxFifoBlock:
1317 if (!rxPacket) {
1318 /**
1319 * @todo in reality, we should be able to start processing
1320 * the packet as it arrives, and not have to wait for the
1321 * full packet ot be in the receive fifo.
1322 */
1323 if (rxFifo.empty())
1324 goto exit;
1325
1326 DPRINTF(EthernetSM, "\n\n*****processing receive of new packet\n");
1327
1328 // If we don't have a packet, grab a new one from the fifo.
1329 rxPacket = rxFifo.front();
1330 rxPktBytes = rxPacket->length;
1331 rxPacketBufPtr = rxPacket->data;
1332
1333 if (DTRACE(Ethernet)) {
1334 if (rxPacket->isIpPkt()) {
1335 ip_header *ip = rxPacket->getIpHdr();
1336 DPRINTF(Ethernet, "ID is %d\n", reverseEnd16(ip->ID));
1337 if (rxPacket->isTcpPkt()) {
1338 tcp_header *tcp = rxPacket->getTcpHdr(ip);
1339 DPRINTF(Ethernet, "Src Port = %d, Dest Port = %d\n",
1340 reverseEnd16(tcp->src_port_num),
1341 reverseEnd16(tcp->dest_port_num));
1342 }
1343 }
1344 }
1345
1346 // sanity check - i think the driver behaves like this
1347 assert(rxDescCnt >= rxPktBytes);
1348
1349 // Must clear the value before popping to decrement the
1350 // reference count
1351 rxFifo.front() = NULL;
1352 rxFifo.pop_front();
1353 rxFifoCnt -= rxPacket->length;
1354 }
1355
1356
1357 // dont' need the && rxDescCnt > 0 if driver sanity check above holds
1358 if (rxPktBytes > 0) {
1359 rxState = rxFragWrite;
1360 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity check holds
1361 rxXferLen = rxPktBytes;
1362
1363 rxDmaAddr = rxFragPtr & 0x3fffffff;
1364 rxDmaData = rxPacketBufPtr;
1365 rxDmaLen = rxXferLen;
1366 rxDmaFree = dmaDataFree;
1367
1368 if (doRxDmaWrite())
1369 goto exit;
1370
1371 } else {
1372 rxState = rxDescWrite;
1373
1374 //if (rxPktBytes == 0) { /* packet is done */
1375 assert(rxPktBytes == 0);
1376 DPRINTF(EthernetSM, "done with receiving packet\n");
1377
1378 rxDescCache.cmdsts |= CMDSTS_OWN;
1379 rxDescCache.cmdsts &= ~CMDSTS_MORE;
1380 rxDescCache.cmdsts |= CMDSTS_OK;
1381 rxDescCache.cmdsts &= 0xffff0000;
1382 rxDescCache.cmdsts += rxPacket->length; //i.e. set CMDSTS_SIZE
1383
1384 #if 0
1385 /* all the driver uses these are for its own stats keeping
1386 which we don't care about, aren't necessary for functionality
1387 and doing this would just slow us down. if they end up using
1388 this in a later version for functional purposes, just undef
1389 */
1390 if (rxFilterEnable) {
1391 rxDescCache.cmdsts &= ~CMDSTS_DEST_MASK;
1392 if (rxFifo.front()->IsUnicast())
1393 rxDescCache.cmdsts |= CMDSTS_DEST_SELF;
1394 if (rxFifo.front()->IsMulticast())
1395 rxDescCache.cmdsts |= CMDSTS_DEST_MULTI;
1396 if (rxFifo.front()->IsBroadcast())
1397 rxDescCache.cmdsts |= CMDSTS_DEST_MASK;
1398 }
1399 #endif
1400
1401 if (rxPacket->isIpPkt() && extstsEnable) {
1402 rxDescCache.extsts |= EXTSTS_IPPKT;
1403 if (!ipChecksum(rxPacket, false)) {
1404 DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
1405 rxDescCache.extsts |= EXTSTS_IPERR;
1406 }
1407 if (rxPacket->isTcpPkt()) {
1408 rxDescCache.extsts |= EXTSTS_TCPPKT;
1409 if (!tcpChecksum(rxPacket, false)) {
1410 DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
1411 rxDescCache.extsts |= EXTSTS_TCPERR;
1412 }
1413 } else if (rxPacket->isUdpPkt()) {
1414 rxDescCache.extsts |= EXTSTS_UDPPKT;
1415 if (!udpChecksum(rxPacket, false)) {
1416 DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
1417 rxDescCache.extsts |= EXTSTS_UDPERR;
1418 }
1419 }
1420 }
1421 rxPacket = 0;
1422
1423 /* the driver seems to always receive into desc buffers
1424 of size 1514, so you never have a pkt that is split
1425 into multiple descriptors on the receive side, so
1426 i don't implement that case, hence the assert above.
1427 */
1428
1429 DPRINTF(EthernetDesc, "rxDesc writeback:\n\tcmdsts=%08x\n\textsts=%08x\n",
1430 rxDescCache.cmdsts, rxDescCache.extsts);
1431
1432 rxDmaAddr = (regs.rxdp + offsetof(ns_desc, cmdsts)) & 0x3fffffff;
1433 rxDmaData = &(rxDescCache.cmdsts);
1434 rxDmaLen = sizeof(rxDescCache.cmdsts) + sizeof(rxDescCache.extsts);
1435 rxDmaFree = dmaDescFree;
1436
1437 if (doRxDmaWrite())
1438 goto exit;
1439 }
1440 break;
1441
1442 case rxFragWrite:
1443 if (rxDmaState != dmaIdle)
1444 goto exit;
1445
1446 rxPacketBufPtr += rxXferLen;
1447 rxFragPtr += rxXferLen;
1448 rxPktBytes -= rxXferLen;
1449
1450 rxState = rxFifoBlock;
1451 break;
1452
1453 case rxDescWrite:
1454 if (rxDmaState != dmaIdle)
1455 goto exit;
1456
1457 assert(rxDescCache.cmdsts & CMDSTS_OWN);
1458
1459 assert(rxPacket == 0);
1460 devIntrPost(ISR_RXOK);
1461
1462 if (rxDescCache.cmdsts & CMDSTS_INTR)
1463 devIntrPost(ISR_RXDESC);
1464
1465 if (rxHalt) {
1466 DPRINTF(EthernetSM, "Halting the RX state machine\n");
1467 rxState = rxIdle;
1468 rxHalt = false;
1469 } else
1470 rxState = rxAdvance;
1471 break;
1472
1473 case rxAdvance:
1474 if (rxDescCache.link == 0) {
1475 rxState = rxIdle;
1476 return;
1477 } else {
1478 rxState = rxDescRead;
1479 regs.rxdp = rxDescCache.link;
1480 CRDD = false;
1481
1482 rxDmaAddr = regs.rxdp & 0x3fffffff;
1483 rxDmaData = &rxDescCache;
1484 rxDmaLen = sizeof(ns_desc);
1485 rxDmaFree = dmaDescFree;
1486
1487 if (doRxDmaRead())
1488 goto exit;
1489 }
1490 break;
1491
1492 default:
1493 panic("Invalid rxState!");
1494 }
1495
1496
1497 DPRINTF(EthernetSM, "entering next rx state = %s\n",
1498 NsRxStateStrings[rxState]);
1499
1500 if (rxState == rxIdle) {
1501 regs.command &= ~CR_RXE;
1502 devIntrPost(ISR_RXIDLE);
1503 return;
1504 }
1505
1506 goto next;
1507
1508 exit:
1509 /**
1510 * @todo do we want to schedule a future kick?
1511 */
1512 DPRINTF(EthernetSM, "rx state machine exited state=%s\n",
1513 NsRxStateStrings[rxState]);
1514 }
1515
void
NSGigE::transmit()
{
    // Try to put the packet at the head of txFifo onto the wire.  On
    // success the packet is popped, its fifo space reclaimed, and
    // ISR_TXOK posted; on failure the fifo is left untouched.  If
    // packets remain afterwards, a retry is scheduled via txEvent.
    if (txFifo.empty()) {
        DPRINTF(Ethernet, "nothing to transmit\n");
        return;
    }

    DPRINTF(Ethernet, "\n\nAttempt Pkt Transmit: txFifo length = %d\n",
            MAX_TX_FIFO_SIZE - txFifoAvail);
    if (interface->sendPacket(txFifo.front())) {
        // Debug-only peek at the outgoing IP/TCP headers.
        if (DTRACE(Ethernet)) {
            if (txFifo.front()->isIpPkt()) {
                ip_header *ip = txFifo.front()->getIpHdr();
                DPRINTF(Ethernet, "ID is %d\n", reverseEnd16(ip->ID));
                if (txFifo.front()->isTcpPkt()) {
                    tcp_header *tcp = txFifo.front()->getTcpHdr(ip);
                    DPRINTF(Ethernet, "Src Port = %d, Dest Port = %d\n",
                            reverseEnd16(tcp->src_port_num),
                            reverseEnd16(tcp->dest_port_num));
                }
            }
        }

        DDUMP(Ethernet, txFifo.front()->data, txFifo.front()->length);
        txBytes += txFifo.front()->length;
        txPackets++;

        // Reclaim the fifo space this packet occupied.
        txFifoAvail += txFifo.front()->length;

        DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n", txFifoAvail);
        // Null the slot before popping so the packet's reference count
        // is dropped (the fifo holds counted pointers).
        txFifo.front() = NULL;
        txFifo.pop_front();

        /* normally do a writeback of the descriptor here, and ONLY after that is
           done, send this interrupt.  but since our stuff never actually fails,
           just do this interrupt here, otherwise the code has to stray from this
           nice format.  besides, it's functionally the same.
        */
        devIntrPost(ISR_TXOK);
    } else
        DPRINTF(Ethernet, "May need to rethink always sending the descriptors back?\n");

   if (!txFifo.empty() && !txEvent.scheduled()) {
       DPRINTF(Ethernet, "reschedule transmit\n");
       txEvent.schedule(curTick + 1000);
   }
}
1564
void
NSGigE::txDmaReadCopy()
{
    // Complete a pending tx-side DMA read: copy txDmaLen bytes from
    // guest physical memory at txDmaAddr into the txDmaData buffer,
    // then return the DMA engine to idle.
    assert(txDmaState == dmaReading);

    memcpy(txDmaData, physmem->dma_addr(txDmaAddr, txDmaLen), txDmaLen);
    txDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
            txDmaAddr, txDmaLen);
    DDUMP(EthernetDMA, txDmaData, txDmaLen);
}
1577
1578 bool
1579 NSGigE::doTxDmaRead()
1580 {
1581 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
1582 txDmaState = dmaReading;
1583
1584 if (dmaInterface && !txDmaFree) {
1585 if (dmaInterface->busy())
1586 txDmaState = dmaReadWaiting;
1587 else
1588 dmaInterface->doDMA(Read, txDmaAddr, txDmaLen, curTick,
1589 &txDmaReadEvent);
1590 return true;
1591 }
1592
1593 if (dmaReadDelay == 0 && dmaReadFactor == 0.0) {
1594 txDmaReadCopy();
1595 return false;
1596 }
1597
1598 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
1599 Tick start = curTick + dmaReadDelay + factor;
1600 txDmaReadEvent.schedule(start);
1601 return true;
1602 }
1603
1604 void
1605 NSGigE::txDmaReadDone()
1606 {
1607 assert(txDmaState == dmaReading);
1608 txDmaReadCopy();
1609
1610 // If the receive state machine has a pending DMA, let it go first
1611 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1612 rxKick();
1613
1614 txKick();
1615 }
1616
void
NSGigE::txDmaWriteCopy()
{
    // Complete a pending tx-side DMA write: copy txDmaLen bytes from
    // the txDmaData buffer into guest physical memory at txDmaAddr,
    // then return the DMA engine to idle.
    assert(txDmaState == dmaWriting);

    memcpy(physmem->dma_addr(txDmaAddr, txDmaLen), txDmaData, txDmaLen);
    txDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
            txDmaAddr, txDmaLen);
    DDUMP(EthernetDMA, txDmaData, txDmaLen);
}
1629
1630 bool
1631 NSGigE::doTxDmaWrite()
1632 {
1633 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
1634 txDmaState = dmaWriting;
1635
1636 if (dmaInterface && !txDmaFree) {
1637 if (dmaInterface->busy())
1638 txDmaState = dmaWriteWaiting;
1639 else
1640 dmaInterface->doDMA(WriteInvalidate, txDmaAddr, txDmaLen, curTick,
1641 &txDmaWriteEvent);
1642 return true;
1643 }
1644
1645 if (dmaWriteDelay == 0 && dmaWriteFactor == 0.0) {
1646 txDmaWriteCopy();
1647 return false;
1648 }
1649
1650 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
1651 Tick start = curTick + dmaWriteDelay + factor;
1652 txDmaWriteEvent.schedule(start);
1653 return true;
1654 }
1655
1656 void
1657 NSGigE::txDmaWriteDone()
1658 {
1659 assert(txDmaState == dmaWriting);
1660 txDmaWriteCopy();
1661
1662 // If the receive state machine has a pending DMA, let it go first
1663 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1664 rxKick();
1665
1666 txKick();
1667 }
1668
1669 void
1670 NSGigE::txKick()
1671 {
1672 DPRINTF(EthernetSM, "transmit kick state=%s\n", NsTxStateStrings[txState]);
1673
1674 if (txKickTick > curTick) {
1675 DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
1676 txKickTick);
1677
1678 return;
1679 }
1680
1681 next:
1682 switch(txDmaState) {
1683 case dmaReadWaiting:
1684 if (doTxDmaRead())
1685 goto exit;
1686 break;
1687 case dmaWriteWaiting:
1688 if (doTxDmaWrite())
1689 goto exit;
1690 break;
1691 default:
1692 break;
1693 }
1694
1695 switch (txState) {
1696 case txIdle:
1697 if (!regs.command & CR_TXE) {
1698 DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n");
1699 goto exit;
1700 }
1701
1702 if (CTDD) {
1703 txState = txDescRefr;
1704
1705 txDmaAddr = regs.txdp & 0x3fffffff;
1706 txDmaData = &txDescCache + offsetof(ns_desc, link);
1707 txDmaLen = sizeof(txDescCache.link);
1708 txDmaFree = dmaDescFree;
1709
1710 if (doTxDmaRead())
1711 goto exit;
1712
1713 } else {
1714 txState = txDescRead;
1715
1716 txDmaAddr = regs.txdp & 0x3fffffff;
1717 txDmaData = &txDescCache;
1718 txDmaLen = sizeof(ns_desc);
1719 txDmaFree = dmaDescFree;
1720
1721 if (doTxDmaRead())
1722 goto exit;
1723 }
1724 break;
1725
1726 case txDescRefr:
1727 if (txDmaState != dmaIdle)
1728 goto exit;
1729
1730 txState = txAdvance;
1731 break;
1732
1733 case txDescRead:
1734 if (txDmaState != dmaIdle)
1735 goto exit;
1736
1737 DPRINTF(EthernetDesc,
1738 "txDescCache data:\n\tlink=%08x\n\tbufptr=%08x\n\tcmdsts=%08x\n\textsts=%08x\n"
1739 ,txDescCache.link, txDescCache.bufptr, txDescCache.cmdsts,
1740 txDescCache.extsts);
1741
1742 if (txDescCache.cmdsts & CMDSTS_OWN) {
1743 txState = txFifoBlock;
1744 txFragPtr = txDescCache.bufptr;
1745 txDescCnt = txDescCache.cmdsts & CMDSTS_LEN_MASK;
1746 } else {
1747 txState = txIdle;
1748 }
1749 break;
1750
1751 case txFifoBlock:
1752 if (!txPacket) {
1753 DPRINTF(EthernetSM, "\n\n*****starting the tx of a new packet\n");
1754 txPacket = new EtherPacket;
1755 txPacket->data = new uint8_t[16384];
1756 txPacketBufPtr = txPacket->data;
1757 }
1758
1759 if (txDescCnt == 0) {
1760 DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n");
1761 if (txDescCache.cmdsts & CMDSTS_MORE) {
1762 DPRINTF(EthernetSM, "there are more descriptors to come\n");
1763 txState = txDescWrite;
1764
1765 txDescCache.cmdsts &= ~CMDSTS_OWN;
1766
1767 txDmaAddr = (regs.txdp + offsetof(ns_desc, cmdsts)) & 0x3fffffff;
1768 txDmaData = &(txDescCache.cmdsts);
1769 txDmaLen = sizeof(txDescCache.cmdsts);
1770 txDmaFree = dmaDescFree;
1771
1772 if (doTxDmaWrite())
1773 goto exit;
1774
1775 } else { /* this packet is totally done */
1776 DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
1777 /* deal with the the packet that just finished */
1778 if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
1779 if (txDescCache.extsts & EXTSTS_UDPPKT) {
1780 udpChecksum(txPacket, true);
1781 } else if (txDescCache.extsts & EXTSTS_TCPPKT) {
1782 tcpChecksum(txPacket, true);
1783 }
1784 if (txDescCache.extsts & EXTSTS_IPPKT) {
1785 ipChecksum(txPacket, true);
1786 }
1787 }
1788
1789 txPacket->length = txPacketBufPtr - txPacket->data;
1790 /* this is just because the receive can't handle a packet bigger
1791 want to make sure */
1792 assert(txPacket->length <= 1514);
1793 txFifo.push_back(txPacket);
1794
1795 /* this following section is not to spec, but functionally shouldn't
1796 be any different. normally, the chip will wait til the transmit has
1797 occurred before writing back the descriptor because it has to wait
1798 to see that it was successfully transmitted to decide whether to set
1799 CMDSTS_OK or not. however, in the simulator since it is always
1800 successfully transmitted, and writing it exactly to spec would
1801 complicate the code, we just do it here
1802 */
1803
1804 txDescCache.cmdsts &= ~CMDSTS_OWN;
1805 txDescCache.cmdsts |= CMDSTS_OK;
1806
1807 DPRINTF(EthernetDesc,
1808 "txDesc writeback:\n\tcmdsts=%08x\n\textsts=%08x\n",
1809 txDescCache.cmdsts, txDescCache.extsts);
1810
1811 txDmaAddr = (regs.txdp + offsetof(ns_desc, cmdsts)) & 0x3fffffff;
1812 txDmaData = &(txDescCache.cmdsts);
1813 txDmaLen = sizeof(txDescCache.cmdsts) + sizeof(txDescCache.extsts);
1814 txDmaFree = dmaDescFree;
1815
1816 if (doTxDmaWrite())
1817 goto exit;
1818
1819 transmit();
1820
1821 txPacket = 0;
1822
1823 if (txHalt) {
1824 DPRINTF(EthernetSM, "halting TX state machine\n");
1825 txState = txIdle;
1826 txHalt = false;
1827 } else
1828 txState = txAdvance;
1829 }
1830 } else {
1831 DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
1832 txState = txFragRead;
1833
1834 /* The number of bytes transferred is either whatever is left
1835 in the descriptor (txDescCnt), or if there is not enough
1836 room in the fifo, just whatever room is left in the fifo
1837 */
1838 txXferLen = min<uint32_t>(txDescCnt, txFifoAvail);
1839
1840 txDmaAddr = txFragPtr & 0x3fffffff;
1841 txDmaData = txPacketBufPtr;
1842 txDmaLen = txXferLen;
1843 txDmaFree = dmaDataFree;
1844
1845 if (doTxDmaRead())
1846 goto exit;
1847 }
1848 break;
1849
1850 case txFragRead:
1851 if (txDmaState != dmaIdle)
1852 goto exit;
1853
1854 txPacketBufPtr += txXferLen;
1855 txFragPtr += txXferLen;
1856 txDescCnt -= txXferLen;
1857 txFifoAvail -= txXferLen;
1858
1859 txState = txFifoBlock;
1860 break;
1861
1862 case txDescWrite:
1863 if (txDmaState != dmaIdle)
1864 goto exit;
1865
1866 if (txDescCache.cmdsts & CMDSTS_INTR) {
1867 devIntrPost(ISR_TXDESC);
1868 }
1869
1870 txState = txAdvance;
1871 break;
1872
1873 case txAdvance:
1874 if (txDescCache.link == 0) {
1875 txState = txIdle;
1876 } else {
1877 txState = txDescRead;
1878 regs.txdp = txDescCache.link;
1879 CTDD = false;
1880
1881 txDmaAddr = txDescCache.link & 0x3fffffff;
1882 txDmaData = &txDescCache;
1883 txDmaLen = sizeof(ns_desc);
1884 txDmaFree = dmaDescFree;
1885
1886 if (doTxDmaRead())
1887 goto exit;
1888 }
1889 break;
1890
1891 default:
1892 panic("invalid state");
1893 }
1894
1895 DPRINTF(EthernetSM, "entering next tx state=%s\n",
1896 NsTxStateStrings[txState]);
1897
1898 if (txState == txIdle) {
1899 regs.command &= ~CR_TXE;
1900 devIntrPost(ISR_TXIDLE);
1901 return;
1902 }
1903
1904 goto next;
1905
1906 exit:
1907 /**
1908 * @todo do we want to schedule a future kick?
1909 */
1910 DPRINTF(EthernetSM, "tx state machine exited state=%s\n",
1911 NsTxStateStrings[txState]);
1912 }
1913
1914 void
1915 NSGigE::transferDone()
1916 {
1917 if (txFifo.empty())
1918 return;
1919
1920 if (txEvent.scheduled())
1921 txEvent.reschedule(curTick + 1);
1922 else
1923 txEvent.schedule(curTick + 1);
1924 }
1925
1926 bool
1927 NSGigE::rxFilter(PacketPtr packet)
1928 {
1929 bool drop = true;
1930 string type;
1931
1932 if (packet->IsUnicast()) {
1933 type = "unicast";
1934
1935 // If we're accepting all unicast addresses
1936 if (acceptUnicast)
1937 drop = false;
1938
1939 // If we make a perfect match
1940 if ((acceptPerfect)
1941 && (memcmp(rom.perfectMatch, packet->data, sizeof(rom.perfectMatch)) == 0))
1942 drop = false;
1943
1944 eth_header *eth = (eth_header *) packet->data;
1945 if ((acceptArp) && (eth->type == 0x608))
1946 drop = false;
1947
1948 } else if (packet->IsBroadcast()) {
1949 type = "broadcast";
1950
1951 // if we're accepting broadcasts
1952 if (acceptBroadcast)
1953 drop = false;
1954
1955 } else if (packet->IsMulticast()) {
1956 type = "multicast";
1957
1958 // if we're accepting all multicasts
1959 if (acceptMulticast)
1960 drop = false;
1961
1962 } else {
1963 type = "unknown";
1964
1965 // oh well, punt on this one
1966 }
1967
1968 if (drop) {
1969 DPRINTF(Ethernet, "rxFilter drop\n");
1970 DDUMP(EthernetData, packet->data, packet->length);
1971 }
1972
1973 return drop;
1974 }
1975
bool
NSGigE::recvPacket(PacketPtr packet)
{
    // Entry point for packets arriving from the wire.  Returns true if
    // the packet was consumed (accepted into the fifo or deliberately
    // dropped), false if the link must retry because the rx fifo is full.
    rxBytes += packet->length;
    rxPackets++;

    DPRINTF(Ethernet, "\n\nReceiving packet from wire, rxFifoAvail = %d\n", MAX_RX_FIFO_SIZE - rxFifoCnt);

    // Receiver disabled: swallow the packet silently.
    if (rxState == rxIdle) {
        DPRINTF(Ethernet, "receive disabled...packet dropped\n");
        interface->recvDone();
        return true;
    }

    // Address filter rejected it: swallow the packet.
    if (rxFilterEnable && rxFilter(packet)) {
        DPRINTF(Ethernet, "packet filtered...dropped\n");
        interface->recvDone();
        return true;
    }

    // Fifo overrun: post ISR_RXORN and return false so the link
    // retries later (note: no recvDone() on this path).
    if ((rxFifoCnt + packet->length) >= MAX_RX_FIFO_SIZE) {
        DPRINTF(Ethernet,
                "packet will not fit in receive buffer...packet dropped\n");
        devIntrPost(ISR_RXORN);
        return false;
    }

    rxFifo.push_back(packet);
    rxFifoCnt += packet->length;
    interface->recvDone();

    // Poke the receive state machine to start draining the fifo.
    rxKick();
    return true;
}
2010
2011 /**
2012 * does a udp checksum. if gen is true, then it generates it and puts it in the right place
2013 * else, it just checks what it calculates against the value in the header in packet
2014 */
2015 bool
2016 NSGigE::udpChecksum(PacketPtr packet, bool gen)
2017 {
2018 ip_header *ip = packet->getIpHdr();
2019 udp_header *hdr = packet->getUdpHdr(ip);
2020
2021 pseudo_header *pseudo = new pseudo_header;
2022
2023 pseudo->src_ip_addr = ip->src_ip_addr;
2024 pseudo->dest_ip_addr = ip->dest_ip_addr;
2025 pseudo->protocol = ip->protocol;
2026 pseudo->len = hdr->len;
2027
2028 uint16_t cksum = checksumCalc((uint16_t *) pseudo, (uint16_t *) hdr,
2029 (uint32_t) hdr->len);
2030
2031 delete pseudo;
2032 if (gen)
2033 hdr->chksum = cksum;
2034 else
2035 if (cksum != 0)
2036 return false;
2037
2038 return true;
2039 }
2040
2041 bool
2042 NSGigE::tcpChecksum(PacketPtr packet, bool gen)
2043 {
2044 ip_header *ip = packet->getIpHdr();
2045 tcp_header *hdr = packet->getTcpHdr(ip);
2046
2047 uint16_t cksum;
2048 pseudo_header *pseudo = new pseudo_header;
2049 if (!gen) {
2050 pseudo->src_ip_addr = ip->src_ip_addr;
2051 pseudo->dest_ip_addr = ip->dest_ip_addr;
2052 pseudo->protocol = reverseEnd16(ip->protocol);
2053 pseudo->len = reverseEnd16(reverseEnd16(ip->dgram_len) - (ip->vers_len & 0xf)*4);
2054
2055 cksum = checksumCalc((uint16_t *) pseudo, (uint16_t *) hdr,
2056 (uint32_t) reverseEnd16(pseudo->len));
2057 } else {
2058 pseudo->src_ip_addr = 0;
2059 pseudo->dest_ip_addr = 0;
2060 pseudo->protocol = hdr->chksum;
2061 pseudo->len = 0;
2062 hdr->chksum = 0;
2063 cksum = checksumCalc((uint16_t *) pseudo, (uint16_t *) hdr,
2064 (uint32_t) (reverseEnd16(ip->dgram_len) - (ip->vers_len & 0xf)*4));
2065 }
2066
2067 delete pseudo;
2068 if (gen)
2069 hdr->chksum = cksum;
2070 else
2071 if (cksum != 0)
2072 return false;
2073
2074 return true;
2075 }
2076
2077 bool
2078 NSGigE::ipChecksum(PacketPtr packet, bool gen)
2079 {
2080 ip_header *hdr = packet->getIpHdr();
2081
2082 uint16_t cksum = checksumCalc(NULL, (uint16_t *) hdr, (hdr->vers_len & 0xf)*4);
2083
2084 if (gen) {
2085 DPRINTF(EthernetCksum, "generated checksum: %#x\n", cksum);
2086 hdr->hdr_chksum = cksum;
2087 }
2088 else
2089 if (cksum != 0)
2090 return false;
2091
2092 return true;
2093 }
2094
2095 uint16_t
2096 NSGigE::checksumCalc(uint16_t *pseudo, uint16_t *buf, uint32_t len)
2097 {
2098 uint32_t sum = 0;
2099
2100 uint16_t last_pad = 0;
2101 if (len & 1) {
2102 last_pad = buf[len/2] & 0xff;
2103 len--;
2104 sum += last_pad;
2105 }
2106
2107 if (pseudo) {
2108 sum = pseudo[0] + pseudo[1] + pseudo[2] +
2109 pseudo[3] + pseudo[4] + pseudo[5];
2110 }
2111
2112 for (int i=0; i < (len/2); ++i) {
2113 sum += buf[i];
2114 }
2115
2116 while (sum >> 16)
2117 sum = (sum >> 16) + (sum & 0xffff);
2118
2119 return ~sum;
2120 }
2121
2122 //=====================================================================
2123 //
2124 //
void
NSGigE::serialize(ostream &os)
{
    // Checkpoint the device state.  NOTE: the order and names of the
    // SERIALIZE_* calls here must stay exactly in sync with
    // NSGigE::unserialize().
    // Serialize the PciDev base class
    PciDev::serialize(os);

    /*
     * Finalize any DMA events now.
     */
    // Any in-flight DMA copy is completed eagerly so no event state
    // needs to be checkpointed for it.
    if (rxDmaReadEvent.scheduled())
        rxDmaReadCopy();
    if (rxDmaWriteEvent.scheduled())
        rxDmaWriteCopy();
    if (txDmaReadEvent.scheduled())
        txDmaReadCopy();
    if (txDmaWriteEvent.scheduled())
        txDmaWriteCopy();

    /*
     * Serialize the device registers
     */
    SERIALIZE_SCALAR(regs.command);
    SERIALIZE_SCALAR(regs.config);
    SERIALIZE_SCALAR(regs.mear);
    SERIALIZE_SCALAR(regs.ptscr);
    SERIALIZE_SCALAR(regs.isr);
    SERIALIZE_SCALAR(regs.imr);
    SERIALIZE_SCALAR(regs.ier);
    SERIALIZE_SCALAR(regs.ihr);
    SERIALIZE_SCALAR(regs.txdp);
    SERIALIZE_SCALAR(regs.txdp_hi);
    SERIALIZE_SCALAR(regs.txcfg);
    SERIALIZE_SCALAR(regs.gpior);
    SERIALIZE_SCALAR(regs.rxdp);
    SERIALIZE_SCALAR(regs.rxdp_hi);
    SERIALIZE_SCALAR(regs.rxcfg);
    SERIALIZE_SCALAR(regs.pqcr);
    SERIALIZE_SCALAR(regs.wcsr);
    SERIALIZE_SCALAR(regs.pcr);
    SERIALIZE_SCALAR(regs.rfcr);
    SERIALIZE_SCALAR(regs.rfdr);
    SERIALIZE_SCALAR(regs.srr);
    SERIALIZE_SCALAR(regs.mibc);
    SERIALIZE_SCALAR(regs.vrcr);
    SERIALIZE_SCALAR(regs.vtcr);
    SERIALIZE_SCALAR(regs.vdr);
    SERIALIZE_SCALAR(regs.ccsr);
    SERIALIZE_SCALAR(regs.tbicr);
    SERIALIZE_SCALAR(regs.tbisr);
    SERIALIZE_SCALAR(regs.tanar);
    SERIALIZE_SCALAR(regs.tanlpar);
    SERIALIZE_SCALAR(regs.taner);
    SERIALIZE_SCALAR(regs.tesr);

    SERIALIZE_ARRAY(rom.perfectMatch, EADDR_LEN);

    SERIALIZE_SCALAR(ioEnable);

    /*
     * Serialize the data Fifos
     */
    // Each queued packet goes into its own named section
    // (<name>.txFifo<i> / <name>.rxFifo<i>).
    int txNumPkts = txFifo.size();
    SERIALIZE_SCALAR(txNumPkts);
    int i = 0;
    pktiter_t end = txFifo.end();
    for (pktiter_t p = txFifo.begin(); p != end; ++p) {
        nameOut(os, csprintf("%s.txFifo%d", name(), i++));
        (*p)->serialize(os);
    }

    int rxNumPkts = rxFifo.size();
    SERIALIZE_SCALAR(rxNumPkts);
    i = 0;
    end = rxFifo.end();
    for (pktiter_t p = rxFifo.begin(); p != end; ++p) {
        nameOut(os, csprintf("%s.rxFifo%d", name(), i++));
        (*p)->serialize(os);
    }

    /*
     * Serialize the various helper variables
     */
    // In-progress packets are stored along with the offset of the
    // working buffer pointer so the pointer can be rebuilt on restore.
    bool txPacketExists = txPacket;
    SERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        nameOut(os, csprintf("%s.txPacket", name()));
        txPacket->serialize(os);
        uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
        SERIALIZE_SCALAR(txPktBufPtr);
    }

    bool rxPacketExists = rxPacket;
    SERIALIZE_SCALAR(rxPacketExists);
    if (rxPacketExists) {
        nameOut(os, csprintf("%s.rxPacket", name()));
        rxPacket->serialize(os);
        uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
        SERIALIZE_SCALAR(rxPktBufPtr);
    }

    SERIALIZE_SCALAR(txXferLen);
    SERIALIZE_SCALAR(rxXferLen);

    /*
     * Serialize DescCaches
     */
    SERIALIZE_SCALAR(txDescCache.link);
    SERIALIZE_SCALAR(txDescCache.bufptr);
    SERIALIZE_SCALAR(txDescCache.cmdsts);
    SERIALIZE_SCALAR(txDescCache.extsts);
    SERIALIZE_SCALAR(rxDescCache.link);
    SERIALIZE_SCALAR(rxDescCache.bufptr);
    SERIALIZE_SCALAR(rxDescCache.cmdsts);
    SERIALIZE_SCALAR(rxDescCache.extsts);

    /*
     * Serialize tx state machine
     */
    // Enums are written as plain ints (local shadows of the members).
    int txState = this->txState;
    SERIALIZE_SCALAR(txState);
    SERIALIZE_SCALAR(CTDD);
    SERIALIZE_SCALAR(txFifoAvail);
    SERIALIZE_SCALAR(txHalt);
    SERIALIZE_SCALAR(txFragPtr);
    SERIALIZE_SCALAR(txDescCnt);
    int txDmaState = this->txDmaState;
    SERIALIZE_SCALAR(txDmaState);

    /*
     * Serialize rx state machine
     */
    int rxState = this->rxState;
    SERIALIZE_SCALAR(rxState);
    SERIALIZE_SCALAR(CRDD);
    SERIALIZE_SCALAR(rxPktBytes);
    SERIALIZE_SCALAR(rxFifoCnt);
    SERIALIZE_SCALAR(rxHalt);
    SERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState = this->rxDmaState;
    SERIALIZE_SCALAR(rxDmaState);

    SERIALIZE_SCALAR(extstsEnable);

    /*
     * If there's a pending transmit, store the time so we can
     * reschedule it later
     */
    // Stored as a delta from curTick; 0 means "not scheduled".
    Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
    SERIALIZE_SCALAR(transmitTick);

    /*
     * receive address filter settings
     */
    SERIALIZE_SCALAR(rxFilterEnable);
    SERIALIZE_SCALAR(acceptBroadcast);
    SERIALIZE_SCALAR(acceptMulticast);
    SERIALIZE_SCALAR(acceptUnicast);
    SERIALIZE_SCALAR(acceptPerfect);
    SERIALIZE_SCALAR(acceptArp);

    /*
     * Keep track of pending interrupt status.
     */
    SERIALIZE_SCALAR(intrTick);
    SERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick = 0;
    if (intrEvent)
        intrEventTick = intrEvent->when();
    SERIALIZE_SCALAR(intrEventTick);

}
2296
2297 void
2298 NSGigE::unserialize(Checkpoint *cp, const std::string &section)
2299 {
2300 // Unserialize the PciDev base class
2301 PciDev::unserialize(cp, section);
2302
2303 UNSERIALIZE_SCALAR(regs.command);
2304 UNSERIALIZE_SCALAR(regs.config);
2305 UNSERIALIZE_SCALAR(regs.mear);
2306 UNSERIALIZE_SCALAR(regs.ptscr);
2307 UNSERIALIZE_SCALAR(regs.isr);
2308 UNSERIALIZE_SCALAR(regs.imr);
2309 UNSERIALIZE_SCALAR(regs.ier);
2310 UNSERIALIZE_SCALAR(regs.ihr);
2311 UNSERIALIZE_SCALAR(regs.txdp);
2312 UNSERIALIZE_SCALAR(regs.txdp_hi);
2313 UNSERIALIZE_SCALAR(regs.txcfg);
2314 UNSERIALIZE_SCALAR(regs.gpior);
2315 UNSERIALIZE_SCALAR(regs.rxdp);
2316 UNSERIALIZE_SCALAR(regs.rxdp_hi);
2317 UNSERIALIZE_SCALAR(regs.rxcfg);
2318 UNSERIALIZE_SCALAR(regs.pqcr);
2319 UNSERIALIZE_SCALAR(regs.wcsr);
2320 UNSERIALIZE_SCALAR(regs.pcr);
2321 UNSERIALIZE_SCALAR(regs.rfcr);
2322 UNSERIALIZE_SCALAR(regs.rfdr);
2323 UNSERIALIZE_SCALAR(regs.srr);
2324 UNSERIALIZE_SCALAR(regs.mibc);
2325 UNSERIALIZE_SCALAR(regs.vrcr);
2326 UNSERIALIZE_SCALAR(regs.vtcr);
2327 UNSERIALIZE_SCALAR(regs.vdr);
2328 UNSERIALIZE_SCALAR(regs.ccsr);
2329 UNSERIALIZE_SCALAR(regs.tbicr);
2330 UNSERIALIZE_SCALAR(regs.tbisr);
2331 UNSERIALIZE_SCALAR(regs.tanar);
2332 UNSERIALIZE_SCALAR(regs.tanlpar);
2333 UNSERIALIZE_SCALAR(regs.taner);
2334 UNSERIALIZE_SCALAR(regs.tesr);
2335
2336 UNSERIALIZE_ARRAY(rom.perfectMatch, EADDR_LEN);
2337
2338 UNSERIALIZE_SCALAR(ioEnable);
2339
2340 /*
2341 * unserialize the data fifos
2342 */
2343 int txNumPkts;
2344 UNSERIALIZE_SCALAR(txNumPkts);
2345 int i;
2346 for (i = 0; i < txNumPkts; ++i) {
2347 PacketPtr p = new EtherPacket;
2348 p->unserialize(cp, csprintf("%s.rxFifo%d", section, i));
2349 txFifo.push_back(p);
2350 }
2351
2352 int rxNumPkts;
2353 UNSERIALIZE_SCALAR(rxNumPkts);
2354 for (i = 0; i < rxNumPkts; ++i) {
2355 PacketPtr p = new EtherPacket;
2356 p->unserialize(cp, csprintf("%s.rxFifo%d", section, i));
2357 rxFifo.push_back(p);
2358 }
2359
2360 /*
2361 * unserialize the various helper variables
2362 */
2363 bool txPacketExists;
2364 UNSERIALIZE_SCALAR(txPacketExists);
2365 if (txPacketExists) {
2366 txPacket = new EtherPacket;
2367 txPacket->unserialize(cp, csprintf("%s.txPacket", section));
2368 uint32_t txPktBufPtr;
2369 UNSERIALIZE_SCALAR(txPktBufPtr);
2370 txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
2371 } else
2372 txPacket = 0;
2373
2374 bool rxPacketExists;
2375 UNSERIALIZE_SCALAR(rxPacketExists);
2376 rxPacket = 0;
2377 if (rxPacketExists) {
2378 rxPacket = new EtherPacket;
2379 rxPacket->unserialize(cp, csprintf("%s.rxPacket", section));
2380 uint32_t rxPktBufPtr;
2381 UNSERIALIZE_SCALAR(rxPktBufPtr);
2382 rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
2383 } else
2384 rxPacket = 0;
2385
2386 UNSERIALIZE_SCALAR(txXferLen);
2387 UNSERIALIZE_SCALAR(rxXferLen);
2388
2389 /*
2390 * Unserialize DescCaches
2391 */
2392 UNSERIALIZE_SCALAR(txDescCache.link);
2393 UNSERIALIZE_SCALAR(txDescCache.bufptr);
2394 UNSERIALIZE_SCALAR(txDescCache.cmdsts);
2395 UNSERIALIZE_SCALAR(txDescCache.extsts);
2396 UNSERIALIZE_SCALAR(rxDescCache.link);
2397 UNSERIALIZE_SCALAR(rxDescCache.bufptr);
2398 UNSERIALIZE_SCALAR(rxDescCache.cmdsts);
2399 UNSERIALIZE_SCALAR(rxDescCache.extsts);
2400
2401 /*
2402 * unserialize tx state machine
2403 */
2404 int txState;
2405 UNSERIALIZE_SCALAR(txState);
2406 this->txState = (TxState) txState;
2407 UNSERIALIZE_SCALAR(CTDD);
2408 UNSERIALIZE_SCALAR(txFifoAvail);
2409 UNSERIALIZE_SCALAR(txHalt);
2410 UNSERIALIZE_SCALAR(txFragPtr);
2411 UNSERIALIZE_SCALAR(txDescCnt);
2412 int txDmaState;
2413 UNSERIALIZE_SCALAR(txDmaState);
2414 this->txDmaState = (DmaState) txDmaState;
2415
2416 /*
2417 * unserialize rx state machine
2418 */
2419 int rxState;
2420 UNSERIALIZE_SCALAR(rxState);
2421 this->rxState = (RxState) rxState;
2422 UNSERIALIZE_SCALAR(CRDD);
2423 UNSERIALIZE_SCALAR(rxPktBytes);
2424 UNSERIALIZE_SCALAR(rxFifoCnt);
2425 UNSERIALIZE_SCALAR(rxHalt);
2426 UNSERIALIZE_SCALAR(rxDescCnt);
2427 int rxDmaState;
2428 UNSERIALIZE_SCALAR(rxDmaState);
2429 this->rxDmaState = (DmaState) rxDmaState;
2430
2431 UNSERIALIZE_SCALAR(extstsEnable);
2432
2433 /*
2434 * If there's a pending transmit, reschedule it now
2435 */
2436 Tick transmitTick;
2437 UNSERIALIZE_SCALAR(transmitTick);
2438 if (transmitTick)
2439 txEvent.schedule(curTick + transmitTick);
2440
2441 /*
2442 * unserialize receive address filter settings
2443 */
2444 UNSERIALIZE_SCALAR(rxFilterEnable);
2445 UNSERIALIZE_SCALAR(acceptBroadcast);
2446 UNSERIALIZE_SCALAR(acceptMulticast);
2447 UNSERIALIZE_SCALAR(acceptUnicast);
2448 UNSERIALIZE_SCALAR(acceptPerfect);
2449 UNSERIALIZE_SCALAR(acceptArp);
2450
2451 /*
2452 * Keep track of pending interrupt status.
2453 */
2454 UNSERIALIZE_SCALAR(intrTick);
2455 UNSERIALIZE_SCALAR(cpuPendingIntr);
2456 Tick intrEventTick;
2457 UNSERIALIZE_SCALAR(intrEventTick);
2458 if (intrEventTick) {
2459 intrEvent = new IntrEvent(this, true);
2460 intrEvent->schedule(intrEventTick);
2461 }
2462
2463 /*
2464 * re-add addrRanges to bus bridges
2465 */
2466 if (pioInterface) {
2467 pioInterface->addAddrRange(BARAddrs[0], BARAddrs[0] + BARSize[0] - 1);
2468 pioInterface->addAddrRange(BARAddrs[1], BARAddrs[1] + BARSize[1] - 1);
2469 }
2470 }
2471
2472 Tick
2473 NSGigE::cacheAccess(MemReqPtr &req)
2474 {
2475 DPRINTF(EthernetPIO, "timing access to paddr=%#x (daddr=%#x)\n",
2476 req->paddr, req->paddr - addr);
2477 return curTick + pioLatency;
2478 }
2479 //=====================================================================
2480
2481
2482 //********** helper functions******************************************
2483
// Swap the two bytes of a 16-bit value (endianness conversion helper).
uint16_t reverseEnd16(uint16_t num)
{
    uint16_t lo = num & 0x00ff;
    uint16_t hi = (num >> 8) & 0x00ff;
    return (uint16_t)((lo << 8) | hi);
}
2490
// Byte-swap a 32-bit value (endianness conversion helper).
//
// Bug fix: the high half-word was previously extracted with ">> 8"
// instead of ">> 16", so e.g. reverseEnd32(0x12345678) returned
// 0x78565634 rather than the correct 0x78563412.  The swap is now
// done directly on all four bytes.
uint32_t reverseEnd32(uint32_t num)
{
    uint32_t reverse = ((num & 0x000000ff) << 24) |
                       ((num & 0x0000ff00) <<  8) |
                       ((num & 0x00ff0000) >>  8) |
                       ((num & 0xff000000) >> 24);
    return reverse;
}
2497
2498
2499
2500 //=====================================================================
2501
/*
 * Simulator configuration parameters for the NSGigEInt object -- the
 * Ethernet link endpoint that connects the device model to a peer
 * interface.
 */
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

    SimObjectParam<EtherInt *> peer;    // other end of the link (may be NULL)
    SimObjectParam<NSGigE *> device;    // device model this interface belongs to

END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)
2508
/*
 * Default values / descriptions for the NSGigEInt parameters declared
 * above.  Only `peer` is optional (defaults to NULL).
 */
BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt)

    INIT_PARAM_DFLT(peer, "peer interface", NULL),
    INIT_PARAM(device, "Ethernet device of this interface")

END_INIT_SIM_OBJECT_PARAMS(NSGigEInt)
2515
/*
 * Factory: construct an NSGigEInt from the parameters above and, if a
 * peer interface was supplied, wire both ends of the link to each
 * other.
 */
CREATE_SIM_OBJECT(NSGigEInt)
{
    NSGigEInt *dev_int = new NSGigEInt(getInstanceName(), device);

    // peer defaults to NULL, so a one-sided (unconnected) interface is
    // legal here.  NOTE(review): C-style cast kept as-is; presumably
    // SimObjectParam<EtherInt *> converts to its pointee type.
    EtherInt *p = (EtherInt *)peer;
    if (p) {
        dev_int->setPeer(p);
        p->setPeer(dev_int);
    }

    return dev_int;
}

REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt)
2530
2531
/*
 * Simulator configuration parameters for the NSGigE (DP83820) device
 * model itself: link timing, interrupt plumbing, DMA timing knobs, and
 * PCI placement.  Descriptions for each appear in the matching
 * INIT_SIM_OBJECT_PARAMS block below.
 */
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE)

    Param<Tick> tx_delay;                       // transmit delay
    Param<Tick> rx_delay;                       // receive delay
    SimObjectParam<IntrControl *> intr_ctrl;    // interrupt controller
    Param<Tick> intr_delay;                     // delay before posting an interrupt
    SimObjectParam<MemoryController *> mmu;
    SimObjectParam<PhysicalMemory *> physmem;
    Param<bool> rx_filter;                      // enable receive-address filtering
    Param<string> hardware_address;             // MAC address, "xx:xx:xx:xx:xx:xx"
    SimObjectParam<Bus*> header_bus;            // IO bus for headers
    SimObjectParam<Bus*> payload_bus;           // IO bus for payload
    SimObjectParam<HierParams *> hier;
    Param<Tick> pio_latency;                    // programmed-IO latency
    Param<bool> dma_desc_free;                  // model descriptor DMA as free
    Param<bool> dma_data_free;                  // model data DMA as free
    Param<Tick> dma_read_delay;                 // fixed delay per DMA read
    Param<Tick> dma_write_delay;                // fixed delay per DMA write
    Param<Tick> dma_read_factor;                // multiplier for DMA reads
    Param<Tick> dma_write_factor;               // multiplier for DMA writes
    SimObjectParam<PciConfigAll *> configspace;
    SimObjectParam<PciConfigData *> configdata;
    SimObjectParam<Tsunami *> tsunami;
    Param<uint32_t> pci_bus;
    Param<uint32_t> pci_dev;
    Param<uint32_t> pci_func;

END_DECLARE_SIM_OBJECT_PARAMS(NSGigE)
2560
/*
 * Default values / descriptions for the NSGigE parameters declared
 * above.  Parameters without a _DFLT variant are mandatory in the
 * configuration.
 */
BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE)

    INIT_PARAM_DFLT(tx_delay, "Transmit Delay", 1000),
    INIT_PARAM_DFLT(rx_delay, "Receive Delay", 1000),
    INIT_PARAM(intr_ctrl, "Interrupt Controller"),
    INIT_PARAM_DFLT(intr_delay, "Interrupt Delay in microseconds", 0),
    INIT_PARAM(mmu, "Memory Controller"),
    INIT_PARAM(physmem, "Physical Memory"),
    INIT_PARAM_DFLT(rx_filter, "Enable Receive Filter", true),
    INIT_PARAM_DFLT(hardware_address, "Ethernet Hardware Address",
                    "00:99:00:00:00:01"),
    INIT_PARAM_DFLT(header_bus, "The IO Bus to attach to for headers", NULL),
    INIT_PARAM_DFLT(payload_bus, "The IO Bus to attach to for payload", NULL),
    INIT_PARAM_DFLT(hier, "Hierarchy global variables", &defaultHierParams),
    INIT_PARAM_DFLT(pio_latency, "Programmed IO latency", 1000),
    INIT_PARAM_DFLT(dma_desc_free, "DMA of Descriptors is free", false),
    INIT_PARAM_DFLT(dma_data_free, "DMA of Data is free", false),
    INIT_PARAM_DFLT(dma_read_delay, "fixed delay for dma reads", 0),
    INIT_PARAM_DFLT(dma_write_delay, "fixed delay for dma writes", 0),
    INIT_PARAM_DFLT(dma_read_factor, "multiplier for dma reads", 0),
    INIT_PARAM_DFLT(dma_write_factor, "multiplier for dma writes", 0),
    INIT_PARAM(configspace, "PCI Configspace"),
    INIT_PARAM(configdata, "PCI Config data"),
    INIT_PARAM(tsunami, "Tsunami"),
    INIT_PARAM(pci_bus, "PCI bus"),
    INIT_PARAM(pci_dev, "PCI device number"),
    INIT_PARAM(pci_func, "PCI function code")

END_INIT_SIM_OBJECT_PARAMS(NSGigE)
2590
2591
2592 CREATE_SIM_OBJECT(NSGigE)
2593 {
2594 int eaddr[6];
2595 sscanf(((string)hardware_address).c_str(), "%x:%x:%x:%x:%x:%x",
2596 &eaddr[0], &eaddr[1], &eaddr[2], &eaddr[3], &eaddr[4], &eaddr[5]);
2597
2598 return new NSGigE(getInstanceName(), intr_ctrl, intr_delay,
2599 physmem, tx_delay, rx_delay, mmu, hier, header_bus,
2600 payload_bus, pio_latency, dma_desc_free, dma_data_free,
2601 dma_read_delay, dma_write_delay, dma_read_factor,
2602 dma_write_factor, configspace, configdata,
2603 tsunami, pci_bus, pci_dev, pci_func, rx_filter, eaddr);
2604 }
2605
2606 REGISTER_SIM_OBJECT("NSGigE", NSGigE)