Add support to store less than the full packet in an etherdump
[gem5.git] / dev / ns_gige.cc
1 /*
2 * Copyright (c) 2004 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /* @file
30 * Device module for modelling the National Semiconductor
31 * DP83820 ethernet controller. Does not support priority queueing
32 */
33 #include <cstdio>
34 #include <deque>
35 #include <string>
36
37 #include "base/inet.hh"
38 #include "cpu/exec_context.hh"
39 #include "cpu/intr_control.hh"
40 #include "dev/dma.hh"
41 #include "dev/ns_gige.hh"
42 #include "dev/etherlink.hh"
43 #include "mem/bus/bus.hh"
44 #include "mem/bus/dma_interface.hh"
45 #include "mem/bus/pio_interface.hh"
46 #include "mem/bus/pio_interface_impl.hh"
47 #include "mem/functional_mem/memory_control.hh"
48 #include "mem/functional_mem/physical_memory.hh"
49 #include "sim/builder.hh"
50 #include "sim/host.hh"
51 #include "sim/sim_stats.hh"
52 #include "targetarch/vtophys.hh"
53 #include "dev/pciconfigall.hh"
54 #include "dev/tsunami_cchip.hh"
55
// Printable names for the receive state machine, used by DPRINTF
// tracing in rxKick().  Order must match the rx state enum declared in
// ns_gige.hh (indexed directly, e.g. NsRxStateStrings[rxState]) --
// TODO confirm against the header if the enum is ever reordered.
const char *NsRxStateStrings[] =
{
    "rxIdle",
    "rxDescRefr",
    "rxDescRead",
    "rxFifoBlock",
    "rxFragWrite",
    "rxDescWrite",
    "rxAdvance"
};
66
// Printable names for the transmit state machine, used by DPRINTF
// tracing.  Order must match the tx state enum declared in ns_gige.hh
// -- TODO confirm against the header if the enum is ever reordered.
const char *NsTxStateStrings[] =
{
    "txIdle",
    "txDescRefr",
    "txDescRead",
    "txFifoBlock",
    "txFragRead",
    "txDescWrite",
    "txAdvance"
};
77
// Printable names for the rx/tx DMA engine states (rxDmaState /
// txDmaState).  Order must match the dma state enum in ns_gige.hh --
// TODO confirm against the header if the enum is ever reordered.
const char *NsDmaState[] =
{
    "dmaIdle",
    "dmaReading",
    "dmaWriting",
    "dmaReadWaiting",
    "dmaWriteWaiting"
};
86
87 using namespace std;
88
89 //helper function declarations
90 //These functions reverse Endianness so we can evaluate network data correctly
91 uint16_t reverseEnd16(uint16_t);
92 uint32_t reverseEnd32(uint32_t);
93
94 ///////////////////////////////////////////////////////////////////////
95 //
96 // NSGigE PCI Device
97 //
/**
 * Construct the DP83820 device model.
 *
 * Wires the device onto the memory hierarchy: a PIO interface for
 * register accesses and a DMA interface for descriptor/payload
 * traffic.  If a separate header and payload bus are given, DMA is
 * split across them; with only one bus, that bus carries everything.
 * If neither bus is given, pioInterface/dmaInterface are left unset
 * and the timing-free code paths are used.
 *
 * Most state is zero/false-initialized in the init list; the FIFO
 * capacities and rx filter settings come from the config parameters,
 * and the station MAC address (eaddr) is loaded into the perfect-match
 * ROM used by rxFilter.
 */
NSGigE::NSGigE(const std::string &name, IntrControl *i, Tick intr_delay,
               PhysicalMemory *pmem, Tick tx_delay, Tick rx_delay,
               MemoryController *mmu, HierParams *hier, Bus *header_bus,
               Bus *payload_bus, Tick pio_latency, bool dma_desc_free,
               bool dma_data_free, Tick dma_read_delay, Tick dma_write_delay,
               Tick dma_read_factor, Tick dma_write_factor, PciConfigAll *cf,
               PciConfigData *cd, Tsunami *t, uint32_t bus, uint32_t dev,
               uint32_t func, bool rx_filter, const int eaddr[6],
               uint32_t tx_fifo_size, uint32_t rx_fifo_size)
    : PciDev(name, mmu, cf, cd, bus, dev, func), tsunami(t), ioEnable(false),
      maxTxFifoSize(tx_fifo_size), maxRxFifoSize(rx_fifo_size),
      txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
      txXferLen(0), rxXferLen(0), txState(txIdle), CTDD(false),
      txFifoAvail(tx_fifo_size), txHalt(false),
      txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
      CRDD(false), rxPktBytes(0), rxFifoCnt(0), rxHalt(false),
      rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
      rxDmaReadEvent(this), rxDmaWriteEvent(this),
      txDmaReadEvent(this), txDmaWriteEvent(this),
      dmaDescFree(dma_desc_free), dmaDataFree(dma_data_free),
      txDelay(tx_delay), rxDelay(rx_delay), rxKickTick(0), txKickTick(0),
      txEvent(this), rxFilterEnable(rx_filter), acceptBroadcast(false),
      acceptMulticast(false), acceptUnicast(false),
      acceptPerfect(false), acceptArp(false),
      physmem(pmem), intctrl(i), intrTick(0), cpuPendingIntr(false),
      intrEvent(0), interface(0)
{
    // Register ourselves with the Tsunami chipset so it can find the
    // ethernet device (used for interrupt delivery via the cchip).
    tsunami->ethernet = this;

    if (header_bus) {
        // Register PIO on the header bus; register reads/writes come
        // through NSGigE::cacheAccess.
        pioInterface = newPioInterface(name, hier, header_bus, this,
                                       &NSGigE::cacheAccess);

        pioLatency = pio_latency * header_bus->clockRatio;

        if (payload_bus)
            // Descriptors over the header bus, packet data over the
            // payload bus.
            dmaInterface = new DMAInterface<Bus>(name + ".dma",
                                                 header_bus, payload_bus, 1);
        else
            dmaInterface = new DMAInterface<Bus>(name + ".dma",
                                                 header_bus, header_bus, 1);
    } else if (payload_bus) {
        // Single-bus configuration: everything on the payload bus.
        pioInterface = newPioInterface(name, hier, payload_bus, this,
                                       &NSGigE::cacheAccess);

        pioLatency = pio_latency * payload_bus->clockRatio;

        dmaInterface = new DMAInterface<Bus>(name + ".dma", payload_bus,
                                             payload_bus, 1);
    }


    // Interrupt delay is specified in microseconds; convert to ticks.
    intrDelay = US2Ticks(intr_delay);
    dmaReadDelay = dma_read_delay;
    dmaWriteDelay = dma_write_delay;
    dmaReadFactor = dma_read_factor;
    dmaWriteFactor = dma_write_factor;

    // Put the device registers into their power-on state, then load
    // the configured MAC address into the rx filter's perfect-match ROM.
    regsReset();
    rom.perfectMatch[0] = eaddr[0];
    rom.perfectMatch[1] = eaddr[1];
    rom.perfectMatch[2] = eaddr[2];
    rom.perfectMatch[3] = eaddr[3];
    rom.perfectMatch[4] = eaddr[4];
    rom.perfectMatch[5] = eaddr[5];
}
164
// Destructor: nothing to release here.  NOTE(review): pioInterface,
// dmaInterface and intrEvent are heap-allocated and not freed; they
// presumably live for the duration of the simulation.
NSGigE::~NSGigE()
{}
167
/**
 * Register the device's statistics with the stats package.
 *
 * Each scalar stat gets a name, description and (where appropriate) a
 * prereq: a stat is only printed if its prereq is nonzero, so e.g. the
 * checksum counters are suppressed when no traffic flowed.  The four
 * formula stats (bandwidth and packet rate) are bound at the end,
 * after the scalars they reference have been registered.
 */
void
NSGigE::regStats()
{
    txBytes
        .name(name() + ".txBytes")
        .desc("Bytes Transmitted")
        .prereq(txBytes)
        ;

    rxBytes
        .name(name() + ".rxBytes")
        .desc("Bytes Received")
        .prereq(rxBytes)
        ;

    txPackets
        .name(name() + ".txPackets")
        .desc("Number of Packets Transmitted")
        .prereq(txBytes)
        ;

    rxPackets
        .name(name() + ".rxPackets")
        .desc("Number of Packets Received")
        .prereq(rxBytes)
        ;

    txIPChecksums
        .name(name() + ".txIPChecksums")
        .desc("Number of tx IP Checksums done by device")
        .precision(0)
        .prereq(txBytes)
        ;

    rxIPChecksums
        .name(name() + ".rxIPChecksums")
        .desc("Number of rx IP Checksums done by device")
        .precision(0)
        .prereq(rxBytes)
        ;

    txTCPChecksums
        .name(name() + ".txTCPChecksums")
        .desc("Number of tx TCP Checksums done by device")
        .precision(0)
        .prereq(txBytes)
        ;

    rxTCPChecksums
        .name(name() + ".rxTCPChecksums")
        .desc("Number of rx TCP Checksums done by device")
        .precision(0)
        .prereq(rxBytes)
        ;

    descDmaReads
        .name(name() + ".descDMAReads")
        .desc("Number of descriptors the device read w/ DMA")
        .precision(0)
        ;

    descDmaWrites
        .name(name() + ".descDMAWrites")
        .desc("Number of descriptors the device wrote w/ DMA")
        .precision(0)
        ;

    descDmaRdBytes
        .name(name() + ".descDmaReadBytes")
        .desc("number of descriptor bytes read w/ DMA")
        .precision(0)
        ;

    descDmaWrBytes
        .name(name() + ".descDmaWriteBytes")
        .desc("number of descriptor bytes write w/ DMA")
        .precision(0)
        ;


    txBandwidth
        .name(name() + ".txBandwidth")
        .desc("Transmit Bandwidth (bits/s)")
        .precision(0)
        .prereq(txBytes)
        ;

    rxBandwidth
        .name(name() + ".rxBandwidth")
        .desc("Receive Bandwidth (bits/s)")
        .precision(0)
        .prereq(rxBytes)
        ;

    txPacketRate
        .name(name() + ".txPPS")
        .desc("Packet Tranmission Rate (packets/s)")
        .precision(0)
        .prereq(txBytes)
        ;

    rxPacketRate
        .name(name() + ".rxPPS")
        .desc("Packet Reception Rate (packets/s)")
        .precision(0)
        .prereq(rxBytes)
        ;

    // Formula stats: evaluated at dump time from the scalars above.
    txBandwidth = txBytes * Stats::constant(8) / simSeconds;
    rxBandwidth = rxBytes * Stats::constant(8) / simSeconds;
    txPacketRate = txPackets / simSeconds;
    rxPacketRate = rxPackets / simSeconds;
}
281
282 /**
283 * This is to read the PCI general configuration registers
284 */
285 void
286 NSGigE::ReadConfig(int offset, int size, uint8_t *data)
287 {
288 if (offset < PCI_DEVICE_SPECIFIC)
289 PciDev::ReadConfig(offset, size, data);
290 else
291 panic("Device specific PCI config space not implemented!\n");
292 }
293
/**
 * Write the PCI configuration space.
 *
 * The standard header is handled by PciDev::WriteConfig; writes to
 * device-specific space are fatal (panic does not return, so the
 * switch below only ever runs for standard-header offsets).  After the
 * base class has updated config.data/BARAddrs, we snoop a few offsets:
 * PCI_COMMAND to track I/O-space enable, and the two BARs to register
 * the freshly programmed address ranges with the PIO interface.
 */
void
NSGigE::WriteConfig(int offset, int size, uint32_t data)
{
    if (offset < PCI_DEVICE_SPECIFIC)
        PciDev::WriteConfig(offset, size, data);
    else
        panic("Device specific PCI config space not implemented!\n");

    // Need to catch writes to BARs to update the PIO interface
    switch (offset) {
        //seems to work fine without all these PCI settings, but i put in the IO
        //to double check, an assertion will fail if we need to properly
        // implement it
      case PCI_COMMAND:
        // Track the I/O Space Enable bit; read()/write() assert on it.
        if (config.data[offset] & PCI_CMD_IOSE)
            ioEnable = true;
        else
            ioEnable = false;

#if 0
        if (config.data[offset] & PCI_CMD_BME) {
            bmEnabled = true;
        }
        else {
            bmEnabled = false;
        }

        if (config.data[offset] & PCI_CMD_MSE) {
            memEnable = true;
        }
        else {
            memEnable = false;
        }
#endif
        break;

      case PCI0_BASE_ADDR0:
        if (BARAddrs[0] != 0) {

            // Publish the new BAR0 range to the bus, then strip the
            // uncached-space bits so internal compares use the
            // physical address.
            if (pioInterface)
                pioInterface->addAddrRange(BARAddrs[0], BARAddrs[0] + BARSize[0] - 1);

            BARAddrs[0] &= PA_UNCACHED_MASK;

        }
        break;
      case PCI0_BASE_ADDR1:
        if (BARAddrs[1] != 0) {

            if (pioInterface)
                pioInterface->addAddrRange(BARAddrs[1], BARAddrs[1] + BARSize[1] - 1);

            BARAddrs[1] &= PA_UNCACHED_MASK;

        }
        break;
    }
}
355
/**
 * This reads the device registers, which are detailed in the NS83820
 * spec sheet.
 *
 * The register file occupies the low 0x400 bytes of the BAR; the
 * offset is derived by masking the physical address.  Some reads have
 * side effects (CR clears its command bits in the returned value, ISR
 * clears all interrupts, MIBC clears status bits in the returned
 * value).  Only 32-bit accesses are supported.
 *
 * @param req  memory request carrying paddr/vaddr/size
 * @param data output buffer for the register value
 * @return No_Fault on success; panics on reserved/unknown registers
 *         or unsupported access sizes
 */
Fault
NSGigE::read(MemReqPtr &req, uint8_t *data)
{
    assert(ioEnable);

    //The mask is to give you only the offset into the device register file
    Addr daddr = req->paddr & 0xfff;
    DPRINTF(EthernetPIO, "read da=%#x pa=%#x va=%#x size=%d\n",
            daddr, req->paddr, req->vaddr, req->size);


    //there are some reserved registers, you can see ns_gige_reg.h and
    //the spec sheet for details
    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        // offsets above RESERVED alias the PCI config space
        ReadConfig(daddr & 0xff, req->size, data);
        return No_Fault;
    } else if (daddr >= MIB_START && daddr <= MIB_END) {
        // don't implement all the MIB's.  hopefully the kernel
        // doesn't actually DEPEND upon their values
        // MIB are just hardware stats keepers
        uint32_t &reg = *(uint32_t *) data;
        reg = 0;
        return No_Fault;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    switch (req->size) {
      case sizeof(uint32_t):
        {
            uint32_t &reg = *(uint32_t *)data;

            switch (daddr) {
              case CR:
                reg = regs.command;
                //these are supposed to be cleared on a read
                reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
                break;

              case CFG:
                reg = regs.config;
                break;

              case MEAR:
                reg = regs.mear;
                break;

              case PTSCR:
                reg = regs.ptscr;
                break;

              case ISR:
                // reading ISR acknowledges (clears) all interrupts
                reg = regs.isr;
                devIntrClear(ISR_ALL);
                break;

              case IMR:
                reg = regs.imr;
                break;

              case IER:
                reg = regs.ier;
                break;

              case IHR:
                reg = regs.ihr;
                break;

              case TXDP:
                reg = regs.txdp;
                break;

              case TXDP_HI:
                reg = regs.txdp_hi;
                break;

              case TXCFG:
                reg = regs.txcfg;
                break;

              case GPIOR:
                reg = regs.gpior;
                break;

              case RXDP:
                reg = regs.rxdp;
                break;

              case RXDP_HI:
                reg = regs.rxdp_hi;
                break;

              case RXCFG:
                reg = regs.rxcfg;
                break;

              case PQCR:
                reg = regs.pqcr;
                break;

              case WCSR:
                reg = regs.wcsr;
                break;

              case PCR:
                reg = regs.pcr;
                break;

              //see the spec sheet for how RFCR and RFDR work
              //basically, you write to RFCR to tell the machine what you want to do next
              //then you act upon RFDR, and the device will be prepared b/c
              //of what you wrote to RFCR
              case RFCR:
                reg = regs.rfcr;
                break;

              case RFDR:
                // return a pair of perfect-match MAC bytes, selected
                // by the address previously written into RFCR;
                // low byte in bits 7:0, high byte in bits 15:8
                switch (regs.rfcr & RFCR_RFADDR) {
                  case 0x000:
                    reg = rom.perfectMatch[1];
                    reg = reg << 8;
                    reg += rom.perfectMatch[0];
                    break;
                  case 0x002:
                    reg = rom.perfectMatch[3] << 8;
                    reg += rom.perfectMatch[2];
                    break;
                  case 0x004:
                    reg = rom.perfectMatch[5] << 8;
                    reg += rom.perfectMatch[4];
                    break;
                  default:
                    panic("reading from RFDR for something for other than PMATCH!\n");
                    //didn't implement other RFDR functionality b/c driver didn't use
                }
                break;

              case SRR:
                reg = regs.srr;
                break;

              case MIBC:
                reg = regs.mibc;
                // status bits are cleared in the value returned
                reg &= ~(MIBC_MIBS | MIBC_ACLR);
                break;

              case VRCR:
                reg = regs.vrcr;
                break;

              case VTCR:
                reg = regs.vtcr;
                break;

              case VDR:
                reg = regs.vdr;
                break;

              case CCSR:
                reg = regs.ccsr;
                break;

              case TBICR:
                reg = regs.tbicr;
                break;

              case TBISR:
                reg = regs.tbisr;
                break;

              case TANAR:
                reg = regs.tanar;
                break;

              case TANLPAR:
                reg = regs.tanlpar;
                break;

              case TANER:
                reg = regs.taner;
                break;

              case TESR:
                reg = regs.tesr;
                break;

              default:
                panic("reading unimplemented register: addr = %#x", daddr);
            }

            DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
                    daddr, reg, reg);
        }
        break;

      default:
        panic("accessing register with invalid size: addr=%#x, size=%d",
              daddr, req->size);
    }

    return No_Fault;
}
563
/**
 * This writes the device registers, which are detailed in the NS83820
 * spec sheet.
 *
 * Mirrors read(): the low 0x400 bytes are the register file, offsets
 * above RESERVED alias the PCI config space.  Only 32-bit writes are
 * accepted.  Writes to CR drive the tx/rx state machines (enable,
 * disable, reset); writes to read-only or never-driver-used registers
 * panic, which doubles as a check that the model covers everything the
 * Linux driver actually touches.
 *
 * @param req  memory request carrying paddr/vaddr/size
 * @param data the value being written
 * @return No_Fault on success; panics otherwise
 */
Fault
NSGigE::write(MemReqPtr &req, const uint8_t *data)
{
    assert(ioEnable);

    Addr daddr = req->paddr & 0xfff;
    DPRINTF(EthernetPIO, "write da=%#x pa=%#x va=%#x size=%d\n",
            daddr, req->paddr, req->vaddr, req->size);

    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        WriteConfig(daddr & 0xff, req->size, *(uint32_t *)data);
        return No_Fault;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    if (req->size == sizeof(uint32_t)) {
        uint32_t reg = *(uint32_t *)data;
        DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);

        switch (daddr) {
          case CR:
            regs.command = reg;
            // enable+disable set together: treat as a halt request
            if ((reg & (CR_TXE | CR_TXD)) == (CR_TXE | CR_TXD)) {
                txHalt = true;
            } else if (reg & CR_TXE) {
                //the kernel is enabling the transmit machine
                if (txState == txIdle)
                    txKick();
            } else if (reg & CR_TXD) {
                txHalt = true;
            }

            if ((reg & (CR_RXE | CR_RXD)) == (CR_RXE | CR_RXD)) {
                rxHalt = true;
            } else if (reg & CR_RXE) {
                if (rxState == rxIdle) {
                    rxKick();
                }
            } else if (reg & CR_RXD) {
                rxHalt = true;
            }

            if (reg & CR_TXR)
                txReset();

            if (reg & CR_RXR)
                rxReset();

            if (reg & CR_SWI)
                devIntrPost(ISR_SWI);

            if (reg & CR_RST) {
                // full software reset: both machines plus registers
                txReset();
                rxReset();

                regsReset();
            }
            break;

          case CFG:
            if (reg & CFG_LNKSTS || reg & CFG_SPDSTS || reg & CFG_DUPSTS
                || reg & CFG_RESERVED || reg & CFG_T64ADDR
                || reg & CFG_PCI64_DET)
                panic("writing to read-only or reserved CFG bits!\n");

            // NOTE(review): |= means CFG bits, once set, can never be
            // cleared by a subsequent write -- verify this is intended
            regs.config |= reg & ~(CFG_LNKSTS | CFG_SPDSTS | CFG_DUPSTS | CFG_RESERVED |
                                   CFG_T64ADDR | CFG_PCI64_DET);

            // all these #if 0's are because i don't THINK the kernel needs to have these implemented
            // if there is a problem relating to one of these, you may need to add functionality in
#if 0
            if (reg & CFG_TBI_EN) ;
            if (reg & CFG_MODE_1000) ;
#endif

            if (reg & CFG_AUTO_1000)
                panic("CFG_AUTO_1000 not implemented!\n");

#if 0
            if (reg & CFG_PINT_DUPSTS || reg & CFG_PINT_LNKSTS || reg & CFG_PINT_SPDSTS) ;
            if (reg & CFG_TMRTEST) ;
            if (reg & CFG_MRM_DIS) ;
            if (reg & CFG_MWI_DIS) ;

            if (reg & CFG_T64ADDR)
                panic("CFG_T64ADDR is read only register!\n");

            if (reg & CFG_PCI64_DET)
                panic("CFG_PCI64_DET is read only register!\n");

            if (reg & CFG_DATA64_EN) ;
            if (reg & CFG_M64ADDR) ;
            if (reg & CFG_PHY_RST) ;
            if (reg & CFG_PHY_DIS) ;
#endif

            // EXTSTS controls whether descriptors carry the extended
            // status word (used for checksum offload)
            if (reg & CFG_EXTSTS_EN)
                extstsEnable = true;
            else
                extstsEnable = false;

#if 0
            if (reg & CFG_REQALG) ;
            if (reg & CFG_SB) ;
            if (reg & CFG_POW) ;
            if (reg & CFG_EXD) ;
            if (reg & CFG_PESEL) ;
            if (reg & CFG_BROM_DIS) ;
            if (reg & CFG_EXT_125) ;
            if (reg & CFG_BEM) ;
#endif
            break;

          case MEAR:
            regs.mear = reg;
            /* since phy is completely faked, MEAR_MD* don't matter
               and since the driver never uses MEAR_EE*, they don't matter */
#if 0
            if (reg & MEAR_EEDI) ;
            if (reg & MEAR_EEDO) ; //this one is read only
            if (reg & MEAR_EECLK) ;
            if (reg & MEAR_EESEL) ;
            if (reg & MEAR_MDIO) ;
            if (reg & MEAR_MDDIR) ;
            if (reg & MEAR_MDC) ;
#endif
            break;

          case PTSCR:
            regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
            /* these control BISTs for various parts of chip - we don't care or do
               just fake that the BIST is done */
            if (reg & PTSCR_RBIST_EN)
                regs.ptscr |= PTSCR_RBIST_DONE;
            if (reg & PTSCR_EEBIST_EN)
                regs.ptscr &= ~PTSCR_EEBIST_EN;
            if (reg & PTSCR_EELOAD_EN)
                regs.ptscr &= ~PTSCR_EELOAD_EN;
            break;

          case ISR: /* writing to the ISR has no effect */
            panic("ISR is a read only register!\n");

          case IMR:
            regs.imr = reg;
            // mask change may raise or lower the CPU interrupt line
            devIntrChangeMask();
            break;

          case IER:
            regs.ier = reg;
            break;

          case IHR:
            regs.ihr = reg;
            /* not going to implement real interrupt holdoff */
            break;

          case TXDP:
            // descriptor pointers must be dword aligned
            regs.txdp = (reg & 0xFFFFFFFC);
            assert(txState == txIdle);
            CTDD = false;
            break;

          case TXDP_HI:
            regs.txdp_hi = reg;
            break;

          case TXCFG:
            regs.txcfg = reg;
#if 0
            if (reg & TXCFG_CSI) ;
            if (reg & TXCFG_HBI) ;
            if (reg & TXCFG_MLB) ;
            if (reg & TXCFG_ATP) ;
            if (reg & TXCFG_ECRETRY) ;  /* this could easily be implemented, but
                                           considering the network is just a fake
                                           pipe, wouldn't make sense to do this */

            if (reg & TXCFG_BRST_DIS) ;
#endif


            /* we handle our own DMA, ignore the kernel's exhortations */
            //if (reg & TXCFG_MXDMA) ;

            //also, we currently don't care about fill/drain thresholds
            //though this may change in the future with more realistic
            //networks or a driver which changes it according to feedback

            break;

          case GPIOR:
            regs.gpior = reg;
            /* these just control general purpose i/o pins, don't matter */
            break;

          case RXDP:
            regs.rxdp = reg;
            break;

          case RXDP_HI:
            regs.rxdp_hi = reg;
            break;

          case RXCFG:
            regs.rxcfg = reg;
#if 0
            if (reg & RXCFG_AEP) ;
            if (reg & RXCFG_ARP) ;
            if (reg & RXCFG_STRIPCRC) ;
            if (reg & RXCFG_RX_RD) ;
            if (reg & RXCFG_ALP) ;
            if (reg & RXCFG_AIRL) ;
#endif

            /* we handle our own DMA, ignore what kernel says about it */
            //if (reg & RXCFG_MXDMA) ;

#if 0
            //also, we currently don't care about fill/drain thresholds
            //though this may change in the future with more realistic
            //networks or a driver which changes it according to feedback
            if (reg & (RXCFG_DRTH | RXCFG_DRTH0)) ;
#endif
            break;

          case PQCR:
            /* there is no priority queueing used in the linux 2.6 driver */
            regs.pqcr = reg;
            break;

          case WCSR:
            /* not going to implement wake on LAN */
            regs.wcsr = reg;
            break;

          case PCR:
            /* not going to implement pause control */
            regs.pcr = reg;
            break;

          case RFCR:
            regs.rfcr = reg;

            // latch the individual rx filter mode bits; these are
            // consulted by the rx filter on every incoming packet
            rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
            acceptBroadcast = (reg & RFCR_AAB) ? true : false;
            acceptMulticast = (reg & RFCR_AAM) ? true : false;
            acceptUnicast = (reg & RFCR_AAU) ? true : false;
            acceptPerfect = (reg & RFCR_APM) ? true : false;
            acceptArp = (reg & RFCR_AARP) ? true : false;

            // pattern matching deliberately tolerated (no-op)
            if (reg & RFCR_APAT) ;
//                panic("RFCR_APAT not implemented!\n");

            if (reg & RFCR_MHEN || reg & RFCR_UHEN)
                panic("hash filtering not implemented!\n");

            if (reg & RFCR_ULM)
                panic("RFCR_ULM not implemented!\n");

            break;

          case RFDR:
            panic("the driver never writes to RFDR, something is wrong!\n");

          case BRAR:
            panic("the driver never uses BRAR, something is wrong!\n");

          case BRDR:
            panic("the driver never uses BRDR, something is wrong!\n");

          case SRR:
            panic("SRR is read only register!\n");

          case MIBC:
            panic("the driver never uses MIBC, something is wrong!\n");

          case VRCR:
            regs.vrcr = reg;
            break;

          case VTCR:
            regs.vtcr = reg;
            break;

          case VDR:
            panic("the driver never uses VDR, something is wrong!\n");
            break;

          case CCSR:
            /* not going to implement clockrun stuff */
            regs.ccsr = reg;
            break;

          case TBICR:
            regs.tbicr = reg;
            if (reg & TBICR_MR_LOOPBACK)
                panic("TBICR_MR_LOOPBACK never used, something wrong!\n");

            if (reg & TBICR_MR_AN_ENABLE) {
                // fake the PHY: autonegotiation completes instantly
                // with the link partner echoing our advertisement
                regs.tanlpar = regs.tanar;
                regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
            }

#if 0
            if (reg & TBICR_MR_RESTART_AN) ;
#endif

            break;

          case TBISR:
            panic("TBISR is read only register!\n");

          case TANAR:
            regs.tanar = reg;
            if (reg & TANAR_PS2)
                panic("this isn't used in driver, something wrong!\n");

            if (reg & TANAR_PS1)
                panic("this isn't used in driver, something wrong!\n");
            break;

          case TANLPAR:
            panic("this should only be written to by the fake phy!\n");

          case TANER:
            panic("TANER is read only register!\n");

          case TESR:
            regs.tesr = reg;
            break;

          default:
            panic("thought i covered all the register, what is this? addr=%#x",
                  daddr);
        }
    } else
        panic("Invalid Request Size");

    return No_Fault;
}
907
908 void
909 NSGigE::devIntrPost(uint32_t interrupts)
910 {
911 bool delay = false;
912
913 if (interrupts & ISR_RESERVE)
914 panic("Cannot set a reserved interrupt");
915
916 if (interrupts & ISR_TXRCMP)
917 regs.isr |= ISR_TXRCMP;
918
919 if (interrupts & ISR_RXRCMP)
920 regs.isr |= ISR_RXRCMP;
921
922 //ISR_DPERR not implemented
923 //ISR_SSERR not implemented
924 //ISR_RMABT not implemented
925 //ISR_RXSOVR not implemented
926 //ISR_HIBINT not implemented
927 //ISR_PHY not implemented
928 //ISR_PME not implemented
929
930 if (interrupts & ISR_SWI)
931 regs.isr |= ISR_SWI;
932
933 //ISR_MIB not implemented
934 //ISR_TXURN not implemented
935
936 if (interrupts & ISR_TXIDLE)
937 regs.isr |= ISR_TXIDLE;
938
939 if (interrupts & ISR_TXERR)
940 regs.isr |= ISR_TXERR;
941
942 if (interrupts & ISR_TXDESC)
943 regs.isr |= ISR_TXDESC;
944
945 if (interrupts & ISR_TXOK) {
946 regs.isr |= ISR_TXOK;
947 delay = true;
948 }
949
950 if (interrupts & ISR_RXORN)
951 regs.isr |= ISR_RXORN;
952
953 if (interrupts & ISR_RXIDLE)
954 regs.isr |= ISR_RXIDLE;
955
956 //ISR_RXEARLY not implemented
957
958 if (interrupts & ISR_RXERR)
959 regs.isr |= ISR_RXERR;
960
961 if (interrupts & ISR_RXDESC)
962 regs.isr |= ISR_RXDESC;
963
964 if (interrupts & ISR_RXOK) {
965 delay = true;
966 regs.isr |= ISR_RXOK;
967 }
968
969 if ((regs.isr & regs.imr)) {
970 Tick when = curTick;
971 if (delay)
972 when += intrDelay;
973 cpuIntrPost(when);
974 }
975
976 DPRINTF(EthernetIntr, "**interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
977 interrupts, regs.isr, regs.imr);
978 }
979
980 void
981 NSGigE::devIntrClear(uint32_t interrupts)
982 {
983 if (interrupts & ISR_RESERVE)
984 panic("Cannot clear a reserved interrupt");
985
986 if (interrupts & ISR_TXRCMP)
987 regs.isr &= ~ISR_TXRCMP;
988
989 if (interrupts & ISR_RXRCMP)
990 regs.isr &= ~ISR_RXRCMP;
991
992 //ISR_DPERR not implemented
993 //ISR_SSERR not implemented
994 //ISR_RMABT not implemented
995 //ISR_RXSOVR not implemented
996 //ISR_HIBINT not implemented
997 //ISR_PHY not implemented
998 //ISR_PME not implemented
999
1000 if (interrupts & ISR_SWI)
1001 regs.isr &= ~ISR_SWI;
1002
1003 //ISR_MIB not implemented
1004 //ISR_TXURN not implemented
1005
1006 if (interrupts & ISR_TXIDLE)
1007 regs.isr &= ~ISR_TXIDLE;
1008
1009 if (interrupts & ISR_TXERR)
1010 regs.isr &= ~ISR_TXERR;
1011
1012 if (interrupts & ISR_TXDESC)
1013 regs.isr &= ~ISR_TXDESC;
1014
1015 if (interrupts & ISR_TXOK)
1016 regs.isr &= ~ISR_TXOK;
1017
1018 if (interrupts & ISR_RXORN)
1019 regs.isr &= ~ISR_RXORN;
1020
1021 if (interrupts & ISR_RXIDLE)
1022 regs.isr &= ~ISR_RXIDLE;
1023
1024 //ISR_RXEARLY not implemented
1025
1026 if (interrupts & ISR_RXERR)
1027 regs.isr &= ~ISR_RXERR;
1028
1029 if (interrupts & ISR_RXDESC)
1030 regs.isr &= ~ISR_RXDESC;
1031
1032 if (interrupts & ISR_RXOK)
1033 regs.isr &= ~ISR_RXOK;
1034
1035 if (!(regs.isr & regs.imr))
1036 cpuIntrClear();
1037
1038 DPRINTF(EthernetIntr, "**interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
1039 interrupts, regs.isr, regs.imr);
1040 }
1041
1042 void
1043 NSGigE::devIntrChangeMask()
1044 {
1045 DPRINTF(EthernetIntr, "interrupt mask changed\n");
1046
1047 if (regs.isr & regs.imr)
1048 cpuIntrPost(curTick);
1049 else
1050 cpuIntrClear();
1051 }
1052
/**
 * Schedule (or immediately deliver) a CPU interrupt at tick @p when.
 *
 * If an earlier interrupt is already scheduled, the new request is
 * absorbed by it; otherwise any later pending event is squashed and
 * rescheduled for the earlier time.  A request in the past fires
 * immediately.
 */
void
NSGigE::cpuIntrPost(Tick when)
{
    //If the interrupt you want to post is later than an
    //interrupt already scheduled, just let it post in the coming one and
    //don't schedule another.
    //HOWEVER, must be sure that the scheduled intrTick is in the future
    //(this was formerly the source of a bug)
    assert((intrTick >= curTick) || (intrTick == 0));
    if (when > intrTick && intrTick != 0)
        return;

    intrTick = when;

    // cancel any already-scheduled (later) interrupt event; the
    // squashed event frees itself when it fires
    if (intrEvent) {
        intrEvent->squash();
        intrEvent = 0;
    }

    if (when < curTick) {
        // requested time already passed: deliver right now
        cpuInterrupt();
    } else {
        DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
                intrTick);
        intrEvent = new IntrEvent(this, true);
        intrEvent->schedule(intrTick);
    }
}
1081
/**
 * Actually raise the interrupt line to the CPU (via the Tsunami cchip).
 *
 * Called either directly from cpuIntrPost() or from the scheduled
 * IntrEvent.  Skips delivery if an interrupt is already outstanding
 * (level-triggered; one post is enough) or if the scheduled delivery
 * time has not arrived yet.
 */
void
NSGigE::cpuInterrupt()
{
    // Don't send an interrupt if there's already one
    if (cpuPendingIntr) {
        DPRINTF(EthernetIntr,
                "would send an interrupt now, but there's already pending\n");
        intrTick = 0;
        return;
    }
    // Don't send an interrupt if it's supposed to be delayed
    if (intrTick > curTick) {
        DPRINTF(EthernetIntr, "an interrupt is scheduled for %d, wait til then\n",
                intrTick);
        return;
    }

    // Whether or not there's a pending interrupt, we don't care about
    // it anymore
    intrEvent = 0;
    intrTick = 0;

    // Send interrupt
    cpuPendingIntr = true;
    /** @todo rework the intctrl to be tsunami ok */
    //intctrl->post(TheISA::INTLEVEL_IRQ1, TheISA::INTINDEX_ETHERNET);
    DPRINTF(EthernetIntr, "Posting interrupts to cchip!\n");
    tsunami->cchip->postDRIR(configData->config.hdr.pci0.interruptLine);
}
1111
1112 void
1113 NSGigE::cpuIntrClear()
1114 {
1115 if (cpuPendingIntr) {
1116 cpuPendingIntr = false;
1117 /** @todo rework the intctrl to be tsunami ok */
1118 //intctrl->clear(TheISA::INTLEVEL_IRQ1, TheISA::INTINDEX_ETHERNET);
1119 DPRINTF(EthernetIntr, "clearing all interrupts from cchip\n");
1120 tsunami->cchip->clearDRIR(configData->config.hdr.pci0.interruptLine);
1121 }
1122 }
1123
1124 bool
1125 NSGigE::cpuIntrPending() const
1126 { return cpuPendingIntr; }
1127
/**
 * Reset the transmit state machine to its power-on state.
 *
 * Drains the tx FIFO, restores its full capacity, clears the transmit
 * enable bit in CR and returns the machine to txIdle.  Must only be
 * called when no tx DMA or descriptor work is in flight (asserted).
 */
void
NSGigE::txReset()
{

    DPRINTF(Ethernet, "transmit reset\n");

    CTDD = false;
    txFifoAvail = maxTxFifoSize;
    txHalt = false;
    txFragPtr = 0;
    assert(txDescCnt == 0);
    txFifo.clear();
    regs.command &= ~CR_TXE;
    txState = txIdle;
    assert(txDmaState == dmaIdle);
}
1144
/**
 * Reset the receive state machine to its power-on state.
 *
 * Drains the rx FIFO, clears the receive enable bit in CR and returns
 * the machine to rxIdle.  Must only be called when no rx DMA, packet
 * or descriptor work is in flight (asserted).
 */
void
NSGigE::rxReset()
{
    DPRINTF(Ethernet, "receive reset\n");

    CRDD = false;
    assert(rxPktBytes == 0);
    rxFifoCnt = 0;
    rxHalt = false;
    rxFragPtr = 0;
    assert(rxDescCnt == 0);
    assert(rxDmaState == dmaIdle);
    rxFifo.clear();
    regs.command &= ~CR_RXE;
    rxState = rxIdle;
}
1161
/**
 * Restore the register file to its hardware reset values.
 *
 * Zeroes everything, then sets the handful of registers with nonzero
 * power-on defaults (values presumably taken from the DP83820
 * datasheet reset state -- TODO confirm against the spec sheet), and
 * clears the cached rx-filter / extsts flags that shadow register bits.
 */
void NSGigE::regsReset()
{
    memset(&regs, 0, sizeof(regs));
    regs.config = 0x80000000;
    regs.mear = 0x12;
    regs.isr = 0x00608000;
    regs.txcfg = 0x120;
    regs.rxcfg = 0x4;
    regs.srr = 0x0103;
    regs.mibc = 0x2;
    regs.vdr = 0x81;
    regs.tesr = 0xc000;

    // shadow flags derived from register bits; reset alongside them
    extstsEnable = false;
    acceptBroadcast = false;
    acceptMulticast = false;
    acceptUnicast = false;
    acceptPerfect = false;
    acceptArp = false;
}
1182
/**
 * Perform the actual data movement for an rx DMA read: copy rxDmaLen
 * bytes from guest physical memory at rxDmaAddr into rxDmaData, then
 * mark the DMA engine idle.  Called once any modeled latency has
 * elapsed (or immediately when latency modeling is off).
 */
void
NSGigE::rxDmaReadCopy()
{
    assert(rxDmaState == dmaReading);

    memcpy(rxDmaData, physmem->dma_addr(rxDmaAddr, rxDmaLen), rxDmaLen);
    rxDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
            rxDmaAddr, rxDmaLen);
    DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
}
1195
/**
 * Start an rx DMA read (parameters in rxDmaAddr/rxDmaData/rxDmaLen).
 *
 * Three timing modes:
 *  - bus-modeled DMA (dmaInterface set and this transfer not marked
 *    free): issue on the bus, or wait if it is busy;
 *  - free DMA with zero delay/factor: copy immediately;
 *  - otherwise: schedule rxDmaReadEvent after a latency proportional
 *    to the transfer size (per 64-byte chunk) plus a fixed delay.
 *
 * @return true if the caller must wait for an event (DMA pending),
 *         false if the copy already completed.
 */
bool
NSGigE::doRxDmaRead()
{
    assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
    rxDmaState = dmaReading;

    if (dmaInterface && !rxDmaFree) {
        if (dmaInterface->busy())
            rxDmaState = dmaReadWaiting;
        else
            dmaInterface->doDMA(Read, rxDmaAddr, rxDmaLen, curTick,
                                &rxDmaReadEvent, true);
        return true;
    }

    if (dmaReadDelay == 0 && dmaReadFactor == 0) {
        rxDmaReadCopy();
        return false;
    }

    // latency scales with the number of 64-byte chunks (rounded up)
    Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
    Tick start = curTick + dmaReadDelay + factor;
    rxDmaReadEvent.schedule(start);
    return true;
}
1221
/**
 * Completion callback for a delayed/bus-modeled rx DMA read: do the
 * copy, then restart any tx DMA that was waiting on the bus before
 * kicking the rx state machine forward.
 */
void
NSGigE::rxDmaReadDone()
{
    assert(rxDmaState == dmaReading);
    rxDmaReadCopy();

    // If the transmit state machine has a pending DMA, let it go first
    if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
        txKick();

    rxKick();
}
1234
/**
 * Perform the actual data movement for an rx DMA write: copy rxDmaLen
 * bytes from rxDmaData into guest physical memory at rxDmaAddr, then
 * mark the DMA engine idle.  Called once any modeled latency has
 * elapsed (or immediately when latency modeling is off).
 */
void
NSGigE::rxDmaWriteCopy()
{
    assert(rxDmaState == dmaWriting);

    memcpy(physmem->dma_addr(rxDmaAddr, rxDmaLen), rxDmaData, rxDmaLen);
    rxDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
            rxDmaAddr, rxDmaLen);
    DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
}
1247
/**
 * Start an rx DMA write; mirror image of doRxDmaRead() with the same
 * three timing modes (bus-modeled via WriteInvalidate, immediate copy,
 * or event scheduled after size-proportional latency).
 *
 * @return true if the caller must wait for an event (DMA pending),
 *         false if the copy already completed.
 */
bool
NSGigE::doRxDmaWrite()
{
    assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
    rxDmaState = dmaWriting;

    if (dmaInterface && !rxDmaFree) {
        if (dmaInterface->busy())
            rxDmaState = dmaWriteWaiting;
        else
            dmaInterface->doDMA(WriteInvalidate, rxDmaAddr, rxDmaLen, curTick,
                                &rxDmaWriteEvent, true);
        return true;
    }

    if (dmaWriteDelay == 0 && dmaWriteFactor == 0) {
        rxDmaWriteCopy();
        return false;
    }

    // latency scales with the number of 64-byte chunks (rounded up)
    Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
    Tick start = curTick + dmaWriteDelay + factor;
    rxDmaWriteEvent.schedule(start);
    return true;
}
1273
/**
 * Completion callback for a delayed/bus-modeled rx DMA write: do the
 * copy, then restart any tx DMA that was waiting on the bus before
 * kicking the rx state machine forward.
 */
void
NSGigE::rxDmaWriteDone()
{
    assert(rxDmaState == dmaWriting);
    rxDmaWriteCopy();

    // If the transmit state machine has a pending DMA, let it go first
    if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
        txKick();

    rxKick();
}
1286
1287 void
1288 NSGigE::rxKick()
1289 {
1290 DPRINTF(EthernetSM, "receive kick state=%s (rxBuf.size=%d)\n",
1291 NsRxStateStrings[rxState], rxFifo.size());
1292
1293 if (rxKickTick > curTick) {
1294 DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
1295 rxKickTick);
1296 return;
1297 }
1298
1299 next:
1300 switch(rxDmaState) {
1301 case dmaReadWaiting:
1302 if (doRxDmaRead())
1303 goto exit;
1304 break;
1305 case dmaWriteWaiting:
1306 if (doRxDmaWrite())
1307 goto exit;
1308 break;
1309 default:
1310 break;
1311 }
1312
1313 // see state machine from spec for details
1314 // the way this works is, if you finish work on one state and can go directly to
1315 // another, you do that through jumping to the label "next". however, if you have
1316 // intermediate work, like DMA so that you can't go to the next state yet, you go to
1317 // exit and exit the loop. however, when the DMA is done it will trigger an
1318 // event and come back to this loop.
1319 switch (rxState) {
1320 case rxIdle:
1321 if (!regs.command & CR_RXE) {
1322 DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n");
1323 goto exit;
1324 }
1325
1326 if (CRDD) {
1327 rxState = rxDescRefr;
1328
1329 rxDmaAddr = regs.rxdp & 0x3fffffff;
1330 rxDmaData = &rxDescCache + offsetof(ns_desc, link);
1331 rxDmaLen = sizeof(rxDescCache.link);
1332 rxDmaFree = dmaDescFree;
1333
1334 descDmaReads++;
1335 descDmaRdBytes += rxDmaLen;
1336
1337 if (doRxDmaRead())
1338 goto exit;
1339 } else {
1340 rxState = rxDescRead;
1341
1342 rxDmaAddr = regs.rxdp & 0x3fffffff;
1343 rxDmaData = &rxDescCache;
1344 rxDmaLen = sizeof(ns_desc);
1345 rxDmaFree = dmaDescFree;
1346
1347 descDmaReads++;
1348 descDmaRdBytes += rxDmaLen;
1349
1350 if (doRxDmaRead())
1351 goto exit;
1352 }
1353 break;
1354
1355 case rxDescRefr:
1356 if (rxDmaState != dmaIdle)
1357 goto exit;
1358
1359 rxState = rxAdvance;
1360 break;
1361
1362 case rxDescRead:
1363 if (rxDmaState != dmaIdle)
1364 goto exit;
1365
1366 DPRINTF(EthernetDesc,
1367 "rxDescCache:\n\tlink=%08x\n\tbufptr=%08x\n\tcmdsts=%08x\n\textsts=%08x\n"
1368 ,rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts,
1369 rxDescCache.extsts);
1370
1371 if (rxDescCache.cmdsts & CMDSTS_OWN) {
1372 rxState = rxIdle;
1373 } else {
1374 rxState = rxFifoBlock;
1375 rxFragPtr = rxDescCache.bufptr;
1376 rxDescCnt = rxDescCache.cmdsts & CMDSTS_LEN_MASK;
1377 }
1378 break;
1379
1380 case rxFifoBlock:
1381 if (!rxPacket) {
1382 /**
1383 * @todo in reality, we should be able to start processing
1384 * the packet as it arrives, and not have to wait for the
1385 * full packet ot be in the receive fifo.
1386 */
1387 if (rxFifo.empty())
1388 goto exit;
1389
1390 DPRINTF(EthernetSM, "\n\n*****processing receive of new packet\n");
1391
1392 // If we don't have a packet, grab a new one from the fifo.
1393 rxPacket = rxFifo.front();
1394 rxPktBytes = rxPacket->length;
1395 rxPacketBufPtr = rxPacket->data;
1396
1397 #if TRACING_ON
1398 if (DTRACE(Ethernet)) {
1399 if (rxPacket->isIpPkt()) {
1400 ip_header *ip = rxPacket->getIpHdr();
1401 DPRINTF(Ethernet, "ID is %d\n", reverseEnd16(ip->ID));
1402 if (rxPacket->isTcpPkt()) {
1403 tcp_header *tcp = rxPacket->getTcpHdr(ip);
1404 DPRINTF(Ethernet, "Src Port = %d, Dest Port = %d\n",
1405 reverseEnd16(tcp->src_port_num),
1406 reverseEnd16(tcp->dest_port_num));
1407 }
1408 }
1409 }
1410 #endif
1411
1412 // sanity check - i think the driver behaves like this
1413 assert(rxDescCnt >= rxPktBytes);
1414
1415 // Must clear the value before popping to decrement the
1416 // reference count
1417 rxFifo.front() = NULL;
1418 rxFifo.pop_front();
1419 rxFifoCnt -= rxPacket->length;
1420 }
1421
1422
1423 // dont' need the && rxDescCnt > 0 if driver sanity check above holds
1424 if (rxPktBytes > 0) {
1425 rxState = rxFragWrite;
1426 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity check holds
1427 rxXferLen = rxPktBytes;
1428
1429 rxDmaAddr = rxFragPtr & 0x3fffffff;
1430 rxDmaData = rxPacketBufPtr;
1431 rxDmaLen = rxXferLen;
1432 rxDmaFree = dmaDataFree;
1433
1434 if (doRxDmaWrite())
1435 goto exit;
1436
1437 } else {
1438 rxState = rxDescWrite;
1439
1440 //if (rxPktBytes == 0) { /* packet is done */
1441 assert(rxPktBytes == 0);
1442 DPRINTF(EthernetSM, "done with receiving packet\n");
1443
1444 rxDescCache.cmdsts |= CMDSTS_OWN;
1445 rxDescCache.cmdsts &= ~CMDSTS_MORE;
1446 rxDescCache.cmdsts |= CMDSTS_OK;
1447 rxDescCache.cmdsts &= 0xffff0000;
1448 rxDescCache.cmdsts += rxPacket->length; //i.e. set CMDSTS_SIZE
1449
1450 #if 0
1451 /* all the driver uses these are for its own stats keeping
1452 which we don't care about, aren't necessary for functionality
1453 and doing this would just slow us down. if they end up using
1454 this in a later version for functional purposes, just undef
1455 */
1456 if (rxFilterEnable) {
1457 rxDescCache.cmdsts &= ~CMDSTS_DEST_MASK;
1458 if (rxFifo.front()->IsUnicast())
1459 rxDescCache.cmdsts |= CMDSTS_DEST_SELF;
1460 if (rxFifo.front()->IsMulticast())
1461 rxDescCache.cmdsts |= CMDSTS_DEST_MULTI;
1462 if (rxFifo.front()->IsBroadcast())
1463 rxDescCache.cmdsts |= CMDSTS_DEST_MASK;
1464 }
1465 #endif
1466
1467 if (rxPacket->isIpPkt() && extstsEnable) {
1468 rxDescCache.extsts |= EXTSTS_IPPKT;
1469 rxIPChecksums++;
1470 if (!ipChecksum(rxPacket, false)) {
1471 DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
1472 rxDescCache.extsts |= EXTSTS_IPERR;
1473 }
1474 if (rxPacket->isTcpPkt()) {
1475 rxDescCache.extsts |= EXTSTS_TCPPKT;
1476 rxTCPChecksums++;
1477 if (!tcpChecksum(rxPacket, false)) {
1478 DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
1479 rxDescCache.extsts |= EXTSTS_TCPERR;
1480
1481 }
1482 } else if (rxPacket->isUdpPkt()) {
1483 rxDescCache.extsts |= EXTSTS_UDPPKT;
1484 if (!udpChecksum(rxPacket, false)) {
1485 DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
1486 rxDescCache.extsts |= EXTSTS_UDPERR;
1487 }
1488 }
1489 }
1490 rxPacket = 0;
1491
1492 /* the driver seems to always receive into desc buffers
1493 of size 1514, so you never have a pkt that is split
1494 into multiple descriptors on the receive side, so
1495 i don't implement that case, hence the assert above.
1496 */
1497
1498 DPRINTF(EthernetDesc, "rxDesc writeback:\n\tcmdsts=%08x\n\textsts=%08x\n",
1499 rxDescCache.cmdsts, rxDescCache.extsts);
1500
1501 rxDmaAddr = (regs.rxdp + offsetof(ns_desc, cmdsts)) & 0x3fffffff;
1502 rxDmaData = &(rxDescCache.cmdsts);
1503 rxDmaLen = sizeof(rxDescCache.cmdsts) + sizeof(rxDescCache.extsts);
1504 rxDmaFree = dmaDescFree;
1505
1506 descDmaWrites++;
1507 descDmaWrBytes += rxDmaLen;
1508
1509 if (doRxDmaWrite())
1510 goto exit;
1511 }
1512 break;
1513
1514 case rxFragWrite:
1515 if (rxDmaState != dmaIdle)
1516 goto exit;
1517
1518 rxPacketBufPtr += rxXferLen;
1519 rxFragPtr += rxXferLen;
1520 rxPktBytes -= rxXferLen;
1521
1522 rxState = rxFifoBlock;
1523 break;
1524
1525 case rxDescWrite:
1526 if (rxDmaState != dmaIdle)
1527 goto exit;
1528
1529 assert(rxDescCache.cmdsts & CMDSTS_OWN);
1530
1531 assert(rxPacket == 0);
1532 devIntrPost(ISR_RXOK);
1533
1534 if (rxDescCache.cmdsts & CMDSTS_INTR)
1535 devIntrPost(ISR_RXDESC);
1536
1537 if (rxHalt) {
1538 DPRINTF(EthernetSM, "Halting the RX state machine\n");
1539 rxState = rxIdle;
1540 rxHalt = false;
1541 } else
1542 rxState = rxAdvance;
1543 break;
1544
1545 case rxAdvance:
1546 if (rxDescCache.link == 0) {
1547 rxState = rxIdle;
1548 return;
1549 } else {
1550 rxState = rxDescRead;
1551 regs.rxdp = rxDescCache.link;
1552 CRDD = false;
1553
1554 rxDmaAddr = regs.rxdp & 0x3fffffff;
1555 rxDmaData = &rxDescCache;
1556 rxDmaLen = sizeof(ns_desc);
1557 rxDmaFree = dmaDescFree;
1558
1559 if (doRxDmaRead())
1560 goto exit;
1561 }
1562 break;
1563
1564 default:
1565 panic("Invalid rxState!");
1566 }
1567
1568
1569 DPRINTF(EthernetSM, "entering next rx state = %s\n",
1570 NsRxStateStrings[rxState]);
1571
1572 if (rxState == rxIdle) {
1573 regs.command &= ~CR_RXE;
1574 devIntrPost(ISR_RXIDLE);
1575 return;
1576 }
1577
1578 goto next;
1579
1580 exit:
1581 /**
1582 * @todo do we want to schedule a future kick?
1583 */
1584 DPRINTF(EthernetSM, "rx state machine exited state=%s\n",
1585 NsRxStateStrings[rxState]);
1586 }
1587
/*
 * Attempt to hand the packet at the head of txFifo to the ethernet
 * interface.  On success: update stats, return the packet's bytes to
 * the fifo's free space, drop the fifo's reference, and post ISR_TXOK.
 * On failure the packet stays queued.  While packets remain and no
 * retry is already scheduled, a retry event is scheduled.
 */
void
NSGigE::transmit()
{
    if (txFifo.empty()) {
        DPRINTF(Ethernet, "nothing to transmit\n");
        return;
    }

    DPRINTF(Ethernet, "\n\nAttempt Pkt Transmit: txFifo length = %d\n",
            maxTxFifoSize - txFifoAvail);
    if (interface->sendPacket(txFifo.front())) {
#if TRACING_ON
        // debug-only dump of the outgoing packet's IP/TCP header fields
        if (DTRACE(Ethernet)) {
            if (txFifo.front()->isIpPkt()) {
                ip_header *ip = txFifo.front()->getIpHdr();
                DPRINTF(Ethernet, "ID is %d\n", reverseEnd16(ip->ID));
                if (txFifo.front()->isTcpPkt()) {
                    tcp_header *tcp = txFifo.front()->getTcpHdr(ip);
                    DPRINTF(Ethernet, "Src Port = %d, Dest Port = %d\n",
                            reverseEnd16(tcp->src_port_num),
                            reverseEnd16(tcp->dest_port_num));
                }
            }
        }
#endif

        DDUMP(Ethernet, txFifo.front()->data, txFifo.front()->length);
        txBytes += txFifo.front()->length;
        txPackets++;

        // the transmitted packet's bytes no longer occupy fifo space
        txFifoAvail += txFifo.front()->length;

        DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n", txFifoAvail);
        // clear the slot before popping to release the reference the
        // fifo holds on the packet
        txFifo.front() = NULL;
        txFifo.pop_front();

        /* normally do a writeback of the descriptor here, and ONLY after that is
           done, send this interrupt.  but since our stuff never actually fails,
           just do this interrupt here, otherwise the code has to stray from this
           nice format.  besides, it's functionally the same.
        */
        devIntrPost(ISR_TXOK);
    } else
        DPRINTF(Ethernet, "May need to rethink always sending the descriptors back?\n");

    // keep draining: retry in 1000 ticks unless a retry is already queued
    if (!txFifo.empty() && !txEvent.scheduled()) {
        DPRINTF(Ethernet, "reschedule transmit\n");
        txEvent.schedule(curTick + 1000);
    }
}
1638
1639 void
1640 NSGigE::txDmaReadCopy()
1641 {
1642 assert(txDmaState == dmaReading);
1643
1644 memcpy(txDmaData, physmem->dma_addr(txDmaAddr, txDmaLen), txDmaLen);
1645 txDmaState = dmaIdle;
1646
1647 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
1648 txDmaAddr, txDmaLen);
1649 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1650 }
1651
1652 bool
1653 NSGigE::doTxDmaRead()
1654 {
1655 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
1656 txDmaState = dmaReading;
1657
1658 if (dmaInterface && !txDmaFree) {
1659 if (dmaInterface->busy())
1660 txDmaState = dmaReadWaiting;
1661 else
1662 dmaInterface->doDMA(Read, txDmaAddr, txDmaLen, curTick,
1663 &txDmaReadEvent, true);
1664 return true;
1665 }
1666
1667 if (dmaReadDelay == 0 && dmaReadFactor == 0.0) {
1668 txDmaReadCopy();
1669 return false;
1670 }
1671
1672 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
1673 Tick start = curTick + dmaReadDelay + factor;
1674 txDmaReadEvent.schedule(start);
1675 return true;
1676 }
1677
1678 void
1679 NSGigE::txDmaReadDone()
1680 {
1681 assert(txDmaState == dmaReading);
1682 txDmaReadCopy();
1683
1684 // If the receive state machine has a pending DMA, let it go first
1685 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1686 rxKick();
1687
1688 txKick();
1689 }
1690
1691 void
1692 NSGigE::txDmaWriteCopy()
1693 {
1694 assert(txDmaState == dmaWriting);
1695
1696 memcpy(physmem->dma_addr(txDmaAddr, txDmaLen), txDmaData, txDmaLen);
1697 txDmaState = dmaIdle;
1698
1699 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
1700 txDmaAddr, txDmaLen);
1701 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1702 }
1703
1704 bool
1705 NSGigE::doTxDmaWrite()
1706 {
1707 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
1708 txDmaState = dmaWriting;
1709
1710 if (dmaInterface && !txDmaFree) {
1711 if (dmaInterface->busy())
1712 txDmaState = dmaWriteWaiting;
1713 else
1714 dmaInterface->doDMA(WriteInvalidate, txDmaAddr, txDmaLen, curTick,
1715 &txDmaWriteEvent, true);
1716 return true;
1717 }
1718
1719 if (dmaWriteDelay == 0 && dmaWriteFactor == 0.0) {
1720 txDmaWriteCopy();
1721 return false;
1722 }
1723
1724 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
1725 Tick start = curTick + dmaWriteDelay + factor;
1726 txDmaWriteEvent.schedule(start);
1727 return true;
1728 }
1729
1730 void
1731 NSGigE::txDmaWriteDone()
1732 {
1733 assert(txDmaState == dmaWriting);
1734 txDmaWriteCopy();
1735
1736 // If the receive state machine has a pending DMA, let it go first
1737 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1738 rxKick();
1739
1740 txKick();
1741 }
1742
1743 void
1744 NSGigE::txKick()
1745 {
1746 DPRINTF(EthernetSM, "transmit kick state=%s\n", NsTxStateStrings[txState]);
1747
1748 if (txKickTick > curTick) {
1749 DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
1750 txKickTick);
1751
1752 return;
1753 }
1754
1755 next:
1756 switch(txDmaState) {
1757 case dmaReadWaiting:
1758 if (doTxDmaRead())
1759 goto exit;
1760 break;
1761 case dmaWriteWaiting:
1762 if (doTxDmaWrite())
1763 goto exit;
1764 break;
1765 default:
1766 break;
1767 }
1768
1769 switch (txState) {
1770 case txIdle:
1771 if (!regs.command & CR_TXE) {
1772 DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n");
1773 goto exit;
1774 }
1775
1776 if (CTDD) {
1777 txState = txDescRefr;
1778
1779 txDmaAddr = regs.txdp & 0x3fffffff;
1780 txDmaData = &txDescCache + offsetof(ns_desc, link);
1781 txDmaLen = sizeof(txDescCache.link);
1782 txDmaFree = dmaDescFree;
1783
1784 descDmaReads++;
1785 descDmaRdBytes += txDmaLen;
1786
1787 if (doTxDmaRead())
1788 goto exit;
1789
1790 } else {
1791 txState = txDescRead;
1792
1793 txDmaAddr = regs.txdp & 0x3fffffff;
1794 txDmaData = &txDescCache;
1795 txDmaLen = sizeof(ns_desc);
1796 txDmaFree = dmaDescFree;
1797
1798 descDmaReads++;
1799 descDmaRdBytes += txDmaLen;
1800
1801 if (doTxDmaRead())
1802 goto exit;
1803 }
1804 break;
1805
1806 case txDescRefr:
1807 if (txDmaState != dmaIdle)
1808 goto exit;
1809
1810 txState = txAdvance;
1811 break;
1812
1813 case txDescRead:
1814 if (txDmaState != dmaIdle)
1815 goto exit;
1816
1817 DPRINTF(EthernetDesc,
1818 "txDescCache data:\n\tlink=%08x\n\tbufptr=%08x\n\tcmdsts=%08x\n\textsts=%08x\n"
1819 ,txDescCache.link, txDescCache.bufptr, txDescCache.cmdsts,
1820 txDescCache.extsts);
1821
1822 if (txDescCache.cmdsts & CMDSTS_OWN) {
1823 txState = txFifoBlock;
1824 txFragPtr = txDescCache.bufptr;
1825 txDescCnt = txDescCache.cmdsts & CMDSTS_LEN_MASK;
1826 } else {
1827 txState = txIdle;
1828 }
1829 break;
1830
1831 case txFifoBlock:
1832 if (!txPacket) {
1833 DPRINTF(EthernetSM, "\n\n*****starting the tx of a new packet\n");
1834 txPacket = new EtherPacket;
1835 txPacket->data = new uint8_t[16384];
1836 txPacketBufPtr = txPacket->data;
1837 }
1838
1839 if (txDescCnt == 0) {
1840 DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n");
1841 if (txDescCache.cmdsts & CMDSTS_MORE) {
1842 DPRINTF(EthernetSM, "there are more descriptors to come\n");
1843 txState = txDescWrite;
1844
1845 txDescCache.cmdsts &= ~CMDSTS_OWN;
1846
1847 txDmaAddr = (regs.txdp + offsetof(ns_desc, cmdsts)) & 0x3fffffff;
1848 txDmaData = &(txDescCache.cmdsts);
1849 txDmaLen = sizeof(txDescCache.cmdsts);
1850 txDmaFree = dmaDescFree;
1851
1852 if (doTxDmaWrite())
1853 goto exit;
1854
1855 } else { /* this packet is totally done */
1856 DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
1857 /* deal with the the packet that just finished */
1858 if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
1859 if (txDescCache.extsts & EXTSTS_UDPPKT) {
1860 udpChecksum(txPacket, true);
1861 } else if (txDescCache.extsts & EXTSTS_TCPPKT) {
1862 tcpChecksum(txPacket, true);
1863 txTCPChecksums++;
1864 }
1865 if (txDescCache.extsts & EXTSTS_IPPKT) {
1866 ipChecksum(txPacket, true);
1867 txIPChecksums++;
1868 }
1869 }
1870
1871 txPacket->length = txPacketBufPtr - txPacket->data;
1872 /* this is just because the receive can't handle a packet bigger
1873 want to make sure */
1874 assert(txPacket->length <= 1514);
1875 txFifo.push_back(txPacket);
1876
1877 /* this following section is not to spec, but functionally shouldn't
1878 be any different. normally, the chip will wait til the transmit has
1879 occurred before writing back the descriptor because it has to wait
1880 to see that it was successfully transmitted to decide whether to set
1881 CMDSTS_OK or not. however, in the simulator since it is always
1882 successfully transmitted, and writing it exactly to spec would
1883 complicate the code, we just do it here
1884 */
1885
1886 txDescCache.cmdsts &= ~CMDSTS_OWN;
1887 txDescCache.cmdsts |= CMDSTS_OK;
1888
1889 DPRINTF(EthernetDesc,
1890 "txDesc writeback:\n\tcmdsts=%08x\n\textsts=%08x\n",
1891 txDescCache.cmdsts, txDescCache.extsts);
1892
1893 txDmaAddr = (regs.txdp + offsetof(ns_desc, cmdsts)) & 0x3fffffff;
1894 txDmaData = &(txDescCache.cmdsts);
1895 txDmaLen = sizeof(txDescCache.cmdsts) + sizeof(txDescCache.extsts);
1896 txDmaFree = dmaDescFree;
1897
1898 descDmaWrites++;
1899 descDmaWrBytes += txDmaLen;
1900
1901 transmit();
1902 txPacket = 0;
1903
1904 if (txHalt) {
1905 DPRINTF(EthernetSM, "halting TX state machine\n");
1906 txState = txIdle;
1907 txHalt = false;
1908 } else
1909 txState = txAdvance;
1910
1911 if (doTxDmaWrite())
1912 goto exit;
1913 }
1914 } else {
1915 DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
1916 if (txFifoAvail) {
1917 txState = txFragRead;
1918
1919 /* The number of bytes transferred is either whatever is left
1920 in the descriptor (txDescCnt), or if there is not enough
1921 room in the fifo, just whatever room is left in the fifo
1922 */
1923 txXferLen = min<uint32_t>(txDescCnt, txFifoAvail);
1924
1925 txDmaAddr = txFragPtr & 0x3fffffff;
1926 txDmaData = txPacketBufPtr;
1927 txDmaLen = txXferLen;
1928 txDmaFree = dmaDataFree;
1929
1930 if (doTxDmaRead())
1931 goto exit;
1932 } else {
1933 txState = txFifoBlock;
1934 transmit();
1935
1936 goto exit;
1937 }
1938
1939 }
1940 break;
1941
1942 case txFragRead:
1943 if (txDmaState != dmaIdle)
1944 goto exit;
1945
1946 txPacketBufPtr += txXferLen;
1947 txFragPtr += txXferLen;
1948 txDescCnt -= txXferLen;
1949 txFifoAvail -= txXferLen;
1950
1951 txState = txFifoBlock;
1952 break;
1953
1954 case txDescWrite:
1955 if (txDmaState != dmaIdle)
1956 goto exit;
1957
1958 if (txDescCache.cmdsts & CMDSTS_INTR) {
1959 devIntrPost(ISR_TXDESC);
1960 }
1961
1962 txState = txAdvance;
1963 break;
1964
1965 case txAdvance:
1966 if (txDescCache.link == 0) {
1967 txState = txIdle;
1968 } else {
1969 txState = txDescRead;
1970 regs.txdp = txDescCache.link;
1971 CTDD = false;
1972
1973 txDmaAddr = txDescCache.link & 0x3fffffff;
1974 txDmaData = &txDescCache;
1975 txDmaLen = sizeof(ns_desc);
1976 txDmaFree = dmaDescFree;
1977
1978 if (doTxDmaRead())
1979 goto exit;
1980 }
1981 break;
1982
1983 default:
1984 panic("invalid state");
1985 }
1986
1987 DPRINTF(EthernetSM, "entering next tx state=%s\n",
1988 NsTxStateStrings[txState]);
1989
1990 if (txState == txIdle) {
1991 regs.command &= ~CR_TXE;
1992 devIntrPost(ISR_TXIDLE);
1993 return;
1994 }
1995
1996 goto next;
1997
1998 exit:
1999 /**
2000 * @todo do we want to schedule a future kick?
2001 */
2002 DPRINTF(EthernetSM, "tx state machine exited state=%s\n",
2003 NsTxStateStrings[txState]);
2004 }
2005
2006 void
2007 NSGigE::transferDone()
2008 {
2009 if (txFifo.empty())
2010 return;
2011
2012 if (txEvent.scheduled())
2013 txEvent.reschedule(curTick + 1);
2014 else
2015 txEvent.schedule(curTick + 1);
2016 }
2017
2018 bool
2019 NSGigE::rxFilter(PacketPtr packet)
2020 {
2021 bool drop = true;
2022 string type;
2023
2024 if (packet->IsUnicast()) {
2025 type = "unicast";
2026
2027 // If we're accepting all unicast addresses
2028 if (acceptUnicast)
2029 drop = false;
2030
2031 // If we make a perfect match
2032 if ((acceptPerfect)
2033 && (memcmp(rom.perfectMatch, packet->data, sizeof(rom.perfectMatch)) == 0))
2034 drop = false;
2035
2036 eth_header *eth = (eth_header *) packet->data;
2037 if ((acceptArp) && (eth->type == 0x608))
2038 drop = false;
2039
2040 } else if (packet->IsBroadcast()) {
2041 type = "broadcast";
2042
2043 // if we're accepting broadcasts
2044 if (acceptBroadcast)
2045 drop = false;
2046
2047 } else if (packet->IsMulticast()) {
2048 type = "multicast";
2049
2050 // if we're accepting all multicasts
2051 if (acceptMulticast)
2052 drop = false;
2053
2054 } else {
2055 type = "unknown";
2056
2057 // oh well, punt on this one
2058 }
2059
2060 if (drop) {
2061 DPRINTF(Ethernet, "rxFilter drop\n");
2062 DDUMP(EthernetData, packet->data, packet->length);
2063 }
2064
2065 return drop;
2066 }
2067
2068 bool
2069 NSGigE::recvPacket(PacketPtr packet)
2070 {
2071 rxBytes += packet->length;
2072 rxPackets++;
2073
2074 DPRINTF(Ethernet, "\n\nReceiving packet from wire, rxFifoAvail = %d\n", maxRxFifoSize - rxFifoCnt);
2075
2076 if (rxState == rxIdle) {
2077 DPRINTF(Ethernet, "receive disabled...packet dropped\n");
2078 interface->recvDone();
2079 return true;
2080 }
2081
2082 if (rxFilterEnable && rxFilter(packet)) {
2083 DPRINTF(Ethernet, "packet filtered...dropped\n");
2084 interface->recvDone();
2085 return true;
2086 }
2087
2088 if ((rxFifoCnt + packet->length) >= maxRxFifoSize) {
2089 DPRINTF(Ethernet,
2090 "packet will not fit in receive buffer...packet dropped\n");
2091 devIntrPost(ISR_RXORN);
2092 return false;
2093 }
2094
2095 rxFifo.push_back(packet);
2096 rxFifoCnt += packet->length;
2097 interface->recvDone();
2098
2099 rxKick();
2100 return true;
2101 }
2102
2103 /**
2104 * does a udp checksum. if gen is true, then it generates it and puts it in the right place
2105 * else, it just checks what it calculates against the value in the header in packet
2106 */
2107 bool
2108 NSGigE::udpChecksum(PacketPtr packet, bool gen)
2109 {
2110 ip_header *ip = packet->getIpHdr();
2111 udp_header *hdr = packet->getUdpHdr(ip);
2112
2113 pseudo_header *pseudo = new pseudo_header;
2114
2115 pseudo->src_ip_addr = ip->src_ip_addr;
2116 pseudo->dest_ip_addr = ip->dest_ip_addr;
2117 pseudo->protocol = ip->protocol;
2118 pseudo->len = hdr->len;
2119
2120 uint16_t cksum = checksumCalc((uint16_t *) pseudo, (uint16_t *) hdr,
2121 (uint32_t) hdr->len);
2122
2123 delete pseudo;
2124 if (gen)
2125 hdr->chksum = cksum;
2126 else
2127 if (cksum != 0)
2128 return false;
2129
2130 return true;
2131 }
2132
2133 bool
2134 NSGigE::tcpChecksum(PacketPtr packet, bool gen)
2135 {
2136 ip_header *ip = packet->getIpHdr();
2137 tcp_header *hdr = packet->getTcpHdr(ip);
2138
2139 uint16_t cksum;
2140 pseudo_header *pseudo = new pseudo_header;
2141 if (!gen) {
2142 pseudo->src_ip_addr = ip->src_ip_addr;
2143 pseudo->dest_ip_addr = ip->dest_ip_addr;
2144 pseudo->protocol = reverseEnd16(ip->protocol);
2145 pseudo->len = reverseEnd16(reverseEnd16(ip->dgram_len) - (ip->vers_len & 0xf)*4);
2146
2147 cksum = checksumCalc((uint16_t *) pseudo, (uint16_t *) hdr,
2148 (uint32_t) reverseEnd16(pseudo->len));
2149 } else {
2150 pseudo->src_ip_addr = 0;
2151 pseudo->dest_ip_addr = 0;
2152 pseudo->protocol = hdr->chksum;
2153 pseudo->len = 0;
2154 hdr->chksum = 0;
2155 cksum = checksumCalc((uint16_t *) pseudo, (uint16_t *) hdr,
2156 (uint32_t) (reverseEnd16(ip->dgram_len) - (ip->vers_len & 0xf)*4));
2157 }
2158
2159 delete pseudo;
2160 if (gen)
2161 hdr->chksum = cksum;
2162 else
2163 if (cksum != 0)
2164 return false;
2165
2166 return true;
2167 }
2168
2169 bool
2170 NSGigE::ipChecksum(PacketPtr packet, bool gen)
2171 {
2172 ip_header *hdr = packet->getIpHdr();
2173
2174 uint16_t cksum = checksumCalc(NULL, (uint16_t *) hdr, (hdr->vers_len & 0xf)*4);
2175
2176 if (gen) {
2177 DPRINTF(EthernetCksum, "generated checksum: %#x\n", cksum);
2178 hdr->hdr_chksum = cksum;
2179 }
2180 else
2181 if (cksum != 0)
2182 return false;
2183
2184 return true;
2185 }
2186
2187 uint16_t
2188 NSGigE::checksumCalc(uint16_t *pseudo, uint16_t *buf, uint32_t len)
2189 {
2190 uint32_t sum = 0;
2191
2192 uint16_t last_pad = 0;
2193 if (len & 1) {
2194 last_pad = buf[len/2] & 0xff;
2195 len--;
2196 sum += last_pad;
2197 }
2198
2199 if (pseudo) {
2200 sum = pseudo[0] + pseudo[1] + pseudo[2] +
2201 pseudo[3] + pseudo[4] + pseudo[5];
2202 }
2203
2204 for (int i=0; i < (len/2); ++i) {
2205 sum += buf[i];
2206 }
2207
2208 while (sum >> 16)
2209 sum = (sum >> 16) + (sum & 0xffff);
2210
2211 return ~sum;
2212 }
2213
2214 //=====================================================================
2215 //
2216 //
/*
 * Checkpoint the device.  The emission order here is the checkpoint's
 * wire format and must stay in lock step with unserialize().
 */
void
NSGigE::serialize(ostream &os)
{
    // Serialize the PciDev base class
    PciDev::serialize(os);

    /*
     * Finalize any DMA events now.
     */
    // completing in-flight copies here lets the DMA engines be
    // checkpointed as idle, so no DMA events need to be saved
    if (rxDmaReadEvent.scheduled())
        rxDmaReadCopy();
    if (rxDmaWriteEvent.scheduled())
        rxDmaWriteCopy();
    if (txDmaReadEvent.scheduled())
        txDmaReadCopy();
    if (txDmaWriteEvent.scheduled())
        txDmaWriteCopy();

    /*
     * Serialize the device registers
     */
    SERIALIZE_SCALAR(regs.command);
    SERIALIZE_SCALAR(regs.config);
    SERIALIZE_SCALAR(regs.mear);
    SERIALIZE_SCALAR(regs.ptscr);
    SERIALIZE_SCALAR(regs.isr);
    SERIALIZE_SCALAR(regs.imr);
    SERIALIZE_SCALAR(regs.ier);
    SERIALIZE_SCALAR(regs.ihr);
    SERIALIZE_SCALAR(regs.txdp);
    SERIALIZE_SCALAR(regs.txdp_hi);
    SERIALIZE_SCALAR(regs.txcfg);
    SERIALIZE_SCALAR(regs.gpior);
    SERIALIZE_SCALAR(regs.rxdp);
    SERIALIZE_SCALAR(regs.rxdp_hi);
    SERIALIZE_SCALAR(regs.rxcfg);
    SERIALIZE_SCALAR(regs.pqcr);
    SERIALIZE_SCALAR(regs.wcsr);
    SERIALIZE_SCALAR(regs.pcr);
    SERIALIZE_SCALAR(regs.rfcr);
    SERIALIZE_SCALAR(regs.rfdr);
    SERIALIZE_SCALAR(regs.srr);
    SERIALIZE_SCALAR(regs.mibc);
    SERIALIZE_SCALAR(regs.vrcr);
    SERIALIZE_SCALAR(regs.vtcr);
    SERIALIZE_SCALAR(regs.vdr);
    SERIALIZE_SCALAR(regs.ccsr);
    SERIALIZE_SCALAR(regs.tbicr);
    SERIALIZE_SCALAR(regs.tbisr);
    SERIALIZE_SCALAR(regs.tanar);
    SERIALIZE_SCALAR(regs.tanlpar);
    SERIALIZE_SCALAR(regs.taner);
    SERIALIZE_SCALAR(regs.tesr);

    SERIALIZE_ARRAY(rom.perfectMatch, EADDR_LEN);

    SERIALIZE_SCALAR(ioEnable);

    /*
     * Serialize the data Fifos
     */
    // each queued packet is written under its own named section.
    // NOTE(review): unserialize() currently reads the tx fifo back
    // under "%s.rxFifo%d" keys, not "%s.txFifo%d" -- likely a bug
    // there; confirm before relying on tx-fifo checkpoint restore.
    int txNumPkts = txFifo.size();
    SERIALIZE_SCALAR(txNumPkts);
    int i = 0;
    pktiter_t end = txFifo.end();
    for (pktiter_t p = txFifo.begin(); p != end; ++p) {
        nameOut(os, csprintf("%s.txFifo%d", name(), i++));
        (*p)->serialize(os);
    }

    int rxNumPkts = rxFifo.size();
    SERIALIZE_SCALAR(rxNumPkts);
    i = 0;
    end = rxFifo.end();
    for (pktiter_t p = rxFifo.begin(); p != end; ++p) {
        nameOut(os, csprintf("%s.rxFifo%d", name(), i++));
        (*p)->serialize(os);
    }

    /*
     * Serialize the various helper variables
     */
    // in-flight packets are stored with their buffer cursor saved as
    // an offset so the pointer can be reconstructed on restore
    bool txPacketExists = txPacket;
    SERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        nameOut(os, csprintf("%s.txPacket", name()));
        txPacket->serialize(os);
        uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
        SERIALIZE_SCALAR(txPktBufPtr);
    }

    bool rxPacketExists = rxPacket;
    SERIALIZE_SCALAR(rxPacketExists);
    if (rxPacketExists) {
        nameOut(os, csprintf("%s.rxPacket", name()));
        rxPacket->serialize(os);
        uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
        SERIALIZE_SCALAR(rxPktBufPtr);
    }

    SERIALIZE_SCALAR(txXferLen);
    SERIALIZE_SCALAR(rxXferLen);

    /*
     * Serialize DescCaches
     */
    SERIALIZE_SCALAR(txDescCache.link);
    SERIALIZE_SCALAR(txDescCache.bufptr);
    SERIALIZE_SCALAR(txDescCache.cmdsts);
    SERIALIZE_SCALAR(txDescCache.extsts);
    SERIALIZE_SCALAR(rxDescCache.link);
    SERIALIZE_SCALAR(rxDescCache.bufptr);
    SERIALIZE_SCALAR(rxDescCache.cmdsts);
    SERIALIZE_SCALAR(rxDescCache.extsts);

    /*
     * Serialize tx state machine
     */
    // enums are widened to int for the checkpoint format
    int txState = this->txState;
    SERIALIZE_SCALAR(txState);
    SERIALIZE_SCALAR(CTDD);
    SERIALIZE_SCALAR(txFifoAvail);
    SERIALIZE_SCALAR(txHalt);
    SERIALIZE_SCALAR(txFragPtr);
    SERIALIZE_SCALAR(txDescCnt);
    int txDmaState = this->txDmaState;
    SERIALIZE_SCALAR(txDmaState);

    /*
     * Serialize rx state machine
     */
    int rxState = this->rxState;
    SERIALIZE_SCALAR(rxState);
    SERIALIZE_SCALAR(CRDD);
    SERIALIZE_SCALAR(rxPktBytes);
    SERIALIZE_SCALAR(rxFifoCnt);
    SERIALIZE_SCALAR(rxHalt);
    SERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState = this->rxDmaState;
    SERIALIZE_SCALAR(rxDmaState);

    SERIALIZE_SCALAR(extstsEnable);

    /*
     * If there's a pending transmit, store the time so we can
     * reschedule it later
     */
    // stored as a delta from curTick; 0 means no pending transmit
    Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
    SERIALIZE_SCALAR(transmitTick);

    /*
     * receive address filter settings
     */
    SERIALIZE_SCALAR(rxFilterEnable);
    SERIALIZE_SCALAR(acceptBroadcast);
    SERIALIZE_SCALAR(acceptMulticast);
    SERIALIZE_SCALAR(acceptUnicast);
    SERIALIZE_SCALAR(acceptPerfect);
    SERIALIZE_SCALAR(acceptArp);

    /*
     * Keep track of pending interrupt status.
     */
    SERIALIZE_SCALAR(intrTick);
    SERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick = 0;
    if (intrEvent)
        intrEventTick = intrEvent->when();
    SERIALIZE_SCALAR(intrEventTick);

}
2388
2389 void
2390 NSGigE::unserialize(Checkpoint *cp, const std::string &section)
2391 {
2392 // Unserialize the PciDev base class
2393 PciDev::unserialize(cp, section);
2394
2395 UNSERIALIZE_SCALAR(regs.command);
2396 UNSERIALIZE_SCALAR(regs.config);
2397 UNSERIALIZE_SCALAR(regs.mear);
2398 UNSERIALIZE_SCALAR(regs.ptscr);
2399 UNSERIALIZE_SCALAR(regs.isr);
2400 UNSERIALIZE_SCALAR(regs.imr);
2401 UNSERIALIZE_SCALAR(regs.ier);
2402 UNSERIALIZE_SCALAR(regs.ihr);
2403 UNSERIALIZE_SCALAR(regs.txdp);
2404 UNSERIALIZE_SCALAR(regs.txdp_hi);
2405 UNSERIALIZE_SCALAR(regs.txcfg);
2406 UNSERIALIZE_SCALAR(regs.gpior);
2407 UNSERIALIZE_SCALAR(regs.rxdp);
2408 UNSERIALIZE_SCALAR(regs.rxdp_hi);
2409 UNSERIALIZE_SCALAR(regs.rxcfg);
2410 UNSERIALIZE_SCALAR(regs.pqcr);
2411 UNSERIALIZE_SCALAR(regs.wcsr);
2412 UNSERIALIZE_SCALAR(regs.pcr);
2413 UNSERIALIZE_SCALAR(regs.rfcr);
2414 UNSERIALIZE_SCALAR(regs.rfdr);
2415 UNSERIALIZE_SCALAR(regs.srr);
2416 UNSERIALIZE_SCALAR(regs.mibc);
2417 UNSERIALIZE_SCALAR(regs.vrcr);
2418 UNSERIALIZE_SCALAR(regs.vtcr);
2419 UNSERIALIZE_SCALAR(regs.vdr);
2420 UNSERIALIZE_SCALAR(regs.ccsr);
2421 UNSERIALIZE_SCALAR(regs.tbicr);
2422 UNSERIALIZE_SCALAR(regs.tbisr);
2423 UNSERIALIZE_SCALAR(regs.tanar);
2424 UNSERIALIZE_SCALAR(regs.tanlpar);
2425 UNSERIALIZE_SCALAR(regs.taner);
2426 UNSERIALIZE_SCALAR(regs.tesr);
2427
2428 UNSERIALIZE_ARRAY(rom.perfectMatch, EADDR_LEN);
2429
2430 UNSERIALIZE_SCALAR(ioEnable);
2431
2432 /*
2433 * unserialize the data fifos
2434 */
2435 int txNumPkts;
2436 UNSERIALIZE_SCALAR(txNumPkts);
2437 int i;
2438 for (i = 0; i < txNumPkts; ++i) {
2439 PacketPtr p = new EtherPacket;
2440 p->unserialize(cp, csprintf("%s.rxFifo%d", section, i));
2441 txFifo.push_back(p);
2442 }
2443
2444 int rxNumPkts;
2445 UNSERIALIZE_SCALAR(rxNumPkts);
2446 for (i = 0; i < rxNumPkts; ++i) {
2447 PacketPtr p = new EtherPacket;
2448 p->unserialize(cp, csprintf("%s.rxFifo%d", section, i));
2449 rxFifo.push_back(p);
2450 }
2451
2452 /*
2453 * unserialize the various helper variables
2454 */
2455 bool txPacketExists;
2456 UNSERIALIZE_SCALAR(txPacketExists);
2457 if (txPacketExists) {
2458 txPacket = new EtherPacket;
2459 txPacket->unserialize(cp, csprintf("%s.txPacket", section));
2460 uint32_t txPktBufPtr;
2461 UNSERIALIZE_SCALAR(txPktBufPtr);
2462 txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
2463 } else
2464 txPacket = 0;
2465
2466 bool rxPacketExists;
2467 UNSERIALIZE_SCALAR(rxPacketExists);
2468 rxPacket = 0;
2469 if (rxPacketExists) {
2470 rxPacket = new EtherPacket;
2471 rxPacket->unserialize(cp, csprintf("%s.rxPacket", section));
2472 uint32_t rxPktBufPtr;
2473 UNSERIALIZE_SCALAR(rxPktBufPtr);
2474 rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
2475 } else
2476 rxPacket = 0;
2477
2478 UNSERIALIZE_SCALAR(txXferLen);
2479 UNSERIALIZE_SCALAR(rxXferLen);
2480
2481 /*
2482 * Unserialize DescCaches
2483 */
2484 UNSERIALIZE_SCALAR(txDescCache.link);
2485 UNSERIALIZE_SCALAR(txDescCache.bufptr);
2486 UNSERIALIZE_SCALAR(txDescCache.cmdsts);
2487 UNSERIALIZE_SCALAR(txDescCache.extsts);
2488 UNSERIALIZE_SCALAR(rxDescCache.link);
2489 UNSERIALIZE_SCALAR(rxDescCache.bufptr);
2490 UNSERIALIZE_SCALAR(rxDescCache.cmdsts);
2491 UNSERIALIZE_SCALAR(rxDescCache.extsts);
2492
2493 /*
2494 * unserialize tx state machine
2495 */
2496 int txState;
2497 UNSERIALIZE_SCALAR(txState);
2498 this->txState = (TxState) txState;
2499 UNSERIALIZE_SCALAR(CTDD);
2500 UNSERIALIZE_SCALAR(txFifoAvail);
2501 UNSERIALIZE_SCALAR(txHalt);
2502 UNSERIALIZE_SCALAR(txFragPtr);
2503 UNSERIALIZE_SCALAR(txDescCnt);
2504 int txDmaState;
2505 UNSERIALIZE_SCALAR(txDmaState);
2506 this->txDmaState = (DmaState) txDmaState;
2507
2508 /*
2509 * unserialize rx state machine
2510 */
2511 int rxState;
2512 UNSERIALIZE_SCALAR(rxState);
2513 this->rxState = (RxState) rxState;
2514 UNSERIALIZE_SCALAR(CRDD);
2515 UNSERIALIZE_SCALAR(rxPktBytes);
2516 UNSERIALIZE_SCALAR(rxFifoCnt);
2517 UNSERIALIZE_SCALAR(rxHalt);
2518 UNSERIALIZE_SCALAR(rxDescCnt);
2519 int rxDmaState;
2520 UNSERIALIZE_SCALAR(rxDmaState);
2521 this->rxDmaState = (DmaState) rxDmaState;
2522
2523 UNSERIALIZE_SCALAR(extstsEnable);
2524
2525 /*
2526 * If there's a pending transmit, reschedule it now
2527 */
2528 Tick transmitTick;
2529 UNSERIALIZE_SCALAR(transmitTick);
2530 if (transmitTick)
2531 txEvent.schedule(curTick + transmitTick);
2532
2533 /*
2534 * unserialize receive address filter settings
2535 */
2536 UNSERIALIZE_SCALAR(rxFilterEnable);
2537 UNSERIALIZE_SCALAR(acceptBroadcast);
2538 UNSERIALIZE_SCALAR(acceptMulticast);
2539 UNSERIALIZE_SCALAR(acceptUnicast);
2540 UNSERIALIZE_SCALAR(acceptPerfect);
2541 UNSERIALIZE_SCALAR(acceptArp);
2542
2543 /*
2544 * Keep track of pending interrupt status.
2545 */
2546 UNSERIALIZE_SCALAR(intrTick);
2547 UNSERIALIZE_SCALAR(cpuPendingIntr);
2548 Tick intrEventTick;
2549 UNSERIALIZE_SCALAR(intrEventTick);
2550 if (intrEventTick) {
2551 intrEvent = new IntrEvent(this, true);
2552 intrEvent->schedule(intrEventTick);
2553 }
2554
2555 /*
2556 * re-add addrRanges to bus bridges
2557 */
2558 if (pioInterface) {
2559 pioInterface->addAddrRange(BARAddrs[0], BARAddrs[0] + BARSize[0] - 1);
2560 pioInterface->addAddrRange(BARAddrs[1], BARAddrs[1] + BARSize[1] - 1);
2561 }
2562 }
2563
2564 Tick
2565 NSGigE::cacheAccess(MemReqPtr &req)
2566 {
2567 DPRINTF(EthernetPIO, "timing access to paddr=%#x (daddr=%#x)\n",
2568 req->paddr, req->paddr - addr);
2569 return curTick + pioLatency;
2570 }
2571 //=====================================================================
2572
2573
2574 //********** helper functions******************************************
2575
// Swap the two bytes of a 16-bit value (endianness conversion helper).
uint16_t reverseEnd16(uint16_t num)
{
    return (uint16_t)(((num & 0x00ff) << 8) | ((num & 0xff00) >> 8));
}
2582
// Reverse the byte order of a 32-bit value (endianness conversion helper).
//
// Bug fix: the previous implementation extracted the upper half with
// `(num & 0xffff0000) >> 8` (a shift by 8 instead of 16), so the two most
// significant source bytes were swapped incorrectly — e.g. 0xAABBCCDD
// reversed to 0xDDCCCCBB instead of 0xDDCCBBAA. This version performs the
// full four-byte reversal directly, masking and repositioning each octet.
uint32_t reverseEnd32(uint32_t num)
{
    uint32_t reverse = ((num & 0x000000ff) << 24) |
                       ((num & 0x0000ff00) << 8)  |
                       ((num & 0x00ff0000) >> 8)  |
                       ((num & 0xff000000) >> 24);
    return reverse;
}
2589
2590
2591
2592 //=====================================================================
2593
// Configuration parameters for an NSGigEInt ethernet interface object.
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

    SimObjectParam<EtherInt *> peer;    // interface at the other end of the link (may be NULL)
    SimObjectParam<NSGigE *> device;    // NIC device this interface belongs to

END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)
2600
// Descriptions and defaults for the NSGigEInt parameters declared above.
BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt)

    INIT_PARAM_DFLT(peer, "peer interface", NULL),
    INIT_PARAM(device, "Ethernet device of this interface")

END_INIT_SIM_OBJECT_PARAMS(NSGigEInt)
2607
2608 CREATE_SIM_OBJECT(NSGigEInt)
2609 {
2610 NSGigEInt *dev_int = new NSGigEInt(getInstanceName(), device);
2611
2612 EtherInt *p = (EtherInt *)peer;
2613 if (p) {
2614 dev_int->setPeer(p);
2615 p->setPeer(dev_int);
2616 }
2617
2618 return dev_int;
2619 }
2620
2621 REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt)
2622
2623
// Configuration parameters for the NSGigE (DP83820) ethernet device model.
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE)

    // Link-level timing
    Param<Tick> tx_delay;                       // transmit delay
    Param<Tick> rx_delay;                       // receive delay

    // Interrupt delivery
    SimObjectParam<IntrControl *> intr_ctrl;    // platform interrupt controller
    Param<Tick> intr_delay;                     // delay before posting an interrupt

    // Memory system connections
    SimObjectParam<MemoryController *> mmu;
    SimObjectParam<PhysicalMemory *> physmem;

    // Receive filtering and MAC address
    Param<bool> rx_filter;                      // enable receive address filter
    Param<string> hardware_address;             // MAC address, "xx:xx:xx:xx:xx:xx"

    // Bus attachments for DMA traffic
    SimObjectParam<Bus*> header_bus;            // bus used for descriptor/header DMA
    SimObjectParam<Bus*> payload_bus;           // bus used for payload DMA
    SimObjectParam<HierParams *> hier;
    Param<Tick> pio_latency;                    // programmed I/O access latency

    // DMA timing model knobs
    Param<bool> dma_desc_free;                  // descriptor DMA costs no time
    Param<bool> dma_data_free;                  // data DMA costs no time
    Param<Tick> dma_read_delay;                 // fixed delay per DMA read
    Param<Tick> dma_write_delay;                // fixed delay per DMA write
    Param<Tick> dma_read_factor;                // per-byte multiplier for DMA reads
    Param<Tick> dma_write_factor;               // per-byte multiplier for DMA writes

    // PCI configuration
    SimObjectParam<PciConfigAll *> configspace;
    SimObjectParam<PciConfigData *> configdata;
    SimObjectParam<Tsunami *> tsunami;
    Param<uint32_t> pci_bus;
    Param<uint32_t> pci_dev;
    Param<uint32_t> pci_func;

    // Internal FIFO sizing (bytes)
    Param<uint32_t> tx_fifo_size;
    Param<uint32_t> rx_fifo_size;

END_DECLARE_SIM_OBJECT_PARAMS(NSGigE)
2654
// Descriptions and defaults for the NSGigE parameters declared above.
BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE)

    INIT_PARAM_DFLT(tx_delay, "Transmit Delay", 1000),
    INIT_PARAM_DFLT(rx_delay, "Receive Delay", 1000),
    INIT_PARAM(intr_ctrl, "Interrupt Controller"),
    INIT_PARAM_DFLT(intr_delay, "Interrupt Delay in microseconds", 0),
    INIT_PARAM(mmu, "Memory Controller"),
    INIT_PARAM(physmem, "Physical Memory"),
    INIT_PARAM_DFLT(rx_filter, "Enable Receive Filter", true),
    INIT_PARAM_DFLT(hardware_address, "Ethernet Hardware Address",
                    "00:99:00:00:00:01"),
    INIT_PARAM_DFLT(header_bus, "The IO Bus to attach to for headers", NULL),
    INIT_PARAM_DFLT(payload_bus, "The IO Bus to attach to for payload", NULL),
    INIT_PARAM_DFLT(hier, "Hierarchy global variables", &defaultHierParams),
    INIT_PARAM_DFLT(pio_latency, "Programmed IO latency in bus cycles", 1),
    INIT_PARAM_DFLT(dma_desc_free, "DMA of Descriptors is free", false),
    INIT_PARAM_DFLT(dma_data_free, "DMA of Data is free", false),
    INIT_PARAM_DFLT(dma_read_delay, "fixed delay for dma reads", 0),
    INIT_PARAM_DFLT(dma_write_delay, "fixed delay for dma writes", 0),
    INIT_PARAM_DFLT(dma_read_factor, "multiplier for dma reads", 0),
    INIT_PARAM_DFLT(dma_write_factor, "multiplier for dma writes", 0),
    INIT_PARAM(configspace, "PCI Configspace"),
    INIT_PARAM(configdata, "PCI Config data"),
    INIT_PARAM(tsunami, "Tsunami"),
    INIT_PARAM(pci_bus, "PCI bus"),
    INIT_PARAM(pci_dev, "PCI device number"),
    INIT_PARAM(pci_func, "PCI function code"),
    INIT_PARAM_DFLT(tx_fifo_size, "max size in bytes of txFifo", 131072),
    INIT_PARAM_DFLT(rx_fifo_size, "max size in bytes of rxFifo", 131072)

END_INIT_SIM_OBJECT_PARAMS(NSGigE)
2686
2687
2688 CREATE_SIM_OBJECT(NSGigE)
2689 {
2690 int eaddr[6];
2691 sscanf(((string)hardware_address).c_str(), "%x:%x:%x:%x:%x:%x",
2692 &eaddr[0], &eaddr[1], &eaddr[2], &eaddr[3], &eaddr[4], &eaddr[5]);
2693
2694 return new NSGigE(getInstanceName(), intr_ctrl, intr_delay,
2695 physmem, tx_delay, rx_delay, mmu, hier, header_bus,
2696 payload_bus, pio_latency, dma_desc_free, dma_data_free,
2697 dma_read_delay, dma_write_delay, dma_read_factor,
2698 dma_write_factor, configspace, configdata,
2699 tsunami, pci_bus, pci_dev, pci_func, rx_filter, eaddr,
2700 tx_fifo_size, rx_fifo_size);
2701 }
2702
2703 REGISTER_SIM_OBJECT("NSGigE", NSGigE)