Add a clock multiplier for simple CPU so that it is possible
[gem5.git] / dev / ns_gige.cc
1 /*
2 * Copyright (c) 2004 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /* @file
30 * Device module for modelling the National Semiconductor
31 * DP83820 ethernet controller. Does not support priority queueing
32 */
33 #include <cstdio>
34 #include <deque>
35 #include <string>
36
37 #include "base/inet.hh"
38 #include "cpu/exec_context.hh"
39 #include "cpu/intr_control.hh"
40 #include "dev/dma.hh"
41 #include "dev/ns_gige.hh"
42 #include "dev/etherlink.hh"
43 #include "mem/bus/bus.hh"
44 #include "mem/bus/dma_interface.hh"
45 #include "mem/bus/pio_interface.hh"
46 #include "mem/bus/pio_interface_impl.hh"
47 #include "mem/functional_mem/memory_control.hh"
48 #include "mem/functional_mem/physical_memory.hh"
49 #include "sim/builder.hh"
50 #include "sim/host.hh"
51 #include "sim/sim_stats.hh"
52 #include "targetarch/vtophys.hh"
53 #include "dev/pciconfigall.hh"
54 #include "dev/tsunami_cchip.hh"
55
// Printable names for the receive state machine states; indexed by the
// RxState enum, so the order here must match the enum declaration.
const char *NsRxStateStrings[] =
{
    "rxIdle",
    "rxDescRefr",
    "rxDescRead",
    "rxFifoBlock",
    "rxFragWrite",
    "rxDescWrite",
    "rxAdvance"
};
66
// Printable names for the transmit state machine states; indexed by the
// TxState enum, so the order here must match the enum declaration.
const char *NsTxStateStrings[] =
{
    "txIdle",
    "txDescRefr",
    "txDescRead",
    "txFifoBlock",
    "txFragRead",
    "txDescWrite",
    "txAdvance"
};
77
// Printable names for the DMA engine states; indexed by the DmaState
// enum, so the order here must match the enum declaration.
const char *NsDmaState[] =
{
    "dmaIdle",
    "dmaReading",
    "dmaWriting",
    "dmaReadWaiting",
    "dmaWriteWaiting"
};
86
using namespace std;

//helper function declarations
//These functions reverse Endianness so we can evaluate network data correctly
uint16_t reverseEnd16(uint16_t);
uint32_t reverseEnd32(uint32_t);
93
///////////////////////////////////////////////////////////////////////
//
// NSGigE PCI Device
//
// Constructor: wires the device to the PIO and DMA bus interfaces
// (header and/or payload bus), latches the DMA timing parameters,
// resets the register file, and loads the receive filter's
// perfect-match ROM with the configured MAC address.
NSGigE::NSGigE(const std::string &name, IntrControl *i, Tick intr_delay,
               PhysicalMemory *pmem, Tick tx_delay, Tick rx_delay,
               MemoryController *mmu, HierParams *hier, Bus *header_bus,
               Bus *payload_bus, Tick pio_latency, bool dma_desc_free,
               bool dma_data_free, Tick dma_read_delay, Tick dma_write_delay,
               Tick dma_read_factor, Tick dma_write_factor, PciConfigAll *cf,
               PciConfigData *cd, Tsunami *t, uint32_t bus, uint32_t dev,
               uint32_t func, bool rx_filter, const int eaddr[6],
               uint32_t tx_fifo_size, uint32_t rx_fifo_size)
    : PciDev(name, mmu, cf, cd, bus, dev, func), tsunami(t), ioEnable(false),
      maxTxFifoSize(tx_fifo_size), maxRxFifoSize(rx_fifo_size),
      txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
      txXferLen(0), rxXferLen(0), txState(txIdle), CTDD(false),
      txFifoAvail(tx_fifo_size), txHalt(false),
      txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
      CRDD(false), rxPktBytes(0), rxFifoCnt(0), rxHalt(false),
      rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
      rxDmaReadEvent(this), rxDmaWriteEvent(this),
      txDmaReadEvent(this), txDmaWriteEvent(this),
      dmaDescFree(dma_desc_free), dmaDataFree(dma_data_free),
      txDelay(tx_delay), rxDelay(rx_delay), rxKickTick(0), txKickTick(0),
      txEvent(this), rxFilterEnable(rx_filter), acceptBroadcast(false),
      acceptMulticast(false), acceptUnicast(false),
      acceptPerfect(false), acceptArp(false),
      physmem(pmem), intctrl(i), intrTick(0), cpuPendingIntr(false),
      intrEvent(0), interface(0)
{
    // Register ourselves with the Tsunami chipset so it can route
    // ethernet interrupts to this device.
    tsunami->ethernet = this;

    // PIO goes over the header bus when one is given; DMA pairs the
    // header bus with the payload bus for data when both exist,
    // otherwise a single bus carries everything.
    if (header_bus) {
        pioInterface = newPioInterface(name, hier, header_bus, this,
                                       &NSGigE::cacheAccess);

        pioLatency = pio_latency * header_bus->clockRatio;

        if (payload_bus)
            dmaInterface = new DMAInterface<Bus>(name + ".dma",
                                                 header_bus, payload_bus, 1);
        else
            dmaInterface = new DMAInterface<Bus>(name + ".dma",
                                                 header_bus, header_bus, 1);
    } else if (payload_bus) {
        pioInterface = newPioInterface(name, hier, payload_bus, this,
                                       &NSGigE::cacheAccess);

        pioLatency = pio_latency * payload_bus->clockRatio;

        dmaInterface = new DMAInterface<Bus>(name + ".dma", payload_bus,
                                             payload_bus, 1);
    }
    // NOTE(review): if neither bus is supplied, pioInterface and
    // dmaInterface are presumably left unset here -- confirm they are
    // initialized elsewhere before use.

    intrDelay = US2Ticks(intr_delay);
    dmaReadDelay = dma_read_delay;
    dmaWriteDelay = dma_write_delay;
    dmaReadFactor = dma_read_factor;
    dmaWriteFactor = dma_write_factor;

    regsReset();
    // Seed the receive filter's perfect-match ROM with our MAC address.
    rom.perfectMatch[0] = eaddr[0];
    rom.perfectMatch[1] = eaddr[1];
    rom.perfectMatch[2] = eaddr[2];
    rom.perfectMatch[3] = eaddr[3];
    rom.perfectMatch[4] = eaddr[4];
    rom.perfectMatch[5] = eaddr[5];
}
164
// Nothing to tear down explicitly here; owned objects are cleaned up
// by their own destructors.
NSGigE::~NSGigE()
{}
167
/**
 * Register the device's statistics with the stats package.  The
 * byte/packet counters are updated by the tx/rx paths; the bandwidth
 * and packet-rate stats are formulas derived from them (assigned at
 * the bottom, after all operands are registered).
 */
void
NSGigE::regStats()
{
    txBytes
        .name(name() + ".txBytes")
        .desc("Bytes Transmitted")
        .prereq(txBytes)
        ;

    rxBytes
        .name(name() + ".rxBytes")
        .desc("Bytes Received")
        .prereq(rxBytes)
        ;

    txPackets
        .name(name() + ".txPackets")
        .desc("Number of Packets Transmitted")
        .prereq(txBytes)
        ;

    rxPackets
        .name(name() + ".rxPackets")
        .desc("Number of Packets Received")
        .prereq(rxBytes)
        ;

    txIPChecksums
        .name(name() + ".txIPChecksums")
        .desc("Number of tx IP Checksums done by device")
        .precision(0)
        .prereq(txBytes)
        ;

    rxIPChecksums
        .name(name() + ".rxIPChecksums")
        .desc("Number of rx IP Checksums done by device")
        .precision(0)
        .prereq(rxBytes)
        ;

    txTCPChecksums
        .name(name() + ".txTCPChecksums")
        .desc("Number of tx TCP Checksums done by device")
        .precision(0)
        .prereq(txBytes)
        ;

    rxTCPChecksums
        .name(name() + ".rxTCPChecksums")
        .desc("Number of rx TCP Checksums done by device")
        .precision(0)
        .prereq(rxBytes)
        ;

    descDmaReads
        .name(name() + ".descDMAReads")
        .desc("Number of descriptors the device read w/ DMA")
        .precision(0)
        ;

    descDmaWrites
        .name(name() + ".descDMAWrites")
        .desc("Number of descriptors the device wrote w/ DMA")
        .precision(0)
        ;

    descDmaRdBytes
        .name(name() + ".descDmaReadBytes")
        .desc("number of descriptor bytes read w/ DMA")
        .precision(0)
        ;

    descDmaWrBytes
        .name(name() + ".descDmaWriteBytes")
        .desc("number of descriptor bytes write w/ DMA")
        .precision(0)
        ;

    txBandwidth
        .name(name() + ".txBandwidth")
        .desc("Transmit Bandwidth (bits/s)")
        .precision(0)
        .prereq(txBytes)
        ;

    rxBandwidth
        .name(name() + ".rxBandwidth")
        .desc("Receive Bandwidth (bits/s)")
        .precision(0)
        .prereq(rxBytes)
        ;

    txPacketRate
        .name(name() + ".txPPS")
        .desc("Packet Tranmission Rate (packets/s)")
        .precision(0)
        .prereq(txBytes)
        ;

    rxPacketRate
        .name(name() + ".rxPPS")
        .desc("Packet Reception Rate (packets/s)")
        .precision(0)
        .prereq(rxBytes)
        ;

    // Derived formulas: bytes -> bits per simulated second, and
    // packets per simulated second.
    txBandwidth = txBytes * Stats::constant(8) / simSeconds;
    rxBandwidth = rxBytes * Stats::constant(8) / simSeconds;
    txPacketRate = txPackets / simSeconds;
    rxPacketRate = rxPackets / simSeconds;
}
281
282 /**
283 * This is to read the PCI general configuration registers
284 */
285 void
286 NSGigE::ReadConfig(int offset, int size, uint8_t *data)
287 {
288 if (offset < PCI_DEVICE_SPECIFIC)
289 PciDev::ReadConfig(offset, size, data);
290 else
291 panic("Device specific PCI config space not implemented!\n");
292 }
293
/**
 * Write to the PCI general configuration registers.  Delegates the
 * actual write to the PciDev base class, then inspects the offset to
 * keep device state (ioEnable, PIO address ranges) in sync with the
 * new config values.  Device-specific config space is not modelled.
 */
void
NSGigE::WriteConfig(int offset, int size, uint32_t data)
{
    if (offset < PCI_DEVICE_SPECIFIC)
        PciDev::WriteConfig(offset, size, data);
    else
        panic("Device specific PCI config space not implemented!\n");

    // Need to catch writes to BARs to update the PIO interface
    switch (offset) {
        //seems to work fine without all these PCI settings, but i put in the IO
        //to double check, an assertion will fail if we need to properly
        // implement it
      case PCI_COMMAND:
        // Mirror the I/O Space Enable bit; read()/write() assert on it.
        if (config.data[offset] & PCI_CMD_IOSE)
            ioEnable = true;
        else
            ioEnable = false;

#if 0
        if (config.data[offset] & PCI_CMD_BME) {
            bmEnabled = true;
        }
        else {
            bmEnabled = false;
        }

        if (config.data[offset] & PCI_CMD_MSE) {
            memEnable = true;
        }
        else {
            memEnable = false;
        }
#endif
        break;

        // When a BAR is programmed, register its address range with the
        // PIO interface and strip the uncached-space bits from the
        // stored address.
      case PCI0_BASE_ADDR0:
        if (BARAddrs[0] != 0) {

            if (pioInterface)
                pioInterface->addAddrRange(BARAddrs[0], BARAddrs[0] + BARSize[0] - 1);

            BARAddrs[0] &= PA_UNCACHED_MASK;

        }
        break;
      case PCI0_BASE_ADDR1:
        if (BARAddrs[1] != 0) {

            if (pioInterface)
                pioInterface->addAddrRange(BARAddrs[1], BARAddrs[1] + BARSize[1] - 1);

            BARAddrs[1] &= PA_UNCACHED_MASK;

        }
        break;
    }
}
355
/**
 * This reads the device registers, which are detailed in the NS83820
 * spec sheet.
 *
 * @param req  memory request; paddr's low 12 bits select the register,
 *             size must be 4 bytes for register reads
 * @param data destination buffer for the register value
 * @return No_Fault on success; panics on reserved/unimplemented
 *         registers or invalid access sizes
 */
Fault
NSGigE::read(MemReqPtr &req, uint8_t *data)
{
    assert(ioEnable);

    //The mask is to give you only the offset into the device register file
    Addr daddr = req->paddr & 0xfff;
    DPRINTF(EthernetPIO, "read da=%#x pa=%#x va=%#x size=%d\n",
            daddr, req->paddr, req->vaddr, req->size);

    //there are some reserved registers, you can see ns_gige_reg.h and
    //the spec sheet for details
    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        // Accesses above the reserved area alias the PCI config space.
        ReadConfig(daddr & 0xff, req->size, data);
        return No_Fault;
    } else if (daddr >= MIB_START && daddr <= MIB_END) {
        // don't implement all the MIB's.  hopefully the kernel
        // doesn't actually DEPEND upon their values
        // MIB are just hardware stats keepers
        uint32_t &reg = *(uint32_t *) data;
        reg = 0;
        return No_Fault;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    switch (req->size) {
      case sizeof(uint32_t):
        {
            uint32_t &reg = *(uint32_t *)data;

            switch (daddr) {
              case CR:
                reg = regs.command;
                //these are supposed to be cleared on a read
                reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
                break;

              case CFG:
                reg = regs.config;
                break;

              case MEAR:
                reg = regs.mear;
                break;

              case PTSCR:
                reg = regs.ptscr;
                break;

              case ISR:
                // Reading ISR acknowledges (clears) all interrupts.
                reg = regs.isr;
                devIntrClear(ISR_ALL);
                break;

              case IMR:
                reg = regs.imr;
                break;

              case IER:
                reg = regs.ier;
                break;

              case IHR:
                reg = regs.ihr;
                break;

              case TXDP:
                reg = regs.txdp;
                break;

              case TXDP_HI:
                reg = regs.txdp_hi;
                break;

              case TXCFG:
                reg = regs.txcfg;
                break;

              case GPIOR:
                reg = regs.gpior;
                break;

              case RXDP:
                reg = regs.rxdp;
                break;

              case RXDP_HI:
                reg = regs.rxdp_hi;
                break;

              case RXCFG:
                reg = regs.rxcfg;
                break;

              case PQCR:
                reg = regs.pqcr;
                break;

              case WCSR:
                reg = regs.wcsr;
                break;

              case PCR:
                reg = regs.pcr;
                break;

                //see the spec sheet for how RFCR and RFDR work
                //basically, you write to RFCR to tell the machine what you want to do next
                //then you act upon RFDR, and the device will be prepared b/c
                //of what you wrote to RFCR
              case RFCR:
                reg = regs.rfcr;
                break;

              case RFDR:
                // RFCR_RFADDR selects which 16-bit word of the
                // perfect-match MAC filter is returned, little-endian.
                switch (regs.rfcr & RFCR_RFADDR) {
                  case 0x000:
                    reg = rom.perfectMatch[1];
                    reg = reg << 8;
                    reg += rom.perfectMatch[0];
                    break;
                  case 0x002:
                    reg = rom.perfectMatch[3] << 8;
                    reg += rom.perfectMatch[2];
                    break;
                  case 0x004:
                    reg = rom.perfectMatch[5] << 8;
                    reg += rom.perfectMatch[4];
                    break;
                  default:
                    panic("reading from RFDR for something for other than PMATCH!\n");
                    //didn't implement other RFDR functionality b/c driver didn't use
                }
                break;

              case SRR:
                reg = regs.srr;
                break;

              case MIBC:
                // MIBS/ACLR are self-clearing control bits.
                reg = regs.mibc;
                reg &= ~(MIBC_MIBS | MIBC_ACLR);
                break;

              case VRCR:
                reg = regs.vrcr;
                break;

              case VTCR:
                reg = regs.vtcr;
                break;

              case VDR:
                reg = regs.vdr;
                break;

              case CCSR:
                reg = regs.ccsr;
                break;

              case TBICR:
                reg = regs.tbicr;
                break;

              case TBISR:
                reg = regs.tbisr;
                break;

              case TANAR:
                reg = regs.tanar;
                break;

              case TANLPAR:
                reg = regs.tanlpar;
                break;

              case TANER:
                reg = regs.taner;
                break;

              case TESR:
                reg = regs.tesr;
                break;

              default:
                panic("reading unimplemented register: addr = %#x", daddr);
            }

            DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
                    daddr, reg, reg);
        }
        break;

      default:
        panic("accessing register with invalid size: addr=%#x, size=%d",
              daddr, req->size);
    }

    return No_Fault;
}
563
/**
 * Write to the device registers (see the NS83820 spec sheet).  Side
 * effects are modelled per register: CR kicks/halts/resets the tx/rx
 * state machines, CFG/RFCR update cached enable flags, IMR re-evaluates
 * the interrupt line, etc.  Registers the Linux driver never writes
 * panic so unmodelled behavior is caught loudly.
 *
 * @param req  memory request; only 4-byte accesses are supported
 * @param data value being written
 * @return No_Fault on success
 */
Fault
NSGigE::write(MemReqPtr &req, const uint8_t *data)
{
    assert(ioEnable);

    Addr daddr = req->paddr & 0xfff;
    DPRINTF(EthernetPIO, "write da=%#x pa=%#x va=%#x size=%d\n",
            daddr, req->paddr, req->vaddr, req->size);

    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        WriteConfig(daddr & 0xff, req->size, *(uint32_t *)data);
        return No_Fault;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    if (req->size == sizeof(uint32_t)) {
        uint32_t reg = *(uint32_t *)data;
        DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);

        switch (daddr) {
          case CR:
            regs.command = reg;
            // Enable+disable written together means halt the machine;
            // enable alone kicks it if idle; disable alone halts it.
            if ((reg & (CR_TXE | CR_TXD)) == (CR_TXE | CR_TXD)) {
                txHalt = true;
            } else if (reg & CR_TXE) {
                //the kernel is enabling the transmit machine
                if (txState == txIdle)
                    txKick();
            } else if (reg & CR_TXD) {
                txHalt = true;
            }

            if ((reg & (CR_RXE | CR_RXD)) == (CR_RXE | CR_RXD)) {
                rxHalt = true;
            } else if (reg & CR_RXE) {
                if (rxState == rxIdle) {
                    rxKick();
                }
            } else if (reg & CR_RXD) {
                rxHalt = true;
            }

            if (reg & CR_TXR)
                txReset();

            if (reg & CR_RXR)
                rxReset();

            if (reg & CR_SWI)
                devIntrPost(ISR_SWI);

            if (reg & CR_RST) {
                txReset();
                rxReset();

                regsReset();
            }
            break;

          case CFG:
            if (reg & CFG_LNKSTS || reg & CFG_SPDSTS || reg & CFG_DUPSTS
                || reg & CFG_RESERVED || reg & CFG_T64ADDR
                || reg & CFG_PCI64_DET)
                panic("writing to read-only or reserved CFG bits!\n");

            regs.config |= reg & ~(CFG_LNKSTS | CFG_SPDSTS | CFG_DUPSTS | CFG_RESERVED |
                                   CFG_T64ADDR | CFG_PCI64_DET);

            // all these #if 0's are because i don't THINK the kernel needs to have these implemented
            // if there is a problem relating to one of these, you may need to add functionality in
#if 0
            if (reg & CFG_TBI_EN) ;
            if (reg & CFG_MODE_1000) ;
#endif

            if (reg & CFG_AUTO_1000)
                panic("CFG_AUTO_1000 not implemented!\n");

#if 0
            if (reg & CFG_PINT_DUPSTS || reg & CFG_PINT_LNKSTS || reg & CFG_PINT_SPDSTS) ;
            if (reg & CFG_TMRTEST) ;
            if (reg & CFG_MRM_DIS) ;
            if (reg & CFG_MWI_DIS) ;

            if (reg & CFG_T64ADDR)
                panic("CFG_T64ADDR is read only register!\n");

            if (reg & CFG_PCI64_DET)
                panic("CFG_PCI64_DET is read only register!\n");

            if (reg & CFG_DATA64_EN) ;
            if (reg & CFG_M64ADDR) ;
            if (reg & CFG_PHY_RST) ;
            if (reg & CFG_PHY_DIS) ;
#endif

            if (reg & CFG_EXTSTS_EN)
                extstsEnable = true;
            else
                extstsEnable = false;

#if 0
            if (reg & CFG_REQALG) ;
            if (reg & CFG_SB) ;
            if (reg & CFG_POW) ;
            if (reg & CFG_EXD) ;
            if (reg & CFG_PESEL) ;
            if (reg & CFG_BROM_DIS) ;
            if (reg & CFG_EXT_125) ;
            if (reg & CFG_BEM) ;
#endif
            break;

          case MEAR:
            regs.mear = reg;
            /* since phy is completely faked, MEAR_MD* don't matter
               and since the driver never uses MEAR_EE*, they don't matter */
#if 0
            if (reg & MEAR_EEDI) ;
            if (reg & MEAR_EEDO) ; //this one is read only
            if (reg & MEAR_EECLK) ;
            if (reg & MEAR_EESEL) ;
            if (reg & MEAR_MDIO) ;
            if (reg & MEAR_MDDIR) ;
            if (reg & MEAR_MDC) ;
#endif
            break;

          case PTSCR:
            regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
            /* these control BISTs for various parts of chip - we don't care or do
               just fake that the BIST is done */
            if (reg & PTSCR_RBIST_EN)
                regs.ptscr |= PTSCR_RBIST_DONE;
            if (reg & PTSCR_EEBIST_EN)
                regs.ptscr &= ~PTSCR_EEBIST_EN;
            if (reg & PTSCR_EELOAD_EN)
                regs.ptscr &= ~PTSCR_EELOAD_EN;
            break;

          case ISR: /* writing to the ISR has no effect */
            panic("ISR is a read only register!\n");

          case IMR:
            // New mask may raise or drop the pending CPU interrupt.
            regs.imr = reg;
            devIntrChangeMask();
            break;

          case IER:
            regs.ier = reg;
            break;

          case IHR:
            regs.ihr = reg;
            /* not going to implement real interrupt holdoff */
            break;

          case TXDP:
            // Descriptor pointers are dword-aligned; low bits dropped.
            regs.txdp = (reg & 0xFFFFFFFC);
            assert(txState == txIdle);
            CTDD = false;
            break;

          case TXDP_HI:
            regs.txdp_hi = reg;
            break;

          case TXCFG:
            regs.txcfg = reg;
#if 0
            if (reg & TXCFG_CSI) ;
            if (reg & TXCFG_HBI) ;
            if (reg & TXCFG_MLB) ;
            if (reg & TXCFG_ATP) ;
            if (reg & TXCFG_ECRETRY) ;  /* this could easily be implemented, but
                                           considering the network is just a fake
                                           pipe, wouldn't make sense to do this */

            if (reg & TXCFG_BRST_DIS) ;
#endif

            /* we handle our own DMA, ignore the kernel's exhortations */
            //if (reg & TXCFG_MXDMA) ;

            //also, we currently don't care about fill/drain thresholds
            //though this may change in the future with more realistic
            //networks or a driver which changes it according to feedback

            break;

          case GPIOR:
            regs.gpior = reg;
            /* these just control general purpose i/o pins, don't matter */
            break;

          case RXDP:
            regs.rxdp = reg;
            break;

          case RXDP_HI:
            regs.rxdp_hi = reg;
            break;

          case RXCFG:
            regs.rxcfg = reg;
#if 0
            if (reg & RXCFG_AEP) ;
            if (reg & RXCFG_ARP) ;
            if (reg & RXCFG_STRIPCRC) ;
            if (reg & RXCFG_RX_RD) ;
            if (reg & RXCFG_ALP) ;
            if (reg & RXCFG_AIRL) ;
#endif

            /* we handle our own DMA, ignore what kernel says about it */
            //if (reg & RXCFG_MXDMA) ;

#if 0
            //also, we currently don't care about fill/drain thresholds
            //though this may change in the future with more realistic
            //networks or a driver which changes it according to feedback
            if (reg & (RXCFG_DRTH | RXCFG_DRTH0)) ;
#endif
            break;

          case PQCR:
            /* there is no priority queueing used in the linux 2.6 driver */
            regs.pqcr = reg;
            break;

          case WCSR:
            /* not going to implement wake on LAN */
            regs.wcsr = reg;
            break;

          case PCR:
            /* not going to implement pause control */
            regs.pcr = reg;
            break;

          case RFCR:
            regs.rfcr = reg;

            // Cache the filter-control bits so the rx path doesn't
            // have to decode the register on every packet.
            rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
            acceptBroadcast = (reg & RFCR_AAB) ? true : false;
            acceptMulticast = (reg & RFCR_AAM) ? true : false;
            acceptUnicast = (reg & RFCR_AAU) ? true : false;
            acceptPerfect = (reg & RFCR_APM) ? true : false;
            acceptArp = (reg & RFCR_AARP) ? true : false;

            // RFCR_APAT (pattern match) is deliberately ignored.
            if (reg & RFCR_APAT) ;
//                panic("RFCR_APAT not implemented!\n");

            if (reg & RFCR_MHEN || reg & RFCR_UHEN)
                panic("hash filtering not implemented!\n");

            if (reg & RFCR_ULM)
                panic("RFCR_ULM not implemented!\n");

            break;

          case RFDR:
            panic("the driver never writes to RFDR, something is wrong!\n");

          case BRAR:
            panic("the driver never uses BRAR, something is wrong!\n");

          case BRDR:
            panic("the driver never uses BRDR, something is wrong!\n");

          case SRR:
            panic("SRR is read only register!\n");

          case MIBC:
            panic("the driver never uses MIBC, something is wrong!\n");

          case VRCR:
            regs.vrcr = reg;
            break;

          case VTCR:
            regs.vtcr = reg;
            break;

          case VDR:
            panic("the driver never uses VDR, something is wrong!\n");
            break;

          case CCSR:
            /* not going to implement clockrun stuff */
            regs.ccsr = reg;
            break;

          case TBICR:
            regs.tbicr = reg;
            if (reg & TBICR_MR_LOOPBACK)
                panic("TBICR_MR_LOOPBACK never used, something wrong!\n");

            // Fake PHY: autonegotiation completes instantly with the
            // link partner echoing our advertised abilities.
            if (reg & TBICR_MR_AN_ENABLE) {
                regs.tanlpar = regs.tanar;
                regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
            }

#if 0
            if (reg & TBICR_MR_RESTART_AN) ;
#endif

            break;

          case TBISR:
            panic("TBISR is read only register!\n");

          case TANAR:
            regs.tanar = reg;
            if (reg & TANAR_PS2)
                panic("this isn't used in driver, something wrong!\n");

            if (reg & TANAR_PS1)
                panic("this isn't used in driver, something wrong!\n");
            break;

          case TANLPAR:
            panic("this should only be written to by the fake phy!\n");

          case TANER:
            panic("TANER is read only register!\n");

          case TESR:
            regs.tesr = reg;
            break;

          default:
            panic("thought i covered all the register, what is this? addr=%#x",
                  daddr);
        }
    } else
        panic("Invalid Request Size");

    return No_Fault;
}
907
908 void
909 NSGigE::devIntrPost(uint32_t interrupts)
910 {
911 bool delay = false;
912
913 if (interrupts & ISR_RESERVE)
914 panic("Cannot set a reserved interrupt");
915
916 if (interrupts & ISR_TXRCMP)
917 regs.isr |= ISR_TXRCMP;
918
919 if (interrupts & ISR_RXRCMP)
920 regs.isr |= ISR_RXRCMP;
921
922 //ISR_DPERR not implemented
923 //ISR_SSERR not implemented
924 //ISR_RMABT not implemented
925 //ISR_RXSOVR not implemented
926 //ISR_HIBINT not implemented
927 //ISR_PHY not implemented
928 //ISR_PME not implemented
929
930 if (interrupts & ISR_SWI)
931 regs.isr |= ISR_SWI;
932
933 //ISR_MIB not implemented
934 //ISR_TXURN not implemented
935
936 if (interrupts & ISR_TXIDLE)
937 regs.isr |= ISR_TXIDLE;
938
939 if (interrupts & ISR_TXERR)
940 regs.isr |= ISR_TXERR;
941
942 if (interrupts & ISR_TXDESC)
943 regs.isr |= ISR_TXDESC;
944
945 if (interrupts & ISR_TXOK) {
946 regs.isr |= ISR_TXOK;
947 delay = true;
948 }
949
950 if (interrupts & ISR_RXORN)
951 regs.isr |= ISR_RXORN;
952
953 if (interrupts & ISR_RXIDLE)
954 regs.isr |= ISR_RXIDLE;
955
956 //ISR_RXEARLY not implemented
957
958 if (interrupts & ISR_RXERR)
959 regs.isr |= ISR_RXERR;
960
961 if (interrupts & ISR_RXDESC)
962 regs.isr |= ISR_RXDESC;
963
964 if (interrupts & ISR_RXOK) {
965 delay = true;
966 regs.isr |= ISR_RXOK;
967 }
968
969 if ((regs.isr & regs.imr)) {
970 Tick when = curTick;
971 if (delay)
972 when += intrDelay;
973 cpuIntrPost(when);
974 }
975
976 DPRINTF(EthernetIntr, "**interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
977 interrupts, regs.isr, regs.imr);
978 }
979
980 void
981 NSGigE::devIntrClear(uint32_t interrupts)
982 {
983 if (interrupts & ISR_RESERVE)
984 panic("Cannot clear a reserved interrupt");
985
986 if (interrupts & ISR_TXRCMP)
987 regs.isr &= ~ISR_TXRCMP;
988
989 if (interrupts & ISR_RXRCMP)
990 regs.isr &= ~ISR_RXRCMP;
991
992 //ISR_DPERR not implemented
993 //ISR_SSERR not implemented
994 //ISR_RMABT not implemented
995 //ISR_RXSOVR not implemented
996 //ISR_HIBINT not implemented
997 //ISR_PHY not implemented
998 //ISR_PME not implemented
999
1000 if (interrupts & ISR_SWI)
1001 regs.isr &= ~ISR_SWI;
1002
1003 //ISR_MIB not implemented
1004 //ISR_TXURN not implemented
1005
1006 if (interrupts & ISR_TXIDLE)
1007 regs.isr &= ~ISR_TXIDLE;
1008
1009 if (interrupts & ISR_TXERR)
1010 regs.isr &= ~ISR_TXERR;
1011
1012 if (interrupts & ISR_TXDESC)
1013 regs.isr &= ~ISR_TXDESC;
1014
1015 if (interrupts & ISR_TXOK)
1016 regs.isr &= ~ISR_TXOK;
1017
1018 if (interrupts & ISR_RXORN)
1019 regs.isr &= ~ISR_RXORN;
1020
1021 if (interrupts & ISR_RXIDLE)
1022 regs.isr &= ~ISR_RXIDLE;
1023
1024 //ISR_RXEARLY not implemented
1025
1026 if (interrupts & ISR_RXERR)
1027 regs.isr &= ~ISR_RXERR;
1028
1029 if (interrupts & ISR_RXDESC)
1030 regs.isr &= ~ISR_RXDESC;
1031
1032 if (interrupts & ISR_RXOK)
1033 regs.isr &= ~ISR_RXOK;
1034
1035 if (!(regs.isr & regs.imr))
1036 cpuIntrClear();
1037
1038 DPRINTF(EthernetIntr, "**interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
1039 interrupts, regs.isr, regs.imr);
1040 }
1041
1042 void
1043 NSGigE::devIntrChangeMask()
1044 {
1045 DPRINTF(EthernetIntr, "interrupt mask changed\n");
1046
1047 if (regs.isr & regs.imr)
1048 cpuIntrPost(curTick);
1049 else
1050 cpuIntrClear();
1051 }
1052
/**
 * Schedule (or deliver) a CPU interrupt for tick @p when.  An earlier
 * request supersedes a later scheduled one; a later request is folded
 * into the already-scheduled earlier one.
 */
void
NSGigE::cpuIntrPost(Tick when)
{
    //If the interrupt you want to post is later than an
    //interrupt already scheduled, just let it post in the coming one and
    //don't schedule another.
    //HOWEVER, must be sure that the scheduled intrTick is in the future
    //(this was formerly the source of a bug)
    assert((intrTick >= curTick) || (intrTick == 0));
    if (when > intrTick && intrTick != 0)
        return;

    intrTick = when;

    // This post is earlier than the one scheduled; cancel the old event.
    if (intrEvent) {
        intrEvent->squash();
        intrEvent = 0;
    }

    if (when < curTick) {
        // Requested time has already passed; interrupt right now.
        // NOTE(review): a post for exactly curTick takes the scheduled
        // path below rather than firing immediately -- confirm intended.
        cpuInterrupt();
    } else {
        DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
                intrTick);
        intrEvent = new IntrEvent(this, true);
        intrEvent->schedule(intrTick);
    }
}
1081
/**
 * Deliver the interrupt to the CPU via the Tsunami cchip, unless one
 * is already pending or the scheduled delivery time hasn't arrived.
 */
void
NSGigE::cpuInterrupt()
{
    // Don't send an interrupt if there's already one
    if (cpuPendingIntr) {
        DPRINTF(EthernetIntr,
                "would send an interrupt now, but there's already pending\n");
        intrTick = 0;
        return;
    }
    // Don't send an interrupt if it's supposed to be delayed
    if (intrTick > curTick) {
        DPRINTF(EthernetIntr, "an interrupt is scheduled for %d, wait til then\n",
                intrTick);
        return;
    }

    // Whether or not there's a pending interrupt, we don't care about
    // it anymore
    intrEvent = 0;
    intrTick = 0;

    // Send interrupt
    cpuPendingIntr = true;
    /** @todo rework the intctrl to be tsunami ok */
    //intctrl->post(TheISA::INTLEVEL_IRQ1, TheISA::INTINDEX_ETHERNET);
    DPRINTF(EthernetIntr, "Posting interrupts to cchip!\n");
    tsunami->cchip->postDRIR(configData->config.hdr.pci0.interruptLine);
}
1111
1112 void
1113 NSGigE::cpuIntrClear()
1114 {
1115 if (cpuPendingIntr) {
1116 cpuPendingIntr = false;
1117 /** @todo rework the intctrl to be tsunami ok */
1118 //intctrl->clear(TheISA::INTLEVEL_IRQ1, TheISA::INTINDEX_ETHERNET);
1119 DPRINTF(EthernetIntr, "clearing all interrupts from cchip\n");
1120 tsunami->cchip->clearDRIR(configData->config.hdr.pci0.interruptLine);
1121 }
1122 }
1123
1124 bool
1125 NSGigE::cpuIntrPending() const
1126 { return cpuPendingIntr; }
1127
/**
 * Return the transmit side to its power-on state: empty FIFO, idle
 * state machine, CR_TXE cleared.  Asserts that no descriptor work or
 * DMA is in flight when the reset happens.
 */
void
NSGigE::txReset()
{

    DPRINTF(Ethernet, "transmit reset\n");

    CTDD = false;
    txFifoAvail = maxTxFifoSize;
    txHalt = false;
    txFragPtr = 0;
    assert(txDescCnt == 0);
    txFifo.clear();
    regs.command &= ~CR_TXE;
    txState = txIdle;
    assert(txDmaState == dmaIdle);
}
1144
/**
 * Return the receive side to its power-on state: empty FIFO, idle
 * state machine, CR_RXE cleared.  Asserts that no packet bytes,
 * descriptor work, or DMA are in flight when the reset happens.
 */
void
NSGigE::rxReset()
{
    DPRINTF(Ethernet, "receive reset\n");

    CRDD = false;
    assert(rxPktBytes == 0);
    rxFifoCnt = 0;
    rxHalt = false;
    rxFragPtr = 0;
    assert(rxDescCnt == 0);
    assert(rxDmaState == dmaIdle);
    rxFifo.clear();
    regs.command &= ~CR_RXE;
    rxState = rxIdle;
}
1161
/**
 * Reset the register file to hardware defaults and clear the cached
 * filter/extsts flags that mirror register bits.  The non-zero values
 * below are the chip's documented reset values -- presumably taken
 * from the DP83820 spec sheet; verify against it if changing.
 */
void NSGigE::regsReset()
{
    memset(&regs, 0, sizeof(regs));
    regs.config = 0x80000000;
    regs.mear = 0x12;
    regs.isr = 0x00608000;
    regs.txcfg = 0x120;
    regs.rxcfg = 0x4;
    regs.srr = 0x0103;
    regs.mibc = 0x2;
    regs.vdr = 0x81;
    regs.tesr = 0xc000;

    // These mirror register bits and must be cleared along with them.
    extstsEnable = false;
    acceptBroadcast = false;
    acceptMulticast = false;
    acceptUnicast = false;
    acceptPerfect = false;
    acceptArp = false;
}
1182
/**
 * Complete an rx DMA read: copy rxDmaLen bytes from physical memory at
 * rxDmaAddr into rxDmaData and return the DMA engine to idle.
 */
void
NSGigE::rxDmaReadCopy()
{
    assert(rxDmaState == dmaReading);

    memcpy(rxDmaData, physmem->dma_addr(rxDmaAddr, rxDmaLen), rxDmaLen);
    rxDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "rx dma read  paddr=%#x len=%d\n",
            rxDmaAddr, rxDmaLen);
    DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
}
1195
/**
 * Start an rx DMA read of rxDmaLen bytes at rxDmaAddr.
 *
 * @return true if the caller must wait (the transfer goes through the
 *         bus model or is scheduled after a delay; rxDmaReadDone will
 *         fire later), false if the copy completed inline.
 */
bool
NSGigE::doRxDmaRead()
{
    assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
    rxDmaState = dmaReading;

    // Timed path: go through the bus DMA interface unless this
    // transfer was flagged free (rxDmaFree).
    if (dmaInterface && !rxDmaFree) {
        if (dmaInterface->busy())
            rxDmaState = dmaReadWaiting;  // retried from rx/txKick
        else
            dmaInterface->doDMA(Read, rxDmaAddr, rxDmaLen, curTick,
                                &rxDmaReadEvent, true);
        return true;
    }

    // No modelled latency at all: complete immediately.
    if (dmaReadDelay == 0 && dmaReadFactor == 0) {
        rxDmaReadCopy();
        return false;
    }

    // Fixed delay plus a per-64-byte-cacheline factor.
    Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
    Tick start = curTick + dmaReadDelay + factor;
    rxDmaReadEvent.schedule(start);
    return true;
}
1221
/**
 * Event handler for a completed (delayed or bus-modelled) rx DMA read:
 * perform the copy, then restart the state machines.
 */
void
NSGigE::rxDmaReadDone()
{
    assert(rxDmaState == dmaReading);
    rxDmaReadCopy();

    // If the transmit state machine has a pending DMA, let it go first
    if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
        txKick();

    rxKick();
}
1234
/**
 * Complete an rx DMA write: copy rxDmaLen bytes from rxDmaData into
 * physical memory at rxDmaAddr and return the DMA engine to idle.
 */
void
NSGigE::rxDmaWriteCopy()
{
    assert(rxDmaState == dmaWriting);

    memcpy(physmem->dma_addr(rxDmaAddr, rxDmaLen), rxDmaData, rxDmaLen);
    rxDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
            rxDmaAddr, rxDmaLen);
    DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
}
1247
/**
 * Start an rx DMA write of rxDmaLen bytes to rxDmaAddr.
 *
 * @return true if the caller must wait (bus-modelled or delayed
 *         transfer; rxDmaWriteDone will fire later), false if the
 *         copy completed inline.
 */
bool
NSGigE::doRxDmaWrite()
{
    assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
    rxDmaState = dmaWriting;

    // Timed path: go through the bus DMA interface unless this
    // transfer was flagged free (rxDmaFree).
    if (dmaInterface && !rxDmaFree) {
        if (dmaInterface->busy())
            rxDmaState = dmaWriteWaiting;  // retried from rx/txKick
        else
            dmaInterface->doDMA(WriteInvalidate, rxDmaAddr, rxDmaLen, curTick,
                                &rxDmaWriteEvent, true);
        return true;
    }

    // No modelled latency at all: complete immediately.
    if (dmaWriteDelay == 0 && dmaWriteFactor == 0) {
        rxDmaWriteCopy();
        return false;
    }

    // Fixed delay plus a per-64-byte-cacheline factor.
    Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
    Tick start = curTick + dmaWriteDelay + factor;
    rxDmaWriteEvent.schedule(start);
    return true;
}
1273
/**
 * Event handler for a completed (delayed or bus-modelled) rx DMA
 * write: perform the copy, then restart the state machines.
 */
void
NSGigE::rxDmaWriteDone()
{
    assert(rxDmaState == dmaWriting);
    rxDmaWriteCopy();

    // If the transmit state machine has a pending DMA, let it go first
    if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
        txKick();

    rxKick();
}
1286
1287 void
1288 NSGigE::rxKick()
1289 {
1290 DPRINTF(EthernetSM, "receive kick state=%s (rxBuf.size=%d)\n",
1291 NsRxStateStrings[rxState], rxFifo.size());
1292
1293 if (rxKickTick > curTick) {
1294 DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
1295 rxKickTick);
1296 return;
1297 }
1298
1299 next:
1300 switch(rxDmaState) {
1301 case dmaReadWaiting:
1302 if (doRxDmaRead())
1303 goto exit;
1304 break;
1305 case dmaWriteWaiting:
1306 if (doRxDmaWrite())
1307 goto exit;
1308 break;
1309 default:
1310 break;
1311 }
1312
1313 // see state machine from spec for details
1314 // the way this works is, if you finish work on one state and can go directly to
1315 // another, you do that through jumping to the label "next". however, if you have
1316 // intermediate work, like DMA so that you can't go to the next state yet, you go to
1317 // exit and exit the loop. however, when the DMA is done it will trigger an
1318 // event and come back to this loop.
1319 switch (rxState) {
1320 case rxIdle:
1321 if (!regs.command & CR_RXE) {
1322 DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n");
1323 goto exit;
1324 }
1325
1326 if (CRDD) {
1327 rxState = rxDescRefr;
1328
1329 rxDmaAddr = regs.rxdp & 0x3fffffff;
1330 rxDmaData = &rxDescCache + offsetof(ns_desc, link);
1331 rxDmaLen = sizeof(rxDescCache.link);
1332 rxDmaFree = dmaDescFree;
1333
1334 descDmaReads++;
1335 descDmaRdBytes += rxDmaLen;
1336
1337 if (doRxDmaRead())
1338 goto exit;
1339 } else {
1340 rxState = rxDescRead;
1341
1342 rxDmaAddr = regs.rxdp & 0x3fffffff;
1343 rxDmaData = &rxDescCache;
1344 rxDmaLen = sizeof(ns_desc);
1345 rxDmaFree = dmaDescFree;
1346
1347 descDmaReads++;
1348 descDmaRdBytes += rxDmaLen;
1349
1350 if (doRxDmaRead())
1351 goto exit;
1352 }
1353 break;
1354
1355 case rxDescRefr:
1356 if (rxDmaState != dmaIdle)
1357 goto exit;
1358
1359 rxState = rxAdvance;
1360 break;
1361
1362 case rxDescRead:
1363 if (rxDmaState != dmaIdle)
1364 goto exit;
1365
1366 DPRINTF(EthernetDesc,
1367 "rxDescCache:\n\tlink=%08x\n\tbufptr=%08x\n\tcmdsts=%08x\n\textsts=%08x\n"
1368 ,rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts,
1369 rxDescCache.extsts);
1370
1371 if (rxDescCache.cmdsts & CMDSTS_OWN) {
1372 rxState = rxIdle;
1373 } else {
1374 rxState = rxFifoBlock;
1375 rxFragPtr = rxDescCache.bufptr;
1376 rxDescCnt = rxDescCache.cmdsts & CMDSTS_LEN_MASK;
1377 }
1378 break;
1379
1380 case rxFifoBlock:
1381 if (!rxPacket) {
1382 /**
1383 * @todo in reality, we should be able to start processing
1384 * the packet as it arrives, and not have to wait for the
1385 * full packet ot be in the receive fifo.
1386 */
1387 if (rxFifo.empty())
1388 goto exit;
1389
1390 DPRINTF(EthernetSM, "\n\n*****processing receive of new packet\n");
1391
1392 // If we don't have a packet, grab a new one from the fifo.
1393 rxPacket = rxFifo.front();
1394 rxPktBytes = rxPacket->length;
1395 rxPacketBufPtr = rxPacket->data;
1396
1397 if (DTRACE(Ethernet)) {
1398 if (rxPacket->isIpPkt()) {
1399 ip_header *ip = rxPacket->getIpHdr();
1400 DPRINTF(Ethernet, "ID is %d\n", reverseEnd16(ip->ID));
1401 if (rxPacket->isTcpPkt()) {
1402 tcp_header *tcp = rxPacket->getTcpHdr(ip);
1403 DPRINTF(Ethernet, "Src Port = %d, Dest Port = %d\n",
1404 reverseEnd16(tcp->src_port_num),
1405 reverseEnd16(tcp->dest_port_num));
1406 }
1407 }
1408 }
1409
1410 // sanity check - i think the driver behaves like this
1411 assert(rxDescCnt >= rxPktBytes);
1412
1413 // Must clear the value before popping to decrement the
1414 // reference count
1415 rxFifo.front() = NULL;
1416 rxFifo.pop_front();
1417 rxFifoCnt -= rxPacket->length;
1418 }
1419
1420
1421 // dont' need the && rxDescCnt > 0 if driver sanity check above holds
1422 if (rxPktBytes > 0) {
1423 rxState = rxFragWrite;
1424 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity check holds
1425 rxXferLen = rxPktBytes;
1426
1427 rxDmaAddr = rxFragPtr & 0x3fffffff;
1428 rxDmaData = rxPacketBufPtr;
1429 rxDmaLen = rxXferLen;
1430 rxDmaFree = dmaDataFree;
1431
1432 if (doRxDmaWrite())
1433 goto exit;
1434
1435 } else {
1436 rxState = rxDescWrite;
1437
1438 //if (rxPktBytes == 0) { /* packet is done */
1439 assert(rxPktBytes == 0);
1440 DPRINTF(EthernetSM, "done with receiving packet\n");
1441
1442 rxDescCache.cmdsts |= CMDSTS_OWN;
1443 rxDescCache.cmdsts &= ~CMDSTS_MORE;
1444 rxDescCache.cmdsts |= CMDSTS_OK;
1445 rxDescCache.cmdsts &= 0xffff0000;
1446 rxDescCache.cmdsts += rxPacket->length; //i.e. set CMDSTS_SIZE
1447
1448 #if 0
1449 /* all the driver uses these are for its own stats keeping
1450 which we don't care about, aren't necessary for functionality
1451 and doing this would just slow us down. if they end up using
1452 this in a later version for functional purposes, just undef
1453 */
1454 if (rxFilterEnable) {
1455 rxDescCache.cmdsts &= ~CMDSTS_DEST_MASK;
1456 if (rxFifo.front()->IsUnicast())
1457 rxDescCache.cmdsts |= CMDSTS_DEST_SELF;
1458 if (rxFifo.front()->IsMulticast())
1459 rxDescCache.cmdsts |= CMDSTS_DEST_MULTI;
1460 if (rxFifo.front()->IsBroadcast())
1461 rxDescCache.cmdsts |= CMDSTS_DEST_MASK;
1462 }
1463 #endif
1464
1465 if (rxPacket->isIpPkt() && extstsEnable) {
1466 rxDescCache.extsts |= EXTSTS_IPPKT;
1467 rxIPChecksums++;
1468 if (!ipChecksum(rxPacket, false)) {
1469 DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
1470 rxDescCache.extsts |= EXTSTS_IPERR;
1471 }
1472 if (rxPacket->isTcpPkt()) {
1473 rxDescCache.extsts |= EXTSTS_TCPPKT;
1474 rxTCPChecksums++;
1475 if (!tcpChecksum(rxPacket, false)) {
1476 DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
1477 rxDescCache.extsts |= EXTSTS_TCPERR;
1478
1479 }
1480 } else if (rxPacket->isUdpPkt()) {
1481 rxDescCache.extsts |= EXTSTS_UDPPKT;
1482 if (!udpChecksum(rxPacket, false)) {
1483 DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
1484 rxDescCache.extsts |= EXTSTS_UDPERR;
1485 }
1486 }
1487 }
1488 rxPacket = 0;
1489
1490 /* the driver seems to always receive into desc buffers
1491 of size 1514, so you never have a pkt that is split
1492 into multiple descriptors on the receive side, so
1493 i don't implement that case, hence the assert above.
1494 */
1495
1496 DPRINTF(EthernetDesc, "rxDesc writeback:\n\tcmdsts=%08x\n\textsts=%08x\n",
1497 rxDescCache.cmdsts, rxDescCache.extsts);
1498
1499 rxDmaAddr = (regs.rxdp + offsetof(ns_desc, cmdsts)) & 0x3fffffff;
1500 rxDmaData = &(rxDescCache.cmdsts);
1501 rxDmaLen = sizeof(rxDescCache.cmdsts) + sizeof(rxDescCache.extsts);
1502 rxDmaFree = dmaDescFree;
1503
1504 descDmaWrites++;
1505 descDmaWrBytes += rxDmaLen;
1506
1507 if (doRxDmaWrite())
1508 goto exit;
1509 }
1510 break;
1511
1512 case rxFragWrite:
1513 if (rxDmaState != dmaIdle)
1514 goto exit;
1515
1516 rxPacketBufPtr += rxXferLen;
1517 rxFragPtr += rxXferLen;
1518 rxPktBytes -= rxXferLen;
1519
1520 rxState = rxFifoBlock;
1521 break;
1522
1523 case rxDescWrite:
1524 if (rxDmaState != dmaIdle)
1525 goto exit;
1526
1527 assert(rxDescCache.cmdsts & CMDSTS_OWN);
1528
1529 assert(rxPacket == 0);
1530 devIntrPost(ISR_RXOK);
1531
1532 if (rxDescCache.cmdsts & CMDSTS_INTR)
1533 devIntrPost(ISR_RXDESC);
1534
1535 if (rxHalt) {
1536 DPRINTF(EthernetSM, "Halting the RX state machine\n");
1537 rxState = rxIdle;
1538 rxHalt = false;
1539 } else
1540 rxState = rxAdvance;
1541 break;
1542
1543 case rxAdvance:
1544 if (rxDescCache.link == 0) {
1545 rxState = rxIdle;
1546 return;
1547 } else {
1548 rxState = rxDescRead;
1549 regs.rxdp = rxDescCache.link;
1550 CRDD = false;
1551
1552 rxDmaAddr = regs.rxdp & 0x3fffffff;
1553 rxDmaData = &rxDescCache;
1554 rxDmaLen = sizeof(ns_desc);
1555 rxDmaFree = dmaDescFree;
1556
1557 if (doRxDmaRead())
1558 goto exit;
1559 }
1560 break;
1561
1562 default:
1563 panic("Invalid rxState!");
1564 }
1565
1566
1567 DPRINTF(EthernetSM, "entering next rx state = %s\n",
1568 NsRxStateStrings[rxState]);
1569
1570 if (rxState == rxIdle) {
1571 regs.command &= ~CR_RXE;
1572 devIntrPost(ISR_RXIDLE);
1573 return;
1574 }
1575
1576 goto next;
1577
1578 exit:
1579 /**
1580 * @todo do we want to schedule a future kick?
1581 */
1582 DPRINTF(EthernetSM, "rx state machine exited state=%s\n",
1583 NsRxStateStrings[rxState]);
1584 }
1585
1586 void
1587 NSGigE::transmit()
1588 {
1589 if (txFifo.empty()) {
1590 DPRINTF(Ethernet, "nothing to transmit\n");
1591 return;
1592 }
1593
1594 DPRINTF(Ethernet, "\n\nAttempt Pkt Transmit: txFifo length = %d\n",
1595 maxTxFifoSize - txFifoAvail);
1596 if (interface->sendPacket(txFifo.front())) {
1597 if (DTRACE(Ethernet)) {
1598 if (txFifo.front()->isIpPkt()) {
1599 ip_header *ip = txFifo.front()->getIpHdr();
1600 DPRINTF(Ethernet, "ID is %d\n", reverseEnd16(ip->ID));
1601 if (txFifo.front()->isTcpPkt()) {
1602 tcp_header *tcp = txFifo.front()->getTcpHdr(ip);
1603 DPRINTF(Ethernet, "Src Port = %d, Dest Port = %d\n",
1604 reverseEnd16(tcp->src_port_num),
1605 reverseEnd16(tcp->dest_port_num));
1606 }
1607 }
1608 }
1609
1610 DDUMP(Ethernet, txFifo.front()->data, txFifo.front()->length);
1611 txBytes += txFifo.front()->length;
1612 txPackets++;
1613
1614 txFifoAvail += txFifo.front()->length;
1615
1616 DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n", txFifoAvail);
1617 txFifo.front() = NULL;
1618 txFifo.pop_front();
1619
1620 /* normally do a writeback of the descriptor here, and ONLY after that is
1621 done, send this interrupt. but since our stuff never actually fails,
1622 just do this interrupt here, otherwise the code has to stray from this
1623 nice format. besides, it's functionally the same.
1624 */
1625 devIntrPost(ISR_TXOK);
1626 } else
1627 DPRINTF(Ethernet, "May need to rethink always sending the descriptors back?\n");
1628
1629 if (!txFifo.empty() && !txEvent.scheduled()) {
1630 DPRINTF(Ethernet, "reschedule transmit\n");
1631 txEvent.schedule(curTick + 1000);
1632 }
1633 }
1634
1635 void
1636 NSGigE::txDmaReadCopy()
1637 {
1638 assert(txDmaState == dmaReading);
1639
1640 memcpy(txDmaData, physmem->dma_addr(txDmaAddr, txDmaLen), txDmaLen);
1641 txDmaState = dmaIdle;
1642
1643 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
1644 txDmaAddr, txDmaLen);
1645 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1646 }
1647
1648 bool
1649 NSGigE::doTxDmaRead()
1650 {
1651 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
1652 txDmaState = dmaReading;
1653
1654 if (dmaInterface && !txDmaFree) {
1655 if (dmaInterface->busy())
1656 txDmaState = dmaReadWaiting;
1657 else
1658 dmaInterface->doDMA(Read, txDmaAddr, txDmaLen, curTick,
1659 &txDmaReadEvent, true);
1660 return true;
1661 }
1662
1663 if (dmaReadDelay == 0 && dmaReadFactor == 0.0) {
1664 txDmaReadCopy();
1665 return false;
1666 }
1667
1668 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
1669 Tick start = curTick + dmaReadDelay + factor;
1670 txDmaReadEvent.schedule(start);
1671 return true;
1672 }
1673
1674 void
1675 NSGigE::txDmaReadDone()
1676 {
1677 assert(txDmaState == dmaReading);
1678 txDmaReadCopy();
1679
1680 // If the receive state machine has a pending DMA, let it go first
1681 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1682 rxKick();
1683
1684 txKick();
1685 }
1686
1687 void
1688 NSGigE::txDmaWriteCopy()
1689 {
1690 assert(txDmaState == dmaWriting);
1691
1692 memcpy(physmem->dma_addr(txDmaAddr, txDmaLen), txDmaData, txDmaLen);
1693 txDmaState = dmaIdle;
1694
1695 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
1696 txDmaAddr, txDmaLen);
1697 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1698 }
1699
1700 bool
1701 NSGigE::doTxDmaWrite()
1702 {
1703 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
1704 txDmaState = dmaWriting;
1705
1706 if (dmaInterface && !txDmaFree) {
1707 if (dmaInterface->busy())
1708 txDmaState = dmaWriteWaiting;
1709 else
1710 dmaInterface->doDMA(WriteInvalidate, txDmaAddr, txDmaLen, curTick,
1711 &txDmaWriteEvent, true);
1712 return true;
1713 }
1714
1715 if (dmaWriteDelay == 0 && dmaWriteFactor == 0.0) {
1716 txDmaWriteCopy();
1717 return false;
1718 }
1719
1720 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
1721 Tick start = curTick + dmaWriteDelay + factor;
1722 txDmaWriteEvent.schedule(start);
1723 return true;
1724 }
1725
1726 void
1727 NSGigE::txDmaWriteDone()
1728 {
1729 assert(txDmaState == dmaWriting);
1730 txDmaWriteCopy();
1731
1732 // If the receive state machine has a pending DMA, let it go first
1733 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1734 rxKick();
1735
1736 txKick();
1737 }
1738
1739 void
1740 NSGigE::txKick()
1741 {
1742 DPRINTF(EthernetSM, "transmit kick state=%s\n", NsTxStateStrings[txState]);
1743
1744 if (txKickTick > curTick) {
1745 DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
1746 txKickTick);
1747
1748 return;
1749 }
1750
1751 next:
1752 switch(txDmaState) {
1753 case dmaReadWaiting:
1754 if (doTxDmaRead())
1755 goto exit;
1756 break;
1757 case dmaWriteWaiting:
1758 if (doTxDmaWrite())
1759 goto exit;
1760 break;
1761 default:
1762 break;
1763 }
1764
1765 switch (txState) {
1766 case txIdle:
1767 if (!regs.command & CR_TXE) {
1768 DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n");
1769 goto exit;
1770 }
1771
1772 if (CTDD) {
1773 txState = txDescRefr;
1774
1775 txDmaAddr = regs.txdp & 0x3fffffff;
1776 txDmaData = &txDescCache + offsetof(ns_desc, link);
1777 txDmaLen = sizeof(txDescCache.link);
1778 txDmaFree = dmaDescFree;
1779
1780 descDmaReads++;
1781 descDmaRdBytes += txDmaLen;
1782
1783 if (doTxDmaRead())
1784 goto exit;
1785
1786 } else {
1787 txState = txDescRead;
1788
1789 txDmaAddr = regs.txdp & 0x3fffffff;
1790 txDmaData = &txDescCache;
1791 txDmaLen = sizeof(ns_desc);
1792 txDmaFree = dmaDescFree;
1793
1794 descDmaReads++;
1795 descDmaRdBytes += txDmaLen;
1796
1797 if (doTxDmaRead())
1798 goto exit;
1799 }
1800 break;
1801
1802 case txDescRefr:
1803 if (txDmaState != dmaIdle)
1804 goto exit;
1805
1806 txState = txAdvance;
1807 break;
1808
1809 case txDescRead:
1810 if (txDmaState != dmaIdle)
1811 goto exit;
1812
1813 DPRINTF(EthernetDesc,
1814 "txDescCache data:\n\tlink=%08x\n\tbufptr=%08x\n\tcmdsts=%08x\n\textsts=%08x\n"
1815 ,txDescCache.link, txDescCache.bufptr, txDescCache.cmdsts,
1816 txDescCache.extsts);
1817
1818 if (txDescCache.cmdsts & CMDSTS_OWN) {
1819 txState = txFifoBlock;
1820 txFragPtr = txDescCache.bufptr;
1821 txDescCnt = txDescCache.cmdsts & CMDSTS_LEN_MASK;
1822 } else {
1823 txState = txIdle;
1824 }
1825 break;
1826
1827 case txFifoBlock:
1828 if (!txPacket) {
1829 DPRINTF(EthernetSM, "\n\n*****starting the tx of a new packet\n");
1830 txPacket = new EtherPacket;
1831 txPacket->data = new uint8_t[16384];
1832 txPacketBufPtr = txPacket->data;
1833 }
1834
1835 if (txDescCnt == 0) {
1836 DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n");
1837 if (txDescCache.cmdsts & CMDSTS_MORE) {
1838 DPRINTF(EthernetSM, "there are more descriptors to come\n");
1839 txState = txDescWrite;
1840
1841 txDescCache.cmdsts &= ~CMDSTS_OWN;
1842
1843 txDmaAddr = (regs.txdp + offsetof(ns_desc, cmdsts)) & 0x3fffffff;
1844 txDmaData = &(txDescCache.cmdsts);
1845 txDmaLen = sizeof(txDescCache.cmdsts);
1846 txDmaFree = dmaDescFree;
1847
1848 if (doTxDmaWrite())
1849 goto exit;
1850
1851 } else { /* this packet is totally done */
1852 DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
1853 /* deal with the the packet that just finished */
1854 if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
1855 if (txDescCache.extsts & EXTSTS_UDPPKT) {
1856 udpChecksum(txPacket, true);
1857 } else if (txDescCache.extsts & EXTSTS_TCPPKT) {
1858 tcpChecksum(txPacket, true);
1859 txTCPChecksums++;
1860 }
1861 if (txDescCache.extsts & EXTSTS_IPPKT) {
1862 ipChecksum(txPacket, true);
1863 txIPChecksums++;
1864 }
1865 }
1866
1867 txPacket->length = txPacketBufPtr - txPacket->data;
1868 /* this is just because the receive can't handle a packet bigger
1869 want to make sure */
1870 assert(txPacket->length <= 1514);
1871 txFifo.push_back(txPacket);
1872
1873 /* this following section is not to spec, but functionally shouldn't
1874 be any different. normally, the chip will wait til the transmit has
1875 occurred before writing back the descriptor because it has to wait
1876 to see that it was successfully transmitted to decide whether to set
1877 CMDSTS_OK or not. however, in the simulator since it is always
1878 successfully transmitted, and writing it exactly to spec would
1879 complicate the code, we just do it here
1880 */
1881
1882 txDescCache.cmdsts &= ~CMDSTS_OWN;
1883 txDescCache.cmdsts |= CMDSTS_OK;
1884
1885 DPRINTF(EthernetDesc,
1886 "txDesc writeback:\n\tcmdsts=%08x\n\textsts=%08x\n",
1887 txDescCache.cmdsts, txDescCache.extsts);
1888
1889 txDmaAddr = (regs.txdp + offsetof(ns_desc, cmdsts)) & 0x3fffffff;
1890 txDmaData = &(txDescCache.cmdsts);
1891 txDmaLen = sizeof(txDescCache.cmdsts) + sizeof(txDescCache.extsts);
1892 txDmaFree = dmaDescFree;
1893
1894 descDmaWrites++;
1895 descDmaWrBytes += txDmaLen;
1896
1897 transmit();
1898 txPacket = 0;
1899
1900 if (txHalt) {
1901 DPRINTF(EthernetSM, "halting TX state machine\n");
1902 txState = txIdle;
1903 txHalt = false;
1904 } else
1905 txState = txAdvance;
1906
1907 if (doTxDmaWrite())
1908 goto exit;
1909 }
1910 } else {
1911 DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
1912 if (txFifoAvail) {
1913 txState = txFragRead;
1914
1915 /* The number of bytes transferred is either whatever is left
1916 in the descriptor (txDescCnt), or if there is not enough
1917 room in the fifo, just whatever room is left in the fifo
1918 */
1919 txXferLen = min<uint32_t>(txDescCnt, txFifoAvail);
1920
1921 txDmaAddr = txFragPtr & 0x3fffffff;
1922 txDmaData = txPacketBufPtr;
1923 txDmaLen = txXferLen;
1924 txDmaFree = dmaDataFree;
1925
1926 if (doTxDmaRead())
1927 goto exit;
1928 } else {
1929 txState = txFifoBlock;
1930 transmit();
1931
1932 goto exit;
1933 }
1934
1935 }
1936 break;
1937
1938 case txFragRead:
1939 if (txDmaState != dmaIdle)
1940 goto exit;
1941
1942 txPacketBufPtr += txXferLen;
1943 txFragPtr += txXferLen;
1944 txDescCnt -= txXferLen;
1945 txFifoAvail -= txXferLen;
1946
1947 txState = txFifoBlock;
1948 break;
1949
1950 case txDescWrite:
1951 if (txDmaState != dmaIdle)
1952 goto exit;
1953
1954 if (txDescCache.cmdsts & CMDSTS_INTR) {
1955 devIntrPost(ISR_TXDESC);
1956 }
1957
1958 txState = txAdvance;
1959 break;
1960
1961 case txAdvance:
1962 if (txDescCache.link == 0) {
1963 txState = txIdle;
1964 } else {
1965 txState = txDescRead;
1966 regs.txdp = txDescCache.link;
1967 CTDD = false;
1968
1969 txDmaAddr = txDescCache.link & 0x3fffffff;
1970 txDmaData = &txDescCache;
1971 txDmaLen = sizeof(ns_desc);
1972 txDmaFree = dmaDescFree;
1973
1974 if (doTxDmaRead())
1975 goto exit;
1976 }
1977 break;
1978
1979 default:
1980 panic("invalid state");
1981 }
1982
1983 DPRINTF(EthernetSM, "entering next tx state=%s\n",
1984 NsTxStateStrings[txState]);
1985
1986 if (txState == txIdle) {
1987 regs.command &= ~CR_TXE;
1988 devIntrPost(ISR_TXIDLE);
1989 return;
1990 }
1991
1992 goto next;
1993
1994 exit:
1995 /**
1996 * @todo do we want to schedule a future kick?
1997 */
1998 DPRINTF(EthernetSM, "tx state machine exited state=%s\n",
1999 NsTxStateStrings[txState]);
2000 }
2001
2002 void
2003 NSGigE::transferDone()
2004 {
2005 if (txFifo.empty())
2006 return;
2007
2008 if (txEvent.scheduled())
2009 txEvent.reschedule(curTick + 1);
2010 else
2011 txEvent.schedule(curTick + 1);
2012 }
2013
2014 bool
2015 NSGigE::rxFilter(PacketPtr packet)
2016 {
2017 bool drop = true;
2018 string type;
2019
2020 if (packet->IsUnicast()) {
2021 type = "unicast";
2022
2023 // If we're accepting all unicast addresses
2024 if (acceptUnicast)
2025 drop = false;
2026
2027 // If we make a perfect match
2028 if ((acceptPerfect)
2029 && (memcmp(rom.perfectMatch, packet->data, sizeof(rom.perfectMatch)) == 0))
2030 drop = false;
2031
2032 eth_header *eth = (eth_header *) packet->data;
2033 if ((acceptArp) && (eth->type == 0x608))
2034 drop = false;
2035
2036 } else if (packet->IsBroadcast()) {
2037 type = "broadcast";
2038
2039 // if we're accepting broadcasts
2040 if (acceptBroadcast)
2041 drop = false;
2042
2043 } else if (packet->IsMulticast()) {
2044 type = "multicast";
2045
2046 // if we're accepting all multicasts
2047 if (acceptMulticast)
2048 drop = false;
2049
2050 } else {
2051 type = "unknown";
2052
2053 // oh well, punt on this one
2054 }
2055
2056 if (drop) {
2057 DPRINTF(Ethernet, "rxFilter drop\n");
2058 DDUMP(EthernetData, packet->data, packet->length);
2059 }
2060
2061 return drop;
2062 }
2063
2064 bool
2065 NSGigE::recvPacket(PacketPtr packet)
2066 {
2067 rxBytes += packet->length;
2068 rxPackets++;
2069
2070 DPRINTF(Ethernet, "\n\nReceiving packet from wire, rxFifoAvail = %d\n", maxRxFifoSize - rxFifoCnt);
2071
2072 if (rxState == rxIdle) {
2073 DPRINTF(Ethernet, "receive disabled...packet dropped\n");
2074 interface->recvDone();
2075 return true;
2076 }
2077
2078 if (rxFilterEnable && rxFilter(packet)) {
2079 DPRINTF(Ethernet, "packet filtered...dropped\n");
2080 interface->recvDone();
2081 return true;
2082 }
2083
2084 if ((rxFifoCnt + packet->length) >= maxRxFifoSize) {
2085 DPRINTF(Ethernet,
2086 "packet will not fit in receive buffer...packet dropped\n");
2087 devIntrPost(ISR_RXORN);
2088 return false;
2089 }
2090
2091 rxFifo.push_back(packet);
2092 rxFifoCnt += packet->length;
2093 interface->recvDone();
2094
2095 rxKick();
2096 return true;
2097 }
2098
2099 /**
2100 * does a udp checksum. if gen is true, then it generates it and puts it in the right place
2101 * else, it just checks what it calculates against the value in the header in packet
2102 */
2103 bool
2104 NSGigE::udpChecksum(PacketPtr packet, bool gen)
2105 {
2106 ip_header *ip = packet->getIpHdr();
2107 udp_header *hdr = packet->getUdpHdr(ip);
2108
2109 pseudo_header *pseudo = new pseudo_header;
2110
2111 pseudo->src_ip_addr = ip->src_ip_addr;
2112 pseudo->dest_ip_addr = ip->dest_ip_addr;
2113 pseudo->protocol = ip->protocol;
2114 pseudo->len = hdr->len;
2115
2116 uint16_t cksum = checksumCalc((uint16_t *) pseudo, (uint16_t *) hdr,
2117 (uint32_t) hdr->len);
2118
2119 delete pseudo;
2120 if (gen)
2121 hdr->chksum = cksum;
2122 else
2123 if (cksum != 0)
2124 return false;
2125
2126 return true;
2127 }
2128
2129 bool
2130 NSGigE::tcpChecksum(PacketPtr packet, bool gen)
2131 {
2132 ip_header *ip = packet->getIpHdr();
2133 tcp_header *hdr = packet->getTcpHdr(ip);
2134
2135 uint16_t cksum;
2136 pseudo_header *pseudo = new pseudo_header;
2137 if (!gen) {
2138 pseudo->src_ip_addr = ip->src_ip_addr;
2139 pseudo->dest_ip_addr = ip->dest_ip_addr;
2140 pseudo->protocol = reverseEnd16(ip->protocol);
2141 pseudo->len = reverseEnd16(reverseEnd16(ip->dgram_len) - (ip->vers_len & 0xf)*4);
2142
2143 cksum = checksumCalc((uint16_t *) pseudo, (uint16_t *) hdr,
2144 (uint32_t) reverseEnd16(pseudo->len));
2145 } else {
2146 pseudo->src_ip_addr = 0;
2147 pseudo->dest_ip_addr = 0;
2148 pseudo->protocol = hdr->chksum;
2149 pseudo->len = 0;
2150 hdr->chksum = 0;
2151 cksum = checksumCalc((uint16_t *) pseudo, (uint16_t *) hdr,
2152 (uint32_t) (reverseEnd16(ip->dgram_len) - (ip->vers_len & 0xf)*4));
2153 }
2154
2155 delete pseudo;
2156 if (gen)
2157 hdr->chksum = cksum;
2158 else
2159 if (cksum != 0)
2160 return false;
2161
2162 return true;
2163 }
2164
2165 bool
2166 NSGigE::ipChecksum(PacketPtr packet, bool gen)
2167 {
2168 ip_header *hdr = packet->getIpHdr();
2169
2170 uint16_t cksum = checksumCalc(NULL, (uint16_t *) hdr, (hdr->vers_len & 0xf)*4);
2171
2172 if (gen) {
2173 DPRINTF(EthernetCksum, "generated checksum: %#x\n", cksum);
2174 hdr->hdr_chksum = cksum;
2175 }
2176 else
2177 if (cksum != 0)
2178 return false;
2179
2180 return true;
2181 }
2182
2183 uint16_t
2184 NSGigE::checksumCalc(uint16_t *pseudo, uint16_t *buf, uint32_t len)
2185 {
2186 uint32_t sum = 0;
2187
2188 uint16_t last_pad = 0;
2189 if (len & 1) {
2190 last_pad = buf[len/2] & 0xff;
2191 len--;
2192 sum += last_pad;
2193 }
2194
2195 if (pseudo) {
2196 sum = pseudo[0] + pseudo[1] + pseudo[2] +
2197 pseudo[3] + pseudo[4] + pseudo[5];
2198 }
2199
2200 for (int i=0; i < (len/2); ++i) {
2201 sum += buf[i];
2202 }
2203
2204 while (sum >> 16)
2205 sum = (sum >> 16) + (sum & 0xffff);
2206
2207 return ~sum;
2208 }
2209
2210 //=====================================================================
2211 //
2212 //
/**
 * Serialize device state to a checkpoint stream.
 *
 * The order and names of the SERIALIZE_* calls define the checkpoint
 * layout and must stay in agreement with unserialize().
 *
 * @param os checkpoint output stream
 */
void
NSGigE::serialize(ostream &os)
{
    // Serialize the PciDev base class
    PciDev::serialize(os);

    /*
     * Finalize any DMA events now.
     */
    // Complete any in-flight DMA copies immediately so no pending DMA
    // event needs to be represented in the checkpoint.
    if (rxDmaReadEvent.scheduled())
        rxDmaReadCopy();
    if (rxDmaWriteEvent.scheduled())
        rxDmaWriteCopy();
    if (txDmaReadEvent.scheduled())
        txDmaReadCopy();
    if (txDmaWriteEvent.scheduled())
        txDmaWriteCopy();

    /*
     * Serialize the device registers
     */
    SERIALIZE_SCALAR(regs.command);
    SERIALIZE_SCALAR(regs.config);
    SERIALIZE_SCALAR(regs.mear);
    SERIALIZE_SCALAR(regs.ptscr);
    SERIALIZE_SCALAR(regs.isr);
    SERIALIZE_SCALAR(regs.imr);
    SERIALIZE_SCALAR(regs.ier);
    SERIALIZE_SCALAR(regs.ihr);
    SERIALIZE_SCALAR(regs.txdp);
    SERIALIZE_SCALAR(regs.txdp_hi);
    SERIALIZE_SCALAR(regs.txcfg);
    SERIALIZE_SCALAR(regs.gpior);
    SERIALIZE_SCALAR(regs.rxdp);
    SERIALIZE_SCALAR(regs.rxdp_hi);
    SERIALIZE_SCALAR(regs.rxcfg);
    SERIALIZE_SCALAR(regs.pqcr);
    SERIALIZE_SCALAR(regs.wcsr);
    SERIALIZE_SCALAR(regs.pcr);
    SERIALIZE_SCALAR(regs.rfcr);
    SERIALIZE_SCALAR(regs.rfdr);
    SERIALIZE_SCALAR(regs.srr);
    SERIALIZE_SCALAR(regs.mibc);
    SERIALIZE_SCALAR(regs.vrcr);
    SERIALIZE_SCALAR(regs.vtcr);
    SERIALIZE_SCALAR(regs.vdr);
    SERIALIZE_SCALAR(regs.ccsr);
    SERIALIZE_SCALAR(regs.tbicr);
    SERIALIZE_SCALAR(regs.tbisr);
    SERIALIZE_SCALAR(regs.tanar);
    SERIALIZE_SCALAR(regs.tanlpar);
    SERIALIZE_SCALAR(regs.taner);
    SERIALIZE_SCALAR(regs.tesr);

    SERIALIZE_ARRAY(rom.perfectMatch, EADDR_LEN);

    SERIALIZE_SCALAR(ioEnable);

    /*
     * Serialize the data Fifos
     */
    // Each queued packet is written out as a named sub-section.
    // NOTE(review): these are emitted as "<name>.txFifo<i>" but the
    // unserialize code visible in this file reads txFifo packets back
    // under the "rxFifo" name -- verify the two sides agree.
    int txNumPkts = txFifo.size();
    SERIALIZE_SCALAR(txNumPkts);
    int i = 0;
    pktiter_t end = txFifo.end();
    for (pktiter_t p = txFifo.begin(); p != end; ++p) {
        nameOut(os, csprintf("%s.txFifo%d", name(), i++));
        (*p)->serialize(os);
    }

    int rxNumPkts = rxFifo.size();
    SERIALIZE_SCALAR(rxNumPkts);
    i = 0;
    end = rxFifo.end();
    for (pktiter_t p = rxFifo.begin(); p != end; ++p) {
        nameOut(os, csprintf("%s.rxFifo%d", name(), i++));
        (*p)->serialize(os);
    }

    /*
     * Serialize the various helper variables
     */
    // In-flight packets are stored along with the offset of the
    // current buffer cursor so the raw pointer can be rebuilt on load.
    bool txPacketExists = txPacket;
    SERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        nameOut(os, csprintf("%s.txPacket", name()));
        txPacket->serialize(os);
        uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
        SERIALIZE_SCALAR(txPktBufPtr);
    }

    bool rxPacketExists = rxPacket;
    SERIALIZE_SCALAR(rxPacketExists);
    if (rxPacketExists) {
        nameOut(os, csprintf("%s.rxPacket", name()));
        rxPacket->serialize(os);
        uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
        SERIALIZE_SCALAR(rxPktBufPtr);
    }

    SERIALIZE_SCALAR(txXferLen);
    SERIALIZE_SCALAR(rxXferLen);

    /*
     * Serialize DescCaches
     */
    SERIALIZE_SCALAR(txDescCache.link);
    SERIALIZE_SCALAR(txDescCache.bufptr);
    SERIALIZE_SCALAR(txDescCache.cmdsts);
    SERIALIZE_SCALAR(txDescCache.extsts);
    SERIALIZE_SCALAR(rxDescCache.link);
    SERIALIZE_SCALAR(rxDescCache.bufptr);
    SERIALIZE_SCALAR(rxDescCache.cmdsts);
    SERIALIZE_SCALAR(rxDescCache.extsts);

    /*
     * Serialize tx state machine
     */
    // Enum states are widened to int for the checkpoint format.
    int txState = this->txState;
    SERIALIZE_SCALAR(txState);
    SERIALIZE_SCALAR(CTDD);
    SERIALIZE_SCALAR(txFifoAvail);
    SERIALIZE_SCALAR(txHalt);
    SERIALIZE_SCALAR(txFragPtr);
    SERIALIZE_SCALAR(txDescCnt);
    int txDmaState = this->txDmaState;
    SERIALIZE_SCALAR(txDmaState);

    /*
     * Serialize rx state machine
     */
    int rxState = this->rxState;
    SERIALIZE_SCALAR(rxState);
    SERIALIZE_SCALAR(CRDD);
    SERIALIZE_SCALAR(rxPktBytes);
    SERIALIZE_SCALAR(rxFifoCnt);
    SERIALIZE_SCALAR(rxHalt);
    SERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState = this->rxDmaState;
    SERIALIZE_SCALAR(rxDmaState);

    SERIALIZE_SCALAR(extstsEnable);

    /*
     * If there's a pending transmit, store the time so we can
     * reschedule it later
     */
    // Stored relative to curTick so it survives a restore at a
    // different absolute tick.
    Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
    SERIALIZE_SCALAR(transmitTick);

    /*
     * receive address filter settings
     */
    SERIALIZE_SCALAR(rxFilterEnable);
    SERIALIZE_SCALAR(acceptBroadcast);
    SERIALIZE_SCALAR(acceptMulticast);
    SERIALIZE_SCALAR(acceptUnicast);
    SERIALIZE_SCALAR(acceptPerfect);
    SERIALIZE_SCALAR(acceptArp);

    /*
     * Keep track of pending interrupt status.
     */
    SERIALIZE_SCALAR(intrTick);
    SERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick = 0;
    if (intrEvent)
        intrEventTick = intrEvent->when();
    SERIALIZE_SCALAR(intrEventTick);

}
2384
2385 void
2386 NSGigE::unserialize(Checkpoint *cp, const std::string &section)
2387 {
2388 // Unserialize the PciDev base class
2389 PciDev::unserialize(cp, section);
2390
2391 UNSERIALIZE_SCALAR(regs.command);
2392 UNSERIALIZE_SCALAR(regs.config);
2393 UNSERIALIZE_SCALAR(regs.mear);
2394 UNSERIALIZE_SCALAR(regs.ptscr);
2395 UNSERIALIZE_SCALAR(regs.isr);
2396 UNSERIALIZE_SCALAR(regs.imr);
2397 UNSERIALIZE_SCALAR(regs.ier);
2398 UNSERIALIZE_SCALAR(regs.ihr);
2399 UNSERIALIZE_SCALAR(regs.txdp);
2400 UNSERIALIZE_SCALAR(regs.txdp_hi);
2401 UNSERIALIZE_SCALAR(regs.txcfg);
2402 UNSERIALIZE_SCALAR(regs.gpior);
2403 UNSERIALIZE_SCALAR(regs.rxdp);
2404 UNSERIALIZE_SCALAR(regs.rxdp_hi);
2405 UNSERIALIZE_SCALAR(regs.rxcfg);
2406 UNSERIALIZE_SCALAR(regs.pqcr);
2407 UNSERIALIZE_SCALAR(regs.wcsr);
2408 UNSERIALIZE_SCALAR(regs.pcr);
2409 UNSERIALIZE_SCALAR(regs.rfcr);
2410 UNSERIALIZE_SCALAR(regs.rfdr);
2411 UNSERIALIZE_SCALAR(regs.srr);
2412 UNSERIALIZE_SCALAR(regs.mibc);
2413 UNSERIALIZE_SCALAR(regs.vrcr);
2414 UNSERIALIZE_SCALAR(regs.vtcr);
2415 UNSERIALIZE_SCALAR(regs.vdr);
2416 UNSERIALIZE_SCALAR(regs.ccsr);
2417 UNSERIALIZE_SCALAR(regs.tbicr);
2418 UNSERIALIZE_SCALAR(regs.tbisr);
2419 UNSERIALIZE_SCALAR(regs.tanar);
2420 UNSERIALIZE_SCALAR(regs.tanlpar);
2421 UNSERIALIZE_SCALAR(regs.taner);
2422 UNSERIALIZE_SCALAR(regs.tesr);
2423
2424 UNSERIALIZE_ARRAY(rom.perfectMatch, EADDR_LEN);
2425
2426 UNSERIALIZE_SCALAR(ioEnable);
2427
2428 /*
2429 * unserialize the data fifos
2430 */
2431 int txNumPkts;
2432 UNSERIALIZE_SCALAR(txNumPkts);
2433 int i;
2434 for (i = 0; i < txNumPkts; ++i) {
2435 PacketPtr p = new EtherPacket;
2436 p->unserialize(cp, csprintf("%s.rxFifo%d", section, i));
2437 txFifo.push_back(p);
2438 }
2439
2440 int rxNumPkts;
2441 UNSERIALIZE_SCALAR(rxNumPkts);
2442 for (i = 0; i < rxNumPkts; ++i) {
2443 PacketPtr p = new EtherPacket;
2444 p->unserialize(cp, csprintf("%s.rxFifo%d", section, i));
2445 rxFifo.push_back(p);
2446 }
2447
2448 /*
2449 * unserialize the various helper variables
2450 */
2451 bool txPacketExists;
2452 UNSERIALIZE_SCALAR(txPacketExists);
2453 if (txPacketExists) {
2454 txPacket = new EtherPacket;
2455 txPacket->unserialize(cp, csprintf("%s.txPacket", section));
2456 uint32_t txPktBufPtr;
2457 UNSERIALIZE_SCALAR(txPktBufPtr);
2458 txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
2459 } else
2460 txPacket = 0;
2461
2462 bool rxPacketExists;
2463 UNSERIALIZE_SCALAR(rxPacketExists);
2464 rxPacket = 0;
2465 if (rxPacketExists) {
2466 rxPacket = new EtherPacket;
2467 rxPacket->unserialize(cp, csprintf("%s.rxPacket", section));
2468 uint32_t rxPktBufPtr;
2469 UNSERIALIZE_SCALAR(rxPktBufPtr);
2470 rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
2471 } else
2472 rxPacket = 0;
2473
2474 UNSERIALIZE_SCALAR(txXferLen);
2475 UNSERIALIZE_SCALAR(rxXferLen);
2476
2477 /*
2478 * Unserialize DescCaches
2479 */
2480 UNSERIALIZE_SCALAR(txDescCache.link);
2481 UNSERIALIZE_SCALAR(txDescCache.bufptr);
2482 UNSERIALIZE_SCALAR(txDescCache.cmdsts);
2483 UNSERIALIZE_SCALAR(txDescCache.extsts);
2484 UNSERIALIZE_SCALAR(rxDescCache.link);
2485 UNSERIALIZE_SCALAR(rxDescCache.bufptr);
2486 UNSERIALIZE_SCALAR(rxDescCache.cmdsts);
2487 UNSERIALIZE_SCALAR(rxDescCache.extsts);
2488
2489 /*
2490 * unserialize tx state machine
2491 */
2492 int txState;
2493 UNSERIALIZE_SCALAR(txState);
2494 this->txState = (TxState) txState;
2495 UNSERIALIZE_SCALAR(CTDD);
2496 UNSERIALIZE_SCALAR(txFifoAvail);
2497 UNSERIALIZE_SCALAR(txHalt);
2498 UNSERIALIZE_SCALAR(txFragPtr);
2499 UNSERIALIZE_SCALAR(txDescCnt);
2500 int txDmaState;
2501 UNSERIALIZE_SCALAR(txDmaState);
2502 this->txDmaState = (DmaState) txDmaState;
2503
2504 /*
2505 * unserialize rx state machine
2506 */
2507 int rxState;
2508 UNSERIALIZE_SCALAR(rxState);
2509 this->rxState = (RxState) rxState;
2510 UNSERIALIZE_SCALAR(CRDD);
2511 UNSERIALIZE_SCALAR(rxPktBytes);
2512 UNSERIALIZE_SCALAR(rxFifoCnt);
2513 UNSERIALIZE_SCALAR(rxHalt);
2514 UNSERIALIZE_SCALAR(rxDescCnt);
2515 int rxDmaState;
2516 UNSERIALIZE_SCALAR(rxDmaState);
2517 this->rxDmaState = (DmaState) rxDmaState;
2518
2519 UNSERIALIZE_SCALAR(extstsEnable);
2520
2521 /*
2522 * If there's a pending transmit, reschedule it now
2523 */
2524 Tick transmitTick;
2525 UNSERIALIZE_SCALAR(transmitTick);
2526 if (transmitTick)
2527 txEvent.schedule(curTick + transmitTick);
2528
2529 /*
2530 * unserialize receive address filter settings
2531 */
2532 UNSERIALIZE_SCALAR(rxFilterEnable);
2533 UNSERIALIZE_SCALAR(acceptBroadcast);
2534 UNSERIALIZE_SCALAR(acceptMulticast);
2535 UNSERIALIZE_SCALAR(acceptUnicast);
2536 UNSERIALIZE_SCALAR(acceptPerfect);
2537 UNSERIALIZE_SCALAR(acceptArp);
2538
2539 /*
2540 * Keep track of pending interrupt status.
2541 */
2542 UNSERIALIZE_SCALAR(intrTick);
2543 UNSERIALIZE_SCALAR(cpuPendingIntr);
2544 Tick intrEventTick;
2545 UNSERIALIZE_SCALAR(intrEventTick);
2546 if (intrEventTick) {
2547 intrEvent = new IntrEvent(this, true);
2548 intrEvent->schedule(intrEventTick);
2549 }
2550
2551 /*
2552 * re-add addrRanges to bus bridges
2553 */
2554 if (pioInterface) {
2555 pioInterface->addAddrRange(BARAddrs[0], BARAddrs[0] + BARSize[0] - 1);
2556 pioInterface->addAddrRange(BARAddrs[1], BARAddrs[1] + BARSize[1] - 1);
2557 }
2558 }
2559
/**
 * Latency callback for timing (cached) PIO accesses to the device.
 *
 * @param req the memory request being timed; req->paddr holds the full
 *            physical address of the access.
 * @return the tick at which the access completes: the current tick plus
 *         the configured PIO latency.
 */
Tick
NSGigE::cacheAccess(MemReqPtr &req)
{
    // The device-relative offset (daddr) is computed here only for the
    // trace output; it does not affect the returned completion time.
    DPRINTF(EthernetPIO, "timing access to paddr=%#x (daddr=%#x)\n",
            req->paddr, req->paddr - addr);
    return curTick + pioLatency;
}
2567 //=====================================================================
2568
2569
2570 //********** helper functions******************************************
2571
/**
 * Swap the two bytes of a 16-bit value (endianness conversion).
 *
 * @param num the value to byte-swap
 * @return num with its low and high bytes exchanged
 */
uint16_t reverseEnd16(uint16_t num)
{
    uint16_t low  = num & 0x00ff;
    uint16_t high = (num >> 8) & 0x00ff;
    return (low << 8) | high;
}
2578
/**
 * Swap the four bytes of a 32-bit value (endianness conversion).
 *
 * Bug fix: the previous code shifted the upper half right by 8 bits
 * instead of 16 before truncating to 16 bits, so bits 31..24 were lost
 * and bits 23..16 were duplicated into the result (e.g. 0x12345678
 * byte-swapped to 0x78560034 rather than the correct 0x78563412).
 *
 * @param num the value to byte-swap
 * @return num with its byte order fully reversed
 */
uint32_t reverseEnd32(uint32_t num)
{
    uint32_t reverse = ((num & 0x000000ff) << 24) |
                       ((num & 0x0000ff00) << 8)  |
                       ((num & 0x00ff0000) >> 8)  |
                       ((num & 0xff000000) >> 24);
    return reverse;
}
2585
2586
2587
2588 //=====================================================================
2589
// Simulator configuration parameters for NSGigEInt, the link-layer
// attachment point (EtherInt) of the NSGigE device.
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

    SimObjectParam<EtherInt *> peer;    // interface at the far end of the link
    SimObjectParam<NSGigE *> device;    // device this interface belongs to

END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt)

    // peer defaults to NULL; the link can instead be wired up from the
    // other interface's side when it is created.
    INIT_PARAM_DFLT(peer, "peer interface", NULL),
    INIT_PARAM(device, "Ethernet device of this interface")

END_INIT_SIM_OBJECT_PARAMS(NSGigEInt)
2603
2604 CREATE_SIM_OBJECT(NSGigEInt)
2605 {
2606 NSGigEInt *dev_int = new NSGigEInt(getInstanceName(), device);
2607
2608 EtherInt *p = (EtherInt *)peer;
2609 if (p) {
2610 dev_int->setPeer(p);
2611 p->setPeer(dev_int);
2612 }
2613
2614 return dev_int;
2615 }
2616
2617 REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt)
2618
2619
// Configuration parameters for the NSGigE (DP83820) Ethernet device model.
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE)

    // link timing
    Param<Tick> tx_delay;
    Param<Tick> rx_delay;
    // interrupt delivery
    SimObjectParam<IntrControl *> intr_ctrl;
    Param<Tick> intr_delay;
    // memory system attachment
    SimObjectParam<MemoryController *> mmu;
    SimObjectParam<PhysicalMemory *> physmem;
    // receive-side address filtering and MAC address
    Param<bool> rx_filter;
    Param<string> hardware_address;
    // buses used for header and payload traffic (may be NULL; see defaults)
    SimObjectParam<Bus*> header_bus;
    SimObjectParam<Bus*> payload_bus;
    SimObjectParam<HierParams *> hier;
    Param<Tick> pio_latency;
    // DMA timing-model knobs (fixed delays plus per-access multipliers)
    Param<bool> dma_desc_free;
    Param<bool> dma_data_free;
    Param<Tick> dma_read_delay;
    Param<Tick> dma_write_delay;
    Param<Tick> dma_read_factor;
    Param<Tick> dma_write_factor;
    // PCI configuration / placement
    SimObjectParam<PciConfigAll *> configspace;
    SimObjectParam<PciConfigData *> configdata;
    SimObjectParam<Tsunami *> tsunami;
    Param<uint32_t> pci_bus;
    Param<uint32_t> pci_dev;
    Param<uint32_t> pci_func;
    // FIFO capacities in bytes
    Param<uint32_t> tx_fifo_size;
    Param<uint32_t> rx_fifo_size;

END_DECLARE_SIM_OBJECT_PARAMS(NSGigE)
2650
// Descriptions and defaults for the NSGigE parameters declared above.
BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE)

    INIT_PARAM_DFLT(tx_delay, "Transmit Delay", 1000),
    INIT_PARAM_DFLT(rx_delay, "Receive Delay", 1000),
    INIT_PARAM(intr_ctrl, "Interrupt Controller"),
    INIT_PARAM_DFLT(intr_delay, "Interrupt Delay in microseconds", 0),
    INIT_PARAM(mmu, "Memory Controller"),
    INIT_PARAM(physmem, "Physical Memory"),
    INIT_PARAM_DFLT(rx_filter, "Enable Receive Filter", true),
    // default MAC address used when the config does not supply one
    INIT_PARAM_DFLT(hardware_address, "Ethernet Hardware Address",
                    "00:99:00:00:00:01"),
    INIT_PARAM_DFLT(header_bus, "The IO Bus to attach to for headers", NULL),
    INIT_PARAM_DFLT(payload_bus, "The IO Bus to attach to for payload", NULL),
    INIT_PARAM_DFLT(hier, "Hierarchy global variables", &defaultHierParams),
    INIT_PARAM_DFLT(pio_latency, "Programmed IO latency in bus cycles", 1),
    INIT_PARAM_DFLT(dma_desc_free, "DMA of Descriptors is free", false),
    INIT_PARAM_DFLT(dma_data_free, "DMA of Data is free", false),
    INIT_PARAM_DFLT(dma_read_delay, "fixed delay for dma reads", 0),
    INIT_PARAM_DFLT(dma_write_delay, "fixed delay for dma writes", 0),
    INIT_PARAM_DFLT(dma_read_factor, "multiplier for dma reads", 0),
    INIT_PARAM_DFLT(dma_write_factor, "multiplier for dma writes", 0),
    INIT_PARAM(configspace, "PCI Configspace"),
    INIT_PARAM(configdata, "PCI Config data"),
    INIT_PARAM(tsunami, "Tsunami"),
    INIT_PARAM(pci_bus, "PCI bus"),
    INIT_PARAM(pci_dev, "PCI device number"),
    INIT_PARAM(pci_func, "PCI function code"),
    // 131072 bytes == 128 KB per FIFO by default
    INIT_PARAM_DFLT(tx_fifo_size, "max size in bytes of txFifo", 131072),
    INIT_PARAM_DFLT(rx_fifo_size, "max size in bytes of rxFifo", 131072)

END_INIT_SIM_OBJECT_PARAMS(NSGigE)
2682
2683
// Factory: parse the configured MAC address and construct the NSGigE device.
CREATE_SIM_OBJECT(NSGigE)
{
    // Parse "xx:xx:xx:xx:xx:xx" into six hex bytes.
    // NOTE(review): the sscanf return value is unchecked, so a malformed
    // hardware_address string leaves part of eaddr uninitialized --
    // consider verifying that all six fields were matched.
    int eaddr[6];
    sscanf(((string)hardware_address).c_str(), "%x:%x:%x:%x:%x:%x",
           &eaddr[0], &eaddr[1], &eaddr[2], &eaddr[3], &eaddr[4], &eaddr[5]);

    return new NSGigE(getInstanceName(), intr_ctrl, intr_delay,
                      physmem, tx_delay, rx_delay, mmu, hier, header_bus,
                      payload_bus, pio_latency, dma_desc_free, dma_data_free,
                      dma_read_delay, dma_write_delay, dma_read_factor,
                      dma_write_factor, configspace, configdata,
                      tsunami, pci_bus, pci_dev, pci_func, rx_filter, eaddr,
                      tx_fifo_size, rx_fifo_size);
}

REGISTER_SIM_OBJECT("NSGigE", NSGigE)