Added code using VPtr to be able to extract info from a Linux thread.
[gem5.git] / dev / ns_gige.cc
1 /*
2 * Copyright (c) 2004 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /* @file
30 * Device module for modelling the National Semiconductor
31 * DP83820 ethernet controller. Does not support priority queueing
32 */
33 #include <cstdio>
34 #include <deque>
35 #include <string>
36
37 #include "base/inet.hh"
38 #include "cpu/exec_context.hh"
39 #include "cpu/intr_control.hh"
40 #include "dev/dma.hh"
41 #include "dev/etherlink.hh"
42 #include "dev/ns_gige.hh"
43 #include "dev/pciconfigall.hh"
44 #include "dev/tsunami_cchip.hh"
45 #include "mem/bus/bus.hh"
46 #include "mem/bus/dma_interface.hh"
47 #include "mem/bus/pio_interface.hh"
48 #include "mem/bus/pio_interface_impl.hh"
49 #include "mem/functional_mem/memory_control.hh"
50 #include "mem/functional_mem/physical_memory.hh"
51 #include "sim/builder.hh"
52 #include "sim/debug.hh"
53 #include "sim/host.hh"
54 #include "sim/sim_stats.hh"
55 #include "targetarch/vtophys.hh"
56
// Human-readable names for the receive state machine states, indexed by
// the rxState enum; used by DPRINTF tracing in rxKick().
const char *NsRxStateStrings[] =
{
    "rxIdle",
    "rxDescRefr",
    "rxDescRead",
    "rxFifoBlock",
    "rxFragWrite",
    "rxDescWrite",
    "rxAdvance"
};

// Human-readable names for the transmit state machine states, indexed by
// the txState enum.
const char *NsTxStateStrings[] =
{
    "txIdle",
    "txDescRefr",
    "txDescRead",
    "txFifoBlock",
    "txFragRead",
    "txDescWrite",
    "txAdvance"
};

// Human-readable names for the DMA engine states (shared by the rx and
// tx DMA state variables).
const char *NsDmaState[] =
{
    "dmaIdle",
    "dmaReading",
    "dmaWriting",
    "dmaReadWaiting",
    "dmaWriteWaiting"
};

using namespace std;

// helper function declarations
// These functions reverse Endianness so we can evaluate network data
// correctly
uint16_t reverseEnd16(uint16_t);
uint32_t reverseEnd32(uint32_t);
95
96 ///////////////////////////////////////////////////////////////////////
97 //
98 // NSGigE PCI Device
99 //
/**
 * Construct the NS DP83820 model.  Registers itself with the Tsunami
 * chipset, creates PIO/DMA bus interfaces depending on which buses were
 * supplied, records the DMA timing parameters, resets the device
 * registers, and installs the EEPROM-resident MAC address.
 *
 * NOTE(review): if neither header_bus nor payload_bus is given,
 * pioInterface/dmaInterface are never assigned here -- presumably they
 * default elsewhere; confirm before relying on that configuration.
 */
NSGigE::NSGigE(const std::string &name, IntrControl *i, Tick intr_delay,
               PhysicalMemory *pmem, Tick tx_delay, Tick rx_delay,
               MemoryController *mmu, HierParams *hier, Bus *header_bus,
               Bus *payload_bus, Tick pio_latency, bool dma_desc_free,
               bool dma_data_free, Tick dma_read_delay, Tick dma_write_delay,
               Tick dma_read_factor, Tick dma_write_factor, PciConfigAll *cf,
               PciConfigData *cd, Tsunami *t, uint32_t bus, uint32_t dev,
               uint32_t func, bool rx_filter, const int eaddr[6],
               uint32_t tx_fifo_size, uint32_t rx_fifo_size)
    : PciDev(name, mmu, cf, cd, bus, dev, func), tsunami(t), ioEnable(false),
      maxTxFifoSize(tx_fifo_size), maxRxFifoSize(rx_fifo_size),
      txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
      txXferLen(0), rxXferLen(0), txState(txIdle), txEnable(false),
      CTDD(false), txFifoAvail(tx_fifo_size),
      txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
      rxEnable(false), CRDD(false), rxPktBytes(0), rxFifoCnt(0),
      rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
      rxDmaReadEvent(this), rxDmaWriteEvent(this),
      txDmaReadEvent(this), txDmaWriteEvent(this),
      dmaDescFree(dma_desc_free), dmaDataFree(dma_data_free),
      txDelay(tx_delay), rxDelay(rx_delay), rxKickTick(0), txKickTick(0),
      txEvent(this), rxFilterEnable(rx_filter), acceptBroadcast(false),
      acceptMulticast(false), acceptUnicast(false),
      acceptPerfect(false), acceptArp(false),
      physmem(pmem), intctrl(i), intrTick(0), cpuPendingIntr(false),
      intrEvent(0), interface(0)
{
    // Let the chipset find us for interrupt delivery.
    tsunami->ethernet = this;

    // PIO goes on the header bus when present; DMA uses the payload bus
    // for data if one is provided, otherwise everything shares one bus.
    if (header_bus) {
        pioInterface = newPioInterface(name, hier, header_bus, this,
                                       &NSGigE::cacheAccess);

        pioLatency = pio_latency * header_bus->clockRatio;

        if (payload_bus)
            dmaInterface = new DMAInterface<Bus>(name + ".dma",
                                                 header_bus, payload_bus, 1);
        else
            dmaInterface = new DMAInterface<Bus>(name + ".dma",
                                                 header_bus, header_bus, 1);
    } else if (payload_bus) {
        // No header bus: both PIO and DMA ride the payload bus.
        pioInterface = newPioInterface(name, hier, payload_bus, this,
                                       &NSGigE::cacheAccess);

        pioLatency = pio_latency * payload_bus->clockRatio;

        dmaInterface = new DMAInterface<Bus>(name + ".dma", payload_bus,
                                             payload_bus, 1);
    }


    // Timing parameters for the simple delay/factor DMA model.
    intrDelay = US2Ticks(intr_delay);
    dmaReadDelay = dma_read_delay;
    dmaWriteDelay = dma_write_delay;
    dmaReadFactor = dma_read_factor;
    dmaWriteFactor = dma_write_factor;

    regsReset();
    // Seed the perfect-match filter ROM with the configured MAC address.
    rom.perfectMatch[0] = eaddr[0];
    rom.perfectMatch[1] = eaddr[1];
    rom.perfectMatch[2] = eaddr[2];
    rom.perfectMatch[3] = eaddr[3];
    rom.perfectMatch[4] = eaddr[4];
    rom.perfectMatch[5] = eaddr[5];
}
166
// Destructor: intentionally empty.  NOTE(review): the heap-allocated
// pioInterface/dmaInterface are not freed here -- presumably they live
// for the entire simulation; confirm if tear-down is ever added.
NSGigE::~NSGigE()
{}
169
/**
 * Register all statistics for this device with the stats framework and
 * define the derived-rate formulas.  Each stat's prereq ties its output
 * to traffic actually having occurred in that direction.
 */
void
NSGigE::regStats()
{
    txBytes
        .name(name() + ".txBytes")
        .desc("Bytes Transmitted")
        .prereq(txBytes)
        ;

    rxBytes
        .name(name() + ".rxBytes")
        .desc("Bytes Received")
        .prereq(rxBytes)
        ;

    txPackets
        .name(name() + ".txPackets")
        .desc("Number of Packets Transmitted")
        .prereq(txBytes)
        ;

    rxPackets
        .name(name() + ".rxPackets")
        .desc("Number of Packets Received")
        .prereq(rxBytes)
        ;

    // Checksum offload counters: work the device did on behalf of the OS.
    txIPChecksums
        .name(name() + ".txIPChecksums")
        .desc("Number of tx IP Checksums done by device")
        .precision(0)
        .prereq(txBytes)
        ;

    rxIPChecksums
        .name(name() + ".rxIPChecksums")
        .desc("Number of rx IP Checksums done by device")
        .precision(0)
        .prereq(rxBytes)
        ;

    txTCPChecksums
        .name(name() + ".txTCPChecksums")
        .desc("Number of tx TCP Checksums done by device")
        .precision(0)
        .prereq(txBytes)
        ;

    rxTCPChecksums
        .name(name() + ".rxTCPChecksums")
        .desc("Number of rx TCP Checksums done by device")
        .precision(0)
        .prereq(rxBytes)
        ;

    // Descriptor-traffic counters (DMA activity on the descriptor rings).
    descDmaReads
        .name(name() + ".descDMAReads")
        .desc("Number of descriptors the device read w/ DMA")
        .precision(0)
        ;

    descDmaWrites
        .name(name() + ".descDMAWrites")
        .desc("Number of descriptors the device wrote w/ DMA")
        .precision(0)
        ;

    descDmaRdBytes
        .name(name() + ".descDmaReadBytes")
        .desc("number of descriptor bytes read w/ DMA")
        .precision(0)
        ;

    descDmaWrBytes
        .name(name() + ".descDmaWriteBytes")
        .desc("number of descriptor bytes write w/ DMA")
        .precision(0)
        ;


    txBandwidth
        .name(name() + ".txBandwidth")
        .desc("Transmit Bandwidth (bits/s)")
        .precision(0)
        .prereq(txBytes)
        ;

    rxBandwidth
        .name(name() + ".rxBandwidth")
        .desc("Receive Bandwidth (bits/s)")
        .precision(0)
        .prereq(rxBytes)
        ;

    txPacketRate
        .name(name() + ".txPPS")
        .desc("Packet Tranmission Rate (packets/s)")
        .precision(0)
        .prereq(txBytes)
        ;

    rxPacketRate
        .name(name() + ".rxPPS")
        .desc("Packet Reception Rate (packets/s)")
        .precision(0)
        .prereq(rxBytes)
        ;

    // Derived formulas evaluated at stats-dump time.
    txBandwidth = txBytes * Stats::constant(8) / simSeconds;
    rxBandwidth = rxBytes * Stats::constant(8) / simSeconds;
    txPacketRate = txPackets / simSeconds;
    rxPacketRate = rxPackets / simSeconds;
}
283
284 /**
285 * This is to read the PCI general configuration registers
286 */
287 void
288 NSGigE::ReadConfig(int offset, int size, uint8_t *data)
289 {
290 if (offset < PCI_DEVICE_SPECIFIC)
291 PciDev::ReadConfig(offset, size, data);
292 else
293 panic("Device specific PCI config space not implemented!\n");
294 }
295
296 /**
297 * This is to write to the PCI general configuration registers
298 */
void
NSGigE::WriteConfig(int offset, int size, uint32_t data)
{
    // Standard PCI header writes are delegated to the base class; the
    // device-specific region is not modelled.
    if (offset < PCI_DEVICE_SPECIFIC)
        PciDev::WriteConfig(offset, size, data);
    else
        panic("Device specific PCI config space not implemented!\n");

    // Need to catch writes to BARs to update the PIO interface
    switch (offset) {
        // seems to work fine without all these PCI settings, but i
        // put in the IO to double check, an assertion will fail if we
        // need to properly implement it
      case PCI_COMMAND:
        // Track I/O-space enable; read()/write() assert on this flag.
        if (config.data[offset] & PCI_CMD_IOSE)
            ioEnable = true;
        else
            ioEnable = false;

#if 0
        if (config.data[offset] & PCI_CMD_BME) {
            bmEnabled = true;
        }
        else {
            bmEnabled = false;
        }

        if (config.data[offset] & PCI_CMD_MSE) {
            memEnable = true;
        }
        else {
            memEnable = false;
        }
#endif
        break;

      case PCI0_BASE_ADDR0:
        if (BARAddrs[0] != 0) {
            // NOTE(review): the address range is registered BEFORE the
            // uncached-space mask is applied to BARAddrs[0]; presumably
            // the bus address already matches -- confirm.
            if (pioInterface)
                pioInterface->addAddrRange(BARAddrs[0],
                                           BARAddrs[0] + BARSize[0] - 1);

            BARAddrs[0] &= PA_UNCACHED_MASK;
        }
        break;
      case PCI0_BASE_ADDR1:
        if (BARAddrs[1] != 0) {
            if (pioInterface)
                pioInterface->addAddrRange(BARAddrs[1],
                                           BARAddrs[1] + BARSize[1] - 1);

            BARAddrs[1] &= PA_UNCACHED_MASK;
        }
        break;
    }
}
355
356 /**
357 * This reads the device registers, which are detailed in the NS83820
358 * spec sheet
359 */
/**
 * PIO read of a device register.  Only 32-bit accesses are supported;
 * the register offset is the low 12 bits of the physical address.
 * Reads of CR and MIBC have read-side-effects (bits cleared); reads of
 * ISR clear all interrupt status.
 */
Fault
NSGigE::read(MemReqPtr &req, uint8_t *data)
{
    assert(ioEnable);

    //The mask is to give you only the offset into the device register file
    Addr daddr = req->paddr & 0xfff;
    DPRINTF(EthernetPIO, "read da=%#x pa=%#x va=%#x size=%d\n",
            daddr, req->paddr, req->vaddr, req->size);


    // there are some reserved registers, you can see ns_gige_reg.h and
    // the spec sheet for details
    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        // This window aliases PCI config space.
        ReadConfig(daddr & 0xff, req->size, data);
        return No_Fault;
    } else if (daddr >= MIB_START && daddr <= MIB_END) {
        // don't implement all the MIB's. hopefully the kernel
        // doesn't actually DEPEND upon their values
        // MIB are just hardware stats keepers
        uint32_t &reg = *(uint32_t *) data;
        reg = 0;
        return No_Fault;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    switch (req->size) {
      case sizeof(uint32_t):
        {
            uint32_t &reg = *(uint32_t *)data;

            switch (daddr) {
              case CR:
                reg = regs.command;
                //these are supposed to be cleared on a read
                reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
                break;

              case CFG:
                reg = regs.config;
                break;

              case MEAR:
                reg = regs.mear;
                break;

              case PTSCR:
                reg = regs.ptscr;
                break;

              case ISR:
                // Reading ISR acknowledges (clears) all interrupts.
                reg = regs.isr;
                devIntrClear(ISR_ALL);
                break;

              case IMR:
                reg = regs.imr;
                break;

              case IER:
                reg = regs.ier;
                break;

              case IHR:
                reg = regs.ihr;
                break;

              case TXDP:
                reg = regs.txdp;
                break;

              case TXDP_HI:
                reg = regs.txdp_hi;
                break;

              case TXCFG:
                reg = regs.txcfg;
                break;

              case GPIOR:
                reg = regs.gpior;
                break;

              case RXDP:
                reg = regs.rxdp;
                break;

              case RXDP_HI:
                reg = regs.rxdp_hi;
                break;

              case RXCFG:
                reg = regs.rxcfg;
                break;

              case PQCR:
                reg = regs.pqcr;
                break;

              case WCSR:
                reg = regs.wcsr;
                break;

              case PCR:
                reg = regs.pcr;
                break;

                // see the spec sheet for how RFCR and RFDR work
                // basically, you write to RFCR to tell the machine
                // what you want to do next, then you act upon RFDR,
                // and the device will be prepared b/c of what you
                // wrote to RFCR
              case RFCR:
                reg = regs.rfcr;
                break;

              case RFDR:
                // Only the perfect-match filter words are readable;
                // each word packs two MAC-address bytes little-endian.
                switch (regs.rfcr & RFCR_RFADDR) {
                  case 0x000:
                    reg = rom.perfectMatch[1];
                    reg = reg << 8;
                    reg += rom.perfectMatch[0];
                    break;
                  case 0x002:
                    reg = rom.perfectMatch[3] << 8;
                    reg += rom.perfectMatch[2];
                    break;
                  case 0x004:
                    reg = rom.perfectMatch[5] << 8;
                    reg += rom.perfectMatch[4];
                    break;
                  default:
                    panic("reading RFDR for something other than PMATCH!\n");
                    // didn't implement other RFDR functionality b/c
                    // driver didn't use it
                }
                break;

              case SRR:
                reg = regs.srr;
                break;

              case MIBC:
                // MIBS/ACLR are self-clearing on read.
                reg = regs.mibc;
                reg &= ~(MIBC_MIBS | MIBC_ACLR);
                break;

              case VRCR:
                reg = regs.vrcr;
                break;

              case VTCR:
                reg = regs.vtcr;
                break;

              case VDR:
                reg = regs.vdr;
                break;

              case CCSR:
                reg = regs.ccsr;
                break;

              case TBICR:
                reg = regs.tbicr;
                break;

              case TBISR:
                reg = regs.tbisr;
                break;

              case TANAR:
                reg = regs.tanar;
                break;

              case TANLPAR:
                reg = regs.tanlpar;
                break;

              case TANER:
                reg = regs.taner;
                break;

              case TESR:
                reg = regs.tesr;
                break;

              default:
                panic("reading unimplemented register: addr=%#x", daddr);
            }

            DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
                    daddr, reg, reg);
        }
        break;

      default:
        panic("accessing register with invalid size: addr=%#x, size=%d",
              daddr, req->size);
    }

    return No_Fault;
}
565
/**
 * PIO write of a device register.  Only 32-bit accesses are supported.
 * Writes to CR may kick the tx/rx state machines or reset the device;
 * many registers are read-only or unused by the Linux driver and panic
 * if touched, to flag unexpected driver behavior early.
 */
Fault
NSGigE::write(MemReqPtr &req, const uint8_t *data)
{
    assert(ioEnable);

    Addr daddr = req->paddr & 0xfff;
    DPRINTF(EthernetPIO, "write da=%#x pa=%#x va=%#x size=%d\n",
            daddr, req->paddr, req->vaddr, req->size);

    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        // This window aliases PCI config space.
        WriteConfig(daddr & 0xff, req->size, *(uint32_t *)data);
        return No_Fault;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    if (req->size == sizeof(uint32_t)) {
        uint32_t reg = *(uint32_t *)data;
        DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);

        switch (daddr) {
          case CR:
            regs.command = reg;
            // Disable takes precedence over enable in each direction.
            if (reg & CR_TXD) {
                txEnable = false;
            } else if (reg & CR_TXE) {
                txEnable = true;

                // the kernel is enabling the transmit machine
                if (txState == txIdle)
                    txKick();
            }

            if (reg & CR_RXD) {
                rxEnable = false;
            } else if (reg & CR_RXE) {
                rxEnable = true;

                if (rxState == rxIdle)
                    rxKick();
            }

            if (reg & CR_TXR)
                txReset();

            if (reg & CR_RXR)
                rxReset();

            if (reg & CR_SWI)
                devIntrPost(ISR_SWI);

            if (reg & CR_RST) {
                // Full device reset: both paths plus all registers.
                txReset();
                rxReset();

                regsReset();
            }
            break;

          case CFG:
            if (reg & CFG_LNKSTS ||
                reg & CFG_SPDSTS ||
                reg & CFG_DUPSTS ||
                reg & CFG_RESERVED ||
                reg & CFG_T64ADDR ||
                reg & CFG_PCI64_DET)
                panic("writing to read-only or reserved CFG bits!\n");

            // NOTE(review): writable CFG bits are OR'd in, never
            // cleared -- presumably acceptable for this driver; a real
            // device would overwrite them.
            regs.config |= reg & ~(CFG_LNKSTS | CFG_SPDSTS | CFG_DUPSTS |
                                   CFG_RESERVED | CFG_T64ADDR | CFG_PCI64_DET);

            // all these #if 0's are because i don't THINK the kernel needs to
            // have these implemented. if there is a problem relating to one of
            // these, you may need to add functionality in.
#if 0
            if (reg & CFG_TBI_EN) ;
            if (reg & CFG_MODE_1000) ;
#endif

            if (reg & CFG_AUTO_1000)
                panic("CFG_AUTO_1000 not implemented!\n");

#if 0
            if (reg & CFG_PINT_DUPSTS ||
                reg & CFG_PINT_LNKSTS ||
                reg & CFG_PINT_SPDSTS)
                ;

            if (reg & CFG_TMRTEST) ;
            if (reg & CFG_MRM_DIS) ;
            if (reg & CFG_MWI_DIS) ;

            if (reg & CFG_T64ADDR)
                panic("CFG_T64ADDR is read only register!\n");

            if (reg & CFG_PCI64_DET)
                panic("CFG_PCI64_DET is read only register!\n");

            if (reg & CFG_DATA64_EN) ;
            if (reg & CFG_M64ADDR) ;
            if (reg & CFG_PHY_RST) ;
            if (reg & CFG_PHY_DIS) ;
#endif

            // Extended-status descriptors carry checksum-offload flags.
            if (reg & CFG_EXTSTS_EN)
                extstsEnable = true;
            else
                extstsEnable = false;

#if 0
            if (reg & CFG_REQALG) ;
            if (reg & CFG_SB) ;
            if (reg & CFG_POW) ;
            if (reg & CFG_EXD) ;
            if (reg & CFG_PESEL) ;
            if (reg & CFG_BROM_DIS) ;
            if (reg & CFG_EXT_125) ;
            if (reg & CFG_BEM) ;
#endif
            break;

          case MEAR:
            regs.mear = reg;
            // since phy is completely faked, MEAR_MD* don't matter
            // and since the driver never uses MEAR_EE*, they don't
            // matter
#if 0
            if (reg & MEAR_EEDI) ;
            if (reg & MEAR_EEDO) ; // this one is read only
            if (reg & MEAR_EECLK) ;
            if (reg & MEAR_EESEL) ;
            if (reg & MEAR_MDIO) ;
            if (reg & MEAR_MDDIR) ;
            if (reg & MEAR_MDC) ;
#endif
            break;

          case PTSCR:
            regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
            // these control BISTs for various parts of chip - we
            // don't care or do just fake that the BIST is done
            if (reg & PTSCR_RBIST_EN)
                regs.ptscr |= PTSCR_RBIST_DONE;
            if (reg & PTSCR_EEBIST_EN)
                regs.ptscr &= ~PTSCR_EEBIST_EN;
            if (reg & PTSCR_EELOAD_EN)
                regs.ptscr &= ~PTSCR_EELOAD_EN;
            break;

          case ISR: /* writing to the ISR has no effect */
            panic("ISR is a read only register!\n");

          case IMR:
            regs.imr = reg;
            // A mask change may immediately raise or drop the CPU intr.
            devIntrChangeMask();
            break;

          case IER:
            regs.ier = reg;
            break;

          case IHR:
            regs.ihr = reg;
            /* not going to implement real interrupt holdoff */
            break;

          case TXDP:
            // Descriptor pointers are dword-aligned; low bits dropped.
            regs.txdp = (reg & 0xFFFFFFFC);
            assert(txState == txIdle);
            CTDD = false;
            break;

          case TXDP_HI:
            regs.txdp_hi = reg;
            break;

          case TXCFG:
            regs.txcfg = reg;
#if 0
            if (reg & TXCFG_CSI) ;
            if (reg & TXCFG_HBI) ;
            if (reg & TXCFG_MLB) ;
            if (reg & TXCFG_ATP) ;
            if (reg & TXCFG_ECRETRY) {
                /*
                 * this could easily be implemented, but considering
                 * the network is just a fake pipe, wouldn't make
                 * sense to do this
                 */
            }

            if (reg & TXCFG_BRST_DIS) ;
#endif

#if 0
            /* we handle our own DMA, ignore the kernel's exhortations */
            if (reg & TXCFG_MXDMA) ;
#endif

            // also, we currently don't care about fill/drain
            // thresholds though this may change in the future with
            // more realistic networks or a driver which changes it
            // according to feedback

            break;

          case GPIOR:
            regs.gpior = reg;
            /* these just control general purpose i/o pins, don't matter */
            break;

          case RXDP:
            regs.rxdp = reg;
            CRDD = false;
            break;

          case RXDP_HI:
            regs.rxdp_hi = reg;
            break;

          case RXCFG:
            regs.rxcfg = reg;
#if 0
            if (reg & RXCFG_AEP) ;
            if (reg & RXCFG_ARP) ;
            if (reg & RXCFG_STRIPCRC) ;
            if (reg & RXCFG_RX_RD) ;
            if (reg & RXCFG_ALP) ;
            if (reg & RXCFG_AIRL) ;

            /* we handle our own DMA, ignore what kernel says about it */
            if (reg & RXCFG_MXDMA) ;

            //also, we currently don't care about fill/drain thresholds
            //though this may change in the future with more realistic
            //networks or a driver which changes it according to feedback
            if (reg & (RXCFG_DRTH | RXCFG_DRTH0)) ;
#endif
            break;

          case PQCR:
            /* there is no priority queueing used in the linux 2.6 driver */
            regs.pqcr = reg;
            break;

          case WCSR:
            /* not going to implement wake on LAN */
            regs.wcsr = reg;
            break;

          case PCR:
            /* not going to implement pause control */
            regs.pcr = reg;
            break;

          case RFCR:
            regs.rfcr = reg;

            // Latch the receive-filter configuration bits into the
            // booleans consulted by the rx filter.
            rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
            acceptBroadcast = (reg & RFCR_AAB) ? true : false;
            acceptMulticast = (reg & RFCR_AAM) ? true : false;
            acceptUnicast = (reg & RFCR_AAU) ? true : false;
            acceptPerfect = (reg & RFCR_APM) ? true : false;
            acceptArp = (reg & RFCR_AARP) ? true : false;

#if 0
            if (reg & RFCR_APAT)
                panic("RFCR_APAT not implemented!\n");
#endif

            if (reg & RFCR_MHEN || reg & RFCR_UHEN)
                panic("hash filtering not implemented!\n");

            if (reg & RFCR_ULM)
                panic("RFCR_ULM not implemented!\n");

            break;

          case RFDR:
            panic("the driver never writes to RFDR, something is wrong!\n");

          case BRAR:
            panic("the driver never uses BRAR, something is wrong!\n");

          case BRDR:
            panic("the driver never uses BRDR, something is wrong!\n");

          case SRR:
            panic("SRR is read only register!\n");

          case MIBC:
            panic("the driver never uses MIBC, something is wrong!\n");

          case VRCR:
            regs.vrcr = reg;
            break;

          case VTCR:
            regs.vtcr = reg;
            break;

          case VDR:
            panic("the driver never uses VDR, something is wrong!\n");
            break;

          case CCSR:
            /* not going to implement clockrun stuff */
            regs.ccsr = reg;
            break;

          case TBICR:
            regs.tbicr = reg;
            if (reg & TBICR_MR_LOOPBACK)
                panic("TBICR_MR_LOOPBACK never used, something wrong!\n");

            if (reg & TBICR_MR_AN_ENABLE) {
                // Fake instantaneous autonegotiation completion.
                regs.tanlpar = regs.tanar;
                regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
            }

#if 0
            if (reg & TBICR_MR_RESTART_AN) ;
#endif

            break;

          case TBISR:
            panic("TBISR is read only register!\n");

          case TANAR:
            regs.tanar = reg;
            if (reg & TANAR_PS2)
                panic("this isn't used in driver, something wrong!\n");

            if (reg & TANAR_PS1)
                panic("this isn't used in driver, something wrong!\n");
            break;

          case TANLPAR:
            panic("this should only be written to by the fake phy!\n");

          case TANER:
            panic("TANER is read only register!\n");

          case TESR:
            regs.tesr = reg;
            break;

          default:
            panic("invalid register access daddr=%#x", daddr);
        }
    } else {
        panic("Invalid Request Size");
    }

    return No_Fault;
}
924
925 void
926 NSGigE::devIntrPost(uint32_t interrupts)
927 {
928 if (interrupts & ISR_RESERVE)
929 panic("Cannot set a reserved interrupt");
930
931 if (interrupts & ISR_NOIMPL)
932 warn("interrupt not implemented %#x\n", interrupts);
933
934 interrupts &= ~ISR_NOIMPL;
935 regs.isr |= interrupts;
936
937 DPRINTF(EthernetIntr,
938 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
939 interrupts, regs.isr, regs.imr);
940
941 if ((regs.isr & regs.imr)) {
942 Tick when = curTick;
943 if (!(regs.isr & regs.imr & ISR_NODELAY))
944 when += intrDelay;
945 cpuIntrPost(when);
946 }
947 }
948
949 void
950 NSGigE::devIntrClear(uint32_t interrupts)
951 {
952 if (interrupts & ISR_RESERVE)
953 panic("Cannot clear a reserved interrupt");
954
955 interrupts &= ~ISR_NOIMPL;
956 regs.isr &= ~interrupts;
957
958 DPRINTF(EthernetIntr,
959 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
960 interrupts, regs.isr, regs.imr);
961
962 if (!(regs.isr & regs.imr))
963 cpuIntrClear();
964 }
965
966 void
967 NSGigE::devIntrChangeMask()
968 {
969 DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
970 regs.isr, regs.imr, regs.isr & regs.imr);
971
972 if (regs.isr & regs.imr)
973 cpuIntrPost(curTick);
974 else
975 cpuIntrClear();
976 }
977
/**
 * Schedule delivery of a CPU interrupt at the given tick, coalescing
 * with any already-scheduled interrupt that would fire sooner.
 *
 * @param when Tick at which the interrupt should be delivered.
 */
void
NSGigE::cpuIntrPost(Tick when)
{
    // If the interrupt you want to post is later than an interrupt
    // already scheduled, just let it post in the coming one and don't
    // schedule another.
    // HOWEVER, must be sure that the scheduled intrTick is in the
    // future (this was formerly the source of a bug)
    /**
     * @todo this warning should be removed and the intrTick code should
     * be fixed.
     */
    if (intrTick < curTick && intrTick != 0) {
        warn("intrTick < curTick !!! intrTick=%d curTick=%d\n",
             intrTick, curTick);
        intrTick = 0;
    }
    assert((intrTick >= curTick) || (intrTick == 0));
    if (when > intrTick && intrTick != 0)
        return;

    intrTick = when;

    // Replace any previously scheduled delivery event; the squashed
    // event is abandoned (its flag tells it not to fire).
    if (intrEvent) {
        intrEvent->squash();
        intrEvent = 0;
    }

    // A request in the past fires immediately; otherwise schedule it.
    if (when < curTick) {
        cpuInterrupt();
    } else {
        DPRINTF(EthernetIntr,
                "going to schedule an interrupt for intrTick=%d\n",
                intrTick);
        intrEvent = new IntrEvent(this, true);
        intrEvent->schedule(intrTick);
    }
}
1016
/**
 * Actually deliver the interrupt to the CPU via the Tsunami cchip,
 * unless one is already outstanding or delivery is still scheduled
 * for a later tick.
 */
void
NSGigE::cpuInterrupt()
{
    // Don't send an interrupt if there's already one
    if (cpuPendingIntr) {
        DPRINTF(EthernetIntr,
                "would send an interrupt now, but there's already pending\n");
        intrTick = 0;
        return;
    }
    // Don't send an interrupt if it's supposed to be delayed
    if (intrTick > curTick) {
        DPRINTF(EthernetIntr,
                "an interrupt is scheduled for %d, wait til then\n",
                intrTick);
        return;
    }

    // Whether or not there's a pending interrupt, we don't care about
    // it anymore
    // NOTE(review): intrEvent is dropped without delete here --
    // presumably the event object is auto-reclaimed after firing;
    // confirm against the event-queue implementation.
    intrEvent = 0;
    intrTick = 0;

    // Send interrupt
    cpuPendingIntr = true;

    DPRINTF(EthernetIntr, "posting cchip interrupt\n");
    tsunami->cchip->postDRIR(configData->config.hdr.pci0.interruptLine);
}
1046
1047 void
1048 NSGigE::cpuIntrClear()
1049 {
1050 if (!cpuPendingIntr)
1051 return;
1052
1053 cpuPendingIntr = false;
1054
1055 DPRINTF(EthernetIntr, "clearing cchip interrupt\n");
1056 tsunami->cchip->clearDRIR(configData->config.hdr.pci0.interruptLine);
1057 }
1058
1059 bool
1060 NSGigE::cpuIntrPending() const
1061 { return cpuPendingIntr; }
1062
1063 void
1064 NSGigE::txReset()
1065 {
1066
1067 DPRINTF(Ethernet, "transmit reset\n");
1068
1069 CTDD = false;
1070 txFifoAvail = maxTxFifoSize;
1071 txEnable = false;;
1072 txFragPtr = 0;
1073 assert(txDescCnt == 0);
1074 txFifo.clear();
1075 txState = txIdle;
1076 assert(txDmaState == dmaIdle);
1077 }
1078
1079 void
1080 NSGigE::rxReset()
1081 {
1082 DPRINTF(Ethernet, "receive reset\n");
1083
1084 CRDD = false;
1085 assert(rxPktBytes == 0);
1086 rxFifoCnt = 0;
1087 rxEnable = false;
1088 rxFragPtr = 0;
1089 assert(rxDescCnt == 0);
1090 assert(rxDmaState == dmaIdle);
1091 rxFifo.clear();
1092 rxState = rxIdle;
1093 }
1094
/**
 * Restore all device registers and filter flags to their power-on
 * defaults, per the DP83820 reset values.
 */
void
NSGigE::regsReset()
{
    memset(&regs, 0, sizeof(regs));
    regs.config = CFG_LNKSTS;
    regs.mear = MEAR_MDDIR | MEAR_EEDO;
    regs.txcfg = 0x120; // set drain threshold to 1024 bytes and
                        // fill threshold to 32 bytes
    regs.rxcfg = 0x4; // set drain threshold to 16 bytes
    regs.srr = 0x0103; // set the silicon revision to rev B or 0x103
    regs.mibc = MIBC_FRZ;
    regs.vdr = 0x81; // set the vlan tag type to 802.1q
    regs.tesr = 0xc000; // TBI capable of both full and half duplex

    // Receive filter defaults: accept nothing until the driver
    // programs RFCR.
    extstsEnable = false;
    acceptBroadcast = false;
    acceptMulticast = false;
    acceptUnicast = false;
    acceptPerfect = false;
    acceptArp = false;
}
1116
/**
 * Perform the functional part of a receive-side DMA read: copy
 * rxDmaLen bytes from simulated physical memory at rxDmaAddr into the
 * rxDmaData buffer, then mark the DMA engine idle.  The timing part is
 * handled by doRxDmaRead()/rxDmaReadEvent.
 */
void
NSGigE::rxDmaReadCopy()
{
    assert(rxDmaState == dmaReading);

    physmem->dma_read((uint8_t *)rxDmaData, rxDmaAddr, rxDmaLen);
    rxDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
            rxDmaAddr, rxDmaLen);
    DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
}
1129
/**
 * Start a receive-side DMA read of rxDmaLen bytes from rxDmaAddr.
 * Three timing models, in priority order:
 *  - real bus model (dmaInterface): issue on the bus, or stall in
 *    dmaReadWaiting if the bus is busy;
 *  - zero-cost: copy immediately when both delay and factor are 0;
 *  - fixed-delay: schedule rxDmaReadEvent after delay + per-64-byte
 *    factor.
 *
 * @return true if the caller must wait for an event (DMA in flight or
 *         stalled), false if the copy completed inline.
 */
bool
NSGigE::doRxDmaRead()
{
    assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
    rxDmaState = dmaReading;

    if (dmaInterface && !rxDmaFree) {
        if (dmaInterface->busy())
            rxDmaState = dmaReadWaiting;
        else
            dmaInterface->doDMA(Read, rxDmaAddr, rxDmaLen, curTick,
                                &rxDmaReadEvent, true);
        return true;
    }

    if (dmaReadDelay == 0 && dmaReadFactor == 0) {
        rxDmaReadCopy();
        return false;
    }

    // Charge the per-cache-line factor, rounding the length up to a
    // multiple of 64 bytes.
    Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
    Tick start = curTick + dmaReadDelay + factor;
    rxDmaReadEvent.schedule(start);
    return true;
}
1155
1156 void
1157 NSGigE::rxDmaReadDone()
1158 {
1159 assert(rxDmaState == dmaReading);
1160 rxDmaReadCopy();
1161
1162 // If the transmit state machine has a pending DMA, let it go first
1163 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1164 txKick();
1165
1166 rxKick();
1167 }
1168
/**
 * Perform the functional part of a receive-side DMA write: copy
 * rxDmaLen bytes from the rxDmaData buffer into simulated physical
 * memory at rxDmaAddr, then mark the DMA engine idle.  The timing part
 * is handled by doRxDmaWrite()/rxDmaWriteEvent.
 */
void
NSGigE::rxDmaWriteCopy()
{
    assert(rxDmaState == dmaWriting);

    physmem->dma_write(rxDmaAddr, (uint8_t *)rxDmaData, rxDmaLen);
    rxDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
            rxDmaAddr, rxDmaLen);
    DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
}
1181
/**
 * Start a receive-side DMA write of rxDmaLen bytes to rxDmaAddr.
 * Mirrors doRxDmaRead(): real bus model (with WriteInvalidate), then
 * zero-cost inline copy, then fixed-delay event scheduling.
 *
 * @return true if the caller must wait for an event, false if the copy
 *         completed inline.
 */
bool
NSGigE::doRxDmaWrite()
{
    assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
    rxDmaState = dmaWriting;

    if (dmaInterface && !rxDmaFree) {
        if (dmaInterface->busy())
            rxDmaState = dmaWriteWaiting;
        else
            dmaInterface->doDMA(WriteInvalidate, rxDmaAddr, rxDmaLen, curTick,
                                &rxDmaWriteEvent, true);
        return true;
    }

    if (dmaWriteDelay == 0 && dmaWriteFactor == 0) {
        rxDmaWriteCopy();
        return false;
    }

    // Charge the per-cache-line factor, rounding the length up to a
    // multiple of 64 bytes.
    Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
    Tick start = curTick + dmaWriteDelay + factor;
    rxDmaWriteEvent.schedule(start);
    return true;
}
1207
1208 void
1209 NSGigE::rxDmaWriteDone()
1210 {
1211 assert(rxDmaState == dmaWriting);
1212 rxDmaWriteCopy();
1213
1214 // If the transmit state machine has a pending DMA, let it go first
1215 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1216 txKick();
1217
1218 rxKick();
1219 }
1220
void
NSGigE::rxKick()
{
    DPRINTF(EthernetSM, "receive kick rxState=%s (rxBuf.size=%d)\n",
            NsRxStateStrings[rxState], rxFifo.size());

    // Honour any throttling: don't run the state machine before the
    // tick it was told to wait for.
    if (rxKickTick > curTick) {
        DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
                rxKickTick);
        return;
    }

  next:
    // Retry a DMA that was previously refused by a busy interface
    // before doing any state processing; if it is (re)issued we leave
    // and come back when its completion event fires.
    switch(rxDmaState) {
      case dmaReadWaiting:
        if (doRxDmaRead())
            goto exit;
        break;
      case dmaWriteWaiting:
        if (doRxDmaWrite())
            goto exit;
        break;
      default:
        break;
    }

    // see state machine from spec for details
    // the way this works is, if you finish work on one state and can
    // go directly to another, you do that through jumping to the
    // label "next". however, if you have intermediate work, like DMA
    // so that you can't go to the next state yet, you go to exit and
    // exit the loop. however, when the DMA is done it will trigger
    // an event and come back to this loop.
    switch (rxState) {
      case rxIdle:
        if (!rxEnable) {
            DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n");
            goto exit;
        }

        if (CRDD) {
            // Current Receive Descriptor Done: only the link field of
            // the cached descriptor needs to be refreshed before
            // advancing down the chain.
            rxState = rxDescRefr;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = &rxDescCache + offsetof(ns_desc, link);
            rxDmaLen = sizeof(rxDescCache.link);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        } else {
            // Fetch the whole descriptor addressed by the receive
            // descriptor pointer register.
            rxState = rxDescRead;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = &rxDescCache;
            rxDmaLen = sizeof(ns_desc);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      case rxDescRefr:
        // Wait here until the link-refresh DMA completes.
        if (rxDmaState != dmaIdle)
            goto exit;

        rxState = rxAdvance;
        break;

      case rxDescRead:
        if (rxDmaState != dmaIdle)
            goto exit;

        DPRINTF(EthernetDesc,
                "rxDescCache: addr=%08x read descriptor\n",
                regs.rxdp & 0x3fffffff);
        DPRINTF(EthernetDesc,
                "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
                rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts,
                rxDescCache.extsts);

        // On the rx side, OWN set means the descriptor still belongs
        // to the driver and the device has nowhere to put data.
        if (rxDescCache.cmdsts & CMDSTS_OWN) {
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            goto exit;
        } else {
            rxState = rxFifoBlock;
            rxFragPtr = rxDescCache.bufptr;
            rxDescCnt = rxDescCache.cmdsts & CMDSTS_LEN_MASK;
        }
        break;

      case rxFifoBlock:
        if (!rxPacket) {
            /**
             * @todo in reality, we should be able to start processing
             * the packet as it arrives, and not have to wait for the
             * full packet ot be in the receive fifo.
             */
            if (rxFifo.empty())
                goto exit;

            DPRINTF(EthernetSM, "****processing receive of new packet****\n");

            // If we don't have a packet, grab a new one from the fifo.
            rxPacket = rxFifo.front();
            rxPktBytes = rxPacket->length;
            rxPacketBufPtr = rxPacket->data;

#if TRACING_ON
            if (DTRACE(Ethernet)) {
                if (rxPacket->isIpPkt()) {
                    ip_header *ip = rxPacket->getIpHdr();
                    DPRINTF(Ethernet, "ID is %d\n", reverseEnd16(ip->ID));
                    if (rxPacket->isTcpPkt()) {
                        tcp_header *tcp = rxPacket->getTcpHdr(ip);
                        DPRINTF(Ethernet, "Src Port=%d, Dest Port=%d\n",
                                reverseEnd16(tcp->src_port_num),
                                reverseEnd16(tcp->dest_port_num));
                    }
                }
            }
#endif

            // sanity check - i think the driver behaves like this
            assert(rxDescCnt >= rxPktBytes);

            // Must clear the value before popping to decrement the
            // reference count
            rxFifo.front() = NULL;
            rxFifo.pop_front();
            rxFifoCnt -= rxPacket->length;
        }


        // dont' need the && rxDescCnt > 0 if driver sanity check
        // above holds
        if (rxPktBytes > 0) {
            rxState = rxFragWrite;
            // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
            // check holds
            rxXferLen = rxPktBytes;

            rxDmaAddr = rxFragPtr & 0x3fffffff;
            rxDmaData = rxPacketBufPtr;
            rxDmaLen = rxXferLen;
            rxDmaFree = dmaDataFree;

            if (doRxDmaWrite())
                goto exit;

        } else {
            // Entire packet copied out: write back status into the
            // descriptor and hand it back to the driver.
            rxState = rxDescWrite;

            //if (rxPktBytes == 0) {  /* packet is done */
            assert(rxPktBytes == 0);
            DPRINTF(EthernetSM, "done with receiving packet\n");

            rxDescCache.cmdsts |= CMDSTS_OWN;
            rxDescCache.cmdsts &= ~CMDSTS_MORE;
            rxDescCache.cmdsts |= CMDSTS_OK;
            rxDescCache.cmdsts &= 0xffff0000;
            rxDescCache.cmdsts += rxPacket->length;  //i.e. set CMDSTS_SIZE

#if 0
            /*
             * all the driver uses these are for its own stats keeping
             * which we don't care about, aren't necessary for
             * functionality and doing this would just slow us down.
             * if they end up using this in a later version for
             * functional purposes, just undef
             */
            if (rxFilterEnable) {
                rxDescCache.cmdsts &= ~CMDSTS_DEST_MASK;
                if (rxFifo.front()->IsUnicast())
                    rxDescCache.cmdsts |= CMDSTS_DEST_SELF;
                if (rxFifo.front()->IsMulticast())
                    rxDescCache.cmdsts |= CMDSTS_DEST_MULTI;
                if (rxFifo.front()->IsBroadcast())
                    rxDescCache.cmdsts |= CMDSTS_DEST_MASK;
            }
#endif

            // Verify checksums on the incoming packet and record any
            // errors in the descriptor's extended status.
            if (rxPacket->isIpPkt() && extstsEnable) {
                rxDescCache.extsts |= EXTSTS_IPPKT;
                rxIPChecksums++;
                if (!ipChecksum(rxPacket, false)) {
                    DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
                    rxDescCache.extsts |= EXTSTS_IPERR;
                }
                if (rxPacket->isTcpPkt()) {
                    rxDescCache.extsts |= EXTSTS_TCPPKT;
                    rxTCPChecksums++;
                    if (!tcpChecksum(rxPacket, false)) {
                        DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
                        rxDescCache.extsts |= EXTSTS_TCPERR;

                    }
                } else if (rxPacket->isUdpPkt()) {
                    rxDescCache.extsts |= EXTSTS_UDPPKT;
                    if (!udpChecksum(rxPacket, false)) {
                        DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
                        rxDescCache.extsts |= EXTSTS_UDPERR;
                    }
                }
            }
            rxPacket = 0;

            /*
             * the driver seems to always receive into desc buffers
             * of size 1514, so you never have a pkt that is split
             * into multiple descriptors on the receive side, so
             * i don't implement that case, hence the assert above.
             */

            DPRINTF(EthernetDesc,
                    "rxDescCache: addr=%08x writeback cmdsts extsts\n",
                    regs.rxdp & 0x3fffffff);
            DPRINTF(EthernetDesc,
                    "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
                    rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts,
                    rxDescCache.extsts);

            // Write back only the cmdsts and extsts fields, which are
            // adjacent in ns_desc.
            rxDmaAddr = (regs.rxdp + offsetof(ns_desc, cmdsts)) & 0x3fffffff;
            rxDmaData = &(rxDescCache.cmdsts);
            rxDmaLen = sizeof(rxDescCache.cmdsts) + sizeof(rxDescCache.extsts);
            rxDmaFree = dmaDescFree;

            descDmaWrites++;
            descDmaWrBytes += rxDmaLen;

            if (doRxDmaWrite())
                goto exit;
        }
        break;

      case rxFragWrite:
        if (rxDmaState != dmaIdle)
            goto exit;

        // Advance past the fragment that just finished writing.
        rxPacketBufPtr += rxXferLen;
        rxFragPtr += rxXferLen;
        rxPktBytes -= rxXferLen;

        rxState = rxFifoBlock;
        break;

      case rxDescWrite:
        if (rxDmaState != dmaIdle)
            goto exit;

        assert(rxDescCache.cmdsts & CMDSTS_OWN);

        assert(rxPacket == 0);
        devIntrPost(ISR_RXOK);

        if (rxDescCache.cmdsts & CMDSTS_INTR)
            devIntrPost(ISR_RXDESC);

        if (!rxEnable) {
            DPRINTF(EthernetSM, "Halting the RX state machine\n");
            rxState = rxIdle;
            goto exit;
        } else
            rxState = rxAdvance;
        break;

      case rxAdvance:
        // A null link terminates the descriptor chain; CRDD records
        // that only the link needs refreshing when rx resumes.
        if (rxDescCache.link == 0) {
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            CRDD = true;
            goto exit;
        } else {
            rxState = rxDescRead;
            regs.rxdp = rxDescCache.link;
            CRDD = false;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = &rxDescCache;
            rxDmaLen = sizeof(ns_desc);
            rxDmaFree = dmaDescFree;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("Invalid rxState!");
    }

    DPRINTF(EthernetSM, "entering next rxState=%s\n",
            NsRxStateStrings[rxState]);

    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
            NsRxStateStrings[rxState]);
}
1532
1533 void
1534 NSGigE::transmit()
1535 {
1536 if (txFifo.empty()) {
1537 DPRINTF(Ethernet, "nothing to transmit\n");
1538 return;
1539 }
1540
1541 DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n",
1542 maxTxFifoSize - txFifoAvail);
1543 if (interface->sendPacket(txFifo.front())) {
1544 #if TRACING_ON
1545 if (DTRACE(Ethernet)) {
1546 if (txFifo.front()->isIpPkt()) {
1547 ip_header *ip = txFifo.front()->getIpHdr();
1548 DPRINTF(Ethernet, "ID is %d\n", reverseEnd16(ip->ID));
1549 if (txFifo.front()->isTcpPkt()) {
1550 tcp_header *tcp = txFifo.front()->getTcpHdr(ip);
1551 DPRINTF(Ethernet, "Src Port=%d, Dest Port=%d\n",
1552 reverseEnd16(tcp->src_port_num),
1553 reverseEnd16(tcp->dest_port_num));
1554 }
1555 }
1556 }
1557 #endif
1558
1559 DDUMP(Ethernet, txFifo.front()->data, txFifo.front()->length);
1560 txBytes += txFifo.front()->length;
1561 txPackets++;
1562
1563 txFifoAvail += txFifo.front()->length;
1564
1565 DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n",
1566 txFifoAvail);
1567 txFifo.front() = NULL;
1568 txFifo.pop_front();
1569
1570 /*
1571 * normally do a writeback of the descriptor here, and ONLY
1572 * after that is done, send this interrupt. but since our
1573 * stuff never actually fails, just do this interrupt here,
1574 * otherwise the code has to stray from this nice format.
1575 * besides, it's functionally the same.
1576 */
1577 devIntrPost(ISR_TXOK);
1578 } else {
1579 DPRINTF(Ethernet,
1580 "May need to rethink always sending the descriptors back?\n");
1581 }
1582
1583 if (!txFifo.empty() && !txEvent.scheduled()) {
1584 DPRINTF(Ethernet, "reschedule transmit\n");
1585 txEvent.schedule(curTick + 1000);
1586 }
1587 }
1588
void
NSGigE::txDmaReadCopy()
{
    assert(txDmaState == dmaReading);

    // Complete the pending tx-side read by copying the bytes out of
    // simulated physical memory, then return the engine to idle.
    physmem->dma_read((uint8_t *)txDmaData, txDmaAddr, txDmaLen);
    txDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
            txDmaAddr, txDmaLen);
    DDUMP(EthernetDMA, txDmaData, txDmaLen);
}
1601
1602 bool
1603 NSGigE::doTxDmaRead()
1604 {
1605 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
1606 txDmaState = dmaReading;
1607
1608 if (dmaInterface && !txDmaFree) {
1609 if (dmaInterface->busy())
1610 txDmaState = dmaReadWaiting;
1611 else
1612 dmaInterface->doDMA(Read, txDmaAddr, txDmaLen, curTick,
1613 &txDmaReadEvent, true);
1614 return true;
1615 }
1616
1617 if (dmaReadDelay == 0 && dmaReadFactor == 0.0) {
1618 txDmaReadCopy();
1619 return false;
1620 }
1621
1622 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
1623 Tick start = curTick + dmaReadDelay + factor;
1624 txDmaReadEvent.schedule(start);
1625 return true;
1626 }
1627
1628 void
1629 NSGigE::txDmaReadDone()
1630 {
1631 assert(txDmaState == dmaReading);
1632 txDmaReadCopy();
1633
1634 // If the receive state machine has a pending DMA, let it go first
1635 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1636 rxKick();
1637
1638 txKick();
1639 }
1640
void
NSGigE::txDmaWriteCopy()
{
    assert(txDmaState == dmaWriting);

    // Complete the pending tx-side write by copying the bytes into
    // simulated physical memory, then return the engine to idle.
    physmem->dma_write(txDmaAddr, (uint8_t *)txDmaData, txDmaLen);
    txDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
            txDmaAddr, txDmaLen);
    DDUMP(EthernetDMA, txDmaData, txDmaLen);
}
1653
1654 bool
1655 NSGigE::doTxDmaWrite()
1656 {
1657 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
1658 txDmaState = dmaWriting;
1659
1660 if (dmaInterface && !txDmaFree) {
1661 if (dmaInterface->busy())
1662 txDmaState = dmaWriteWaiting;
1663 else
1664 dmaInterface->doDMA(WriteInvalidate, txDmaAddr, txDmaLen, curTick,
1665 &txDmaWriteEvent, true);
1666 return true;
1667 }
1668
1669 if (dmaWriteDelay == 0 && dmaWriteFactor == 0.0) {
1670 txDmaWriteCopy();
1671 return false;
1672 }
1673
1674 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
1675 Tick start = curTick + dmaWriteDelay + factor;
1676 txDmaWriteEvent.schedule(start);
1677 return true;
1678 }
1679
1680 void
1681 NSGigE::txDmaWriteDone()
1682 {
1683 assert(txDmaState == dmaWriting);
1684 txDmaWriteCopy();
1685
1686 // If the receive state machine has a pending DMA, let it go first
1687 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1688 rxKick();
1689
1690 txKick();
1691 }
1692
void
NSGigE::txKick()
{
    DPRINTF(EthernetSM, "transmit kick txState=%s\n",
            NsTxStateStrings[txState]);

    // Honour any throttling: don't run the state machine before the
    // tick it was told to wait for.
    if (txKickTick > curTick) {
        DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
                txKickTick);

        return;
    }

  next:
    // Retry a DMA that was previously refused by a busy interface
    // before doing any state processing.
    switch(txDmaState) {
      case dmaReadWaiting:
        if (doTxDmaRead())
            goto exit;
        break;
      case dmaWriteWaiting:
        if (doTxDmaWrite())
            goto exit;
        break;
      default:
        break;
    }

    // Same structure as rxKick: loop between states via "next", leave
    // via "exit" whenever a DMA must complete before continuing.
    switch (txState) {
      case txIdle:
        if (!txEnable) {
            DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n");
            goto exit;
        }

        if (CTDD) {
            // Current Transmit Descriptor Done: only the link field of
            // the cached descriptor needs refreshing.
            txState = txDescRefr;

            txDmaAddr = regs.txdp & 0x3fffffff;
            txDmaData = &txDescCache + offsetof(ns_desc, link);
            txDmaLen = sizeof(txDescCache.link);
            txDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += txDmaLen;

            if (doTxDmaRead())
                goto exit;

        } else {
            // Fetch the whole descriptor addressed by the transmit
            // descriptor pointer register.
            txState = txDescRead;

            txDmaAddr = regs.txdp & 0x3fffffff;
            txDmaData = &txDescCache;
            txDmaLen = sizeof(ns_desc);
            txDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += txDmaLen;

            if (doTxDmaRead())
                goto exit;
        }
        break;

      case txDescRefr:
        if (txDmaState != dmaIdle)
            goto exit;

        txState = txAdvance;
        break;

      case txDescRead:
        if (txDmaState != dmaIdle)
            goto exit;

        DPRINTF(EthernetDesc,
                "txDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
                txDescCache.link, txDescCache.bufptr, txDescCache.cmdsts,
                txDescCache.extsts);

        // On the tx side, OWN set means the device owns the descriptor
        // and there is data to send (opposite sense from rx).
        if (txDescCache.cmdsts & CMDSTS_OWN) {
            txState = txFifoBlock;
            txFragPtr = txDescCache.bufptr;
            txDescCnt = txDescCache.cmdsts & CMDSTS_LEN_MASK;
        } else {
            devIntrPost(ISR_TXIDLE);
            txState = txIdle;
            goto exit;
        }
        break;

      case txFifoBlock:
        if (!txPacket) {
            DPRINTF(EthernetSM, "****starting the tx of a new packet****\n");
            // Allocate a fresh assembly buffer for the outgoing frame.
            txPacket = new EtherPacket;
            txPacket->data = new uint8_t[16384];
            txPacketBufPtr = txPacket->data;
        }

        if (txDescCnt == 0) {
            DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n");
            if (txDescCache.cmdsts & CMDSTS_MORE) {
                // Descriptor exhausted but the frame continues in the
                // next descriptor: hand this one back to the driver.
                DPRINTF(EthernetSM, "there are more descriptors to come\n");
                txState = txDescWrite;

                txDescCache.cmdsts &= ~CMDSTS_OWN;

                txDmaAddr = regs.txdp + offsetof(ns_desc, cmdsts);
                txDmaAddr &= 0x3fffffff;
                txDmaData = &(txDescCache.cmdsts);
                txDmaLen = sizeof(txDescCache.cmdsts);
                txDmaFree = dmaDescFree;

                if (doTxDmaWrite())
                    goto exit;

            } else { /* this packet is totally done */
                DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
                /* deal with the the packet that just finished */
                // Optionally generate checksums the driver offloaded.
                if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
                    if (txDescCache.extsts & EXTSTS_UDPPKT) {
                        udpChecksum(txPacket, true);
                    } else if (txDescCache.extsts & EXTSTS_TCPPKT) {
                        tcpChecksum(txPacket, true);
                        txTCPChecksums++;
                    }
                    if (txDescCache.extsts & EXTSTS_IPPKT) {
                        ipChecksum(txPacket, true);
                        txIPChecksums++;
                    }
                }

                txPacket->length = txPacketBufPtr - txPacket->data;
                // this is just because the receive can't handle a
                // packet bigger want to make sure
                assert(txPacket->length <= 1514);
                txFifo.push_back(txPacket);

                /*
                 * this following section is not tqo spec, but
                 * functionally shouldn't be any different.  normally,
                 * the chip will wait til the transmit has occurred
                 * before writing back the descriptor because it has
                 * to wait to see that it was successfully transmitted
                 * to decide whether to set CMDSTS_OK or not.
                 * however, in the simulator since it is always
                 * successfully transmitted, and writing it exactly to
                 * spec would complicate the code, we just do it here
                 */

                txDescCache.cmdsts &= ~CMDSTS_OWN;
                txDescCache.cmdsts |= CMDSTS_OK;

                DPRINTF(EthernetDesc,
                        "txDesc writeback: cmdsts=%08x extsts=%08x\n",
                        txDescCache.cmdsts, txDescCache.extsts);

                // Write back only the adjacent cmdsts and extsts fields.
                txDmaAddr = regs.txdp + offsetof(ns_desc, cmdsts);
                txDmaAddr &= 0x3fffffff;
                txDmaData = &(txDescCache.cmdsts);
                txDmaLen = sizeof(txDescCache.cmdsts) +
                    sizeof(txDescCache.extsts);
                txDmaFree = dmaDescFree;

                descDmaWrites++;
                descDmaWrBytes += txDmaLen;

                transmit();
                txPacket = 0;

                if (!txEnable) {
                    DPRINTF(EthernetSM, "halting TX state machine\n");
                    txState = txIdle;
                    goto exit;
                } else
                    txState = txAdvance;

                if (doTxDmaWrite())
                    goto exit;
            }
        } else {
            DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
            if (txFifoAvail) {
                txState = txFragRead;

                /*
                 * The number of bytes transferred is either whatever
                 * is left in the descriptor (txDescCnt), or if there
                 * is not enough room in the fifo, just whatever room
                 * is left in the fifo
                 */
                txXferLen = min<uint32_t>(txDescCnt, txFifoAvail);

                txDmaAddr = txFragPtr & 0x3fffffff;
                txDmaData = txPacketBufPtr;
                txDmaLen = txXferLen;
                txDmaFree = dmaDataFree;

                if (doTxDmaRead())
                    goto exit;
            } else {
                // No fifo space: try to drain the fifo by transmitting,
                // then come back to this same state.
                txState = txFifoBlock;
                transmit();

                goto exit;
            }

        }
        break;

      case txFragRead:
        if (txDmaState != dmaIdle)
            goto exit;

        // Advance past the fragment that just finished reading.
        txPacketBufPtr += txXferLen;
        txFragPtr += txXferLen;
        txDescCnt -= txXferLen;
        txFifoAvail -= txXferLen;

        txState = txFifoBlock;
        break;

      case txDescWrite:
        if (txDmaState != dmaIdle)
            goto exit;

        if (txDescCache.cmdsts & CMDSTS_INTR)
            devIntrPost(ISR_TXDESC);

        txState = txAdvance;
        break;

      case txAdvance:
        // A null link terminates the descriptor chain.
        if (txDescCache.link == 0) {
            devIntrPost(ISR_TXIDLE);
            txState = txIdle;
            goto exit;
        } else {
            txState = txDescRead;
            regs.txdp = txDescCache.link;
            CTDD = false;

            txDmaAddr = txDescCache.link & 0x3fffffff;
            txDmaData = &txDescCache;
            txDmaLen = sizeof(ns_desc);
            txDmaFree = dmaDescFree;

            if (doTxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("invalid state");
    }

    DPRINTF(EthernetSM, "entering next txState=%s\n",
            NsTxStateStrings[txState]);

    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
            NsTxStateStrings[txState]);
}
1961
1962 void
1963 NSGigE::transferDone()
1964 {
1965 if (txFifo.empty()) {
1966 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
1967 return;
1968 }
1969
1970 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
1971
1972 if (txEvent.scheduled())
1973 txEvent.reschedule(curTick + 1);
1974 else
1975 txEvent.schedule(curTick + 1);
1976 }
1977
1978 bool
1979 NSGigE::rxFilter(PacketPtr packet)
1980 {
1981 bool drop = true;
1982 string type;
1983
1984 if (packet->IsUnicast()) {
1985 type = "unicast";
1986
1987 // If we're accepting all unicast addresses
1988 if (acceptUnicast)
1989 drop = false;
1990
1991 // If we make a perfect match
1992 if (acceptPerfect &&
1993 memcmp(rom.perfectMatch, packet->data, EADDR_LEN) == 0)
1994 drop = false;
1995
1996 eth_header *eth = (eth_header *) packet->data;
1997 if ((acceptArp) && (eth->type == 0x608))
1998 drop = false;
1999
2000 } else if (packet->IsBroadcast()) {
2001 type = "broadcast";
2002
2003 // if we're accepting broadcasts
2004 if (acceptBroadcast)
2005 drop = false;
2006
2007 } else if (packet->IsMulticast()) {
2008 type = "multicast";
2009
2010 // if we're accepting all multicasts
2011 if (acceptMulticast)
2012 drop = false;
2013
2014 } else {
2015 type = "unknown";
2016
2017 // oh well, punt on this one
2018 }
2019
2020 if (drop) {
2021 DPRINTF(Ethernet, "rxFilter drop\n");
2022 DDUMP(EthernetData, packet->data, packet->length);
2023 }
2024
2025 return drop;
2026 }
2027
bool
NSGigE::recvPacket(PacketPtr packet)
{
    // Entry point for packets arriving from the simulated wire.
    // Returns true if the packet was consumed (even if dropped);
    // false asks the link layer to retry later (backpressure).
    rxBytes += packet->length;
    rxPackets++;

    DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
            maxRxFifoSize - rxFifoCnt);

    if (!rxEnable) {
        DPRINTF(Ethernet, "receive disabled...packet dropped\n");
        // NOTE(review): traps into the debugger on every packet that
        // arrives while rx is disabled -- confirm this is intentional
        // and not leftover debugging aid.
        debug_break();
        interface->recvDone();
        return true;
    }

    if (rxFilterEnable && rxFilter(packet)) {
        DPRINTF(Ethernet, "packet filtered...dropped\n");
        interface->recvDone();
        return true;
    }

    // Fifo full: signal receive overrun and refuse the packet so the
    // link retries it (note: no recvDone() on this path).
    if ((rxFifoCnt + packet->length) >= maxRxFifoSize) {
        DPRINTF(Ethernet,
                "packet will not fit in receive buffer...packet dropped\n");
        devIntrPost(ISR_RXORN);
        return false;
    }

    rxFifo.push_back(packet);
    rxFifoCnt += packet->length;
    interface->recvDone();

    // Wake the receive state machine to start copying into memory.
    rxKick();
    return true;
}
2064
2065 /**
2066 * does a udp checksum. if gen is true, then it generates it and puts
2067 * it in the right place else, it just checks what it calculates
2068 * against the value in the header in packet
2069 */
2070 bool
2071 NSGigE::udpChecksum(PacketPtr packet, bool gen)
2072 {
2073 ip_header *ip = packet->getIpHdr();
2074 udp_header *hdr = packet->getUdpHdr(ip);
2075
2076 pseudo_header *pseudo = new pseudo_header;
2077
2078 pseudo->src_ip_addr = ip->src_ip_addr;
2079 pseudo->dest_ip_addr = ip->dest_ip_addr;
2080 pseudo->protocol = ip->protocol;
2081 pseudo->len = hdr->len;
2082
2083 uint16_t cksum = checksumCalc((uint16_t *) pseudo, (uint16_t *) hdr,
2084 (uint32_t) hdr->len);
2085
2086 delete pseudo;
2087 if (gen)
2088 hdr->chksum = cksum;
2089 else
2090 if (cksum != 0)
2091 return false;
2092
2093 return true;
2094 }
2095
2096 bool
2097 NSGigE::tcpChecksum(PacketPtr packet, bool gen)
2098 {
2099 ip_header *ip = packet->getIpHdr();
2100 tcp_header *hdr = packet->getTcpHdr(ip);
2101
2102 uint16_t cksum;
2103 pseudo_header *pseudo = new pseudo_header;
2104 if (!gen) {
2105 pseudo->src_ip_addr = ip->src_ip_addr;
2106 pseudo->dest_ip_addr = ip->dest_ip_addr;
2107 pseudo->protocol = reverseEnd16(ip->protocol);
2108 pseudo->len = reverseEnd16(reverseEnd16(ip->dgram_len) -
2109 (ip->vers_len & 0xf)*4);
2110
2111 cksum = checksumCalc((uint16_t *) pseudo, (uint16_t *) hdr,
2112 (uint32_t) reverseEnd16(pseudo->len));
2113 } else {
2114 pseudo->src_ip_addr = 0;
2115 pseudo->dest_ip_addr = 0;
2116 pseudo->protocol = hdr->chksum;
2117 pseudo->len = 0;
2118 hdr->chksum = 0;
2119 cksum = checksumCalc((uint16_t *) pseudo, (uint16_t *) hdr,
2120 (uint32_t) (reverseEnd16(ip->dgram_len) -
2121 (ip->vers_len & 0xf)*4));
2122 }
2123
2124 delete pseudo;
2125 if (gen)
2126 hdr->chksum = cksum;
2127 else
2128 if (cksum != 0)
2129 return false;
2130
2131 return true;
2132 }
2133
2134 bool
2135 NSGigE::ipChecksum(PacketPtr packet, bool gen)
2136 {
2137 ip_header *hdr = packet->getIpHdr();
2138
2139 uint16_t cksum = checksumCalc(NULL, (uint16_t *) hdr,
2140 (hdr->vers_len & 0xf)*4);
2141
2142 if (gen) {
2143 DPRINTF(EthernetCksum, "generated checksum: %#x\n", cksum);
2144 hdr->hdr_chksum = cksum;
2145 }
2146 else
2147 if (cksum != 0)
2148 return false;
2149
2150 return true;
2151 }
2152
2153 uint16_t
2154 NSGigE::checksumCalc(uint16_t *pseudo, uint16_t *buf, uint32_t len)
2155 {
2156 uint32_t sum = 0;
2157
2158 uint16_t last_pad = 0;
2159 if (len & 1) {
2160 last_pad = buf[len/2] & 0xff;
2161 len--;
2162 sum += last_pad;
2163 }
2164
2165 if (pseudo) {
2166 sum = pseudo[0] + pseudo[1] + pseudo[2] +
2167 pseudo[3] + pseudo[4] + pseudo[5];
2168 }
2169
2170 for (int i=0; i < (len/2); ++i) {
2171 sum += buf[i];
2172 }
2173
2174 while (sum >> 16)
2175 sum = (sum >> 16) + (sum & 0xffff);
2176
2177 return ~sum;
2178 }
2179
2180 //=====================================================================
2181 //
2182 //
void
NSGigE::serialize(ostream &os)
{
    // Checkpoint the device.  The field names and order here define
    // the checkpoint format and must stay in sync with unserialize().

    // Serialize the PciDev base class
    PciDev::serialize(os);

    /*
     * Finalize any DMA events now.
     */
    // Any DMA whose completion event is pending is completed eagerly
    // so no event state needs to be checkpointed.
    if (rxDmaReadEvent.scheduled())
        rxDmaReadCopy();
    if (rxDmaWriteEvent.scheduled())
        rxDmaWriteCopy();
    if (txDmaReadEvent.scheduled())
        txDmaReadCopy();
    if (txDmaWriteEvent.scheduled())
        txDmaWriteCopy();

    /*
     * Serialize the device registers
     */
    SERIALIZE_SCALAR(regs.command);
    SERIALIZE_SCALAR(regs.config);
    SERIALIZE_SCALAR(regs.mear);
    SERIALIZE_SCALAR(regs.ptscr);
    SERIALIZE_SCALAR(regs.isr);
    SERIALIZE_SCALAR(regs.imr);
    SERIALIZE_SCALAR(regs.ier);
    SERIALIZE_SCALAR(regs.ihr);
    SERIALIZE_SCALAR(regs.txdp);
    SERIALIZE_SCALAR(regs.txdp_hi);
    SERIALIZE_SCALAR(regs.txcfg);
    SERIALIZE_SCALAR(regs.gpior);
    SERIALIZE_SCALAR(regs.rxdp);
    SERIALIZE_SCALAR(regs.rxdp_hi);
    SERIALIZE_SCALAR(regs.rxcfg);
    SERIALIZE_SCALAR(regs.pqcr);
    SERIALIZE_SCALAR(regs.wcsr);
    SERIALIZE_SCALAR(regs.pcr);
    SERIALIZE_SCALAR(regs.rfcr);
    SERIALIZE_SCALAR(regs.rfdr);
    SERIALIZE_SCALAR(regs.srr);
    SERIALIZE_SCALAR(regs.mibc);
    SERIALIZE_SCALAR(regs.vrcr);
    SERIALIZE_SCALAR(regs.vtcr);
    SERIALIZE_SCALAR(regs.vdr);
    SERIALIZE_SCALAR(regs.ccsr);
    SERIALIZE_SCALAR(regs.tbicr);
    SERIALIZE_SCALAR(regs.tbisr);
    SERIALIZE_SCALAR(regs.tanar);
    SERIALIZE_SCALAR(regs.tanlpar);
    SERIALIZE_SCALAR(regs.taner);
    SERIALIZE_SCALAR(regs.tesr);

    SERIALIZE_ARRAY(rom.perfectMatch, EADDR_LEN);

    SERIALIZE_SCALAR(ioEnable);

    /*
     * Serialize the data Fifos
     */
    // Each queued packet gets its own checkpoint section,
    // "<name>.txFifo<i>" / "<name>.rxFifo<i>".
    int txNumPkts = txFifo.size();
    SERIALIZE_SCALAR(txNumPkts);
    int i = 0;
    pktiter_t end = txFifo.end();
    for (pktiter_t p = txFifo.begin(); p != end; ++p) {
        nameOut(os, csprintf("%s.txFifo%d", name(), i++));
        (*p)->serialize(os);
    }

    int rxNumPkts = rxFifo.size();
    SERIALIZE_SCALAR(rxNumPkts);
    i = 0;
    end = rxFifo.end();
    for (pktiter_t p = rxFifo.begin(); p != end; ++p) {
        nameOut(os, csprintf("%s.rxFifo%d", name(), i++));
        (*p)->serialize(os);
    }

    /*
     * Serialize the various helper variables
     */
    // In-flight packet buffer pointers are stored as offsets into the
    // packet data so they survive reallocation on restore.
    bool txPacketExists = txPacket;
    SERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        nameOut(os, csprintf("%s.txPacket", name()));
        txPacket->serialize(os);
        uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
        SERIALIZE_SCALAR(txPktBufPtr);
    }

    bool rxPacketExists = rxPacket;
    SERIALIZE_SCALAR(rxPacketExists);
    if (rxPacketExists) {
        nameOut(os, csprintf("%s.rxPacket", name()));
        rxPacket->serialize(os);
        uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
        SERIALIZE_SCALAR(rxPktBufPtr);
    }

    SERIALIZE_SCALAR(txXferLen);
    SERIALIZE_SCALAR(rxXferLen);

    /*
     * Serialize DescCaches
     */
    SERIALIZE_SCALAR(txDescCache.link);
    SERIALIZE_SCALAR(txDescCache.bufptr);
    SERIALIZE_SCALAR(txDescCache.cmdsts);
    SERIALIZE_SCALAR(txDescCache.extsts);
    SERIALIZE_SCALAR(rxDescCache.link);
    SERIALIZE_SCALAR(rxDescCache.bufptr);
    SERIALIZE_SCALAR(rxDescCache.cmdsts);
    SERIALIZE_SCALAR(rxDescCache.extsts);

    /*
     * Serialize tx state machine
     */
    // Enum states are stored as plain ints.
    int txState = this->txState;
    SERIALIZE_SCALAR(txState);
    SERIALIZE_SCALAR(txEnable);
    SERIALIZE_SCALAR(CTDD);
    SERIALIZE_SCALAR(txFifoAvail);
    SERIALIZE_SCALAR(txFragPtr);
    SERIALIZE_SCALAR(txDescCnt);
    int txDmaState = this->txDmaState;
    SERIALIZE_SCALAR(txDmaState);

    /*
     * Serialize rx state machine
     */
    int rxState = this->rxState;
    SERIALIZE_SCALAR(rxState);
    SERIALIZE_SCALAR(rxEnable);
    SERIALIZE_SCALAR(CRDD);
    SERIALIZE_SCALAR(rxPktBytes);
    SERIALIZE_SCALAR(rxFifoCnt);
    SERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState = this->rxDmaState;
    SERIALIZE_SCALAR(rxDmaState);

    SERIALIZE_SCALAR(extstsEnable);

    /*
     * If there's a pending transmit, store the time so we can
     * reschedule it later
     */
    Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
    SERIALIZE_SCALAR(transmitTick);

    /*
     * receive address filter settings
     */
    SERIALIZE_SCALAR(rxFilterEnable);
    SERIALIZE_SCALAR(acceptBroadcast);
    SERIALIZE_SCALAR(acceptMulticast);
    SERIALIZE_SCALAR(acceptUnicast);
    SERIALIZE_SCALAR(acceptPerfect);
    SERIALIZE_SCALAR(acceptArp);

    /*
     * Keep track of pending interrupt status.
     */
    // NOTE(review): intrEventTick is stored as an ABSOLUTE tick while
    // transmitTick above is stored relative to curTick -- confirm the
    // asymmetry is intentional (unserialize schedules it absolutely).
    SERIALIZE_SCALAR(intrTick);
    SERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick = 0;
    if (intrEvent)
        intrEventTick = intrEvent->when();
    SERIALIZE_SCALAR(intrEventTick);

}
2354
2355 void
2356 NSGigE::unserialize(Checkpoint *cp, const std::string &section)
2357 {
2358 // Unserialize the PciDev base class
2359 PciDev::unserialize(cp, section);
2360
2361 UNSERIALIZE_SCALAR(regs.command);
2362 UNSERIALIZE_SCALAR(regs.config);
2363 UNSERIALIZE_SCALAR(regs.mear);
2364 UNSERIALIZE_SCALAR(regs.ptscr);
2365 UNSERIALIZE_SCALAR(regs.isr);
2366 UNSERIALIZE_SCALAR(regs.imr);
2367 UNSERIALIZE_SCALAR(regs.ier);
2368 UNSERIALIZE_SCALAR(regs.ihr);
2369 UNSERIALIZE_SCALAR(regs.txdp);
2370 UNSERIALIZE_SCALAR(regs.txdp_hi);
2371 UNSERIALIZE_SCALAR(regs.txcfg);
2372 UNSERIALIZE_SCALAR(regs.gpior);
2373 UNSERIALIZE_SCALAR(regs.rxdp);
2374 UNSERIALIZE_SCALAR(regs.rxdp_hi);
2375 UNSERIALIZE_SCALAR(regs.rxcfg);
2376 UNSERIALIZE_SCALAR(regs.pqcr);
2377 UNSERIALIZE_SCALAR(regs.wcsr);
2378 UNSERIALIZE_SCALAR(regs.pcr);
2379 UNSERIALIZE_SCALAR(regs.rfcr);
2380 UNSERIALIZE_SCALAR(regs.rfdr);
2381 UNSERIALIZE_SCALAR(regs.srr);
2382 UNSERIALIZE_SCALAR(regs.mibc);
2383 UNSERIALIZE_SCALAR(regs.vrcr);
2384 UNSERIALIZE_SCALAR(regs.vtcr);
2385 UNSERIALIZE_SCALAR(regs.vdr);
2386 UNSERIALIZE_SCALAR(regs.ccsr);
2387 UNSERIALIZE_SCALAR(regs.tbicr);
2388 UNSERIALIZE_SCALAR(regs.tbisr);
2389 UNSERIALIZE_SCALAR(regs.tanar);
2390 UNSERIALIZE_SCALAR(regs.tanlpar);
2391 UNSERIALIZE_SCALAR(regs.taner);
2392 UNSERIALIZE_SCALAR(regs.tesr);
2393
2394 UNSERIALIZE_ARRAY(rom.perfectMatch, EADDR_LEN);
2395
2396 UNSERIALIZE_SCALAR(ioEnable);
2397
2398 /*
2399 * unserialize the data fifos
2400 */
2401 int txNumPkts;
2402 UNSERIALIZE_SCALAR(txNumPkts);
2403 int i;
2404 for (i = 0; i < txNumPkts; ++i) {
2405 PacketPtr p = new EtherPacket;
2406 p->unserialize(cp, csprintf("%s.rxFifo%d", section, i));
2407 txFifo.push_back(p);
2408 }
2409
2410 int rxNumPkts;
2411 UNSERIALIZE_SCALAR(rxNumPkts);
2412 for (i = 0; i < rxNumPkts; ++i) {
2413 PacketPtr p = new EtherPacket;
2414 p->unserialize(cp, csprintf("%s.rxFifo%d", section, i));
2415 rxFifo.push_back(p);
2416 }
2417
2418 /*
2419 * unserialize the various helper variables
2420 */
2421 bool txPacketExists;
2422 UNSERIALIZE_SCALAR(txPacketExists);
2423 if (txPacketExists) {
2424 txPacket = new EtherPacket;
2425 txPacket->unserialize(cp, csprintf("%s.txPacket", section));
2426 uint32_t txPktBufPtr;
2427 UNSERIALIZE_SCALAR(txPktBufPtr);
2428 txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
2429 } else
2430 txPacket = 0;
2431
2432 bool rxPacketExists;
2433 UNSERIALIZE_SCALAR(rxPacketExists);
2434 rxPacket = 0;
2435 if (rxPacketExists) {
2436 rxPacket = new EtherPacket;
2437 rxPacket->unserialize(cp, csprintf("%s.rxPacket", section));
2438 uint32_t rxPktBufPtr;
2439 UNSERIALIZE_SCALAR(rxPktBufPtr);
2440 rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
2441 } else
2442 rxPacket = 0;
2443
2444 UNSERIALIZE_SCALAR(txXferLen);
2445 UNSERIALIZE_SCALAR(rxXferLen);
2446
2447 /*
2448 * Unserialize DescCaches
2449 */
2450 UNSERIALIZE_SCALAR(txDescCache.link);
2451 UNSERIALIZE_SCALAR(txDescCache.bufptr);
2452 UNSERIALIZE_SCALAR(txDescCache.cmdsts);
2453 UNSERIALIZE_SCALAR(txDescCache.extsts);
2454 UNSERIALIZE_SCALAR(rxDescCache.link);
2455 UNSERIALIZE_SCALAR(rxDescCache.bufptr);
2456 UNSERIALIZE_SCALAR(rxDescCache.cmdsts);
2457 UNSERIALIZE_SCALAR(rxDescCache.extsts);
2458
2459 /*
2460 * unserialize tx state machine
2461 */
2462 int txState;
2463 UNSERIALIZE_SCALAR(txState);
2464 this->txState = (TxState) txState;
2465 UNSERIALIZE_SCALAR(txEnable);
2466 UNSERIALIZE_SCALAR(CTDD);
2467 UNSERIALIZE_SCALAR(txFifoAvail);
2468 UNSERIALIZE_SCALAR(txFragPtr);
2469 UNSERIALIZE_SCALAR(txDescCnt);
2470 int txDmaState;
2471 UNSERIALIZE_SCALAR(txDmaState);
2472 this->txDmaState = (DmaState) txDmaState;
2473
2474 /*
2475 * unserialize rx state machine
2476 */
2477 int rxState;
2478 UNSERIALIZE_SCALAR(rxState);
2479 this->rxState = (RxState) rxState;
2480 UNSERIALIZE_SCALAR(rxEnable);
2481 UNSERIALIZE_SCALAR(CRDD);
2482 UNSERIALIZE_SCALAR(rxPktBytes);
2483 UNSERIALIZE_SCALAR(rxFifoCnt);
2484 UNSERIALIZE_SCALAR(rxDescCnt);
2485 int rxDmaState;
2486 UNSERIALIZE_SCALAR(rxDmaState);
2487 this->rxDmaState = (DmaState) rxDmaState;
2488
2489 UNSERIALIZE_SCALAR(extstsEnable);
2490
2491 /*
2492 * If there's a pending transmit, reschedule it now
2493 */
2494 Tick transmitTick;
2495 UNSERIALIZE_SCALAR(transmitTick);
2496 if (transmitTick)
2497 txEvent.schedule(curTick + transmitTick);
2498
2499 /*
2500 * unserialize receive address filter settings
2501 */
2502 UNSERIALIZE_SCALAR(rxFilterEnable);
2503 UNSERIALIZE_SCALAR(acceptBroadcast);
2504 UNSERIALIZE_SCALAR(acceptMulticast);
2505 UNSERIALIZE_SCALAR(acceptUnicast);
2506 UNSERIALIZE_SCALAR(acceptPerfect);
2507 UNSERIALIZE_SCALAR(acceptArp);
2508
2509 /*
2510 * Keep track of pending interrupt status.
2511 */
2512 UNSERIALIZE_SCALAR(intrTick);
2513 UNSERIALIZE_SCALAR(cpuPendingIntr);
2514 Tick intrEventTick;
2515 UNSERIALIZE_SCALAR(intrEventTick);
2516 if (intrEventTick) {
2517 intrEvent = new IntrEvent(this, true);
2518 intrEvent->schedule(intrEventTick);
2519 }
2520
2521 /*
2522 * re-add addrRanges to bus bridges
2523 */
2524 if (pioInterface) {
2525 pioInterface->addAddrRange(BARAddrs[0], BARAddrs[0] + BARSize[0] - 1);
2526 pioInterface->addAddrRange(BARAddrs[1], BARAddrs[1] + BARSize[1] - 1);
2527 }
2528 }
2529
2530 Tick
2531 NSGigE::cacheAccess(MemReqPtr &req)
2532 {
2533 DPRINTF(EthernetPIO, "timing access to paddr=%#x (daddr=%#x)\n",
2534 req->paddr, req->paddr - addr);
2535 return curTick + pioLatency;
2536 }
2537 //=====================================================================
2538
2539
2540 //********** helper functions******************************************
2541
/**
 * Swap the two bytes of a 16-bit value (endianness conversion),
 * e.g. 0xAABB -> 0xBBAA.
 */
uint16_t reverseEnd16(uint16_t num)
{
    uint16_t low_byte  = num & 0x00ff;
    uint16_t high_byte = (num >> 8) & 0x00ff;
    return (uint16_t)((low_byte << 8) | high_byte);
}
2548
/**
 * Swap the four bytes of a 32-bit value (endianness conversion),
 * e.g. 0xAABBCCDD -> 0xDDCCBBAA.
 *
 * Bug fix: the original shifted the upper half-word right by 8 bits
 * instead of 16 before swapping it, so the low two bytes of the
 * result were taken from the wrong positions (0x12345678 produced
 * 0x78565634 rather than 0x78563412).  Rewritten as a self-contained
 * four-byte swap so each byte's source is explicit.
 */
uint32_t reverseEnd32(uint32_t num)
{
    uint32_t reverse = ((num & 0x000000ff) << 24) |
                       ((num & 0x0000ff00) <<  8) |
                       ((num & 0x00ff0000) >>  8) |
                       ((num & 0xff000000) >> 24);
    return reverse;
}
2555
2556
2557
2558 //=====================================================================
2559
/*
 * Configuration parameters for the NSGigEInt ethernet interface
 * object (the link endpoint belonging to an NSGigE device).
 */
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

    SimObjectParam<EtherInt *> peer;    // interface at the other end of the link (may be NULL)
    SimObjectParam<NSGigE *> device;    // the NSGigE device this interface belongs to

END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)
2566
/*
 * Parameter descriptions and defaults for NSGigEInt; `peer` is
 * optional (NULL) so a device can be created before its link partner.
 */
BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt)

    INIT_PARAM_DFLT(peer, "peer interface", NULL),
    INIT_PARAM(device, "Ethernet device of this interface")

END_INIT_SIM_OBJECT_PARAMS(NSGigEInt)
2573
/*
 * Factory: construct an NSGigEInt from the parsed parameters and, if
 * a peer interface was supplied, wire the two endpoints together.
 */
CREATE_SIM_OBJECT(NSGigEInt)
{
    NSGigEInt *dev_int = new NSGigEInt(getInstanceName(), device);

    // peer defaults to NULL; only cross-link the two interfaces when
    // a partner was actually configured.
    EtherInt *p = (EtherInt *)peer;
    if (p) {
        dev_int->setPeer(p);
        p->setPeer(dev_int);
    }

    return dev_int;
}

REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt)
2588
2589
/*
 * Configuration parameters for the NSGigE (DP83820) device model:
 * link timing, interrupt plumbing, memory-system attachment, DMA
 * timing knobs, PCI identity, and FIFO sizing.  Human-readable
 * descriptions for each parameter are given in the INIT block below.
 */
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE)

    Param<Tick> tx_delay;
    Param<Tick> rx_delay;
    SimObjectParam<IntrControl *> intr_ctrl;
    Param<Tick> intr_delay;
    SimObjectParam<MemoryController *> mmu;
    SimObjectParam<PhysicalMemory *> physmem;
    Param<bool> rx_filter;
    Param<string> hardware_address;
    SimObjectParam<Bus*> header_bus;
    SimObjectParam<Bus*> payload_bus;
    SimObjectParam<HierParams *> hier;
    Param<Tick> pio_latency;
    Param<bool> dma_desc_free;
    Param<bool> dma_data_free;
    Param<Tick> dma_read_delay;
    Param<Tick> dma_write_delay;
    Param<Tick> dma_read_factor;
    Param<Tick> dma_write_factor;
    SimObjectParam<PciConfigAll *> configspace;
    SimObjectParam<PciConfigData *> configdata;
    SimObjectParam<Tsunami *> tsunami;
    Param<uint32_t> pci_bus;
    Param<uint32_t> pci_dev;
    Param<uint32_t> pci_func;
    Param<uint32_t> tx_fifo_size;
    Param<uint32_t> rx_fifo_size;

END_DECLARE_SIM_OBJECT_PARAMS(NSGigE)
2620
/*
 * Descriptions and defaults for the NSGigE parameters.  Entries with
 * INIT_PARAM (no default) are required in the configuration; the
 * others fall back to the value given here.
 */
BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE)

    INIT_PARAM_DFLT(tx_delay, "Transmit Delay", 1000),
    INIT_PARAM_DFLT(rx_delay, "Receive Delay", 1000),
    INIT_PARAM(intr_ctrl, "Interrupt Controller"),
    INIT_PARAM_DFLT(intr_delay, "Interrupt Delay in microseconds", 0),
    INIT_PARAM(mmu, "Memory Controller"),
    INIT_PARAM(physmem, "Physical Memory"),
    INIT_PARAM_DFLT(rx_filter, "Enable Receive Filter", true),
    INIT_PARAM_DFLT(hardware_address, "Ethernet Hardware Address",
                    "00:99:00:00:00:01"),
    INIT_PARAM_DFLT(header_bus, "The IO Bus to attach to for headers", NULL),
    INIT_PARAM_DFLT(payload_bus, "The IO Bus to attach to for payload", NULL),
    INIT_PARAM_DFLT(hier, "Hierarchy global variables", &defaultHierParams),
    INIT_PARAM_DFLT(pio_latency, "Programmed IO latency in bus cycles", 1),
    INIT_PARAM_DFLT(dma_desc_free, "DMA of Descriptors is free", false),
    INIT_PARAM_DFLT(dma_data_free, "DMA of Data is free", false),
    INIT_PARAM_DFLT(dma_read_delay, "fixed delay for dma reads", 0),
    INIT_PARAM_DFLT(dma_write_delay, "fixed delay for dma writes", 0),
    INIT_PARAM_DFLT(dma_read_factor, "multiplier for dma reads", 0),
    INIT_PARAM_DFLT(dma_write_factor, "multiplier for dma writes", 0),
    INIT_PARAM(configspace, "PCI Configspace"),
    INIT_PARAM(configdata, "PCI Config data"),
    INIT_PARAM(tsunami, "Tsunami"),
    INIT_PARAM(pci_bus, "PCI bus"),
    INIT_PARAM(pci_dev, "PCI device number"),
    INIT_PARAM(pci_func, "PCI function code"),
    INIT_PARAM_DFLT(tx_fifo_size, "max size in bytes of txFifo", 131072),
    INIT_PARAM_DFLT(rx_fifo_size, "max size in bytes of rxFifo", 131072)

END_INIT_SIM_OBJECT_PARAMS(NSGigE)
2652
2653
2654 CREATE_SIM_OBJECT(NSGigE)
2655 {
2656 int eaddr[6];
2657 sscanf(((string)hardware_address).c_str(), "%x:%x:%x:%x:%x:%x",
2658 &eaddr[0], &eaddr[1], &eaddr[2], &eaddr[3], &eaddr[4], &eaddr[5]);
2659
2660 return new NSGigE(getInstanceName(), intr_ctrl, intr_delay,
2661 physmem, tx_delay, rx_delay, mmu, hier, header_bus,
2662 payload_bus, pio_latency, dma_desc_free, dma_data_free,
2663 dma_read_delay, dma_write_delay, dma_read_factor,
2664 dma_write_factor, configspace, configdata,
2665 tsunami, pci_bus, pci_dev, pci_func, rx_filter, eaddr,
2666 tx_fifo_size, rx_fifo_size);
2667 }
2668
2669 REGISTER_SIM_OBJECT("NSGigE", NSGigE)