[gem5.git] / dev / ns_gige.cc
1 /*
2 * Copyright (c) 2004 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /* @file
30 * Device module for modelling the National Semiconductor
31 * DP83820 ethernet controller. Does not support priority queueing
32 */
33 #include <cstdio>
34 #include <deque>
35 #include <string>
36
37 #include "base/inet.hh"
38 #include "cpu/exec_context.hh"
39 #include "cpu/intr_control.hh"
40 #include "dev/dma.hh"
41 #include "dev/ns_gige.hh"
42 #include "dev/etherlink.hh"
43 #include "mem/bus/bus.hh"
44 #include "mem/bus/dma_interface.hh"
45 #include "mem/bus/pio_interface.hh"
46 #include "mem/bus/pio_interface_impl.hh"
47 #include "mem/functional_mem/memory_control.hh"
48 #include "mem/functional_mem/physical_memory.hh"
49 #include "sim/builder.hh"
50 #include "sim/host.hh"
51 #include "sim/sim_stats.hh"
52 #include "targetarch/vtophys.hh"
53 #include "dev/pciconfigall.hh"
54 #include "dev/tsunami_cchip.hh"
55
56 const char *NsRxStateStrings[] =
57 {
58 "rxIdle",
59 "rxDescRefr",
60 "rxDescRead",
61 "rxFifoBlock",
62 "rxFragWrite",
63 "rxDescWrite",
64 "rxAdvance"
65 };
66
67 const char *NsTxStateStrings[] =
68 {
69 "txIdle",
70 "txDescRefr",
71 "txDescRead",
72 "txFifoBlock",
73 "txFragRead",
74 "txDescWrite",
75 "txAdvance"
76 };
77
78 const char *NsDmaState[] =
79 {
80 "dmaIdle",
81 "dmaReading",
82 "dmaWriting",
83 "dmaReadWaiting",
84 "dmaWriteWaiting"
85 };
86
87 using namespace std;
88
89 ///////////////////////////////////////////////////////////////////////
90 //
91 // NSGigE PCI Device
92 //
93 NSGigE::NSGigE(const std::string &name, IntrControl *i, Tick intr_delay,
94 PhysicalMemory *pmem, Tick tx_delay, Tick rx_delay,
95 MemoryController *mmu, HierParams *hier, Bus *header_bus,
96 Bus *payload_bus, Tick pio_latency, bool dma_desc_free,
97 bool dma_data_free, Tick dma_read_delay, Tick dma_write_delay,
98 Tick dma_read_factor, Tick dma_write_factor, PciConfigAll *cf,
99 PciConfigData *cd, Tsunami *t, uint32_t bus, uint32_t dev,
100 uint32_t func, bool rx_filter, const int eaddr[6])
101 : PciDev(name, mmu, cf, cd, bus, dev, func), tsunami(t),
102 txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
103 txXferLen(0), rxXferLen(0), txPktXmitted(0), txState(txIdle), CTDD(false),
104 txFifoCnt(0), txFifoAvail(MAX_TX_FIFO_SIZE), txHalt(false),
105 txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
106 CRDD(false), rxPktBytes(0), rxFifoCnt(0), rxHalt(false),
107 rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
108 rxDmaReadEvent(this), rxDmaWriteEvent(this),
109 txDmaReadEvent(this), txDmaWriteEvent(this),
110 dmaDescFree(dma_desc_free), dmaDataFree(dma_data_free),
111 txDelay(tx_delay), rxDelay(rx_delay), rxKickTick(0), txKickTick(0),
112 txEvent(this), rxFilterEnable(rx_filter), acceptBroadcast(false),
113 acceptMulticast(false), acceptUnicast(false),
114 acceptPerfect(false), acceptArp(false),
115 physmem(pmem), intctrl(i), intrTick(0),
116 cpuPendingIntr(false), intrEvent(0), interface(0), pioLatency(pio_latency)
117 {
118 tsunami->ethernet = this;
119
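// Hook this device up to the memory system. PIO accesses arrive over the
// header bus when one is supplied (otherwise over the payload bus), and the
// DMA engine is handed both buses, falling back to the single available bus
// for both of its channels when only one is configured.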
120 if (header_bus) {
121 pioInterface = newPioInterface(name, hier, header_bus, this,
122 &NSGigE::cacheAccess);
123
124 if (payload_bus)
125 dmaInterface = new DMAInterface<Bus>(name + ".dma",
126 header_bus, payload_bus, 1);
127 else
128 dmaInterface = new DMAInterface<Bus>(name + ".dma",
129 header_bus, header_bus, 1);
130 } else if (payload_bus) {
131 pioInterface = newPioInterface(name, hier, payload_bus, this,
132 &NSGigE::cacheAccess);
133
134 dmaInterface = new DMAInterface<Bus>(name + ".dma",
135 payload_bus, payload_bus, 1);
136
137 }
138
139
140 intrDelay = US2Ticks(intr_delay);
141 dmaReadDelay = dma_read_delay;
142 dmaWriteDelay = dma_write_delay;
143 dmaReadFactor = dma_read_factor;
144 dmaWriteFactor = dma_write_factor;
145
146 memset(&regs, 0, sizeof(regs));
147 regsReset();
148 rom.perfectMatch[0] = eaddr[0];
149 rom.perfectMatch[1] = eaddr[1];
150 rom.perfectMatch[2] = eaddr[2];
151 rom.perfectMatch[3] = eaddr[3];
152 rom.perfectMatch[4] = eaddr[4];
153 rom.perfectMatch[5] = eaddr[5];
154 }
155
156 NSGigE::~NSGigE()
157 {}
158
159 void
160 NSGigE::regStats()
161 {
162 txBytes
163 .name(name() + ".txBytes")
164 .desc("Bytes Transmitted")
165 .prereq(txBytes)
166 ;
167
168 rxBytes
169 .name(name() + ".rxBytes")
170 .desc("Bytes Received")
171 .prereq(rxBytes)
172 ;
173
174 txPackets
175 .name(name() + ".txPackets")
176 .desc("Number of Packets Transmitted")
177 .prereq(txBytes)
178 ;
179
180 rxPackets
181 .name(name() + ".rxPackets")
182 .desc("Number of Packets Received")
183 .prereq(rxBytes)
184 ;
185
186 txBandwidth
187 .name(name() + ".txBandwidth")
188 .desc("Transmit Bandwidth (bits/s)")
189 .precision(0)
190 .prereq(txBytes)
191 ;
192
193 rxBandwidth
194 .name(name() + ".rxBandwidth")
195 .desc("Receive Bandwidth (bits/s)")
196 .precision(0)
197 .prereq(rxBytes)
198 ;
199
200 txPacketRate
201 .name(name() + ".txPPS")
202 .desc("Packet Tranmission Rate (packets/s)")
203 .precision(0)
204 .prereq(txBytes)
205 ;
206
207 rxPacketRate
208 .name(name() + ".rxPPS")
209 .desc("Packet Reception Rate (packets/s)")
210 .precision(0)
211 .prereq(rxBytes)
212 ;
213
214 txBandwidth = txBytes * Stats::constant(8) / simSeconds;
215 rxBandwidth = rxBytes * Stats::constant(8) / simSeconds;
216 txPacketRate = txPackets / simSeconds;
217 rxPacketRate = rxPackets / simSeconds;
218 }
219
220 /**
221 * This is to read the PCI general configuration registers
222 */
223 void
224 NSGigE::ReadConfig(int offset, int size, uint8_t *data)
225 {
226 if (offset < PCI_DEVICE_SPECIFIC)
227 PciDev::ReadConfig(offset, size, data);
228 else
229 panic("Device specific PCI config space not implemented!\n");
230 }
231
232 /**
233 * This is to write to the PCI general configuration registers
234 */
235 void
236 NSGigE::WriteConfig(int offset, int size, uint32_t data)
237 {
238 if (offset < PCI_DEVICE_SPECIFIC)
239 PciDev::WriteConfig(offset, size, data);
240 else
241 panic("Device specific PCI config space not implemented!\n");
242
243 // Need to catch writes to BARs to update the PIO interface
244 switch (offset) {
245 case PCI0_BASE_ADDR0:
246 if (BARAddrs[0] != 0) {
247 addr = BARAddrs[0];
248
249 if (pioInterface)
250 pioInterface->addAddrRange(addr, addr + size - 1);
251
252 addr &= PA_UNCACHED_MASK;
253 }
254 break;
255 }
256 }
257
258 /**
259 * This reads the device registers, which are detailed in the NS83820
260 * spec sheet
261 */
262 Fault
263 NSGigE::read(MemReqPtr &req, uint8_t *data)
264 {
265 //The mask is to give you only the offset into the device register file
266 Addr daddr = req->paddr & 0xfff;
267 DPRINTF(EthernetPIO, "read da=%#x pa=%#x va=%#x size=%d\n",
268 daddr, req->paddr, req->vaddr, req->size);
269
270
271 //there are some reserved registers, you can see ns_gige_reg.h and
272 //the spec sheet for details
273 if (daddr > LAST && daddr <= RESERVED) {
274 panic("Accessing reserved register");
275 } else if (daddr > RESERVED && daddr <= 0x3FC) {
276 ReadConfig(daddr & 0xff, req->size, data);
277 return No_Fault;
278 } else if (daddr >= MIB_START && daddr <= MIB_END) {
279 // The MIBs are just hardware statistics counters. We don't
280 // implement them and hope the kernel doesn't actually depend
281 // on their values, so reads simply return zero.
282 uint32_t &reg = *(uint32_t *) data;
283 reg = 0;
284 return No_Fault;
285 } else if (daddr > 0x3FC)
286 panic("Something is messed up!\n");
287
288 switch (req->size) {
289 case sizeof(uint32_t):
290 {
291 uint32_t &reg = *(uint32_t *)data;
292
293 switch (daddr) {
294 case CR:
295 reg = regs.command;
296 //these are supposed to be cleared on a read
297 reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
298 break;
299
300 case CFG:
301 reg = regs.config;
302 break;
303
304 case MEAR:
305 reg = regs.mear;
306 break;
307
308 case PTSCR:
309 reg = regs.ptscr;
310 break;
311
312 case ISR:
313 reg = regs.isr;
314 devIntrClear(ISR_ALL);
315 break;
316
317 case IMR:
318 reg = regs.imr;
319 break;
320
321 case IER:
322 reg = regs.ier;
323 break;
324
325 case IHR:
326 reg = regs.ihr;
327 break;
328
329 case TXDP:
330 reg = regs.txdp;
331 break;
332
333 case TXDP_HI:
334 reg = regs.txdp_hi;
335 break;
336
337 case TXCFG:
338 reg = regs.txcfg;
339 break;
340
341 case GPIOR:
342 reg = regs.gpior;
343 break;
344
345 case RXDP:
346 reg = regs.rxdp;
347 break;
348
349 case RXDP_HI:
350 reg = regs.rxdp_hi;
351 break;
352
353 case RXCFG:
354 reg = regs.rxcfg;
355 break;
356
357 case PQCR:
358 reg = regs.pqcr;
359 break;
360
361 case WCSR:
362 reg = regs.wcsr;
363 break;
364
365 case PCR:
366 reg = regs.pcr;
367 break;
368
369 // See the spec sheet for how RFCR and RFDR work together:
370 // a write to RFCR selects which receive-filter resource to access,
371 // and the subsequent access to RFDR then reads or writes that
372 // resource.
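// For example, after the driver sets RFCR_RFADDR to 0x000, a read of RFDR
// returns perfectMatch[1]:perfectMatch[0] (the first two bytes of the
// station's MAC address) in its low 16 bits.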
373 case RFCR:
374 reg = regs.rfcr;
375 break;
376
377 case RFDR:
378 switch (regs.rfcr & RFCR_RFADDR) {
379 case 0x000:
380 reg = rom.perfectMatch[1];
381 reg = reg << 8;
382 reg += rom.perfectMatch[0];
383 break;
384 case 0x002:
385 reg = rom.perfectMatch[3] << 8;
386 reg += rom.perfectMatch[2];
387 break;
388 case 0x004:
389 reg = rom.perfectMatch[5] << 8;
390 reg += rom.perfectMatch[4];
391 break;
392 default:
393 panic("reading from RFDR for something for other than PMATCH!\n");
394 //didn't implement other RFDR functionality b/c driver didn't use
395 }
396 break;
397
398 case SRR:
399 reg = regs.srr;
400 break;
401
402 case MIBC:
403 reg = regs.mibc;
404 reg &= ~(MIBC_MIBS | MIBC_ACLR);
405 break;
406
407 case VRCR:
408 reg = regs.vrcr;
409 break;
410
411 case VTCR:
412 reg = regs.vtcr;
413 break;
414
415 case VDR:
416 reg = regs.vdr;
417 break;
418
419 case CCSR:
420 reg = regs.ccsr;
421 break;
422
423 case TBICR:
424 reg = regs.tbicr;
425 break;
426
427 case TBISR:
428 reg = regs.tbisr;
429 break;
430
431 case TANAR:
432 reg = regs.tanar;
433 break;
434
435 case TANLPAR:
436 reg = regs.tanlpar;
437 break;
438
439 case TANER:
440 reg = regs.taner;
441 break;
442
443 case TESR:
444 reg = regs.tesr;
445 break;
446
447 default:
448 panic("reading unimplemented register: addr = %#x", daddr);
449 }
450
451 DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
452 daddr, reg, reg);
453 }
454 break;
455
456 default:
457 panic("accessing register with invalid size: addr=%#x, size=%d",
458 daddr, req->size);
459 }
460
461 return No_Fault;
462 }
463
464 Fault
465 NSGigE::write(MemReqPtr &req, const uint8_t *data)
466 {
467 Addr daddr = req->paddr & 0xfff;
468 DPRINTF(EthernetPIO, "write da=%#x pa=%#x va=%#x size=%d\n",
469 daddr, req->paddr, req->vaddr, req->size);
470
471 if (daddr > LAST && daddr <= RESERVED) {
472 panic("Accessing reserved register");
473 } else if (daddr > RESERVED && daddr <= 0x3FC) {
474 WriteConfig(daddr & 0xff, req->size, *(uint32_t *)data);
475 return No_Fault;
476 } else if (daddr > 0x3FC)
477 panic("Something is messed up!\n");
478
479 if (req->size == sizeof(uint32_t)) {
480 uint32_t reg = *(uint32_t *)data;
481 DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);
482
483 switch (daddr) {
484 case CR:
485 regs.command = reg;
486 if ((reg & (CR_TXE | CR_TXD)) == (CR_TXE | CR_TXD)) {
487 txHalt = true;
488 } else if (reg & CR_TXE) {
489 //the kernel is enabling the transmit machine
490 if (txState == txIdle)
491 txKick();
492 } else if (reg & CR_TXD) {
493 txHalt = true;
494 }
495
496 if ((reg & (CR_RXE | CR_RXD)) == (CR_RXE | CR_RXD)) {
497 rxHalt = true;
498 } else if (reg & CR_RXE) {
499 if (rxState == rxIdle) {
500 rxKick();
501 }
502 } else if (reg & CR_RXD) {
503 rxHalt = true;
504 }
505
506 if (reg & CR_TXR)
507 txReset();
508
509 if (reg & CR_RXR)
510 rxReset();
511
512 if (reg & CR_SWI)
513 devIntrPost(ISR_SWI);
514
515 if (reg & CR_RST) {
516 txReset();
517 rxReset();
518
519 regsReset();
520 }
521 break;
522
523 case CFG:
524 if (reg & CFG_LNKSTS || reg & CFG_SPDSTS || reg & CFG_DUPSTS
525 || reg & CFG_RESERVED || reg & CFG_T64ADDR
526 || reg & CFG_PCI64_DET)
527 panic("writing to read-only or reserved CFG bits!\n");
528
529 regs.config |= reg & ~(CFG_LNKSTS | CFG_SPDSTS | CFG_DUPSTS | CFG_RESERVED |
530 CFG_T64ADDR | CFG_PCI64_DET);
531
532 // All these #if 0's are for bits the kernel doesn't appear to need implemented.
533 // If a problem turns out to involve one of them, functionality may need to be added here.
534 #if 0
535 if (reg & CFG_TBI_EN) ;
536 if (reg & CFG_MODE_1000) ;
537 #endif
538
539 if (reg & CFG_AUTO_1000)
540 panic("CFG_AUTO_1000 not implemented!\n");
541
542 #if 0
543 if (reg & CFG_PINT_DUPSTS || reg & CFG_PINT_LNKSTS || reg & CFG_PINT_SPDSTS) ;
544 if (reg & CFG_TMRTEST) ;
545 if (reg & CFG_MRM_DIS) ;
546 if (reg & CFG_MWI_DIS) ;
547
548 if (reg & CFG_T64ADDR)
549 panic("CFG_T64ADDR is read only register!\n");
550
551 if (reg & CFG_PCI64_DET)
552 panic("CFG_PCI64_DET is read only register!\n");
553
554 if (reg & CFG_DATA64_EN) ;
555 if (reg & CFG_M64ADDR) ;
556 if (reg & CFG_PHY_RST) ;
557 if (reg & CFG_PHY_DIS) ;
558 #endif
559
560 if (reg & CFG_EXTSTS_EN)
561 extstsEnable = true;
562 else
563 extstsEnable = false;
564
565 #if 0
566 if (reg & CFG_REQALG) ;
567 if (reg & CFG_SB) ;
568 if (reg & CFG_POW) ;
569 if (reg & CFG_EXD) ;
570 if (reg & CFG_PESEL) ;
571 if (reg & CFG_BROM_DIS) ;
572 if (reg & CFG_EXT_125) ;
573 if (reg & CFG_BEM) ;
574 #endif
575 break;
576
577 case MEAR:
578 regs.mear = reg;
579 /* since phy is completely faked, MEAR_MD* don't matter
580 and since the driver never uses MEAR_EE*, they don't matter */
581 #if 0
582 if (reg & MEAR_EEDI) ;
583 if (reg & MEAR_EEDO) ; //this one is read only
584 if (reg & MEAR_EECLK) ;
585 if (reg & MEAR_EESEL) ;
586 if (reg & MEAR_MDIO) ;
587 if (reg & MEAR_MDDIR) ;
588 if (reg & MEAR_MDC) ;
589 #endif
590 break;
591
592 case PTSCR:
593 regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
594 /* these control BISTs for various parts of the chip - we don't model
595 them, so just pretend the BIST completed immediately */
596 if (reg & PTSCR_RBIST_EN)
597 regs.ptscr |= PTSCR_RBIST_DONE;
598 if (reg & PTSCR_EEBIST_EN)
599 regs.ptscr &= ~PTSCR_EEBIST_EN;
600 if (reg & PTSCR_EELOAD_EN)
601 regs.ptscr &= ~PTSCR_EELOAD_EN;
602 break;
603
604 case ISR: /* writing to the ISR has no effect */
605 panic("ISR is a read only register!\n");
606
607 case IMR:
608 regs.imr = reg;
609 devIntrChangeMask();
610 break;
611
612 case IER:
613 regs.ier = reg;
614 break;
615
616 case IHR:
617 regs.ihr = reg;
618 /* not going to implement real interrupt holdoff */
619 break;
620
621 case TXDP:
622 regs.txdp = (reg & 0xFFFFFFFC);
623 assert(txState == txIdle);
624 CTDD = false;
625 break;
626
627 case TXDP_HI:
628 regs.txdp_hi = reg;
629 break;
630
631 case TXCFG:
632 regs.txcfg = reg;
633 #if 0
634 if (reg & TXCFG_CSI) ;
635 if (reg & TXCFG_HBI) ;
636 if (reg & TXCFG_MLB) ;
637 if (reg & TXCFG_ATP) ;
638 if (reg & TXCFG_ECRETRY) ; /* this could easily be implemented, but
639 considering the network is just a fake
640 pipe, wouldn't make sense to do this */
641
642 if (reg & TXCFG_BRST_DIS) ;
643 #endif
644
645
646 /* we handle our own DMA, ignore the kernel's exhortations */
647 if (reg & TXCFG_MXDMA) ;
648
649 break;
650
651 case GPIOR:
652 regs.gpior = reg;
653 /* these just control general purpose i/o pins, don't matter */
654 break;
655
656 case RXDP:
657 regs.rxdp = reg;
658 break;
659
660 case RXDP_HI:
661 regs.rxdp_hi = reg;
662 break;
663
664 case RXCFG:
665 regs.rxcfg = reg;
666 #if 0
667 if (reg & RXCFG_AEP) ;
668 if (reg & RXCFG_ARP) ;
669 if (reg & RXCFG_STRIPCRC) ;
670 if (reg & RXCFG_RX_RD) ;
671 if (reg & RXCFG_ALP) ;
672 if (reg & RXCFG_AIRL) ;
673 #endif
674
675 /* we handle our own DMA, ignore what kernel says about it */
676 if (reg & RXCFG_MXDMA) ;
677
678 #if 0
679 if (reg & (RXCFG_DRTH | RXCFG_DRTH0)) ;
680 #endif
681 break;
682
683 case PQCR:
684 /* there is no priority queueing used in the linux 2.6 driver */
685 regs.pqcr = reg;
686 break;
687
688 case WCSR:
689 /* not going to implement wake on LAN */
690 regs.wcsr = reg;
691 break;
692
693 case PCR:
694 /* not going to implement pause control */
695 regs.pcr = reg;
696 break;
697
698 case RFCR:
699 regs.rfcr = reg;
700
701 rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
702
703 acceptBroadcast = (reg & RFCR_AAB) ? true : false;
704
705 acceptMulticast = (reg & RFCR_AAM) ? true : false;
706
707 acceptUnicast = (reg & RFCR_AAU) ? true : false;
708
709 acceptPerfect = (reg & RFCR_APM) ? true : false;
710
711 acceptArp = (reg & RFCR_AARP) ? true : false;
712
713 if (reg & RFCR_APAT) ;
714 // panic("RFCR_APAT not implemented!\n");
715
716 if (reg & RFCR_MHEN || reg & RFCR_UHEN)
717 panic("hash filtering not implemented!\n");
718
719 if (reg & RFCR_ULM)
720 panic("RFCR_ULM not implemented!\n");
721
722 break;
723
724 case RFDR:
725 panic("the driver never writes to RFDR, something is wrong!\n");
726
727 case BRAR:
728 panic("the driver never uses BRAR, something is wrong!\n");
729
730 case BRDR:
731 panic("the driver never uses BRDR, something is wrong!\n");
732
733 case SRR:
734 panic("SRR is read only register!\n");
735
736 case MIBC:
737 panic("the driver never uses MIBC, something is wrong!\n");
738
739 case VRCR:
740 regs.vrcr = reg;
741 break;
742
743 case VTCR:
744 regs.vtcr = reg;
745 break;
746
747 case VDR:
748 panic("the driver never uses VDR, something is wrong!\n");
749 break;
750
751 case CCSR:
752 /* not going to implement clockrun stuff */
753 regs.ccsr = reg;
754 break;
755
756 case TBICR:
757 regs.tbicr = reg;
758 if (reg & TBICR_MR_LOOPBACK)
759 panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
760
761 if (reg & TBICR_MR_AN_ENABLE) {
762 regs.tanlpar = regs.tanar;
763 regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
764 }
765
766 #if 0
767 if (reg & TBICR_MR_RESTART_AN) ;
768 #endif
769
770 break;
771
772 case TBISR:
773 panic("TBISR is read only register!\n");
774
775 case TANAR:
776 regs.tanar = reg;
777 if (reg & TANAR_PS2)
778 panic("this isn't used in driver, something wrong!\n");
779
780 if (reg & TANAR_PS1)
781 panic("this isn't used in driver, something wrong!\n");
782 break;
783
784 case TANLPAR:
785 panic("this should only be written to by the fake phy!\n");
786
787 case TANER:
788 panic("TANER is read only register!\n");
789
790 case TESR:
791 regs.tesr = reg;
792 break;
793
794 default:
795 panic("thought i covered all the register, what is this? addr=%#x",
796 daddr);
797 }
798 } else
799 panic("Invalid Request Size");
800
801 return No_Fault;
802 }
803
804 void
805 NSGigE::devIntrPost(uint32_t interrupts)
806 {
807 bool delay = false;
808
809 if (interrupts & ISR_RESERVE)
810 panic("Cannot set a reserved interrupt");
811
812 if (interrupts & ISR_TXRCMP)
813 regs.isr |= ISR_TXRCMP;
814
815 if (interrupts & ISR_RXRCMP)
816 regs.isr |= ISR_RXRCMP;
817
818 //ISR_DPERR not implemented
819 //ISR_SSERR not implemented
820 //ISR_RMABT not implemented
821 //ISR_RXSOVR not implemented
822 //ISR_HIBINT not implemented
823 //ISR_PHY not implemented
824 //ISR_PME not implemented
825
826 if (interrupts & ISR_SWI)
827 regs.isr |= ISR_SWI;
828
829 //ISR_MIB not implemented
830 //ISR_TXURN not implemented
831
832 if (interrupts & ISR_TXIDLE)
833 regs.isr |= ISR_TXIDLE;
834
835 if (interrupts & ISR_TXERR)
836 regs.isr |= ISR_TXERR;
837
838 if (interrupts & ISR_TXDESC)
839 regs.isr |= ISR_TXDESC;
840
841 if (interrupts & ISR_TXOK) {
842 regs.isr |= ISR_TXOK;
843 delay = true;
844 }
845
846 if (interrupts & ISR_RXORN)
847 regs.isr |= ISR_RXORN;
848
849 if (interrupts & ISR_RXIDLE)
850 regs.isr |= ISR_RXIDLE;
851
852 //ISR_RXEARLY not implemented
853
854 if (interrupts & ISR_RXERR)
855 regs.isr |= ISR_RXERR;
856
857 if (interrupts & ISR_RXDESC)
858 regs.isr |= ISR_RXDESC;
859
860 if (interrupts & ISR_RXOK) {
861 delay = true;
862 regs.isr |= ISR_RXOK;
863 }
864
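// Only raise the CPU interrupt if at least one of the posted causes is
// unmasked; ISR_TXOK and ISR_RXOK are posted intrDelay ticks later rather
// than immediately.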
865 if ((regs.isr & regs.imr)) {
866 Tick when = curTick;
867 if (delay)
868 when += intrDelay;
869 cpuIntrPost(when);
870 }
871
872 DPRINTF(Ethernet, "interrupt posted intr=%#x isr=%#x imr=%#x\n",
873 interrupts, regs.isr, regs.imr);
874 }
875
876 void
877 NSGigE::devIntrClear(uint32_t interrupts)
878 {
879 if (interrupts & ISR_RESERVE)
880 panic("Cannot clear a reserved interrupt");
881
882 if (interrupts & ISR_TXRCMP)
883 regs.isr &= ~ISR_TXRCMP;
884
885 if (interrupts & ISR_RXRCMP)
886 regs.isr &= ~ISR_RXRCMP;
887
888 //ISR_DPERR not implemented
889 //ISR_SSERR not implemented
890 //ISR_RMABT not implemented
891 //ISR_RXSOVR not implemented
892 //ISR_HIBINT not implemented
893 //ISR_PHY not implemented
894 //ISR_PME not implemented
895
896 if (interrupts & ISR_SWI)
897 regs.isr &= ~ISR_SWI;
898
899 //ISR_MIB not implemented
900 //ISR_TXURN not implemented
901
902 if (interrupts & ISR_TXIDLE)
903 regs.isr &= ~ISR_TXIDLE;
904
905 if (interrupts & ISR_TXERR)
906 regs.isr &= ~ISR_TXERR;
907
908 if (interrupts & ISR_TXDESC)
909 regs.isr &= ~ISR_TXDESC;
910
911 if (interrupts & ISR_TXOK)
912 regs.isr &= ~ISR_TXOK;
913
914 if (interrupts & ISR_RXORN)
915 regs.isr &= ~ISR_RXORN;
916
917 if (interrupts & ISR_RXIDLE)
918 regs.isr &= ~ISR_RXIDLE;
919
920 //ISR_RXEARLY not implemented
921
922 if (interrupts & ISR_RXERR)
923 regs.isr &= ~ISR_RXERR;
924
925 if (interrupts & ISR_RXDESC)
926 regs.isr &= ~ISR_RXDESC;
927
928 if (interrupts & ISR_RXOK)
929 regs.isr &= ~ISR_RXOK;
930
931 if (!(regs.isr & regs.imr))
932 cpuIntrClear();
933
934 DPRINTF(Ethernet, "interrupt cleared intr=%x isr=%x imr=%x\n",
935 interrupts, regs.isr, regs.imr);
936 }
937
938 void
939 NSGigE::devIntrChangeMask()
940 {
941 DPRINTF(Ethernet, "interrupt mask changed\n");
942
943 if (regs.isr & regs.imr)
944 cpuIntrPost(curTick);
945 else
946 cpuIntrClear();
947 }
948
949 void
950 NSGigE::cpuIntrPost(Tick when)
951 {
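// Keep whichever interrupt time is earliest: if an interrupt is already
// pending at or before 'when', leave it alone; otherwise squash any
// previously scheduled event and (re)schedule for 'when'.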
952 if (when > intrTick && intrTick != 0)
953 return;
954
955 intrTick = when;
956
957 if (intrEvent) {
958 intrEvent->squash();
959 intrEvent = 0;
960 }
961
962 if (when < curTick) {
963 cpuInterrupt();
964 } else {
965 intrEvent = new IntrEvent(this, true);
966 intrEvent->schedule(intrTick);
967 }
968 }
969
970 void
971 NSGigE::cpuInterrupt()
972 {
973 // Don't send an interrupt if there's already one
974 if (cpuPendingIntr)
975 return;
976
977 // Don't send an interrupt if it's supposed to be delayed
978 if (intrTick > curTick)
979 return;
980
981 // Whether or not there's a pending interrupt, we don't care about
982 // it anymore
983 intrEvent = 0;
984 intrTick = 0;
985
986 // Send interrupt
987 cpuPendingIntr = true;
988 /** @todo rework the intctrl to be tsunami ok */
989 //intctrl->post(TheISA::INTLEVEL_IRQ1, TheISA::INTINDEX_ETHERNET);
990 tsunami->cchip->postDRIR(configData->config.hdr.pci0.interruptLine);
991 }
992
993 void
994 NSGigE::cpuIntrClear()
995 {
996 if (cpuPendingIntr) {
997 cpuPendingIntr = false;
998 /** @todo rework the intctrl to be tsunami ok */
999 //intctrl->clear(TheISA::INTLEVEL_IRQ1, TheISA::INTINDEX_ETHERNET);
1000 tsunami->cchip->clearDRIR(configData->config.hdr.pci0.interruptLine);
1001 }
1002 }
1003
1004 bool
1005 NSGigE::cpuIntrPending() const
1006 { return cpuPendingIntr; }
1007
1008 void
1009 NSGigE::txReset()
1010 {
1011
1012 DPRINTF(Ethernet, "transmit reset\n");
1013
1014 CTDD = false;
1015 txFifoCnt = 0;
1016 txFifoAvail = MAX_TX_FIFO_SIZE;
1017 txHalt = false;
1018 txFragPtr = 0;
1019 assert(txDescCnt == 0);
1020 txFifo.clear();
1021 regs.command &= ~CR_TXE;
1022 txState = txIdle;
1023 assert(txDmaState == dmaIdle);
1024 }
1025
1026 void
1027 NSGigE::rxReset()
1028 {
1029 DPRINTF(Ethernet, "receive reset\n");
1030
1031 CRDD = false;
1032 assert(rxPktBytes == 0);
1033 rxFifoCnt = 0;
1034 rxHalt = false;
1035 rxFragPtr = 0;
1036 assert(rxDescCnt == 0);
1037 assert(rxDmaState == dmaIdle);
1038 rxFifo.clear();
1039 regs.command &= ~CR_RXE;
1040 rxState = rxIdle;
1041 }
1042
1043 void
1044 NSGigE::rxDmaReadCopy()
1045 {
1046 assert(rxDmaState == dmaReading);
1047
1048 memcpy(rxDmaData, physmem->dma_addr(rxDmaAddr, rxDmaLen), rxDmaLen);
1049 rxDmaState = dmaIdle;
1050
1051 DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
1052 rxDmaAddr, rxDmaLen);
1053 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1054 }
1055
1056 bool
1057 NSGigE::doRxDmaRead()
1058 {
1059 assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
1060 rxDmaState = dmaReading;
1061
1062 if (dmaInterface && !rxDmaFree) {
1063 if (dmaInterface->busy())
1064 rxDmaState = dmaReadWaiting;
1065 else
1066 dmaInterface->doDMA(Read, rxDmaAddr, rxDmaLen, curTick,
1067 &rxDmaReadEvent);
1068 return true;
1069 }
1070
1071 if (dmaReadDelay == 0 && dmaReadFactor == 0) {
1072 rxDmaReadCopy();
1073 return false;
1074 }
1075
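// Model the DMA latency as a fixed delay plus a per-64-byte-chunk factor
// (the length is rounded up to a whole number of 64-byte chunks).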
1076 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
1077 Tick start = curTick + dmaReadDelay + factor;
1078 rxDmaReadEvent.schedule(start);
1079 return true;
1080 }
1081
1082 void
1083 NSGigE::rxDmaReadDone()
1084 {
1085 assert(rxDmaState == dmaReading);
1086 rxDmaReadCopy();
1087
1088 // If the transmit state machine has a pending DMA, let it go first
1089 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1090 txKick();
1091
1092 rxKick();
1093 }
1094
1095 void
1096 NSGigE::rxDmaWriteCopy()
1097 {
1098 assert(rxDmaState == dmaWriting);
1099
1100 memcpy(physmem->dma_addr(rxDmaAddr, rxDmaLen), rxDmaData, rxDmaLen);
1101 rxDmaState = dmaIdle;
1102
1103 DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
1104 rxDmaAddr, rxDmaLen);
1105 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1106 }
1107
1108 bool
1109 NSGigE::doRxDmaWrite()
1110 {
1111 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
1112 rxDmaState = dmaWriting;
1113
1114 if (dmaInterface && !rxDmaFree) {
1115 if (dmaInterface->busy())
1116 rxDmaState = dmaWriteWaiting;
1117 else
1118 dmaInterface->doDMA(WriteInvalidate, rxDmaAddr, rxDmaLen, curTick,
1119 &rxDmaWriteEvent);
1120 return true;
1121 }
1122
1123 if (dmaWriteDelay == 0 && dmaWriteFactor == 0) {
1124 rxDmaWriteCopy();
1125 return false;
1126 }
1127
1128 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
1129 Tick start = curTick + dmaWriteDelay + factor;
1130 rxDmaWriteEvent.schedule(start);
1131 return true;
1132 }
1133
1134 void
1135 NSGigE::rxDmaWriteDone()
1136 {
1137 assert(rxDmaState == dmaWriting);
1138 rxDmaWriteCopy();
1139
1140 // If the transmit state machine has a pending DMA, let it go first
1141 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1142 txKick();
1143
1144 rxKick();
1145 }
1146
1147 void
1148 NSGigE::rxKick()
1149 {
1150 DPRINTF(Ethernet, "receive kick state=%s (rxBuf.size=%d)\n",
1151 NsRxStateStrings[rxState], rxFifo.size());
1152
1153 if (rxKickTick > curTick) {
1154 DPRINTF(Ethernet, "receive kick exiting, can't run till %d\n",
1155 rxKickTick);
1156 return;
1157 }
1158
1159 next:
1160 switch(rxDmaState) {
1161 case dmaReadWaiting:
1162 if (doRxDmaRead())
1163 goto exit;
1164 break;
1165 case dmaWriteWaiting:
1166 if (doRxDmaWrite())
1167 goto exit;
1168 break;
1169 default:
1170 break;
1171 }
1172
1173 // See the state machine in the spec sheet for details.
1174 // The way this works: when the work for one state is finished and the
1175 // next state can be entered directly, jump to the label "next". If there
1176 // is intermediate work (such as a DMA) that prevents moving on yet, jump
1177 // to "exit" and leave the loop; when the DMA completes, its event handler
1178 // calls back into this function and the loop resumes where it left off.
1179 switch (rxState) {
1180 case rxIdle:
1181 if (!(regs.command & CR_RXE)) {
1182 DPRINTF(Ethernet, "Receive Disabled! Nothing to do.\n");
1183 goto exit;
1184 }
1185
1186 if (CRDD) {
1187 rxState = rxDescRefr;
1188
1189 rxDmaAddr = regs.rxdp & 0x3fffffff;
1190 rxDmaData = &rxDescCache.link;
1191 rxDmaLen = sizeof(rxDescCache.link);
1192 rxDmaFree = dmaDescFree;
1193
1194 if (doRxDmaRead())
1195 goto exit;
1196 } else {
1197 rxState = rxDescRead;
1198
1199 rxDmaAddr = regs.rxdp & 0x3fffffff;
1200 rxDmaData = &rxDescCache;
1201 rxDmaLen = sizeof(ns_desc);
1202 rxDmaFree = dmaDescFree;
1203
1204 if (doRxDmaRead())
1205 goto exit;
1206 }
1207 break;
1208
1209 case rxDescRefr:
1210 if (rxDmaState != dmaIdle)
1211 goto exit;
1212
1213 rxState = rxAdvance;
1214 break;
1215
1216 case rxDescRead:
1217 if (rxDmaState != dmaIdle)
1218 goto exit;
1219
1220 DPRINTF(Ethernet,
1221 "rxDescCache:\n\tlink=%#x\n\tbufptr=%#x\n\tcmdsts=%#x\n\textsts=%#x\n"
1222 ,rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts,
1223 rxDescCache.extsts);
1224
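// For receive descriptors, CMDSTS_OWN set means this descriptor has already
// been filled by the device and not yet reclaimed by the driver, so there is
// nowhere to put incoming data and the machine goes idle.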
1225 if (rxDescCache.cmdsts & CMDSTS_OWN) {
1226 rxState = rxIdle;
1227 } else {
1228 rxState = rxFifoBlock;
1229 rxFragPtr = rxDescCache.bufptr;
1230 rxDescCnt = rxDescCache.cmdsts & CMDSTS_LEN_MASK;
1231 }
1232 break;
1233
1234 case rxFifoBlock:
1235 if (!rxPacket) {
1236 /**
1237 * @todo in reality, we should be able to start processing
1238 * the packet as it arrives, and not have to wait for the
1239 * full packet to be in the receive fifo.
1240 */
1241 if (rxFifo.empty())
1242 goto exit;
1243
1244 // If we don't have a packet, grab a new one from the fifo.
1245 rxPacket = rxFifo.front();
1246 rxPktBytes = rxPacket->length;
1247 rxPacketBufPtr = rxPacket->data;
1248
1249 // sanity check - i think the driver behaves like this
1250 assert(rxDescCnt >= rxPktBytes);
1251
1252 // Must clear the value before popping to decrement the
1253 // reference count
1254 rxFifo.front() = NULL;
1255 rxFifo.pop_front();
1256 }
1257
1258
1259 // don't need the && rxDescCnt > 0 if the driver sanity check above holds
1260 if (rxPktBytes > 0) {
1261 rxState = rxFragWrite;
1262 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity check holds
1263 rxXferLen = rxPktBytes;
1264
1265 rxDmaAddr = rxFragPtr & 0x3fffffff;
1266 rxDmaData = rxPacketBufPtr;
1267 rxDmaLen = rxXferLen;
1268 rxDmaFree = dmaDataFree;
1269
1270 if (doRxDmaWrite())
1271 goto exit;
1272
1273 } else {
1274 rxState = rxDescWrite;
1275
1276 //if (rxPktBytes == 0) { /* packet is done */
1277 assert(rxPktBytes == 0);
1278
1279 rxDescCache.cmdsts |= CMDSTS_OWN;
1280 rxDescCache.cmdsts &= ~CMDSTS_MORE;
1281 rxDescCache.cmdsts |= CMDSTS_OK;
1282 rxDescCache.cmdsts &= 0xffff0000;
1283 rxDescCache.cmdsts += rxPacket->length; //i.e. set CMDSTS_SIZE
1284
1285 #if 0
1286 /* the driver only uses these for its own statistics keeping,
1287 which we don't care about; they aren't necessary for functionality
1288 and computing them would just slow us down. if a later driver
1289 version uses them for functional purposes, re-enable this block
1290 */
1291 if (rxFilterEnable) {
1292 rxDescCache.cmdsts &= ~CMDSTS_DEST_MASK;
1293 if (rxFifo.front()->IsUnicast())
1294 rxDescCache.cmdsts |= CMDSTS_DEST_SELF;
1295 if (rxFifo.front()->IsMulticast())
1296 rxDescCache.cmdsts |= CMDSTS_DEST_MULTI;
1297 if (rxFifo.front()->IsBroadcast())
1298 rxDescCache.cmdsts |= CMDSTS_DEST_MASK;
1299 }
1300 #endif
1301
1302 eth_header *eth = (eth_header *) rxPacket->data;
1303 // eth->type 0x800 indicates that this is an IP packet.
1304 if (eth->type == 0x800 && extstsEnable) {
1305 rxDescCache.extsts |= EXTSTS_IPPKT;
1306 if (!ipChecksum(rxPacket, false))
1307 rxDescCache.extsts |= EXTSTS_IPERR;
1308 ip_header *ip = rxFifo.front()->getIpHdr();
1309
1310 if (ip->protocol == 6) {
1311 rxDescCache.extsts |= EXTSTS_TCPPKT;
1312 if (!tcpChecksum(rxPacket, false))
1313 rxDescCache.extsts |= EXTSTS_TCPERR;
1314 } else if (ip->protocol == 17) {
1315 rxDescCache.extsts |= EXTSTS_UDPPKT;
1316 if (!udpChecksum(rxPacket, false))
1317 rxDescCache.extsts |= EXTSTS_UDPERR;
1318 }
1319 }
1320
1321 rxFifoCnt -= rxPacket->length;
1322 rxPacket = 0;
1323
1324 /* the driver seems to always receive into descriptor buffers
1325 of size 1514, so a packet is never split across multiple
1326 descriptors on the receive side; that case isn't implemented,
1327 hence the assert above.
1328 */
1329
1330 DPRINTF(Ethernet, "rxDesc writeback:\n\tcmdsts=%#x\n\textsts=%#x\n",
1331 rxDescCache.cmdsts, rxDescCache.extsts);
1332
1333 rxDmaAddr = (regs.rxdp + offsetof(ns_desc, cmdsts)) & 0x3fffffff;
1334 rxDmaData = &(rxDescCache.cmdsts);
1335 rxDmaLen = sizeof(rxDescCache.cmdsts) + sizeof(rxDescCache.extsts);
1336 rxDmaFree = dmaDescFree;
1337
1338 if (doRxDmaWrite())
1339 goto exit;
1340 }
1341 break;
1342
1343 case rxFragWrite:
1344 if (rxDmaState != dmaIdle)
1345 goto exit;
1346
1347 rxPacketBufPtr += rxXferLen;
1348 rxFragPtr += rxXferLen;
1349 rxPktBytes -= rxXferLen;
1350
1351 rxState = rxFifoBlock;
1352 break;
1353
1354 case rxDescWrite:
1355 if (rxDmaState != dmaIdle)
1356 goto exit;
1357
1358 assert(rxDescCache.cmdsts & CMDSTS_OWN);
1359
1360 assert(rxPacket == 0);
1361 devIntrPost(ISR_RXOK);
1362
1363 if (rxDescCache.cmdsts & CMDSTS_INTR)
1364 devIntrPost(ISR_RXDESC);
1365
1366 if (rxHalt) {
1367 rxState = rxIdle;
1368 rxHalt = false;
1369 } else
1370 rxState = rxAdvance;
1371 break;
1372
1373 case rxAdvance:
1374 if (rxDescCache.link == 0) {
1375 rxState = rxIdle;
1376 return;
1377 } else {
1378 rxState = rxDescRead;
1379 regs.rxdp = rxDescCache.link;
1380 CRDD = false;
1381
1382 rxDmaAddr = regs.rxdp & 0x3fffffff;
1383 rxDmaData = &rxDescCache;
1384 rxDmaLen = sizeof(ns_desc);
1385 rxDmaFree = dmaDescFree;
1386
1387 if (doRxDmaRead())
1388 goto exit;
1389 }
1390 break;
1391
1392 default:
1393 panic("Invalid rxState!");
1394 }
1395
1396
1397 DPRINTF(Ethernet, "entering next rx state = %s\n",
1398 NsRxStateStrings[rxState]);
1399
1400 if (rxState == rxIdle) {
1401 regs.command &= ~CR_RXE;
1402 devIntrPost(ISR_RXIDLE);
1403 return;
1404 }
1405
1406 goto next;
1407
1408 exit:
1409 /**
1410 * @todo do we want to schedule a future kick?
1411 */
1412 DPRINTF(Ethernet, "rx state machine exited state=%s\n",
1413 NsRxStateStrings[rxState]);
1414 }
1415
1416 void
1417 NSGigE::transmit()
1418 {
1419 if (txFifo.empty()) {
1420 DPRINTF(Ethernet, "nothing to transmit\n");
1421 return;
1422 }
1423
1424 if (interface->sendPacket(txFifo.front())) {
1425 DPRINTF(Ethernet, "transmit packet\n");
1426 DDUMP(Ethernet, txFifo.front()->data, txFifo.front()->length);
1427 txBytes += txFifo.front()->length;
1428 txPackets++;
1429
1430 txFifoCnt -= (txFifo.front()->length - txPktXmitted);
1431 txPktXmitted = 0;
1432 txFifo.front() = NULL;
1433 txFifo.pop_front();
1434
1435 /* normally the descriptor would be written back here, and ONLY after
1436 that completes would this interrupt be sent. but since our transmits
1437 never actually fail, just post the interrupt here rather than
1438 complicating the code; it's functionally the same.
1439 */
1440 devIntrPost(ISR_TXOK);
1441 }
1442
1443 if (!txFifo.empty() && !txEvent.scheduled()) {
1444 DPRINTF(Ethernet, "reschedule transmit\n");
1445 txEvent.schedule(curTick + 1000);
1446 }
1447 }
1448
1449 void
1450 NSGigE::txDmaReadCopy()
1451 {
1452 assert(txDmaState == dmaReading);
1453
1454 memcpy(txDmaData, physmem->dma_addr(txDmaAddr, txDmaLen), txDmaLen);
1455 txDmaState = dmaIdle;
1456
1457 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
1458 txDmaAddr, txDmaLen);
1459 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1460 }
1461
1462 bool
1463 NSGigE::doTxDmaRead()
1464 {
1465 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
1466 txDmaState = dmaReading;
1467
1468 if (dmaInterface && !txDmaFree) {
1469 if (dmaInterface->busy())
1470 txDmaState = dmaReadWaiting;
1471 else
1472 dmaInterface->doDMA(Read, txDmaAddr, txDmaLen, curTick,
1473 &txDmaReadEvent);
1474 return true;
1475 }
1476
1477 if (dmaReadDelay == 0 && dmaReadFactor == 0.0) {
1478 txDmaReadCopy();
1479 return false;
1480 }
1481
1482 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
1483 Tick start = curTick + dmaReadDelay + factor;
1484 txDmaReadEvent.schedule(start);
1485 return true;
1486 }
1487
1488 void
1489 NSGigE::txDmaReadDone()
1490 {
1491 assert(txDmaState == dmaReading);
1492 txDmaReadCopy();
1493
1494 // If the receive state machine has a pending DMA, let it go first
1495 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1496 rxKick();
1497
1498 txKick();
1499 }
1500
1501 void
1502 NSGigE::txDmaWriteCopy()
1503 {
1504 assert(txDmaState == dmaWriting);
1505
1506 memcpy(physmem->dma_addr(txDmaAddr, txDmaLen), txDmaData, txDmaLen);
1507 txDmaState = dmaIdle;
1508
1509 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
1510 txDmaAddr, txDmaLen);
1511 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1512 }
1513
1514 bool
1515 NSGigE::doTxDmaWrite()
1516 {
1517 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
1518 txDmaState = dmaWriting;
1519
1520 if (dmaInterface && !txDmaFree) {
1521 if (dmaInterface->busy())
1522 txDmaState = dmaWriteWaiting;
1523 else
1524 dmaInterface->doDMA(WriteInvalidate, txDmaAddr, txDmaLen, curTick,
1525 &txDmaWriteEvent);
1526 return true;
1527 }
1528
1529 if (dmaWriteDelay == 0 && dmaWriteFactor == 0.0) {
1530 txDmaWriteCopy();
1531 return false;
1532 }
1533
1534 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
1535 Tick start = curTick + dmaWriteDelay + factor;
1536 txDmaWriteEvent.schedule(start);
1537 return true;
1538 }
1539
1540 void
1541 NSGigE::txDmaWriteDone()
1542 {
1543 assert(txDmaState == dmaWriting);
1544 txDmaWriteCopy();
1545
1546 // If the receive state machine has a pending DMA, let it go first
1547 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1548 rxKick();
1549
1550 txKick();
1551 }
1552
1553 void
1554 NSGigE::txKick()
1555 {
1556 DPRINTF(Ethernet, "transmit kick state=%s\n", NsTxStateStrings[txState]);
1557
1558 if (txKickTick > curTick) {
1559 DPRINTF(Ethernet, "transmit kick exiting, can't run till %d\n",
1560 txKickTick);
1561
1562 return;
1563 }
1564
1565 next:
1566 switch(txDmaState) {
1567 case dmaReadWaiting:
1568 if (doTxDmaRead())
1569 goto exit;
1570 break;
1571 case dmaWriteWaiting:
1572 if (doTxDmaWrite())
1573 goto exit;
1574 break;
1575 default:
1576 break;
1577 }
1578
1579 switch (txState) {
1580 case txIdle:
1581 if (!(regs.command & CR_TXE)) {
1582 DPRINTF(Ethernet, "Transmit disabled. Nothing to do.\n");
1583 goto exit;
1584 }
1585
1586 if (CTDD) {
1587 txState = txDescRefr;
1588
1589 txDmaAddr = regs.txdp & 0x3fffffff;
1590 txDmaData = &txDescCache.link;
1591 txDmaLen = sizeof(txDescCache.link);
1592 txDmaFree = dmaDescFree;
1593
1594 if (doTxDmaRead())
1595 goto exit;
1596
1597 } else {
1598 txState = txDescRead;
1599
1600 txDmaAddr = regs.txdp & 0x3fffffff;
1601 txDmaData = &txDescCache;
1602 txDmaLen = sizeof(ns_desc);
1603 txDmaFree = dmaDescFree;
1604
1605 if (doTxDmaRead())
1606 goto exit;
1607 }
1608 break;
1609
1610 case txDescRefr:
1611 if (txDmaState != dmaIdle)
1612 goto exit;
1613
1614 txState = txAdvance;
1615 break;
1616
1617 case txDescRead:
1618 if (txDmaState != dmaIdle)
1619 goto exit;
1620
1621 DPRINTF(Ethernet,
1622 "txDescCache data:\n\tlink=%#x\n\tbufptr=%#x\n\tcmdsts=%#x\n\textsts=%#x\n"
1623 ,txDescCache.link, txDescCache.bufptr, txDescCache.cmdsts,
1624 txDescCache.extsts);
1625
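// For transmit descriptors the sense is reversed: CMDSTS_OWN set means the
// driver has handed this descriptor (and the data it points to) over to the
// device, so there is something to send.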
1626 if (txDescCache.cmdsts & CMDSTS_OWN) {
1627 txState = txFifoBlock;
1628 txFragPtr = txDescCache.bufptr;
1629 txDescCnt = txDescCache.cmdsts & CMDSTS_LEN_MASK;
1630 } else {
1631 txState = txIdle;
1632 }
1633 break;
1634
1635 case txFifoBlock:
1636 if (!txPacket) {
1637 DPRINTF(Ethernet, "starting the tx of a new packet\n");
1638 txPacket = new EtherPacket;
1639 txPacket->data = new uint8_t[16384];
1640 txPacketBufPtr = txPacket->data;
1641 }
1642
1643 if (txDescCnt == 0) {
1644 DPRINTF(Ethernet, "the txDescCnt == 0, done with descriptor\n");
1645 if (txDescCache.cmdsts & CMDSTS_MORE) {
1646 DPRINTF(Ethernet, "there are more descriptors to come\n");
1647 txState = txDescWrite;
1648
1649 txDescCache.cmdsts &= ~CMDSTS_OWN;
1650
1651 txDmaAddr = (regs.txdp + offsetof(ns_desc, cmdsts)) & 0x3fffffff;
1652 txDmaData = &(txDescCache.cmdsts);
1653 txDmaLen = sizeof(txDescCache.cmdsts);
1654 txDmaFree = dmaDescFree;
1655
1656 if (doTxDmaWrite())
1657 goto exit;
1658
1659 } else { /* this packet is totally done */
1660 DPRINTF(Ethernet, "This packet is done, let's wrap it up\n");
1661 /* deal with the packet that just finished */
1662 if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
1663 if (txDescCache.extsts & EXTSTS_UDPPKT) {
1664 udpChecksum(txPacket, true);
1665 } else if (txDescCache.extsts & EXTSTS_TCPPKT) {
1666 tcpChecksum(txPacket, true);
1667 } else if (txDescCache.extsts & EXTSTS_IPPKT) {
1668 ipChecksum(txPacket, true);
1669 }
1670 }
1671
1672 txPacket->length = txPacketBufPtr - txPacket->data;
1673 /* the receive side can't handle a packet bigger than 1514 bytes,
1674 so make sure we never send one */
1675 assert(txPacket->length <= 1514);
1676 txFifo.push_back(txPacket);
1677
1678
1679 /* the following section is not to spec, but functionally it shouldn't
1680 make any difference. normally the chip waits until the transmit has
1681 actually occurred before writing back the descriptor, because it has
1682 to know whether the transmit succeeded in order to decide whether to
1683 set CMDSTS_OK. in the simulator, however, transmission always
1684 succeeds, and writing this exactly to spec would complicate the
1685 code, so the writeback is just done here
1686 */
1687 txDescCache.cmdsts &= ~CMDSTS_OWN;
1688 txDescCache.cmdsts |= CMDSTS_OK;
1689
1690 DPRINTF(Ethernet,
1691 "txDesc writeback:\n\tcmdsts=%#x\n\textsts=%#x\n",
1692 txDescCache.cmdsts, txDescCache.extsts);
1693
1694 txDmaAddr = (regs.txdp + offsetof(ns_desc, cmdsts)) & 0x3fffffff;
1695 txDmaData = &(txDescCache.cmdsts);
1696 txDmaLen = sizeof(txDescCache.cmdsts) + sizeof(txDescCache.extsts);
1697 txDmaFree = dmaDescFree;
1698
1699 if (doTxDmaWrite())
1700 goto exit;
1701
1702 txPacket = 0;
1703 transmit();
1704
1705 if (txHalt) {
1706 txState = txIdle;
1707 txHalt = false;
1708 } else
1709 txState = txAdvance;
1710 }
1711 } else {
1712 DPRINTF(Ethernet, "this descriptor isn't done yet\n");
1713 /* the fill thresh is in units of 32 bytes, shift right by 8 to get the
1714 value, shift left by 5 to get the real number of bytes */
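// e.g. if the FLTH field holds 4, at least 4 * 32 = 128 bytes of fifo space
// must be free before more fragment data is read in.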
1715 if (txFifoAvail < ((regs.txcfg & TXCFG_FLTH_MASK) >> 3)) {
1716 DPRINTF(Ethernet, "txFifoAvail=%d, regs.txcfg & TXCFG_FLTH_MASK = %#x\n",
1717 txFifoAvail, regs.txcfg & TXCFG_FLTH_MASK);
1718 goto exit;
1719 }
1720
1721 txState = txFragRead;
1722
1723 /* The number of bytes transferred is either whatever is left
1724 in the descriptor (txDescCnt), or if there is not enough
1725 room in the fifo, just whatever room is left in the fifo
1726 */
1727 txXferLen = min<uint32_t>(txDescCnt, txFifoAvail);
1728
1729 txDmaAddr = txFragPtr & 0x3fffffff;
1730 txDmaData = txPacketBufPtr;
1731 txDmaLen = txXferLen;
1732 txDmaFree = dmaDataFree;
1733
1734 if (doTxDmaRead())
1735 goto exit;
1736 }
1737 break;
1738
1739 case txFragRead:
1740 if (txDmaState != dmaIdle)
1741 goto exit;
1742
1743 txPacketBufPtr += txXferLen;
1744 txFragPtr += txXferLen;
1745 txFifoCnt += txXferLen;
1746 txDescCnt -= txXferLen;
1747
1748 txState = txFifoBlock;
1749 break;
1750
1751 case txDescWrite:
1752 if (txDmaState != dmaIdle)
1753 goto exit;
1754
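// The drain threshold (TXCFG_DRTH) is expressed in 32-byte units, hence
// the << 5 to convert it to bytes.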
1755 if (txFifoCnt >= ((regs.txcfg & TXCFG_DRTH_MASK) << 5)) {
1756 if (txFifo.empty()) {
1757 uint32_t xmitted = (uint32_t) (txPacketBufPtr - txPacket->data - txPktXmitted);
1758 txFifoCnt -= xmitted;
1759 txPktXmitted += xmitted;
1760 } else {
1761 transmit();
1762 }
1763 }
1764
1765 if (txDescCache.cmdsts & CMDSTS_INTR) {
1766 devIntrPost(ISR_TXDESC);
1767 }
1768
1769 txState = txAdvance;
1770 break;
1771
1772 case txAdvance:
1773 if (txDescCache.link == 0) {
1774 txState = txIdle;
1775 } else {
1776 txState = txDescRead;
1777 regs.txdp = txDescCache.link;
1778 CTDD = false;
1779
1780 txDmaAddr = txDescCache.link & 0x3fffffff;
1781 txDmaData = &txDescCache;
1782 txDmaLen = sizeof(ns_desc);
1783 txDmaFree = dmaDescFree;
1784
1785 if (doTxDmaRead())
1786 goto exit;
1787 }
1788 break;
1789
1790 default:
1791 panic("invalid state");
1792 }
1793
1794 DPRINTF(Ethernet, "entering next tx state=%s\n",
1795 NsTxStateStrings[txState]);
1796
1797 if (txState == txIdle) {
1798 regs.command &= ~CR_TXE;
1799 devIntrPost(ISR_TXIDLE);
1800 return;
1801 }
1802
1803 goto next;
1804
1805 exit:
1806 /**
1807 * @todo do we want to schedule a future kick?
1808 */
1809 DPRINTF(Ethernet, "tx state machine exited state=%s\n",
1810 NsTxStateStrings[txState]);
1811 }
1812
1813 void
1814 NSGigE::transferDone()
1815 {
1816 if (txFifo.empty())
1817 return;
1818
1819 DPRINTF(Ethernet, "schedule transmit\n");
1820
1821 if (txEvent.scheduled())
1822 txEvent.reschedule(curTick + 1);
1823 else
1824 txEvent.schedule(curTick + 1);
1825 }
1826
1827 bool
1828 NSGigE::rxFilter(PacketPtr packet)
1829 {
1830 bool drop = true;
1831 string type;
1832
1833 if (packet->IsUnicast()) {
1834 type = "unicast";
1835
1836 // If we're accepting all unicast addresses
1837 if (acceptUnicast)
1838 drop = false;
1839
1840 // If we make a perfect match
1841 if ((acceptPerfect)
1842 && (memcmp(rom.perfectMatch, packet->data, sizeof(rom.perfectMatch)) == 0))
1843 drop = false;
1844
1845 eth_header *eth = (eth_header *) packet->data;
1846 if ((acceptArp) && (eth->type == 0x806))
1847 drop = false;
1848
1849 } else if (packet->IsBroadcast()) {
1850 type = "broadcast";
1851
1852 // if we're accepting broadcasts
1853 if (acceptBroadcast)
1854 drop = false;
1855
1856 } else if (packet->IsMulticast()) {
1857 type = "multicast";
1858
1859 // if we're accepting all multicasts
1860 if (acceptMulticast)
1861 drop = false;
1862
1863 } else {
1864 type = "unknown";
1865
1866 // oh well, punt on this one
1867 }
1868
1869 if (drop) {
1870 DPRINTF(Ethernet, "rxFilter drop\n");
1871 DDUMP(EthernetData, packet->data, packet->length);
1872 }
1873
1874 return drop;
1875 }
1876
1877 bool
1878 NSGigE::recvPacket(PacketPtr packet)
1879 {
1880 rxBytes += packet->length;
1881 rxPackets++;
1882
1883 if (rxState == rxIdle) {
1884 DPRINTF(Ethernet, "receive disabled...packet dropped\n");
1885 interface->recvDone();
1886 return true;
1887 }
1888
1889 if (rxFilterEnable && rxFilter(packet)) {
1890 DPRINTF(Ethernet, "packet filtered...dropped\n");
1891 interface->recvDone();
1892 return true;
1893 }
1894
1895 if (rxFifoCnt + packet->length >= MAX_RX_FIFO_SIZE) {
1896 DPRINTF(Ethernet,
1897 "packet will not fit in receive buffer...packet dropped\n");
1898 devIntrPost(ISR_RXORN);
1899 return false;
1900 }
1901
1902 rxFifo.push_back(packet);
1903 rxFifoCnt += packet->length;
1904 interface->recvDone();
1905
1906 rxKick();
1907 return true;
1908 }
1909
1910 /**
1911 * Computes a UDP checksum. If gen is true, the checksum is generated and
1912 * written into the packet header; otherwise the computed value is checked against the value already in the header.
1913 */
1914 bool
1915 NSGigE::udpChecksum(PacketPtr packet, bool gen)
1916 {
1917 udp_header *hdr = (udp_header *) packet->getTransportHdr();
1918
1919 ip_header *ip = packet->getIpHdr();
1920
1921 pseudo_header *pseudo = new pseudo_header;
1922
1923 pseudo->src_ip_addr = ip->src_ip_addr;
1924 pseudo->dest_ip_addr = ip->dest_ip_addr;
1925 pseudo->protocol = ip->protocol;
1926 pseudo->len = hdr->len;
1927
1928 uint16_t cksum = checksumCalc((uint16_t *) pseudo, (uint16_t *) hdr,
1929 (uint32_t) hdr->len);
1930
1931 delete pseudo;
1932 if (gen)
1933 hdr->chksum = cksum;
1934 else
1935 if (cksum != 0)
1936 return false;
1937
1938 return true;
1939 }
1940
1941 bool
1942 NSGigE::tcpChecksum(PacketPtr packet, bool gen)
1943 {
1944 tcp_header *hdr = (tcp_header *) packet->getTransportHdr();
1945
1946 ip_header *ip = packet->getIpHdr();
1947
1948 pseudo_header *pseudo = new pseudo_header;
1949
1950 pseudo->src_ip_addr = ip->src_ip_addr;
1951 pseudo->dest_ip_addr = ip->dest_ip_addr;
1952 pseudo->protocol = ip->protocol;
1953 pseudo->len = ip->dgram_len - (ip->vers_len & 0xf);
1954
1955 uint16_t cksum = checksumCalc((uint16_t *) pseudo, (uint16_t *) hdr,
1956 (uint32_t) pseudo->len);
1957
1958 delete pseudo;
1959 if (gen)
1960 hdr->chksum = cksum;
1961 else
1962 if (cksum != 0)
1963 return false;
1964
1965 return true;
1966 }
1967
1968 bool
1969 NSGigE::ipChecksum(PacketPtr packet, bool gen)
1970 {
1971 ip_header *hdr = packet->getIpHdr();
1972
1973 uint16_t cksum = checksumCalc(NULL, (uint16_t *) hdr, (hdr->vers_len & 0xf));
1974
1975 if (gen)
1976 hdr->hdr_chksum = cksum;
1977 else
1978 if (cksum != 0)
1979 return false;
1980
1981 return true;
1982 }
1983
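// Standard Internet (RFC 1071 style) checksum: sum the buffer (and optional
// pseudo header) as 16-bit words, fold any carries back into the low 16 bits,
// and return the one's complement. Verifying a packet that already contains
// a good checksum therefore yields 0.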
1984 uint16_t
1985 NSGigE::checksumCalc(uint16_t *pseudo, uint16_t *buf, uint32_t len)
1986 {
1987 uint32_t sum = 0;
1988
1989 uint16_t last_pad = 0;
1990 if (len & 1) {
1991 last_pad = buf[len/2] & 0xff;
1992 len--;
1993 sum += last_pad;
1994 }
1995
1996 if (pseudo) {
1997 sum += pseudo[0] + pseudo[1] + pseudo[2] +
1998 pseudo[3] + pseudo[4] + pseudo[5];
1999 }
2000
2001 for (int i=0; i < (len/2); ++i) {
2002 sum += buf[i];
2003 }
2004
2005 while (sum >> 16)
2006 sum = (sum >> 16) + (sum & 0xffff);
2007
2008 return ~sum;
2009 }
2010
2011 //=====================================================================
2012 //
2013 //
2014 void
2015 NSGigE::serialize(ostream &os)
2016 {
2017 // Serialize the PciDev base class
2018 PciDev::serialize(os);
2019
2020 /*
2021 * Finalize any DMA events now.
2022 */
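// Performing the copies now means the checkpoint does not have to describe
// a partially completed DMA; the DMA state serialized below is then dmaIdle.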
2023 if (rxDmaReadEvent.scheduled())
2024 rxDmaReadCopy();
2025 if (rxDmaWriteEvent.scheduled())
2026 rxDmaWriteCopy();
2027 if (txDmaReadEvent.scheduled())
2028 txDmaReadCopy();
2029 if (txDmaWriteEvent.scheduled())
2030 txDmaWriteCopy();
2031
2032 /*
2033 * Serialize the device registers
2034 */
2035 SERIALIZE_SCALAR(regs.command);
2036 SERIALIZE_SCALAR(regs.config);
2037 SERIALIZE_SCALAR(regs.mear);
2038 SERIALIZE_SCALAR(regs.ptscr);
2039 SERIALIZE_SCALAR(regs.isr);
2040 SERIALIZE_SCALAR(regs.imr);
2041 SERIALIZE_SCALAR(regs.ier);
2042 SERIALIZE_SCALAR(regs.ihr);
2043 SERIALIZE_SCALAR(regs.txdp);
2044 SERIALIZE_SCALAR(regs.txdp_hi);
2045 SERIALIZE_SCALAR(regs.txcfg);
2046 SERIALIZE_SCALAR(regs.gpior);
2047 SERIALIZE_SCALAR(regs.rxdp);
2048 SERIALIZE_SCALAR(regs.rxdp_hi);
2049 SERIALIZE_SCALAR(regs.rxcfg);
2050 SERIALIZE_SCALAR(regs.pqcr);
2051 SERIALIZE_SCALAR(regs.wcsr);
2052 SERIALIZE_SCALAR(regs.pcr);
2053 SERIALIZE_SCALAR(regs.rfcr);
2054 SERIALIZE_SCALAR(regs.rfdr);
2055 SERIALIZE_SCALAR(regs.srr);
2056 SERIALIZE_SCALAR(regs.mibc);
2057 SERIALIZE_SCALAR(regs.vrcr);
2058 SERIALIZE_SCALAR(regs.vtcr);
2059 SERIALIZE_SCALAR(regs.vdr);
2060 SERIALIZE_SCALAR(regs.ccsr);
2061 SERIALIZE_SCALAR(regs.tbicr);
2062 SERIALIZE_SCALAR(regs.tbisr);
2063 SERIALIZE_SCALAR(regs.tanar);
2064 SERIALIZE_SCALAR(regs.tanlpar);
2065 SERIALIZE_SCALAR(regs.taner);
2066 SERIALIZE_SCALAR(regs.tesr);
2067
2068 SERIALIZE_ARRAY(rom.perfectMatch, EADDR_LEN);
2069
2070 /*
2071 * Serialize the various helper variables
2072 */
2073 uint32_t txPktBufPtr = (uint32_t) txPacketBufPtr;
2074 SERIALIZE_SCALAR(txPktBufPtr);
2075 uint32_t rxPktBufPtr = (uint32_t) rxPacketBufPtr;
2076 SERIALIZE_SCALAR(rxPktBufPtr);
2077 SERIALIZE_SCALAR(txXferLen);
2078 SERIALIZE_SCALAR(rxXferLen);
2079 SERIALIZE_SCALAR(txPktXmitted);
2080
2081 bool txPacketExists = txPacket;
2082 SERIALIZE_SCALAR(txPacketExists);
2083 bool rxPacketExists = rxPacket;
2084 SERIALIZE_SCALAR(rxPacketExists);
2085
2086 /*
2087 * Serialize DescCaches
2088 */
2089 SERIALIZE_SCALAR(txDescCache.link);
2090 SERIALIZE_SCALAR(txDescCache.bufptr);
2091 SERIALIZE_SCALAR(txDescCache.cmdsts);
2092 SERIALIZE_SCALAR(txDescCache.extsts);
2093 SERIALIZE_SCALAR(rxDescCache.link);
2094 SERIALIZE_SCALAR(rxDescCache.bufptr);
2095 SERIALIZE_SCALAR(rxDescCache.cmdsts);
2096 SERIALIZE_SCALAR(rxDescCache.extsts);
2097
2098 /*
2099 * Serialize tx state machine
2100 */
2101 int txNumPkts = txFifo.size();
2102 SERIALIZE_SCALAR(txNumPkts);
2103 int txState = this->txState;
2104 SERIALIZE_SCALAR(txState);
2105 SERIALIZE_SCALAR(CTDD);
2106 SERIALIZE_SCALAR(txFifoCnt);
2107 SERIALIZE_SCALAR(txFifoAvail);
2108 SERIALIZE_SCALAR(txHalt);
2109 SERIALIZE_SCALAR(txFragPtr);
2110 SERIALIZE_SCALAR(txDescCnt);
2111 int txDmaState = this->txDmaState;
2112 SERIALIZE_SCALAR(txDmaState);
2113
2114 /*
2115 * Serialize rx state machine
2116 */
2117 int rxNumPkts = rxFifo.size();
2118 SERIALIZE_SCALAR(rxNumPkts);
2119 int rxState = this->rxState;
2120 SERIALIZE_SCALAR(rxState);
2121 SERIALIZE_SCALAR(CRDD);
2122 SERIALIZE_SCALAR(rxPktBytes);
2123 SERIALIZE_SCALAR(rxFifoCnt);
2124 SERIALIZE_SCALAR(rxHalt);
2125 SERIALIZE_SCALAR(rxDescCnt);
2126 int rxDmaState = this->rxDmaState;
2127 SERIALIZE_SCALAR(rxDmaState);
2128
2129 SERIALIZE_SCALAR(extstsEnable);
2130
2131 /*
2132 * If there's a pending transmit, store the time so we can
2133 * reschedule it later
2134 */
2135 Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
2136 SERIALIZE_SCALAR(transmitTick);
2137
2138 /*
2139 * Keep track of pending interrupt status.
2140 */
2141 SERIALIZE_SCALAR(intrTick);
2142 SERIALIZE_SCALAR(cpuPendingIntr);
2143 Tick intrEventTick = 0;
2144 if (intrEvent)
2145 intrEventTick = intrEvent->when();
2146 SERIALIZE_SCALAR(intrEventTick);
2147
2148 int i = 0;
2149 for (pktiter_t p = rxFifo.begin(); p != rxFifo.end(); ++p) {
2150 nameOut(os, csprintf("%s.rxFifo%d", name(), i++));
2151 (*p)->serialize(os);
2152 }
2153 if (rxPacketExists) {
2154 nameOut(os, csprintf("%s.rxPacket", name()));
2155 rxPacket->serialize(os);
2156 }
2157 i = 0;
2158 for (pktiter_t p = txFifo.begin(); p != txFifo.end(); ++p) {
2159 nameOut(os, csprintf("%s.txFifo%d", name(), i++));
2160 (*p)->serialize(os);
2161 }
2162 if (txPacketExists) {
2163 nameOut(os, csprintf("%s.txPacket", name()));
2164 txPacket->serialize(os);
2165 }
2166 }
2167
2168 void
2169 NSGigE::unserialize(Checkpoint *cp, const std::string &section)
2170 {
2171 // Unserialize the PciDev base class
2172 PciDev::unserialize(cp, section);
2173
2174 UNSERIALIZE_SCALAR(regs.command);
2175 UNSERIALIZE_SCALAR(regs.config);
2176 UNSERIALIZE_SCALAR(regs.mear);
2177 UNSERIALIZE_SCALAR(regs.ptscr);
2178 UNSERIALIZE_SCALAR(regs.isr);
2179 UNSERIALIZE_SCALAR(regs.imr);
2180 UNSERIALIZE_SCALAR(regs.ier);
2181 UNSERIALIZE_SCALAR(regs.ihr);
2182 UNSERIALIZE_SCALAR(regs.txdp);
2183 UNSERIALIZE_SCALAR(regs.txdp_hi);
2184 UNSERIALIZE_SCALAR(regs.txcfg);
2185 UNSERIALIZE_SCALAR(regs.gpior);
2186 UNSERIALIZE_SCALAR(regs.rxdp);
2187 UNSERIALIZE_SCALAR(regs.rxdp_hi);
2188 UNSERIALIZE_SCALAR(regs.rxcfg);
2189 UNSERIALIZE_SCALAR(regs.pqcr);
2190 UNSERIALIZE_SCALAR(regs.wcsr);
2191 UNSERIALIZE_SCALAR(regs.pcr);
2192 UNSERIALIZE_SCALAR(regs.rfcr);
2193 UNSERIALIZE_SCALAR(regs.rfdr);
2194 UNSERIALIZE_SCALAR(regs.srr);
2195 UNSERIALIZE_SCALAR(regs.mibc);
2196 UNSERIALIZE_SCALAR(regs.vrcr);
2197 UNSERIALIZE_SCALAR(regs.vtcr);
2198 UNSERIALIZE_SCALAR(regs.vdr);
2199 UNSERIALIZE_SCALAR(regs.ccsr);
2200 UNSERIALIZE_SCALAR(regs.tbicr);
2201 UNSERIALIZE_SCALAR(regs.tbisr);
2202 UNSERIALIZE_SCALAR(regs.tanar);
2203 UNSERIALIZE_SCALAR(regs.tanlpar);
2204 UNSERIALIZE_SCALAR(regs.taner);
2205 UNSERIALIZE_SCALAR(regs.tesr);
2206
2207 UNSERIALIZE_ARRAY(rom.perfectMatch, EADDR_LEN);
2208
2209 /*
2210 * unserialize the various helper variables
2211 */
2212 uint32_t txPktBufPtr;
2213 UNSERIALIZE_SCALAR(txPktBufPtr);
2214 txPacketBufPtr = (uint8_t *) txPktBufPtr;
2215 uint32_t rxPktBufPtr;
2216 UNSERIALIZE_SCALAR(rxPktBufPtr);
2217 rxPacketBufPtr = (uint8_t *) rxPktBufPtr;
2218 UNSERIALIZE_SCALAR(txXferLen);
2219 UNSERIALIZE_SCALAR(rxXferLen);
2220 UNSERIALIZE_SCALAR(txPktXmitted);
2221
2222 bool txPacketExists;
2223 UNSERIALIZE_SCALAR(txPacketExists);
2224 bool rxPacketExists;
2225 UNSERIALIZE_SCALAR(rxPacketExists);
2226
2227 /*
2228 * Unserialize DescCaches
2229 */
2230 UNSERIALIZE_SCALAR(txDescCache.link);
2231 UNSERIALIZE_SCALAR(txDescCache.bufptr);
2232 UNSERIALIZE_SCALAR(txDescCache.cmdsts);
2233 UNSERIALIZE_SCALAR(txDescCache.extsts);
2234 UNSERIALIZE_SCALAR(rxDescCache.link);
2235 UNSERIALIZE_SCALAR(rxDescCache.bufptr);
2236 UNSERIALIZE_SCALAR(rxDescCache.cmdsts);
2237 UNSERIALIZE_SCALAR(rxDescCache.extsts);
2238
2239 /*
2240 * unserialize tx state machine
2241 */
2242 int txNumPkts;
2243 UNSERIALIZE_SCALAR(txNumPkts);
2244 int txState;
2245 UNSERIALIZE_SCALAR(txState);
2246 this->txState = (TxState) txState;
2247 UNSERIALIZE_SCALAR(CTDD);
2248 UNSERIALIZE_SCALAR(txFifoCnt);
2249 UNSERIALIZE_SCALAR(txFifoAvail);
2250 UNSERIALIZE_SCALAR(txHalt);
2251 UNSERIALIZE_SCALAR(txFragPtr);
2252 UNSERIALIZE_SCALAR(txDescCnt);
2253 int txDmaState;
2254 UNSERIALIZE_SCALAR(txDmaState);
2255 this->txDmaState = (DmaState) txDmaState;
2256
2257 /*
2258 * unserialize rx state machine
2259 */
2260 int rxNumPkts;
2261 UNSERIALIZE_SCALAR(rxNumPkts);
2262 int rxState;
2263 UNSERIALIZE_SCALAR(rxState);
2264 this->rxState = (RxState) rxState;
2265 UNSERIALIZE_SCALAR(CRDD);
2266 UNSERIALIZE_SCALAR(rxPktBytes);
2267 UNSERIALIZE_SCALAR(rxFifoCnt);
2268 UNSERIALIZE_SCALAR(rxHalt);
2269 UNSERIALIZE_SCALAR(rxDescCnt);
2270 int rxDmaState;
2271 UNSERIALIZE_SCALAR(rxDmaState);
2272 this->rxDmaState = (DmaState) rxDmaState;
2273
2274 UNSERIALIZE_SCALAR(extstsEnable);
2275
2276 /*
2277 * If there was a pending transmit when the checkpoint was taken,
2278 * reschedule it now
2279 */
2280 Tick transmitTick;
2281 UNSERIALIZE_SCALAR(transmitTick);
2282 if (transmitTick)
2283 txEvent.schedule(curTick + transmitTick);
2284
2285 /*
2286 * Keep track of pending interrupt status.
2287 */
2288 UNSERIALIZE_SCALAR(intrTick);
2289 UNSERIALIZE_SCALAR(cpuPendingIntr);
2290 Tick intrEventTick;
2291 UNSERIALIZE_SCALAR(intrEventTick);
2292 if (intrEventTick) {
2293 intrEvent = new IntrEvent(this, true);
2294 intrEvent->schedule(intrEventTick);
2295 }
2296
2297 for (int i = 0; i < rxNumPkts; ++i) {
2298 PacketPtr p = new EtherPacket;
2299 p->unserialize(cp, csprintf("%s.rxFifo%d", section, i));
2300 rxFifo.push_back(p);
2301 }
2302 rxPacket = NULL;
2303 if (rxPacketExists) {
2304 rxPacket = new EtherPacket;
2305 rxPacket->unserialize(cp, csprintf("%s.rxPacket", section));
2306 }
2307 for (int i = 0; i < txNumPkts; ++i) {
2308 PacketPtr p = new EtherPacket;
2309 p->unserialize(cp, csprintf("%s.rxFifo%d", section, i));
2310 txFifo.push_back(p);
2311 }
2312 if (txPacketExists) {
2313 txPacket = new EtherPacket;
2314 txPacket->unserialize(cp, csprintf("%s.txPacket", section));
2315 }
2316 }
2317
2318
2319 Tick
2320 NSGigE::cacheAccess(MemReqPtr &req)
2321 {
2322 DPRINTF(EthernetPIO, "timing access to paddr=%#x (daddr=%#x)\n",
2323 req->paddr, req->paddr - addr);
2324 return curTick + pioLatency;
2325 }
2326 //=====================================================================
2327
2328
2329 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)
2330
2331 SimObjectParam<EtherInt *> peer;
2332 SimObjectParam<NSGigE *> device;
2333
2334 END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)
2335
2336 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt)
2337
2338 INIT_PARAM_DFLT(peer, "peer interface", NULL),
2339 INIT_PARAM(device, "Ethernet device of this interface")
2340
2341 END_INIT_SIM_OBJECT_PARAMS(NSGigEInt)
2342
2343 CREATE_SIM_OBJECT(NSGigEInt)
2344 {
2345 NSGigEInt *dev_int = new NSGigEInt(getInstanceName(), device);
2346
2347 EtherInt *p = (EtherInt *)peer;
2348 if (p) {
2349 dev_int->setPeer(p);
2350 p->setPeer(dev_int);
2351 }
2352
2353 return dev_int;
2354 }
2355
2356 REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt)
2357
2358
2359 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE)
2360
2361 Param<Tick> tx_delay;
2362 Param<Tick> rx_delay;
2363 SimObjectParam<IntrControl *> intr_ctrl;
2364 Param<Tick> intr_delay;
2365 SimObjectParam<MemoryController *> mmu;
2366 SimObjectParam<PhysicalMemory *> physmem;
2367 Param<bool> rx_filter;
2368 Param<string> hardware_address;
2369 SimObjectParam<Bus*> header_bus;
2370 SimObjectParam<Bus*> payload_bus;
2371 SimObjectParam<HierParams *> hier;
2372 Param<Tick> pio_latency;
2373 Param<bool> dma_desc_free;
2374 Param<bool> dma_data_free;
2375 Param<Tick> dma_read_delay;
2376 Param<Tick> dma_write_delay;
2377 Param<Tick> dma_read_factor;
2378 Param<Tick> dma_write_factor;
2379 SimObjectParam<PciConfigAll *> configspace;
2380 SimObjectParam<PciConfigData *> configdata;
2381 SimObjectParam<Tsunami *> tsunami;
2382 Param<uint32_t> pci_bus;
2383 Param<uint32_t> pci_dev;
2384 Param<uint32_t> pci_func;
2385
2386 END_DECLARE_SIM_OBJECT_PARAMS(NSGigE)
2387
2388 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE)
2389
2390 INIT_PARAM_DFLT(tx_delay, "Transmit Delay", 1000),
2391 INIT_PARAM_DFLT(rx_delay, "Receive Delay", 1000),
2392 INIT_PARAM(intr_ctrl, "Interrupt Controller"),
2393 INIT_PARAM_DFLT(intr_delay, "Interrupt Delay in microseconds", 0),
2394 INIT_PARAM(mmu, "Memory Controller"),
2395 INIT_PARAM(physmem, "Physical Memory"),
2396 INIT_PARAM_DFLT(rx_filter, "Enable Receive Filter", true),
2397 INIT_PARAM_DFLT(hardware_address, "Ethernet Hardware Address",
2398 "00:99:00:00:00:01"),
2399 INIT_PARAM_DFLT(header_bus, "The IO Bus to attach to for headers", NULL),
2400 INIT_PARAM_DFLT(payload_bus, "The IO Bus to attach to for payload", NULL),
2401 INIT_PARAM_DFLT(hier, "Hierarchy global variables", &defaultHierParams),
2402 INIT_PARAM_DFLT(pio_latency, "Programmed IO latency", 1000),
2403 INIT_PARAM_DFLT(dma_desc_free, "DMA of Descriptors is free", false),
2404 INIT_PARAM_DFLT(dma_data_free, "DMA of Data is free", false),
2405 INIT_PARAM_DFLT(dma_read_delay, "fixed delay for dma reads", 0),
2406 INIT_PARAM_DFLT(dma_write_delay, "fixed delay for dma writes", 0),
2407 INIT_PARAM_DFLT(dma_read_factor, "multiplier for dma reads", 0),
2408 INIT_PARAM_DFLT(dma_write_factor, "multiplier for dma writes", 0),
2409 INIT_PARAM(configspace, "PCI Configspace"),
2410 INIT_PARAM(configdata, "PCI Config data"),
2411 INIT_PARAM(tsunami, "Tsunami"),
2412 INIT_PARAM(pci_bus, "PCI bus"),
2413 INIT_PARAM(pci_dev, "PCI device number"),
2414 INIT_PARAM(pci_func, "PCI function code")
2415
2416 END_INIT_SIM_OBJECT_PARAMS(NSGigE)
2417
2418
2419 CREATE_SIM_OBJECT(NSGigE)
2420 {
2421 int eaddr[6];
2422 sscanf(((string)hardware_address).c_str(), "%x:%x:%x:%x:%x:%x",
2423 &eaddr[0], &eaddr[1], &eaddr[2], &eaddr[3], &eaddr[4], &eaddr[5]);
2424
2425 return new NSGigE(getInstanceName(), intr_ctrl, intr_delay,
2426 physmem, tx_delay, rx_delay, mmu, hier, header_bus,
2427 payload_bus, pio_latency, dma_desc_free, dma_data_free,
2428 dma_read_delay, dma_write_delay, dma_read_factor,
2429 dma_write_factor, configspace, configdata,
2430 tsunami, pci_bus, pci_dev, pci_func, rx_filter, eaddr);
2431 }
2432
2433 REGISTER_SIM_OBJECT("NSGigE", NSGigE)