1 /*
2 * Copyright (c) 2003 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /* @file
30 * Device module for modelling the National Semiconductor
31 * DP83820 ethernet controller. Does not support priority queueing.
32 */
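/*
 * Model overview: two cooperating state machines, receive (rxState, names
 * in NsRxStateStrings) and transmit (txState, names in NsTxStateStrings),
 * each of which issues descriptor and data DMAs tracked by a small DMA
 * sub-state (NsDmaState).  Interrupt causes accumulate in regs.isr, are
 * masked by regs.imr, and are forwarded to the CPU through the Tsunami
 * chipset.  TCP/UDP/IP checksum offload (the EXTSTS_* bits) is modelled
 * by the *Checksum() helpers near the end of the file.
 */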
33 #include <cstdio>
34 #include <deque>
35 #include <string>
36
37 #include "base/inet.hh"
38 #include "cpu/exec_context.hh"
39 #include "cpu/intr_control.hh"
40 #include "dev/dma.hh"
41 #include "dev/ns_gige.hh"
42 #include "dev/etherlink.hh"
43 #include "mem/bus/bus.hh"
44 #include "mem/bus/dma_interface.hh"
45 #include "mem/bus/pio_interface.hh"
46 #include "mem/bus/pio_interface_impl.hh"
47 #include "mem/functional_mem/memory_control.hh"
48 #include "mem/functional_mem/physical_memory.hh"
49 #include "sim/builder.hh"
50 #include "sim/host.hh"
51 #include "sim/sim_stats.hh"
52 #include "targetarch/vtophys.hh"
53 #include "dev/pciconfigall.hh"
54 #include "dev/tsunami_cchip.hh"
55
56 const char *NsRxStateStrings[] =
57 {
58 "rxIdle",
59 "rxDescRefr",
60 "rxDescRead",
61 "rxFifoBlock",
62 "rxFragWrite",
63 "rxDescWrite",
64 "rxAdvance"
65 };
66
67 const char *NsTxStateStrings[] =
68 {
69 "txIdle",
70 "txDescRefr",
71 "txDescRead",
72 "txFifoBlock",
73 "txFragRead",
74 "txDescWrite",
75 "txAdvance"
76 };
77
78 const char *NsDmaState[] =
79 {
80 "dmaIdle",
81 "dmaReading",
82 "dmaWriting",
83 "dmaReadWaiting",
84 "dmaWriteWaiting"
85 };
86
87 using namespace std;
88
89 ///////////////////////////////////////////////////////////////////////
90 //
91 // NSGigE PCI Device
92 //
93 NSGigE::NSGigE(const std::string &name, IntrControl *i, Tick intr_delay,
94 PhysicalMemory *pmem, Tick tx_delay, Tick rx_delay,
95 MemoryController *mmu, HierParams *hier, Bus *header_bus,
96 Bus *payload_bus, Tick pio_latency, bool dma_desc_free,
97 bool dma_data_free, Tick dma_read_delay, Tick dma_write_delay,
98 Tick dma_read_factor, Tick dma_write_factor, PciConfigAll *cf,
99 PciConfigData *cd, Tsunami *t, uint32_t bus, uint32_t dev,
100 uint32_t func, bool rx_filter, const int eaddr[6], Addr addr)
101 : PciDev(name, mmu, cf, cd, bus, dev, func), tsunami(t),
102 addr(addr), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
103 txXferLen(0), rxXferLen(0), txPktXmitted(0), txState(txIdle), CTDD(false),
104 txFifoCnt(0), txFifoAvail(MAX_TX_FIFO_SIZE), txHalt(false),
105 txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
106 CRDD(false), rxPktBytes(0), rxFifoCnt(0), rxHalt(false),
107 rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
108 rxDmaReadEvent(this), rxDmaWriteEvent(this),
109 txDmaReadEvent(this), txDmaWriteEvent(this),
110 dmaDescFree(dma_desc_free), dmaDataFree(dma_data_free),
111 txDelay(tx_delay), rxDelay(rx_delay), rxKickTick(0), txKickTick(0),
112 txEvent(this), rxFilterEnable(rx_filter), acceptBroadcast(false),
113 acceptMulticast(false), acceptUnicast(false),
114 acceptPerfect(false), acceptArp(false),
115 physmem(pmem), intctrl(i), intrTick(0),
116 cpuPendingIntr(false), intrEvent(0), interface(0), pioLatency(pio_latency)
117 {
118 mmu->add_child(this, Range<Addr>(addr, addr + size));
119 tsunami->ethernet = this;
120
121 if (header_bus) {
122 pioInterface = newPioInterface(name, hier, header_bus, this,
123 &NSGigE::cacheAccess);
124 pioInterface->addAddrRange(addr, addr + size - 1);
125 if (payload_bus)
126 dmaInterface = new DMAInterface<Bus>(name + ".dma",
127 header_bus, payload_bus, 1);
128 else
129 dmaInterface = new DMAInterface<Bus>(name + ".dma",
130 header_bus, header_bus, 1);
131 } else if (payload_bus) {
132 pioInterface = newPioInterface(name, hier, payload_bus, this,
133 &NSGigE::cacheAccess);
134 pioInterface->addAddrRange(addr, addr + size - 1);
135 dmaInterface = new DMAInterface<Bus>(name + ".dma",
136 payload_bus, payload_bus, 1);
137
138 }
139
140
141 intrDelay = US2Ticks(intr_delay);
142 dmaReadDelay = dma_read_delay;
143 dmaWriteDelay = dma_write_delay;
144 dmaReadFactor = dma_read_factor;
145 dmaWriteFactor = dma_write_factor;
146
147 memset(&regs, 0, sizeof(regs));
148 regsReset();
149 rom.perfectMatch[0] = eaddr[0];
150 rom.perfectMatch[1] = eaddr[1];
151 rom.perfectMatch[2] = eaddr[2];
152 rom.perfectMatch[3] = eaddr[3];
153 rom.perfectMatch[4] = eaddr[4];
154 rom.perfectMatch[5] = eaddr[5];
155 }
156
157 NSGigE::~NSGigE()
158 {}
159
160 void
161 NSGigE::regStats()
162 {
163 txBytes
164 .name(name() + ".txBytes")
165 .desc("Bytes Transmitted")
166 .prereq(txBytes)
167 ;
168
169 rxBytes
170 .name(name() + ".rxBytes")
171 .desc("Bytes Received")
172 .prereq(rxBytes)
173 ;
174
175 txPackets
176 .name(name() + ".txPackets")
177 .desc("Number of Packets Transmitted")
178 .prereq(txBytes)
179 ;
180
181 rxPackets
182 .name(name() + ".rxPackets")
183 .desc("Number of Packets Received")
184 .prereq(rxBytes)
185 ;
186
187 txBandwidth
188 .name(name() + ".txBandwidth")
189 .desc("Transmit Bandwidth (bits/s)")
190 .precision(0)
191 .prereq(txBytes)
192 ;
193
194 rxBandwidth
195 .name(name() + ".rxBandwidth")
196 .desc("Receive Bandwidth (bits/s)")
197 .precision(0)
198 .prereq(rxBytes)
199 ;
200
201 txPacketRate
202 .name(name() + ".txPPS")
203 .desc("Packet Transmission Rate (packets/s)")
204 .precision(0)
205 .prereq(txBytes)
206 ;
207
208 rxPacketRate
209 .name(name() + ".rxPPS")
210 .desc("Packet Reception Rate (packets/s)")
211 .precision(0)
212 .prereq(rxBytes)
213 ;
214
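// Formula stats below are derived at stats-dump time.  For example, if
// 1,000,000 bytes were transmitted over 0.01 simulated seconds, txBandwidth
// reports 8e6 / 0.01 = 800 Mbps (illustrative numbers only).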
215 txBandwidth = txBytes * Stats::constant(8) / simSeconds;
216 rxBandwidth = rxBytes * Stats::constant(8) / simSeconds;
217 txPacketRate = txPackets / simSeconds;
218 rxPacketRate = rxPackets / simSeconds;
219 }
220
221 /**
222 * This is to read the PCI general configuration registers
223 */
224 void
225 NSGigE::ReadConfig(int offset, int size, uint8_t *data)
226 {
227 if (offset < PCI_DEVICE_SPECIFIC)
228 PciDev::ReadConfig(offset, size, data);
229 else {
230 panic("need to do this\n");
231 }
232 }
233
234 /**
235 * This is to write to the PCI general configuration registers
236 */
237 void
238 NSGigE::WriteConfig(int offset, int size, uint32_t data)
239 {
240 if (offset < PCI_DEVICE_SPECIFIC)
241 PciDev::WriteConfig(offset, size, data);
242 else
243 panic("Need to do that\n");
244 }
245
246 /**
247 * This reads the device registers, which are detailed in the NS83820
248 * spec sheet
249 */
250 Fault
251 NSGigE::read(MemReqPtr &req, uint8_t *data)
252 {
253 //The mask is to give you only the offset into the device register file
254 Addr daddr = req->paddr & 0xfff;
255 DPRINTF(EthernetPIO, "read da=%#x pa=%#x va=%#x size=%d\n",
256 daddr, req->paddr, req->vaddr, req->size);
257
258
259 //there are some reserved registers, you can see ns_gige_reg.h and
260 //the spec sheet for details
261 if (daddr > LAST && daddr <= RESERVED) {
262 panic("Accessing reserved register");
263 } else if (daddr > RESERVED && daddr <= 0x3FC) {
264 ReadConfig(daddr & 0xff, req->size, data);
265 return No_Fault;
266 } else if (daddr >= MIB_START && daddr <= MIB_END) {
267 // we don't implement all the MIBs; hopefully the kernel
268 // doesn't actually depend upon their values.
269 // the MIBs are just hardware statistics counters.
270 uint32_t &reg = *(uint32_t *) data;
271 reg = 0;
272 return No_Fault;
273 } else if (daddr > 0x3FC)
274 panic("Something is messed up!\n");
275
276 switch (req->size) {
277 case sizeof(uint32_t):
278 {
279 uint32_t &reg = *(uint32_t *)data;
280
281 switch (daddr) {
282 case CR:
283 reg = regs.command;
284 //these are supposed to be cleared on a read
285 reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
286 break;
287
288 case CFG:
289 reg = regs.config;
290 break;
291
292 case MEAR:
293 reg = regs.mear;
294 break;
295
296 case PTSCR:
297 reg = regs.ptscr;
298 break;
299
300 case ISR:
301 reg = regs.isr;
302 devIntrClear(ISR_ALL);
303 break;
304
305 case IMR:
306 reg = regs.imr;
307 break;
308
309 case IER:
310 reg = regs.ier;
311 break;
312
313 case IHR:
314 reg = regs.ihr;
315 break;
316
317 case TXDP:
318 reg = regs.txdp;
319 break;
320
321 case TXDP_HI:
322 reg = regs.txdp_hi;
323 break;
324
325 case TXCFG:
326 reg = regs.txcfg;
327 break;
328
329 case GPIOR:
330 reg = regs.gpior;
331 break;
332
333 case RXDP:
334 reg = regs.rxdp;
335 break;
336
337 case RXDP_HI:
338 reg = regs.rxdp_hi;
339 break;
340
341 case RXCFG:
342 reg = regs.rxcfg;
343 break;
344
345 case PQCR:
346 reg = regs.pqcr;
347 break;
348
349 case WCSR:
350 reg = regs.wcsr;
351 break;
352
353 case PCR:
354 reg = regs.pcr;
355 break;
356
357 // see the spec sheet for how RFCR and RFDR work together:
358 // you first write RFCR to select which filter resource to access,
359 // then read or write RFDR, and the device responds based on
360 // the address previously written to RFCR
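// Illustrative access sequence (a sketch, not lifted from the driver source):
//     write(RFCR, 0x000);   // select perfect-match bytes 1:0
//     lo  = read(RFDR);     // returns perfectMatch[1] << 8 | perfectMatch[0]
//     write(RFCR, 0x002);   // select perfect-match bytes 3:2
//     mid = read(RFDR);     // and so on for bytes 5:4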
361 case RFCR:
362 reg = regs.rfcr;
363 break;
364
365 case RFDR:
366 switch (regs.rfcr & RFCR_RFADDR) {
367 case 0x000:
368 reg = rom.perfectMatch[1];
369 reg = reg << 8;
370 reg += rom.perfectMatch[0];
371 break;
372 case 0x002:
373 reg = rom.perfectMatch[3] << 8;
374 reg += rom.perfectMatch[2];
375 break;
376 case 0x004:
377 reg = rom.perfectMatch[5] << 8;
378 reg += rom.perfectMatch[4];
379 break;
380 default:
381 panic("reading from RFDR for something for other than PMATCH!\n");
382 //didn't implement other RFDR functionality b/c driver didn't use
383 }
384 break;
385
386 case SRR:
387 reg = regs.srr;
388 break;
389
390 case MIBC:
391 reg = regs.mibc;
392 reg &= ~(MIBC_MIBS | MIBC_ACLR);
393 break;
394
395 case VRCR:
396 reg = regs.vrcr;
397 break;
398
399 case VTCR:
400 reg = regs.vtcr;
401 break;
402
403 case VDR:
404 reg = regs.vdr;
405 break;
406
407 case CCSR:
408 reg = regs.ccsr;
409 break;
410
411 case TBICR:
412 reg = regs.tbicr;
413 break;
414
415 case TBISR:
416 reg = regs.tbisr;
417 break;
418
419 case TANAR:
420 reg = regs.tanar;
421 break;
422
423 case TANLPAR:
424 reg = regs.tanlpar;
425 break;
426
427 case TANER:
428 reg = regs.taner;
429 break;
430
431 case TESR:
432 reg = regs.tesr;
433 break;
434
435 default:
436 panic("reading unimplemented register: addr = %#x", daddr);
437 }
438
439 DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
440 daddr, reg, reg);
441 }
442 break;
443
444 default:
445 panic("accessing register with invalid size: addr=%#x, size=%d",
446 daddr, req->size);
447 }
448
449 return No_Fault;
450 }
451
452 Fault
453 NSGigE::write(MemReqPtr &req, const uint8_t *data)
454 {
455 Addr daddr = req->paddr & 0xfff;
456 DPRINTF(EthernetPIO, "write da=%#x pa=%#x va=%#x size=%d\n",
457 daddr, req->paddr, req->vaddr, req->size);
458
459 if (daddr > LAST && daddr <= RESERVED) {
460 panic("Accessing reserved register");
461 } else if (daddr > RESERVED && daddr <= 0x3FC) {
462 WriteConfig(daddr & 0xff, req->size, *(uint32_t *)data);
463 return No_Fault;
464 } else if (daddr > 0x3FC)
465 panic("Something is messed up!\n");
466
467 if (req->size == sizeof(uint32_t)) {
468 uint32_t reg = *(uint32_t *)data;
469 DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);
470
471 switch (daddr) {
472 case CR:
473 regs.command = reg;
474 if ((reg & (CR_TXE | CR_TXD)) == (CR_TXE | CR_TXD)) {
475 txHalt = true;
476 } else if (reg & CR_TXE) {
477 //the kernel is enabling the transmit machine
478 if (txState == txIdle)
479 txKick();
480 } else if (reg & CR_TXD) {
481 txHalt = true;
482 }
483
484 if ((reg & (CR_RXE | CR_RXD)) == (CR_RXE | CR_RXD)) {
485 rxHalt = true;
486 } else if (reg & CR_RXE) {
487 if (rxState == rxIdle) {
488 rxKick();
489 }
490 } else if (reg & CR_RXD) {
491 rxHalt = true;
492 }
493
494 if (reg & CR_TXR)
495 txReset();
496
497 if (reg & CR_RXR)
498 rxReset();
499
500 if (reg & CR_SWI)
501 devIntrPost(ISR_SWI);
502
503 if (reg & CR_RST) {
504 txReset();
505 rxReset();
506
507 regsReset();
508 }
509 break;
510
511 case CFG:
512 if (reg & CFG_LNKSTS || reg & CFG_SPDSTS || reg & CFG_DUPSTS
513 || reg & CFG_RESERVED || reg & CFG_T64ADDR
514 || reg & CFG_PCI64_DET)
515 panic("writing to read-only or reserved CFG bits!\n");
516
517 regs.config |= reg & ~(CFG_LNKSTS | CFG_SPDSTS | CFG_DUPSTS | CFG_RESERVED |
518 CFG_T64ADDR | CFG_PCI64_DET);
519
520 // all these #if 0's are here because I don't think the kernel needs these bits implemented;
521 // if a problem turns out to involve one of them, functionality may need to be added here
522 #if 0
523 if (reg & CFG_TBI_EN) ;
524 if (reg & CFG_MODE_1000) ;
525 #endif
526
527 if (reg & CFG_AUTO_1000)
528 panic("CFG_AUTO_1000 not implemented!\n");
529
530 #if 0
531 if (reg & CFG_PINT_DUPSTS || reg & CFG_PINT_LNKSTS || reg & CFG_PINT_SPDSTS) ;
532 if (reg & CFG_TMRTEST) ;
533 if (reg & CFG_MRM_DIS) ;
534 if (reg & CFG_MWI_DIS) ;
535
536 if (reg & CFG_T64ADDR)
537 panic("CFG_T64ADDR is read only register!\n");
538
539 if (reg & CFG_PCI64_DET)
540 panic("CFG_PCI64_DET is read only register!\n");
541
542 if (reg & CFG_DATA64_EN) ;
543 if (reg & CFG_M64ADDR) ;
544 if (reg & CFG_PHY_RST) ;
545 if (reg & CFG_PHY_DIS) ;
546 #endif
547
548 if (reg & CFG_EXTSTS_EN)
549 extstsEnable = true;
550 else
551 extstsEnable = false;
552
553 #if 0
554 if (reg & CFG_REQALG) ;
555 if (reg & CFG_SB) ;
556 if (reg & CFG_POW) ;
557 if (reg & CFG_EXD) ;
558 if (reg & CFG_PESEL) ;
559 if (reg & CFG_BROM_DIS) ;
560 if (reg & CFG_EXT_125) ;
561 if (reg & CFG_BEM) ;
562 #endif
563 break;
564
565 case MEAR:
566 regs.mear = reg;
567 /* since phy is completely faked, MEAR_MD* don't matter
568 and since the driver never uses MEAR_EE*, they don't matter */
569 #if 0
570 if (reg & MEAR_EEDI) ;
571 if (reg & MEAR_EEDO) ; //this one is read only
572 if (reg & MEAR_EECLK) ;
573 if (reg & MEAR_EESEL) ;
574 if (reg & MEAR_MDIO) ;
575 if (reg & MEAR_MDDIR) ;
576 if (reg & MEAR_MDC) ;
577 #endif
578 break;
579
580 case PTSCR:
581 regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
582 /* these control BISTs for various parts of the chip - we don't model them,
583 we just fake that each BIST completes immediately */
584 if (reg & PTSCR_RBIST_EN)
585 regs.ptscr |= PTSCR_RBIST_DONE;
586 if (reg & PTSCR_EEBIST_EN)
587 regs.ptscr &= ~PTSCR_EEBIST_EN;
588 if (reg & PTSCR_EELOAD_EN)
589 regs.ptscr &= ~PTSCR_EELOAD_EN;
590 break;
591
592 case ISR: /* writing to the ISR has no effect */
593 panic("ISR is a read only register!\n");
594
595 case IMR:
596 regs.imr = reg;
597 devIntrChangeMask();
598 break;
599
600 case IER:
601 regs.ier = reg;
602 break;
603
604 case IHR:
605 regs.ihr = reg;
606 /* not going to implement real interrupt holdoff */
607 break;
608
609 case TXDP:
610 regs.txdp = (reg & 0xFFFFFFFC);
611 assert(txState == txIdle);
612 CTDD = false;
613 break;
614
615 case TXDP_HI:
616 regs.txdp_hi = reg;
617 break;
618
619 case TXCFG:
620 regs.txcfg = reg;
621 #if 0
622 if (reg & TXCFG_CSI) ;
623 if (reg & TXCFG_HBI) ;
624 if (reg & TXCFG_MLB) ;
625 if (reg & TXCFG_ATP) ;
626 if (reg & TXCFG_ECRETRY) ; /* this could easily be implemented, but
627 considering the network is just a fake
628 pipe, wouldn't make sense to do this */
629
630 if (reg & TXCFG_BRST_DIS) ;
631 #endif
632
633
634 /* we handle our own DMA, ignore the kernel's exhortations */
635 if (reg & TXCFG_MXDMA) ;
636
637 break;
638
639 case GPIOR:
640 regs.gpior = reg;
641 /* these just control general purpose i/o pins, don't matter */
642 break;
643
644 case RXDP:
645 regs.rxdp = reg;
646 break;
647
648 case RXDP_HI:
649 regs.rxdp_hi = reg;
650 break;
651
652 case RXCFG:
653 regs.rxcfg = reg;
654 #if 0
655 if (reg & RXCFG_AEP) ;
656 if (reg & RXCFG_ARP) ;
657 if (reg & RXCFG_STRIPCRC) ;
658 if (reg & RXCFG_RX_RD) ;
659 if (reg & RXCFG_ALP) ;
660 if (reg & RXCFG_AIRL) ;
661 #endif
662
663 /* we handle our own DMA, ignore what kernel says about it */
664 if (reg & RXCFG_MXDMA) ;
665
666 #if 0
667 if (reg & (RXCFG_DRTH | RXCFG_DRTH0)) ;
668 #endif
669 break;
670
671 case PQCR:
672 /* there is no priority queueing used in the linux 2.6 driver */
673 regs.pqcr = reg;
674 break;
675
676 case WCSR:
677 /* not going to implement wake on LAN */
678 regs.wcsr = reg;
679 break;
680
681 case PCR:
682 /* not going to implement pause control */
683 regs.pcr = reg;
684 break;
685
686 case RFCR:
687 regs.rfcr = reg;
688
689 rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
690
691 acceptBroadcast = (reg & RFCR_AAB) ? true : false;
692
693 acceptMulticast = (reg & RFCR_AAM) ? true : false;
694
695 acceptUnicast = (reg & RFCR_AAU) ? true : false;
696
697 acceptPerfect = (reg & RFCR_APM) ? true : false;
698
699 acceptArp = (reg & RFCR_AARP) ? true : false;
700
701 if (reg & RFCR_APAT) ;
702 // panic("RFCR_APAT not implemented!\n");
703
704 if (reg & RFCR_MHEN || reg & RFCR_UHEN)
705 panic("hash filtering not implemented!\n");
706
707 if (reg & RFCR_ULM)
708 panic("RFCR_ULM not implemented!\n");
709
710 break;
711
712 case RFDR:
713 panic("the driver never writes to RFDR, something is wrong!\n");
714
715 case BRAR:
716 panic("the driver never uses BRAR, something is wrong!\n");
717
718 case BRDR:
719 panic("the driver never uses BRDR, something is wrong!\n");
720
721 case SRR:
722 panic("SRR is read only register!\n");
723
724 case MIBC:
725 panic("the driver never uses MIBC, something is wrong!\n");
726
727 case VRCR:
728 regs.vrcr = reg;
729 break;
730
731 case VTCR:
732 regs.vtcr = reg;
733 break;
734
735 case VDR:
736 panic("the driver never uses VDR, something is wrong!\n");
737 break;
738
739 case CCSR:
740 /* not going to implement clockrun stuff */
741 regs.ccsr = reg;
742 break;
743
744 case TBICR:
745 regs.tbicr = reg;
746 if (reg & TBICR_MR_LOOPBACK)
747 panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
748
749 if (reg & TBICR_MR_AN_ENABLE) {
750 regs.tanlpar = regs.tanar;
751 regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
752 }
753
754 #if 0
755 if (reg & TBICR_MR_RESTART_AN) ;
756 #endif
757
758 break;
759
760 case TBISR:
761 panic("TBISR is read only register!\n");
762
763 case TANAR:
764 regs.tanar = reg;
765 if (reg & TANAR_PS2)
766 panic("this isn't used in driver, something wrong!\n");
767
768 if (reg & TANAR_PS1)
769 panic("this isn't used in driver, something wrong!\n");
770 break;
771
772 case TANLPAR:
773 panic("this should only be written to by the fake phy!\n");
774
775 case TANER:
776 panic("TANER is read only register!\n");
777
778 case TESR:
779 regs.tesr = reg;
780 break;
781
782 default:
783 panic("thought i covered all the register, what is this? addr=%#x",
784 daddr);
785 }
786 } else
787 panic("Invalid Request Size");
788
789 return No_Fault;
790 }
791
792 void
793 NSGigE::devIntrPost(uint32_t interrupts)
794 {
795 bool delay = false;
796
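// note: only ISR_TXOK and ISR_RXOK (below) set 'delay', so they are the
// only causes posted intrDelay ticks in the future; every other cause
// raises the CPU interrupt on the current tick (if unmasked)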
797 if (interrupts & ISR_RESERVE)
798 panic("Cannot set a reserved interrupt");
799
800 if (interrupts & ISR_TXRCMP)
801 regs.isr |= ISR_TXRCMP;
802
803 if (interrupts & ISR_RXRCMP)
804 regs.isr |= ISR_RXRCMP;
805
806 //ISR_DPERR not implemented
807 //ISR_SSERR not implemented
808 //ISR_RMABT not implemented
809 //ISR_RXSOVR not implemented
810 //ISR_HIBINT not implemented
811 //ISR_PHY not implemented
812 //ISR_PME not implemented
813
814 if (interrupts & ISR_SWI)
815 regs.isr |= ISR_SWI;
816
817 //ISR_MIB not implemented
818 //ISR_TXURN not implemented
819
820 if (interrupts & ISR_TXIDLE)
821 regs.isr |= ISR_TXIDLE;
822
823 if (interrupts & ISR_TXERR)
824 regs.isr |= ISR_TXERR;
825
826 if (interrupts & ISR_TXDESC)
827 regs.isr |= ISR_TXDESC;
828
829 if (interrupts & ISR_TXOK) {
830 regs.isr |= ISR_TXOK;
831 delay = true;
832 }
833
834 if (interrupts & ISR_RXORN)
835 regs.isr |= ISR_RXORN;
836
837 if (interrupts & ISR_RXIDLE)
838 regs.isr |= ISR_RXIDLE;
839
840 //ISR_RXEARLY not implemented
841
842 if (interrupts & ISR_RXERR)
843 regs.isr |= ISR_RXERR;
844
845 if (interrupts & ISR_RXDESC)
846 regs.isr |= ISR_RXDESC;
847
848 if (interrupts & ISR_RXOK) {
849 delay = true;
850 regs.isr |= ISR_RXOK;
851 }
852
853 if ((regs.isr & regs.imr)) {
854 Tick when = curTick;
855 if (delay)
856 when += intrDelay;
857 cpuIntrPost(when);
858 }
859
860 DPRINTF(Ethernet, "interrupt posted intr=%#x isr=%#x imr=%#x\n",
861 interrupts, regs.isr, regs.imr);
862 }
863
864 void
865 NSGigE::devIntrClear(uint32_t interrupts)
866 {
867 if (interrupts & ISR_RESERVE)
868 panic("Cannot clear a reserved interrupt");
869
870 if (interrupts & ISR_TXRCMP)
871 regs.isr &= ~ISR_TXRCMP;
872
873 if (interrupts & ISR_RXRCMP)
874 regs.isr &= ~ISR_RXRCMP;
875
876 //ISR_DPERR not implemented
877 //ISR_SSERR not implemented
878 //ISR_RMABT not implemented
879 //ISR_RXSOVR not implemented
880 //ISR_HIBINT not implemented
881 //ISR_PHY not implemented
882 //ISR_PME not implemented
883
884 if (interrupts & ISR_SWI)
885 regs.isr &= ~ISR_SWI;
886
887 //ISR_MIB not implemented
888 //ISR_TXURN not implemented
889
890 if (interrupts & ISR_TXIDLE)
891 regs.isr &= ~ISR_TXIDLE;
892
893 if (interrupts & ISR_TXERR)
894 regs.isr &= ~ISR_TXERR;
895
896 if (interrupts & ISR_TXDESC)
897 regs.isr &= ~ISR_TXDESC;
898
899 if (interrupts & ISR_TXOK)
900 regs.isr &= ~ISR_TXOK;
901
902 if (interrupts & ISR_RXORN)
903 regs.isr &= ~ISR_RXORN;
904
905 if (interrupts & ISR_RXIDLE)
906 regs.isr &= ~ISR_RXIDLE;
907
908 //ISR_RXEARLY not implemented
909
910 if (interrupts & ISR_RXERR)
911 regs.isr &= ~ISR_RXERR;
912
913 if (interrupts & ISR_RXDESC)
914 regs.isr &= ~ISR_RXDESC;
915
916 if (interrupts & ISR_RXOK)
917 regs.isr &= ~ISR_RXOK;
918
919 if (!(regs.isr & regs.imr))
920 cpuIntrClear();
921
922 DPRINTF(Ethernet, "interrupt cleared intr=%x isr=%x imr=%x\n",
923 interrupts, regs.isr, regs.imr);
924 }
925
926 void
927 NSGigE::devIntrChangeMask()
928 {
929 DPRINTF(Ethernet, "interrupt mask changed\n");
930
931 if (regs.isr & regs.imr)
932 cpuIntrPost(curTick);
933 else
934 cpuIntrClear();
935 }
936
937 void
938 NSGigE::cpuIntrPost(Tick when)
939 {
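// an already-pending interrupt at an earlier tick wins; a new request
// can only move the pending interrupt earlier, never push it later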
940 if (when > intrTick && intrTick != 0)
941 return;
942
943 intrTick = when;
944
945 if (intrEvent) {
946 intrEvent->squash();
947 intrEvent = 0;
948 }
949
950 if (when < curTick) {
951 cpuInterrupt();
952 } else {
953 intrEvent = new IntrEvent(this, true);
954 intrEvent->schedule(intrTick);
955 }
956 }
957
958 void
959 NSGigE::cpuInterrupt()
960 {
961 // Don't send an interrupt if there's already one
962 if (cpuPendingIntr)
963 return;
964
965 // Don't send an interrupt if it's supposed to be delayed
966 if (intrTick > curTick)
967 return;
968
969 // Whether or not there's a pending interrupt, we don't care about
970 // it anymore
971 intrEvent = 0;
972 intrTick = 0;
973
974 // Send interrupt
975 cpuPendingIntr = true;
976 /** @todo rework the intctrl to be tsunami ok */
977 //intctrl->post(TheISA::INTLEVEL_IRQ1, TheISA::INTINDEX_ETHERNET);
978 tsunami->cchip->postDRIR(configData->config.hdr.pci0.interruptLine);
979 }
980
981 void
982 NSGigE::cpuIntrClear()
983 {
984 if (cpuPendingIntr) {
985 cpuPendingIntr = false;
986 /** @todo rework the intctrl to be tsunami ok */
987 //intctrl->clear(TheISA::INTLEVEL_IRQ1, TheISA::INTINDEX_ETHERNET);
988 tsunami->cchip->clearDRIR(configData->config.hdr.pci0.interruptLine);
989 }
990 }
991
992 bool
993 NSGigE::cpuIntrPending() const
994 { return cpuPendingIntr; }
995
996 void
997 NSGigE::txReset()
998 {
999
1000 DPRINTF(Ethernet, "transmit reset\n");
1001
1002 CTDD = false;
1003 txFifoCnt = 0;
1004 txFifoAvail = MAX_TX_FIFO_SIZE;
1005 txHalt = false;
1006 txFragPtr = 0;
1007 assert(txDescCnt == 0);
1008 txFifo.clear();
1009 regs.command &= ~CR_TXE;
1010 txState = txIdle;
1011 assert(txDmaState == dmaIdle);
1012 }
1013
1014 void
1015 NSGigE::rxReset()
1016 {
1017 DPRINTF(Ethernet, "receive reset\n");
1018
1019 CRDD = false;
1020 assert(rxPktBytes == 0);
1021 rxFifoCnt = 0;
1022 rxHalt = false;
1023 rxFragPtr = 0;
1024 assert(rxDescCnt == 0);
1025 assert(rxDmaState == dmaIdle);
1026 rxFifo.clear();
1027 regs.command &= ~CR_RXE;
1028 rxState = rxIdle;
1029 }
1030
1031 void
1032 NSGigE::rxDmaReadCopy()
1033 {
1034 assert(rxDmaState == dmaReading);
1035
1036 memcpy(rxDmaData, physmem->dma_addr(rxDmaAddr, rxDmaLen), rxDmaLen);
1037 rxDmaState = dmaIdle;
1038
1039 DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
1040 rxDmaAddr, rxDmaLen);
1041 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1042 }
1043
1044 bool
1045 NSGigE::doRxDmaRead()
1046 {
1047 assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
1048 rxDmaState = dmaReading;
1049
1050 if (dmaInterface && !rxDmaFree) {
1051 if (dmaInterface->busy())
1052 rxDmaState = dmaReadWaiting;
1053 else
1054 dmaInterface->doDMA(Read, rxDmaAddr, rxDmaLen, curTick,
1055 &rxDmaReadEvent);
1056 return true;
1057 }
1058
1059 if (dmaReadDelay == 0 && dmaReadFactor == 0) {
1060 rxDmaReadCopy();
1061 return false;
1062 }
1063
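// timing model (when a delay or factor is configured): charge the fixed
// dmaReadDelay plus dmaReadFactor per 64-byte chunk, rounded up; e.g. a
// 1514-byte transfer counts as (1514 + 63) >> 6 = 24 chunks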
1064 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
1065 Tick start = curTick + dmaReadDelay + factor;
1066 rxDmaReadEvent.schedule(start);
1067 return true;
1068 }
1069
1070 void
1071 NSGigE::rxDmaReadDone()
1072 {
1073 assert(rxDmaState == dmaReading);
1074 rxDmaReadCopy();
1075
1076 // If the transmit state machine has a pending DMA, let it go first
1077 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1078 txKick();
1079
1080 rxKick();
1081 }
1082
1083 void
1084 NSGigE::rxDmaWriteCopy()
1085 {
1086 assert(rxDmaState == dmaWriting);
1087
1088 memcpy(physmem->dma_addr(rxDmaAddr, rxDmaLen), rxDmaData, rxDmaLen);
1089 rxDmaState = dmaIdle;
1090
1091 DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
1092 rxDmaAddr, rxDmaLen);
1093 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1094 }
1095
1096 bool
1097 NSGigE::doRxDmaWrite()
1098 {
1099 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
1100 rxDmaState = dmaWriting;
1101
1102 if (dmaInterface && !rxDmaFree) {
1103 if (dmaInterface->busy())
1104 rxDmaState = dmaWriteWaiting;
1105 else
1106 dmaInterface->doDMA(WriteInvalidate, rxDmaAddr, rxDmaLen, curTick,
1107 &rxDmaWriteEvent);
1108 return true;
1109 }
1110
1111 if (dmaWriteDelay == 0 && dmaWriteFactor == 0) {
1112 rxDmaWriteCopy();
1113 return false;
1114 }
1115
1116 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
1117 Tick start = curTick + dmaWriteDelay + factor;
1118 rxDmaWriteEvent.schedule(start);
1119 return true;
1120 }
1121
1122 void
1123 NSGigE::rxDmaWriteDone()
1124 {
1125 assert(rxDmaState == dmaWriting);
1126 rxDmaWriteCopy();
1127
1128 // If the transmit state machine has a pending DMA, let it go first
1129 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1130 txKick();
1131
1132 rxKick();
1133 }
1134
1135 void
1136 NSGigE::rxKick()
1137 {
1138 DPRINTF(Ethernet, "receive kick state=%s (rxBuf.size=%d)\n",
1139 NsRxStateStrings[rxState], rxFifo.size());
1140
1141 if (rxKickTick > curTick) {
1142 DPRINTF(Ethernet, "receive kick exiting, can't run till %d\n",
1143 rxKickTick);
1144 return;
1145 }
1146
1147 next:
1148 switch(rxDmaState) {
1149 case dmaReadWaiting:
1150 if (doRxDmaRead())
1151 goto exit;
1152 break;
1153 case dmaWriteWaiting:
1154 if (doRxDmaWrite())
1155 goto exit;
1156 break;
1157 default:
1158 break;
1159 }
1160
1161 // see the state machine in the spec sheet for details.
1162 // the way this works: if you finish the work for one state and can go directly
1163 // to another, you jump to the label "next".  if there is intermediate work,
1164 // such as a DMA, that keeps you from moving to the next state yet, you jump
1165 // to "exit" and leave the loop; when the DMA completes it triggers an
1166 // event that re-enters this loop.
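// typical flow for one received packet (sketch):
//   rxIdle -> rxDescRead -> rxFifoBlock -> rxFragWrite -> rxFifoBlock
//          -> rxDescWrite -> rxAdvance -> rxDescRead (next descriptor)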
1167 switch (rxState) {
1168 case rxIdle:
1169 if (!(regs.command & CR_RXE)) {
1170 DPRINTF(Ethernet, "Receive Disabled! Nothing to do.\n");
1171 goto exit;
1172 }
1173
1174 if (CRDD) {
1175 rxState = rxDescRefr;
1176
1177 rxDmaAddr = regs.rxdp & 0x3fffffff;
1178 rxDmaData = &rxDescCache.link;
1179 rxDmaLen = sizeof(rxDescCache.link);
1180 rxDmaFree = dmaDescFree;
1181
1182 if (doRxDmaRead())
1183 goto exit;
1184 } else {
1185 rxState = rxDescRead;
1186
1187 rxDmaAddr = regs.rxdp & 0x3fffffff;
1188 rxDmaData = &rxDescCache;
1189 rxDmaLen = sizeof(ns_desc);
1190 rxDmaFree = dmaDescFree;
1191
1192 if (doRxDmaRead())
1193 goto exit;
1194 }
1195 break;
1196
1197 case rxDescRefr:
1198 if (rxDmaState != dmaIdle)
1199 goto exit;
1200
1201 rxState = rxAdvance;
1202 break;
1203
1204 case rxDescRead:
1205 if (rxDmaState != dmaIdle)
1206 goto exit;
1207
1208 DPRINTF(Ethernet,
1209 "rxDescCache:\n\tlink=%#x\n\tbufptr=%#x\n\tcmdsts=%#x\n\textsts=%#x\n"
1210 ,rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts,
1211 rxDescCache.extsts);
1212
1213 if (rxDescCache.cmdsts & CMDSTS_OWN) {
1214 rxState = rxIdle;
1215 } else {
1216 rxState = rxFifoBlock;
1217 rxFragPtr = rxDescCache.bufptr;
1218 rxDescCnt = rxDescCache.cmdsts & CMDSTS_LEN_MASK;
1219 }
1220 break;
1221
1222 case rxFifoBlock:
1223 if (!rxPacket) {
1224 /**
1225 * @todo in reality, we should be able to start processing
1226 * the packet as it arrives, and not have to wait for the
1227 * full packet to be in the receive fifo.
1228 */
1229 if (rxFifo.empty())
1230 goto exit;
1231
1232 // If we don't have a packet, grab a new one from the fifo.
1233 rxPacket = rxFifo.front();
1234 rxPktBytes = rxPacket->length;
1235 rxPacketBufPtr = rxPacket->data;
1236
1237 // sanity check - i think the driver behaves like this
1238 assert(rxDescCnt >= rxPktBytes);
1239
1240 // Must clear the value before popping to decrement the
1241 // reference count
1242 rxFifo.front() = NULL;
1243 rxFifo.pop_front();
1244 }
1245
1246
1247 // don't need the && rxDescCnt > 0 if the driver sanity check above holds
1248 if (rxPktBytes > 0) {
1249 rxState = rxFragWrite;
1250 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity check holds
1251 rxXferLen = rxPktBytes;
1252
1253 rxDmaAddr = rxFragPtr & 0x3fffffff;
1254 rxDmaData = rxPacketBufPtr;
1255 rxDmaLen = rxXferLen;
1256 rxDmaFree = dmaDataFree;
1257
1258 if (doRxDmaWrite())
1259 goto exit;
1260
1261 } else {
1262 rxState = rxDescWrite;
1263
1264 //if (rxPktBytes == 0) { /* packet is done */
1265 assert(rxPktBytes == 0);
1266
1267 rxDescCache.cmdsts |= CMDSTS_OWN;
1268 rxDescCache.cmdsts &= ~CMDSTS_MORE;
1269 rxDescCache.cmdsts |= CMDSTS_OK;
1270 rxDescCache.cmdsts &= 0xffff0000;
1271 rxDescCache.cmdsts += rxPacket->length; //i.e. set CMDSTS_SIZE
1272
1273 #if 0
1274 /* the driver only uses these for its own statistics keeping,
1275 which we don't care about; they aren't necessary for functionality
1276 and setting them would just slow us down. if a later driver version
1277 uses them for functional purposes, re-enable this block
1278 */
1279 if (rxFilterEnable) {
1280 rxDescCache.cmdsts &= ~CMDSTS_DEST_MASK;
1281 if (rxFifo.front()->IsUnicast())
1282 rxDescCache.cmdsts |= CMDSTS_DEST_SELF;
1283 if (rxFifo.front()->IsMulticast())
1284 rxDescCache.cmdsts |= CMDSTS_DEST_MULTI;
1285 if (rxFifo.front()->IsBroadcast())
1286 rxDescCache.cmdsts |= CMDSTS_DEST_MASK;
1287 }
1288 #endif
1289
1290 eth_header *eth = (eth_header *) rxPacket->data;
1291 // eth->type 0x800 indicates that this is an IP packet.
1292 if (eth->type == 0x800 && extstsEnable) {
1293 rxDescCache.extsts |= EXTSTS_IPPKT;
1294 if (!ipChecksum(rxPacket, false))
1295 rxDescCache.extsts |= EXTSTS_IPERR;
1296 ip_header *ip = rxFifo.front()->getIpHdr();
1297
1298 if (ip->protocol == 6) {
1299 rxDescCache.extsts |= EXTSTS_TCPPKT;
1300 if (!tcpChecksum(rxPacket, false))
1301 rxDescCache.extsts |= EXTSTS_TCPERR;
1302 } else if (ip->protocol == 17) {
1303 rxDescCache.extsts |= EXTSTS_UDPPKT;
1304 if (!udpChecksum(rxPacket, false))
1305 rxDescCache.extsts |= EXTSTS_UDPERR;
1306 }
1307 }
1308
1309 rxFifoCnt -= rxPacket->length;
1310 rxPacket = 0;
1311
1312 /* the driver seems to always receive into desc buffers
1313 of size 1514, so you never have a pkt that is split
1314 into multiple descriptors on the receive side, so
1315 i don't implement that case, hence the assert above.
1316 */
1317
1318 DPRINTF(Ethernet, "rxDesc writeback:\n\tcmdsts=%#x\n\textsts=%#x\n",
1319 rxDescCache.cmdsts, rxDescCache.extsts);
1320
1321 rxDmaAddr = (regs.rxdp + offsetof(ns_desc, cmdsts)) & 0x3fffffff;
1322 rxDmaData = &(rxDescCache.cmdsts);
1323 rxDmaLen = sizeof(rxDescCache.cmdsts) + sizeof(rxDescCache.extsts);
1324 rxDmaFree = dmaDescFree;
1325
1326 if (doRxDmaWrite())
1327 goto exit;
1328 }
1329 break;
1330
1331 case rxFragWrite:
1332 if (rxDmaState != dmaIdle)
1333 goto exit;
1334
1335 rxPacketBufPtr += rxXferLen;
1336 rxFragPtr += rxXferLen;
1337 rxPktBytes -= rxXferLen;
1338
1339 rxState = rxFifoBlock;
1340 break;
1341
1342 case rxDescWrite:
1343 if (rxDmaState != dmaIdle)
1344 goto exit;
1345
1346 assert(rxDescCache.cmdsts & CMDSTS_OWN);
1347
1348 assert(rxPacket == 0);
1349 devIntrPost(ISR_RXOK);
1350
1351 if (rxDescCache.cmdsts & CMDSTS_INTR)
1352 devIntrPost(ISR_RXDESC);
1353
1354 if (rxHalt) {
1355 rxState = rxIdle;
1356 rxHalt = false;
1357 } else
1358 rxState = rxAdvance;
1359 break;
1360
1361 case rxAdvance:
1362 if (rxDescCache.link == 0) {
1363 rxState = rxIdle;
1364 return;
1365 } else {
1366 rxState = rxDescRead;
1367 regs.rxdp = rxDescCache.link;
1368 CRDD = false;
1369
1370 rxDmaAddr = regs.rxdp & 0x3fffffff;
1371 rxDmaData = &rxDescCache;
1372 rxDmaLen = sizeof(ns_desc);
1373 rxDmaFree = dmaDescFree;
1374
1375 if (doRxDmaRead())
1376 goto exit;
1377 }
1378 break;
1379
1380 default:
1381 panic("Invalid rxState!");
1382 }
1383
1384
1385 DPRINTF(Ethernet, "entering next rx state = %s\n",
1386 NsRxStateStrings[rxState]);
1387
1388 if (rxState == rxIdle) {
1389 regs.command &= ~CR_RXE;
1390 devIntrPost(ISR_RXIDLE);
1391 return;
1392 }
1393
1394 goto next;
1395
1396 exit:
1397 /**
1398 * @todo do we want to schedule a future kick?
1399 */
1400 DPRINTF(Ethernet, "rx state machine exited state=%s\n",
1401 NsRxStateStrings[rxState]);
1402 }
1403
1404 void
1405 NSGigE::transmit()
1406 {
1407 if (txFifo.empty()) {
1408 DPRINTF(Ethernet, "nothing to transmit\n");
1409 return;
1410 }
1411
1412 if (interface->sendPacket(txFifo.front())) {
1413 DPRINTF(Ethernet, "transmit packet\n");
1414 DDUMP(Ethernet, txFifo.front()->data, txFifo.front()->length);
1415 txBytes += txFifo.front()->length;
1416 txPackets++;
1417
1418 txFifoCnt -= (txFifo.front()->length - txPktXmitted);
1419 txPktXmitted = 0;
1420 txFifo.front() = NULL;
1421 txFifo.pop_front();
1422
1423 /* normally the device would write back the descriptor here, and ONLY after
1424 that completes would it post this interrupt. but since transmission in the
1425 simulator never actually fails, we just post the interrupt here; otherwise
1426 the code would have to stray from this nice format. it's functionally the same.
1427 */
1428 devIntrPost(ISR_TXOK);
1429 }
1430
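// if the peer refused the packet (link busy) or more packets remain in
// the fifo, retry the transmit 1000 ticks from now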
1431 if (!txFifo.empty() && !txEvent.scheduled()) {
1432 DPRINTF(Ethernet, "reschedule transmit\n");
1433 txEvent.schedule(curTick + 1000);
1434 }
1435 }
1436
1437 void
1438 NSGigE::txDmaReadCopy()
1439 {
1440 assert(txDmaState == dmaReading);
1441
1442 memcpy(txDmaData, physmem->dma_addr(txDmaAddr, txDmaLen), txDmaLen);
1443 txDmaState = dmaIdle;
1444
1445 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
1446 txDmaAddr, txDmaLen);
1447 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1448 }
1449
1450 bool
1451 NSGigE::doTxDmaRead()
1452 {
1453 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
1454 txDmaState = dmaReading;
1455
1456 if (dmaInterface && !txDmaFree) {
1457 if (dmaInterface->busy())
1458 txDmaState = dmaReadWaiting;
1459 else
1460 dmaInterface->doDMA(Read, txDmaAddr, txDmaLen, curTick,
1461 &txDmaReadEvent);
1462 return true;
1463 }
1464
1465 if (dmaReadDelay == 0 && dmaReadFactor == 0.0) {
1466 txDmaReadCopy();
1467 return false;
1468 }
1469
1470 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
1471 Tick start = curTick + dmaReadDelay + factor;
1472 txDmaReadEvent.schedule(start);
1473 return true;
1474 }
1475
1476 void
1477 NSGigE::txDmaReadDone()
1478 {
1479 assert(txDmaState == dmaReading);
1480 txDmaReadCopy();
1481
1482 // If the receive state machine has a pending DMA, let it go first
1483 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1484 rxKick();
1485
1486 txKick();
1487 }
1488
1489 void
1490 NSGigE::txDmaWriteCopy()
1491 {
1492 assert(txDmaState == dmaWriting);
1493
1494 memcpy(physmem->dma_addr(txDmaAddr, txDmaLen), txDmaData, txDmaLen);
1495 txDmaState = dmaIdle;
1496
1497 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
1498 txDmaAddr, txDmaLen);
1499 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1500 }
1501
1502 bool
1503 NSGigE::doTxDmaWrite()
1504 {
1505 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
1506 txDmaState = dmaWriting;
1507
1508 if (dmaInterface && !txDmaFree) {
1509 if (dmaInterface->busy())
1510 txDmaState = dmaWriteWaiting;
1511 else
1512 dmaInterface->doDMA(WriteInvalidate, txDmaAddr, txDmaLen, curTick,
1513 &txDmaWriteEvent);
1514 return true;
1515 }
1516
1517 if (dmaWriteDelay == 0 && dmaWriteFactor == 0.0) {
1518 txDmaWriteCopy();
1519 return false;
1520 }
1521
1522 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
1523 Tick start = curTick + dmaWriteDelay + factor;
1524 txDmaWriteEvent.schedule(start);
1525 return true;
1526 }
1527
1528 void
1529 NSGigE::txDmaWriteDone()
1530 {
1531 assert(txDmaState == dmaWriting);
1532 txDmaWriteCopy();
1533
1534 // If the receive state machine has a pending DMA, let it go first
1535 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1536 rxKick();
1537
1538 txKick();
1539 }
1540
1541 void
1542 NSGigE::txKick()
1543 {
1544 DPRINTF(Ethernet, "transmit kick state=%s\n", NsTxStateStrings[txState]);
1545
1546 if (txKickTick > curTick) {
1547 DPRINTF(Ethernet, "transmit kick exiting, can't run till %d\n",
1548 txKickTick);
1549
1550 return;
1551 }
1552
1553 next:
1554 switch(txDmaState) {
1555 case dmaReadWaiting:
1556 if (doTxDmaRead())
1557 goto exit;
1558 break;
1559 case dmaWriteWaiting:
1560 if (doTxDmaWrite())
1561 goto exit;
1562 break;
1563 default:
1564 break;
1565 }
1566
1567 switch (txState) {
1568 case txIdle:
1569 if (!(regs.command & CR_TXE)) {
1570 DPRINTF(Ethernet, "Transmit disabled. Nothing to do.\n");
1571 goto exit;
1572 }
1573
1574 if (CTDD) {
1575 txState = txDescRefr;
1576
1577 txDmaAddr = regs.txdp & 0x3fffffff;
1578 txDmaData = &txDescCache.link;
1579 txDmaLen = sizeof(txDescCache.link);
1580 txDmaFree = dmaDescFree;
1581
1582 if (doTxDmaRead())
1583 goto exit;
1584
1585 } else {
1586 txState = txDescRead;
1587
1588 txDmaAddr = regs.txdp & 0x3fffffff;
1589 txDmaData = &txDescCache;
1590 txDmaLen = sizeof(ns_desc);
1591 txDmaFree = dmaDescFree;
1592
1593 if (doTxDmaRead())
1594 goto exit;
1595 }
1596 break;
1597
1598 case txDescRefr:
1599 if (txDmaState != dmaIdle)
1600 goto exit;
1601
1602 txState = txAdvance;
1603 break;
1604
1605 case txDescRead:
1606 if (txDmaState != dmaIdle)
1607 goto exit;
1608
1609 DPRINTF(Ethernet,
1610 "txDescCache data:\n\tlink=%#x\n\tbufptr=%#x\n\tcmdsts=%#x\n\textsts=%#x\n"
1611 ,txDescCache.link, txDescCache.bufptr, txDescCache.cmdsts,
1612 txDescCache.extsts);
1613
1614 if (txDescCache.cmdsts & CMDSTS_OWN) {
1615 txState = txFifoBlock;
1616 txFragPtr = txDescCache.bufptr;
1617 txDescCnt = txDescCache.cmdsts & CMDSTS_LEN_MASK;
1618 } else {
1619 txState = txIdle;
1620 }
1621 break;
1622
1623 case txFifoBlock:
1624 if (!txPacket) {
1625 DPRINTF(Ethernet, "starting the tx of a new packet\n");
1626 txPacket = new EtherPacket;
1627 txPacket->data = new uint8_t[16384];
1628 txPacketBufPtr = txPacket->data;
1629 }
1630
1631 if (txDescCnt == 0) {
1632 DPRINTF(Ethernet, "the txDescCnt == 0, done with descriptor\n");
1633 if (txDescCache.cmdsts & CMDSTS_MORE) {
1634 DPRINTF(Ethernet, "there are more descriptors to come\n");
1635 txState = txDescWrite;
1636
1637 txDescCache.cmdsts &= ~CMDSTS_OWN;
1638
1639 txDmaAddr = (regs.txdp + offsetof(ns_desc, cmdsts)) & 0x3fffffff;
1640 txDmaData = &(txDescCache.cmdsts);
1641 txDmaLen = sizeof(txDescCache.cmdsts);
1642 txDmaFree = dmaDescFree;
1643
1644 if (doTxDmaWrite())
1645 goto exit;
1646
1647 } else { /* this packet is totally done */
1648 DPRINTF(Ethernet, "This packet is done, let's wrap it up\n");
1649 /* deal with the packet that just finished */
1650 if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
1651 if (txDescCache.extsts & EXTSTS_UDPPKT) {
1652 udpChecksum(txPacket, true);
1653 } else if (txDescCache.extsts & EXTSTS_TCPPKT) {
1654 tcpChecksum(txPacket, true);
1655 } else if (txDescCache.extsts & EXTSTS_IPPKT) {
1656 ipChecksum(txPacket, true);
1657 }
1658 }
1659
1660 txPacket->length = txPacketBufPtr - txPacket->data;
1661 /* this is just because the receive side can't handle a packet bigger
1662 than 1514 bytes, so make sure we never send one */
1663 assert(txPacket->length <= 1514);
1664 txFifo.push_back(txPacket);
1665
1666
1667 /* the following section is not to spec, but functionally it shouldn't
1668 make any difference. normally, the chip waits until the transmit has
1669 occurred before writing back the descriptor, because it has to see
1670 whether the packet was successfully transmitted in order to decide
1671 whether to set CMDSTS_OK. however, in the simulator transmission
1672 always succeeds, and writing this exactly to spec would complicate
1673 the code, so we just do the writeback here
1674 */
1675 txDescCache.cmdsts &= ~CMDSTS_OWN;
1676 txDescCache.cmdsts |= CMDSTS_OK;
1677
1678 DPRINTF(Ethernet,
1679 "txDesc writeback:\n\tcmdsts=%#x\n\textsts=%#x\n",
1680 txDescCache.cmdsts, txDescCache.extsts);
1681
1682 txDmaAddr = (regs.txdp + offsetof(ns_desc, cmdsts)) & 0x3fffffff;
1683 txDmaData = &(txDescCache.cmdsts);
1684 txDmaLen = sizeof(txDescCache.cmdsts) + sizeof(txDescCache.extsts);
1685 txDmaFree = dmaDescFree;
1686
1687 if (doTxDmaWrite())
1688 goto exit;
1689
1690 txPacket = 0;
1691 transmit();
1692
1693 if (txHalt) {
1694 txState = txIdle;
1695 txHalt = false;
1696 } else
1697 txState = txAdvance;
1698 }
1699 } else {
1700 DPRINTF(Ethernet, "this descriptor isn't done yet\n");
1701 /* the fill threshold field is in units of 32 bytes: shift right by 8 to
1702 extract the field, then left by 5 to convert to bytes (net effect: >> 3 below) */
1703 if (txFifoAvail < ((regs.txcfg & TXCFG_FLTH_MASK) >> 3)) {
1704 DPRINTF(Ethernet, "txFifoAvail=%d, regs.txcfg & TXCFG_FLTH_MASK = %#x\n",
1705 txFifoAvail, regs.txcfg & TXCFG_FLTH_MASK);
1706 goto exit;
1707 }
1708
1709 txState = txFragRead;
1710
1711 /* The number of bytes transferred is either whatever is left
1712 in the descriptor (txDescCnt), or if there is not enough
1713 room in the fifo, just whatever room is left in the fifo
1714 */
1715 txXferLen = min<uint32_t>(txDescCnt, txFifoAvail);
1716
1717 txDmaAddr = txFragPtr & 0x3fffffff;
1718 txDmaData = txPacketBufPtr;
1719 txDmaLen = txXferLen;
1720 txDmaFree = dmaDataFree;
1721
1722 if (doTxDmaRead())
1723 goto exit;
1724 }
1725 break;
1726
1727 case txFragRead:
1728 if (txDmaState != dmaIdle)
1729 goto exit;
1730
1731 txPacketBufPtr += txXferLen;
1732 txFragPtr += txXferLen;
1733 txFifoCnt += txXferLen;
1734 txDescCnt -= txXferLen;
1735
1736 txState = txFifoBlock;
1737 break;
1738
1739 case txDescWrite:
1740 if (txDmaState != dmaIdle)
1741 goto exit;
1742
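// the drain threshold (DRTH) is in 32-byte units; once the tx fifo holds
// that many bytes the chip may start draining it: transmit if a complete
// packet is queued, otherwise account for the partial packet's bytes as
// already drained (via txPktXmitted)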
1743 if (txFifoCnt >= ((regs.txcfg & TXCFG_DRTH_MASK) << 5)) {
1744 if (txFifo.empty()) {
1745 uint32_t xmitted = (uint32_t) (txPacketBufPtr - txPacket->data - txPktXmitted);
1746 txFifoCnt -= xmitted;
1747 txPktXmitted += xmitted;
1748 } else {
1749 transmit();
1750 }
1751 }
1752
1753 if (txDescCache.cmdsts & CMDSTS_INTR) {
1754 devIntrPost(ISR_TXDESC);
1755 }
1756
1757 txState = txAdvance;
1758 break;
1759
1760 case txAdvance:
1761 if (txDescCache.link == 0) {
1762 txState = txIdle;
1763 } else {
1764 txState = txDescRead;
1765 regs.txdp = txDescCache.link;
1766 CTDD = false;
1767
1768 txDmaAddr = txDescCache.link & 0x3fffffff;
1769 txDmaData = &txDescCache;
1770 txDmaLen = sizeof(ns_desc);
1771 txDmaFree = dmaDescFree;
1772
1773 if (doTxDmaRead())
1774 goto exit;
1775 }
1776 break;
1777
1778 default:
1779 panic("invalid state");
1780 }
1781
1782 DPRINTF(Ethernet, "entering next tx state=%s\n",
1783 NsTxStateStrings[txState]);
1784
1785 if (txState == txIdle) {
1786 regs.command &= ~CR_TXE;
1787 devIntrPost(ISR_TXIDLE);
1788 return;
1789 }
1790
1791 goto next;
1792
1793 exit:
1794 /**
1795 * @todo do we want to schedule a future kick?
1796 */
1797 DPRINTF(Ethernet, "tx state machine exited state=%s\n",
1798 NsTxStateStrings[txState]);
1799 }
1800
1801 void
1802 NSGigE::transferDone()
1803 {
1804 if (txFifo.empty())
1805 return;
1806
1807 DPRINTF(Ethernet, "schedule transmit\n");
1808
1809 if (txEvent.scheduled())
1810 txEvent.reschedule(curTick + 1);
1811 else
1812 txEvent.schedule(curTick + 1);
1813 }
1814
1815 bool
1816 NSGigE::rxFilter(PacketPtr packet)
1817 {
1818 bool drop = true;
1819 string type;
1820
1821 if (packet->IsUnicast()) {
1822 type = "unicast";
1823
1824 // If we're accepting all unicast addresses
1825 if (acceptUnicast)
1826 drop = false;
1827
1828 // If we make a perfect match
1829 if ((acceptPerfect)
1830 && (memcmp(rom.perfectMatch, packet->data, sizeof(rom.perfectMatch)) == 0))
1831 drop = false;
1832
1833 eth_header *eth = (eth_header *) packet->data;
1834 if ((acceptArp) && (eth->type == 0x806))
1835 drop = false;
1836
1837 } else if (packet->IsBroadcast()) {
1838 type = "broadcast";
1839
1840 // if we're accepting broadcasts
1841 if (acceptBroadcast)
1842 drop = false;
1843
1844 } else if (packet->IsMulticast()) {
1845 type = "multicast";
1846
1847 // if we're accepting all multicasts
1848 if (acceptMulticast)
1849 drop = false;
1850
1851 } else {
1852 type = "unknown";
1853
1854 // oh well, punt on this one
1855 }
1856
1857 if (drop) {
1858 DPRINTF(Ethernet, "rxFilter drop\n");
1859 DDUMP(EthernetData, packet->data, packet->length);
1860 }
1861
1862 return drop;
1863 }
1864
1865 bool
1866 NSGigE::recvPacket(PacketPtr packet)
1867 {
1868 rxBytes += packet->length;
1869 rxPackets++;
1870
1871 if (rxState == rxIdle) {
1872 DPRINTF(Ethernet, "receive disabled...packet dropped\n");
1873 interface->recvDone();
1874 return true;
1875 }
1876
1877 if (rxFilterEnable && rxFilter(packet)) {
1878 DPRINTF(Ethernet, "packet filtered...dropped\n");
1879 interface->recvDone();
1880 return true;
1881 }
1882
1883 if (rxFifoCnt + packet->length >= MAX_RX_FIFO_SIZE) {
1884 DPRINTF(Ethernet,
1885 "packet will not fit in receive buffer...packet dropped\n");
1886 devIntrPost(ISR_RXORN);
1887 return false;
1888 }
1889
1890 rxFifo.push_back(packet);
1891 rxFifoCnt += packet->length;
1892 interface->recvDone();
1893
1894 rxKick();
1895 return true;
1896 }
1897
1898 /**
1899 * Computes a UDP checksum. If gen is true, the checksum is generated and stored
1900 * in the header; otherwise the computed value is checked against the one already in the packet.
1901 */
1902 bool
1903 NSGigE::udpChecksum(PacketPtr packet, bool gen)
1904 {
1905 udp_header *hdr = (udp_header *) packet->getTransportHdr();
1906
1907 ip_header *ip = packet->getIpHdr();
1908
1909 pseudo_header *pseudo = new pseudo_header;
1910
1911 pseudo->src_ip_addr = ip->src_ip_addr;
1912 pseudo->dest_ip_addr = ip->dest_ip_addr;
1913 pseudo->protocol = ip->protocol;
1914 pseudo->len = hdr->len;
1915
1916 uint16_t cksum = checksumCalc((uint16_t *) pseudo, (uint16_t *) hdr,
1917 (uint32_t) hdr->len);
1918
1919 delete pseudo;
1920 if (gen)
1921 hdr->chksum = cksum;
1922 else
1923 if (cksum != 0)
1924 return false;
1925
1926 return true;
1927 }
1928
1929 bool
1930 NSGigE::tcpChecksum(PacketPtr packet, bool gen)
1931 {
1932 tcp_header *hdr = (tcp_header *) packet->getTransportHdr();
1933
1934 ip_header *ip = packet->getIpHdr();
1935
1936 pseudo_header *pseudo = new pseudo_header;
1937
1938 pseudo->src_ip_addr = ip->src_ip_addr;
1939 pseudo->dest_ip_addr = ip->dest_ip_addr;
1940 pseudo->protocol = ip->protocol;
1941 pseudo->len = ip->dgram_len - (ip->vers_len & 0xf);
1942
1943 uint16_t cksum = checksumCalc((uint16_t *) pseudo, (uint16_t *) hdr,
1944 (uint32_t) pseudo->len);
1945
1946 delete pseudo;
1947 if (gen)
1948 hdr->chksum = cksum;
1949 else
1950 if (cksum != 0)
1951 return false;
1952
1953 return true;
1954 }
1955
1956 bool
1957 NSGigE::ipChecksum(PacketPtr packet, bool gen)
1958 {
1959 ip_header *hdr = packet->getIpHdr();
1960
1961 uint16_t cksum = checksumCalc(NULL, (uint16_t *) hdr, (hdr->vers_len & 0xf) << 2); // IHL is in 32-bit words
1962
1963 if (gen)
1964 hdr->hdr_chksum = cksum;
1965 else
1966 if (cksum != 0)
1967 return false;
1968
1969 return true;
1970 }
1971
1972 uint16_t
1973 NSGigE::checksumCalc(uint16_t *pseudo, uint16_t *buf, uint32_t len)
1974 {
1975 uint32_t sum = 0;
1976
1977 uint16_t last_pad = 0;
1978 if (len & 1) {
1979 last_pad = buf[len/2] & 0xff;
1980 len--;
1981 sum += last_pad;
1982 }
1983
1984 if (pseudo) {
1985 sum += pseudo[0] + pseudo[1] + pseudo[2] +
1986 pseudo[3] + pseudo[4] + pseudo[5];
1987 }
1988
1989 for (int i=0; i < (len/2); ++i) {
1990 sum += buf[i];
1991 }
1992
1993 while (sum >> 16)
1994 sum = (sum >> 16) + (sum & 0xffff);
1995
1996 return ~sum;
1997 }
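// Worked example (illustration only): for buf = {0x1234, 0x5678} and no
// pseudo-header, sum = 0x68ac, no carry needs folding, and the function
// returns ~0x68ac = 0x9753.  A header that already contains a correct
// checksum sums to 0xffff and so returns 0, which is what the checking
// callers above test for.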
1998
1999 //=====================================================================
2000 //
2001 //
2002 void
2003 NSGigE::serialize(ostream &os)
2004 {
2005 /*
2006 * Finalize any DMA events now.
2007 */
2008 if (rxDmaReadEvent.scheduled())
2009 rxDmaReadCopy();
2010 if (rxDmaWriteEvent.scheduled())
2011 rxDmaWriteCopy();
2012 if (txDmaReadEvent.scheduled())
2013 txDmaReadCopy();
2014 if (txDmaWriteEvent.scheduled())
2015 txDmaWriteCopy();
2016
2017 /*
2018 * Serialize the device registers
2019 */
2020 SERIALIZE_SCALAR(regs.command);
2021 SERIALIZE_SCALAR(regs.config);
2022 SERIALIZE_SCALAR(regs.mear);
2023 SERIALIZE_SCALAR(regs.ptscr);
2024 SERIALIZE_SCALAR(regs.isr);
2025 SERIALIZE_SCALAR(regs.imr);
2026 SERIALIZE_SCALAR(regs.ier);
2027 SERIALIZE_SCALAR(regs.ihr);
2028 SERIALIZE_SCALAR(regs.txdp);
2029 SERIALIZE_SCALAR(regs.txdp_hi);
2030 SERIALIZE_SCALAR(regs.txcfg);
2031 SERIALIZE_SCALAR(regs.gpior);
2032 SERIALIZE_SCALAR(regs.rxdp);
2033 SERIALIZE_SCALAR(regs.rxdp_hi);
2034 SERIALIZE_SCALAR(regs.rxcfg);
2035 SERIALIZE_SCALAR(regs.pqcr);
2036 SERIALIZE_SCALAR(regs.wcsr);
2037 SERIALIZE_SCALAR(regs.pcr);
2038 SERIALIZE_SCALAR(regs.rfcr);
2039 SERIALIZE_SCALAR(regs.rfdr);
2040 SERIALIZE_SCALAR(regs.srr);
2041 SERIALIZE_SCALAR(regs.mibc);
2042 SERIALIZE_SCALAR(regs.vrcr);
2043 SERIALIZE_SCALAR(regs.vtcr);
2044 SERIALIZE_SCALAR(regs.vdr);
2045 SERIALIZE_SCALAR(regs.ccsr);
2046 SERIALIZE_SCALAR(regs.tbicr);
2047 SERIALIZE_SCALAR(regs.tbisr);
2048 SERIALIZE_SCALAR(regs.tanar);
2049 SERIALIZE_SCALAR(regs.tanlpar);
2050 SERIALIZE_SCALAR(regs.taner);
2051 SERIALIZE_SCALAR(regs.tesr);
2052
2053 SERIALIZE_ARRAY(rom.perfectMatch, EADDR_LEN);
2054
2055 /*
2056 * Serialize the various helper variables
2057 */
2058 uint32_t txPktBufPtr = (uint32_t) txPacketBufPtr;
2059 SERIALIZE_SCALAR(txPktBufPtr);
2060 uint32_t rxPktBufPtr = (uint32_t) rxPacketBufPtr;
2061 SERIALIZE_SCALAR(rxPktBufPtr);
2062 SERIALIZE_SCALAR(txXferLen);
2063 SERIALIZE_SCALAR(rxXferLen);
2064 SERIALIZE_SCALAR(txPktXmitted);
2065
2066 bool txPacketExists = txPacket;
2067 SERIALIZE_SCALAR(txPacketExists);
2068 bool rxPacketExists = rxPacket;
2069 SERIALIZE_SCALAR(rxPacketExists);
2070
2071 /*
2072 * Serialize DescCaches
2073 */
2074 SERIALIZE_SCALAR(txDescCache.link);
2075 SERIALIZE_SCALAR(txDescCache.bufptr);
2076 SERIALIZE_SCALAR(txDescCache.cmdsts);
2077 SERIALIZE_SCALAR(txDescCache.extsts);
2078 SERIALIZE_SCALAR(rxDescCache.link);
2079 SERIALIZE_SCALAR(rxDescCache.bufptr);
2080 SERIALIZE_SCALAR(rxDescCache.cmdsts);
2081 SERIALIZE_SCALAR(rxDescCache.extsts);
2082
2083 /*
2084 * Serialize tx state machine
2085 */
2086 int txNumPkts = txFifo.size();
2087 SERIALIZE_SCALAR(txNumPkts);
2088 int txState = this->txState;
2089 SERIALIZE_SCALAR(txState);
2090 SERIALIZE_SCALAR(CTDD);
2091 SERIALIZE_SCALAR(txFifoCnt);
2092 SERIALIZE_SCALAR(txFifoAvail);
2093 SERIALIZE_SCALAR(txHalt);
2094 SERIALIZE_SCALAR(txFragPtr);
2095 SERIALIZE_SCALAR(txDescCnt);
2096 int txDmaState = this->txDmaState;
2097 SERIALIZE_SCALAR(txDmaState);
2098
2099 /*
2100 * Serialize rx state machine
2101 */
2102 int rxNumPkts = rxFifo.size();
2103 SERIALIZE_SCALAR(rxNumPkts);
2104 int rxState = this->rxState;
2105 SERIALIZE_SCALAR(rxState);
2106 SERIALIZE_SCALAR(CRDD);
2107 SERIALIZE_SCALAR(rxPktBytes);
2108 SERIALIZE_SCALAR(rxFifoCnt);
2109 SERIALIZE_SCALAR(rxHalt);
2110 SERIALIZE_SCALAR(rxDescCnt);
2111 int rxDmaState = this->rxDmaState;
2112 SERIALIZE_SCALAR(rxDmaState);
2113
2114 SERIALIZE_SCALAR(extstsEnable);
2115
2116 /*
2117 * If there's a pending transmit, store the time so we can
2118 * reschedule it later
2119 */
2120 Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
2121 SERIALIZE_SCALAR(transmitTick);
2122
2123 /*
2124 * Keep track of pending interrupt status.
2125 */
2126 SERIALIZE_SCALAR(intrTick);
2127 SERIALIZE_SCALAR(cpuPendingIntr);
2128 Tick intrEventTick = 0;
2129 if (intrEvent)
2130 intrEventTick = intrEvent->when();
2131 SERIALIZE_SCALAR(intrEventTick);
2132
2133 int i = 0;
2134 for (pktiter_t p = rxFifo.begin(); p != rxFifo.end(); ++p) {
2135 nameOut(os, csprintf("%s.rxFifo%d", name(), i++));
2136 (*p)->serialize(os);
2137 }
2138 if (rxPacketExists) {
2139 nameOut(os, csprintf("%s.rxPacket", name()));
2140 rxPacket->serialize(os);
2141 }
2142 i = 0;
2143 for (pktiter_t p = txFifo.begin(); p != txFifo.end(); ++p) {
2144 nameOut(os, csprintf("%s.txFifo%d", name(), i++));
2145 (*p)->serialize(os);
2146 }
2147 if (txPacketExists) {
2148 nameOut(os, csprintf("%s.txPacket", name()));
2149 txPacket->serialize(os);
2150 }
2151 }
2152
2153 void
2154 NSGigE::unserialize(Checkpoint *cp, const std::string &section)
2155 {
2156 UNSERIALIZE_SCALAR(regs.command);
2157 UNSERIALIZE_SCALAR(regs.config);
2158 UNSERIALIZE_SCALAR(regs.mear);
2159 UNSERIALIZE_SCALAR(regs.ptscr);
2160 UNSERIALIZE_SCALAR(regs.isr);
2161 UNSERIALIZE_SCALAR(regs.imr);
2162 UNSERIALIZE_SCALAR(regs.ier);
2163 UNSERIALIZE_SCALAR(regs.ihr);
2164 UNSERIALIZE_SCALAR(regs.txdp);
2165 UNSERIALIZE_SCALAR(regs.txdp_hi);
2166 UNSERIALIZE_SCALAR(regs.txcfg);
2167 UNSERIALIZE_SCALAR(regs.gpior);
2168 UNSERIALIZE_SCALAR(regs.rxdp);
2169 UNSERIALIZE_SCALAR(regs.rxdp_hi);
2170 UNSERIALIZE_SCALAR(regs.rxcfg);
2171 UNSERIALIZE_SCALAR(regs.pqcr);
2172 UNSERIALIZE_SCALAR(regs.wcsr);
2173 UNSERIALIZE_SCALAR(regs.pcr);
2174 UNSERIALIZE_SCALAR(regs.rfcr);
2175 UNSERIALIZE_SCALAR(regs.rfdr);
2176 UNSERIALIZE_SCALAR(regs.srr);
2177 UNSERIALIZE_SCALAR(regs.mibc);
2178 UNSERIALIZE_SCALAR(regs.vrcr);
2179 UNSERIALIZE_SCALAR(regs.vtcr);
2180 UNSERIALIZE_SCALAR(regs.vdr);
2181 UNSERIALIZE_SCALAR(regs.ccsr);
2182 UNSERIALIZE_SCALAR(regs.tbicr);
2183 UNSERIALIZE_SCALAR(regs.tbisr);
2184 UNSERIALIZE_SCALAR(regs.tanar);
2185 UNSERIALIZE_SCALAR(regs.tanlpar);
2186 UNSERIALIZE_SCALAR(regs.taner);
2187 UNSERIALIZE_SCALAR(regs.tesr);
2188
2189 UNSERIALIZE_ARRAY(rom.perfectMatch, EADDR_LEN);
2190
2191 /*
2192 * unserialize the various helper variables
2193 */
2194 uint32_t txPktBufPtr;
2195 UNSERIALIZE_SCALAR(txPktBufPtr);
2196 txPacketBufPtr = (uint8_t *) txPktBufPtr;
2197 uint32_t rxPktBufPtr;
2198 UNSERIALIZE_SCALAR(rxPktBufPtr);
2199 rxPacketBufPtr = (uint8_t *) rxPktBufPtr;
2200 UNSERIALIZE_SCALAR(txXferLen);
2201 UNSERIALIZE_SCALAR(rxXferLen);
2202 UNSERIALIZE_SCALAR(txPktXmitted);
2203
2204 bool txPacketExists;
2205 UNSERIALIZE_SCALAR(txPacketExists);
2206 bool rxPacketExists;
2207 UNSERIALIZE_SCALAR(rxPacketExists);
2208
2209 /*
2210 * Unserialize DescCaches
2211 */
2212 UNSERIALIZE_SCALAR(txDescCache.link);
2213 UNSERIALIZE_SCALAR(txDescCache.bufptr);
2214 UNSERIALIZE_SCALAR(txDescCache.cmdsts);
2215 UNSERIALIZE_SCALAR(txDescCache.extsts);
2216 UNSERIALIZE_SCALAR(rxDescCache.link);
2217 UNSERIALIZE_SCALAR(rxDescCache.bufptr);
2218 UNSERIALIZE_SCALAR(rxDescCache.cmdsts);
2219 UNSERIALIZE_SCALAR(rxDescCache.extsts);
2220
2221 /*
2222 * unserialize tx state machine
2223 */
2224 int txNumPkts;
2225 UNSERIALIZE_SCALAR(txNumPkts);
2226 int txState;
2227 UNSERIALIZE_SCALAR(txState);
2228 this->txState = (TxState) txState;
2229 UNSERIALIZE_SCALAR(CTDD);
2230 UNSERIALIZE_SCALAR(txFifoCnt);
2231 UNSERIALIZE_SCALAR(txFifoAvail);
2232 UNSERIALIZE_SCALAR(txHalt);
2233 UNSERIALIZE_SCALAR(txFragPtr);
2234 UNSERIALIZE_SCALAR(txDescCnt);
2235 int txDmaState;
2236 UNSERIALIZE_SCALAR(txDmaState);
2237 this->txDmaState = (DmaState) txDmaState;
2238
2239 /*
2240 * unserialize rx state machine
2241 */
2242 int rxNumPkts;
2243 UNSERIALIZE_SCALAR(rxNumPkts);
2244 int rxState;
2245 UNSERIALIZE_SCALAR(rxState);
2246 this->rxState = (RxState) rxState;
2247 UNSERIALIZE_SCALAR(CRDD);
2248 UNSERIALIZE_SCALAR(rxPktBytes);
2249 UNSERIALIZE_SCALAR(rxFifoCnt);
2250 UNSERIALIZE_SCALAR(rxHalt);
2251 UNSERIALIZE_SCALAR(rxDescCnt);
2252 int rxDmaState;
2253 UNSERIALIZE_SCALAR(rxDmaState);
2254 this->rxDmaState = (DmaState) rxDmaState;
2255
2256 UNSERIALIZE_SCALAR(extstsEnable);
2257
2258 /*
2259 * If there was a pending transmit at checkpoint time, reschedule it
2260 * now, the same distance into the future
2261 */
2262 Tick transmitTick;
2263 UNSERIALIZE_SCALAR(transmitTick);
2264 if (transmitTick)
2265 txEvent.schedule(curTick + transmitTick);
2266
2267 /*
2268 * Keep track of pending interrupt status.
2269 */
2270 UNSERIALIZE_SCALAR(intrTick);
2271 UNSERIALIZE_SCALAR(cpuPendingIntr);
2272 Tick intrEventTick;
2273 UNSERIALIZE_SCALAR(intrEventTick);
2274 if (intrEventTick) {
2275 intrEvent = new IntrEvent(this, true);
2276 intrEvent->schedule(intrEventTick);
2277 }
2278
2279 for (int i = 0; i < rxNumPkts; ++i) {
2280 PacketPtr p = new EtherPacket;
2281 p->unserialize(cp, csprintf("%s.rxFifo%d", section, i));
2282 rxFifo.push_back(p);
2283 }
2284 rxPacket = NULL;
2285 if (rxPacketExists) {
2286 rxPacket = new EtherPacket;
2287 rxPacket->unserialize(cp, csprintf("%s.rxPacket", section));
2288 }
2289 for (int i = 0; i < txNumPkts; ++i) {
2290 PacketPtr p = new EtherPacket;
2291 p->unserialize(cp, csprintf("%s.txFifo%d", section, i));
2292 txFifo.push_back(p);
2293 }
2294 if (txPacketExists) {
2295 txPacket = new EtherPacket;
2296 txPacket->unserialize(cp, csprintf("%s.txPacket", section));
2297 }
2298 }
2299
2300
2301 Tick
2302 NSGigE::cacheAccess(MemReqPtr &req)
2303 {
2304 DPRINTF(EthernetPIO, "timing access to paddr=%#x (daddr=%#x)\n",
2305 req->paddr, req->paddr - addr);
2306 return curTick + pioLatency;
2307 }
2308 //=====================================================================
2309
2310
2311 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)
2312
2313 SimObjectParam<EtherInt *> peer;
2314 SimObjectParam<NSGigE *> device;
2315
2316 END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)
2317
2318 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt)
2319
2320 INIT_PARAM_DFLT(peer, "peer interface", NULL),
2321 INIT_PARAM(device, "Ethernet device of this interface")
2322
2323 END_INIT_SIM_OBJECT_PARAMS(NSGigEInt)
2324
2325 CREATE_SIM_OBJECT(NSGigEInt)
2326 {
2327 NSGigEInt *dev_int = new NSGigEInt(getInstanceName(), device);
2328
2329 EtherInt *p = (EtherInt *)peer;
2330 if (p) {
2331 dev_int->setPeer(p);
2332 p->setPeer(dev_int);
2333 }
2334
2335 return dev_int;
2336 }
2337
2338 REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt)
2339
2340
2341 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE)
2342
2343 Param<Tick> tx_delay;
2344 Param<Tick> rx_delay;
2345 SimObjectParam<IntrControl *> intr_ctrl;
2346 Param<Tick> intr_delay;
2347 SimObjectParam<MemoryController *> mmu;
2348 SimObjectParam<PhysicalMemory *> physmem;
2349 Param<Addr> addr;
2350 Param<bool> rx_filter;
2351 Param<string> hardware_address;
2352 SimObjectParam<Bus*> header_bus;
2353 SimObjectParam<Bus*> payload_bus;
2354 SimObjectParam<HierParams *> hier;
2355 Param<Tick> pio_latency;
2356 Param<bool> dma_desc_free;
2357 Param<bool> dma_data_free;
2358 Param<Tick> dma_read_delay;
2359 Param<Tick> dma_write_delay;
2360 Param<Tick> dma_read_factor;
2361 Param<Tick> dma_write_factor;
2362 SimObjectParam<PciConfigAll *> configspace;
2363 SimObjectParam<PciConfigData *> configdata;
2364 SimObjectParam<Tsunami *> tsunami;
2365 Param<uint32_t> pci_bus;
2366 Param<uint32_t> pci_dev;
2367 Param<uint32_t> pci_func;
2368
2369 END_DECLARE_SIM_OBJECT_PARAMS(NSGigE)
2370
2371 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE)
2372
2373 INIT_PARAM_DFLT(tx_delay, "Transmit Delay", 1000),
2374 INIT_PARAM_DFLT(rx_delay, "Receive Delay", 1000),
2375 INIT_PARAM(intr_ctrl, "Interrupt Controller"),
2376 INIT_PARAM_DFLT(intr_delay, "Interrupt Delay in microseconds", 0),
2377 INIT_PARAM(mmu, "Memory Controller"),
2378 INIT_PARAM(physmem, "Physical Memory"),
2379 INIT_PARAM(addr, "Device Address"),
2380 INIT_PARAM_DFLT(rx_filter, "Enable Receive Filter", true),
2381 INIT_PARAM_DFLT(hardware_address, "Ethernet Hardware Address",
2382 "00:99:00:00:00:01"),
2383 INIT_PARAM_DFLT(header_bus, "The IO Bus to attach to for headers", NULL),
2384 INIT_PARAM_DFLT(payload_bus, "The IO Bus to attach to for payload", NULL),
2385 INIT_PARAM_DFLT(hier, "Hierarchy global variables", &defaultHierParams),
2386 INIT_PARAM_DFLT(pio_latency, "Programmed IO latency", 1000),
2387 INIT_PARAM_DFLT(dma_desc_free, "DMA of Descriptors is free", false),
2388 INIT_PARAM_DFLT(dma_data_free, "DMA of Data is free", false),
2389 INIT_PARAM_DFLT(dma_read_delay, "fixed delay for dma reads", 0),
2390 INIT_PARAM_DFLT(dma_write_delay, "fixed delay for dma writes", 0),
2391 INIT_PARAM_DFLT(dma_read_factor, "multiplier for dma reads", 0),
2392 INIT_PARAM_DFLT(dma_write_factor, "multiplier for dma writes", 0),
2393 INIT_PARAM(configspace, "PCI Configspace"),
2394 INIT_PARAM(configdata, "PCI Config data"),
2395 INIT_PARAM(tsunami, "Tsunami"),
2396 INIT_PARAM(pci_bus, "PCI bus"),
2397 INIT_PARAM(pci_dev, "PCI device number"),
2398 INIT_PARAM(pci_func, "PCI function code")
2399
2400 END_INIT_SIM_OBJECT_PARAMS(NSGigE)
2401
2402
2403 CREATE_SIM_OBJECT(NSGigE)
2404 {
2405 int eaddr[6];
2406 sscanf(((string)hardware_address).c_str(), "%x:%x:%x:%x:%x:%x",
2407 &eaddr[0], &eaddr[1], &eaddr[2], &eaddr[3], &eaddr[4], &eaddr[5]);
2408
2409 return new NSGigE(getInstanceName(), intr_ctrl, intr_delay,
2410 physmem, tx_delay, rx_delay, mmu, hier, header_bus,
2411 payload_bus, pio_latency, dma_desc_free, dma_data_free,
2412 dma_read_delay, dma_write_delay, dma_read_factor,
2413 dma_write_factor, configspace, configdata,
2414 tsunami, pci_bus, pci_dev, pci_func, rx_filter, eaddr,
2415 addr);
2416 }
2417
2418 REGISTER_SIM_OBJECT("NSGigE", NSGigE)