2 * Copyright (c) 2004 The Regents of The University of Michigan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 * Device module for modelling the National Semiconductor
31 * DP83820 ethernet controller. Does not support priority queueing
37 #include "base/inet.hh"
38 #include "cpu/exec_context.hh"
39 #include "cpu/intr_control.hh"
41 #include "dev/etherlink.hh"
42 #include "dev/ns_gige.hh"
43 #include "dev/pciconfigall.hh"
44 #include "mem/bus/bus.hh"
45 #include "mem/bus/dma_interface.hh"
46 #include "mem/bus/pio_interface.hh"
47 #include "mem/bus/pio_interface_impl.hh"
48 #include "mem/functional_mem/memory_control.hh"
49 #include "mem/functional_mem/physical_memory.hh"
50 #include "sim/builder.hh"
51 #include "sim/debug.hh"
52 #include "sim/host.hh"
53 #include "sim/stats.hh"
54 #include "targetarch/vtophys.hh"
56 const char *NsRxStateStrings
[] =
67 const char *NsTxStateStrings
[] =
78 const char *NsDmaState
[] =
90 ///////////////////////////////////////////////////////////////////////
94 NSGigE::NSGigE(Params
*p
)
95 : PciDev(p
), ioEnable(false),
96 txFifo(p
->tx_fifo_size
), rxFifo(p
->rx_fifo_size
),
97 txPacket(0), rxPacket(0), txPacketBufPtr(NULL
), rxPacketBufPtr(NULL
),
98 txXferLen(0), rxXferLen(0), txState(txIdle
), txEnable(false),
100 txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle
), rxState(rxIdle
),
101 rxEnable(false), CRDD(false), rxPktBytes(0),
102 rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle
), extstsEnable(false),
103 rxDmaReadEvent(this), rxDmaWriteEvent(this),
104 txDmaReadEvent(this), txDmaWriteEvent(this),
105 dmaDescFree(p
->dma_desc_free
), dmaDataFree(p
->dma_data_free
),
106 txDelay(p
->tx_delay
), rxDelay(p
->rx_delay
),
107 rxKickTick(0), txKickTick(0),
108 txEvent(this), rxFilterEnable(p
->rx_filter
), acceptBroadcast(false),
109 acceptMulticast(false), acceptUnicast(false),
110 acceptPerfect(false), acceptArp(false),
111 physmem(p
->pmem
), intrTick(0), cpuPendingIntr(false),
112 intrEvent(0), interface(0)
115 pioInterface
= newPioInterface(name(), p
->hier
,
117 &NSGigE::cacheAccess
);
119 pioLatency
= p
->pio_latency
* p
->header_bus
->clockRatio
;
122 dmaInterface
= new DMAInterface
<Bus
>(name() + ".dma",
126 dmaInterface
= new DMAInterface
<Bus
>(name() + ".dma",
129 } else if (p
->payload_bus
) {
130 pioInterface
= newPioInterface(name(), p
->hier
,
131 p
->payload_bus
, this,
132 &NSGigE::cacheAccess
);
134 pioLatency
= p
->pio_latency
* p
->payload_bus
->clockRatio
;
136 dmaInterface
= new DMAInterface
<Bus
>(name() + ".dma",
142 intrDelay
= US2Ticks(p
->intr_delay
);
143 dmaReadDelay
= p
->dma_read_delay
;
144 dmaWriteDelay
= p
->dma_write_delay
;
145 dmaReadFactor
= p
->dma_read_factor
;
146 dmaWriteFactor
= p
->dma_write_factor
;
149 memcpy(&rom
.perfectMatch
, p
->eaddr
.bytes(), ETH_ADDR_LEN
);
159 .name(name() + ".txBytes")
160 .desc("Bytes Transmitted")
165 .name(name() + ".rxBytes")
166 .desc("Bytes Received")
171 .name(name() + ".txPackets")
172 .desc("Number of Packets Transmitted")
177 .name(name() + ".rxPackets")
178 .desc("Number of Packets Received")
183 .name(name() + ".txIpChecksums")
184 .desc("Number of tx IP Checksums done by device")
190 .name(name() + ".rxIpChecksums")
191 .desc("Number of rx IP Checksums done by device")
197 .name(name() + ".txTcpChecksums")
198 .desc("Number of tx TCP Checksums done by device")
204 .name(name() + ".rxTcpChecksums")
205 .desc("Number of rx TCP Checksums done by device")
211 .name(name() + ".txUdpChecksums")
212 .desc("Number of tx UDP Checksums done by device")
218 .name(name() + ".rxUdpChecksums")
219 .desc("Number of rx UDP Checksums done by device")
225 .name(name() + ".descDMAReads")
226 .desc("Number of descriptors the device read w/ DMA")
231 .name(name() + ".descDMAWrites")
232 .desc("Number of descriptors the device wrote w/ DMA")
237 .name(name() + ".descDmaReadBytes")
238 .desc("number of descriptor bytes read w/ DMA")
243 .name(name() + ".descDmaWriteBytes")
244 .desc("number of descriptor bytes write w/ DMA")
250 .name(name() + ".txBandwidth")
251 .desc("Transmit Bandwidth (bits/s)")
257 .name(name() + ".rxBandwidth")
258 .desc("Receive Bandwidth (bits/s)")
264 .name(name() + ".txPPS")
265 .desc("Packet Tranmission Rate (packets/s)")
271 .name(name() + ".rxPPS")
272 .desc("Packet Reception Rate (packets/s)")
278 .name(name() + ".postedSwi")
279 .desc("number of software interrupts posted to CPU")
284 .name(name() + ".totalSwi")
285 .desc("number of total Swi written to ISR")
290 .name(name() + ".coalescedSwi")
291 .desc("average number of Swi's coalesced into each post")
296 .name(name() + ".postedRxIdle")
297 .desc("number of rxIdle interrupts posted to CPU")
302 .name(name() + ".totalRxIdle")
303 .desc("number of total RxIdle written to ISR")
308 .name(name() + ".coalescedRxIdle")
309 .desc("average number of RxIdle's coalesced into each post")
314 .name(name() + ".postedRxOk")
315 .desc("number of RxOk interrupts posted to CPU")
320 .name(name() + ".totalRxOk")
321 .desc("number of total RxOk written to ISR")
326 .name(name() + ".coalescedRxOk")
327 .desc("average number of RxOk's coalesced into each post")
332 .name(name() + ".postedRxDesc")
333 .desc("number of RxDesc interrupts posted to CPU")
338 .name(name() + ".totalRxDesc")
339 .desc("number of total RxDesc written to ISR")
344 .name(name() + ".coalescedRxDesc")
345 .desc("average number of RxDesc's coalesced into each post")
350 .name(name() + ".postedTxOk")
351 .desc("number of TxOk interrupts posted to CPU")
356 .name(name() + ".totalTxOk")
357 .desc("number of total TxOk written to ISR")
362 .name(name() + ".coalescedTxOk")
363 .desc("average number of TxOk's coalesced into each post")
368 .name(name() + ".postedTxIdle")
369 .desc("number of TxIdle interrupts posted to CPU")
374 .name(name() + ".totalTxIdle")
375 .desc("number of total TxIdle written to ISR")
380 .name(name() + ".coalescedTxIdle")
381 .desc("average number of TxIdle's coalesced into each post")
386 .name(name() + ".postedTxDesc")
387 .desc("number of TxDesc interrupts posted to CPU")
392 .name(name() + ".totalTxDesc")
393 .desc("number of total TxDesc written to ISR")
398 .name(name() + ".coalescedTxDesc")
399 .desc("average number of TxDesc's coalesced into each post")
404 .name(name() + ".postedRxOrn")
405 .desc("number of RxOrn posted to CPU")
410 .name(name() + ".totalRxOrn")
411 .desc("number of total RxOrn written to ISR")
416 .name(name() + ".coalescedRxOrn")
417 .desc("average number of RxOrn's coalesced into each post")
422 .name(name() + ".coalescedTotal")
423 .desc("average number of interrupts coalesced into each post")
428 .name(name() + ".postedInterrupts")
429 .desc("number of posts to CPU")
434 .name(name() + ".droppedPackets")
435 .desc("number of packets dropped")
439 coalescedSwi
= totalSwi
/ postedInterrupts
;
440 coalescedRxIdle
= totalRxIdle
/ postedInterrupts
;
441 coalescedRxOk
= totalRxOk
/ postedInterrupts
;
442 coalescedRxDesc
= totalRxDesc
/ postedInterrupts
;
443 coalescedTxOk
= totalTxOk
/ postedInterrupts
;
444 coalescedTxIdle
= totalTxIdle
/ postedInterrupts
;
445 coalescedTxDesc
= totalTxDesc
/ postedInterrupts
;
446 coalescedRxOrn
= totalRxOrn
/ postedInterrupts
;
448 coalescedTotal
= (totalSwi
+ totalRxIdle
+ totalRxOk
+ totalRxDesc
+ totalTxOk
449 + totalTxIdle
+ totalTxDesc
+ totalRxOrn
) / postedInterrupts
;
451 txBandwidth
= txBytes
* Stats::constant(8) / simSeconds
;
452 rxBandwidth
= rxBytes
* Stats::constant(8) / simSeconds
;
453 txPacketRate
= txPackets
/ simSeconds
;
454 rxPacketRate
= rxPackets
/ simSeconds
;
458 * This is to read the PCI general configuration registers
// Handles PIO reads of PCI configuration space. Offsets inside the
// standard PCI header (< PCI_DEVICE_SPECIFIC) are delegated to the
// generic PciDev::ReadConfig; the device-specific region is not
// modelled and panics if touched.
// NOTE(review): this extraction is missing interior lines (braces,
// else) — the panic appears to be the else branch of the offset check.
461 NSGigE::ReadConfig(int offset
, int size
, uint8_t *data
)
463 if (offset
< PCI_DEVICE_SPECIFIC
)
464 PciDev::ReadConfig(offset
, size
, data
);
466 panic("Device specific PCI config space not implemented!\n");
470 * This is to write to the PCI general configuration registers
// Handles PIO writes to PCI configuration space. Standard-header
// offsets go to the generic PciDev::WriteConfig; the device-specific
// region panics. After the base write, this routine inspects the
// command register bits (IOSE / BME / MSE) and catches writes to the
// two base address registers so the PIO interface's address ranges
// stay in sync with the (re)programmed BARs.
// NOTE(review): interior lines (switch header, braces, bodies of the
// command-bit checks) are missing from this extraction.
473 NSGigE::WriteConfig(int offset
, int size
, uint32_t data
)
475 if (offset
< PCI_DEVICE_SPECIFIC
)
476 PciDev::WriteConfig(offset
, size
, data
);
478 panic("Device specific PCI config space not implemented!\n");
480 // Need to catch writes to BARs to update the PIO interface
482 // seems to work fine without all these PCI settings, but i
483 // put in the IO to double check, an assertion will fail if we
484 // need to properly implement it
486 if (config
.data
[offset
] & PCI_CMD_IOSE
)
492 if (config
.data
[offset
] & PCI_CMD_BME
) {
499 if (config
.data
[offset
] & PCI_CMD_MSE
) {
// BAR0 rewritten: re-register the PIO address range and strip the
// EV5 uncached-space bits from the stored address.
508 case PCI0_BASE_ADDR0
:
509 if (BARAddrs
[0] != 0) {
511 pioInterface
->addAddrRange(RangeSize(BARAddrs
[0], BARSize
[0]));
513 BARAddrs
[0] &= EV5::PAddrUncachedMask
;
// BAR1 rewritten: same handling as BAR0.
516 case PCI0_BASE_ADDR1
:
517 if (BARAddrs
[1] != 0) {
519 pioInterface
->addAddrRange(RangeSize(BARAddrs
[1], BARSize
[1]));
521 BARAddrs
[1] &= EV5::PAddrUncachedMask
;
528 * This reads the device registers, which are detailed in the NS83820
532 NSGigE::read(MemReqPtr
&req
, uint8_t *data
)
536 //The mask is to give you only the offset into the device register file
537 Addr daddr
= req
->paddr
& 0xfff;
538 DPRINTF(EthernetPIO
, "read da=%#x pa=%#x va=%#x size=%d\n",
539 daddr
, req
->paddr
, req
->vaddr
, req
->size
);
542 // there are some reserved registers, you can see ns_gige_reg.h and
543 // the spec sheet for details
544 if (daddr
> LAST
&& daddr
<= RESERVED
) {
545 panic("Accessing reserved register");
546 } else if (daddr
> RESERVED
&& daddr
<= 0x3FC) {
547 ReadConfig(daddr
& 0xff, req
->size
, data
);
549 } else if (daddr
>= MIB_START
&& daddr
<= MIB_END
) {
550 // don't implement all the MIB's. hopefully the kernel
551 // doesn't actually DEPEND upon their values
552 // MIB are just hardware stats keepers
553 uint32_t ®
= *(uint32_t *) data
;
556 } else if (daddr
> 0x3FC)
557 panic("Something is messed up!\n");
560 case sizeof(uint32_t):
562 uint32_t ®
= *(uint32_t *)data
;
567 //these are supposed to be cleared on a read
568 reg
&= ~(CR_RXD
| CR_TXD
| CR_TXR
| CR_RXR
);
585 devIntrClear(ISR_ALL
);
640 // see the spec sheet for how RFCR and RFDR work
641 // basically, you write to RFCR to tell the machine
642 // what you want to do next, then you act upon RFDR,
643 // and the device will be prepared b/c of what you
650 switch (regs
.rfcr
& RFCR_RFADDR
) {
652 reg
= rom
.perfectMatch
[1];
654 reg
+= rom
.perfectMatch
[0];
657 reg
= rom
.perfectMatch
[3] << 8;
658 reg
+= rom
.perfectMatch
[2];
661 reg
= rom
.perfectMatch
[5] << 8;
662 reg
+= rom
.perfectMatch
[4];
665 panic("reading RFDR for something other than PMATCH!\n");
666 // didn't implement other RFDR functionality b/c
667 // driver didn't use it
677 reg
&= ~(MIBC_MIBS
| MIBC_ACLR
);
721 panic("reading unimplemented register: addr=%#x", daddr
);
724 DPRINTF(EthernetPIO
, "read from %#x: data=%d data=%#x\n",
730 panic("accessing register with invalid size: addr=%#x, size=%d",
738 NSGigE::write(MemReqPtr
&req
, const uint8_t *data
)
742 Addr daddr
= req
->paddr
& 0xfff;
743 DPRINTF(EthernetPIO
, "write da=%#x pa=%#x va=%#x size=%d\n",
744 daddr
, req
->paddr
, req
->vaddr
, req
->size
);
746 if (daddr
> LAST
&& daddr
<= RESERVED
) {
747 panic("Accessing reserved register");
748 } else if (daddr
> RESERVED
&& daddr
<= 0x3FC) {
749 WriteConfig(daddr
& 0xff, req
->size
, *(uint32_t *)data
);
751 } else if (daddr
> 0x3FC)
752 panic("Something is messed up!\n");
754 if (req
->size
== sizeof(uint32_t)) {
755 uint32_t reg
= *(uint32_t *)data
;
756 DPRINTF(EthernetPIO
, "write data=%d data=%#x\n", reg
, reg
);
763 } else if (reg
& CR_TXE
) {
766 // the kernel is enabling the transmit machine
767 if (txState
== txIdle
)
773 } else if (reg
& CR_RXE
) {
776 if (rxState
== rxIdle
)
787 devIntrPost(ISR_SWI
);
798 if (reg
& CFG_LNKSTS
||
801 reg
& CFG_RESERVED
||
804 panic("writing to read-only or reserved CFG bits!\n");
806 regs
.config
|= reg
& ~(CFG_LNKSTS
| CFG_SPDSTS
| CFG_DUPSTS
|
807 CFG_RESERVED
| CFG_T64ADDR
| CFG_PCI64_DET
);
809 // all these #if 0's are because i don't THINK the kernel needs to
810 // have these implemented. if there is a problem relating to one of
811 // these, you may need to add functionality in.
813 if (reg
& CFG_TBI_EN
) ;
814 if (reg
& CFG_MODE_1000
) ;
817 if (reg
& CFG_AUTO_1000
)
818 panic("CFG_AUTO_1000 not implemented!\n");
821 if (reg
& CFG_PINT_DUPSTS
||
822 reg
& CFG_PINT_LNKSTS
||
823 reg
& CFG_PINT_SPDSTS
)
826 if (reg
& CFG_TMRTEST
) ;
827 if (reg
& CFG_MRM_DIS
) ;
828 if (reg
& CFG_MWI_DIS
) ;
830 if (reg
& CFG_T64ADDR
)
831 panic("CFG_T64ADDR is read only register!\n");
833 if (reg
& CFG_PCI64_DET
)
834 panic("CFG_PCI64_DET is read only register!\n");
836 if (reg
& CFG_DATA64_EN
) ;
837 if (reg
& CFG_M64ADDR
) ;
838 if (reg
& CFG_PHY_RST
) ;
839 if (reg
& CFG_PHY_DIS
) ;
842 if (reg
& CFG_EXTSTS_EN
)
845 extstsEnable
= false;
848 if (reg
& CFG_REQALG
) ;
852 if (reg
& CFG_PESEL
) ;
853 if (reg
& CFG_BROM_DIS
) ;
854 if (reg
& CFG_EXT_125
) ;
861 // since phy is completely faked, MEAR_MD* don't matter
862 // and since the driver never uses MEAR_EE*, they don't
865 if (reg
& MEAR_EEDI
) ;
866 if (reg
& MEAR_EEDO
) ; // this one is read only
867 if (reg
& MEAR_EECLK
) ;
868 if (reg
& MEAR_EESEL
) ;
869 if (reg
& MEAR_MDIO
) ;
870 if (reg
& MEAR_MDDIR
) ;
871 if (reg
& MEAR_MDC
) ;
876 regs
.ptscr
= reg
& ~(PTSCR_RBIST_RDONLY
);
877 // these control BISTs for various parts of chip - we
878 // don't care or do just fake that the BIST is done
879 if (reg
& PTSCR_RBIST_EN
)
880 regs
.ptscr
|= PTSCR_RBIST_DONE
;
881 if (reg
& PTSCR_EEBIST_EN
)
882 regs
.ptscr
&= ~PTSCR_EEBIST_EN
;
883 if (reg
& PTSCR_EELOAD_EN
)
884 regs
.ptscr
&= ~PTSCR_EELOAD_EN
;
887 case ISR
: /* writing to the ISR has no effect */
888 panic("ISR is a read only register!\n");
901 /* not going to implement real interrupt holdoff */
905 regs
.txdp
= (reg
& 0xFFFFFFFC);
906 assert(txState
== txIdle
);
917 if (reg
& TXCFG_CSI
) ;
918 if (reg
& TXCFG_HBI
) ;
919 if (reg
& TXCFG_MLB
) ;
920 if (reg
& TXCFG_ATP
) ;
921 if (reg
& TXCFG_ECRETRY
) {
923 * this could easily be implemented, but considering
924 * the network is just a fake pipe, wouldn't make
929 if (reg
& TXCFG_BRST_DIS
) ;
933 /* we handle our own DMA, ignore the kernel's exhortations */
934 if (reg
& TXCFG_MXDMA
) ;
937 // also, we currently don't care about fill/drain
938 // thresholds though this may change in the future with
939 // more realistic networks or a driver which changes it
940 // according to feedback
946 /* these just control general purpose i/o pins, don't matter */
961 if (reg
& RXCFG_AEP
) ;
962 if (reg
& RXCFG_ARP
) ;
963 if (reg
& RXCFG_STRIPCRC
) ;
964 if (reg
& RXCFG_RX_RD
) ;
965 if (reg
& RXCFG_ALP
) ;
966 if (reg
& RXCFG_AIRL
) ;
968 /* we handle our own DMA, ignore what kernel says about it */
969 if (reg
& RXCFG_MXDMA
) ;
971 //also, we currently don't care about fill/drain thresholds
972 //though this may change in the future with more realistic
973 //networks or a driver which changes it according to feedback
974 if (reg
& (RXCFG_DRTH
| RXCFG_DRTH0
)) ;
979 /* there is no priority queueing used in the linux 2.6 driver */
984 /* not going to implement wake on LAN */
989 /* not going to implement pause control */
996 rxFilterEnable
= (reg
& RFCR_RFEN
) ? true : false;
997 acceptBroadcast
= (reg
& RFCR_AAB
) ? true : false;
998 acceptMulticast
= (reg
& RFCR_AAM
) ? true : false;
999 acceptUnicast
= (reg
& RFCR_AAU
) ? true : false;
1000 acceptPerfect
= (reg
& RFCR_APM
) ? true : false;
1001 acceptArp
= (reg
& RFCR_AARP
) ? true : false;
1004 if (reg
& RFCR_APAT
)
1005 panic("RFCR_APAT not implemented!\n");
1008 if (reg
& RFCR_MHEN
|| reg
& RFCR_UHEN
)
1009 panic("hash filtering not implemented!\n");
1012 panic("RFCR_ULM not implemented!\n");
1017 panic("the driver never writes to RFDR, something is wrong!\n");
1020 panic("the driver never uses BRAR, something is wrong!\n");
1023 panic("the driver never uses BRDR, something is wrong!\n");
1026 panic("SRR is read only register!\n");
1029 panic("the driver never uses MIBC, something is wrong!\n");
1040 panic("the driver never uses VDR, something is wrong!\n");
1044 /* not going to implement clockrun stuff */
1050 if (reg
& TBICR_MR_LOOPBACK
)
1051 panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
1053 if (reg
& TBICR_MR_AN_ENABLE
) {
1054 regs
.tanlpar
= regs
.tanar
;
1055 regs
.tbisr
|= (TBISR_MR_AN_COMPLETE
| TBISR_MR_LINK_STATUS
);
1059 if (reg
& TBICR_MR_RESTART_AN
) ;
1065 panic("TBISR is read only register!\n");
1069 if (reg
& TANAR_PS2
)
1070 panic("this isn't used in driver, something wrong!\n");
1072 if (reg
& TANAR_PS1
)
1073 panic("this isn't used in driver, something wrong!\n");
1077 panic("this should only be written to by the fake phy!\n");
1080 panic("TANER is read only register!\n");
1087 panic("invalid register access daddr=%#x", daddr
);
1090 panic("Invalid Request Size");
1097 NSGigE::devIntrPost(uint32_t interrupts
)
1099 if (interrupts
& ISR_RESERVE
)
1100 panic("Cannot set a reserved interrupt");
1102 if (interrupts
& ISR_NOIMPL
)
1103 warn("interrupt not implemented %#x\n", interrupts
);
1105 interrupts
&= ~ISR_NOIMPL
;
1106 regs
.isr
|= interrupts
;
1108 if (interrupts
& regs
.imr
) {
1109 if (interrupts
& ISR_SWI
) {
1112 if (interrupts
& ISR_RXIDLE
) {
1115 if (interrupts
& ISR_RXOK
) {
1118 if (interrupts
& ISR_RXDESC
) {
1121 if (interrupts
& ISR_TXOK
) {
1124 if (interrupts
& ISR_TXIDLE
) {
1127 if (interrupts
& ISR_TXDESC
) {
1130 if (interrupts
& ISR_RXORN
) {
1135 DPRINTF(EthernetIntr
,
1136 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
1137 interrupts
, regs
.isr
, regs
.imr
);
1139 if ((regs
.isr
& regs
.imr
)) {
1140 Tick when
= curTick
;
1141 if (!(regs
.isr
& regs
.imr
& ISR_NODELAY
))
1147 /* writing this interrupt counting stats inside this means that this function
1148 is now limited to being used to clear all interrupts upon the kernel
1149 reading isr and servicing. just telling you in case you were thinking
1153 NSGigE::devIntrClear(uint32_t interrupts
)
1155 if (interrupts
& ISR_RESERVE
)
1156 panic("Cannot clear a reserved interrupt");
1158 if (regs
.isr
& regs
.imr
& ISR_SWI
) {
1161 if (regs
.isr
& regs
.imr
& ISR_RXIDLE
) {
1164 if (regs
.isr
& regs
.imr
& ISR_RXOK
) {
1167 if (regs
.isr
& regs
.imr
& ISR_RXDESC
) {
1170 if (regs
.isr
& regs
.imr
& ISR_TXOK
) {
1173 if (regs
.isr
& regs
.imr
& ISR_TXIDLE
) {
1176 if (regs
.isr
& regs
.imr
& ISR_TXDESC
) {
1179 if (regs
.isr
& regs
.imr
& ISR_RXORN
) {
1183 if (regs
.isr
& regs
.imr
& (ISR_SWI
| ISR_RXIDLE
| ISR_RXOK
| ISR_RXDESC
|
1184 ISR_TXOK
| ISR_TXIDLE
| ISR_TXDESC
| ISR_RXORN
) )
1187 interrupts
&= ~ISR_NOIMPL
;
1188 regs
.isr
&= ~interrupts
;
1190 DPRINTF(EthernetIntr
,
1191 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
1192 interrupts
, regs
.isr
, regs
.imr
);
1194 if (!(regs
.isr
& regs
.imr
))
// Called after the interrupt mask (IMR) changes. If any currently
// asserted ISR bit is now unmasked (isr & imr != 0), (re)post the CPU
// interrupt immediately.
// NOTE(review): the matching cpuIntrClear() path for the
// everything-now-masked case is not visible in this extraction —
// confirm against the full source.
1199 NSGigE::devIntrChangeMask()
1201 DPRINTF(EthernetIntr
, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
1202 regs
.isr
, regs
.imr
, regs
.isr
& regs
.imr
);
1204 if (regs
.isr
& regs
.imr
)
1205 cpuIntrPost(curTick
);
// Schedule delivery of a CPU interrupt at tick 'when'. If an earlier
// interrupt is already scheduled (intrTick != 0 and earlier than
// 'when'), piggyback on it instead of scheduling another event;
// otherwise squash any stale event and schedule a fresh IntrEvent at
// intrTick.
1211 NSGigE::cpuIntrPost(Tick when
)
1213 // If the interrupt you want to post is later than an interrupt
1214 // already scheduled, just let it post in the coming one and don't
1215 // schedule another.
1216 // HOWEVER, must be sure that the scheduled intrTick is in the
1217 // future (this was formerly the source of a bug)
1219 * @todo this warning should be removed and the intrTick code should
// Sanity: never post in the past; a pending intrTick must be in the
// future (0 means "none pending").
1222 assert(when
>= curTick
);
1223 assert(intrTick
>= curTick
|| intrTick
== 0);
1224 if (when
> intrTick
&& intrTick
!= 0) {
1225 DPRINTF(EthernetIntr
, "don't need to schedule event...intrTick=%d\n",
// NOTE(review): lines clamping intrTick to 'when'/curTick are missing
// from this extraction (orig 1226-1235).
1231 if (intrTick
< curTick
) {
1236 DPRINTF(EthernetIntr
, "going to schedule an interrupt for intrTick=%d\n",
// Replace any previously scheduled event (squash marks it dead) and
// schedule a new one at intrTick.
1240 intrEvent
->squash();
1241 intrEvent
= new IntrEvent(this, true);
1242 intrEvent
->schedule(intrTick
);
// Fires when the scheduled interrupt tick arrives. If an interrupt is
// already outstanding at the CPU (cpuPendingIntr), do nothing more;
// otherwise mark one pending and post it.
1246 NSGigE::cpuInterrupt()
// Must only run at the tick it was scheduled for.
1248 assert(intrTick
== curTick
);
1250 // Whether or not there's a pending interrupt, we don't care about
1255 // Don't send an interrupt if there's already one
1256 if (cpuPendingIntr
) {
1257 DPRINTF(EthernetIntr
,
1258 "would send an interrupt now, but there's already pending\n");
1261 cpuPendingIntr
= true;
1263 DPRINTF(EthernetIntr
, "posting interrupt\n");
// Deassert the CPU interrupt: no-op if none is pending; otherwise
// squash any still-scheduled delivery event and clear the pending flag.
1269 NSGigE::cpuIntrClear()
1271 if (!cpuPendingIntr
)
// Cancel a not-yet-fired delivery event, if any.
1275 intrEvent
->squash();
1281 cpuPendingIntr
= false;
1283 DPRINTF(EthernetIntr
, "clearing interrupt\n");
// Accessor: true while an interrupt is asserted at the CPU.
1288 NSGigE::cpuIntrPending() const
1289 { return cpuPendingIntr
; }
1295 DPRINTF(Ethernet
, "transmit reset\n");
1300 assert(txDescCnt
== 0);
1303 assert(txDmaState
== dmaIdle
);
1309 DPRINTF(Ethernet
, "receive reset\n");
1312 assert(rxPktBytes
== 0);
1315 assert(rxDescCnt
== 0);
1316 assert(rxDmaState
== dmaIdle
);
1324 memset(®s
, 0, sizeof(regs
));
1325 regs
.config
= CFG_LNKSTS
;
1326 regs
.mear
= MEAR_MDDIR
| MEAR_EEDO
;
1327 regs
.txcfg
= 0x120; // set drain threshold to 1024 bytes and
1328 // fill threshold to 32 bytes
1329 regs
.rxcfg
= 0x4; // set drain threshold to 16 bytes
1330 regs
.srr
= 0x0103; // set the silicon revision to rev B or 0x103
1331 regs
.mibc
= MIBC_FRZ
;
1332 regs
.vdr
= 0x81; // set the vlan tag type to 802.1q
1333 regs
.tesr
= 0xc000; // TBI capable of both full and half duplex
1335 extstsEnable
= false;
1336 acceptBroadcast
= false;
1337 acceptMulticast
= false;
1338 acceptUnicast
= false;
1339 acceptPerfect
= false;
// Perform the actual rx-side DMA read: copy rxDmaLen bytes from
// physical address rxDmaAddr into the rxDmaData buffer, then return
// the rx DMA engine to idle and log the transfer.
1344 NSGigE::rxDmaReadCopy()
1346 assert(rxDmaState
== dmaReading
);
1348 physmem
->dma_read((uint8_t *)rxDmaData
, rxDmaAddr
, rxDmaLen
);
1349 rxDmaState
= dmaIdle
;
1351 DPRINTF(EthernetDMA
, "rx dma read paddr=%#x len=%d\n",
1352 rxDmaAddr
, rxDmaLen
);
1353 DDUMP(EthernetDMA
, rxDmaData
, rxDmaLen
);
// Start an rx DMA read. If a DMA interface is modelled and this
// transfer isn't marked free, either stall (interface busy ->
// dmaReadWaiting) or hand the transfer to the bus model with
// rxDmaReadEvent as the completion callback. Otherwise model the
// latency directly: fixed dmaReadDelay plus dmaReadFactor per 64-byte
// chunk, scheduling rxDmaReadEvent at that time.
// NOTE(review): braces/else lines and the zero-delay fast path body
// (orig 1372-1375) are missing from this extraction.
1357 NSGigE::doRxDmaRead()
1359 assert(rxDmaState
== dmaIdle
|| rxDmaState
== dmaReadWaiting
);
1360 rxDmaState
= dmaReading
;
1362 if (dmaInterface
&& !rxDmaFree
) {
1363 if (dmaInterface
->busy())
1364 rxDmaState
= dmaReadWaiting
;
1366 dmaInterface
->doDMA(Read
, rxDmaAddr
, rxDmaLen
, curTick
,
1367 &rxDmaReadEvent
, true);
1371 if (dmaReadDelay
== 0 && dmaReadFactor
== 0) {
// Latency model: one dmaReadFactor unit per 64 bytes (rounded up).
1376 Tick factor
= ((rxDmaLen
+ ULL(63)) >> ULL(6)) * dmaReadFactor
;
1377 Tick start
= curTick
+ dmaReadDelay
+ factor
;
1378 rxDmaReadEvent
.schedule(start
);
// Completion handler for an rx DMA read. If the transmit state
// machine has a DMA stalled waiting for the bus, it is allowed to
// proceed first.
// NOTE(review): the lines performing the copy and kicking the rx state
// machine (orig 1386-1392) are missing from this extraction.
1383 NSGigE::rxDmaReadDone()
1385 assert(rxDmaState
== dmaReading
);
1388 // If the transmit state machine has a pending DMA, let it go first
1389 if (txDmaState
== dmaReadWaiting
|| txDmaState
== dmaWriteWaiting
)
// Perform the actual rx-side DMA write: copy rxDmaLen bytes from the
// rxDmaData buffer to physical address rxDmaAddr, then return the rx
// DMA engine to idle and log the transfer.
1396 NSGigE::rxDmaWriteCopy()
1398 assert(rxDmaState
== dmaWriting
);
1400 physmem
->dma_write(rxDmaAddr
, (uint8_t *)rxDmaData
, rxDmaLen
);
1401 rxDmaState
= dmaIdle
;
1403 DPRINTF(EthernetDMA
, "rx dma write paddr=%#x len=%d\n",
1404 rxDmaAddr
, rxDmaLen
);
1405 DDUMP(EthernetDMA
, rxDmaData
, rxDmaLen
);
// Start an rx DMA write (mirror of doRxDmaRead). Uses the bus model's
// WriteInvalidate operation when a DMA interface is present and the
// transfer isn't free; otherwise models latency as dmaWriteDelay plus
// dmaWriteFactor per 64-byte chunk and schedules rxDmaWriteEvent.
// NOTE(review): braces/else lines and the zero-delay fast path body
// are missing from this extraction.
1409 NSGigE::doRxDmaWrite()
1411 assert(rxDmaState
== dmaIdle
|| rxDmaState
== dmaWriteWaiting
);
1412 rxDmaState
= dmaWriting
;
1414 if (dmaInterface
&& !rxDmaFree
) {
1415 if (dmaInterface
->busy())
1416 rxDmaState
= dmaWriteWaiting
;
1418 dmaInterface
->doDMA(WriteInvalidate
, rxDmaAddr
, rxDmaLen
, curTick
,
1419 &rxDmaWriteEvent
, true);
1423 if (dmaWriteDelay
== 0 && dmaWriteFactor
== 0) {
// Latency model: one dmaWriteFactor unit per 64 bytes (rounded up).
1428 Tick factor
= ((rxDmaLen
+ ULL(63)) >> ULL(6)) * dmaWriteFactor
;
1429 Tick start
= curTick
+ dmaWriteDelay
+ factor
;
1430 rxDmaWriteEvent
.schedule(start
);
// Completion handler for an rx DMA write; yields to a stalled
// transmit-side DMA before continuing.
// NOTE(review): the copy/kick lines that follow (orig ~1438-1444) are
// missing from this extraction.
1435 NSGigE::rxDmaWriteDone()
1437 assert(rxDmaState
== dmaWriting
);
1440 // If the transmit state machine has a pending DMA, let it go first
1441 if (txDmaState
== dmaReadWaiting
|| txDmaState
== dmaWriteWaiting
)
1450 DPRINTF(EthernetSM
, "receive kick rxState=%s (rxBuf.size=%d)\n",
1451 NsRxStateStrings
[rxState
], rxFifo
.size());
1453 if (rxKickTick
> curTick
) {
1454 DPRINTF(EthernetSM
, "receive kick exiting, can't run till %d\n",
1460 switch(rxDmaState
) {
1461 case dmaReadWaiting
:
1465 case dmaWriteWaiting
:
1473 // see state machine from spec for details
1474 // the way this works is, if you finish work on one state and can
1475 // go directly to another, you do that through jumping to the
1476 // label "next". however, if you have intermediate work, like DMA
1477 // so that you can't go to the next state yet, you go to exit and
1478 // exit the loop. however, when the DMA is done it will trigger
1479 // an event and come back to this loop.
1483 DPRINTF(EthernetSM
, "Receive Disabled! Nothing to do.\n");
1488 rxState
= rxDescRefr
;
1490 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1491 rxDmaData
= &rxDescCache
+ offsetof(ns_desc
, link
);
1492 rxDmaLen
= sizeof(rxDescCache
.link
);
1493 rxDmaFree
= dmaDescFree
;
1496 descDmaRdBytes
+= rxDmaLen
;
1501 rxState
= rxDescRead
;
1503 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1504 rxDmaData
= &rxDescCache
;
1505 rxDmaLen
= sizeof(ns_desc
);
1506 rxDmaFree
= dmaDescFree
;
1509 descDmaRdBytes
+= rxDmaLen
;
1517 if (rxDmaState
!= dmaIdle
)
1520 rxState
= rxAdvance
;
1524 if (rxDmaState
!= dmaIdle
)
1527 DPRINTF(EthernetDesc
,
1528 "rxDescCache: addr=%08x read descriptor\n",
1529 regs
.rxdp
& 0x3fffffff);
1530 DPRINTF(EthernetDesc
,
1531 "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
1532 rxDescCache
.link
, rxDescCache
.bufptr
, rxDescCache
.cmdsts
,
1533 rxDescCache
.extsts
);
1535 if (rxDescCache
.cmdsts
& CMDSTS_OWN
) {
1536 devIntrPost(ISR_RXIDLE
);
1540 rxState
= rxFifoBlock
;
1541 rxFragPtr
= rxDescCache
.bufptr
;
1542 rxDescCnt
= rxDescCache
.cmdsts
& CMDSTS_LEN_MASK
;
1549 * @todo in reality, we should be able to start processing
1550 * the packet as it arrives, and not have to wait for the
1551 * full packet ot be in the receive fifo.
1556 DPRINTF(EthernetSM
, "****processing receive of new packet****\n");
1558 // If we don't have a packet, grab a new one from the fifo.
1559 rxPacket
= rxFifo
.front();
1560 rxPktBytes
= rxPacket
->length
;
1561 rxPacketBufPtr
= rxPacket
->data
;
1564 if (DTRACE(Ethernet
)) {
1567 DPRINTF(Ethernet
, "ID is %d\n", ip
->id());
1570 DPRINTF(Ethernet
, "Src Port=%d, Dest Port=%d\n",
1571 tcp
->sport(), tcp
->dport());
1577 // sanity check - i think the driver behaves like this
1578 assert(rxDescCnt
>= rxPktBytes
);
1583 // dont' need the && rxDescCnt > 0 if driver sanity check
1585 if (rxPktBytes
> 0) {
1586 rxState
= rxFragWrite
;
1587 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
1589 rxXferLen
= rxPktBytes
;
1591 rxDmaAddr
= rxFragPtr
& 0x3fffffff;
1592 rxDmaData
= rxPacketBufPtr
;
1593 rxDmaLen
= rxXferLen
;
1594 rxDmaFree
= dmaDataFree
;
1600 rxState
= rxDescWrite
;
1602 //if (rxPktBytes == 0) { /* packet is done */
1603 assert(rxPktBytes
== 0);
1604 DPRINTF(EthernetSM
, "done with receiving packet\n");
1606 rxDescCache
.cmdsts
|= CMDSTS_OWN
;
1607 rxDescCache
.cmdsts
&= ~CMDSTS_MORE
;
1608 rxDescCache
.cmdsts
|= CMDSTS_OK
;
1609 rxDescCache
.cmdsts
&= 0xffff0000;
1610 rxDescCache
.cmdsts
+= rxPacket
->length
; //i.e. set CMDSTS_SIZE
1614 * all the driver uses these are for its own stats keeping
1615 * which we don't care about, aren't necessary for
1616 * functionality and doing this would just slow us down.
1617 * if they end up using this in a later version for
1618 * functional purposes, just undef
1620 if (rxFilterEnable
) {
1621 rxDescCache
.cmdsts
&= ~CMDSTS_DEST_MASK
;
1622 const EthAddr
&dst
= rxFifoFront()->dst();
1624 rxDescCache
.cmdsts
|= CMDSTS_DEST_SELF
;
1625 if (dst
->multicast())
1626 rxDescCache
.cmdsts
|= CMDSTS_DEST_MULTI
;
1627 if (dst
->broadcast())
1628 rxDescCache
.cmdsts
|= CMDSTS_DEST_MASK
;
1633 if (extstsEnable
&& ip
) {
1634 rxDescCache
.extsts
|= EXTSTS_IPPKT
;
1636 if (cksum(ip
) != 0) {
1637 DPRINTF(EthernetCksum
, "Rx IP Checksum Error\n");
1638 rxDescCache
.extsts
|= EXTSTS_IPERR
;
1643 rxDescCache
.extsts
|= EXTSTS_TCPPKT
;
1645 if (cksum(tcp
) != 0) {
1646 DPRINTF(EthernetCksum
, "Rx TCP Checksum Error\n");
1647 rxDescCache
.extsts
|= EXTSTS_TCPERR
;
1651 rxDescCache
.extsts
|= EXTSTS_UDPPKT
;
1653 if (cksum(udp
) != 0) {
1654 DPRINTF(EthernetCksum
, "Rx UDP Checksum Error\n");
1655 rxDescCache
.extsts
|= EXTSTS_UDPERR
;
1662 * the driver seems to always receive into desc buffers
1663 * of size 1514, so you never have a pkt that is split
1664 * into multiple descriptors on the receive side, so
1665 * i don't implement that case, hence the assert above.
1668 DPRINTF(EthernetDesc
,
1669 "rxDescCache: addr=%08x writeback cmdsts extsts\n",
1670 regs
.rxdp
& 0x3fffffff);
1671 DPRINTF(EthernetDesc
,
1672 "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
1673 rxDescCache
.link
, rxDescCache
.bufptr
, rxDescCache
.cmdsts
,
1674 rxDescCache
.extsts
);
1676 rxDmaAddr
= (regs
.rxdp
+ offsetof(ns_desc
, cmdsts
)) & 0x3fffffff;
1677 rxDmaData
= &(rxDescCache
.cmdsts
);
1678 rxDmaLen
= sizeof(rxDescCache
.cmdsts
) + sizeof(rxDescCache
.extsts
);
1679 rxDmaFree
= dmaDescFree
;
1682 descDmaWrBytes
+= rxDmaLen
;
1690 if (rxDmaState
!= dmaIdle
)
1693 rxPacketBufPtr
+= rxXferLen
;
1694 rxFragPtr
+= rxXferLen
;
1695 rxPktBytes
-= rxXferLen
;
1697 rxState
= rxFifoBlock
;
1701 if (rxDmaState
!= dmaIdle
)
1704 assert(rxDescCache
.cmdsts
& CMDSTS_OWN
);
1706 assert(rxPacket
== 0);
1707 devIntrPost(ISR_RXOK
);
1709 if (rxDescCache
.cmdsts
& CMDSTS_INTR
)
1710 devIntrPost(ISR_RXDESC
);
1713 DPRINTF(EthernetSM
, "Halting the RX state machine\n");
1717 rxState
= rxAdvance
;
1721 if (rxDescCache
.link
== 0) {
1722 devIntrPost(ISR_RXIDLE
);
1727 rxState
= rxDescRead
;
1728 regs
.rxdp
= rxDescCache
.link
;
1731 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1732 rxDmaData
= &rxDescCache
;
1733 rxDmaLen
= sizeof(ns_desc
);
1734 rxDmaFree
= dmaDescFree
;
1742 panic("Invalid rxState!");
1745 DPRINTF(EthernetSM
, "entering next rxState=%s\n",
1746 NsRxStateStrings
[rxState
]);
1752 * @todo do we want to schedule a future kick?
1754 DPRINTF(EthernetSM
, "rx state machine exited rxState=%s\n",
1755 NsRxStateStrings
[rxState
]);
1761 if (txFifo
.empty()) {
1762 DPRINTF(Ethernet
, "nothing to transmit\n");
1766 DPRINTF(Ethernet
, "Attempt Pkt Transmit: txFifo length=%d\n",
1768 if (interface
->sendPacket(txFifo
.front())) {
1770 if (DTRACE(Ethernet
)) {
1771 IpPtr
ip(txFifo
.front());
1773 DPRINTF(Ethernet
, "ID is %d\n", ip
->id());
1776 DPRINTF(Ethernet
, "Src Port=%d, Dest Port=%d\n",
1777 tcp
->sport(), tcp
->dport());
1783 DDUMP(Ethernet
, txFifo
.front()->data
, txFifo
.front()->length
);
1784 txBytes
+= txFifo
.front()->length
;
1787 DPRINTF(Ethernet
, "Successful Xmit! now txFifoAvail is %d\n",
1792 * normally do a writeback of the descriptor here, and ONLY
1793 * after that is done, send this interrupt. but since our
1794 * stuff never actually fails, just do this interrupt here,
1795 * otherwise the code has to stray from this nice format.
1796 * besides, it's functionally the same.
1798 devIntrPost(ISR_TXOK
);
1801 if (!txFifo
.empty() && !txEvent
.scheduled()) {
1802 DPRINTF(Ethernet
, "reschedule transmit\n");
1803 txEvent
.schedule(curTick
+ 1000);
1808 NSGigE::txDmaReadCopy()
1810 assert(txDmaState
== dmaReading
);
1812 physmem
->dma_read((uint8_t *)txDmaData
, txDmaAddr
, txDmaLen
);
1813 txDmaState
= dmaIdle
;
1815 DPRINTF(EthernetDMA
, "tx dma read paddr=%#x len=%d\n",
1816 txDmaAddr
, txDmaLen
);
1817 DDUMP(EthernetDMA
, txDmaData
, txDmaLen
);
1821 NSGigE::doTxDmaRead()
1823 assert(txDmaState
== dmaIdle
|| txDmaState
== dmaReadWaiting
);
1824 txDmaState
= dmaReading
;
1826 if (dmaInterface
&& !txDmaFree
) {
1827 if (dmaInterface
->busy())
1828 txDmaState
= dmaReadWaiting
;
1830 dmaInterface
->doDMA(Read
, txDmaAddr
, txDmaLen
, curTick
,
1831 &txDmaReadEvent
, true);
1835 if (dmaReadDelay
== 0 && dmaReadFactor
== 0.0) {
1840 Tick factor
= ((txDmaLen
+ ULL(63)) >> ULL(6)) * dmaReadFactor
;
1841 Tick start
= curTick
+ dmaReadDelay
+ factor
;
1842 txDmaReadEvent
.schedule(start
);
1847 NSGigE::txDmaReadDone()
1849 assert(txDmaState
== dmaReading
);
1852 // If the receive state machine has a pending DMA, let it go first
1853 if (rxDmaState
== dmaReadWaiting
|| rxDmaState
== dmaWriteWaiting
)
1860 NSGigE::txDmaWriteCopy()
1862 assert(txDmaState
== dmaWriting
);
1864 physmem
->dma_write(txDmaAddr
, (uint8_t *)txDmaData
, txDmaLen
);
1865 txDmaState
= dmaIdle
;
1867 DPRINTF(EthernetDMA
, "tx dma write paddr=%#x len=%d\n",
1868 txDmaAddr
, txDmaLen
);
1869 DDUMP(EthernetDMA
, txDmaData
, txDmaLen
);
1873 NSGigE::doTxDmaWrite()
1875 assert(txDmaState
== dmaIdle
|| txDmaState
== dmaWriteWaiting
);
1876 txDmaState
= dmaWriting
;
1878 if (dmaInterface
&& !txDmaFree
) {
1879 if (dmaInterface
->busy())
1880 txDmaState
= dmaWriteWaiting
;
1882 dmaInterface
->doDMA(WriteInvalidate
, txDmaAddr
, txDmaLen
, curTick
,
1883 &txDmaWriteEvent
, true);
1887 if (dmaWriteDelay
== 0 && dmaWriteFactor
== 0.0) {
1892 Tick factor
= ((txDmaLen
+ ULL(63)) >> ULL(6)) * dmaWriteFactor
;
1893 Tick start
= curTick
+ dmaWriteDelay
+ factor
;
1894 txDmaWriteEvent
.schedule(start
);
1899 NSGigE::txDmaWriteDone()
1901 assert(txDmaState
== dmaWriting
);
1904 // If the receive state machine has a pending DMA, let it go first
1905 if (rxDmaState
== dmaReadWaiting
|| rxDmaState
== dmaWriteWaiting
)
1914 DPRINTF(EthernetSM
, "transmit kick txState=%s\n",
1915 NsTxStateStrings
[txState
]);
1917 if (txKickTick
> curTick
) {
1918 DPRINTF(EthernetSM
, "transmit kick exiting, can't run till %d\n",
1925 switch(txDmaState
) {
1926 case dmaReadWaiting
:
1930 case dmaWriteWaiting
:
1941 DPRINTF(EthernetSM
, "Transmit disabled. Nothing to do.\n");
1946 txState
= txDescRefr
;
1948 txDmaAddr
= regs
.txdp
& 0x3fffffff;
1949 txDmaData
= &txDescCache
+ offsetof(ns_desc
, link
);
1950 txDmaLen
= sizeof(txDescCache
.link
);
1951 txDmaFree
= dmaDescFree
;
1954 descDmaRdBytes
+= txDmaLen
;
1960 txState
= txDescRead
;
1962 txDmaAddr
= regs
.txdp
& 0x3fffffff;
1963 txDmaData
= &txDescCache
;
1964 txDmaLen
= sizeof(ns_desc
);
1965 txDmaFree
= dmaDescFree
;
1968 descDmaRdBytes
+= txDmaLen
;
1976 if (txDmaState
!= dmaIdle
)
1979 txState
= txAdvance
;
1983 if (txDmaState
!= dmaIdle
)
1986 DPRINTF(EthernetDesc
,
1987 "txDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
1988 txDescCache
.link
, txDescCache
.bufptr
, txDescCache
.cmdsts
,
1989 txDescCache
.extsts
);
1991 if (txDescCache
.cmdsts
& CMDSTS_OWN
) {
1992 txState
= txFifoBlock
;
1993 txFragPtr
= txDescCache
.bufptr
;
1994 txDescCnt
= txDescCache
.cmdsts
& CMDSTS_LEN_MASK
;
1996 devIntrPost(ISR_TXIDLE
);
2004 DPRINTF(EthernetSM
, "****starting the tx of a new packet****\n");
2005 txPacket
= new PacketData
;
2006 txPacket
->data
= new uint8_t[16384];
2007 txPacketBufPtr
= txPacket
->data
;
2010 if (txDescCnt
== 0) {
2011 DPRINTF(EthernetSM
, "the txDescCnt == 0, done with descriptor\n");
2012 if (txDescCache
.cmdsts
& CMDSTS_MORE
) {
2013 DPRINTF(EthernetSM
, "there are more descriptors to come\n");
2014 txState
= txDescWrite
;
2016 txDescCache
.cmdsts
&= ~CMDSTS_OWN
;
2018 txDmaAddr
= regs
.txdp
+ offsetof(ns_desc
, cmdsts
);
2019 txDmaAddr
&= 0x3fffffff;
2020 txDmaData
= &(txDescCache
.cmdsts
);
2021 txDmaLen
= sizeof(txDescCache
.cmdsts
);
2022 txDmaFree
= dmaDescFree
;
2027 } else { /* this packet is totally done */
2028 DPRINTF(EthernetSM
, "This packet is done, let's wrap it up\n");
2029 /* deal with the the packet that just finished */
2030 if ((regs
.vtcr
& VTCR_PPCHK
) && extstsEnable
) {
2032 if (txDescCache
.extsts
& EXTSTS_UDPPKT
) {
2035 udp
->sum(cksum(udp
));
2037 } else if (txDescCache
.extsts
& EXTSTS_TCPPKT
) {
2040 tcp
->sum(cksum(tcp
));
2043 if (txDescCache
.extsts
& EXTSTS_IPPKT
) {
2050 txPacket
->length
= txPacketBufPtr
- txPacket
->data
;
2051 // this is just because the receive can't handle a
2052 // packet bigger want to make sure
2053 assert(txPacket
->length
<= 1514);
2057 txFifo
.push(txPacket
);
2061 * this following section is not tqo spec, but
2062 * functionally shouldn't be any different. normally,
2063 * the chip will wait til the transmit has occurred
2064 * before writing back the descriptor because it has
2065 * to wait to see that it was successfully transmitted
2066 * to decide whether to set CMDSTS_OK or not.
2067 * however, in the simulator since it is always
2068 * successfully transmitted, and writing it exactly to
2069 * spec would complicate the code, we just do it here
2072 txDescCache
.cmdsts
&= ~CMDSTS_OWN
;
2073 txDescCache
.cmdsts
|= CMDSTS_OK
;
2075 DPRINTF(EthernetDesc
,
2076 "txDesc writeback: cmdsts=%08x extsts=%08x\n",
2077 txDescCache
.cmdsts
, txDescCache
.extsts
);
2079 txDmaAddr
= regs
.txdp
+ offsetof(ns_desc
, cmdsts
);
2080 txDmaAddr
&= 0x3fffffff;
2081 txDmaData
= &(txDescCache
.cmdsts
);
2082 txDmaLen
= sizeof(txDescCache
.cmdsts
) +
2083 sizeof(txDescCache
.extsts
);
2084 txDmaFree
= dmaDescFree
;
2087 descDmaWrBytes
+= txDmaLen
;
2093 DPRINTF(EthernetSM
, "halting TX state machine\n");
2097 txState
= txAdvance
;
2103 DPRINTF(EthernetSM
, "this descriptor isn't done yet\n");
2104 if (!txFifo
.full()) {
2105 txState
= txFragRead
;
2108 * The number of bytes transferred is either whatever
2109 * is left in the descriptor (txDescCnt), or if there
2110 * is not enough room in the fifo, just whatever room
2111 * is left in the fifo
2113 txXferLen
= min
<uint32_t>(txDescCnt
, txFifo
.avail());
2115 txDmaAddr
= txFragPtr
& 0x3fffffff;
2116 txDmaData
= txPacketBufPtr
;
2117 txDmaLen
= txXferLen
;
2118 txDmaFree
= dmaDataFree
;
2123 txState
= txFifoBlock
;
2133 if (txDmaState
!= dmaIdle
)
2136 txPacketBufPtr
+= txXferLen
;
2137 txFragPtr
+= txXferLen
;
2138 txDescCnt
-= txXferLen
;
2139 txFifo
.reserve(txXferLen
);
2141 txState
= txFifoBlock
;
2145 if (txDmaState
!= dmaIdle
)
2148 if (txDescCache
.cmdsts
& CMDSTS_INTR
)
2149 devIntrPost(ISR_TXDESC
);
2151 txState
= txAdvance
;
2155 if (txDescCache
.link
== 0) {
2156 devIntrPost(ISR_TXIDLE
);
2160 txState
= txDescRead
;
2161 regs
.txdp
= txDescCache
.link
;
2164 txDmaAddr
= txDescCache
.link
& 0x3fffffff;
2165 txDmaData
= &txDescCache
;
2166 txDmaLen
= sizeof(ns_desc
);
2167 txDmaFree
= dmaDescFree
;
2175 panic("invalid state");
2178 DPRINTF(EthernetSM
, "entering next txState=%s\n",
2179 NsTxStateStrings
[txState
]);
2185 * @todo do we want to schedule a future kick?
2187 DPRINTF(EthernetSM
, "tx state machine exited txState=%s\n",
2188 NsTxStateStrings
[txState
]);
2192 NSGigE::transferDone()
2194 if (txFifo
.empty()) {
2195 DPRINTF(Ethernet
, "transfer complete: txFifo empty...nothing to do\n");
2199 DPRINTF(Ethernet
, "transfer complete: data in txFifo...schedule xmit\n");
2201 if (txEvent
.scheduled())
2202 txEvent
.reschedule(curTick
+ 1);
2204 txEvent
.schedule(curTick
+ 1);
2208 NSGigE::rxFilter(const PacketPtr
&packet
)
2210 EthPtr eth
= packet
;
2214 const EthAddr
&dst
= eth
->dst();
2215 if (dst
.unicast()) {
2216 // If we're accepting all unicast addresses
2220 // If we make a perfect match
2221 if (acceptPerfect
&& dst
== rom
.perfectMatch
)
2224 if (acceptArp
&& eth
->type() == ETH_TYPE_ARP
)
2227 } else if (dst
.broadcast()) {
2228 // if we're accepting broadcasts
2229 if (acceptBroadcast
)
2232 } else if (dst
.multicast()) {
2233 // if we're accepting all multicasts
2234 if (acceptMulticast
)
2240 DPRINTF(Ethernet
, "rxFilter drop\n");
2241 DDUMP(EthernetData
, packet
->data
, packet
->length
);
2248 NSGigE::recvPacket(PacketPtr packet
)
2250 rxBytes
+= packet
->length
;
2253 DPRINTF(Ethernet
, "Receiving packet from wire, rxFifoAvail=%d\n",
2257 DPRINTF(Ethernet
, "receive disabled...packet dropped\n");
2259 interface
->recvDone();
2263 if (rxFilterEnable
&& rxFilter(packet
)) {
2264 DPRINTF(Ethernet
, "packet filtered...dropped\n");
2265 interface
->recvDone();
2269 if (rxFifo
.avail() < packet
->length
) {
2271 "packet will not fit in receive buffer...packet dropped\n");
2273 devIntrPost(ISR_RXORN
);
2277 rxFifo
.push(packet
);
2278 interface
->recvDone();
2284 //=====================================================================
2288 NSGigE::serialize(ostream
&os
)
2290 // Serialize the PciDev base class
2291 PciDev::serialize(os
);
2294 * Finalize any DMA events now.
2296 if (rxDmaReadEvent
.scheduled())
2298 if (rxDmaWriteEvent
.scheduled())
2300 if (txDmaReadEvent
.scheduled())
2302 if (txDmaWriteEvent
.scheduled())
2306 * Serialize the device registers
2308 SERIALIZE_SCALAR(regs
.command
);
2309 SERIALIZE_SCALAR(regs
.config
);
2310 SERIALIZE_SCALAR(regs
.mear
);
2311 SERIALIZE_SCALAR(regs
.ptscr
);
2312 SERIALIZE_SCALAR(regs
.isr
);
2313 SERIALIZE_SCALAR(regs
.imr
);
2314 SERIALIZE_SCALAR(regs
.ier
);
2315 SERIALIZE_SCALAR(regs
.ihr
);
2316 SERIALIZE_SCALAR(regs
.txdp
);
2317 SERIALIZE_SCALAR(regs
.txdp_hi
);
2318 SERIALIZE_SCALAR(regs
.txcfg
);
2319 SERIALIZE_SCALAR(regs
.gpior
);
2320 SERIALIZE_SCALAR(regs
.rxdp
);
2321 SERIALIZE_SCALAR(regs
.rxdp_hi
);
2322 SERIALIZE_SCALAR(regs
.rxcfg
);
2323 SERIALIZE_SCALAR(regs
.pqcr
);
2324 SERIALIZE_SCALAR(regs
.wcsr
);
2325 SERIALIZE_SCALAR(regs
.pcr
);
2326 SERIALIZE_SCALAR(regs
.rfcr
);
2327 SERIALIZE_SCALAR(regs
.rfdr
);
2328 SERIALIZE_SCALAR(regs
.srr
);
2329 SERIALIZE_SCALAR(regs
.mibc
);
2330 SERIALIZE_SCALAR(regs
.vrcr
);
2331 SERIALIZE_SCALAR(regs
.vtcr
);
2332 SERIALIZE_SCALAR(regs
.vdr
);
2333 SERIALIZE_SCALAR(regs
.ccsr
);
2334 SERIALIZE_SCALAR(regs
.tbicr
);
2335 SERIALIZE_SCALAR(regs
.tbisr
);
2336 SERIALIZE_SCALAR(regs
.tanar
);
2337 SERIALIZE_SCALAR(regs
.tanlpar
);
2338 SERIALIZE_SCALAR(regs
.taner
);
2339 SERIALIZE_SCALAR(regs
.tesr
);
2341 SERIALIZE_ARRAY(rom
.perfectMatch
, ETH_ADDR_LEN
);
2343 SERIALIZE_SCALAR(ioEnable
);
2346 * Serialize the data Fifos
2348 rxFifo
.serialize("rxFifo", os
);
2349 txFifo
.serialize("txFifo", os
);
2352 * Serialize the various helper variables
2354 bool txPacketExists
= txPacket
;
2355 SERIALIZE_SCALAR(txPacketExists
);
2356 if (txPacketExists
) {
2357 txPacket
->serialize("txPacket", os
);
2358 uint32_t txPktBufPtr
= (uint32_t) (txPacketBufPtr
- txPacket
->data
);
2359 SERIALIZE_SCALAR(txPktBufPtr
);
2362 bool rxPacketExists
= rxPacket
;
2363 SERIALIZE_SCALAR(rxPacketExists
);
2364 if (rxPacketExists
) {
2365 rxPacket
->serialize("rxPacket", os
);
2366 uint32_t rxPktBufPtr
= (uint32_t) (rxPacketBufPtr
- rxPacket
->data
);
2367 SERIALIZE_SCALAR(rxPktBufPtr
);
2370 SERIALIZE_SCALAR(txXferLen
);
2371 SERIALIZE_SCALAR(rxXferLen
);
2374 * Serialize DescCaches
2376 SERIALIZE_SCALAR(txDescCache
.link
);
2377 SERIALIZE_SCALAR(txDescCache
.bufptr
);
2378 SERIALIZE_SCALAR(txDescCache
.cmdsts
);
2379 SERIALIZE_SCALAR(txDescCache
.extsts
);
2380 SERIALIZE_SCALAR(rxDescCache
.link
);
2381 SERIALIZE_SCALAR(rxDescCache
.bufptr
);
2382 SERIALIZE_SCALAR(rxDescCache
.cmdsts
);
2383 SERIALIZE_SCALAR(rxDescCache
.extsts
);
2386 * Serialize tx state machine
2388 int txState
= this->txState
;
2389 SERIALIZE_SCALAR(txState
);
2390 SERIALIZE_SCALAR(txEnable
);
2391 SERIALIZE_SCALAR(CTDD
);
2392 SERIALIZE_SCALAR(txFragPtr
);
2393 SERIALIZE_SCALAR(txDescCnt
);
2394 int txDmaState
= this->txDmaState
;
2395 SERIALIZE_SCALAR(txDmaState
);
2398 * Serialize rx state machine
2400 int rxState
= this->rxState
;
2401 SERIALIZE_SCALAR(rxState
);
2402 SERIALIZE_SCALAR(rxEnable
);
2403 SERIALIZE_SCALAR(CRDD
);
2404 SERIALIZE_SCALAR(rxPktBytes
);
2405 SERIALIZE_SCALAR(rxFragPtr
);
2406 SERIALIZE_SCALAR(rxDescCnt
);
2407 int rxDmaState
= this->rxDmaState
;
2408 SERIALIZE_SCALAR(rxDmaState
);
2410 SERIALIZE_SCALAR(extstsEnable
);
2413 * If there's a pending transmit, store the time so we can
2414 * reschedule it later
2416 Tick transmitTick
= txEvent
.scheduled() ? txEvent
.when() - curTick
: 0;
2417 SERIALIZE_SCALAR(transmitTick
);
2420 * receive address filter settings
2422 SERIALIZE_SCALAR(rxFilterEnable
);
2423 SERIALIZE_SCALAR(acceptBroadcast
);
2424 SERIALIZE_SCALAR(acceptMulticast
);
2425 SERIALIZE_SCALAR(acceptUnicast
);
2426 SERIALIZE_SCALAR(acceptPerfect
);
2427 SERIALIZE_SCALAR(acceptArp
);
2430 * Keep track of pending interrupt status.
2432 SERIALIZE_SCALAR(intrTick
);
2433 SERIALIZE_SCALAR(cpuPendingIntr
);
2434 Tick intrEventTick
= 0;
2436 intrEventTick
= intrEvent
->when();
2437 SERIALIZE_SCALAR(intrEventTick
);
2442 NSGigE::unserialize(Checkpoint
*cp
, const std::string
§ion
)
2444 // Unserialize the PciDev base class
2445 PciDev::unserialize(cp
, section
);
2447 UNSERIALIZE_SCALAR(regs
.command
);
2448 UNSERIALIZE_SCALAR(regs
.config
);
2449 UNSERIALIZE_SCALAR(regs
.mear
);
2450 UNSERIALIZE_SCALAR(regs
.ptscr
);
2451 UNSERIALIZE_SCALAR(regs
.isr
);
2452 UNSERIALIZE_SCALAR(regs
.imr
);
2453 UNSERIALIZE_SCALAR(regs
.ier
);
2454 UNSERIALIZE_SCALAR(regs
.ihr
);
2455 UNSERIALIZE_SCALAR(regs
.txdp
);
2456 UNSERIALIZE_SCALAR(regs
.txdp_hi
);
2457 UNSERIALIZE_SCALAR(regs
.txcfg
);
2458 UNSERIALIZE_SCALAR(regs
.gpior
);
2459 UNSERIALIZE_SCALAR(regs
.rxdp
);
2460 UNSERIALIZE_SCALAR(regs
.rxdp_hi
);
2461 UNSERIALIZE_SCALAR(regs
.rxcfg
);
2462 UNSERIALIZE_SCALAR(regs
.pqcr
);
2463 UNSERIALIZE_SCALAR(regs
.wcsr
);
2464 UNSERIALIZE_SCALAR(regs
.pcr
);
2465 UNSERIALIZE_SCALAR(regs
.rfcr
);
2466 UNSERIALIZE_SCALAR(regs
.rfdr
);
2467 UNSERIALIZE_SCALAR(regs
.srr
);
2468 UNSERIALIZE_SCALAR(regs
.mibc
);
2469 UNSERIALIZE_SCALAR(regs
.vrcr
);
2470 UNSERIALIZE_SCALAR(regs
.vtcr
);
2471 UNSERIALIZE_SCALAR(regs
.vdr
);
2472 UNSERIALIZE_SCALAR(regs
.ccsr
);
2473 UNSERIALIZE_SCALAR(regs
.tbicr
);
2474 UNSERIALIZE_SCALAR(regs
.tbisr
);
2475 UNSERIALIZE_SCALAR(regs
.tanar
);
2476 UNSERIALIZE_SCALAR(regs
.tanlpar
);
2477 UNSERIALIZE_SCALAR(regs
.taner
);
2478 UNSERIALIZE_SCALAR(regs
.tesr
);
2480 UNSERIALIZE_ARRAY(rom
.perfectMatch
, ETH_ADDR_LEN
);
2482 UNSERIALIZE_SCALAR(ioEnable
);
2485 * unserialize the data fifos
2487 rxFifo
.unserialize("rxFifo", cp
, section
);
2488 txFifo
.unserialize("txFifo", cp
, section
);
2491 * unserialize the various helper variables
2493 bool txPacketExists
;
2494 UNSERIALIZE_SCALAR(txPacketExists
);
2495 if (txPacketExists
) {
2496 txPacket
= new PacketData
;
2497 txPacket
->unserialize("txPacket", cp
, section
);
2498 uint32_t txPktBufPtr
;
2499 UNSERIALIZE_SCALAR(txPktBufPtr
);
2500 txPacketBufPtr
= (uint8_t *) txPacket
->data
+ txPktBufPtr
;
2504 bool rxPacketExists
;
2505 UNSERIALIZE_SCALAR(rxPacketExists
);
2507 if (rxPacketExists
) {
2508 rxPacket
= new PacketData
;
2509 rxPacket
->unserialize("rxPacket", cp
, section
);
2510 uint32_t rxPktBufPtr
;
2511 UNSERIALIZE_SCALAR(rxPktBufPtr
);
2512 rxPacketBufPtr
= (uint8_t *) rxPacket
->data
+ rxPktBufPtr
;
2516 UNSERIALIZE_SCALAR(txXferLen
);
2517 UNSERIALIZE_SCALAR(rxXferLen
);
2520 * Unserialize DescCaches
2522 UNSERIALIZE_SCALAR(txDescCache
.link
);
2523 UNSERIALIZE_SCALAR(txDescCache
.bufptr
);
2524 UNSERIALIZE_SCALAR(txDescCache
.cmdsts
);
2525 UNSERIALIZE_SCALAR(txDescCache
.extsts
);
2526 UNSERIALIZE_SCALAR(rxDescCache
.link
);
2527 UNSERIALIZE_SCALAR(rxDescCache
.bufptr
);
2528 UNSERIALIZE_SCALAR(rxDescCache
.cmdsts
);
2529 UNSERIALIZE_SCALAR(rxDescCache
.extsts
);
2532 * unserialize tx state machine
2535 UNSERIALIZE_SCALAR(txState
);
2536 this->txState
= (TxState
) txState
;
2537 UNSERIALIZE_SCALAR(txEnable
);
2538 UNSERIALIZE_SCALAR(CTDD
);
2539 UNSERIALIZE_SCALAR(txFragPtr
);
2540 UNSERIALIZE_SCALAR(txDescCnt
);
2542 UNSERIALIZE_SCALAR(txDmaState
);
2543 this->txDmaState
= (DmaState
) txDmaState
;
2546 * unserialize rx state machine
2549 UNSERIALIZE_SCALAR(rxState
);
2550 this->rxState
= (RxState
) rxState
;
2551 UNSERIALIZE_SCALAR(rxEnable
);
2552 UNSERIALIZE_SCALAR(CRDD
);
2553 UNSERIALIZE_SCALAR(rxPktBytes
);
2554 UNSERIALIZE_SCALAR(rxFragPtr
);
2555 UNSERIALIZE_SCALAR(rxDescCnt
);
2557 UNSERIALIZE_SCALAR(rxDmaState
);
2558 this->rxDmaState
= (DmaState
) rxDmaState
;
2560 UNSERIALIZE_SCALAR(extstsEnable
);
2563 * If there's a pending transmit, reschedule it now
2566 UNSERIALIZE_SCALAR(transmitTick
);
2568 txEvent
.schedule(curTick
+ transmitTick
);
2571 * unserialize receive address filter settings
2573 UNSERIALIZE_SCALAR(rxFilterEnable
);
2574 UNSERIALIZE_SCALAR(acceptBroadcast
);
2575 UNSERIALIZE_SCALAR(acceptMulticast
);
2576 UNSERIALIZE_SCALAR(acceptUnicast
);
2577 UNSERIALIZE_SCALAR(acceptPerfect
);
2578 UNSERIALIZE_SCALAR(acceptArp
);
2581 * Keep track of pending interrupt status.
2583 UNSERIALIZE_SCALAR(intrTick
);
2584 UNSERIALIZE_SCALAR(cpuPendingIntr
);
2586 UNSERIALIZE_SCALAR(intrEventTick
);
2587 if (intrEventTick
) {
2588 intrEvent
= new IntrEvent(this, true);
2589 intrEvent
->schedule(intrEventTick
);
2593 * re-add addrRanges to bus bridges
2596 pioInterface
->addAddrRange(RangeSize(BARAddrs
[0], BARSize
[0]));
2597 pioInterface
->addAddrRange(RangeSize(BARAddrs
[1], BARSize
[1]));
2602 NSGigE::cacheAccess(MemReqPtr
&req
)
2604 DPRINTF(EthernetPIO
, "timing access to paddr=%#x (daddr=%#x)\n",
2605 req
->paddr
, req
->paddr
- addr
);
2606 return curTick
+ pioLatency
;
2609 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt
)
2611 SimObjectParam
<EtherInt
*> peer
;
2612 SimObjectParam
<NSGigE
*> device
;
2614 END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt
)
2616 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt
)
2618 INIT_PARAM_DFLT(peer
, "peer interface", NULL
),
2619 INIT_PARAM(device
, "Ethernet device of this interface")
2621 END_INIT_SIM_OBJECT_PARAMS(NSGigEInt
)
2623 CREATE_SIM_OBJECT(NSGigEInt
)
2625 NSGigEInt
*dev_int
= new NSGigEInt(getInstanceName(), device
);
2627 EtherInt
*p
= (EtherInt
*)peer
;
2629 dev_int
->setPeer(p
);
2630 p
->setPeer(dev_int
);
2636 REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt
)
2639 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE
)
2641 Param
<Tick
> tx_delay
;
2642 Param
<Tick
> rx_delay
;
2643 Param
<Tick
> intr_delay
;
2644 SimObjectParam
<MemoryController
*> mmu
;
2645 SimObjectParam
<PhysicalMemory
*> physmem
;
2646 Param
<bool> rx_filter
;
2647 Param
<string
> hardware_address
;
2648 SimObjectParam
<Bus
*> header_bus
;
2649 SimObjectParam
<Bus
*> payload_bus
;
2650 SimObjectParam
<HierParams
*> hier
;
2651 Param
<Tick
> pio_latency
;
2652 Param
<bool> dma_desc_free
;
2653 Param
<bool> dma_data_free
;
2654 Param
<Tick
> dma_read_delay
;
2655 Param
<Tick
> dma_write_delay
;
2656 Param
<Tick
> dma_read_factor
;
2657 Param
<Tick
> dma_write_factor
;
2658 SimObjectParam
<PciConfigAll
*> configspace
;
2659 SimObjectParam
<PciConfigData
*> configdata
;
2660 SimObjectParam
<Platform
*> platform
;
2661 Param
<uint32_t> pci_bus
;
2662 Param
<uint32_t> pci_dev
;
2663 Param
<uint32_t> pci_func
;
2664 Param
<uint32_t> tx_fifo_size
;
2665 Param
<uint32_t> rx_fifo_size
;
2667 END_DECLARE_SIM_OBJECT_PARAMS(NSGigE
)
2669 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE
)
2671 INIT_PARAM_DFLT(tx_delay
, "Transmit Delay", 1000),
2672 INIT_PARAM_DFLT(rx_delay
, "Receive Delay", 1000),
2673 INIT_PARAM_DFLT(intr_delay
, "Interrupt Delay in microseconds", 0),
2674 INIT_PARAM(mmu
, "Memory Controller"),
2675 INIT_PARAM(physmem
, "Physical Memory"),
2676 INIT_PARAM_DFLT(rx_filter
, "Enable Receive Filter", true),
2677 INIT_PARAM_DFLT(hardware_address
, "Ethernet Hardware Address",
2678 "00:99:00:00:00:01"),
2679 INIT_PARAM_DFLT(header_bus
, "The IO Bus to attach to for headers", NULL
),
2680 INIT_PARAM_DFLT(payload_bus
, "The IO Bus to attach to for payload", NULL
),
2681 INIT_PARAM_DFLT(hier
, "Hierarchy global variables", &defaultHierParams
),
2682 INIT_PARAM_DFLT(pio_latency
, "Programmed IO latency in bus cycles", 1),
2683 INIT_PARAM_DFLT(dma_desc_free
, "DMA of Descriptors is free", false),
2684 INIT_PARAM_DFLT(dma_data_free
, "DMA of Data is free", false),
2685 INIT_PARAM_DFLT(dma_read_delay
, "fixed delay for dma reads", 0),
2686 INIT_PARAM_DFLT(dma_write_delay
, "fixed delay for dma writes", 0),
2687 INIT_PARAM_DFLT(dma_read_factor
, "multiplier for dma reads", 0),
2688 INIT_PARAM_DFLT(dma_write_factor
, "multiplier for dma writes", 0),
2689 INIT_PARAM(configspace
, "PCI Configspace"),
2690 INIT_PARAM(configdata
, "PCI Config data"),
2691 INIT_PARAM(platform
, "Platform"),
2692 INIT_PARAM(pci_bus
, "PCI bus"),
2693 INIT_PARAM(pci_dev
, "PCI device number"),
2694 INIT_PARAM(pci_func
, "PCI function code"),
2695 INIT_PARAM_DFLT(tx_fifo_size
, "max size in bytes of txFifo", 131072),
2696 INIT_PARAM_DFLT(rx_fifo_size
, "max size in bytes of rxFifo", 131072)
2698 END_INIT_SIM_OBJECT_PARAMS(NSGigE
)
2701 CREATE_SIM_OBJECT(NSGigE
)
2703 NSGigE::Params
*params
= new NSGigE::Params
;
2705 params
->name
= getInstanceName();
2707 params
->configSpace
= configspace
;
2708 params
->configData
= configdata
;
2709 params
->plat
= platform
;
2710 params
->busNum
= pci_bus
;
2711 params
->deviceNum
= pci_dev
;
2712 params
->functionNum
= pci_func
;
2714 params
->intr_delay
= intr_delay
;
2715 params
->pmem
= physmem
;
2716 params
->tx_delay
= tx_delay
;
2717 params
->rx_delay
= rx_delay
;
2718 params
->hier
= hier
;
2719 params
->header_bus
= header_bus
;
2720 params
->payload_bus
= payload_bus
;
2721 params
->pio_latency
= pio_latency
;
2722 params
->dma_desc_free
= dma_desc_free
;
2723 params
->dma_data_free
= dma_data_free
;
2724 params
->dma_read_delay
= dma_read_delay
;
2725 params
->dma_write_delay
= dma_write_delay
;
2726 params
->dma_read_factor
= dma_read_factor
;
2727 params
->dma_write_factor
= dma_write_factor
;
2728 params
->rx_filter
= rx_filter
;
2729 params
->eaddr
= hardware_address
;
2730 params
->tx_fifo_size
= tx_fifo_size
;
2731 params
->rx_fifo_size
= rx_fifo_size
;
2732 return new NSGigE(params
);
2735 REGISTER_SIM_OBJECT("NSGigE", NSGigE
)