2 * Copyright (c) 2004 The Regents of The University of Michigan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 * Device module for modelling the National Semiconductor
31 * DP83820 ethernet controller. Does not support priority queueing
37 #include "base/inet.hh"
38 #include "cpu/exec_context.hh"
40 #include "dev/etherlink.hh"
41 #include "dev/ns_gige.hh"
42 #include "dev/pciconfigall.hh"
43 #include "mem/bus/bus.hh"
44 #include "mem/bus/dma_interface.hh"
45 #include "mem/bus/pio_interface.hh"
46 #include "mem/bus/pio_interface_impl.hh"
47 #include "mem/functional_mem/memory_control.hh"
48 #include "mem/functional_mem/physical_memory.hh"
49 #include "sim/builder.hh"
50 #include "sim/debug.hh"
51 #include "sim/host.hh"
52 #include "sim/stats.hh"
53 #include "targetarch/vtophys.hh"
55 const char *NsRxStateStrings
[] =
66 const char *NsTxStateStrings
[] =
77 const char *NsDmaState
[] =
89 ///////////////////////////////////////////////////////////////////////
93 NSGigE::NSGigE(Params
*p
)
94 : PciDev(p
), ioEnable(false),
95 txFifo(p
->tx_fifo_size
), rxFifo(p
->rx_fifo_size
),
96 txPacket(0), rxPacket(0), txPacketBufPtr(NULL
), rxPacketBufPtr(NULL
),
97 txXferLen(0), rxXferLen(0), txState(txIdle
), txEnable(false),
99 txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle
), rxState(rxIdle
),
100 rxEnable(false), CRDD(false), rxPktBytes(0),
101 rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle
), extstsEnable(false),
102 rxDmaReadEvent(this), rxDmaWriteEvent(this),
103 txDmaReadEvent(this), txDmaWriteEvent(this),
104 dmaDescFree(p
->dma_desc_free
), dmaDataFree(p
->dma_data_free
),
105 txDelay(p
->tx_delay
), rxDelay(p
->rx_delay
),
106 rxKickTick(0), txKickTick(0),
107 txEvent(this), rxFilterEnable(p
->rx_filter
), acceptBroadcast(false),
108 acceptMulticast(false), acceptUnicast(false),
109 acceptPerfect(false), acceptArp(false),
110 physmem(p
->pmem
), intrTick(0), cpuPendingIntr(false),
111 intrEvent(0), interface(0)
114 pioInterface
= newPioInterface(name(), p
->hier
,
116 &NSGigE::cacheAccess
);
118 pioLatency
= p
->pio_latency
* p
->header_bus
->clockRatio
;
121 dmaInterface
= new DMAInterface
<Bus
>(name() + ".dma",
125 dmaInterface
= new DMAInterface
<Bus
>(name() + ".dma",
128 } else if (p
->payload_bus
) {
129 pioInterface
= newPioInterface(name(), p
->hier
,
130 p
->payload_bus
, this,
131 &NSGigE::cacheAccess
);
133 pioLatency
= p
->pio_latency
* p
->payload_bus
->clockRatio
;
135 dmaInterface
= new DMAInterface
<Bus
>(name() + ".dma",
141 intrDelay
= US2Ticks(p
->intr_delay
);
142 dmaReadDelay
= p
->dma_read_delay
;
143 dmaWriteDelay
= p
->dma_write_delay
;
144 dmaReadFactor
= p
->dma_read_factor
;
145 dmaWriteFactor
= p
->dma_write_factor
;
148 memcpy(&rom
.perfectMatch
, p
->eaddr
.bytes(), ETH_ADDR_LEN
);
158 .name(name() + ".txBytes")
159 .desc("Bytes Transmitted")
164 .name(name() + ".rxBytes")
165 .desc("Bytes Received")
170 .name(name() + ".txPackets")
171 .desc("Number of Packets Transmitted")
176 .name(name() + ".rxPackets")
177 .desc("Number of Packets Received")
182 .name(name() + ".txIpChecksums")
183 .desc("Number of tx IP Checksums done by device")
189 .name(name() + ".rxIpChecksums")
190 .desc("Number of rx IP Checksums done by device")
196 .name(name() + ".txTcpChecksums")
197 .desc("Number of tx TCP Checksums done by device")
203 .name(name() + ".rxTcpChecksums")
204 .desc("Number of rx TCP Checksums done by device")
210 .name(name() + ".txUdpChecksums")
211 .desc("Number of tx UDP Checksums done by device")
217 .name(name() + ".rxUdpChecksums")
218 .desc("Number of rx UDP Checksums done by device")
224 .name(name() + ".descDMAReads")
225 .desc("Number of descriptors the device read w/ DMA")
230 .name(name() + ".descDMAWrites")
231 .desc("Number of descriptors the device wrote w/ DMA")
236 .name(name() + ".descDmaReadBytes")
237 .desc("number of descriptor bytes read w/ DMA")
242 .name(name() + ".descDmaWriteBytes")
243 .desc("number of descriptor bytes write w/ DMA")
249 .name(name() + ".txBandwidth")
250 .desc("Transmit Bandwidth (bits/s)")
256 .name(name() + ".rxBandwidth")
257 .desc("Receive Bandwidth (bits/s)")
263 .name(name() + ".txPPS")
264 .desc("Packet Tranmission Rate (packets/s)")
270 .name(name() + ".rxPPS")
271 .desc("Packet Reception Rate (packets/s)")
277 .name(name() + ".postedSwi")
278 .desc("number of software interrupts posted to CPU")
283 .name(name() + ".totalSwi")
284 .desc("number of total Swi written to ISR")
289 .name(name() + ".coalescedSwi")
290 .desc("average number of Swi's coalesced into each post")
295 .name(name() + ".postedRxIdle")
296 .desc("number of rxIdle interrupts posted to CPU")
301 .name(name() + ".totalRxIdle")
302 .desc("number of total RxIdle written to ISR")
307 .name(name() + ".coalescedRxIdle")
308 .desc("average number of RxIdle's coalesced into each post")
313 .name(name() + ".postedRxOk")
314 .desc("number of RxOk interrupts posted to CPU")
319 .name(name() + ".totalRxOk")
320 .desc("number of total RxOk written to ISR")
325 .name(name() + ".coalescedRxOk")
326 .desc("average number of RxOk's coalesced into each post")
331 .name(name() + ".postedRxDesc")
332 .desc("number of RxDesc interrupts posted to CPU")
337 .name(name() + ".totalRxDesc")
338 .desc("number of total RxDesc written to ISR")
343 .name(name() + ".coalescedRxDesc")
344 .desc("average number of RxDesc's coalesced into each post")
349 .name(name() + ".postedTxOk")
350 .desc("number of TxOk interrupts posted to CPU")
355 .name(name() + ".totalTxOk")
356 .desc("number of total TxOk written to ISR")
361 .name(name() + ".coalescedTxOk")
362 .desc("average number of TxOk's coalesced into each post")
367 .name(name() + ".postedTxIdle")
368 .desc("number of TxIdle interrupts posted to CPU")
373 .name(name() + ".totalTxIdle")
374 .desc("number of total TxIdle written to ISR")
379 .name(name() + ".coalescedTxIdle")
380 .desc("average number of TxIdle's coalesced into each post")
385 .name(name() + ".postedTxDesc")
386 .desc("number of TxDesc interrupts posted to CPU")
391 .name(name() + ".totalTxDesc")
392 .desc("number of total TxDesc written to ISR")
397 .name(name() + ".coalescedTxDesc")
398 .desc("average number of TxDesc's coalesced into each post")
403 .name(name() + ".postedRxOrn")
404 .desc("number of RxOrn posted to CPU")
409 .name(name() + ".totalRxOrn")
410 .desc("number of total RxOrn written to ISR")
415 .name(name() + ".coalescedRxOrn")
416 .desc("average number of RxOrn's coalesced into each post")
421 .name(name() + ".coalescedTotal")
422 .desc("average number of interrupts coalesced into each post")
427 .name(name() + ".postedInterrupts")
428 .desc("number of posts to CPU")
433 .name(name() + ".droppedPackets")
434 .desc("number of packets dropped")
438 coalescedSwi
= totalSwi
/ postedInterrupts
;
439 coalescedRxIdle
= totalRxIdle
/ postedInterrupts
;
440 coalescedRxOk
= totalRxOk
/ postedInterrupts
;
441 coalescedRxDesc
= totalRxDesc
/ postedInterrupts
;
442 coalescedTxOk
= totalTxOk
/ postedInterrupts
;
443 coalescedTxIdle
= totalTxIdle
/ postedInterrupts
;
444 coalescedTxDesc
= totalTxDesc
/ postedInterrupts
;
445 coalescedRxOrn
= totalRxOrn
/ postedInterrupts
;
447 coalescedTotal
= (totalSwi
+ totalRxIdle
+ totalRxOk
+ totalRxDesc
+ totalTxOk
448 + totalTxIdle
+ totalTxDesc
+ totalRxOrn
) / postedInterrupts
;
450 txBandwidth
= txBytes
* Stats::constant(8) / simSeconds
;
451 rxBandwidth
= rxBytes
* Stats::constant(8) / simSeconds
;
452 txPacketRate
= txPackets
/ simSeconds
;
453 rxPacketRate
= rxPackets
/ simSeconds
;
457 * This is to read the PCI general configuration registers
460 NSGigE::ReadConfig(int offset
, int size
, uint8_t *data
)
462 if (offset
< PCI_DEVICE_SPECIFIC
)
463 PciDev::ReadConfig(offset
, size
, data
);
465 panic("Device specific PCI config space not implemented!\n");
469 * This is to write to the PCI general configuration registers
472 NSGigE::WriteConfig(int offset
, int size
, uint32_t data
)
474 if (offset
< PCI_DEVICE_SPECIFIC
)
475 PciDev::WriteConfig(offset
, size
, data
);
477 panic("Device specific PCI config space not implemented!\n");
479 // Need to catch writes to BARs to update the PIO interface
481 // seems to work fine without all these PCI settings, but i
482 // put in the IO to double check, an assertion will fail if we
483 // need to properly implement it
485 if (config
.data
[offset
] & PCI_CMD_IOSE
)
491 if (config
.data
[offset
] & PCI_CMD_BME
) {
498 if (config
.data
[offset
] & PCI_CMD_MSE
) {
507 case PCI0_BASE_ADDR0
:
508 if (BARAddrs
[0] != 0) {
510 pioInterface
->addAddrRange(RangeSize(BARAddrs
[0], BARSize
[0]));
512 BARAddrs
[0] &= EV5::PAddrUncachedMask
;
515 case PCI0_BASE_ADDR1
:
516 if (BARAddrs
[1] != 0) {
518 pioInterface
->addAddrRange(RangeSize(BARAddrs
[1], BARSize
[1]));
520 BARAddrs
[1] &= EV5::PAddrUncachedMask
;
527 * This reads the device registers, which are detailed in the NS83820
531 NSGigE::read(MemReqPtr
&req
, uint8_t *data
)
535 //The mask is to give you only the offset into the device register file
536 Addr daddr
= req
->paddr
& 0xfff;
537 DPRINTF(EthernetPIO
, "read da=%#x pa=%#x va=%#x size=%d\n",
538 daddr
, req
->paddr
, req
->vaddr
, req
->size
);
541 // there are some reserved registers, you can see ns_gige_reg.h and
542 // the spec sheet for details
543 if (daddr
> LAST
&& daddr
<= RESERVED
) {
544 panic("Accessing reserved register");
545 } else if (daddr
> RESERVED
&& daddr
<= 0x3FC) {
546 ReadConfig(daddr
& 0xff, req
->size
, data
);
548 } else if (daddr
>= MIB_START
&& daddr
<= MIB_END
) {
549 // don't implement all the MIB's. hopefully the kernel
550 // doesn't actually DEPEND upon their values
551 // MIB are just hardware stats keepers
552 uint32_t ®
= *(uint32_t *) data
;
555 } else if (daddr
> 0x3FC)
556 panic("Something is messed up!\n");
559 case sizeof(uint32_t):
561 uint32_t ®
= *(uint32_t *)data
;
566 //these are supposed to be cleared on a read
567 reg
&= ~(CR_RXD
| CR_TXD
| CR_TXR
| CR_RXR
);
584 devIntrClear(ISR_ALL
);
639 // see the spec sheet for how RFCR and RFDR work
640 // basically, you write to RFCR to tell the machine
641 // what you want to do next, then you act upon RFDR,
642 // and the device will be prepared b/c of what you
649 switch (regs
.rfcr
& RFCR_RFADDR
) {
651 reg
= rom
.perfectMatch
[1];
653 reg
+= rom
.perfectMatch
[0];
656 reg
= rom
.perfectMatch
[3] << 8;
657 reg
+= rom
.perfectMatch
[2];
660 reg
= rom
.perfectMatch
[5] << 8;
661 reg
+= rom
.perfectMatch
[4];
664 panic("reading RFDR for something other than PMATCH!\n");
665 // didn't implement other RFDR functionality b/c
666 // driver didn't use it
676 reg
&= ~(MIBC_MIBS
| MIBC_ACLR
);
720 panic("reading unimplemented register: addr=%#x", daddr
);
723 DPRINTF(EthernetPIO
, "read from %#x: data=%d data=%#x\n",
729 panic("accessing register with invalid size: addr=%#x, size=%d",
737 NSGigE::write(MemReqPtr
&req
, const uint8_t *data
)
741 Addr daddr
= req
->paddr
& 0xfff;
742 DPRINTF(EthernetPIO
, "write da=%#x pa=%#x va=%#x size=%d\n",
743 daddr
, req
->paddr
, req
->vaddr
, req
->size
);
745 if (daddr
> LAST
&& daddr
<= RESERVED
) {
746 panic("Accessing reserved register");
747 } else if (daddr
> RESERVED
&& daddr
<= 0x3FC) {
748 WriteConfig(daddr
& 0xff, req
->size
, *(uint32_t *)data
);
750 } else if (daddr
> 0x3FC)
751 panic("Something is messed up!\n");
753 if (req
->size
== sizeof(uint32_t)) {
754 uint32_t reg
= *(uint32_t *)data
;
755 DPRINTF(EthernetPIO
, "write data=%d data=%#x\n", reg
, reg
);
762 } else if (reg
& CR_TXE
) {
765 // the kernel is enabling the transmit machine
766 if (txState
== txIdle
)
772 } else if (reg
& CR_RXE
) {
775 if (rxState
== rxIdle
)
786 devIntrPost(ISR_SWI
);
797 if (reg
& CFG_LNKSTS
||
800 reg
& CFG_RESERVED
||
803 panic("writing to read-only or reserved CFG bits!\n");
805 regs
.config
|= reg
& ~(CFG_LNKSTS
| CFG_SPDSTS
| CFG_DUPSTS
|
806 CFG_RESERVED
| CFG_T64ADDR
| CFG_PCI64_DET
);
808 // all these #if 0's are because i don't THINK the kernel needs to
809 // have these implemented. if there is a problem relating to one of
810 // these, you may need to add functionality in.
812 if (reg
& CFG_TBI_EN
) ;
813 if (reg
& CFG_MODE_1000
) ;
816 if (reg
& CFG_AUTO_1000
)
817 panic("CFG_AUTO_1000 not implemented!\n");
820 if (reg
& CFG_PINT_DUPSTS
||
821 reg
& CFG_PINT_LNKSTS
||
822 reg
& CFG_PINT_SPDSTS
)
825 if (reg
& CFG_TMRTEST
) ;
826 if (reg
& CFG_MRM_DIS
) ;
827 if (reg
& CFG_MWI_DIS
) ;
829 if (reg
& CFG_T64ADDR
)
830 panic("CFG_T64ADDR is read only register!\n");
832 if (reg
& CFG_PCI64_DET
)
833 panic("CFG_PCI64_DET is read only register!\n");
835 if (reg
& CFG_DATA64_EN
) ;
836 if (reg
& CFG_M64ADDR
) ;
837 if (reg
& CFG_PHY_RST
) ;
838 if (reg
& CFG_PHY_DIS
) ;
841 if (reg
& CFG_EXTSTS_EN
)
844 extstsEnable
= false;
847 if (reg
& CFG_REQALG
) ;
851 if (reg
& CFG_PESEL
) ;
852 if (reg
& CFG_BROM_DIS
) ;
853 if (reg
& CFG_EXT_125
) ;
860 // since phy is completely faked, MEAR_MD* don't matter
861 // and since the driver never uses MEAR_EE*, they don't
864 if (reg
& MEAR_EEDI
) ;
865 if (reg
& MEAR_EEDO
) ; // this one is read only
866 if (reg
& MEAR_EECLK
) ;
867 if (reg
& MEAR_EESEL
) ;
868 if (reg
& MEAR_MDIO
) ;
869 if (reg
& MEAR_MDDIR
) ;
870 if (reg
& MEAR_MDC
) ;
875 regs
.ptscr
= reg
& ~(PTSCR_RBIST_RDONLY
);
876 // these control BISTs for various parts of chip - we
877 // don't care or do just fake that the BIST is done
878 if (reg
& PTSCR_RBIST_EN
)
879 regs
.ptscr
|= PTSCR_RBIST_DONE
;
880 if (reg
& PTSCR_EEBIST_EN
)
881 regs
.ptscr
&= ~PTSCR_EEBIST_EN
;
882 if (reg
& PTSCR_EELOAD_EN
)
883 regs
.ptscr
&= ~PTSCR_EELOAD_EN
;
886 case ISR
: /* writing to the ISR has no effect */
887 panic("ISR is a read only register!\n");
900 /* not going to implement real interrupt holdoff */
904 regs
.txdp
= (reg
& 0xFFFFFFFC);
905 assert(txState
== txIdle
);
916 if (reg
& TXCFG_CSI
) ;
917 if (reg
& TXCFG_HBI
) ;
918 if (reg
& TXCFG_MLB
) ;
919 if (reg
& TXCFG_ATP
) ;
920 if (reg
& TXCFG_ECRETRY
) {
922 * this could easily be implemented, but considering
923 * the network is just a fake pipe, wouldn't make
928 if (reg
& TXCFG_BRST_DIS
) ;
932 /* we handle our own DMA, ignore the kernel's exhortations */
933 if (reg
& TXCFG_MXDMA
) ;
936 // also, we currently don't care about fill/drain
937 // thresholds though this may change in the future with
938 // more realistic networks or a driver which changes it
939 // according to feedback
945 /* these just control general purpose i/o pins, don't matter */
960 if (reg
& RXCFG_AEP
) ;
961 if (reg
& RXCFG_ARP
) ;
962 if (reg
& RXCFG_STRIPCRC
) ;
963 if (reg
& RXCFG_RX_RD
) ;
964 if (reg
& RXCFG_ALP
) ;
965 if (reg
& RXCFG_AIRL
) ;
967 /* we handle our own DMA, ignore what kernel says about it */
968 if (reg
& RXCFG_MXDMA
) ;
970 //also, we currently don't care about fill/drain thresholds
971 //though this may change in the future with more realistic
972 //networks or a driver which changes it according to feedback
973 if (reg
& (RXCFG_DRTH
| RXCFG_DRTH0
)) ;
978 /* there is no priority queueing used in the linux 2.6 driver */
983 /* not going to implement wake on LAN */
988 /* not going to implement pause control */
995 rxFilterEnable
= (reg
& RFCR_RFEN
) ? true : false;
996 acceptBroadcast
= (reg
& RFCR_AAB
) ? true : false;
997 acceptMulticast
= (reg
& RFCR_AAM
) ? true : false;
998 acceptUnicast
= (reg
& RFCR_AAU
) ? true : false;
999 acceptPerfect
= (reg
& RFCR_APM
) ? true : false;
1000 acceptArp
= (reg
& RFCR_AARP
) ? true : false;
1003 if (reg
& RFCR_APAT
)
1004 panic("RFCR_APAT not implemented!\n");
1007 if (reg
& RFCR_MHEN
|| reg
& RFCR_UHEN
)
1008 panic("hash filtering not implemented!\n");
1011 panic("RFCR_ULM not implemented!\n");
1016 panic("the driver never writes to RFDR, something is wrong!\n");
1019 panic("the driver never uses BRAR, something is wrong!\n");
1022 panic("the driver never uses BRDR, something is wrong!\n");
1025 panic("SRR is read only register!\n");
1028 panic("the driver never uses MIBC, something is wrong!\n");
1039 panic("the driver never uses VDR, something is wrong!\n");
1043 /* not going to implement clockrun stuff */
1049 if (reg
& TBICR_MR_LOOPBACK
)
1050 panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
1052 if (reg
& TBICR_MR_AN_ENABLE
) {
1053 regs
.tanlpar
= regs
.tanar
;
1054 regs
.tbisr
|= (TBISR_MR_AN_COMPLETE
| TBISR_MR_LINK_STATUS
);
1058 if (reg
& TBICR_MR_RESTART_AN
) ;
1064 panic("TBISR is read only register!\n");
1068 if (reg
& TANAR_PS2
)
1069 panic("this isn't used in driver, something wrong!\n");
1071 if (reg
& TANAR_PS1
)
1072 panic("this isn't used in driver, something wrong!\n");
1076 panic("this should only be written to by the fake phy!\n");
1079 panic("TANER is read only register!\n");
1086 panic("invalid register access daddr=%#x", daddr
);
1089 panic("Invalid Request Size");
1096 NSGigE::devIntrPost(uint32_t interrupts
)
1098 if (interrupts
& ISR_RESERVE
)
1099 panic("Cannot set a reserved interrupt");
1101 if (interrupts
& ISR_NOIMPL
)
1102 warn("interrupt not implemented %#x\n", interrupts
);
1104 interrupts
&= ~ISR_NOIMPL
;
1105 regs
.isr
|= interrupts
;
1107 if (interrupts
& regs
.imr
) {
1108 if (interrupts
& ISR_SWI
) {
1111 if (interrupts
& ISR_RXIDLE
) {
1114 if (interrupts
& ISR_RXOK
) {
1117 if (interrupts
& ISR_RXDESC
) {
1120 if (interrupts
& ISR_TXOK
) {
1123 if (interrupts
& ISR_TXIDLE
) {
1126 if (interrupts
& ISR_TXDESC
) {
1129 if (interrupts
& ISR_RXORN
) {
1134 DPRINTF(EthernetIntr
,
1135 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
1136 interrupts
, regs
.isr
, regs
.imr
);
1138 if ((regs
.isr
& regs
.imr
)) {
1139 Tick when
= curTick
;
1140 if (!(regs
.isr
& regs
.imr
& ISR_NODELAY
))
1146 /* writing this interrupt counting stats inside this means that this function
1147 is now limited to being used to clear all interrupts upon the kernel
1148 reading isr and servicing. just telling you in case you were thinking
1152 NSGigE::devIntrClear(uint32_t interrupts
)
1154 if (interrupts
& ISR_RESERVE
)
1155 panic("Cannot clear a reserved interrupt");
1157 if (regs
.isr
& regs
.imr
& ISR_SWI
) {
1160 if (regs
.isr
& regs
.imr
& ISR_RXIDLE
) {
1163 if (regs
.isr
& regs
.imr
& ISR_RXOK
) {
1166 if (regs
.isr
& regs
.imr
& ISR_RXDESC
) {
1169 if (regs
.isr
& regs
.imr
& ISR_TXOK
) {
1172 if (regs
.isr
& regs
.imr
& ISR_TXIDLE
) {
1175 if (regs
.isr
& regs
.imr
& ISR_TXDESC
) {
1178 if (regs
.isr
& regs
.imr
& ISR_RXORN
) {
1182 if (regs
.isr
& regs
.imr
& (ISR_SWI
| ISR_RXIDLE
| ISR_RXOK
| ISR_RXDESC
|
1183 ISR_TXOK
| ISR_TXIDLE
| ISR_TXDESC
| ISR_RXORN
) )
1186 interrupts
&= ~ISR_NOIMPL
;
1187 regs
.isr
&= ~interrupts
;
1189 DPRINTF(EthernetIntr
,
1190 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
1191 interrupts
, regs
.isr
, regs
.imr
);
1193 if (!(regs
.isr
& regs
.imr
))
1198 NSGigE::devIntrChangeMask()
1200 DPRINTF(EthernetIntr
, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
1201 regs
.isr
, regs
.imr
, regs
.isr
& regs
.imr
);
1203 if (regs
.isr
& regs
.imr
)
1204 cpuIntrPost(curTick
);
1210 NSGigE::cpuIntrPost(Tick when
)
1212 // If the interrupt you want to post is later than an interrupt
1213 // already scheduled, just let it post in the coming one and don't
1214 // schedule another.
1215 // HOWEVER, must be sure that the scheduled intrTick is in the
1216 // future (this was formerly the source of a bug)
1218 * @todo this warning should be removed and the intrTick code should
1221 assert(when
>= curTick
);
1222 assert(intrTick
>= curTick
|| intrTick
== 0);
1223 if (when
> intrTick
&& intrTick
!= 0) {
1224 DPRINTF(EthernetIntr
, "don't need to schedule event...intrTick=%d\n",
1230 if (intrTick
< curTick
) {
1235 DPRINTF(EthernetIntr
, "going to schedule an interrupt for intrTick=%d\n",
1239 intrEvent
->squash();
1240 intrEvent
= new IntrEvent(this, true);
1241 intrEvent
->schedule(intrTick
);
1245 NSGigE::cpuInterrupt()
1247 assert(intrTick
== curTick
);
1249 // Whether or not there's a pending interrupt, we don't care about
1254 // Don't send an interrupt if there's already one
1255 if (cpuPendingIntr
) {
1256 DPRINTF(EthernetIntr
,
1257 "would send an interrupt now, but there's already pending\n");
1260 cpuPendingIntr
= true;
1262 DPRINTF(EthernetIntr
, "posting interrupt\n");
1268 NSGigE::cpuIntrClear()
1270 if (!cpuPendingIntr
)
1274 intrEvent
->squash();
1280 cpuPendingIntr
= false;
1282 DPRINTF(EthernetIntr
, "clearing interrupt\n");
1287 NSGigE::cpuIntrPending() const
1288 { return cpuPendingIntr
; }
1294 DPRINTF(Ethernet
, "transmit reset\n");
1299 assert(txDescCnt
== 0);
1302 assert(txDmaState
== dmaIdle
);
1308 DPRINTF(Ethernet
, "receive reset\n");
1311 assert(rxPktBytes
== 0);
1314 assert(rxDescCnt
== 0);
1315 assert(rxDmaState
== dmaIdle
);
1323 memset(®s
, 0, sizeof(regs
));
1324 regs
.config
= CFG_LNKSTS
;
1325 regs
.mear
= MEAR_MDDIR
| MEAR_EEDO
;
1326 regs
.txcfg
= 0x120; // set drain threshold to 1024 bytes and
1327 // fill threshold to 32 bytes
1328 regs
.rxcfg
= 0x4; // set drain threshold to 16 bytes
1329 regs
.srr
= 0x0103; // set the silicon revision to rev B or 0x103
1330 regs
.mibc
= MIBC_FRZ
;
1331 regs
.vdr
= 0x81; // set the vlan tag type to 802.1q
1332 regs
.tesr
= 0xc000; // TBI capable of both full and half duplex
1334 extstsEnable
= false;
1335 acceptBroadcast
= false;
1336 acceptMulticast
= false;
1337 acceptUnicast
= false;
1338 acceptPerfect
= false;
1343 NSGigE::rxDmaReadCopy()
1345 assert(rxDmaState
== dmaReading
);
1347 physmem
->dma_read((uint8_t *)rxDmaData
, rxDmaAddr
, rxDmaLen
);
1348 rxDmaState
= dmaIdle
;
1350 DPRINTF(EthernetDMA
, "rx dma read paddr=%#x len=%d\n",
1351 rxDmaAddr
, rxDmaLen
);
1352 DDUMP(EthernetDMA
, rxDmaData
, rxDmaLen
);
1356 NSGigE::doRxDmaRead()
1358 assert(rxDmaState
== dmaIdle
|| rxDmaState
== dmaReadWaiting
);
1359 rxDmaState
= dmaReading
;
1361 if (dmaInterface
&& !rxDmaFree
) {
1362 if (dmaInterface
->busy())
1363 rxDmaState
= dmaReadWaiting
;
1365 dmaInterface
->doDMA(Read
, rxDmaAddr
, rxDmaLen
, curTick
,
1366 &rxDmaReadEvent
, true);
1370 if (dmaReadDelay
== 0 && dmaReadFactor
== 0) {
1375 Tick factor
= ((rxDmaLen
+ ULL(63)) >> ULL(6)) * dmaReadFactor
;
1376 Tick start
= curTick
+ dmaReadDelay
+ factor
;
1377 rxDmaReadEvent
.schedule(start
);
1382 NSGigE::rxDmaReadDone()
1384 assert(rxDmaState
== dmaReading
);
1387 // If the transmit state machine has a pending DMA, let it go first
1388 if (txDmaState
== dmaReadWaiting
|| txDmaState
== dmaWriteWaiting
)
1395 NSGigE::rxDmaWriteCopy()
1397 assert(rxDmaState
== dmaWriting
);
1399 physmem
->dma_write(rxDmaAddr
, (uint8_t *)rxDmaData
, rxDmaLen
);
1400 rxDmaState
= dmaIdle
;
1402 DPRINTF(EthernetDMA
, "rx dma write paddr=%#x len=%d\n",
1403 rxDmaAddr
, rxDmaLen
);
1404 DDUMP(EthernetDMA
, rxDmaData
, rxDmaLen
);
1408 NSGigE::doRxDmaWrite()
1410 assert(rxDmaState
== dmaIdle
|| rxDmaState
== dmaWriteWaiting
);
1411 rxDmaState
= dmaWriting
;
1413 if (dmaInterface
&& !rxDmaFree
) {
1414 if (dmaInterface
->busy())
1415 rxDmaState
= dmaWriteWaiting
;
1417 dmaInterface
->doDMA(WriteInvalidate
, rxDmaAddr
, rxDmaLen
, curTick
,
1418 &rxDmaWriteEvent
, true);
1422 if (dmaWriteDelay
== 0 && dmaWriteFactor
== 0) {
1427 Tick factor
= ((rxDmaLen
+ ULL(63)) >> ULL(6)) * dmaWriteFactor
;
1428 Tick start
= curTick
+ dmaWriteDelay
+ factor
;
1429 rxDmaWriteEvent
.schedule(start
);
1434 NSGigE::rxDmaWriteDone()
1436 assert(rxDmaState
== dmaWriting
);
1439 // If the transmit state machine has a pending DMA, let it go first
1440 if (txDmaState
== dmaReadWaiting
|| txDmaState
== dmaWriteWaiting
)
1449 DPRINTF(EthernetSM
, "receive kick rxState=%s (rxBuf.size=%d)\n",
1450 NsRxStateStrings
[rxState
], rxFifo
.size());
1452 if (rxKickTick
> curTick
) {
1453 DPRINTF(EthernetSM
, "receive kick exiting, can't run till %d\n",
1459 switch(rxDmaState
) {
1460 case dmaReadWaiting
:
1464 case dmaWriteWaiting
:
1472 // see state machine from spec for details
1473 // the way this works is, if you finish work on one state and can
1474 // go directly to another, you do that through jumping to the
1475 // label "next". however, if you have intermediate work, like DMA
1476 // so that you can't go to the next state yet, you go to exit and
1477 // exit the loop. however, when the DMA is done it will trigger
1478 // an event and come back to this loop.
1482 DPRINTF(EthernetSM
, "Receive Disabled! Nothing to do.\n");
1487 rxState
= rxDescRefr
;
1489 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1490 rxDmaData
= &rxDescCache
+ offsetof(ns_desc
, link
);
1491 rxDmaLen
= sizeof(rxDescCache
.link
);
1492 rxDmaFree
= dmaDescFree
;
1495 descDmaRdBytes
+= rxDmaLen
;
1500 rxState
= rxDescRead
;
1502 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1503 rxDmaData
= &rxDescCache
;
1504 rxDmaLen
= sizeof(ns_desc
);
1505 rxDmaFree
= dmaDescFree
;
1508 descDmaRdBytes
+= rxDmaLen
;
1516 if (rxDmaState
!= dmaIdle
)
1519 rxState
= rxAdvance
;
1523 if (rxDmaState
!= dmaIdle
)
1526 DPRINTF(EthernetDesc
,
1527 "rxDescCache: addr=%08x read descriptor\n",
1528 regs
.rxdp
& 0x3fffffff);
1529 DPRINTF(EthernetDesc
,
1530 "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
1531 rxDescCache
.link
, rxDescCache
.bufptr
, rxDescCache
.cmdsts
,
1532 rxDescCache
.extsts
);
1534 if (rxDescCache
.cmdsts
& CMDSTS_OWN
) {
1535 devIntrPost(ISR_RXIDLE
);
1539 rxState
= rxFifoBlock
;
1540 rxFragPtr
= rxDescCache
.bufptr
;
1541 rxDescCnt
= rxDescCache
.cmdsts
& CMDSTS_LEN_MASK
;
1548 * @todo in reality, we should be able to start processing
1549 * the packet as it arrives, and not have to wait for the
1550 * full packet to be in the receive fifo.
1555 DPRINTF(EthernetSM
, "****processing receive of new packet****\n");
1557 // If we don't have a packet, grab a new one from the fifo.
1558 rxPacket
= rxFifo
.front();
1559 rxPktBytes
= rxPacket
->length
;
1560 rxPacketBufPtr
= rxPacket
->data
;
1563 if (DTRACE(Ethernet
)) {
1566 DPRINTF(Ethernet
, "ID is %d\n", ip
->id());
1569 DPRINTF(Ethernet
, "Src Port=%d, Dest Port=%d\n",
1570 tcp
->sport(), tcp
->dport());
1576 // sanity check - i think the driver behaves like this
1577 assert(rxDescCnt
>= rxPktBytes
);
1582 // don't need the && rxDescCnt > 0 if driver sanity check
1584 if (rxPktBytes
> 0) {
1585 rxState
= rxFragWrite
;
1586 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
1588 rxXferLen
= rxPktBytes
;
1590 rxDmaAddr
= rxFragPtr
& 0x3fffffff;
1591 rxDmaData
= rxPacketBufPtr
;
1592 rxDmaLen
= rxXferLen
;
1593 rxDmaFree
= dmaDataFree
;
1599 rxState
= rxDescWrite
;
1601 //if (rxPktBytes == 0) { /* packet is done */
1602 assert(rxPktBytes
== 0);
1603 DPRINTF(EthernetSM
, "done with receiving packet\n");
1605 rxDescCache
.cmdsts
|= CMDSTS_OWN
;
1606 rxDescCache
.cmdsts
&= ~CMDSTS_MORE
;
1607 rxDescCache
.cmdsts
|= CMDSTS_OK
;
1608 rxDescCache
.cmdsts
&= 0xffff0000;
1609 rxDescCache
.cmdsts
+= rxPacket
->length
; //i.e. set CMDSTS_SIZE
1613 * all the driver uses these for is its own stats keeping
1614 * which we don't care about, aren't necessary for
1615 * functionality and doing this would just slow us down.
1616 * if they end up using this in a later version for
1617 * functional purposes, just undef
1619 if (rxFilterEnable
) {
1620 rxDescCache
.cmdsts
&= ~CMDSTS_DEST_MASK
;
1621 const EthAddr
&dst
= rxFifoFront()->dst();
1623 rxDescCache
.cmdsts
|= CMDSTS_DEST_SELF
;
1624 if (dst
->multicast())
1625 rxDescCache
.cmdsts
|= CMDSTS_DEST_MULTI
;
1626 if (dst
->broadcast())
1627 rxDescCache
.cmdsts
|= CMDSTS_DEST_MASK
;
1632 if (extstsEnable
&& ip
) {
1633 rxDescCache
.extsts
|= EXTSTS_IPPKT
;
1635 if (cksum(ip
) != 0) {
1636 DPRINTF(EthernetCksum
, "Rx IP Checksum Error\n");
1637 rxDescCache
.extsts
|= EXTSTS_IPERR
;
1642 rxDescCache
.extsts
|= EXTSTS_TCPPKT
;
1644 if (cksum(tcp
) != 0) {
1645 DPRINTF(EthernetCksum
, "Rx TCP Checksum Error\n");
1646 rxDescCache
.extsts
|= EXTSTS_TCPERR
;
1650 rxDescCache
.extsts
|= EXTSTS_UDPPKT
;
1652 if (cksum(udp
) != 0) {
1653 DPRINTF(EthernetCksum
, "Rx UDP Checksum Error\n");
1654 rxDescCache
.extsts
|= EXTSTS_UDPERR
;
1661 * the driver seems to always receive into desc buffers
1662 * of size 1514, so you never have a pkt that is split
1663 * into multiple descriptors on the receive side, so
1664 * i don't implement that case, hence the assert above.
1667 DPRINTF(EthernetDesc
,
1668 "rxDescCache: addr=%08x writeback cmdsts extsts\n",
1669 regs
.rxdp
& 0x3fffffff);
1670 DPRINTF(EthernetDesc
,
1671 "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
1672 rxDescCache
.link
, rxDescCache
.bufptr
, rxDescCache
.cmdsts
,
1673 rxDescCache
.extsts
);
1675 rxDmaAddr
= (regs
.rxdp
+ offsetof(ns_desc
, cmdsts
)) & 0x3fffffff;
1676 rxDmaData
= &(rxDescCache
.cmdsts
);
1677 rxDmaLen
= sizeof(rxDescCache
.cmdsts
) + sizeof(rxDescCache
.extsts
);
1678 rxDmaFree
= dmaDescFree
;
1681 descDmaWrBytes
+= rxDmaLen
;
1689 if (rxDmaState
!= dmaIdle
)
1692 rxPacketBufPtr
+= rxXferLen
;
1693 rxFragPtr
+= rxXferLen
;
1694 rxPktBytes
-= rxXferLen
;
1696 rxState
= rxFifoBlock
;
1700 if (rxDmaState
!= dmaIdle
)
1703 assert(rxDescCache
.cmdsts
& CMDSTS_OWN
);
1705 assert(rxPacket
== 0);
1706 devIntrPost(ISR_RXOK
);
1708 if (rxDescCache
.cmdsts
& CMDSTS_INTR
)
1709 devIntrPost(ISR_RXDESC
);
1712 DPRINTF(EthernetSM
, "Halting the RX state machine\n");
1716 rxState
= rxAdvance
;
1720 if (rxDescCache
.link
== 0) {
1721 devIntrPost(ISR_RXIDLE
);
1726 rxState
= rxDescRead
;
1727 regs
.rxdp
= rxDescCache
.link
;
1730 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1731 rxDmaData
= &rxDescCache
;
1732 rxDmaLen
= sizeof(ns_desc
);
1733 rxDmaFree
= dmaDescFree
;
1741 panic("Invalid rxState!");
1744 DPRINTF(EthernetSM
, "entering next rxState=%s\n",
1745 NsRxStateStrings
[rxState
]);
1751 * @todo do we want to schedule a future kick?
1753 DPRINTF(EthernetSM
, "rx state machine exited rxState=%s\n",
1754 NsRxStateStrings
[rxState
]);
1760 if (txFifo
.empty()) {
1761 DPRINTF(Ethernet
, "nothing to transmit\n");
1765 DPRINTF(Ethernet
, "Attempt Pkt Transmit: txFifo length=%d\n",
1767 if (interface
->sendPacket(txFifo
.front())) {
1769 if (DTRACE(Ethernet
)) {
1770 IpPtr
ip(txFifo
.front());
1772 DPRINTF(Ethernet
, "ID is %d\n", ip
->id());
1775 DPRINTF(Ethernet
, "Src Port=%d, Dest Port=%d\n",
1776 tcp
->sport(), tcp
->dport());
1782 DDUMP(Ethernet
, txFifo
.front()->data
, txFifo
.front()->length
);
1783 txBytes
+= txFifo
.front()->length
;
1786 DPRINTF(Ethernet
, "Successful Xmit! now txFifoAvail is %d\n",
1791 * normally do a writeback of the descriptor here, and ONLY
1792 * after that is done, send this interrupt. but since our
1793 * stuff never actually fails, just do this interrupt here,
1794 * otherwise the code has to stray from this nice format.
1795 * besides, it's functionally the same.
1797 devIntrPost(ISR_TXOK
);
1800 if (!txFifo
.empty() && !txEvent
.scheduled()) {
1801 DPRINTF(Ethernet
, "reschedule transmit\n");
1802 txEvent
.schedule(curTick
+ 1000);
1807 NSGigE::txDmaReadCopy()
1809 assert(txDmaState
== dmaReading
);
1811 physmem
->dma_read((uint8_t *)txDmaData
, txDmaAddr
, txDmaLen
);
1812 txDmaState
= dmaIdle
;
1814 DPRINTF(EthernetDMA
, "tx dma read paddr=%#x len=%d\n",
1815 txDmaAddr
, txDmaLen
);
1816 DDUMP(EthernetDMA
, txDmaData
, txDmaLen
);
1820 NSGigE::doTxDmaRead()
1822 assert(txDmaState
== dmaIdle
|| txDmaState
== dmaReadWaiting
);
1823 txDmaState
= dmaReading
;
1825 if (dmaInterface
&& !txDmaFree
) {
1826 if (dmaInterface
->busy())
1827 txDmaState
= dmaReadWaiting
;
1829 dmaInterface
->doDMA(Read
, txDmaAddr
, txDmaLen
, curTick
,
1830 &txDmaReadEvent
, true);
1834 if (dmaReadDelay
== 0 && dmaReadFactor
== 0.0) {
1839 Tick factor
= ((txDmaLen
+ ULL(63)) >> ULL(6)) * dmaReadFactor
;
1840 Tick start
= curTick
+ dmaReadDelay
+ factor
;
1841 txDmaReadEvent
.schedule(start
);
1846 NSGigE::txDmaReadDone()
1848 assert(txDmaState
== dmaReading
);
1851 // If the receive state machine has a pending DMA, let it go first
1852 if (rxDmaState
== dmaReadWaiting
|| rxDmaState
== dmaWriteWaiting
)
1859 NSGigE::txDmaWriteCopy()
1861 assert(txDmaState
== dmaWriting
);
1863 physmem
->dma_write(txDmaAddr
, (uint8_t *)txDmaData
, txDmaLen
);
1864 txDmaState
= dmaIdle
;
1866 DPRINTF(EthernetDMA
, "tx dma write paddr=%#x len=%d\n",
1867 txDmaAddr
, txDmaLen
);
1868 DDUMP(EthernetDMA
, txDmaData
, txDmaLen
);
1872 NSGigE::doTxDmaWrite()
1874 assert(txDmaState
== dmaIdle
|| txDmaState
== dmaWriteWaiting
);
1875 txDmaState
= dmaWriting
;
1877 if (dmaInterface
&& !txDmaFree
) {
1878 if (dmaInterface
->busy())
1879 txDmaState
= dmaWriteWaiting
;
1881 dmaInterface
->doDMA(WriteInvalidate
, txDmaAddr
, txDmaLen
, curTick
,
1882 &txDmaWriteEvent
, true);
1886 if (dmaWriteDelay
== 0 && dmaWriteFactor
== 0.0) {
1891 Tick factor
= ((txDmaLen
+ ULL(63)) >> ULL(6)) * dmaWriteFactor
;
1892 Tick start
= curTick
+ dmaWriteDelay
+ factor
;
1893 txDmaWriteEvent
.schedule(start
);
1898 NSGigE::txDmaWriteDone()
1900 assert(txDmaState
== dmaWriting
);
1903 // If the receive state machine has a pending DMA, let it go first
1904 if (rxDmaState
== dmaReadWaiting
|| rxDmaState
== dmaWriteWaiting
)
1913 DPRINTF(EthernetSM
, "transmit kick txState=%s\n",
1914 NsTxStateStrings
[txState
]);
1916 if (txKickTick
> curTick
) {
1917 DPRINTF(EthernetSM
, "transmit kick exiting, can't run till %d\n",
1924 switch(txDmaState
) {
1925 case dmaReadWaiting
:
1929 case dmaWriteWaiting
:
1940 DPRINTF(EthernetSM
, "Transmit disabled. Nothing to do.\n");
1945 txState
= txDescRefr
;
1947 txDmaAddr
= regs
.txdp
& 0x3fffffff;
1948 txDmaData
= &txDescCache
+ offsetof(ns_desc
, link
);
1949 txDmaLen
= sizeof(txDescCache
.link
);
1950 txDmaFree
= dmaDescFree
;
1953 descDmaRdBytes
+= txDmaLen
;
1959 txState
= txDescRead
;
1961 txDmaAddr
= regs
.txdp
& 0x3fffffff;
1962 txDmaData
= &txDescCache
;
1963 txDmaLen
= sizeof(ns_desc
);
1964 txDmaFree
= dmaDescFree
;
1967 descDmaRdBytes
+= txDmaLen
;
1975 if (txDmaState
!= dmaIdle
)
1978 txState
= txAdvance
;
1982 if (txDmaState
!= dmaIdle
)
1985 DPRINTF(EthernetDesc
,
1986 "txDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
1987 txDescCache
.link
, txDescCache
.bufptr
, txDescCache
.cmdsts
,
1988 txDescCache
.extsts
);
1990 if (txDescCache
.cmdsts
& CMDSTS_OWN
) {
1991 txState
= txFifoBlock
;
1992 txFragPtr
= txDescCache
.bufptr
;
1993 txDescCnt
= txDescCache
.cmdsts
& CMDSTS_LEN_MASK
;
1995 devIntrPost(ISR_TXIDLE
);
2003 DPRINTF(EthernetSM
, "****starting the tx of a new packet****\n");
2004 txPacket
= new PacketData(16384);
2005 txPacketBufPtr
= txPacket
->data
;
2008 if (txDescCnt
== 0) {
2009 DPRINTF(EthernetSM
, "the txDescCnt == 0, done with descriptor\n");
2010 if (txDescCache
.cmdsts
& CMDSTS_MORE
) {
2011 DPRINTF(EthernetSM
, "there are more descriptors to come\n");
2012 txState
= txDescWrite
;
2014 txDescCache
.cmdsts
&= ~CMDSTS_OWN
;
2016 txDmaAddr
= regs
.txdp
+ offsetof(ns_desc
, cmdsts
);
2017 txDmaAddr
&= 0x3fffffff;
2018 txDmaData
= &(txDescCache
.cmdsts
);
2019 txDmaLen
= sizeof(txDescCache
.cmdsts
);
2020 txDmaFree
= dmaDescFree
;
2025 } else { /* this packet is totally done */
2026 DPRINTF(EthernetSM
, "This packet is done, let's wrap it up\n");
2027 /* deal with the the packet that just finished */
2028 if ((regs
.vtcr
& VTCR_PPCHK
) && extstsEnable
) {
2030 if (txDescCache
.extsts
& EXTSTS_UDPPKT
) {
2033 udp
->sum(cksum(udp
));
2035 } else if (txDescCache
.extsts
& EXTSTS_TCPPKT
) {
2038 tcp
->sum(cksum(tcp
));
2041 if (txDescCache
.extsts
& EXTSTS_IPPKT
) {
2048 txPacket
->length
= txPacketBufPtr
- txPacket
->data
;
2049 // this is just because the receive can't handle a
2050 // packet bigger want to make sure
2051 assert(txPacket
->length
<= 1514);
2055 txFifo
.push(txPacket
);
2059 * this following section is not tqo spec, but
2060 * functionally shouldn't be any different. normally,
2061 * the chip will wait til the transmit has occurred
2062 * before writing back the descriptor because it has
2063 * to wait to see that it was successfully transmitted
2064 * to decide whether to set CMDSTS_OK or not.
2065 * however, in the simulator since it is always
2066 * successfully transmitted, and writing it exactly to
2067 * spec would complicate the code, we just do it here
2070 txDescCache
.cmdsts
&= ~CMDSTS_OWN
;
2071 txDescCache
.cmdsts
|= CMDSTS_OK
;
2073 DPRINTF(EthernetDesc
,
2074 "txDesc writeback: cmdsts=%08x extsts=%08x\n",
2075 txDescCache
.cmdsts
, txDescCache
.extsts
);
2077 txDmaAddr
= regs
.txdp
+ offsetof(ns_desc
, cmdsts
);
2078 txDmaAddr
&= 0x3fffffff;
2079 txDmaData
= &(txDescCache
.cmdsts
);
2080 txDmaLen
= sizeof(txDescCache
.cmdsts
) +
2081 sizeof(txDescCache
.extsts
);
2082 txDmaFree
= dmaDescFree
;
2085 descDmaWrBytes
+= txDmaLen
;
2091 DPRINTF(EthernetSM
, "halting TX state machine\n");
2095 txState
= txAdvance
;
2101 DPRINTF(EthernetSM
, "this descriptor isn't done yet\n");
2102 if (!txFifo
.full()) {
2103 txState
= txFragRead
;
2106 * The number of bytes transferred is either whatever
2107 * is left in the descriptor (txDescCnt), or if there
2108 * is not enough room in the fifo, just whatever room
2109 * is left in the fifo
2111 txXferLen
= min
<uint32_t>(txDescCnt
, txFifo
.avail());
2113 txDmaAddr
= txFragPtr
& 0x3fffffff;
2114 txDmaData
= txPacketBufPtr
;
2115 txDmaLen
= txXferLen
;
2116 txDmaFree
= dmaDataFree
;
2121 txState
= txFifoBlock
;
2131 if (txDmaState
!= dmaIdle
)
2134 txPacketBufPtr
+= txXferLen
;
2135 txFragPtr
+= txXferLen
;
2136 txDescCnt
-= txXferLen
;
2137 txFifo
.reserve(txXferLen
);
2139 txState
= txFifoBlock
;
2143 if (txDmaState
!= dmaIdle
)
2146 if (txDescCache
.cmdsts
& CMDSTS_INTR
)
2147 devIntrPost(ISR_TXDESC
);
2149 txState
= txAdvance
;
2153 if (txDescCache
.link
== 0) {
2154 devIntrPost(ISR_TXIDLE
);
2158 txState
= txDescRead
;
2159 regs
.txdp
= txDescCache
.link
;
2162 txDmaAddr
= txDescCache
.link
& 0x3fffffff;
2163 txDmaData
= &txDescCache
;
2164 txDmaLen
= sizeof(ns_desc
);
2165 txDmaFree
= dmaDescFree
;
2173 panic("invalid state");
2176 DPRINTF(EthernetSM
, "entering next txState=%s\n",
2177 NsTxStateStrings
[txState
]);
2183 * @todo do we want to schedule a future kick?
2185 DPRINTF(EthernetSM
, "tx state machine exited txState=%s\n",
2186 NsTxStateStrings
[txState
]);
2190 NSGigE::transferDone()
2192 if (txFifo
.empty()) {
2193 DPRINTF(Ethernet
, "transfer complete: txFifo empty...nothing to do\n");
2197 DPRINTF(Ethernet
, "transfer complete: data in txFifo...schedule xmit\n");
2199 if (txEvent
.scheduled())
2200 txEvent
.reschedule(curTick
+ 1);
2202 txEvent
.schedule(curTick
+ 1);
2206 NSGigE::rxFilter(const PacketPtr
&packet
)
2208 EthPtr eth
= packet
;
2212 const EthAddr
&dst
= eth
->dst();
2213 if (dst
.unicast()) {
2214 // If we're accepting all unicast addresses
2218 // If we make a perfect match
2219 if (acceptPerfect
&& dst
== rom
.perfectMatch
)
2222 if (acceptArp
&& eth
->type() == ETH_TYPE_ARP
)
2225 } else if (dst
.broadcast()) {
2226 // if we're accepting broadcasts
2227 if (acceptBroadcast
)
2230 } else if (dst
.multicast()) {
2231 // if we're accepting all multicasts
2232 if (acceptMulticast
)
2238 DPRINTF(Ethernet
, "rxFilter drop\n");
2239 DDUMP(EthernetData
, packet
->data
, packet
->length
);
2246 NSGigE::recvPacket(PacketPtr packet
)
2248 rxBytes
+= packet
->length
;
2251 DPRINTF(Ethernet
, "Receiving packet from wire, rxFifoAvail=%d\n",
2255 DPRINTF(Ethernet
, "receive disabled...packet dropped\n");
2257 interface
->recvDone();
2261 if (rxFilterEnable
&& rxFilter(packet
)) {
2262 DPRINTF(Ethernet
, "packet filtered...dropped\n");
2263 interface
->recvDone();
2267 if (rxFifo
.avail() < packet
->length
) {
2269 "packet will not fit in receive buffer...packet dropped\n");
2271 devIntrPost(ISR_RXORN
);
2275 rxFifo
.push(packet
);
2276 interface
->recvDone();
2282 //=====================================================================
2286 NSGigE::serialize(ostream
&os
)
2288 // Serialize the PciDev base class
2289 PciDev::serialize(os
);
2292 * Finalize any DMA events now.
2294 if (rxDmaReadEvent
.scheduled())
2296 if (rxDmaWriteEvent
.scheduled())
2298 if (txDmaReadEvent
.scheduled())
2300 if (txDmaWriteEvent
.scheduled())
2304 * Serialize the device registers
2306 SERIALIZE_SCALAR(regs
.command
);
2307 SERIALIZE_SCALAR(regs
.config
);
2308 SERIALIZE_SCALAR(regs
.mear
);
2309 SERIALIZE_SCALAR(regs
.ptscr
);
2310 SERIALIZE_SCALAR(regs
.isr
);
2311 SERIALIZE_SCALAR(regs
.imr
);
2312 SERIALIZE_SCALAR(regs
.ier
);
2313 SERIALIZE_SCALAR(regs
.ihr
);
2314 SERIALIZE_SCALAR(regs
.txdp
);
2315 SERIALIZE_SCALAR(regs
.txdp_hi
);
2316 SERIALIZE_SCALAR(regs
.txcfg
);
2317 SERIALIZE_SCALAR(regs
.gpior
);
2318 SERIALIZE_SCALAR(regs
.rxdp
);
2319 SERIALIZE_SCALAR(regs
.rxdp_hi
);
2320 SERIALIZE_SCALAR(regs
.rxcfg
);
2321 SERIALIZE_SCALAR(regs
.pqcr
);
2322 SERIALIZE_SCALAR(regs
.wcsr
);
2323 SERIALIZE_SCALAR(regs
.pcr
);
2324 SERIALIZE_SCALAR(regs
.rfcr
);
2325 SERIALIZE_SCALAR(regs
.rfdr
);
2326 SERIALIZE_SCALAR(regs
.srr
);
2327 SERIALIZE_SCALAR(regs
.mibc
);
2328 SERIALIZE_SCALAR(regs
.vrcr
);
2329 SERIALIZE_SCALAR(regs
.vtcr
);
2330 SERIALIZE_SCALAR(regs
.vdr
);
2331 SERIALIZE_SCALAR(regs
.ccsr
);
2332 SERIALIZE_SCALAR(regs
.tbicr
);
2333 SERIALIZE_SCALAR(regs
.tbisr
);
2334 SERIALIZE_SCALAR(regs
.tanar
);
2335 SERIALIZE_SCALAR(regs
.tanlpar
);
2336 SERIALIZE_SCALAR(regs
.taner
);
2337 SERIALIZE_SCALAR(regs
.tesr
);
2339 SERIALIZE_ARRAY(rom
.perfectMatch
, ETH_ADDR_LEN
);
2341 SERIALIZE_SCALAR(ioEnable
);
2344 * Serialize the data Fifos
2346 rxFifo
.serialize("rxFifo", os
);
2347 txFifo
.serialize("txFifo", os
);
2350 * Serialize the various helper variables
2352 bool txPacketExists
= txPacket
;
2353 SERIALIZE_SCALAR(txPacketExists
);
2354 if (txPacketExists
) {
2355 txPacket
->serialize("txPacket", os
);
2356 uint32_t txPktBufPtr
= (uint32_t) (txPacketBufPtr
- txPacket
->data
);
2357 SERIALIZE_SCALAR(txPktBufPtr
);
2360 bool rxPacketExists
= rxPacket
;
2361 SERIALIZE_SCALAR(rxPacketExists
);
2362 if (rxPacketExists
) {
2363 rxPacket
->serialize("rxPacket", os
);
2364 uint32_t rxPktBufPtr
= (uint32_t) (rxPacketBufPtr
- rxPacket
->data
);
2365 SERIALIZE_SCALAR(rxPktBufPtr
);
2368 SERIALIZE_SCALAR(txXferLen
);
2369 SERIALIZE_SCALAR(rxXferLen
);
2372 * Serialize DescCaches
2374 SERIALIZE_SCALAR(txDescCache
.link
);
2375 SERIALIZE_SCALAR(txDescCache
.bufptr
);
2376 SERIALIZE_SCALAR(txDescCache
.cmdsts
);
2377 SERIALIZE_SCALAR(txDescCache
.extsts
);
2378 SERIALIZE_SCALAR(rxDescCache
.link
);
2379 SERIALIZE_SCALAR(rxDescCache
.bufptr
);
2380 SERIALIZE_SCALAR(rxDescCache
.cmdsts
);
2381 SERIALIZE_SCALAR(rxDescCache
.extsts
);
2384 * Serialize tx state machine
2386 int txState
= this->txState
;
2387 SERIALIZE_SCALAR(txState
);
2388 SERIALIZE_SCALAR(txEnable
);
2389 SERIALIZE_SCALAR(CTDD
);
2390 SERIALIZE_SCALAR(txFragPtr
);
2391 SERIALIZE_SCALAR(txDescCnt
);
2392 int txDmaState
= this->txDmaState
;
2393 SERIALIZE_SCALAR(txDmaState
);
2396 * Serialize rx state machine
2398 int rxState
= this->rxState
;
2399 SERIALIZE_SCALAR(rxState
);
2400 SERIALIZE_SCALAR(rxEnable
);
2401 SERIALIZE_SCALAR(CRDD
);
2402 SERIALIZE_SCALAR(rxPktBytes
);
2403 SERIALIZE_SCALAR(rxFragPtr
);
2404 SERIALIZE_SCALAR(rxDescCnt
);
2405 int rxDmaState
= this->rxDmaState
;
2406 SERIALIZE_SCALAR(rxDmaState
);
2408 SERIALIZE_SCALAR(extstsEnable
);
2411 * If there's a pending transmit, store the time so we can
2412 * reschedule it later
2414 Tick transmitTick
= txEvent
.scheduled() ? txEvent
.when() - curTick
: 0;
2415 SERIALIZE_SCALAR(transmitTick
);
2418 * receive address filter settings
2420 SERIALIZE_SCALAR(rxFilterEnable
);
2421 SERIALIZE_SCALAR(acceptBroadcast
);
2422 SERIALIZE_SCALAR(acceptMulticast
);
2423 SERIALIZE_SCALAR(acceptUnicast
);
2424 SERIALIZE_SCALAR(acceptPerfect
);
2425 SERIALIZE_SCALAR(acceptArp
);
2428 * Keep track of pending interrupt status.
2430 SERIALIZE_SCALAR(intrTick
);
2431 SERIALIZE_SCALAR(cpuPendingIntr
);
2432 Tick intrEventTick
= 0;
2434 intrEventTick
= intrEvent
->when();
2435 SERIALIZE_SCALAR(intrEventTick
);
2440 NSGigE::unserialize(Checkpoint
*cp
, const std::string
§ion
)
2442 // Unserialize the PciDev base class
2443 PciDev::unserialize(cp
, section
);
2445 UNSERIALIZE_SCALAR(regs
.command
);
2446 UNSERIALIZE_SCALAR(regs
.config
);
2447 UNSERIALIZE_SCALAR(regs
.mear
);
2448 UNSERIALIZE_SCALAR(regs
.ptscr
);
2449 UNSERIALIZE_SCALAR(regs
.isr
);
2450 UNSERIALIZE_SCALAR(regs
.imr
);
2451 UNSERIALIZE_SCALAR(regs
.ier
);
2452 UNSERIALIZE_SCALAR(regs
.ihr
);
2453 UNSERIALIZE_SCALAR(regs
.txdp
);
2454 UNSERIALIZE_SCALAR(regs
.txdp_hi
);
2455 UNSERIALIZE_SCALAR(regs
.txcfg
);
2456 UNSERIALIZE_SCALAR(regs
.gpior
);
2457 UNSERIALIZE_SCALAR(regs
.rxdp
);
2458 UNSERIALIZE_SCALAR(regs
.rxdp_hi
);
2459 UNSERIALIZE_SCALAR(regs
.rxcfg
);
2460 UNSERIALIZE_SCALAR(regs
.pqcr
);
2461 UNSERIALIZE_SCALAR(regs
.wcsr
);
2462 UNSERIALIZE_SCALAR(regs
.pcr
);
2463 UNSERIALIZE_SCALAR(regs
.rfcr
);
2464 UNSERIALIZE_SCALAR(regs
.rfdr
);
2465 UNSERIALIZE_SCALAR(regs
.srr
);
2466 UNSERIALIZE_SCALAR(regs
.mibc
);
2467 UNSERIALIZE_SCALAR(regs
.vrcr
);
2468 UNSERIALIZE_SCALAR(regs
.vtcr
);
2469 UNSERIALIZE_SCALAR(regs
.vdr
);
2470 UNSERIALIZE_SCALAR(regs
.ccsr
);
2471 UNSERIALIZE_SCALAR(regs
.tbicr
);
2472 UNSERIALIZE_SCALAR(regs
.tbisr
);
2473 UNSERIALIZE_SCALAR(regs
.tanar
);
2474 UNSERIALIZE_SCALAR(regs
.tanlpar
);
2475 UNSERIALIZE_SCALAR(regs
.taner
);
2476 UNSERIALIZE_SCALAR(regs
.tesr
);
2478 UNSERIALIZE_ARRAY(rom
.perfectMatch
, ETH_ADDR_LEN
);
2480 UNSERIALIZE_SCALAR(ioEnable
);
2483 * unserialize the data fifos
2485 rxFifo
.unserialize("rxFifo", cp
, section
);
2486 txFifo
.unserialize("txFifo", cp
, section
);
2489 * unserialize the various helper variables
2491 bool txPacketExists
;
2492 UNSERIALIZE_SCALAR(txPacketExists
);
2493 if (txPacketExists
) {
2494 txPacket
= new PacketData(16384);
2495 txPacket
->unserialize("txPacket", cp
, section
);
2496 uint32_t txPktBufPtr
;
2497 UNSERIALIZE_SCALAR(txPktBufPtr
);
2498 txPacketBufPtr
= (uint8_t *) txPacket
->data
+ txPktBufPtr
;
2502 bool rxPacketExists
;
2503 UNSERIALIZE_SCALAR(rxPacketExists
);
2505 if (rxPacketExists
) {
2506 rxPacket
= new PacketData(16384);
2507 rxPacket
->unserialize("rxPacket", cp
, section
);
2508 uint32_t rxPktBufPtr
;
2509 UNSERIALIZE_SCALAR(rxPktBufPtr
);
2510 rxPacketBufPtr
= (uint8_t *) rxPacket
->data
+ rxPktBufPtr
;
2514 UNSERIALIZE_SCALAR(txXferLen
);
2515 UNSERIALIZE_SCALAR(rxXferLen
);
2518 * Unserialize DescCaches
2520 UNSERIALIZE_SCALAR(txDescCache
.link
);
2521 UNSERIALIZE_SCALAR(txDescCache
.bufptr
);
2522 UNSERIALIZE_SCALAR(txDescCache
.cmdsts
);
2523 UNSERIALIZE_SCALAR(txDescCache
.extsts
);
2524 UNSERIALIZE_SCALAR(rxDescCache
.link
);
2525 UNSERIALIZE_SCALAR(rxDescCache
.bufptr
);
2526 UNSERIALIZE_SCALAR(rxDescCache
.cmdsts
);
2527 UNSERIALIZE_SCALAR(rxDescCache
.extsts
);
2530 * unserialize tx state machine
2533 UNSERIALIZE_SCALAR(txState
);
2534 this->txState
= (TxState
) txState
;
2535 UNSERIALIZE_SCALAR(txEnable
);
2536 UNSERIALIZE_SCALAR(CTDD
);
2537 UNSERIALIZE_SCALAR(txFragPtr
);
2538 UNSERIALIZE_SCALAR(txDescCnt
);
2540 UNSERIALIZE_SCALAR(txDmaState
);
2541 this->txDmaState
= (DmaState
) txDmaState
;
2544 * unserialize rx state machine
2547 UNSERIALIZE_SCALAR(rxState
);
2548 this->rxState
= (RxState
) rxState
;
2549 UNSERIALIZE_SCALAR(rxEnable
);
2550 UNSERIALIZE_SCALAR(CRDD
);
2551 UNSERIALIZE_SCALAR(rxPktBytes
);
2552 UNSERIALIZE_SCALAR(rxFragPtr
);
2553 UNSERIALIZE_SCALAR(rxDescCnt
);
2555 UNSERIALIZE_SCALAR(rxDmaState
);
2556 this->rxDmaState
= (DmaState
) rxDmaState
;
2558 UNSERIALIZE_SCALAR(extstsEnable
);
2561 * If there's a pending transmit, reschedule it now
2564 UNSERIALIZE_SCALAR(transmitTick
);
2566 txEvent
.schedule(curTick
+ transmitTick
);
2569 * unserialize receive address filter settings
2571 UNSERIALIZE_SCALAR(rxFilterEnable
);
2572 UNSERIALIZE_SCALAR(acceptBroadcast
);
2573 UNSERIALIZE_SCALAR(acceptMulticast
);
2574 UNSERIALIZE_SCALAR(acceptUnicast
);
2575 UNSERIALIZE_SCALAR(acceptPerfect
);
2576 UNSERIALIZE_SCALAR(acceptArp
);
2579 * Keep track of pending interrupt status.
2581 UNSERIALIZE_SCALAR(intrTick
);
2582 UNSERIALIZE_SCALAR(cpuPendingIntr
);
2584 UNSERIALIZE_SCALAR(intrEventTick
);
2585 if (intrEventTick
) {
2586 intrEvent
= new IntrEvent(this, true);
2587 intrEvent
->schedule(intrEventTick
);
2591 * re-add addrRanges to bus bridges
2594 pioInterface
->addAddrRange(RangeSize(BARAddrs
[0], BARSize
[0]));
2595 pioInterface
->addAddrRange(RangeSize(BARAddrs
[1], BARSize
[1]));
2600 NSGigE::cacheAccess(MemReqPtr
&req
)
2602 DPRINTF(EthernetPIO
, "timing access to paddr=%#x (daddr=%#x)\n",
2603 req
->paddr
, req
->paddr
- addr
);
2604 return curTick
+ pioLatency
;
2607 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt
)
2609 SimObjectParam
<EtherInt
*> peer
;
2610 SimObjectParam
<NSGigE
*> device
;
2612 END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt
)
2614 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt
)
2616 INIT_PARAM_DFLT(peer
, "peer interface", NULL
),
2617 INIT_PARAM(device
, "Ethernet device of this interface")
2619 END_INIT_SIM_OBJECT_PARAMS(NSGigEInt
)
2621 CREATE_SIM_OBJECT(NSGigEInt
)
2623 NSGigEInt
*dev_int
= new NSGigEInt(getInstanceName(), device
);
2625 EtherInt
*p
= (EtherInt
*)peer
;
2627 dev_int
->setPeer(p
);
2628 p
->setPeer(dev_int
);
2634 REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt
)
2637 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE
)
2640 Param
<Tick
> tx_delay
;
2641 Param
<Tick
> rx_delay
;
2642 Param
<Tick
> intr_delay
;
2643 SimObjectParam
<MemoryController
*> mmu
;
2644 SimObjectParam
<PhysicalMemory
*> physmem
;
2645 Param
<bool> rx_filter
;
2646 Param
<string
> hardware_address
;
2647 SimObjectParam
<Bus
*> io_bus
;
2648 SimObjectParam
<Bus
*> payload_bus
;
2649 SimObjectParam
<HierParams
*> hier
;
2650 Param
<Tick
> pio_latency
;
2651 Param
<bool> dma_desc_free
;
2652 Param
<bool> dma_data_free
;
2653 Param
<Tick
> dma_read_delay
;
2654 Param
<Tick
> dma_write_delay
;
2655 Param
<Tick
> dma_read_factor
;
2656 Param
<Tick
> dma_write_factor
;
2657 SimObjectParam
<PciConfigAll
*> configspace
;
2658 SimObjectParam
<PciConfigData
*> configdata
;
2659 SimObjectParam
<Platform
*> platform
;
2660 Param
<uint32_t> pci_bus
;
2661 Param
<uint32_t> pci_dev
;
2662 Param
<uint32_t> pci_func
;
2663 Param
<uint32_t> tx_fifo_size
;
2664 Param
<uint32_t> rx_fifo_size
;
2666 END_DECLARE_SIM_OBJECT_PARAMS(NSGigE
)
2668 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE
)
2670 INIT_PARAM(addr
, "Device Address"),
2671 INIT_PARAM_DFLT(tx_delay
, "Transmit Delay", 1000),
2672 INIT_PARAM_DFLT(rx_delay
, "Receive Delay", 1000),
2673 INIT_PARAM_DFLT(intr_delay
, "Interrupt Delay in microseconds", 0),
2674 INIT_PARAM(mmu
, "Memory Controller"),
2675 INIT_PARAM(physmem
, "Physical Memory"),
2676 INIT_PARAM_DFLT(rx_filter
, "Enable Receive Filter", true),
2677 INIT_PARAM_DFLT(hardware_address
, "Ethernet Hardware Address",
2678 "00:99:00:00:00:01"),
2679 INIT_PARAM_DFLT(io_bus
, "The IO Bus to attach to for headers", NULL
),
2680 INIT_PARAM_DFLT(payload_bus
, "The IO Bus to attach to for payload", NULL
),
2681 INIT_PARAM_DFLT(hier
, "Hierarchy global variables", &defaultHierParams
),
2682 INIT_PARAM_DFLT(pio_latency
, "Programmed IO latency in bus cycles", 1),
2683 INIT_PARAM_DFLT(dma_desc_free
, "DMA of Descriptors is free", false),
2684 INIT_PARAM_DFLT(dma_data_free
, "DMA of Data is free", false),
2685 INIT_PARAM_DFLT(dma_read_delay
, "fixed delay for dma reads", 0),
2686 INIT_PARAM_DFLT(dma_write_delay
, "fixed delay for dma writes", 0),
2687 INIT_PARAM_DFLT(dma_read_factor
, "multiplier for dma reads", 0),
2688 INIT_PARAM_DFLT(dma_write_factor
, "multiplier for dma writes", 0),
2689 INIT_PARAM(configspace
, "PCI Configspace"),
2690 INIT_PARAM(configdata
, "PCI Config data"),
2691 INIT_PARAM(platform
, "Platform"),
2692 INIT_PARAM(pci_bus
, "PCI bus"),
2693 INIT_PARAM(pci_dev
, "PCI device number"),
2694 INIT_PARAM(pci_func
, "PCI function code"),
2695 INIT_PARAM_DFLT(tx_fifo_size
, "max size in bytes of txFifo", 131072),
2696 INIT_PARAM_DFLT(rx_fifo_size
, "max size in bytes of rxFifo", 131072)
2698 END_INIT_SIM_OBJECT_PARAMS(NSGigE
)
2701 CREATE_SIM_OBJECT(NSGigE
)
2703 NSGigE::Params
*params
= new NSGigE::Params
;
2705 params
->name
= getInstanceName();
2707 params
->configSpace
= configspace
;
2708 params
->configData
= configdata
;
2709 params
->plat
= platform
;
2710 params
->busNum
= pci_bus
;
2711 params
->deviceNum
= pci_dev
;
2712 params
->functionNum
= pci_func
;
2714 params
->intr_delay
= intr_delay
;
2715 params
->pmem
= physmem
;
2716 params
->tx_delay
= tx_delay
;
2717 params
->rx_delay
= rx_delay
;
2718 params
->hier
= hier
;
2719 params
->header_bus
= io_bus
;
2720 params
->payload_bus
= payload_bus
;
2721 params
->pio_latency
= pio_latency
;
2722 params
->dma_desc_free
= dma_desc_free
;
2723 params
->dma_data_free
= dma_data_free
;
2724 params
->dma_read_delay
= dma_read_delay
;
2725 params
->dma_write_delay
= dma_write_delay
;
2726 params
->dma_read_factor
= dma_read_factor
;
2727 params
->dma_write_factor
= dma_write_factor
;
2728 params
->rx_filter
= rx_filter
;
2729 params
->eaddr
= hardware_address
;
2730 params
->tx_fifo_size
= tx_fifo_size
;
2731 params
->rx_fifo_size
= rx_fifo_size
;
2732 return new NSGigE(params
);
2735 REGISTER_SIM_OBJECT("NSGigE", NSGigE
)