2 * Copyright (c) 2004 The Regents of The University of Michigan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 * Device module for modelling the National Semiconductor
31 * DP83820 ethernet controller. Does not support priority queueing
37 #include "base/inet.hh"
38 #include "cpu/exec_context.hh"
40 #include "dev/etherlink.hh"
41 #include "dev/ns_gige.hh"
42 #include "dev/pciconfigall.hh"
43 #include "mem/bus/bus.hh"
44 #include "mem/bus/dma_interface.hh"
45 #include "mem/bus/pio_interface.hh"
46 #include "mem/bus/pio_interface_impl.hh"
47 #include "mem/functional_mem/memory_control.hh"
48 #include "mem/functional_mem/physical_memory.hh"
49 #include "sim/builder.hh"
50 #include "sim/debug.hh"
51 #include "sim/host.hh"
52 #include "sim/stats.hh"
53 #include "targetarch/vtophys.hh"
// Human-readable names for the receive/transmit state-machine states and
// the DMA states; indexed by the corresponding enum and used by the
// DPRINTF(EthernetSM, ...) tracing calls later in this file.
// NOTE(review): the initializer lists were elided in this extraction.
55 const char *NsRxStateStrings
[] =
66 const char *NsTxStateStrings
[] =
77 const char *NsDmaState
[] =
89 ///////////////////////////////////////////////////////////////////////
// Constructor: initializes every piece of device state from the Params
// struct -- FIFO sizes, tx/rx state machines (start idle/disabled), DMA
// bookkeeping, event objects, filter flags, and the physical-memory /
// interrupt plumbing.  It then sets up the PIO and DMA bus interfaces,
// scales PIO latency by the bus clock ratio, latches the DMA delay/factor
// parameters, and copies the configured MAC address into the perfect-match
// filter ROM.
// NOTE(review): this extraction elides some original lines (the embedded
// numbers at fragment starts are original line numbers); fragments are not
// contiguous source text.
93 NSGigE::NSGigE(Params
*p
)
94 : PciDev(p
), ioEnable(false),
95 txFifo(p
->tx_fifo_size
), rxFifo(p
->rx_fifo_size
),
96 txPacket(0), rxPacket(0), txPacketBufPtr(NULL
), rxPacketBufPtr(NULL
),
97 txXferLen(0), rxXferLen(0), txState(txIdle
), txEnable(false),
99 txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle
), rxState(rxIdle
),
100 rxEnable(false), CRDD(false), rxPktBytes(0),
101 rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle
), extstsEnable(false),
102 rxDmaReadEvent(this), rxDmaWriteEvent(this),
103 txDmaReadEvent(this), txDmaWriteEvent(this),
104 dmaDescFree(p
->dma_desc_free
), dmaDataFree(p
->dma_data_free
),
105 txDelay(p
->tx_delay
), rxDelay(p
->rx_delay
),
106 rxKickTick(0), txKickTick(0),
107 txEvent(this), rxFilterEnable(p
->rx_filter
), acceptBroadcast(false),
108 acceptMulticast(false), acceptUnicast(false),
109 acceptPerfect(false), acceptArp(false),
110 physmem(p
->pmem
), intrTick(0), cpuPendingIntr(false),
111 intrEvent(0), interface(0)
// Wire up the programmed-I/O interface; register accesses are funneled
// through NSGigE::cacheAccess.
114 pioInterface
= newPioInterface(name(), p
->hier
,
116 &NSGigE::cacheAccess
)
;
// PIO latency is expressed in bus cycles, hence the clockRatio scaling.
118 pioLatency
= p
->pio_latency
* p
->header_bus
->clockRatio
;
121 dmaInterface
= new DMAInterface
<Bus
>(name() + ".dma",
125 dmaInterface
= new DMAInterface
<Bus
>(name() + ".dma",
// Fall back to the payload bus for PIO/DMA when no header bus was given.
128 } else if (p
->payload_bus
) {
129 pioInterface
= newPioInterface(name(), p
->hier
,
130 p
->payload_bus
, this,
131 &NSGigE::cacheAccess
)
;
133 pioLatency
= p
->pio_latency
* p
->payload_bus
->clockRatio
;
135 dmaInterface
= new DMAInterface
<Bus
>(name() + ".dma",
// Latch timing knobs: interrupt delay (converted from microseconds) and
// the fixed/per-cache-line DMA read/write cost parameters.
141 intrDelay
= p
->intr_delay
* Clock::Int::us
;
142 dmaReadDelay
= p
->dma_read_delay
;
143 dmaWriteDelay
= p
->dma_write_delay
;
144 dmaReadFactor
= p
->dma_read_factor
;
145 dmaWriteFactor
= p
->dma_write_factor
;
// Seed the receive-filter perfect-match registers with the device MAC.
148 memcpy(&rom
.perfectMatch
, p
->eaddr
.bytes(), ETH_ADDR_LEN
);
// Statistics registration (the enclosing function signature -- presumably
// NSGigE::regStats() -- was elided in this extraction; TODO confirm).
// First section: register each scalar/vector stat with a dotted name and
// description; second section: define derived formulas (coalescing ratios,
// bandwidth, totals, packet rates).
158 .name(name() + ".txBytes")
159 .desc("Bytes Transmitted")
164 .name(name() + ".rxBytes")
165 .desc("Bytes Received")
170 .name(name() + ".txPackets")
171 .desc("Number of Packets Transmitted")
176 .name(name() + ".rxPackets")
177 .desc("Number of Packets Received")
182 .name(name() + ".txIpChecksums")
183 .desc("Number of tx IP Checksums done by device")
189 .name(name() + ".rxIpChecksums")
190 .desc("Number of rx IP Checksums done by device")
196 .name(name() + ".txTcpChecksums")
197 .desc("Number of tx TCP Checksums done by device")
203 .name(name() + ".rxTcpChecksums")
204 .desc("Number of rx TCP Checksums done by device")
210 .name(name() + ".txUdpChecksums")
211 .desc("Number of tx UDP Checksums done by device")
217 .name(name() + ".rxUdpChecksums")
218 .desc("Number of rx UDP Checksums done by device")
224 .name(name() + ".descDMAReads")
225 .desc("Number of descriptors the device read w/ DMA")
230 .name(name() + ".descDMAWrites")
231 .desc("Number of descriptors the device wrote w/ DMA")
236 .name(name() + ".descDmaReadBytes")
237 .desc("number of descriptor bytes read w/ DMA")
242 .name(name() + ".descDmaWriteBytes")
243 .desc("number of descriptor bytes write w/ DMA")
248 .name(name() + ".txBandwidth")
249 .desc("Transmit Bandwidth (bits/s)")
255 .name(name() + ".rxBandwidth")
256 .desc("Receive Bandwidth (bits/s)")
262 .name(name() + ".totBandwidth")
263 .desc("Total Bandwidth (bits/s)")
269 .name(name() + ".totPackets")
270 .desc("Total Packets")
276 .name(name() + ".totBytes")
283 .name(name() + ".totPPS")
284 .desc("Total Tranmission Rate (packets/s)")
290 .name(name() + ".txPPS")
291 .desc("Packet Tranmission Rate (packets/s)")
297 .name(name() + ".rxPPS")
298 .desc("Packet Reception Rate (packets/s)")
// Per-interrupt-source stats: "posted" = delivered to the CPU,
// "total" = written to the ISR, "coalesced" = total/posted (formula below).
304 .name(name() + ".postedSwi")
305 .desc("number of software interrupts posted to CPU")
310 .name(name() + ".totalSwi")
311 .desc("number of total Swi written to ISR")
316 .name(name() + ".coalescedSwi")
317 .desc("average number of Swi's coalesced into each post")
322 .name(name() + ".postedRxIdle")
323 .desc("number of rxIdle interrupts posted to CPU")
328 .name(name() + ".totalRxIdle")
329 .desc("number of total RxIdle written to ISR")
334 .name(name() + ".coalescedRxIdle")
335 .desc("average number of RxIdle's coalesced into each post")
340 .name(name() + ".postedRxOk")
341 .desc("number of RxOk interrupts posted to CPU")
346 .name(name() + ".totalRxOk")
347 .desc("number of total RxOk written to ISR")
352 .name(name() + ".coalescedRxOk")
353 .desc("average number of RxOk's coalesced into each post")
358 .name(name() + ".postedRxDesc")
359 .desc("number of RxDesc interrupts posted to CPU")
364 .name(name() + ".totalRxDesc")
365 .desc("number of total RxDesc written to ISR")
370 .name(name() + ".coalescedRxDesc")
371 .desc("average number of RxDesc's coalesced into each post")
376 .name(name() + ".postedTxOk")
377 .desc("number of TxOk interrupts posted to CPU")
382 .name(name() + ".totalTxOk")
383 .desc("number of total TxOk written to ISR")
388 .name(name() + ".coalescedTxOk")
389 .desc("average number of TxOk's coalesced into each post")
394 .name(name() + ".postedTxIdle")
395 .desc("number of TxIdle interrupts posted to CPU")
400 .name(name() + ".totalTxIdle")
401 .desc("number of total TxIdle written to ISR")
406 .name(name() + ".coalescedTxIdle")
407 .desc("average number of TxIdle's coalesced into each post")
412 .name(name() + ".postedTxDesc")
413 .desc("number of TxDesc interrupts posted to CPU")
418 .name(name() + ".totalTxDesc")
419 .desc("number of total TxDesc written to ISR")
424 .name(name() + ".coalescedTxDesc")
425 .desc("average number of TxDesc's coalesced into each post")
430 .name(name() + ".postedRxOrn")
431 .desc("number of RxOrn posted to CPU")
436 .name(name() + ".totalRxOrn")
437 .desc("number of total RxOrn written to ISR")
442 .name(name() + ".coalescedRxOrn")
443 .desc("average number of RxOrn's coalesced into each post")
448 .name(name() + ".coalescedTotal")
449 .desc("average number of interrupts coalesced into each post")
454 .name(name() + ".postedInterrupts")
455 .desc("number of posts to CPU")
460 .name(name() + ".droppedPackets")
461 .desc("number of packets dropped")
// Derived formulas: average ISR writes per CPU post, per source.
465 coalescedSwi
= totalSwi
/ postedInterrupts
;
466 coalescedRxIdle
= totalRxIdle
/ postedInterrupts
;
467 coalescedRxOk
= totalRxOk
/ postedInterrupts
;
468 coalescedRxDesc
= totalRxDesc
/ postedInterrupts
;
469 coalescedTxOk
= totalTxOk
/ postedInterrupts
;
470 coalescedTxIdle
= totalTxIdle
/ postedInterrupts
;
471 coalescedTxDesc
= totalTxDesc
/ postedInterrupts
;
472 coalescedRxOrn
= totalRxOrn
/ postedInterrupts
;
474 coalescedTotal
= (totalSwi
+ totalRxIdle
+ totalRxOk
+ totalRxDesc
+ totalTxOk
475 + totalTxIdle
+ totalTxDesc
+ totalRxOrn
) / postedInterrupts
;
// Bandwidth in bits/s, plus aggregate byte/packet counts and rates.
477 txBandwidth
= txBytes
* Stats::constant(8) / simSeconds
;
478 rxBandwidth
= rxBytes
* Stats::constant(8) / simSeconds
;
479 totBandwidth
= txBandwidth
+ rxBandwidth
;
480 totBytes
= txBytes
+ rxBytes
;
481 totPackets
= txPackets
+ rxPackets
;
483 txPacketRate
= txPackets
/ simSeconds
;
484 rxPacketRate
= rxPackets
/ simSeconds
;
488 * This is to read the PCI general configuration registers
// Delegates standard PCI config-space reads (offset below
// PCI_DEVICE_SPECIFIC) to the PciDev base class; any device-specific
// config offset is unimplemented and panics.
491 NSGigE::ReadConfig(int offset
, int size
, uint8_t *data
)
493 if (offset
< PCI_DEVICE_SPECIFIC
)
494 PciDev::ReadConfig(offset
, size
, data
);
496 panic("Device specific PCI config space not implemented!\n");
500 * This is to write to the PCI general configuration registers
// Delegates standard PCI config-space writes to PciDev, panics on
// device-specific offsets, then post-processes BAR/command-register
// writes: command bits (IOSE/BME/MSE) are checked, and writes to
// BAR0/BAR1 re-register the PIO address range and mask the programmed
// address with the EV5 uncached-space mask.
503 NSGigE::WriteConfig(int offset
, int size
, uint32_t data
)
505 if (offset
< PCI_DEVICE_SPECIFIC
)
506 PciDev::WriteConfig(offset
, size
, data
);
508 panic("Device specific PCI config space not implemented!\n");
510 // Need to catch writes to BARs to update the PIO interface
512 // seems to work fine without all these PCI settings, but i
513 // put in the IO to double check, an assertion will fail if we
514 // need to properly implement it
516 if (config
.data
[offset
] & PCI_CMD_IOSE
)
522 if (config
.data
[offset
] & PCI_CMD_BME
) {
529 if (config
.data
[offset
] & PCI_CMD_MSE
) {
538 case PCI0_BASE_ADDR0
:
539 if (BARAddrs
[0] != 0) {
541 pioInterface
->addAddrRange(RangeSize(BARAddrs
[0], BARSize
[0]));
// Strip the uncached-space bits so the BAR holds a physical address.
543 BARAddrs
[0] &= EV5::PAddrUncachedMask
;
546 case PCI0_BASE_ADDR1
:
547 if (BARAddrs
[1] != 0) {
549 pioInterface
->addAddrRange(RangeSize(BARAddrs
[1], BARSize
[1]));
551 BARAddrs
[1] &= EV5::PAddrUncachedMask
;
558 * This reads the device registers, which are detailed in the NS83820
// PIO read handler: decodes the low 12 bits of the physical address as a
// device-register offset, routes reserved/config/MIB ranges specially,
// then (for 32-bit accesses) switches on the register.  Visible cases:
// CR read clears the RXD/TXD/TXR/RXR command bits, an ISR read clears all
// interrupts, RFDR returns perfect-match ROM bytes selected by RFCR, and
// MIBC masks out MIBS/ACLR.  Unimplemented registers and non-32-bit sizes
// panic.
562 NSGigE::read(MemReqPtr
&req
, uint8_t *data
)
566 //The mask is to give you only the offset into the device register file
567 Addr daddr
= req
->paddr
& 0xfff;
568 DPRINTF(EthernetPIO
, "read da=%#x pa=%#x va=%#x size=%d\n",
569 daddr
, req
->paddr
, req
->vaddr
, req
->size
);
572 // there are some reserved registers, you can see ns_gige_reg.h and
573 // the spec sheet for details
574 if (daddr
> LAST
&& daddr
<= RESERVED
) {
575 panic("Accessing reserved register");
576 } else if (daddr
> RESERVED
&& daddr
<= 0x3FC) {
// PCI config space is aliased into this window; forward to ReadConfig.
577 ReadConfig(daddr
& 0xff, req
->size
, data
);
579 } else if (daddr
>= MIB_START
&& daddr
<= MIB_END
) {
580 // don't implement all the MIB's. hopefully the kernel
581 // doesn't actually DEPEND upon their values
582 // MIB are just hardware stats keepers
583 uint32_t ®
= *(uint32_t *) data
;
586 } else if (daddr
> 0x3FC)
587 panic("Something is messed up!\n");
590 case sizeof(uint32_t):
592 uint32_t ®
= *(uint32_t *)data
;
597 //these are supposed to be cleared on a read
598 reg
&= ~(CR_RXD
| CR_TXD
| CR_TXR
| CR_RXR
);
615 devIntrClear(ISR_ALL
);
670 // see the spec sheet for how RFCR and RFDR work
671 // basically, you write to RFCR to tell the machine
672 // what you want to do next, then you act upon RFDR,
673 // and the device will be prepared b/c of what you
// RFDR read: return the pair of perfect-match MAC bytes addressed by
// the RFADDR field previously written into RFCR.
680 switch (regs
.rfcr
& RFCR_RFADDR
) {
682 reg
= rom
.perfectMatch
[1];
684 reg
+= rom
.perfectMatch
[0];
687 reg
= rom
.perfectMatch
[3] << 8;
688 reg
+= rom
.perfectMatch
[2];
691 reg
= rom
.perfectMatch
[5] << 8;
692 reg
+= rom
.perfectMatch
[4];
695 panic("reading RFDR for something other than PMATCH!\n");
696 // didn't implement other RFDR functionality b/c
697 // driver didn't use it
707 reg
&= ~(MIBC_MIBS
| MIBC_ACLR
);
751 panic("reading unimplemented register: addr=%#x", daddr
);
754 DPRINTF(EthernetPIO
, "read from %#x: data=%d data=%#x\n",
760 panic("accessing register with invalid size: addr=%#x, size=%d",
// PIO write handler: decodes the register offset the same way as read(),
// forwards the config-space alias to WriteConfig, and for 32-bit writes
// switches on the register.  Register semantics follow the DP83820 spec:
// CR kicks the tx/rx state machines, CFG masks off read-only/reserved
// bits, PTSCR fakes BIST completion, RFCR latches filter-enable flags,
// TBICR fakes autonegotiation.  Many bits the Linux driver never uses
// simply panic or are explicitly ignored (the empty "if (...) ;"
// statements).  Non-32-bit writes panic.
768 NSGigE::write(MemReqPtr
&req
, const uint8_t *data
)
772 Addr daddr
= req
->paddr
& 0xfff;
773 DPRINTF(EthernetPIO
, "write da=%#x pa=%#x va=%#x size=%d\n",
774 daddr
, req
->paddr
, req
->vaddr
, req
->size
);
776 if (daddr
> LAST
&& daddr
<= RESERVED
) {
777 panic("Accessing reserved register");
778 } else if (daddr
> RESERVED
&& daddr
<= 0x3FC) {
779 WriteConfig(daddr
& 0xff, req
->size
, *(uint32_t *)data
);
781 } else if (daddr
> 0x3FC)
782 panic("Something is messed up!\n");
784 if (req
->size
== sizeof(uint32_t)) {
785 uint32_t reg
= *(uint32_t *)data
;
786 DPRINTF(EthernetPIO
, "write data=%d data=%#x\n", reg
, reg
);
// CR: transmit/receive enable bits restart the respective state machine
// if it is currently idle.
793 } else if (reg
& CR_TXE
) {
796 // the kernel is enabling the transmit machine
797 if (txState
== txIdle
)
803 } else if (reg
& CR_RXE
) {
806 if (rxState
== rxIdle
)
817 devIntrPost(ISR_SWI
);
// CFG: reject writes to read-only/reserved bits, then merge the rest.
828 if (reg
& CFG_LNKSTS
||
831 reg
& CFG_RESERVED
||
834 panic("writing to read-only or reserved CFG bits!\n");
836 regs
.config
|= reg
& ~(CFG_LNKSTS
| CFG_SPDSTS
| CFG_DUPSTS
|
837 CFG_RESERVED
| CFG_T64ADDR
| CFG_PCI64_DET
);
839 // all these #if 0's are because i don't THINK the kernel needs to
840 // have these implemented. if there is a problem relating to one of
841 // these, you may need to add functionality in.
843 if (reg
& CFG_TBI_EN
) ;
844 if (reg
& CFG_MODE_1000
) ;
847 if (reg
& CFG_AUTO_1000
)
848 panic("CFG_AUTO_1000 not implemented!\n");
851 if (reg
& CFG_PINT_DUPSTS
||
852 reg
& CFG_PINT_LNKSTS
||
853 reg
& CFG_PINT_SPDSTS
)
856 if (reg
& CFG_TMRTEST
) ;
857 if (reg
& CFG_MRM_DIS
) ;
858 if (reg
& CFG_MWI_DIS
) ;
860 if (reg
& CFG_T64ADDR
)
861 panic("CFG_T64ADDR is read only register!\n");
863 if (reg
& CFG_PCI64_DET
)
864 panic("CFG_PCI64_DET is read only register!\n");
866 if (reg
& CFG_DATA64_EN
) ;
867 if (reg
& CFG_M64ADDR
) ;
868 if (reg
& CFG_PHY_RST
) ;
869 if (reg
& CFG_PHY_DIS
) ;
872 if (reg
& CFG_EXTSTS_EN
)
875 extstsEnable
= false;
878 if (reg
& CFG_REQALG
) ;
882 if (reg
& CFG_PESEL
) ;
883 if (reg
& CFG_BROM_DIS
) ;
884 if (reg
& CFG_EXT_125
) ;
891 // since phy is completely faked, MEAR_MD* don't matter
892 // and since the driver never uses MEAR_EE*, they don't
895 if (reg
& MEAR_EEDI
) ;
896 if (reg
& MEAR_EEDO
) ; // this one is read only
897 if (reg
& MEAR_EECLK
) ;
898 if (reg
& MEAR_EESEL
) ;
899 if (reg
& MEAR_MDIO
) ;
900 if (reg
& MEAR_MDDIR
) ;
901 if (reg
& MEAR_MDC
) ;
// PTSCR: self-test control -- pretend each requested BIST finished.
906 regs
.ptscr
= reg
& ~(PTSCR_RBIST_RDONLY
);
907 // these control BISTs for various parts of chip - we
908 // don't care or do just fake that the BIST is done
909 if (reg
& PTSCR_RBIST_EN
)
910 regs
.ptscr
|= PTSCR_RBIST_DONE
;
911 if (reg
& PTSCR_EEBIST_EN
)
912 regs
.ptscr
&= ~PTSCR_EEBIST_EN
;
913 if (reg
& PTSCR_EELOAD_EN
)
914 regs
.ptscr
&= ~PTSCR_EELOAD_EN
;
917 case ISR
: /* writing to the ISR has no effect */
918 panic("ISR is a read only register!\n");
931 /* not going to implement real interrupt holdoff */
// TXDP: latch the (4-byte aligned) transmit descriptor pointer.
935 regs
.txdp
= (reg
& 0xFFFFFFFC);
936 assert(txState
== txIdle
);
947 if (reg
& TXCFG_CSI
) ;
948 if (reg
& TXCFG_HBI
) ;
949 if (reg
& TXCFG_MLB
) ;
950 if (reg
& TXCFG_ATP
) ;
951 if (reg
& TXCFG_ECRETRY
) {
953 * this could easily be implemented, but considering
954 * the network is just a fake pipe, wouldn't make
959 if (reg
& TXCFG_BRST_DIS
) ;
963 /* we handle our own DMA, ignore the kernel's exhortations */
964 if (reg
& TXCFG_MXDMA
) ;
967 // also, we currently don't care about fill/drain
968 // thresholds though this may change in the future with
969 // more realistic networks or a driver which changes it
970 // according to feedback
976 /* these just control general purpose i/o pins, don't matter */
991 if (reg
& RXCFG_AEP
) ;
992 if (reg
& RXCFG_ARP
) ;
993 if (reg
& RXCFG_STRIPCRC
) ;
994 if (reg
& RXCFG_RX_RD
) ;
995 if (reg
& RXCFG_ALP
) ;
996 if (reg
& RXCFG_AIRL
) ;
998 /* we handle our own DMA, ignore what kernel says about it */
999 if (reg
& RXCFG_MXDMA
) ;
1001 //also, we currently don't care about fill/drain thresholds
1002 //though this may change in the future with more realistic
1003 //networks or a driver which changes it according to feedback
1004 if (reg
& (RXCFG_DRTH
| RXCFG_DRTH0
)) ;
1009 /* there is no priority queueing used in the linux 2.6 driver */
1014 /* not going to implement wake on LAN */
1019 /* not going to implement pause control */
// RFCR: latch the receive-filter control flags; pattern/hash modes are
// unimplemented and panic if requested.
1026 rxFilterEnable
= (reg
& RFCR_RFEN
) ? true : false;
1027 acceptBroadcast
= (reg
& RFCR_AAB
) ? true : false;
1028 acceptMulticast
= (reg
& RFCR_AAM
) ? true : false;
1029 acceptUnicast
= (reg
& RFCR_AAU
) ? true : false;
1030 acceptPerfect
= (reg
& RFCR_APM
) ? true : false;
1031 acceptArp
= (reg
& RFCR_AARP
) ? true : false;
1034 if (reg
& RFCR_APAT
)
1035 panic("RFCR_APAT not implemented!\n");
1038 if (reg
& RFCR_MHEN
|| reg
& RFCR_UHEN
)
1039 panic("hash filtering not implemented!\n");
1042 panic("RFCR_ULM not implemented!\n");
1047 panic("the driver never writes to RFDR, something is wrong!\n");
1050 panic("the driver never uses BRAR, something is wrong!\n");
1053 panic("the driver never uses BRDR, something is wrong!\n");
1056 panic("SRR is read only register!\n");
1059 panic("the driver never uses MIBC, something is wrong!\n");
1070 panic("the driver never uses VDR, something is wrong!\n");
1074 /* not going to implement clockrun stuff */
// TBICR: fake TBI autonegotiation by copying tanar into tanlpar and
// immediately reporting AN complete + link up in tbisr.
1080 if (reg
& TBICR_MR_LOOPBACK
)
1081 panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
1083 if (reg
& TBICR_MR_AN_ENABLE
) {
1084 regs
.tanlpar
= regs
.tanar
;
1085 regs
.tbisr
|= (TBISR_MR_AN_COMPLETE
| TBISR_MR_LINK_STATUS
);
1089 if (reg
& TBICR_MR_RESTART_AN
) ;
1095 panic("TBISR is read only register!\n");
1099 if (reg
& TANAR_PS2
)
1100 panic("this isn't used in driver, something wrong!\n");
1102 if (reg
& TANAR_PS1
)
1103 panic("this isn't used in driver, something wrong!\n");
1107 panic("this should only be written to by the fake phy!\n");
1110 panic("TANER is read only register!\n");
1117 panic("invalid register access daddr=%#x", daddr
);
1120 panic("Invalid Request Size");
// Posts device-level interrupt bits into the ISR.  Reserved bits panic,
// unimplemented bits warn and are stripped, the rest are OR'd into
// regs.isr.  For each newly-set source that is unmasked (per regs.imr),
// the per-source "total" stat is bumped (bodies elided in this
// extraction), and if any unmasked bit is set a CPU interrupt is
// scheduled -- delayed unless the source is in ISR_NODELAY.
1127 NSGigE::devIntrPost(uint32_t interrupts
)
1129 if (interrupts
& ISR_RESERVE
)
1130 panic("Cannot set a reserved interrupt");
1132 if (interrupts
& ISR_NOIMPL
)
1133 warn("interrupt not implemented %#x\n", interrupts
);
1135 interrupts
&= ~ISR_NOIMPL
;
1136 regs
.isr
|= interrupts
;
1138 if (interrupts
& regs
.imr
) {
1139 if (interrupts
& ISR_SWI
) {
1142 if (interrupts
& ISR_RXIDLE
) {
1145 if (interrupts
& ISR_RXOK
) {
1148 if (interrupts
& ISR_RXDESC
) {
1151 if (interrupts
& ISR_TXOK
) {
1154 if (interrupts
& ISR_TXIDLE
) {
1157 if (interrupts
& ISR_TXDESC
) {
1160 if (interrupts
& ISR_RXORN
) {
1165 DPRINTF(EthernetIntr
,
1166 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
1167 interrupts
, regs
.isr
, regs
.imr
);
1169 if ((regs
.isr
& regs
.imr
)) {
1170 Tick when
= curTick
;
1171 if (!(regs
.isr
& regs
.imr
& ISR_NODELAY
))
1177 /* writing this interrupt counting stats inside this means that this function
1178 is now limited to being used to clear all interrupts upon the kernel
1179 reading isr and servicing. just telling you in case you were thinking
// Clears interrupt bits from the ISR.  Reserved bits panic.  For each
// currently-pending unmasked source the corresponding "posted" stat is
// bumped (bodies elided in this extraction), then the requested bits
// (minus unimplemented ones) are cleared from regs.isr; if nothing
// unmasked remains pending, the CPU interrupt line is deasserted.
1183 NSGigE::devIntrClear(uint32_t interrupts
)
1185 if (interrupts
& ISR_RESERVE
)
1186 panic("Cannot clear a reserved interrupt");
1188 if (regs
.isr
& regs
.imr
& ISR_SWI
) {
1191 if (regs
.isr
& regs
.imr
& ISR_RXIDLE
) {
1194 if (regs
.isr
& regs
.imr
& ISR_RXOK
) {
1197 if (regs
.isr
& regs
.imr
& ISR_RXDESC
) {
1200 if (regs
.isr
& regs
.imr
& ISR_TXOK
) {
1203 if (regs
.isr
& regs
.imr
& ISR_TXIDLE
) {
1206 if (regs
.isr
& regs
.imr
& ISR_TXDESC
) {
1209 if (regs
.isr
& regs
.imr
& ISR_RXORN
) {
1213 if (regs
.isr
& regs
.imr
& (ISR_SWI
| ISR_RXIDLE
| ISR_RXOK
| ISR_RXDESC
|
1214 ISR_TXOK
| ISR_TXIDLE
| ISR_TXDESC
| ISR_RXORN
) )
1217 interrupts
&= ~ISR_NOIMPL
;
1218 regs
.isr
&= ~interrupts
;
1220 DPRINTF(EthernetIntr
,
1221 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
1222 interrupts
, regs
.isr
, regs
.imr
);
1224 if (!(regs
.isr
& regs
.imr
))
// Called after the interrupt mask register changes: if an already-pending
// ISR bit has just become unmasked, post a CPU interrupt immediately.
1229 NSGigE::devIntrChangeMask()
1231 DPRINTF(EthernetIntr
, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
1232 regs
.isr
, regs
.imr
, regs
.isr
& regs
.imr
);
1234 if (regs
.isr
& regs
.imr
)
1235 cpuIntrPost(curTick
);
// Schedules delivery of the device interrupt to the CPU at tick 'when'.
// If an earlier interrupt event is already scheduled it is reused;
// otherwise any stale event is squashed and a fresh IntrEvent is
// scheduled for intrTick.
1241 NSGigE::cpuIntrPost(Tick when
)
1243 // If the interrupt you want to post is later than an interrupt
1244 // already scheduled, just let it post in the coming one and don't
1245 // schedule another.
1246 // HOWEVER, must be sure that the scheduled intrTick is in the
1247 // future (this was formerly the source of a bug)
1249 * @todo this warning should be removed and the intrTick code should
1252 assert(when
>= curTick
);
1253 assert(intrTick
>= curTick
|| intrTick
== 0);
1254 if (when
> intrTick
&& intrTick
!= 0) {
1255 DPRINTF(EthernetIntr
, "don't need to schedule event...intrTick=%d\n",
1261 if (intrTick
< curTick
) {
1266 DPRINTF(EthernetIntr
, "going to schedule an interrupt for intrTick=%d\n",
1270 intrEvent
->squash();
1271 intrEvent
= new IntrEvent(this, true);
1272 intrEvent
->schedule(intrTick
);
// Fires at the scheduled intrTick: if an interrupt is already pending at
// the CPU it does nothing (the level-triggered line is already asserted);
// otherwise it marks the interrupt pending and posts it.
1276 NSGigE::cpuInterrupt()
1278 assert(intrTick
== curTick
);
1280 // Whether or not there's a pending interrupt, we don't care about
1285 // Don't send an interrupt if there's already one
1286 if (cpuPendingIntr
) {
1287 DPRINTF(EthernetIntr
,
1288 "would send an interrupt now, but there's already pending\n");
1291 cpuPendingIntr
= true;
1293 DPRINTF(EthernetIntr
, "posting interrupt\n");
// Deasserts the CPU interrupt line: squashes any scheduled delivery
// event and clears the pending flag.  No-op if nothing is pending.
1299 NSGigE::cpuIntrClear()
1301 if (!cpuPendingIntr
)
1305 intrEvent
->squash();
1311 cpuPendingIntr
= false;
1313 DPRINTF(EthernetIntr
, "clearing interrupt\n");
// Accessor: true while an interrupt is asserted toward the CPU.
1318 NSGigE::cpuIntrPending() const
1319 { return cpuPendingIntr
; }
// Transmit-side reset fragments (enclosing function signature elided --
// presumably txReset; TODO confirm).  Sanity-checks that no descriptor
// work or DMA is in flight when the tx machine is reset.
1325 DPRINTF(Ethernet
, "transmit reset\n");
1330 assert(txDescCnt
== 0);
1333 assert(txDmaState
== dmaIdle
);
// Receive-side reset fragments (enclosing function signature elided --
// presumably rxReset; TODO confirm).  Sanity-checks that no packet bytes,
// descriptor work, or DMA remain when the rx machine is reset.
1339 DPRINTF(Ethernet
, "receive reset\n");
1342 assert(rxPktBytes
== 0);
1345 assert(rxDescCnt
== 0);
1346 assert(rxDmaState
== dmaIdle
);
// Register-file reset fragments (enclosing function signature elided --
// presumably regsReset; TODO confirm).  Zeroes the register file, then
// applies the DP83820 power-on defaults noted inline, and clears all
// receive-filter acceptance flags.
// NOTE(review): "®s" below is a mojibake of "&regs" from the
// extraction; preserved byte-for-byte here.
1354 memset(®s
, 0, sizeof(regs
));
1355 regs
.config
= CFG_LNKSTS
;
1356 regs
.mear
= MEAR_MDDIR
| MEAR_EEDO
;
1357 regs
.txcfg
= 0x120; // set drain threshold to 1024 bytes and
1358 // fill threshold to 32 bytes
1359 regs
.rxcfg
= 0x4; // set drain threshold to 16 bytes
1360 regs
.srr
= 0x0103; // set the silicon revision to rev B or 0x103
1361 regs
.mibc
= MIBC_FRZ
;
1362 regs
.vdr
= 0x81; // set the vlan tag type to 802.1q
1363 regs
.tesr
= 0xc000; // TBI capable of both full and half duplex
1365 extstsEnable
= false;
1366 acceptBroadcast
= false;
1367 acceptMulticast
= false;
1368 acceptUnicast
= false;
1369 acceptPerfect
= false;
// Performs the actual functional copy for a pending rx DMA read: pulls
// rxDmaLen bytes from physical memory at rxDmaAddr into rxDmaData and
// returns the DMA state machine to idle.
1374 NSGigE::rxDmaReadCopy()
1376 assert(rxDmaState
== dmaReading
);
1378 physmem
->dma_read((uint8_t *)rxDmaData
, rxDmaAddr
, rxDmaLen
);
1379 rxDmaState
= dmaIdle
;
1381 DPRINTF(EthernetDMA
, "rx dma read paddr=%#x len=%d\n",
1382 rxDmaAddr
, rxDmaLen
);
1383 DDUMP(EthernetDMA
, rxDmaData
, rxDmaLen
);
// Initiates an rx DMA read.  With a timing dmaInterface (and the "free"
// fast path not requested) the read is issued on the bus -- or the state
// machine parks in dmaReadWaiting if the bus is busy.  Otherwise the copy
// is done functionally, either immediately (zero delay/factor) or after a
// delay of dmaReadDelay plus a per-64-byte-chunk dmaReadFactor charge.
1387 NSGigE::doRxDmaRead()
1389 assert(rxDmaState
== dmaIdle
|| rxDmaState
== dmaReadWaiting
);
1390 rxDmaState
= dmaReading
;
1392 if (dmaInterface
&& !rxDmaFree
) {
1393 if (dmaInterface
->busy())
1394 rxDmaState
= dmaReadWaiting
;
1396 dmaInterface
->doDMA(Read
, rxDmaAddr
, rxDmaLen
, curTick
,
1397 &rxDmaReadEvent
, true);
1401 if (dmaReadDelay
== 0 && dmaReadFactor
== 0) {
// Round the length up to 64-byte chunks when charging the factor cost.
1406 Tick factor
= ((rxDmaLen
+ ULL(63)) >> ULL(6)) * dmaReadFactor
;
1407 Tick start
= curTick
+ dmaReadDelay
+ factor
;
1408 rxDmaReadEvent
.schedule(start
);
// Completion handler for an rx DMA read; yields to the transmit state
// machine first if it has a DMA request stalled waiting for the bus.
1413 NSGigE::rxDmaReadDone()
1415 assert(rxDmaState
== dmaReading
);
1418 // If the transmit state machine has a pending DMA, let it go first
1419 if (txDmaState
== dmaReadWaiting
|| txDmaState
== dmaWriteWaiting
)
// Performs the functional copy for a pending rx DMA write: pushes
// rxDmaLen bytes from rxDmaData into physical memory at rxDmaAddr and
// returns the DMA state machine to idle.
1426 NSGigE::rxDmaWriteCopy()
1428 assert(rxDmaState
== dmaWriting
);
1430 physmem
->dma_write(rxDmaAddr
, (uint8_t *)rxDmaData
, rxDmaLen
);
1431 rxDmaState
= dmaIdle
;
1433 DPRINTF(EthernetDMA
, "rx dma write paddr=%#x len=%d\n",
1434 rxDmaAddr
, rxDmaLen
);
1435 DDUMP(EthernetDMA
, rxDmaData
, rxDmaLen
);
// Initiates an rx DMA write (mirror of doRxDmaRead): issues a
// WriteInvalidate on the timing bus, parks in dmaWriteWaiting if the bus
// is busy, or performs/schedules the functional copy using
// dmaWriteDelay/dmaWriteFactor.
1439 NSGigE::doRxDmaWrite()
1441 assert(rxDmaState
== dmaIdle
|| rxDmaState
== dmaWriteWaiting
);
1442 rxDmaState
= dmaWriting
;
1444 if (dmaInterface
&& !rxDmaFree
) {
1445 if (dmaInterface
->busy())
1446 rxDmaState
= dmaWriteWaiting
;
1448 dmaInterface
->doDMA(WriteInvalidate
, rxDmaAddr
, rxDmaLen
, curTick
,
1449 &rxDmaWriteEvent
, true);
1453 if (dmaWriteDelay
== 0 && dmaWriteFactor
== 0) {
1458 Tick factor
= ((rxDmaLen
+ ULL(63)) >> ULL(6)) * dmaWriteFactor
;
1459 Tick start
= curTick
+ dmaWriteDelay
+ factor
;
1460 rxDmaWriteEvent
.schedule(start
);
// Completion handler for an rx DMA write; yields to the transmit state
// machine first if it has a DMA request stalled waiting for the bus.
1465 NSGigE::rxDmaWriteDone()
1467 assert(rxDmaState
== dmaWriting
);
1470 // If the transmit state machine has a pending DMA, let it go first
1471 if (txDmaState
== dmaReadWaiting
|| txDmaState
== dmaWriteWaiting
)
// Receive state machine body (enclosing function signature elided --
// presumably rxKick; TODO confirm).  Walks the DP83820 rx descriptor
// ring: fetches descriptors via DMA (rxDescRefr/rxDescRead), pulls a
// packet from the rx FIFO (rxFifoBlock), copies the payload into the
// driver's buffer (rxFragWrite), writes back cmdsts/extsts with ownership,
// size, filter-destination and checksum-status bits (rxDescWrite), posts
// RXOK/RXDESC/RXIDLE interrupts, and advances along desc.link.  States
// that issue DMA exit the loop and re-enter when the DMA event fires.
1480 DPRINTF(EthernetSM
, "receive kick rxState=%s (rxBuf.size=%d)\n",
1481 NsRxStateStrings
[rxState
], rxFifo
.size());
// Don't run ahead of the tick this kick was scheduled for.
1483 if (rxKickTick
> curTick
) {
1484 DPRINTF(EthernetSM
, "receive kick exiting, can't run till %d\n",
1490 switch(rxDmaState
) {
1491 case dmaReadWaiting
:
1495 case dmaWriteWaiting
:
1503 // see state machine from spec for details
1504 // the way this works is, if you finish work on one state and can
1505 // go directly to another, you do that through jumping to the
1506 // label "next". however, if you have intermediate work, like DMA
1507 // so that you can't go to the next state yet, you go to exit and
1508 // exit the loop. however, when the DMA is done it will trigger
1509 // an event and come back to this loop.
1513 DPRINTF(EthernetSM
, "Receive Disabled! Nothing to do.\n");
// Refresh just the link field of the cached descriptor.
1518 rxState
= rxDescRefr
;
1520 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1521 rxDmaData
= &rxDescCache
+ offsetof(ns_desc
, link
);
1522 rxDmaLen
= sizeof(rxDescCache
.link
);
1523 rxDmaFree
= dmaDescFree
;
1526 descDmaRdBytes
+= rxDmaLen
;
// Read the full descriptor pointed to by rxdp into the cache.
1531 rxState
= rxDescRead
;
1533 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1534 rxDmaData
= &rxDescCache
;
1535 rxDmaLen
= sizeof(ns_desc
);
1536 rxDmaFree
= dmaDescFree
;
1539 descDmaRdBytes
+= rxDmaLen
;
1547 if (rxDmaState
!= dmaIdle
)
1550 rxState
= rxAdvance
;
1554 if (rxDmaState
!= dmaIdle
)
1557 DPRINTF(EthernetDesc
,
1558 "rxDescCache: addr=%08x read descriptor\n",
1559 regs
.rxdp
& 0x3fffffff);
1560 DPRINTF(EthernetDesc
,
1561 "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
1562 rxDescCache
.link
, rxDescCache
.bufptr
, rxDescCache
.cmdsts
,
1563 rxDescCache
.extsts
);
// Descriptor still owned by the device side => ring is full; go idle.
1565 if (rxDescCache
.cmdsts
& CMDSTS_OWN
) {
1566 devIntrPost(ISR_RXIDLE
);
1570 rxState
= rxFifoBlock
;
1571 rxFragPtr
= rxDescCache
.bufptr
;
1572 rxDescCnt
= rxDescCache
.cmdsts
& CMDSTS_LEN_MASK
;
1579 * @todo in reality, we should be able to start processing
1580 * the packet as it arrives, and not have to wait for the
1581 * full packet ot be in the receive fifo.
1586 DPRINTF(EthernetSM
, "****processing receive of new packet****\n");
1588 // If we don't have a packet, grab a new one from the fifo.
1589 rxPacket
= rxFifo
.front();
1590 rxPktBytes
= rxPacket
->length
;
1591 rxPacketBufPtr
= rxPacket
->data
;
1594 if (DTRACE(Ethernet
)) {
1597 DPRINTF(Ethernet
, "ID is %d\n", ip
->id());
1601 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1602 tcp
->sport(), tcp
->dport(), tcp
->seq(),
1609 // sanity check - i think the driver behaves like this
1610 assert(rxDescCnt
>= rxPktBytes
);
1615 // dont' need the && rxDescCnt > 0 if driver sanity check
1617 if (rxPktBytes
> 0) {
1618 rxState
= rxFragWrite
;
1619 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
1621 rxXferLen
= rxPktBytes
;
1623 rxDmaAddr
= rxFragPtr
& 0x3fffffff;
1624 rxDmaData
= rxPacketBufPtr
;
1625 rxDmaLen
= rxXferLen
;
1626 rxDmaFree
= dmaDataFree
;
// Packet fully copied: write back descriptor status.
1632 rxState
= rxDescWrite
;
1634 //if (rxPktBytes == 0) { /* packet is done */
1635 assert(rxPktBytes
== 0);
1636 DPRINTF(EthernetSM
, "done with receiving packet\n");
1638 rxDescCache
.cmdsts
|= CMDSTS_OWN
;
1639 rxDescCache
.cmdsts
&= ~CMDSTS_MORE
;
1640 rxDescCache
.cmdsts
|= CMDSTS_OK
;
1641 rxDescCache
.cmdsts
&= 0xffff0000;
1642 rxDescCache
.cmdsts
+= rxPacket
->length
; //i.e. set CMDSTS_SIZE
1646 * all the driver uses these are for its own stats keeping
1647 * which we don't care about, aren't necessary for
1648 * functionality and doing this would just slow us down.
1649 * if they end up using this in a later version for
1650 * functional purposes, just undef
// Record how the packet matched the rx filter (self/multi/broadcast).
1652 if (rxFilterEnable
) {
1653 rxDescCache
.cmdsts
&= ~CMDSTS_DEST_MASK
;
1654 const EthAddr
&dst
= rxFifoFront()->dst();
1656 rxDescCache
.cmdsts
|= CMDSTS_DEST_SELF
;
1657 if (dst
->multicast())
1658 rxDescCache
.cmdsts
|= CMDSTS_DEST_MULTI
;
1659 if (dst
->broadcast())
1660 rxDescCache
.cmdsts
|= CMDSTS_DEST_MASK
;
// Hardware checksum offload status goes into extsts when enabled.
1665 if (extstsEnable
&& ip
) {
1666 rxDescCache
.extsts
|= EXTSTS_IPPKT
;
1668 if (cksum(ip
) != 0) {
1669 DPRINTF(EthernetCksum
, "Rx IP Checksum Error\n");
1670 rxDescCache
.extsts
|= EXTSTS_IPERR
;
1675 rxDescCache
.extsts
|= EXTSTS_TCPPKT
;
1677 if (cksum(tcp
) != 0) {
1678 DPRINTF(EthernetCksum
, "Rx TCP Checksum Error\n");
1679 rxDescCache
.extsts
|= EXTSTS_TCPERR
;
1683 rxDescCache
.extsts
|= EXTSTS_UDPPKT
;
1685 if (cksum(udp
) != 0) {
1686 DPRINTF(EthernetCksum
, "Rx UDP Checksum Error\n");
1687 rxDescCache
.extsts
|= EXTSTS_UDPERR
;
1694 * the driver seems to always receive into desc buffers
1695 * of size 1514, so you never have a pkt that is split
1696 * into multiple descriptors on the receive side, so
1697 * i don't implement that case, hence the assert above.
1700 DPRINTF(EthernetDesc
,
1701 "rxDescCache: addr=%08x writeback cmdsts extsts\n",
1702 regs
.rxdp
& 0x3fffffff);
1703 DPRINTF(EthernetDesc
,
1704 "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
1705 rxDescCache
.link
, rxDescCache
.bufptr
, rxDescCache
.cmdsts
,
1706 rxDescCache
.extsts
);
// DMA the cmdsts+extsts pair back to the in-memory descriptor.
1708 rxDmaAddr
= (regs
.rxdp
+ offsetof(ns_desc
, cmdsts
)) & 0x3fffffff;
1709 rxDmaData
= &(rxDescCache
.cmdsts
);
1710 rxDmaLen
= sizeof(rxDescCache
.cmdsts
) + sizeof(rxDescCache
.extsts
);
1711 rxDmaFree
= dmaDescFree
;
1714 descDmaWrBytes
+= rxDmaLen
;
1722 if (rxDmaState
!= dmaIdle
)
// Fragment written: advance buffer pointers and return for more.
1725 rxPacketBufPtr
+= rxXferLen
;
1726 rxFragPtr
+= rxXferLen
;
1727 rxPktBytes
-= rxXferLen
;
1729 rxState
= rxFifoBlock
;
1733 if (rxDmaState
!= dmaIdle
)
1736 assert(rxDescCache
.cmdsts
& CMDSTS_OWN
);
1738 assert(rxPacket
== 0);
1739 devIntrPost(ISR_RXOK
);
1741 if (rxDescCache
.cmdsts
& CMDSTS_INTR
)
1742 devIntrPost(ISR_RXDESC
);
1745 DPRINTF(EthernetSM
, "Halting the RX state machine\n");
1749 rxState
= rxAdvance
;
// link == 0 means end of ring: signal idle; otherwise follow the link.
1753 if (rxDescCache
.link
== 0) {
1754 devIntrPost(ISR_RXIDLE
);
1759 rxState
= rxDescRead
;
1760 regs
.rxdp
= rxDescCache
.link
;
1763 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1764 rxDmaData
= &rxDescCache
;
1765 rxDmaLen
= sizeof(ns_desc
);
1766 rxDmaFree
= dmaDescFree
;
1774 panic("Invalid rxState!");
1777 DPRINTF(EthernetSM
, "entering next rxState=%s\n",
1778 NsRxStateStrings
[rxState
]);
1784 * @todo do we want to schedule a future kick?
1786 DPRINTF(EthernetSM
, "rx state machine exited rxState=%s\n",
1787 NsRxStateStrings
[rxState
]);
// Transmit body (enclosing function signature elided -- presumably
// NSGigE::transmit(); TODO confirm).  Sends the packet at the head of
// the tx FIFO out the ethernet link; on success it updates txBytes and
// posts ISR_TXOK immediately (no descriptor writeback is needed since
// transmission in this model never fails).  If packets remain queued, it
// reschedules itself 1000 ticks later via txEvent.
1793 if (txFifo
.empty()) {
1794 DPRINTF(Ethernet
, "nothing to transmit\n");
1798 DPRINTF(Ethernet
, "Attempt Pkt Transmit: txFifo length=%d\n",
1800 if (interface
->sendPacket(txFifo
.front())) {
1802 if (DTRACE(Ethernet
)) {
1803 IpPtr
ip(txFifo
.front());
1805 DPRINTF(Ethernet
, "ID is %d\n", ip
->id());
1809 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1810 tcp
->sport(), tcp
->dport(), tcp
->seq(), tcp
->ack());
1816 DDUMP(EthernetData
, txFifo
.front()->data
, txFifo
.front()->length
);
1817 txBytes
+= txFifo
.front()->length
;
1820 DPRINTF(Ethernet
, "Successful Xmit! now txFifoAvail is %d\n",
1825 * normally do a writeback of the descriptor here, and ONLY
1826 * after that is done, send this interrupt. but since our
1827 * stuff never actually fails, just do this interrupt here,
1828 * otherwise the code has to stray from this nice format.
1829 * besides, it's functionally the same.
1831 devIntrPost(ISR_TXOK
);
1834 if (!txFifo
.empty() && !txEvent
.scheduled()) {
1835 DPRINTF(Ethernet
, "reschedule transmit\n");
1836 txEvent
.schedule(curTick
+ 1000);
void
NSGigE::txDmaReadCopy()
{
    // Only valid while a transmit-side DMA read is outstanding.
    assert(txDmaState == dmaReading);

    // Functionally complete the read: copy txDmaLen bytes from physical
    // memory at txDmaAddr into the buffer at txDmaData, then idle the
    // transmit DMA engine.
    physmem->dma_read((uint8_t *)txDmaData, txDmaAddr, txDmaLen);
    txDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
            txDmaAddr, txDmaLen);
    DDUMP(EthernetDMA, txDmaData, txDmaLen);
}
void
NSGigE::doTxDmaRead()
{
    // Kick off a transmit-side DMA read of txDmaLen bytes from physical
    // address txDmaAddr into txDmaData.  Depending on configuration this
    // either goes through the modeled DMA bus interface, completes
    // immediately, or self-schedules a delayed completion event.
    assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
    txDmaState = dmaReading;

    // Model bus timing through the DMA interface unless descriptor/data
    // DMA has been configured as free for this transfer.
    if (dmaInterface && !txDmaFree) {
        if (dmaInterface->busy())
            txDmaState = dmaReadWaiting; // bus busy; stay in waiting state
        else
            dmaInterface->doDMA(Read, txDmaAddr, txDmaLen, curTick,
                                &txDmaReadEvent, true);
        return;
    }

    // No modeled latency at all: perform the copy right now.
    if (dmaReadDelay == 0 && dmaReadFactor == 0.0) {
        txDmaReadCopy();
        return;
    }

    // Otherwise: fixed delay plus a per-64-byte factor.
    // ((len + 63) >> 6 rounds txDmaLen up to whole 64-byte units.)
    Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
    Tick start = curTick + dmaReadDelay + factor;
    txDmaReadEvent.schedule(start);
}
void
NSGigE::txDmaReadDone()
{
    // Completion handler for a delayed transmit DMA read.
    // NOTE(review): interior call statements were garbled in extraction;
    // reconstructed — verify against repository history.
    assert(txDmaState == dmaReading);
    txDmaReadCopy();

    // If the receive state machine has a pending DMA, let it go first
    if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
        rxKick();

    txKick();
}
void
NSGigE::txDmaWriteCopy()
{
    // Only valid while a transmit-side DMA write is outstanding.
    assert(txDmaState == dmaWriting);

    // Functionally complete the write: copy txDmaLen bytes from
    // txDmaData out to physical memory at txDmaAddr, then idle the
    // transmit DMA engine.
    physmem->dma_write(txDmaAddr, (uint8_t *)txDmaData, txDmaLen);
    txDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
            txDmaAddr, txDmaLen);
    DDUMP(EthernetDMA, txDmaData, txDmaLen);
}
void
NSGigE::doTxDmaWrite()
{
    // Kick off a transmit-side DMA write of txDmaLen bytes from
    // txDmaData to physical address txDmaAddr.  Mirrors doTxDmaRead():
    // model bus timing through dmaInterface, complete immediately, or
    // schedule a delayed completion event.
    assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
    txDmaState = dmaWriting;

    if (dmaInterface && !txDmaFree) {
        if (dmaInterface->busy())
            txDmaState = dmaWriteWaiting; // bus busy; stay in waiting state
        else
            dmaInterface->doDMA(WriteInvalidate, txDmaAddr, txDmaLen,
                                curTick, &txDmaWriteEvent, true);
        return;
    }

    // No modeled latency at all: perform the copy right now.
    if (dmaWriteDelay == 0 && dmaWriteFactor == 0.0) {
        txDmaWriteCopy();
        return;
    }

    // Otherwise: fixed delay plus a per-64-byte factor.
    // ((len + 63) >> 6 rounds txDmaLen up to whole 64-byte units.)
    Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
    Tick start = curTick + dmaWriteDelay + factor;
    txDmaWriteEvent.schedule(start);
}
void
NSGigE::txDmaWriteDone()
{
    // Completion handler for a delayed transmit DMA write.
    // NOTE(review): interior call statements were garbled in extraction;
    // reconstructed — verify against repository history.
    assert(txDmaState == dmaWriting);
    txDmaWriteCopy();

    // If the receive state machine has a pending DMA, let it go first
    if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
        rxKick();

    txKick();
}
1947 DPRINTF(EthernetSM
, "transmit kick txState=%s\n",
1948 NsTxStateStrings
[txState
]);
1950 if (txKickTick
> curTick
) {
1951 DPRINTF(EthernetSM
, "transmit kick exiting, can't run till %d\n",
1958 switch(txDmaState
) {
1959 case dmaReadWaiting
:
1963 case dmaWriteWaiting
:
1974 DPRINTF(EthernetSM
, "Transmit disabled. Nothing to do.\n");
1979 txState
= txDescRefr
;
1981 txDmaAddr
= regs
.txdp
& 0x3fffffff;
1982 txDmaData
= &txDescCache
+ offsetof(ns_desc
, link
);
1983 txDmaLen
= sizeof(txDescCache
.link
);
1984 txDmaFree
= dmaDescFree
;
1987 descDmaRdBytes
+= txDmaLen
;
1993 txState
= txDescRead
;
1995 txDmaAddr
= regs
.txdp
& 0x3fffffff;
1996 txDmaData
= &txDescCache
;
1997 txDmaLen
= sizeof(ns_desc
);
1998 txDmaFree
= dmaDescFree
;
2001 descDmaRdBytes
+= txDmaLen
;
2009 if (txDmaState
!= dmaIdle
)
2012 txState
= txAdvance
;
2016 if (txDmaState
!= dmaIdle
)
2019 DPRINTF(EthernetDesc
,
2020 "txDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
2021 txDescCache
.link
, txDescCache
.bufptr
, txDescCache
.cmdsts
,
2022 txDescCache
.extsts
);
2024 if (txDescCache
.cmdsts
& CMDSTS_OWN
) {
2025 txState
= txFifoBlock
;
2026 txFragPtr
= txDescCache
.bufptr
;
2027 txDescCnt
= txDescCache
.cmdsts
& CMDSTS_LEN_MASK
;
2029 devIntrPost(ISR_TXIDLE
);
2037 DPRINTF(EthernetSM
, "****starting the tx of a new packet****\n");
2038 txPacket
= new PacketData(16384);
2039 txPacketBufPtr
= txPacket
->data
;
2042 if (txDescCnt
== 0) {
2043 DPRINTF(EthernetSM
, "the txDescCnt == 0, done with descriptor\n");
2044 if (txDescCache
.cmdsts
& CMDSTS_MORE
) {
2045 DPRINTF(EthernetSM
, "there are more descriptors to come\n");
2046 txState
= txDescWrite
;
2048 txDescCache
.cmdsts
&= ~CMDSTS_OWN
;
2050 txDmaAddr
= regs
.txdp
+ offsetof(ns_desc
, cmdsts
);
2051 txDmaAddr
&= 0x3fffffff;
2052 txDmaData
= &(txDescCache
.cmdsts
);
2053 txDmaLen
= sizeof(txDescCache
.cmdsts
);
2054 txDmaFree
= dmaDescFree
;
2059 } else { /* this packet is totally done */
2060 DPRINTF(EthernetSM
, "This packet is done, let's wrap it up\n");
2061 /* deal with the the packet that just finished */
2062 if ((regs
.vtcr
& VTCR_PPCHK
) && extstsEnable
) {
2064 if (txDescCache
.extsts
& EXTSTS_UDPPKT
) {
2067 udp
->sum(cksum(udp
));
2069 } else if (txDescCache
.extsts
& EXTSTS_TCPPKT
) {
2072 tcp
->sum(cksum(tcp
));
2075 if (txDescCache
.extsts
& EXTSTS_IPPKT
) {
2082 txPacket
->length
= txPacketBufPtr
- txPacket
->data
;
2083 // this is just because the receive can't handle a
2084 // packet bigger want to make sure
2085 assert(txPacket
->length
<= 1514);
2089 txFifo
.push(txPacket
);
2093 * this following section is not tqo spec, but
2094 * functionally shouldn't be any different. normally,
2095 * the chip will wait til the transmit has occurred
2096 * before writing back the descriptor because it has
2097 * to wait to see that it was successfully transmitted
2098 * to decide whether to set CMDSTS_OK or not.
2099 * however, in the simulator since it is always
2100 * successfully transmitted, and writing it exactly to
2101 * spec would complicate the code, we just do it here
2104 txDescCache
.cmdsts
&= ~CMDSTS_OWN
;
2105 txDescCache
.cmdsts
|= CMDSTS_OK
;
2107 DPRINTF(EthernetDesc
,
2108 "txDesc writeback: cmdsts=%08x extsts=%08x\n",
2109 txDescCache
.cmdsts
, txDescCache
.extsts
);
2111 txDmaAddr
= regs
.txdp
+ offsetof(ns_desc
, cmdsts
);
2112 txDmaAddr
&= 0x3fffffff;
2113 txDmaData
= &(txDescCache
.cmdsts
);
2114 txDmaLen
= sizeof(txDescCache
.cmdsts
) +
2115 sizeof(txDescCache
.extsts
);
2116 txDmaFree
= dmaDescFree
;
2119 descDmaWrBytes
+= txDmaLen
;
2125 DPRINTF(EthernetSM
, "halting TX state machine\n");
2129 txState
= txAdvance
;
2135 DPRINTF(EthernetSM
, "this descriptor isn't done yet\n");
2136 if (!txFifo
.full()) {
2137 txState
= txFragRead
;
2140 * The number of bytes transferred is either whatever
2141 * is left in the descriptor (txDescCnt), or if there
2142 * is not enough room in the fifo, just whatever room
2143 * is left in the fifo
2145 txXferLen
= min
<uint32_t>(txDescCnt
, txFifo
.avail());
2147 txDmaAddr
= txFragPtr
& 0x3fffffff;
2148 txDmaData
= txPacketBufPtr
;
2149 txDmaLen
= txXferLen
;
2150 txDmaFree
= dmaDataFree
;
2155 txState
= txFifoBlock
;
2165 if (txDmaState
!= dmaIdle
)
2168 txPacketBufPtr
+= txXferLen
;
2169 txFragPtr
+= txXferLen
;
2170 txDescCnt
-= txXferLen
;
2171 txFifo
.reserve(txXferLen
);
2173 txState
= txFifoBlock
;
2177 if (txDmaState
!= dmaIdle
)
2180 if (txDescCache
.cmdsts
& CMDSTS_INTR
)
2181 devIntrPost(ISR_TXDESC
);
2183 txState
= txAdvance
;
2187 if (txDescCache
.link
== 0) {
2188 devIntrPost(ISR_TXIDLE
);
2192 txState
= txDescRead
;
2193 regs
.txdp
= txDescCache
.link
;
2196 txDmaAddr
= txDescCache
.link
& 0x3fffffff;
2197 txDmaData
= &txDescCache
;
2198 txDmaLen
= sizeof(ns_desc
);
2199 txDmaFree
= dmaDescFree
;
2207 panic("invalid state");
2210 DPRINTF(EthernetSM
, "entering next txState=%s\n",
2211 NsTxStateStrings
[txState
]);
2217 * @todo do we want to schedule a future kick?
2219 DPRINTF(EthernetSM
, "tx state machine exited txState=%s\n",
2220 NsTxStateStrings
[txState
]);
void
NSGigE::transferDone()
{
    // Called when a packet transfer on the link completes; if more data
    // is queued, (re)arm the transmit event for the next tick.
    if (txFifo.empty()) {
        DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
        return;
    }

    DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");

    if (txEvent.scheduled())
        txEvent.reschedule(curTick + 1);
    else
        txEvent.schedule(curTick + 1);
}
bool
NSGigE::rxFilter(const PacketPtr &packet)
{
    // Receive address filter.  Returns true if the packet should be
    // DROPPED, false if it should be accepted.
    // NOTE(review): the drop-flag statements were garbled in extraction;
    // reconstructed — verify against repository history.
    EthPtr eth = packet;
    bool drop = true;

    const EthAddr &dst = eth->dst();
    if (dst.unicast()) {
        // If we're accepting all unicast addresses
        if (acceptUnicast)
            drop = false;

        // If we make a perfect match
        if (acceptPerfect && dst == rom.perfectMatch)
            drop = false;

        if (acceptArp && eth->type() == ETH_TYPE_ARP)
            drop = false;

    } else if (dst.broadcast()) {
        // if we're accepting broadcasts
        if (acceptBroadcast)
            drop = false;

    } else if (dst.multicast()) {
        // if we're accepting all multicasts
        if (acceptMulticast)
            drop = false;
    }

    if (drop) {
        DPRINTF(Ethernet, "rxFilter drop\n");
        DDUMP(EthernetData, packet->data, packet->length);
    }

    return drop;
}
2280 NSGigE::recvPacket(PacketPtr packet
)
2282 rxBytes
+= packet
->length
;
2285 DPRINTF(Ethernet
, "Receiving packet from wire, rxFifoAvail=%d\n",
2289 DPRINTF(Ethernet
, "receive disabled...packet dropped\n");
2291 interface
->recvDone();
2295 if (rxFilterEnable
&& rxFilter(packet
)) {
2296 DPRINTF(Ethernet
, "packet filtered...dropped\n");
2297 interface
->recvDone();
2301 if (rxFifo
.avail() < packet
->length
) {
2307 "packet won't fit in receive buffer...pkt ID %d dropped\n",
2310 DPRINTF(Ethernet
, "Seq=%d\n", tcp
->seq());
2315 devIntrPost(ISR_RXORN
);
2319 rxFifo
.push(packet
);
2320 interface
->recvDone();
2326 //=====================================================================
2330 NSGigE::serialize(ostream
&os
)
2332 // Serialize the PciDev base class
2333 PciDev::serialize(os
);
2336 * Finalize any DMA events now.
2338 if (rxDmaReadEvent
.scheduled())
2340 if (rxDmaWriteEvent
.scheduled())
2342 if (txDmaReadEvent
.scheduled())
2344 if (txDmaWriteEvent
.scheduled())
2348 * Serialize the device registers
2350 SERIALIZE_SCALAR(regs
.command
);
2351 SERIALIZE_SCALAR(regs
.config
);
2352 SERIALIZE_SCALAR(regs
.mear
);
2353 SERIALIZE_SCALAR(regs
.ptscr
);
2354 SERIALIZE_SCALAR(regs
.isr
);
2355 SERIALIZE_SCALAR(regs
.imr
);
2356 SERIALIZE_SCALAR(regs
.ier
);
2357 SERIALIZE_SCALAR(regs
.ihr
);
2358 SERIALIZE_SCALAR(regs
.txdp
);
2359 SERIALIZE_SCALAR(regs
.txdp_hi
);
2360 SERIALIZE_SCALAR(regs
.txcfg
);
2361 SERIALIZE_SCALAR(regs
.gpior
);
2362 SERIALIZE_SCALAR(regs
.rxdp
);
2363 SERIALIZE_SCALAR(regs
.rxdp_hi
);
2364 SERIALIZE_SCALAR(regs
.rxcfg
);
2365 SERIALIZE_SCALAR(regs
.pqcr
);
2366 SERIALIZE_SCALAR(regs
.wcsr
);
2367 SERIALIZE_SCALAR(regs
.pcr
);
2368 SERIALIZE_SCALAR(regs
.rfcr
);
2369 SERIALIZE_SCALAR(regs
.rfdr
);
2370 SERIALIZE_SCALAR(regs
.srr
);
2371 SERIALIZE_SCALAR(regs
.mibc
);
2372 SERIALIZE_SCALAR(regs
.vrcr
);
2373 SERIALIZE_SCALAR(regs
.vtcr
);
2374 SERIALIZE_SCALAR(regs
.vdr
);
2375 SERIALIZE_SCALAR(regs
.ccsr
);
2376 SERIALIZE_SCALAR(regs
.tbicr
);
2377 SERIALIZE_SCALAR(regs
.tbisr
);
2378 SERIALIZE_SCALAR(regs
.tanar
);
2379 SERIALIZE_SCALAR(regs
.tanlpar
);
2380 SERIALIZE_SCALAR(regs
.taner
);
2381 SERIALIZE_SCALAR(regs
.tesr
);
2383 SERIALIZE_ARRAY(rom
.perfectMatch
, ETH_ADDR_LEN
);
2385 SERIALIZE_SCALAR(ioEnable
);
2388 * Serialize the data Fifos
2390 rxFifo
.serialize("rxFifo", os
);
2391 txFifo
.serialize("txFifo", os
);
2394 * Serialize the various helper variables
2396 bool txPacketExists
= txPacket
;
2397 SERIALIZE_SCALAR(txPacketExists
);
2398 if (txPacketExists
) {
2399 txPacket
->length
= txPacketBufPtr
- txPacket
->data
;
2400 txPacket
->serialize("txPacket", os
);
2401 uint32_t txPktBufPtr
= (uint32_t) (txPacketBufPtr
- txPacket
->data
);
2402 SERIALIZE_SCALAR(txPktBufPtr
);
2405 bool rxPacketExists
= rxPacket
;
2406 SERIALIZE_SCALAR(rxPacketExists
);
2407 if (rxPacketExists
) {
2408 rxPacket
->serialize("rxPacket", os
);
2409 uint32_t rxPktBufPtr
= (uint32_t) (rxPacketBufPtr
- rxPacket
->data
);
2410 SERIALIZE_SCALAR(rxPktBufPtr
);
2413 SERIALIZE_SCALAR(txXferLen
);
2414 SERIALIZE_SCALAR(rxXferLen
);
2417 * Serialize DescCaches
2419 SERIALIZE_SCALAR(txDescCache
.link
);
2420 SERIALIZE_SCALAR(txDescCache
.bufptr
);
2421 SERIALIZE_SCALAR(txDescCache
.cmdsts
);
2422 SERIALIZE_SCALAR(txDescCache
.extsts
);
2423 SERIALIZE_SCALAR(rxDescCache
.link
);
2424 SERIALIZE_SCALAR(rxDescCache
.bufptr
);
2425 SERIALIZE_SCALAR(rxDescCache
.cmdsts
);
2426 SERIALIZE_SCALAR(rxDescCache
.extsts
);
2429 * Serialize tx state machine
2431 int txState
= this->txState
;
2432 SERIALIZE_SCALAR(txState
);
2433 SERIALIZE_SCALAR(txEnable
);
2434 SERIALIZE_SCALAR(CTDD
);
2435 SERIALIZE_SCALAR(txFragPtr
);
2436 SERIALIZE_SCALAR(txDescCnt
);
2437 int txDmaState
= this->txDmaState
;
2438 SERIALIZE_SCALAR(txDmaState
);
2441 * Serialize rx state machine
2443 int rxState
= this->rxState
;
2444 SERIALIZE_SCALAR(rxState
);
2445 SERIALIZE_SCALAR(rxEnable
);
2446 SERIALIZE_SCALAR(CRDD
);
2447 SERIALIZE_SCALAR(rxPktBytes
);
2448 SERIALIZE_SCALAR(rxFragPtr
);
2449 SERIALIZE_SCALAR(rxDescCnt
);
2450 int rxDmaState
= this->rxDmaState
;
2451 SERIALIZE_SCALAR(rxDmaState
);
2453 SERIALIZE_SCALAR(extstsEnable
);
2456 * If there's a pending transmit, store the time so we can
2457 * reschedule it later
2459 Tick transmitTick
= txEvent
.scheduled() ? txEvent
.when() - curTick
: 0;
2460 SERIALIZE_SCALAR(transmitTick
);
2463 * receive address filter settings
2465 SERIALIZE_SCALAR(rxFilterEnable
);
2466 SERIALIZE_SCALAR(acceptBroadcast
);
2467 SERIALIZE_SCALAR(acceptMulticast
);
2468 SERIALIZE_SCALAR(acceptUnicast
);
2469 SERIALIZE_SCALAR(acceptPerfect
);
2470 SERIALIZE_SCALAR(acceptArp
);
2473 * Keep track of pending interrupt status.
2475 SERIALIZE_SCALAR(intrTick
);
2476 SERIALIZE_SCALAR(cpuPendingIntr
);
2477 Tick intrEventTick
= 0;
2479 intrEventTick
= intrEvent
->when();
2480 SERIALIZE_SCALAR(intrEventTick
);
2485 NSGigE::unserialize(Checkpoint
*cp
, const std::string
§ion
)
2487 // Unserialize the PciDev base class
2488 PciDev::unserialize(cp
, section
);
2490 UNSERIALIZE_SCALAR(regs
.command
);
2491 UNSERIALIZE_SCALAR(regs
.config
);
2492 UNSERIALIZE_SCALAR(regs
.mear
);
2493 UNSERIALIZE_SCALAR(regs
.ptscr
);
2494 UNSERIALIZE_SCALAR(regs
.isr
);
2495 UNSERIALIZE_SCALAR(regs
.imr
);
2496 UNSERIALIZE_SCALAR(regs
.ier
);
2497 UNSERIALIZE_SCALAR(regs
.ihr
);
2498 UNSERIALIZE_SCALAR(regs
.txdp
);
2499 UNSERIALIZE_SCALAR(regs
.txdp_hi
);
2500 UNSERIALIZE_SCALAR(regs
.txcfg
);
2501 UNSERIALIZE_SCALAR(regs
.gpior
);
2502 UNSERIALIZE_SCALAR(regs
.rxdp
);
2503 UNSERIALIZE_SCALAR(regs
.rxdp_hi
);
2504 UNSERIALIZE_SCALAR(regs
.rxcfg
);
2505 UNSERIALIZE_SCALAR(regs
.pqcr
);
2506 UNSERIALIZE_SCALAR(regs
.wcsr
);
2507 UNSERIALIZE_SCALAR(regs
.pcr
);
2508 UNSERIALIZE_SCALAR(regs
.rfcr
);
2509 UNSERIALIZE_SCALAR(regs
.rfdr
);
2510 UNSERIALIZE_SCALAR(regs
.srr
);
2511 UNSERIALIZE_SCALAR(regs
.mibc
);
2512 UNSERIALIZE_SCALAR(regs
.vrcr
);
2513 UNSERIALIZE_SCALAR(regs
.vtcr
);
2514 UNSERIALIZE_SCALAR(regs
.vdr
);
2515 UNSERIALIZE_SCALAR(regs
.ccsr
);
2516 UNSERIALIZE_SCALAR(regs
.tbicr
);
2517 UNSERIALIZE_SCALAR(regs
.tbisr
);
2518 UNSERIALIZE_SCALAR(regs
.tanar
);
2519 UNSERIALIZE_SCALAR(regs
.tanlpar
);
2520 UNSERIALIZE_SCALAR(regs
.taner
);
2521 UNSERIALIZE_SCALAR(regs
.tesr
);
2523 UNSERIALIZE_ARRAY(rom
.perfectMatch
, ETH_ADDR_LEN
);
2525 UNSERIALIZE_SCALAR(ioEnable
);
2528 * unserialize the data fifos
2530 rxFifo
.unserialize("rxFifo", cp
, section
);
2531 txFifo
.unserialize("txFifo", cp
, section
);
2534 * unserialize the various helper variables
2536 bool txPacketExists
;
2537 UNSERIALIZE_SCALAR(txPacketExists
);
2538 if (txPacketExists
) {
2539 txPacket
= new PacketData(16384);
2540 txPacket
->unserialize("txPacket", cp
, section
);
2541 uint32_t txPktBufPtr
;
2542 UNSERIALIZE_SCALAR(txPktBufPtr
);
2543 txPacketBufPtr
= (uint8_t *) txPacket
->data
+ txPktBufPtr
;
2547 bool rxPacketExists
;
2548 UNSERIALIZE_SCALAR(rxPacketExists
);
2550 if (rxPacketExists
) {
2551 rxPacket
= new PacketData(16384);
2552 rxPacket
->unserialize("rxPacket", cp
, section
);
2553 uint32_t rxPktBufPtr
;
2554 UNSERIALIZE_SCALAR(rxPktBufPtr
);
2555 rxPacketBufPtr
= (uint8_t *) rxPacket
->data
+ rxPktBufPtr
;
2559 UNSERIALIZE_SCALAR(txXferLen
);
2560 UNSERIALIZE_SCALAR(rxXferLen
);
2563 * Unserialize DescCaches
2565 UNSERIALIZE_SCALAR(txDescCache
.link
);
2566 UNSERIALIZE_SCALAR(txDescCache
.bufptr
);
2567 UNSERIALIZE_SCALAR(txDescCache
.cmdsts
);
2568 UNSERIALIZE_SCALAR(txDescCache
.extsts
);
2569 UNSERIALIZE_SCALAR(rxDescCache
.link
);
2570 UNSERIALIZE_SCALAR(rxDescCache
.bufptr
);
2571 UNSERIALIZE_SCALAR(rxDescCache
.cmdsts
);
2572 UNSERIALIZE_SCALAR(rxDescCache
.extsts
);
2575 * unserialize tx state machine
2578 UNSERIALIZE_SCALAR(txState
);
2579 this->txState
= (TxState
) txState
;
2580 UNSERIALIZE_SCALAR(txEnable
);
2581 UNSERIALIZE_SCALAR(CTDD
);
2582 UNSERIALIZE_SCALAR(txFragPtr
);
2583 UNSERIALIZE_SCALAR(txDescCnt
);
2585 UNSERIALIZE_SCALAR(txDmaState
);
2586 this->txDmaState
= (DmaState
) txDmaState
;
2589 * unserialize rx state machine
2592 UNSERIALIZE_SCALAR(rxState
);
2593 this->rxState
= (RxState
) rxState
;
2594 UNSERIALIZE_SCALAR(rxEnable
);
2595 UNSERIALIZE_SCALAR(CRDD
);
2596 UNSERIALIZE_SCALAR(rxPktBytes
);
2597 UNSERIALIZE_SCALAR(rxFragPtr
);
2598 UNSERIALIZE_SCALAR(rxDescCnt
);
2600 UNSERIALIZE_SCALAR(rxDmaState
);
2601 this->rxDmaState
= (DmaState
) rxDmaState
;
2603 UNSERIALIZE_SCALAR(extstsEnable
);
2606 * If there's a pending transmit, reschedule it now
2609 UNSERIALIZE_SCALAR(transmitTick
);
2611 txEvent
.schedule(curTick
+ transmitTick
);
2614 * unserialize receive address filter settings
2616 UNSERIALIZE_SCALAR(rxFilterEnable
);
2617 UNSERIALIZE_SCALAR(acceptBroadcast
);
2618 UNSERIALIZE_SCALAR(acceptMulticast
);
2619 UNSERIALIZE_SCALAR(acceptUnicast
);
2620 UNSERIALIZE_SCALAR(acceptPerfect
);
2621 UNSERIALIZE_SCALAR(acceptArp
);
2624 * Keep track of pending interrupt status.
2626 UNSERIALIZE_SCALAR(intrTick
);
2627 UNSERIALIZE_SCALAR(cpuPendingIntr
);
2629 UNSERIALIZE_SCALAR(intrEventTick
);
2630 if (intrEventTick
) {
2631 intrEvent
= new IntrEvent(this, true);
2632 intrEvent
->schedule(intrEventTick
);
2636 * re-add addrRanges to bus bridges
2639 pioInterface
->addAddrRange(RangeSize(BARAddrs
[0], BARSize
[0]));
2640 pioInterface
->addAddrRange(RangeSize(BARAddrs
[1], BARSize
[1]));
Tick
NSGigE::cacheAccess(MemReqPtr &req)
{
    // Timing hook for programmed I/O: every access to the device's
    // address range completes after a fixed pioLatency.
    DPRINTF(EthernetPIO, "timing access to paddr=%#x (daddr=%#x)\n",
            req->paddr, req->paddr - addr);
    return curTick + pioLatency;
}
// Configuration parameters for the NSGigEInt SimObject (the object
// that connects the NIC to an ethernet link).
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

    SimObjectParam<EtherInt *> peer;    // interface at the other end of the link
    SimObjectParam<NSGigE *> device;    // NIC this interface belongs to

END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)
// Descriptions and defaults for the NSGigEInt parameters declared above.
BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt)

    INIT_PARAM_DFLT(peer, "peer interface", NULL),
    INIT_PARAM(device, "Ethernet device of this interface")

END_INIT_SIM_OBJECT_PARAMS(NSGigEInt)
// Factory: build an NSGigEInt and, if a peer interface was configured,
// wire the two ends of the link to each other.
CREATE_SIM_OBJECT(NSGigEInt)
{
    NSGigEInt *dev_int = new NSGigEInt(getInstanceName(), device);

    EtherInt *p = (EtherInt *)peer;
    if (p) {
        dev_int->setPeer(p);
        p->setPeer(dev_int);
    }

    return dev_int;
}
2679 REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt
)
// Configuration parameters for the NSGigE device model.
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE)

    // NOTE(review): the addr declaration was garbled in extraction;
    // reconstructed from INIT_PARAM(addr, "Device Address") below.
    Param<Addr> addr;
    Param<Tick> tx_delay;
    Param<Tick> rx_delay;
    Param<Tick> intr_delay;
    SimObjectParam<MemoryController *> mmu;
    SimObjectParam<PhysicalMemory *> physmem;
    Param<bool> rx_filter;
    Param<string> hardware_address;
    SimObjectParam<Bus*> io_bus;
    SimObjectParam<Bus*> payload_bus;
    SimObjectParam<HierParams *> hier;
    Param<Tick> pio_latency;
    Param<bool> dma_desc_free;
    Param<bool> dma_data_free;
    Param<Tick> dma_read_delay;
    Param<Tick> dma_write_delay;
    Param<Tick> dma_read_factor;
    Param<Tick> dma_write_factor;
    SimObjectParam<PciConfigAll *> configspace;
    SimObjectParam<PciConfigData *> configdata;
    SimObjectParam<Platform *> platform;
    Param<uint32_t> pci_bus;
    Param<uint32_t> pci_dev;
    Param<uint32_t> pci_func;
    Param<uint32_t> tx_fifo_size;
    Param<uint32_t> rx_fifo_size;

END_DECLARE_SIM_OBJECT_PARAMS(NSGigE)
// Descriptions and defaults for the NSGigE parameters declared above.
BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE)

    INIT_PARAM(addr, "Device Address"),
    INIT_PARAM_DFLT(tx_delay, "Transmit Delay", 1000),
    INIT_PARAM_DFLT(rx_delay, "Receive Delay", 1000),
    INIT_PARAM_DFLT(intr_delay, "Interrupt Delay in microseconds", 0),
    INIT_PARAM(mmu, "Memory Controller"),
    INIT_PARAM(physmem, "Physical Memory"),
    INIT_PARAM_DFLT(rx_filter, "Enable Receive Filter", true),
    INIT_PARAM_DFLT(hardware_address, "Ethernet Hardware Address",
                    "00:99:00:00:00:01"),
    INIT_PARAM_DFLT(io_bus, "The IO Bus to attach to for headers", NULL),
    INIT_PARAM_DFLT(payload_bus, "The IO Bus to attach to for payload", NULL),
    INIT_PARAM_DFLT(hier, "Hierarchy global variables", &defaultHierParams),
    INIT_PARAM_DFLT(pio_latency, "Programmed IO latency in bus cycles", 1),
    INIT_PARAM_DFLT(dma_desc_free, "DMA of Descriptors is free", false),
    INIT_PARAM_DFLT(dma_data_free, "DMA of Data is free", false),
    INIT_PARAM_DFLT(dma_read_delay, "fixed delay for dma reads", 0),
    INIT_PARAM_DFLT(dma_write_delay, "fixed delay for dma writes", 0),
    INIT_PARAM_DFLT(dma_read_factor, "multiplier for dma reads", 0),
    INIT_PARAM_DFLT(dma_write_factor, "multiplier for dma writes", 0),
    INIT_PARAM(configspace, "PCI Configspace"),
    INIT_PARAM(configdata, "PCI Config data"),
    INIT_PARAM(platform, "Platform"),
    INIT_PARAM(pci_bus, "PCI bus"),
    INIT_PARAM(pci_dev, "PCI device number"),
    INIT_PARAM(pci_func, "PCI function code"),
    INIT_PARAM_DFLT(tx_fifo_size, "max size in bytes of txFifo", 131072),
    INIT_PARAM_DFLT(rx_fifo_size, "max size in bytes of rxFifo", 131072)

END_INIT_SIM_OBJECT_PARAMS(NSGigE)
// Factory: bundle all configuration parameters into an NSGigE::Params
// struct and hand it to the device constructor.
CREATE_SIM_OBJECT(NSGigE)
{
    NSGigE::Params *params = new NSGigE::Params;

    params->name = getInstanceName();
    // NOTE(review): one assignment here was garbled in extraction;
    // reconstructed as the mmu hookup — verify against repository history.
    params->mmu = mmu;
    params->configSpace = configspace;
    params->configData = configdata;
    params->plat = platform;
    params->busNum = pci_bus;
    params->deviceNum = pci_dev;
    params->functionNum = pci_func;

    params->intr_delay = intr_delay;
    params->pmem = physmem;
    params->tx_delay = tx_delay;
    params->rx_delay = rx_delay;
    params->hier = hier;
    params->header_bus = io_bus;
    params->payload_bus = payload_bus;
    params->pio_latency = pio_latency;
    params->dma_desc_free = dma_desc_free;
    params->dma_data_free = dma_data_free;
    params->dma_read_delay = dma_read_delay;
    params->dma_write_delay = dma_write_delay;
    params->dma_read_factor = dma_read_factor;
    params->dma_write_factor = dma_write_factor;
    params->rx_filter = rx_filter;
    params->eaddr = hardware_address;
    params->tx_fifo_size = tx_fifo_size;
    params->rx_fifo_size = rx_fifo_size;
    return new NSGigE(params);
}
2780 REGISTER_SIM_OBJECT("NSGigE", NSGigE
)