0d446214d947a08daf30d1e184236a00a63f177a
2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 * Device module for modelling the National Semiconductor
31 * DP83820 ethernet controller. Does not support priority queueing
37 #include "base/inet.hh"
38 #include "cpu/exec_context.hh"
39 #include "dev/etherlink.hh"
40 #include "dev/ns_gige.hh"
41 #include "dev/pciconfigall.hh"
42 #include "mem/bus/bus.hh"
43 #include "mem/bus/dma_interface.hh"
44 #include "mem/bus/pio_interface.hh"
45 #include "mem/bus/pio_interface_impl.hh"
46 #include "mem/functional/memory_control.hh"
47 #include "mem/functional/physical.hh"
48 #include "sim/builder.hh"
49 #include "sim/debug.hh"
50 #include "sim/host.hh"
51 #include "sim/stats.hh"
52 #include "targetarch/vtophys.hh"
54 const char *NsRxStateStrings
[] =
65 const char *NsTxStateStrings
[] =
76 const char *NsDmaState
[] =
88 ///////////////////////////////////////////////////////////////////////
92 NSGigE::NSGigE(Params
*p
)
93 : PciDev(p
), ioEnable(false),
94 txFifo(p
->tx_fifo_size
), rxFifo(p
->rx_fifo_size
),
95 txPacket(0), rxPacket(0), txPacketBufPtr(NULL
), rxPacketBufPtr(NULL
),
96 txXferLen(0), rxXferLen(0), clock(p
->clock
),
97 txState(txIdle
), txEnable(false), CTDD(false),
98 txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle
), rxState(rxIdle
),
99 rxEnable(false), CRDD(false), rxPktBytes(0),
100 rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle
), extstsEnable(false),
101 rxDmaReadEvent(this), rxDmaWriteEvent(this),
102 txDmaReadEvent(this), txDmaWriteEvent(this),
103 dmaDescFree(p
->dma_desc_free
), dmaDataFree(p
->dma_data_free
),
104 txDelay(p
->tx_delay
), rxDelay(p
->rx_delay
),
105 rxKickTick(0), rxKickEvent(this), txKickTick(0), txKickEvent(this),
106 txEvent(this), rxFilterEnable(p
->rx_filter
), acceptBroadcast(false),
107 acceptMulticast(false), acceptUnicast(false),
108 acceptPerfect(false), acceptArp(false),
109 physmem(p
->pmem
), intrTick(0), cpuPendingIntr(false),
110 intrEvent(0), interface(0)
113 pioInterface
= newPioInterface(name(), p
->hier
,
115 &NSGigE::cacheAccess
);
117 pioLatency
= p
->pio_latency
* p
->header_bus
->clockRate
;
120 dmaInterface
= new DMAInterface
<Bus
>(name() + ".dma",
125 dmaInterface
= new DMAInterface
<Bus
>(name() + ".dma",
129 } else if (p
->payload_bus
) {
130 pioInterface
= newPioInterface(name(), p
->hier
,
131 p
->payload_bus
, this,
132 &NSGigE::cacheAccess
);
134 pioLatency
= p
->pio_latency
* p
->payload_bus
->clockRate
;
136 dmaInterface
= new DMAInterface
<Bus
>(name() + ".dma",
143 intrDelay
= p
->intr_delay
;
144 dmaReadDelay
= p
->dma_read_delay
;
145 dmaWriteDelay
= p
->dma_write_delay
;
146 dmaReadFactor
= p
->dma_read_factor
;
147 dmaWriteFactor
= p
->dma_write_factor
;
150 memcpy(&rom
.perfectMatch
, p
->eaddr
.bytes(), ETH_ADDR_LEN
);
160 .name(name() + ".txBytes")
161 .desc("Bytes Transmitted")
166 .name(name() + ".rxBytes")
167 .desc("Bytes Received")
172 .name(name() + ".txPackets")
173 .desc("Number of Packets Transmitted")
178 .name(name() + ".rxPackets")
179 .desc("Number of Packets Received")
184 .name(name() + ".txIpChecksums")
185 .desc("Number of tx IP Checksums done by device")
191 .name(name() + ".rxIpChecksums")
192 .desc("Number of rx IP Checksums done by device")
198 .name(name() + ".txTcpChecksums")
199 .desc("Number of tx TCP Checksums done by device")
205 .name(name() + ".rxTcpChecksums")
206 .desc("Number of rx TCP Checksums done by device")
212 .name(name() + ".txUdpChecksums")
213 .desc("Number of tx UDP Checksums done by device")
219 .name(name() + ".rxUdpChecksums")
220 .desc("Number of rx UDP Checksums done by device")
226 .name(name() + ".descDMAReads")
227 .desc("Number of descriptors the device read w/ DMA")
232 .name(name() + ".descDMAWrites")
233 .desc("Number of descriptors the device wrote w/ DMA")
238 .name(name() + ".descDmaReadBytes")
239 .desc("number of descriptor bytes read w/ DMA")
244 .name(name() + ".descDmaWriteBytes")
245 .desc("number of descriptor bytes write w/ DMA")
250 .name(name() + ".txBandwidth")
251 .desc("Transmit Bandwidth (bits/s)")
257 .name(name() + ".rxBandwidth")
258 .desc("Receive Bandwidth (bits/s)")
264 .name(name() + ".totBandwidth")
265 .desc("Total Bandwidth (bits/s)")
271 .name(name() + ".totPackets")
272 .desc("Total Packets")
278 .name(name() + ".totBytes")
285 .name(name() + ".totPPS")
286 .desc("Total Tranmission Rate (packets/s)")
292 .name(name() + ".txPPS")
293 .desc("Packet Tranmission Rate (packets/s)")
299 .name(name() + ".rxPPS")
300 .desc("Packet Reception Rate (packets/s)")
306 .name(name() + ".postedSwi")
307 .desc("number of software interrupts posted to CPU")
312 .name(name() + ".totalSwi")
313 .desc("number of total Swi written to ISR")
318 .name(name() + ".coalescedSwi")
319 .desc("average number of Swi's coalesced into each post")
324 .name(name() + ".postedRxIdle")
325 .desc("number of rxIdle interrupts posted to CPU")
330 .name(name() + ".totalRxIdle")
331 .desc("number of total RxIdle written to ISR")
336 .name(name() + ".coalescedRxIdle")
337 .desc("average number of RxIdle's coalesced into each post")
342 .name(name() + ".postedRxOk")
343 .desc("number of RxOk interrupts posted to CPU")
348 .name(name() + ".totalRxOk")
349 .desc("number of total RxOk written to ISR")
354 .name(name() + ".coalescedRxOk")
355 .desc("average number of RxOk's coalesced into each post")
360 .name(name() + ".postedRxDesc")
361 .desc("number of RxDesc interrupts posted to CPU")
366 .name(name() + ".totalRxDesc")
367 .desc("number of total RxDesc written to ISR")
372 .name(name() + ".coalescedRxDesc")
373 .desc("average number of RxDesc's coalesced into each post")
378 .name(name() + ".postedTxOk")
379 .desc("number of TxOk interrupts posted to CPU")
384 .name(name() + ".totalTxOk")
385 .desc("number of total TxOk written to ISR")
390 .name(name() + ".coalescedTxOk")
391 .desc("average number of TxOk's coalesced into each post")
396 .name(name() + ".postedTxIdle")
397 .desc("number of TxIdle interrupts posted to CPU")
402 .name(name() + ".totalTxIdle")
403 .desc("number of total TxIdle written to ISR")
408 .name(name() + ".coalescedTxIdle")
409 .desc("average number of TxIdle's coalesced into each post")
414 .name(name() + ".postedTxDesc")
415 .desc("number of TxDesc interrupts posted to CPU")
420 .name(name() + ".totalTxDesc")
421 .desc("number of total TxDesc written to ISR")
426 .name(name() + ".coalescedTxDesc")
427 .desc("average number of TxDesc's coalesced into each post")
432 .name(name() + ".postedRxOrn")
433 .desc("number of RxOrn posted to CPU")
438 .name(name() + ".totalRxOrn")
439 .desc("number of total RxOrn written to ISR")
444 .name(name() + ".coalescedRxOrn")
445 .desc("average number of RxOrn's coalesced into each post")
450 .name(name() + ".coalescedTotal")
451 .desc("average number of interrupts coalesced into each post")
456 .name(name() + ".postedInterrupts")
457 .desc("number of posts to CPU")
462 .name(name() + ".droppedPackets")
463 .desc("number of packets dropped")
467 coalescedSwi
= totalSwi
/ postedInterrupts
;
468 coalescedRxIdle
= totalRxIdle
/ postedInterrupts
;
469 coalescedRxOk
= totalRxOk
/ postedInterrupts
;
470 coalescedRxDesc
= totalRxDesc
/ postedInterrupts
;
471 coalescedTxOk
= totalTxOk
/ postedInterrupts
;
472 coalescedTxIdle
= totalTxIdle
/ postedInterrupts
;
473 coalescedTxDesc
= totalTxDesc
/ postedInterrupts
;
474 coalescedRxOrn
= totalRxOrn
/ postedInterrupts
;
476 coalescedTotal
= (totalSwi
+ totalRxIdle
+ totalRxOk
+ totalRxDesc
+
477 totalTxOk
+ totalTxIdle
+ totalTxDesc
+
478 totalRxOrn
) / postedInterrupts
;
480 txBandwidth
= txBytes
* Stats::constant(8) / simSeconds
;
481 rxBandwidth
= rxBytes
* Stats::constant(8) / simSeconds
;
482 totBandwidth
= txBandwidth
+ rxBandwidth
;
483 totBytes
= txBytes
+ rxBytes
;
484 totPackets
= txPackets
+ rxPackets
;
486 txPacketRate
= txPackets
/ simSeconds
;
487 rxPacketRate
= rxPackets
/ simSeconds
;
491 * This is to read the PCI general configuration registers
494 NSGigE::ReadConfig(int offset
, int size
, uint8_t *data
)
496 if (offset
< PCI_DEVICE_SPECIFIC
)
497 PciDev::ReadConfig(offset
, size
, data
);
499 panic("Device specific PCI config space not implemented!\n");
503 * This is to write to the PCI general configuration registers
506 NSGigE::WriteConfig(int offset
, int size
, uint32_t data
)
508 if (offset
< PCI_DEVICE_SPECIFIC
)
509 PciDev::WriteConfig(offset
, size
, data
);
511 panic("Device specific PCI config space not implemented!\n");
513 // Need to catch writes to BARs to update the PIO interface
515 // seems to work fine without all these PCI settings, but i
516 // put in the IO to double check, an assertion will fail if we
517 // need to properly implement it
519 if (config
.data
[offset
] & PCI_CMD_IOSE
)
525 if (config
.data
[offset
] & PCI_CMD_BME
) {
532 if (config
.data
[offset
] & PCI_CMD_MSE
) {
541 case PCI0_BASE_ADDR0
:
542 if (BARAddrs
[0] != 0) {
544 pioInterface
->addAddrRange(RangeSize(BARAddrs
[0], BARSize
[0]));
546 BARAddrs
[0] &= EV5::PAddrUncachedMask
;
549 case PCI0_BASE_ADDR1
:
550 if (BARAddrs
[1] != 0) {
552 pioInterface
->addAddrRange(RangeSize(BARAddrs
[1], BARSize
[1]));
554 BARAddrs
[1] &= EV5::PAddrUncachedMask
;
561 * This reads the device registers, which are detailed in the NS83820
565 NSGigE::read(MemReqPtr
&req
, uint8_t *data
)
569 //The mask is to give you only the offset into the device register file
570 Addr daddr
= req
->paddr
& 0xfff;
571 DPRINTF(EthernetPIO
, "read da=%#x pa=%#x va=%#x size=%d\n",
572 daddr
, req
->paddr
, req
->vaddr
, req
->size
);
575 // there are some reserved registers, you can see ns_gige_reg.h and
576 // the spec sheet for details
577 if (daddr
> LAST
&& daddr
<= RESERVED
) {
578 panic("Accessing reserved register");
579 } else if (daddr
> RESERVED
&& daddr
<= 0x3FC) {
580 ReadConfig(daddr
& 0xff, req
->size
, data
);
582 } else if (daddr
>= MIB_START
&& daddr
<= MIB_END
) {
583 // don't implement all the MIB's. hopefully the kernel
584 // doesn't actually DEPEND upon their values
585 // MIB are just hardware stats keepers
586 uint32_t ®
= *(uint32_t *) data
;
589 } else if (daddr
> 0x3FC)
590 panic("Something is messed up!\n");
593 case sizeof(uint32_t):
595 uint32_t ®
= *(uint32_t *)data
;
600 //these are supposed to be cleared on a read
601 reg
&= ~(CR_RXD
| CR_TXD
| CR_TXR
| CR_RXR
);
618 devIntrClear(ISR_ALL
);
673 // see the spec sheet for how RFCR and RFDR work
674 // basically, you write to RFCR to tell the machine
675 // what you want to do next, then you act upon RFDR,
676 // and the device will be prepared b/c of what you
683 switch (regs
.rfcr
& RFCR_RFADDR
) {
685 reg
= rom
.perfectMatch
[1];
687 reg
+= rom
.perfectMatch
[0];
690 reg
= rom
.perfectMatch
[3] << 8;
691 reg
+= rom
.perfectMatch
[2];
694 reg
= rom
.perfectMatch
[5] << 8;
695 reg
+= rom
.perfectMatch
[4];
698 panic("reading RFDR for something other than PMATCH!\n");
699 // didn't implement other RFDR functionality b/c
700 // driver didn't use it
710 reg
&= ~(MIBC_MIBS
| MIBC_ACLR
);
754 reg
= params()->m5reg
;
758 panic("reading unimplemented register: addr=%#x", daddr
);
761 DPRINTF(EthernetPIO
, "read from %#x: data=%d data=%#x\n",
767 panic("accessing register with invalid size: addr=%#x, size=%d",
775 NSGigE::write(MemReqPtr
&req
, const uint8_t *data
)
779 Addr daddr
= req
->paddr
& 0xfff;
780 DPRINTF(EthernetPIO
, "write da=%#x pa=%#x va=%#x size=%d\n",
781 daddr
, req
->paddr
, req
->vaddr
, req
->size
);
783 if (daddr
> LAST
&& daddr
<= RESERVED
) {
784 panic("Accessing reserved register");
785 } else if (daddr
> RESERVED
&& daddr
<= 0x3FC) {
786 WriteConfig(daddr
& 0xff, req
->size
, *(uint32_t *)data
);
788 } else if (daddr
> 0x3FC)
789 panic("Something is messed up!\n");
791 if (req
->size
== sizeof(uint32_t)) {
792 uint32_t reg
= *(uint32_t *)data
;
793 DPRINTF(EthernetPIO
, "write data=%d data=%#x\n", reg
, reg
);
800 } else if (reg
& CR_TXE
) {
803 // the kernel is enabling the transmit machine
804 if (txState
== txIdle
)
810 } else if (reg
& CR_RXE
) {
813 if (rxState
== rxIdle
)
824 devIntrPost(ISR_SWI
);
835 if (reg
& CFGR_LNKSTS
||
838 reg
& CFGR_RESERVED
||
839 reg
& CFGR_T64ADDR
||
840 reg
& CFGR_PCI64_DET
)
841 panic("writing to read-only or reserved CFGR bits!\n");
843 regs
.config
|= reg
& ~(CFGR_LNKSTS
| CFGR_SPDSTS
| CFGR_DUPSTS
|
844 CFGR_RESERVED
| CFGR_T64ADDR
|
847 // all these #if 0's are because i don't THINK the kernel needs to
848 // have these implemented. if there is a problem relating to one of
849 // these, you may need to add functionality in.
851 if (reg
& CFGR_TBI_EN
) ;
852 if (reg
& CFGR_MODE_1000
) ;
855 if (reg
& CFGR_AUTO_1000
)
856 panic("CFGR_AUTO_1000 not implemented!\n");
859 if (reg
& CFGR_PINT_DUPSTS
||
860 reg
& CFGR_PINT_LNKSTS
||
861 reg
& CFGR_PINT_SPDSTS
)
864 if (reg
& CFGR_TMRTEST
) ;
865 if (reg
& CFGR_MRM_DIS
) ;
866 if (reg
& CFGR_MWI_DIS
) ;
868 if (reg
& CFGR_T64ADDR
)
869 panic("CFGR_T64ADDR is read only register!\n");
871 if (reg
& CFGR_PCI64_DET
)
872 panic("CFGR_PCI64_DET is read only register!\n");
874 if (reg
& CFGR_DATA64_EN
) ;
875 if (reg
& CFGR_M64ADDR
) ;
876 if (reg
& CFGR_PHY_RST
) ;
877 if (reg
& CFGR_PHY_DIS
) ;
880 if (reg
& CFGR_EXTSTS_EN
)
883 extstsEnable
= false;
886 if (reg
& CFGR_REQALG
) ;
888 if (reg
& CFGR_POW
) ;
889 if (reg
& CFGR_EXD
) ;
890 if (reg
& CFGR_PESEL
) ;
891 if (reg
& CFGR_BROM_DIS
) ;
892 if (reg
& CFGR_EXT_125
) ;
893 if (reg
& CFGR_BEM
) ;
899 // since phy is completely faked, MEAR_MD* don't matter
900 // and since the driver never uses MEAR_EE*, they don't
903 if (reg
& MEAR_EEDI
) ;
904 if (reg
& MEAR_EEDO
) ; // this one is read only
905 if (reg
& MEAR_EECLK
) ;
906 if (reg
& MEAR_EESEL
) ;
907 if (reg
& MEAR_MDIO
) ;
908 if (reg
& MEAR_MDDIR
) ;
909 if (reg
& MEAR_MDC
) ;
914 regs
.ptscr
= reg
& ~(PTSCR_RBIST_RDONLY
);
915 // these control BISTs for various parts of chip - we
916 // don't care or do just fake that the BIST is done
917 if (reg
& PTSCR_RBIST_EN
)
918 regs
.ptscr
|= PTSCR_RBIST_DONE
;
919 if (reg
& PTSCR_EEBIST_EN
)
920 regs
.ptscr
&= ~PTSCR_EEBIST_EN
;
921 if (reg
& PTSCR_EELOAD_EN
)
922 regs
.ptscr
&= ~PTSCR_EELOAD_EN
;
925 case ISR
: /* writing to the ISR has no effect */
926 panic("ISR is a read only register!\n");
939 /* not going to implement real interrupt holdoff */
943 regs
.txdp
= (reg
& 0xFFFFFFFC);
944 assert(txState
== txIdle
);
955 if (reg
& TX_CFG_CSI
) ;
956 if (reg
& TX_CFG_HBI
) ;
957 if (reg
& TX_CFG_MLB
) ;
958 if (reg
& TX_CFG_ATP
) ;
959 if (reg
& TX_CFG_ECRETRY
) {
961 * this could easily be implemented, but considering
962 * the network is just a fake pipe, wouldn't make
967 if (reg
& TX_CFG_BRST_DIS
) ;
971 /* we handle our own DMA, ignore the kernel's exhortations */
972 if (reg
& TX_CFG_MXDMA
) ;
975 // also, we currently don't care about fill/drain
976 // thresholds though this may change in the future with
977 // more realistic networks or a driver which changes it
978 // according to feedback
984 /* these just control general purpose i/o pins, don't matter */
999 if (reg
& RX_CFG_AEP
) ;
1000 if (reg
& RX_CFG_ARP
) ;
1001 if (reg
& RX_CFG_STRIPCRC
) ;
1002 if (reg
& RX_CFG_RX_RD
) ;
1003 if (reg
& RX_CFG_ALP
) ;
1004 if (reg
& RX_CFG_AIRL
) ;
1006 /* we handle our own DMA, ignore what kernel says about it */
1007 if (reg
& RX_CFG_MXDMA
) ;
1009 //also, we currently don't care about fill/drain thresholds
1010 //though this may change in the future with more realistic
1011 //networks or a driver which changes it according to feedback
1012 if (reg
& (RX_CFG_DRTH
| RX_CFG_DRTH0
)) ;
1017 /* there is no priority queueing used in the linux 2.6 driver */
1022 /* not going to implement wake on LAN */
1027 /* not going to implement pause control */
1034 rxFilterEnable
= (reg
& RFCR_RFEN
) ? true : false;
1035 acceptBroadcast
= (reg
& RFCR_AAB
) ? true : false;
1036 acceptMulticast
= (reg
& RFCR_AAM
) ? true : false;
1037 acceptUnicast
= (reg
& RFCR_AAU
) ? true : false;
1038 acceptPerfect
= (reg
& RFCR_APM
) ? true : false;
1039 acceptArp
= (reg
& RFCR_AARP
) ? true : false;
1042 if (reg
& RFCR_APAT
)
1043 panic("RFCR_APAT not implemented!\n");
1046 if (reg
& RFCR_MHEN
|| reg
& RFCR_UHEN
)
1047 panic("hash filtering not implemented!\n");
1050 panic("RFCR_ULM not implemented!\n");
1055 panic("the driver never writes to RFDR, something is wrong!\n");
1058 panic("the driver never uses BRAR, something is wrong!\n");
1061 panic("the driver never uses BRDR, something is wrong!\n");
1064 panic("SRR is read only register!\n");
1067 panic("the driver never uses MIBC, something is wrong!\n");
1078 panic("the driver never uses VDR, something is wrong!\n");
1082 /* not going to implement clockrun stuff */
1088 if (reg
& TBICR_MR_LOOPBACK
)
1089 panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
1091 if (reg
& TBICR_MR_AN_ENABLE
) {
1092 regs
.tanlpar
= regs
.tanar
;
1093 regs
.tbisr
|= (TBISR_MR_AN_COMPLETE
| TBISR_MR_LINK_STATUS
);
1097 if (reg
& TBICR_MR_RESTART_AN
) ;
1103 panic("TBISR is read only register!\n");
1107 if (reg
& TANAR_PS2
)
1108 panic("this isn't used in driver, something wrong!\n");
1110 if (reg
& TANAR_PS1
)
1111 panic("this isn't used in driver, something wrong!\n");
1115 panic("this should only be written to by the fake phy!\n");
1118 panic("TANER is read only register!\n");
1125 panic("invalid register access daddr=%#x", daddr
);
1128 panic("Invalid Request Size");
1135 NSGigE::devIntrPost(uint32_t interrupts
)
1137 if (interrupts
& ISR_RESERVE
)
1138 panic("Cannot set a reserved interrupt");
1140 if (interrupts
& ISR_NOIMPL
)
1141 warn("interrupt not implemented %#x\n", interrupts
);
1143 interrupts
&= ~ISR_NOIMPL
;
1144 regs
.isr
|= interrupts
;
1146 if (interrupts
& regs
.imr
) {
1147 if (interrupts
& ISR_SWI
) {
1150 if (interrupts
& ISR_RXIDLE
) {
1153 if (interrupts
& ISR_RXOK
) {
1156 if (interrupts
& ISR_RXDESC
) {
1159 if (interrupts
& ISR_TXOK
) {
1162 if (interrupts
& ISR_TXIDLE
) {
1165 if (interrupts
& ISR_TXDESC
) {
1168 if (interrupts
& ISR_RXORN
) {
1173 DPRINTF(EthernetIntr
,
1174 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
1175 interrupts
, regs
.isr
, regs
.imr
);
1177 if ((regs
.isr
& regs
.imr
)) {
1178 Tick when
= curTick
;
1179 if (!(regs
.isr
& regs
.imr
& ISR_NODELAY
))
1185 /* writing this interrupt counting stats inside this means that this function
1186 is now limited to being used to clear all interrupts upon the kernel
1187 reading isr and servicing. just telling you in case you were thinking
1191 NSGigE::devIntrClear(uint32_t interrupts
)
1193 if (interrupts
& ISR_RESERVE
)
1194 panic("Cannot clear a reserved interrupt");
1196 if (regs
.isr
& regs
.imr
& ISR_SWI
) {
1199 if (regs
.isr
& regs
.imr
& ISR_RXIDLE
) {
1202 if (regs
.isr
& regs
.imr
& ISR_RXOK
) {
1205 if (regs
.isr
& regs
.imr
& ISR_RXDESC
) {
1208 if (regs
.isr
& regs
.imr
& ISR_TXOK
) {
1211 if (regs
.isr
& regs
.imr
& ISR_TXIDLE
) {
1214 if (regs
.isr
& regs
.imr
& ISR_TXDESC
) {
1217 if (regs
.isr
& regs
.imr
& ISR_RXORN
) {
1221 if (regs
.isr
& regs
.imr
& (ISR_SWI
| ISR_RXIDLE
| ISR_RXOK
| ISR_RXDESC
|
1222 ISR_TXOK
| ISR_TXIDLE
| ISR_TXDESC
| ISR_RXORN
) )
1225 interrupts
&= ~ISR_NOIMPL
;
1226 regs
.isr
&= ~interrupts
;
1228 DPRINTF(EthernetIntr
,
1229 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
1230 interrupts
, regs
.isr
, regs
.imr
);
1232 if (!(regs
.isr
& regs
.imr
))
1237 NSGigE::devIntrChangeMask()
1239 DPRINTF(EthernetIntr
, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
1240 regs
.isr
, regs
.imr
, regs
.isr
& regs
.imr
);
1242 if (regs
.isr
& regs
.imr
)
1243 cpuIntrPost(curTick
);
1249 NSGigE::cpuIntrPost(Tick when
)
1251 // If the interrupt you want to post is later than an interrupt
1252 // already scheduled, just let it post in the coming one and don't
1253 // schedule another.
1254 // HOWEVER, must be sure that the scheduled intrTick is in the
1255 // future (this was formerly the source of a bug)
1257 * @todo this warning should be removed and the intrTick code should
1260 assert(when
>= curTick
);
1261 assert(intrTick
>= curTick
|| intrTick
== 0);
1262 if (when
> intrTick
&& intrTick
!= 0) {
1263 DPRINTF(EthernetIntr
, "don't need to schedule event...intrTick=%d\n",
1269 if (intrTick
< curTick
) {
1274 DPRINTF(EthernetIntr
, "going to schedule an interrupt for intrTick=%d\n",
1278 intrEvent
->squash();
1279 intrEvent
= new IntrEvent(this, true);
1280 intrEvent
->schedule(intrTick
);
1284 NSGigE::cpuInterrupt()
1286 assert(intrTick
== curTick
);
1288 // Whether or not there's a pending interrupt, we don't care about
1293 // Don't send an interrupt if there's already one
1294 if (cpuPendingIntr
) {
1295 DPRINTF(EthernetIntr
,
1296 "would send an interrupt now, but there's already pending\n");
1299 cpuPendingIntr
= true;
1301 DPRINTF(EthernetIntr
, "posting interrupt\n");
1307 NSGigE::cpuIntrClear()
1309 if (!cpuPendingIntr
)
1313 intrEvent
->squash();
1319 cpuPendingIntr
= false;
1321 DPRINTF(EthernetIntr
, "clearing interrupt\n");
1326 NSGigE::cpuIntrPending() const
1327 { return cpuPendingIntr
; }
1333 DPRINTF(Ethernet
, "transmit reset\n");
1338 assert(txDescCnt
== 0);
1341 assert(txDmaState
== dmaIdle
);
1347 DPRINTF(Ethernet
, "receive reset\n");
1350 assert(rxPktBytes
== 0);
1353 assert(rxDescCnt
== 0);
1354 assert(rxDmaState
== dmaIdle
);
1362 memset(®s
, 0, sizeof(regs
));
1363 regs
.config
= (CFGR_LNKSTS
| CFGR_TBI_EN
| CFGR_MODE_1000
);
1365 regs
.txcfg
= 0x120; // set drain threshold to 1024 bytes and
1366 // fill threshold to 32 bytes
1367 regs
.rxcfg
= 0x4; // set drain threshold to 16 bytes
1368 regs
.srr
= 0x0103; // set the silicon revision to rev B or 0x103
1369 regs
.mibc
= MIBC_FRZ
;
1370 regs
.vdr
= 0x81; // set the vlan tag type to 802.1q
1371 regs
.tesr
= 0xc000; // TBI capable of both full and half duplex
1373 extstsEnable
= false;
1374 acceptBroadcast
= false;
1375 acceptMulticast
= false;
1376 acceptUnicast
= false;
1377 acceptPerfect
= false;
1382 NSGigE::rxDmaReadCopy()
1384 assert(rxDmaState
== dmaReading
);
1386 physmem
->dma_read((uint8_t *)rxDmaData
, rxDmaAddr
, rxDmaLen
);
1387 rxDmaState
= dmaIdle
;
1389 DPRINTF(EthernetDMA
, "rx dma read paddr=%#x len=%d\n",
1390 rxDmaAddr
, rxDmaLen
);
1391 DDUMP(EthernetDMA
, rxDmaData
, rxDmaLen
);
1395 NSGigE::doRxDmaRead()
1397 assert(rxDmaState
== dmaIdle
|| rxDmaState
== dmaReadWaiting
);
1398 rxDmaState
= dmaReading
;
1400 if (dmaInterface
&& !rxDmaFree
) {
1401 if (dmaInterface
->busy())
1402 rxDmaState
= dmaReadWaiting
;
1404 dmaInterface
->doDMA(Read
, rxDmaAddr
, rxDmaLen
, curTick
,
1405 &rxDmaReadEvent
, true);
1409 if (dmaReadDelay
== 0 && dmaReadFactor
== 0) {
1414 Tick factor
= ((rxDmaLen
+ ULL(63)) >> ULL(6)) * dmaReadFactor
;
1415 Tick start
= curTick
+ dmaReadDelay
+ factor
;
1416 rxDmaReadEvent
.schedule(start
);
1421 NSGigE::rxDmaReadDone()
1423 assert(rxDmaState
== dmaReading
);
1426 // If the transmit state machine has a pending DMA, let it go first
1427 if (txDmaState
== dmaReadWaiting
|| txDmaState
== dmaWriteWaiting
)
1434 NSGigE::rxDmaWriteCopy()
1436 assert(rxDmaState
== dmaWriting
);
1438 physmem
->dma_write(rxDmaAddr
, (uint8_t *)rxDmaData
, rxDmaLen
);
1439 rxDmaState
= dmaIdle
;
1441 DPRINTF(EthernetDMA
, "rx dma write paddr=%#x len=%d\n",
1442 rxDmaAddr
, rxDmaLen
);
1443 DDUMP(EthernetDMA
, rxDmaData
, rxDmaLen
);
1447 NSGigE::doRxDmaWrite()
1449 assert(rxDmaState
== dmaIdle
|| rxDmaState
== dmaWriteWaiting
);
1450 rxDmaState
= dmaWriting
;
1452 if (dmaInterface
&& !rxDmaFree
) {
1453 if (dmaInterface
->busy())
1454 rxDmaState
= dmaWriteWaiting
;
1456 dmaInterface
->doDMA(WriteInvalidate
, rxDmaAddr
, rxDmaLen
, curTick
,
1457 &rxDmaWriteEvent
, true);
1461 if (dmaWriteDelay
== 0 && dmaWriteFactor
== 0) {
1466 Tick factor
= ((rxDmaLen
+ ULL(63)) >> ULL(6)) * dmaWriteFactor
;
1467 Tick start
= curTick
+ dmaWriteDelay
+ factor
;
1468 rxDmaWriteEvent
.schedule(start
);
1473 NSGigE::rxDmaWriteDone()
1475 assert(rxDmaState
== dmaWriting
);
1478 // If the transmit state machine has a pending DMA, let it go first
1479 if (txDmaState
== dmaReadWaiting
|| txDmaState
== dmaWriteWaiting
)
1488 DPRINTF(EthernetSM
, "receive kick rxState=%s (rxBuf.size=%d)\n",
1489 NsRxStateStrings
[rxState
], rxFifo
.size());
1493 if (rxKickTick
> curTick
) {
1494 DPRINTF(EthernetSM
, "receive kick exiting, can't run till %d\n",
1500 // Go to the next state machine clock tick.
1501 rxKickTick
= curTick
+ cycles(1);
1504 switch(rxDmaState
) {
1505 case dmaReadWaiting
:
1509 case dmaWriteWaiting
:
1517 // see state machine from spec for details
1518 // the way this works is, if you finish work on one state and can
1519 // go directly to another, you do that through jumping to the
1520 // label "next". however, if you have intermediate work, like DMA
1521 // so that you can't go to the next state yet, you go to exit and
1522 // exit the loop. however, when the DMA is done it will trigger
1523 // an event and come back to this loop.
1527 DPRINTF(EthernetSM
, "Receive Disabled! Nothing to do.\n");
1532 rxState
= rxDescRefr
;
1534 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1535 rxDmaData
= &rxDescCache
+ offsetof(ns_desc
, link
);
1536 rxDmaLen
= sizeof(rxDescCache
.link
);
1537 rxDmaFree
= dmaDescFree
;
1540 descDmaRdBytes
+= rxDmaLen
;
1545 rxState
= rxDescRead
;
1547 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1548 rxDmaData
= &rxDescCache
;
1549 rxDmaLen
= sizeof(ns_desc
);
1550 rxDmaFree
= dmaDescFree
;
1553 descDmaRdBytes
+= rxDmaLen
;
1561 if (rxDmaState
!= dmaIdle
)
1564 rxState
= rxAdvance
;
1568 if (rxDmaState
!= dmaIdle
)
1571 DPRINTF(EthernetDesc
, "rxDescCache: addr=%08x read descriptor\n",
1572 regs
.rxdp
& 0x3fffffff);
1573 DPRINTF(EthernetDesc
,
1574 "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
1575 rxDescCache
.link
, rxDescCache
.bufptr
, rxDescCache
.cmdsts
,
1576 rxDescCache
.extsts
);
1578 if (rxDescCache
.cmdsts
& CMDSTS_OWN
) {
1579 devIntrPost(ISR_RXIDLE
);
1583 rxState
= rxFifoBlock
;
1584 rxFragPtr
= rxDescCache
.bufptr
;
1585 rxDescCnt
= rxDescCache
.cmdsts
& CMDSTS_LEN_MASK
;
1592 * @todo in reality, we should be able to start processing
1593 * the packet as it arrives, and not have to wait for the
1594 * full packet ot be in the receive fifo.
1599 DPRINTF(EthernetSM
, "****processing receive of new packet****\n");
1601 // If we don't have a packet, grab a new one from the fifo.
1602 rxPacket
= rxFifo
.front();
1603 rxPktBytes
= rxPacket
->length
;
1604 rxPacketBufPtr
= rxPacket
->data
;
1607 if (DTRACE(Ethernet
)) {
1610 DPRINTF(Ethernet
, "ID is %d\n", ip
->id());
1614 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1615 tcp
->sport(), tcp
->dport(), tcp
->seq(),
1622 // sanity check - i think the driver behaves like this
1623 assert(rxDescCnt
>= rxPktBytes
);
1628 // dont' need the && rxDescCnt > 0 if driver sanity check
1630 if (rxPktBytes
> 0) {
1631 rxState
= rxFragWrite
;
1632 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
1634 rxXferLen
= rxPktBytes
;
1636 rxDmaAddr
= rxFragPtr
& 0x3fffffff;
1637 rxDmaData
= rxPacketBufPtr
;
1638 rxDmaLen
= rxXferLen
;
1639 rxDmaFree
= dmaDataFree
;
1645 rxState
= rxDescWrite
;
1647 //if (rxPktBytes == 0) { /* packet is done */
1648 assert(rxPktBytes
== 0);
1649 DPRINTF(EthernetSM
, "done with receiving packet\n");
1651 rxDescCache
.cmdsts
|= CMDSTS_OWN
;
1652 rxDescCache
.cmdsts
&= ~CMDSTS_MORE
;
1653 rxDescCache
.cmdsts
|= CMDSTS_OK
;
1654 rxDescCache
.cmdsts
&= 0xffff0000;
1655 rxDescCache
.cmdsts
+= rxPacket
->length
; //i.e. set CMDSTS_SIZE
1659 * all the driver uses these are for its own stats keeping
1660 * which we don't care about, aren't necessary for
1661 * functionality and doing this would just slow us down.
1662 * if they end up using this in a later version for
1663 * functional purposes, just undef
1665 if (rxFilterEnable
) {
1666 rxDescCache
.cmdsts
&= ~CMDSTS_DEST_MASK
;
1667 const EthAddr
&dst
= rxFifoFront()->dst();
1669 rxDescCache
.cmdsts
|= CMDSTS_DEST_SELF
;
1670 if (dst
->multicast())
1671 rxDescCache
.cmdsts
|= CMDSTS_DEST_MULTI
;
1672 if (dst
->broadcast())
1673 rxDescCache
.cmdsts
|= CMDSTS_DEST_MASK
;
1678 if (extstsEnable
&& ip
) {
1679 rxDescCache
.extsts
|= EXTSTS_IPPKT
;
1681 if (cksum(ip
) != 0) {
1682 DPRINTF(EthernetCksum
, "Rx IP Checksum Error\n");
1683 rxDescCache
.extsts
|= EXTSTS_IPERR
;
1688 rxDescCache
.extsts
|= EXTSTS_TCPPKT
;
1690 if (cksum(tcp
) != 0) {
1691 DPRINTF(EthernetCksum
, "Rx TCP Checksum Error\n");
1692 rxDescCache
.extsts
|= EXTSTS_TCPERR
;
1696 rxDescCache
.extsts
|= EXTSTS_UDPPKT
;
1698 if (cksum(udp
) != 0) {
1699 DPRINTF(EthernetCksum
, "Rx UDP Checksum Error\n");
1700 rxDescCache
.extsts
|= EXTSTS_UDPERR
;
1707 * the driver seems to always receive into desc buffers
1708 * of size 1514, so you never have a pkt that is split
1709 * into multiple descriptors on the receive side, so
1710 * i don't implement that case, hence the assert above.
1713 DPRINTF(EthernetDesc
,
1714 "rxDescCache: addr=%08x writeback cmdsts extsts\n",
1715 regs
.rxdp
& 0x3fffffff);
1716 DPRINTF(EthernetDesc
,
1717 "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
1718 rxDescCache
.link
, rxDescCache
.bufptr
, rxDescCache
.cmdsts
,
1719 rxDescCache
.extsts
);
1721 rxDmaAddr
= (regs
.rxdp
+ offsetof(ns_desc
, cmdsts
)) & 0x3fffffff;
1722 rxDmaData
= &(rxDescCache
.cmdsts
);
1723 rxDmaLen
= sizeof(rxDescCache
.cmdsts
) + sizeof(rxDescCache
.extsts
);
1724 rxDmaFree
= dmaDescFree
;
1727 descDmaWrBytes
+= rxDmaLen
;
1735 if (rxDmaState
!= dmaIdle
)
1738 rxPacketBufPtr
+= rxXferLen
;
1739 rxFragPtr
+= rxXferLen
;
1740 rxPktBytes
-= rxXferLen
;
1742 rxState
= rxFifoBlock
;
1746 if (rxDmaState
!= dmaIdle
)
1749 assert(rxDescCache
.cmdsts
& CMDSTS_OWN
);
1751 assert(rxPacket
== 0);
1752 devIntrPost(ISR_RXOK
);
1754 if (rxDescCache
.cmdsts
& CMDSTS_INTR
)
1755 devIntrPost(ISR_RXDESC
);
1758 DPRINTF(EthernetSM
, "Halting the RX state machine\n");
1762 rxState
= rxAdvance
;
1766 if (rxDescCache
.link
== 0) {
1767 devIntrPost(ISR_RXIDLE
);
1772 rxState
= rxDescRead
;
1773 regs
.rxdp
= rxDescCache
.link
;
1776 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1777 rxDmaData
= &rxDescCache
;
1778 rxDmaLen
= sizeof(ns_desc
);
1779 rxDmaFree
= dmaDescFree
;
1787 panic("Invalid rxState!");
1790 DPRINTF(EthernetSM
, "entering next rxState=%s\n",
1791 NsRxStateStrings
[rxState
]);
1796 * @todo do we want to schedule a future kick?
1798 DPRINTF(EthernetSM
, "rx state machine exited rxState=%s\n",
1799 NsRxStateStrings
[rxState
]);
1801 if (clock
&& !rxKickEvent
.scheduled())
1802 rxKickEvent
.schedule(rxKickTick
);
// NOTE(review): line-shredded fragment of NSGigE::transmit(); the
// function header, braces and several statements (original lines
// 1805-1807, 1810-1812, 1816, 1819-1830, 1833-1839, 1845-1848) were
// lost in extraction, so only comments are added here.
1808 if (txFifo
.empty()) {
// Nothing queued for the wire: log and bail out.
1809 DPRINTF(Ethernet
, "nothing to transmit\n");
1813 DPRINTF(Ethernet
, "Attempt Pkt Transmit: txFifo length=%d\n",
// Hand the head-of-queue packet to the peer interface; the block below
// runs only when the send succeeds.
1815 if (interface
->sendPacket(txFifo
.front())) {
// Tracing-only section: decode IP/TCP headers for debug output.
1817 if (DTRACE(Ethernet
)) {
1818 IpPtr
ip(txFifo
.front());
1820 DPRINTF(Ethernet
, "ID is %d\n", ip
->id());
1824 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1825 tcp
->sport(), tcp
->dport(), tcp
->seq(), tcp
->ack());
1831 DDUMP(EthernetData
, txFifo
.front()->data
, txFifo
.front()->length
);
// Stats: account the transmitted bytes.
1832 txBytes
+= txFifo
.front()->length
;
1835 DPRINTF(Ethernet
, "Successful Xmit! now txFifoAvail is %d\n",
1840 * normally do a writeback of the descriptor here, and ONLY
1841 * after that is done, send this interrupt. but since our
1842 * stuff never actually fails, just do this interrupt here,
1843 * otherwise the code has to stray from this nice format.
1844 * besides, it's functionally the same.
1846 devIntrPost(ISR_TXOK
);
// More packets pending and no transmit event queued: retry after
// retryTime ticks.
1849 if (!txFifo
.empty() && !txEvent
.scheduled()) {
1850 DPRINTF(Ethernet
, "reschedule transmit\n");
1851 txEvent
.schedule(curTick
+ retryTime
);
1856 NSGigE::txDmaReadCopy()
1858 assert(txDmaState
== dmaReading
);
1860 physmem
->dma_read((uint8_t *)txDmaData
, txDmaAddr
, txDmaLen
);
1861 txDmaState
= dmaIdle
;
1863 DPRINTF(EthernetDMA
, "tx dma read paddr=%#x len=%d\n",
1864 txDmaAddr
, txDmaLen
);
1865 DDUMP(EthernetDMA
, txDmaData
, txDmaLen
);
// NOTE(review): line-shredded fragment of NSGigE::doTxDmaRead();
// original lines 1870, 1873, 1877, 1880-1887 and 1891 (braces,
// else-branch, returns and the zero-delay fast path) are missing from
// this extraction, so only comments are added here.
1869 NSGigE::doTxDmaRead()
// Precondition: the TX DMA engine is idle or retrying a stalled read.
1871 assert(txDmaState
== dmaIdle
|| txDmaState
== dmaReadWaiting
)
;
1872 txDmaState
= dmaReading
;
// With a timing DMA interface present (and descriptor DMA not modelled
// as free), hand the read to the bus model...
1874 if (dmaInterface
&& !txDmaFree
) {
// ...unless the interface is busy, in which case remember that we are
// waiting so the state machine can retry later.
1875 if (dmaInterface
->busy())
1876 txDmaState
= dmaReadWaiting
;
1878 dmaInterface
->doDMA(Read
, txDmaAddr
, txDmaLen
, curTick
,
1879 &txDmaReadEvent
, true);
// Functional path: no fixed delay and no per-byte factor (fast-path
// body lost in extraction gap 1884-1886).
1883 if (dmaReadDelay
== 0 && dmaReadFactor
== 0.0) {
// Otherwise model latency: a fixed delay plus a factor applied per
// 64-byte chunk ((len + 63) >> 6).
1888 Tick factor
= ((txDmaLen
+ ULL(63)) >> ULL(6)) * dmaReadFactor
;
1889 Tick start
= curTick
+ dmaReadDelay
+ factor
;
1890 txDmaReadEvent
.schedule(start
);
// NOTE(review): line-shredded fragment of NSGigE::txDmaReadDone();
// the statements following this guard (original lines 1898-1905) are
// missing from the extraction, so only comments are added here.
1895 NSGigE::txDmaReadDone()
// Only valid while a TX DMA read is outstanding.
1897 assert(txDmaState
== dmaReading
);
1900 // If the receive state machine has a pending DMA, let it go first
1901 if (rxDmaState
== dmaReadWaiting
|| rxDmaState
== dmaWriteWaiting
)
1908 NSGigE::txDmaWriteCopy()
1910 assert(txDmaState
== dmaWriting
);
1912 physmem
->dma_write(txDmaAddr
, (uint8_t *)txDmaData
, txDmaLen
);
1913 txDmaState
= dmaIdle
;
1915 DPRINTF(EthernetDMA
, "tx dma write paddr=%#x len=%d\n",
1916 txDmaAddr
, txDmaLen
);
1917 DDUMP(EthernetDMA
, txDmaData
, txDmaLen
);
// NOTE(review): line-shredded fragment of NSGigE::doTxDmaWrite();
// original lines 1922, 1925, 1929, 1932-1939 and 1943 (braces,
// else-branch, returns and the zero-delay fast path) are missing from
// this extraction, so only comments are added here.
1921 NSGigE::doTxDmaWrite()
// Precondition: the TX DMA engine is idle or retrying a stalled write.
1923 assert(txDmaState
== dmaIdle
|| txDmaState
== dmaWriteWaiting
)
;
1924 txDmaState
= dmaWriting
;
// With a timing DMA interface present (and descriptor DMA not modelled
// as free), issue a WriteInvalidate DMA command on the bus model...
1926 if (dmaInterface
&& !txDmaFree
) {
// ...unless the interface is busy, in which case remember that we are
// waiting so the state machine can retry later.
1927 if (dmaInterface
->busy())
1928 txDmaState
= dmaWriteWaiting
;
1930 dmaInterface
->doDMA(WriteInvalidate
, txDmaAddr
, txDmaLen
, curTick
,
1931 &txDmaWriteEvent
, true);
// Functional path: no fixed delay and no per-byte factor (fast-path
// body lost in extraction gap 1936-1939).
1935 if (dmaWriteDelay
== 0 && dmaWriteFactor
== 0.0) {
// Otherwise model latency: a fixed delay plus a factor applied per
// 64-byte chunk ((len + 63) >> 6).
1940 Tick factor
= ((txDmaLen
+ ULL(63)) >> ULL(6)) * dmaWriteFactor
;
1941 Tick start
= curTick
+ dmaWriteDelay
+ factor
;
1942 txDmaWriteEvent
.schedule(start
);
// NOTE(review): line-shredded fragment of NSGigE::txDmaWriteDone();
// the statements following this guard (original lines 1950-1957) are
// missing from the extraction, so only comments are added here.
1947 NSGigE::txDmaWriteDone()
// Only valid while a TX DMA write is outstanding.
1949 assert(txDmaState
== dmaWriting
);
1952 // If the receive state machine has a pending DMA, let it go first
1953 if (rxDmaState
== dmaReadWaiting
|| rxDmaState
== dmaWriteWaiting
)
1962 DPRINTF(EthernetSM
, "transmit kick txState=%s\n",
1963 NsTxStateStrings
[txState
]);
1967 if (txKickTick
> curTick
) {
1968 DPRINTF(EthernetSM
, "transmit kick exiting, can't run till %d\n",
1973 // Go to the next state machine clock tick.
1974 txKickTick
= curTick
+ cycles(1);
1977 switch(txDmaState
) {
1978 case dmaReadWaiting
:
1982 case dmaWriteWaiting
:
1993 DPRINTF(EthernetSM
, "Transmit disabled. Nothing to do.\n");
1998 txState
= txDescRefr
;
2000 txDmaAddr
= regs
.txdp
& 0x3fffffff;
2001 txDmaData
= &txDescCache
+ offsetof(ns_desc
, link
);
2002 txDmaLen
= sizeof(txDescCache
.link
);
2003 txDmaFree
= dmaDescFree
;
2006 descDmaRdBytes
+= txDmaLen
;
2012 txState
= txDescRead
;
2014 txDmaAddr
= regs
.txdp
& 0x3fffffff;
2015 txDmaData
= &txDescCache
;
2016 txDmaLen
= sizeof(ns_desc
);
2017 txDmaFree
= dmaDescFree
;
2020 descDmaRdBytes
+= txDmaLen
;
2028 if (txDmaState
!= dmaIdle
)
2031 txState
= txAdvance
;
2035 if (txDmaState
!= dmaIdle
)
2038 DPRINTF(EthernetDesc
, "txDescCache: addr=%08x read descriptor\n",
2039 regs
.txdp
& 0x3fffffff);
2040 DPRINTF(EthernetDesc
,
2041 "txDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
2042 txDescCache
.link
, txDescCache
.bufptr
, txDescCache
.cmdsts
,
2043 txDescCache
.extsts
);
2045 if (txDescCache
.cmdsts
& CMDSTS_OWN
) {
2046 txState
= txFifoBlock
;
2047 txFragPtr
= txDescCache
.bufptr
;
2048 txDescCnt
= txDescCache
.cmdsts
& CMDSTS_LEN_MASK
;
2050 devIntrPost(ISR_TXIDLE
);
2058 DPRINTF(EthernetSM
, "****starting the tx of a new packet****\n");
2059 txPacket
= new PacketData(16384);
2060 txPacketBufPtr
= txPacket
->data
;
2063 if (txDescCnt
== 0) {
2064 DPRINTF(EthernetSM
, "the txDescCnt == 0, done with descriptor\n");
2065 if (txDescCache
.cmdsts
& CMDSTS_MORE
) {
2066 DPRINTF(EthernetSM
, "there are more descriptors to come\n");
2067 txState
= txDescWrite
;
2069 txDescCache
.cmdsts
&= ~CMDSTS_OWN
;
2071 txDmaAddr
= regs
.txdp
+ offsetof(ns_desc
, cmdsts
);
2072 txDmaAddr
&= 0x3fffffff;
2073 txDmaData
= &(txDescCache
.cmdsts
);
2074 txDmaLen
= sizeof(txDescCache
.cmdsts
);
2075 txDmaFree
= dmaDescFree
;
2080 } else { /* this packet is totally done */
2081 DPRINTF(EthernetSM
, "This packet is done, let's wrap it up\n");
2082 /* deal with the the packet that just finished */
2083 if ((regs
.vtcr
& VTCR_PPCHK
) && extstsEnable
) {
2085 if (txDescCache
.extsts
& EXTSTS_UDPPKT
) {
2088 udp
->sum(cksum(udp
));
2090 } else if (txDescCache
.extsts
& EXTSTS_TCPPKT
) {
2093 tcp
->sum(cksum(tcp
));
2096 if (txDescCache
.extsts
& EXTSTS_IPPKT
) {
2103 txPacket
->length
= txPacketBufPtr
- txPacket
->data
;
2104 // this is just because the receive can't handle a
2105 // packet bigger want to make sure
2106 assert(txPacket
->length
<= 1514);
2110 txFifo
.push(txPacket
);
2114 * this following section is not tqo spec, but
2115 * functionally shouldn't be any different. normally,
2116 * the chip will wait til the transmit has occurred
2117 * before writing back the descriptor because it has
2118 * to wait to see that it was successfully transmitted
2119 * to decide whether to set CMDSTS_OK or not.
2120 * however, in the simulator since it is always
2121 * successfully transmitted, and writing it exactly to
2122 * spec would complicate the code, we just do it here
2125 txDescCache
.cmdsts
&= ~CMDSTS_OWN
;
2126 txDescCache
.cmdsts
|= CMDSTS_OK
;
2128 DPRINTF(EthernetDesc
,
2129 "txDesc writeback: cmdsts=%08x extsts=%08x\n",
2130 txDescCache
.cmdsts
, txDescCache
.extsts
);
2132 txDmaAddr
= regs
.txdp
+ offsetof(ns_desc
, cmdsts
);
2133 txDmaAddr
&= 0x3fffffff;
2134 txDmaData
= &(txDescCache
.cmdsts
);
2135 txDmaLen
= sizeof(txDescCache
.cmdsts
) +
2136 sizeof(txDescCache
.extsts
);
2137 txDmaFree
= dmaDescFree
;
2140 descDmaWrBytes
+= txDmaLen
;
2146 DPRINTF(EthernetSM
, "halting TX state machine\n");
2150 txState
= txAdvance
;
2156 DPRINTF(EthernetSM
, "this descriptor isn't done yet\n");
2157 if (!txFifo
.full()) {
2158 txState
= txFragRead
;
2161 * The number of bytes transferred is either whatever
2162 * is left in the descriptor (txDescCnt), or if there
2163 * is not enough room in the fifo, just whatever room
2164 * is left in the fifo
2166 txXferLen
= min
<uint32_t>(txDescCnt
, txFifo
.avail());
2168 txDmaAddr
= txFragPtr
& 0x3fffffff;
2169 txDmaData
= txPacketBufPtr
;
2170 txDmaLen
= txXferLen
;
2171 txDmaFree
= dmaDataFree
;
2176 txState
= txFifoBlock
;
2186 if (txDmaState
!= dmaIdle
)
2189 txPacketBufPtr
+= txXferLen
;
2190 txFragPtr
+= txXferLen
;
2191 txDescCnt
-= txXferLen
;
2192 txFifo
.reserve(txXferLen
);
2194 txState
= txFifoBlock
;
2198 if (txDmaState
!= dmaIdle
)
2201 if (txDescCache
.cmdsts
& CMDSTS_INTR
)
2202 devIntrPost(ISR_TXDESC
);
2205 DPRINTF(EthernetSM
, "halting TX state machine\n");
2209 txState
= txAdvance
;
2213 if (txDescCache
.link
== 0) {
2214 devIntrPost(ISR_TXIDLE
);
2218 txState
= txDescRead
;
2219 regs
.txdp
= txDescCache
.link
;
2222 txDmaAddr
= txDescCache
.link
& 0x3fffffff;
2223 txDmaData
= &txDescCache
;
2224 txDmaLen
= sizeof(ns_desc
);
2225 txDmaFree
= dmaDescFree
;
2233 panic("invalid state");
2236 DPRINTF(EthernetSM
, "entering next txState=%s\n",
2237 NsTxStateStrings
[txState
]);
2242 * @todo do we want to schedule a future kick?
2244 DPRINTF(EthernetSM
, "tx state machine exited txState=%s\n",
2245 NsTxStateStrings
[txState
]);
2247 if (clock
&& !txKickEvent
.scheduled())
2248 txKickEvent
.schedule(txKickTick
);
2252 NSGigE::transferDone()
2254 if (txFifo
.empty()) {
2255 DPRINTF(Ethernet
, "transfer complete: txFifo empty...nothing to do\n");
2259 DPRINTF(Ethernet
, "transfer complete: data in txFifo...schedule xmit\n");
2261 if (txEvent
.scheduled())
2262 txEvent
.reschedule(curTick
+ cycles(1));
2264 txEvent
.schedule(curTick
+ cycles(1));
// NOTE(review): line-shredded fragment of NSGigE::rxFilter(); the
// accept/return statements (e.g. original lines 2277-2286, 2290-2299)
// are largely missing from the extraction, so only comments are added.
2268 NSGigE::rxFilter(const PacketPtr
&packet
)
// View the raw packet as an Ethernet frame.
2270 EthPtr eth
= packet
;
2274 const EthAddr
&dst
= eth
->dst();
// Classify the destination MAC: unicast / broadcast / multicast.
2275 if (dst
.unicast()) {
2276 // If we're accepting all unicast addresses
2280 // If we make a perfect match
2281 if (acceptPerfect
&& dst
== rom
.perfectMatch
)
// ARP frames can be admitted independently of the address filters.
2284 if (acceptArp
&& eth
->type() == ETH_TYPE_ARP
)
2287 } else if (dst
.broadcast()) {
2288 // if we're accepting broadcasts
2289 if (acceptBroadcast
)
2292 } else if (dst
.multicast()) {
2293 // if we're accepting all multicasts
2294 if (acceptMulticast
)
// Fall-through: no accept rule matched, log and dump the dropped frame.
2300 DPRINTF(Ethernet
, "rxFilter drop\n");
2301 DDUMP(EthernetData
, packet
->data
, packet
->length
);
// NOTE(review): line-shredded fragment of NSGigE::recvPacket(); the
// enable guard, early returns and rx state-machine kick (several
// original lines in the 2311-2346 range) are missing from the
// extraction, so only comments are added here.
2308 NSGigE::recvPacket(PacketPtr packet
)
// Stats: count every byte arriving from the wire, accepted or not.
2310 rxBytes
+= packet
->length
;
2313 DPRINTF(Ethernet
, "Receiving packet from wire, rxFifoAvail=%d\n",
// Receive disabled: consume and drop the packet.
2317 DPRINTF(Ethernet
, "receive disabled...packet dropped\n");
2318 interface
->recvDone();
// Address filter rejected the frame: drop it as well.
2322 if (rxFilterEnable
&& rxFilter(packet
)) {
2323 DPRINTF(Ethernet
, "packet filtered...dropped\n");
2324 interface
->recvDone();
// Not enough room left in the receive fifo for this packet.
2328 if (rxFifo
.avail() < packet
->length
) {
2334 "packet won't fit in receive buffer...pkt ID %d dropped\n",
2337 DPRINTF(Ethernet
, "Seq=%d\n", tcp
->seq());
// Signal a receive-overrun interrupt to the driver.
2342 devIntrPost(ISR_RXORN
);
// Accept: queue the packet and acknowledge the wire.
2346 rxFifo
.push(packet
);
2347 interface
->recvDone();
2353 //=====================================================================
2357 NSGigE::serialize(ostream
&os
)
2359 // Serialize the PciDev base class
2360 PciDev::serialize(os
);
2363 * Finalize any DMA events now.
2365 if (rxDmaReadEvent
.scheduled())
2367 if (rxDmaWriteEvent
.scheduled())
2369 if (txDmaReadEvent
.scheduled())
2371 if (txDmaWriteEvent
.scheduled())
2375 * Serialize the device registers
2377 SERIALIZE_SCALAR(regs
.command
);
2378 SERIALIZE_SCALAR(regs
.config
);
2379 SERIALIZE_SCALAR(regs
.mear
);
2380 SERIALIZE_SCALAR(regs
.ptscr
);
2381 SERIALIZE_SCALAR(regs
.isr
);
2382 SERIALIZE_SCALAR(regs
.imr
);
2383 SERIALIZE_SCALAR(regs
.ier
);
2384 SERIALIZE_SCALAR(regs
.ihr
);
2385 SERIALIZE_SCALAR(regs
.txdp
);
2386 SERIALIZE_SCALAR(regs
.txdp_hi
);
2387 SERIALIZE_SCALAR(regs
.txcfg
);
2388 SERIALIZE_SCALAR(regs
.gpior
);
2389 SERIALIZE_SCALAR(regs
.rxdp
);
2390 SERIALIZE_SCALAR(regs
.rxdp_hi
);
2391 SERIALIZE_SCALAR(regs
.rxcfg
);
2392 SERIALIZE_SCALAR(regs
.pqcr
);
2393 SERIALIZE_SCALAR(regs
.wcsr
);
2394 SERIALIZE_SCALAR(regs
.pcr
);
2395 SERIALIZE_SCALAR(regs
.rfcr
);
2396 SERIALIZE_SCALAR(regs
.rfdr
);
2397 SERIALIZE_SCALAR(regs
.srr
);
2398 SERIALIZE_SCALAR(regs
.mibc
);
2399 SERIALIZE_SCALAR(regs
.vrcr
);
2400 SERIALIZE_SCALAR(regs
.vtcr
);
2401 SERIALIZE_SCALAR(regs
.vdr
);
2402 SERIALIZE_SCALAR(regs
.ccsr
);
2403 SERIALIZE_SCALAR(regs
.tbicr
);
2404 SERIALIZE_SCALAR(regs
.tbisr
);
2405 SERIALIZE_SCALAR(regs
.tanar
);
2406 SERIALIZE_SCALAR(regs
.tanlpar
);
2407 SERIALIZE_SCALAR(regs
.taner
);
2408 SERIALIZE_SCALAR(regs
.tesr
);
2410 SERIALIZE_ARRAY(rom
.perfectMatch
, ETH_ADDR_LEN
);
2412 SERIALIZE_SCALAR(ioEnable
);
2415 * Serialize the data Fifos
2417 rxFifo
.serialize("rxFifo", os
);
2418 txFifo
.serialize("txFifo", os
);
2421 * Serialize the various helper variables
2423 bool txPacketExists
= txPacket
;
2424 SERIALIZE_SCALAR(txPacketExists
);
2425 if (txPacketExists
) {
2426 txPacket
->length
= txPacketBufPtr
- txPacket
->data
;
2427 txPacket
->serialize("txPacket", os
);
2428 uint32_t txPktBufPtr
= (uint32_t) (txPacketBufPtr
- txPacket
->data
);
2429 SERIALIZE_SCALAR(txPktBufPtr
);
2432 bool rxPacketExists
= rxPacket
;
2433 SERIALIZE_SCALAR(rxPacketExists
);
2434 if (rxPacketExists
) {
2435 rxPacket
->serialize("rxPacket", os
);
2436 uint32_t rxPktBufPtr
= (uint32_t) (rxPacketBufPtr
- rxPacket
->data
);
2437 SERIALIZE_SCALAR(rxPktBufPtr
);
2440 SERIALIZE_SCALAR(txXferLen
);
2441 SERIALIZE_SCALAR(rxXferLen
);
2444 * Serialize DescCaches
2446 SERIALIZE_SCALAR(txDescCache
.link
);
2447 SERIALIZE_SCALAR(txDescCache
.bufptr
);
2448 SERIALIZE_SCALAR(txDescCache
.cmdsts
);
2449 SERIALIZE_SCALAR(txDescCache
.extsts
);
2450 SERIALIZE_SCALAR(rxDescCache
.link
);
2451 SERIALIZE_SCALAR(rxDescCache
.bufptr
);
2452 SERIALIZE_SCALAR(rxDescCache
.cmdsts
);
2453 SERIALIZE_SCALAR(rxDescCache
.extsts
);
2454 SERIALIZE_SCALAR(extstsEnable
);
2457 * Serialize tx state machine
2459 int txState
= this->txState
;
2460 SERIALIZE_SCALAR(txState
);
2461 SERIALIZE_SCALAR(txEnable
);
2462 SERIALIZE_SCALAR(CTDD
);
2463 SERIALIZE_SCALAR(txFragPtr
);
2464 SERIALIZE_SCALAR(txDescCnt
);
2465 int txDmaState
= this->txDmaState
;
2466 SERIALIZE_SCALAR(txDmaState
);
2467 SERIALIZE_SCALAR(txKickTick
);
2470 * Serialize rx state machine
2472 int rxState
= this->rxState
;
2473 SERIALIZE_SCALAR(rxState
);
2474 SERIALIZE_SCALAR(rxEnable
);
2475 SERIALIZE_SCALAR(CRDD
);
2476 SERIALIZE_SCALAR(rxPktBytes
);
2477 SERIALIZE_SCALAR(rxFragPtr
);
2478 SERIALIZE_SCALAR(rxDescCnt
);
2479 int rxDmaState
= this->rxDmaState
;
2480 SERIALIZE_SCALAR(rxDmaState
);
2481 SERIALIZE_SCALAR(rxKickTick
);
2484 * If there's a pending transmit, store the time so we can
2485 * reschedule it later
2487 Tick transmitTick
= txEvent
.scheduled() ? txEvent
.when() - curTick
: 0;
2488 SERIALIZE_SCALAR(transmitTick
);
2491 * receive address filter settings
2493 SERIALIZE_SCALAR(rxFilterEnable
);
2494 SERIALIZE_SCALAR(acceptBroadcast
);
2495 SERIALIZE_SCALAR(acceptMulticast
);
2496 SERIALIZE_SCALAR(acceptUnicast
);
2497 SERIALIZE_SCALAR(acceptPerfect
);
2498 SERIALIZE_SCALAR(acceptArp
);
2501 * Keep track of pending interrupt status.
2503 SERIALIZE_SCALAR(intrTick
);
2504 SERIALIZE_SCALAR(cpuPendingIntr
);
2505 Tick intrEventTick
= 0;
2507 intrEventTick
= intrEvent
->when();
2508 SERIALIZE_SCALAR(intrEventTick
);
2513 NSGigE::unserialize(Checkpoint
*cp
, const std::string
§ion
)
2515 // Unserialize the PciDev base class
2516 PciDev::unserialize(cp
, section
);
2518 UNSERIALIZE_SCALAR(regs
.command
);
2519 UNSERIALIZE_SCALAR(regs
.config
);
2520 UNSERIALIZE_SCALAR(regs
.mear
);
2521 UNSERIALIZE_SCALAR(regs
.ptscr
);
2522 UNSERIALIZE_SCALAR(regs
.isr
);
2523 UNSERIALIZE_SCALAR(regs
.imr
);
2524 UNSERIALIZE_SCALAR(regs
.ier
);
2525 UNSERIALIZE_SCALAR(regs
.ihr
);
2526 UNSERIALIZE_SCALAR(regs
.txdp
);
2527 UNSERIALIZE_SCALAR(regs
.txdp_hi
);
2528 UNSERIALIZE_SCALAR(regs
.txcfg
);
2529 UNSERIALIZE_SCALAR(regs
.gpior
);
2530 UNSERIALIZE_SCALAR(regs
.rxdp
);
2531 UNSERIALIZE_SCALAR(regs
.rxdp_hi
);
2532 UNSERIALIZE_SCALAR(regs
.rxcfg
);
2533 UNSERIALIZE_SCALAR(regs
.pqcr
);
2534 UNSERIALIZE_SCALAR(regs
.wcsr
);
2535 UNSERIALIZE_SCALAR(regs
.pcr
);
2536 UNSERIALIZE_SCALAR(regs
.rfcr
);
2537 UNSERIALIZE_SCALAR(regs
.rfdr
);
2538 UNSERIALIZE_SCALAR(regs
.srr
);
2539 UNSERIALIZE_SCALAR(regs
.mibc
);
2540 UNSERIALIZE_SCALAR(regs
.vrcr
);
2541 UNSERIALIZE_SCALAR(regs
.vtcr
);
2542 UNSERIALIZE_SCALAR(regs
.vdr
);
2543 UNSERIALIZE_SCALAR(regs
.ccsr
);
2544 UNSERIALIZE_SCALAR(regs
.tbicr
);
2545 UNSERIALIZE_SCALAR(regs
.tbisr
);
2546 UNSERIALIZE_SCALAR(regs
.tanar
);
2547 UNSERIALIZE_SCALAR(regs
.tanlpar
);
2548 UNSERIALIZE_SCALAR(regs
.taner
);
2549 UNSERIALIZE_SCALAR(regs
.tesr
);
2551 UNSERIALIZE_ARRAY(rom
.perfectMatch
, ETH_ADDR_LEN
);
2553 UNSERIALIZE_SCALAR(ioEnable
);
2556 * unserialize the data fifos
2558 rxFifo
.unserialize("rxFifo", cp
, section
);
2559 txFifo
.unserialize("txFifo", cp
, section
);
2562 * unserialize the various helper variables
2564 bool txPacketExists
;
2565 UNSERIALIZE_SCALAR(txPacketExists
);
2566 if (txPacketExists
) {
2567 txPacket
= new PacketData(16384);
2568 txPacket
->unserialize("txPacket", cp
, section
);
2569 uint32_t txPktBufPtr
;
2570 UNSERIALIZE_SCALAR(txPktBufPtr
);
2571 txPacketBufPtr
= (uint8_t *) txPacket
->data
+ txPktBufPtr
;
2575 bool rxPacketExists
;
2576 UNSERIALIZE_SCALAR(rxPacketExists
);
2578 if (rxPacketExists
) {
2579 rxPacket
= new PacketData(16384);
2580 rxPacket
->unserialize("rxPacket", cp
, section
);
2581 uint32_t rxPktBufPtr
;
2582 UNSERIALIZE_SCALAR(rxPktBufPtr
);
2583 rxPacketBufPtr
= (uint8_t *) rxPacket
->data
+ rxPktBufPtr
;
2587 UNSERIALIZE_SCALAR(txXferLen
);
2588 UNSERIALIZE_SCALAR(rxXferLen
);
2591 * Unserialize DescCaches
2593 UNSERIALIZE_SCALAR(txDescCache
.link
);
2594 UNSERIALIZE_SCALAR(txDescCache
.bufptr
);
2595 UNSERIALIZE_SCALAR(txDescCache
.cmdsts
);
2596 UNSERIALIZE_SCALAR(txDescCache
.extsts
);
2597 UNSERIALIZE_SCALAR(rxDescCache
.link
);
2598 UNSERIALIZE_SCALAR(rxDescCache
.bufptr
);
2599 UNSERIALIZE_SCALAR(rxDescCache
.cmdsts
);
2600 UNSERIALIZE_SCALAR(rxDescCache
.extsts
);
2601 UNSERIALIZE_SCALAR(extstsEnable
);
2604 * unserialize tx state machine
2607 UNSERIALIZE_SCALAR(txState
);
2608 this->txState
= (TxState
) txState
;
2609 UNSERIALIZE_SCALAR(txEnable
);
2610 UNSERIALIZE_SCALAR(CTDD
);
2611 UNSERIALIZE_SCALAR(txFragPtr
);
2612 UNSERIALIZE_SCALAR(txDescCnt
);
2614 UNSERIALIZE_SCALAR(txDmaState
);
2615 this->txDmaState
= (DmaState
) txDmaState
;
2616 UNSERIALIZE_SCALAR(txKickTick
);
2618 txKickEvent
.schedule(txKickTick
);
2621 * unserialize rx state machine
2624 UNSERIALIZE_SCALAR(rxState
);
2625 this->rxState
= (RxState
) rxState
;
2626 UNSERIALIZE_SCALAR(rxEnable
);
2627 UNSERIALIZE_SCALAR(CRDD
);
2628 UNSERIALIZE_SCALAR(rxPktBytes
);
2629 UNSERIALIZE_SCALAR(rxFragPtr
);
2630 UNSERIALIZE_SCALAR(rxDescCnt
);
2632 UNSERIALIZE_SCALAR(rxDmaState
);
2633 this->rxDmaState
= (DmaState
) rxDmaState
;
2634 UNSERIALIZE_SCALAR(rxKickTick
);
2636 rxKickEvent
.schedule(rxKickTick
);
2639 * If there's a pending transmit, reschedule it now
2642 UNSERIALIZE_SCALAR(transmitTick
);
2644 txEvent
.schedule(curTick
+ transmitTick
);
2647 * unserialize receive address filter settings
2649 UNSERIALIZE_SCALAR(rxFilterEnable
);
2650 UNSERIALIZE_SCALAR(acceptBroadcast
);
2651 UNSERIALIZE_SCALAR(acceptMulticast
);
2652 UNSERIALIZE_SCALAR(acceptUnicast
);
2653 UNSERIALIZE_SCALAR(acceptPerfect
);
2654 UNSERIALIZE_SCALAR(acceptArp
);
2657 * Keep track of pending interrupt status.
2659 UNSERIALIZE_SCALAR(intrTick
);
2660 UNSERIALIZE_SCALAR(cpuPendingIntr
);
2662 UNSERIALIZE_SCALAR(intrEventTick
);
2663 if (intrEventTick
) {
2664 intrEvent
= new IntrEvent(this, true);
2665 intrEvent
->schedule(intrEventTick
);
2669 * re-add addrRanges to bus bridges
2672 pioInterface
->addAddrRange(RangeSize(BARAddrs
[0], BARSize
[0]));
2673 pioInterface
->addAddrRange(RangeSize(BARAddrs
[1], BARSize
[1]));
2678 NSGigE::cacheAccess(MemReqPtr
&req
)
2680 DPRINTF(EthernetPIO
, "timing access to paddr=%#x (daddr=%#x)\n",
2681 req
->paddr
, req
->paddr
- addr
);
2682 return curTick
+ pioLatency
;
2685 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt
)
2687 SimObjectParam
<EtherInt
*> peer
;
2688 SimObjectParam
<NSGigE
*> device
;
2690 END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt
)
2692 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt
)
2694 INIT_PARAM_DFLT(peer
, "peer interface", NULL
),
2695 INIT_PARAM(device
, "Ethernet device of this interface")
2697 END_INIT_SIM_OBJECT_PARAMS(NSGigEInt
)
2699 CREATE_SIM_OBJECT(NSGigEInt
)
2701 NSGigEInt
*dev_int
= new NSGigEInt(getInstanceName(), device
);
2703 EtherInt
*p
= (EtherInt
*)peer
;
2705 dev_int
->setPeer(p
);
2706 p
->setPeer(dev_int
);
// Register the NSGigEInt interface type with the simulator's object factory.
2712 REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt
)
2715 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE
)
2719 Param
<Tick
> tx_delay
;
2720 Param
<Tick
> rx_delay
;
2721 Param
<Tick
> intr_delay
;
2722 SimObjectParam
<MemoryController
*> mmu
;
2723 SimObjectParam
<PhysicalMemory
*> physmem
;
2724 Param
<bool> rx_filter
;
2725 Param
<string
> hardware_address
;
2726 SimObjectParam
<Bus
*> io_bus
;
2727 SimObjectParam
<Bus
*> payload_bus
;
2728 SimObjectParam
<HierParams
*> hier
;
2729 Param
<Tick
> pio_latency
;
2730 Param
<bool> dma_desc_free
;
2731 Param
<bool> dma_data_free
;
2732 Param
<Tick
> dma_read_delay
;
2733 Param
<Tick
> dma_write_delay
;
2734 Param
<Tick
> dma_read_factor
;
2735 Param
<Tick
> dma_write_factor
;
2736 SimObjectParam
<PciConfigAll
*> configspace
;
2737 SimObjectParam
<PciConfigData
*> configdata
;
2738 SimObjectParam
<Platform
*> platform
;
2739 Param
<uint32_t> pci_bus
;
2740 Param
<uint32_t> pci_dev
;
2741 Param
<uint32_t> pci_func
;
2742 Param
<uint32_t> tx_fifo_size
;
2743 Param
<uint32_t> rx_fifo_size
;
2744 Param
<uint32_t> m5reg
;
2745 Param
<bool> dma_no_allocate
;
2747 END_DECLARE_SIM_OBJECT_PARAMS(NSGigE
)
2749 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE
)
2751 INIT_PARAM(addr
, "Device Address"),
2752 INIT_PARAM(clock
, "State machine processor frequency"),
2753 INIT_PARAM(tx_delay
, "Transmit Delay"),
2754 INIT_PARAM(rx_delay
, "Receive Delay"),
2755 INIT_PARAM(intr_delay
, "Interrupt Delay in microseconds"),
2756 INIT_PARAM(mmu
, "Memory Controller"),
2757 INIT_PARAM(physmem
, "Physical Memory"),
2758 INIT_PARAM_DFLT(rx_filter
, "Enable Receive Filter", true),
2759 INIT_PARAM_DFLT(hardware_address
, "Ethernet Hardware Address",
2760 "00:99:00:00:00:01"),
2761 INIT_PARAM_DFLT(io_bus
, "The IO Bus to attach to for headers", NULL
),
2762 INIT_PARAM_DFLT(payload_bus
, "The IO Bus to attach to for payload", NULL
),
2763 INIT_PARAM_DFLT(hier
, "Hierarchy global variables", &defaultHierParams
),
2764 INIT_PARAM_DFLT(pio_latency
, "Programmed IO latency in bus cycles", 1),
2765 INIT_PARAM_DFLT(dma_desc_free
, "DMA of Descriptors is free", false),
2766 INIT_PARAM_DFLT(dma_data_free
, "DMA of Data is free", false),
2767 INIT_PARAM_DFLT(dma_read_delay
, "fixed delay for dma reads", 0),
2768 INIT_PARAM_DFLT(dma_write_delay
, "fixed delay for dma writes", 0),
2769 INIT_PARAM_DFLT(dma_read_factor
, "multiplier for dma reads", 0),
2770 INIT_PARAM_DFLT(dma_write_factor
, "multiplier for dma writes", 0),
2771 INIT_PARAM(configspace
, "PCI Configspace"),
2772 INIT_PARAM(configdata
, "PCI Config data"),
2773 INIT_PARAM(platform
, "Platform"),
2774 INIT_PARAM(pci_bus
, "PCI bus"),
2775 INIT_PARAM(pci_dev
, "PCI device number"),
2776 INIT_PARAM(pci_func
, "PCI function code"),
2777 INIT_PARAM_DFLT(tx_fifo_size
, "max size in bytes of txFifo", 131072),
2778 INIT_PARAM_DFLT(rx_fifo_size
, "max size in bytes of rxFifo", 131072),
2779 INIT_PARAM(m5reg
, "m5 register"),
2780 INIT_PARAM_DFLT(dma_no_allocate
, "Should DMA reads allocate cache lines", true)
2782 END_INIT_SIM_OBJECT_PARAMS(NSGigE
)
2785 CREATE_SIM_OBJECT(NSGigE
)
2787 NSGigE::Params
*params
= new NSGigE::Params
;
2789 params
->name
= getInstanceName();
2791 params
->configSpace
= configspace
;
2792 params
->configData
= configdata
;
2793 params
->plat
= platform
;
2794 params
->busNum
= pci_bus
;
2795 params
->deviceNum
= pci_dev
;
2796 params
->functionNum
= pci_func
;
2798 params
->clock
= clock
;
2799 params
->intr_delay
= intr_delay
;
2800 params
->pmem
= physmem
;
2801 params
->tx_delay
= tx_delay
;
2802 params
->rx_delay
= rx_delay
;
2803 params
->hier
= hier
;
2804 params
->header_bus
= io_bus
;
2805 params
->payload_bus
= payload_bus
;
2806 params
->pio_latency
= pio_latency
;
2807 params
->dma_desc_free
= dma_desc_free
;
2808 params
->dma_data_free
= dma_data_free
;
2809 params
->dma_read_delay
= dma_read_delay
;
2810 params
->dma_write_delay
= dma_write_delay
;
2811 params
->dma_read_factor
= dma_read_factor
;
2812 params
->dma_write_factor
= dma_write_factor
;
2813 params
->rx_filter
= rx_filter
;
2814 params
->eaddr
= hardware_address
;
2815 params
->tx_fifo_size
= tx_fifo_size
;
2816 params
->rx_fifo_size
= rx_fifo_size
;
2817 params
->m5reg
= m5reg
;
2818 params
->dma_no_allocate
= dma_no_allocate
;
2819 return new NSGigE(params
);
// Register the NSGigE device type with the simulator's object factory.
2822 REGISTER_SIM_OBJECT("NSGigE", NSGigE
)