2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 * Device module for modelling the National Semiconductor
31 * DP83820 ethernet controller. Does not support priority queueing
37 #include "base/inet.hh"
38 #include "cpu/exec_context.hh"
39 #include "dev/etherlink.hh"
40 #include "dev/ns_gige.hh"
41 #include "dev/pciconfigall.hh"
42 #include "mem/bus/bus.hh"
43 #include "mem/bus/dma_interface.hh"
44 #include "mem/bus/pio_interface.hh"
45 #include "mem/bus/pio_interface_impl.hh"
46 #include "mem/functional/memory_control.hh"
47 #include "mem/functional/physical.hh"
48 #include "sim/builder.hh"
49 #include "sim/debug.hh"
50 #include "sim/host.hh"
51 #include "sim/stats.hh"
52 #include "targetarch/vtophys.hh"
54 const char *NsRxStateStrings
[] =
65 const char *NsTxStateStrings
[] =
76 const char *NsDmaState
[] =
88 ///////////////////////////////////////////////////////////////////////
92 NSGigE::NSGigE(Params
*p
)
93 : PciDev(p
), ioEnable(false),
94 txFifo(p
->tx_fifo_size
), rxFifo(p
->rx_fifo_size
),
95 txPacket(0), rxPacket(0), txPacketBufPtr(NULL
), rxPacketBufPtr(NULL
),
96 txXferLen(0), rxXferLen(0), clock(p
->clock
),
97 txState(txIdle
), txEnable(false), CTDD(false),
98 txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle
), rxState(rxIdle
),
99 rxEnable(false), CRDD(false), rxPktBytes(0),
100 rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle
), extstsEnable(false),
101 rxDmaReadEvent(this), rxDmaWriteEvent(this),
102 txDmaReadEvent(this), txDmaWriteEvent(this),
103 dmaDescFree(p
->dma_desc_free
), dmaDataFree(p
->dma_data_free
),
104 txDelay(p
->tx_delay
), rxDelay(p
->rx_delay
),
105 rxKickTick(0), txKickTick(0),
106 txEvent(this), rxFilterEnable(p
->rx_filter
), acceptBroadcast(false),
107 acceptMulticast(false), acceptUnicast(false),
108 acceptPerfect(false), acceptArp(false),
109 physmem(p
->pmem
), intrTick(0), cpuPendingIntr(false),
110 intrEvent(0), interface(0)
113 pioInterface
= newPioInterface(name(), p
->hier
,
115 &NSGigE::cacheAccess
);
117 pioLatency
= p
->pio_latency
* p
->header_bus
->clockRate
;
120 dmaInterface
= new DMAInterface
<Bus
>(name() + ".dma",
125 dmaInterface
= new DMAInterface
<Bus
>(name() + ".dma",
129 } else if (p
->payload_bus
) {
130 pioInterface
= newPioInterface(name(), p
->hier
,
131 p
->payload_bus
, this,
132 &NSGigE::cacheAccess
);
134 pioLatency
= p
->pio_latency
* p
->payload_bus
->clockRate
;
136 dmaInterface
= new DMAInterface
<Bus
>(name() + ".dma",
143 intrDelay
= p
->intr_delay
;
144 dmaReadDelay
= p
->dma_read_delay
;
145 dmaWriteDelay
= p
->dma_write_delay
;
146 dmaReadFactor
= p
->dma_read_factor
;
147 dmaWriteFactor
= p
->dma_write_factor
;
150 memcpy(&rom
.perfectMatch
, p
->eaddr
.bytes(), ETH_ADDR_LEN
);
160 .name(name() + ".txBytes")
161 .desc("Bytes Transmitted")
166 .name(name() + ".rxBytes")
167 .desc("Bytes Received")
172 .name(name() + ".txPackets")
173 .desc("Number of Packets Transmitted")
178 .name(name() + ".rxPackets")
179 .desc("Number of Packets Received")
184 .name(name() + ".txIpChecksums")
185 .desc("Number of tx IP Checksums done by device")
191 .name(name() + ".rxIpChecksums")
192 .desc("Number of rx IP Checksums done by device")
198 .name(name() + ".txTcpChecksums")
199 .desc("Number of tx TCP Checksums done by device")
205 .name(name() + ".rxTcpChecksums")
206 .desc("Number of rx TCP Checksums done by device")
212 .name(name() + ".txUdpChecksums")
213 .desc("Number of tx UDP Checksums done by device")
219 .name(name() + ".rxUdpChecksums")
220 .desc("Number of rx UDP Checksums done by device")
226 .name(name() + ".descDMAReads")
227 .desc("Number of descriptors the device read w/ DMA")
232 .name(name() + ".descDMAWrites")
233 .desc("Number of descriptors the device wrote w/ DMA")
238 .name(name() + ".descDmaReadBytes")
239 .desc("number of descriptor bytes read w/ DMA")
244 .name(name() + ".descDmaWriteBytes")
245 .desc("number of descriptor bytes write w/ DMA")
250 .name(name() + ".txBandwidth")
251 .desc("Transmit Bandwidth (bits/s)")
257 .name(name() + ".rxBandwidth")
258 .desc("Receive Bandwidth (bits/s)")
264 .name(name() + ".totBandwidth")
265 .desc("Total Bandwidth (bits/s)")
271 .name(name() + ".totPackets")
272 .desc("Total Packets")
278 .name(name() + ".totBytes")
285 .name(name() + ".totPPS")
286 .desc("Total Tranmission Rate (packets/s)")
292 .name(name() + ".txPPS")
293 .desc("Packet Tranmission Rate (packets/s)")
299 .name(name() + ".rxPPS")
300 .desc("Packet Reception Rate (packets/s)")
306 .name(name() + ".postedSwi")
307 .desc("number of software interrupts posted to CPU")
312 .name(name() + ".totalSwi")
313 .desc("number of total Swi written to ISR")
318 .name(name() + ".coalescedSwi")
319 .desc("average number of Swi's coalesced into each post")
324 .name(name() + ".postedRxIdle")
325 .desc("number of rxIdle interrupts posted to CPU")
330 .name(name() + ".totalRxIdle")
331 .desc("number of total RxIdle written to ISR")
336 .name(name() + ".coalescedRxIdle")
337 .desc("average number of RxIdle's coalesced into each post")
342 .name(name() + ".postedRxOk")
343 .desc("number of RxOk interrupts posted to CPU")
348 .name(name() + ".totalRxOk")
349 .desc("number of total RxOk written to ISR")
354 .name(name() + ".coalescedRxOk")
355 .desc("average number of RxOk's coalesced into each post")
360 .name(name() + ".postedRxDesc")
361 .desc("number of RxDesc interrupts posted to CPU")
366 .name(name() + ".totalRxDesc")
367 .desc("number of total RxDesc written to ISR")
372 .name(name() + ".coalescedRxDesc")
373 .desc("average number of RxDesc's coalesced into each post")
378 .name(name() + ".postedTxOk")
379 .desc("number of TxOk interrupts posted to CPU")
384 .name(name() + ".totalTxOk")
385 .desc("number of total TxOk written to ISR")
390 .name(name() + ".coalescedTxOk")
391 .desc("average number of TxOk's coalesced into each post")
396 .name(name() + ".postedTxIdle")
397 .desc("number of TxIdle interrupts posted to CPU")
402 .name(name() + ".totalTxIdle")
403 .desc("number of total TxIdle written to ISR")
408 .name(name() + ".coalescedTxIdle")
409 .desc("average number of TxIdle's coalesced into each post")
414 .name(name() + ".postedTxDesc")
415 .desc("number of TxDesc interrupts posted to CPU")
420 .name(name() + ".totalTxDesc")
421 .desc("number of total TxDesc written to ISR")
426 .name(name() + ".coalescedTxDesc")
427 .desc("average number of TxDesc's coalesced into each post")
432 .name(name() + ".postedRxOrn")
433 .desc("number of RxOrn posted to CPU")
438 .name(name() + ".totalRxOrn")
439 .desc("number of total RxOrn written to ISR")
444 .name(name() + ".coalescedRxOrn")
445 .desc("average number of RxOrn's coalesced into each post")
450 .name(name() + ".coalescedTotal")
451 .desc("average number of interrupts coalesced into each post")
456 .name(name() + ".postedInterrupts")
457 .desc("number of posts to CPU")
462 .name(name() + ".droppedPackets")
463 .desc("number of packets dropped")
467 coalescedSwi
= totalSwi
/ postedInterrupts
;
468 coalescedRxIdle
= totalRxIdle
/ postedInterrupts
;
469 coalescedRxOk
= totalRxOk
/ postedInterrupts
;
470 coalescedRxDesc
= totalRxDesc
/ postedInterrupts
;
471 coalescedTxOk
= totalTxOk
/ postedInterrupts
;
472 coalescedTxIdle
= totalTxIdle
/ postedInterrupts
;
473 coalescedTxDesc
= totalTxDesc
/ postedInterrupts
;
474 coalescedRxOrn
= totalRxOrn
/ postedInterrupts
;
476 coalescedTotal
= (totalSwi
+ totalRxIdle
+ totalRxOk
+ totalRxDesc
+
477 totalTxOk
+ totalTxIdle
+ totalTxDesc
+
478 totalRxOrn
) / postedInterrupts
;
480 txBandwidth
= txBytes
* Stats::constant(8) / simSeconds
;
481 rxBandwidth
= rxBytes
* Stats::constant(8) / simSeconds
;
482 totBandwidth
= txBandwidth
+ rxBandwidth
;
483 totBytes
= txBytes
+ rxBytes
;
484 totPackets
= txPackets
+ rxPackets
;
486 txPacketRate
= txPackets
/ simSeconds
;
487 rxPacketRate
= rxPackets
/ simSeconds
;
491 * This is to read the PCI general configuration registers
494 NSGigE::ReadConfig(int offset
, int size
, uint8_t *data
)
496 if (offset
< PCI_DEVICE_SPECIFIC
)
497 PciDev::ReadConfig(offset
, size
, data
);
499 panic("Device specific PCI config space not implemented!\n");
503 * This is to write to the PCI general configuration registers
506 NSGigE::WriteConfig(int offset
, int size
, uint32_t data
)
508 if (offset
< PCI_DEVICE_SPECIFIC
)
509 PciDev::WriteConfig(offset
, size
, data
);
511 panic("Device specific PCI config space not implemented!\n");
513 // Need to catch writes to BARs to update the PIO interface
515 // seems to work fine without all these PCI settings, but i
516 // put in the IO to double check, an assertion will fail if we
517 // need to properly implement it
519 if (config
.data
[offset
] & PCI_CMD_IOSE
)
525 if (config
.data
[offset
] & PCI_CMD_BME
) {
532 if (config
.data
[offset
] & PCI_CMD_MSE
) {
541 case PCI0_BASE_ADDR0
:
542 if (BARAddrs
[0] != 0) {
544 pioInterface
->addAddrRange(RangeSize(BARAddrs
[0], BARSize
[0]));
546 BARAddrs
[0] &= EV5::PAddrUncachedMask
;
549 case PCI0_BASE_ADDR1
:
550 if (BARAddrs
[1] != 0) {
552 pioInterface
->addAddrRange(RangeSize(BARAddrs
[1], BARSize
[1]));
554 BARAddrs
[1] &= EV5::PAddrUncachedMask
;
561 * This reads the device registers, which are detailed in the NS83820
565 NSGigE::read(MemReqPtr
&req
, uint8_t *data
)
569 //The mask is to give you only the offset into the device register file
570 Addr daddr
= req
->paddr
& 0xfff;
571 DPRINTF(EthernetPIO
, "read da=%#x pa=%#x va=%#x size=%d\n",
572 daddr
, req
->paddr
, req
->vaddr
, req
->size
);
575 // there are some reserved registers, you can see ns_gige_reg.h and
576 // the spec sheet for details
577 if (daddr
> LAST
&& daddr
<= RESERVED
) {
578 panic("Accessing reserved register");
579 } else if (daddr
> RESERVED
&& daddr
<= 0x3FC) {
580 ReadConfig(daddr
& 0xff, req
->size
, data
);
582 } else if (daddr
>= MIB_START
&& daddr
<= MIB_END
) {
583 // don't implement all the MIB's. hopefully the kernel
584 // doesn't actually DEPEND upon their values
585 // MIB are just hardware stats keepers
586 uint32_t &reg
= *(uint32_t *) data
;
589 } else if (daddr
> 0x3FC)
590 panic("Something is messed up!\n");
593 case sizeof(uint32_t):
595 uint32_t &reg
= *(uint32_t *)data
;
600 //these are supposed to be cleared on a read
601 reg
&= ~(CR_RXD
| CR_TXD
| CR_TXR
| CR_RXR
);
618 devIntrClear(ISR_ALL
);
673 // see the spec sheet for how RFCR and RFDR work
674 // basically, you write to RFCR to tell the machine
675 // what you want to do next, then you act upon RFDR,
676 // and the device will be prepared b/c of what you
683 switch (regs
.rfcr
& RFCR_RFADDR
) {
685 reg
= rom
.perfectMatch
[1];
687 reg
+= rom
.perfectMatch
[0];
690 reg
= rom
.perfectMatch
[3] << 8;
691 reg
+= rom
.perfectMatch
[2];
694 reg
= rom
.perfectMatch
[5] << 8;
695 reg
+= rom
.perfectMatch
[4];
698 panic("reading RFDR for something other than PMATCH!\n");
699 // didn't implement other RFDR functionality b/c
700 // driver didn't use it
710 reg
&= ~(MIBC_MIBS
| MIBC_ACLR
);
754 reg
= params()->m5reg
;
758 panic("reading unimplemented register: addr=%#x", daddr
);
761 DPRINTF(EthernetPIO
, "read from %#x: data=%d data=%#x\n",
767 panic("accessing register with invalid size: addr=%#x, size=%d",
775 NSGigE::write(MemReqPtr
&req
, const uint8_t *data
)
779 Addr daddr
= req
->paddr
& 0xfff;
780 DPRINTF(EthernetPIO
, "write da=%#x pa=%#x va=%#x size=%d\n",
781 daddr
, req
->paddr
, req
->vaddr
, req
->size
);
783 if (daddr
> LAST
&& daddr
<= RESERVED
) {
784 panic("Accessing reserved register");
785 } else if (daddr
> RESERVED
&& daddr
<= 0x3FC) {
786 WriteConfig(daddr
& 0xff, req
->size
, *(uint32_t *)data
);
788 } else if (daddr
> 0x3FC)
789 panic("Something is messed up!\n");
791 if (req
->size
== sizeof(uint32_t)) {
792 uint32_t reg
= *(uint32_t *)data
;
793 DPRINTF(EthernetPIO
, "write data=%d data=%#x\n", reg
, reg
);
800 } else if (reg
& CR_TXE
) {
803 // the kernel is enabling the transmit machine
804 if (txState
== txIdle
)
810 } else if (reg
& CR_RXE
) {
813 if (rxState
== rxIdle
)
824 devIntrPost(ISR_SWI
);
835 if (reg
& CFGR_LNKSTS
||
838 reg
& CFGR_RESERVED
||
839 reg
& CFGR_T64ADDR
||
840 reg
& CFGR_PCI64_DET
)
841 panic("writing to read-only or reserved CFGR bits!\n");
843 regs
.config
|= reg
& ~(CFGR_LNKSTS
| CFGR_SPDSTS
| CFGR_DUPSTS
|
844 CFGR_RESERVED
| CFGR_T64ADDR
| CFGR_PCI64_DET
);
846 // all these #if 0's are because i don't THINK the kernel needs to
847 // have these implemented. if there is a problem relating to one of
848 // these, you may need to add functionality in.
850 if (reg
& CFGR_TBI_EN
) ;
851 if (reg
& CFGR_MODE_1000
) ;
854 if (reg
& CFGR_AUTO_1000
)
855 panic("CFGR_AUTO_1000 not implemented!\n");
858 if (reg
& CFGR_PINT_DUPSTS
||
859 reg
& CFGR_PINT_LNKSTS
||
860 reg
& CFGR_PINT_SPDSTS
)
863 if (reg
& CFGR_TMRTEST
) ;
864 if (reg
& CFGR_MRM_DIS
) ;
865 if (reg
& CFGR_MWI_DIS
) ;
867 if (reg
& CFGR_T64ADDR
)
868 panic("CFGR_T64ADDR is read only register!\n");
870 if (reg
& CFGR_PCI64_DET
)
871 panic("CFGR_PCI64_DET is read only register!\n");
873 if (reg
& CFGR_DATA64_EN
) ;
874 if (reg
& CFGR_M64ADDR
) ;
875 if (reg
& CFGR_PHY_RST
) ;
876 if (reg
& CFGR_PHY_DIS
) ;
879 if (reg
& CFGR_EXTSTS_EN
)
882 extstsEnable
= false;
885 if (reg
& CFGR_REQALG
) ;
887 if (reg
& CFGR_POW
) ;
888 if (reg
& CFGR_EXD
) ;
889 if (reg
& CFGR_PESEL
) ;
890 if (reg
& CFGR_BROM_DIS
) ;
891 if (reg
& CFGR_EXT_125
) ;
892 if (reg
& CFGR_BEM
) ;
898 // since phy is completely faked, MEAR_MD* don't matter
899 // and since the driver never uses MEAR_EE*, they don't
902 if (reg
& MEAR_EEDI
) ;
903 if (reg
& MEAR_EEDO
) ; // this one is read only
904 if (reg
& MEAR_EECLK
) ;
905 if (reg
& MEAR_EESEL
) ;
906 if (reg
& MEAR_MDIO
) ;
907 if (reg
& MEAR_MDDIR
) ;
908 if (reg
& MEAR_MDC
) ;
913 regs
.ptscr
= reg
& ~(PTSCR_RBIST_RDONLY
);
914 // these control BISTs for various parts of chip - we
915 // don't care or do just fake that the BIST is done
916 if (reg
& PTSCR_RBIST_EN
)
917 regs
.ptscr
|= PTSCR_RBIST_DONE
;
918 if (reg
& PTSCR_EEBIST_EN
)
919 regs
.ptscr
&= ~PTSCR_EEBIST_EN
;
920 if (reg
& PTSCR_EELOAD_EN
)
921 regs
.ptscr
&= ~PTSCR_EELOAD_EN
;
924 case ISR
: /* writing to the ISR has no effect */
925 panic("ISR is a read only register!\n");
938 /* not going to implement real interrupt holdoff */
942 regs
.txdp
= (reg
& 0xFFFFFFFC);
943 assert(txState
== txIdle
);
954 if (reg
& TX_CFG_CSI
) ;
955 if (reg
& TX_CFG_HBI
) ;
956 if (reg
& TX_CFG_MLB
) ;
957 if (reg
& TX_CFG_ATP
) ;
958 if (reg
& TX_CFG_ECRETRY
) {
960 * this could easily be implemented, but considering
961 * the network is just a fake pipe, wouldn't make
966 if (reg
& TX_CFG_BRST_DIS
) ;
970 /* we handle our own DMA, ignore the kernel's exhortations */
971 if (reg
& TX_CFG_MXDMA
) ;
974 // also, we currently don't care about fill/drain
975 // thresholds though this may change in the future with
976 // more realistic networks or a driver which changes it
977 // according to feedback
983 /* these just control general purpose i/o pins, don't matter */
998 if (reg
& RX_CFG_AEP
) ;
999 if (reg
& RX_CFG_ARP
) ;
1000 if (reg
& RX_CFG_STRIPCRC
) ;
1001 if (reg
& RX_CFG_RX_RD
) ;
1002 if (reg
& RX_CFG_ALP
) ;
1003 if (reg
& RX_CFG_AIRL
) ;
1005 /* we handle our own DMA, ignore what kernel says about it */
1006 if (reg
& RX_CFG_MXDMA
) ;
1008 //also, we currently don't care about fill/drain thresholds
1009 //though this may change in the future with more realistic
1010 //networks or a driver which changes it according to feedback
1011 if (reg
& (RX_CFG_DRTH
| RX_CFG_DRTH0
)) ;
1016 /* there is no priority queueing used in the linux 2.6 driver */
1021 /* not going to implement wake on LAN */
1026 /* not going to implement pause control */
1033 rxFilterEnable
= (reg
& RFCR_RFEN
) ? true : false;
1034 acceptBroadcast
= (reg
& RFCR_AAB
) ? true : false;
1035 acceptMulticast
= (reg
& RFCR_AAM
) ? true : false;
1036 acceptUnicast
= (reg
& RFCR_AAU
) ? true : false;
1037 acceptPerfect
= (reg
& RFCR_APM
) ? true : false;
1038 acceptArp
= (reg
& RFCR_AARP
) ? true : false;
1041 if (reg
& RFCR_APAT
)
1042 panic("RFCR_APAT not implemented!\n");
1045 if (reg
& RFCR_MHEN
|| reg
& RFCR_UHEN
)
1046 panic("hash filtering not implemented!\n");
1049 panic("RFCR_ULM not implemented!\n");
1054 panic("the driver never writes to RFDR, something is wrong!\n");
1057 panic("the driver never uses BRAR, something is wrong!\n");
1060 panic("the driver never uses BRDR, something is wrong!\n");
1063 panic("SRR is read only register!\n");
1066 panic("the driver never uses MIBC, something is wrong!\n");
1077 panic("the driver never uses VDR, something is wrong!\n");
1081 /* not going to implement clockrun stuff */
1087 if (reg
& TBICR_MR_LOOPBACK
)
1088 panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
1090 if (reg
& TBICR_MR_AN_ENABLE
) {
1091 regs
.tanlpar
= regs
.tanar
;
1092 regs
.tbisr
|= (TBISR_MR_AN_COMPLETE
| TBISR_MR_LINK_STATUS
);
1096 if (reg
& TBICR_MR_RESTART_AN
) ;
1102 panic("TBISR is read only register!\n");
1106 if (reg
& TANAR_PS2
)
1107 panic("this isn't used in driver, something wrong!\n");
1109 if (reg
& TANAR_PS1
)
1110 panic("this isn't used in driver, something wrong!\n");
1114 panic("this should only be written to by the fake phy!\n");
1117 panic("TANER is read only register!\n");
1124 panic("invalid register access daddr=%#x", daddr
);
1127 panic("Invalid Request Size");
1134 NSGigE::devIntrPost(uint32_t interrupts
)
1136 if (interrupts
& ISR_RESERVE
)
1137 panic("Cannot set a reserved interrupt");
1139 if (interrupts
& ISR_NOIMPL
)
1140 warn("interrupt not implemented %#x\n", interrupts
);
1142 interrupts
&= ~ISR_NOIMPL
;
1143 regs
.isr
|= interrupts
;
1145 if (interrupts
& regs
.imr
) {
1146 if (interrupts
& ISR_SWI
) {
1149 if (interrupts
& ISR_RXIDLE
) {
1152 if (interrupts
& ISR_RXOK
) {
1155 if (interrupts
& ISR_RXDESC
) {
1158 if (interrupts
& ISR_TXOK
) {
1161 if (interrupts
& ISR_TXIDLE
) {
1164 if (interrupts
& ISR_TXDESC
) {
1167 if (interrupts
& ISR_RXORN
) {
1172 DPRINTF(EthernetIntr
,
1173 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
1174 interrupts
, regs
.isr
, regs
.imr
);
1176 if ((regs
.isr
& regs
.imr
)) {
1177 Tick when
= curTick
;
1178 if (!(regs
.isr
& regs
.imr
& ISR_NODELAY
))
1184 /* writing this interrupt counting stats inside this means that this function
1185 is now limited to being used to clear all interrupts upon the kernel
1186 reading isr and servicing. just telling you in case you were thinking
1190 NSGigE::devIntrClear(uint32_t interrupts
)
1192 if (interrupts
& ISR_RESERVE
)
1193 panic("Cannot clear a reserved interrupt");
1195 if (regs
.isr
& regs
.imr
& ISR_SWI
) {
1198 if (regs
.isr
& regs
.imr
& ISR_RXIDLE
) {
1201 if (regs
.isr
& regs
.imr
& ISR_RXOK
) {
1204 if (regs
.isr
& regs
.imr
& ISR_RXDESC
) {
1207 if (regs
.isr
& regs
.imr
& ISR_TXOK
) {
1210 if (regs
.isr
& regs
.imr
& ISR_TXIDLE
) {
1213 if (regs
.isr
& regs
.imr
& ISR_TXDESC
) {
1216 if (regs
.isr
& regs
.imr
& ISR_RXORN
) {
1220 if (regs
.isr
& regs
.imr
& (ISR_SWI
| ISR_RXIDLE
| ISR_RXOK
| ISR_RXDESC
|
1221 ISR_TXOK
| ISR_TXIDLE
| ISR_TXDESC
| ISR_RXORN
) )
1224 interrupts
&= ~ISR_NOIMPL
;
1225 regs
.isr
&= ~interrupts
;
1227 DPRINTF(EthernetIntr
,
1228 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
1229 interrupts
, regs
.isr
, regs
.imr
);
1231 if (!(regs
.isr
& regs
.imr
))
1236 NSGigE::devIntrChangeMask()
1238 DPRINTF(EthernetIntr
, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
1239 regs
.isr
, regs
.imr
, regs
.isr
& regs
.imr
);
1241 if (regs
.isr
& regs
.imr
)
1242 cpuIntrPost(curTick
);
1248 NSGigE::cpuIntrPost(Tick when
)
1250 // If the interrupt you want to post is later than an interrupt
1251 // already scheduled, just let it post in the coming one and don't
1252 // schedule another.
1253 // HOWEVER, must be sure that the scheduled intrTick is in the
1254 // future (this was formerly the source of a bug)
1256 * @todo this warning should be removed and the intrTick code should
1259 assert(when
>= curTick
);
1260 assert(intrTick
>= curTick
|| intrTick
== 0);
1261 if (when
> intrTick
&& intrTick
!= 0) {
1262 DPRINTF(EthernetIntr
, "don't need to schedule event...intrTick=%d\n",
1268 if (intrTick
< curTick
) {
1273 DPRINTF(EthernetIntr
, "going to schedule an interrupt for intrTick=%d\n",
1277 intrEvent
->squash();
1278 intrEvent
= new IntrEvent(this, true);
1279 intrEvent
->schedule(intrTick
);
1283 NSGigE::cpuInterrupt()
1285 assert(intrTick
== curTick
);
1287 // Whether or not there's a pending interrupt, we don't care about
1292 // Don't send an interrupt if there's already one
1293 if (cpuPendingIntr
) {
1294 DPRINTF(EthernetIntr
,
1295 "would send an interrupt now, but there's already pending\n");
1298 cpuPendingIntr
= true;
1300 DPRINTF(EthernetIntr
, "posting interrupt\n");
1306 NSGigE::cpuIntrClear()
1308 if (!cpuPendingIntr
)
1312 intrEvent
->squash();
1318 cpuPendingIntr
= false;
1320 DPRINTF(EthernetIntr
, "clearing interrupt\n");
1325 NSGigE::cpuIntrPending() const
1326 { return cpuPendingIntr
; }
1332 DPRINTF(Ethernet
, "transmit reset\n");
1337 assert(txDescCnt
== 0);
1340 assert(txDmaState
== dmaIdle
);
1346 DPRINTF(Ethernet
, "receive reset\n");
1349 assert(rxPktBytes
== 0);
1352 assert(rxDescCnt
== 0);
1353 assert(rxDmaState
== dmaIdle
);
1361 memset(&regs
, 0, sizeof(regs
));
1362 regs
.config
= CFGR_LNKSTS
;
1364 regs
.txcfg
= 0x120; // set drain threshold to 1024 bytes and
1365 // fill threshold to 32 bytes
1366 regs
.rxcfg
= 0x4; // set drain threshold to 16 bytes
1367 regs
.srr
= 0x0103; // set the silicon revision to rev B or 0x103
1368 regs
.mibc
= MIBC_FRZ
;
1369 regs
.vdr
= 0x81; // set the vlan tag type to 802.1q
1370 regs
.tesr
= 0xc000; // TBI capable of both full and half duplex
1372 extstsEnable
= false;
1373 acceptBroadcast
= false;
1374 acceptMulticast
= false;
1375 acceptUnicast
= false;
1376 acceptPerfect
= false;
1381 NSGigE::rxDmaReadCopy()
1383 assert(rxDmaState
== dmaReading
);
1385 physmem
->dma_read((uint8_t *)rxDmaData
, rxDmaAddr
, rxDmaLen
);
1386 rxDmaState
= dmaIdle
;
1388 DPRINTF(EthernetDMA
, "rx dma read paddr=%#x len=%d\n",
1389 rxDmaAddr
, rxDmaLen
);
1390 DDUMP(EthernetDMA
, rxDmaData
, rxDmaLen
);
1394 NSGigE::doRxDmaRead()
1396 assert(rxDmaState
== dmaIdle
|| rxDmaState
== dmaReadWaiting
);
1397 rxDmaState
= dmaReading
;
1399 if (dmaInterface
&& !rxDmaFree
) {
1400 if (dmaInterface
->busy())
1401 rxDmaState
= dmaReadWaiting
;
1403 dmaInterface
->doDMA(Read
, rxDmaAddr
, rxDmaLen
, curTick
,
1404 &rxDmaReadEvent
, true);
1408 if (dmaReadDelay
== 0 && dmaReadFactor
== 0) {
1413 Tick factor
= ((rxDmaLen
+ ULL(63)) >> ULL(6)) * dmaReadFactor
;
1414 Tick start
= curTick
+ dmaReadDelay
+ factor
;
1415 rxDmaReadEvent
.schedule(start
);
1420 NSGigE::rxDmaReadDone()
1422 assert(rxDmaState
== dmaReading
);
1425 // If the transmit state machine has a pending DMA, let it go first
1426 if (txDmaState
== dmaReadWaiting
|| txDmaState
== dmaWriteWaiting
)
1433 NSGigE::rxDmaWriteCopy()
1435 assert(rxDmaState
== dmaWriting
);
1437 physmem
->dma_write(rxDmaAddr
, (uint8_t *)rxDmaData
, rxDmaLen
);
1438 rxDmaState
= dmaIdle
;
1440 DPRINTF(EthernetDMA
, "rx dma write paddr=%#x len=%d\n",
1441 rxDmaAddr
, rxDmaLen
);
1442 DDUMP(EthernetDMA
, rxDmaData
, rxDmaLen
);
1446 NSGigE::doRxDmaWrite()
1448 assert(rxDmaState
== dmaIdle
|| rxDmaState
== dmaWriteWaiting
);
1449 rxDmaState
= dmaWriting
;
1451 if (dmaInterface
&& !rxDmaFree
) {
1452 if (dmaInterface
->busy())
1453 rxDmaState
= dmaWriteWaiting
;
1455 dmaInterface
->doDMA(WriteInvalidate
, rxDmaAddr
, rxDmaLen
, curTick
,
1456 &rxDmaWriteEvent
, true);
1460 if (dmaWriteDelay
== 0 && dmaWriteFactor
== 0) {
1465 Tick factor
= ((rxDmaLen
+ ULL(63)) >> ULL(6)) * dmaWriteFactor
;
1466 Tick start
= curTick
+ dmaWriteDelay
+ factor
;
1467 rxDmaWriteEvent
.schedule(start
);
1472 NSGigE::rxDmaWriteDone()
1474 assert(rxDmaState
== dmaWriting
);
1477 // If the transmit state machine has a pending DMA, let it go first
1478 if (txDmaState
== dmaReadWaiting
|| txDmaState
== dmaWriteWaiting
)
1487 DPRINTF(EthernetSM
, "receive kick rxState=%s (rxBuf.size=%d)\n",
1488 NsRxStateStrings
[rxState
], rxFifo
.size());
1490 if (rxKickTick
> curTick
) {
1491 DPRINTF(EthernetSM
, "receive kick exiting, can't run till %d\n",
1497 switch(rxDmaState
) {
1498 case dmaReadWaiting
:
1502 case dmaWriteWaiting
:
1510 // see state machine from spec for details
1511 // the way this works is, if you finish work on one state and can
1512 // go directly to another, you do that through jumping to the
1513 // label "next". however, if you have intermediate work, like DMA
1514 // so that you can't go to the next state yet, you go to exit and
1515 // exit the loop. however, when the DMA is done it will trigger
1516 // an event and come back to this loop.
1520 DPRINTF(EthernetSM
, "Receive Disabled! Nothing to do.\n");
1525 rxState
= rxDescRefr
;
1527 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1528 rxDmaData
= &rxDescCache
+ offsetof(ns_desc
, link
);
1529 rxDmaLen
= sizeof(rxDescCache
.link
);
1530 rxDmaFree
= dmaDescFree
;
1533 descDmaRdBytes
+= rxDmaLen
;
1538 rxState
= rxDescRead
;
1540 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1541 rxDmaData
= &rxDescCache
;
1542 rxDmaLen
= sizeof(ns_desc
);
1543 rxDmaFree
= dmaDescFree
;
1546 descDmaRdBytes
+= rxDmaLen
;
1554 if (rxDmaState
!= dmaIdle
)
1557 rxState
= rxAdvance
;
1561 if (rxDmaState
!= dmaIdle
)
1564 DPRINTF(EthernetDesc
,
1565 "rxDescCache: addr=%08x read descriptor\n",
1566 regs
.rxdp
& 0x3fffffff);
1567 DPRINTF(EthernetDesc
,
1568 "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
1569 rxDescCache
.link
, rxDescCache
.bufptr
, rxDescCache
.cmdsts
,
1570 rxDescCache
.extsts
);
1572 if (rxDescCache
.cmdsts
& CMDSTS_OWN
) {
1573 devIntrPost(ISR_RXIDLE
);
1577 rxState
= rxFifoBlock
;
1578 rxFragPtr
= rxDescCache
.bufptr
;
1579 rxDescCnt
= rxDescCache
.cmdsts
& CMDSTS_LEN_MASK
;
1586 * @todo in reality, we should be able to start processing
1587 * the packet as it arrives, and not have to wait for the
1588 * full packet to be in the receive fifo.
1593 DPRINTF(EthernetSM
, "****processing receive of new packet****\n");
1595 // If we don't have a packet, grab a new one from the fifo.
1596 rxPacket
= rxFifo
.front();
1597 rxPktBytes
= rxPacket
->length
;
1598 rxPacketBufPtr
= rxPacket
->data
;
1601 if (DTRACE(Ethernet
)) {
1604 DPRINTF(Ethernet
, "ID is %d\n", ip
->id());
1608 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1609 tcp
->sport(), tcp
->dport(), tcp
->seq(),
1616 // sanity check - i think the driver behaves like this
1617 assert(rxDescCnt
>= rxPktBytes
);
1622 // dont' need the && rxDescCnt > 0 if driver sanity check
1624 if (rxPktBytes
> 0) {
1625 rxState
= rxFragWrite
;
1626 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
1628 rxXferLen
= rxPktBytes
;
1630 rxDmaAddr
= rxFragPtr
& 0x3fffffff;
1631 rxDmaData
= rxPacketBufPtr
;
1632 rxDmaLen
= rxXferLen
;
1633 rxDmaFree
= dmaDataFree
;
1639 rxState
= rxDescWrite
;
1641 //if (rxPktBytes == 0) { /* packet is done */
1642 assert(rxPktBytes
== 0);
1643 DPRINTF(EthernetSM
, "done with receiving packet\n");
1645 rxDescCache
.cmdsts
|= CMDSTS_OWN
;
1646 rxDescCache
.cmdsts
&= ~CMDSTS_MORE
;
1647 rxDescCache
.cmdsts
|= CMDSTS_OK
;
1648 rxDescCache
.cmdsts
&= 0xffff0000;
1649 rxDescCache
.cmdsts
+= rxPacket
->length
; //i.e. set CMDSTS_SIZE
1653 * all the driver uses these are for its own stats keeping
1654 * which we don't care about, aren't necessary for
1655 * functionality and doing this would just slow us down.
1656 * if they end up using this in a later version for
1657 * functional purposes, just undef
1659 if (rxFilterEnable
) {
1660 rxDescCache
.cmdsts
&= ~CMDSTS_DEST_MASK
;
1661 const EthAddr
&dst
= rxFifoFront()->dst();
1663 rxDescCache
.cmdsts
|= CMDSTS_DEST_SELF
;
1664 if (dst
->multicast())
1665 rxDescCache
.cmdsts
|= CMDSTS_DEST_MULTI
;
1666 if (dst
->broadcast())
1667 rxDescCache
.cmdsts
|= CMDSTS_DEST_MASK
;
1672 if (extstsEnable
&& ip
) {
1673 rxDescCache
.extsts
|= EXTSTS_IPPKT
;
1675 if (cksum(ip
) != 0) {
1676 DPRINTF(EthernetCksum
, "Rx IP Checksum Error\n");
1677 rxDescCache
.extsts
|= EXTSTS_IPERR
;
1682 rxDescCache
.extsts
|= EXTSTS_TCPPKT
;
1684 if (cksum(tcp
) != 0) {
1685 DPRINTF(EthernetCksum
, "Rx TCP Checksum Error\n");
1686 rxDescCache
.extsts
|= EXTSTS_TCPERR
;
1690 rxDescCache
.extsts
|= EXTSTS_UDPPKT
;
1692 if (cksum(udp
) != 0) {
1693 DPRINTF(EthernetCksum
, "Rx UDP Checksum Error\n");
1694 rxDescCache
.extsts
|= EXTSTS_UDPERR
;
1701 * the driver seems to always receive into desc buffers
1702 * of size 1514, so you never have a pkt that is split
1703 * into multiple descriptors on the receive side, so
1704 * i don't implement that case, hence the assert above.
1707 DPRINTF(EthernetDesc
,
1708 "rxDescCache: addr=%08x writeback cmdsts extsts\n",
1709 regs
.rxdp
& 0x3fffffff);
1710 DPRINTF(EthernetDesc
,
1711 "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
1712 rxDescCache
.link
, rxDescCache
.bufptr
, rxDescCache
.cmdsts
,
1713 rxDescCache
.extsts
);
1715 rxDmaAddr
= (regs
.rxdp
+ offsetof(ns_desc
, cmdsts
)) & 0x3fffffff;
1716 rxDmaData
= &(rxDescCache
.cmdsts
);
1717 rxDmaLen
= sizeof(rxDescCache
.cmdsts
) + sizeof(rxDescCache
.extsts
);
1718 rxDmaFree
= dmaDescFree
;
1721 descDmaWrBytes
+= rxDmaLen
;
1729 if (rxDmaState
!= dmaIdle
)
1732 rxPacketBufPtr
+= rxXferLen
;
1733 rxFragPtr
+= rxXferLen
;
1734 rxPktBytes
-= rxXferLen
;
1736 rxState
= rxFifoBlock
;
1740 if (rxDmaState
!= dmaIdle
)
1743 assert(rxDescCache
.cmdsts
& CMDSTS_OWN
);
1745 assert(rxPacket
== 0);
1746 devIntrPost(ISR_RXOK
);
1748 if (rxDescCache
.cmdsts
& CMDSTS_INTR
)
1749 devIntrPost(ISR_RXDESC
);
1752 DPRINTF(EthernetSM
, "Halting the RX state machine\n");
1756 rxState
= rxAdvance
;
1760 if (rxDescCache
.link
== 0) {
1761 devIntrPost(ISR_RXIDLE
);
1766 rxState
= rxDescRead
;
1767 regs
.rxdp
= rxDescCache
.link
;
1770 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1771 rxDmaData
= &rxDescCache
;
1772 rxDmaLen
= sizeof(ns_desc
);
1773 rxDmaFree
= dmaDescFree
;
1781 panic("Invalid rxState!");
1784 DPRINTF(EthernetSM
, "entering next rxState=%s\n",
1785 NsRxStateStrings
[rxState
]);
1791 * @todo do we want to schedule a future kick?
1793 DPRINTF(EthernetSM
, "rx state machine exited rxState=%s\n",
1794 NsRxStateStrings
[rxState
]);
1800 if (txFifo
.empty()) {
1801 DPRINTF(Ethernet
, "nothing to transmit\n");
1805 DPRINTF(Ethernet
, "Attempt Pkt Transmit: txFifo length=%d\n",
1807 if (interface
->sendPacket(txFifo
.front())) {
1809 if (DTRACE(Ethernet
)) {
1810 IpPtr
ip(txFifo
.front());
1812 DPRINTF(Ethernet
, "ID is %d\n", ip
->id());
1816 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1817 tcp
->sport(), tcp
->dport(), tcp
->seq(), tcp
->ack());
1823 DDUMP(EthernetData
, txFifo
.front()->data
, txFifo
.front()->length
);
1824 txBytes
+= txFifo
.front()->length
;
1827 DPRINTF(Ethernet
, "Successful Xmit! now txFifoAvail is %d\n",
1832 * normally do a writeback of the descriptor here, and ONLY
1833 * after that is done, send this interrupt. but since our
1834 * stuff never actually fails, just do this interrupt here,
1835 * otherwise the code has to stray from this nice format.
1836 * besides, it's functionally the same.
1838 devIntrPost(ISR_TXOK
);
1841 if (!txFifo
.empty() && !txEvent
.scheduled()) {
1842 DPRINTF(Ethernet
, "reschedule transmit\n");
1843 txEvent
.schedule(curTick
+ retryTime
);
1848 NSGigE::txDmaReadCopy()
1850 assert(txDmaState
== dmaReading
);
1852 physmem
->dma_read((uint8_t *)txDmaData
, txDmaAddr
, txDmaLen
);
1853 txDmaState
= dmaIdle
;
1855 DPRINTF(EthernetDMA
, "tx dma read paddr=%#x len=%d\n",
1856 txDmaAddr
, txDmaLen
);
1857 DDUMP(EthernetDMA
, txDmaData
, txDmaLen
);
1861 NSGigE::doTxDmaRead()
1863 assert(txDmaState
== dmaIdle
|| txDmaState
== dmaReadWaiting
);
1864 txDmaState
= dmaReading
;
1866 if (dmaInterface
&& !txDmaFree
) {
1867 if (dmaInterface
->busy())
1868 txDmaState
= dmaReadWaiting
;
1870 dmaInterface
->doDMA(Read
, txDmaAddr
, txDmaLen
, curTick
,
1871 &txDmaReadEvent
, true);
1875 if (dmaReadDelay
== 0 && dmaReadFactor
== 0.0) {
1880 Tick factor
= ((txDmaLen
+ ULL(63)) >> ULL(6)) * dmaReadFactor
;
1881 Tick start
= curTick
+ dmaReadDelay
+ factor
;
1882 txDmaReadEvent
.schedule(start
);
1887 NSGigE::txDmaReadDone()
1889 assert(txDmaState
== dmaReading
);
1892 // If the receive state machine has a pending DMA, let it go first
1893 if (rxDmaState
== dmaReadWaiting
|| rxDmaState
== dmaWriteWaiting
)
1900 NSGigE::txDmaWriteCopy()
1902 assert(txDmaState
== dmaWriting
);
1904 physmem
->dma_write(txDmaAddr
, (uint8_t *)txDmaData
, txDmaLen
);
1905 txDmaState
= dmaIdle
;
1907 DPRINTF(EthernetDMA
, "tx dma write paddr=%#x len=%d\n",
1908 txDmaAddr
, txDmaLen
);
1909 DDUMP(EthernetDMA
, txDmaData
, txDmaLen
);
1913 NSGigE::doTxDmaWrite()
1915 assert(txDmaState
== dmaIdle
|| txDmaState
== dmaWriteWaiting
);
1916 txDmaState
= dmaWriting
;
1918 if (dmaInterface
&& !txDmaFree
) {
1919 if (dmaInterface
->busy())
1920 txDmaState
= dmaWriteWaiting
;
1922 dmaInterface
->doDMA(WriteInvalidate
, txDmaAddr
, txDmaLen
, curTick
,
1923 &txDmaWriteEvent
, true);
1927 if (dmaWriteDelay
== 0 && dmaWriteFactor
== 0.0) {
1932 Tick factor
= ((txDmaLen
+ ULL(63)) >> ULL(6)) * dmaWriteFactor
;
1933 Tick start
= curTick
+ dmaWriteDelay
+ factor
;
1934 txDmaWriteEvent
.schedule(start
);
1939 NSGigE::txDmaWriteDone()
1941 assert(txDmaState
== dmaWriting
);
1944 // If the receive state machine has a pending DMA, let it go first
1945 if (rxDmaState
== dmaReadWaiting
|| rxDmaState
== dmaWriteWaiting
)
1954 DPRINTF(EthernetSM
, "transmit kick txState=%s\n",
1955 NsTxStateStrings
[txState
]);
1957 if (txKickTick
> curTick
) {
1958 DPRINTF(EthernetSM
, "transmit kick exiting, can't run till %d\n",
1964 switch(txDmaState
) {
1965 case dmaReadWaiting
:
1969 case dmaWriteWaiting
:
1980 DPRINTF(EthernetSM
, "Transmit disabled. Nothing to do.\n");
1985 txState
= txDescRefr
;
1987 txDmaAddr
= regs
.txdp
& 0x3fffffff;
1988 txDmaData
= &txDescCache
+ offsetof(ns_desc
, link
);
1989 txDmaLen
= sizeof(txDescCache
.link
);
1990 txDmaFree
= dmaDescFree
;
1993 descDmaRdBytes
+= txDmaLen
;
1999 txState
= txDescRead
;
2001 txDmaAddr
= regs
.txdp
& 0x3fffffff;
2002 txDmaData
= &txDescCache
;
2003 txDmaLen
= sizeof(ns_desc
);
2004 txDmaFree
= dmaDescFree
;
2007 descDmaRdBytes
+= txDmaLen
;
2015 if (txDmaState
!= dmaIdle
)
2018 txState
= txAdvance
;
2022 if (txDmaState
!= dmaIdle
)
2025 DPRINTF(EthernetDesc
,
2026 "txDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
2027 txDescCache
.link
, txDescCache
.bufptr
, txDescCache
.cmdsts
,
2028 txDescCache
.extsts
);
2030 if (txDescCache
.cmdsts
& CMDSTS_OWN
) {
2031 txState
= txFifoBlock
;
2032 txFragPtr
= txDescCache
.bufptr
;
2033 txDescCnt
= txDescCache
.cmdsts
& CMDSTS_LEN_MASK
;
2035 devIntrPost(ISR_TXIDLE
);
2043 DPRINTF(EthernetSM
, "****starting the tx of a new packet****\n");
2044 txPacket
= new PacketData(16384);
2045 txPacketBufPtr
= txPacket
->data
;
2048 if (txDescCnt
== 0) {
2049 DPRINTF(EthernetSM
, "the txDescCnt == 0, done with descriptor\n");
2050 if (txDescCache
.cmdsts
& CMDSTS_MORE
) {
2051 DPRINTF(EthernetSM
, "there are more descriptors to come\n");
2052 txState
= txDescWrite
;
2054 txDescCache
.cmdsts
&= ~CMDSTS_OWN
;
2056 txDmaAddr
= regs
.txdp
+ offsetof(ns_desc
, cmdsts
);
2057 txDmaAddr
&= 0x3fffffff;
2058 txDmaData
= &(txDescCache
.cmdsts
);
2059 txDmaLen
= sizeof(txDescCache
.cmdsts
);
2060 txDmaFree
= dmaDescFree
;
2065 } else { /* this packet is totally done */
2066 DPRINTF(EthernetSM
, "This packet is done, let's wrap it up\n");
2067 /* deal with the the packet that just finished */
2068 if ((regs
.vtcr
& VTCR_PPCHK
) && extstsEnable
) {
2070 if (txDescCache
.extsts
& EXTSTS_UDPPKT
) {
2073 udp
->sum(cksum(udp
));
2075 } else if (txDescCache
.extsts
& EXTSTS_TCPPKT
) {
2078 tcp
->sum(cksum(tcp
));
2081 if (txDescCache
.extsts
& EXTSTS_IPPKT
) {
2088 txPacket
->length
= txPacketBufPtr
- txPacket
->data
;
2089 // this is just because the receive can't handle a
2090 // packet bigger want to make sure
2091 assert(txPacket
->length
<= 1514);
2095 txFifo
.push(txPacket
);
2099 * this following section is not tqo spec, but
2100 * functionally shouldn't be any different. normally,
2101 * the chip will wait til the transmit has occurred
2102 * before writing back the descriptor because it has
2103 * to wait to see that it was successfully transmitted
2104 * to decide whether to set CMDSTS_OK or not.
2105 * however, in the simulator since it is always
2106 * successfully transmitted, and writing it exactly to
2107 * spec would complicate the code, we just do it here
2110 txDescCache
.cmdsts
&= ~CMDSTS_OWN
;
2111 txDescCache
.cmdsts
|= CMDSTS_OK
;
2113 DPRINTF(EthernetDesc
,
2114 "txDesc writeback: cmdsts=%08x extsts=%08x\n",
2115 txDescCache
.cmdsts
, txDescCache
.extsts
);
2117 txDmaAddr
= regs
.txdp
+ offsetof(ns_desc
, cmdsts
);
2118 txDmaAddr
&= 0x3fffffff;
2119 txDmaData
= &(txDescCache
.cmdsts
);
2120 txDmaLen
= sizeof(txDescCache
.cmdsts
) +
2121 sizeof(txDescCache
.extsts
);
2122 txDmaFree
= dmaDescFree
;
2125 descDmaWrBytes
+= txDmaLen
;
2131 DPRINTF(EthernetSM
, "halting TX state machine\n");
2135 txState
= txAdvance
;
2141 DPRINTF(EthernetSM
, "this descriptor isn't done yet\n");
2142 if (!txFifo
.full()) {
2143 txState
= txFragRead
;
2146 * The number of bytes transferred is either whatever
2147 * is left in the descriptor (txDescCnt), or if there
2148 * is not enough room in the fifo, just whatever room
2149 * is left in the fifo
2151 txXferLen
= min
<uint32_t>(txDescCnt
, txFifo
.avail());
2153 txDmaAddr
= txFragPtr
& 0x3fffffff;
2154 txDmaData
= txPacketBufPtr
;
2155 txDmaLen
= txXferLen
;
2156 txDmaFree
= dmaDataFree
;
2161 txState
= txFifoBlock
;
2171 if (txDmaState
!= dmaIdle
)
2174 txPacketBufPtr
+= txXferLen
;
2175 txFragPtr
+= txXferLen
;
2176 txDescCnt
-= txXferLen
;
2177 txFifo
.reserve(txXferLen
);
2179 txState
= txFifoBlock
;
2183 if (txDmaState
!= dmaIdle
)
2186 if (txDescCache
.cmdsts
& CMDSTS_INTR
)
2187 devIntrPost(ISR_TXDESC
);
2189 txState
= txAdvance
;
2193 if (txDescCache
.link
== 0) {
2194 devIntrPost(ISR_TXIDLE
);
2198 txState
= txDescRead
;
2199 regs
.txdp
= txDescCache
.link
;
2202 txDmaAddr
= txDescCache
.link
& 0x3fffffff;
2203 txDmaData
= &txDescCache
;
2204 txDmaLen
= sizeof(ns_desc
);
2205 txDmaFree
= dmaDescFree
;
2213 panic("invalid state");
2216 DPRINTF(EthernetSM
, "entering next txState=%s\n",
2217 NsTxStateStrings
[txState
]);
2223 * @todo do we want to schedule a future kick?
2225 DPRINTF(EthernetSM
, "tx state machine exited txState=%s\n",
2226 NsTxStateStrings
[txState
]);
2230 NSGigE::transferDone()
2232 if (txFifo
.empty()) {
2233 DPRINTF(Ethernet
, "transfer complete: txFifo empty...nothing to do\n");
2237 DPRINTF(Ethernet
, "transfer complete: data in txFifo...schedule xmit\n");
2239 if (txEvent
.scheduled())
2240 txEvent
.reschedule(curTick
+ cycles(1));
2242 txEvent
.schedule(curTick
+ cycles(1));
2246 NSGigE::rxFilter(const PacketPtr
&packet
)
2248 EthPtr eth
= packet
;
2252 const EthAddr
&dst
= eth
->dst();
2253 if (dst
.unicast()) {
2254 // If we're accepting all unicast addresses
2258 // If we make a perfect match
2259 if (acceptPerfect
&& dst
== rom
.perfectMatch
)
2262 if (acceptArp
&& eth
->type() == ETH_TYPE_ARP
)
2265 } else if (dst
.broadcast()) {
2266 // if we're accepting broadcasts
2267 if (acceptBroadcast
)
2270 } else if (dst
.multicast()) {
2271 // if we're accepting all multicasts
2272 if (acceptMulticast
)
2278 DPRINTF(Ethernet
, "rxFilter drop\n");
2279 DDUMP(EthernetData
, packet
->data
, packet
->length
);
2286 NSGigE::recvPacket(PacketPtr packet
)
2288 rxBytes
+= packet
->length
;
2291 DPRINTF(Ethernet
, "Receiving packet from wire, rxFifoAvail=%d\n",
2295 DPRINTF(Ethernet
, "receive disabled...packet dropped\n");
2296 interface
->recvDone();
2300 if (rxFilterEnable
&& rxFilter(packet
)) {
2301 DPRINTF(Ethernet
, "packet filtered...dropped\n");
2302 interface
->recvDone();
2306 if (rxFifo
.avail() < packet
->length
) {
2312 "packet won't fit in receive buffer...pkt ID %d dropped\n",
2315 DPRINTF(Ethernet
, "Seq=%d\n", tcp
->seq());
2320 devIntrPost(ISR_RXORN
);
2324 rxFifo
.push(packet
);
2325 interface
->recvDone();
2331 //=====================================================================
2335 NSGigE::serialize(ostream
&os
)
2337 // Serialize the PciDev base class
2338 PciDev::serialize(os
);
2341 * Finalize any DMA events now.
2343 if (rxDmaReadEvent
.scheduled())
2345 if (rxDmaWriteEvent
.scheduled())
2347 if (txDmaReadEvent
.scheduled())
2349 if (txDmaWriteEvent
.scheduled())
2353 * Serialize the device registers
2355 SERIALIZE_SCALAR(regs
.command
);
2356 SERIALIZE_SCALAR(regs
.config
);
2357 SERIALIZE_SCALAR(regs
.mear
);
2358 SERIALIZE_SCALAR(regs
.ptscr
);
2359 SERIALIZE_SCALAR(regs
.isr
);
2360 SERIALIZE_SCALAR(regs
.imr
);
2361 SERIALIZE_SCALAR(regs
.ier
);
2362 SERIALIZE_SCALAR(regs
.ihr
);
2363 SERIALIZE_SCALAR(regs
.txdp
);
2364 SERIALIZE_SCALAR(regs
.txdp_hi
);
2365 SERIALIZE_SCALAR(regs
.txcfg
);
2366 SERIALIZE_SCALAR(regs
.gpior
);
2367 SERIALIZE_SCALAR(regs
.rxdp
);
2368 SERIALIZE_SCALAR(regs
.rxdp_hi
);
2369 SERIALIZE_SCALAR(regs
.rxcfg
);
2370 SERIALIZE_SCALAR(regs
.pqcr
);
2371 SERIALIZE_SCALAR(regs
.wcsr
);
2372 SERIALIZE_SCALAR(regs
.pcr
);
2373 SERIALIZE_SCALAR(regs
.rfcr
);
2374 SERIALIZE_SCALAR(regs
.rfdr
);
2375 SERIALIZE_SCALAR(regs
.srr
);
2376 SERIALIZE_SCALAR(regs
.mibc
);
2377 SERIALIZE_SCALAR(regs
.vrcr
);
2378 SERIALIZE_SCALAR(regs
.vtcr
);
2379 SERIALIZE_SCALAR(regs
.vdr
);
2380 SERIALIZE_SCALAR(regs
.ccsr
);
2381 SERIALIZE_SCALAR(regs
.tbicr
);
2382 SERIALIZE_SCALAR(regs
.tbisr
);
2383 SERIALIZE_SCALAR(regs
.tanar
);
2384 SERIALIZE_SCALAR(regs
.tanlpar
);
2385 SERIALIZE_SCALAR(regs
.taner
);
2386 SERIALIZE_SCALAR(regs
.tesr
);
2388 SERIALIZE_ARRAY(rom
.perfectMatch
, ETH_ADDR_LEN
);
2390 SERIALIZE_SCALAR(ioEnable
);
2393 * Serialize the data Fifos
2395 rxFifo
.serialize("rxFifo", os
);
2396 txFifo
.serialize("txFifo", os
);
2399 * Serialize the various helper variables
2401 bool txPacketExists
= txPacket
;
2402 SERIALIZE_SCALAR(txPacketExists
);
2403 if (txPacketExists
) {
2404 txPacket
->length
= txPacketBufPtr
- txPacket
->data
;
2405 txPacket
->serialize("txPacket", os
);
2406 uint32_t txPktBufPtr
= (uint32_t) (txPacketBufPtr
- txPacket
->data
);
2407 SERIALIZE_SCALAR(txPktBufPtr
);
2410 bool rxPacketExists
= rxPacket
;
2411 SERIALIZE_SCALAR(rxPacketExists
);
2412 if (rxPacketExists
) {
2413 rxPacket
->serialize("rxPacket", os
);
2414 uint32_t rxPktBufPtr
= (uint32_t) (rxPacketBufPtr
- rxPacket
->data
);
2415 SERIALIZE_SCALAR(rxPktBufPtr
);
2418 SERIALIZE_SCALAR(txXferLen
);
2419 SERIALIZE_SCALAR(rxXferLen
);
2422 * Serialize DescCaches
2424 SERIALIZE_SCALAR(txDescCache
.link
);
2425 SERIALIZE_SCALAR(txDescCache
.bufptr
);
2426 SERIALIZE_SCALAR(txDescCache
.cmdsts
);
2427 SERIALIZE_SCALAR(txDescCache
.extsts
);
2428 SERIALIZE_SCALAR(rxDescCache
.link
);
2429 SERIALIZE_SCALAR(rxDescCache
.bufptr
);
2430 SERIALIZE_SCALAR(rxDescCache
.cmdsts
);
2431 SERIALIZE_SCALAR(rxDescCache
.extsts
);
2434 * Serialize tx state machine
2436 int txState
= this->txState
;
2437 SERIALIZE_SCALAR(txState
);
2438 SERIALIZE_SCALAR(txEnable
);
2439 SERIALIZE_SCALAR(CTDD
);
2440 SERIALIZE_SCALAR(txFragPtr
);
2441 SERIALIZE_SCALAR(txDescCnt
);
2442 int txDmaState
= this->txDmaState
;
2443 SERIALIZE_SCALAR(txDmaState
);
2446 * Serialize rx state machine
2448 int rxState
= this->rxState
;
2449 SERIALIZE_SCALAR(rxState
);
2450 SERIALIZE_SCALAR(rxEnable
);
2451 SERIALIZE_SCALAR(CRDD
);
2452 SERIALIZE_SCALAR(rxPktBytes
);
2453 SERIALIZE_SCALAR(rxFragPtr
);
2454 SERIALIZE_SCALAR(rxDescCnt
);
2455 int rxDmaState
= this->rxDmaState
;
2456 SERIALIZE_SCALAR(rxDmaState
);
2458 SERIALIZE_SCALAR(extstsEnable
);
2461 * If there's a pending transmit, store the time so we can
2462 * reschedule it later
2464 Tick transmitTick
= txEvent
.scheduled() ? txEvent
.when() - curTick
: 0;
2465 SERIALIZE_SCALAR(transmitTick
);
2468 * receive address filter settings
2470 SERIALIZE_SCALAR(rxFilterEnable
);
2471 SERIALIZE_SCALAR(acceptBroadcast
);
2472 SERIALIZE_SCALAR(acceptMulticast
);
2473 SERIALIZE_SCALAR(acceptUnicast
);
2474 SERIALIZE_SCALAR(acceptPerfect
);
2475 SERIALIZE_SCALAR(acceptArp
);
2478 * Keep track of pending interrupt status.
2480 SERIALIZE_SCALAR(intrTick
);
2481 SERIALIZE_SCALAR(cpuPendingIntr
);
2482 Tick intrEventTick
= 0;
2484 intrEventTick
= intrEvent
->when();
2485 SERIALIZE_SCALAR(intrEventTick
);
2490 NSGigE::unserialize(Checkpoint
*cp
, const std::string
§ion
)
2492 // Unserialize the PciDev base class
2493 PciDev::unserialize(cp
, section
);
2495 UNSERIALIZE_SCALAR(regs
.command
);
2496 UNSERIALIZE_SCALAR(regs
.config
);
2497 UNSERIALIZE_SCALAR(regs
.mear
);
2498 UNSERIALIZE_SCALAR(regs
.ptscr
);
2499 UNSERIALIZE_SCALAR(regs
.isr
);
2500 UNSERIALIZE_SCALAR(regs
.imr
);
2501 UNSERIALIZE_SCALAR(regs
.ier
);
2502 UNSERIALIZE_SCALAR(regs
.ihr
);
2503 UNSERIALIZE_SCALAR(regs
.txdp
);
2504 UNSERIALIZE_SCALAR(regs
.txdp_hi
);
2505 UNSERIALIZE_SCALAR(regs
.txcfg
);
2506 UNSERIALIZE_SCALAR(regs
.gpior
);
2507 UNSERIALIZE_SCALAR(regs
.rxdp
);
2508 UNSERIALIZE_SCALAR(regs
.rxdp_hi
);
2509 UNSERIALIZE_SCALAR(regs
.rxcfg
);
2510 UNSERIALIZE_SCALAR(regs
.pqcr
);
2511 UNSERIALIZE_SCALAR(regs
.wcsr
);
2512 UNSERIALIZE_SCALAR(regs
.pcr
);
2513 UNSERIALIZE_SCALAR(regs
.rfcr
);
2514 UNSERIALIZE_SCALAR(regs
.rfdr
);
2515 UNSERIALIZE_SCALAR(regs
.srr
);
2516 UNSERIALIZE_SCALAR(regs
.mibc
);
2517 UNSERIALIZE_SCALAR(regs
.vrcr
);
2518 UNSERIALIZE_SCALAR(regs
.vtcr
);
2519 UNSERIALIZE_SCALAR(regs
.vdr
);
2520 UNSERIALIZE_SCALAR(regs
.ccsr
);
2521 UNSERIALIZE_SCALAR(regs
.tbicr
);
2522 UNSERIALIZE_SCALAR(regs
.tbisr
);
2523 UNSERIALIZE_SCALAR(regs
.tanar
);
2524 UNSERIALIZE_SCALAR(regs
.tanlpar
);
2525 UNSERIALIZE_SCALAR(regs
.taner
);
2526 UNSERIALIZE_SCALAR(regs
.tesr
);
2528 UNSERIALIZE_ARRAY(rom
.perfectMatch
, ETH_ADDR_LEN
);
2530 UNSERIALIZE_SCALAR(ioEnable
);
2533 * unserialize the data fifos
2535 rxFifo
.unserialize("rxFifo", cp
, section
);
2536 txFifo
.unserialize("txFifo", cp
, section
);
2539 * unserialize the various helper variables
2541 bool txPacketExists
;
2542 UNSERIALIZE_SCALAR(txPacketExists
);
2543 if (txPacketExists
) {
2544 txPacket
= new PacketData(16384);
2545 txPacket
->unserialize("txPacket", cp
, section
);
2546 uint32_t txPktBufPtr
;
2547 UNSERIALIZE_SCALAR(txPktBufPtr
);
2548 txPacketBufPtr
= (uint8_t *) txPacket
->data
+ txPktBufPtr
;
2552 bool rxPacketExists
;
2553 UNSERIALIZE_SCALAR(rxPacketExists
);
2555 if (rxPacketExists
) {
2556 rxPacket
= new PacketData(16384);
2557 rxPacket
->unserialize("rxPacket", cp
, section
);
2558 uint32_t rxPktBufPtr
;
2559 UNSERIALIZE_SCALAR(rxPktBufPtr
);
2560 rxPacketBufPtr
= (uint8_t *) rxPacket
->data
+ rxPktBufPtr
;
2564 UNSERIALIZE_SCALAR(txXferLen
);
2565 UNSERIALIZE_SCALAR(rxXferLen
);
2568 * Unserialize DescCaches
2570 UNSERIALIZE_SCALAR(txDescCache
.link
);
2571 UNSERIALIZE_SCALAR(txDescCache
.bufptr
);
2572 UNSERIALIZE_SCALAR(txDescCache
.cmdsts
);
2573 UNSERIALIZE_SCALAR(txDescCache
.extsts
);
2574 UNSERIALIZE_SCALAR(rxDescCache
.link
);
2575 UNSERIALIZE_SCALAR(rxDescCache
.bufptr
);
2576 UNSERIALIZE_SCALAR(rxDescCache
.cmdsts
);
2577 UNSERIALIZE_SCALAR(rxDescCache
.extsts
);
2580 * unserialize tx state machine
2583 UNSERIALIZE_SCALAR(txState
);
2584 this->txState
= (TxState
) txState
;
2585 UNSERIALIZE_SCALAR(txEnable
);
2586 UNSERIALIZE_SCALAR(CTDD
);
2587 UNSERIALIZE_SCALAR(txFragPtr
);
2588 UNSERIALIZE_SCALAR(txDescCnt
);
2590 UNSERIALIZE_SCALAR(txDmaState
);
2591 this->txDmaState
= (DmaState
) txDmaState
;
2594 * unserialize rx state machine
2597 UNSERIALIZE_SCALAR(rxState
);
2598 this->rxState
= (RxState
) rxState
;
2599 UNSERIALIZE_SCALAR(rxEnable
);
2600 UNSERIALIZE_SCALAR(CRDD
);
2601 UNSERIALIZE_SCALAR(rxPktBytes
);
2602 UNSERIALIZE_SCALAR(rxFragPtr
);
2603 UNSERIALIZE_SCALAR(rxDescCnt
);
2605 UNSERIALIZE_SCALAR(rxDmaState
);
2606 this->rxDmaState
= (DmaState
) rxDmaState
;
2608 UNSERIALIZE_SCALAR(extstsEnable
);
2611 * If there's a pending transmit, reschedule it now
2614 UNSERIALIZE_SCALAR(transmitTick
);
2616 txEvent
.schedule(curTick
+ transmitTick
);
2619 * unserialize receive address filter settings
2621 UNSERIALIZE_SCALAR(rxFilterEnable
);
2622 UNSERIALIZE_SCALAR(acceptBroadcast
);
2623 UNSERIALIZE_SCALAR(acceptMulticast
);
2624 UNSERIALIZE_SCALAR(acceptUnicast
);
2625 UNSERIALIZE_SCALAR(acceptPerfect
);
2626 UNSERIALIZE_SCALAR(acceptArp
);
2629 * Keep track of pending interrupt status.
2631 UNSERIALIZE_SCALAR(intrTick
);
2632 UNSERIALIZE_SCALAR(cpuPendingIntr
);
2634 UNSERIALIZE_SCALAR(intrEventTick
);
2635 if (intrEventTick
) {
2636 intrEvent
= new IntrEvent(this, true);
2637 intrEvent
->schedule(intrEventTick
);
2641 * re-add addrRanges to bus bridges
2644 pioInterface
->addAddrRange(RangeSize(BARAddrs
[0], BARSize
[0]));
2645 pioInterface
->addAddrRange(RangeSize(BARAddrs
[1], BARSize
[1]));
2650 NSGigE::cacheAccess(MemReqPtr
&req
)
2652 DPRINTF(EthernetPIO
, "timing access to paddr=%#x (daddr=%#x)\n",
2653 req
->paddr
, req
->paddr
- addr
);
2654 return curTick
+ pioLatency
;
2657 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt
)
2659 SimObjectParam
<EtherInt
*> peer
;
2660 SimObjectParam
<NSGigE
*> device
;
2662 END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt
)
2664 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt
)
2666 INIT_PARAM_DFLT(peer
, "peer interface", NULL
),
2667 INIT_PARAM(device
, "Ethernet device of this interface")
2669 END_INIT_SIM_OBJECT_PARAMS(NSGigEInt
)
2671 CREATE_SIM_OBJECT(NSGigEInt
)
2673 NSGigEInt
*dev_int
= new NSGigEInt(getInstanceName(), device
);
2675 EtherInt
*p
= (EtherInt
*)peer
;
2677 dev_int
->setPeer(p
);
2678 p
->setPeer(dev_int
);
2684 REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt
)
2687 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE
)
2691 Param
<Tick
> tx_delay
;
2692 Param
<Tick
> rx_delay
;
2693 Param
<Tick
> intr_delay
;
2694 SimObjectParam
<MemoryController
*> mmu
;
2695 SimObjectParam
<PhysicalMemory
*> physmem
;
2696 Param
<bool> rx_filter
;
2697 Param
<string
> hardware_address
;
2698 SimObjectParam
<Bus
*> io_bus
;
2699 SimObjectParam
<Bus
*> payload_bus
;
2700 SimObjectParam
<HierParams
*> hier
;
2701 Param
<Tick
> pio_latency
;
2702 Param
<bool> dma_desc_free
;
2703 Param
<bool> dma_data_free
;
2704 Param
<Tick
> dma_read_delay
;
2705 Param
<Tick
> dma_write_delay
;
2706 Param
<Tick
> dma_read_factor
;
2707 Param
<Tick
> dma_write_factor
;
2708 SimObjectParam
<PciConfigAll
*> configspace
;
2709 SimObjectParam
<PciConfigData
*> configdata
;
2710 SimObjectParam
<Platform
*> platform
;
2711 Param
<uint32_t> pci_bus
;
2712 Param
<uint32_t> pci_dev
;
2713 Param
<uint32_t> pci_func
;
2714 Param
<uint32_t> tx_fifo_size
;
2715 Param
<uint32_t> rx_fifo_size
;
2716 Param
<uint32_t> m5reg
;
2717 Param
<bool> dma_no_allocate
;
2719 END_DECLARE_SIM_OBJECT_PARAMS(NSGigE
)
2721 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE
)
2723 INIT_PARAM(addr
, "Device Address"),
2724 INIT_PARAM(clock
, "State machine processor frequency"),
2725 INIT_PARAM(tx_delay
, "Transmit Delay"),
2726 INIT_PARAM(rx_delay
, "Receive Delay"),
2727 INIT_PARAM(intr_delay
, "Interrupt Delay in microseconds"),
2728 INIT_PARAM(mmu
, "Memory Controller"),
2729 INIT_PARAM(physmem
, "Physical Memory"),
2730 INIT_PARAM_DFLT(rx_filter
, "Enable Receive Filter", true),
2731 INIT_PARAM_DFLT(hardware_address
, "Ethernet Hardware Address",
2732 "00:99:00:00:00:01"),
2733 INIT_PARAM_DFLT(io_bus
, "The IO Bus to attach to for headers", NULL
),
2734 INIT_PARAM_DFLT(payload_bus
, "The IO Bus to attach to for payload", NULL
),
2735 INIT_PARAM_DFLT(hier
, "Hierarchy global variables", &defaultHierParams
),
2736 INIT_PARAM_DFLT(pio_latency
, "Programmed IO latency in bus cycles", 1),
2737 INIT_PARAM_DFLT(dma_desc_free
, "DMA of Descriptors is free", false),
2738 INIT_PARAM_DFLT(dma_data_free
, "DMA of Data is free", false),
2739 INIT_PARAM_DFLT(dma_read_delay
, "fixed delay for dma reads", 0),
2740 INIT_PARAM_DFLT(dma_write_delay
, "fixed delay for dma writes", 0),
2741 INIT_PARAM_DFLT(dma_read_factor
, "multiplier for dma reads", 0),
2742 INIT_PARAM_DFLT(dma_write_factor
, "multiplier for dma writes", 0),
2743 INIT_PARAM(configspace
, "PCI Configspace"),
2744 INIT_PARAM(configdata
, "PCI Config data"),
2745 INIT_PARAM(platform
, "Platform"),
2746 INIT_PARAM(pci_bus
, "PCI bus"),
2747 INIT_PARAM(pci_dev
, "PCI device number"),
2748 INIT_PARAM(pci_func
, "PCI function code"),
2749 INIT_PARAM_DFLT(tx_fifo_size
, "max size in bytes of txFifo", 131072),
2750 INIT_PARAM_DFLT(rx_fifo_size
, "max size in bytes of rxFifo", 131072),
2751 INIT_PARAM(m5reg
, "m5 register"),
2752 INIT_PARAM_DFLT(dma_no_allocate
, "Should DMA reads allocate cache lines", true)
2754 END_INIT_SIM_OBJECT_PARAMS(NSGigE
)
2757 CREATE_SIM_OBJECT(NSGigE
)
2759 NSGigE::Params
*params
= new NSGigE::Params
;
2761 params
->name
= getInstanceName();
2763 params
->configSpace
= configspace
;
2764 params
->configData
= configdata
;
2765 params
->plat
= platform
;
2766 params
->busNum
= pci_bus
;
2767 params
->deviceNum
= pci_dev
;
2768 params
->functionNum
= pci_func
;
2770 params
->clock
= clock
;
2771 params
->intr_delay
= intr_delay
;
2772 params
->pmem
= physmem
;
2773 params
->tx_delay
= tx_delay
;
2774 params
->rx_delay
= rx_delay
;
2775 params
->hier
= hier
;
2776 params
->header_bus
= io_bus
;
2777 params
->payload_bus
= payload_bus
;
2778 params
->pio_latency
= pio_latency
;
2779 params
->dma_desc_free
= dma_desc_free
;
2780 params
->dma_data_free
= dma_data_free
;
2781 params
->dma_read_delay
= dma_read_delay
;
2782 params
->dma_write_delay
= dma_write_delay
;
2783 params
->dma_read_factor
= dma_read_factor
;
2784 params
->dma_write_factor
= dma_write_factor
;
2785 params
->rx_filter
= rx_filter
;
2786 params
->eaddr
= hardware_address
;
2787 params
->tx_fifo_size
= tx_fifo_size
;
2788 params
->rx_fifo_size
= rx_fifo_size
;
2789 params
->m5reg
= m5reg
;
2790 params
->dma_no_allocate
= dma_no_allocate
;
2791 return new NSGigE(params
);
2794 REGISTER_SIM_OBJECT("NSGigE", NSGigE
)