2 * Copyright (c) 2004 The Regents of The University of Michigan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 * Device module for modelling the National Semiconductor
31 * DP83820 ethernet controller. Does not support priority queueing
37 #include "base/inet.hh"
38 #include "cpu/exec_context.hh"
40 #include "dev/etherlink.hh"
41 #include "dev/ns_gige.hh"
42 #include "dev/pciconfigall.hh"
43 #include "mem/bus/bus.hh"
44 #include "mem/bus/dma_interface.hh"
45 #include "mem/bus/pio_interface.hh"
46 #include "mem/bus/pio_interface_impl.hh"
47 #include "mem/functional_mem/memory_control.hh"
48 #include "mem/functional_mem/physical_memory.hh"
49 #include "sim/builder.hh"
50 #include "sim/debug.hh"
51 #include "sim/host.hh"
52 #include "sim/stats.hh"
53 #include "targetarch/vtophys.hh"
55 const char *NsRxStateStrings
[] =
66 const char *NsTxStateStrings
[] =
77 const char *NsDmaState
[] =
89 ///////////////////////////////////////////////////////////////////////
93 NSGigE::NSGigE(Params
*p
)
94 : PciDev(p
), ioEnable(false),
95 txFifo(p
->tx_fifo_size
), rxFifo(p
->rx_fifo_size
),
96 txPacket(0), rxPacket(0), txPacketBufPtr(NULL
), rxPacketBufPtr(NULL
),
97 txXferLen(0), rxXferLen(0), clock(p
->clock
),
98 txState(txIdle
), txEnable(false), CTDD(false),
99 txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle
), rxState(rxIdle
),
100 rxEnable(false), CRDD(false), rxPktBytes(0),
101 rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle
), extstsEnable(false),
102 rxDmaReadEvent(this), rxDmaWriteEvent(this),
103 txDmaReadEvent(this), txDmaWriteEvent(this),
104 dmaDescFree(p
->dma_desc_free
), dmaDataFree(p
->dma_data_free
),
105 txDelay(p
->tx_delay
), rxDelay(p
->rx_delay
),
106 rxKickTick(0), txKickTick(0),
107 txEvent(this), rxFilterEnable(p
->rx_filter
), acceptBroadcast(false),
108 acceptMulticast(false), acceptUnicast(false),
109 acceptPerfect(false), acceptArp(false),
110 physmem(p
->pmem
), intrTick(0), cpuPendingIntr(false),
111 intrEvent(0), interface(0)
114 pioInterface
= newPioInterface(name(), p
->hier
,
116 &NSGigE::cacheAccess
);
118 pioLatency
= p
->pio_latency
* p
->header_bus
->clockRate
;
121 dmaInterface
= new DMAInterface
<Bus
>(name() + ".dma",
126 dmaInterface
= new DMAInterface
<Bus
>(name() + ".dma",
130 } else if (p
->payload_bus
) {
131 pioInterface
= newPioInterface(name(), p
->hier
,
132 p
->payload_bus
, this,
133 &NSGigE::cacheAccess
);
135 pioLatency
= p
->pio_latency
* p
->payload_bus
->clockRate
;
137 dmaInterface
= new DMAInterface
<Bus
>(name() + ".dma",
144 intrDelay
= p
->intr_delay
;
145 dmaReadDelay
= p
->dma_read_delay
;
146 dmaWriteDelay
= p
->dma_write_delay
;
147 dmaReadFactor
= p
->dma_read_factor
;
148 dmaWriteFactor
= p
->dma_write_factor
;
151 memcpy(&rom
.perfectMatch
, p
->eaddr
.bytes(), ETH_ADDR_LEN
);
161 .name(name() + ".txBytes")
162 .desc("Bytes Transmitted")
167 .name(name() + ".rxBytes")
168 .desc("Bytes Received")
173 .name(name() + ".txPackets")
174 .desc("Number of Packets Transmitted")
179 .name(name() + ".rxPackets")
180 .desc("Number of Packets Received")
185 .name(name() + ".txIpChecksums")
186 .desc("Number of tx IP Checksums done by device")
192 .name(name() + ".rxIpChecksums")
193 .desc("Number of rx IP Checksums done by device")
199 .name(name() + ".txTcpChecksums")
200 .desc("Number of tx TCP Checksums done by device")
206 .name(name() + ".rxTcpChecksums")
207 .desc("Number of rx TCP Checksums done by device")
213 .name(name() + ".txUdpChecksums")
214 .desc("Number of tx UDP Checksums done by device")
220 .name(name() + ".rxUdpChecksums")
221 .desc("Number of rx UDP Checksums done by device")
227 .name(name() + ".descDMAReads")
228 .desc("Number of descriptors the device read w/ DMA")
233 .name(name() + ".descDMAWrites")
234 .desc("Number of descriptors the device wrote w/ DMA")
239 .name(name() + ".descDmaReadBytes")
240 .desc("number of descriptor bytes read w/ DMA")
245 .name(name() + ".descDmaWriteBytes")
246 .desc("number of descriptor bytes write w/ DMA")
251 .name(name() + ".txBandwidth")
252 .desc("Transmit Bandwidth (bits/s)")
258 .name(name() + ".rxBandwidth")
259 .desc("Receive Bandwidth (bits/s)")
265 .name(name() + ".totBandwidth")
266 .desc("Total Bandwidth (bits/s)")
272 .name(name() + ".totPackets")
273 .desc("Total Packets")
279 .name(name() + ".totBytes")
286 .name(name() + ".totPPS")
287 .desc("Total Tranmission Rate (packets/s)")
293 .name(name() + ".txPPS")
294 .desc("Packet Tranmission Rate (packets/s)")
300 .name(name() + ".rxPPS")
301 .desc("Packet Reception Rate (packets/s)")
307 .name(name() + ".postedSwi")
308 .desc("number of software interrupts posted to CPU")
313 .name(name() + ".totalSwi")
314 .desc("number of total Swi written to ISR")
319 .name(name() + ".coalescedSwi")
320 .desc("average number of Swi's coalesced into each post")
325 .name(name() + ".postedRxIdle")
326 .desc("number of rxIdle interrupts posted to CPU")
331 .name(name() + ".totalRxIdle")
332 .desc("number of total RxIdle written to ISR")
337 .name(name() + ".coalescedRxIdle")
338 .desc("average number of RxIdle's coalesced into each post")
343 .name(name() + ".postedRxOk")
344 .desc("number of RxOk interrupts posted to CPU")
349 .name(name() + ".totalRxOk")
350 .desc("number of total RxOk written to ISR")
355 .name(name() + ".coalescedRxOk")
356 .desc("average number of RxOk's coalesced into each post")
361 .name(name() + ".postedRxDesc")
362 .desc("number of RxDesc interrupts posted to CPU")
367 .name(name() + ".totalRxDesc")
368 .desc("number of total RxDesc written to ISR")
373 .name(name() + ".coalescedRxDesc")
374 .desc("average number of RxDesc's coalesced into each post")
379 .name(name() + ".postedTxOk")
380 .desc("number of TxOk interrupts posted to CPU")
385 .name(name() + ".totalTxOk")
386 .desc("number of total TxOk written to ISR")
391 .name(name() + ".coalescedTxOk")
392 .desc("average number of TxOk's coalesced into each post")
397 .name(name() + ".postedTxIdle")
398 .desc("number of TxIdle interrupts posted to CPU")
403 .name(name() + ".totalTxIdle")
404 .desc("number of total TxIdle written to ISR")
409 .name(name() + ".coalescedTxIdle")
410 .desc("average number of TxIdle's coalesced into each post")
415 .name(name() + ".postedTxDesc")
416 .desc("number of TxDesc interrupts posted to CPU")
421 .name(name() + ".totalTxDesc")
422 .desc("number of total TxDesc written to ISR")
427 .name(name() + ".coalescedTxDesc")
428 .desc("average number of TxDesc's coalesced into each post")
433 .name(name() + ".postedRxOrn")
434 .desc("number of RxOrn posted to CPU")
439 .name(name() + ".totalRxOrn")
440 .desc("number of total RxOrn written to ISR")
445 .name(name() + ".coalescedRxOrn")
446 .desc("average number of RxOrn's coalesced into each post")
451 .name(name() + ".coalescedTotal")
452 .desc("average number of interrupts coalesced into each post")
457 .name(name() + ".postedInterrupts")
458 .desc("number of posts to CPU")
463 .name(name() + ".droppedPackets")
464 .desc("number of packets dropped")
468 coalescedSwi
= totalSwi
/ postedInterrupts
;
469 coalescedRxIdle
= totalRxIdle
/ postedInterrupts
;
470 coalescedRxOk
= totalRxOk
/ postedInterrupts
;
471 coalescedRxDesc
= totalRxDesc
/ postedInterrupts
;
472 coalescedTxOk
= totalTxOk
/ postedInterrupts
;
473 coalescedTxIdle
= totalTxIdle
/ postedInterrupts
;
474 coalescedTxDesc
= totalTxDesc
/ postedInterrupts
;
475 coalescedRxOrn
= totalRxOrn
/ postedInterrupts
;
477 coalescedTotal
= (totalSwi
+ totalRxIdle
+ totalRxOk
+ totalRxDesc
+ totalTxOk
478 + totalTxIdle
+ totalTxDesc
+ totalRxOrn
) / postedInterrupts
;
480 txBandwidth
= txBytes
* Stats::constant(8) / simSeconds
;
481 rxBandwidth
= rxBytes
* Stats::constant(8) / simSeconds
;
482 totBandwidth
= txBandwidth
+ rxBandwidth
;
483 totBytes
= txBytes
+ rxBytes
;
484 totPackets
= txPackets
+ rxPackets
;
486 txPacketRate
= txPackets
/ simSeconds
;
487 rxPacketRate
= rxPackets
/ simSeconds
;
491 * This is to read the PCI general configuration registers
494 NSGigE::ReadConfig(int offset
, int size
, uint8_t *data
)
496 if (offset
< PCI_DEVICE_SPECIFIC
)
497 PciDev::ReadConfig(offset
, size
, data
);
499 panic("Device specific PCI config space not implemented!\n");
503 * This is to write to the PCI general configuration registers
506 NSGigE::WriteConfig(int offset
, int size
, uint32_t data
)
508 if (offset
< PCI_DEVICE_SPECIFIC
)
509 PciDev::WriteConfig(offset
, size
, data
);
511 panic("Device specific PCI config space not implemented!\n");
513 // Need to catch writes to BARs to update the PIO interface
515 // seems to work fine without all these PCI settings, but i
516 // put in the IO to double check, an assertion will fail if we
517 // need to properly implement it
519 if (config
.data
[offset
] & PCI_CMD_IOSE
)
525 if (config
.data
[offset
] & PCI_CMD_BME
) {
532 if (config
.data
[offset
] & PCI_CMD_MSE
) {
541 case PCI0_BASE_ADDR0
:
542 if (BARAddrs
[0] != 0) {
544 pioInterface
->addAddrRange(RangeSize(BARAddrs
[0], BARSize
[0]));
546 BARAddrs
[0] &= EV5::PAddrUncachedMask
;
549 case PCI0_BASE_ADDR1
:
550 if (BARAddrs
[1] != 0) {
552 pioInterface
->addAddrRange(RangeSize(BARAddrs
[1], BARSize
[1]));
554 BARAddrs
[1] &= EV5::PAddrUncachedMask
;
561 * This reads the device registers, which are detailed in the NS83820
565 NSGigE::read(MemReqPtr
&req
, uint8_t *data
)
569 //The mask is to give you only the offset into the device register file
570 Addr daddr
= req
->paddr
& 0xfff;
571 DPRINTF(EthernetPIO
, "read da=%#x pa=%#x va=%#x size=%d\n",
572 daddr
, req
->paddr
, req
->vaddr
, req
->size
);
575 // there are some reserved registers, you can see ns_gige_reg.h and
576 // the spec sheet for details
577 if (daddr
> LAST
&& daddr
<= RESERVED
) {
578 panic("Accessing reserved register");
579 } else if (daddr
> RESERVED
&& daddr
<= 0x3FC) {
580 ReadConfig(daddr
& 0xff, req
->size
, data
);
582 } else if (daddr
>= MIB_START
&& daddr
<= MIB_END
) {
583 // don't implement all the MIB's. hopefully the kernel
584 // doesn't actually DEPEND upon their values
585 // MIB are just hardware stats keepers
586 uint32_t ®
= *(uint32_t *) data
;
589 } else if (daddr
> 0x3FC)
590 panic("Something is messed up!\n");
593 case sizeof(uint32_t):
595 uint32_t ®
= *(uint32_t *)data
;
600 //these are supposed to be cleared on a read
601 reg
&= ~(CR_RXD
| CR_TXD
| CR_TXR
| CR_RXR
);
618 devIntrClear(ISR_ALL
);
673 // see the spec sheet for how RFCR and RFDR work
674 // basically, you write to RFCR to tell the machine
675 // what you want to do next, then you act upon RFDR,
676 // and the device will be prepared b/c of what you
683 switch (regs
.rfcr
& RFCR_RFADDR
) {
685 reg
= rom
.perfectMatch
[1];
687 reg
+= rom
.perfectMatch
[0];
690 reg
= rom
.perfectMatch
[3] << 8;
691 reg
+= rom
.perfectMatch
[2];
694 reg
= rom
.perfectMatch
[5] << 8;
695 reg
+= rom
.perfectMatch
[4];
698 panic("reading RFDR for something other than PMATCH!\n");
699 // didn't implement other RFDR functionality b/c
700 // driver didn't use it
710 reg
&= ~(MIBC_MIBS
| MIBC_ACLR
);
754 reg
= params()->m5reg
;
758 panic("reading unimplemented register: addr=%#x", daddr
);
761 DPRINTF(EthernetPIO
, "read from %#x: data=%d data=%#x\n",
767 panic("accessing register with invalid size: addr=%#x, size=%d",
775 NSGigE::write(MemReqPtr
&req
, const uint8_t *data
)
779 Addr daddr
= req
->paddr
& 0xfff;
780 DPRINTF(EthernetPIO
, "write da=%#x pa=%#x va=%#x size=%d\n",
781 daddr
, req
->paddr
, req
->vaddr
, req
->size
);
783 if (daddr
> LAST
&& daddr
<= RESERVED
) {
784 panic("Accessing reserved register");
785 } else if (daddr
> RESERVED
&& daddr
<= 0x3FC) {
786 WriteConfig(daddr
& 0xff, req
->size
, *(uint32_t *)data
);
788 } else if (daddr
> 0x3FC)
789 panic("Something is messed up!\n");
791 if (req
->size
== sizeof(uint32_t)) {
792 uint32_t reg
= *(uint32_t *)data
;
793 DPRINTF(EthernetPIO
, "write data=%d data=%#x\n", reg
, reg
);
800 } else if (reg
& CR_TXE
) {
803 // the kernel is enabling the transmit machine
804 if (txState
== txIdle
)
810 } else if (reg
& CR_RXE
) {
813 if (rxState
== rxIdle
)
824 devIntrPost(ISR_SWI
);
835 if (reg
& CFGR_LNKSTS
||
838 reg
& CFGR_RESERVED
||
839 reg
& CFGR_T64ADDR
||
840 reg
& CFGR_PCI64_DET
)
841 panic("writing to read-only or reserved CFGR bits!\n");
843 regs
.config
|= reg
& ~(CFGR_LNKSTS
| CFGR_SPDSTS
| CFGR_DUPSTS
|
844 CFGR_RESERVED
| CFGR_T64ADDR
| CFGR_PCI64_DET
);
846 // all these #if 0's are because i don't THINK the kernel needs to
847 // have these implemented. if there is a problem relating to one of
848 // these, you may need to add functionality in.
850 if (reg
& CFGR_TBI_EN
) ;
851 if (reg
& CFGR_MODE_1000
) ;
854 if (reg
& CFGR_AUTO_1000
)
855 panic("CFGR_AUTO_1000 not implemented!\n");
858 if (reg
& CFGR_PINT_DUPSTS
||
859 reg
& CFGR_PINT_LNKSTS
||
860 reg
& CFGR_PINT_SPDSTS
)
863 if (reg
& CFGR_TMRTEST
) ;
864 if (reg
& CFGR_MRM_DIS
) ;
865 if (reg
& CFGR_MWI_DIS
) ;
867 if (reg
& CFGR_T64ADDR
)
868 panic("CFGR_T64ADDR is read only register!\n");
870 if (reg
& CFGR_PCI64_DET
)
871 panic("CFGR_PCI64_DET is read only register!\n");
873 if (reg
& CFGR_DATA64_EN
) ;
874 if (reg
& CFGR_M64ADDR
) ;
875 if (reg
& CFGR_PHY_RST
) ;
876 if (reg
& CFGR_PHY_DIS
) ;
879 if (reg
& CFGR_EXTSTS_EN
)
882 extstsEnable
= false;
885 if (reg
& CFGR_REQALG
) ;
887 if (reg
& CFGR_POW
) ;
888 if (reg
& CFGR_EXD
) ;
889 if (reg
& CFGR_PESEL
) ;
890 if (reg
& CFGR_BROM_DIS
) ;
891 if (reg
& CFGR_EXT_125
) ;
892 if (reg
& CFGR_BEM
) ;
898 // since phy is completely faked, MEAR_MD* don't matter
899 // and since the driver never uses MEAR_EE*, they don't
902 if (reg
& MEAR_EEDI
) ;
903 if (reg
& MEAR_EEDO
) ; // this one is read only
904 if (reg
& MEAR_EECLK
) ;
905 if (reg
& MEAR_EESEL
) ;
906 if (reg
& MEAR_MDIO
) ;
907 if (reg
& MEAR_MDDIR
) ;
908 if (reg
& MEAR_MDC
) ;
913 regs
.ptscr
= reg
& ~(PTSCR_RBIST_RDONLY
);
914 // these control BISTs for various parts of chip - we
915 // don't care or do just fake that the BIST is done
916 if (reg
& PTSCR_RBIST_EN
)
917 regs
.ptscr
|= PTSCR_RBIST_DONE
;
918 if (reg
& PTSCR_EEBIST_EN
)
919 regs
.ptscr
&= ~PTSCR_EEBIST_EN
;
920 if (reg
& PTSCR_EELOAD_EN
)
921 regs
.ptscr
&= ~PTSCR_EELOAD_EN
;
924 case ISR
: /* writing to the ISR has no effect */
925 panic("ISR is a read only register!\n");
938 /* not going to implement real interrupt holdoff */
942 regs
.txdp
= (reg
& 0xFFFFFFFC);
943 assert(txState
== txIdle
);
954 if (reg
& TX_CFG_CSI
) ;
955 if (reg
& TX_CFG_HBI
) ;
956 if (reg
& TX_CFG_MLB
) ;
957 if (reg
& TX_CFG_ATP
) ;
958 if (reg
& TX_CFG_ECRETRY
) {
960 * this could easily be implemented, but considering
961 * the network is just a fake pipe, wouldn't make
966 if (reg
& TX_CFG_BRST_DIS
) ;
970 /* we handle our own DMA, ignore the kernel's exhortations */
971 if (reg
& TX_CFG_MXDMA
) ;
974 // also, we currently don't care about fill/drain
975 // thresholds though this may change in the future with
976 // more realistic networks or a driver which changes it
977 // according to feedback
983 /* these just control general purpose i/o pins, don't matter */
998 if (reg
& RX_CFG_AEP
) ;
999 if (reg
& RX_CFG_ARP
) ;
1000 if (reg
& RX_CFG_STRIPCRC
) ;
1001 if (reg
& RX_CFG_RX_RD
) ;
1002 if (reg
& RX_CFG_ALP
) ;
1003 if (reg
& RX_CFG_AIRL
) ;
1005 /* we handle our own DMA, ignore what kernel says about it */
1006 if (reg
& RX_CFG_MXDMA
) ;
1008 //also, we currently don't care about fill/drain thresholds
1009 //though this may change in the future with more realistic
1010 //networks or a driver which changes it according to feedback
1011 if (reg
& (RX_CFG_DRTH
| RX_CFG_DRTH0
)) ;
1016 /* there is no priority queueing used in the linux 2.6 driver */
1021 /* not going to implement wake on LAN */
1026 /* not going to implement pause control */
1033 rxFilterEnable
= (reg
& RFCR_RFEN
) ? true : false;
1034 acceptBroadcast
= (reg
& RFCR_AAB
) ? true : false;
1035 acceptMulticast
= (reg
& RFCR_AAM
) ? true : false;
1036 acceptUnicast
= (reg
& RFCR_AAU
) ? true : false;
1037 acceptPerfect
= (reg
& RFCR_APM
) ? true : false;
1038 acceptArp
= (reg
& RFCR_AARP
) ? true : false;
1041 if (reg
& RFCR_APAT
)
1042 panic("RFCR_APAT not implemented!\n");
1045 if (reg
& RFCR_MHEN
|| reg
& RFCR_UHEN
)
1046 panic("hash filtering not implemented!\n");
1049 panic("RFCR_ULM not implemented!\n");
1054 panic("the driver never writes to RFDR, something is wrong!\n");
1057 panic("the driver never uses BRAR, something is wrong!\n");
1060 panic("the driver never uses BRDR, something is wrong!\n");
1063 panic("SRR is read only register!\n");
1066 panic("the driver never uses MIBC, something is wrong!\n");
1077 panic("the driver never uses VDR, something is wrong!\n");
1081 /* not going to implement clockrun stuff */
1087 if (reg
& TBICR_MR_LOOPBACK
)
1088 panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
1090 if (reg
& TBICR_MR_AN_ENABLE
) {
1091 regs
.tanlpar
= regs
.tanar
;
1092 regs
.tbisr
|= (TBISR_MR_AN_COMPLETE
| TBISR_MR_LINK_STATUS
);
1096 if (reg
& TBICR_MR_RESTART_AN
) ;
1102 panic("TBISR is read only register!\n");
1106 if (reg
& TANAR_PS2
)
1107 panic("this isn't used in driver, something wrong!\n");
1109 if (reg
& TANAR_PS1
)
1110 panic("this isn't used in driver, something wrong!\n");
1114 panic("this should only be written to by the fake phy!\n");
1117 panic("TANER is read only register!\n");
1124 panic("invalid register access daddr=%#x", daddr
);
1127 panic("Invalid Request Size");
1134 NSGigE::devIntrPost(uint32_t interrupts
)
1136 if (interrupts
& ISR_RESERVE
)
1137 panic("Cannot set a reserved interrupt");
1139 if (interrupts
& ISR_NOIMPL
)
1140 warn("interrupt not implemented %#x\n", interrupts
);
1142 interrupts
&= ~ISR_NOIMPL
;
1143 regs
.isr
|= interrupts
;
1145 if (interrupts
& regs
.imr
) {
1146 if (interrupts
& ISR_SWI
) {
1149 if (interrupts
& ISR_RXIDLE
) {
1152 if (interrupts
& ISR_RXOK
) {
1155 if (interrupts
& ISR_RXDESC
) {
1158 if (interrupts
& ISR_TXOK
) {
1161 if (interrupts
& ISR_TXIDLE
) {
1164 if (interrupts
& ISR_TXDESC
) {
1167 if (interrupts
& ISR_RXORN
) {
1172 DPRINTF(EthernetIntr
,
1173 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
1174 interrupts
, regs
.isr
, regs
.imr
);
1176 if ((regs
.isr
& regs
.imr
)) {
1177 Tick when
= curTick
;
1178 if (!(regs
.isr
& regs
.imr
& ISR_NODELAY
))
1184 /* writing this interrupt counting stats inside this means that this function
1185 is now limited to being used to clear all interrupts upon the kernel
1186 reading isr and servicing. just telling you in case you were thinking
1190 NSGigE::devIntrClear(uint32_t interrupts
)
1192 if (interrupts
& ISR_RESERVE
)
1193 panic("Cannot clear a reserved interrupt");
1195 if (regs
.isr
& regs
.imr
& ISR_SWI
) {
1198 if (regs
.isr
& regs
.imr
& ISR_RXIDLE
) {
1201 if (regs
.isr
& regs
.imr
& ISR_RXOK
) {
1204 if (regs
.isr
& regs
.imr
& ISR_RXDESC
) {
1207 if (regs
.isr
& regs
.imr
& ISR_TXOK
) {
1210 if (regs
.isr
& regs
.imr
& ISR_TXIDLE
) {
1213 if (regs
.isr
& regs
.imr
& ISR_TXDESC
) {
1216 if (regs
.isr
& regs
.imr
& ISR_RXORN
) {
1220 if (regs
.isr
& regs
.imr
& (ISR_SWI
| ISR_RXIDLE
| ISR_RXOK
| ISR_RXDESC
|
1221 ISR_TXOK
| ISR_TXIDLE
| ISR_TXDESC
| ISR_RXORN
) )
1224 interrupts
&= ~ISR_NOIMPL
;
1225 regs
.isr
&= ~interrupts
;
1227 DPRINTF(EthernetIntr
,
1228 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
1229 interrupts
, regs
.isr
, regs
.imr
);
1231 if (!(regs
.isr
& regs
.imr
))
1236 NSGigE::devIntrChangeMask()
1238 DPRINTF(EthernetIntr
, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
1239 regs
.isr
, regs
.imr
, regs
.isr
& regs
.imr
);
1241 if (regs
.isr
& regs
.imr
)
1242 cpuIntrPost(curTick
);
1248 NSGigE::cpuIntrPost(Tick when
)
1250 // If the interrupt you want to post is later than an interrupt
1251 // already scheduled, just let it post in the coming one and don't
1252 // schedule another.
1253 // HOWEVER, must be sure that the scheduled intrTick is in the
1254 // future (this was formerly the source of a bug)
1256 * @todo this warning should be removed and the intrTick code should
1259 assert(when
>= curTick
);
1260 assert(intrTick
>= curTick
|| intrTick
== 0);
1261 if (when
> intrTick
&& intrTick
!= 0) {
1262 DPRINTF(EthernetIntr
, "don't need to schedule event...intrTick=%d\n",
1268 if (intrTick
< curTick
) {
1273 DPRINTF(EthernetIntr
, "going to schedule an interrupt for intrTick=%d\n",
1277 intrEvent
->squash();
1278 intrEvent
= new IntrEvent(this, true);
1279 intrEvent
->schedule(intrTick
);
1283 NSGigE::cpuInterrupt()
1285 assert(intrTick
== curTick
);
1287 // Whether or not there's a pending interrupt, we don't care about
1292 // Don't send an interrupt if there's already one
1293 if (cpuPendingIntr
) {
1294 DPRINTF(EthernetIntr
,
1295 "would send an interrupt now, but there's already pending\n");
1298 cpuPendingIntr
= true;
1300 DPRINTF(EthernetIntr
, "posting interrupt\n");
1306 NSGigE::cpuIntrClear()
1308 if (!cpuPendingIntr
)
1312 intrEvent
->squash();
1318 cpuPendingIntr
= false;
1320 DPRINTF(EthernetIntr
, "clearing interrupt\n");
1325 NSGigE::cpuIntrPending() const
1326 { return cpuPendingIntr
; }
1332 DPRINTF(Ethernet
, "transmit reset\n");
1337 assert(txDescCnt
== 0);
1340 assert(txDmaState
== dmaIdle
);
1346 DPRINTF(Ethernet
, "receive reset\n");
1349 assert(rxPktBytes
== 0);
1352 assert(rxDescCnt
== 0);
1353 assert(rxDmaState
== dmaIdle
);
1361 memset(®s
, 0, sizeof(regs
));
1362 regs
.config
= CFGR_LNKSTS
;
1364 regs
.txcfg
= 0x120; // set drain threshold to 1024 bytes and
1365 // fill threshold to 32 bytes
1366 regs
.rxcfg
= 0x4; // set drain threshold to 16 bytes
1367 regs
.srr
= 0x0103; // set the silicon revision to rev B or 0x103
1368 regs
.mibc
= MIBC_FRZ
;
1369 regs
.vdr
= 0x81; // set the vlan tag type to 802.1q
1370 regs
.tesr
= 0xc000; // TBI capable of both full and half duplex
1372 extstsEnable
= false;
1373 acceptBroadcast
= false;
1374 acceptMulticast
= false;
1375 acceptUnicast
= false;
1376 acceptPerfect
= false;
1381 NSGigE::rxDmaReadCopy()
1383 assert(rxDmaState
== dmaReading
);
1385 physmem
->dma_read((uint8_t *)rxDmaData
, rxDmaAddr
, rxDmaLen
);
1386 rxDmaState
= dmaIdle
;
1388 DPRINTF(EthernetDMA
, "rx dma read paddr=%#x len=%d\n",
1389 rxDmaAddr
, rxDmaLen
);
1390 DDUMP(EthernetDMA
, rxDmaData
, rxDmaLen
);
1394 NSGigE::doRxDmaRead()
1396 assert(rxDmaState
== dmaIdle
|| rxDmaState
== dmaReadWaiting
);
1397 rxDmaState
= dmaReading
;
1399 if (dmaInterface
&& !rxDmaFree
) {
1400 if (dmaInterface
->busy())
1401 rxDmaState
= dmaReadWaiting
;
1403 dmaInterface
->doDMA(Read
, rxDmaAddr
, rxDmaLen
, curTick
,
1404 &rxDmaReadEvent
, true);
1408 if (dmaReadDelay
== 0 && dmaReadFactor
== 0) {
1413 Tick factor
= ((rxDmaLen
+ ULL(63)) >> ULL(6)) * dmaReadFactor
;
1414 Tick start
= curTick
+ dmaReadDelay
+ factor
;
1415 rxDmaReadEvent
.schedule(start
);
1420 NSGigE::rxDmaReadDone()
1422 assert(rxDmaState
== dmaReading
);
1425 // If the transmit state machine has a pending DMA, let it go first
1426 if (txDmaState
== dmaReadWaiting
|| txDmaState
== dmaWriteWaiting
)
1433 NSGigE::rxDmaWriteCopy()
1435 assert(rxDmaState
== dmaWriting
);
1437 physmem
->dma_write(rxDmaAddr
, (uint8_t *)rxDmaData
, rxDmaLen
);
1438 rxDmaState
= dmaIdle
;
1440 DPRINTF(EthernetDMA
, "rx dma write paddr=%#x len=%d\n",
1441 rxDmaAddr
, rxDmaLen
);
1442 DDUMP(EthernetDMA
, rxDmaData
, rxDmaLen
);
1446 NSGigE::doRxDmaWrite()
1448 assert(rxDmaState
== dmaIdle
|| rxDmaState
== dmaWriteWaiting
);
1449 rxDmaState
= dmaWriting
;
1451 if (dmaInterface
&& !rxDmaFree
) {
1452 if (dmaInterface
->busy())
1453 rxDmaState
= dmaWriteWaiting
;
1455 dmaInterface
->doDMA(WriteInvalidate
, rxDmaAddr
, rxDmaLen
, curTick
,
1456 &rxDmaWriteEvent
, true);
1460 if (dmaWriteDelay
== 0 && dmaWriteFactor
== 0) {
1465 Tick factor
= ((rxDmaLen
+ ULL(63)) >> ULL(6)) * dmaWriteFactor
;
1466 Tick start
= curTick
+ dmaWriteDelay
+ factor
;
1467 rxDmaWriteEvent
.schedule(start
);
1472 NSGigE::rxDmaWriteDone()
1474 assert(rxDmaState
== dmaWriting
);
1477 // If the transmit state machine has a pending DMA, let it go first
1478 if (txDmaState
== dmaReadWaiting
|| txDmaState
== dmaWriteWaiting
)
1487 DPRINTF(EthernetSM
, "receive kick rxState=%s (rxBuf.size=%d)\n",
1488 NsRxStateStrings
[rxState
], rxFifo
.size());
1490 if (rxKickTick
> curTick
) {
1491 DPRINTF(EthernetSM
, "receive kick exiting, can't run till %d\n",
1497 switch(rxDmaState
) {
1498 case dmaReadWaiting
:
1502 case dmaWriteWaiting
:
1510 // see state machine from spec for details
1511 // the way this works is, if you finish work on one state and can
1512 // go directly to another, you do that through jumping to the
1513 // label "next". however, if you have intermediate work, like DMA
1514 // so that you can't go to the next state yet, you go to exit and
1515 // exit the loop. however, when the DMA is done it will trigger
1516 // an event and come back to this loop.
1520 DPRINTF(EthernetSM
, "Receive Disabled! Nothing to do.\n");
1525 rxState
= rxDescRefr
;
1527 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1528 rxDmaData
= &rxDescCache
+ offsetof(ns_desc
, link
);
1529 rxDmaLen
= sizeof(rxDescCache
.link
);
1530 rxDmaFree
= dmaDescFree
;
1533 descDmaRdBytes
+= rxDmaLen
;
1538 rxState
= rxDescRead
;
1540 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1541 rxDmaData
= &rxDescCache
;
1542 rxDmaLen
= sizeof(ns_desc
);
1543 rxDmaFree
= dmaDescFree
;
1546 descDmaRdBytes
+= rxDmaLen
;
1554 if (rxDmaState
!= dmaIdle
)
1557 rxState
= rxAdvance
;
1561 if (rxDmaState
!= dmaIdle
)
1564 DPRINTF(EthernetDesc
,
1565 "rxDescCache: addr=%08x read descriptor\n",
1566 regs
.rxdp
& 0x3fffffff);
1567 DPRINTF(EthernetDesc
,
1568 "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
1569 rxDescCache
.link
, rxDescCache
.bufptr
, rxDescCache
.cmdsts
,
1570 rxDescCache
.extsts
);
1572 if (rxDescCache
.cmdsts
& CMDSTS_OWN
) {
1573 devIntrPost(ISR_RXIDLE
);
1577 rxState
= rxFifoBlock
;
1578 rxFragPtr
= rxDescCache
.bufptr
;
1579 rxDescCnt
= rxDescCache
.cmdsts
& CMDSTS_LEN_MASK
;
1586 * @todo in reality, we should be able to start processing
1587 * the packet as it arrives, and not have to wait for the
1588 * full packet to be in the receive fifo.
1593 DPRINTF(EthernetSM
, "****processing receive of new packet****\n");
1595 // If we don't have a packet, grab a new one from the fifo.
1596 rxPacket
= rxFifo
.front();
1597 rxPktBytes
= rxPacket
->length
;
1598 rxPacketBufPtr
= rxPacket
->data
;
1601 if (DTRACE(Ethernet
)) {
1604 DPRINTF(Ethernet
, "ID is %d\n", ip
->id());
1608 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1609 tcp
->sport(), tcp
->dport(), tcp
->seq(),
1616 // sanity check - i think the driver behaves like this
1617 assert(rxDescCnt
>= rxPktBytes
);
1622 // don't need the && rxDescCnt > 0 if driver sanity check
1624 if (rxPktBytes
> 0) {
1625 rxState
= rxFragWrite
;
1626 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
1628 rxXferLen
= rxPktBytes
;
1630 rxDmaAddr
= rxFragPtr
& 0x3fffffff;
1631 rxDmaData
= rxPacketBufPtr
;
1632 rxDmaLen
= rxXferLen
;
1633 rxDmaFree
= dmaDataFree
;
1639 rxState
= rxDescWrite
;
1641 //if (rxPktBytes == 0) { /* packet is done */
1642 assert(rxPktBytes
== 0);
1643 DPRINTF(EthernetSM
, "done with receiving packet\n");
1645 rxDescCache
.cmdsts
|= CMDSTS_OWN
;
1646 rxDescCache
.cmdsts
&= ~CMDSTS_MORE
;
1647 rxDescCache
.cmdsts
|= CMDSTS_OK
;
1648 rxDescCache
.cmdsts
&= 0xffff0000;
1649 rxDescCache
.cmdsts
+= rxPacket
->length
; //i.e. set CMDSTS_SIZE
1653 * all the driver uses these are for its own stats keeping
1654 * which we don't care about, aren't necessary for
1655 * functionality and doing this would just slow us down.
1656 * if they end up using this in a later version for
1657 * functional purposes, just undef
1659 if (rxFilterEnable
) {
1660 rxDescCache
.cmdsts
&= ~CMDSTS_DEST_MASK
;
1661 const EthAddr
&dst
= rxFifoFront()->dst();
1663 rxDescCache
.cmdsts
|= CMDSTS_DEST_SELF
;
1664 if (dst
->multicast())
1665 rxDescCache
.cmdsts
|= CMDSTS_DEST_MULTI
;
1666 if (dst
->broadcast())
1667 rxDescCache
.cmdsts
|= CMDSTS_DEST_MASK
;
1672 if (extstsEnable
&& ip
) {
1673 rxDescCache
.extsts
|= EXTSTS_IPPKT
;
1675 if (cksum(ip
) != 0) {
1676 DPRINTF(EthernetCksum
, "Rx IP Checksum Error\n");
1677 rxDescCache
.extsts
|= EXTSTS_IPERR
;
1682 rxDescCache
.extsts
|= EXTSTS_TCPPKT
;
1684 if (cksum(tcp
) != 0) {
1685 DPRINTF(EthernetCksum
, "Rx TCP Checksum Error\n");
1686 rxDescCache
.extsts
|= EXTSTS_TCPERR
;
1690 rxDescCache
.extsts
|= EXTSTS_UDPPKT
;
1692 if (cksum(udp
) != 0) {
1693 DPRINTF(EthernetCksum
, "Rx UDP Checksum Error\n");
1694 rxDescCache
.extsts
|= EXTSTS_UDPERR
;
1701 * the driver seems to always receive into desc buffers
1702 * of size 1514, so you never have a pkt that is split
1703 * into multiple descriptors on the receive side, so
1704 * i don't implement that case, hence the assert above.
1707 DPRINTF(EthernetDesc
,
1708 "rxDescCache: addr=%08x writeback cmdsts extsts\n",
1709 regs
.rxdp
& 0x3fffffff);
1710 DPRINTF(EthernetDesc
,
1711 "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
1712 rxDescCache
.link
, rxDescCache
.bufptr
, rxDescCache
.cmdsts
,
1713 rxDescCache
.extsts
);
1715 rxDmaAddr
= (regs
.rxdp
+ offsetof(ns_desc
, cmdsts
)) & 0x3fffffff;
1716 rxDmaData
= &(rxDescCache
.cmdsts
);
1717 rxDmaLen
= sizeof(rxDescCache
.cmdsts
) + sizeof(rxDescCache
.extsts
);
1718 rxDmaFree
= dmaDescFree
;
1721 descDmaWrBytes
+= rxDmaLen
;
1729 if (rxDmaState
!= dmaIdle
)
1732 rxPacketBufPtr
+= rxXferLen
;
1733 rxFragPtr
+= rxXferLen
;
1734 rxPktBytes
-= rxXferLen
;
1736 rxState
= rxFifoBlock
;
1740 if (rxDmaState
!= dmaIdle
)
1743 assert(rxDescCache
.cmdsts
& CMDSTS_OWN
);
1745 assert(rxPacket
== 0);
1746 devIntrPost(ISR_RXOK
);
1748 if (rxDescCache
.cmdsts
& CMDSTS_INTR
)
1749 devIntrPost(ISR_RXDESC
);
1752 DPRINTF(EthernetSM
, "Halting the RX state machine\n");
1756 rxState
= rxAdvance
;
1760 if (rxDescCache
.link
== 0) {
1761 devIntrPost(ISR_RXIDLE
);
1766 rxState
= rxDescRead
;
1767 regs
.rxdp
= rxDescCache
.link
;
1770 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1771 rxDmaData
= &rxDescCache
;
1772 rxDmaLen
= sizeof(ns_desc
);
1773 rxDmaFree
= dmaDescFree
;
1781 panic("Invalid rxState!");
1784 DPRINTF(EthernetSM
, "entering next rxState=%s\n",
1785 NsRxStateStrings
[rxState
]);
1791 * @todo do we want to schedule a future kick?
1793 DPRINTF(EthernetSM
, "rx state machine exited rxState=%s\n",
1794 NsRxStateStrings
[rxState
]);
1800 if (txFifo
.empty()) {
1801 DPRINTF(Ethernet
, "nothing to transmit\n");
1805 DPRINTF(Ethernet
, "Attempt Pkt Transmit: txFifo length=%d\n",
1807 if (interface
->sendPacket(txFifo
.front())) {
1809 if (DTRACE(Ethernet
)) {
1810 IpPtr
ip(txFifo
.front());
1812 DPRINTF(Ethernet
, "ID is %d\n", ip
->id());
1816 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1817 tcp
->sport(), tcp
->dport(), tcp
->seq(), tcp
->ack());
1823 DDUMP(EthernetData
, txFifo
.front()->data
, txFifo
.front()->length
);
1824 txBytes
+= txFifo
.front()->length
;
1827 DPRINTF(Ethernet
, "Successful Xmit! now txFifoAvail is %d\n",
1832 * normally do a writeback of the descriptor here, and ONLY
1833 * after that is done, send this interrupt. but since our
1834 * stuff never actually fails, just do this interrupt here,
1835 * otherwise the code has to stray from this nice format.
1836 * besides, it's functionally the same.
1838 devIntrPost(ISR_TXOK
);
1841 if (!txFifo
.empty() && !txEvent
.scheduled()) {
1842 DPRINTF(Ethernet
, "reschedule transmit\n");
1843 txEvent
.schedule(curTick
+ retryTime
);
1848 NSGigE::txDmaReadCopy()
1850 assert(txDmaState
== dmaReading
);
1852 physmem
->dma_read((uint8_t *)txDmaData
, txDmaAddr
, txDmaLen
);
1853 txDmaState
= dmaIdle
;
1855 DPRINTF(EthernetDMA
, "tx dma read paddr=%#x len=%d\n",
1856 txDmaAddr
, txDmaLen
);
1857 DDUMP(EthernetDMA
, txDmaData
, txDmaLen
);
1861 NSGigE::doTxDmaRead()
1863 assert(txDmaState
== dmaIdle
|| txDmaState
== dmaReadWaiting
);
1864 txDmaState
= dmaReading
;
1866 if (dmaInterface
&& !txDmaFree
) {
1867 if (dmaInterface
->busy())
1868 txDmaState
= dmaReadWaiting
;
1870 dmaInterface
->doDMA(Read
, txDmaAddr
, txDmaLen
, curTick
,
1871 &txDmaReadEvent
, true);
1875 if (dmaReadDelay
== 0 && dmaReadFactor
== 0.0) {
1880 Tick factor
= ((txDmaLen
+ ULL(63)) >> ULL(6)) * dmaReadFactor
;
1881 Tick start
= curTick
+ dmaReadDelay
+ factor
;
1882 txDmaReadEvent
.schedule(start
);
1887 NSGigE::txDmaReadDone()
1889 assert(txDmaState
== dmaReading
);
1892 // If the receive state machine has a pending DMA, let it go first
1893 if (rxDmaState
== dmaReadWaiting
|| rxDmaState
== dmaWriteWaiting
)
1900 NSGigE::txDmaWriteCopy()
1902 assert(txDmaState
== dmaWriting
);
1904 physmem
->dma_write(txDmaAddr
, (uint8_t *)txDmaData
, txDmaLen
);
1905 txDmaState
= dmaIdle
;
1907 DPRINTF(EthernetDMA
, "tx dma write paddr=%#x len=%d\n",
1908 txDmaAddr
, txDmaLen
);
1909 DDUMP(EthernetDMA
, txDmaData
, txDmaLen
);
1913 NSGigE::doTxDmaWrite()
1915 assert(txDmaState
== dmaIdle
|| txDmaState
== dmaWriteWaiting
);
1916 txDmaState
= dmaWriting
;
1918 if (dmaInterface
&& !txDmaFree
) {
1919 if (dmaInterface
->busy())
1920 txDmaState
= dmaWriteWaiting
;
1922 dmaInterface
->doDMA(WriteInvalidate
, txDmaAddr
, txDmaLen
, curTick
,
1923 &txDmaWriteEvent
, true);
1927 if (dmaWriteDelay
== 0 && dmaWriteFactor
== 0.0) {
1932 Tick factor
= ((txDmaLen
+ ULL(63)) >> ULL(6)) * dmaWriteFactor
;
1933 Tick start
= curTick
+ dmaWriteDelay
+ factor
;
1934 txDmaWriteEvent
.schedule(start
);
1939 NSGigE::txDmaWriteDone()
1941 assert(txDmaState
== dmaWriting
);
1944 // If the receive state machine has a pending DMA, let it go first
1945 if (rxDmaState
== dmaReadWaiting
|| rxDmaState
== dmaWriteWaiting
)
1954 DPRINTF(EthernetSM
, "transmit kick txState=%s\n",
1955 NsTxStateStrings
[txState
]);
1957 if (txKickTick
> curTick
) {
1958 DPRINTF(EthernetSM
, "transmit kick exiting, can't run till %d\n",
1965 switch(txDmaState
) {
1966 case dmaReadWaiting
:
1970 case dmaWriteWaiting
:
1981 DPRINTF(EthernetSM
, "Transmit disabled. Nothing to do.\n");
1986 txState
= txDescRefr
;
1988 txDmaAddr
= regs
.txdp
& 0x3fffffff;
1989 txDmaData
= &txDescCache
+ offsetof(ns_desc
, link
);
1990 txDmaLen
= sizeof(txDescCache
.link
);
1991 txDmaFree
= dmaDescFree
;
1994 descDmaRdBytes
+= txDmaLen
;
2000 txState
= txDescRead
;
2002 txDmaAddr
= regs
.txdp
& 0x3fffffff;
2003 txDmaData
= &txDescCache
;
2004 txDmaLen
= sizeof(ns_desc
);
2005 txDmaFree
= dmaDescFree
;
2008 descDmaRdBytes
+= txDmaLen
;
2016 if (txDmaState
!= dmaIdle
)
2019 txState
= txAdvance
;
2023 if (txDmaState
!= dmaIdle
)
2026 DPRINTF(EthernetDesc
,
2027 "txDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
2028 txDescCache
.link
, txDescCache
.bufptr
, txDescCache
.cmdsts
,
2029 txDescCache
.extsts
);
2031 if (txDescCache
.cmdsts
& CMDSTS_OWN
) {
2032 txState
= txFifoBlock
;
2033 txFragPtr
= txDescCache
.bufptr
;
2034 txDescCnt
= txDescCache
.cmdsts
& CMDSTS_LEN_MASK
;
2036 devIntrPost(ISR_TXIDLE
);
2044 DPRINTF(EthernetSM
, "****starting the tx of a new packet****\n");
2045 txPacket
= new PacketData(16384);
2046 txPacketBufPtr
= txPacket
->data
;
2049 if (txDescCnt
== 0) {
2050 DPRINTF(EthernetSM
, "the txDescCnt == 0, done with descriptor\n");
2051 if (txDescCache
.cmdsts
& CMDSTS_MORE
) {
2052 DPRINTF(EthernetSM
, "there are more descriptors to come\n");
2053 txState
= txDescWrite
;
2055 txDescCache
.cmdsts
&= ~CMDSTS_OWN
;
2057 txDmaAddr
= regs
.txdp
+ offsetof(ns_desc
, cmdsts
);
2058 txDmaAddr
&= 0x3fffffff;
2059 txDmaData
= &(txDescCache
.cmdsts
);
2060 txDmaLen
= sizeof(txDescCache
.cmdsts
);
2061 txDmaFree
= dmaDescFree
;
2066 } else { /* this packet is totally done */
2067 DPRINTF(EthernetSM
, "This packet is done, let's wrap it up\n");
2068 /* deal with the packet that just finished */
2069 if ((regs
.vtcr
& VTCR_PPCHK
) && extstsEnable
) {
2071 if (txDescCache
.extsts
& EXTSTS_UDPPKT
) {
2074 udp
->sum(cksum(udp
));
2076 } else if (txDescCache
.extsts
& EXTSTS_TCPPKT
) {
2079 tcp
->sum(cksum(tcp
));
2082 if (txDescCache
.extsts
& EXTSTS_IPPKT
) {
2089 txPacket
->length
= txPacketBufPtr
- txPacket
->data
;
2090 // this is just because the receive side can't handle a packet
2091 // bigger than 1514 bytes, so make sure we never send one
2092 assert(txPacket
->length
<= 1514);
2096 txFifo
.push(txPacket
);
2100 * the following section is not to spec, but
2101 * functionally shouldn't be any different. normally,
2102 * the chip will wait til the transmit has occurred
2103 * before writing back the descriptor because it has
2104 * to wait to see that it was successfully transmitted
2105 * to decide whether to set CMDSTS_OK or not.
2106 * however, in the simulator since it is always
2107 * successfully transmitted, and writing it exactly to
2108 * spec would complicate the code, we just do it here
2111 txDescCache
.cmdsts
&= ~CMDSTS_OWN
;
2112 txDescCache
.cmdsts
|= CMDSTS_OK
;
2114 DPRINTF(EthernetDesc
,
2115 "txDesc writeback: cmdsts=%08x extsts=%08x\n",
2116 txDescCache
.cmdsts
, txDescCache
.extsts
);
2118 txDmaAddr
= regs
.txdp
+ offsetof(ns_desc
, cmdsts
);
2119 txDmaAddr
&= 0x3fffffff;
2120 txDmaData
= &(txDescCache
.cmdsts
);
2121 txDmaLen
= sizeof(txDescCache
.cmdsts
) +
2122 sizeof(txDescCache
.extsts
);
2123 txDmaFree
= dmaDescFree
;
2126 descDmaWrBytes
+= txDmaLen
;
2132 DPRINTF(EthernetSM
, "halting TX state machine\n");
2136 txState
= txAdvance
;
2142 DPRINTF(EthernetSM
, "this descriptor isn't done yet\n");
2143 if (!txFifo
.full()) {
2144 txState
= txFragRead
;
2147 * The number of bytes transferred is either whatever
2148 * is left in the descriptor (txDescCnt), or if there
2149 * is not enough room in the fifo, just whatever room
2150 * is left in the fifo
2152 txXferLen
= min
<uint32_t>(txDescCnt
, txFifo
.avail());
2154 txDmaAddr
= txFragPtr
& 0x3fffffff;
2155 txDmaData
= txPacketBufPtr
;
2156 txDmaLen
= txXferLen
;
2157 txDmaFree
= dmaDataFree
;
2162 txState
= txFifoBlock
;
2172 if (txDmaState
!= dmaIdle
)
2175 txPacketBufPtr
+= txXferLen
;
2176 txFragPtr
+= txXferLen
;
2177 txDescCnt
-= txXferLen
;
2178 txFifo
.reserve(txXferLen
);
2180 txState
= txFifoBlock
;
2184 if (txDmaState
!= dmaIdle
)
2187 if (txDescCache
.cmdsts
& CMDSTS_INTR
)
2188 devIntrPost(ISR_TXDESC
);
2190 txState
= txAdvance
;
2194 if (txDescCache
.link
== 0) {
2195 devIntrPost(ISR_TXIDLE
);
2199 txState
= txDescRead
;
2200 regs
.txdp
= txDescCache
.link
;
2203 txDmaAddr
= txDescCache
.link
& 0x3fffffff;
2204 txDmaData
= &txDescCache
;
2205 txDmaLen
= sizeof(ns_desc
);
2206 txDmaFree
= dmaDescFree
;
2214 panic("invalid state");
2217 DPRINTF(EthernetSM
, "entering next txState=%s\n",
2218 NsTxStateStrings
[txState
]);
2224 * @todo do we want to schedule a future kick?
2226 DPRINTF(EthernetSM
, "tx state machine exited txState=%s\n",
2227 NsTxStateStrings
[txState
]);
2231 NSGigE::transferDone()
2233 if (txFifo
.empty()) {
2234 DPRINTF(Ethernet
, "transfer complete: txFifo empty...nothing to do\n");
2238 DPRINTF(Ethernet
, "transfer complete: data in txFifo...schedule xmit\n");
2240 if (txEvent
.scheduled())
2241 txEvent
.reschedule(curTick
+ cycles(1));
2243 txEvent
.schedule(curTick
+ cycles(1));
2247 NSGigE::rxFilter(const PacketPtr
&packet
)
2249 EthPtr eth
= packet
;
2253 const EthAddr
&dst
= eth
->dst();
2254 if (dst
.unicast()) {
2255 // If we're accepting all unicast addresses
2259 // If we make a perfect match
2260 if (acceptPerfect
&& dst
== rom
.perfectMatch
)
2263 if (acceptArp
&& eth
->type() == ETH_TYPE_ARP
)
2266 } else if (dst
.broadcast()) {
2267 // if we're accepting broadcasts
2268 if (acceptBroadcast
)
2271 } else if (dst
.multicast()) {
2272 // if we're accepting all multicasts
2273 if (acceptMulticast
)
2279 DPRINTF(Ethernet
, "rxFilter drop\n");
2280 DDUMP(EthernetData
, packet
->data
, packet
->length
);
2287 NSGigE::recvPacket(PacketPtr packet
)
2289 rxBytes
+= packet
->length
;
2292 DPRINTF(Ethernet
, "Receiving packet from wire, rxFifoAvail=%d\n",
2296 DPRINTF(Ethernet
, "receive disabled...packet dropped\n");
2298 interface
->recvDone();
2302 if (rxFilterEnable
&& rxFilter(packet
)) {
2303 DPRINTF(Ethernet
, "packet filtered...dropped\n");
2304 interface
->recvDone();
2308 if (rxFifo
.avail() < packet
->length
) {
2314 "packet won't fit in receive buffer...pkt ID %d dropped\n",
2317 DPRINTF(Ethernet
, "Seq=%d\n", tcp
->seq());
2322 devIntrPost(ISR_RXORN
);
2326 rxFifo
.push(packet
);
2327 interface
->recvDone();
2333 //=====================================================================
2337 NSGigE::serialize(ostream
&os
)
2339 // Serialize the PciDev base class
2340 PciDev::serialize(os
);
2343 * Finalize any DMA events now.
2345 if (rxDmaReadEvent
.scheduled())
2347 if (rxDmaWriteEvent
.scheduled())
2349 if (txDmaReadEvent
.scheduled())
2351 if (txDmaWriteEvent
.scheduled())
2355 * Serialize the device registers
2357 SERIALIZE_SCALAR(regs
.command
);
2358 SERIALIZE_SCALAR(regs
.config
);
2359 SERIALIZE_SCALAR(regs
.mear
);
2360 SERIALIZE_SCALAR(regs
.ptscr
);
2361 SERIALIZE_SCALAR(regs
.isr
);
2362 SERIALIZE_SCALAR(regs
.imr
);
2363 SERIALIZE_SCALAR(regs
.ier
);
2364 SERIALIZE_SCALAR(regs
.ihr
);
2365 SERIALIZE_SCALAR(regs
.txdp
);
2366 SERIALIZE_SCALAR(regs
.txdp_hi
);
2367 SERIALIZE_SCALAR(regs
.txcfg
);
2368 SERIALIZE_SCALAR(regs
.gpior
);
2369 SERIALIZE_SCALAR(regs
.rxdp
);
2370 SERIALIZE_SCALAR(regs
.rxdp_hi
);
2371 SERIALIZE_SCALAR(regs
.rxcfg
);
2372 SERIALIZE_SCALAR(regs
.pqcr
);
2373 SERIALIZE_SCALAR(regs
.wcsr
);
2374 SERIALIZE_SCALAR(regs
.pcr
);
2375 SERIALIZE_SCALAR(regs
.rfcr
);
2376 SERIALIZE_SCALAR(regs
.rfdr
);
2377 SERIALIZE_SCALAR(regs
.srr
);
2378 SERIALIZE_SCALAR(regs
.mibc
);
2379 SERIALIZE_SCALAR(regs
.vrcr
);
2380 SERIALIZE_SCALAR(regs
.vtcr
);
2381 SERIALIZE_SCALAR(regs
.vdr
);
2382 SERIALIZE_SCALAR(regs
.ccsr
);
2383 SERIALIZE_SCALAR(regs
.tbicr
);
2384 SERIALIZE_SCALAR(regs
.tbisr
);
2385 SERIALIZE_SCALAR(regs
.tanar
);
2386 SERIALIZE_SCALAR(regs
.tanlpar
);
2387 SERIALIZE_SCALAR(regs
.taner
);
2388 SERIALIZE_SCALAR(regs
.tesr
);
2390 SERIALIZE_ARRAY(rom
.perfectMatch
, ETH_ADDR_LEN
);
2392 SERIALIZE_SCALAR(ioEnable
);
2395 * Serialize the data Fifos
2397 rxFifo
.serialize("rxFifo", os
);
2398 txFifo
.serialize("txFifo", os
);
2401 * Serialize the various helper variables
2403 bool txPacketExists
= txPacket
;
2404 SERIALIZE_SCALAR(txPacketExists
);
2405 if (txPacketExists
) {
2406 txPacket
->length
= txPacketBufPtr
- txPacket
->data
;
2407 txPacket
->serialize("txPacket", os
);
2408 uint32_t txPktBufPtr
= (uint32_t) (txPacketBufPtr
- txPacket
->data
);
2409 SERIALIZE_SCALAR(txPktBufPtr
);
2412 bool rxPacketExists
= rxPacket
;
2413 SERIALIZE_SCALAR(rxPacketExists
);
2414 if (rxPacketExists
) {
2415 rxPacket
->serialize("rxPacket", os
);
2416 uint32_t rxPktBufPtr
= (uint32_t) (rxPacketBufPtr
- rxPacket
->data
);
2417 SERIALIZE_SCALAR(rxPktBufPtr
);
2420 SERIALIZE_SCALAR(txXferLen
);
2421 SERIALIZE_SCALAR(rxXferLen
);
2424 * Serialize DescCaches
2426 SERIALIZE_SCALAR(txDescCache
.link
);
2427 SERIALIZE_SCALAR(txDescCache
.bufptr
);
2428 SERIALIZE_SCALAR(txDescCache
.cmdsts
);
2429 SERIALIZE_SCALAR(txDescCache
.extsts
);
2430 SERIALIZE_SCALAR(rxDescCache
.link
);
2431 SERIALIZE_SCALAR(rxDescCache
.bufptr
);
2432 SERIALIZE_SCALAR(rxDescCache
.cmdsts
);
2433 SERIALIZE_SCALAR(rxDescCache
.extsts
);
2436 * Serialize tx state machine
2438 int txState
= this->txState
;
2439 SERIALIZE_SCALAR(txState
);
2440 SERIALIZE_SCALAR(txEnable
);
2441 SERIALIZE_SCALAR(CTDD
);
2442 SERIALIZE_SCALAR(txFragPtr
);
2443 SERIALIZE_SCALAR(txDescCnt
);
2444 int txDmaState
= this->txDmaState
;
2445 SERIALIZE_SCALAR(txDmaState
);
2448 * Serialize rx state machine
2450 int rxState
= this->rxState
;
2451 SERIALIZE_SCALAR(rxState
);
2452 SERIALIZE_SCALAR(rxEnable
);
2453 SERIALIZE_SCALAR(CRDD
);
2454 SERIALIZE_SCALAR(rxPktBytes
);
2455 SERIALIZE_SCALAR(rxFragPtr
);
2456 SERIALIZE_SCALAR(rxDescCnt
);
2457 int rxDmaState
= this->rxDmaState
;
2458 SERIALIZE_SCALAR(rxDmaState
);
2460 SERIALIZE_SCALAR(extstsEnable
);
2463 * If there's a pending transmit, store the time so we can
2464 * reschedule it later
2466 Tick transmitTick
= txEvent
.scheduled() ? txEvent
.when() - curTick
: 0;
2467 SERIALIZE_SCALAR(transmitTick
);
2470 * receive address filter settings
2472 SERIALIZE_SCALAR(rxFilterEnable
);
2473 SERIALIZE_SCALAR(acceptBroadcast
);
2474 SERIALIZE_SCALAR(acceptMulticast
);
2475 SERIALIZE_SCALAR(acceptUnicast
);
2476 SERIALIZE_SCALAR(acceptPerfect
);
2477 SERIALIZE_SCALAR(acceptArp
);
2480 * Keep track of pending interrupt status.
2482 SERIALIZE_SCALAR(intrTick
);
2483 SERIALIZE_SCALAR(cpuPendingIntr
);
2484 Tick intrEventTick
= 0;
2486 intrEventTick
= intrEvent
->when();
2487 SERIALIZE_SCALAR(intrEventTick
);
2492 NSGigE::unserialize(Checkpoint
*cp
, const std::string
§ion
)
2494 // Unserialize the PciDev base class
2495 PciDev::unserialize(cp
, section
);
2497 UNSERIALIZE_SCALAR(regs
.command
);
2498 UNSERIALIZE_SCALAR(regs
.config
);
2499 UNSERIALIZE_SCALAR(regs
.mear
);
2500 UNSERIALIZE_SCALAR(regs
.ptscr
);
2501 UNSERIALIZE_SCALAR(regs
.isr
);
2502 UNSERIALIZE_SCALAR(regs
.imr
);
2503 UNSERIALIZE_SCALAR(regs
.ier
);
2504 UNSERIALIZE_SCALAR(regs
.ihr
);
2505 UNSERIALIZE_SCALAR(regs
.txdp
);
2506 UNSERIALIZE_SCALAR(regs
.txdp_hi
);
2507 UNSERIALIZE_SCALAR(regs
.txcfg
);
2508 UNSERIALIZE_SCALAR(regs
.gpior
);
2509 UNSERIALIZE_SCALAR(regs
.rxdp
);
2510 UNSERIALIZE_SCALAR(regs
.rxdp_hi
);
2511 UNSERIALIZE_SCALAR(regs
.rxcfg
);
2512 UNSERIALIZE_SCALAR(regs
.pqcr
);
2513 UNSERIALIZE_SCALAR(regs
.wcsr
);
2514 UNSERIALIZE_SCALAR(regs
.pcr
);
2515 UNSERIALIZE_SCALAR(regs
.rfcr
);
2516 UNSERIALIZE_SCALAR(regs
.rfdr
);
2517 UNSERIALIZE_SCALAR(regs
.srr
);
2518 UNSERIALIZE_SCALAR(regs
.mibc
);
2519 UNSERIALIZE_SCALAR(regs
.vrcr
);
2520 UNSERIALIZE_SCALAR(regs
.vtcr
);
2521 UNSERIALIZE_SCALAR(regs
.vdr
);
2522 UNSERIALIZE_SCALAR(regs
.ccsr
);
2523 UNSERIALIZE_SCALAR(regs
.tbicr
);
2524 UNSERIALIZE_SCALAR(regs
.tbisr
);
2525 UNSERIALIZE_SCALAR(regs
.tanar
);
2526 UNSERIALIZE_SCALAR(regs
.tanlpar
);
2527 UNSERIALIZE_SCALAR(regs
.taner
);
2528 UNSERIALIZE_SCALAR(regs
.tesr
);
2530 UNSERIALIZE_ARRAY(rom
.perfectMatch
, ETH_ADDR_LEN
);
2532 UNSERIALIZE_SCALAR(ioEnable
);
2535 * unserialize the data fifos
2537 rxFifo
.unserialize("rxFifo", cp
, section
);
2538 txFifo
.unserialize("txFifo", cp
, section
);
2541 * unserialize the various helper variables
2543 bool txPacketExists
;
2544 UNSERIALIZE_SCALAR(txPacketExists
);
2545 if (txPacketExists
) {
2546 txPacket
= new PacketData(16384);
2547 txPacket
->unserialize("txPacket", cp
, section
);
2548 uint32_t txPktBufPtr
;
2549 UNSERIALIZE_SCALAR(txPktBufPtr
);
2550 txPacketBufPtr
= (uint8_t *) txPacket
->data
+ txPktBufPtr
;
2554 bool rxPacketExists
;
2555 UNSERIALIZE_SCALAR(rxPacketExists
);
2557 if (rxPacketExists
) {
2558 rxPacket
= new PacketData(16384);
2559 rxPacket
->unserialize("rxPacket", cp
, section
);
2560 uint32_t rxPktBufPtr
;
2561 UNSERIALIZE_SCALAR(rxPktBufPtr
);
2562 rxPacketBufPtr
= (uint8_t *) rxPacket
->data
+ rxPktBufPtr
;
2566 UNSERIALIZE_SCALAR(txXferLen
);
2567 UNSERIALIZE_SCALAR(rxXferLen
);
2570 * Unserialize DescCaches
2572 UNSERIALIZE_SCALAR(txDescCache
.link
);
2573 UNSERIALIZE_SCALAR(txDescCache
.bufptr
);
2574 UNSERIALIZE_SCALAR(txDescCache
.cmdsts
);
2575 UNSERIALIZE_SCALAR(txDescCache
.extsts
);
2576 UNSERIALIZE_SCALAR(rxDescCache
.link
);
2577 UNSERIALIZE_SCALAR(rxDescCache
.bufptr
);
2578 UNSERIALIZE_SCALAR(rxDescCache
.cmdsts
);
2579 UNSERIALIZE_SCALAR(rxDescCache
.extsts
);
2582 * unserialize tx state machine
2585 UNSERIALIZE_SCALAR(txState
);
2586 this->txState
= (TxState
) txState
;
2587 UNSERIALIZE_SCALAR(txEnable
);
2588 UNSERIALIZE_SCALAR(CTDD
);
2589 UNSERIALIZE_SCALAR(txFragPtr
);
2590 UNSERIALIZE_SCALAR(txDescCnt
);
2592 UNSERIALIZE_SCALAR(txDmaState
);
2593 this->txDmaState
= (DmaState
) txDmaState
;
2596 * unserialize rx state machine
2599 UNSERIALIZE_SCALAR(rxState
);
2600 this->rxState
= (RxState
) rxState
;
2601 UNSERIALIZE_SCALAR(rxEnable
);
2602 UNSERIALIZE_SCALAR(CRDD
);
2603 UNSERIALIZE_SCALAR(rxPktBytes
);
2604 UNSERIALIZE_SCALAR(rxFragPtr
);
2605 UNSERIALIZE_SCALAR(rxDescCnt
);
2607 UNSERIALIZE_SCALAR(rxDmaState
);
2608 this->rxDmaState
= (DmaState
) rxDmaState
;
2610 UNSERIALIZE_SCALAR(extstsEnable
);
2613 * If there's a pending transmit, reschedule it now
2616 UNSERIALIZE_SCALAR(transmitTick
);
2618 txEvent
.schedule(curTick
+ transmitTick
);
2621 * unserialize receive address filter settings
2623 UNSERIALIZE_SCALAR(rxFilterEnable
);
2624 UNSERIALIZE_SCALAR(acceptBroadcast
);
2625 UNSERIALIZE_SCALAR(acceptMulticast
);
2626 UNSERIALIZE_SCALAR(acceptUnicast
);
2627 UNSERIALIZE_SCALAR(acceptPerfect
);
2628 UNSERIALIZE_SCALAR(acceptArp
);
2631 * Keep track of pending interrupt status.
2633 UNSERIALIZE_SCALAR(intrTick
);
2634 UNSERIALIZE_SCALAR(cpuPendingIntr
);
2636 UNSERIALIZE_SCALAR(intrEventTick
);
2637 if (intrEventTick
) {
2638 intrEvent
= new IntrEvent(this, true);
2639 intrEvent
->schedule(intrEventTick
);
2643 * re-add addrRanges to bus bridges
2646 pioInterface
->addAddrRange(RangeSize(BARAddrs
[0], BARSize
[0]));
2647 pioInterface
->addAddrRange(RangeSize(BARAddrs
[1], BARSize
[1]));
2652 NSGigE::cacheAccess(MemReqPtr
&req
)
2654 DPRINTF(EthernetPIO
, "timing access to paddr=%#x (daddr=%#x)\n",
2655 req
->paddr
, req
->paddr
- addr
);
2656 return curTick
+ pioLatency
;
2659 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt
)
2661 SimObjectParam
<EtherInt
*> peer
;
2662 SimObjectParam
<NSGigE
*> device
;
2664 END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt
)
2666 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt
)
2668 INIT_PARAM_DFLT(peer
, "peer interface", NULL
),
2669 INIT_PARAM(device
, "Ethernet device of this interface")
2671 END_INIT_SIM_OBJECT_PARAMS(NSGigEInt
)
2673 CREATE_SIM_OBJECT(NSGigEInt
)
2675 NSGigEInt
*dev_int
= new NSGigEInt(getInstanceName(), device
);
2677 EtherInt
*p
= (EtherInt
*)peer
;
2679 dev_int
->setPeer(p
);
2680 p
->setPeer(dev_int
);
2686 REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt
)
2689 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE
)
2693 Param
<Tick
> tx_delay
;
2694 Param
<Tick
> rx_delay
;
2695 Param
<Tick
> intr_delay
;
2696 SimObjectParam
<MemoryController
*> mmu
;
2697 SimObjectParam
<PhysicalMemory
*> physmem
;
2698 Param
<bool> rx_filter
;
2699 Param
<string
> hardware_address
;
2700 SimObjectParam
<Bus
*> io_bus
;
2701 SimObjectParam
<Bus
*> payload_bus
;
2702 SimObjectParam
<HierParams
*> hier
;
2703 Param
<Tick
> pio_latency
;
2704 Param
<bool> dma_desc_free
;
2705 Param
<bool> dma_data_free
;
2706 Param
<Tick
> dma_read_delay
;
2707 Param
<Tick
> dma_write_delay
;
2708 Param
<Tick
> dma_read_factor
;
2709 Param
<Tick
> dma_write_factor
;
2710 SimObjectParam
<PciConfigAll
*> configspace
;
2711 SimObjectParam
<PciConfigData
*> configdata
;
2712 SimObjectParam
<Platform
*> platform
;
2713 Param
<uint32_t> pci_bus
;
2714 Param
<uint32_t> pci_dev
;
2715 Param
<uint32_t> pci_func
;
2716 Param
<uint32_t> tx_fifo_size
;
2717 Param
<uint32_t> rx_fifo_size
;
2718 Param
<uint32_t> m5reg
;
2719 Param
<bool> dma_no_allocate
;
2721 END_DECLARE_SIM_OBJECT_PARAMS(NSGigE
)
2723 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE
)
2725 INIT_PARAM(addr
, "Device Address"),
2726 INIT_PARAM(clock
, "State machine processor frequency"),
2727 INIT_PARAM(tx_delay
, "Transmit Delay"),
2728 INIT_PARAM(rx_delay
, "Receive Delay"),
2729 INIT_PARAM(intr_delay
, "Interrupt Delay in microseconds"),
2730 INIT_PARAM(mmu
, "Memory Controller"),
2731 INIT_PARAM(physmem
, "Physical Memory"),
2732 INIT_PARAM_DFLT(rx_filter
, "Enable Receive Filter", true),
2733 INIT_PARAM_DFLT(hardware_address
, "Ethernet Hardware Address",
2734 "00:99:00:00:00:01"),
2735 INIT_PARAM_DFLT(io_bus
, "The IO Bus to attach to for headers", NULL
),
2736 INIT_PARAM_DFLT(payload_bus
, "The IO Bus to attach to for payload", NULL
),
2737 INIT_PARAM_DFLT(hier
, "Hierarchy global variables", &defaultHierParams
),
2738 INIT_PARAM_DFLT(pio_latency
, "Programmed IO latency in bus cycles", 1),
2739 INIT_PARAM_DFLT(dma_desc_free
, "DMA of Descriptors is free", false),
2740 INIT_PARAM_DFLT(dma_data_free
, "DMA of Data is free", false),
2741 INIT_PARAM_DFLT(dma_read_delay
, "fixed delay for dma reads", 0),
2742 INIT_PARAM_DFLT(dma_write_delay
, "fixed delay for dma writes", 0),
2743 INIT_PARAM_DFLT(dma_read_factor
, "multiplier for dma reads", 0),
2744 INIT_PARAM_DFLT(dma_write_factor
, "multiplier for dma writes", 0),
2745 INIT_PARAM(configspace
, "PCI Configspace"),
2746 INIT_PARAM(configdata
, "PCI Config data"),
2747 INIT_PARAM(platform
, "Platform"),
2748 INIT_PARAM(pci_bus
, "PCI bus"),
2749 INIT_PARAM(pci_dev
, "PCI device number"),
2750 INIT_PARAM(pci_func
, "PCI function code"),
2751 INIT_PARAM_DFLT(tx_fifo_size
, "max size in bytes of txFifo", 131072),
2752 INIT_PARAM_DFLT(rx_fifo_size
, "max size in bytes of rxFifo", 131072),
2753 INIT_PARAM(m5reg
, "m5 register"),
2754 INIT_PARAM_DFLT(dma_no_allocate
, "Should DMA reads allocate cache lines", true)
2756 END_INIT_SIM_OBJECT_PARAMS(NSGigE
)
2759 CREATE_SIM_OBJECT(NSGigE
)
2761 NSGigE::Params
*params
= new NSGigE::Params
;
2763 params
->name
= getInstanceName();
2765 params
->configSpace
= configspace
;
2766 params
->configData
= configdata
;
2767 params
->plat
= platform
;
2768 params
->busNum
= pci_bus
;
2769 params
->deviceNum
= pci_dev
;
2770 params
->functionNum
= pci_func
;
2772 params
->clock
= clock
;
2773 params
->intr_delay
= intr_delay
;
2774 params
->pmem
= physmem
;
2775 params
->tx_delay
= tx_delay
;
2776 params
->rx_delay
= rx_delay
;
2777 params
->hier
= hier
;
2778 params
->header_bus
= io_bus
;
2779 params
->payload_bus
= payload_bus
;
2780 params
->pio_latency
= pio_latency
;
2781 params
->dma_desc_free
= dma_desc_free
;
2782 params
->dma_data_free
= dma_data_free
;
2783 params
->dma_read_delay
= dma_read_delay
;
2784 params
->dma_write_delay
= dma_write_delay
;
2785 params
->dma_read_factor
= dma_read_factor
;
2786 params
->dma_write_factor
= dma_write_factor
;
2787 params
->rx_filter
= rx_filter
;
2788 params
->eaddr
= hardware_address
;
2789 params
->tx_fifo_size
= tx_fifo_size
;
2790 params
->rx_fifo_size
= rx_fifo_size
;
2791 params
->m5reg
= m5reg
;
2792 params
->dma_no_allocate
= dma_no_allocate
;
2793 return new NSGigE(params
);
2796 REGISTER_SIM_OBJECT("NSGigE", NSGigE
)