2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 * Device module for modelling the National Semiconductor
31 * DP83820 ethernet controller. Does not support priority queueing
37 #include "base/inet.hh"
38 #include "cpu/exec_context.hh"
39 #include "dev/etherlink.hh"
40 #include "dev/ns_gige.hh"
41 #include "dev/pciconfigall.hh"
42 #include "mem/bus/bus.hh"
43 #include "mem/bus/dma_interface.hh"
44 #include "mem/bus/pio_interface.hh"
45 #include "mem/bus/pio_interface_impl.hh"
46 #include "mem/functional/memory_control.hh"
47 #include "mem/functional/physical.hh"
48 #include "sim/builder.hh"
49 #include "sim/debug.hh"
50 #include "sim/host.hh"
51 #include "sim/stats.hh"
52 #include "targetarch/vtophys.hh"
54 const char *NsRxStateStrings
[] =
65 const char *NsTxStateStrings
[] =
76 const char *NsDmaState
[] =
88 ///////////////////////////////////////////////////////////////////////
92 NSGigE::NSGigE(Params
*p
)
93 : PciDev(p
), ioEnable(false),
94 txFifo(p
->tx_fifo_size
), rxFifo(p
->rx_fifo_size
),
95 txPacket(0), rxPacket(0), txPacketBufPtr(NULL
), rxPacketBufPtr(NULL
),
96 txXferLen(0), rxXferLen(0), clock(p
->clock
),
97 txState(txIdle
), txEnable(false), CTDD(false),
98 txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle
), rxState(rxIdle
),
99 rxEnable(false), CRDD(false), rxPktBytes(0),
100 rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle
), extstsEnable(false),
101 rxDmaReadEvent(this), rxDmaWriteEvent(this),
102 txDmaReadEvent(this), txDmaWriteEvent(this),
103 dmaDescFree(p
->dma_desc_free
), dmaDataFree(p
->dma_data_free
),
104 txDelay(p
->tx_delay
), rxDelay(p
->rx_delay
),
105 rxKickTick(0), txKickTick(0),
106 txEvent(this), rxFilterEnable(p
->rx_filter
), acceptBroadcast(false),
107 acceptMulticast(false), acceptUnicast(false),
108 acceptPerfect(false), acceptArp(false),
109 physmem(p
->pmem
), intrTick(0), cpuPendingIntr(false),
110 intrEvent(0), interface(0)
113 pioInterface
= newPioInterface(name(), p
->hier
,
115 &NSGigE::cacheAccess
);
117 pioLatency
= p
->pio_latency
* p
->header_bus
->clockRate
;
120 dmaInterface
= new DMAInterface
<Bus
>(name() + ".dma",
125 dmaInterface
= new DMAInterface
<Bus
>(name() + ".dma",
129 } else if (p
->payload_bus
) {
130 pioInterface
= newPioInterface(name(), p
->hier
,
131 p
->payload_bus
, this,
132 &NSGigE::cacheAccess
);
134 pioLatency
= p
->pio_latency
* p
->payload_bus
->clockRate
;
136 dmaInterface
= new DMAInterface
<Bus
>(name() + ".dma",
143 intrDelay
= p
->intr_delay
;
144 dmaReadDelay
= p
->dma_read_delay
;
145 dmaWriteDelay
= p
->dma_write_delay
;
146 dmaReadFactor
= p
->dma_read_factor
;
147 dmaWriteFactor
= p
->dma_write_factor
;
150 memcpy(&rom
.perfectMatch
, p
->eaddr
.bytes(), ETH_ADDR_LEN
);
160 .name(name() + ".txBytes")
161 .desc("Bytes Transmitted")
166 .name(name() + ".rxBytes")
167 .desc("Bytes Received")
172 .name(name() + ".txPackets")
173 .desc("Number of Packets Transmitted")
178 .name(name() + ".rxPackets")
179 .desc("Number of Packets Received")
184 .name(name() + ".txIpChecksums")
185 .desc("Number of tx IP Checksums done by device")
191 .name(name() + ".rxIpChecksums")
192 .desc("Number of rx IP Checksums done by device")
198 .name(name() + ".txTcpChecksums")
199 .desc("Number of tx TCP Checksums done by device")
205 .name(name() + ".rxTcpChecksums")
206 .desc("Number of rx TCP Checksums done by device")
212 .name(name() + ".txUdpChecksums")
213 .desc("Number of tx UDP Checksums done by device")
219 .name(name() + ".rxUdpChecksums")
220 .desc("Number of rx UDP Checksums done by device")
226 .name(name() + ".descDMAReads")
227 .desc("Number of descriptors the device read w/ DMA")
232 .name(name() + ".descDMAWrites")
233 .desc("Number of descriptors the device wrote w/ DMA")
238 .name(name() + ".descDmaReadBytes")
239 .desc("number of descriptor bytes read w/ DMA")
244 .name(name() + ".descDmaWriteBytes")
245 .desc("number of descriptor bytes write w/ DMA")
250 .name(name() + ".txBandwidth")
251 .desc("Transmit Bandwidth (bits/s)")
257 .name(name() + ".rxBandwidth")
258 .desc("Receive Bandwidth (bits/s)")
264 .name(name() + ".totBandwidth")
265 .desc("Total Bandwidth (bits/s)")
271 .name(name() + ".totPackets")
272 .desc("Total Packets")
278 .name(name() + ".totBytes")
285 .name(name() + ".totPPS")
286 .desc("Total Tranmission Rate (packets/s)")
292 .name(name() + ".txPPS")
293 .desc("Packet Tranmission Rate (packets/s)")
299 .name(name() + ".rxPPS")
300 .desc("Packet Reception Rate (packets/s)")
306 .name(name() + ".postedSwi")
307 .desc("number of software interrupts posted to CPU")
312 .name(name() + ".totalSwi")
313 .desc("number of total Swi written to ISR")
318 .name(name() + ".coalescedSwi")
319 .desc("average number of Swi's coalesced into each post")
324 .name(name() + ".postedRxIdle")
325 .desc("number of rxIdle interrupts posted to CPU")
330 .name(name() + ".totalRxIdle")
331 .desc("number of total RxIdle written to ISR")
336 .name(name() + ".coalescedRxIdle")
337 .desc("average number of RxIdle's coalesced into each post")
342 .name(name() + ".postedRxOk")
343 .desc("number of RxOk interrupts posted to CPU")
348 .name(name() + ".totalRxOk")
349 .desc("number of total RxOk written to ISR")
354 .name(name() + ".coalescedRxOk")
355 .desc("average number of RxOk's coalesced into each post")
360 .name(name() + ".postedRxDesc")
361 .desc("number of RxDesc interrupts posted to CPU")
366 .name(name() + ".totalRxDesc")
367 .desc("number of total RxDesc written to ISR")
372 .name(name() + ".coalescedRxDesc")
373 .desc("average number of RxDesc's coalesced into each post")
378 .name(name() + ".postedTxOk")
379 .desc("number of TxOk interrupts posted to CPU")
384 .name(name() + ".totalTxOk")
385 .desc("number of total TxOk written to ISR")
390 .name(name() + ".coalescedTxOk")
391 .desc("average number of TxOk's coalesced into each post")
396 .name(name() + ".postedTxIdle")
397 .desc("number of TxIdle interrupts posted to CPU")
402 .name(name() + ".totalTxIdle")
403 .desc("number of total TxIdle written to ISR")
408 .name(name() + ".coalescedTxIdle")
409 .desc("average number of TxIdle's coalesced into each post")
414 .name(name() + ".postedTxDesc")
415 .desc("number of TxDesc interrupts posted to CPU")
420 .name(name() + ".totalTxDesc")
421 .desc("number of total TxDesc written to ISR")
426 .name(name() + ".coalescedTxDesc")
427 .desc("average number of TxDesc's coalesced into each post")
432 .name(name() + ".postedRxOrn")
433 .desc("number of RxOrn posted to CPU")
438 .name(name() + ".totalRxOrn")
439 .desc("number of total RxOrn written to ISR")
444 .name(name() + ".coalescedRxOrn")
445 .desc("average number of RxOrn's coalesced into each post")
450 .name(name() + ".coalescedTotal")
451 .desc("average number of interrupts coalesced into each post")
456 .name(name() + ".postedInterrupts")
457 .desc("number of posts to CPU")
462 .name(name() + ".droppedPackets")
463 .desc("number of packets dropped")
467 coalescedSwi
= totalSwi
/ postedInterrupts
;
468 coalescedRxIdle
= totalRxIdle
/ postedInterrupts
;
469 coalescedRxOk
= totalRxOk
/ postedInterrupts
;
470 coalescedRxDesc
= totalRxDesc
/ postedInterrupts
;
471 coalescedTxOk
= totalTxOk
/ postedInterrupts
;
472 coalescedTxIdle
= totalTxIdle
/ postedInterrupts
;
473 coalescedTxDesc
= totalTxDesc
/ postedInterrupts
;
474 coalescedRxOrn
= totalRxOrn
/ postedInterrupts
;
476 coalescedTotal
= (totalSwi
+ totalRxIdle
+ totalRxOk
+ totalRxDesc
+ totalTxOk
477 + totalTxIdle
+ totalTxDesc
+ totalRxOrn
) / postedInterrupts
;
479 txBandwidth
= txBytes
* Stats::constant(8) / simSeconds
;
480 rxBandwidth
= rxBytes
* Stats::constant(8) / simSeconds
;
481 totBandwidth
= txBandwidth
+ rxBandwidth
;
482 totBytes
= txBytes
+ rxBytes
;
483 totPackets
= txPackets
+ rxPackets
;
485 txPacketRate
= txPackets
/ simSeconds
;
486 rxPacketRate
= rxPackets
/ simSeconds
;
490 * This is to read the PCI general configuration registers
493 NSGigE::ReadConfig(int offset
, int size
, uint8_t *data
)
495 if (offset
< PCI_DEVICE_SPECIFIC
)
496 PciDev::ReadConfig(offset
, size
, data
);
498 panic("Device specific PCI config space not implemented!\n");
502 * This is to write to the PCI general configuration registers
505 NSGigE::WriteConfig(int offset
, int size
, uint32_t data
)
507 if (offset
< PCI_DEVICE_SPECIFIC
)
508 PciDev::WriteConfig(offset
, size
, data
);
510 panic("Device specific PCI config space not implemented!\n");
512 // Need to catch writes to BARs to update the PIO interface
514 // seems to work fine without all these PCI settings, but i
515 // put in the IO to double check, an assertion will fail if we
516 // need to properly implement it
518 if (config
.data
[offset
] & PCI_CMD_IOSE
)
524 if (config
.data
[offset
] & PCI_CMD_BME
) {
531 if (config
.data
[offset
] & PCI_CMD_MSE
) {
540 case PCI0_BASE_ADDR0
:
541 if (BARAddrs
[0] != 0) {
543 pioInterface
->addAddrRange(RangeSize(BARAddrs
[0], BARSize
[0]));
545 BARAddrs
[0] &= EV5::PAddrUncachedMask
;
548 case PCI0_BASE_ADDR1
:
549 if (BARAddrs
[1] != 0) {
551 pioInterface
->addAddrRange(RangeSize(BARAddrs
[1], BARSize
[1]));
553 BARAddrs
[1] &= EV5::PAddrUncachedMask
;
560 * This reads the device registers, which are detailed in the NS83820
564 NSGigE::read(MemReqPtr
&req
, uint8_t *data
)
568 //The mask is to give you only the offset into the device register file
569 Addr daddr
= req
->paddr
& 0xfff;
570 DPRINTF(EthernetPIO
, "read da=%#x pa=%#x va=%#x size=%d\n",
571 daddr
, req
->paddr
, req
->vaddr
, req
->size
);
574 // there are some reserved registers, you can see ns_gige_reg.h and
575 // the spec sheet for details
576 if (daddr
> LAST
&& daddr
<= RESERVED
) {
577 panic("Accessing reserved register");
578 } else if (daddr
> RESERVED
&& daddr
<= 0x3FC) {
579 ReadConfig(daddr
& 0xff, req
->size
, data
);
581 } else if (daddr
>= MIB_START
&& daddr
<= MIB_END
) {
582 // don't implement all the MIB's. hopefully the kernel
583 // doesn't actually DEPEND upon their values
584 // MIB are just hardware stats keepers
585 uint32_t ®
= *(uint32_t *) data
;
588 } else if (daddr
> 0x3FC)
589 panic("Something is messed up!\n");
592 case sizeof(uint32_t):
594 uint32_t ®
= *(uint32_t *)data
;
599 //these are supposed to be cleared on a read
600 reg
&= ~(CR_RXD
| CR_TXD
| CR_TXR
| CR_RXR
);
617 devIntrClear(ISR_ALL
);
672 // see the spec sheet for how RFCR and RFDR work
673 // basically, you write to RFCR to tell the machine
674 // what you want to do next, then you act upon RFDR,
675 // and the device will be prepared b/c of what you
682 switch (regs
.rfcr
& RFCR_RFADDR
) {
684 reg
= rom
.perfectMatch
[1];
686 reg
+= rom
.perfectMatch
[0];
689 reg
= rom
.perfectMatch
[3] << 8;
690 reg
+= rom
.perfectMatch
[2];
693 reg
= rom
.perfectMatch
[5] << 8;
694 reg
+= rom
.perfectMatch
[4];
697 panic("reading RFDR for something other than PMATCH!\n");
698 // didn't implement other RFDR functionality b/c
699 // driver didn't use it
709 reg
&= ~(MIBC_MIBS
| MIBC_ACLR
);
753 reg
= params()->m5reg
;
757 panic("reading unimplemented register: addr=%#x", daddr
);
760 DPRINTF(EthernetPIO
, "read from %#x: data=%d data=%#x\n",
766 panic("accessing register with invalid size: addr=%#x, size=%d",
774 NSGigE::write(MemReqPtr
&req
, const uint8_t *data
)
778 Addr daddr
= req
->paddr
& 0xfff;
779 DPRINTF(EthernetPIO
, "write da=%#x pa=%#x va=%#x size=%d\n",
780 daddr
, req
->paddr
, req
->vaddr
, req
->size
);
782 if (daddr
> LAST
&& daddr
<= RESERVED
) {
783 panic("Accessing reserved register");
784 } else if (daddr
> RESERVED
&& daddr
<= 0x3FC) {
785 WriteConfig(daddr
& 0xff, req
->size
, *(uint32_t *)data
);
787 } else if (daddr
> 0x3FC)
788 panic("Something is messed up!\n");
790 if (req
->size
== sizeof(uint32_t)) {
791 uint32_t reg
= *(uint32_t *)data
;
792 DPRINTF(EthernetPIO
, "write data=%d data=%#x\n", reg
, reg
);
799 } else if (reg
& CR_TXE
) {
802 // the kernel is enabling the transmit machine
803 if (txState
== txIdle
)
809 } else if (reg
& CR_RXE
) {
812 if (rxState
== rxIdle
)
823 devIntrPost(ISR_SWI
);
834 if (reg
& CFGR_LNKSTS
||
837 reg
& CFGR_RESERVED
||
838 reg
& CFGR_T64ADDR
||
839 reg
& CFGR_PCI64_DET
)
840 panic("writing to read-only or reserved CFGR bits!\n");
842 regs
.config
|= reg
& ~(CFGR_LNKSTS
| CFGR_SPDSTS
| CFGR_DUPSTS
|
843 CFGR_RESERVED
| CFGR_T64ADDR
| CFGR_PCI64_DET
);
845 // all these #if 0's are because i don't THINK the kernel needs to
846 // have these implemented. if there is a problem relating to one of
847 // these, you may need to add functionality in.
849 if (reg
& CFGR_TBI_EN
) ;
850 if (reg
& CFGR_MODE_1000
) ;
853 if (reg
& CFGR_AUTO_1000
)
854 panic("CFGR_AUTO_1000 not implemented!\n");
857 if (reg
& CFGR_PINT_DUPSTS
||
858 reg
& CFGR_PINT_LNKSTS
||
859 reg
& CFGR_PINT_SPDSTS
)
862 if (reg
& CFGR_TMRTEST
) ;
863 if (reg
& CFGR_MRM_DIS
) ;
864 if (reg
& CFGR_MWI_DIS
) ;
866 if (reg
& CFGR_T64ADDR
)
867 panic("CFGR_T64ADDR is read only register!\n");
869 if (reg
& CFGR_PCI64_DET
)
870 panic("CFGR_PCI64_DET is read only register!\n");
872 if (reg
& CFGR_DATA64_EN
) ;
873 if (reg
& CFGR_M64ADDR
) ;
874 if (reg
& CFGR_PHY_RST
) ;
875 if (reg
& CFGR_PHY_DIS
) ;
878 if (reg
& CFGR_EXTSTS_EN
)
881 extstsEnable
= false;
884 if (reg
& CFGR_REQALG
) ;
886 if (reg
& CFGR_POW
) ;
887 if (reg
& CFGR_EXD
) ;
888 if (reg
& CFGR_PESEL
) ;
889 if (reg
& CFGR_BROM_DIS
) ;
890 if (reg
& CFGR_EXT_125
) ;
891 if (reg
& CFGR_BEM
) ;
897 // since phy is completely faked, MEAR_MD* don't matter
898 // and since the driver never uses MEAR_EE*, they don't
901 if (reg
& MEAR_EEDI
) ;
902 if (reg
& MEAR_EEDO
) ; // this one is read only
903 if (reg
& MEAR_EECLK
) ;
904 if (reg
& MEAR_EESEL
) ;
905 if (reg
& MEAR_MDIO
) ;
906 if (reg
& MEAR_MDDIR
) ;
907 if (reg
& MEAR_MDC
) ;
912 regs
.ptscr
= reg
& ~(PTSCR_RBIST_RDONLY
);
913 // these control BISTs for various parts of chip - we
914 // don't care or do just fake that the BIST is done
915 if (reg
& PTSCR_RBIST_EN
)
916 regs
.ptscr
|= PTSCR_RBIST_DONE
;
917 if (reg
& PTSCR_EEBIST_EN
)
918 regs
.ptscr
&= ~PTSCR_EEBIST_EN
;
919 if (reg
& PTSCR_EELOAD_EN
)
920 regs
.ptscr
&= ~PTSCR_EELOAD_EN
;
923 case ISR
: /* writing to the ISR has no effect */
924 panic("ISR is a read only register!\n");
937 /* not going to implement real interrupt holdoff */
941 regs
.txdp
= (reg
& 0xFFFFFFFC);
942 assert(txState
== txIdle
);
953 if (reg
& TX_CFG_CSI
) ;
954 if (reg
& TX_CFG_HBI
) ;
955 if (reg
& TX_CFG_MLB
) ;
956 if (reg
& TX_CFG_ATP
) ;
957 if (reg
& TX_CFG_ECRETRY
) {
959 * this could easily be implemented, but considering
960 * the network is just a fake pipe, wouldn't make
965 if (reg
& TX_CFG_BRST_DIS
) ;
969 /* we handle our own DMA, ignore the kernel's exhortations */
970 if (reg
& TX_CFG_MXDMA
) ;
973 // also, we currently don't care about fill/drain
974 // thresholds though this may change in the future with
975 // more realistic networks or a driver which changes it
976 // according to feedback
982 /* these just control general purpose i/o pins, don't matter */
997 if (reg
& RX_CFG_AEP
) ;
998 if (reg
& RX_CFG_ARP
) ;
999 if (reg
& RX_CFG_STRIPCRC
) ;
1000 if (reg
& RX_CFG_RX_RD
) ;
1001 if (reg
& RX_CFG_ALP
) ;
1002 if (reg
& RX_CFG_AIRL
) ;
1004 /* we handle our own DMA, ignore what kernel says about it */
1005 if (reg
& RX_CFG_MXDMA
) ;
1007 //also, we currently don't care about fill/drain thresholds
1008 //though this may change in the future with more realistic
1009 //networks or a driver which changes it according to feedback
1010 if (reg
& (RX_CFG_DRTH
| RX_CFG_DRTH0
)) ;
1015 /* there is no priority queueing used in the linux 2.6 driver */
1020 /* not going to implement wake on LAN */
1025 /* not going to implement pause control */
1032 rxFilterEnable
= (reg
& RFCR_RFEN
) ? true : false;
1033 acceptBroadcast
= (reg
& RFCR_AAB
) ? true : false;
1034 acceptMulticast
= (reg
& RFCR_AAM
) ? true : false;
1035 acceptUnicast
= (reg
& RFCR_AAU
) ? true : false;
1036 acceptPerfect
= (reg
& RFCR_APM
) ? true : false;
1037 acceptArp
= (reg
& RFCR_AARP
) ? true : false;
1040 if (reg
& RFCR_APAT
)
1041 panic("RFCR_APAT not implemented!\n");
1044 if (reg
& RFCR_MHEN
|| reg
& RFCR_UHEN
)
1045 panic("hash filtering not implemented!\n");
1048 panic("RFCR_ULM not implemented!\n");
1053 panic("the driver never writes to RFDR, something is wrong!\n");
1056 panic("the driver never uses BRAR, something is wrong!\n");
1059 panic("the driver never uses BRDR, something is wrong!\n");
1062 panic("SRR is read only register!\n");
1065 panic("the driver never uses MIBC, something is wrong!\n");
1076 panic("the driver never uses VDR, something is wrong!\n");
1080 /* not going to implement clockrun stuff */
1086 if (reg
& TBICR_MR_LOOPBACK
)
1087 panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
1089 if (reg
& TBICR_MR_AN_ENABLE
) {
1090 regs
.tanlpar
= regs
.tanar
;
1091 regs
.tbisr
|= (TBISR_MR_AN_COMPLETE
| TBISR_MR_LINK_STATUS
);
1095 if (reg
& TBICR_MR_RESTART_AN
) ;
1101 panic("TBISR is read only register!\n");
1105 if (reg
& TANAR_PS2
)
1106 panic("this isn't used in driver, something wrong!\n");
1108 if (reg
& TANAR_PS1
)
1109 panic("this isn't used in driver, something wrong!\n");
1113 panic("this should only be written to by the fake phy!\n");
1116 panic("TANER is read only register!\n");
1123 panic("invalid register access daddr=%#x", daddr
);
1126 panic("Invalid Request Size");
1133 NSGigE::devIntrPost(uint32_t interrupts
)
1135 if (interrupts
& ISR_RESERVE
)
1136 panic("Cannot set a reserved interrupt");
1138 if (interrupts
& ISR_NOIMPL
)
1139 warn("interrupt not implemented %#x\n", interrupts
);
1141 interrupts
&= ~ISR_NOIMPL
;
1142 regs
.isr
|= interrupts
;
1144 if (interrupts
& regs
.imr
) {
1145 if (interrupts
& ISR_SWI
) {
1148 if (interrupts
& ISR_RXIDLE
) {
1151 if (interrupts
& ISR_RXOK
) {
1154 if (interrupts
& ISR_RXDESC
) {
1157 if (interrupts
& ISR_TXOK
) {
1160 if (interrupts
& ISR_TXIDLE
) {
1163 if (interrupts
& ISR_TXDESC
) {
1166 if (interrupts
& ISR_RXORN
) {
1171 DPRINTF(EthernetIntr
,
1172 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
1173 interrupts
, regs
.isr
, regs
.imr
);
1175 if ((regs
.isr
& regs
.imr
)) {
1176 Tick when
= curTick
;
1177 if (!(regs
.isr
& regs
.imr
& ISR_NODELAY
))
1183 /* writing this interrupt counting stats inside this means that this function
1184 is now limited to being used to clear all interrupts upon the kernel
1185 reading isr and servicing. just telling you in case you were thinking
1189 NSGigE::devIntrClear(uint32_t interrupts
)
1191 if (interrupts
& ISR_RESERVE
)
1192 panic("Cannot clear a reserved interrupt");
1194 if (regs
.isr
& regs
.imr
& ISR_SWI
) {
1197 if (regs
.isr
& regs
.imr
& ISR_RXIDLE
) {
1200 if (regs
.isr
& regs
.imr
& ISR_RXOK
) {
1203 if (regs
.isr
& regs
.imr
& ISR_RXDESC
) {
1206 if (regs
.isr
& regs
.imr
& ISR_TXOK
) {
1209 if (regs
.isr
& regs
.imr
& ISR_TXIDLE
) {
1212 if (regs
.isr
& regs
.imr
& ISR_TXDESC
) {
1215 if (regs
.isr
& regs
.imr
& ISR_RXORN
) {
1219 if (regs
.isr
& regs
.imr
& (ISR_SWI
| ISR_RXIDLE
| ISR_RXOK
| ISR_RXDESC
|
1220 ISR_TXOK
| ISR_TXIDLE
| ISR_TXDESC
| ISR_RXORN
) )
1223 interrupts
&= ~ISR_NOIMPL
;
1224 regs
.isr
&= ~interrupts
;
1226 DPRINTF(EthernetIntr
,
1227 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
1228 interrupts
, regs
.isr
, regs
.imr
);
1230 if (!(regs
.isr
& regs
.imr
))
1235 NSGigE::devIntrChangeMask()
1237 DPRINTF(EthernetIntr
, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
1238 regs
.isr
, regs
.imr
, regs
.isr
& regs
.imr
);
1240 if (regs
.isr
& regs
.imr
)
1241 cpuIntrPost(curTick
);
1247 NSGigE::cpuIntrPost(Tick when
)
1249 // If the interrupt you want to post is later than an interrupt
1250 // already scheduled, just let it post in the coming one and don't
1251 // schedule another.
1252 // HOWEVER, must be sure that the scheduled intrTick is in the
1253 // future (this was formerly the source of a bug)
1255 * @todo this warning should be removed and the intrTick code should
1258 assert(when
>= curTick
);
1259 assert(intrTick
>= curTick
|| intrTick
== 0);
1260 if (when
> intrTick
&& intrTick
!= 0) {
1261 DPRINTF(EthernetIntr
, "don't need to schedule event...intrTick=%d\n",
1267 if (intrTick
< curTick
) {
1272 DPRINTF(EthernetIntr
, "going to schedule an interrupt for intrTick=%d\n",
1276 intrEvent
->squash();
1277 intrEvent
= new IntrEvent(this, true);
1278 intrEvent
->schedule(intrTick
);
1282 NSGigE::cpuInterrupt()
1284 assert(intrTick
== curTick
);
1286 // Whether or not there's a pending interrupt, we don't care about
1291 // Don't send an interrupt if there's already one
1292 if (cpuPendingIntr
) {
1293 DPRINTF(EthernetIntr
,
1294 "would send an interrupt now, but there's already pending\n");
1297 cpuPendingIntr
= true;
1299 DPRINTF(EthernetIntr
, "posting interrupt\n");
1305 NSGigE::cpuIntrClear()
1307 if (!cpuPendingIntr
)
1311 intrEvent
->squash();
1317 cpuPendingIntr
= false;
1319 DPRINTF(EthernetIntr
, "clearing interrupt\n");
1324 NSGigE::cpuIntrPending() const
1325 { return cpuPendingIntr
; }
1331 DPRINTF(Ethernet
, "transmit reset\n");
1336 assert(txDescCnt
== 0);
1339 assert(txDmaState
== dmaIdle
);
1345 DPRINTF(Ethernet
, "receive reset\n");
1348 assert(rxPktBytes
== 0);
1351 assert(rxDescCnt
== 0);
1352 assert(rxDmaState
== dmaIdle
);
1360 memset(®s
, 0, sizeof(regs
));
1361 regs
.config
= CFGR_LNKSTS
;
1363 regs
.txcfg
= 0x120; // set drain threshold to 1024 bytes and
1364 // fill threshold to 32 bytes
1365 regs
.rxcfg
= 0x4; // set drain threshold to 16 bytes
1366 regs
.srr
= 0x0103; // set the silicon revision to rev B or 0x103
1367 regs
.mibc
= MIBC_FRZ
;
1368 regs
.vdr
= 0x81; // set the vlan tag type to 802.1q
1369 regs
.tesr
= 0xc000; // TBI capable of both full and half duplex
1371 extstsEnable
= false;
1372 acceptBroadcast
= false;
1373 acceptMulticast
= false;
1374 acceptUnicast
= false;
1375 acceptPerfect
= false;
1380 NSGigE::rxDmaReadCopy()
1382 assert(rxDmaState
== dmaReading
);
1384 physmem
->dma_read((uint8_t *)rxDmaData
, rxDmaAddr
, rxDmaLen
);
1385 rxDmaState
= dmaIdle
;
1387 DPRINTF(EthernetDMA
, "rx dma read paddr=%#x len=%d\n",
1388 rxDmaAddr
, rxDmaLen
);
1389 DDUMP(EthernetDMA
, rxDmaData
, rxDmaLen
);
1393 NSGigE::doRxDmaRead()
1395 assert(rxDmaState
== dmaIdle
|| rxDmaState
== dmaReadWaiting
);
1396 rxDmaState
= dmaReading
;
1398 if (dmaInterface
&& !rxDmaFree
) {
1399 if (dmaInterface
->busy())
1400 rxDmaState
= dmaReadWaiting
;
1402 dmaInterface
->doDMA(Read
, rxDmaAddr
, rxDmaLen
, curTick
,
1403 &rxDmaReadEvent
, true);
1407 if (dmaReadDelay
== 0 && dmaReadFactor
== 0) {
1412 Tick factor
= ((rxDmaLen
+ ULL(63)) >> ULL(6)) * dmaReadFactor
;
1413 Tick start
= curTick
+ dmaReadDelay
+ factor
;
1414 rxDmaReadEvent
.schedule(start
);
1419 NSGigE::rxDmaReadDone()
1421 assert(rxDmaState
== dmaReading
);
1424 // If the transmit state machine has a pending DMA, let it go first
1425 if (txDmaState
== dmaReadWaiting
|| txDmaState
== dmaWriteWaiting
)
1432 NSGigE::rxDmaWriteCopy()
1434 assert(rxDmaState
== dmaWriting
);
1436 physmem
->dma_write(rxDmaAddr
, (uint8_t *)rxDmaData
, rxDmaLen
);
1437 rxDmaState
= dmaIdle
;
1439 DPRINTF(EthernetDMA
, "rx dma write paddr=%#x len=%d\n",
1440 rxDmaAddr
, rxDmaLen
);
1441 DDUMP(EthernetDMA
, rxDmaData
, rxDmaLen
);
1445 NSGigE::doRxDmaWrite()
1447 assert(rxDmaState
== dmaIdle
|| rxDmaState
== dmaWriteWaiting
);
1448 rxDmaState
= dmaWriting
;
1450 if (dmaInterface
&& !rxDmaFree
) {
1451 if (dmaInterface
->busy())
1452 rxDmaState
= dmaWriteWaiting
;
1454 dmaInterface
->doDMA(WriteInvalidate
, rxDmaAddr
, rxDmaLen
, curTick
,
1455 &rxDmaWriteEvent
, true);
1459 if (dmaWriteDelay
== 0 && dmaWriteFactor
== 0) {
1464 Tick factor
= ((rxDmaLen
+ ULL(63)) >> ULL(6)) * dmaWriteFactor
;
1465 Tick start
= curTick
+ dmaWriteDelay
+ factor
;
1466 rxDmaWriteEvent
.schedule(start
);
1471 NSGigE::rxDmaWriteDone()
1473 assert(rxDmaState
== dmaWriting
);
1476 // If the transmit state machine has a pending DMA, let it go first
1477 if (txDmaState
== dmaReadWaiting
|| txDmaState
== dmaWriteWaiting
)
1486 DPRINTF(EthernetSM
, "receive kick rxState=%s (rxBuf.size=%d)\n",
1487 NsRxStateStrings
[rxState
], rxFifo
.size());
1489 if (rxKickTick
> curTick
) {
1490 DPRINTF(EthernetSM
, "receive kick exiting, can't run till %d\n",
1496 switch(rxDmaState
) {
1497 case dmaReadWaiting
:
1501 case dmaWriteWaiting
:
1509 // see state machine from spec for details
1510 // the way this works is, if you finish work on one state and can
1511 // go directly to another, you do that through jumping to the
1512 // label "next". however, if you have intermediate work, like DMA
1513 // so that you can't go to the next state yet, you go to exit and
1514 // exit the loop. however, when the DMA is done it will trigger
1515 // an event and come back to this loop.
1519 DPRINTF(EthernetSM
, "Receive Disabled! Nothing to do.\n");
1524 rxState
= rxDescRefr
;
1526 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1527 rxDmaData
= &rxDescCache
+ offsetof(ns_desc
, link
);
1528 rxDmaLen
= sizeof(rxDescCache
.link
);
1529 rxDmaFree
= dmaDescFree
;
1532 descDmaRdBytes
+= rxDmaLen
;
1537 rxState
= rxDescRead
;
1539 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1540 rxDmaData
= &rxDescCache
;
1541 rxDmaLen
= sizeof(ns_desc
);
1542 rxDmaFree
= dmaDescFree
;
1545 descDmaRdBytes
+= rxDmaLen
;
1553 if (rxDmaState
!= dmaIdle
)
1556 rxState
= rxAdvance
;
1560 if (rxDmaState
!= dmaIdle
)
1563 DPRINTF(EthernetDesc
,
1564 "rxDescCache: addr=%08x read descriptor\n",
1565 regs
.rxdp
& 0x3fffffff);
1566 DPRINTF(EthernetDesc
,
1567 "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
1568 rxDescCache
.link
, rxDescCache
.bufptr
, rxDescCache
.cmdsts
,
1569 rxDescCache
.extsts
);
1571 if (rxDescCache
.cmdsts
& CMDSTS_OWN
) {
1572 devIntrPost(ISR_RXIDLE
);
1576 rxState
= rxFifoBlock
;
1577 rxFragPtr
= rxDescCache
.bufptr
;
1578 rxDescCnt
= rxDescCache
.cmdsts
& CMDSTS_LEN_MASK
;
1585 * @todo in reality, we should be able to start processing
1586 * the packet as it arrives, and not have to wait for the
1587 * full packet ot be in the receive fifo.
1592 DPRINTF(EthernetSM
, "****processing receive of new packet****\n");
1594 // If we don't have a packet, grab a new one from the fifo.
1595 rxPacket
= rxFifo
.front();
1596 rxPktBytes
= rxPacket
->length
;
1597 rxPacketBufPtr
= rxPacket
->data
;
1600 if (DTRACE(Ethernet
)) {
1603 DPRINTF(Ethernet
, "ID is %d\n", ip
->id());
1607 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1608 tcp
->sport(), tcp
->dport(), tcp
->seq(),
1615 // sanity check - i think the driver behaves like this
1616 assert(rxDescCnt
>= rxPktBytes
);
1621 // dont' need the && rxDescCnt > 0 if driver sanity check
1623 if (rxPktBytes
> 0) {
1624 rxState
= rxFragWrite
;
1625 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
1627 rxXferLen
= rxPktBytes
;
1629 rxDmaAddr
= rxFragPtr
& 0x3fffffff;
1630 rxDmaData
= rxPacketBufPtr
;
1631 rxDmaLen
= rxXferLen
;
1632 rxDmaFree
= dmaDataFree
;
1638 rxState
= rxDescWrite
;
1640 //if (rxPktBytes == 0) { /* packet is done */
1641 assert(rxPktBytes
== 0);
1642 DPRINTF(EthernetSM
, "done with receiving packet\n");
1644 rxDescCache
.cmdsts
|= CMDSTS_OWN
;
1645 rxDescCache
.cmdsts
&= ~CMDSTS_MORE
;
1646 rxDescCache
.cmdsts
|= CMDSTS_OK
;
1647 rxDescCache
.cmdsts
&= 0xffff0000;
1648 rxDescCache
.cmdsts
+= rxPacket
->length
; //i.e. set CMDSTS_SIZE
1652 * all the driver uses these are for its own stats keeping
1653 * which we don't care about, aren't necessary for
1654 * functionality and doing this would just slow us down.
1655 * if they end up using this in a later version for
1656 * functional purposes, just undef
1658 if (rxFilterEnable
) {
1659 rxDescCache
.cmdsts
&= ~CMDSTS_DEST_MASK
;
1660 const EthAddr
&dst
= rxFifoFront()->dst();
1662 rxDescCache
.cmdsts
|= CMDSTS_DEST_SELF
;
1663 if (dst
->multicast())
1664 rxDescCache
.cmdsts
|= CMDSTS_DEST_MULTI
;
1665 if (dst
->broadcast())
1666 rxDescCache
.cmdsts
|= CMDSTS_DEST_MASK
;
1671 if (extstsEnable
&& ip
) {
1672 rxDescCache
.extsts
|= EXTSTS_IPPKT
;
1674 if (cksum(ip
) != 0) {
1675 DPRINTF(EthernetCksum
, "Rx IP Checksum Error\n");
1676 rxDescCache
.extsts
|= EXTSTS_IPERR
;
1681 rxDescCache
.extsts
|= EXTSTS_TCPPKT
;
1683 if (cksum(tcp
) != 0) {
1684 DPRINTF(EthernetCksum
, "Rx TCP Checksum Error\n");
1685 rxDescCache
.extsts
|= EXTSTS_TCPERR
;
1689 rxDescCache
.extsts
|= EXTSTS_UDPPKT
;
1691 if (cksum(udp
) != 0) {
1692 DPRINTF(EthernetCksum
, "Rx UDP Checksum Error\n");
1693 rxDescCache
.extsts
|= EXTSTS_UDPERR
;
1700 * the driver seems to always receive into desc buffers
1701 * of size 1514, so you never have a pkt that is split
1702 * into multiple descriptors on the receive side, so
1703 * i don't implement that case, hence the assert above.
1706 DPRINTF(EthernetDesc
,
1707 "rxDescCache: addr=%08x writeback cmdsts extsts\n",
1708 regs
.rxdp
& 0x3fffffff);
1709 DPRINTF(EthernetDesc
,
1710 "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
1711 rxDescCache
.link
, rxDescCache
.bufptr
, rxDescCache
.cmdsts
,
1712 rxDescCache
.extsts
);
1714 rxDmaAddr
= (regs
.rxdp
+ offsetof(ns_desc
, cmdsts
)) & 0x3fffffff;
1715 rxDmaData
= &(rxDescCache
.cmdsts
);
1716 rxDmaLen
= sizeof(rxDescCache
.cmdsts
) + sizeof(rxDescCache
.extsts
);
1717 rxDmaFree
= dmaDescFree
;
1720 descDmaWrBytes
+= rxDmaLen
;
1728 if (rxDmaState
!= dmaIdle
)
1731 rxPacketBufPtr
+= rxXferLen
;
1732 rxFragPtr
+= rxXferLen
;
1733 rxPktBytes
-= rxXferLen
;
1735 rxState
= rxFifoBlock
;
1739 if (rxDmaState
!= dmaIdle
)
1742 assert(rxDescCache
.cmdsts
& CMDSTS_OWN
);
1744 assert(rxPacket
== 0);
1745 devIntrPost(ISR_RXOK
);
1747 if (rxDescCache
.cmdsts
& CMDSTS_INTR
)
1748 devIntrPost(ISR_RXDESC
);
1751 DPRINTF(EthernetSM
, "Halting the RX state machine\n");
1755 rxState
= rxAdvance
;
1759 if (rxDescCache
.link
== 0) {
1760 devIntrPost(ISR_RXIDLE
);
1765 rxState
= rxDescRead
;
1766 regs
.rxdp
= rxDescCache
.link
;
1769 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1770 rxDmaData
= &rxDescCache
;
1771 rxDmaLen
= sizeof(ns_desc
);
1772 rxDmaFree
= dmaDescFree
;
1780 panic("Invalid rxState!");
1783 DPRINTF(EthernetSM
, "entering next rxState=%s\n",
1784 NsRxStateStrings
[rxState
]);
1790 * @todo do we want to schedule a future kick?
1792 DPRINTF(EthernetSM
, "rx state machine exited rxState=%s\n",
1793 NsRxStateStrings
[rxState
]);
// NOTE(review): this appears to be the body of NSGigE::transmit() — the
// function header and several original lines were lost in extraction, and
// each surviving statement is split across physical lines. Code is left
// byte-identical; comments only. Reflow against the upstream file.
// Nothing queued: bail out early.
1799 if (txFifo
.empty()) {
1800 DPRINTF(Ethernet
, "nothing to transmit\n");
1804 DPRINTF(Ethernet
, "Attempt Pkt Transmit: txFifo length=%d\n",
// Hand the head-of-queue packet to the link; on success, trace it.
1806 if (interface
->sendPacket(txFifo
.front())) {
1808 if (DTRACE(Ethernet
)) {
1809 IpPtr
ip(txFifo
.front());
1811 DPRINTF(Ethernet
, "ID is %d\n", ip
->id());
1815 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1816 tcp
->sport(), tcp
->dport(), tcp
->seq(), tcp
->ack());
1822 DDUMP(EthernetData
, txFifo
.front()->data
, txFifo
.front()->length
);
// Account transmitted bytes before the packet is popped.
1823 txBytes
+= txFifo
.front()->length
;
1826 DPRINTF(Ethernet
, "Successful Xmit! now txFifoAvail is %d\n",
1831 * normally do a writeback of the descriptor here, and ONLY
1832 * after that is done, send this interrupt. but since our
1833 * stuff never actually fails, just do this interrupt here,
1834 * otherwise the code has to stray from this nice format.
1835 * besides, it's functionally the same.
1837 devIntrPost(ISR_TXOK
);
// More packets pending and no retry already queued: reschedule.
1840 if (!txFifo
.empty() && !txEvent
.scheduled()) {
1841 DPRINTF(Ethernet
, "reschedule transmit\n");
1842 txEvent
.schedule(curTick
+ retryTime
);
1847 NSGigE::txDmaReadCopy()
1849 assert(txDmaState
== dmaReading
);
1851 physmem
->dma_read((uint8_t *)txDmaData
, txDmaAddr
, txDmaLen
);
1852 txDmaState
= dmaIdle
;
1854 DPRINTF(EthernetDMA
, "tx dma read paddr=%#x len=%d\n",
1855 txDmaAddr
, txDmaLen
);
1856 DDUMP(EthernetDMA
, txDmaData
, txDmaLen
);
1860 NSGigE::doTxDmaRead()
1862 assert(txDmaState
== dmaIdle
|| txDmaState
== dmaReadWaiting
);
1863 txDmaState
= dmaReading
;
1865 if (dmaInterface
&& !txDmaFree
) {
1866 if (dmaInterface
->busy())
1867 txDmaState
= dmaReadWaiting
;
1869 dmaInterface
->doDMA(Read
, txDmaAddr
, txDmaLen
, curTick
,
1870 &txDmaReadEvent
, true);
1874 if (dmaReadDelay
== 0 && dmaReadFactor
== 0.0) {
1879 Tick factor
= ((txDmaLen
+ ULL(63)) >> ULL(6)) * dmaReadFactor
;
1880 Tick start
= curTick
+ dmaReadDelay
+ factor
;
1881 txDmaReadEvent
.schedule(start
);
1886 NSGigE::txDmaReadDone()
1888 assert(txDmaState
== dmaReading
);
1891 // If the receive state machine has a pending DMA, let it go first
1892 if (rxDmaState
== dmaReadWaiting
|| rxDmaState
== dmaWriteWaiting
)
1899 NSGigE::txDmaWriteCopy()
1901 assert(txDmaState
== dmaWriting
);
1903 physmem
->dma_write(txDmaAddr
, (uint8_t *)txDmaData
, txDmaLen
);
1904 txDmaState
= dmaIdle
;
1906 DPRINTF(EthernetDMA
, "tx dma write paddr=%#x len=%d\n",
1907 txDmaAddr
, txDmaLen
);
1908 DDUMP(EthernetDMA
, txDmaData
, txDmaLen
);
1912 NSGigE::doTxDmaWrite()
1914 assert(txDmaState
== dmaIdle
|| txDmaState
== dmaWriteWaiting
);
1915 txDmaState
= dmaWriting
;
1917 if (dmaInterface
&& !txDmaFree
) {
1918 if (dmaInterface
->busy())
1919 txDmaState
= dmaWriteWaiting
;
1921 dmaInterface
->doDMA(WriteInvalidate
, txDmaAddr
, txDmaLen
, curTick
,
1922 &txDmaWriteEvent
, true);
1926 if (dmaWriteDelay
== 0 && dmaWriteFactor
== 0.0) {
1931 Tick factor
= ((txDmaLen
+ ULL(63)) >> ULL(6)) * dmaWriteFactor
;
1932 Tick start
= curTick
+ dmaWriteDelay
+ factor
;
1933 txDmaWriteEvent
.schedule(start
);
1938 NSGigE::txDmaWriteDone()
1940 assert(txDmaState
== dmaWriting
);
1943 // If the receive state machine has a pending DMA, let it go first
1944 if (rxDmaState
== dmaReadWaiting
|| rxDmaState
== dmaWriteWaiting
)
// NOTE(review): body of the txKick() transmit state machine. Extraction
// split every statement across lines and dropped many original lines
// (braces, case labels, gotos), so the code is left byte-identical with
// comments only; reflow against the upstream file before editing logic.
1953 DPRINTF(EthernetSM
, "transmit kick txState=%s\n",
1954 NsTxStateStrings
[txState
]);
// Gate: don't run before the scheduled kick tick.
1956 if (txKickTick
> curTick
) {
1957 DPRINTF(EthernetSM
, "transmit kick exiting, can't run till %d\n",
// Pending-DMA dispatch: re-issue a waiting read or write first.
1964 switch(txDmaState
) {
1965 case dmaReadWaiting
:
1969 case dmaWriteWaiting
:
1980 DPRINTF(EthernetSM
, "Transmit disabled. Nothing to do.\n");
// txDescRefr: refresh only the link field of the cached descriptor.
1985 txState
= txDescRefr
;
1987 txDmaAddr
= regs
.txdp
& 0x3fffffff;
1988 txDmaData
= &txDescCache
+ offsetof(ns_desc
, link
);
1989 txDmaLen
= sizeof(txDescCache
.link
);
1990 txDmaFree
= dmaDescFree
;
1993 descDmaRdBytes
+= txDmaLen
;
// txDescRead: fetch the whole descriptor at regs.txdp.
1999 txState
= txDescRead
;
2001 txDmaAddr
= regs
.txdp
& 0x3fffffff;
2002 txDmaData
= &txDescCache
;
2003 txDmaLen
= sizeof(ns_desc
);
2004 txDmaFree
= dmaDescFree
;
2007 descDmaRdBytes
+= txDmaLen
;
2015 if (txDmaState
!= dmaIdle
)
2018 txState
= txAdvance
;
2022 if (txDmaState
!= dmaIdle
)
2025 DPRINTF(EthernetDesc
,
2026 "txDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
2027 txDescCache
.link
, txDescCache
.bufptr
, txDescCache
.cmdsts
,
2028 txDescCache
.extsts
);
// Device owns the descriptor: start filling the FIFO from it.
2030 if (txDescCache
.cmdsts
& CMDSTS_OWN
) {
2031 txState
= txFifoBlock
;
2032 txFragPtr
= txDescCache
.bufptr
;
2033 txDescCnt
= txDescCache
.cmdsts
& CMDSTS_LEN_MASK
;
2035 devIntrPost(ISR_TXIDLE
);
2043 DPRINTF(EthernetSM
, "****starting the tx of a new packet****\n");
2044 txPacket
= new PacketData(16384);
2045 txPacketBufPtr
= txPacket
->data
;
2048 if (txDescCnt
== 0) {
2049 DPRINTF(EthernetSM
, "the txDescCnt == 0, done with descriptor\n");
2050 if (txDescCache
.cmdsts
& CMDSTS_MORE
) {
2051 DPRINTF(EthernetSM
, "there are more descriptors to come\n");
2052 txState
= txDescWrite
;
2054 txDescCache
.cmdsts
&= ~CMDSTS_OWN
;
2056 txDmaAddr
= regs
.txdp
+ offsetof(ns_desc
, cmdsts
);
2057 txDmaAddr
&= 0x3fffffff;
2058 txDmaData
= &(txDescCache
.cmdsts
);
2059 txDmaLen
= sizeof(txDescCache
.cmdsts
);
2060 txDmaFree
= dmaDescFree
;
2065 } else { /* this packet is totally done */
2066 DPRINTF(EthernetSM
, "This packet is done, let's wrap it up\n");
2067 /* deal with the the packet that just finished */
// Offload: recompute UDP/TCP/IP checksums when VTCR_PPCHK is set.
2068 if ((regs
.vtcr
& VTCR_PPCHK
) && extstsEnable
) {
2070 if (txDescCache
.extsts
& EXTSTS_UDPPKT
) {
2073 udp
->sum(cksum(udp
));
2075 } else if (txDescCache
.extsts
& EXTSTS_TCPPKT
) {
2078 tcp
->sum(cksum(tcp
));
2081 if (txDescCache
.extsts
& EXTSTS_IPPKT
) {
2088 txPacket
->length
= txPacketBufPtr
- txPacket
->data
;
2089 // this is just because the receive can't handle a
2090 // packet bigger want to make sure
2091 assert(txPacket
->length
<= 1514);
2095 txFifo
.push(txPacket
);
2099 * this following section is not tqo spec, but
2100 * functionally shouldn't be any different. normally,
2101 * the chip will wait til the transmit has occurred
2102 * before writing back the descriptor because it has
2103 * to wait to see that it was successfully transmitted
2104 * to decide whether to set CMDSTS_OK or not.
2105 * however, in the simulator since it is always
2106 * successfully transmitted, and writing it exactly to
2107 * spec would complicate the code, we just do it here
// Writeback: clear OWN, set OK, then DMA cmdsts+extsts back.
2110 txDescCache
.cmdsts
&= ~CMDSTS_OWN
;
2111 txDescCache
.cmdsts
|= CMDSTS_OK
;
2113 DPRINTF(EthernetDesc
,
2114 "txDesc writeback: cmdsts=%08x extsts=%08x\n",
2115 txDescCache
.cmdsts
, txDescCache
.extsts
);
2117 txDmaAddr
= regs
.txdp
+ offsetof(ns_desc
, cmdsts
);
2118 txDmaAddr
&= 0x3fffffff;
2119 txDmaData
= &(txDescCache
.cmdsts
);
2120 txDmaLen
= sizeof(txDescCache
.cmdsts
) +
2121 sizeof(txDescCache
.extsts
);
2122 txDmaFree
= dmaDescFree
;
2125 descDmaWrBytes
+= txDmaLen
;
2131 DPRINTF(EthernetSM
, "halting TX state machine\n");
2135 txState
= txAdvance
;
// Descriptor not exhausted: read the next fragment if FIFO has room.
2141 DPRINTF(EthernetSM
, "this descriptor isn't done yet\n");
2142 if (!txFifo
.full()) {
2143 txState
= txFragRead
;
2146 * The number of bytes transferred is either whatever
2147 * is left in the descriptor (txDescCnt), or if there
2148 * is not enough room in the fifo, just whatever room
2149 * is left in the fifo
2151 txXferLen
= min
<uint32_t>(txDescCnt
, txFifo
.avail());
2153 txDmaAddr
= txFragPtr
& 0x3fffffff;
2154 txDmaData
= txPacketBufPtr
;
2155 txDmaLen
= txXferLen
;
2156 txDmaFree
= dmaDataFree
;
2161 txState
= txFifoBlock
;
2171 if (txDmaState
!= dmaIdle
)
// Fragment read complete: advance pointers and reserve FIFO space.
2174 txPacketBufPtr
+= txXferLen
;
2175 txFragPtr
+= txXferLen
;
2176 txDescCnt
-= txXferLen
;
2177 txFifo
.reserve(txXferLen
);
2179 txState
= txFifoBlock
;
2183 if (txDmaState
!= dmaIdle
)
2186 if (txDescCache
.cmdsts
& CMDSTS_INTR
)
2187 devIntrPost(ISR_TXDESC
);
2189 txState
= txAdvance
;
// txAdvance: follow the link pointer, or go idle when it is null.
2193 if (txDescCache
.link
== 0) {
2194 devIntrPost(ISR_TXIDLE
);
2198 txState
= txDescRead
;
2199 regs
.txdp
= txDescCache
.link
;
2202 txDmaAddr
= txDescCache
.link
& 0x3fffffff;
2203 txDmaData
= &txDescCache
;
2204 txDmaLen
= sizeof(ns_desc
);
2205 txDmaFree
= dmaDescFree
;
2213 panic("invalid state");
2216 DPRINTF(EthernetSM
, "entering next txState=%s\n",
2217 NsTxStateStrings
[txState
]);
2223 * @todo do we want to schedule a future kick?
2225 DPRINTF(EthernetSM
, "tx state machine exited txState=%s\n",
2226 NsTxStateStrings
[txState
]);
2230 NSGigE::transferDone()
2232 if (txFifo
.empty()) {
2233 DPRINTF(Ethernet
, "transfer complete: txFifo empty...nothing to do\n");
2237 DPRINTF(Ethernet
, "transfer complete: data in txFifo...schedule xmit\n");
2239 if (txEvent
.scheduled())
2240 txEvent
.reschedule(curTick
+ cycles(1));
2242 txEvent
.schedule(curTick
+ cycles(1));
// NOTE(review): receive address filter — returns whether the packet
// should be dropped (the accept/return lines were lost in extraction;
// code left byte-identical, comments only).
2246 NSGigE::rxFilter(const PacketPtr
&packet
)
2248 EthPtr eth
= packet
;
2252 const EthAddr
&dst
= eth
->dst();
// Classify the destination MAC and apply the matching accept flags.
2253 if (dst
.unicast()) {
2254 // If we're accepting all unicast addresses
2258 // If we make a perfect match
2259 if (acceptPerfect
&& dst
== rom
.perfectMatch
)
2262 if (acceptArp
&& eth
->type() == ETH_TYPE_ARP
)
2265 } else if (dst
.broadcast()) {
2266 // if we're accepting broadcasts
2267 if (acceptBroadcast
)
2270 } else if (dst
.multicast()) {
2271 // if we're accepting all multicasts
2272 if (acceptMulticast
)
// Fall-through: no accept rule matched, trace the dropped frame.
2278 DPRINTF(Ethernet
, "rxFilter drop\n");
2279 DDUMP(EthernetData
, packet
->data
, packet
->length
);
// NOTE(review): packet arrival from the EtherLink. Several original
// lines (early returns, braces) were lost in extraction; code left
// byte-identical, comments only.
2286 NSGigE::recvPacket(PacketPtr packet
)
2288 rxBytes
+= packet
->length
;
2291 DPRINTF(Ethernet
, "Receiving packet from wire, rxFifoAvail=%d\n",
// Receiver disabled: acknowledge the link and drop the packet.
2295 DPRINTF(Ethernet
, "receive disabled...packet dropped\n");
2297 interface
->recvDone();
// Address filter rejected it: same — ack and drop.
2301 if (rxFilterEnable
&& rxFilter(packet
)) {
2302 DPRINTF(Ethernet
, "packet filtered...dropped\n");
2303 interface
->recvDone();
// No room in the RX FIFO: drop and raise a receive-overrun interrupt.
2307 if (rxFifo
.avail() < packet
->length
) {
2313 "packet won't fit in receive buffer...pkt ID %d dropped\n",
2316 DPRINTF(Ethernet
, "Seq=%d\n", tcp
->seq());
2321 devIntrPost(ISR_RXORN
);
// Accepted: queue the packet and ack the link.
2325 rxFifo
.push(packet
);
2326 interface
->recvDone();
2332 //=====================================================================
// NOTE(review): checkpoint serialization. Extraction split statements
// across lines and dropped some originals; code left byte-identical,
// comments only. Must mirror unserialize() field-for-field.
2336 NSGigE::serialize(ostream
&os
)
2338 // Serialize the PciDev base class
2339 PciDev::serialize(os
);
2342 * Finalize any DMA events now.
// Any in-flight DMA is completed functionally before checkpointing so
// no event state needs to be saved.
2344 if (rxDmaReadEvent
.scheduled())
2346 if (rxDmaWriteEvent
.scheduled())
2348 if (txDmaReadEvent
.scheduled())
2350 if (txDmaWriteEvent
.scheduled())
2354 * Serialize the device registers
2356 SERIALIZE_SCALAR(regs
.command
);
2357 SERIALIZE_SCALAR(regs
.config
);
2358 SERIALIZE_SCALAR(regs
.mear
);
2359 SERIALIZE_SCALAR(regs
.ptscr
);
2360 SERIALIZE_SCALAR(regs
.isr
);
2361 SERIALIZE_SCALAR(regs
.imr
);
2362 SERIALIZE_SCALAR(regs
.ier
);
2363 SERIALIZE_SCALAR(regs
.ihr
);
2364 SERIALIZE_SCALAR(regs
.txdp
);
2365 SERIALIZE_SCALAR(regs
.txdp_hi
);
2366 SERIALIZE_SCALAR(regs
.txcfg
);
2367 SERIALIZE_SCALAR(regs
.gpior
);
2368 SERIALIZE_SCALAR(regs
.rxdp
);
2369 SERIALIZE_SCALAR(regs
.rxdp_hi
);
2370 SERIALIZE_SCALAR(regs
.rxcfg
);
2371 SERIALIZE_SCALAR(regs
.pqcr
);
2372 SERIALIZE_SCALAR(regs
.wcsr
);
2373 SERIALIZE_SCALAR(regs
.pcr
);
2374 SERIALIZE_SCALAR(regs
.rfcr
);
2375 SERIALIZE_SCALAR(regs
.rfdr
);
2376 SERIALIZE_SCALAR(regs
.srr
);
2377 SERIALIZE_SCALAR(regs
.mibc
);
2378 SERIALIZE_SCALAR(regs
.vrcr
);
2379 SERIALIZE_SCALAR(regs
.vtcr
);
2380 SERIALIZE_SCALAR(regs
.vdr
);
2381 SERIALIZE_SCALAR(regs
.ccsr
);
2382 SERIALIZE_SCALAR(regs
.tbicr
);
2383 SERIALIZE_SCALAR(regs
.tbisr
);
2384 SERIALIZE_SCALAR(regs
.tanar
);
2385 SERIALIZE_SCALAR(regs
.tanlpar
);
2386 SERIALIZE_SCALAR(regs
.taner
);
2387 SERIALIZE_SCALAR(regs
.tesr
);
2389 SERIALIZE_ARRAY(rom
.perfectMatch
, ETH_ADDR_LEN
);
2391 SERIALIZE_SCALAR(ioEnable
);
2394 * Serialize the data Fifos
2396 rxFifo
.serialize("rxFifo", os
);
2397 txFifo
.serialize("txFifo", os
);
2400 * Serialize the various helper variables
// In-flight packets: save an existence flag, the packet contents, and
// the buffer cursor as an offset so it can be rebuilt on restore.
2402 bool txPacketExists
= txPacket
;
2403 SERIALIZE_SCALAR(txPacketExists
);
2404 if (txPacketExists
) {
2405 txPacket
->length
= txPacketBufPtr
- txPacket
->data
;
2406 txPacket
->serialize("txPacket", os
);
2407 uint32_t txPktBufPtr
= (uint32_t) (txPacketBufPtr
- txPacket
->data
);
2408 SERIALIZE_SCALAR(txPktBufPtr
);
2411 bool rxPacketExists
= rxPacket
;
2412 SERIALIZE_SCALAR(rxPacketExists
);
2413 if (rxPacketExists
) {
2414 rxPacket
->serialize("rxPacket", os
);
2415 uint32_t rxPktBufPtr
= (uint32_t) (rxPacketBufPtr
- rxPacket
->data
);
2416 SERIALIZE_SCALAR(rxPktBufPtr
);
2419 SERIALIZE_SCALAR(txXferLen
);
2420 SERIALIZE_SCALAR(rxXferLen
);
2423 * Serialize DescCaches
2425 SERIALIZE_SCALAR(txDescCache
.link
);
2426 SERIALIZE_SCALAR(txDescCache
.bufptr
);
2427 SERIALIZE_SCALAR(txDescCache
.cmdsts
);
2428 SERIALIZE_SCALAR(txDescCache
.extsts
);
2429 SERIALIZE_SCALAR(rxDescCache
.link
);
2430 SERIALIZE_SCALAR(rxDescCache
.bufptr
);
2431 SERIALIZE_SCALAR(rxDescCache
.cmdsts
);
2432 SERIALIZE_SCALAR(rxDescCache
.extsts
);
2435 * Serialize tx state machine
// Enums are widened to int locals so SERIALIZE_SCALAR has a plain type.
2437 int txState
= this->txState
;
2438 SERIALIZE_SCALAR(txState
);
2439 SERIALIZE_SCALAR(txEnable
);
2440 SERIALIZE_SCALAR(CTDD
);
2441 SERIALIZE_SCALAR(txFragPtr
);
2442 SERIALIZE_SCALAR(txDescCnt
);
2443 int txDmaState
= this->txDmaState
;
2444 SERIALIZE_SCALAR(txDmaState
);
2447 * Serialize rx state machine
2449 int rxState
= this->rxState
;
2450 SERIALIZE_SCALAR(rxState
);
2451 SERIALIZE_SCALAR(rxEnable
);
2452 SERIALIZE_SCALAR(CRDD
);
2453 SERIALIZE_SCALAR(rxPktBytes
);
2454 SERIALIZE_SCALAR(rxFragPtr
);
2455 SERIALIZE_SCALAR(rxDescCnt
);
2456 int rxDmaState
= this->rxDmaState
;
2457 SERIALIZE_SCALAR(rxDmaState
);
2459 SERIALIZE_SCALAR(extstsEnable
);
2462 * If there's a pending transmit, store the time so we can
2463 * reschedule it later
// Event times are stored relative to curTick.
2465 Tick transmitTick
= txEvent
.scheduled() ? txEvent
.when() - curTick
: 0;
2466 SERIALIZE_SCALAR(transmitTick
);
2469 * receive address filter settings
2471 SERIALIZE_SCALAR(rxFilterEnable
);
2472 SERIALIZE_SCALAR(acceptBroadcast
);
2473 SERIALIZE_SCALAR(acceptMulticast
);
2474 SERIALIZE_SCALAR(acceptUnicast
);
2475 SERIALIZE_SCALAR(acceptPerfect
);
2476 SERIALIZE_SCALAR(acceptArp
);
2479 * Keep track of pending interrupt status.
2481 SERIALIZE_SCALAR(intrTick
);
2482 SERIALIZE_SCALAR(cpuPendingIntr
);
2483 Tick intrEventTick
= 0;
2485 intrEventTick
= intrEvent
->when();
2486 SERIALIZE_SCALAR(intrEventTick
);
// NOTE(review): checkpoint restore — must read back exactly what
// serialize() wrote, in the same order. Code left byte-identical
// through the extraction mangling; comments only.
2491 NSGigE::unserialize(Checkpoint
*cp
, const std::string
&section
)
2493 // Unserialize the PciDev base class
2494 PciDev::unserialize(cp
, section
);
2496 UNSERIALIZE_SCALAR(regs
.command
);
2497 UNSERIALIZE_SCALAR(regs
.config
);
2498 UNSERIALIZE_SCALAR(regs
.mear
);
2499 UNSERIALIZE_SCALAR(regs
.ptscr
);
2500 UNSERIALIZE_SCALAR(regs
.isr
);
2501 UNSERIALIZE_SCALAR(regs
.imr
);
2502 UNSERIALIZE_SCALAR(regs
.ier
);
2503 UNSERIALIZE_SCALAR(regs
.ihr
);
2504 UNSERIALIZE_SCALAR(regs
.txdp
);
2505 UNSERIALIZE_SCALAR(regs
.txdp_hi
);
2506 UNSERIALIZE_SCALAR(regs
.txcfg
);
2507 UNSERIALIZE_SCALAR(regs
.gpior
);
2508 UNSERIALIZE_SCALAR(regs
.rxdp
);
2509 UNSERIALIZE_SCALAR(regs
.rxdp_hi
);
2510 UNSERIALIZE_SCALAR(regs
.rxcfg
);
2511 UNSERIALIZE_SCALAR(regs
.pqcr
);
2512 UNSERIALIZE_SCALAR(regs
.wcsr
);
2513 UNSERIALIZE_SCALAR(regs
.pcr
);
2514 UNSERIALIZE_SCALAR(regs
.rfcr
);
2515 UNSERIALIZE_SCALAR(regs
.rfdr
);
2516 UNSERIALIZE_SCALAR(regs
.srr
);
2517 UNSERIALIZE_SCALAR(regs
.mibc
);
2518 UNSERIALIZE_SCALAR(regs
.vrcr
);
2519 UNSERIALIZE_SCALAR(regs
.vtcr
);
2520 UNSERIALIZE_SCALAR(regs
.vdr
);
2521 UNSERIALIZE_SCALAR(regs
.ccsr
);
2522 UNSERIALIZE_SCALAR(regs
.tbicr
);
2523 UNSERIALIZE_SCALAR(regs
.tbisr
);
2524 UNSERIALIZE_SCALAR(regs
.tanar
);
2525 UNSERIALIZE_SCALAR(regs
.tanlpar
);
2526 UNSERIALIZE_SCALAR(regs
.taner
);
2527 UNSERIALIZE_SCALAR(regs
.tesr
);
2529 UNSERIALIZE_ARRAY(rom
.perfectMatch
, ETH_ADDR_LEN
);
2531 UNSERIALIZE_SCALAR(ioEnable
);
2534 * unserialize the data fifos
2536 rxFifo
.unserialize("rxFifo", cp
, section
);
2537 txFifo
.unserialize("txFifo", cp
, section
);
2540 * unserialize the various helper variables
// Rebuild in-flight packets and restore the buffer cursors from the
// saved offsets.
2542 bool txPacketExists
;
2543 UNSERIALIZE_SCALAR(txPacketExists
);
2544 if (txPacketExists
) {
2545 txPacket
= new PacketData(16384);
2546 txPacket
->unserialize("txPacket", cp
, section
);
2547 uint32_t txPktBufPtr
;
2548 UNSERIALIZE_SCALAR(txPktBufPtr
);
2549 txPacketBufPtr
= (uint8_t *) txPacket
->data
+ txPktBufPtr
;
2553 bool rxPacketExists
;
2554 UNSERIALIZE_SCALAR(rxPacketExists
);
2556 if (rxPacketExists
) {
2557 rxPacket
= new PacketData(16384);
2558 rxPacket
->unserialize("rxPacket", cp
, section
);
2559 uint32_t rxPktBufPtr
;
2560 UNSERIALIZE_SCALAR(rxPktBufPtr
);
2561 rxPacketBufPtr
= (uint8_t *) rxPacket
->data
+ rxPktBufPtr
;
2565 UNSERIALIZE_SCALAR(txXferLen
);
2566 UNSERIALIZE_SCALAR(rxXferLen
);
2569 * Unserialize DescCaches
2571 UNSERIALIZE_SCALAR(txDescCache
.link
);
2572 UNSERIALIZE_SCALAR(txDescCache
.bufptr
);
2573 UNSERIALIZE_SCALAR(txDescCache
.cmdsts
);
2574 UNSERIALIZE_SCALAR(txDescCache
.extsts
);
2575 UNSERIALIZE_SCALAR(rxDescCache
.link
);
2576 UNSERIALIZE_SCALAR(rxDescCache
.bufptr
);
2577 UNSERIALIZE_SCALAR(rxDescCache
.cmdsts
);
2578 UNSERIALIZE_SCALAR(rxDescCache
.extsts
);
2581 * unserialize tx state machine
// State machine enums come back as ints and are cast to their enums.
2584 UNSERIALIZE_SCALAR(txState
);
2585 this->txState
= (TxState
) txState
;
2586 UNSERIALIZE_SCALAR(txEnable
);
2587 UNSERIALIZE_SCALAR(CTDD
);
2588 UNSERIALIZE_SCALAR(txFragPtr
);
2589 UNSERIALIZE_SCALAR(txDescCnt
);
2591 UNSERIALIZE_SCALAR(txDmaState
);
2592 this->txDmaState
= (DmaState
) txDmaState
;
2595 * unserialize rx state machine
2598 UNSERIALIZE_SCALAR(rxState
);
2599 this->rxState
= (RxState
) rxState
;
2600 UNSERIALIZE_SCALAR(rxEnable
);
2601 UNSERIALIZE_SCALAR(CRDD
);
2602 UNSERIALIZE_SCALAR(rxPktBytes
);
2603 UNSERIALIZE_SCALAR(rxFragPtr
);
2604 UNSERIALIZE_SCALAR(rxDescCnt
);
2606 UNSERIALIZE_SCALAR(rxDmaState
);
2607 this->rxDmaState
= (DmaState
) rxDmaState
;
2609 UNSERIALIZE_SCALAR(extstsEnable
);
2612 * If there's a pending transmit, reschedule it now
2615 UNSERIALIZE_SCALAR(transmitTick
);
2617 txEvent
.schedule(curTick
+ transmitTick
);
2620 * unserialize receive address filter settings
2622 UNSERIALIZE_SCALAR(rxFilterEnable
);
2623 UNSERIALIZE_SCALAR(acceptBroadcast
);
2624 UNSERIALIZE_SCALAR(acceptMulticast
);
2625 UNSERIALIZE_SCALAR(acceptUnicast
);
2626 UNSERIALIZE_SCALAR(acceptPerfect
);
2627 UNSERIALIZE_SCALAR(acceptArp
);
2630 * Keep track of pending interrupt status.
2632 UNSERIALIZE_SCALAR(intrTick
);
2633 UNSERIALIZE_SCALAR(cpuPendingIntr
);
2635 UNSERIALIZE_SCALAR(intrEventTick
);
// A non-zero saved tick means an interrupt event was pending: recreate
// and reschedule it.
2636 if (intrEventTick
) {
2637 intrEvent
= new IntrEvent(this, true);
2638 intrEvent
->schedule(intrEventTick
);
2642 * re-add addrRanges to bus bridges
2645 pioInterface
->addAddrRange(RangeSize(BARAddrs
[0], BARSize
[0]));
2646 pioInterface
->addAddrRange(RangeSize(BARAddrs
[1], BARSize
[1]));
2651 NSGigE::cacheAccess(MemReqPtr
&req
)
2653 DPRINTF(EthernetPIO
, "timing access to paddr=%#x (daddr=%#x)\n",
2654 req
->paddr
, req
->paddr
- addr
);
2655 return curTick
+ pioLatency
;
// NOTE(review): m5 SimObject boilerplate for the NSGigEInt link
// endpoint: parameter declaration, initialization, factory, and
// registration. Code left byte-identical; comments only.
2658 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt
)
2660 SimObjectParam
<EtherInt
*> peer
;
2661 SimObjectParam
<NSGigE
*> device
;
2663 END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt
)
2665 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt
)
2667 INIT_PARAM_DFLT(peer
, "peer interface", NULL
),
2668 INIT_PARAM(device
, "Ethernet device of this interface")
2670 END_INIT_SIM_OBJECT_PARAMS(NSGigEInt
)
// Factory: build the interface and, when a peer was supplied, wire the
// two endpoints to each other.
2672 CREATE_SIM_OBJECT(NSGigEInt
)
2674 NSGigEInt
*dev_int
= new NSGigEInt(getInstanceName(), device
);
2676 EtherInt
*p
= (EtherInt
*)peer
;
2678 dev_int
->setPeer(p
);
2679 p
->setPeer(dev_int
);
2685 REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt
)
// NOTE(review): m5 SimObject boilerplate for the NSGigE device itself:
// parameter declaration, initialization with defaults, and the factory
// that packs everything into NSGigE::Params. Code left byte-identical;
// comments only. Param order here must match the INIT block below.
2688 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE
)
2692 Param
<Tick
> tx_delay
;
2693 Param
<Tick
> rx_delay
;
2694 Param
<Tick
> intr_delay
;
2695 SimObjectParam
<MemoryController
*> mmu
;
2696 SimObjectParam
<PhysicalMemory
*> physmem
;
2697 Param
<bool> rx_filter
;
2698 Param
<string
> hardware_address
;
2699 SimObjectParam
<Bus
*> io_bus
;
2700 SimObjectParam
<Bus
*> payload_bus
;
2701 SimObjectParam
<HierParams
*> hier
;
2702 Param
<Tick
> pio_latency
;
2703 Param
<bool> dma_desc_free
;
2704 Param
<bool> dma_data_free
;
2705 Param
<Tick
> dma_read_delay
;
2706 Param
<Tick
> dma_write_delay
;
2707 Param
<Tick
> dma_read_factor
;
2708 Param
<Tick
> dma_write_factor
;
2709 SimObjectParam
<PciConfigAll
*> configspace
;
2710 SimObjectParam
<PciConfigData
*> configdata
;
2711 SimObjectParam
<Platform
*> platform
;
2712 Param
<uint32_t> pci_bus
;
2713 Param
<uint32_t> pci_dev
;
2714 Param
<uint32_t> pci_func
;
2715 Param
<uint32_t> tx_fifo_size
;
2716 Param
<uint32_t> rx_fifo_size
;
2717 Param
<uint32_t> m5reg
;
2718 Param
<bool> dma_no_allocate
;
2720 END_DECLARE_SIM_OBJECT_PARAMS(NSGigE
)
// Descriptions and defaults for each parameter declared above.
2722 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE
)
2724 INIT_PARAM(addr
, "Device Address"),
2725 INIT_PARAM(clock
, "State machine processor frequency"),
2726 INIT_PARAM(tx_delay
, "Transmit Delay"),
2727 INIT_PARAM(rx_delay
, "Receive Delay"),
2728 INIT_PARAM(intr_delay
, "Interrupt Delay in microseconds"),
2729 INIT_PARAM(mmu
, "Memory Controller"),
2730 INIT_PARAM(physmem
, "Physical Memory"),
2731 INIT_PARAM_DFLT(rx_filter
, "Enable Receive Filter", true),
2732 INIT_PARAM_DFLT(hardware_address
, "Ethernet Hardware Address",
2733 "00:99:00:00:00:01"),
2734 INIT_PARAM_DFLT(io_bus
, "The IO Bus to attach to for headers", NULL
),
2735 INIT_PARAM_DFLT(payload_bus
, "The IO Bus to attach to for payload", NULL
),
2736 INIT_PARAM_DFLT(hier
, "Hierarchy global variables", &defaultHierParams
),
2737 INIT_PARAM_DFLT(pio_latency
, "Programmed IO latency in bus cycles", 1),
2738 INIT_PARAM_DFLT(dma_desc_free
, "DMA of Descriptors is free", false),
2739 INIT_PARAM_DFLT(dma_data_free
, "DMA of Data is free", false),
2740 INIT_PARAM_DFLT(dma_read_delay
, "fixed delay for dma reads", 0),
2741 INIT_PARAM_DFLT(dma_write_delay
, "fixed delay for dma writes", 0),
2742 INIT_PARAM_DFLT(dma_read_factor
, "multiplier for dma reads", 0),
2743 INIT_PARAM_DFLT(dma_write_factor
, "multiplier for dma writes", 0),
2744 INIT_PARAM(configspace
, "PCI Configspace"),
2745 INIT_PARAM(configdata
, "PCI Config data"),
2746 INIT_PARAM(platform
, "Platform"),
2747 INIT_PARAM(pci_bus
, "PCI bus"),
2748 INIT_PARAM(pci_dev
, "PCI device number"),
2749 INIT_PARAM(pci_func
, "PCI function code"),
2750 INIT_PARAM_DFLT(tx_fifo_size
, "max size in bytes of txFifo", 131072),
2751 INIT_PARAM_DFLT(rx_fifo_size
, "max size in bytes of rxFifo", 131072),
2752 INIT_PARAM(m5reg
, "m5 register"),
2753 INIT_PARAM_DFLT(dma_no_allocate
, "Should DMA reads allocate cache lines", true)
2755 END_INIT_SIM_OBJECT_PARAMS(NSGigE
)
// Factory: copy every configured parameter into a Params struct and
// construct the device from it.
2758 CREATE_SIM_OBJECT(NSGigE
)
2760 NSGigE::Params
*params
= new NSGigE::Params
;
2762 params
->name
= getInstanceName();
2764 params
->configSpace
= configspace
;
2765 params
->configData
= configdata
;
2766 params
->plat
= platform
;
2767 params
->busNum
= pci_bus
;
2768 params
->deviceNum
= pci_dev
;
2769 params
->functionNum
= pci_func
;
2771 params
->clock
= clock
;
2772 params
->intr_delay
= intr_delay
;
2773 params
->pmem
= physmem
;
2774 params
->tx_delay
= tx_delay
;
2775 params
->rx_delay
= rx_delay
;
2776 params
->hier
= hier
;
2777 params
->header_bus
= io_bus
;
2778 params
->payload_bus
= payload_bus
;
2779 params
->pio_latency
= pio_latency
;
2780 params
->dma_desc_free
= dma_desc_free
;
2781 params
->dma_data_free
= dma_data_free
;
2782 params
->dma_read_delay
= dma_read_delay
;
2783 params
->dma_write_delay
= dma_write_delay
;
2784 params
->dma_read_factor
= dma_read_factor
;
2785 params
->dma_write_factor
= dma_write_factor
;
2786 params
->rx_filter
= rx_filter
;
2787 params
->eaddr
= hardware_address
;
2788 params
->tx_fifo_size
= tx_fifo_size
;
2789 params
->rx_fifo_size
= rx_fifo_size
;
2790 params
->m5reg
= m5reg
;
2791 params
->dma_no_allocate
= dma_no_allocate
;
2792 return new NSGigE(params
);
2795 REGISTER_SIM_OBJECT("NSGigE", NSGigE
)