2 * Copyright (c) 2004 The Regents of The University of Michigan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 * Device module for modelling the National Semiconductor
31 * DP83820 ethernet controller. Does not support priority queueing
37 #include "base/inet.hh"
38 #include "cpu/exec_context.hh"
39 #include "cpu/intr_control.hh"
41 #include "dev/ns_gige.hh"
42 #include "dev/etherlink.hh"
43 #include "mem/bus/bus.hh"
44 #include "mem/bus/dma_interface.hh"
45 #include "mem/bus/pio_interface.hh"
46 #include "mem/bus/pio_interface_impl.hh"
47 #include "mem/functional_mem/memory_control.hh"
48 #include "mem/functional_mem/physical_memory.hh"
49 #include "sim/builder.hh"
50 #include "sim/host.hh"
51 #include "sim/sim_stats.hh"
52 #include "targetarch/vtophys.hh"
53 #include "dev/pciconfigall.hh"
54 #include "dev/tsunami_cchip.hh"
56 const char *NsRxStateStrings
[] =
67 const char *NsTxStateStrings
[] =
78 const char *NsDmaState
[] =
89 //helper function declarations
90 //These functions reverse Endianness so we can evaluate network data correctly
91 uint16_t reverseEnd16(uint16_t);
92 uint32_t reverseEnd32(uint32_t);
94 ///////////////////////////////////////////////////////////////////////
98 NSGigE::NSGigE(const std::string
&name
, IntrControl
*i
, Tick intr_delay
,
99 PhysicalMemory
*pmem
, Tick tx_delay
, Tick rx_delay
,
100 MemoryController
*mmu
, HierParams
*hier
, Bus
*header_bus
,
101 Bus
*payload_bus
, Tick pio_latency
, bool dma_desc_free
,
102 bool dma_data_free
, Tick dma_read_delay
, Tick dma_write_delay
,
103 Tick dma_read_factor
, Tick dma_write_factor
, PciConfigAll
*cf
,
104 PciConfigData
*cd
, Tsunami
*t
, uint32_t bus
, uint32_t dev
,
105 uint32_t func
, bool rx_filter
, const int eaddr
[6],
106 uint32_t tx_fifo_size
, uint32_t rx_fifo_size
)
107 : PciDev(name
, mmu
, cf
, cd
, bus
, dev
, func
), tsunami(t
), ioEnable(false),
108 maxTxFifoSize(tx_fifo_size
), maxRxFifoSize(rx_fifo_size
),
109 txPacket(0), rxPacket(0), txPacketBufPtr(NULL
), rxPacketBufPtr(NULL
),
110 txXferLen(0), rxXferLen(0), txState(txIdle
), CTDD(false),
111 txFifoAvail(tx_fifo_size
), txHalt(false),
112 txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle
), rxState(rxIdle
),
113 CRDD(false), rxPktBytes(0), rxFifoCnt(0), rxHalt(false),
114 rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle
), extstsEnable(false),
115 rxDmaReadEvent(this), rxDmaWriteEvent(this),
116 txDmaReadEvent(this), txDmaWriteEvent(this),
117 dmaDescFree(dma_desc_free
), dmaDataFree(dma_data_free
),
118 txDelay(tx_delay
), rxDelay(rx_delay
), rxKickTick(0), txKickTick(0),
119 txEvent(this), rxFilterEnable(rx_filter
), acceptBroadcast(false),
120 acceptMulticast(false), acceptUnicast(false),
121 acceptPerfect(false), acceptArp(false),
122 physmem(pmem
), intctrl(i
), intrTick(0), cpuPendingIntr(false),
123 intrEvent(0), interface(0)
125 tsunami
->ethernet
= this;
128 pioInterface
= newPioInterface(name
, hier
, header_bus
, this,
129 &NSGigE::cacheAccess
);
131 pioLatency
= pio_latency
* header_bus
->clockRatio
;
134 dmaInterface
= new DMAInterface
<Bus
>(name
+ ".dma",
135 header_bus
, payload_bus
, 1);
137 dmaInterface
= new DMAInterface
<Bus
>(name
+ ".dma",
138 header_bus
, header_bus
, 1);
139 } else if (payload_bus
) {
140 pioInterface
= newPioInterface(name
, hier
, payload_bus
, this,
141 &NSGigE::cacheAccess
);
143 pioLatency
= pio_latency
* payload_bus
->clockRatio
;
145 dmaInterface
= new DMAInterface
<Bus
>(name
+ ".dma", payload_bus
,
150 intrDelay
= US2Ticks(intr_delay
);
151 dmaReadDelay
= dma_read_delay
;
152 dmaWriteDelay
= dma_write_delay
;
153 dmaReadFactor
= dma_read_factor
;
154 dmaWriteFactor
= dma_write_factor
;
157 rom
.perfectMatch
[0] = eaddr
[0];
158 rom
.perfectMatch
[1] = eaddr
[1];
159 rom
.perfectMatch
[2] = eaddr
[2];
160 rom
.perfectMatch
[3] = eaddr
[3];
161 rom
.perfectMatch
[4] = eaddr
[4];
162 rom
.perfectMatch
[5] = eaddr
[5];
172 .name(name() + ".txBytes")
173 .desc("Bytes Transmitted")
178 .name(name() + ".rxBytes")
179 .desc("Bytes Received")
184 .name(name() + ".txPackets")
185 .desc("Number of Packets Transmitted")
190 .name(name() + ".rxPackets")
191 .desc("Number of Packets Received")
196 .name(name() + ".txIPChecksums")
197 .desc("Number of tx IP Checksums done by device")
203 .name(name() + ".rxIPChecksums")
204 .desc("Number of rx IP Checksums done by device")
210 .name(name() + ".txTCPChecksums")
211 .desc("Number of tx TCP Checksums done by device")
217 .name(name() + ".rxTCPChecksums")
218 .desc("Number of rx TCP Checksums done by device")
224 .name(name() + ".descDMAReads")
225 .desc("Number of descriptors the device read w/ DMA")
230 .name(name() + ".descDMAWrites")
231 .desc("Number of descriptors the device wrote w/ DMA")
236 .name(name() + ".descDmaReadBytes")
237 .desc("number of descriptor bytes read w/ DMA")
242 .name(name() + ".descDmaWriteBytes")
243 .desc("number of descriptor bytes write w/ DMA")
249 .name(name() + ".txBandwidth")
250 .desc("Transmit Bandwidth (bits/s)")
256 .name(name() + ".rxBandwidth")
257 .desc("Receive Bandwidth (bits/s)")
263 .name(name() + ".txPPS")
264 .desc("Packet Tranmission Rate (packets/s)")
270 .name(name() + ".rxPPS")
271 .desc("Packet Reception Rate (packets/s)")
276 txBandwidth
= txBytes
* Stats::constant(8) / simSeconds
;
277 rxBandwidth
= rxBytes
* Stats::constant(8) / simSeconds
;
278 txPacketRate
= txPackets
/ simSeconds
;
279 rxPacketRate
= rxPackets
/ simSeconds
;
283 * This is to read the PCI general configuration registers
286 NSGigE::ReadConfig(int offset
, int size
, uint8_t *data
)
288 if (offset
< PCI_DEVICE_SPECIFIC
)
289 PciDev::ReadConfig(offset
, size
, data
);
291 panic("Device specific PCI config space not implemented!\n");
295 * This is to write to the PCI general configuration registers
298 NSGigE::WriteConfig(int offset
, int size
, uint32_t data
)
300 if (offset
< PCI_DEVICE_SPECIFIC
)
301 PciDev::WriteConfig(offset
, size
, data
);
303 panic("Device specific PCI config space not implemented!\n");
305 // Need to catch writes to BARs to update the PIO interface
307 //seems to work fine without all these PCI settings, but i put in the IO
308 //to double check, an assertion will fail if we need to properly
311 if (config
.data
[offset
] & PCI_CMD_IOSE
)
317 if (config
.data
[offset
] & PCI_CMD_BME
) {
324 if (config
.data
[offset
] & PCI_CMD_MSE
) {
333 case PCI0_BASE_ADDR0
:
334 if (BARAddrs
[0] != 0) {
337 pioInterface
->addAddrRange(BARAddrs
[0], BARAddrs
[0] + BARSize
[0] - 1);
339 BARAddrs
[0] &= PA_UNCACHED_MASK
;
343 case PCI0_BASE_ADDR1
:
344 if (BARAddrs
[1] != 0) {
347 pioInterface
->addAddrRange(BARAddrs
[1], BARAddrs
[1] + BARSize
[1] - 1);
349 BARAddrs
[1] &= PA_UNCACHED_MASK
;
357 * This reads the device registers, which are detailed in the NS83820
361 NSGigE::read(MemReqPtr
&req
, uint8_t *data
)
365 //The mask is to give you only the offset into the device register file
366 Addr daddr
= req
->paddr
& 0xfff;
367 DPRINTF(EthernetPIO
, "read da=%#x pa=%#x va=%#x size=%d\n",
368 daddr
, req
->paddr
, req
->vaddr
, req
->size
);
371 //there are some reserved registers, you can see ns_gige_reg.h and
372 //the spec sheet for details
373 if (daddr
> LAST
&& daddr
<= RESERVED
) {
374 panic("Accessing reserved register");
375 } else if (daddr
> RESERVED
&& daddr
<= 0x3FC) {
376 ReadConfig(daddr
& 0xff, req
->size
, data
);
378 } else if (daddr
>= MIB_START
&& daddr
<= MIB_END
) {
379 // don't implement all the MIB's. hopefully the kernel
380 // doesn't actually DEPEND upon their values
381 // MIB are just hardware stats keepers
382 uint32_t ®
= *(uint32_t *) data
;
385 } else if (daddr
> 0x3FC)
386 panic("Something is messed up!\n");
389 case sizeof(uint32_t):
391 uint32_t ®
= *(uint32_t *)data
;
396 //these are supposed to be cleared on a read
397 reg
&= ~(CR_RXD
| CR_TXD
| CR_TXR
| CR_RXR
);
414 devIntrClear(ISR_ALL
);
469 //see the spec sheet for how RFCR and RFDR work
470 //basically, you write to RFCR to tell the machine what you want to do next
471 //then you act upon RFDR, and the device will be prepared b/c
472 //of what you wrote to RFCR
478 switch (regs
.rfcr
& RFCR_RFADDR
) {
480 reg
= rom
.perfectMatch
[1];
482 reg
+= rom
.perfectMatch
[0];
485 reg
= rom
.perfectMatch
[3] << 8;
486 reg
+= rom
.perfectMatch
[2];
489 reg
= rom
.perfectMatch
[5] << 8;
490 reg
+= rom
.perfectMatch
[4];
493 panic("reading from RFDR for something for other than PMATCH!\n");
494 //didn't implement other RFDR functionality b/c driver didn't use
504 reg
&= ~(MIBC_MIBS
| MIBC_ACLR
);
548 panic("reading unimplemented register: addr = %#x", daddr
);
551 DPRINTF(EthernetPIO
, "read from %#x: data=%d data=%#x\n",
557 panic("accessing register with invalid size: addr=%#x, size=%d",
565 NSGigE::write(MemReqPtr
&req
, const uint8_t *data
)
569 Addr daddr
= req
->paddr
& 0xfff;
570 DPRINTF(EthernetPIO
, "write da=%#x pa=%#x va=%#x size=%d\n",
571 daddr
, req
->paddr
, req
->vaddr
, req
->size
);
573 if (daddr
> LAST
&& daddr
<= RESERVED
) {
574 panic("Accessing reserved register");
575 } else if (daddr
> RESERVED
&& daddr
<= 0x3FC) {
576 WriteConfig(daddr
& 0xff, req
->size
, *(uint32_t *)data
);
578 } else if (daddr
> 0x3FC)
579 panic("Something is messed up!\n");
581 if (req
->size
== sizeof(uint32_t)) {
582 uint32_t reg
= *(uint32_t *)data
;
583 DPRINTF(EthernetPIO
, "write data=%d data=%#x\n", reg
, reg
);
588 if ((reg
& (CR_TXE
| CR_TXD
)) == (CR_TXE
| CR_TXD
)) {
590 } else if (reg
& CR_TXE
) {
591 //the kernel is enabling the transmit machine
592 if (txState
== txIdle
)
594 } else if (reg
& CR_TXD
) {
598 if ((reg
& (CR_RXE
| CR_RXD
)) == (CR_RXE
| CR_RXD
)) {
600 } else if (reg
& CR_RXE
) {
601 if (rxState
== rxIdle
) {
604 } else if (reg
& CR_RXD
) {
615 devIntrPost(ISR_SWI
);
626 if (reg
& CFG_LNKSTS
|| reg
& CFG_SPDSTS
|| reg
& CFG_DUPSTS
627 || reg
& CFG_RESERVED
|| reg
& CFG_T64ADDR
628 || reg
& CFG_PCI64_DET
)
629 panic("writing to read-only or reserved CFG bits!\n");
631 regs
.config
|= reg
& ~(CFG_LNKSTS
| CFG_SPDSTS
| CFG_DUPSTS
| CFG_RESERVED
|
632 CFG_T64ADDR
| CFG_PCI64_DET
);
634 // all these #if 0's are because i don't THINK the kernel needs to have these implemented
635 // if there is a problem relating to one of these, you may need to add functionality in
637 if (reg
& CFG_TBI_EN
) ;
638 if (reg
& CFG_MODE_1000
) ;
641 if (reg
& CFG_AUTO_1000
)
642 panic("CFG_AUTO_1000 not implemented!\n");
645 if (reg
& CFG_PINT_DUPSTS
|| reg
& CFG_PINT_LNKSTS
|| reg
& CFG_PINT_SPDSTS
) ;
646 if (reg
& CFG_TMRTEST
) ;
647 if (reg
& CFG_MRM_DIS
) ;
648 if (reg
& CFG_MWI_DIS
) ;
650 if (reg
& CFG_T64ADDR
)
651 panic("CFG_T64ADDR is read only register!\n");
653 if (reg
& CFG_PCI64_DET
)
654 panic("CFG_PCI64_DET is read only register!\n");
656 if (reg
& CFG_DATA64_EN
) ;
657 if (reg
& CFG_M64ADDR
) ;
658 if (reg
& CFG_PHY_RST
) ;
659 if (reg
& CFG_PHY_DIS
) ;
662 if (reg
& CFG_EXTSTS_EN
)
665 extstsEnable
= false;
668 if (reg
& CFG_REQALG
) ;
672 if (reg
& CFG_PESEL
) ;
673 if (reg
& CFG_BROM_DIS
) ;
674 if (reg
& CFG_EXT_125
) ;
681 /* since phy is completely faked, MEAR_MD* don't matter
682 and since the driver never uses MEAR_EE*, they don't matter */
684 if (reg
& MEAR_EEDI
) ;
685 if (reg
& MEAR_EEDO
) ; //this one is read only
686 if (reg
& MEAR_EECLK
) ;
687 if (reg
& MEAR_EESEL
) ;
688 if (reg
& MEAR_MDIO
) ;
689 if (reg
& MEAR_MDDIR
) ;
690 if (reg
& MEAR_MDC
) ;
695 regs
.ptscr
= reg
& ~(PTSCR_RBIST_RDONLY
);
696 /* these control BISTs for various parts of chip - we don't care or do
697 just fake that the BIST is done */
698 if (reg
& PTSCR_RBIST_EN
)
699 regs
.ptscr
|= PTSCR_RBIST_DONE
;
700 if (reg
& PTSCR_EEBIST_EN
)
701 regs
.ptscr
&= ~PTSCR_EEBIST_EN
;
702 if (reg
& PTSCR_EELOAD_EN
)
703 regs
.ptscr
&= ~PTSCR_EELOAD_EN
;
706 case ISR
: /* writing to the ISR has no effect */
707 panic("ISR is a read only register!\n");
720 /* not going to implement real interrupt holdoff */
724 regs
.txdp
= (reg
& 0xFFFFFFFC);
725 assert(txState
== txIdle
);
736 if (reg
& TXCFG_CSI
) ;
737 if (reg
& TXCFG_HBI
) ;
738 if (reg
& TXCFG_MLB
) ;
739 if (reg
& TXCFG_ATP
) ;
740 if (reg
& TXCFG_ECRETRY
) ; /* this could easily be implemented, but
741 considering the network is just a fake
742 pipe, wouldn't make sense to do this */
744 if (reg
& TXCFG_BRST_DIS
) ;
748 /* we handle our own DMA, ignore the kernel's exhortations */
749 //if (reg & TXCFG_MXDMA) ;
751 //also, we currently don't care about fill/drain thresholds
752 //though this may change in the future with more realistic
753 //networks or a driver which changes it according to feedback
759 /* these just control general purpose i/o pins, don't matter */
773 if (reg
& RXCFG_AEP
) ;
774 if (reg
& RXCFG_ARP
) ;
775 if (reg
& RXCFG_STRIPCRC
) ;
776 if (reg
& RXCFG_RX_RD
) ;
777 if (reg
& RXCFG_ALP
) ;
778 if (reg
& RXCFG_AIRL
) ;
781 /* we handle our own DMA, ignore what kernel says about it */
782 //if (reg & RXCFG_MXDMA) ;
785 //also, we currently don't care about fill/drain thresholds
786 //though this may change in the future with more realistic
787 //networks or a driver which changes it according to feedback
788 if (reg
& (RXCFG_DRTH
| RXCFG_DRTH0
)) ;
793 /* there is no priority queueing used in the linux 2.6 driver */
798 /* not going to implement wake on LAN */
803 /* not going to implement pause control */
810 rxFilterEnable
= (reg
& RFCR_RFEN
) ? true : false;
811 acceptBroadcast
= (reg
& RFCR_AAB
) ? true : false;
812 acceptMulticast
= (reg
& RFCR_AAM
) ? true : false;
813 acceptUnicast
= (reg
& RFCR_AAU
) ? true : false;
814 acceptPerfect
= (reg
& RFCR_APM
) ? true : false;
815 acceptArp
= (reg
& RFCR_AARP
) ? true : false;
817 if (reg
& RFCR_APAT
) ;
818 // panic("RFCR_APAT not implemented!\n");
820 if (reg
& RFCR_MHEN
|| reg
& RFCR_UHEN
)
821 panic("hash filtering not implemented!\n");
824 panic("RFCR_ULM not implemented!\n");
829 panic("the driver never writes to RFDR, something is wrong!\n");
832 panic("the driver never uses BRAR, something is wrong!\n");
835 panic("the driver never uses BRDR, something is wrong!\n");
838 panic("SRR is read only register!\n");
841 panic("the driver never uses MIBC, something is wrong!\n");
852 panic("the driver never uses VDR, something is wrong!\n");
856 /* not going to implement clockrun stuff */
862 if (reg
& TBICR_MR_LOOPBACK
)
863 panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
865 if (reg
& TBICR_MR_AN_ENABLE
) {
866 regs
.tanlpar
= regs
.tanar
;
867 regs
.tbisr
|= (TBISR_MR_AN_COMPLETE
| TBISR_MR_LINK_STATUS
);
871 if (reg
& TBICR_MR_RESTART_AN
) ;
877 panic("TBISR is read only register!\n");
882 panic("this isn't used in driver, something wrong!\n");
885 panic("this isn't used in driver, something wrong!\n");
889 panic("this should only be written to by the fake phy!\n");
892 panic("TANER is read only register!\n");
899 panic("thought i covered all the register, what is this? addr=%#x",
903 panic("Invalid Request Size");
909 NSGigE::devIntrPost(uint32_t interrupts
)
913 if (interrupts
& ISR_RESERVE
)
914 panic("Cannot set a reserved interrupt");
916 if (interrupts
& ISR_TXRCMP
)
917 regs
.isr
|= ISR_TXRCMP
;
919 if (interrupts
& ISR_RXRCMP
)
920 regs
.isr
|= ISR_RXRCMP
;
922 //ISR_DPERR not implemented
923 //ISR_SSERR not implemented
924 //ISR_RMABT not implemented
925 //ISR_RXSOVR not implemented
926 //ISR_HIBINT not implemented
927 //ISR_PHY not implemented
928 //ISR_PME not implemented
930 if (interrupts
& ISR_SWI
)
933 //ISR_MIB not implemented
934 //ISR_TXURN not implemented
936 if (interrupts
& ISR_TXIDLE
)
937 regs
.isr
|= ISR_TXIDLE
;
939 if (interrupts
& ISR_TXERR
)
940 regs
.isr
|= ISR_TXERR
;
942 if (interrupts
& ISR_TXDESC
)
943 regs
.isr
|= ISR_TXDESC
;
945 if (interrupts
& ISR_TXOK
) {
946 regs
.isr
|= ISR_TXOK
;
950 if (interrupts
& ISR_RXORN
)
951 regs
.isr
|= ISR_RXORN
;
953 if (interrupts
& ISR_RXIDLE
)
954 regs
.isr
|= ISR_RXIDLE
;
956 //ISR_RXEARLY not implemented
958 if (interrupts
& ISR_RXERR
)
959 regs
.isr
|= ISR_RXERR
;
961 if (interrupts
& ISR_RXDESC
)
962 regs
.isr
|= ISR_RXDESC
;
964 if (interrupts
& ISR_RXOK
) {
966 regs
.isr
|= ISR_RXOK
;
969 if ((regs
.isr
& regs
.imr
)) {
976 DPRINTF(EthernetIntr
, "**interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
977 interrupts
, regs
.isr
, regs
.imr
);
981 NSGigE::devIntrClear(uint32_t interrupts
)
983 if (interrupts
& ISR_RESERVE
)
984 panic("Cannot clear a reserved interrupt");
986 if (interrupts
& ISR_TXRCMP
)
987 regs
.isr
&= ~ISR_TXRCMP
;
989 if (interrupts
& ISR_RXRCMP
)
990 regs
.isr
&= ~ISR_RXRCMP
;
992 //ISR_DPERR not implemented
993 //ISR_SSERR not implemented
994 //ISR_RMABT not implemented
995 //ISR_RXSOVR not implemented
996 //ISR_HIBINT not implemented
997 //ISR_PHY not implemented
998 //ISR_PME not implemented
1000 if (interrupts
& ISR_SWI
)
1001 regs
.isr
&= ~ISR_SWI
;
1003 //ISR_MIB not implemented
1004 //ISR_TXURN not implemented
1006 if (interrupts
& ISR_TXIDLE
)
1007 regs
.isr
&= ~ISR_TXIDLE
;
1009 if (interrupts
& ISR_TXERR
)
1010 regs
.isr
&= ~ISR_TXERR
;
1012 if (interrupts
& ISR_TXDESC
)
1013 regs
.isr
&= ~ISR_TXDESC
;
1015 if (interrupts
& ISR_TXOK
)
1016 regs
.isr
&= ~ISR_TXOK
;
1018 if (interrupts
& ISR_RXORN
)
1019 regs
.isr
&= ~ISR_RXORN
;
1021 if (interrupts
& ISR_RXIDLE
)
1022 regs
.isr
&= ~ISR_RXIDLE
;
1024 //ISR_RXEARLY not implemented
1026 if (interrupts
& ISR_RXERR
)
1027 regs
.isr
&= ~ISR_RXERR
;
1029 if (interrupts
& ISR_RXDESC
)
1030 regs
.isr
&= ~ISR_RXDESC
;
1032 if (interrupts
& ISR_RXOK
)
1033 regs
.isr
&= ~ISR_RXOK
;
1035 if (!(regs
.isr
& regs
.imr
))
1038 DPRINTF(EthernetIntr
, "**interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
1039 interrupts
, regs
.isr
, regs
.imr
);
1043 NSGigE::devIntrChangeMask()
1045 DPRINTF(EthernetIntr
, "interrupt mask changed\n");
1047 if (regs
.isr
& regs
.imr
)
1048 cpuIntrPost(curTick
);
1054 NSGigE::cpuIntrPost(Tick when
)
1056 //If the interrupt you want to post is later than an
1057 //interrupt already scheduled, just let it post in the coming one and
1058 //don't schedule another.
1059 //HOWEVER, must be sure that the scheduled intrTick is in the future
1060 //(this was formerly the source of a bug)
1061 assert((intrTick
>= curTick
) || (intrTick
== 0));
1062 if (when
> intrTick
&& intrTick
!= 0)
1068 intrEvent
->squash();
1072 if (when
< curTick
) {
1075 DPRINTF(EthernetIntr
, "going to schedule an interrupt for intrTick=%d\n",
1077 intrEvent
= new IntrEvent(this, true);
1078 intrEvent
->schedule(intrTick
);
1083 NSGigE::cpuInterrupt()
1085 // Don't send an interrupt if there's already one
1086 if (cpuPendingIntr
) {
1087 DPRINTF(EthernetIntr
,
1088 "would send an interrupt now, but there's already pending\n");
1092 // Don't send an interrupt if it's supposed to be delayed
1093 if (intrTick
> curTick
) {
1094 DPRINTF(EthernetIntr
, "an interrupt is scheduled for %d, wait til then\n",
1099 // Whether or not there's a pending interrupt, we don't care about
1105 cpuPendingIntr
= true;
1106 /** @todo rework the intctrl to be tsunami ok */
1107 //intctrl->post(TheISA::INTLEVEL_IRQ1, TheISA::INTINDEX_ETHERNET);
1108 DPRINTF(EthernetIntr
, "Posting interrupts to cchip!\n");
1109 tsunami
->cchip
->postDRIR(configData
->config
.hdr
.pci0
.interruptLine
);
1113 NSGigE::cpuIntrClear()
1115 if (cpuPendingIntr
) {
1116 cpuPendingIntr
= false;
1117 /** @todo rework the intctrl to be tsunami ok */
1118 //intctrl->clear(TheISA::INTLEVEL_IRQ1, TheISA::INTINDEX_ETHERNET);
1119 DPRINTF(EthernetIntr
, "clearing all interrupts from cchip\n");
1120 tsunami
->cchip
->clearDRIR(configData
->config
.hdr
.pci0
.interruptLine
);
1125 NSGigE::cpuIntrPending() const
1126 { return cpuPendingIntr
; }
1132 DPRINTF(Ethernet
, "transmit reset\n");
1135 txFifoAvail
= maxTxFifoSize
;
1138 assert(txDescCnt
== 0);
1140 regs
.command
&= ~CR_TXE
;
1142 assert(txDmaState
== dmaIdle
);
1148 DPRINTF(Ethernet
, "receive reset\n");
1151 assert(rxPktBytes
== 0);
1155 assert(rxDescCnt
== 0);
1156 assert(rxDmaState
== dmaIdle
);
1158 regs
.command
&= ~CR_RXE
;
1162 void NSGigE::regsReset()
1164 memset(®s
, 0, sizeof(regs
));
1165 regs
.config
= 0x80000000;
1167 regs
.isr
= 0x00608000;
1175 extstsEnable
= false;
1176 acceptBroadcast
= false;
1177 acceptMulticast
= false;
1178 acceptUnicast
= false;
1179 acceptPerfect
= false;
1184 NSGigE::rxDmaReadCopy()
1186 assert(rxDmaState
== dmaReading
);
1188 memcpy(rxDmaData
, physmem
->dma_addr(rxDmaAddr
, rxDmaLen
), rxDmaLen
);
1189 rxDmaState
= dmaIdle
;
1191 DPRINTF(EthernetDMA
, "rx dma read paddr=%#x len=%d\n",
1192 rxDmaAddr
, rxDmaLen
);
1193 DDUMP(EthernetDMA
, rxDmaData
, rxDmaLen
);
1197 NSGigE::doRxDmaRead()
1199 assert(rxDmaState
== dmaIdle
|| rxDmaState
== dmaReadWaiting
);
1200 rxDmaState
= dmaReading
;
1202 if (dmaInterface
&& !rxDmaFree
) {
1203 if (dmaInterface
->busy())
1204 rxDmaState
= dmaReadWaiting
;
1206 dmaInterface
->doDMA(Read
, rxDmaAddr
, rxDmaLen
, curTick
,
1207 &rxDmaReadEvent
, true);
1211 if (dmaReadDelay
== 0 && dmaReadFactor
== 0) {
1216 Tick factor
= ((rxDmaLen
+ ULL(63)) >> ULL(6)) * dmaReadFactor
;
1217 Tick start
= curTick
+ dmaReadDelay
+ factor
;
1218 rxDmaReadEvent
.schedule(start
);
1223 NSGigE::rxDmaReadDone()
1225 assert(rxDmaState
== dmaReading
);
1228 // If the transmit state machine has a pending DMA, let it go first
1229 if (txDmaState
== dmaReadWaiting
|| txDmaState
== dmaWriteWaiting
)
1236 NSGigE::rxDmaWriteCopy()
1238 assert(rxDmaState
== dmaWriting
);
1240 memcpy(physmem
->dma_addr(rxDmaAddr
, rxDmaLen
), rxDmaData
, rxDmaLen
);
1241 rxDmaState
= dmaIdle
;
1243 DPRINTF(EthernetDMA
, "rx dma write paddr=%#x len=%d\n",
1244 rxDmaAddr
, rxDmaLen
);
1245 DDUMP(EthernetDMA
, rxDmaData
, rxDmaLen
);
1249 NSGigE::doRxDmaWrite()
1251 assert(rxDmaState
== dmaIdle
|| rxDmaState
== dmaWriteWaiting
);
1252 rxDmaState
= dmaWriting
;
1254 if (dmaInterface
&& !rxDmaFree
) {
1255 if (dmaInterface
->busy())
1256 rxDmaState
= dmaWriteWaiting
;
1258 dmaInterface
->doDMA(WriteInvalidate
, rxDmaAddr
, rxDmaLen
, curTick
,
1259 &rxDmaWriteEvent
, true);
1263 if (dmaWriteDelay
== 0 && dmaWriteFactor
== 0) {
1268 Tick factor
= ((rxDmaLen
+ ULL(63)) >> ULL(6)) * dmaWriteFactor
;
1269 Tick start
= curTick
+ dmaWriteDelay
+ factor
;
1270 rxDmaWriteEvent
.schedule(start
);
1275 NSGigE::rxDmaWriteDone()
1277 assert(rxDmaState
== dmaWriting
);
1280 // If the transmit state machine has a pending DMA, let it go first
1281 if (txDmaState
== dmaReadWaiting
|| txDmaState
== dmaWriteWaiting
)
1290 DPRINTF(EthernetSM
, "receive kick state=%s (rxBuf.size=%d)\n",
1291 NsRxStateStrings
[rxState
], rxFifo
.size());
1293 if (rxKickTick
> curTick
) {
1294 DPRINTF(EthernetSM
, "receive kick exiting, can't run till %d\n",
1300 switch(rxDmaState
) {
1301 case dmaReadWaiting
:
1305 case dmaWriteWaiting
:
1313 // see state machine from spec for details
1314 // the way this works is, if you finish work on one state and can go directly to
1315 // another, you do that through jumping to the label "next". however, if you have
1316 // intermediate work, like DMA so that you can't go to the next state yet, you go to
1317 // exit and exit the loop. however, when the DMA is done it will trigger an
1318 // event and come back to this loop.
// NOTE(review): operator-precedence bug — "!regs.command & CR_RXE" parses as
// "(!regs.command) & CR_RXE", so this tests the logical negation of the whole
// command register ANDed with CR_RXE, not whether the RX-enable bit is clear.
// Intended expression is almost certainly "!(regs.command & CR_RXE)".
1321 if (!regs
.command
& CR_RXE
) {
1322 DPRINTF(EthernetSM
, "Receive Disabled! Nothing to do.\n");
1327 rxState
= rxDescRefr
;
1329 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1330 rxDmaData
= &rxDescCache
+ offsetof(ns_desc
, link
);
1331 rxDmaLen
= sizeof(rxDescCache
.link
);
1332 rxDmaFree
= dmaDescFree
;
1335 descDmaRdBytes
+= rxDmaLen
;
1340 rxState
= rxDescRead
;
1342 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1343 rxDmaData
= &rxDescCache
;
1344 rxDmaLen
= sizeof(ns_desc
);
1345 rxDmaFree
= dmaDescFree
;
1348 descDmaRdBytes
+= rxDmaLen
;
1356 if (rxDmaState
!= dmaIdle
)
1359 rxState
= rxAdvance
;
1363 if (rxDmaState
!= dmaIdle
)
1366 DPRINTF(EthernetDesc
,
1367 "rxDescCache:\n\tlink=%08x\n\tbufptr=%08x\n\tcmdsts=%08x\n\textsts=%08x\n"
1368 ,rxDescCache
.link
, rxDescCache
.bufptr
, rxDescCache
.cmdsts
,
1369 rxDescCache
.extsts
);
1371 if (rxDescCache
.cmdsts
& CMDSTS_OWN
) {
1374 rxState
= rxFifoBlock
;
1375 rxFragPtr
= rxDescCache
.bufptr
;
1376 rxDescCnt
= rxDescCache
.cmdsts
& CMDSTS_LEN_MASK
;
1383 * @todo in reality, we should be able to start processing
1384 * the packet as it arrives, and not have to wait for the
1385 * full packet ot be in the receive fifo.
1390 DPRINTF(EthernetSM
, "\n\n*****processing receive of new packet\n");
1392 // If we don't have a packet, grab a new one from the fifo.
1393 rxPacket
= rxFifo
.front();
1394 rxPktBytes
= rxPacket
->length
;
1395 rxPacketBufPtr
= rxPacket
->data
;
1397 if (DTRACE(Ethernet
)) {
1398 if (rxPacket
->isIpPkt()) {
1399 ip_header
*ip
= rxPacket
->getIpHdr();
1400 DPRINTF(Ethernet
, "ID is %d\n", reverseEnd16(ip
->ID
));
1401 if (rxPacket
->isTcpPkt()) {
1402 tcp_header
*tcp
= rxPacket
->getTcpHdr(ip
);
1403 DPRINTF(Ethernet
, "Src Port = %d, Dest Port = %d\n",
1404 reverseEnd16(tcp
->src_port_num
),
1405 reverseEnd16(tcp
->dest_port_num
));
1410 // sanity check - i think the driver behaves like this
1411 assert(rxDescCnt
>= rxPktBytes
);
1413 // Must clear the value before popping to decrement the
1415 rxFifo
.front() = NULL
;
1417 rxFifoCnt
-= rxPacket
->length
;
1421 // dont' need the && rxDescCnt > 0 if driver sanity check above holds
1422 if (rxPktBytes
> 0) {
1423 rxState
= rxFragWrite
;
1424 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity check holds
1425 rxXferLen
= rxPktBytes
;
1427 rxDmaAddr
= rxFragPtr
& 0x3fffffff;
1428 rxDmaData
= rxPacketBufPtr
;
1429 rxDmaLen
= rxXferLen
;
1430 rxDmaFree
= dmaDataFree
;
1436 rxState
= rxDescWrite
;
1438 //if (rxPktBytes == 0) { /* packet is done */
1439 assert(rxPktBytes
== 0);
1440 DPRINTF(EthernetSM
, "done with receiving packet\n");
1442 rxDescCache
.cmdsts
|= CMDSTS_OWN
;
1443 rxDescCache
.cmdsts
&= ~CMDSTS_MORE
;
1444 rxDescCache
.cmdsts
|= CMDSTS_OK
;
1445 rxDescCache
.cmdsts
&= 0xffff0000;
1446 rxDescCache
.cmdsts
+= rxPacket
->length
; //i.e. set CMDSTS_SIZE
1449 /* all the driver uses these are for its own stats keeping
1450 which we don't care about, aren't necessary for functionality
1451 and doing this would just slow us down. if they end up using
1452 this in a later version for functional purposes, just undef
1454 if (rxFilterEnable
) {
1455 rxDescCache
.cmdsts
&= ~CMDSTS_DEST_MASK
;
1456 if (rxFifo
.front()->IsUnicast())
1457 rxDescCache
.cmdsts
|= CMDSTS_DEST_SELF
;
1458 if (rxFifo
.front()->IsMulticast())
1459 rxDescCache
.cmdsts
|= CMDSTS_DEST_MULTI
;
1460 if (rxFifo
.front()->IsBroadcast())
1461 rxDescCache
.cmdsts
|= CMDSTS_DEST_MASK
;
1465 if (rxPacket
->isIpPkt() && extstsEnable
) {
1466 rxDescCache
.extsts
|= EXTSTS_IPPKT
;
1468 if (!ipChecksum(rxPacket
, false)) {
1469 DPRINTF(EthernetCksum
, "Rx IP Checksum Error\n");
1470 rxDescCache
.extsts
|= EXTSTS_IPERR
;
1472 if (rxPacket
->isTcpPkt()) {
1473 rxDescCache
.extsts
|= EXTSTS_TCPPKT
;
1475 if (!tcpChecksum(rxPacket
, false)) {
1476 DPRINTF(EthernetCksum
, "Rx TCP Checksum Error\n");
1477 rxDescCache
.extsts
|= EXTSTS_TCPERR
;
1480 } else if (rxPacket
->isUdpPkt()) {
1481 rxDescCache
.extsts
|= EXTSTS_UDPPKT
;
1482 if (!udpChecksum(rxPacket
, false)) {
1483 DPRINTF(EthernetCksum
, "Rx UDP Checksum Error\n");
1484 rxDescCache
.extsts
|= EXTSTS_UDPERR
;
1490 /* the driver seems to always receive into desc buffers
1491 of size 1514, so you never have a pkt that is split
1492 into multiple descriptors on the receive side, so
1493 i don't implement that case, hence the assert above.
1496 DPRINTF(EthernetDesc
, "rxDesc writeback:\n\tcmdsts=%08x\n\textsts=%08x\n",
1497 rxDescCache
.cmdsts
, rxDescCache
.extsts
);
1499 rxDmaAddr
= (regs
.rxdp
+ offsetof(ns_desc
, cmdsts
)) & 0x3fffffff;
1500 rxDmaData
= &(rxDescCache
.cmdsts
);
1501 rxDmaLen
= sizeof(rxDescCache
.cmdsts
) + sizeof(rxDescCache
.extsts
);
1502 rxDmaFree
= dmaDescFree
;
1505 descDmaWrBytes
+= rxDmaLen
;
1513 if (rxDmaState
!= dmaIdle
)
1516 rxPacketBufPtr
+= rxXferLen
;
1517 rxFragPtr
+= rxXferLen
;
1518 rxPktBytes
-= rxXferLen
;
1520 rxState
= rxFifoBlock
;
1524 if (rxDmaState
!= dmaIdle
)
1527 assert(rxDescCache
.cmdsts
& CMDSTS_OWN
);
1529 assert(rxPacket
== 0);
1530 devIntrPost(ISR_RXOK
);
1532 if (rxDescCache
.cmdsts
& CMDSTS_INTR
)
1533 devIntrPost(ISR_RXDESC
);
1536 DPRINTF(EthernetSM
, "Halting the RX state machine\n");
1540 rxState
= rxAdvance
;
1544 if (rxDescCache
.link
== 0) {
1548 rxState
= rxDescRead
;
1549 regs
.rxdp
= rxDescCache
.link
;
1552 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1553 rxDmaData
= &rxDescCache
;
1554 rxDmaLen
= sizeof(ns_desc
);
1555 rxDmaFree
= dmaDescFree
;
1563 panic("Invalid rxState!");
1567 DPRINTF(EthernetSM
, "entering next rx state = %s\n",
1568 NsRxStateStrings
[rxState
]);
1570 if (rxState
== rxIdle
) {
1571 regs
.command
&= ~CR_RXE
;
1572 devIntrPost(ISR_RXIDLE
);
1580 * @todo do we want to schedule a future kick?
1582 DPRINTF(EthernetSM
, "rx state machine exited state=%s\n",
1583 NsRxStateStrings
[rxState
]);
1589 if (txFifo
.empty()) {
1590 DPRINTF(Ethernet
, "nothing to transmit\n");
1594 DPRINTF(Ethernet
, "\n\nAttempt Pkt Transmit: txFifo length = %d\n",
1595 maxTxFifoSize
- txFifoAvail
);
1596 if (interface
->sendPacket(txFifo
.front())) {
1597 if (DTRACE(Ethernet
)) {
1598 if (txFifo
.front()->isIpPkt()) {
1599 ip_header
*ip
= txFifo
.front()->getIpHdr();
1600 DPRINTF(Ethernet
, "ID is %d\n", reverseEnd16(ip
->ID
));
1601 if (txFifo
.front()->isTcpPkt()) {
1602 tcp_header
*tcp
= txFifo
.front()->getTcpHdr(ip
);
1603 DPRINTF(Ethernet
, "Src Port = %d, Dest Port = %d\n",
1604 reverseEnd16(tcp
->src_port_num
),
1605 reverseEnd16(tcp
->dest_port_num
));
1610 DDUMP(Ethernet
, txFifo
.front()->data
, txFifo
.front()->length
);
1611 txBytes
+= txFifo
.front()->length
;
1614 txFifoAvail
+= txFifo
.front()->length
;
1616 DPRINTF(Ethernet
, "Successful Xmit! now txFifoAvail is %d\n", txFifoAvail
);
1617 txFifo
.front() = NULL
;
1620 /* normally do a writeback of the descriptor here, and ONLY after that is
1621 done, send this interrupt. but since our stuff never actually fails,
1622 just do this interrupt here, otherwise the code has to stray from this
1623 nice format. besides, it's functionally the same.
1625 devIntrPost(ISR_TXOK
);
1627 DPRINTF(Ethernet
, "May need to rethink always sending the descriptors back?\n");
1629 if (!txFifo
.empty() && !txEvent
.scheduled()) {
1630 DPRINTF(Ethernet
, "reschedule transmit\n");
1631 txEvent
.schedule(curTick
+ 1000);
1636 NSGigE::txDmaReadCopy()
1638 assert(txDmaState
== dmaReading
);
1640 memcpy(txDmaData
, physmem
->dma_addr(txDmaAddr
, txDmaLen
), txDmaLen
);
1641 txDmaState
= dmaIdle
;
1643 DPRINTF(EthernetDMA
, "tx dma read paddr=%#x len=%d\n",
1644 txDmaAddr
, txDmaLen
);
1645 DDUMP(EthernetDMA
, txDmaData
, txDmaLen
);
1649 NSGigE::doTxDmaRead()
1651 assert(txDmaState
== dmaIdle
|| txDmaState
== dmaReadWaiting
);
1652 txDmaState
= dmaReading
;
1654 if (dmaInterface
&& !txDmaFree
) {
1655 if (dmaInterface
->busy())
1656 txDmaState
= dmaReadWaiting
;
1658 dmaInterface
->doDMA(Read
, txDmaAddr
, txDmaLen
, curTick
,
1659 &txDmaReadEvent
, true);
1663 if (dmaReadDelay
== 0 && dmaReadFactor
== 0.0) {
1668 Tick factor
= ((txDmaLen
+ ULL(63)) >> ULL(6)) * dmaReadFactor
;
1669 Tick start
= curTick
+ dmaReadDelay
+ factor
;
1670 txDmaReadEvent
.schedule(start
);
1675 NSGigE::txDmaReadDone()
1677 assert(txDmaState
== dmaReading
);
1680 // If the receive state machine has a pending DMA, let it go first
1681 if (rxDmaState
== dmaReadWaiting
|| rxDmaState
== dmaWriteWaiting
)
1688 NSGigE::txDmaWriteCopy()
1690 assert(txDmaState
== dmaWriting
);
1692 memcpy(physmem
->dma_addr(txDmaAddr
, txDmaLen
), txDmaData
, txDmaLen
);
1693 txDmaState
= dmaIdle
;
1695 DPRINTF(EthernetDMA
, "tx dma write paddr=%#x len=%d\n",
1696 txDmaAddr
, txDmaLen
);
1697 DDUMP(EthernetDMA
, txDmaData
, txDmaLen
);
1701 NSGigE::doTxDmaWrite()
1703 assert(txDmaState
== dmaIdle
|| txDmaState
== dmaWriteWaiting
);
1704 txDmaState
= dmaWriting
;
1706 if (dmaInterface
&& !txDmaFree
) {
1707 if (dmaInterface
->busy())
1708 txDmaState
= dmaWriteWaiting
;
1710 dmaInterface
->doDMA(WriteInvalidate
, txDmaAddr
, txDmaLen
, curTick
,
1711 &txDmaWriteEvent
, true);
1715 if (dmaWriteDelay
== 0 && dmaWriteFactor
== 0.0) {
1720 Tick factor
= ((txDmaLen
+ ULL(63)) >> ULL(6)) * dmaWriteFactor
;
1721 Tick start
= curTick
+ dmaWriteDelay
+ factor
;
1722 txDmaWriteEvent
.schedule(start
);
1727 NSGigE::txDmaWriteDone()
1729 assert(txDmaState
== dmaWriting
);
1732 // If the receive state machine has a pending DMA, let it go first
1733 if (rxDmaState
== dmaReadWaiting
|| rxDmaState
== dmaWriteWaiting
)
1742 DPRINTF(EthernetSM
, "transmit kick state=%s\n", NsTxStateStrings
[txState
]);
1744 if (txKickTick
> curTick
) {
1745 DPRINTF(EthernetSM
, "transmit kick exiting, can't run till %d\n",
1752 switch(txDmaState
) {
1753 case dmaReadWaiting
:
1757 case dmaWriteWaiting
:
1767 if (!regs
.command
& CR_TXE
) {
1768 DPRINTF(EthernetSM
, "Transmit disabled. Nothing to do.\n");
1773 txState
= txDescRefr
;
1775 txDmaAddr
= regs
.txdp
& 0x3fffffff;
1776 txDmaData
= &txDescCache
+ offsetof(ns_desc
, link
);
1777 txDmaLen
= sizeof(txDescCache
.link
);
1778 txDmaFree
= dmaDescFree
;
1781 descDmaRdBytes
+= txDmaLen
;
1787 txState
= txDescRead
;
1789 txDmaAddr
= regs
.txdp
& 0x3fffffff;
1790 txDmaData
= &txDescCache
;
1791 txDmaLen
= sizeof(ns_desc
);
1792 txDmaFree
= dmaDescFree
;
1795 descDmaRdBytes
+= txDmaLen
;
1803 if (txDmaState
!= dmaIdle
)
1806 txState
= txAdvance
;
1810 if (txDmaState
!= dmaIdle
)
1813 DPRINTF(EthernetDesc
,
1814 "txDescCache data:\n\tlink=%08x\n\tbufptr=%08x\n\tcmdsts=%08x\n\textsts=%08x\n"
1815 ,txDescCache
.link
, txDescCache
.bufptr
, txDescCache
.cmdsts
,
1816 txDescCache
.extsts
);
1818 if (txDescCache
.cmdsts
& CMDSTS_OWN
) {
1819 txState
= txFifoBlock
;
1820 txFragPtr
= txDescCache
.bufptr
;
1821 txDescCnt
= txDescCache
.cmdsts
& CMDSTS_LEN_MASK
;
1829 DPRINTF(EthernetSM
, "\n\n*****starting the tx of a new packet\n");
1830 txPacket
= new EtherPacket
;
1831 txPacket
->data
= new uint8_t[16384];
1832 txPacketBufPtr
= txPacket
->data
;
1835 if (txDescCnt
== 0) {
1836 DPRINTF(EthernetSM
, "the txDescCnt == 0, done with descriptor\n");
1837 if (txDescCache
.cmdsts
& CMDSTS_MORE
) {
1838 DPRINTF(EthernetSM
, "there are more descriptors to come\n");
1839 txState
= txDescWrite
;
1841 txDescCache
.cmdsts
&= ~CMDSTS_OWN
;
1843 txDmaAddr
= (regs
.txdp
+ offsetof(ns_desc
, cmdsts
)) & 0x3fffffff;
1844 txDmaData
= &(txDescCache
.cmdsts
);
1845 txDmaLen
= sizeof(txDescCache
.cmdsts
);
1846 txDmaFree
= dmaDescFree
;
1851 } else { /* this packet is totally done */
1852 DPRINTF(EthernetSM
, "This packet is done, let's wrap it up\n");
1853 /* deal with the the packet that just finished */
1854 if ((regs
.vtcr
& VTCR_PPCHK
) && extstsEnable
) {
1855 if (txDescCache
.extsts
& EXTSTS_UDPPKT
) {
1856 udpChecksum(txPacket
, true);
1857 } else if (txDescCache
.extsts
& EXTSTS_TCPPKT
) {
1858 tcpChecksum(txPacket
, true);
1861 if (txDescCache
.extsts
& EXTSTS_IPPKT
) {
1862 ipChecksum(txPacket
, true);
1867 txPacket
->length
= txPacketBufPtr
- txPacket
->data
;
1868 /* this is just because the receive can't handle a packet bigger
1869 want to make sure */
1870 assert(txPacket
->length
<= 1514);
1871 txFifo
.push_back(txPacket
);
1873 /* this following section is not to spec, but functionally shouldn't
1874 be any different. normally, the chip will wait til the transmit has
1875 occurred before writing back the descriptor because it has to wait
1876 to see that it was successfully transmitted to decide whether to set
1877 CMDSTS_OK or not. however, in the simulator since it is always
1878 successfully transmitted, and writing it exactly to spec would
1879 complicate the code, we just do it here
1882 txDescCache
.cmdsts
&= ~CMDSTS_OWN
;
1883 txDescCache
.cmdsts
|= CMDSTS_OK
;
1885 DPRINTF(EthernetDesc
,
1886 "txDesc writeback:\n\tcmdsts=%08x\n\textsts=%08x\n",
1887 txDescCache
.cmdsts
, txDescCache
.extsts
);
1889 txDmaAddr
= (regs
.txdp
+ offsetof(ns_desc
, cmdsts
)) & 0x3fffffff;
1890 txDmaData
= &(txDescCache
.cmdsts
);
1891 txDmaLen
= sizeof(txDescCache
.cmdsts
) + sizeof(txDescCache
.extsts
);
1892 txDmaFree
= dmaDescFree
;
1895 descDmaWrBytes
+= txDmaLen
;
1901 DPRINTF(EthernetSM
, "halting TX state machine\n");
1905 txState
= txAdvance
;
1911 DPRINTF(EthernetSM
, "this descriptor isn't done yet\n");
1913 txState
= txFragRead
;
1915 /* The number of bytes transferred is either whatever is left
1916 in the descriptor (txDescCnt), or if there is not enough
1917 room in the fifo, just whatever room is left in the fifo
1919 txXferLen
= min
<uint32_t>(txDescCnt
, txFifoAvail
);
1921 txDmaAddr
= txFragPtr
& 0x3fffffff;
1922 txDmaData
= txPacketBufPtr
;
1923 txDmaLen
= txXferLen
;
1924 txDmaFree
= dmaDataFree
;
1929 txState
= txFifoBlock
;
1939 if (txDmaState
!= dmaIdle
)
1942 txPacketBufPtr
+= txXferLen
;
1943 txFragPtr
+= txXferLen
;
1944 txDescCnt
-= txXferLen
;
1945 txFifoAvail
-= txXferLen
;
1947 txState
= txFifoBlock
;
1951 if (txDmaState
!= dmaIdle
)
1954 if (txDescCache
.cmdsts
& CMDSTS_INTR
) {
1955 devIntrPost(ISR_TXDESC
);
1958 txState
= txAdvance
;
1962 if (txDescCache
.link
== 0) {
1965 txState
= txDescRead
;
1966 regs
.txdp
= txDescCache
.link
;
1969 txDmaAddr
= txDescCache
.link
& 0x3fffffff;
1970 txDmaData
= &txDescCache
;
1971 txDmaLen
= sizeof(ns_desc
);
1972 txDmaFree
= dmaDescFree
;
1980 panic("invalid state");
1983 DPRINTF(EthernetSM
, "entering next tx state=%s\n",
1984 NsTxStateStrings
[txState
]);
1986 if (txState
== txIdle
) {
1987 regs
.command
&= ~CR_TXE
;
1988 devIntrPost(ISR_TXIDLE
);
1996 * @todo do we want to schedule a future kick?
1998 DPRINTF(EthernetSM
, "tx state machine exited state=%s\n",
1999 NsTxStateStrings
[txState
]);
2003 NSGigE::transferDone()
2008 if (txEvent
.scheduled())
2009 txEvent
.reschedule(curTick
+ 1);
2011 txEvent
.schedule(curTick
+ 1);
2015 NSGigE::rxFilter(PacketPtr packet
)
2020 if (packet
->IsUnicast()) {
2023 // If we're accepting all unicast addresses
2027 // If we make a perfect match
2029 && (memcmp(rom
.perfectMatch
, packet
->data
, sizeof(rom
.perfectMatch
)) == 0))
2032 eth_header
*eth
= (eth_header
*) packet
->data
;
2033 if ((acceptArp
) && (eth
->type
== 0x608))
2036 } else if (packet
->IsBroadcast()) {
2039 // if we're accepting broadcasts
2040 if (acceptBroadcast
)
2043 } else if (packet
->IsMulticast()) {
2046 // if we're accepting all multicasts
2047 if (acceptMulticast
)
2053 // oh well, punt on this one
2057 DPRINTF(Ethernet
, "rxFilter drop\n");
2058 DDUMP(EthernetData
, packet
->data
, packet
->length
);
2065 NSGigE::recvPacket(PacketPtr packet
)
2067 rxBytes
+= packet
->length
;
2070 DPRINTF(Ethernet
, "\n\nReceiving packet from wire, rxFifoAvail = %d\n", maxRxFifoSize
- rxFifoCnt
);
2072 if (rxState
== rxIdle
) {
2073 DPRINTF(Ethernet
, "receive disabled...packet dropped\n");
2074 interface
->recvDone();
2078 if (rxFilterEnable
&& rxFilter(packet
)) {
2079 DPRINTF(Ethernet
, "packet filtered...dropped\n");
2080 interface
->recvDone();
2084 if ((rxFifoCnt
+ packet
->length
) >= maxRxFifoSize
) {
2086 "packet will not fit in receive buffer...packet dropped\n");
2087 devIntrPost(ISR_RXORN
);
2091 rxFifo
.push_back(packet
);
2092 rxFifoCnt
+= packet
->length
;
2093 interface
->recvDone();
2100 * does a udp checksum. if gen is true, then it generates it and puts it in the right place
2101 * else, it just checks what it calculates against the value in the header in packet
2104 NSGigE::udpChecksum(PacketPtr packet
, bool gen
)
2106 ip_header
*ip
= packet
->getIpHdr();
2107 udp_header
*hdr
= packet
->getUdpHdr(ip
);
2109 pseudo_header
*pseudo
= new pseudo_header
;
2111 pseudo
->src_ip_addr
= ip
->src_ip_addr
;
2112 pseudo
->dest_ip_addr
= ip
->dest_ip_addr
;
2113 pseudo
->protocol
= ip
->protocol
;
2114 pseudo
->len
= hdr
->len
;
2116 uint16_t cksum
= checksumCalc((uint16_t *) pseudo
, (uint16_t *) hdr
,
2117 (uint32_t) hdr
->len
);
2121 hdr
->chksum
= cksum
;
2130 NSGigE::tcpChecksum(PacketPtr packet
, bool gen
)
2132 ip_header
*ip
= packet
->getIpHdr();
2133 tcp_header
*hdr
= packet
->getTcpHdr(ip
);
2136 pseudo_header
*pseudo
= new pseudo_header
;
2138 pseudo
->src_ip_addr
= ip
->src_ip_addr
;
2139 pseudo
->dest_ip_addr
= ip
->dest_ip_addr
;
2140 pseudo
->protocol
= reverseEnd16(ip
->protocol
);
2141 pseudo
->len
= reverseEnd16(reverseEnd16(ip
->dgram_len
) - (ip
->vers_len
& 0xf)*4);
2143 cksum
= checksumCalc((uint16_t *) pseudo
, (uint16_t *) hdr
,
2144 (uint32_t) reverseEnd16(pseudo
->len
));
2146 pseudo
->src_ip_addr
= 0;
2147 pseudo
->dest_ip_addr
= 0;
2148 pseudo
->protocol
= hdr
->chksum
;
2151 cksum
= checksumCalc((uint16_t *) pseudo
, (uint16_t *) hdr
,
2152 (uint32_t) (reverseEnd16(ip
->dgram_len
) - (ip
->vers_len
& 0xf)*4));
2157 hdr
->chksum
= cksum
;
2166 NSGigE::ipChecksum(PacketPtr packet
, bool gen
)
2168 ip_header
*hdr
= packet
->getIpHdr();
2170 uint16_t cksum
= checksumCalc(NULL
, (uint16_t *) hdr
, (hdr
->vers_len
& 0xf)*4);
2173 DPRINTF(EthernetCksum
, "generated checksum: %#x\n", cksum
);
2174 hdr
->hdr_chksum
= cksum
;
2184 NSGigE::checksumCalc(uint16_t *pseudo
, uint16_t *buf
, uint32_t len
)
2188 uint16_t last_pad
= 0;
2190 last_pad
= buf
[len
/2] & 0xff;
2196 sum
= pseudo
[0] + pseudo
[1] + pseudo
[2] +
2197 pseudo
[3] + pseudo
[4] + pseudo
[5];
2200 for (int i
=0; i
< (len
/2); ++i
) {
2205 sum
= (sum
>> 16) + (sum
& 0xffff);
2210 //=====================================================================
2214 NSGigE::serialize(ostream
&os
)
2216 // Serialize the PciDev base class
2217 PciDev::serialize(os
);
2220 * Finalize any DMA events now.
2222 if (rxDmaReadEvent
.scheduled())
2224 if (rxDmaWriteEvent
.scheduled())
2226 if (txDmaReadEvent
.scheduled())
2228 if (txDmaWriteEvent
.scheduled())
2232 * Serialize the device registers
2234 SERIALIZE_SCALAR(regs
.command
);
2235 SERIALIZE_SCALAR(regs
.config
);
2236 SERIALIZE_SCALAR(regs
.mear
);
2237 SERIALIZE_SCALAR(regs
.ptscr
);
2238 SERIALIZE_SCALAR(regs
.isr
);
2239 SERIALIZE_SCALAR(regs
.imr
);
2240 SERIALIZE_SCALAR(regs
.ier
);
2241 SERIALIZE_SCALAR(regs
.ihr
);
2242 SERIALIZE_SCALAR(regs
.txdp
);
2243 SERIALIZE_SCALAR(regs
.txdp_hi
);
2244 SERIALIZE_SCALAR(regs
.txcfg
);
2245 SERIALIZE_SCALAR(regs
.gpior
);
2246 SERIALIZE_SCALAR(regs
.rxdp
);
2247 SERIALIZE_SCALAR(regs
.rxdp_hi
);
2248 SERIALIZE_SCALAR(regs
.rxcfg
);
2249 SERIALIZE_SCALAR(regs
.pqcr
);
2250 SERIALIZE_SCALAR(regs
.wcsr
);
2251 SERIALIZE_SCALAR(regs
.pcr
);
2252 SERIALIZE_SCALAR(regs
.rfcr
);
2253 SERIALIZE_SCALAR(regs
.rfdr
);
2254 SERIALIZE_SCALAR(regs
.srr
);
2255 SERIALIZE_SCALAR(regs
.mibc
);
2256 SERIALIZE_SCALAR(regs
.vrcr
);
2257 SERIALIZE_SCALAR(regs
.vtcr
);
2258 SERIALIZE_SCALAR(regs
.vdr
);
2259 SERIALIZE_SCALAR(regs
.ccsr
);
2260 SERIALIZE_SCALAR(regs
.tbicr
);
2261 SERIALIZE_SCALAR(regs
.tbisr
);
2262 SERIALIZE_SCALAR(regs
.tanar
);
2263 SERIALIZE_SCALAR(regs
.tanlpar
);
2264 SERIALIZE_SCALAR(regs
.taner
);
2265 SERIALIZE_SCALAR(regs
.tesr
);
2267 SERIALIZE_ARRAY(rom
.perfectMatch
, EADDR_LEN
);
2269 SERIALIZE_SCALAR(ioEnable
);
2272 * Serialize the data Fifos
2274 int txNumPkts
= txFifo
.size();
2275 SERIALIZE_SCALAR(txNumPkts
);
2277 pktiter_t end
= txFifo
.end();
2278 for (pktiter_t p
= txFifo
.begin(); p
!= end
; ++p
) {
2279 nameOut(os
, csprintf("%s.txFifo%d", name(), i
++));
2280 (*p
)->serialize(os
);
2283 int rxNumPkts
= rxFifo
.size();
2284 SERIALIZE_SCALAR(rxNumPkts
);
2287 for (pktiter_t p
= rxFifo
.begin(); p
!= end
; ++p
) {
2288 nameOut(os
, csprintf("%s.rxFifo%d", name(), i
++));
2289 (*p
)->serialize(os
);
2293 * Serialize the various helper variables
2295 bool txPacketExists
= txPacket
;
2296 SERIALIZE_SCALAR(txPacketExists
);
2297 if (txPacketExists
) {
2298 nameOut(os
, csprintf("%s.txPacket", name()));
2299 txPacket
->serialize(os
);
2300 uint32_t txPktBufPtr
= (uint32_t) (txPacketBufPtr
- txPacket
->data
);
2301 SERIALIZE_SCALAR(txPktBufPtr
);
2304 bool rxPacketExists
= rxPacket
;
2305 SERIALIZE_SCALAR(rxPacketExists
);
2306 if (rxPacketExists
) {
2307 nameOut(os
, csprintf("%s.rxPacket", name()));
2308 rxPacket
->serialize(os
);
2309 uint32_t rxPktBufPtr
= (uint32_t) (rxPacketBufPtr
- rxPacket
->data
);
2310 SERIALIZE_SCALAR(rxPktBufPtr
);
2313 SERIALIZE_SCALAR(txXferLen
);
2314 SERIALIZE_SCALAR(rxXferLen
);
2317 * Serialize DescCaches
2319 SERIALIZE_SCALAR(txDescCache
.link
);
2320 SERIALIZE_SCALAR(txDescCache
.bufptr
);
2321 SERIALIZE_SCALAR(txDescCache
.cmdsts
);
2322 SERIALIZE_SCALAR(txDescCache
.extsts
);
2323 SERIALIZE_SCALAR(rxDescCache
.link
);
2324 SERIALIZE_SCALAR(rxDescCache
.bufptr
);
2325 SERIALIZE_SCALAR(rxDescCache
.cmdsts
);
2326 SERIALIZE_SCALAR(rxDescCache
.extsts
);
2329 * Serialize tx state machine
2331 int txState
= this->txState
;
2332 SERIALIZE_SCALAR(txState
);
2333 SERIALIZE_SCALAR(CTDD
);
2334 SERIALIZE_SCALAR(txFifoAvail
);
2335 SERIALIZE_SCALAR(txHalt
);
2336 SERIALIZE_SCALAR(txFragPtr
);
2337 SERIALIZE_SCALAR(txDescCnt
);
2338 int txDmaState
= this->txDmaState
;
2339 SERIALIZE_SCALAR(txDmaState
);
2342 * Serialize rx state machine
2344 int rxState
= this->rxState
;
2345 SERIALIZE_SCALAR(rxState
);
2346 SERIALIZE_SCALAR(CRDD
);
2347 SERIALIZE_SCALAR(rxPktBytes
);
2348 SERIALIZE_SCALAR(rxFifoCnt
);
2349 SERIALIZE_SCALAR(rxHalt
);
2350 SERIALIZE_SCALAR(rxDescCnt
);
2351 int rxDmaState
= this->rxDmaState
;
2352 SERIALIZE_SCALAR(rxDmaState
);
2354 SERIALIZE_SCALAR(extstsEnable
);
2357 * If there's a pending transmit, store the time so we can
2358 * reschedule it later
2360 Tick transmitTick
= txEvent
.scheduled() ? txEvent
.when() - curTick
: 0;
2361 SERIALIZE_SCALAR(transmitTick
);
2364 * receive address filter settings
2366 SERIALIZE_SCALAR(rxFilterEnable
);
2367 SERIALIZE_SCALAR(acceptBroadcast
);
2368 SERIALIZE_SCALAR(acceptMulticast
);
2369 SERIALIZE_SCALAR(acceptUnicast
);
2370 SERIALIZE_SCALAR(acceptPerfect
);
2371 SERIALIZE_SCALAR(acceptArp
);
2374 * Keep track of pending interrupt status.
2376 SERIALIZE_SCALAR(intrTick
);
2377 SERIALIZE_SCALAR(cpuPendingIntr
);
2378 Tick intrEventTick
= 0;
2380 intrEventTick
= intrEvent
->when();
2381 SERIALIZE_SCALAR(intrEventTick
);
2386 NSGigE::unserialize(Checkpoint
*cp
, const std::string
§ion
)
2388 // Unserialize the PciDev base class
2389 PciDev::unserialize(cp
, section
);
2391 UNSERIALIZE_SCALAR(regs
.command
);
2392 UNSERIALIZE_SCALAR(regs
.config
);
2393 UNSERIALIZE_SCALAR(regs
.mear
);
2394 UNSERIALIZE_SCALAR(regs
.ptscr
);
2395 UNSERIALIZE_SCALAR(regs
.isr
);
2396 UNSERIALIZE_SCALAR(regs
.imr
);
2397 UNSERIALIZE_SCALAR(regs
.ier
);
2398 UNSERIALIZE_SCALAR(regs
.ihr
);
2399 UNSERIALIZE_SCALAR(regs
.txdp
);
2400 UNSERIALIZE_SCALAR(regs
.txdp_hi
);
2401 UNSERIALIZE_SCALAR(regs
.txcfg
);
2402 UNSERIALIZE_SCALAR(regs
.gpior
);
2403 UNSERIALIZE_SCALAR(regs
.rxdp
);
2404 UNSERIALIZE_SCALAR(regs
.rxdp_hi
);
2405 UNSERIALIZE_SCALAR(regs
.rxcfg
);
2406 UNSERIALIZE_SCALAR(regs
.pqcr
);
2407 UNSERIALIZE_SCALAR(regs
.wcsr
);
2408 UNSERIALIZE_SCALAR(regs
.pcr
);
2409 UNSERIALIZE_SCALAR(regs
.rfcr
);
2410 UNSERIALIZE_SCALAR(regs
.rfdr
);
2411 UNSERIALIZE_SCALAR(regs
.srr
);
2412 UNSERIALIZE_SCALAR(regs
.mibc
);
2413 UNSERIALIZE_SCALAR(regs
.vrcr
);
2414 UNSERIALIZE_SCALAR(regs
.vtcr
);
2415 UNSERIALIZE_SCALAR(regs
.vdr
);
2416 UNSERIALIZE_SCALAR(regs
.ccsr
);
2417 UNSERIALIZE_SCALAR(regs
.tbicr
);
2418 UNSERIALIZE_SCALAR(regs
.tbisr
);
2419 UNSERIALIZE_SCALAR(regs
.tanar
);
2420 UNSERIALIZE_SCALAR(regs
.tanlpar
);
2421 UNSERIALIZE_SCALAR(regs
.taner
);
2422 UNSERIALIZE_SCALAR(regs
.tesr
);
2424 UNSERIALIZE_ARRAY(rom
.perfectMatch
, EADDR_LEN
);
2426 UNSERIALIZE_SCALAR(ioEnable
);
2429 * unserialize the data fifos
2432 UNSERIALIZE_SCALAR(txNumPkts
);
2434 for (i
= 0; i
< txNumPkts
; ++i
) {
2435 PacketPtr p
= new EtherPacket
;
2436 p
->unserialize(cp
, csprintf("%s.rxFifo%d", section
, i
));
2437 txFifo
.push_back(p
);
2441 UNSERIALIZE_SCALAR(rxNumPkts
);
2442 for (i
= 0; i
< rxNumPkts
; ++i
) {
2443 PacketPtr p
= new EtherPacket
;
2444 p
->unserialize(cp
, csprintf("%s.rxFifo%d", section
, i
));
2445 rxFifo
.push_back(p
);
2449 * unserialize the various helper variables
2451 bool txPacketExists
;
2452 UNSERIALIZE_SCALAR(txPacketExists
);
2453 if (txPacketExists
) {
2454 txPacket
= new EtherPacket
;
2455 txPacket
->unserialize(cp
, csprintf("%s.txPacket", section
));
2456 uint32_t txPktBufPtr
;
2457 UNSERIALIZE_SCALAR(txPktBufPtr
);
2458 txPacketBufPtr
= (uint8_t *) txPacket
->data
+ txPktBufPtr
;
2462 bool rxPacketExists
;
2463 UNSERIALIZE_SCALAR(rxPacketExists
);
2465 if (rxPacketExists
) {
2466 rxPacket
= new EtherPacket
;
2467 rxPacket
->unserialize(cp
, csprintf("%s.rxPacket", section
));
2468 uint32_t rxPktBufPtr
;
2469 UNSERIALIZE_SCALAR(rxPktBufPtr
);
2470 rxPacketBufPtr
= (uint8_t *) rxPacket
->data
+ rxPktBufPtr
;
2474 UNSERIALIZE_SCALAR(txXferLen
);
2475 UNSERIALIZE_SCALAR(rxXferLen
);
2478 * Unserialize DescCaches
2480 UNSERIALIZE_SCALAR(txDescCache
.link
);
2481 UNSERIALIZE_SCALAR(txDescCache
.bufptr
);
2482 UNSERIALIZE_SCALAR(txDescCache
.cmdsts
);
2483 UNSERIALIZE_SCALAR(txDescCache
.extsts
);
2484 UNSERIALIZE_SCALAR(rxDescCache
.link
);
2485 UNSERIALIZE_SCALAR(rxDescCache
.bufptr
);
2486 UNSERIALIZE_SCALAR(rxDescCache
.cmdsts
);
2487 UNSERIALIZE_SCALAR(rxDescCache
.extsts
);
2490 * unserialize tx state machine
2493 UNSERIALIZE_SCALAR(txState
);
2494 this->txState
= (TxState
) txState
;
2495 UNSERIALIZE_SCALAR(CTDD
);
2496 UNSERIALIZE_SCALAR(txFifoAvail
);
2497 UNSERIALIZE_SCALAR(txHalt
);
2498 UNSERIALIZE_SCALAR(txFragPtr
);
2499 UNSERIALIZE_SCALAR(txDescCnt
);
2501 UNSERIALIZE_SCALAR(txDmaState
);
2502 this->txDmaState
= (DmaState
) txDmaState
;
2505 * unserialize rx state machine
2508 UNSERIALIZE_SCALAR(rxState
);
2509 this->rxState
= (RxState
) rxState
;
2510 UNSERIALIZE_SCALAR(CRDD
);
2511 UNSERIALIZE_SCALAR(rxPktBytes
);
2512 UNSERIALIZE_SCALAR(rxFifoCnt
);
2513 UNSERIALIZE_SCALAR(rxHalt
);
2514 UNSERIALIZE_SCALAR(rxDescCnt
);
2516 UNSERIALIZE_SCALAR(rxDmaState
);
2517 this->rxDmaState
= (DmaState
) rxDmaState
;
2519 UNSERIALIZE_SCALAR(extstsEnable
);
2522 * If there's a pending transmit, reschedule it now
2525 UNSERIALIZE_SCALAR(transmitTick
);
2527 txEvent
.schedule(curTick
+ transmitTick
);
2530 * unserialize receive address filter settings
2532 UNSERIALIZE_SCALAR(rxFilterEnable
);
2533 UNSERIALIZE_SCALAR(acceptBroadcast
);
2534 UNSERIALIZE_SCALAR(acceptMulticast
);
2535 UNSERIALIZE_SCALAR(acceptUnicast
);
2536 UNSERIALIZE_SCALAR(acceptPerfect
);
2537 UNSERIALIZE_SCALAR(acceptArp
);
2540 * Keep track of pending interrupt status.
2542 UNSERIALIZE_SCALAR(intrTick
);
2543 UNSERIALIZE_SCALAR(cpuPendingIntr
);
2545 UNSERIALIZE_SCALAR(intrEventTick
);
2546 if (intrEventTick
) {
2547 intrEvent
= new IntrEvent(this, true);
2548 intrEvent
->schedule(intrEventTick
);
2552 * re-add addrRanges to bus bridges
2555 pioInterface
->addAddrRange(BARAddrs
[0], BARAddrs
[0] + BARSize
[0] - 1);
2556 pioInterface
->addAddrRange(BARAddrs
[1], BARAddrs
[1] + BARSize
[1] - 1);
2561 NSGigE::cacheAccess(MemReqPtr
&req
)
2563 DPRINTF(EthernetPIO
, "timing access to paddr=%#x (daddr=%#x)\n",
2564 req
->paddr
, req
->paddr
- addr
);
2565 return curTick
+ pioLatency
;
2567 //=====================================================================
2570 //********** helper functions******************************************
/**
 * Byte-swap a 16-bit value (host <-> network byte order helper).
 */
uint16_t reverseEnd16(uint16_t num)
{
    uint16_t reverse = (num & 0xff)<<8;
    reverse += ((num & 0xff00) >> 8);
    return reverse;
}

/**
 * Byte-swap a 32-bit value by swapping the two halves and byte-swapping
 * each half.
 *
 * Bug fix: the high half must be shifted down by 16 bits, not 8, before
 * the uint16_t cast — the original ">> 8" fed bytes 1-2 of the input into
 * the low half and discarded the top byte entirely.
 */
uint32_t reverseEnd32(uint32_t num)
{
    uint32_t reverse = (reverseEnd16(num & 0xffff)) << 16;
    reverse += reverseEnd16((uint16_t) ((num & 0xffff0000) >> 16));
    return reverse;
}
2588 //=====================================================================
2590 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt
)
2592 SimObjectParam
<EtherInt
*> peer
;
2593 SimObjectParam
<NSGigE
*> device
;
2595 END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt
)
2597 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt
)
2599 INIT_PARAM_DFLT(peer
, "peer interface", NULL
),
2600 INIT_PARAM(device
, "Ethernet device of this interface")
2602 END_INIT_SIM_OBJECT_PARAMS(NSGigEInt
)
2604 CREATE_SIM_OBJECT(NSGigEInt
)
2606 NSGigEInt
*dev_int
= new NSGigEInt(getInstanceName(), device
);
2608 EtherInt
*p
= (EtherInt
*)peer
;
2610 dev_int
->setPeer(p
);
2611 p
->setPeer(dev_int
);
2617 REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt
)
2620 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE
)
2622 Param
<Tick
> tx_delay
;
2623 Param
<Tick
> rx_delay
;
2624 SimObjectParam
<IntrControl
*> intr_ctrl
;
2625 Param
<Tick
> intr_delay
;
2626 SimObjectParam
<MemoryController
*> mmu
;
2627 SimObjectParam
<PhysicalMemory
*> physmem
;
2628 Param
<bool> rx_filter
;
2629 Param
<string
> hardware_address
;
2630 SimObjectParam
<Bus
*> header_bus
;
2631 SimObjectParam
<Bus
*> payload_bus
;
2632 SimObjectParam
<HierParams
*> hier
;
2633 Param
<Tick
> pio_latency
;
2634 Param
<bool> dma_desc_free
;
2635 Param
<bool> dma_data_free
;
2636 Param
<Tick
> dma_read_delay
;
2637 Param
<Tick
> dma_write_delay
;
2638 Param
<Tick
> dma_read_factor
;
2639 Param
<Tick
> dma_write_factor
;
2640 SimObjectParam
<PciConfigAll
*> configspace
;
2641 SimObjectParam
<PciConfigData
*> configdata
;
2642 SimObjectParam
<Tsunami
*> tsunami
;
2643 Param
<uint32_t> pci_bus
;
2644 Param
<uint32_t> pci_dev
;
2645 Param
<uint32_t> pci_func
;
2646 Param
<uint32_t> tx_fifo_size
;
2647 Param
<uint32_t> rx_fifo_size
;
2649 END_DECLARE_SIM_OBJECT_PARAMS(NSGigE
)
2651 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE
)
2653 INIT_PARAM_DFLT(tx_delay
, "Transmit Delay", 1000),
2654 INIT_PARAM_DFLT(rx_delay
, "Receive Delay", 1000),
2655 INIT_PARAM(intr_ctrl
, "Interrupt Controller"),
2656 INIT_PARAM_DFLT(intr_delay
, "Interrupt Delay in microseconds", 0),
2657 INIT_PARAM(mmu
, "Memory Controller"),
2658 INIT_PARAM(physmem
, "Physical Memory"),
2659 INIT_PARAM_DFLT(rx_filter
, "Enable Receive Filter", true),
2660 INIT_PARAM_DFLT(hardware_address
, "Ethernet Hardware Address",
2661 "00:99:00:00:00:01"),
2662 INIT_PARAM_DFLT(header_bus
, "The IO Bus to attach to for headers", NULL
),
2663 INIT_PARAM_DFLT(payload_bus
, "The IO Bus to attach to for payload", NULL
),
2664 INIT_PARAM_DFLT(hier
, "Hierarchy global variables", &defaultHierParams
),
2665 INIT_PARAM_DFLT(pio_latency
, "Programmed IO latency in bus cycles", 1),
2666 INIT_PARAM_DFLT(dma_desc_free
, "DMA of Descriptors is free", false),
2667 INIT_PARAM_DFLT(dma_data_free
, "DMA of Data is free", false),
2668 INIT_PARAM_DFLT(dma_read_delay
, "fixed delay for dma reads", 0),
2669 INIT_PARAM_DFLT(dma_write_delay
, "fixed delay for dma writes", 0),
2670 INIT_PARAM_DFLT(dma_read_factor
, "multiplier for dma reads", 0),
2671 INIT_PARAM_DFLT(dma_write_factor
, "multiplier for dma writes", 0),
2672 INIT_PARAM(configspace
, "PCI Configspace"),
2673 INIT_PARAM(configdata
, "PCI Config data"),
2674 INIT_PARAM(tsunami
, "Tsunami"),
2675 INIT_PARAM(pci_bus
, "PCI bus"),
2676 INIT_PARAM(pci_dev
, "PCI device number"),
2677 INIT_PARAM(pci_func
, "PCI function code"),
2678 INIT_PARAM_DFLT(tx_fifo_size
, "max size in bytes of txFifo", 131072),
2679 INIT_PARAM_DFLT(rx_fifo_size
, "max size in bytes of rxFifo", 131072)
2681 END_INIT_SIM_OBJECT_PARAMS(NSGigE
)
2684 CREATE_SIM_OBJECT(NSGigE
)
2687 sscanf(((string
)hardware_address
).c_str(), "%x:%x:%x:%x:%x:%x",
2688 &eaddr
[0], &eaddr
[1], &eaddr
[2], &eaddr
[3], &eaddr
[4], &eaddr
[5]);
2690 return new NSGigE(getInstanceName(), intr_ctrl
, intr_delay
,
2691 physmem
, tx_delay
, rx_delay
, mmu
, hier
, header_bus
,
2692 payload_bus
, pio_latency
, dma_desc_free
, dma_data_free
,
2693 dma_read_delay
, dma_write_delay
, dma_read_factor
,
2694 dma_write_factor
, configspace
, configdata
,
2695 tsunami
, pci_bus
, pci_dev
, pci_func
, rx_filter
, eaddr
,
2696 tx_fifo_size
, rx_fifo_size
);
2699 REGISTER_SIM_OBJECT("NSGigE", NSGigE
)