2 * Copyright (c) 2004 The Regents of The University of Michigan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 * Device module for modelling the National Semiconductor
31 * DP83820 ethernet controller. Does not support priority queueing
37 #include "base/inet.hh"
38 #include "cpu/exec_context.hh"
39 #include "cpu/intr_control.hh"
41 #include "dev/ns_gige.hh"
42 #include "dev/etherlink.hh"
43 #include "mem/bus/bus.hh"
44 #include "mem/bus/dma_interface.hh"
45 #include "mem/bus/pio_interface.hh"
46 #include "mem/bus/pio_interface_impl.hh"
47 #include "mem/functional_mem/memory_control.hh"
48 #include "mem/functional_mem/physical_memory.hh"
49 #include "sim/builder.hh"
50 #include "sim/host.hh"
51 #include "sim/sim_stats.hh"
52 #include "targetarch/vtophys.hh"
53 #include "dev/pciconfigall.hh"
54 #include "dev/tsunami_cchip.hh"
56 const char *NsRxStateStrings
[] =
67 const char *NsTxStateStrings
[] =
78 const char *NsDmaState
[] =
89 // Helper function forward declarations.
90 // These functions reverse endianness (byte-swap a 16-/32-bit value) so that
// network-byte-order header fields can be evaluated correctly on the host;
// their definitions are presumably later in this file — confirm.
91 uint16_t reverseEnd16(uint16_t);
92 uint32_t reverseEnd32(uint32_t);
94 ///////////////////////////////////////////////////////////////////////
98 NSGigE::NSGigE(const std::string
&name
, IntrControl
*i
, Tick intr_delay
,
99 PhysicalMemory
*pmem
, Tick tx_delay
, Tick rx_delay
,
100 MemoryController
*mmu
, HierParams
*hier
, Bus
*header_bus
,
101 Bus
*payload_bus
, Tick pio_latency
, bool dma_desc_free
,
102 bool dma_data_free
, Tick dma_read_delay
, Tick dma_write_delay
,
103 Tick dma_read_factor
, Tick dma_write_factor
, PciConfigAll
*cf
,
104 PciConfigData
*cd
, Tsunami
*t
, uint32_t bus
, uint32_t dev
,
105 uint32_t func
, bool rx_filter
, const int eaddr
[6],
106 uint32_t tx_fifo_size
, uint32_t rx_fifo_size
)
107 : PciDev(name
, mmu
, cf
, cd
, bus
, dev
, func
), tsunami(t
), ioEnable(false),
108 maxTxFifoSize(tx_fifo_size
), maxRxFifoSize(rx_fifo_size
),
109 txPacket(0), rxPacket(0), txPacketBufPtr(NULL
), rxPacketBufPtr(NULL
),
110 txXferLen(0), rxXferLen(0), txState(txIdle
), CTDD(false),
111 txFifoAvail(tx_fifo_size
), txHalt(false),
112 txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle
), rxState(rxIdle
),
113 CRDD(false), rxPktBytes(0), rxFifoCnt(0), rxHalt(false),
114 rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle
), extstsEnable(false),
115 rxDmaReadEvent(this), rxDmaWriteEvent(this),
116 txDmaReadEvent(this), txDmaWriteEvent(this),
117 dmaDescFree(dma_desc_free
), dmaDataFree(dma_data_free
),
118 txDelay(tx_delay
), rxDelay(rx_delay
), rxKickTick(0), txKickTick(0),
119 txEvent(this), rxFilterEnable(rx_filter
), acceptBroadcast(false),
120 acceptMulticast(false), acceptUnicast(false),
121 acceptPerfect(false), acceptArp(false),
122 physmem(pmem
), intctrl(i
), intrTick(0), cpuPendingIntr(false),
123 intrEvent(0), interface(0)
125 tsunami
->ethernet
= this;
128 pioInterface
= newPioInterface(name
, hier
, header_bus
, this,
129 &NSGigE::cacheAccess
);
131 pioLatency
= pio_latency
* header_bus
->clockRatio
;
134 dmaInterface
= new DMAInterface
<Bus
>(name
+ ".dma",
135 header_bus
, payload_bus
, 1);
137 dmaInterface
= new DMAInterface
<Bus
>(name
+ ".dma",
138 header_bus
, header_bus
, 1);
139 } else if (payload_bus
) {
140 pioInterface
= newPioInterface(name
, hier
, payload_bus
, this,
141 &NSGigE::cacheAccess
);
143 pioLatency
= pio_latency
* payload_bus
->clockRatio
;
145 dmaInterface
= new DMAInterface
<Bus
>(name
+ ".dma", payload_bus
,
150 intrDelay
= US2Ticks(intr_delay
);
151 dmaReadDelay
= dma_read_delay
;
152 dmaWriteDelay
= dma_write_delay
;
153 dmaReadFactor
= dma_read_factor
;
154 dmaWriteFactor
= dma_write_factor
;
157 rom
.perfectMatch
[0] = eaddr
[0];
158 rom
.perfectMatch
[1] = eaddr
[1];
159 rom
.perfectMatch
[2] = eaddr
[2];
160 rom
.perfectMatch
[3] = eaddr
[3];
161 rom
.perfectMatch
[4] = eaddr
[4];
162 rom
.perfectMatch
[5] = eaddr
[5];
172 .name(name() + ".txBytes")
173 .desc("Bytes Transmitted")
178 .name(name() + ".rxBytes")
179 .desc("Bytes Received")
184 .name(name() + ".txPackets")
185 .desc("Number of Packets Transmitted")
190 .name(name() + ".rxPackets")
191 .desc("Number of Packets Received")
196 .name(name() + ".txIPChecksums")
197 .desc("Number of tx IP Checksums done by device")
203 .name(name() + ".rxIPChecksums")
204 .desc("Number of rx IP Checksums done by device")
210 .name(name() + ".txTCPChecksums")
211 .desc("Number of tx TCP Checksums done by device")
217 .name(name() + ".rxTCPChecksums")
218 .desc("Number of rx TCP Checksums done by device")
224 .name(name() + ".descDMAReads")
225 .desc("Number of descriptors the device read w/ DMA")
230 .name(name() + ".descDMAWrites")
231 .desc("Number of descriptors the device wrote w/ DMA")
236 .name(name() + ".descDmaReadBytes")
237 .desc("number of descriptor bytes read w/ DMA")
242 .name(name() + ".descDmaWriteBytes")
243 .desc("number of descriptor bytes write w/ DMA")
249 .name(name() + ".txBandwidth")
250 .desc("Transmit Bandwidth (bits/s)")
256 .name(name() + ".rxBandwidth")
257 .desc("Receive Bandwidth (bits/s)")
263 .name(name() + ".txPPS")
264 .desc("Packet Tranmission Rate (packets/s)")
270 .name(name() + ".rxPPS")
271 .desc("Packet Reception Rate (packets/s)")
276 txBandwidth
= txBytes
* Stats::constant(8) / simSeconds
;
277 rxBandwidth
= rxBytes
* Stats::constant(8) / simSeconds
;
278 txPacketRate
= txPackets
/ simSeconds
;
279 rxPacketRate
= rxPackets
/ simSeconds
;
283 * This is to read the PCI general configuration registers
286 NSGigE::ReadConfig(int offset
, int size
, uint8_t *data
)
288 if (offset
< PCI_DEVICE_SPECIFIC
)
289 PciDev::ReadConfig(offset
, size
, data
);
291 panic("Device specific PCI config space not implemented!\n");
295 * This is to write to the PCI general configuration registers
298 NSGigE::WriteConfig(int offset
, int size
, uint32_t data
)
300 if (offset
< PCI_DEVICE_SPECIFIC
)
301 PciDev::WriteConfig(offset
, size
, data
);
303 panic("Device specific PCI config space not implemented!\n");
305 // Need to catch writes to BARs to update the PIO interface
307 //seems to work fine without all these PCI settings, but i put in the IO
308 //to double check, an assertion will fail if we need to properly
311 if (config
.data
[offset
] & PCI_CMD_IOSE
)
317 if (config
.data
[offset
] & PCI_CMD_BME
) {
324 if (config
.data
[offset
] & PCI_CMD_MSE
) {
333 case PCI0_BASE_ADDR0
:
334 if (BARAddrs
[0] != 0) {
337 pioInterface
->addAddrRange(BARAddrs
[0], BARAddrs
[0] + BARSize
[0] - 1);
339 BARAddrs
[0] &= PA_UNCACHED_MASK
;
343 case PCI0_BASE_ADDR1
:
344 if (BARAddrs
[1] != 0) {
347 pioInterface
->addAddrRange(BARAddrs
[1], BARAddrs
[1] + BARSize
[1] - 1);
349 BARAddrs
[1] &= PA_UNCACHED_MASK
;
357 * This reads the device registers, which are detailed in the NS83820
361 NSGigE::read(MemReqPtr
&req
, uint8_t *data
)
365 //The mask is to give you only the offset into the device register file
366 Addr daddr
= req
->paddr
& 0xfff;
367 DPRINTF(EthernetPIO
, "read da=%#x pa=%#x va=%#x size=%d\n",
368 daddr
, req
->paddr
, req
->vaddr
, req
->size
);
371 //there are some reserved registers, you can see ns_gige_reg.h and
372 //the spec sheet for details
373 if (daddr
> LAST
&& daddr
<= RESERVED
) {
374 panic("Accessing reserved register");
375 } else if (daddr
> RESERVED
&& daddr
<= 0x3FC) {
376 ReadConfig(daddr
& 0xff, req
->size
, data
);
378 } else if (daddr
>= MIB_START
&& daddr
<= MIB_END
) {
379 // don't implement all the MIB's. hopefully the kernel
380 // doesn't actually DEPEND upon their values
381 // MIB are just hardware stats keepers
382 uint32_t ®
= *(uint32_t *) data
;
385 } else if (daddr
> 0x3FC)
386 panic("Something is messed up!\n");
389 case sizeof(uint32_t):
391 uint32_t ®
= *(uint32_t *)data
;
396 //these are supposed to be cleared on a read
397 reg
&= ~(CR_RXD
| CR_TXD
| CR_TXR
| CR_RXR
);
414 devIntrClear(ISR_ALL
);
469 //see the spec sheet for how RFCR and RFDR work
470 //basically, you write to RFCR to tell the machine what you want to do next
471 //then you act upon RFDR, and the device will be prepared b/c
472 //of what you wrote to RFCR
478 switch (regs
.rfcr
& RFCR_RFADDR
) {
480 reg
= rom
.perfectMatch
[1];
482 reg
+= rom
.perfectMatch
[0];
485 reg
= rom
.perfectMatch
[3] << 8;
486 reg
+= rom
.perfectMatch
[2];
489 reg
= rom
.perfectMatch
[5] << 8;
490 reg
+= rom
.perfectMatch
[4];
493 panic("reading from RFDR for something for other than PMATCH!\n");
494 //didn't implement other RFDR functionality b/c driver didn't use
504 reg
&= ~(MIBC_MIBS
| MIBC_ACLR
);
548 panic("reading unimplemented register: addr = %#x", daddr
);
551 DPRINTF(EthernetPIO
, "read from %#x: data=%d data=%#x\n",
557 panic("accessing register with invalid size: addr=%#x, size=%d",
565 NSGigE::write(MemReqPtr
&req
, const uint8_t *data
)
569 Addr daddr
= req
->paddr
& 0xfff;
570 DPRINTF(EthernetPIO
, "write da=%#x pa=%#x va=%#x size=%d\n",
571 daddr
, req
->paddr
, req
->vaddr
, req
->size
);
573 if (daddr
> LAST
&& daddr
<= RESERVED
) {
574 panic("Accessing reserved register");
575 } else if (daddr
> RESERVED
&& daddr
<= 0x3FC) {
576 WriteConfig(daddr
& 0xff, req
->size
, *(uint32_t *)data
);
578 } else if (daddr
> 0x3FC)
579 panic("Something is messed up!\n");
581 if (req
->size
== sizeof(uint32_t)) {
582 uint32_t reg
= *(uint32_t *)data
;
583 DPRINTF(EthernetPIO
, "write data=%d data=%#x\n", reg
, reg
);
588 if ((reg
& (CR_TXE
| CR_TXD
)) == (CR_TXE
| CR_TXD
)) {
590 } else if (reg
& CR_TXE
) {
591 //the kernel is enabling the transmit machine
592 if (txState
== txIdle
)
594 } else if (reg
& CR_TXD
) {
598 if ((reg
& (CR_RXE
| CR_RXD
)) == (CR_RXE
| CR_RXD
)) {
600 } else if (reg
& CR_RXE
) {
601 if (rxState
== rxIdle
) {
604 } else if (reg
& CR_RXD
) {
615 devIntrPost(ISR_SWI
);
626 if (reg
& CFG_LNKSTS
|| reg
& CFG_SPDSTS
|| reg
& CFG_DUPSTS
627 || reg
& CFG_RESERVED
|| reg
& CFG_T64ADDR
628 || reg
& CFG_PCI64_DET
)
629 panic("writing to read-only or reserved CFG bits!\n");
631 regs
.config
|= reg
& ~(CFG_LNKSTS
| CFG_SPDSTS
| CFG_DUPSTS
| CFG_RESERVED
|
632 CFG_T64ADDR
| CFG_PCI64_DET
);
634 // all these #if 0's are because i don't THINK the kernel needs to have these implemented
635 // if there is a problem relating to one of these, you may need to add functionality in
637 if (reg
& CFG_TBI_EN
) ;
638 if (reg
& CFG_MODE_1000
) ;
641 if (reg
& CFG_AUTO_1000
)
642 panic("CFG_AUTO_1000 not implemented!\n");
645 if (reg
& CFG_PINT_DUPSTS
|| reg
& CFG_PINT_LNKSTS
|| reg
& CFG_PINT_SPDSTS
) ;
646 if (reg
& CFG_TMRTEST
) ;
647 if (reg
& CFG_MRM_DIS
) ;
648 if (reg
& CFG_MWI_DIS
) ;
650 if (reg
& CFG_T64ADDR
)
651 panic("CFG_T64ADDR is read only register!\n");
653 if (reg
& CFG_PCI64_DET
)
654 panic("CFG_PCI64_DET is read only register!\n");
656 if (reg
& CFG_DATA64_EN
) ;
657 if (reg
& CFG_M64ADDR
) ;
658 if (reg
& CFG_PHY_RST
) ;
659 if (reg
& CFG_PHY_DIS
) ;
662 if (reg
& CFG_EXTSTS_EN
)
665 extstsEnable
= false;
668 if (reg
& CFG_REQALG
) ;
672 if (reg
& CFG_PESEL
) ;
673 if (reg
& CFG_BROM_DIS
) ;
674 if (reg
& CFG_EXT_125
) ;
681 /* since phy is completely faked, MEAR_MD* don't matter
682 and since the driver never uses MEAR_EE*, they don't matter */
684 if (reg
& MEAR_EEDI
) ;
685 if (reg
& MEAR_EEDO
) ; //this one is read only
686 if (reg
& MEAR_EECLK
) ;
687 if (reg
& MEAR_EESEL
) ;
688 if (reg
& MEAR_MDIO
) ;
689 if (reg
& MEAR_MDDIR
) ;
690 if (reg
& MEAR_MDC
) ;
695 regs
.ptscr
= reg
& ~(PTSCR_RBIST_RDONLY
);
696 /* these control BISTs for various parts of chip - we don't care or do
697 just fake that the BIST is done */
698 if (reg
& PTSCR_RBIST_EN
)
699 regs
.ptscr
|= PTSCR_RBIST_DONE
;
700 if (reg
& PTSCR_EEBIST_EN
)
701 regs
.ptscr
&= ~PTSCR_EEBIST_EN
;
702 if (reg
& PTSCR_EELOAD_EN
)
703 regs
.ptscr
&= ~PTSCR_EELOAD_EN
;
706 case ISR
: /* writing to the ISR has no effect */
707 panic("ISR is a read only register!\n");
720 /* not going to implement real interrupt holdoff */
724 regs
.txdp
= (reg
& 0xFFFFFFFC);
725 assert(txState
== txIdle
);
736 if (reg
& TXCFG_CSI
) ;
737 if (reg
& TXCFG_HBI
) ;
738 if (reg
& TXCFG_MLB
) ;
739 if (reg
& TXCFG_ATP
) ;
740 if (reg
& TXCFG_ECRETRY
) ; /* this could easily be implemented, but
741 considering the network is just a fake
742 pipe, wouldn't make sense to do this */
744 if (reg
& TXCFG_BRST_DIS
) ;
748 /* we handle our own DMA, ignore the kernel's exhortations */
749 //if (reg & TXCFG_MXDMA) ;
751 //also, we currently don't care about fill/drain thresholds
752 //though this may change in the future with more realistic
753 //networks or a driver which changes it according to feedback
759 /* these just control general purpose i/o pins, don't matter */
773 if (reg
& RXCFG_AEP
) ;
774 if (reg
& RXCFG_ARP
) ;
775 if (reg
& RXCFG_STRIPCRC
) ;
776 if (reg
& RXCFG_RX_RD
) ;
777 if (reg
& RXCFG_ALP
) ;
778 if (reg
& RXCFG_AIRL
) ;
781 /* we handle our own DMA, ignore what kernel says about it */
782 //if (reg & RXCFG_MXDMA) ;
785 //also, we currently don't care about fill/drain thresholds
786 //though this may change in the future with more realistic
787 //networks or a driver which changes it according to feedback
788 if (reg
& (RXCFG_DRTH
| RXCFG_DRTH0
)) ;
793 /* there is no priority queueing used in the linux 2.6 driver */
798 /* not going to implement wake on LAN */
803 /* not going to implement pause control */
810 rxFilterEnable
= (reg
& RFCR_RFEN
) ? true : false;
811 acceptBroadcast
= (reg
& RFCR_AAB
) ? true : false;
812 acceptMulticast
= (reg
& RFCR_AAM
) ? true : false;
813 acceptUnicast
= (reg
& RFCR_AAU
) ? true : false;
814 acceptPerfect
= (reg
& RFCR_APM
) ? true : false;
815 acceptArp
= (reg
& RFCR_AARP
) ? true : false;
817 if (reg
& RFCR_APAT
) ;
818 // panic("RFCR_APAT not implemented!\n");
820 if (reg
& RFCR_MHEN
|| reg
& RFCR_UHEN
)
821 panic("hash filtering not implemented!\n");
824 panic("RFCR_ULM not implemented!\n");
829 panic("the driver never writes to RFDR, something is wrong!\n");
832 panic("the driver never uses BRAR, something is wrong!\n");
835 panic("the driver never uses BRDR, something is wrong!\n");
838 panic("SRR is read only register!\n");
841 panic("the driver never uses MIBC, something is wrong!\n");
852 panic("the driver never uses VDR, something is wrong!\n");
856 /* not going to implement clockrun stuff */
862 if (reg
& TBICR_MR_LOOPBACK
)
863 panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
865 if (reg
& TBICR_MR_AN_ENABLE
) {
866 regs
.tanlpar
= regs
.tanar
;
867 regs
.tbisr
|= (TBISR_MR_AN_COMPLETE
| TBISR_MR_LINK_STATUS
);
871 if (reg
& TBICR_MR_RESTART_AN
) ;
877 panic("TBISR is read only register!\n");
882 panic("this isn't used in driver, something wrong!\n");
885 panic("this isn't used in driver, something wrong!\n");
889 panic("this should only be written to by the fake phy!\n");
892 panic("TANER is read only register!\n");
899 panic("thought i covered all the register, what is this? addr=%#x",
903 panic("Invalid Request Size");
909 NSGigE::devIntrPost(uint32_t interrupts
)
913 if (interrupts
& ISR_RESERVE
)
914 panic("Cannot set a reserved interrupt");
916 if (interrupts
& ISR_TXRCMP
)
917 regs
.isr
|= ISR_TXRCMP
;
919 if (interrupts
& ISR_RXRCMP
)
920 regs
.isr
|= ISR_RXRCMP
;
922 //ISR_DPERR not implemented
923 //ISR_SSERR not implemented
924 //ISR_RMABT not implemented
925 //ISR_RXSOVR not implemented
926 //ISR_HIBINT not implemented
927 //ISR_PHY not implemented
928 //ISR_PME not implemented
930 if (interrupts
& ISR_SWI
)
933 //ISR_MIB not implemented
934 //ISR_TXURN not implemented
936 if (interrupts
& ISR_TXIDLE
)
937 regs
.isr
|= ISR_TXIDLE
;
939 if (interrupts
& ISR_TXERR
)
940 regs
.isr
|= ISR_TXERR
;
942 if (interrupts
& ISR_TXDESC
)
943 regs
.isr
|= ISR_TXDESC
;
945 if (interrupts
& ISR_TXOK
) {
946 regs
.isr
|= ISR_TXOK
;
950 if (interrupts
& ISR_RXORN
)
951 regs
.isr
|= ISR_RXORN
;
953 if (interrupts
& ISR_RXIDLE
)
954 regs
.isr
|= ISR_RXIDLE
;
956 //ISR_RXEARLY not implemented
958 if (interrupts
& ISR_RXERR
)
959 regs
.isr
|= ISR_RXERR
;
961 if (interrupts
& ISR_RXDESC
)
962 regs
.isr
|= ISR_RXDESC
;
964 if (interrupts
& ISR_RXOK
) {
966 regs
.isr
|= ISR_RXOK
;
969 if ((regs
.isr
& regs
.imr
)) {
976 DPRINTF(EthernetIntr
, "**interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
977 interrupts
, regs
.isr
, regs
.imr
);
981 NSGigE::devIntrClear(uint32_t interrupts
)
983 if (interrupts
& ISR_RESERVE
)
984 panic("Cannot clear a reserved interrupt");
986 if (interrupts
& ISR_TXRCMP
)
987 regs
.isr
&= ~ISR_TXRCMP
;
989 if (interrupts
& ISR_RXRCMP
)
990 regs
.isr
&= ~ISR_RXRCMP
;
992 //ISR_DPERR not implemented
993 //ISR_SSERR not implemented
994 //ISR_RMABT not implemented
995 //ISR_RXSOVR not implemented
996 //ISR_HIBINT not implemented
997 //ISR_PHY not implemented
998 //ISR_PME not implemented
1000 if (interrupts
& ISR_SWI
)
1001 regs
.isr
&= ~ISR_SWI
;
1003 //ISR_MIB not implemented
1004 //ISR_TXURN not implemented
1006 if (interrupts
& ISR_TXIDLE
)
1007 regs
.isr
&= ~ISR_TXIDLE
;
1009 if (interrupts
& ISR_TXERR
)
1010 regs
.isr
&= ~ISR_TXERR
;
1012 if (interrupts
& ISR_TXDESC
)
1013 regs
.isr
&= ~ISR_TXDESC
;
1015 if (interrupts
& ISR_TXOK
)
1016 regs
.isr
&= ~ISR_TXOK
;
1018 if (interrupts
& ISR_RXORN
)
1019 regs
.isr
&= ~ISR_RXORN
;
1021 if (interrupts
& ISR_RXIDLE
)
1022 regs
.isr
&= ~ISR_RXIDLE
;
1024 //ISR_RXEARLY not implemented
1026 if (interrupts
& ISR_RXERR
)
1027 regs
.isr
&= ~ISR_RXERR
;
1029 if (interrupts
& ISR_RXDESC
)
1030 regs
.isr
&= ~ISR_RXDESC
;
1032 if (interrupts
& ISR_RXOK
)
1033 regs
.isr
&= ~ISR_RXOK
;
1035 if (!(regs
.isr
& regs
.imr
))
1038 DPRINTF(EthernetIntr
, "**interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
1039 interrupts
, regs
.isr
, regs
.imr
);
1043 NSGigE::devIntrChangeMask()
1045 DPRINTF(EthernetIntr
, "interrupt mask changed\n");
1047 if (regs
.isr
& regs
.imr
)
1048 cpuIntrPost(curTick
);
1054 NSGigE::cpuIntrPost(Tick when
)
1056 //If the interrupt you want to post is later than an
1057 //interrupt already scheduled, just let it post in the coming one and
1058 //don't schedule another.
1059 //HOWEVER, must be sure that the scheduled intrTick is in the future
1060 //(this was formerly the source of a bug)
1061 assert((intrTick
>= curTick
) || (intrTick
== 0));
1062 if (when
> intrTick
&& intrTick
!= 0)
1068 intrEvent
->squash();
1072 if (when
< curTick
) {
1075 DPRINTF(EthernetIntr
, "going to schedule an interrupt for intrTick=%d\n",
1077 intrEvent
= new IntrEvent(this, true);
1078 intrEvent
->schedule(intrTick
);
1083 NSGigE::cpuInterrupt()
1085 // Don't send an interrupt if there's already one
1086 if (cpuPendingIntr
) {
1087 DPRINTF(EthernetIntr
,
1088 "would send an interrupt now, but there's already pending\n");
1092 // Don't send an interrupt if it's supposed to be delayed
1093 if (intrTick
> curTick
) {
1094 DPRINTF(EthernetIntr
, "an interrupt is scheduled for %d, wait til then\n",
1099 // Whether or not there's a pending interrupt, we don't care about
1105 cpuPendingIntr
= true;
1106 /** @todo rework the intctrl to be tsunami ok */
1107 //intctrl->post(TheISA::INTLEVEL_IRQ1, TheISA::INTINDEX_ETHERNET);
1108 DPRINTF(EthernetIntr
, "Posting interrupts to cchip!\n");
1109 tsunami
->cchip
->postDRIR(configData
->config
.hdr
.pci0
.interruptLine
);
1113 NSGigE::cpuIntrClear()
1115 if (cpuPendingIntr
) {
1116 cpuPendingIntr
= false;
1117 /** @todo rework the intctrl to be tsunami ok */
1118 //intctrl->clear(TheISA::INTLEVEL_IRQ1, TheISA::INTINDEX_ETHERNET);
1119 DPRINTF(EthernetIntr
, "clearing all interrupts from cchip\n");
1120 tsunami
->cchip
->clearDRIR(configData
->config
.hdr
.pci0
.interruptLine
);
1125 NSGigE::cpuIntrPending() const
1126 { return cpuPendingIntr
; }
1132 DPRINTF(Ethernet
, "transmit reset\n");
1135 txFifoAvail
= maxTxFifoSize
;
1138 assert(txDescCnt
== 0);
1140 regs
.command
&= ~CR_TXE
;
1142 assert(txDmaState
== dmaIdle
);
1148 DPRINTF(Ethernet
, "receive reset\n");
1151 assert(rxPktBytes
== 0);
1155 assert(rxDescCnt
== 0);
1156 assert(rxDmaState
== dmaIdle
);
1158 regs
.command
&= ~CR_RXE
;
1162 void NSGigE::regsReset()
1164 memset(®s
, 0, sizeof(regs
));
1165 regs
.config
= 0x80000000;
1167 regs
.isr
= 0x00608000;
1175 extstsEnable
= false;
1176 acceptBroadcast
= false;
1177 acceptMulticast
= false;
1178 acceptUnicast
= false;
1179 acceptPerfect
= false;
1184 NSGigE::rxDmaReadCopy()
1186 assert(rxDmaState
== dmaReading
);
1188 memcpy(rxDmaData
, physmem
->dma_addr(rxDmaAddr
, rxDmaLen
), rxDmaLen
);
1189 rxDmaState
= dmaIdle
;
1191 DPRINTF(EthernetDMA
, "rx dma read paddr=%#x len=%d\n",
1192 rxDmaAddr
, rxDmaLen
);
1193 DDUMP(EthernetDMA
, rxDmaData
, rxDmaLen
);
1197 NSGigE::doRxDmaRead()
1199 assert(rxDmaState
== dmaIdle
|| rxDmaState
== dmaReadWaiting
);
1200 rxDmaState
= dmaReading
;
1202 if (dmaInterface
&& !rxDmaFree
) {
1203 if (dmaInterface
->busy())
1204 rxDmaState
= dmaReadWaiting
;
1206 dmaInterface
->doDMA(Read
, rxDmaAddr
, rxDmaLen
, curTick
,
1207 &rxDmaReadEvent
, true);
1211 if (dmaReadDelay
== 0 && dmaReadFactor
== 0) {
1216 Tick factor
= ((rxDmaLen
+ ULL(63)) >> ULL(6)) * dmaReadFactor
;
1217 Tick start
= curTick
+ dmaReadDelay
+ factor
;
1218 rxDmaReadEvent
.schedule(start
);
1223 NSGigE::rxDmaReadDone()
1225 assert(rxDmaState
== dmaReading
);
1228 // If the transmit state machine has a pending DMA, let it go first
1229 if (txDmaState
== dmaReadWaiting
|| txDmaState
== dmaWriteWaiting
)
1236 NSGigE::rxDmaWriteCopy()
1238 assert(rxDmaState
== dmaWriting
);
1240 memcpy(physmem
->dma_addr(rxDmaAddr
, rxDmaLen
), rxDmaData
, rxDmaLen
);
1241 rxDmaState
= dmaIdle
;
1243 DPRINTF(EthernetDMA
, "rx dma write paddr=%#x len=%d\n",
1244 rxDmaAddr
, rxDmaLen
);
1245 DDUMP(EthernetDMA
, rxDmaData
, rxDmaLen
);
1249 NSGigE::doRxDmaWrite()
1251 assert(rxDmaState
== dmaIdle
|| rxDmaState
== dmaWriteWaiting
);
1252 rxDmaState
= dmaWriting
;
1254 if (dmaInterface
&& !rxDmaFree
) {
1255 if (dmaInterface
->busy())
1256 rxDmaState
= dmaWriteWaiting
;
1258 dmaInterface
->doDMA(WriteInvalidate
, rxDmaAddr
, rxDmaLen
, curTick
,
1259 &rxDmaWriteEvent
, true);
1263 if (dmaWriteDelay
== 0 && dmaWriteFactor
== 0) {
1268 Tick factor
= ((rxDmaLen
+ ULL(63)) >> ULL(6)) * dmaWriteFactor
;
1269 Tick start
= curTick
+ dmaWriteDelay
+ factor
;
1270 rxDmaWriteEvent
.schedule(start
);
1275 NSGigE::rxDmaWriteDone()
1277 assert(rxDmaState
== dmaWriting
);
1280 // If the transmit state machine has a pending DMA, let it go first
1281 if (txDmaState
== dmaReadWaiting
|| txDmaState
== dmaWriteWaiting
)
1290 DPRINTF(EthernetSM
, "receive kick state=%s (rxBuf.size=%d)\n",
1291 NsRxStateStrings
[rxState
], rxFifo
.size());
1293 if (rxKickTick
> curTick
) {
1294 DPRINTF(EthernetSM
, "receive kick exiting, can't run till %d\n",
1300 switch(rxDmaState
) {
1301 case dmaReadWaiting
:
1305 case dmaWriteWaiting
:
1313 // see state machine from spec for details
1314 // the way this works is, if you finish work on one state and can go directly to
1315 // another, you do that through jumping to the label "next". however, if you have
1316 // intermediate work, like DMA so that you can't go to the next state yet, you go to
1317 // exit and exit the loop. however, when the DMA is done it will trigger an
1318 // event and come back to this loop.
1321 if (!regs
.command
& CR_RXE
) {
1322 DPRINTF(EthernetSM
, "Receive Disabled! Nothing to do.\n");
1327 rxState
= rxDescRefr
;
1329 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1330 rxDmaData
= &rxDescCache
+ offsetof(ns_desc
, link
);
1331 rxDmaLen
= sizeof(rxDescCache
.link
);
1332 rxDmaFree
= dmaDescFree
;
1335 descDmaRdBytes
+= rxDmaLen
;
1340 rxState
= rxDescRead
;
1342 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1343 rxDmaData
= &rxDescCache
;
1344 rxDmaLen
= sizeof(ns_desc
);
1345 rxDmaFree
= dmaDescFree
;
1348 descDmaRdBytes
+= rxDmaLen
;
1356 if (rxDmaState
!= dmaIdle
)
1359 rxState
= rxAdvance
;
1363 if (rxDmaState
!= dmaIdle
)
1366 DPRINTF(EthernetDesc
,
1367 "rxDescCache:\n\tlink=%08x\n\tbufptr=%08x\n\tcmdsts=%08x\n\textsts=%08x\n"
1368 ,rxDescCache
.link
, rxDescCache
.bufptr
, rxDescCache
.cmdsts
,
1369 rxDescCache
.extsts
);
1371 if (rxDescCache
.cmdsts
& CMDSTS_OWN
) {
1374 rxState
= rxFifoBlock
;
1375 rxFragPtr
= rxDescCache
.bufptr
;
1376 rxDescCnt
= rxDescCache
.cmdsts
& CMDSTS_LEN_MASK
;
1383 * @todo in reality, we should be able to start processing
1384 * the packet as it arrives, and not have to wait for the
1385 * full packet to be in the receive fifo.
1390 DPRINTF(EthernetSM
, "\n\n*****processing receive of new packet\n");
1392 // If we don't have a packet, grab a new one from the fifo.
1393 rxPacket
= rxFifo
.front();
1394 rxPktBytes
= rxPacket
->length
;
1395 rxPacketBufPtr
= rxPacket
->data
;
1398 if (DTRACE(Ethernet
)) {
1399 if (rxPacket
->isIpPkt()) {
1400 ip_header
*ip
= rxPacket
->getIpHdr();
1401 DPRINTF(Ethernet
, "ID is %d\n", reverseEnd16(ip
->ID
));
1402 if (rxPacket
->isTcpPkt()) {
1403 tcp_header
*tcp
= rxPacket
->getTcpHdr(ip
);
1404 DPRINTF(Ethernet
, "Src Port = %d, Dest Port = %d\n",
1405 reverseEnd16(tcp
->src_port_num
),
1406 reverseEnd16(tcp
->dest_port_num
));
1412 // sanity check - i think the driver behaves like this
1413 assert(rxDescCnt
>= rxPktBytes
);
1415 // Must clear the value before popping to decrement the
1417 rxFifo
.front() = NULL
;
1419 rxFifoCnt
-= rxPacket
->length
;
1423 // don't need the && rxDescCnt > 0 if driver sanity check above holds
1424 if (rxPktBytes
> 0) {
1425 rxState
= rxFragWrite
;
1426 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity check holds
1427 rxXferLen
= rxPktBytes
;
1429 rxDmaAddr
= rxFragPtr
& 0x3fffffff;
1430 rxDmaData
= rxPacketBufPtr
;
1431 rxDmaLen
= rxXferLen
;
1432 rxDmaFree
= dmaDataFree
;
1438 rxState
= rxDescWrite
;
1440 //if (rxPktBytes == 0) { /* packet is done */
1441 assert(rxPktBytes
== 0);
1442 DPRINTF(EthernetSM
, "done with receiving packet\n");
1444 rxDescCache
.cmdsts
|= CMDSTS_OWN
;
1445 rxDescCache
.cmdsts
&= ~CMDSTS_MORE
;
1446 rxDescCache
.cmdsts
|= CMDSTS_OK
;
1447 rxDescCache
.cmdsts
&= 0xffff0000;
1448 rxDescCache
.cmdsts
+= rxPacket
->length
; //i.e. set CMDSTS_SIZE
1451 /* all the driver uses these for is its own stats keeping,
1452 which we don't care about, aren't necessary for functionality
1453 and doing this would just slow us down. if they end up using
1454 this in a later version for functional purposes, just undef
1456 if (rxFilterEnable
) {
1457 rxDescCache
.cmdsts
&= ~CMDSTS_DEST_MASK
;
1458 if (rxFifo
.front()->IsUnicast())
1459 rxDescCache
.cmdsts
|= CMDSTS_DEST_SELF
;
1460 if (rxFifo
.front()->IsMulticast())
1461 rxDescCache
.cmdsts
|= CMDSTS_DEST_MULTI
;
1462 if (rxFifo
.front()->IsBroadcast())
1463 rxDescCache
.cmdsts
|= CMDSTS_DEST_MASK
;
1467 if (rxPacket
->isIpPkt() && extstsEnable
) {
1468 rxDescCache
.extsts
|= EXTSTS_IPPKT
;
1470 if (!ipChecksum(rxPacket
, false)) {
1471 DPRINTF(EthernetCksum
, "Rx IP Checksum Error\n");
1472 rxDescCache
.extsts
|= EXTSTS_IPERR
;
1474 if (rxPacket
->isTcpPkt()) {
1475 rxDescCache
.extsts
|= EXTSTS_TCPPKT
;
1477 if (!tcpChecksum(rxPacket
, false)) {
1478 DPRINTF(EthernetCksum
, "Rx TCP Checksum Error\n");
1479 rxDescCache
.extsts
|= EXTSTS_TCPERR
;
1482 } else if (rxPacket
->isUdpPkt()) {
1483 rxDescCache
.extsts
|= EXTSTS_UDPPKT
;
1484 if (!udpChecksum(rxPacket
, false)) {
1485 DPRINTF(EthernetCksum
, "Rx UDP Checksum Error\n");
1486 rxDescCache
.extsts
|= EXTSTS_UDPERR
;
1492 /* the driver seems to always receive into desc buffers
1493 of size 1514, so you never have a pkt that is split
1494 into multiple descriptors on the receive side, so
1495 i don't implement that case, hence the assert above.
1498 DPRINTF(EthernetDesc
, "rxDesc writeback:\n\tcmdsts=%08x\n\textsts=%08x\n",
1499 rxDescCache
.cmdsts
, rxDescCache
.extsts
);
1501 rxDmaAddr
= (regs
.rxdp
+ offsetof(ns_desc
, cmdsts
)) & 0x3fffffff;
1502 rxDmaData
= &(rxDescCache
.cmdsts
);
1503 rxDmaLen
= sizeof(rxDescCache
.cmdsts
) + sizeof(rxDescCache
.extsts
);
1504 rxDmaFree
= dmaDescFree
;
1507 descDmaWrBytes
+= rxDmaLen
;
1515 if (rxDmaState
!= dmaIdle
)
1518 rxPacketBufPtr
+= rxXferLen
;
1519 rxFragPtr
+= rxXferLen
;
1520 rxPktBytes
-= rxXferLen
;
1522 rxState
= rxFifoBlock
;
1526 if (rxDmaState
!= dmaIdle
)
1529 assert(rxDescCache
.cmdsts
& CMDSTS_OWN
);
1531 assert(rxPacket
== 0);
1532 devIntrPost(ISR_RXOK
);
1534 if (rxDescCache
.cmdsts
& CMDSTS_INTR
)
1535 devIntrPost(ISR_RXDESC
);
1538 DPRINTF(EthernetSM
, "Halting the RX state machine\n");
1542 rxState
= rxAdvance
;
1546 if (rxDescCache
.link
== 0) {
1550 rxState
= rxDescRead
;
1551 regs
.rxdp
= rxDescCache
.link
;
1554 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1555 rxDmaData
= &rxDescCache
;
1556 rxDmaLen
= sizeof(ns_desc
);
1557 rxDmaFree
= dmaDescFree
;
1565 panic("Invalid rxState!");
1569 DPRINTF(EthernetSM
, "entering next rx state = %s\n",
1570 NsRxStateStrings
[rxState
]);
1572 if (rxState
== rxIdle
) {
1573 regs
.command
&= ~CR_RXE
;
1574 devIntrPost(ISR_RXIDLE
);
1582 * @todo do we want to schedule a future kick?
1584 DPRINTF(EthernetSM
, "rx state machine exited state=%s\n",
1585 NsRxStateStrings
[rxState
]);
1591 if (txFifo
.empty()) {
1592 DPRINTF(Ethernet
, "nothing to transmit\n");
1596 DPRINTF(Ethernet
, "\n\nAttempt Pkt Transmit: txFifo length = %d\n",
1597 maxTxFifoSize
- txFifoAvail
);
1598 if (interface
->sendPacket(txFifo
.front())) {
1600 if (DTRACE(Ethernet
)) {
1601 if (txFifo
.front()->isIpPkt()) {
1602 ip_header
*ip
= txFifo
.front()->getIpHdr();
1603 DPRINTF(Ethernet
, "ID is %d\n", reverseEnd16(ip
->ID
));
1604 if (txFifo
.front()->isTcpPkt()) {
1605 tcp_header
*tcp
= txFifo
.front()->getTcpHdr(ip
);
1606 DPRINTF(Ethernet
, "Src Port = %d, Dest Port = %d\n",
1607 reverseEnd16(tcp
->src_port_num
),
1608 reverseEnd16(tcp
->dest_port_num
));
1614 DDUMP(Ethernet
, txFifo
.front()->data
, txFifo
.front()->length
);
1615 txBytes
+= txFifo
.front()->length
;
1618 txFifoAvail
+= txFifo
.front()->length
;
1620 DPRINTF(Ethernet
, "Successful Xmit! now txFifoAvail is %d\n", txFifoAvail
);
1621 txFifo
.front() = NULL
;
1624 /* normally do a writeback of the descriptor here, and ONLY after that is
1625 done, send this interrupt. but since our stuff never actually fails,
1626 just do this interrupt here, otherwise the code has to stray from this
1627 nice format. besides, it's functionally the same.
1629 devIntrPost(ISR_TXOK
);
1631 DPRINTF(Ethernet
, "May need to rethink always sending the descriptors back?\n");
1633 if (!txFifo
.empty() && !txEvent
.scheduled()) {
1634 DPRINTF(Ethernet
, "reschedule transmit\n");
1635 txEvent
.schedule(curTick
+ 1000);
1640 NSGigE::txDmaReadCopy()
1642 assert(txDmaState
== dmaReading
);
1644 memcpy(txDmaData
, physmem
->dma_addr(txDmaAddr
, txDmaLen
), txDmaLen
);
1645 txDmaState
= dmaIdle
;
1647 DPRINTF(EthernetDMA
, "tx dma read paddr=%#x len=%d\n",
1648 txDmaAddr
, txDmaLen
);
1649 DDUMP(EthernetDMA
, txDmaData
, txDmaLen
);
1653 NSGigE::doTxDmaRead()
1655 assert(txDmaState
== dmaIdle
|| txDmaState
== dmaReadWaiting
);
1656 txDmaState
= dmaReading
;
1658 if (dmaInterface
&& !txDmaFree
) {
1659 if (dmaInterface
->busy())
1660 txDmaState
= dmaReadWaiting
;
1662 dmaInterface
->doDMA(Read
, txDmaAddr
, txDmaLen
, curTick
,
1663 &txDmaReadEvent
, true);
1667 if (dmaReadDelay
== 0 && dmaReadFactor
== 0.0) {
1672 Tick factor
= ((txDmaLen
+ ULL(63)) >> ULL(6)) * dmaReadFactor
;
1673 Tick start
= curTick
+ dmaReadDelay
+ factor
;
1674 txDmaReadEvent
.schedule(start
);
1679 NSGigE::txDmaReadDone()
1681 assert(txDmaState
== dmaReading
);
1684 // If the receive state machine has a pending DMA, let it go first
1685 if (rxDmaState
== dmaReadWaiting
|| rxDmaState
== dmaWriteWaiting
)
1692 NSGigE::txDmaWriteCopy()
1694 assert(txDmaState
== dmaWriting
);
1696 memcpy(physmem
->dma_addr(txDmaAddr
, txDmaLen
), txDmaData
, txDmaLen
);
1697 txDmaState
= dmaIdle
;
1699 DPRINTF(EthernetDMA
, "tx dma write paddr=%#x len=%d\n",
1700 txDmaAddr
, txDmaLen
);
1701 DDUMP(EthernetDMA
, txDmaData
, txDmaLen
);
1705 NSGigE::doTxDmaWrite()
1707 assert(txDmaState
== dmaIdle
|| txDmaState
== dmaWriteWaiting
);
1708 txDmaState
= dmaWriting
;
1710 if (dmaInterface
&& !txDmaFree
) {
1711 if (dmaInterface
->busy())
1712 txDmaState
= dmaWriteWaiting
;
1714 dmaInterface
->doDMA(WriteInvalidate
, txDmaAddr
, txDmaLen
, curTick
,
1715 &txDmaWriteEvent
, true);
1719 if (dmaWriteDelay
== 0 && dmaWriteFactor
== 0.0) {
1724 Tick factor
= ((txDmaLen
+ ULL(63)) >> ULL(6)) * dmaWriteFactor
;
1725 Tick start
= curTick
+ dmaWriteDelay
+ factor
;
1726 txDmaWriteEvent
.schedule(start
);
1731 NSGigE::txDmaWriteDone()
1733 assert(txDmaState
== dmaWriting
);
1736 // If the receive state machine has a pending DMA, let it go first
1737 if (rxDmaState
== dmaReadWaiting
|| rxDmaState
== dmaWriteWaiting
)
1746 DPRINTF(EthernetSM
, "transmit kick state=%s\n", NsTxStateStrings
[txState
]);
1748 if (txKickTick
> curTick
) {
1749 DPRINTF(EthernetSM
, "transmit kick exiting, can't run till %d\n",
1756 switch(txDmaState
) {
1757 case dmaReadWaiting
:
1761 case dmaWriteWaiting
:
1771 if (!regs
.command
& CR_TXE
) {
1772 DPRINTF(EthernetSM
, "Transmit disabled. Nothing to do.\n");
1777 txState
= txDescRefr
;
1779 txDmaAddr
= regs
.txdp
& 0x3fffffff;
1780 txDmaData
= &txDescCache
+ offsetof(ns_desc
, link
);
1781 txDmaLen
= sizeof(txDescCache
.link
);
1782 txDmaFree
= dmaDescFree
;
1785 descDmaRdBytes
+= txDmaLen
;
1791 txState
= txDescRead
;
1793 txDmaAddr
= regs
.txdp
& 0x3fffffff;
1794 txDmaData
= &txDescCache
;
1795 txDmaLen
= sizeof(ns_desc
);
1796 txDmaFree
= dmaDescFree
;
1799 descDmaRdBytes
+= txDmaLen
;
1807 if (txDmaState
!= dmaIdle
)
1810 txState
= txAdvance
;
1814 if (txDmaState
!= dmaIdle
)
1817 DPRINTF(EthernetDesc
,
1818 "txDescCache data:\n\tlink=%08x\n\tbufptr=%08x\n\tcmdsts=%08x\n\textsts=%08x\n"
1819 ,txDescCache
.link
, txDescCache
.bufptr
, txDescCache
.cmdsts
,
1820 txDescCache
.extsts
);
1822 if (txDescCache
.cmdsts
& CMDSTS_OWN
) {
1823 txState
= txFifoBlock
;
1824 txFragPtr
= txDescCache
.bufptr
;
1825 txDescCnt
= txDescCache
.cmdsts
& CMDSTS_LEN_MASK
;
1833 DPRINTF(EthernetSM
, "\n\n*****starting the tx of a new packet\n");
1834 txPacket
= new EtherPacket
;
1835 txPacket
->data
= new uint8_t[16384];
1836 txPacketBufPtr
= txPacket
->data
;
1839 if (txDescCnt
== 0) {
1840 DPRINTF(EthernetSM
, "the txDescCnt == 0, done with descriptor\n");
1841 if (txDescCache
.cmdsts
& CMDSTS_MORE
) {
1842 DPRINTF(EthernetSM
, "there are more descriptors to come\n");
1843 txState
= txDescWrite
;
1845 txDescCache
.cmdsts
&= ~CMDSTS_OWN
;
1847 txDmaAddr
= (regs
.txdp
+ offsetof(ns_desc
, cmdsts
)) & 0x3fffffff;
1848 txDmaData
= &(txDescCache
.cmdsts
);
1849 txDmaLen
= sizeof(txDescCache
.cmdsts
);
1850 txDmaFree
= dmaDescFree
;
1855 } else { /* this packet is totally done */
1856 DPRINTF(EthernetSM
, "This packet is done, let's wrap it up\n");
1857 /* deal with the the packet that just finished */
1858 if ((regs
.vtcr
& VTCR_PPCHK
) && extstsEnable
) {
1859 if (txDescCache
.extsts
& EXTSTS_UDPPKT
) {
1860 udpChecksum(txPacket
, true);
1861 } else if (txDescCache
.extsts
& EXTSTS_TCPPKT
) {
1862 tcpChecksum(txPacket
, true);
1865 if (txDescCache
.extsts
& EXTSTS_IPPKT
) {
1866 ipChecksum(txPacket
, true);
1871 txPacket
->length
= txPacketBufPtr
- txPacket
->data
;
1872 /* this is just because the receive can't handle a packet bigger
1873 want to make sure */
1874 assert(txPacket
->length
<= 1514);
1875 txFifo
.push_back(txPacket
);
1877 /* this following section is not to spec, but functionally shouldn't
1878 be any different. normally, the chip will wait til the transmit has
1879 occurred before writing back the descriptor because it has to wait
1880 to see that it was successfully transmitted to decide whether to set
1881 CMDSTS_OK or not. however, in the simulator since it is always
1882 successfully transmitted, and writing it exactly to spec would
1883 complicate the code, we just do it here
1886 txDescCache
.cmdsts
&= ~CMDSTS_OWN
;
1887 txDescCache
.cmdsts
|= CMDSTS_OK
;
1889 DPRINTF(EthernetDesc
,
1890 "txDesc writeback:\n\tcmdsts=%08x\n\textsts=%08x\n",
1891 txDescCache
.cmdsts
, txDescCache
.extsts
);
1893 txDmaAddr
= (regs
.txdp
+ offsetof(ns_desc
, cmdsts
)) & 0x3fffffff;
1894 txDmaData
= &(txDescCache
.cmdsts
);
1895 txDmaLen
= sizeof(txDescCache
.cmdsts
) + sizeof(txDescCache
.extsts
);
1896 txDmaFree
= dmaDescFree
;
1899 descDmaWrBytes
+= txDmaLen
;
1905 DPRINTF(EthernetSM
, "halting TX state machine\n");
1909 txState
= txAdvance
;
1915 DPRINTF(EthernetSM
, "this descriptor isn't done yet\n");
1917 txState
= txFragRead
;
1919 /* The number of bytes transferred is either whatever is left
1920 in the descriptor (txDescCnt), or if there is not enough
1921 room in the fifo, just whatever room is left in the fifo
1923 txXferLen
= min
<uint32_t>(txDescCnt
, txFifoAvail
);
1925 txDmaAddr
= txFragPtr
& 0x3fffffff;
1926 txDmaData
= txPacketBufPtr
;
1927 txDmaLen
= txXferLen
;
1928 txDmaFree
= dmaDataFree
;
1933 txState
= txFifoBlock
;
1943 if (txDmaState
!= dmaIdle
)
1946 txPacketBufPtr
+= txXferLen
;
1947 txFragPtr
+= txXferLen
;
1948 txDescCnt
-= txXferLen
;
1949 txFifoAvail
-= txXferLen
;
1951 txState
= txFifoBlock
;
1955 if (txDmaState
!= dmaIdle
)
1958 if (txDescCache
.cmdsts
& CMDSTS_INTR
) {
1959 devIntrPost(ISR_TXDESC
);
1962 txState
= txAdvance
;
1966 if (txDescCache
.link
== 0) {
1969 txState
= txDescRead
;
1970 regs
.txdp
= txDescCache
.link
;
1973 txDmaAddr
= txDescCache
.link
& 0x3fffffff;
1974 txDmaData
= &txDescCache
;
1975 txDmaLen
= sizeof(ns_desc
);
1976 txDmaFree
= dmaDescFree
;
1984 panic("invalid state");
1987 DPRINTF(EthernetSM
, "entering next tx state=%s\n",
1988 NsTxStateStrings
[txState
]);
1990 if (txState
== txIdle
) {
1991 regs
.command
&= ~CR_TXE
;
1992 devIntrPost(ISR_TXIDLE
);
2000 * @todo do we want to schedule a future kick?
2002 DPRINTF(EthernetSM
, "tx state machine exited state=%s\n",
2003 NsTxStateStrings
[txState
]);
2007 NSGigE::transferDone()
2012 if (txEvent
.scheduled())
2013 txEvent
.reschedule(curTick
+ 1);
2015 txEvent
.schedule(curTick
+ 1);
2019 NSGigE::rxFilter(PacketPtr packet
)
2024 if (packet
->IsUnicast()) {
2027 // If we're accepting all unicast addresses
2031 // If we make a perfect match
2033 && (memcmp(rom
.perfectMatch
, packet
->data
, sizeof(rom
.perfectMatch
)) == 0))
2036 eth_header
*eth
= (eth_header
*) packet
->data
;
2037 if ((acceptArp
) && (eth
->type
== 0x608))
2040 } else if (packet
->IsBroadcast()) {
2043 // if we're accepting broadcasts
2044 if (acceptBroadcast
)
2047 } else if (packet
->IsMulticast()) {
2050 // if we're accepting all multicasts
2051 if (acceptMulticast
)
2057 // oh well, punt on this one
2061 DPRINTF(Ethernet
, "rxFilter drop\n");
2062 DDUMP(EthernetData
, packet
->data
, packet
->length
);
2069 NSGigE::recvPacket(PacketPtr packet
)
2071 rxBytes
+= packet
->length
;
2074 DPRINTF(Ethernet
, "\n\nReceiving packet from wire, rxFifoAvail = %d\n", maxRxFifoSize
- rxFifoCnt
);
2076 if (rxState
== rxIdle
) {
2077 DPRINTF(Ethernet
, "receive disabled...packet dropped\n");
2078 interface
->recvDone();
2082 if (rxFilterEnable
&& rxFilter(packet
)) {
2083 DPRINTF(Ethernet
, "packet filtered...dropped\n");
2084 interface
->recvDone();
2088 if ((rxFifoCnt
+ packet
->length
) >= maxRxFifoSize
) {
2090 "packet will not fit in receive buffer...packet dropped\n");
2091 devIntrPost(ISR_RXORN
);
2095 rxFifo
.push_back(packet
);
2096 rxFifoCnt
+= packet
->length
;
2097 interface
->recvDone();
2104 * does a udp checksum. if gen is true, then it generates it and puts it in the right place
2105 * else, it just checks what it calculates against the value in the header in packet
2108 NSGigE::udpChecksum(PacketPtr packet
, bool gen
)
2110 ip_header
*ip
= packet
->getIpHdr();
2111 udp_header
*hdr
= packet
->getUdpHdr(ip
);
2113 pseudo_header
*pseudo
= new pseudo_header
;
2115 pseudo
->src_ip_addr
= ip
->src_ip_addr
;
2116 pseudo
->dest_ip_addr
= ip
->dest_ip_addr
;
2117 pseudo
->protocol
= ip
->protocol
;
2118 pseudo
->len
= hdr
->len
;
2120 uint16_t cksum
= checksumCalc((uint16_t *) pseudo
, (uint16_t *) hdr
,
2121 (uint32_t) hdr
->len
);
2125 hdr
->chksum
= cksum
;
2134 NSGigE::tcpChecksum(PacketPtr packet
, bool gen
)
2136 ip_header
*ip
= packet
->getIpHdr();
2137 tcp_header
*hdr
= packet
->getTcpHdr(ip
);
2140 pseudo_header
*pseudo
= new pseudo_header
;
2142 pseudo
->src_ip_addr
= ip
->src_ip_addr
;
2143 pseudo
->dest_ip_addr
= ip
->dest_ip_addr
;
2144 pseudo
->protocol
= reverseEnd16(ip
->protocol
);
2145 pseudo
->len
= reverseEnd16(reverseEnd16(ip
->dgram_len
) - (ip
->vers_len
& 0xf)*4);
2147 cksum
= checksumCalc((uint16_t *) pseudo
, (uint16_t *) hdr
,
2148 (uint32_t) reverseEnd16(pseudo
->len
));
2150 pseudo
->src_ip_addr
= 0;
2151 pseudo
->dest_ip_addr
= 0;
2152 pseudo
->protocol
= hdr
->chksum
;
2155 cksum
= checksumCalc((uint16_t *) pseudo
, (uint16_t *) hdr
,
2156 (uint32_t) (reverseEnd16(ip
->dgram_len
) - (ip
->vers_len
& 0xf)*4));
2161 hdr
->chksum
= cksum
;
2170 NSGigE::ipChecksum(PacketPtr packet
, bool gen
)
2172 ip_header
*hdr
= packet
->getIpHdr();
2174 uint16_t cksum
= checksumCalc(NULL
, (uint16_t *) hdr
, (hdr
->vers_len
& 0xf)*4);
2177 DPRINTF(EthernetCksum
, "generated checksum: %#x\n", cksum
);
2178 hdr
->hdr_chksum
= cksum
;
2188 NSGigE::checksumCalc(uint16_t *pseudo
, uint16_t *buf
, uint32_t len
)
2192 uint16_t last_pad
= 0;
2194 last_pad
= buf
[len
/2] & 0xff;
2200 sum
= pseudo
[0] + pseudo
[1] + pseudo
[2] +
2201 pseudo
[3] + pseudo
[4] + pseudo
[5];
2204 for (int i
=0; i
< (len
/2); ++i
) {
2209 sum
= (sum
>> 16) + (sum
& 0xffff);
2214 //=====================================================================
2218 NSGigE::serialize(ostream
&os
)
2220 // Serialize the PciDev base class
2221 PciDev::serialize(os
);
2224 * Finalize any DMA events now.
2226 if (rxDmaReadEvent
.scheduled())
2228 if (rxDmaWriteEvent
.scheduled())
2230 if (txDmaReadEvent
.scheduled())
2232 if (txDmaWriteEvent
.scheduled())
2236 * Serialize the device registers
2238 SERIALIZE_SCALAR(regs
.command
);
2239 SERIALIZE_SCALAR(regs
.config
);
2240 SERIALIZE_SCALAR(regs
.mear
);
2241 SERIALIZE_SCALAR(regs
.ptscr
);
2242 SERIALIZE_SCALAR(regs
.isr
);
2243 SERIALIZE_SCALAR(regs
.imr
);
2244 SERIALIZE_SCALAR(regs
.ier
);
2245 SERIALIZE_SCALAR(regs
.ihr
);
2246 SERIALIZE_SCALAR(regs
.txdp
);
2247 SERIALIZE_SCALAR(regs
.txdp_hi
);
2248 SERIALIZE_SCALAR(regs
.txcfg
);
2249 SERIALIZE_SCALAR(regs
.gpior
);
2250 SERIALIZE_SCALAR(regs
.rxdp
);
2251 SERIALIZE_SCALAR(regs
.rxdp_hi
);
2252 SERIALIZE_SCALAR(regs
.rxcfg
);
2253 SERIALIZE_SCALAR(regs
.pqcr
);
2254 SERIALIZE_SCALAR(regs
.wcsr
);
2255 SERIALIZE_SCALAR(regs
.pcr
);
2256 SERIALIZE_SCALAR(regs
.rfcr
);
2257 SERIALIZE_SCALAR(regs
.rfdr
);
2258 SERIALIZE_SCALAR(regs
.srr
);
2259 SERIALIZE_SCALAR(regs
.mibc
);
2260 SERIALIZE_SCALAR(regs
.vrcr
);
2261 SERIALIZE_SCALAR(regs
.vtcr
);
2262 SERIALIZE_SCALAR(regs
.vdr
);
2263 SERIALIZE_SCALAR(regs
.ccsr
);
2264 SERIALIZE_SCALAR(regs
.tbicr
);
2265 SERIALIZE_SCALAR(regs
.tbisr
);
2266 SERIALIZE_SCALAR(regs
.tanar
);
2267 SERIALIZE_SCALAR(regs
.tanlpar
);
2268 SERIALIZE_SCALAR(regs
.taner
);
2269 SERIALIZE_SCALAR(regs
.tesr
);
2271 SERIALIZE_ARRAY(rom
.perfectMatch
, EADDR_LEN
);
2273 SERIALIZE_SCALAR(ioEnable
);
2276 * Serialize the data Fifos
2278 int txNumPkts
= txFifo
.size();
2279 SERIALIZE_SCALAR(txNumPkts
);
2281 pktiter_t end
= txFifo
.end();
2282 for (pktiter_t p
= txFifo
.begin(); p
!= end
; ++p
) {
2283 nameOut(os
, csprintf("%s.txFifo%d", name(), i
++));
2284 (*p
)->serialize(os
);
2287 int rxNumPkts
= rxFifo
.size();
2288 SERIALIZE_SCALAR(rxNumPkts
);
2291 for (pktiter_t p
= rxFifo
.begin(); p
!= end
; ++p
) {
2292 nameOut(os
, csprintf("%s.rxFifo%d", name(), i
++));
2293 (*p
)->serialize(os
);
2297 * Serialize the various helper variables
2299 bool txPacketExists
= txPacket
;
2300 SERIALIZE_SCALAR(txPacketExists
);
2301 if (txPacketExists
) {
2302 nameOut(os
, csprintf("%s.txPacket", name()));
2303 txPacket
->serialize(os
);
2304 uint32_t txPktBufPtr
= (uint32_t) (txPacketBufPtr
- txPacket
->data
);
2305 SERIALIZE_SCALAR(txPktBufPtr
);
2308 bool rxPacketExists
= rxPacket
;
2309 SERIALIZE_SCALAR(rxPacketExists
);
2310 if (rxPacketExists
) {
2311 nameOut(os
, csprintf("%s.rxPacket", name()));
2312 rxPacket
->serialize(os
);
2313 uint32_t rxPktBufPtr
= (uint32_t) (rxPacketBufPtr
- rxPacket
->data
);
2314 SERIALIZE_SCALAR(rxPktBufPtr
);
2317 SERIALIZE_SCALAR(txXferLen
);
2318 SERIALIZE_SCALAR(rxXferLen
);
2321 * Serialize DescCaches
2323 SERIALIZE_SCALAR(txDescCache
.link
);
2324 SERIALIZE_SCALAR(txDescCache
.bufptr
);
2325 SERIALIZE_SCALAR(txDescCache
.cmdsts
);
2326 SERIALIZE_SCALAR(txDescCache
.extsts
);
2327 SERIALIZE_SCALAR(rxDescCache
.link
);
2328 SERIALIZE_SCALAR(rxDescCache
.bufptr
);
2329 SERIALIZE_SCALAR(rxDescCache
.cmdsts
);
2330 SERIALIZE_SCALAR(rxDescCache
.extsts
);
2333 * Serialize tx state machine
2335 int txState
= this->txState
;
2336 SERIALIZE_SCALAR(txState
);
2337 SERIALIZE_SCALAR(CTDD
);
2338 SERIALIZE_SCALAR(txFifoAvail
);
2339 SERIALIZE_SCALAR(txHalt
);
2340 SERIALIZE_SCALAR(txFragPtr
);
2341 SERIALIZE_SCALAR(txDescCnt
);
2342 int txDmaState
= this->txDmaState
;
2343 SERIALIZE_SCALAR(txDmaState
);
2346 * Serialize rx state machine
2348 int rxState
= this->rxState
;
2349 SERIALIZE_SCALAR(rxState
);
2350 SERIALIZE_SCALAR(CRDD
);
2351 SERIALIZE_SCALAR(rxPktBytes
);
2352 SERIALIZE_SCALAR(rxFifoCnt
);
2353 SERIALIZE_SCALAR(rxHalt
);
2354 SERIALIZE_SCALAR(rxDescCnt
);
2355 int rxDmaState
= this->rxDmaState
;
2356 SERIALIZE_SCALAR(rxDmaState
);
2358 SERIALIZE_SCALAR(extstsEnable
);
2361 * If there's a pending transmit, store the time so we can
2362 * reschedule it later
2364 Tick transmitTick
= txEvent
.scheduled() ? txEvent
.when() - curTick
: 0;
2365 SERIALIZE_SCALAR(transmitTick
);
2368 * receive address filter settings
2370 SERIALIZE_SCALAR(rxFilterEnable
);
2371 SERIALIZE_SCALAR(acceptBroadcast
);
2372 SERIALIZE_SCALAR(acceptMulticast
);
2373 SERIALIZE_SCALAR(acceptUnicast
);
2374 SERIALIZE_SCALAR(acceptPerfect
);
2375 SERIALIZE_SCALAR(acceptArp
);
2378 * Keep track of pending interrupt status.
2380 SERIALIZE_SCALAR(intrTick
);
2381 SERIALIZE_SCALAR(cpuPendingIntr
);
2382 Tick intrEventTick
= 0;
2384 intrEventTick
= intrEvent
->when();
2385 SERIALIZE_SCALAR(intrEventTick
);
2390 NSGigE::unserialize(Checkpoint
*cp
, const std::string
§ion
)
2392 // Unserialize the PciDev base class
2393 PciDev::unserialize(cp
, section
);
2395 UNSERIALIZE_SCALAR(regs
.command
);
2396 UNSERIALIZE_SCALAR(regs
.config
);
2397 UNSERIALIZE_SCALAR(regs
.mear
);
2398 UNSERIALIZE_SCALAR(regs
.ptscr
);
2399 UNSERIALIZE_SCALAR(regs
.isr
);
2400 UNSERIALIZE_SCALAR(regs
.imr
);
2401 UNSERIALIZE_SCALAR(regs
.ier
);
2402 UNSERIALIZE_SCALAR(regs
.ihr
);
2403 UNSERIALIZE_SCALAR(regs
.txdp
);
2404 UNSERIALIZE_SCALAR(regs
.txdp_hi
);
2405 UNSERIALIZE_SCALAR(regs
.txcfg
);
2406 UNSERIALIZE_SCALAR(regs
.gpior
);
2407 UNSERIALIZE_SCALAR(regs
.rxdp
);
2408 UNSERIALIZE_SCALAR(regs
.rxdp_hi
);
2409 UNSERIALIZE_SCALAR(regs
.rxcfg
);
2410 UNSERIALIZE_SCALAR(regs
.pqcr
);
2411 UNSERIALIZE_SCALAR(regs
.wcsr
);
2412 UNSERIALIZE_SCALAR(regs
.pcr
);
2413 UNSERIALIZE_SCALAR(regs
.rfcr
);
2414 UNSERIALIZE_SCALAR(regs
.rfdr
);
2415 UNSERIALIZE_SCALAR(regs
.srr
);
2416 UNSERIALIZE_SCALAR(regs
.mibc
);
2417 UNSERIALIZE_SCALAR(regs
.vrcr
);
2418 UNSERIALIZE_SCALAR(regs
.vtcr
);
2419 UNSERIALIZE_SCALAR(regs
.vdr
);
2420 UNSERIALIZE_SCALAR(regs
.ccsr
);
2421 UNSERIALIZE_SCALAR(regs
.tbicr
);
2422 UNSERIALIZE_SCALAR(regs
.tbisr
);
2423 UNSERIALIZE_SCALAR(regs
.tanar
);
2424 UNSERIALIZE_SCALAR(regs
.tanlpar
);
2425 UNSERIALIZE_SCALAR(regs
.taner
);
2426 UNSERIALIZE_SCALAR(regs
.tesr
);
2428 UNSERIALIZE_ARRAY(rom
.perfectMatch
, EADDR_LEN
);
2430 UNSERIALIZE_SCALAR(ioEnable
);
2433 * unserialize the data fifos
2436 UNSERIALIZE_SCALAR(txNumPkts
);
2438 for (i
= 0; i
< txNumPkts
; ++i
) {
2439 PacketPtr p
= new EtherPacket
;
2440 p
->unserialize(cp
, csprintf("%s.rxFifo%d", section
, i
));
2441 txFifo
.push_back(p
);
2445 UNSERIALIZE_SCALAR(rxNumPkts
);
2446 for (i
= 0; i
< rxNumPkts
; ++i
) {
2447 PacketPtr p
= new EtherPacket
;
2448 p
->unserialize(cp
, csprintf("%s.rxFifo%d", section
, i
));
2449 rxFifo
.push_back(p
);
2453 * unserialize the various helper variables
2455 bool txPacketExists
;
2456 UNSERIALIZE_SCALAR(txPacketExists
);
2457 if (txPacketExists
) {
2458 txPacket
= new EtherPacket
;
2459 txPacket
->unserialize(cp
, csprintf("%s.txPacket", section
));
2460 uint32_t txPktBufPtr
;
2461 UNSERIALIZE_SCALAR(txPktBufPtr
);
2462 txPacketBufPtr
= (uint8_t *) txPacket
->data
+ txPktBufPtr
;
2466 bool rxPacketExists
;
2467 UNSERIALIZE_SCALAR(rxPacketExists
);
2469 if (rxPacketExists
) {
2470 rxPacket
= new EtherPacket
;
2471 rxPacket
->unserialize(cp
, csprintf("%s.rxPacket", section
));
2472 uint32_t rxPktBufPtr
;
2473 UNSERIALIZE_SCALAR(rxPktBufPtr
);
2474 rxPacketBufPtr
= (uint8_t *) rxPacket
->data
+ rxPktBufPtr
;
2478 UNSERIALIZE_SCALAR(txXferLen
);
2479 UNSERIALIZE_SCALAR(rxXferLen
);
2482 * Unserialize DescCaches
2484 UNSERIALIZE_SCALAR(txDescCache
.link
);
2485 UNSERIALIZE_SCALAR(txDescCache
.bufptr
);
2486 UNSERIALIZE_SCALAR(txDescCache
.cmdsts
);
2487 UNSERIALIZE_SCALAR(txDescCache
.extsts
);
2488 UNSERIALIZE_SCALAR(rxDescCache
.link
);
2489 UNSERIALIZE_SCALAR(rxDescCache
.bufptr
);
2490 UNSERIALIZE_SCALAR(rxDescCache
.cmdsts
);
2491 UNSERIALIZE_SCALAR(rxDescCache
.extsts
);
2494 * unserialize tx state machine
2497 UNSERIALIZE_SCALAR(txState
);
2498 this->txState
= (TxState
) txState
;
2499 UNSERIALIZE_SCALAR(CTDD
);
2500 UNSERIALIZE_SCALAR(txFifoAvail
);
2501 UNSERIALIZE_SCALAR(txHalt
);
2502 UNSERIALIZE_SCALAR(txFragPtr
);
2503 UNSERIALIZE_SCALAR(txDescCnt
);
2505 UNSERIALIZE_SCALAR(txDmaState
);
2506 this->txDmaState
= (DmaState
) txDmaState
;
2509 * unserialize rx state machine
2512 UNSERIALIZE_SCALAR(rxState
);
2513 this->rxState
= (RxState
) rxState
;
2514 UNSERIALIZE_SCALAR(CRDD
);
2515 UNSERIALIZE_SCALAR(rxPktBytes
);
2516 UNSERIALIZE_SCALAR(rxFifoCnt
);
2517 UNSERIALIZE_SCALAR(rxHalt
);
2518 UNSERIALIZE_SCALAR(rxDescCnt
);
2520 UNSERIALIZE_SCALAR(rxDmaState
);
2521 this->rxDmaState
= (DmaState
) rxDmaState
;
2523 UNSERIALIZE_SCALAR(extstsEnable
);
2526 * If there's a pending transmit, reschedule it now
2529 UNSERIALIZE_SCALAR(transmitTick
);
2531 txEvent
.schedule(curTick
+ transmitTick
);
2534 * unserialize receive address filter settings
2536 UNSERIALIZE_SCALAR(rxFilterEnable
);
2537 UNSERIALIZE_SCALAR(acceptBroadcast
);
2538 UNSERIALIZE_SCALAR(acceptMulticast
);
2539 UNSERIALIZE_SCALAR(acceptUnicast
);
2540 UNSERIALIZE_SCALAR(acceptPerfect
);
2541 UNSERIALIZE_SCALAR(acceptArp
);
2544 * Keep track of pending interrupt status.
2546 UNSERIALIZE_SCALAR(intrTick
);
2547 UNSERIALIZE_SCALAR(cpuPendingIntr
);
2549 UNSERIALIZE_SCALAR(intrEventTick
);
2550 if (intrEventTick
) {
2551 intrEvent
= new IntrEvent(this, true);
2552 intrEvent
->schedule(intrEventTick
);
2556 * re-add addrRanges to bus bridges
2559 pioInterface
->addAddrRange(BARAddrs
[0], BARAddrs
[0] + BARSize
[0] - 1);
2560 pioInterface
->addAddrRange(BARAddrs
[1], BARAddrs
[1] + BARSize
[1] - 1);
2565 NSGigE::cacheAccess(MemReqPtr
&req
)
2567 DPRINTF(EthernetPIO
, "timing access to paddr=%#x (daddr=%#x)\n",
2568 req
->paddr
, req
->paddr
- addr
);
2569 return curTick
+ pioLatency
;
2571 //=====================================================================
2574 //********** helper functions******************************************
/**
 * Swap the two bytes of a 16-bit value (network/host byte-order
 * conversion for the checksum helpers above).
 */
uint16_t reverseEnd16(uint16_t num)
{
    const uint16_t lo = num & 0xff;
    const uint16_t hi = (num >> 8) & 0xff;
    return (uint16_t)((lo << 8) | hi);
}
/**
 * Reverse the byte order of a 32-bit value (endianness conversion).
 *
 * Bug fix: the previous code extracted the upper half-word with
 * ``(num & 0xffff0000) >> 8`` — a shift of 8 instead of 16 — so the
 * ``uint16_t`` cast truncated to bits 8..23 and the result was wrong
 * (e.g. 0x12345678 became 0x78560034 instead of 0x78563412).  Rewritten
 * as an explicit four-byte swap, which is also self-contained.
 */
uint32_t reverseEnd32(uint32_t num)
{
    uint32_t reverse = ((num & 0x000000ff) << 24) |
                       ((num & 0x0000ff00) << 8)  |
                       ((num & 0x00ff0000) >> 8)  |
                       ((num & 0xff000000) >> 24);
    return reverse;
}
2592 //=====================================================================
2594 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt
)
2596 SimObjectParam
<EtherInt
*> peer
;
2597 SimObjectParam
<NSGigE
*> device
;
2599 END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt
)
2601 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt
)
2603 INIT_PARAM_DFLT(peer
, "peer interface", NULL
),
2604 INIT_PARAM(device
, "Ethernet device of this interface")
2606 END_INIT_SIM_OBJECT_PARAMS(NSGigEInt
)
2608 CREATE_SIM_OBJECT(NSGigEInt
)
2610 NSGigEInt
*dev_int
= new NSGigEInt(getInstanceName(), device
);
2612 EtherInt
*p
= (EtherInt
*)peer
;
2614 dev_int
->setPeer(p
);
2615 p
->setPeer(dev_int
);
2621 REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt
)
2624 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE
)
2626 Param
<Tick
> tx_delay
;
2627 Param
<Tick
> rx_delay
;
2628 SimObjectParam
<IntrControl
*> intr_ctrl
;
2629 Param
<Tick
> intr_delay
;
2630 SimObjectParam
<MemoryController
*> mmu
;
2631 SimObjectParam
<PhysicalMemory
*> physmem
;
2632 Param
<bool> rx_filter
;
2633 Param
<string
> hardware_address
;
2634 SimObjectParam
<Bus
*> header_bus
;
2635 SimObjectParam
<Bus
*> payload_bus
;
2636 SimObjectParam
<HierParams
*> hier
;
2637 Param
<Tick
> pio_latency
;
2638 Param
<bool> dma_desc_free
;
2639 Param
<bool> dma_data_free
;
2640 Param
<Tick
> dma_read_delay
;
2641 Param
<Tick
> dma_write_delay
;
2642 Param
<Tick
> dma_read_factor
;
2643 Param
<Tick
> dma_write_factor
;
2644 SimObjectParam
<PciConfigAll
*> configspace
;
2645 SimObjectParam
<PciConfigData
*> configdata
;
2646 SimObjectParam
<Tsunami
*> tsunami
;
2647 Param
<uint32_t> pci_bus
;
2648 Param
<uint32_t> pci_dev
;
2649 Param
<uint32_t> pci_func
;
2650 Param
<uint32_t> tx_fifo_size
;
2651 Param
<uint32_t> rx_fifo_size
;
2653 END_DECLARE_SIM_OBJECT_PARAMS(NSGigE
)
2655 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE
)
2657 INIT_PARAM_DFLT(tx_delay
, "Transmit Delay", 1000),
2658 INIT_PARAM_DFLT(rx_delay
, "Receive Delay", 1000),
2659 INIT_PARAM(intr_ctrl
, "Interrupt Controller"),
2660 INIT_PARAM_DFLT(intr_delay
, "Interrupt Delay in microseconds", 0),
2661 INIT_PARAM(mmu
, "Memory Controller"),
2662 INIT_PARAM(physmem
, "Physical Memory"),
2663 INIT_PARAM_DFLT(rx_filter
, "Enable Receive Filter", true),
2664 INIT_PARAM_DFLT(hardware_address
, "Ethernet Hardware Address",
2665 "00:99:00:00:00:01"),
2666 INIT_PARAM_DFLT(header_bus
, "The IO Bus to attach to for headers", NULL
),
2667 INIT_PARAM_DFLT(payload_bus
, "The IO Bus to attach to for payload", NULL
),
2668 INIT_PARAM_DFLT(hier
, "Hierarchy global variables", &defaultHierParams
),
2669 INIT_PARAM_DFLT(pio_latency
, "Programmed IO latency in bus cycles", 1),
2670 INIT_PARAM_DFLT(dma_desc_free
, "DMA of Descriptors is free", false),
2671 INIT_PARAM_DFLT(dma_data_free
, "DMA of Data is free", false),
2672 INIT_PARAM_DFLT(dma_read_delay
, "fixed delay for dma reads", 0),
2673 INIT_PARAM_DFLT(dma_write_delay
, "fixed delay for dma writes", 0),
2674 INIT_PARAM_DFLT(dma_read_factor
, "multiplier for dma reads", 0),
2675 INIT_PARAM_DFLT(dma_write_factor
, "multiplier for dma writes", 0),
2676 INIT_PARAM(configspace
, "PCI Configspace"),
2677 INIT_PARAM(configdata
, "PCI Config data"),
2678 INIT_PARAM(tsunami
, "Tsunami"),
2679 INIT_PARAM(pci_bus
, "PCI bus"),
2680 INIT_PARAM(pci_dev
, "PCI device number"),
2681 INIT_PARAM(pci_func
, "PCI function code"),
2682 INIT_PARAM_DFLT(tx_fifo_size
, "max size in bytes of txFifo", 131072),
2683 INIT_PARAM_DFLT(rx_fifo_size
, "max size in bytes of rxFifo", 131072)
2685 END_INIT_SIM_OBJECT_PARAMS(NSGigE
)
2688 CREATE_SIM_OBJECT(NSGigE
)
2691 sscanf(((string
)hardware_address
).c_str(), "%x:%x:%x:%x:%x:%x",
2692 &eaddr
[0], &eaddr
[1], &eaddr
[2], &eaddr
[3], &eaddr
[4], &eaddr
[5]);
2694 return new NSGigE(getInstanceName(), intr_ctrl
, intr_delay
,
2695 physmem
, tx_delay
, rx_delay
, mmu
, hier
, header_bus
,
2696 payload_bus
, pio_latency
, dma_desc_free
, dma_data_free
,
2697 dma_read_delay
, dma_write_delay
, dma_read_factor
,
2698 dma_write_factor
, configspace
, configdata
,
2699 tsunami
, pci_bus
, pci_dev
, pci_func
, rx_filter
, eaddr
,
2700 tx_fifo_size
, rx_fifo_size
);
2703 REGISTER_SIM_OBJECT("NSGigE", NSGigE
)