2 * Copyright (c) 2004 The Regents of The University of Michigan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 * Device module for modelling the National Semiconductor
31 * DP83820 ethernet controller. Does not support priority queueing
37 #include "base/inet.hh"
38 #include "cpu/exec_context.hh"
39 #include "cpu/intr_control.hh"
41 #include "dev/etherlink.hh"
42 #include "dev/ns_gige.hh"
43 #include "dev/pciconfigall.hh"
44 #include "dev/tsunami_cchip.hh"
45 #include "mem/bus/bus.hh"
46 #include "mem/bus/dma_interface.hh"
47 #include "mem/bus/pio_interface.hh"
48 #include "mem/bus/pio_interface_impl.hh"
49 #include "mem/functional_mem/memory_control.hh"
50 #include "mem/functional_mem/physical_memory.hh"
51 #include "sim/builder.hh"
52 #include "sim/debug.hh"
53 #include "sim/host.hh"
54 #include "sim/sim_stats.hh"
55 #include "targetarch/vtophys.hh"
57 const char *NsRxStateStrings
[] =
68 const char *NsTxStateStrings
[] =
79 const char *NsDmaState
[] =
91 ///////////////////////////////////////////////////////////////////////
95 NSGigE::NSGigE(const std::string
&name
, IntrControl
*i
, Tick intr_delay
,
96 PhysicalMemory
*pmem
, Tick tx_delay
, Tick rx_delay
,
97 MemoryController
*mmu
, HierParams
*hier
, Bus
*header_bus
,
98 Bus
*payload_bus
, Tick pio_latency
, bool dma_desc_free
,
99 bool dma_data_free
, Tick dma_read_delay
, Tick dma_write_delay
,
100 Tick dma_read_factor
, Tick dma_write_factor
, PciConfigAll
*cf
,
101 PciConfigData
*cd
, Tsunami
*t
, uint32_t bus
, uint32_t dev
,
102 uint32_t func
, bool rx_filter
, const int eaddr
[6],
103 uint32_t tx_fifo_size
, uint32_t rx_fifo_size
)
104 : PciDev(name
, mmu
, cf
, cd
, bus
, dev
, func
), tsunami(t
), ioEnable(false),
105 maxTxFifoSize(tx_fifo_size
), maxRxFifoSize(rx_fifo_size
),
106 txPacket(0), rxPacket(0), txPacketBufPtr(NULL
), rxPacketBufPtr(NULL
),
107 txXferLen(0), rxXferLen(0), txState(txIdle
), txEnable(false),
108 CTDD(false), txFifoAvail(tx_fifo_size
),
109 txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle
), rxState(rxIdle
),
110 rxEnable(false), CRDD(false), rxPktBytes(0), rxFifoCnt(0),
111 rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle
), extstsEnable(false),
112 rxDmaReadEvent(this), rxDmaWriteEvent(this),
113 txDmaReadEvent(this), txDmaWriteEvent(this),
114 dmaDescFree(dma_desc_free
), dmaDataFree(dma_data_free
),
115 txDelay(tx_delay
), rxDelay(rx_delay
), rxKickTick(0), txKickTick(0),
116 txEvent(this), rxFilterEnable(rx_filter
), acceptBroadcast(false),
117 acceptMulticast(false), acceptUnicast(false),
118 acceptPerfect(false), acceptArp(false),
119 physmem(pmem
), intctrl(i
), intrTick(0), cpuPendingIntr(false),
120 intrEvent(0), interface(0)
122 tsunami
->ethernet
= this;
125 pioInterface
= newPioInterface(name
, hier
, header_bus
, this,
126 &NSGigE::cacheAccess
);
128 pioLatency
= pio_latency
* header_bus
->clockRatio
;
131 dmaInterface
= new DMAInterface
<Bus
>(name
+ ".dma",
132 header_bus
, payload_bus
, 1);
134 dmaInterface
= new DMAInterface
<Bus
>(name
+ ".dma",
135 header_bus
, header_bus
, 1);
136 } else if (payload_bus
) {
137 pioInterface
= newPioInterface(name
, hier
, payload_bus
, this,
138 &NSGigE::cacheAccess
);
140 pioLatency
= pio_latency
* payload_bus
->clockRatio
;
142 dmaInterface
= new DMAInterface
<Bus
>(name
+ ".dma", payload_bus
,
147 intrDelay
= US2Ticks(intr_delay
);
148 dmaReadDelay
= dma_read_delay
;
149 dmaWriteDelay
= dma_write_delay
;
150 dmaReadFactor
= dma_read_factor
;
151 dmaWriteFactor
= dma_write_factor
;
154 rom
.perfectMatch
[0] = eaddr
[0];
155 rom
.perfectMatch
[1] = eaddr
[1];
156 rom
.perfectMatch
[2] = eaddr
[2];
157 rom
.perfectMatch
[3] = eaddr
[3];
158 rom
.perfectMatch
[4] = eaddr
[4];
159 rom
.perfectMatch
[5] = eaddr
[5];
169 .name(name() + ".txBytes")
170 .desc("Bytes Transmitted")
175 .name(name() + ".rxBytes")
176 .desc("Bytes Received")
181 .name(name() + ".txPackets")
182 .desc("Number of Packets Transmitted")
187 .name(name() + ".rxPackets")
188 .desc("Number of Packets Received")
193 .name(name() + ".txIpChecksums")
194 .desc("Number of tx IP Checksums done by device")
200 .name(name() + ".rxIpChecksums")
201 .desc("Number of rx IP Checksums done by device")
207 .name(name() + ".txTcpChecksums")
208 .desc("Number of tx TCP Checksums done by device")
214 .name(name() + ".rxTcpChecksums")
215 .desc("Number of rx TCP Checksums done by device")
221 .name(name() + ".txUdpChecksums")
222 .desc("Number of tx UDP Checksums done by device")
228 .name(name() + ".rxUdpChecksums")
229 .desc("Number of rx UDP Checksums done by device")
235 .name(name() + ".descDMAReads")
236 .desc("Number of descriptors the device read w/ DMA")
241 .name(name() + ".descDMAWrites")
242 .desc("Number of descriptors the device wrote w/ DMA")
247 .name(name() + ".descDmaReadBytes")
248 .desc("number of descriptor bytes read w/ DMA")
253 .name(name() + ".descDmaWriteBytes")
254 .desc("number of descriptor bytes write w/ DMA")
260 .name(name() + ".txBandwidth")
261 .desc("Transmit Bandwidth (bits/s)")
267 .name(name() + ".rxBandwidth")
268 .desc("Receive Bandwidth (bits/s)")
274 .name(name() + ".txPPS")
275 .desc("Packet Tranmission Rate (packets/s)")
281 .name(name() + ".rxPPS")
282 .desc("Packet Reception Rate (packets/s)")
287 txBandwidth
= txBytes
* Stats::constant(8) / simSeconds
;
288 rxBandwidth
= rxBytes
* Stats::constant(8) / simSeconds
;
289 txPacketRate
= txPackets
/ simSeconds
;
290 rxPacketRate
= rxPackets
/ simSeconds
;
294 * This is to read the PCI general configuration registers
// Read from PCI configuration space.
// Offsets inside the standard PCI header are delegated to the PciDev base
// class; anything in device-specific config space is unimplemented and fatal.
// NOTE(review): this chunk is a partial extraction — the return type, braces
// and the else keyword of this function are missing from this view.
297 NSGigE::ReadConfig(int offset
, int size
, uint8_t *data
)
299 if (offset
< PCI_DEVICE_SPECIFIC
)
300 PciDev::ReadConfig(offset
, size
, data
);
// Reached for offset >= PCI_DEVICE_SPECIFIC: deliberately unimplemented.
302 panic("Device specific PCI config space not implemented!\n");
306 * This is to write to the PCI general configuration registers
// Write to PCI configuration space. Standard-header offsets are delegated to
// PciDev::WriteConfig; device-specific config space panics. After the base
// write, BAR writes are caught so the PIO interface address ranges track the
// (re)programmed base addresses.
// NOTE(review): partial extraction — the enclosing switch statement, braces
// and several command-register assertions are missing from this view.
309 NSGigE::WriteConfig(int offset
, int size
, uint32_t data
)
311 if (offset
< PCI_DEVICE_SPECIFIC
)
312 PciDev::WriteConfig(offset
, size
, data
);
314 panic("Device specific PCI config space not implemented!\n");
316 // Need to catch writes to BARs to update the PIO interface
318 // seems to work fine without all these PCI settings, but i
319 // put in the IO to double check, an assertion will fail if we
320 // need to properly implement it
// Checks of the PCI command register bits: I/O space enable, bus-master
// enable, memory space enable.
322 if (config
.data
[offset
] & PCI_CMD_IOSE
)
328 if (config
.data
[offset
] & PCI_CMD_BME
) {
335 if (config
.data
[offset
] & PCI_CMD_MSE
) {
// BAR0 reprogrammed: register the new PIO address range and strip the
// uncacheable address bits from the stored base address.
344 case PCI0_BASE_ADDR0
:
345 if (BARAddrs
[0] != 0) {
347 pioInterface
->addAddrRange(RangeSize(BARAddrs
[0], BARSize
[0]));
349 BARAddrs
[0] &= PA_UNCACHED_MASK
;
// BAR1: same handling as BAR0.
352 case PCI0_BASE_ADDR1
:
353 if (BARAddrs
[1] != 0) {
355 pioInterface
->addAddrRange(RangeSize(BARAddrs
[1], BARSize
[1]));
357 BARAddrs
[1] &= PA_UNCACHED_MASK
;
364 * This reads the device registers, which are detailed in the NS83820
368 NSGigE::read(MemReqPtr
&req
, uint8_t *data
)
372 //The mask is to give you only the offset into the device register file
373 Addr daddr
= req
->paddr
& 0xfff;
374 DPRINTF(EthernetPIO
, "read da=%#x pa=%#x va=%#x size=%d\n",
375 daddr
, req
->paddr
, req
->vaddr
, req
->size
);
378 // there are some reserved registers, you can see ns_gige_reg.h and
379 // the spec sheet for details
380 if (daddr
> LAST
&& daddr
<= RESERVED
) {
381 panic("Accessing reserved register");
382 } else if (daddr
> RESERVED
&& daddr
<= 0x3FC) {
383 ReadConfig(daddr
& 0xff, req
->size
, data
);
385 } else if (daddr
>= MIB_START
&& daddr
<= MIB_END
) {
386 // don't implement all the MIB's. hopefully the kernel
387 // doesn't actually DEPEND upon their values
388 // MIB are just hardware stats keepers
389 uint32_t ®
= *(uint32_t *) data
;
392 } else if (daddr
> 0x3FC)
393 panic("Something is messed up!\n");
396 case sizeof(uint32_t):
398 uint32_t ®
= *(uint32_t *)data
;
403 //these are supposed to be cleared on a read
404 reg
&= ~(CR_RXD
| CR_TXD
| CR_TXR
| CR_RXR
);
421 devIntrClear(ISR_ALL
);
476 // see the spec sheet for how RFCR and RFDR work
477 // basically, you write to RFCR to tell the machine
478 // what you want to do next, then you act upon RFDR,
479 // and the device will be prepared b/c of what you
486 switch (regs
.rfcr
& RFCR_RFADDR
) {
488 reg
= rom
.perfectMatch
[1];
490 reg
+= rom
.perfectMatch
[0];
493 reg
= rom
.perfectMatch
[3] << 8;
494 reg
+= rom
.perfectMatch
[2];
497 reg
= rom
.perfectMatch
[5] << 8;
498 reg
+= rom
.perfectMatch
[4];
501 panic("reading RFDR for something other than PMATCH!\n");
502 // didn't implement other RFDR functionality b/c
503 // driver didn't use it
513 reg
&= ~(MIBC_MIBS
| MIBC_ACLR
);
557 panic("reading unimplemented register: addr=%#x", daddr
);
560 DPRINTF(EthernetPIO
, "read from %#x: data=%d data=%#x\n",
566 panic("accessing register with invalid size: addr=%#x, size=%d",
574 NSGigE::write(MemReqPtr
&req
, const uint8_t *data
)
578 Addr daddr
= req
->paddr
& 0xfff;
579 DPRINTF(EthernetPIO
, "write da=%#x pa=%#x va=%#x size=%d\n",
580 daddr
, req
->paddr
, req
->vaddr
, req
->size
);
582 if (daddr
> LAST
&& daddr
<= RESERVED
) {
583 panic("Accessing reserved register");
584 } else if (daddr
> RESERVED
&& daddr
<= 0x3FC) {
585 WriteConfig(daddr
& 0xff, req
->size
, *(uint32_t *)data
);
587 } else if (daddr
> 0x3FC)
588 panic("Something is messed up!\n");
590 if (req
->size
== sizeof(uint32_t)) {
591 uint32_t reg
= *(uint32_t *)data
;
592 DPRINTF(EthernetPIO
, "write data=%d data=%#x\n", reg
, reg
);
599 } else if (reg
& CR_TXE
) {
602 // the kernel is enabling the transmit machine
603 if (txState
== txIdle
)
609 } else if (reg
& CR_RXE
) {
612 if (rxState
== rxIdle
)
623 devIntrPost(ISR_SWI
);
634 if (reg
& CFG_LNKSTS
||
637 reg
& CFG_RESERVED
||
640 panic("writing to read-only or reserved CFG bits!\n");
642 regs
.config
|= reg
& ~(CFG_LNKSTS
| CFG_SPDSTS
| CFG_DUPSTS
|
643 CFG_RESERVED
| CFG_T64ADDR
| CFG_PCI64_DET
);
645 // all these #if 0's are because i don't THINK the kernel needs to
646 // have these implemented. if there is a problem relating to one of
647 // these, you may need to add functionality in.
649 if (reg
& CFG_TBI_EN
) ;
650 if (reg
& CFG_MODE_1000
) ;
653 if (reg
& CFG_AUTO_1000
)
654 panic("CFG_AUTO_1000 not implemented!\n");
657 if (reg
& CFG_PINT_DUPSTS
||
658 reg
& CFG_PINT_LNKSTS
||
659 reg
& CFG_PINT_SPDSTS
)
662 if (reg
& CFG_TMRTEST
) ;
663 if (reg
& CFG_MRM_DIS
) ;
664 if (reg
& CFG_MWI_DIS
) ;
666 if (reg
& CFG_T64ADDR
)
667 panic("CFG_T64ADDR is read only register!\n");
669 if (reg
& CFG_PCI64_DET
)
670 panic("CFG_PCI64_DET is read only register!\n");
672 if (reg
& CFG_DATA64_EN
) ;
673 if (reg
& CFG_M64ADDR
) ;
674 if (reg
& CFG_PHY_RST
) ;
675 if (reg
& CFG_PHY_DIS
) ;
678 if (reg
& CFG_EXTSTS_EN
)
681 extstsEnable
= false;
684 if (reg
& CFG_REQALG
) ;
688 if (reg
& CFG_PESEL
) ;
689 if (reg
& CFG_BROM_DIS
) ;
690 if (reg
& CFG_EXT_125
) ;
697 // since phy is completely faked, MEAR_MD* don't matter
698 // and since the driver never uses MEAR_EE*, they don't
701 if (reg
& MEAR_EEDI
) ;
702 if (reg
& MEAR_EEDO
) ; // this one is read only
703 if (reg
& MEAR_EECLK
) ;
704 if (reg
& MEAR_EESEL
) ;
705 if (reg
& MEAR_MDIO
) ;
706 if (reg
& MEAR_MDDIR
) ;
707 if (reg
& MEAR_MDC
) ;
712 regs
.ptscr
= reg
& ~(PTSCR_RBIST_RDONLY
);
713 // these control BISTs for various parts of chip - we
714 // don't care or do just fake that the BIST is done
715 if (reg
& PTSCR_RBIST_EN
)
716 regs
.ptscr
|= PTSCR_RBIST_DONE
;
717 if (reg
& PTSCR_EEBIST_EN
)
718 regs
.ptscr
&= ~PTSCR_EEBIST_EN
;
719 if (reg
& PTSCR_EELOAD_EN
)
720 regs
.ptscr
&= ~PTSCR_EELOAD_EN
;
723 case ISR
: /* writing to the ISR has no effect */
724 panic("ISR is a read only register!\n");
737 /* not going to implement real interrupt holdoff */
741 regs
.txdp
= (reg
& 0xFFFFFFFC);
742 assert(txState
== txIdle
);
753 if (reg
& TXCFG_CSI
) ;
754 if (reg
& TXCFG_HBI
) ;
755 if (reg
& TXCFG_MLB
) ;
756 if (reg
& TXCFG_ATP
) ;
757 if (reg
& TXCFG_ECRETRY
) {
759 * this could easily be implemented, but considering
760 * the network is just a fake pipe, wouldn't make
765 if (reg
& TXCFG_BRST_DIS
) ;
769 /* we handle our own DMA, ignore the kernel's exhortations */
770 if (reg
& TXCFG_MXDMA
) ;
773 // also, we currently don't care about fill/drain
774 // thresholds though this may change in the future with
775 // more realistic networks or a driver which changes it
776 // according to feedback
782 /* these just control general purpose i/o pins, don't matter */
797 if (reg
& RXCFG_AEP
) ;
798 if (reg
& RXCFG_ARP
) ;
799 if (reg
& RXCFG_STRIPCRC
) ;
800 if (reg
& RXCFG_RX_RD
) ;
801 if (reg
& RXCFG_ALP
) ;
802 if (reg
& RXCFG_AIRL
) ;
804 /* we handle our own DMA, ignore what kernel says about it */
805 if (reg
& RXCFG_MXDMA
) ;
807 //also, we currently don't care about fill/drain thresholds
808 //though this may change in the future with more realistic
809 //networks or a driver which changes it according to feedback
810 if (reg
& (RXCFG_DRTH
| RXCFG_DRTH0
)) ;
815 /* there is no priority queueing used in the linux 2.6 driver */
820 /* not going to implement wake on LAN */
825 /* not going to implement pause control */
832 rxFilterEnable
= (reg
& RFCR_RFEN
) ? true : false;
833 acceptBroadcast
= (reg
& RFCR_AAB
) ? true : false;
834 acceptMulticast
= (reg
& RFCR_AAM
) ? true : false;
835 acceptUnicast
= (reg
& RFCR_AAU
) ? true : false;
836 acceptPerfect
= (reg
& RFCR_APM
) ? true : false;
837 acceptArp
= (reg
& RFCR_AARP
) ? true : false;
841 panic("RFCR_APAT not implemented!\n");
844 if (reg
& RFCR_MHEN
|| reg
& RFCR_UHEN
)
845 panic("hash filtering not implemented!\n");
848 panic("RFCR_ULM not implemented!\n");
853 panic("the driver never writes to RFDR, something is wrong!\n");
856 panic("the driver never uses BRAR, something is wrong!\n");
859 panic("the driver never uses BRDR, something is wrong!\n");
862 panic("SRR is read only register!\n");
865 panic("the driver never uses MIBC, something is wrong!\n");
876 panic("the driver never uses VDR, something is wrong!\n");
880 /* not going to implement clockrun stuff */
886 if (reg
& TBICR_MR_LOOPBACK
)
887 panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
889 if (reg
& TBICR_MR_AN_ENABLE
) {
890 regs
.tanlpar
= regs
.tanar
;
891 regs
.tbisr
|= (TBISR_MR_AN_COMPLETE
| TBISR_MR_LINK_STATUS
);
895 if (reg
& TBICR_MR_RESTART_AN
) ;
901 panic("TBISR is read only register!\n");
906 panic("this isn't used in driver, something wrong!\n");
909 panic("this isn't used in driver, something wrong!\n");
913 panic("this should only be written to by the fake phy!\n");
916 panic("TANER is read only register!\n");
923 panic("invalid register access daddr=%#x", daddr
);
926 panic("Invalid Request Size");
// Post device-level interrupt(s) into the ISR. Reserved bits are fatal;
// unimplemented bits produce a warning and are masked off before being
// OR'd into regs.isr. If any posted bit is also enabled in the mask
// register (imr), a CPU interrupt is raised (delayed unless ISR_NODELAY).
// NOTE(review): partial extraction — braces and the cpuIntrPost() call that
// presumably follows the final condition are missing from this view.
933 NSGigE::devIntrPost(uint32_t interrupts
)
935 if (interrupts
& ISR_RESERVE
)
936 panic("Cannot set a reserved interrupt");
938 if (interrupts
& ISR_NOIMPL
)
939 warn("interrupt not implemented %#x\n", interrupts
);
// Drop unimplemented bits, then accumulate into the interrupt status reg.
941 interrupts
&= ~ISR_NOIMPL
;
942 regs
.isr
|= interrupts
;
944 DPRINTF(EthernetIntr
,
945 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
946 interrupts
, regs
.isr
, regs
.imr
);
// Only unmasked interrupts reach the CPU.
948 if ((regs
.isr
& regs
.imr
)) {
950 if (!(regs
.isr
& regs
.imr
& ISR_NODELAY
))
// Clear device-level interrupt(s) from the ISR. Clearing reserved bits is
// fatal. If no enabled interrupt remains pending afterwards, the CPU-side
// interrupt is (presumably) withdrawn — the call after the final condition
// is missing from this extracted view.
957 NSGigE::devIntrClear(uint32_t interrupts
)
959 if (interrupts
& ISR_RESERVE
)
960 panic("Cannot clear a reserved interrupt");
// Unimplemented bits are ignored, matching devIntrPost.
962 interrupts
&= ~ISR_NOIMPL
;
963 regs
.isr
&= ~interrupts
;
965 DPRINTF(EthernetIntr
,
966 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
967 interrupts
, regs
.isr
, regs
.imr
);
// Nothing pending and unmasked any more?
969 if (!(regs
.isr
& regs
.imr
))
// React to a change of the interrupt mask register: if any pending ISR bit
// is now unmasked, post a CPU interrupt immediately (at curTick).
// NOTE(review): partial extraction — braces and any else branch (likely a
// cpuIntrClear) are missing from this view.
974 NSGigE::devIntrChangeMask()
976 DPRINTF(EthernetIntr
, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
977 regs
.isr
, regs
.imr
, regs
.isr
& regs
.imr
);
979 if (regs
.isr
& regs
.imr
)
980 cpuIntrPost(curTick
);
// Schedule the CPU-side interrupt for tick 'when'. If an earlier interrupt
// is already scheduled (intrTick != 0 and earlier than 'when'), this request
// piggybacks on it and no new event is needed. Otherwise any stale event is
// squashed and a fresh IntrEvent is scheduled at intrTick.
// NOTE(review): partial extraction — the assignments to intrTick and several
// branch bodies are missing from this view.
986 NSGigE::cpuIntrPost(Tick when
)
988 // If the interrupt you want to post is later than an interrupt
989 // already scheduled, just let it post in the coming one and don't
991 // HOWEVER, must be sure that the scheduled intrTick is in the
992 // future (this was formerly the source of a bug)
994 * @todo this warning should be removed and the intrTick code should
// Sanity: never post into the past; a scheduled intrTick must be current
// or future (0 means "none scheduled").
997 assert(when
>= curTick
);
998 assert(intrTick
>= curTick
|| intrTick
== 0);
999 if (when
> intrTick
&& intrTick
!= 0) {
1000 DPRINTF(EthernetIntr
, "don't need to schedule event...intrTick=%d\n",
1006 if (intrTick
< curTick
) {
1011 DPRINTF(EthernetIntr
, "going to schedule an interrupt for intrTick=%d\n",
// Replace any existing event: squash the old one, allocate a new
// auto-deleting IntrEvent, and schedule it at intrTick.
1015 intrEvent
->squash();
1016 intrEvent
= new IntrEvent(this, true);
1017 intrEvent
->schedule(intrTick
);
// Fire the CPU interrupt (called from the scheduled IntrEvent, so it must
// run exactly at intrTick). If an interrupt is already pending at the CPU,
// nothing new is posted; otherwise mark it pending and raise the line via
// the Tsunami cchip DRIR using this device's PCI interrupt line.
// NOTE(review): partial extraction — braces and intrTick/intrEvent
// bookkeeping lines are missing from this view.
1021 NSGigE::cpuInterrupt()
1023 assert(intrTick
== curTick
);
1025 // Whether or not there's a pending interrupt, we don't care about
1030 // Don't send an interrupt if there's already one
1031 if (cpuPendingIntr
) {
1032 DPRINTF(EthernetIntr
,
1033 "would send an interrupt now, but there's already pending\n");
1036 cpuPendingIntr
= true;
1038 DPRINTF(EthernetIntr
, "posting cchip interrupt\n");
// Platform-specific interrupt delivery (Tsunami chipset).
1039 tsunami
->cchip
->postDRIR(configData
->config
.hdr
.pci0
.interruptLine
);
// Withdraw the CPU-side interrupt: no-op if none is pending; otherwise
// squash any outstanding IntrEvent, clear the pending flag, and lower the
// line via the Tsunami cchip DRIR.
// NOTE(review): partial extraction — the early return and the guard around
// intrEvent->squash() are missing from this view.
1044 NSGigE::cpuIntrClear()
1046 if (!cpuPendingIntr
)
1050 intrEvent
->squash();
1056 cpuPendingIntr
= false;
1058 DPRINTF(EthernetIntr
, "clearing cchip interrupt\n");
1059 tsunami
->cchip
->clearDRIR(configData
->config
.hdr
.pci0
.interruptLine
);
// Accessor: true while a CPU interrupt from this device is outstanding.
1063 NSGigE::cpuIntrPending() const
1064 { return cpuPendingIntr
; }
1070 DPRINTF(Ethernet
, "transmit reset\n");
1073 txFifoAvail
= maxTxFifoSize
;
1076 assert(txDescCnt
== 0);
1079 assert(txDmaState
== dmaIdle
);
1085 DPRINTF(Ethernet
, "receive reset\n");
1088 assert(rxPktBytes
== 0);
1092 assert(rxDescCnt
== 0);
1093 assert(rxDmaState
== dmaIdle
);
1101 memset(®s
, 0, sizeof(regs
));
1102 regs
.config
= CFG_LNKSTS
;
1103 regs
.mear
= MEAR_MDDIR
| MEAR_EEDO
;
1104 regs
.txcfg
= 0x120; // set drain threshold to 1024 bytes and
1105 // fill threshold to 32 bytes
1106 regs
.rxcfg
= 0x4; // set drain threshold to 16 bytes
1107 regs
.srr
= 0x0103; // set the silicon revision to rev B or 0x103
1108 regs
.mibc
= MIBC_FRZ
;
1109 regs
.vdr
= 0x81; // set the vlan tag type to 802.1q
1110 regs
.tesr
= 0xc000; // TBI capable of both full and half duplex
1112 extstsEnable
= false;
1113 acceptBroadcast
= false;
1114 acceptMulticast
= false;
1115 acceptUnicast
= false;
1116 acceptPerfect
= false;
// Perform the actual functional copy for an rx-side DMA read: pull
// rxDmaLen bytes from physical memory at rxDmaAddr into rxDmaData, then
// return the rx DMA state machine to idle.
1121 NSGigE::rxDmaReadCopy()
1123 assert(rxDmaState
== dmaReading
);
1125 physmem
->dma_read((uint8_t *)rxDmaData
, rxDmaAddr
, rxDmaLen
);
1126 rxDmaState
= dmaIdle
;
1128 DPRINTF(EthernetDMA
, "rx dma read paddr=%#x len=%d\n",
1129 rxDmaAddr
, rxDmaLen
);
1130 DDUMP(EthernetDMA
, rxDmaData
, rxDmaLen
);
// Kick off an rx-side DMA read. Three timing models, visible below:
//  1. a modelled DMA interface (unless this transfer is marked free):
//     if busy, park in dmaReadWaiting; otherwise issue a timed doDMA Read
//     that fires rxDmaReadEvent on completion;
//  2. zero delay/factor: the copy happens immediately (branch body missing
//     from this extracted view);
//  3. otherwise: schedule rxDmaReadEvent after a latency of
//     dmaReadDelay + (64-byte chunks) * dmaReadFactor.
// NOTE(review): partial extraction — braces/else lines are missing here.
1134 NSGigE::doRxDmaRead()
1136 assert(rxDmaState
== dmaIdle
|| rxDmaState
== dmaReadWaiting
);
1137 rxDmaState
= dmaReading
;
1139 if (dmaInterface
&& !rxDmaFree
) {
1140 if (dmaInterface
->busy())
1141 rxDmaState
= dmaReadWaiting
;
1143 dmaInterface
->doDMA(Read
, rxDmaAddr
, rxDmaLen
, curTick
,
1144 &rxDmaReadEvent
, true);
1148 if (dmaReadDelay
== 0 && dmaReadFactor
== 0) {
// Latency scales with transfer size in 64-byte units.
1153 Tick factor
= ((rxDmaLen
+ ULL(63)) >> ULL(6)) * dmaReadFactor
;
1154 Tick start
= curTick
+ dmaReadDelay
+ factor
;
1155 rxDmaReadEvent
.schedule(start
);
// Completion handler for an rx DMA read: after the copy, if the tx state
// machine was stalled waiting for the DMA interface, it gets priority
// (the call made under that condition is missing from this extracted view).
1160 NSGigE::rxDmaReadDone()
1162 assert(rxDmaState
== dmaReading
);
1165 // If the transmit state machine has a pending DMA, let it go first
1166 if (txDmaState
== dmaReadWaiting
|| txDmaState
== dmaWriteWaiting
)
// Perform the actual functional copy for an rx-side DMA write: push
// rxDmaLen bytes from rxDmaData into physical memory at rxDmaAddr, then
// return the rx DMA state machine to idle.
1173 NSGigE::rxDmaWriteCopy()
1175 assert(rxDmaState
== dmaWriting
);
1177 physmem
->dma_write(rxDmaAddr
, (uint8_t *)rxDmaData
, rxDmaLen
);
1178 rxDmaState
= dmaIdle
;
1180 DPRINTF(EthernetDMA
, "rx dma write paddr=%#x len=%d\n",
1181 rxDmaAddr
, rxDmaLen
);
1182 DDUMP(EthernetDMA
, rxDmaData
, rxDmaLen
);
// Kick off an rx-side DMA write; mirrors doRxDmaRead but issues a
// WriteInvalidate through the DMA interface and uses the write-side
// delay/factor parameters when modelling latency without an interface.
// NOTE(review): partial extraction — braces/else lines are missing here.
1186 NSGigE::doRxDmaWrite()
1188 assert(rxDmaState
== dmaIdle
|| rxDmaState
== dmaWriteWaiting
);
1189 rxDmaState
= dmaWriting
;
1191 if (dmaInterface
&& !rxDmaFree
) {
1192 if (dmaInterface
->busy())
1193 rxDmaState
= dmaWriteWaiting
;
1195 dmaInterface
->doDMA(WriteInvalidate
, rxDmaAddr
, rxDmaLen
, curTick
,
1196 &rxDmaWriteEvent
, true);
1200 if (dmaWriteDelay
== 0 && dmaWriteFactor
== 0) {
// Latency scales with transfer size in 64-byte units.
1205 Tick factor
= ((rxDmaLen
+ ULL(63)) >> ULL(6)) * dmaWriteFactor
;
1206 Tick start
= curTick
+ dmaWriteDelay
+ factor
;
1207 rxDmaWriteEvent
.schedule(start
);
// Completion handler for an rx DMA write: after the copy, a tx state
// machine stalled on the DMA interface gets priority (the call made under
// that condition is missing from this extracted view).
1212 NSGigE::rxDmaWriteDone()
1214 assert(rxDmaState
== dmaWriting
);
1217 // If the transmit state machine has a pending DMA, let it go first
1218 if (txDmaState
== dmaReadWaiting
|| txDmaState
== dmaWriteWaiting
)
1227 DPRINTF(EthernetSM
, "receive kick rxState=%s (rxBuf.size=%d)\n",
1228 NsRxStateStrings
[rxState
], rxFifo
.size());
1230 if (rxKickTick
> curTick
) {
1231 DPRINTF(EthernetSM
, "receive kick exiting, can't run till %d\n",
1237 switch(rxDmaState
) {
1238 case dmaReadWaiting
:
1242 case dmaWriteWaiting
:
1250 // see state machine from spec for details
1251 // the way this works is, if you finish work on one state and can
1252 // go directly to another, you do that through jumping to the
1253 // label "next". however, if you have intermediate work, like DMA
1254 // so that you can't go to the next state yet, you go to exit and
1255 // exit the loop. however, when the DMA is done it will trigger
1256 // an event and come back to this loop.
1260 DPRINTF(EthernetSM
, "Receive Disabled! Nothing to do.\n");
1265 rxState
= rxDescRefr
;
1267 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1268 rxDmaData
= &rxDescCache
+ offsetof(ns_desc
, link
);
1269 rxDmaLen
= sizeof(rxDescCache
.link
);
1270 rxDmaFree
= dmaDescFree
;
1273 descDmaRdBytes
+= rxDmaLen
;
1278 rxState
= rxDescRead
;
1280 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1281 rxDmaData
= &rxDescCache
;
1282 rxDmaLen
= sizeof(ns_desc
);
1283 rxDmaFree
= dmaDescFree
;
1286 descDmaRdBytes
+= rxDmaLen
;
1294 if (rxDmaState
!= dmaIdle
)
1297 rxState
= rxAdvance
;
1301 if (rxDmaState
!= dmaIdle
)
1304 DPRINTF(EthernetDesc
,
1305 "rxDescCache: addr=%08x read descriptor\n",
1306 regs
.rxdp
& 0x3fffffff);
1307 DPRINTF(EthernetDesc
,
1308 "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
1309 rxDescCache
.link
, rxDescCache
.bufptr
, rxDescCache
.cmdsts
,
1310 rxDescCache
.extsts
);
1312 if (rxDescCache
.cmdsts
& CMDSTS_OWN
) {
1313 devIntrPost(ISR_RXIDLE
);
1317 rxState
= rxFifoBlock
;
1318 rxFragPtr
= rxDescCache
.bufptr
;
1319 rxDescCnt
= rxDescCache
.cmdsts
& CMDSTS_LEN_MASK
;
1326 * @todo in reality, we should be able to start processing
1327 * the packet as it arrives, and not have to wait for the
1328 * full packet ot be in the receive fifo.
1333 DPRINTF(EthernetSM
, "****processing receive of new packet****\n");
1335 // If we don't have a packet, grab a new one from the fifo.
1336 rxPacket
= rxFifo
.front();
1337 rxPktBytes
= rxPacket
->length
;
1338 rxPacketBufPtr
= rxPacket
->data
;
1341 if (DTRACE(Ethernet
)) {
1342 const IpHdr
*ip
= rxPacket
->ip();
1344 DPRINTF(Ethernet
, "ID is %d\n", ip
->id());
1345 const TcpHdr
*tcp
= rxPacket
->tcp();
1347 DPRINTF(Ethernet
, "Src Port=%d, Dest Port=%d\n",
1348 tcp
->sport(), tcp
->dport());
1354 // sanity check - i think the driver behaves like this
1355 assert(rxDescCnt
>= rxPktBytes
);
1357 // Must clear the value before popping to decrement the
1359 rxFifo
.front() = NULL
;
1361 rxFifoCnt
-= rxPacket
->length
;
1365 // dont' need the && rxDescCnt > 0 if driver sanity check
1367 if (rxPktBytes
> 0) {
1368 rxState
= rxFragWrite
;
1369 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
1371 rxXferLen
= rxPktBytes
;
1373 rxDmaAddr
= rxFragPtr
& 0x3fffffff;
1374 rxDmaData
= rxPacketBufPtr
;
1375 rxDmaLen
= rxXferLen
;
1376 rxDmaFree
= dmaDataFree
;
1382 rxState
= rxDescWrite
;
1384 //if (rxPktBytes == 0) { /* packet is done */
1385 assert(rxPktBytes
== 0);
1386 DPRINTF(EthernetSM
, "done with receiving packet\n");
1388 rxDescCache
.cmdsts
|= CMDSTS_OWN
;
1389 rxDescCache
.cmdsts
&= ~CMDSTS_MORE
;
1390 rxDescCache
.cmdsts
|= CMDSTS_OK
;
1391 rxDescCache
.cmdsts
&= 0xffff0000;
1392 rxDescCache
.cmdsts
+= rxPacket
->length
; //i.e. set CMDSTS_SIZE
1396 * all the driver uses these are for its own stats keeping
1397 * which we don't care about, aren't necessary for
1398 * functionality and doing this would just slow us down.
1399 * if they end up using this in a later version for
1400 * functional purposes, just undef
1402 if (rxFilterEnable
) {
1403 rxDescCache
.cmdsts
&= ~CMDSTS_DEST_MASK
;
1404 EthHdr
*eth
= rxFifoFront()->eth();
1406 rxDescCache
.cmdsts
|= CMDSTS_DEST_SELF
;
1407 if (eth
->multicast())
1408 rxDescCache
.cmdsts
|= CMDSTS_DEST_MULTI
;
1409 if (eth
->broadcast())
1410 rxDescCache
.cmdsts
|= CMDSTS_DEST_MASK
;
1414 if (extstsEnable
&& rxPacket
->ip()) {
1415 rxDescCache
.extsts
|= EXTSTS_IPPKT
;
1417 IpHdr
*ip
= rxPacket
->ip();
1418 if (ip
->ip_cksum() != 0) {
1419 DPRINTF(EthernetCksum
, "Rx IP Checksum Error\n");
1420 rxDescCache
.extsts
|= EXTSTS_IPERR
;
1422 if (rxPacket
->tcp()) {
1423 rxDescCache
.extsts
|= EXTSTS_TCPPKT
;
1425 if (ip
->tu_cksum() != 0) {
1426 DPRINTF(EthernetCksum
, "Rx TCP Checksum Error\n");
1427 rxDescCache
.extsts
|= EXTSTS_TCPERR
;
1430 } else if (rxPacket
->udp()) {
1431 rxDescCache
.extsts
|= EXTSTS_UDPPKT
;
1433 if (ip
->tu_cksum() != 0) {
1434 DPRINTF(EthernetCksum
, "Rx UDP Checksum Error\n");
1435 rxDescCache
.extsts
|= EXTSTS_UDPERR
;
1442 * the driver seems to always receive into desc buffers
1443 * of size 1514, so you never have a pkt that is split
1444 * into multiple descriptors on the receive side, so
1445 * i don't implement that case, hence the assert above.
1448 DPRINTF(EthernetDesc
,
1449 "rxDescCache: addr=%08x writeback cmdsts extsts\n",
1450 regs
.rxdp
& 0x3fffffff);
1451 DPRINTF(EthernetDesc
,
1452 "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
1453 rxDescCache
.link
, rxDescCache
.bufptr
, rxDescCache
.cmdsts
,
1454 rxDescCache
.extsts
);
1456 rxDmaAddr
= (regs
.rxdp
+ offsetof(ns_desc
, cmdsts
)) & 0x3fffffff;
1457 rxDmaData
= &(rxDescCache
.cmdsts
);
1458 rxDmaLen
= sizeof(rxDescCache
.cmdsts
) + sizeof(rxDescCache
.extsts
);
1459 rxDmaFree
= dmaDescFree
;
1462 descDmaWrBytes
+= rxDmaLen
;
1470 if (rxDmaState
!= dmaIdle
)
1473 rxPacketBufPtr
+= rxXferLen
;
1474 rxFragPtr
+= rxXferLen
;
1475 rxPktBytes
-= rxXferLen
;
1477 rxState
= rxFifoBlock
;
1481 if (rxDmaState
!= dmaIdle
)
1484 assert(rxDescCache
.cmdsts
& CMDSTS_OWN
);
1486 assert(rxPacket
== 0);
1487 devIntrPost(ISR_RXOK
);
1489 if (rxDescCache
.cmdsts
& CMDSTS_INTR
)
1490 devIntrPost(ISR_RXDESC
);
1493 DPRINTF(EthernetSM
, "Halting the RX state machine\n");
1497 rxState
= rxAdvance
;
1501 if (rxDescCache
.link
== 0) {
1502 devIntrPost(ISR_RXIDLE
);
1507 rxState
= rxDescRead
;
1508 regs
.rxdp
= rxDescCache
.link
;
1511 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1512 rxDmaData
= &rxDescCache
;
1513 rxDmaLen
= sizeof(ns_desc
);
1514 rxDmaFree
= dmaDescFree
;
1522 panic("Invalid rxState!");
1525 DPRINTF(EthernetSM
, "entering next rxState=%s\n",
1526 NsRxStateStrings
[rxState
]);
1532 * @todo do we want to schedule a future kick?
1534 DPRINTF(EthernetSM
, "rx state machine exited rxState=%s\n",
1535 NsRxStateStrings
[rxState
]);
// Body of the transmit routine (signature missing from this extracted
// view; presumably NSGigE::transmit). Sends the packet at the front of
// txFifo out the EtherInt interface; on success it updates the byte stats,
// returns the packet's space to the fifo accounting, posts ISR_TXOK, and
// drops the fifo slot. If anything remains queued, a retry is scheduled
// 1000 ticks out via txEvent.
// NOTE(review): partial extraction — braces, early returns and the fifo
// pop following the front()=NULL marker are missing from this view.
1541 if (txFifo
.empty()) {
1542 DPRINTF(Ethernet
, "nothing to transmit\n");
1546 DPRINTF(Ethernet
, "Attempt Pkt Transmit: txFifo length=%d\n",
1547 maxTxFifoSize
- txFifoAvail
);
1548 if (interface
->sendPacket(txFifo
.front())) {
// Debug-only peek at IP/TCP headers of the outgoing packet.
1550 if (DTRACE(Ethernet
)) {
1551 const IpHdr
*ip
= txFifo
.front()->ip();
1553 DPRINTF(Ethernet
, "ID is %d\n", ip
->id());
1554 const TcpHdr
*tcp
= txFifo
.front()->tcp();
1556 DPRINTF(Ethernet
, "Src Port=%d, Dest Port=%d\n",
1557 tcp
->sport(), tcp
->dport());
1563 DDUMP(Ethernet
, txFifo
.front()->data
, txFifo
.front()->length
);
// Account the transmitted bytes and reclaim fifo space.
1564 txBytes
+= txFifo
.front()->length
;
1567 txFifoAvail
+= txFifo
.front()->length
;
1569 DPRINTF(Ethernet
, "Successful Xmit! now txFifoAvail is %d\n",
// Clear the slot before popping so shared-pointer accounting drops it.
1571 txFifo
.front() = NULL
;
1575 * normally do a writeback of the descriptor here, and ONLY
1576 * after that is done, send this interrupt. but since our
1577 * stuff never actually fails, just do this interrupt here,
1578 * otherwise the code has to stray from this nice format.
1579 * besides, it's functionally the same.
1581 devIntrPost(ISR_TXOK
);
1584 "May need to rethink always sending the descriptors back?\n");
// More queued work and no retry pending yet: schedule one.
1587 if (!txFifo
.empty() && !txEvent
.scheduled()) {
1588 DPRINTF(Ethernet
, "reschedule transmit\n");
1589 txEvent
.schedule(curTick
+ 1000);
// Perform the actual functional copy for a tx-side DMA read: pull
// txDmaLen bytes from physical memory at txDmaAddr into txDmaData, then
// return the tx DMA state machine to idle.
1594 NSGigE::txDmaReadCopy()
1596 assert(txDmaState
== dmaReading
);
1598 physmem
->dma_read((uint8_t *)txDmaData
, txDmaAddr
, txDmaLen
);
1599 txDmaState
= dmaIdle
;
1601 DPRINTF(EthernetDMA
, "tx dma read paddr=%#x len=%d\n",
1602 txDmaAddr
, txDmaLen
);
1603 DDUMP(EthernetDMA
, txDmaData
, txDmaLen
);
// Kick off a tx-side DMA read; mirrors doRxDmaRead with the tx state and
// tx read event. Note the factor check compares against 0.0 here where the
// rx-side uses 0 — presumably equivalent given Tick arithmetic, but an
// inconsistency worth confirming against the full file.
// NOTE(review): partial extraction — braces/else lines are missing here.
1607 NSGigE::doTxDmaRead()
1609 assert(txDmaState
== dmaIdle
|| txDmaState
== dmaReadWaiting
);
1610 txDmaState
= dmaReading
;
1612 if (dmaInterface
&& !txDmaFree
) {
1613 if (dmaInterface
->busy())
1614 txDmaState
= dmaReadWaiting
;
1616 dmaInterface
->doDMA(Read
, txDmaAddr
, txDmaLen
, curTick
,
1617 &txDmaReadEvent
, true);
1621 if (dmaReadDelay
== 0 && dmaReadFactor
== 0.0) {
// Latency scales with transfer size in 64-byte units.
1626 Tick factor
= ((txDmaLen
+ ULL(63)) >> ULL(6)) * dmaReadFactor
;
1627 Tick start
= curTick
+ dmaReadDelay
+ factor
;
1628 txDmaReadEvent
.schedule(start
);
// Completion handler for a tx DMA read: after the copy, an rx state
// machine stalled on the DMA interface gets priority (the call made under
// that condition is missing from this extracted view).
1633 NSGigE::txDmaReadDone()
1635 assert(txDmaState
== dmaReading
);
1638 // If the receive state machine has a pending DMA, let it go first
1639 if (rxDmaState
== dmaReadWaiting
|| rxDmaState
== dmaWriteWaiting
)
// Perform the actual functional copy for a tx-side DMA write: push
// txDmaLen bytes from txDmaData into physical memory at txDmaAddr, then
// return the tx DMA state machine to idle.
1646 NSGigE::txDmaWriteCopy()
1648 assert(txDmaState
== dmaWriting
);
1650 physmem
->dma_write(txDmaAddr
, (uint8_t *)txDmaData
, txDmaLen
);
1651 txDmaState
= dmaIdle
;
1653 DPRINTF(EthernetDMA
, "tx dma write paddr=%#x len=%d\n",
1654 txDmaAddr
, txDmaLen
);
1655 DDUMP(EthernetDMA
, txDmaData
, txDmaLen
);
// Kick off a tx-side DMA write; mirrors doRxDmaWrite with the tx state and
// tx write event (WriteInvalidate through the DMA interface, write-side
// delay/factor when modelling latency without one).
// NOTE(review): partial extraction — braces/else lines are missing here.
1659 NSGigE::doTxDmaWrite()
1661 assert(txDmaState
== dmaIdle
|| txDmaState
== dmaWriteWaiting
);
1662 txDmaState
= dmaWriting
;
1664 if (dmaInterface
&& !txDmaFree
) {
1665 if (dmaInterface
->busy())
1666 txDmaState
= dmaWriteWaiting
;
1668 dmaInterface
->doDMA(WriteInvalidate
, txDmaAddr
, txDmaLen
, curTick
,
1669 &txDmaWriteEvent
, true);
1673 if (dmaWriteDelay
== 0 && dmaWriteFactor
== 0.0) {
// Latency scales with transfer size in 64-byte units.
1678 Tick factor
= ((txDmaLen
+ ULL(63)) >> ULL(6)) * dmaWriteFactor
;
1679 Tick start
= curTick
+ dmaWriteDelay
+ factor
;
1680 txDmaWriteEvent
.schedule(start
);
// Completion handler for a tx DMA write: after the copy, an rx state
// machine stalled on the DMA interface gets priority (the call made under
// that condition is missing from this extracted view).
1685 NSGigE::txDmaWriteDone()
1687 assert(txDmaState
== dmaWriting
);
1690 // If the receive state machine has a pending DMA, let it go first
1691 if (rxDmaState
== dmaReadWaiting
|| rxDmaState
== dmaWriteWaiting
)
1700 DPRINTF(EthernetSM
, "transmit kick txState=%s\n",
1701 NsTxStateStrings
[txState
]);
1703 if (txKickTick
> curTick
) {
1704 DPRINTF(EthernetSM
, "transmit kick exiting, can't run till %d\n",
1711 switch(txDmaState
) {
1712 case dmaReadWaiting
:
1716 case dmaWriteWaiting
:
1727 DPRINTF(EthernetSM
, "Transmit disabled. Nothing to do.\n");
1732 txState
= txDescRefr
;
1734 txDmaAddr
= regs
.txdp
& 0x3fffffff;
1735 txDmaData
= &txDescCache
+ offsetof(ns_desc
, link
);
1736 txDmaLen
= sizeof(txDescCache
.link
);
1737 txDmaFree
= dmaDescFree
;
1740 descDmaRdBytes
+= txDmaLen
;
1746 txState
= txDescRead
;
1748 txDmaAddr
= regs
.txdp
& 0x3fffffff;
1749 txDmaData
= &txDescCache
;
1750 txDmaLen
= sizeof(ns_desc
);
1751 txDmaFree
= dmaDescFree
;
1754 descDmaRdBytes
+= txDmaLen
;
1762 if (txDmaState
!= dmaIdle
)
1765 txState
= txAdvance
;
1769 if (txDmaState
!= dmaIdle
)
1772 DPRINTF(EthernetDesc
,
1773 "txDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
1774 txDescCache
.link
, txDescCache
.bufptr
, txDescCache
.cmdsts
,
1775 txDescCache
.extsts
);
1777 if (txDescCache
.cmdsts
& CMDSTS_OWN
) {
1778 txState
= txFifoBlock
;
1779 txFragPtr
= txDescCache
.bufptr
;
1780 txDescCnt
= txDescCache
.cmdsts
& CMDSTS_LEN_MASK
;
1782 devIntrPost(ISR_TXIDLE
);
1790 DPRINTF(EthernetSM
, "****starting the tx of a new packet****\n");
1791 txPacket
= new PacketData
;
1792 txPacket
->data
= new uint8_t[16384];
1793 txPacketBufPtr
= txPacket
->data
;
1796 if (txDescCnt
== 0) {
1797 DPRINTF(EthernetSM
, "the txDescCnt == 0, done with descriptor\n");
1798 if (txDescCache
.cmdsts
& CMDSTS_MORE
) {
1799 DPRINTF(EthernetSM
, "there are more descriptors to come\n");
1800 txState
= txDescWrite
;
1802 txDescCache
.cmdsts
&= ~CMDSTS_OWN
;
1804 txDmaAddr
= regs
.txdp
+ offsetof(ns_desc
, cmdsts
);
1805 txDmaAddr
&= 0x3fffffff;
1806 txDmaData
= &(txDescCache
.cmdsts
);
1807 txDmaLen
= sizeof(txDescCache
.cmdsts
);
1808 txDmaFree
= dmaDescFree
;
1813 } else { /* this packet is totally done */
1814 DPRINTF(EthernetSM
, "This packet is done, let's wrap it up\n");
1815 /* deal with the the packet that just finished */
1816 if ((regs
.vtcr
& VTCR_PPCHK
) && extstsEnable
) {
1817 IpHdr
*ip
= txPacket
->ip();
1818 if (txDescCache
.extsts
& EXTSTS_UDPPKT
) {
1819 UdpHdr
*udp
= txPacket
->udp();
1821 udp
->sum(ip
->tu_cksum());
1823 } else if (txDescCache
.extsts
& EXTSTS_TCPPKT
) {
1824 TcpHdr
*tcp
= txPacket
->tcp();
1826 tcp
->sum(ip
->tu_cksum());
1829 if (txDescCache
.extsts
& EXTSTS_IPPKT
) {
1831 ip
->sum(ip
->ip_cksum());
1836 txPacket
->length
= txPacketBufPtr
- txPacket
->data
;
1837 // this is just because the receive can't handle a
1838 // packet bigger want to make sure
1839 assert(txPacket
->length
<= 1514);
1840 txFifo
.push_back(txPacket
);
1843 * this following section is not tqo spec, but
1844 * functionally shouldn't be any different. normally,
1845 * the chip will wait til the transmit has occurred
1846 * before writing back the descriptor because it has
1847 * to wait to see that it was successfully transmitted
1848 * to decide whether to set CMDSTS_OK or not.
1849 * however, in the simulator since it is always
1850 * successfully transmitted, and writing it exactly to
1851 * spec would complicate the code, we just do it here
1854 txDescCache
.cmdsts
&= ~CMDSTS_OWN
;
1855 txDescCache
.cmdsts
|= CMDSTS_OK
;
1857 DPRINTF(EthernetDesc
,
1858 "txDesc writeback: cmdsts=%08x extsts=%08x\n",
1859 txDescCache
.cmdsts
, txDescCache
.extsts
);
1861 txDmaAddr
= regs
.txdp
+ offsetof(ns_desc
, cmdsts
);
1862 txDmaAddr
&= 0x3fffffff;
1863 txDmaData
= &(txDescCache
.cmdsts
);
1864 txDmaLen
= sizeof(txDescCache
.cmdsts
) +
1865 sizeof(txDescCache
.extsts
);
1866 txDmaFree
= dmaDescFree
;
1869 descDmaWrBytes
+= txDmaLen
;
1875 DPRINTF(EthernetSM
, "halting TX state machine\n");
1879 txState
= txAdvance
;
1885 DPRINTF(EthernetSM
, "this descriptor isn't done yet\n");
1887 txState
= txFragRead
;
1890 * The number of bytes transferred is either whatever
1891 * is left in the descriptor (txDescCnt), or if there
1892 * is not enough room in the fifo, just whatever room
1893 * is left in the fifo
1895 txXferLen
= min
<uint32_t>(txDescCnt
, txFifoAvail
);
1897 txDmaAddr
= txFragPtr
& 0x3fffffff;
1898 txDmaData
= txPacketBufPtr
;
1899 txDmaLen
= txXferLen
;
1900 txDmaFree
= dmaDataFree
;
1905 txState
= txFifoBlock
;
1915 if (txDmaState
!= dmaIdle
)
1918 txPacketBufPtr
+= txXferLen
;
1919 txFragPtr
+= txXferLen
;
1920 txDescCnt
-= txXferLen
;
1921 txFifoAvail
-= txXferLen
;
1923 txState
= txFifoBlock
;
1927 if (txDmaState
!= dmaIdle
)
1930 if (txDescCache
.cmdsts
& CMDSTS_INTR
)
1931 devIntrPost(ISR_TXDESC
);
1933 txState
= txAdvance
;
1937 if (txDescCache
.link
== 0) {
1938 devIntrPost(ISR_TXIDLE
);
1942 txState
= txDescRead
;
1943 regs
.txdp
= txDescCache
.link
;
1946 txDmaAddr
= txDescCache
.link
& 0x3fffffff;
1947 txDmaData
= &txDescCache
;
1948 txDmaLen
= sizeof(ns_desc
);
1949 txDmaFree
= dmaDescFree
;
1957 panic("invalid state");
1960 DPRINTF(EthernetSM
, "entering next txState=%s\n",
1961 NsTxStateStrings
[txState
]);
1967 * @todo do we want to schedule a future kick?
1969 DPRINTF(EthernetSM
, "tx state machine exited txState=%s\n",
1970 NsTxStateStrings
[txState
]);
1974 NSGigE::transferDone()
1976 if (txFifo
.empty()) {
1977 DPRINTF(Ethernet
, "transfer complete: txFifo empty...nothing to do\n");
1981 DPRINTF(Ethernet
, "transfer complete: data in txFifo...schedule xmit\n");
1983 if (txEvent
.scheduled())
1984 txEvent
.reschedule(curTick
+ 1);
1986 txEvent
.schedule(curTick
+ 1);
1990 NSGigE::rxFilter(PacketPtr packet
)
1995 EthHdr
*eth
= packet
->eth();
1996 if (eth
->unicast()) {
1997 // If we're accepting all unicast addresses
2001 // If we make a perfect match
2002 if (acceptPerfect
&&
2003 memcmp(rom
.perfectMatch
, packet
->data
, EADDR_LEN
) == 0)
2006 if (acceptArp
&& eth
->type() == ETH_TYPE_ARP
)
2009 } else if (eth
->broadcast()) {
2010 // if we're accepting broadcasts
2011 if (acceptBroadcast
)
2014 } else if (eth
->multicast()) {
2015 // if we're accepting all multicasts
2016 if (acceptMulticast
)
2022 DPRINTF(Ethernet
, "rxFilter drop\n");
2023 DDUMP(EthernetData
, packet
->data
, packet
->length
);
2030 NSGigE::recvPacket(PacketPtr packet
)
2032 rxBytes
+= packet
->length
;
2035 DPRINTF(Ethernet
, "Receiving packet from wire, rxFifoAvail=%d\n",
2036 maxRxFifoSize
- rxFifoCnt
);
2039 DPRINTF(Ethernet
, "receive disabled...packet dropped\n");
2041 interface
->recvDone();
2045 if (rxFilterEnable
&& rxFilter(packet
)) {
2046 DPRINTF(Ethernet
, "packet filtered...dropped\n");
2047 interface
->recvDone();
2051 if ((rxFifoCnt
+ packet
->length
) >= maxRxFifoSize
) {
2053 "packet will not fit in receive buffer...packet dropped\n");
2054 devIntrPost(ISR_RXORN
);
2058 rxFifo
.push_back(packet
);
2059 rxFifoCnt
+= packet
->length
;
2060 interface
->recvDone();
2066 //=====================================================================
2070 NSGigE::serialize(ostream
&os
)
2072 // Serialize the PciDev base class
2073 PciDev::serialize(os
);
2076 * Finalize any DMA events now.
2078 if (rxDmaReadEvent
.scheduled())
2080 if (rxDmaWriteEvent
.scheduled())
2082 if (txDmaReadEvent
.scheduled())
2084 if (txDmaWriteEvent
.scheduled())
2088 * Serialize the device registers
2090 SERIALIZE_SCALAR(regs
.command
);
2091 SERIALIZE_SCALAR(regs
.config
);
2092 SERIALIZE_SCALAR(regs
.mear
);
2093 SERIALIZE_SCALAR(regs
.ptscr
);
2094 SERIALIZE_SCALAR(regs
.isr
);
2095 SERIALIZE_SCALAR(regs
.imr
);
2096 SERIALIZE_SCALAR(regs
.ier
);
2097 SERIALIZE_SCALAR(regs
.ihr
);
2098 SERIALIZE_SCALAR(regs
.txdp
);
2099 SERIALIZE_SCALAR(regs
.txdp_hi
);
2100 SERIALIZE_SCALAR(regs
.txcfg
);
2101 SERIALIZE_SCALAR(regs
.gpior
);
2102 SERIALIZE_SCALAR(regs
.rxdp
);
2103 SERIALIZE_SCALAR(regs
.rxdp_hi
);
2104 SERIALIZE_SCALAR(regs
.rxcfg
);
2105 SERIALIZE_SCALAR(regs
.pqcr
);
2106 SERIALIZE_SCALAR(regs
.wcsr
);
2107 SERIALIZE_SCALAR(regs
.pcr
);
2108 SERIALIZE_SCALAR(regs
.rfcr
);
2109 SERIALIZE_SCALAR(regs
.rfdr
);
2110 SERIALIZE_SCALAR(regs
.srr
);
2111 SERIALIZE_SCALAR(regs
.mibc
);
2112 SERIALIZE_SCALAR(regs
.vrcr
);
2113 SERIALIZE_SCALAR(regs
.vtcr
);
2114 SERIALIZE_SCALAR(regs
.vdr
);
2115 SERIALIZE_SCALAR(regs
.ccsr
);
2116 SERIALIZE_SCALAR(regs
.tbicr
);
2117 SERIALIZE_SCALAR(regs
.tbisr
);
2118 SERIALIZE_SCALAR(regs
.tanar
);
2119 SERIALIZE_SCALAR(regs
.tanlpar
);
2120 SERIALIZE_SCALAR(regs
.taner
);
2121 SERIALIZE_SCALAR(regs
.tesr
);
2123 SERIALIZE_ARRAY(rom
.perfectMatch
, EADDR_LEN
);
2125 SERIALIZE_SCALAR(ioEnable
);
2128 * Serialize the data Fifos
2130 int txNumPkts
= txFifo
.size();
2131 SERIALIZE_SCALAR(txNumPkts
);
2133 pktiter_t end
= txFifo
.end();
2134 for (pktiter_t p
= txFifo
.begin(); p
!= end
; ++p
) {
2135 nameOut(os
, csprintf("%s.txFifo%d", name(), i
++));
2136 (*p
)->serialize(os
);
2139 int rxNumPkts
= rxFifo
.size();
2140 SERIALIZE_SCALAR(rxNumPkts
);
2143 for (pktiter_t p
= rxFifo
.begin(); p
!= end
; ++p
) {
2144 nameOut(os
, csprintf("%s.rxFifo%d", name(), i
++));
2145 (*p
)->serialize(os
);
2149 * Serialize the various helper variables
2151 bool txPacketExists
= txPacket
;
2152 SERIALIZE_SCALAR(txPacketExists
);
2153 if (txPacketExists
) {
2154 nameOut(os
, csprintf("%s.txPacket", name()));
2155 txPacket
->serialize(os
);
2156 uint32_t txPktBufPtr
= (uint32_t) (txPacketBufPtr
- txPacket
->data
);
2157 SERIALIZE_SCALAR(txPktBufPtr
);
2160 bool rxPacketExists
= rxPacket
;
2161 SERIALIZE_SCALAR(rxPacketExists
);
2162 if (rxPacketExists
) {
2163 nameOut(os
, csprintf("%s.rxPacket", name()));
2164 rxPacket
->serialize(os
);
2165 uint32_t rxPktBufPtr
= (uint32_t) (rxPacketBufPtr
- rxPacket
->data
);
2166 SERIALIZE_SCALAR(rxPktBufPtr
);
2169 SERIALIZE_SCALAR(txXferLen
);
2170 SERIALIZE_SCALAR(rxXferLen
);
2173 * Serialize DescCaches
2175 SERIALIZE_SCALAR(txDescCache
.link
);
2176 SERIALIZE_SCALAR(txDescCache
.bufptr
);
2177 SERIALIZE_SCALAR(txDescCache
.cmdsts
);
2178 SERIALIZE_SCALAR(txDescCache
.extsts
);
2179 SERIALIZE_SCALAR(rxDescCache
.link
);
2180 SERIALIZE_SCALAR(rxDescCache
.bufptr
);
2181 SERIALIZE_SCALAR(rxDescCache
.cmdsts
);
2182 SERIALIZE_SCALAR(rxDescCache
.extsts
);
2185 * Serialize tx state machine
2187 int txState
= this->txState
;
2188 SERIALIZE_SCALAR(txState
);
2189 SERIALIZE_SCALAR(txEnable
);
2190 SERIALIZE_SCALAR(CTDD
);
2191 SERIALIZE_SCALAR(txFifoAvail
);
2192 SERIALIZE_SCALAR(txFragPtr
);
2193 SERIALIZE_SCALAR(txDescCnt
);
2194 int txDmaState
= this->txDmaState
;
2195 SERIALIZE_SCALAR(txDmaState
);
2198 * Serialize rx state machine
2200 int rxState
= this->rxState
;
2201 SERIALIZE_SCALAR(rxState
);
2202 SERIALIZE_SCALAR(rxEnable
);
2203 SERIALIZE_SCALAR(CRDD
);
2204 SERIALIZE_SCALAR(rxPktBytes
);
2205 SERIALIZE_SCALAR(rxFifoCnt
);
2206 SERIALIZE_SCALAR(rxDescCnt
);
2207 int rxDmaState
= this->rxDmaState
;
2208 SERIALIZE_SCALAR(rxDmaState
);
2210 SERIALIZE_SCALAR(extstsEnable
);
2213 * If there's a pending transmit, store the time so we can
2214 * reschedule it later
2216 Tick transmitTick
= txEvent
.scheduled() ? txEvent
.when() - curTick
: 0;
2217 SERIALIZE_SCALAR(transmitTick
);
2220 * receive address filter settings
2222 SERIALIZE_SCALAR(rxFilterEnable
);
2223 SERIALIZE_SCALAR(acceptBroadcast
);
2224 SERIALIZE_SCALAR(acceptMulticast
);
2225 SERIALIZE_SCALAR(acceptUnicast
);
2226 SERIALIZE_SCALAR(acceptPerfect
);
2227 SERIALIZE_SCALAR(acceptArp
);
2230 * Keep track of pending interrupt status.
2232 SERIALIZE_SCALAR(intrTick
);
2233 SERIALIZE_SCALAR(cpuPendingIntr
);
2234 Tick intrEventTick
= 0;
2236 intrEventTick
= intrEvent
->when();
2237 SERIALIZE_SCALAR(intrEventTick
);
2242 NSGigE::unserialize(Checkpoint
*cp
, const std::string
§ion
)
2244 // Unserialize the PciDev base class
2245 PciDev::unserialize(cp
, section
);
2247 UNSERIALIZE_SCALAR(regs
.command
);
2248 UNSERIALIZE_SCALAR(regs
.config
);
2249 UNSERIALIZE_SCALAR(regs
.mear
);
2250 UNSERIALIZE_SCALAR(regs
.ptscr
);
2251 UNSERIALIZE_SCALAR(regs
.isr
);
2252 UNSERIALIZE_SCALAR(regs
.imr
);
2253 UNSERIALIZE_SCALAR(regs
.ier
);
2254 UNSERIALIZE_SCALAR(regs
.ihr
);
2255 UNSERIALIZE_SCALAR(regs
.txdp
);
2256 UNSERIALIZE_SCALAR(regs
.txdp_hi
);
2257 UNSERIALIZE_SCALAR(regs
.txcfg
);
2258 UNSERIALIZE_SCALAR(regs
.gpior
);
2259 UNSERIALIZE_SCALAR(regs
.rxdp
);
2260 UNSERIALIZE_SCALAR(regs
.rxdp_hi
);
2261 UNSERIALIZE_SCALAR(regs
.rxcfg
);
2262 UNSERIALIZE_SCALAR(regs
.pqcr
);
2263 UNSERIALIZE_SCALAR(regs
.wcsr
);
2264 UNSERIALIZE_SCALAR(regs
.pcr
);
2265 UNSERIALIZE_SCALAR(regs
.rfcr
);
2266 UNSERIALIZE_SCALAR(regs
.rfdr
);
2267 UNSERIALIZE_SCALAR(regs
.srr
);
2268 UNSERIALIZE_SCALAR(regs
.mibc
);
2269 UNSERIALIZE_SCALAR(regs
.vrcr
);
2270 UNSERIALIZE_SCALAR(regs
.vtcr
);
2271 UNSERIALIZE_SCALAR(regs
.vdr
);
2272 UNSERIALIZE_SCALAR(regs
.ccsr
);
2273 UNSERIALIZE_SCALAR(regs
.tbicr
);
2274 UNSERIALIZE_SCALAR(regs
.tbisr
);
2275 UNSERIALIZE_SCALAR(regs
.tanar
);
2276 UNSERIALIZE_SCALAR(regs
.tanlpar
);
2277 UNSERIALIZE_SCALAR(regs
.taner
);
2278 UNSERIALIZE_SCALAR(regs
.tesr
);
2280 UNSERIALIZE_ARRAY(rom
.perfectMatch
, EADDR_LEN
);
2282 UNSERIALIZE_SCALAR(ioEnable
);
2285 * unserialize the data fifos
2288 UNSERIALIZE_SCALAR(txNumPkts
);
2290 for (i
= 0; i
< txNumPkts
; ++i
) {
2291 PacketPtr p
= new PacketData
;
2292 p
->unserialize(cp
, csprintf("%s.rxFifo%d", section
, i
));
2293 txFifo
.push_back(p
);
2297 UNSERIALIZE_SCALAR(rxNumPkts
);
2298 for (i
= 0; i
< rxNumPkts
; ++i
) {
2299 PacketPtr p
= new PacketData
;
2300 p
->unserialize(cp
, csprintf("%s.rxFifo%d", section
, i
));
2301 rxFifo
.push_back(p
);
2305 * unserialize the various helper variables
2307 bool txPacketExists
;
2308 UNSERIALIZE_SCALAR(txPacketExists
);
2309 if (txPacketExists
) {
2310 txPacket
= new PacketData
;
2311 txPacket
->unserialize(cp
, csprintf("%s.txPacket", section
));
2312 uint32_t txPktBufPtr
;
2313 UNSERIALIZE_SCALAR(txPktBufPtr
);
2314 txPacketBufPtr
= (uint8_t *) txPacket
->data
+ txPktBufPtr
;
2318 bool rxPacketExists
;
2319 UNSERIALIZE_SCALAR(rxPacketExists
);
2321 if (rxPacketExists
) {
2322 rxPacket
= new PacketData
;
2323 rxPacket
->unserialize(cp
, csprintf("%s.rxPacket", section
));
2324 uint32_t rxPktBufPtr
;
2325 UNSERIALIZE_SCALAR(rxPktBufPtr
);
2326 rxPacketBufPtr
= (uint8_t *) rxPacket
->data
+ rxPktBufPtr
;
2330 UNSERIALIZE_SCALAR(txXferLen
);
2331 UNSERIALIZE_SCALAR(rxXferLen
);
2334 * Unserialize DescCaches
2336 UNSERIALIZE_SCALAR(txDescCache
.link
);
2337 UNSERIALIZE_SCALAR(txDescCache
.bufptr
);
2338 UNSERIALIZE_SCALAR(txDescCache
.cmdsts
);
2339 UNSERIALIZE_SCALAR(txDescCache
.extsts
);
2340 UNSERIALIZE_SCALAR(rxDescCache
.link
);
2341 UNSERIALIZE_SCALAR(rxDescCache
.bufptr
);
2342 UNSERIALIZE_SCALAR(rxDescCache
.cmdsts
);
2343 UNSERIALIZE_SCALAR(rxDescCache
.extsts
);
2346 * unserialize tx state machine
2349 UNSERIALIZE_SCALAR(txState
);
2350 this->txState
= (TxState
) txState
;
2351 UNSERIALIZE_SCALAR(txEnable
);
2352 UNSERIALIZE_SCALAR(CTDD
);
2353 UNSERIALIZE_SCALAR(txFifoAvail
);
2354 UNSERIALIZE_SCALAR(txFragPtr
);
2355 UNSERIALIZE_SCALAR(txDescCnt
);
2357 UNSERIALIZE_SCALAR(txDmaState
);
2358 this->txDmaState
= (DmaState
) txDmaState
;
2361 * unserialize rx state machine
2364 UNSERIALIZE_SCALAR(rxState
);
2365 this->rxState
= (RxState
) rxState
;
2366 UNSERIALIZE_SCALAR(rxEnable
);
2367 UNSERIALIZE_SCALAR(CRDD
);
2368 UNSERIALIZE_SCALAR(rxPktBytes
);
2369 UNSERIALIZE_SCALAR(rxFifoCnt
);
2370 UNSERIALIZE_SCALAR(rxDescCnt
);
2372 UNSERIALIZE_SCALAR(rxDmaState
);
2373 this->rxDmaState
= (DmaState
) rxDmaState
;
2375 UNSERIALIZE_SCALAR(extstsEnable
);
2378 * If there's a pending transmit, reschedule it now
2381 UNSERIALIZE_SCALAR(transmitTick
);
2383 txEvent
.schedule(curTick
+ transmitTick
);
2386 * unserialize receive address filter settings
2388 UNSERIALIZE_SCALAR(rxFilterEnable
);
2389 UNSERIALIZE_SCALAR(acceptBroadcast
);
2390 UNSERIALIZE_SCALAR(acceptMulticast
);
2391 UNSERIALIZE_SCALAR(acceptUnicast
);
2392 UNSERIALIZE_SCALAR(acceptPerfect
);
2393 UNSERIALIZE_SCALAR(acceptArp
);
2396 * Keep track of pending interrupt status.
2398 UNSERIALIZE_SCALAR(intrTick
);
2399 UNSERIALIZE_SCALAR(cpuPendingIntr
);
2401 UNSERIALIZE_SCALAR(intrEventTick
);
2402 if (intrEventTick
) {
2403 intrEvent
= new IntrEvent(this, true);
2404 intrEvent
->schedule(intrEventTick
);
2408 * re-add addrRanges to bus bridges
2411 pioInterface
->addAddrRange(RangeSize(BARAddrs
[0], BARSize
[0]));
2412 pioInterface
->addAddrRange(RangeSize(BARAddrs
[1], BARSize
[1]));
2417 NSGigE::cacheAccess(MemReqPtr
&req
)
2419 DPRINTF(EthernetPIO
, "timing access to paddr=%#x (daddr=%#x)\n",
2420 req
->paddr
, req
->paddr
- addr
);
2421 return curTick
+ pioLatency
;
2424 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt
)
2426 SimObjectParam
<EtherInt
*> peer
;
2427 SimObjectParam
<NSGigE
*> device
;
2429 END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt
)
2431 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt
)
2433 INIT_PARAM_DFLT(peer
, "peer interface", NULL
),
2434 INIT_PARAM(device
, "Ethernet device of this interface")
2436 END_INIT_SIM_OBJECT_PARAMS(NSGigEInt
)
2438 CREATE_SIM_OBJECT(NSGigEInt
)
2440 NSGigEInt
*dev_int
= new NSGigEInt(getInstanceName(), device
);
2442 EtherInt
*p
= (EtherInt
*)peer
;
2444 dev_int
->setPeer(p
);
2445 p
->setPeer(dev_int
);
2451 REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt
)
2454 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE
)
2456 Param
<Tick
> tx_delay
;
2457 Param
<Tick
> rx_delay
;
2458 SimObjectParam
<IntrControl
*> intr_ctrl
;
2459 Param
<Tick
> intr_delay
;
2460 SimObjectParam
<MemoryController
*> mmu
;
2461 SimObjectParam
<PhysicalMemory
*> physmem
;
2462 Param
<bool> rx_filter
;
2463 Param
<string
> hardware_address
;
2464 SimObjectParam
<Bus
*> header_bus
;
2465 SimObjectParam
<Bus
*> payload_bus
;
2466 SimObjectParam
<HierParams
*> hier
;
2467 Param
<Tick
> pio_latency
;
2468 Param
<bool> dma_desc_free
;
2469 Param
<bool> dma_data_free
;
2470 Param
<Tick
> dma_read_delay
;
2471 Param
<Tick
> dma_write_delay
;
2472 Param
<Tick
> dma_read_factor
;
2473 Param
<Tick
> dma_write_factor
;
2474 SimObjectParam
<PciConfigAll
*> configspace
;
2475 SimObjectParam
<PciConfigData
*> configdata
;
2476 SimObjectParam
<Tsunami
*> tsunami
;
2477 Param
<uint32_t> pci_bus
;
2478 Param
<uint32_t> pci_dev
;
2479 Param
<uint32_t> pci_func
;
2480 Param
<uint32_t> tx_fifo_size
;
2481 Param
<uint32_t> rx_fifo_size
;
2483 END_DECLARE_SIM_OBJECT_PARAMS(NSGigE
)
2485 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE
)
2487 INIT_PARAM_DFLT(tx_delay
, "Transmit Delay", 1000),
2488 INIT_PARAM_DFLT(rx_delay
, "Receive Delay", 1000),
2489 INIT_PARAM(intr_ctrl
, "Interrupt Controller"),
2490 INIT_PARAM_DFLT(intr_delay
, "Interrupt Delay in microseconds", 0),
2491 INIT_PARAM(mmu
, "Memory Controller"),
2492 INIT_PARAM(physmem
, "Physical Memory"),
2493 INIT_PARAM_DFLT(rx_filter
, "Enable Receive Filter", true),
2494 INIT_PARAM_DFLT(hardware_address
, "Ethernet Hardware Address",
2495 "00:99:00:00:00:01"),
2496 INIT_PARAM_DFLT(header_bus
, "The IO Bus to attach to for headers", NULL
),
2497 INIT_PARAM_DFLT(payload_bus
, "The IO Bus to attach to for payload", NULL
),
2498 INIT_PARAM_DFLT(hier
, "Hierarchy global variables", &defaultHierParams
),
2499 INIT_PARAM_DFLT(pio_latency
, "Programmed IO latency in bus cycles", 1),
2500 INIT_PARAM_DFLT(dma_desc_free
, "DMA of Descriptors is free", false),
2501 INIT_PARAM_DFLT(dma_data_free
, "DMA of Data is free", false),
2502 INIT_PARAM_DFLT(dma_read_delay
, "fixed delay for dma reads", 0),
2503 INIT_PARAM_DFLT(dma_write_delay
, "fixed delay for dma writes", 0),
2504 INIT_PARAM_DFLT(dma_read_factor
, "multiplier for dma reads", 0),
2505 INIT_PARAM_DFLT(dma_write_factor
, "multiplier for dma writes", 0),
2506 INIT_PARAM(configspace
, "PCI Configspace"),
2507 INIT_PARAM(configdata
, "PCI Config data"),
2508 INIT_PARAM(tsunami
, "Tsunami"),
2509 INIT_PARAM(pci_bus
, "PCI bus"),
2510 INIT_PARAM(pci_dev
, "PCI device number"),
2511 INIT_PARAM(pci_func
, "PCI function code"),
2512 INIT_PARAM_DFLT(tx_fifo_size
, "max size in bytes of txFifo", 131072),
2513 INIT_PARAM_DFLT(rx_fifo_size
, "max size in bytes of rxFifo", 131072)
2515 END_INIT_SIM_OBJECT_PARAMS(NSGigE
)
2518 CREATE_SIM_OBJECT(NSGigE
)
2521 sscanf(((string
)hardware_address
).c_str(), "%x:%x:%x:%x:%x:%x",
2522 &eaddr
[0], &eaddr
[1], &eaddr
[2], &eaddr
[3], &eaddr
[4], &eaddr
[5]);
2524 return new NSGigE(getInstanceName(), intr_ctrl
, intr_delay
,
2525 physmem
, tx_delay
, rx_delay
, mmu
, hier
, header_bus
,
2526 payload_bus
, pio_latency
, dma_desc_free
, dma_data_free
,
2527 dma_read_delay
, dma_write_delay
, dma_read_factor
,
2528 dma_write_factor
, configspace
, configdata
,
2529 tsunami
, pci_bus
, pci_dev
, pci_func
, rx_filter
, eaddr
,
2530 tx_fifo_size
, rx_fifo_size
);
2533 REGISTER_SIM_OBJECT("NSGigE", NSGigE
)