2 * Copyright (c) 2004 The Regents of The University of Michigan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 * Device module for modelling the National Semiconductor
31 * DP83820 ethernet controller. Does not support priority queueing
37 #include "base/inet.hh"
38 #include "cpu/exec_context.hh"
39 #include "cpu/intr_control.hh"
41 #include "dev/etherlink.hh"
42 #include "dev/ns_gige.hh"
43 #include "dev/pciconfigall.hh"
44 #include "dev/tsunami_cchip.hh"
45 #include "mem/bus/bus.hh"
46 #include "mem/bus/dma_interface.hh"
47 #include "mem/bus/pio_interface.hh"
48 #include "mem/bus/pio_interface_impl.hh"
49 #include "mem/functional_mem/memory_control.hh"
50 #include "mem/functional_mem/physical_memory.hh"
51 #include "sim/builder.hh"
52 #include "sim/debug.hh"
53 #include "sim/host.hh"
54 #include "sim/sim_stats.hh"
55 #include "targetarch/vtophys.hh"
57 const char *NsRxStateStrings
[] =
68 const char *NsTxStateStrings
[] =
79 const char *NsDmaState
[] =
91 ///////////////////////////////////////////////////////////////////////
95 NSGigE::NSGigE(const std::string
&name
, IntrControl
*i
, Tick intr_delay
,
96 PhysicalMemory
*pmem
, Tick tx_delay
, Tick rx_delay
,
97 MemoryController
*mmu
, HierParams
*hier
, Bus
*header_bus
,
98 Bus
*payload_bus
, Tick pio_latency
, bool dma_desc_free
,
99 bool dma_data_free
, Tick dma_read_delay
, Tick dma_write_delay
,
100 Tick dma_read_factor
, Tick dma_write_factor
, PciConfigAll
*cf
,
101 PciConfigData
*cd
, Tsunami
*t
, uint32_t bus
, uint32_t dev
,
102 uint32_t func
, bool rx_filter
, const int eaddr
[6],
103 uint32_t tx_fifo_size
, uint32_t rx_fifo_size
)
104 : PciDev(name
, mmu
, cf
, cd
, bus
, dev
, func
), tsunami(t
), ioEnable(false),
105 maxTxFifoSize(tx_fifo_size
), maxRxFifoSize(rx_fifo_size
),
106 txPacket(0), rxPacket(0), txPacketBufPtr(NULL
), rxPacketBufPtr(NULL
),
107 txXferLen(0), rxXferLen(0), txState(txIdle
), txEnable(false),
108 CTDD(false), txFifoAvail(tx_fifo_size
),
109 txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle
), rxState(rxIdle
),
110 rxEnable(false), CRDD(false), rxPktBytes(0), rxFifoCnt(0),
111 rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle
), extstsEnable(false),
112 rxDmaReadEvent(this), rxDmaWriteEvent(this),
113 txDmaReadEvent(this), txDmaWriteEvent(this),
114 dmaDescFree(dma_desc_free
), dmaDataFree(dma_data_free
),
115 txDelay(tx_delay
), rxDelay(rx_delay
), rxKickTick(0), txKickTick(0),
116 txEvent(this), rxFilterEnable(rx_filter
), acceptBroadcast(false),
117 acceptMulticast(false), acceptUnicast(false),
118 acceptPerfect(false), acceptArp(false),
119 physmem(pmem
), intctrl(i
), intrTick(0), cpuPendingIntr(false),
120 intrEvent(0), interface(0)
122 tsunami
->ethernet
= this;
125 pioInterface
= newPioInterface(name
, hier
, header_bus
, this,
126 &NSGigE::cacheAccess
);
128 pioLatency
= pio_latency
* header_bus
->clockRatio
;
131 dmaInterface
= new DMAInterface
<Bus
>(name
+ ".dma",
132 header_bus
, payload_bus
, 1);
134 dmaInterface
= new DMAInterface
<Bus
>(name
+ ".dma",
135 header_bus
, header_bus
, 1);
136 } else if (payload_bus
) {
137 pioInterface
= newPioInterface(name
, hier
, payload_bus
, this,
138 &NSGigE::cacheAccess
);
140 pioLatency
= pio_latency
* payload_bus
->clockRatio
;
142 dmaInterface
= new DMAInterface
<Bus
>(name
+ ".dma", payload_bus
,
147 intrDelay
= US2Ticks(intr_delay
);
148 dmaReadDelay
= dma_read_delay
;
149 dmaWriteDelay
= dma_write_delay
;
150 dmaReadFactor
= dma_read_factor
;
151 dmaWriteFactor
= dma_write_factor
;
154 rom
.perfectMatch
[0] = eaddr
[0];
155 rom
.perfectMatch
[1] = eaddr
[1];
156 rom
.perfectMatch
[2] = eaddr
[2];
157 rom
.perfectMatch
[3] = eaddr
[3];
158 rom
.perfectMatch
[4] = eaddr
[4];
159 rom
.perfectMatch
[5] = eaddr
[5];
169 .name(name() + ".txBytes")
170 .desc("Bytes Transmitted")
175 .name(name() + ".rxBytes")
176 .desc("Bytes Received")
181 .name(name() + ".txPackets")
182 .desc("Number of Packets Transmitted")
187 .name(name() + ".rxPackets")
188 .desc("Number of Packets Received")
193 .name(name() + ".txIpChecksums")
194 .desc("Number of tx IP Checksums done by device")
200 .name(name() + ".rxIpChecksums")
201 .desc("Number of rx IP Checksums done by device")
207 .name(name() + ".txTcpChecksums")
208 .desc("Number of tx TCP Checksums done by device")
214 .name(name() + ".rxTcpChecksums")
215 .desc("Number of rx TCP Checksums done by device")
221 .name(name() + ".txUdpChecksums")
222 .desc("Number of tx UDP Checksums done by device")
228 .name(name() + ".rxUdpChecksums")
229 .desc("Number of rx UDP Checksums done by device")
235 .name(name() + ".descDMAReads")
236 .desc("Number of descriptors the device read w/ DMA")
241 .name(name() + ".descDMAWrites")
242 .desc("Number of descriptors the device wrote w/ DMA")
247 .name(name() + ".descDmaReadBytes")
248 .desc("number of descriptor bytes read w/ DMA")
253 .name(name() + ".descDmaWriteBytes")
254 .desc("number of descriptor bytes write w/ DMA")
260 .name(name() + ".txBandwidth")
261 .desc("Transmit Bandwidth (bits/s)")
267 .name(name() + ".rxBandwidth")
268 .desc("Receive Bandwidth (bits/s)")
274 .name(name() + ".txPPS")
275 .desc("Packet Tranmission Rate (packets/s)")
281 .name(name() + ".rxPPS")
282 .desc("Packet Reception Rate (packets/s)")
287 txBandwidth
= txBytes
* Stats::constant(8) / simSeconds
;
288 rxBandwidth
= rxBytes
* Stats::constant(8) / simSeconds
;
289 txPacketRate
= txPackets
/ simSeconds
;
290 rxPacketRate
= rxPackets
/ simSeconds
;
294 * This is to read the PCI general configuration registers
297 NSGigE::ReadConfig(int offset
, int size
, uint8_t *data
)
299 if (offset
< PCI_DEVICE_SPECIFIC
)
300 PciDev::ReadConfig(offset
, size
, data
);
302 panic("Device specific PCI config space not implemented!\n");
306 * This is to write to the PCI general configuration registers
309 NSGigE::WriteConfig(int offset
, int size
, uint32_t data
)
311 if (offset
< PCI_DEVICE_SPECIFIC
)
312 PciDev::WriteConfig(offset
, size
, data
);
314 panic("Device specific PCI config space not implemented!\n");
316 // Need to catch writes to BARs to update the PIO interface
318 // seems to work fine without all these PCI settings, but i
319 // put in the IO to double check, an assertion will fail if we
320 // need to properly implement it
322 if (config
.data
[offset
] & PCI_CMD_IOSE
)
328 if (config
.data
[offset
] & PCI_CMD_BME
) {
335 if (config
.data
[offset
] & PCI_CMD_MSE
) {
344 case PCI0_BASE_ADDR0
:
345 if (BARAddrs
[0] != 0) {
347 pioInterface
->addAddrRange(BARAddrs
[0],
348 BARAddrs
[0] + BARSize
[0] - 1);
350 BARAddrs
[0] &= PA_UNCACHED_MASK
;
353 case PCI0_BASE_ADDR1
:
354 if (BARAddrs
[1] != 0) {
356 pioInterface
->addAddrRange(BARAddrs
[1],
357 BARAddrs
[1] + BARSize
[1] - 1);
359 BARAddrs
[1] &= PA_UNCACHED_MASK
;
366 * This reads the device registers, which are detailed in the NS83820
370 NSGigE::read(MemReqPtr
&req
, uint8_t *data
)
374 //The mask is to give you only the offset into the device register file
375 Addr daddr
= req
->paddr
& 0xfff;
376 DPRINTF(EthernetPIO
, "read da=%#x pa=%#x va=%#x size=%d\n",
377 daddr
, req
->paddr
, req
->vaddr
, req
->size
);
380 // there are some reserved registers, you can see ns_gige_reg.h and
381 // the spec sheet for details
382 if (daddr
> LAST
&& daddr
<= RESERVED
) {
383 panic("Accessing reserved register");
384 } else if (daddr
> RESERVED
&& daddr
<= 0x3FC) {
385 ReadConfig(daddr
& 0xff, req
->size
, data
);
387 } else if (daddr
>= MIB_START
&& daddr
<= MIB_END
) {
388 // don't implement all the MIB's. hopefully the kernel
389 // doesn't actually DEPEND upon their values
390 // MIB are just hardware stats keepers
391 uint32_t ®
= *(uint32_t *) data
;
394 } else if (daddr
> 0x3FC)
395 panic("Something is messed up!\n");
398 case sizeof(uint32_t):
400 uint32_t ®
= *(uint32_t *)data
;
405 //these are supposed to be cleared on a read
406 reg
&= ~(CR_RXD
| CR_TXD
| CR_TXR
| CR_RXR
);
423 devIntrClear(ISR_ALL
);
478 // see the spec sheet for how RFCR and RFDR work
479 // basically, you write to RFCR to tell the machine
480 // what you want to do next, then you act upon RFDR,
481 // and the device will be prepared b/c of what you
488 switch (regs
.rfcr
& RFCR_RFADDR
) {
490 reg
= rom
.perfectMatch
[1];
492 reg
+= rom
.perfectMatch
[0];
495 reg
= rom
.perfectMatch
[3] << 8;
496 reg
+= rom
.perfectMatch
[2];
499 reg
= rom
.perfectMatch
[5] << 8;
500 reg
+= rom
.perfectMatch
[4];
503 panic("reading RFDR for something other than PMATCH!\n");
504 // didn't implement other RFDR functionality b/c
505 // driver didn't use it
515 reg
&= ~(MIBC_MIBS
| MIBC_ACLR
);
559 panic("reading unimplemented register: addr=%#x", daddr
);
562 DPRINTF(EthernetPIO
, "read from %#x: data=%d data=%#x\n",
568 panic("accessing register with invalid size: addr=%#x, size=%d",
576 NSGigE::write(MemReqPtr
&req
, const uint8_t *data
)
580 Addr daddr
= req
->paddr
& 0xfff;
581 DPRINTF(EthernetPIO
, "write da=%#x pa=%#x va=%#x size=%d\n",
582 daddr
, req
->paddr
, req
->vaddr
, req
->size
);
584 if (daddr
> LAST
&& daddr
<= RESERVED
) {
585 panic("Accessing reserved register");
586 } else if (daddr
> RESERVED
&& daddr
<= 0x3FC) {
587 WriteConfig(daddr
& 0xff, req
->size
, *(uint32_t *)data
);
589 } else if (daddr
> 0x3FC)
590 panic("Something is messed up!\n");
592 if (req
->size
== sizeof(uint32_t)) {
593 uint32_t reg
= *(uint32_t *)data
;
594 DPRINTF(EthernetPIO
, "write data=%d data=%#x\n", reg
, reg
);
601 } else if (reg
& CR_TXE
) {
604 // the kernel is enabling the transmit machine
605 if (txState
== txIdle
)
611 } else if (reg
& CR_RXE
) {
614 if (rxState
== rxIdle
)
625 devIntrPost(ISR_SWI
);
636 if (reg
& CFG_LNKSTS
||
639 reg
& CFG_RESERVED
||
642 panic("writing to read-only or reserved CFG bits!\n");
644 regs
.config
|= reg
& ~(CFG_LNKSTS
| CFG_SPDSTS
| CFG_DUPSTS
|
645 CFG_RESERVED
| CFG_T64ADDR
| CFG_PCI64_DET
);
647 // all these #if 0's are because i don't THINK the kernel needs to
648 // have these implemented. if there is a problem relating to one of
649 // these, you may need to add functionality in.
651 if (reg
& CFG_TBI_EN
) ;
652 if (reg
& CFG_MODE_1000
) ;
655 if (reg
& CFG_AUTO_1000
)
656 panic("CFG_AUTO_1000 not implemented!\n");
659 if (reg
& CFG_PINT_DUPSTS
||
660 reg
& CFG_PINT_LNKSTS
||
661 reg
& CFG_PINT_SPDSTS
)
664 if (reg
& CFG_TMRTEST
) ;
665 if (reg
& CFG_MRM_DIS
) ;
666 if (reg
& CFG_MWI_DIS
) ;
668 if (reg
& CFG_T64ADDR
)
669 panic("CFG_T64ADDR is read only register!\n");
671 if (reg
& CFG_PCI64_DET
)
672 panic("CFG_PCI64_DET is read only register!\n");
674 if (reg
& CFG_DATA64_EN
) ;
675 if (reg
& CFG_M64ADDR
) ;
676 if (reg
& CFG_PHY_RST
) ;
677 if (reg
& CFG_PHY_DIS
) ;
680 if (reg
& CFG_EXTSTS_EN
)
683 extstsEnable
= false;
686 if (reg
& CFG_REQALG
) ;
690 if (reg
& CFG_PESEL
) ;
691 if (reg
& CFG_BROM_DIS
) ;
692 if (reg
& CFG_EXT_125
) ;
699 // since phy is completely faked, MEAR_MD* don't matter
700 // and since the driver never uses MEAR_EE*, they don't
703 if (reg
& MEAR_EEDI
) ;
704 if (reg
& MEAR_EEDO
) ; // this one is read only
705 if (reg
& MEAR_EECLK
) ;
706 if (reg
& MEAR_EESEL
) ;
707 if (reg
& MEAR_MDIO
) ;
708 if (reg
& MEAR_MDDIR
) ;
709 if (reg
& MEAR_MDC
) ;
714 regs
.ptscr
= reg
& ~(PTSCR_RBIST_RDONLY
);
715 // these control BISTs for various parts of chip - we
716 // don't care or do just fake that the BIST is done
717 if (reg
& PTSCR_RBIST_EN
)
718 regs
.ptscr
|= PTSCR_RBIST_DONE
;
719 if (reg
& PTSCR_EEBIST_EN
)
720 regs
.ptscr
&= ~PTSCR_EEBIST_EN
;
721 if (reg
& PTSCR_EELOAD_EN
)
722 regs
.ptscr
&= ~PTSCR_EELOAD_EN
;
725 case ISR
: /* writing to the ISR has no effect */
726 panic("ISR is a read only register!\n");
739 /* not going to implement real interrupt holdoff */
743 regs
.txdp
= (reg
& 0xFFFFFFFC);
744 assert(txState
== txIdle
);
755 if (reg
& TXCFG_CSI
) ;
756 if (reg
& TXCFG_HBI
) ;
757 if (reg
& TXCFG_MLB
) ;
758 if (reg
& TXCFG_ATP
) ;
759 if (reg
& TXCFG_ECRETRY
) {
761 * this could easily be implemented, but considering
762 * the network is just a fake pipe, wouldn't make
767 if (reg
& TXCFG_BRST_DIS
) ;
771 /* we handle our own DMA, ignore the kernel's exhortations */
772 if (reg
& TXCFG_MXDMA
) ;
775 // also, we currently don't care about fill/drain
776 // thresholds though this may change in the future with
777 // more realistic networks or a driver which changes it
778 // according to feedback
784 /* these just control general purpose i/o pins, don't matter */
799 if (reg
& RXCFG_AEP
) ;
800 if (reg
& RXCFG_ARP
) ;
801 if (reg
& RXCFG_STRIPCRC
) ;
802 if (reg
& RXCFG_RX_RD
) ;
803 if (reg
& RXCFG_ALP
) ;
804 if (reg
& RXCFG_AIRL
) ;
806 /* we handle our own DMA, ignore what kernel says about it */
807 if (reg
& RXCFG_MXDMA
) ;
809 //also, we currently don't care about fill/drain thresholds
810 //though this may change in the future with more realistic
811 //networks or a driver which changes it according to feedback
812 if (reg
& (RXCFG_DRTH
| RXCFG_DRTH0
)) ;
817 /* there is no priority queueing used in the linux 2.6 driver */
822 /* not going to implement wake on LAN */
827 /* not going to implement pause control */
834 rxFilterEnable
= (reg
& RFCR_RFEN
) ? true : false;
835 acceptBroadcast
= (reg
& RFCR_AAB
) ? true : false;
836 acceptMulticast
= (reg
& RFCR_AAM
) ? true : false;
837 acceptUnicast
= (reg
& RFCR_AAU
) ? true : false;
838 acceptPerfect
= (reg
& RFCR_APM
) ? true : false;
839 acceptArp
= (reg
& RFCR_AARP
) ? true : false;
843 panic("RFCR_APAT not implemented!\n");
846 if (reg
& RFCR_MHEN
|| reg
& RFCR_UHEN
)
847 panic("hash filtering not implemented!\n");
850 panic("RFCR_ULM not implemented!\n");
855 panic("the driver never writes to RFDR, something is wrong!\n");
858 panic("the driver never uses BRAR, something is wrong!\n");
861 panic("the driver never uses BRDR, something is wrong!\n");
864 panic("SRR is read only register!\n");
867 panic("the driver never uses MIBC, something is wrong!\n");
878 panic("the driver never uses VDR, something is wrong!\n");
882 /* not going to implement clockrun stuff */
888 if (reg
& TBICR_MR_LOOPBACK
)
889 panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
891 if (reg
& TBICR_MR_AN_ENABLE
) {
892 regs
.tanlpar
= regs
.tanar
;
893 regs
.tbisr
|= (TBISR_MR_AN_COMPLETE
| TBISR_MR_LINK_STATUS
);
897 if (reg
& TBICR_MR_RESTART_AN
) ;
903 panic("TBISR is read only register!\n");
908 panic("this isn't used in driver, something wrong!\n");
911 panic("this isn't used in driver, something wrong!\n");
915 panic("this should only be written to by the fake phy!\n");
918 panic("TANER is read only register!\n");
925 panic("invalid register access daddr=%#x", daddr
);
928 panic("Invalid Request Size");
// Post (set) device-level interrupt bits in the ISR; if any newly-set bit
// is also enabled in the IMR, an interrupt is forwarded to the CPU.
// NOTE(review): extraction appears to have dropped lines here (original
// line numbers jump); the body that actually schedules the CPU interrupt
// is not visible in this view.
935 NSGigE::devIntrPost(uint32_t interrupts
)
// Reserved ISR bits must never be asserted by the model.
937 if (interrupts
& ISR_RESERVE
)
938 panic("Cannot set a reserved interrupt");
// Unimplemented interrupt sources only warn, then are masked out below.
940 if (interrupts
& ISR_NOIMPL
)
941 warn("interrupt not implemented %#x\n", interrupts
);
943 interrupts
&= ~ISR_NOIMPL
;
// Accumulate the requested bits into the interrupt status register.
944 regs
.isr
|= interrupts
;
946 DPRINTF(EthernetIntr
,
947 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
948 interrupts
, regs
.isr
, regs
.imr
);
// Only unmasked (ISR & IMR) bits can reach the CPU; ISR_NODELAY bits
// bypass the configured interrupt delay.
950 if ((regs
.isr
& regs
.imr
)) {
952 if (!(regs
.isr
& regs
.imr
& ISR_NODELAY
))
// Clear device-level interrupt bits from the ISR.
// NOTE(review): the tail of this function (what happens when no unmasked
// bits remain — presumably clearing the CPU interrupt) is missing from
// this extraction; original line numbers jump past 971.
959 NSGigE::devIntrClear(uint32_t interrupts
)
// Reserved ISR bits must never be cleared explicitly.
961 if (interrupts
& ISR_RESERVE
)
962 panic("Cannot clear a reserved interrupt");
// Unimplemented sources are ignored, mirroring devIntrPost().
964 interrupts
&= ~ISR_NOIMPL
;
965 regs
.isr
&= ~interrupts
;
967 DPRINTF(EthernetIntr
,
968 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
969 interrupts
, regs
.isr
, regs
.imr
);
// Check whether any unmasked interrupt condition is still pending.
971 if (!(regs
.isr
& regs
.imr
))
// React to a change of the interrupt mask (IMR): re-evaluate whether an
// interrupt should be delivered to the CPU with the new mask applied.
// NOTE(review): the else-branch (for the no-unmasked-bits case) is not
// visible in this extraction.
976 NSGigE::devIntrChangeMask()
978 DPRINTF(EthernetIntr
, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
979 regs
.isr
, regs
.imr
, regs
.isr
& regs
.imr
);
// If any pending ISR bit is now unmasked, post the CPU interrupt
// immediately (no delay).
981 if (regs
.isr
& regs
.imr
)
982 cpuIntrPost(curTick
);
988 NSGigE::cpuIntrPost(Tick when
)
990 // If the interrupt you want to post is later than an interrupt
991 // already scheduled, just let it post in the coming one and don't
993 // HOWEVER, must be sure that the scheduled intrTick is in the
994 // future (this was formerly the source of a bug)
996 * @todo this warning should be removed and the intrTick code should
999 assert(when
>= curTick
);
1000 assert(intrTick
>= curTick
|| intrTick
== 0);
1001 if (when
> intrTick
&& intrTick
!= 0) {
1002 DPRINTF(EthernetIntr
, "don't need to schedule event...intrTick=%d\n",
1008 if (intrTick
< curTick
) {
1013 DPRINTF(EthernetIntr
, "going to schedule an interrupt for intrTick=%d\n",
1017 intrEvent
->squash();
1018 intrEvent
= new IntrEvent(this, true);
1019 intrEvent
->schedule(intrTick
);
1023 NSGigE::cpuInterrupt()
1025 assert(intrTick
== curTick
);
1027 // Whether or not there's a pending interrupt, we don't care about
1032 // Don't send an interrupt if there's already one
1033 if (cpuPendingIntr
) {
1034 DPRINTF(EthernetIntr
,
1035 "would send an interrupt now, but there's already pending\n");
1038 cpuPendingIntr
= true;
1040 DPRINTF(EthernetIntr
, "posting cchip interrupt\n");
1041 tsunami
->cchip
->postDRIR(configData
->config
.hdr
.pci0
.interruptLine
);
1046 NSGigE::cpuIntrClear()
1048 if (!cpuPendingIntr
)
1052 intrEvent
->squash();
1058 cpuPendingIntr
= false;
1060 DPRINTF(EthernetIntr
, "clearing cchip interrupt\n");
1061 tsunami
->cchip
->clearDRIR(configData
->config
.hdr
.pci0
.interruptLine
);
1065 NSGigE::cpuIntrPending() const
1066 { return cpuPendingIntr
; }
1072 DPRINTF(Ethernet
, "transmit reset\n");
1075 txFifoAvail
= maxTxFifoSize
;
1078 assert(txDescCnt
== 0);
1081 assert(txDmaState
== dmaIdle
);
1087 DPRINTF(Ethernet
, "receive reset\n");
1090 assert(rxPktBytes
== 0);
1094 assert(rxDescCnt
== 0);
1095 assert(rxDmaState
== dmaIdle
);
1103 memset(®s
, 0, sizeof(regs
));
1104 regs
.config
= CFG_LNKSTS
;
1105 regs
.mear
= MEAR_MDDIR
| MEAR_EEDO
;
1106 regs
.txcfg
= 0x120; // set drain threshold to 1024 bytes and
1107 // fill threshold to 32 bytes
1108 regs
.rxcfg
= 0x4; // set drain threshold to 16 bytes
1109 regs
.srr
= 0x0103; // set the silicon revision to rev B or 0x103
1110 regs
.mibc
= MIBC_FRZ
;
1111 regs
.vdr
= 0x81; // set the vlan tag type to 802.1q
1112 regs
.tesr
= 0xc000; // TBI capable of both full and half duplex
1114 extstsEnable
= false;
1115 acceptBroadcast
= false;
1116 acceptMulticast
= false;
1117 acceptUnicast
= false;
1118 acceptPerfect
= false;
1123 NSGigE::rxDmaReadCopy()
1125 assert(rxDmaState
== dmaReading
);
1127 physmem
->dma_read((uint8_t *)rxDmaData
, rxDmaAddr
, rxDmaLen
);
1128 rxDmaState
= dmaIdle
;
1130 DPRINTF(EthernetDMA
, "rx dma read paddr=%#x len=%d\n",
1131 rxDmaAddr
, rxDmaLen
);
1132 DDUMP(EthernetDMA
, rxDmaData
, rxDmaLen
);
// Kick off a DMA read for the receive state machine, either through the
// modelled DMA interface (with timing) or, when no delay/factor is
// configured, as an immediate functional copy.
// NOTE(review): several original lines are missing from this extraction
// (the else/return around doDMA and the immediate-copy fast path body).
1136 NSGigE::doRxDmaRead()
1138 assert(rxDmaState
== dmaIdle
|| rxDmaState
== dmaReadWaiting
);
1139 rxDmaState
= dmaReading
;
// Use the timing DMA interface unless this transfer was marked free.
1141 if (dmaInterface
&& !rxDmaFree
) {
// If the interface is busy, wait; rxDmaReadDone will retry later.
1142 if (dmaInterface
->busy())
1143 rxDmaState
= dmaReadWaiting
;
1145 dmaInterface
->doDMA(Read
, rxDmaAddr
, rxDmaLen
, curTick
,
1146 &rxDmaReadEvent
, true);
// No timing model configured: check for the immediate-copy case.
1150 if (dmaReadDelay
== 0 && dmaReadFactor
== 0) {
// Otherwise model latency: fixed delay plus a per-64-byte-chunk factor.
1155 Tick factor
= ((rxDmaLen
+ ULL(63)) >> ULL(6)) * dmaReadFactor
;
1156 Tick start
= curTick
+ dmaReadDelay
+ factor
;
1157 rxDmaReadEvent
.schedule(start
);
1162 NSGigE::rxDmaReadDone()
1164 assert(rxDmaState
== dmaReading
);
1167 // If the transmit state machine has a pending DMA, let it go first
1168 if (txDmaState
== dmaReadWaiting
|| txDmaState
== dmaWriteWaiting
)
1175 NSGigE::rxDmaWriteCopy()
1177 assert(rxDmaState
== dmaWriting
);
1179 physmem
->dma_write(rxDmaAddr
, (uint8_t *)rxDmaData
, rxDmaLen
);
1180 rxDmaState
= dmaIdle
;
1182 DPRINTF(EthernetDMA
, "rx dma write paddr=%#x len=%d\n",
1183 rxDmaAddr
, rxDmaLen
);
1184 DDUMP(EthernetDMA
, rxDmaData
, rxDmaLen
);
// Kick off a DMA write for the receive state machine; mirrors
// doRxDmaRead() but uses WriteInvalidate and the write delay/factor.
// NOTE(review): the else/return around doDMA and the immediate-copy
// fast path body are missing from this extraction.
1188 NSGigE::doRxDmaWrite()
1190 assert(rxDmaState
== dmaIdle
|| rxDmaState
== dmaWriteWaiting
);
1191 rxDmaState
= dmaWriting
;
// Use the timing DMA interface unless this transfer was marked free.
1193 if (dmaInterface
&& !rxDmaFree
) {
1194 if (dmaInterface
->busy())
1195 rxDmaState
= dmaWriteWaiting
;
1197 dmaInterface
->doDMA(WriteInvalidate
, rxDmaAddr
, rxDmaLen
, curTick
,
1198 &rxDmaWriteEvent
, true);
// No timing model configured: check for the immediate-copy case.
1202 if (dmaWriteDelay
== 0 && dmaWriteFactor
== 0) {
// Otherwise model latency: fixed delay plus a per-64-byte-chunk factor.
1207 Tick factor
= ((rxDmaLen
+ ULL(63)) >> ULL(6)) * dmaWriteFactor
;
1208 Tick start
= curTick
+ dmaWriteDelay
+ factor
;
1209 rxDmaWriteEvent
.schedule(start
);
1214 NSGigE::rxDmaWriteDone()
1216 assert(rxDmaState
== dmaWriting
);
1219 // If the transmit state machine has a pending DMA, let it go first
1220 if (txDmaState
== dmaReadWaiting
|| txDmaState
== dmaWriteWaiting
)
1229 DPRINTF(EthernetSM
, "receive kick rxState=%s (rxBuf.size=%d)\n",
1230 NsRxStateStrings
[rxState
], rxFifo
.size());
1232 if (rxKickTick
> curTick
) {
1233 DPRINTF(EthernetSM
, "receive kick exiting, can't run till %d\n",
1239 switch(rxDmaState
) {
1240 case dmaReadWaiting
:
1244 case dmaWriteWaiting
:
1252 // see state machine from spec for details
1253 // the way this works is, if you finish work on one state and can
1254 // go directly to another, you do that through jumping to the
1255 // label "next". however, if you have intermediate work, like DMA
1256 // so that you can't go to the next state yet, you go to exit and
1257 // exit the loop. however, when the DMA is done it will trigger
1258 // an event and come back to this loop.
1262 DPRINTF(EthernetSM
, "Receive Disabled! Nothing to do.\n");
1267 rxState
= rxDescRefr
;
1269 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1270 rxDmaData
= &rxDescCache
+ offsetof(ns_desc
, link
);
1271 rxDmaLen
= sizeof(rxDescCache
.link
);
1272 rxDmaFree
= dmaDescFree
;
1275 descDmaRdBytes
+= rxDmaLen
;
1280 rxState
= rxDescRead
;
1282 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1283 rxDmaData
= &rxDescCache
;
1284 rxDmaLen
= sizeof(ns_desc
);
1285 rxDmaFree
= dmaDescFree
;
1288 descDmaRdBytes
+= rxDmaLen
;
1296 if (rxDmaState
!= dmaIdle
)
1299 rxState
= rxAdvance
;
1303 if (rxDmaState
!= dmaIdle
)
1306 DPRINTF(EthernetDesc
,
1307 "rxDescCache: addr=%08x read descriptor\n",
1308 regs
.rxdp
& 0x3fffffff);
1309 DPRINTF(EthernetDesc
,
1310 "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
1311 rxDescCache
.link
, rxDescCache
.bufptr
, rxDescCache
.cmdsts
,
1312 rxDescCache
.extsts
);
1314 if (rxDescCache
.cmdsts
& CMDSTS_OWN
) {
1315 devIntrPost(ISR_RXIDLE
);
1319 rxState
= rxFifoBlock
;
1320 rxFragPtr
= rxDescCache
.bufptr
;
1321 rxDescCnt
= rxDescCache
.cmdsts
& CMDSTS_LEN_MASK
;
1328 * @todo in reality, we should be able to start processing
1329 * the packet as it arrives, and not have to wait for the
1330 * full packet ot be in the receive fifo.
1335 DPRINTF(EthernetSM
, "****processing receive of new packet****\n");
1337 // If we don't have a packet, grab a new one from the fifo.
1338 rxPacket
= rxFifo
.front();
1339 rxPktBytes
= rxPacket
->length
;
1340 rxPacketBufPtr
= rxPacket
->data
;
1343 if (DTRACE(Ethernet
)) {
1344 const IpHdr
*ip
= rxPacket
->ip();
1346 DPRINTF(Ethernet
, "ID is %d\n", ip
->id());
1347 const TcpHdr
*tcp
= rxPacket
->tcp();
1349 DPRINTF(Ethernet
, "Src Port=%d, Dest Port=%d\n",
1350 tcp
->sport(), tcp
->dport());
1356 // sanity check - i think the driver behaves like this
1357 assert(rxDescCnt
>= rxPktBytes
);
1359 // Must clear the value before popping to decrement the
1361 rxFifo
.front() = NULL
;
1363 rxFifoCnt
-= rxPacket
->length
;
1367 // dont' need the && rxDescCnt > 0 if driver sanity check
1369 if (rxPktBytes
> 0) {
1370 rxState
= rxFragWrite
;
1371 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
1373 rxXferLen
= rxPktBytes
;
1375 rxDmaAddr
= rxFragPtr
& 0x3fffffff;
1376 rxDmaData
= rxPacketBufPtr
;
1377 rxDmaLen
= rxXferLen
;
1378 rxDmaFree
= dmaDataFree
;
1384 rxState
= rxDescWrite
;
1386 //if (rxPktBytes == 0) { /* packet is done */
1387 assert(rxPktBytes
== 0);
1388 DPRINTF(EthernetSM
, "done with receiving packet\n");
1390 rxDescCache
.cmdsts
|= CMDSTS_OWN
;
1391 rxDescCache
.cmdsts
&= ~CMDSTS_MORE
;
1392 rxDescCache
.cmdsts
|= CMDSTS_OK
;
1393 rxDescCache
.cmdsts
&= 0xffff0000;
1394 rxDescCache
.cmdsts
+= rxPacket
->length
; //i.e. set CMDSTS_SIZE
1398 * all the driver uses these are for its own stats keeping
1399 * which we don't care about, aren't necessary for
1400 * functionality and doing this would just slow us down.
1401 * if they end up using this in a later version for
1402 * functional purposes, just undef
1404 if (rxFilterEnable
) {
1405 rxDescCache
.cmdsts
&= ~CMDSTS_DEST_MASK
;
1406 EthHdr
*eth
= rxFifoFront()->eth();
1408 rxDescCache
.cmdsts
|= CMDSTS_DEST_SELF
;
1409 if (eth
->multicast())
1410 rxDescCache
.cmdsts
|= CMDSTS_DEST_MULTI
;
1411 if (eth
->broadcast())
1412 rxDescCache
.cmdsts
|= CMDSTS_DEST_MASK
;
1416 if (extstsEnable
&& rxPacket
->ip()) {
1417 rxDescCache
.extsts
|= EXTSTS_IPPKT
;
1419 IpHdr
*ip
= rxPacket
->ip();
1420 if (ip
->ip_cksum() != 0) {
1421 DPRINTF(EthernetCksum
, "Rx IP Checksum Error\n");
1422 rxDescCache
.extsts
|= EXTSTS_IPERR
;
1424 if (rxPacket
->tcp()) {
1425 rxDescCache
.extsts
|= EXTSTS_TCPPKT
;
1427 if (ip
->tu_cksum() != 0) {
1428 DPRINTF(EthernetCksum
, "Rx TCP Checksum Error\n");
1429 rxDescCache
.extsts
|= EXTSTS_TCPERR
;
1432 } else if (rxPacket
->udp()) {
1433 rxDescCache
.extsts
|= EXTSTS_UDPPKT
;
1435 if (ip
->tu_cksum() != 0) {
1436 DPRINTF(EthernetCksum
, "Rx UDP Checksum Error\n");
1437 rxDescCache
.extsts
|= EXTSTS_UDPERR
;
1444 * the driver seems to always receive into desc buffers
1445 * of size 1514, so you never have a pkt that is split
1446 * into multiple descriptors on the receive side, so
1447 * i don't implement that case, hence the assert above.
1450 DPRINTF(EthernetDesc
,
1451 "rxDescCache: addr=%08x writeback cmdsts extsts\n",
1452 regs
.rxdp
& 0x3fffffff);
1453 DPRINTF(EthernetDesc
,
1454 "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
1455 rxDescCache
.link
, rxDescCache
.bufptr
, rxDescCache
.cmdsts
,
1456 rxDescCache
.extsts
);
1458 rxDmaAddr
= (regs
.rxdp
+ offsetof(ns_desc
, cmdsts
)) & 0x3fffffff;
1459 rxDmaData
= &(rxDescCache
.cmdsts
);
1460 rxDmaLen
= sizeof(rxDescCache
.cmdsts
) + sizeof(rxDescCache
.extsts
);
1461 rxDmaFree
= dmaDescFree
;
1464 descDmaWrBytes
+= rxDmaLen
;
1472 if (rxDmaState
!= dmaIdle
)
1475 rxPacketBufPtr
+= rxXferLen
;
1476 rxFragPtr
+= rxXferLen
;
1477 rxPktBytes
-= rxXferLen
;
1479 rxState
= rxFifoBlock
;
1483 if (rxDmaState
!= dmaIdle
)
1486 assert(rxDescCache
.cmdsts
& CMDSTS_OWN
);
1488 assert(rxPacket
== 0);
1489 devIntrPost(ISR_RXOK
);
1491 if (rxDescCache
.cmdsts
& CMDSTS_INTR
)
1492 devIntrPost(ISR_RXDESC
);
1495 DPRINTF(EthernetSM
, "Halting the RX state machine\n");
1499 rxState
= rxAdvance
;
1503 if (rxDescCache
.link
== 0) {
1504 devIntrPost(ISR_RXIDLE
);
1509 rxState
= rxDescRead
;
1510 regs
.rxdp
= rxDescCache
.link
;
1513 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1514 rxDmaData
= &rxDescCache
;
1515 rxDmaLen
= sizeof(ns_desc
);
1516 rxDmaFree
= dmaDescFree
;
1524 panic("Invalid rxState!");
1527 DPRINTF(EthernetSM
, "entering next rxState=%s\n",
1528 NsRxStateStrings
[rxState
]);
1534 * @todo do we want to schedule a future kick?
1536 DPRINTF(EthernetSM
, "rx state machine exited rxState=%s\n",
1537 NsRxStateStrings
[rxState
]);
1543 if (txFifo
.empty()) {
1544 DPRINTF(Ethernet
, "nothing to transmit\n");
1548 DPRINTF(Ethernet
, "Attempt Pkt Transmit: txFifo length=%d\n",
1549 maxTxFifoSize
- txFifoAvail
);
1550 if (interface
->sendPacket(txFifo
.front())) {
1552 if (DTRACE(Ethernet
)) {
1553 const IpHdr
*ip
= txFifo
.front()->ip();
1555 DPRINTF(Ethernet
, "ID is %d\n", ip
->id());
1556 const TcpHdr
*tcp
= txFifo
.front()->tcp();
1558 DPRINTF(Ethernet
, "Src Port=%d, Dest Port=%d\n",
1559 tcp
->sport(), tcp
->dport());
1565 DDUMP(Ethernet
, txFifo
.front()->data
, txFifo
.front()->length
);
1566 txBytes
+= txFifo
.front()->length
;
1569 txFifoAvail
+= txFifo
.front()->length
;
1571 DPRINTF(Ethernet
, "Successful Xmit! now txFifoAvail is %d\n",
1573 txFifo
.front() = NULL
;
1577 * normally do a writeback of the descriptor here, and ONLY
1578 * after that is done, send this interrupt. but since our
1579 * stuff never actually fails, just do this interrupt here,
1580 * otherwise the code has to stray from this nice format.
1581 * besides, it's functionally the same.
1583 devIntrPost(ISR_TXOK
);
1586 "May need to rethink always sending the descriptors back?\n");
1589 if (!txFifo
.empty() && !txEvent
.scheduled()) {
1590 DPRINTF(Ethernet
, "reschedule transmit\n");
1591 txEvent
.schedule(curTick
+ 1000);
1596 NSGigE::txDmaReadCopy()
1598 assert(txDmaState
== dmaReading
);
1600 physmem
->dma_read((uint8_t *)txDmaData
, txDmaAddr
, txDmaLen
);
1601 txDmaState
= dmaIdle
;
1603 DPRINTF(EthernetDMA
, "tx dma read paddr=%#x len=%d\n",
1604 txDmaAddr
, txDmaLen
);
1605 DDUMP(EthernetDMA
, txDmaData
, txDmaLen
);
// Kick off a DMA read for the transmit state machine; mirrors
// doRxDmaRead().  Note the factor comparison uses 0.0 here (dmaReadFactor
// compared as floating point) unlike the rx variant — TODO confirm
// whether that asymmetry is intentional.
// NOTE(review): the else/return around doDMA and the immediate-copy
// fast path body are missing from this extraction.
1609 NSGigE::doTxDmaRead()
1611 assert(txDmaState
== dmaIdle
|| txDmaState
== dmaReadWaiting
);
1612 txDmaState
= dmaReading
;
// Use the timing DMA interface unless this transfer was marked free.
1614 if (dmaInterface
&& !txDmaFree
) {
1615 if (dmaInterface
->busy())
1616 txDmaState
= dmaReadWaiting
;
1618 dmaInterface
->doDMA(Read
, txDmaAddr
, txDmaLen
, curTick
,
1619 &txDmaReadEvent
, true);
// No timing model configured: check for the immediate-copy case.
1623 if (dmaReadDelay
== 0 && dmaReadFactor
== 0.0) {
// Otherwise model latency: fixed delay plus a per-64-byte-chunk factor.
1628 Tick factor
= ((txDmaLen
+ ULL(63)) >> ULL(6)) * dmaReadFactor
;
1629 Tick start
= curTick
+ dmaReadDelay
+ factor
;
1630 txDmaReadEvent
.schedule(start
);
1635 NSGigE::txDmaReadDone()
1637 assert(txDmaState
== dmaReading
);
1640 // If the receive state machine has a pending DMA, let it go first
1641 if (rxDmaState
== dmaReadWaiting
|| rxDmaState
== dmaWriteWaiting
)
1648 NSGigE::txDmaWriteCopy()
1650 assert(txDmaState
== dmaWriting
);
1652 physmem
->dma_write(txDmaAddr
, (uint8_t *)txDmaData
, txDmaLen
);
1653 txDmaState
= dmaIdle
;
1655 DPRINTF(EthernetDMA
, "tx dma write paddr=%#x len=%d\n",
1656 txDmaAddr
, txDmaLen
);
1657 DDUMP(EthernetDMA
, txDmaData
, txDmaLen
);
// Kick off a DMA write for the transmit state machine; mirrors
// doRxDmaWrite() (WriteInvalidate, write delay/factor).  As in
// doTxDmaRead(), the factor is compared against 0.0 — TODO confirm the
// rx/tx asymmetry is intentional.
// NOTE(review): the else/return around doDMA and the immediate-copy
// fast path body are missing from this extraction.
1661 NSGigE::doTxDmaWrite()
1663 assert(txDmaState
== dmaIdle
|| txDmaState
== dmaWriteWaiting
);
1664 txDmaState
= dmaWriting
;
// Use the timing DMA interface unless this transfer was marked free.
1666 if (dmaInterface
&& !txDmaFree
) {
1667 if (dmaInterface
->busy())
1668 txDmaState
= dmaWriteWaiting
;
1670 dmaInterface
->doDMA(WriteInvalidate
, txDmaAddr
, txDmaLen
, curTick
,
1671 &txDmaWriteEvent
, true);
// No timing model configured: check for the immediate-copy case.
1675 if (dmaWriteDelay
== 0 && dmaWriteFactor
== 0.0) {
// Otherwise model latency: fixed delay plus a per-64-byte-chunk factor.
1680 Tick factor
= ((txDmaLen
+ ULL(63)) >> ULL(6)) * dmaWriteFactor
;
1681 Tick start
= curTick
+ dmaWriteDelay
+ factor
;
1682 txDmaWriteEvent
.schedule(start
);
1687 NSGigE::txDmaWriteDone()
1689 assert(txDmaState
== dmaWriting
);
1692 // If the receive state machine has a pending DMA, let it go first
1693 if (rxDmaState
== dmaReadWaiting
|| rxDmaState
== dmaWriteWaiting
)
1702 DPRINTF(EthernetSM
, "transmit kick txState=%s\n",
1703 NsTxStateStrings
[txState
]);
1705 if (txKickTick
> curTick
) {
1706 DPRINTF(EthernetSM
, "transmit kick exiting, can't run till %d\n",
1713 switch(txDmaState
) {
1714 case dmaReadWaiting
:
1718 case dmaWriteWaiting
:
1729 DPRINTF(EthernetSM
, "Transmit disabled. Nothing to do.\n");
1734 txState
= txDescRefr
;
1736 txDmaAddr
= regs
.txdp
& 0x3fffffff;
1737 txDmaData
= &txDescCache
+ offsetof(ns_desc
, link
);
1738 txDmaLen
= sizeof(txDescCache
.link
);
1739 txDmaFree
= dmaDescFree
;
1742 descDmaRdBytes
+= txDmaLen
;
1748 txState
= txDescRead
;
1750 txDmaAddr
= regs
.txdp
& 0x3fffffff;
1751 txDmaData
= &txDescCache
;
1752 txDmaLen
= sizeof(ns_desc
);
1753 txDmaFree
= dmaDescFree
;
1756 descDmaRdBytes
+= txDmaLen
;
1764 if (txDmaState
!= dmaIdle
)
1767 txState
= txAdvance
;
1771 if (txDmaState
!= dmaIdle
)
1774 DPRINTF(EthernetDesc
,
1775 "txDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
1776 txDescCache
.link
, txDescCache
.bufptr
, txDescCache
.cmdsts
,
1777 txDescCache
.extsts
);
1779 if (txDescCache
.cmdsts
& CMDSTS_OWN
) {
1780 txState
= txFifoBlock
;
1781 txFragPtr
= txDescCache
.bufptr
;
1782 txDescCnt
= txDescCache
.cmdsts
& CMDSTS_LEN_MASK
;
1784 devIntrPost(ISR_TXIDLE
);
1792 DPRINTF(EthernetSM
, "****starting the tx of a new packet****\n");
1793 txPacket
= new PacketData
;
1794 txPacket
->data
= new uint8_t[16384];
1795 txPacketBufPtr
= txPacket
->data
;
1798 if (txDescCnt
== 0) {
1799 DPRINTF(EthernetSM
, "the txDescCnt == 0, done with descriptor\n");
1800 if (txDescCache
.cmdsts
& CMDSTS_MORE
) {
1801 DPRINTF(EthernetSM
, "there are more descriptors to come\n");
1802 txState
= txDescWrite
;
1804 txDescCache
.cmdsts
&= ~CMDSTS_OWN
;
1806 txDmaAddr
= regs
.txdp
+ offsetof(ns_desc
, cmdsts
);
1807 txDmaAddr
&= 0x3fffffff;
1808 txDmaData
= &(txDescCache
.cmdsts
);
1809 txDmaLen
= sizeof(txDescCache
.cmdsts
);
1810 txDmaFree
= dmaDescFree
;
1815 } else { /* this packet is totally done */
1816 DPRINTF(EthernetSM
, "This packet is done, let's wrap it up\n");
1817 /* deal with the the packet that just finished */
1818 if ((regs
.vtcr
& VTCR_PPCHK
) && extstsEnable
) {
1819 IpHdr
*ip
= txPacket
->ip();
1820 if (txDescCache
.extsts
& EXTSTS_UDPPKT
) {
1821 UdpHdr
*udp
= txPacket
->udp();
1823 udp
->sum(ip
->tu_cksum());
1825 } else if (txDescCache
.extsts
& EXTSTS_TCPPKT
) {
1826 TcpHdr
*tcp
= txPacket
->tcp();
1828 tcp
->sum(ip
->tu_cksum());
1831 if (txDescCache
.extsts
& EXTSTS_IPPKT
) {
1833 ip
->sum(ip
->ip_cksum());
1838 txPacket
->length
= txPacketBufPtr
- txPacket
->data
;
1839 // this is just because the receive can't handle a
1840 // packet bigger want to make sure
1841 assert(txPacket
->length
<= 1514);
1842 txFifo
.push_back(txPacket
);
1845 * this following section is not tqo spec, but
1846 * functionally shouldn't be any different. normally,
1847 * the chip will wait til the transmit has occurred
1848 * before writing back the descriptor because it has
1849 * to wait to see that it was successfully transmitted
1850 * to decide whether to set CMDSTS_OK or not.
1851 * however, in the simulator since it is always
1852 * successfully transmitted, and writing it exactly to
1853 * spec would complicate the code, we just do it here
1856 txDescCache
.cmdsts
&= ~CMDSTS_OWN
;
1857 txDescCache
.cmdsts
|= CMDSTS_OK
;
1859 DPRINTF(EthernetDesc
,
1860 "txDesc writeback: cmdsts=%08x extsts=%08x\n",
1861 txDescCache
.cmdsts
, txDescCache
.extsts
);
1863 txDmaAddr
= regs
.txdp
+ offsetof(ns_desc
, cmdsts
);
1864 txDmaAddr
&= 0x3fffffff;
1865 txDmaData
= &(txDescCache
.cmdsts
);
1866 txDmaLen
= sizeof(txDescCache
.cmdsts
) +
1867 sizeof(txDescCache
.extsts
);
1868 txDmaFree
= dmaDescFree
;
1871 descDmaWrBytes
+= txDmaLen
;
1877 DPRINTF(EthernetSM
, "halting TX state machine\n");
1881 txState
= txAdvance
;
1887 DPRINTF(EthernetSM
, "this descriptor isn't done yet\n");
1889 txState
= txFragRead
;
1892 * The number of bytes transferred is either whatever
1893 * is left in the descriptor (txDescCnt), or if there
1894 * is not enough room in the fifo, just whatever room
1895 * is left in the fifo
1897 txXferLen
= min
<uint32_t>(txDescCnt
, txFifoAvail
);
1899 txDmaAddr
= txFragPtr
& 0x3fffffff;
1900 txDmaData
= txPacketBufPtr
;
1901 txDmaLen
= txXferLen
;
1902 txDmaFree
= dmaDataFree
;
1907 txState
= txFifoBlock
;
1917 if (txDmaState
!= dmaIdle
)
1920 txPacketBufPtr
+= txXferLen
;
1921 txFragPtr
+= txXferLen
;
1922 txDescCnt
-= txXferLen
;
1923 txFifoAvail
-= txXferLen
;
1925 txState
= txFifoBlock
;
1929 if (txDmaState
!= dmaIdle
)
1932 if (txDescCache
.cmdsts
& CMDSTS_INTR
)
1933 devIntrPost(ISR_TXDESC
);
1935 txState
= txAdvance
;
1939 if (txDescCache
.link
== 0) {
1940 devIntrPost(ISR_TXIDLE
);
1944 txState
= txDescRead
;
1945 regs
.txdp
= txDescCache
.link
;
1948 txDmaAddr
= txDescCache
.link
& 0x3fffffff;
1949 txDmaData
= &txDescCache
;
1950 txDmaLen
= sizeof(ns_desc
);
1951 txDmaFree
= dmaDescFree
;
1959 panic("invalid state");
1962 DPRINTF(EthernetSM
, "entering next txState=%s\n",
1963 NsTxStateStrings
[txState
]);
1969 * @todo do we want to schedule a future kick?
1971 DPRINTF(EthernetSM
, "tx state machine exited txState=%s\n",
1972 NsTxStateStrings
[txState
]);
1976 NSGigE::transferDone()
1978 if (txFifo
.empty()) {
1979 DPRINTF(Ethernet
, "transfer complete: txFifo empty...nothing to do\n");
1983 DPRINTF(Ethernet
, "transfer complete: data in txFifo...schedule xmit\n");
1985 if (txEvent
.scheduled())
1986 txEvent
.reschedule(curTick
+ 1);
1988 txEvent
.schedule(curTick
+ 1);
1992 NSGigE::rxFilter(PacketPtr packet
)
1997 EthHdr
*eth
= packet
->eth();
1998 if (eth
->unicast()) {
1999 // If we're accepting all unicast addresses
2003 // If we make a perfect match
2004 if (acceptPerfect
&&
2005 memcmp(rom
.perfectMatch
, packet
->data
, EADDR_LEN
) == 0)
2008 if (acceptArp
&& eth
->type() == ETH_TYPE_ARP
)
2011 } else if (eth
->broadcast()) {
2012 // if we're accepting broadcasts
2013 if (acceptBroadcast
)
2016 } else if (eth
->multicast()) {
2017 // if we're accepting all multicasts
2018 if (acceptMulticast
)
2024 DPRINTF(Ethernet
, "rxFilter drop\n");
2025 DDUMP(EthernetData
, packet
->data
, packet
->length
);
2032 NSGigE::recvPacket(PacketPtr packet
)
2034 rxBytes
+= packet
->length
;
2037 DPRINTF(Ethernet
, "Receiving packet from wire, rxFifoAvail=%d\n",
2038 maxRxFifoSize
- rxFifoCnt
);
2041 DPRINTF(Ethernet
, "receive disabled...packet dropped\n");
2043 interface
->recvDone();
2047 if (rxFilterEnable
&& rxFilter(packet
)) {
2048 DPRINTF(Ethernet
, "packet filtered...dropped\n");
2049 interface
->recvDone();
2053 if ((rxFifoCnt
+ packet
->length
) >= maxRxFifoSize
) {
2055 "packet will not fit in receive buffer...packet dropped\n");
2056 devIntrPost(ISR_RXORN
);
2060 rxFifo
.push_back(packet
);
2061 rxFifoCnt
+= packet
->length
;
2062 interface
->recvDone();
2068 //=====================================================================
2072 NSGigE::serialize(ostream
&os
)
2074 // Serialize the PciDev base class
2075 PciDev::serialize(os
);
2078 * Finalize any DMA events now.
2080 if (rxDmaReadEvent
.scheduled())
2082 if (rxDmaWriteEvent
.scheduled())
2084 if (txDmaReadEvent
.scheduled())
2086 if (txDmaWriteEvent
.scheduled())
2090 * Serialize the device registers
2092 SERIALIZE_SCALAR(regs
.command
);
2093 SERIALIZE_SCALAR(regs
.config
);
2094 SERIALIZE_SCALAR(regs
.mear
);
2095 SERIALIZE_SCALAR(regs
.ptscr
);
2096 SERIALIZE_SCALAR(regs
.isr
);
2097 SERIALIZE_SCALAR(regs
.imr
);
2098 SERIALIZE_SCALAR(regs
.ier
);
2099 SERIALIZE_SCALAR(regs
.ihr
);
2100 SERIALIZE_SCALAR(regs
.txdp
);
2101 SERIALIZE_SCALAR(regs
.txdp_hi
);
2102 SERIALIZE_SCALAR(regs
.txcfg
);
2103 SERIALIZE_SCALAR(regs
.gpior
);
2104 SERIALIZE_SCALAR(regs
.rxdp
);
2105 SERIALIZE_SCALAR(regs
.rxdp_hi
);
2106 SERIALIZE_SCALAR(regs
.rxcfg
);
2107 SERIALIZE_SCALAR(regs
.pqcr
);
2108 SERIALIZE_SCALAR(regs
.wcsr
);
2109 SERIALIZE_SCALAR(regs
.pcr
);
2110 SERIALIZE_SCALAR(regs
.rfcr
);
2111 SERIALIZE_SCALAR(regs
.rfdr
);
2112 SERIALIZE_SCALAR(regs
.srr
);
2113 SERIALIZE_SCALAR(regs
.mibc
);
2114 SERIALIZE_SCALAR(regs
.vrcr
);
2115 SERIALIZE_SCALAR(regs
.vtcr
);
2116 SERIALIZE_SCALAR(regs
.vdr
);
2117 SERIALIZE_SCALAR(regs
.ccsr
);
2118 SERIALIZE_SCALAR(regs
.tbicr
);
2119 SERIALIZE_SCALAR(regs
.tbisr
);
2120 SERIALIZE_SCALAR(regs
.tanar
);
2121 SERIALIZE_SCALAR(regs
.tanlpar
);
2122 SERIALIZE_SCALAR(regs
.taner
);
2123 SERIALIZE_SCALAR(regs
.tesr
);
2125 SERIALIZE_ARRAY(rom
.perfectMatch
, EADDR_LEN
);
2127 SERIALIZE_SCALAR(ioEnable
);
2130 * Serialize the data Fifos
2132 int txNumPkts
= txFifo
.size();
2133 SERIALIZE_SCALAR(txNumPkts
);
2135 pktiter_t end
= txFifo
.end();
2136 for (pktiter_t p
= txFifo
.begin(); p
!= end
; ++p
) {
2137 nameOut(os
, csprintf("%s.txFifo%d", name(), i
++));
2138 (*p
)->serialize(os
);
2141 int rxNumPkts
= rxFifo
.size();
2142 SERIALIZE_SCALAR(rxNumPkts
);
2145 for (pktiter_t p
= rxFifo
.begin(); p
!= end
; ++p
) {
2146 nameOut(os
, csprintf("%s.rxFifo%d", name(), i
++));
2147 (*p
)->serialize(os
);
2151 * Serialize the various helper variables
2153 bool txPacketExists
= txPacket
;
2154 SERIALIZE_SCALAR(txPacketExists
);
2155 if (txPacketExists
) {
2156 nameOut(os
, csprintf("%s.txPacket", name()));
2157 txPacket
->serialize(os
);
2158 uint32_t txPktBufPtr
= (uint32_t) (txPacketBufPtr
- txPacket
->data
);
2159 SERIALIZE_SCALAR(txPktBufPtr
);
2162 bool rxPacketExists
= rxPacket
;
2163 SERIALIZE_SCALAR(rxPacketExists
);
2164 if (rxPacketExists
) {
2165 nameOut(os
, csprintf("%s.rxPacket", name()));
2166 rxPacket
->serialize(os
);
2167 uint32_t rxPktBufPtr
= (uint32_t) (rxPacketBufPtr
- rxPacket
->data
);
2168 SERIALIZE_SCALAR(rxPktBufPtr
);
2171 SERIALIZE_SCALAR(txXferLen
);
2172 SERIALIZE_SCALAR(rxXferLen
);
2175 * Serialize DescCaches
2177 SERIALIZE_SCALAR(txDescCache
.link
);
2178 SERIALIZE_SCALAR(txDescCache
.bufptr
);
2179 SERIALIZE_SCALAR(txDescCache
.cmdsts
);
2180 SERIALIZE_SCALAR(txDescCache
.extsts
);
2181 SERIALIZE_SCALAR(rxDescCache
.link
);
2182 SERIALIZE_SCALAR(rxDescCache
.bufptr
);
2183 SERIALIZE_SCALAR(rxDescCache
.cmdsts
);
2184 SERIALIZE_SCALAR(rxDescCache
.extsts
);
2187 * Serialize tx state machine
2189 int txState
= this->txState
;
2190 SERIALIZE_SCALAR(txState
);
2191 SERIALIZE_SCALAR(txEnable
);
2192 SERIALIZE_SCALAR(CTDD
);
2193 SERIALIZE_SCALAR(txFifoAvail
);
2194 SERIALIZE_SCALAR(txFragPtr
);
2195 SERIALIZE_SCALAR(txDescCnt
);
2196 int txDmaState
= this->txDmaState
;
2197 SERIALIZE_SCALAR(txDmaState
);
2200 * Serialize rx state machine
2202 int rxState
= this->rxState
;
2203 SERIALIZE_SCALAR(rxState
);
2204 SERIALIZE_SCALAR(rxEnable
);
2205 SERIALIZE_SCALAR(CRDD
);
2206 SERIALIZE_SCALAR(rxPktBytes
);
2207 SERIALIZE_SCALAR(rxFifoCnt
);
2208 SERIALIZE_SCALAR(rxDescCnt
);
2209 int rxDmaState
= this->rxDmaState
;
2210 SERIALIZE_SCALAR(rxDmaState
);
2212 SERIALIZE_SCALAR(extstsEnable
);
2215 * If there's a pending transmit, store the time so we can
2216 * reschedule it later
2218 Tick transmitTick
= txEvent
.scheduled() ? txEvent
.when() - curTick
: 0;
2219 SERIALIZE_SCALAR(transmitTick
);
2222 * receive address filter settings
2224 SERIALIZE_SCALAR(rxFilterEnable
);
2225 SERIALIZE_SCALAR(acceptBroadcast
);
2226 SERIALIZE_SCALAR(acceptMulticast
);
2227 SERIALIZE_SCALAR(acceptUnicast
);
2228 SERIALIZE_SCALAR(acceptPerfect
);
2229 SERIALIZE_SCALAR(acceptArp
);
2232 * Keep track of pending interrupt status.
2234 SERIALIZE_SCALAR(intrTick
);
2235 SERIALIZE_SCALAR(cpuPendingIntr
);
2236 Tick intrEventTick
= 0;
2238 intrEventTick
= intrEvent
->when();
2239 SERIALIZE_SCALAR(intrEventTick
);
2244 NSGigE::unserialize(Checkpoint
*cp
, const std::string
§ion
)
2246 // Unserialize the PciDev base class
2247 PciDev::unserialize(cp
, section
);
2249 UNSERIALIZE_SCALAR(regs
.command
);
2250 UNSERIALIZE_SCALAR(regs
.config
);
2251 UNSERIALIZE_SCALAR(regs
.mear
);
2252 UNSERIALIZE_SCALAR(regs
.ptscr
);
2253 UNSERIALIZE_SCALAR(regs
.isr
);
2254 UNSERIALIZE_SCALAR(regs
.imr
);
2255 UNSERIALIZE_SCALAR(regs
.ier
);
2256 UNSERIALIZE_SCALAR(regs
.ihr
);
2257 UNSERIALIZE_SCALAR(regs
.txdp
);
2258 UNSERIALIZE_SCALAR(regs
.txdp_hi
);
2259 UNSERIALIZE_SCALAR(regs
.txcfg
);
2260 UNSERIALIZE_SCALAR(regs
.gpior
);
2261 UNSERIALIZE_SCALAR(regs
.rxdp
);
2262 UNSERIALIZE_SCALAR(regs
.rxdp_hi
);
2263 UNSERIALIZE_SCALAR(regs
.rxcfg
);
2264 UNSERIALIZE_SCALAR(regs
.pqcr
);
2265 UNSERIALIZE_SCALAR(regs
.wcsr
);
2266 UNSERIALIZE_SCALAR(regs
.pcr
);
2267 UNSERIALIZE_SCALAR(regs
.rfcr
);
2268 UNSERIALIZE_SCALAR(regs
.rfdr
);
2269 UNSERIALIZE_SCALAR(regs
.srr
);
2270 UNSERIALIZE_SCALAR(regs
.mibc
);
2271 UNSERIALIZE_SCALAR(regs
.vrcr
);
2272 UNSERIALIZE_SCALAR(regs
.vtcr
);
2273 UNSERIALIZE_SCALAR(regs
.vdr
);
2274 UNSERIALIZE_SCALAR(regs
.ccsr
);
2275 UNSERIALIZE_SCALAR(regs
.tbicr
);
2276 UNSERIALIZE_SCALAR(regs
.tbisr
);
2277 UNSERIALIZE_SCALAR(regs
.tanar
);
2278 UNSERIALIZE_SCALAR(regs
.tanlpar
);
2279 UNSERIALIZE_SCALAR(regs
.taner
);
2280 UNSERIALIZE_SCALAR(regs
.tesr
);
2282 UNSERIALIZE_ARRAY(rom
.perfectMatch
, EADDR_LEN
);
2284 UNSERIALIZE_SCALAR(ioEnable
);
2287 * unserialize the data fifos
2290 UNSERIALIZE_SCALAR(txNumPkts
);
2292 for (i
= 0; i
< txNumPkts
; ++i
) {
2293 PacketPtr p
= new PacketData
;
2294 p
->unserialize(cp
, csprintf("%s.rxFifo%d", section
, i
));
2295 txFifo
.push_back(p
);
2299 UNSERIALIZE_SCALAR(rxNumPkts
);
2300 for (i
= 0; i
< rxNumPkts
; ++i
) {
2301 PacketPtr p
= new PacketData
;
2302 p
->unserialize(cp
, csprintf("%s.rxFifo%d", section
, i
));
2303 rxFifo
.push_back(p
);
2307 * unserialize the various helper variables
2309 bool txPacketExists
;
2310 UNSERIALIZE_SCALAR(txPacketExists
);
2311 if (txPacketExists
) {
2312 txPacket
= new PacketData
;
2313 txPacket
->unserialize(cp
, csprintf("%s.txPacket", section
));
2314 uint32_t txPktBufPtr
;
2315 UNSERIALIZE_SCALAR(txPktBufPtr
);
2316 txPacketBufPtr
= (uint8_t *) txPacket
->data
+ txPktBufPtr
;
2320 bool rxPacketExists
;
2321 UNSERIALIZE_SCALAR(rxPacketExists
);
2323 if (rxPacketExists
) {
2324 rxPacket
= new PacketData
;
2325 rxPacket
->unserialize(cp
, csprintf("%s.rxPacket", section
));
2326 uint32_t rxPktBufPtr
;
2327 UNSERIALIZE_SCALAR(rxPktBufPtr
);
2328 rxPacketBufPtr
= (uint8_t *) rxPacket
->data
+ rxPktBufPtr
;
2332 UNSERIALIZE_SCALAR(txXferLen
);
2333 UNSERIALIZE_SCALAR(rxXferLen
);
2336 * Unserialize DescCaches
2338 UNSERIALIZE_SCALAR(txDescCache
.link
);
2339 UNSERIALIZE_SCALAR(txDescCache
.bufptr
);
2340 UNSERIALIZE_SCALAR(txDescCache
.cmdsts
);
2341 UNSERIALIZE_SCALAR(txDescCache
.extsts
);
2342 UNSERIALIZE_SCALAR(rxDescCache
.link
);
2343 UNSERIALIZE_SCALAR(rxDescCache
.bufptr
);
2344 UNSERIALIZE_SCALAR(rxDescCache
.cmdsts
);
2345 UNSERIALIZE_SCALAR(rxDescCache
.extsts
);
2348 * unserialize tx state machine
2351 UNSERIALIZE_SCALAR(txState
);
2352 this->txState
= (TxState
) txState
;
2353 UNSERIALIZE_SCALAR(txEnable
);
2354 UNSERIALIZE_SCALAR(CTDD
);
2355 UNSERIALIZE_SCALAR(txFifoAvail
);
2356 UNSERIALIZE_SCALAR(txFragPtr
);
2357 UNSERIALIZE_SCALAR(txDescCnt
);
2359 UNSERIALIZE_SCALAR(txDmaState
);
2360 this->txDmaState
= (DmaState
) txDmaState
;
2363 * unserialize rx state machine
2366 UNSERIALIZE_SCALAR(rxState
);
2367 this->rxState
= (RxState
) rxState
;
2368 UNSERIALIZE_SCALAR(rxEnable
);
2369 UNSERIALIZE_SCALAR(CRDD
);
2370 UNSERIALIZE_SCALAR(rxPktBytes
);
2371 UNSERIALIZE_SCALAR(rxFifoCnt
);
2372 UNSERIALIZE_SCALAR(rxDescCnt
);
2374 UNSERIALIZE_SCALAR(rxDmaState
);
2375 this->rxDmaState
= (DmaState
) rxDmaState
;
2377 UNSERIALIZE_SCALAR(extstsEnable
);
2380 * If there's a pending transmit, reschedule it now
2383 UNSERIALIZE_SCALAR(transmitTick
);
2385 txEvent
.schedule(curTick
+ transmitTick
);
2388 * unserialize receive address filter settings
2390 UNSERIALIZE_SCALAR(rxFilterEnable
);
2391 UNSERIALIZE_SCALAR(acceptBroadcast
);
2392 UNSERIALIZE_SCALAR(acceptMulticast
);
2393 UNSERIALIZE_SCALAR(acceptUnicast
);
2394 UNSERIALIZE_SCALAR(acceptPerfect
);
2395 UNSERIALIZE_SCALAR(acceptArp
);
2398 * Keep track of pending interrupt status.
2400 UNSERIALIZE_SCALAR(intrTick
);
2401 UNSERIALIZE_SCALAR(cpuPendingIntr
);
2403 UNSERIALIZE_SCALAR(intrEventTick
);
2404 if (intrEventTick
) {
2405 intrEvent
= new IntrEvent(this, true);
2406 intrEvent
->schedule(intrEventTick
);
2410 * re-add addrRanges to bus bridges
2413 pioInterface
->addAddrRange(BARAddrs
[0], BARAddrs
[0] + BARSize
[0] - 1);
2414 pioInterface
->addAddrRange(BARAddrs
[1], BARAddrs
[1] + BARSize
[1] - 1);
2419 NSGigE::cacheAccess(MemReqPtr
&req
)
2421 DPRINTF(EthernetPIO
, "timing access to paddr=%#x (daddr=%#x)\n",
2422 req
->paddr
, req
->paddr
- addr
);
2423 return curTick
+ pioLatency
;
2426 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt
)
2428 SimObjectParam
<EtherInt
*> peer
;
2429 SimObjectParam
<NSGigE
*> device
;
2431 END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt
)
2433 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt
)
2435 INIT_PARAM_DFLT(peer
, "peer interface", NULL
),
2436 INIT_PARAM(device
, "Ethernet device of this interface")
2438 END_INIT_SIM_OBJECT_PARAMS(NSGigEInt
)
2440 CREATE_SIM_OBJECT(NSGigEInt
)
2442 NSGigEInt
*dev_int
= new NSGigEInt(getInstanceName(), device
);
2444 EtherInt
*p
= (EtherInt
*)peer
;
2446 dev_int
->setPeer(p
);
2447 p
->setPeer(dev_int
);
2453 REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt
)
2456 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE
)
2458 Param
<Tick
> tx_delay
;
2459 Param
<Tick
> rx_delay
;
2460 SimObjectParam
<IntrControl
*> intr_ctrl
;
2461 Param
<Tick
> intr_delay
;
2462 SimObjectParam
<MemoryController
*> mmu
;
2463 SimObjectParam
<PhysicalMemory
*> physmem
;
2464 Param
<bool> rx_filter
;
2465 Param
<string
> hardware_address
;
2466 SimObjectParam
<Bus
*> header_bus
;
2467 SimObjectParam
<Bus
*> payload_bus
;
2468 SimObjectParam
<HierParams
*> hier
;
2469 Param
<Tick
> pio_latency
;
2470 Param
<bool> dma_desc_free
;
2471 Param
<bool> dma_data_free
;
2472 Param
<Tick
> dma_read_delay
;
2473 Param
<Tick
> dma_write_delay
;
2474 Param
<Tick
> dma_read_factor
;
2475 Param
<Tick
> dma_write_factor
;
2476 SimObjectParam
<PciConfigAll
*> configspace
;
2477 SimObjectParam
<PciConfigData
*> configdata
;
2478 SimObjectParam
<Tsunami
*> tsunami
;
2479 Param
<uint32_t> pci_bus
;
2480 Param
<uint32_t> pci_dev
;
2481 Param
<uint32_t> pci_func
;
2482 Param
<uint32_t> tx_fifo_size
;
2483 Param
<uint32_t> rx_fifo_size
;
2485 END_DECLARE_SIM_OBJECT_PARAMS(NSGigE
)
2487 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE
)
2489 INIT_PARAM_DFLT(tx_delay
, "Transmit Delay", 1000),
2490 INIT_PARAM_DFLT(rx_delay
, "Receive Delay", 1000),
2491 INIT_PARAM(intr_ctrl
, "Interrupt Controller"),
2492 INIT_PARAM_DFLT(intr_delay
, "Interrupt Delay in microseconds", 0),
2493 INIT_PARAM(mmu
, "Memory Controller"),
2494 INIT_PARAM(physmem
, "Physical Memory"),
2495 INIT_PARAM_DFLT(rx_filter
, "Enable Receive Filter", true),
2496 INIT_PARAM_DFLT(hardware_address
, "Ethernet Hardware Address",
2497 "00:99:00:00:00:01"),
2498 INIT_PARAM_DFLT(header_bus
, "The IO Bus to attach to for headers", NULL
),
2499 INIT_PARAM_DFLT(payload_bus
, "The IO Bus to attach to for payload", NULL
),
2500 INIT_PARAM_DFLT(hier
, "Hierarchy global variables", &defaultHierParams
),
2501 INIT_PARAM_DFLT(pio_latency
, "Programmed IO latency in bus cycles", 1),
2502 INIT_PARAM_DFLT(dma_desc_free
, "DMA of Descriptors is free", false),
2503 INIT_PARAM_DFLT(dma_data_free
, "DMA of Data is free", false),
2504 INIT_PARAM_DFLT(dma_read_delay
, "fixed delay for dma reads", 0),
2505 INIT_PARAM_DFLT(dma_write_delay
, "fixed delay for dma writes", 0),
2506 INIT_PARAM_DFLT(dma_read_factor
, "multiplier for dma reads", 0),
2507 INIT_PARAM_DFLT(dma_write_factor
, "multiplier for dma writes", 0),
2508 INIT_PARAM(configspace
, "PCI Configspace"),
2509 INIT_PARAM(configdata
, "PCI Config data"),
2510 INIT_PARAM(tsunami
, "Tsunami"),
2511 INIT_PARAM(pci_bus
, "PCI bus"),
2512 INIT_PARAM(pci_dev
, "PCI device number"),
2513 INIT_PARAM(pci_func
, "PCI function code"),
2514 INIT_PARAM_DFLT(tx_fifo_size
, "max size in bytes of txFifo", 131072),
2515 INIT_PARAM_DFLT(rx_fifo_size
, "max size in bytes of rxFifo", 131072)
2517 END_INIT_SIM_OBJECT_PARAMS(NSGigE
)
2520 CREATE_SIM_OBJECT(NSGigE
)
2523 sscanf(((string
)hardware_address
).c_str(), "%x:%x:%x:%x:%x:%x",
2524 &eaddr
[0], &eaddr
[1], &eaddr
[2], &eaddr
[3], &eaddr
[4], &eaddr
[5]);
2526 return new NSGigE(getInstanceName(), intr_ctrl
, intr_delay
,
2527 physmem
, tx_delay
, rx_delay
, mmu
, hier
, header_bus
,
2528 payload_bus
, pio_latency
, dma_desc_free
, dma_data_free
,
2529 dma_read_delay
, dma_write_delay
, dma_read_factor
,
2530 dma_write_factor
, configspace
, configdata
,
2531 tsunami
, pci_bus
, pci_dev
, pci_func
, rx_filter
, eaddr
,
2532 tx_fifo_size
, rx_fifo_size
);
2535 REGISTER_SIM_OBJECT("NSGigE", NSGigE
)