2 * Copyright (c) 2004 The Regents of The University of Michigan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 * Device module for modelling the National Semiconductor
31 * DP83820 ethernet controller. Does not support priority queueing
37 #include "base/inet.hh"
38 #include "cpu/exec_context.hh"
39 #include "cpu/intr_control.hh"
41 #include "dev/etherlink.hh"
42 #include "dev/ns_gige.hh"
43 #include "dev/pciconfigall.hh"
44 #include "mem/bus/bus.hh"
45 #include "mem/bus/dma_interface.hh"
46 #include "mem/bus/pio_interface.hh"
47 #include "mem/bus/pio_interface_impl.hh"
48 #include "mem/functional_mem/memory_control.hh"
49 #include "mem/functional_mem/physical_memory.hh"
50 #include "sim/builder.hh"
51 #include "sim/debug.hh"
52 #include "sim/host.hh"
53 #include "sim/stats.hh"
54 #include "targetarch/vtophys.hh"
56 const char *NsRxStateStrings
[] =
67 const char *NsTxStateStrings
[] =
78 const char *NsDmaState
[] =
90 ///////////////////////////////////////////////////////////////////////
94 NSGigE::NSGigE(Params
*p
)
95 : PciDev(p
), ioEnable(false),
96 txFifo(p
->tx_fifo_size
), rxFifo(p
->rx_fifo_size
),
97 txPacket(0), rxPacket(0), txPacketBufPtr(NULL
), rxPacketBufPtr(NULL
),
98 txXferLen(0), rxXferLen(0), txState(txIdle
), txEnable(false),
100 txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle
), rxState(rxIdle
),
101 rxEnable(false), CRDD(false), rxPktBytes(0),
102 rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle
), extstsEnable(false),
103 rxDmaReadEvent(this), rxDmaWriteEvent(this),
104 txDmaReadEvent(this), txDmaWriteEvent(this),
105 dmaDescFree(p
->dma_desc_free
), dmaDataFree(p
->dma_data_free
),
106 txDelay(p
->tx_delay
), rxDelay(p
->rx_delay
),
107 rxKickTick(0), txKickTick(0),
108 txEvent(this), rxFilterEnable(p
->rx_filter
), acceptBroadcast(false),
109 acceptMulticast(false), acceptUnicast(false),
110 acceptPerfect(false), acceptArp(false),
111 physmem(p
->pmem
), intrTick(0), cpuPendingIntr(false),
112 intrEvent(0), interface(0)
115 pioInterface
= newPioInterface(name(), p
->hier
,
117 &NSGigE::cacheAccess
);
119 pioLatency
= p
->pio_latency
* p
->header_bus
->clockRatio
;
122 dmaInterface
= new DMAInterface
<Bus
>(name() + ".dma",
126 dmaInterface
= new DMAInterface
<Bus
>(name() + ".dma",
129 } else if (p
->payload_bus
) {
130 pioInterface
= newPioInterface(name(), p
->hier
,
131 p
->payload_bus
, this,
132 &NSGigE::cacheAccess
);
134 pioLatency
= p
->pio_latency
* p
->payload_bus
->clockRatio
;
136 dmaInterface
= new DMAInterface
<Bus
>(name() + ".dma",
142 intrDelay
= US2Ticks(p
->intr_delay
);
143 dmaReadDelay
= p
->dma_read_delay
;
144 dmaWriteDelay
= p
->dma_write_delay
;
145 dmaReadFactor
= p
->dma_read_factor
;
146 dmaWriteFactor
= p
->dma_write_factor
;
149 memcpy(&rom
.perfectMatch
, p
->eaddr
.bytes(), ETH_ADDR_LEN
);
159 .name(name() + ".txBytes")
160 .desc("Bytes Transmitted")
165 .name(name() + ".rxBytes")
166 .desc("Bytes Received")
171 .name(name() + ".txPackets")
172 .desc("Number of Packets Transmitted")
177 .name(name() + ".rxPackets")
178 .desc("Number of Packets Received")
183 .name(name() + ".txIpChecksums")
184 .desc("Number of tx IP Checksums done by device")
190 .name(name() + ".rxIpChecksums")
191 .desc("Number of rx IP Checksums done by device")
197 .name(name() + ".txTcpChecksums")
198 .desc("Number of tx TCP Checksums done by device")
204 .name(name() + ".rxTcpChecksums")
205 .desc("Number of rx TCP Checksums done by device")
211 .name(name() + ".txUdpChecksums")
212 .desc("Number of tx UDP Checksums done by device")
218 .name(name() + ".rxUdpChecksums")
219 .desc("Number of rx UDP Checksums done by device")
225 .name(name() + ".descDMAReads")
226 .desc("Number of descriptors the device read w/ DMA")
231 .name(name() + ".descDMAWrites")
232 .desc("Number of descriptors the device wrote w/ DMA")
237 .name(name() + ".descDmaReadBytes")
238 .desc("number of descriptor bytes read w/ DMA")
243 .name(name() + ".descDmaWriteBytes")
244 .desc("number of descriptor bytes write w/ DMA")
250 .name(name() + ".txBandwidth")
251 .desc("Transmit Bandwidth (bits/s)")
257 .name(name() + ".rxBandwidth")
258 .desc("Receive Bandwidth (bits/s)")
264 .name(name() + ".txPPS")
265 .desc("Packet Tranmission Rate (packets/s)")
271 .name(name() + ".rxPPS")
272 .desc("Packet Reception Rate (packets/s)")
277 txBandwidth
= txBytes
* Stats::constant(8) / simSeconds
;
278 rxBandwidth
= rxBytes
* Stats::constant(8) / simSeconds
;
279 txPacketRate
= txPackets
/ simSeconds
;
280 rxPacketRate
= rxPackets
/ simSeconds
;
284 * This is to read the PCI general configuration registers
287 NSGigE::ReadConfig(int offset
, int size
, uint8_t *data
)
289 if (offset
< PCI_DEVICE_SPECIFIC
)
290 PciDev::ReadConfig(offset
, size
, data
);
292 panic("Device specific PCI config space not implemented!\n");
296 * This is to write to the PCI general configuration registers
299 NSGigE::WriteConfig(int offset
, int size
, uint32_t data
)
301 if (offset
< PCI_DEVICE_SPECIFIC
)
302 PciDev::WriteConfig(offset
, size
, data
);
304 panic("Device specific PCI config space not implemented!\n");
306 // Need to catch writes to BARs to update the PIO interface
308 // seems to work fine without all these PCI settings, but i
309 // put in the IO to double check, an assertion will fail if we
310 // need to properly implement it
312 if (config
.data
[offset
] & PCI_CMD_IOSE
)
318 if (config
.data
[offset
] & PCI_CMD_BME
) {
325 if (config
.data
[offset
] & PCI_CMD_MSE
) {
334 case PCI0_BASE_ADDR0
:
335 if (BARAddrs
[0] != 0) {
337 pioInterface
->addAddrRange(RangeSize(BARAddrs
[0], BARSize
[0]));
339 BARAddrs
[0] &= EV5::PAddrUncachedMask
;
342 case PCI0_BASE_ADDR1
:
343 if (BARAddrs
[1] != 0) {
345 pioInterface
->addAddrRange(RangeSize(BARAddrs
[1], BARSize
[1]));
347 BARAddrs
[1] &= EV5::PAddrUncachedMask
;
354 * This reads the device registers, which are detailed in the NS83820
358 NSGigE::read(MemReqPtr
&req
, uint8_t *data
)
362 //The mask is to give you only the offset into the device register file
363 Addr daddr
= req
->paddr
& 0xfff;
364 DPRINTF(EthernetPIO
, "read da=%#x pa=%#x va=%#x size=%d\n",
365 daddr
, req
->paddr
, req
->vaddr
, req
->size
);
368 // there are some reserved registers, you can see ns_gige_reg.h and
369 // the spec sheet for details
370 if (daddr
> LAST
&& daddr
<= RESERVED
) {
371 panic("Accessing reserved register");
372 } else if (daddr
> RESERVED
&& daddr
<= 0x3FC) {
373 ReadConfig(daddr
& 0xff, req
->size
, data
);
375 } else if (daddr
>= MIB_START
&& daddr
<= MIB_END
) {
376 // don't implement all the MIB's. hopefully the kernel
377 // doesn't actually DEPEND upon their values
378 // MIB are just hardware stats keepers
379 uint32_t ®
= *(uint32_t *) data
;
382 } else if (daddr
> 0x3FC)
383 panic("Something is messed up!\n");
386 case sizeof(uint32_t):
388 uint32_t ®
= *(uint32_t *)data
;
393 //these are supposed to be cleared on a read
394 reg
&= ~(CR_RXD
| CR_TXD
| CR_TXR
| CR_RXR
);
411 devIntrClear(ISR_ALL
);
466 // see the spec sheet for how RFCR and RFDR work
467 // basically, you write to RFCR to tell the machine
468 // what you want to do next, then you act upon RFDR,
469 // and the device will be prepared b/c of what you
476 switch (regs
.rfcr
& RFCR_RFADDR
) {
478 reg
= rom
.perfectMatch
[1];
480 reg
+= rom
.perfectMatch
[0];
483 reg
= rom
.perfectMatch
[3] << 8;
484 reg
+= rom
.perfectMatch
[2];
487 reg
= rom
.perfectMatch
[5] << 8;
488 reg
+= rom
.perfectMatch
[4];
491 panic("reading RFDR for something other than PMATCH!\n");
492 // didn't implement other RFDR functionality b/c
493 // driver didn't use it
503 reg
&= ~(MIBC_MIBS
| MIBC_ACLR
);
547 panic("reading unimplemented register: addr=%#x", daddr
);
550 DPRINTF(EthernetPIO
, "read from %#x: data=%d data=%#x\n",
556 panic("accessing register with invalid size: addr=%#x, size=%d",
564 NSGigE::write(MemReqPtr
&req
, const uint8_t *data
)
568 Addr daddr
= req
->paddr
& 0xfff;
569 DPRINTF(EthernetPIO
, "write da=%#x pa=%#x va=%#x size=%d\n",
570 daddr
, req
->paddr
, req
->vaddr
, req
->size
);
572 if (daddr
> LAST
&& daddr
<= RESERVED
) {
573 panic("Accessing reserved register");
574 } else if (daddr
> RESERVED
&& daddr
<= 0x3FC) {
575 WriteConfig(daddr
& 0xff, req
->size
, *(uint32_t *)data
);
577 } else if (daddr
> 0x3FC)
578 panic("Something is messed up!\n");
580 if (req
->size
== sizeof(uint32_t)) {
581 uint32_t reg
= *(uint32_t *)data
;
582 DPRINTF(EthernetPIO
, "write data=%d data=%#x\n", reg
, reg
);
589 } else if (reg
& CR_TXE
) {
592 // the kernel is enabling the transmit machine
593 if (txState
== txIdle
)
599 } else if (reg
& CR_RXE
) {
602 if (rxState
== rxIdle
)
613 devIntrPost(ISR_SWI
);
624 if (reg
& CFG_LNKSTS
||
627 reg
& CFG_RESERVED
||
630 panic("writing to read-only or reserved CFG bits!\n");
632 regs
.config
|= reg
& ~(CFG_LNKSTS
| CFG_SPDSTS
| CFG_DUPSTS
|
633 CFG_RESERVED
| CFG_T64ADDR
| CFG_PCI64_DET
);
635 // all these #if 0's are because i don't THINK the kernel needs to
636 // have these implemented. if there is a problem relating to one of
637 // these, you may need to add functionality in.
639 if (reg
& CFG_TBI_EN
) ;
640 if (reg
& CFG_MODE_1000
) ;
643 if (reg
& CFG_AUTO_1000
)
644 panic("CFG_AUTO_1000 not implemented!\n");
647 if (reg
& CFG_PINT_DUPSTS
||
648 reg
& CFG_PINT_LNKSTS
||
649 reg
& CFG_PINT_SPDSTS
)
652 if (reg
& CFG_TMRTEST
) ;
653 if (reg
& CFG_MRM_DIS
) ;
654 if (reg
& CFG_MWI_DIS
) ;
656 if (reg
& CFG_T64ADDR
)
657 panic("CFG_T64ADDR is read only register!\n");
659 if (reg
& CFG_PCI64_DET
)
660 panic("CFG_PCI64_DET is read only register!\n");
662 if (reg
& CFG_DATA64_EN
) ;
663 if (reg
& CFG_M64ADDR
) ;
664 if (reg
& CFG_PHY_RST
) ;
665 if (reg
& CFG_PHY_DIS
) ;
668 if (reg
& CFG_EXTSTS_EN
)
671 extstsEnable
= false;
674 if (reg
& CFG_REQALG
) ;
678 if (reg
& CFG_PESEL
) ;
679 if (reg
& CFG_BROM_DIS
) ;
680 if (reg
& CFG_EXT_125
) ;
687 // since phy is completely faked, MEAR_MD* don't matter
688 // and since the driver never uses MEAR_EE*, they don't
691 if (reg
& MEAR_EEDI
) ;
692 if (reg
& MEAR_EEDO
) ; // this one is read only
693 if (reg
& MEAR_EECLK
) ;
694 if (reg
& MEAR_EESEL
) ;
695 if (reg
& MEAR_MDIO
) ;
696 if (reg
& MEAR_MDDIR
) ;
697 if (reg
& MEAR_MDC
) ;
702 regs
.ptscr
= reg
& ~(PTSCR_RBIST_RDONLY
);
703 // these control BISTs for various parts of chip - we
704 // don't care or do just fake that the BIST is done
705 if (reg
& PTSCR_RBIST_EN
)
706 regs
.ptscr
|= PTSCR_RBIST_DONE
;
707 if (reg
& PTSCR_EEBIST_EN
)
708 regs
.ptscr
&= ~PTSCR_EEBIST_EN
;
709 if (reg
& PTSCR_EELOAD_EN
)
710 regs
.ptscr
&= ~PTSCR_EELOAD_EN
;
713 case ISR
: /* writing to the ISR has no effect */
714 panic("ISR is a read only register!\n");
727 /* not going to implement real interrupt holdoff */
731 regs
.txdp
= (reg
& 0xFFFFFFFC);
732 assert(txState
== txIdle
);
743 if (reg
& TXCFG_CSI
) ;
744 if (reg
& TXCFG_HBI
) ;
745 if (reg
& TXCFG_MLB
) ;
746 if (reg
& TXCFG_ATP
) ;
747 if (reg
& TXCFG_ECRETRY
) {
749 * this could easily be implemented, but considering
750 * the network is just a fake pipe, wouldn't make
755 if (reg
& TXCFG_BRST_DIS
) ;
759 /* we handle our own DMA, ignore the kernel's exhortations */
760 if (reg
& TXCFG_MXDMA
) ;
763 // also, we currently don't care about fill/drain
764 // thresholds though this may change in the future with
765 // more realistic networks or a driver which changes it
766 // according to feedback
772 /* these just control general purpose i/o pins, don't matter */
787 if (reg
& RXCFG_AEP
) ;
788 if (reg
& RXCFG_ARP
) ;
789 if (reg
& RXCFG_STRIPCRC
) ;
790 if (reg
& RXCFG_RX_RD
) ;
791 if (reg
& RXCFG_ALP
) ;
792 if (reg
& RXCFG_AIRL
) ;
794 /* we handle our own DMA, ignore what kernel says about it */
795 if (reg
& RXCFG_MXDMA
) ;
797 //also, we currently don't care about fill/drain thresholds
798 //though this may change in the future with more realistic
799 //networks or a driver which changes it according to feedback
800 if (reg
& (RXCFG_DRTH
| RXCFG_DRTH0
)) ;
805 /* there is no priority queueing used in the linux 2.6 driver */
810 /* not going to implement wake on LAN */
815 /* not going to implement pause control */
822 rxFilterEnable
= (reg
& RFCR_RFEN
) ? true : false;
823 acceptBroadcast
= (reg
& RFCR_AAB
) ? true : false;
824 acceptMulticast
= (reg
& RFCR_AAM
) ? true : false;
825 acceptUnicast
= (reg
& RFCR_AAU
) ? true : false;
826 acceptPerfect
= (reg
& RFCR_APM
) ? true : false;
827 acceptArp
= (reg
& RFCR_AARP
) ? true : false;
831 panic("RFCR_APAT not implemented!\n");
834 if (reg
& RFCR_MHEN
|| reg
& RFCR_UHEN
)
835 panic("hash filtering not implemented!\n");
838 panic("RFCR_ULM not implemented!\n");
843 panic("the driver never writes to RFDR, something is wrong!\n");
846 panic("the driver never uses BRAR, something is wrong!\n");
849 panic("the driver never uses BRDR, something is wrong!\n");
852 panic("SRR is read only register!\n");
855 panic("the driver never uses MIBC, something is wrong!\n");
866 panic("the driver never uses VDR, something is wrong!\n");
870 /* not going to implement clockrun stuff */
876 if (reg
& TBICR_MR_LOOPBACK
)
877 panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
879 if (reg
& TBICR_MR_AN_ENABLE
) {
880 regs
.tanlpar
= regs
.tanar
;
881 regs
.tbisr
|= (TBISR_MR_AN_COMPLETE
| TBISR_MR_LINK_STATUS
);
885 if (reg
& TBICR_MR_RESTART_AN
) ;
891 panic("TBISR is read only register!\n");
896 panic("this isn't used in driver, something wrong!\n");
899 panic("this isn't used in driver, something wrong!\n");
903 panic("this should only be written to by the fake phy!\n");
906 panic("TANER is read only register!\n");
913 panic("invalid register access daddr=%#x", daddr
);
916 panic("Invalid Request Size");
923 NSGigE::devIntrPost(uint32_t interrupts
)
925 if (interrupts
& ISR_RESERVE
)
926 panic("Cannot set a reserved interrupt");
928 if (interrupts
& ISR_NOIMPL
)
929 warn("interrupt not implemented %#x\n", interrupts
);
931 interrupts
&= ~ISR_NOIMPL
;
932 regs
.isr
|= interrupts
;
934 DPRINTF(EthernetIntr
,
935 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
936 interrupts
, regs
.isr
, regs
.imr
);
938 if ((regs
.isr
& regs
.imr
)) {
940 if (!(regs
.isr
& regs
.imr
& ISR_NODELAY
))
947 NSGigE::devIntrClear(uint32_t interrupts
)
949 if (interrupts
& ISR_RESERVE
)
950 panic("Cannot clear a reserved interrupt");
952 interrupts
&= ~ISR_NOIMPL
;
953 regs
.isr
&= ~interrupts
;
955 DPRINTF(EthernetIntr
,
956 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
957 interrupts
, regs
.isr
, regs
.imr
);
959 if (!(regs
.isr
& regs
.imr
))
// Called after the device interrupt mask register (IMR) changes.
// If any interrupt currently posted in ISR is now unmasked
// (isr & imr nonzero), (re)post the interrupt to the CPU right away.
964 NSGigE::devIntrChangeMask()
966 DPRINTF(EthernetIntr
, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
967 regs
.isr
, regs
.imr
, regs
.isr
& regs
.imr
);
// An unmasked pending interrupt must be delivered now, not at the
// original post time, hence curTick.
969 if (regs
.isr
& regs
.imr
)
970 cpuIntrPost(curTick
);
976 NSGigE::cpuIntrPost(Tick when
)
978 // If the interrupt you want to post is later than an interrupt
979 // already scheduled, just let it post in the coming one and don't
981 // HOWEVER, must be sure that the scheduled intrTick is in the
982 // future (this was formerly the source of a bug)
984 * @todo this warning should be removed and the intrTick code should
987 assert(when
>= curTick
);
988 assert(intrTick
>= curTick
|| intrTick
== 0);
989 if (when
> intrTick
&& intrTick
!= 0) {
990 DPRINTF(EthernetIntr
, "don't need to schedule event...intrTick=%d\n",
996 if (intrTick
< curTick
) {
1001 DPRINTF(EthernetIntr
, "going to schedule an interrupt for intrTick=%d\n",
1005 intrEvent
->squash();
1006 intrEvent
= new IntrEvent(this, true);
1007 intrEvent
->schedule(intrTick
);
1011 NSGigE::cpuInterrupt()
1013 assert(intrTick
== curTick
);
1015 // Whether or not there's a pending interrupt, we don't care about
1020 // Don't send an interrupt if there's already one
1021 if (cpuPendingIntr
) {
1022 DPRINTF(EthernetIntr
,
1023 "would send an interrupt now, but there's already pending\n");
1026 cpuPendingIntr
= true;
1028 DPRINTF(EthernetIntr
, "posting interrupt\n");
1034 NSGigE::cpuIntrClear()
1036 if (!cpuPendingIntr
)
1040 intrEvent
->squash();
1046 cpuPendingIntr
= false;
1048 DPRINTF(EthernetIntr
, "clearing interrupt\n");
// Accessor: true while this device has an interrupt asserted toward the
// CPU (set in cpuInterrupt(), cleared in cpuIntrClear()).
1053 NSGigE::cpuIntrPending() const
1054 { return cpuPendingIntr
; }
1060 DPRINTF(Ethernet
, "transmit reset\n");
1065 assert(txDescCnt
== 0);
1068 assert(txDmaState
== dmaIdle
);
1074 DPRINTF(Ethernet
, "receive reset\n");
1077 assert(rxPktBytes
== 0);
1080 assert(rxDescCnt
== 0);
1081 assert(rxDmaState
== dmaIdle
);
1089 memset(®s
, 0, sizeof(regs
));
1090 regs
.config
= CFG_LNKSTS
;
1091 regs
.mear
= MEAR_MDDIR
| MEAR_EEDO
;
1092 regs
.txcfg
= 0x120; // set drain threshold to 1024 bytes and
1093 // fill threshold to 32 bytes
1094 regs
.rxcfg
= 0x4; // set drain threshold to 16 bytes
1095 regs
.srr
= 0x0103; // set the silicon revision to rev B or 0x103
1096 regs
.mibc
= MIBC_FRZ
;
1097 regs
.vdr
= 0x81; // set the vlan tag type to 802.1q
1098 regs
.tesr
= 0xc000; // TBI capable of both full and half duplex
1100 extstsEnable
= false;
1101 acceptBroadcast
= false;
1102 acceptMulticast
= false;
1103 acceptUnicast
= false;
1104 acceptPerfect
= false;
// Perform the functional data movement for an rx-side DMA read:
// copy rxDmaLen bytes from physical memory at rxDmaAddr into the
// rxDmaData buffer, then return the rx DMA engine to idle.
1109 NSGigE::rxDmaReadCopy()
// Must only run while a read is actually in flight.
1111 assert(rxDmaState
== dmaReading
);
1113 physmem
->dma_read((uint8_t *)rxDmaData
, rxDmaAddr
, rxDmaLen
);
1114 rxDmaState
= dmaIdle
;
// Trace the transfer and dump the bytes read for debugging.
1116 DPRINTF(EthernetDMA
, "rx dma read paddr=%#x len=%d\n",
1117 rxDmaAddr
, rxDmaLen
);
1118 DDUMP(EthernetDMA
, rxDmaData
, rxDmaLen
);
1122 NSGigE::doRxDmaRead()
1124 assert(rxDmaState
== dmaIdle
|| rxDmaState
== dmaReadWaiting
);
1125 rxDmaState
= dmaReading
;
1127 if (dmaInterface
&& !rxDmaFree
) {
1128 if (dmaInterface
->busy())
1129 rxDmaState
= dmaReadWaiting
;
1131 dmaInterface
->doDMA(Read
, rxDmaAddr
, rxDmaLen
, curTick
,
1132 &rxDmaReadEvent
, true);
1136 if (dmaReadDelay
== 0 && dmaReadFactor
== 0) {
1141 Tick factor
= ((rxDmaLen
+ ULL(63)) >> ULL(6)) * dmaReadFactor
;
1142 Tick start
= curTick
+ dmaReadDelay
+ factor
;
1143 rxDmaReadEvent
.schedule(start
);
1148 NSGigE::rxDmaReadDone()
1150 assert(rxDmaState
== dmaReading
);
1153 // If the transmit state machine has a pending DMA, let it go first
1154 if (txDmaState
== dmaReadWaiting
|| txDmaState
== dmaWriteWaiting
)
// Perform the functional data movement for an rx-side DMA write:
// copy rxDmaLen bytes from the rxDmaData buffer out to physical
// memory at rxDmaAddr, then return the rx DMA engine to idle.
1161 NSGigE::rxDmaWriteCopy()
// Must only run while a write is actually in flight.
1163 assert(rxDmaState
== dmaWriting
);
1165 physmem
->dma_write(rxDmaAddr
, (uint8_t *)rxDmaData
, rxDmaLen
);
1166 rxDmaState
= dmaIdle
;
// Trace the transfer and dump the bytes written for debugging.
1168 DPRINTF(EthernetDMA
, "rx dma write paddr=%#x len=%d\n",
1169 rxDmaAddr
, rxDmaLen
);
1170 DDUMP(EthernetDMA
, rxDmaData
, rxDmaLen
);
1174 NSGigE::doRxDmaWrite()
1176 assert(rxDmaState
== dmaIdle
|| rxDmaState
== dmaWriteWaiting
);
1177 rxDmaState
= dmaWriting
;
1179 if (dmaInterface
&& !rxDmaFree
) {
1180 if (dmaInterface
->busy())
1181 rxDmaState
= dmaWriteWaiting
;
1183 dmaInterface
->doDMA(WriteInvalidate
, rxDmaAddr
, rxDmaLen
, curTick
,
1184 &rxDmaWriteEvent
, true);
1188 if (dmaWriteDelay
== 0 && dmaWriteFactor
== 0) {
1193 Tick factor
= ((rxDmaLen
+ ULL(63)) >> ULL(6)) * dmaWriteFactor
;
1194 Tick start
= curTick
+ dmaWriteDelay
+ factor
;
1195 rxDmaWriteEvent
.schedule(start
);
1200 NSGigE::rxDmaWriteDone()
1202 assert(rxDmaState
== dmaWriting
);
1205 // If the transmit state machine has a pending DMA, let it go first
1206 if (txDmaState
== dmaReadWaiting
|| txDmaState
== dmaWriteWaiting
)
1215 DPRINTF(EthernetSM
, "receive kick rxState=%s (rxBuf.size=%d)\n",
1216 NsRxStateStrings
[rxState
], rxFifo
.size());
1218 if (rxKickTick
> curTick
) {
1219 DPRINTF(EthernetSM
, "receive kick exiting, can't run till %d\n",
1225 switch(rxDmaState
) {
1226 case dmaReadWaiting
:
1230 case dmaWriteWaiting
:
1238 // see state machine from spec for details
1239 // the way this works is, if you finish work on one state and can
1240 // go directly to another, you do that through jumping to the
1241 // label "next". however, if you have intermediate work, like DMA
1242 // so that you can't go to the next state yet, you go to exit and
1243 // exit the loop. however, when the DMA is done it will trigger
1244 // an event and come back to this loop.
1248 DPRINTF(EthernetSM
, "Receive Disabled! Nothing to do.\n");
1253 rxState
= rxDescRefr
;
1255 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1256 rxDmaData
= &rxDescCache
+ offsetof(ns_desc
, link
);
1257 rxDmaLen
= sizeof(rxDescCache
.link
);
1258 rxDmaFree
= dmaDescFree
;
1261 descDmaRdBytes
+= rxDmaLen
;
1266 rxState
= rxDescRead
;
1268 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1269 rxDmaData
= &rxDescCache
;
1270 rxDmaLen
= sizeof(ns_desc
);
1271 rxDmaFree
= dmaDescFree
;
1274 descDmaRdBytes
+= rxDmaLen
;
1282 if (rxDmaState
!= dmaIdle
)
1285 rxState
= rxAdvance
;
1289 if (rxDmaState
!= dmaIdle
)
1292 DPRINTF(EthernetDesc
,
1293 "rxDescCache: addr=%08x read descriptor\n",
1294 regs
.rxdp
& 0x3fffffff);
1295 DPRINTF(EthernetDesc
,
1296 "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
1297 rxDescCache
.link
, rxDescCache
.bufptr
, rxDescCache
.cmdsts
,
1298 rxDescCache
.extsts
);
1300 if (rxDescCache
.cmdsts
& CMDSTS_OWN
) {
1301 devIntrPost(ISR_RXIDLE
);
1305 rxState
= rxFifoBlock
;
1306 rxFragPtr
= rxDescCache
.bufptr
;
1307 rxDescCnt
= rxDescCache
.cmdsts
& CMDSTS_LEN_MASK
;
1314 * @todo in reality, we should be able to start processing
1315 * the packet as it arrives, and not have to wait for the
1316 * full packet to be in the receive fifo.
1321 DPRINTF(EthernetSM
, "****processing receive of new packet****\n");
1323 // If we don't have a packet, grab a new one from the fifo.
1324 rxPacket
= rxFifo
.front();
1325 rxPktBytes
= rxPacket
->length
;
1326 rxPacketBufPtr
= rxPacket
->data
;
1329 if (DTRACE(Ethernet
)) {
1332 DPRINTF(Ethernet
, "ID is %d\n", ip
->id());
1335 DPRINTF(Ethernet
, "Src Port=%d, Dest Port=%d\n",
1336 tcp
->sport(), tcp
->dport());
1342 // sanity check - i think the driver behaves like this
1343 assert(rxDescCnt
>= rxPktBytes
);
1345 // Must clear the value before popping to decrement the
1351 // don't need the && rxDescCnt > 0 if driver sanity check
1353 if (rxPktBytes
> 0) {
1354 rxState
= rxFragWrite
;
1355 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
1357 rxXferLen
= rxPktBytes
;
1359 rxDmaAddr
= rxFragPtr
& 0x3fffffff;
1360 rxDmaData
= rxPacketBufPtr
;
1361 rxDmaLen
= rxXferLen
;
1362 rxDmaFree
= dmaDataFree
;
1368 rxState
= rxDescWrite
;
1370 //if (rxPktBytes == 0) { /* packet is done */
1371 assert(rxPktBytes
== 0);
1372 DPRINTF(EthernetSM
, "done with receiving packet\n");
1374 rxDescCache
.cmdsts
|= CMDSTS_OWN
;
1375 rxDescCache
.cmdsts
&= ~CMDSTS_MORE
;
1376 rxDescCache
.cmdsts
|= CMDSTS_OK
;
1377 rxDescCache
.cmdsts
&= 0xffff0000;
1378 rxDescCache
.cmdsts
+= rxPacket
->length
; //i.e. set CMDSTS_SIZE
1382 * all the driver uses these are for its own stats keeping
1383 * which we don't care about, aren't necessary for
1384 * functionality and doing this would just slow us down.
1385 * if they end up using this in a later version for
1386 * functional purposes, just undef
1388 if (rxFilterEnable
) {
1389 rxDescCache
.cmdsts
&= ~CMDSTS_DEST_MASK
;
1390 const EthAddr
&dst
= rxFifoFront()->dst();
1392 rxDescCache
.cmdsts
|= CMDSTS_DEST_SELF
;
1393 if (dst
->multicast())
1394 rxDescCache
.cmdsts
|= CMDSTS_DEST_MULTI
;
1395 if (dst
->broadcast())
1396 rxDescCache
.cmdsts
|= CMDSTS_DEST_MASK
;
1401 if (extstsEnable
&& ip
) {
1402 rxDescCache
.extsts
|= EXTSTS_IPPKT
;
1404 if (cksum(ip
) != 0) {
1405 DPRINTF(EthernetCksum
, "Rx IP Checksum Error\n");
1406 rxDescCache
.extsts
|= EXTSTS_IPERR
;
1411 rxDescCache
.extsts
|= EXTSTS_TCPPKT
;
1413 if (cksum(tcp
) != 0) {
1414 DPRINTF(EthernetCksum
, "Rx TCP Checksum Error\n");
1415 rxDescCache
.extsts
|= EXTSTS_TCPERR
;
1419 rxDescCache
.extsts
|= EXTSTS_UDPPKT
;
1421 if (cksum(udp
) != 0) {
1422 DPRINTF(EthernetCksum
, "Rx UDP Checksum Error\n");
1423 rxDescCache
.extsts
|= EXTSTS_UDPERR
;
1430 * the driver seems to always receive into desc buffers
1431 * of size 1514, so you never have a pkt that is split
1432 * into multiple descriptors on the receive side, so
1433 * i don't implement that case, hence the assert above.
1436 DPRINTF(EthernetDesc
,
1437 "rxDescCache: addr=%08x writeback cmdsts extsts\n",
1438 regs
.rxdp
& 0x3fffffff);
1439 DPRINTF(EthernetDesc
,
1440 "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
1441 rxDescCache
.link
, rxDescCache
.bufptr
, rxDescCache
.cmdsts
,
1442 rxDescCache
.extsts
);
1444 rxDmaAddr
= (regs
.rxdp
+ offsetof(ns_desc
, cmdsts
)) & 0x3fffffff;
1445 rxDmaData
= &(rxDescCache
.cmdsts
);
1446 rxDmaLen
= sizeof(rxDescCache
.cmdsts
) + sizeof(rxDescCache
.extsts
);
1447 rxDmaFree
= dmaDescFree
;
1450 descDmaWrBytes
+= rxDmaLen
;
1458 if (rxDmaState
!= dmaIdle
)
1461 rxPacketBufPtr
+= rxXferLen
;
1462 rxFragPtr
+= rxXferLen
;
1463 rxPktBytes
-= rxXferLen
;
1465 rxState
= rxFifoBlock
;
1469 if (rxDmaState
!= dmaIdle
)
1472 assert(rxDescCache
.cmdsts
& CMDSTS_OWN
);
1474 assert(rxPacket
== 0);
1475 devIntrPost(ISR_RXOK
);
1477 if (rxDescCache
.cmdsts
& CMDSTS_INTR
)
1478 devIntrPost(ISR_RXDESC
);
1481 DPRINTF(EthernetSM
, "Halting the RX state machine\n");
1485 rxState
= rxAdvance
;
1489 if (rxDescCache
.link
== 0) {
1490 devIntrPost(ISR_RXIDLE
);
1495 rxState
= rxDescRead
;
1496 regs
.rxdp
= rxDescCache
.link
;
1499 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1500 rxDmaData
= &rxDescCache
;
1501 rxDmaLen
= sizeof(ns_desc
);
1502 rxDmaFree
= dmaDescFree
;
1510 panic("Invalid rxState!");
1513 DPRINTF(EthernetSM
, "entering next rxState=%s\n",
1514 NsRxStateStrings
[rxState
]);
1520 * @todo do we want to schedule a future kick?
1522 DPRINTF(EthernetSM
, "rx state machine exited rxState=%s\n",
1523 NsRxStateStrings
[rxState
]);
1529 if (txFifo
.empty()) {
1530 DPRINTF(Ethernet
, "nothing to transmit\n");
1534 DPRINTF(Ethernet
, "Attempt Pkt Transmit: txFifo length=%d\n",
1536 if (interface
->sendPacket(txFifo
.front())) {
1538 if (DTRACE(Ethernet
)) {
1539 IpPtr
ip(txFifo
.front());
1541 DPRINTF(Ethernet
, "ID is %d\n", ip
->id());
1544 DPRINTF(Ethernet
, "Src Port=%d, Dest Port=%d\n",
1545 tcp
->sport(), tcp
->dport());
1551 DDUMP(Ethernet
, txFifo
.front()->data
, txFifo
.front()->length
);
1552 txBytes
+= txFifo
.front()->length
;
1555 DPRINTF(Ethernet
, "Successful Xmit! now txFifoAvail is %d\n",
1560 * normally do a writeback of the descriptor here, and ONLY
1561 * after that is done, send this interrupt. but since our
1562 * stuff never actually fails, just do this interrupt here,
1563 * otherwise the code has to stray from this nice format.
1564 * besides, it's functionally the same.
1566 devIntrPost(ISR_TXOK
);
1569 "May need to rethink always sending the descriptors back?\n");
1572 if (!txFifo
.empty() && !txEvent
.scheduled()) {
1573 DPRINTF(Ethernet
, "reschedule transmit\n");
1574 txEvent
.schedule(curTick
+ 1000);
// Perform the functional data movement for a tx-side DMA read:
// copy txDmaLen bytes from physical memory at txDmaAddr into the
// txDmaData buffer, then return the tx DMA engine to idle.
// Mirrors rxDmaReadCopy() for the transmit state machine.
1579 NSGigE::txDmaReadCopy()
// Must only run while a read is actually in flight.
1581 assert(txDmaState
== dmaReading
);
1583 physmem
->dma_read((uint8_t *)txDmaData
, txDmaAddr
, txDmaLen
);
1584 txDmaState
= dmaIdle
;
// Trace the transfer and dump the bytes read for debugging.
1586 DPRINTF(EthernetDMA
, "tx dma read paddr=%#x len=%d\n",
1587 txDmaAddr
, txDmaLen
);
1588 DDUMP(EthernetDMA
, txDmaData
, txDmaLen
);
1592 NSGigE::doTxDmaRead()
1594 assert(txDmaState
== dmaIdle
|| txDmaState
== dmaReadWaiting
);
1595 txDmaState
= dmaReading
;
1597 if (dmaInterface
&& !txDmaFree
) {
1598 if (dmaInterface
->busy())
1599 txDmaState
= dmaReadWaiting
;
1601 dmaInterface
->doDMA(Read
, txDmaAddr
, txDmaLen
, curTick
,
1602 &txDmaReadEvent
, true);
1606 if (dmaReadDelay
== 0 && dmaReadFactor
== 0.0) {
1611 Tick factor
= ((txDmaLen
+ ULL(63)) >> ULL(6)) * dmaReadFactor
;
1612 Tick start
= curTick
+ dmaReadDelay
+ factor
;
1613 txDmaReadEvent
.schedule(start
);
1618 NSGigE::txDmaReadDone()
1620 assert(txDmaState
== dmaReading
);
1623 // If the receive state machine has a pending DMA, let it go first
1624 if (rxDmaState
== dmaReadWaiting
|| rxDmaState
== dmaWriteWaiting
)
// Perform the functional data movement for a tx-side DMA write:
// copy txDmaLen bytes from the txDmaData buffer out to physical
// memory at txDmaAddr, then return the tx DMA engine to idle.
// Mirrors rxDmaWriteCopy() for the transmit state machine.
1631 NSGigE::txDmaWriteCopy()
// Must only run while a write is actually in flight.
1633 assert(txDmaState
== dmaWriting
);
1635 physmem
->dma_write(txDmaAddr
, (uint8_t *)txDmaData
, txDmaLen
);
1636 txDmaState
= dmaIdle
;
// Trace the transfer and dump the bytes written for debugging.
1638 DPRINTF(EthernetDMA
, "tx dma write paddr=%#x len=%d\n",
1639 txDmaAddr
, txDmaLen
);
1640 DDUMP(EthernetDMA
, txDmaData
, txDmaLen
);
1644 NSGigE::doTxDmaWrite()
1646 assert(txDmaState
== dmaIdle
|| txDmaState
== dmaWriteWaiting
);
1647 txDmaState
= dmaWriting
;
1649 if (dmaInterface
&& !txDmaFree
) {
1650 if (dmaInterface
->busy())
1651 txDmaState
= dmaWriteWaiting
;
1653 dmaInterface
->doDMA(WriteInvalidate
, txDmaAddr
, txDmaLen
, curTick
,
1654 &txDmaWriteEvent
, true);
1658 if (dmaWriteDelay
== 0 && dmaWriteFactor
== 0.0) {
1663 Tick factor
= ((txDmaLen
+ ULL(63)) >> ULL(6)) * dmaWriteFactor
;
1664 Tick start
= curTick
+ dmaWriteDelay
+ factor
;
1665 txDmaWriteEvent
.schedule(start
);
1670 NSGigE::txDmaWriteDone()
1672 assert(txDmaState
== dmaWriting
);
1675 // If the receive state machine has a pending DMA, let it go first
1676 if (rxDmaState
== dmaReadWaiting
|| rxDmaState
== dmaWriteWaiting
)
1685 DPRINTF(EthernetSM
, "transmit kick txState=%s\n",
1686 NsTxStateStrings
[txState
]);
1688 if (txKickTick
> curTick
) {
1689 DPRINTF(EthernetSM
, "transmit kick exiting, can't run till %d\n",
1696 switch(txDmaState
) {
1697 case dmaReadWaiting
:
1701 case dmaWriteWaiting
:
1712 DPRINTF(EthernetSM
, "Transmit disabled. Nothing to do.\n");
1717 txState
= txDescRefr
;
1719 txDmaAddr
= regs
.txdp
& 0x3fffffff;
1720 txDmaData
= &txDescCache
+ offsetof(ns_desc
, link
);
1721 txDmaLen
= sizeof(txDescCache
.link
);
1722 txDmaFree
= dmaDescFree
;
1725 descDmaRdBytes
+= txDmaLen
;
1731 txState
= txDescRead
;
1733 txDmaAddr
= regs
.txdp
& 0x3fffffff;
1734 txDmaData
= &txDescCache
;
1735 txDmaLen
= sizeof(ns_desc
);
1736 txDmaFree
= dmaDescFree
;
1739 descDmaRdBytes
+= txDmaLen
;
1747 if (txDmaState
!= dmaIdle
)
1750 txState
= txAdvance
;
1754 if (txDmaState
!= dmaIdle
)
1757 DPRINTF(EthernetDesc
,
1758 "txDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
1759 txDescCache
.link
, txDescCache
.bufptr
, txDescCache
.cmdsts
,
1760 txDescCache
.extsts
);
1762 if (txDescCache
.cmdsts
& CMDSTS_OWN
) {
1763 txState
= txFifoBlock
;
1764 txFragPtr
= txDescCache
.bufptr
;
1765 txDescCnt
= txDescCache
.cmdsts
& CMDSTS_LEN_MASK
;
1767 devIntrPost(ISR_TXIDLE
);
1775 DPRINTF(EthernetSM
, "****starting the tx of a new packet****\n");
1776 txPacket
= new PacketData
;
1777 txPacket
->data
= new uint8_t[16384];
1778 txPacketBufPtr
= txPacket
->data
;
1781 if (txDescCnt
== 0) {
1782 DPRINTF(EthernetSM
, "the txDescCnt == 0, done with descriptor\n");
1783 if (txDescCache
.cmdsts
& CMDSTS_MORE
) {
1784 DPRINTF(EthernetSM
, "there are more descriptors to come\n");
1785 txState
= txDescWrite
;
1787 txDescCache
.cmdsts
&= ~CMDSTS_OWN
;
1789 txDmaAddr
= regs
.txdp
+ offsetof(ns_desc
, cmdsts
);
1790 txDmaAddr
&= 0x3fffffff;
1791 txDmaData
= &(txDescCache
.cmdsts
);
1792 txDmaLen
= sizeof(txDescCache
.cmdsts
);
1793 txDmaFree
= dmaDescFree
;
1798 } else { /* this packet is totally done */
1799 DPRINTF(EthernetSM
, "This packet is done, let's wrap it up\n");
1800 /* deal with the the packet that just finished */
1801 if ((regs
.vtcr
& VTCR_PPCHK
) && extstsEnable
) {
1803 if (txDescCache
.extsts
& EXTSTS_UDPPKT
) {
1806 udp
->sum(cksum(udp
));
1808 } else if (txDescCache
.extsts
& EXTSTS_TCPPKT
) {
1811 tcp
->sum(cksum(tcp
));
1814 if (txDescCache
.extsts
& EXTSTS_IPPKT
) {
1821 txPacket
->length
= txPacketBufPtr
- txPacket
->data
;
1822 // this is just because the receive can't handle a
1823 // packet bigger want to make sure
1824 assert(txPacket
->length
<= 1514);
1825 txFifo
.push(txPacket
);
1828 * this following section is not tqo spec, but
1829 * functionally shouldn't be any different. normally,
1830 * the chip will wait til the transmit has occurred
1831 * before writing back the descriptor because it has
1832 * to wait to see that it was successfully transmitted
1833 * to decide whether to set CMDSTS_OK or not.
1834 * however, in the simulator since it is always
1835 * successfully transmitted, and writing it exactly to
1836 * spec would complicate the code, we just do it here
1839 txDescCache
.cmdsts
&= ~CMDSTS_OWN
;
1840 txDescCache
.cmdsts
|= CMDSTS_OK
;
1842 DPRINTF(EthernetDesc
,
1843 "txDesc writeback: cmdsts=%08x extsts=%08x\n",
1844 txDescCache
.cmdsts
, txDescCache
.extsts
);
1846 txDmaAddr
= regs
.txdp
+ offsetof(ns_desc
, cmdsts
);
1847 txDmaAddr
&= 0x3fffffff;
1848 txDmaData
= &(txDescCache
.cmdsts
);
1849 txDmaLen
= sizeof(txDescCache
.cmdsts
) +
1850 sizeof(txDescCache
.extsts
);
1851 txDmaFree
= dmaDescFree
;
1854 descDmaWrBytes
+= txDmaLen
;
1860 DPRINTF(EthernetSM
, "halting TX state machine\n");
1864 txState
= txAdvance
;
1870 DPRINTF(EthernetSM
, "this descriptor isn't done yet\n");
1871 if (!txFifo
.full()) {
1872 txState
= txFragRead
;
1875 * The number of bytes transferred is either whatever
1876 * is left in the descriptor (txDescCnt), or if there
1877 * is not enough room in the fifo, just whatever room
1878 * is left in the fifo
1880 txXferLen
= min
<uint32_t>(txDescCnt
, txFifo
.avail());
1882 txDmaAddr
= txFragPtr
& 0x3fffffff;
1883 txDmaData
= txPacketBufPtr
;
1884 txDmaLen
= txXferLen
;
1885 txDmaFree
= dmaDataFree
;
1890 txState
= txFifoBlock
;
1900 if (txDmaState
!= dmaIdle
)
1903 txPacketBufPtr
+= txXferLen
;
1904 txFragPtr
+= txXferLen
;
1905 txDescCnt
-= txXferLen
;
1907 txState
= txFifoBlock
;
1911 if (txDmaState
!= dmaIdle
)
1914 if (txDescCache
.cmdsts
& CMDSTS_INTR
)
1915 devIntrPost(ISR_TXDESC
);
1917 txState
= txAdvance
;
1921 if (txDescCache
.link
== 0) {
1922 devIntrPost(ISR_TXIDLE
);
1926 txState
= txDescRead
;
1927 regs
.txdp
= txDescCache
.link
;
1930 txDmaAddr
= txDescCache
.link
& 0x3fffffff;
1931 txDmaData
= &txDescCache
;
1932 txDmaLen
= sizeof(ns_desc
);
1933 txDmaFree
= dmaDescFree
;
1941 panic("invalid state");
1944 DPRINTF(EthernetSM
, "entering next txState=%s\n",
1945 NsTxStateStrings
[txState
]);
1951 * @todo do we want to schedule a future kick?
1953 DPRINTF(EthernetSM
, "tx state machine exited txState=%s\n",
1954 NsTxStateStrings
[txState
]);
1958 NSGigE::transferDone()
1960 if (txFifo
.empty()) {
1961 DPRINTF(Ethernet
, "transfer complete: txFifo empty...nothing to do\n");
1965 DPRINTF(Ethernet
, "transfer complete: data in txFifo...schedule xmit\n");
1967 if (txEvent
.scheduled())
1968 txEvent
.reschedule(curTick
+ 1);
1970 txEvent
.schedule(curTick
+ 1);
1974 NSGigE::rxFilter(const PacketPtr
&packet
)
1976 EthPtr eth
= packet
;
1980 const EthAddr
&dst
= eth
->dst();
1981 if (dst
.unicast()) {
1982 // If we're accepting all unicast addresses
1986 // If we make a perfect match
1987 if (acceptPerfect
&& dst
== rom
.perfectMatch
)
1990 if (acceptArp
&& eth
->type() == ETH_TYPE_ARP
)
1993 } else if (dst
.broadcast()) {
1994 // if we're accepting broadcasts
1995 if (acceptBroadcast
)
1998 } else if (dst
.multicast()) {
1999 // if we're accepting all multicasts
2000 if (acceptMulticast
)
2006 DPRINTF(Ethernet
, "rxFilter drop\n");
2007 DDUMP(EthernetData
, packet
->data
, packet
->length
);
2014 NSGigE::recvPacket(PacketPtr packet
)
2016 rxBytes
+= packet
->length
;
2019 DPRINTF(Ethernet
, "Receiving packet from wire, rxFifoAvail=%d\n",
2023 DPRINTF(Ethernet
, "receive disabled...packet dropped\n");
2025 interface
->recvDone();
2029 if (rxFilterEnable
&& rxFilter(packet
)) {
2030 DPRINTF(Ethernet
, "packet filtered...dropped\n");
2031 interface
->recvDone();
2035 if (rxFifo
.avail() < packet
->length
) {
2037 "packet will not fit in receive buffer...packet dropped\n");
2038 devIntrPost(ISR_RXORN
);
2042 rxFifo
.push(packet
);
2043 interface
->recvDone();
2049 //=====================================================================
2053 NSGigE::serialize(ostream
&os
)
2055 // Serialize the PciDev base class
2056 PciDev::serialize(os
);
2059 * Finalize any DMA events now.
2061 if (rxDmaReadEvent
.scheduled())
2063 if (rxDmaWriteEvent
.scheduled())
2065 if (txDmaReadEvent
.scheduled())
2067 if (txDmaWriteEvent
.scheduled())
2071 * Serialize the device registers
2073 SERIALIZE_SCALAR(regs
.command
);
2074 SERIALIZE_SCALAR(regs
.config
);
2075 SERIALIZE_SCALAR(regs
.mear
);
2076 SERIALIZE_SCALAR(regs
.ptscr
);
2077 SERIALIZE_SCALAR(regs
.isr
);
2078 SERIALIZE_SCALAR(regs
.imr
);
2079 SERIALIZE_SCALAR(regs
.ier
);
2080 SERIALIZE_SCALAR(regs
.ihr
);
2081 SERIALIZE_SCALAR(regs
.txdp
);
2082 SERIALIZE_SCALAR(regs
.txdp_hi
);
2083 SERIALIZE_SCALAR(regs
.txcfg
);
2084 SERIALIZE_SCALAR(regs
.gpior
);
2085 SERIALIZE_SCALAR(regs
.rxdp
);
2086 SERIALIZE_SCALAR(regs
.rxdp_hi
);
2087 SERIALIZE_SCALAR(regs
.rxcfg
);
2088 SERIALIZE_SCALAR(regs
.pqcr
);
2089 SERIALIZE_SCALAR(regs
.wcsr
);
2090 SERIALIZE_SCALAR(regs
.pcr
);
2091 SERIALIZE_SCALAR(regs
.rfcr
);
2092 SERIALIZE_SCALAR(regs
.rfdr
);
2093 SERIALIZE_SCALAR(regs
.srr
);
2094 SERIALIZE_SCALAR(regs
.mibc
);
2095 SERIALIZE_SCALAR(regs
.vrcr
);
2096 SERIALIZE_SCALAR(regs
.vtcr
);
2097 SERIALIZE_SCALAR(regs
.vdr
);
2098 SERIALIZE_SCALAR(regs
.ccsr
);
2099 SERIALIZE_SCALAR(regs
.tbicr
);
2100 SERIALIZE_SCALAR(regs
.tbisr
);
2101 SERIALIZE_SCALAR(regs
.tanar
);
2102 SERIALIZE_SCALAR(regs
.tanlpar
);
2103 SERIALIZE_SCALAR(regs
.taner
);
2104 SERIALIZE_SCALAR(regs
.tesr
);
2106 SERIALIZE_ARRAY(rom
.perfectMatch
, ETH_ADDR_LEN
);
2108 SERIALIZE_SCALAR(ioEnable
);
2111 * Serialize the data Fifos
2113 rxFifo
.serialize("rxFifo", os
);
2114 txFifo
.serialize("txFifo", os
);
2117 * Serialize the various helper variables
2119 bool txPacketExists
= txPacket
;
2120 SERIALIZE_SCALAR(txPacketExists
);
2121 if (txPacketExists
) {
2122 txPacket
->serialize("txPacket", os
);
2123 uint32_t txPktBufPtr
= (uint32_t) (txPacketBufPtr
- txPacket
->data
);
2124 SERIALIZE_SCALAR(txPktBufPtr
);
2127 bool rxPacketExists
= rxPacket
;
2128 SERIALIZE_SCALAR(rxPacketExists
);
2129 if (rxPacketExists
) {
2130 rxPacket
->serialize("rxPacket", os
);
2131 uint32_t rxPktBufPtr
= (uint32_t) (rxPacketBufPtr
- rxPacket
->data
);
2132 SERIALIZE_SCALAR(rxPktBufPtr
);
2135 SERIALIZE_SCALAR(txXferLen
);
2136 SERIALIZE_SCALAR(rxXferLen
);
2139 * Serialize DescCaches
2141 SERIALIZE_SCALAR(txDescCache
.link
);
2142 SERIALIZE_SCALAR(txDescCache
.bufptr
);
2143 SERIALIZE_SCALAR(txDescCache
.cmdsts
);
2144 SERIALIZE_SCALAR(txDescCache
.extsts
);
2145 SERIALIZE_SCALAR(rxDescCache
.link
);
2146 SERIALIZE_SCALAR(rxDescCache
.bufptr
);
2147 SERIALIZE_SCALAR(rxDescCache
.cmdsts
);
2148 SERIALIZE_SCALAR(rxDescCache
.extsts
);
2151 * Serialize tx state machine
2153 int txState
= this->txState
;
2154 SERIALIZE_SCALAR(txState
);
2155 SERIALIZE_SCALAR(txEnable
);
2156 SERIALIZE_SCALAR(CTDD
);
2157 SERIALIZE_SCALAR(txFragPtr
);
2158 SERIALIZE_SCALAR(txDescCnt
);
2159 int txDmaState
= this->txDmaState
;
2160 SERIALIZE_SCALAR(txDmaState
);
2163 * Serialize rx state machine
2165 int rxState
= this->rxState
;
2166 SERIALIZE_SCALAR(rxState
);
2167 SERIALIZE_SCALAR(rxEnable
);
2168 SERIALIZE_SCALAR(CRDD
);
2169 SERIALIZE_SCALAR(rxPktBytes
);
2170 SERIALIZE_SCALAR(rxDescCnt
);
2171 int rxDmaState
= this->rxDmaState
;
2172 SERIALIZE_SCALAR(rxDmaState
);
2174 SERIALIZE_SCALAR(extstsEnable
);
2177 * If there's a pending transmit, store the time so we can
2178 * reschedule it later
2180 Tick transmitTick
= txEvent
.scheduled() ? txEvent
.when() - curTick
: 0;
2181 SERIALIZE_SCALAR(transmitTick
);
2184 * receive address filter settings
2186 SERIALIZE_SCALAR(rxFilterEnable
);
2187 SERIALIZE_SCALAR(acceptBroadcast
);
2188 SERIALIZE_SCALAR(acceptMulticast
);
2189 SERIALIZE_SCALAR(acceptUnicast
);
2190 SERIALIZE_SCALAR(acceptPerfect
);
2191 SERIALIZE_SCALAR(acceptArp
);
2194 * Keep track of pending interrupt status.
2196 SERIALIZE_SCALAR(intrTick
);
2197 SERIALIZE_SCALAR(cpuPendingIntr
);
2198 Tick intrEventTick
= 0;
2200 intrEventTick
= intrEvent
->when();
2201 SERIALIZE_SCALAR(intrEventTick
);
2206 NSGigE::unserialize(Checkpoint
*cp
, const std::string
§ion
)
2208 // Unserialize the PciDev base class
2209 PciDev::unserialize(cp
, section
);
2211 UNSERIALIZE_SCALAR(regs
.command
);
2212 UNSERIALIZE_SCALAR(regs
.config
);
2213 UNSERIALIZE_SCALAR(regs
.mear
);
2214 UNSERIALIZE_SCALAR(regs
.ptscr
);
2215 UNSERIALIZE_SCALAR(regs
.isr
);
2216 UNSERIALIZE_SCALAR(regs
.imr
);
2217 UNSERIALIZE_SCALAR(regs
.ier
);
2218 UNSERIALIZE_SCALAR(regs
.ihr
);
2219 UNSERIALIZE_SCALAR(regs
.txdp
);
2220 UNSERIALIZE_SCALAR(regs
.txdp_hi
);
2221 UNSERIALIZE_SCALAR(regs
.txcfg
);
2222 UNSERIALIZE_SCALAR(regs
.gpior
);
2223 UNSERIALIZE_SCALAR(regs
.rxdp
);
2224 UNSERIALIZE_SCALAR(regs
.rxdp_hi
);
2225 UNSERIALIZE_SCALAR(regs
.rxcfg
);
2226 UNSERIALIZE_SCALAR(regs
.pqcr
);
2227 UNSERIALIZE_SCALAR(regs
.wcsr
);
2228 UNSERIALIZE_SCALAR(regs
.pcr
);
2229 UNSERIALIZE_SCALAR(regs
.rfcr
);
2230 UNSERIALIZE_SCALAR(regs
.rfdr
);
2231 UNSERIALIZE_SCALAR(regs
.srr
);
2232 UNSERIALIZE_SCALAR(regs
.mibc
);
2233 UNSERIALIZE_SCALAR(regs
.vrcr
);
2234 UNSERIALIZE_SCALAR(regs
.vtcr
);
2235 UNSERIALIZE_SCALAR(regs
.vdr
);
2236 UNSERIALIZE_SCALAR(regs
.ccsr
);
2237 UNSERIALIZE_SCALAR(regs
.tbicr
);
2238 UNSERIALIZE_SCALAR(regs
.tbisr
);
2239 UNSERIALIZE_SCALAR(regs
.tanar
);
2240 UNSERIALIZE_SCALAR(regs
.tanlpar
);
2241 UNSERIALIZE_SCALAR(regs
.taner
);
2242 UNSERIALIZE_SCALAR(regs
.tesr
);
2244 UNSERIALIZE_ARRAY(rom
.perfectMatch
, ETH_ADDR_LEN
);
2246 UNSERIALIZE_SCALAR(ioEnable
);
2249 * unserialize the data fifos
2251 rxFifo
.unserialize("rxFifo", cp
, section
);
2252 txFifo
.unserialize("txFifo", cp
, section
);
2255 * unserialize the various helper variables
2257 bool txPacketExists
;
2258 UNSERIALIZE_SCALAR(txPacketExists
);
2259 if (txPacketExists
) {
2260 txPacket
= new PacketData
;
2261 txPacket
->unserialize("txPacket", cp
, section
);
2262 uint32_t txPktBufPtr
;
2263 UNSERIALIZE_SCALAR(txPktBufPtr
);
2264 txPacketBufPtr
= (uint8_t *) txPacket
->data
+ txPktBufPtr
;
2268 bool rxPacketExists
;
2269 UNSERIALIZE_SCALAR(rxPacketExists
);
2271 if (rxPacketExists
) {
2272 rxPacket
= new PacketData
;
2273 rxPacket
->unserialize("rxPacket", cp
, section
);
2274 uint32_t rxPktBufPtr
;
2275 UNSERIALIZE_SCALAR(rxPktBufPtr
);
2276 rxPacketBufPtr
= (uint8_t *) rxPacket
->data
+ rxPktBufPtr
;
2280 UNSERIALIZE_SCALAR(txXferLen
);
2281 UNSERIALIZE_SCALAR(rxXferLen
);
2284 * Unserialize DescCaches
2286 UNSERIALIZE_SCALAR(txDescCache
.link
);
2287 UNSERIALIZE_SCALAR(txDescCache
.bufptr
);
2288 UNSERIALIZE_SCALAR(txDescCache
.cmdsts
);
2289 UNSERIALIZE_SCALAR(txDescCache
.extsts
);
2290 UNSERIALIZE_SCALAR(rxDescCache
.link
);
2291 UNSERIALIZE_SCALAR(rxDescCache
.bufptr
);
2292 UNSERIALIZE_SCALAR(rxDescCache
.cmdsts
);
2293 UNSERIALIZE_SCALAR(rxDescCache
.extsts
);
2296 * unserialize tx state machine
2299 UNSERIALIZE_SCALAR(txState
);
2300 this->txState
= (TxState
) txState
;
2301 UNSERIALIZE_SCALAR(txEnable
);
2302 UNSERIALIZE_SCALAR(CTDD
);
2303 UNSERIALIZE_SCALAR(txFragPtr
);
2304 UNSERIALIZE_SCALAR(txDescCnt
);
2306 UNSERIALIZE_SCALAR(txDmaState
);
2307 this->txDmaState
= (DmaState
) txDmaState
;
2310 * unserialize rx state machine
2313 UNSERIALIZE_SCALAR(rxState
);
2314 this->rxState
= (RxState
) rxState
;
2315 UNSERIALIZE_SCALAR(rxEnable
);
2316 UNSERIALIZE_SCALAR(CRDD
);
2317 UNSERIALIZE_SCALAR(rxPktBytes
);
2318 UNSERIALIZE_SCALAR(rxDescCnt
);
2320 UNSERIALIZE_SCALAR(rxDmaState
);
2321 this->rxDmaState
= (DmaState
) rxDmaState
;
2323 UNSERIALIZE_SCALAR(extstsEnable
);
2326 * If there's a pending transmit, reschedule it now
2329 UNSERIALIZE_SCALAR(transmitTick
);
2331 txEvent
.schedule(curTick
+ transmitTick
);
2334 * unserialize receive address filter settings
2336 UNSERIALIZE_SCALAR(rxFilterEnable
);
2337 UNSERIALIZE_SCALAR(acceptBroadcast
);
2338 UNSERIALIZE_SCALAR(acceptMulticast
);
2339 UNSERIALIZE_SCALAR(acceptUnicast
);
2340 UNSERIALIZE_SCALAR(acceptPerfect
);
2341 UNSERIALIZE_SCALAR(acceptArp
);
2344 * Keep track of pending interrupt status.
2346 UNSERIALIZE_SCALAR(intrTick
);
2347 UNSERIALIZE_SCALAR(cpuPendingIntr
);
2349 UNSERIALIZE_SCALAR(intrEventTick
);
2350 if (intrEventTick
) {
2351 intrEvent
= new IntrEvent(this, true);
2352 intrEvent
->schedule(intrEventTick
);
2356 * re-add addrRanges to bus bridges
2359 pioInterface
->addAddrRange(RangeSize(BARAddrs
[0], BARSize
[0]));
2360 pioInterface
->addAddrRange(RangeSize(BARAddrs
[1], BARSize
[1]));
2365 NSGigE::cacheAccess(MemReqPtr
&req
)
2367 DPRINTF(EthernetPIO
, "timing access to paddr=%#x (daddr=%#x)\n",
2368 req
->paddr
, req
->paddr
- addr
);
2369 return curTick
+ pioLatency
;
2372 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt
)
2374 SimObjectParam
<EtherInt
*> peer
;
2375 SimObjectParam
<NSGigE
*> device
;
2377 END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt
)
2379 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt
)
2381 INIT_PARAM_DFLT(peer
, "peer interface", NULL
),
2382 INIT_PARAM(device
, "Ethernet device of this interface")
2384 END_INIT_SIM_OBJECT_PARAMS(NSGigEInt
)
2386 CREATE_SIM_OBJECT(NSGigEInt
)
2388 NSGigEInt
*dev_int
= new NSGigEInt(getInstanceName(), device
);
2390 EtherInt
*p
= (EtherInt
*)peer
;
2392 dev_int
->setPeer(p
);
2393 p
->setPeer(dev_int
);
2399 REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt
)
2402 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE
)
2404 Param
<Tick
> tx_delay
;
2405 Param
<Tick
> rx_delay
;
2406 Param
<Tick
> intr_delay
;
2407 SimObjectParam
<MemoryController
*> mmu
;
2408 SimObjectParam
<PhysicalMemory
*> physmem
;
2409 Param
<bool> rx_filter
;
2410 Param
<string
> hardware_address
;
2411 SimObjectParam
<Bus
*> header_bus
;
2412 SimObjectParam
<Bus
*> payload_bus
;
2413 SimObjectParam
<HierParams
*> hier
;
2414 Param
<Tick
> pio_latency
;
2415 Param
<bool> dma_desc_free
;
2416 Param
<bool> dma_data_free
;
2417 Param
<Tick
> dma_read_delay
;
2418 Param
<Tick
> dma_write_delay
;
2419 Param
<Tick
> dma_read_factor
;
2420 Param
<Tick
> dma_write_factor
;
2421 SimObjectParam
<PciConfigAll
*> configspace
;
2422 SimObjectParam
<PciConfigData
*> configdata
;
2423 SimObjectParam
<Platform
*> platform
;
2424 Param
<uint32_t> pci_bus
;
2425 Param
<uint32_t> pci_dev
;
2426 Param
<uint32_t> pci_func
;
2427 Param
<uint32_t> tx_fifo_size
;
2428 Param
<uint32_t> rx_fifo_size
;
2430 END_DECLARE_SIM_OBJECT_PARAMS(NSGigE
)
2432 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE
)
2434 INIT_PARAM_DFLT(tx_delay
, "Transmit Delay", 1000),
2435 INIT_PARAM_DFLT(rx_delay
, "Receive Delay", 1000),
2436 INIT_PARAM_DFLT(intr_delay
, "Interrupt Delay in microseconds", 0),
2437 INIT_PARAM(mmu
, "Memory Controller"),
2438 INIT_PARAM(physmem
, "Physical Memory"),
2439 INIT_PARAM_DFLT(rx_filter
, "Enable Receive Filter", true),
2440 INIT_PARAM_DFLT(hardware_address
, "Ethernet Hardware Address",
2441 "00:99:00:00:00:01"),
2442 INIT_PARAM_DFLT(header_bus
, "The IO Bus to attach to for headers", NULL
),
2443 INIT_PARAM_DFLT(payload_bus
, "The IO Bus to attach to for payload", NULL
),
2444 INIT_PARAM_DFLT(hier
, "Hierarchy global variables", &defaultHierParams
),
2445 INIT_PARAM_DFLT(pio_latency
, "Programmed IO latency in bus cycles", 1),
2446 INIT_PARAM_DFLT(dma_desc_free
, "DMA of Descriptors is free", false),
2447 INIT_PARAM_DFLT(dma_data_free
, "DMA of Data is free", false),
2448 INIT_PARAM_DFLT(dma_read_delay
, "fixed delay for dma reads", 0),
2449 INIT_PARAM_DFLT(dma_write_delay
, "fixed delay for dma writes", 0),
2450 INIT_PARAM_DFLT(dma_read_factor
, "multiplier for dma reads", 0),
2451 INIT_PARAM_DFLT(dma_write_factor
, "multiplier for dma writes", 0),
2452 INIT_PARAM(configspace
, "PCI Configspace"),
2453 INIT_PARAM(configdata
, "PCI Config data"),
2454 INIT_PARAM(platform
, "Platform"),
2455 INIT_PARAM(pci_bus
, "PCI bus"),
2456 INIT_PARAM(pci_dev
, "PCI device number"),
2457 INIT_PARAM(pci_func
, "PCI function code"),
2458 INIT_PARAM_DFLT(tx_fifo_size
, "max size in bytes of txFifo", 131072),
2459 INIT_PARAM_DFLT(rx_fifo_size
, "max size in bytes of rxFifo", 131072)
2461 END_INIT_SIM_OBJECT_PARAMS(NSGigE
)
2464 CREATE_SIM_OBJECT(NSGigE
)
2466 NSGigE::Params
*params
= new NSGigE::Params
;
2468 params
->name
= getInstanceName();
2470 params
->configSpace
= configspace
;
2471 params
->configData
= configdata
;
2472 params
->plat
= platform
;
2473 params
->busNum
= pci_bus
;
2474 params
->deviceNum
= pci_dev
;
2475 params
->functionNum
= pci_func
;
2477 params
->intr_delay
= intr_delay
;
2478 params
->pmem
= physmem
;
2479 params
->tx_delay
= tx_delay
;
2480 params
->rx_delay
= rx_delay
;
2481 params
->hier
= hier
;
2482 params
->header_bus
= header_bus
;
2483 params
->payload_bus
= payload_bus
;
2484 params
->pio_latency
= pio_latency
;
2485 params
->dma_desc_free
= dma_desc_free
;
2486 params
->dma_data_free
= dma_data_free
;
2487 params
->dma_read_delay
= dma_read_delay
;
2488 params
->dma_write_delay
= dma_write_delay
;
2489 params
->dma_read_factor
= dma_read_factor
;
2490 params
->dma_write_factor
= dma_write_factor
;
2491 params
->rx_filter
= rx_filter
;
2492 params
->eaddr
= hardware_address
;
2493 params
->tx_fifo_size
= tx_fifo_size
;
2494 params
->rx_fifo_size
= rx_fifo_size
;
2495 return new NSGigE(params
);
2498 REGISTER_SIM_OBJECT("NSGigE", NSGigE
)