2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 * Authors: Nathan Binkert
33 * Device module for modelling the National Semiconductor
34 * DP83820 ethernet controller. Does not support priority queueing
39 #include "base/inet.hh"
40 #include "cpu/thread_context.hh"
41 #include "dev/etherlink.hh"
42 #include "dev/ns_gige.hh"
43 #include "dev/pciconfigall.hh"
44 #include "mem/packet.hh"
45 #include "mem/packet_access.hh"
46 #include "params/NSGigE.hh"
47 #include "params/NSGigEInt.hh"
48 #include "sim/debug.hh"
49 #include "sim/host.hh"
50 #include "sim/stats.hh"
51 #include "sim/system.hh"
53 const char *NsRxStateStrings
[] =
64 const char *NsTxStateStrings
[] =
75 const char *NsDmaState
[] =
86 using namespace TheISA
;
88 ///////////////////////////////////////////////////////////////////////
92 NSGigE::NSGigE(Params
*p
)
93 : PciDev(p
), ioEnable(false),
94 txFifo(p
->tx_fifo_size
), rxFifo(p
->rx_fifo_size
),
95 txPacket(0), rxPacket(0), txPacketBufPtr(NULL
), rxPacketBufPtr(NULL
),
96 txXferLen(0), rxXferLen(0), rxDmaFree(false), txDmaFree(false),
98 txState(txIdle
), txEnable(false), CTDD(false), txHalt(false),
99 txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle
), rxState(rxIdle
),
100 rxEnable(false), CRDD(false), rxPktBytes(0), rxHalt(false),
101 rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle
), extstsEnable(false),
102 eepromState(eepromStart
), eepromClk(false), eepromBitsToRx(0),
103 eepromOpcode(0), eepromAddress(0), eepromData(0),
104 dmaReadDelay(p
->dma_read_delay
), dmaWriteDelay(p
->dma_write_delay
),
105 dmaReadFactor(p
->dma_read_factor
), dmaWriteFactor(p
->dma_write_factor
),
106 rxDmaData(NULL
), rxDmaAddr(0), rxDmaLen(0),
107 txDmaData(NULL
), txDmaAddr(0), txDmaLen(0),
108 rxDmaReadEvent(this), rxDmaWriteEvent(this),
109 txDmaReadEvent(this), txDmaWriteEvent(this),
110 dmaDescFree(p
->dma_desc_free
), dmaDataFree(p
->dma_data_free
),
111 txDelay(p
->tx_delay
), rxDelay(p
->rx_delay
),
112 rxKickTick(0), rxKickEvent(this), txKickTick(0), txKickEvent(this),
113 txEvent(this), rxFilterEnable(p
->rx_filter
),
114 acceptBroadcast(false), acceptMulticast(false), acceptUnicast(false),
115 acceptPerfect(false), acceptArp(false), multicastHashEnable(false),
116 intrDelay(p
->intr_delay
), intrTick(0), cpuPendingIntr(false),
117 intrEvent(0), interface(0)
122 memcpy(&rom
.perfectMatch
, p
->hardware_address
.bytes(), ETH_ADDR_LEN
);
124 memset(&rxDesc32
, 0, sizeof(rxDesc32
));
125 memset(&txDesc32
, 0, sizeof(txDesc32
));
126 memset(&rxDesc64
, 0, sizeof(rxDesc64
));
127 memset(&txDesc64
, 0, sizeof(txDesc64
));
137 .name(name() + ".txBytes")
138 .desc("Bytes Transmitted")
143 .name(name() + ".rxBytes")
144 .desc("Bytes Received")
149 .name(name() + ".txPackets")
150 .desc("Number of Packets Transmitted")
155 .name(name() + ".rxPackets")
156 .desc("Number of Packets Received")
161 .name(name() + ".txIpChecksums")
162 .desc("Number of tx IP Checksums done by device")
168 .name(name() + ".rxIpChecksums")
169 .desc("Number of rx IP Checksums done by device")
175 .name(name() + ".txTcpChecksums")
176 .desc("Number of tx TCP Checksums done by device")
182 .name(name() + ".rxTcpChecksums")
183 .desc("Number of rx TCP Checksums done by device")
189 .name(name() + ".txUdpChecksums")
190 .desc("Number of tx UDP Checksums done by device")
196 .name(name() + ".rxUdpChecksums")
197 .desc("Number of rx UDP Checksums done by device")
203 .name(name() + ".descDMAReads")
204 .desc("Number of descriptors the device read w/ DMA")
209 .name(name() + ".descDMAWrites")
210 .desc("Number of descriptors the device wrote w/ DMA")
215 .name(name() + ".descDmaReadBytes")
216 .desc("number of descriptor bytes read w/ DMA")
221 .name(name() + ".descDmaWriteBytes")
222 .desc("number of descriptor bytes write w/ DMA")
227 .name(name() + ".txBandwidth")
228 .desc("Transmit Bandwidth (bits/s)")
234 .name(name() + ".rxBandwidth")
235 .desc("Receive Bandwidth (bits/s)")
241 .name(name() + ".totBandwidth")
242 .desc("Total Bandwidth (bits/s)")
248 .name(name() + ".totPackets")
249 .desc("Total Packets")
255 .name(name() + ".totBytes")
262 .name(name() + ".totPPS")
263 .desc("Total Tranmission Rate (packets/s)")
269 .name(name() + ".txPPS")
270 .desc("Packet Tranmission Rate (packets/s)")
276 .name(name() + ".rxPPS")
277 .desc("Packet Reception Rate (packets/s)")
283 .name(name() + ".postedSwi")
284 .desc("number of software interrupts posted to CPU")
289 .name(name() + ".totalSwi")
290 .desc("total number of Swi written to ISR")
295 .name(name() + ".coalescedSwi")
296 .desc("average number of Swi's coalesced into each post")
301 .name(name() + ".postedRxIdle")
302 .desc("number of rxIdle interrupts posted to CPU")
307 .name(name() + ".totalRxIdle")
308 .desc("total number of RxIdle written to ISR")
313 .name(name() + ".coalescedRxIdle")
314 .desc("average number of RxIdle's coalesced into each post")
319 .name(name() + ".postedRxOk")
320 .desc("number of RxOk interrupts posted to CPU")
325 .name(name() + ".totalRxOk")
326 .desc("total number of RxOk written to ISR")
331 .name(name() + ".coalescedRxOk")
332 .desc("average number of RxOk's coalesced into each post")
337 .name(name() + ".postedRxDesc")
338 .desc("number of RxDesc interrupts posted to CPU")
343 .name(name() + ".totalRxDesc")
344 .desc("total number of RxDesc written to ISR")
349 .name(name() + ".coalescedRxDesc")
350 .desc("average number of RxDesc's coalesced into each post")
355 .name(name() + ".postedTxOk")
356 .desc("number of TxOk interrupts posted to CPU")
361 .name(name() + ".totalTxOk")
362 .desc("total number of TxOk written to ISR")
367 .name(name() + ".coalescedTxOk")
368 .desc("average number of TxOk's coalesced into each post")
373 .name(name() + ".postedTxIdle")
374 .desc("number of TxIdle interrupts posted to CPU")
379 .name(name() + ".totalTxIdle")
380 .desc("total number of TxIdle written to ISR")
385 .name(name() + ".coalescedTxIdle")
386 .desc("average number of TxIdle's coalesced into each post")
391 .name(name() + ".postedTxDesc")
392 .desc("number of TxDesc interrupts posted to CPU")
397 .name(name() + ".totalTxDesc")
398 .desc("total number of TxDesc written to ISR")
403 .name(name() + ".coalescedTxDesc")
404 .desc("average number of TxDesc's coalesced into each post")
409 .name(name() + ".postedRxOrn")
410 .desc("number of RxOrn posted to CPU")
415 .name(name() + ".totalRxOrn")
416 .desc("total number of RxOrn written to ISR")
421 .name(name() + ".coalescedRxOrn")
422 .desc("average number of RxOrn's coalesced into each post")
427 .name(name() + ".coalescedTotal")
428 .desc("average number of interrupts coalesced into each post")
433 .name(name() + ".postedInterrupts")
434 .desc("number of posts to CPU")
439 .name(name() + ".droppedPackets")
440 .desc("number of packets dropped")
444 coalescedSwi
= totalSwi
/ postedInterrupts
;
445 coalescedRxIdle
= totalRxIdle
/ postedInterrupts
;
446 coalescedRxOk
= totalRxOk
/ postedInterrupts
;
447 coalescedRxDesc
= totalRxDesc
/ postedInterrupts
;
448 coalescedTxOk
= totalTxOk
/ postedInterrupts
;
449 coalescedTxIdle
= totalTxIdle
/ postedInterrupts
;
450 coalescedTxDesc
= totalTxDesc
/ postedInterrupts
;
451 coalescedRxOrn
= totalRxOrn
/ postedInterrupts
;
453 coalescedTotal
= (totalSwi
+ totalRxIdle
+ totalRxOk
+ totalRxDesc
+
454 totalTxOk
+ totalTxIdle
+ totalTxDesc
+
455 totalRxOrn
) / postedInterrupts
;
457 txBandwidth
= txBytes
* Stats::constant(8) / simSeconds
;
458 rxBandwidth
= rxBytes
* Stats::constant(8) / simSeconds
;
459 totBandwidth
= txBandwidth
+ rxBandwidth
;
460 totBytes
= txBytes
+ rxBytes
;
461 totPackets
= txPackets
+ rxPackets
;
463 txPacketRate
= txPackets
/ simSeconds
;
464 rxPacketRate
= rxPackets
/ simSeconds
;
469 * This is to write to the PCI general configuration registers
472 NSGigE::writeConfig(PacketPtr pkt
)
474 int offset
= pkt
->getAddr() & PCI_CONFIG_SIZE
;
475 if (offset
< PCI_DEVICE_SPECIFIC
)
476 PciDev::writeConfig(pkt
);
478 panic("Device specific PCI config space not implemented!\n");
481 // seems to work fine without all these PCI settings, but i
482 // put in the IO to double check, an assertion will fail if we
483 // need to properly implement it
485 if (config
.data
[offset
] & PCI_CMD_IOSE
)
496 * This reads the device registers, which are detailed in the NS83820
500 NSGigE::read(PacketPtr pkt
)
506 //The mask is to give you only the offset into the device register file
507 Addr daddr
= pkt
->getAddr() & 0xfff;
508 DPRINTF(EthernetPIO
, "read da=%#x pa=%#x size=%d\n",
509 daddr
, pkt
->getAddr(), pkt
->getSize());
512 // there are some reserved registers, you can see ns_gige_reg.h and
513 // the spec sheet for details
514 if (daddr
> LAST
&& daddr
<= RESERVED
) {
515 panic("Accessing reserved register");
516 } else if (daddr
> RESERVED
&& daddr
<= 0x3FC) {
517 return readConfig(pkt
);
518 } else if (daddr
>= MIB_START
&& daddr
<= MIB_END
) {
519 // don't implement all the MIB's. hopefully the kernel
520 // doesn't actually DEPEND upon their values
521 // MIB are just hardware stats keepers
522 pkt
->set
<uint32_t>(0);
523 pkt
->makeAtomicResponse();
525 } else if (daddr
> 0x3FC)
526 panic("Something is messed up!\n");
528 assert(pkt
->getSize() == sizeof(uint32_t));
529 uint32_t ®
= *pkt
->getPtr
<uint32_t>();
535 //these are supposed to be cleared on a read
536 reg
&= ~(CR_RXD
| CR_TXD
| CR_TXR
| CR_RXR
);
553 devIntrClear(ISR_ALL
);
608 // see the spec sheet for how RFCR and RFDR work
609 // basically, you write to RFCR to tell the machine
610 // what you want to do next, then you act upon RFDR,
611 // and the device will be prepared b/c of what you
618 rfaddr
= (uint16_t)(regs
.rfcr
& RFCR_RFADDR
);
620 // Read from perfect match ROM octets
622 reg
= rom
.perfectMatch
[1];
624 reg
+= rom
.perfectMatch
[0];
627 reg
= rom
.perfectMatch
[3] << 8;
628 reg
+= rom
.perfectMatch
[2];
631 reg
= rom
.perfectMatch
[5] << 8;
632 reg
+= rom
.perfectMatch
[4];
635 // Read filter hash table
636 if (rfaddr
>= FHASH_ADDR
&&
637 rfaddr
< FHASH_ADDR
+ FHASH_SIZE
) {
639 // Only word-aligned reads supported
641 panic("unaligned read from filter hash table!");
643 reg
= rom
.filterHash
[rfaddr
- FHASH_ADDR
+ 1] << 8;
644 reg
+= rom
.filterHash
[rfaddr
- FHASH_ADDR
];
648 panic("reading RFDR for something other than pattern"
649 " matching or hashing! %#x\n", rfaddr
);
659 reg
&= ~(MIBC_MIBS
| MIBC_ACLR
);
704 if (params()->rx_thread
)
705 reg
|= M5REG_RX_THREAD
;
706 if (params()->tx_thread
)
707 reg
|= M5REG_TX_THREAD
;
713 panic("reading unimplemented register: addr=%#x", daddr
);
716 DPRINTF(EthernetPIO
, "read from %#x: data=%d data=%#x\n",
719 pkt
->makeAtomicResponse();
724 NSGigE::write(PacketPtr pkt
)
728 Addr daddr
= pkt
->getAddr() & 0xfff;
729 DPRINTF(EthernetPIO
, "write da=%#x pa=%#x size=%d\n",
730 daddr
, pkt
->getAddr(), pkt
->getSize());
732 if (daddr
> LAST
&& daddr
<= RESERVED
) {
733 panic("Accessing reserved register");
734 } else if (daddr
> RESERVED
&& daddr
<= 0x3FC) {
735 return writeConfig(pkt
);
736 } else if (daddr
> 0x3FC)
737 panic("Something is messed up!\n");
739 if (pkt
->getSize() == sizeof(uint32_t)) {
740 uint32_t reg
= pkt
->get
<uint32_t>();
743 DPRINTF(EthernetPIO
, "write data=%d data=%#x\n", reg
, reg
);
750 } else if (reg
& CR_TXE
) {
753 // the kernel is enabling the transmit machine
754 if (txState
== txIdle
)
760 } else if (reg
& CR_RXE
) {
763 if (rxState
== rxIdle
)
774 devIntrPost(ISR_SWI
);
785 if (reg
& CFGR_LNKSTS
||
788 reg
& CFGR_RESERVED
||
789 reg
& CFGR_T64ADDR
||
790 reg
& CFGR_PCI64_DET
)
792 // First clear all writable bits
793 regs
.config
&= CFGR_LNKSTS
| CFGR_SPDSTS
| CFGR_DUPSTS
|
794 CFGR_RESERVED
| CFGR_T64ADDR
|
796 // Now set the appropriate writable bits
797 regs
.config
|= reg
& ~(CFGR_LNKSTS
| CFGR_SPDSTS
| CFGR_DUPSTS
|
798 CFGR_RESERVED
| CFGR_T64ADDR
|
801 // all these #if 0's are because i don't THINK the kernel needs to
802 // have these implemented. if there is a problem relating to one of
803 // these, you may need to add functionality in.
804 if (reg
& CFGR_TBI_EN
) ;
805 if (reg
& CFGR_MODE_1000
) ;
807 if (reg
& CFGR_AUTO_1000
)
808 panic("CFGR_AUTO_1000 not implemented!\n");
810 if (reg
& CFGR_PINT_DUPSTS
||
811 reg
& CFGR_PINT_LNKSTS
||
812 reg
& CFGR_PINT_SPDSTS
)
815 if (reg
& CFGR_TMRTEST
) ;
816 if (reg
& CFGR_MRM_DIS
) ;
817 if (reg
& CFGR_MWI_DIS
) ;
819 if (reg
& CFGR_T64ADDR
) ;
820 // panic("CFGR_T64ADDR is read only register!\n");
822 if (reg
& CFGR_PCI64_DET
)
823 panic("CFGR_PCI64_DET is read only register!\n");
825 if (reg
& CFGR_DATA64_EN
) ;
826 if (reg
& CFGR_M64ADDR
) ;
827 if (reg
& CFGR_PHY_RST
) ;
828 if (reg
& CFGR_PHY_DIS
) ;
830 if (reg
& CFGR_EXTSTS_EN
)
833 extstsEnable
= false;
835 if (reg
& CFGR_REQALG
) ;
837 if (reg
& CFGR_POW
) ;
838 if (reg
& CFGR_EXD
) ;
839 if (reg
& CFGR_PESEL
) ;
840 if (reg
& CFGR_BROM_DIS
) ;
841 if (reg
& CFGR_EXT_125
) ;
842 if (reg
& CFGR_BEM
) ;
846 // Clear writable bits
847 regs
.mear
&= MEAR_EEDO
;
848 // Set appropriate writable bits
849 regs
.mear
|= reg
& ~MEAR_EEDO
;
851 // FreeBSD uses the EEPROM to read PMATCH (for the MAC address)
852 // even though it could get it through RFDR
853 if (reg
& MEAR_EESEL
) {
854 // Rising edge of clock
855 if (reg
& MEAR_EECLK
&& !eepromClk
)
859 eepromState
= eepromStart
;
860 regs
.mear
&= ~MEAR_EEDI
;
863 eepromClk
= reg
& MEAR_EECLK
;
865 // since phy is completely faked, MEAR_MD* don't matter
866 if (reg
& MEAR_MDIO
) ;
867 if (reg
& MEAR_MDDIR
) ;
868 if (reg
& MEAR_MDC
) ;
872 regs
.ptscr
= reg
& ~(PTSCR_RBIST_RDONLY
);
873 // these control BISTs for various parts of chip - we
874 // don't care or do just fake that the BIST is done
875 if (reg
& PTSCR_RBIST_EN
)
876 regs
.ptscr
|= PTSCR_RBIST_DONE
;
877 if (reg
& PTSCR_EEBIST_EN
)
878 regs
.ptscr
&= ~PTSCR_EEBIST_EN
;
879 if (reg
& PTSCR_EELOAD_EN
)
880 regs
.ptscr
&= ~PTSCR_EELOAD_EN
;
883 case ISR
: /* writing to the ISR has no effect */
884 panic("ISR is a read only register!\n");
897 /* not going to implement real interrupt holdoff */
901 regs
.txdp
= (reg
& 0xFFFFFFFC);
902 assert(txState
== txIdle
);
913 if (reg
& TX_CFG_CSI
) ;
914 if (reg
& TX_CFG_HBI
) ;
915 if (reg
& TX_CFG_MLB
) ;
916 if (reg
& TX_CFG_ATP
) ;
917 if (reg
& TX_CFG_ECRETRY
) {
919 * this could easily be implemented, but considering
920 * the network is just a fake pipe, wouldn't make
925 if (reg
& TX_CFG_BRST_DIS
) ;
929 /* we handle our own DMA, ignore the kernel's exhortations */
930 if (reg
& TX_CFG_MXDMA
) ;
933 // also, we currently don't care about fill/drain
934 // thresholds though this may change in the future with
935 // more realistic networks or a driver which changes it
936 // according to feedback
941 // Only write writable bits
942 regs
.gpior
&= GPIOR_UNUSED
| GPIOR_GP5_IN
| GPIOR_GP4_IN
943 | GPIOR_GP3_IN
| GPIOR_GP2_IN
| GPIOR_GP1_IN
;
944 regs
.gpior
|= reg
& ~(GPIOR_UNUSED
| GPIOR_GP5_IN
| GPIOR_GP4_IN
945 | GPIOR_GP3_IN
| GPIOR_GP2_IN
| GPIOR_GP1_IN
);
946 /* these just control general purpose i/o pins, don't matter */
961 if (reg
& RX_CFG_AEP
) ;
962 if (reg
& RX_CFG_ARP
) ;
963 if (reg
& RX_CFG_STRIPCRC
) ;
964 if (reg
& RX_CFG_RX_RD
) ;
965 if (reg
& RX_CFG_ALP
) ;
966 if (reg
& RX_CFG_AIRL
) ;
968 /* we handle our own DMA, ignore what kernel says about it */
969 if (reg
& RX_CFG_MXDMA
) ;
971 //also, we currently don't care about fill/drain thresholds
972 //though this may change in the future with more realistic
973 //networks or a driver which changes it according to feedback
974 if (reg
& (RX_CFG_DRTH
| RX_CFG_DRTH0
)) ;
979 /* there is no priority queueing used in the linux 2.6 driver */
984 /* not going to implement wake on LAN */
989 /* not going to implement pause control */
996 rxFilterEnable
= (reg
& RFCR_RFEN
) ? true : false;
997 acceptBroadcast
= (reg
& RFCR_AAB
) ? true : false;
998 acceptMulticast
= (reg
& RFCR_AAM
) ? true : false;
999 acceptUnicast
= (reg
& RFCR_AAU
) ? true : false;
1000 acceptPerfect
= (reg
& RFCR_APM
) ? true : false;
1001 acceptArp
= (reg
& RFCR_AARP
) ? true : false;
1002 multicastHashEnable
= (reg
& RFCR_MHEN
) ? true : false;
1005 if (reg
& RFCR_APAT
)
1006 panic("RFCR_APAT not implemented!\n");
1008 if (reg
& RFCR_UHEN
)
1009 panic("Unicast hash filtering not used by drivers!\n");
1012 panic("RFCR_ULM not implemented!\n");
1017 rfaddr
= (uint16_t)(regs
.rfcr
& RFCR_RFADDR
);
1020 rom
.perfectMatch
[0] = (uint8_t)reg
;
1021 rom
.perfectMatch
[1] = (uint8_t)(reg
>> 8);
1024 rom
.perfectMatch
[2] = (uint8_t)reg
;
1025 rom
.perfectMatch
[3] = (uint8_t)(reg
>> 8);
1028 rom
.perfectMatch
[4] = (uint8_t)reg
;
1029 rom
.perfectMatch
[5] = (uint8_t)(reg
>> 8);
1033 if (rfaddr
>= FHASH_ADDR
&&
1034 rfaddr
< FHASH_ADDR
+ FHASH_SIZE
) {
1036 // Only word-aligned writes supported
1038 panic("unaligned write to filter hash table!");
1040 rom
.filterHash
[rfaddr
- FHASH_ADDR
] = (uint8_t)reg
;
1041 rom
.filterHash
[rfaddr
- FHASH_ADDR
+ 1]
1042 = (uint8_t)(reg
>> 8);
1045 panic("writing RFDR for something other than pattern matching\
1046 or hashing! %#x\n", rfaddr
);
1054 panic("the driver never uses BRDR, something is wrong!\n");
1057 panic("SRR is read only register!\n");
1060 panic("the driver never uses MIBC, something is wrong!\n");
1071 panic("the driver never uses VDR, something is wrong!\n");
1074 /* not going to implement clockrun stuff */
1080 if (reg
& TBICR_MR_LOOPBACK
)
1081 panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
1083 if (reg
& TBICR_MR_AN_ENABLE
) {
1084 regs
.tanlpar
= regs
.tanar
;
1085 regs
.tbisr
|= (TBISR_MR_AN_COMPLETE
| TBISR_MR_LINK_STATUS
);
1089 if (reg
& TBICR_MR_RESTART_AN
) ;
1095 panic("TBISR is read only register!\n");
1098 // Only write the writable bits
1099 regs
.tanar
&= TANAR_RF1
| TANAR_RF2
| TANAR_UNUSED
;
1100 regs
.tanar
|= reg
& ~(TANAR_RF1
| TANAR_RF2
| TANAR_UNUSED
);
1102 // Pause capability unimplemented
1104 if (reg
& TANAR_PS2
) ;
1105 if (reg
& TANAR_PS1
) ;
1111 panic("this should only be written to by the fake phy!\n");
1114 panic("TANER is read only register!\n");
1121 panic("invalid register access daddr=%#x", daddr
);
1124 panic("Invalid Request Size");
1126 pkt
->makeAtomicResponse();
1131 NSGigE::devIntrPost(uint32_t interrupts
)
1133 if (interrupts
& ISR_RESERVE
)
1134 panic("Cannot set a reserved interrupt");
1136 if (interrupts
& ISR_NOIMPL
)
1137 warn("interrupt not implemented %#x\n", interrupts
);
1139 interrupts
&= ISR_IMPL
;
1140 regs
.isr
|= interrupts
;
1142 if (interrupts
& regs
.imr
) {
1143 if (interrupts
& ISR_SWI
) {
1146 if (interrupts
& ISR_RXIDLE
) {
1149 if (interrupts
& ISR_RXOK
) {
1152 if (interrupts
& ISR_RXDESC
) {
1155 if (interrupts
& ISR_TXOK
) {
1158 if (interrupts
& ISR_TXIDLE
) {
1161 if (interrupts
& ISR_TXDESC
) {
1164 if (interrupts
& ISR_RXORN
) {
1169 DPRINTF(EthernetIntr
,
1170 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
1171 interrupts
, regs
.isr
, regs
.imr
);
1173 if ((regs
.isr
& regs
.imr
)) {
1174 Tick when
= curTick
;
1175 if ((regs
.isr
& regs
.imr
& ISR_NODELAY
) == 0)
1181 /* writing this interrupt counting stats inside this means that this function
1182 is now limited to being used to clear all interrupts upon the kernel
1183 reading isr and servicing. just telling you in case you were thinking
1187 NSGigE::devIntrClear(uint32_t interrupts
)
1189 if (interrupts
& ISR_RESERVE
)
1190 panic("Cannot clear a reserved interrupt");
1192 if (regs
.isr
& regs
.imr
& ISR_SWI
) {
1195 if (regs
.isr
& regs
.imr
& ISR_RXIDLE
) {
1198 if (regs
.isr
& regs
.imr
& ISR_RXOK
) {
1201 if (regs
.isr
& regs
.imr
& ISR_RXDESC
) {
1204 if (regs
.isr
& regs
.imr
& ISR_TXOK
) {
1207 if (regs
.isr
& regs
.imr
& ISR_TXIDLE
) {
1210 if (regs
.isr
& regs
.imr
& ISR_TXDESC
) {
1213 if (regs
.isr
& regs
.imr
& ISR_RXORN
) {
1217 if (regs
.isr
& regs
.imr
& ISR_IMPL
)
1220 interrupts
&= ~ISR_NOIMPL
;
1221 regs
.isr
&= ~interrupts
;
1223 DPRINTF(EthernetIntr
,
1224 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
1225 interrupts
, regs
.isr
, regs
.imr
);
1227 if (!(regs
.isr
& regs
.imr
))
1232 NSGigE::devIntrChangeMask()
1234 DPRINTF(EthernetIntr
, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
1235 regs
.isr
, regs
.imr
, regs
.isr
& regs
.imr
);
1237 if (regs
.isr
& regs
.imr
)
1238 cpuIntrPost(curTick
);
1244 NSGigE::cpuIntrPost(Tick when
)
1246 // If the interrupt you want to post is later than an interrupt
1247 // already scheduled, just let it post in the coming one and don't
1248 // schedule another.
1249 // HOWEVER, must be sure that the scheduled intrTick is in the
1250 // future (this was formerly the source of a bug)
1252 * @todo this warning should be removed and the intrTick code should
1255 assert(when
>= curTick
);
1256 assert(intrTick
>= curTick
|| intrTick
== 0);
1257 if (when
> intrTick
&& intrTick
!= 0) {
1258 DPRINTF(EthernetIntr
, "don't need to schedule event...intrTick=%d\n",
1264 if (intrTick
< curTick
) {
1269 DPRINTF(EthernetIntr
, "going to schedule an interrupt for intrTick=%d\n",
1273 intrEvent
->squash();
1274 intrEvent
= new IntrEvent(this, intrTick
, true);
1278 NSGigE::cpuInterrupt()
1280 assert(intrTick
== curTick
);
1282 // Whether or not there's a pending interrupt, we don't care about
1287 // Don't send an interrupt if there's already one
1288 if (cpuPendingIntr
) {
1289 DPRINTF(EthernetIntr
,
1290 "would send an interrupt now, but there's already pending\n");
1293 cpuPendingIntr
= true;
1295 DPRINTF(EthernetIntr
, "posting interrupt\n");
1301 NSGigE::cpuIntrClear()
1303 if (!cpuPendingIntr
)
1307 intrEvent
->squash();
1313 cpuPendingIntr
= false;
1315 DPRINTF(EthernetIntr
, "clearing interrupt\n");
1320 NSGigE::cpuIntrPending() const
1321 { return cpuPendingIntr
; }
1327 DPRINTF(Ethernet
, "transmit reset\n");
1332 assert(txDescCnt
== 0);
1335 assert(txDmaState
== dmaIdle
);
1341 DPRINTF(Ethernet
, "receive reset\n");
1344 assert(rxPktBytes
== 0);
1347 assert(rxDescCnt
== 0);
1348 assert(rxDmaState
== dmaIdle
);
1356 memset(®s
, 0, sizeof(regs
));
1357 regs
.config
= (CFGR_LNKSTS
| CFGR_TBI_EN
| CFGR_MODE_1000
);
1359 regs
.txcfg
= 0x120; // set drain threshold to 1024 bytes and
1360 // fill threshold to 32 bytes
1361 regs
.rxcfg
= 0x4; // set drain threshold to 16 bytes
1362 regs
.srr
= 0x0103; // set the silicon revision to rev B or 0x103
1363 regs
.mibc
= MIBC_FRZ
;
1364 regs
.vdr
= 0x81; // set the vlan tag type to 802.1q
1365 regs
.tesr
= 0xc000; // TBI capable of both full and half duplex
1366 regs
.brar
= 0xffffffff;
1368 extstsEnable
= false;
1369 acceptBroadcast
= false;
1370 acceptMulticast
= false;
1371 acceptUnicast
= false;
1372 acceptPerfect
= false;
1377 NSGigE::doRxDmaRead()
1379 assert(rxDmaState
== dmaIdle
|| rxDmaState
== dmaReadWaiting
);
1380 rxDmaState
= dmaReading
;
1382 if (dmaPending() || getState() != Running
)
1383 rxDmaState
= dmaReadWaiting
;
1385 dmaRead(rxDmaAddr
, rxDmaLen
, &rxDmaReadEvent
, (uint8_t*)rxDmaData
);
1391 NSGigE::rxDmaReadDone()
1393 assert(rxDmaState
== dmaReading
);
1394 rxDmaState
= dmaIdle
;
1396 DPRINTF(EthernetDMA
, "rx dma read paddr=%#x len=%d\n",
1397 rxDmaAddr
, rxDmaLen
);
1398 DDUMP(EthernetDMA
, rxDmaData
, rxDmaLen
);
1400 // If the transmit state machine has a pending DMA, let it go first
1401 if (txDmaState
== dmaReadWaiting
|| txDmaState
== dmaWriteWaiting
)
1408 NSGigE::doRxDmaWrite()
1410 assert(rxDmaState
== dmaIdle
|| rxDmaState
== dmaWriteWaiting
);
1411 rxDmaState
= dmaWriting
;
1413 if (dmaPending() || getState() != Running
)
1414 rxDmaState
= dmaWriteWaiting
;
1416 dmaWrite(rxDmaAddr
, rxDmaLen
, &rxDmaWriteEvent
, (uint8_t*)rxDmaData
);
1421 NSGigE::rxDmaWriteDone()
1423 assert(rxDmaState
== dmaWriting
);
1424 rxDmaState
= dmaIdle
;
1426 DPRINTF(EthernetDMA
, "rx dma write paddr=%#x len=%d\n",
1427 rxDmaAddr
, rxDmaLen
);
1428 DDUMP(EthernetDMA
, rxDmaData
, rxDmaLen
);
1430 // If the transmit state machine has a pending DMA, let it go first
1431 if (txDmaState
== dmaReadWaiting
|| txDmaState
== dmaWriteWaiting
)
1440 bool is64bit
= (bool)(regs
.config
& CFGR_M64ADDR
);
1443 "receive kick rxState=%s (rxBuf.size=%d) %d-bit\n",
1444 NsRxStateStrings
[rxState
], rxFifo
.size(), is64bit
? 64 : 32);
1447 uint32_t &cmdsts
= is64bit
? rxDesc64
.cmdsts
: rxDesc32
.cmdsts
;
1448 uint32_t &extsts
= is64bit
? rxDesc64
.extsts
: rxDesc32
.extsts
;
1452 if (rxKickTick
> curTick
) {
1453 DPRINTF(EthernetSM
, "receive kick exiting, can't run till %d\n",
1459 // Go to the next state machine clock tick.
1460 rxKickTick
= curTick
+ cycles(1);
1463 switch(rxDmaState
) {
1464 case dmaReadWaiting
:
1468 case dmaWriteWaiting
:
1476 link
= is64bit
? (Addr
)rxDesc64
.link
: (Addr
)rxDesc32
.link
;
1477 bufptr
= is64bit
? (Addr
)rxDesc64
.bufptr
: (Addr
)rxDesc32
.bufptr
;
1479 // see state machine from spec for details
1480 // the way this works is, if you finish work on one state and can
1481 // go directly to another, you do that through jumping to the
1482 // label "next". however, if you have intermediate work, like DMA
1483 // so that you can't go to the next state yet, you go to exit and
1484 // exit the loop. however, when the DMA is done it will trigger
1485 // an event and come back to this loop.
1489 DPRINTF(EthernetSM
, "Receive Disabled! Nothing to do.\n");
1494 rxState
= rxDescRefr
;
1496 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1498 is64bit
? (void *)&rxDesc64
.link
: (void *)&rxDesc32
.link
;
1499 rxDmaLen
= is64bit
? sizeof(rxDesc64
.link
) : sizeof(rxDesc32
.link
);
1500 rxDmaFree
= dmaDescFree
;
1503 descDmaRdBytes
+= rxDmaLen
;
1508 rxState
= rxDescRead
;
1510 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1511 rxDmaData
= is64bit
? (void *)&rxDesc64
: (void *)&rxDesc32
;
1512 rxDmaLen
= is64bit
? sizeof(rxDesc64
) : sizeof(rxDesc32
);
1513 rxDmaFree
= dmaDescFree
;
1516 descDmaRdBytes
+= rxDmaLen
;
1524 if (rxDmaState
!= dmaIdle
)
1527 rxState
= rxAdvance
;
1531 if (rxDmaState
!= dmaIdle
)
1534 DPRINTF(EthernetDesc
, "rxDesc: addr=%08x read descriptor\n",
1535 regs
.rxdp
& 0x3fffffff);
1536 DPRINTF(EthernetDesc
,
1537 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
1538 link
, bufptr
, cmdsts
, extsts
);
1540 if (cmdsts
& CMDSTS_OWN
) {
1541 devIntrPost(ISR_RXIDLE
);
1545 rxState
= rxFifoBlock
;
1547 rxDescCnt
= cmdsts
& CMDSTS_LEN_MASK
;
1554 * @todo in reality, we should be able to start processing
1555 * the packet as it arrives, and not have to wait for the
1556 * full packet ot be in the receive fifo.
1561 DPRINTF(EthernetSM
, "****processing receive of new packet****\n");
1563 // If we don't have a packet, grab a new one from the fifo.
1564 rxPacket
= rxFifo
.front();
1565 rxPktBytes
= rxPacket
->length
;
1566 rxPacketBufPtr
= rxPacket
->data
;
1569 if (DTRACE(Ethernet
)) {
1572 DPRINTF(Ethernet
, "ID is %d\n", ip
->id());
1576 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1577 tcp
->sport(), tcp
->dport(), tcp
->seq(),
1584 // sanity check - i think the driver behaves like this
1585 assert(rxDescCnt
>= rxPktBytes
);
1590 // dont' need the && rxDescCnt > 0 if driver sanity check
1592 if (rxPktBytes
> 0) {
1593 rxState
= rxFragWrite
;
1594 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
1596 rxXferLen
= rxPktBytes
;
1598 rxDmaAddr
= rxFragPtr
& 0x3fffffff;
1599 rxDmaData
= rxPacketBufPtr
;
1600 rxDmaLen
= rxXferLen
;
1601 rxDmaFree
= dmaDataFree
;
1607 rxState
= rxDescWrite
;
1609 //if (rxPktBytes == 0) { /* packet is done */
1610 assert(rxPktBytes
== 0);
1611 DPRINTF(EthernetSM
, "done with receiving packet\n");
1613 cmdsts
|= CMDSTS_OWN
;
1614 cmdsts
&= ~CMDSTS_MORE
;
1615 cmdsts
|= CMDSTS_OK
;
1616 cmdsts
&= 0xffff0000;
1617 cmdsts
+= rxPacket
->length
; //i.e. set CMDSTS_SIZE
1621 * all the driver uses these are for its own stats keeping
1622 * which we don't care about, aren't necessary for
1623 * functionality and doing this would just slow us down.
1624 * if they end up using this in a later version for
1625 * functional purposes, just undef
1627 if (rxFilterEnable
) {
1628 cmdsts
&= ~CMDSTS_DEST_MASK
;
1629 const EthAddr
&dst
= rxFifoFront()->dst();
1631 cmdsts
|= CMDSTS_DEST_SELF
;
1632 if (dst
->multicast())
1633 cmdsts
|= CMDSTS_DEST_MULTI
;
1634 if (dst
->broadcast())
1635 cmdsts
|= CMDSTS_DEST_MASK
;
1640 if (extstsEnable
&& ip
) {
1641 extsts
|= EXTSTS_IPPKT
;
1643 if (cksum(ip
) != 0) {
1644 DPRINTF(EthernetCksum
, "Rx IP Checksum Error\n");
1645 extsts
|= EXTSTS_IPERR
;
1650 extsts
|= EXTSTS_TCPPKT
;
1652 if (cksum(tcp
) != 0) {
1653 DPRINTF(EthernetCksum
, "Rx TCP Checksum Error\n");
1654 extsts
|= EXTSTS_TCPERR
;
1658 extsts
|= EXTSTS_UDPPKT
;
1660 if (cksum(udp
) != 0) {
1661 DPRINTF(EthernetCksum
, "Rx UDP Checksum Error\n");
1662 extsts
|= EXTSTS_UDPERR
;
1669 * the driver seems to always receive into desc buffers
1670 * of size 1514, so you never have a pkt that is split
1671 * into multiple descriptors on the receive side, so
1672 * i don't implement that case, hence the assert above.
1675 DPRINTF(EthernetDesc
,
1676 "rxDesc: addr=%08x writeback cmdsts extsts\n",
1677 regs
.rxdp
& 0x3fffffff);
1678 DPRINTF(EthernetDesc
,
1679 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
1680 link
, bufptr
, cmdsts
, extsts
);
1682 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1683 rxDmaData
= &cmdsts
;
1685 rxDmaAddr
+= offsetof(ns_desc64
, cmdsts
);
1686 rxDmaLen
= sizeof(rxDesc64
.cmdsts
) + sizeof(rxDesc64
.extsts
);
1688 rxDmaAddr
+= offsetof(ns_desc32
, cmdsts
);
1689 rxDmaLen
= sizeof(rxDesc32
.cmdsts
) + sizeof(rxDesc32
.extsts
);
1691 rxDmaFree
= dmaDescFree
;
1694 descDmaWrBytes
+= rxDmaLen
;
1702 if (rxDmaState
!= dmaIdle
)
1705 rxPacketBufPtr
+= rxXferLen
;
1706 rxFragPtr
+= rxXferLen
;
1707 rxPktBytes
-= rxXferLen
;
1709 rxState
= rxFifoBlock
;
1713 if (rxDmaState
!= dmaIdle
)
1716 assert(cmdsts
& CMDSTS_OWN
);
1718 assert(rxPacket
== 0);
1719 devIntrPost(ISR_RXOK
);
1721 if (cmdsts
& CMDSTS_INTR
)
1722 devIntrPost(ISR_RXDESC
);
1725 DPRINTF(EthernetSM
, "Halting the RX state machine\n");
1729 rxState
= rxAdvance
;
1734 devIntrPost(ISR_RXIDLE
);
1739 if (rxDmaState
!= dmaIdle
)
1741 rxState
= rxDescRead
;
1745 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1746 rxDmaData
= is64bit
? (void *)&rxDesc64
: (void *)&rxDesc32
;
1747 rxDmaLen
= is64bit
? sizeof(rxDesc64
) : sizeof(rxDesc32
);
1748 rxDmaFree
= dmaDescFree
;
1756 panic("Invalid rxState!");
1759 DPRINTF(EthernetSM
, "entering next rxState=%s\n",
1760 NsRxStateStrings
[rxState
]);
1765 * @todo do we want to schedule a future kick?
1767 DPRINTF(EthernetSM
, "rx state machine exited rxState=%s\n",
1768 NsRxStateStrings
[rxState
]);
1770 if (clock
&& !rxKickEvent
.scheduled())
1771 rxKickEvent
.schedule(rxKickTick
);
1777 if (txFifo
.empty()) {
1778 DPRINTF(Ethernet
, "nothing to transmit\n");
1782 DPRINTF(Ethernet
, "Attempt Pkt Transmit: txFifo length=%d\n",
1784 if (interface
->sendPacket(txFifo
.front())) {
1786 if (DTRACE(Ethernet
)) {
1787 IpPtr
ip(txFifo
.front());
1789 DPRINTF(Ethernet
, "ID is %d\n", ip
->id());
1793 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1794 tcp
->sport(), tcp
->dport(), tcp
->seq(),
1801 DDUMP(EthernetData
, txFifo
.front()->data
, txFifo
.front()->length
);
1802 txBytes
+= txFifo
.front()->length
;
1805 DPRINTF(Ethernet
, "Successful Xmit! now txFifoAvail is %d\n",
1810 * normally do a writeback of the descriptor here, and ONLY
1811 * after that is done, send this interrupt. but since our
1812 * stuff never actually fails, just do this interrupt here,
1813 * otherwise the code has to stray from this nice format.
1814 * besides, it's functionally the same.
1816 devIntrPost(ISR_TXOK
);
1819 if (!txFifo
.empty() && !txEvent
.scheduled()) {
1820 DPRINTF(Ethernet
, "reschedule transmit\n");
1821 txEvent
.schedule(curTick
+ retryTime
);
1826 NSGigE::doTxDmaRead()
1828 assert(txDmaState
== dmaIdle
|| txDmaState
== dmaReadWaiting
);
1829 txDmaState
= dmaReading
;
1831 if (dmaPending() || getState() != Running
)
1832 txDmaState
= dmaReadWaiting
;
1834 dmaRead(txDmaAddr
, txDmaLen
, &txDmaReadEvent
, (uint8_t*)txDmaData
);
1840 NSGigE::txDmaReadDone()
1842 assert(txDmaState
== dmaReading
);
1843 txDmaState
= dmaIdle
;
1845 DPRINTF(EthernetDMA
, "tx dma read paddr=%#x len=%d\n",
1846 txDmaAddr
, txDmaLen
);
1847 DDUMP(EthernetDMA
, txDmaData
, txDmaLen
);
1849 // If the receive state machine has a pending DMA, let it go first
1850 if (rxDmaState
== dmaReadWaiting
|| rxDmaState
== dmaWriteWaiting
)
1857 NSGigE::doTxDmaWrite()
1859 assert(txDmaState
== dmaIdle
|| txDmaState
== dmaWriteWaiting
);
1860 txDmaState
= dmaWriting
;
1862 if (dmaPending() || getState() != Running
)
1863 txDmaState
= dmaWriteWaiting
;
1865 dmaWrite(txDmaAddr
, txDmaLen
, &txDmaWriteEvent
, (uint8_t*)txDmaData
);
1870 NSGigE::txDmaWriteDone()
1872 assert(txDmaState
== dmaWriting
);
1873 txDmaState
= dmaIdle
;
1875 DPRINTF(EthernetDMA
, "tx dma write paddr=%#x len=%d\n",
1876 txDmaAddr
, txDmaLen
);
1877 DDUMP(EthernetDMA
, txDmaData
, txDmaLen
);
1879 // If the receive state machine has a pending DMA, let it go first
1880 if (rxDmaState
== dmaReadWaiting
|| rxDmaState
== dmaWriteWaiting
)
1889 bool is64bit
= (bool)(regs
.config
& CFGR_M64ADDR
);
1891 DPRINTF(EthernetSM
, "transmit kick txState=%s %d-bit\n",
1892 NsTxStateStrings
[txState
], is64bit
? 64 : 32);
1895 uint32_t &cmdsts
= is64bit
? txDesc64
.cmdsts
: txDesc32
.cmdsts
;
1896 uint32_t &extsts
= is64bit
? txDesc64
.extsts
: txDesc32
.extsts
;
1900 if (txKickTick
> curTick
) {
1901 DPRINTF(EthernetSM
, "transmit kick exiting, can't run till %d\n",
1906 // Go to the next state machine clock tick.
1907 txKickTick
= curTick
+ cycles(1);
1910 switch(txDmaState
) {
1911 case dmaReadWaiting
:
1915 case dmaWriteWaiting
:
1923 link
= is64bit
? (Addr
)txDesc64
.link
: (Addr
)txDesc32
.link
;
1924 bufptr
= is64bit
? (Addr
)txDesc64
.bufptr
: (Addr
)txDesc32
.bufptr
;
1928 DPRINTF(EthernetSM
, "Transmit disabled. Nothing to do.\n");
1933 txState
= txDescRefr
;
1935 txDmaAddr
= regs
.txdp
& 0x3fffffff;
1937 is64bit
? (void *)&txDesc64
.link
: (void *)&txDesc32
.link
;
1938 txDmaLen
= is64bit
? sizeof(txDesc64
.link
) : sizeof(txDesc32
.link
);
1939 txDmaFree
= dmaDescFree
;
1942 descDmaRdBytes
+= txDmaLen
;
1948 txState
= txDescRead
;
1950 txDmaAddr
= regs
.txdp
& 0x3fffffff;
1951 txDmaData
= is64bit
? (void *)&txDesc64
: (void *)&txDesc32
;
1952 txDmaLen
= is64bit
? sizeof(txDesc64
) : sizeof(txDesc32
);
1953 txDmaFree
= dmaDescFree
;
1956 descDmaRdBytes
+= txDmaLen
;
1964 if (txDmaState
!= dmaIdle
)
1967 txState
= txAdvance
;
1971 if (txDmaState
!= dmaIdle
)
1974 DPRINTF(EthernetDesc
, "txDesc: addr=%08x read descriptor\n",
1975 regs
.txdp
& 0x3fffffff);
1976 DPRINTF(EthernetDesc
,
1977 "txDesc: link=%#x bufptr=%#x cmdsts=%#08x extsts=%#08x\n",
1978 link
, bufptr
, cmdsts
, extsts
);
1980 if (cmdsts
& CMDSTS_OWN
) {
1981 txState
= txFifoBlock
;
1983 txDescCnt
= cmdsts
& CMDSTS_LEN_MASK
;
1985 devIntrPost(ISR_TXIDLE
);
1993 DPRINTF(EthernetSM
, "****starting the tx of a new packet****\n");
1994 txPacket
= new EthPacketData(16384);
1995 txPacketBufPtr
= txPacket
->data
;
1998 if (txDescCnt
== 0) {
1999 DPRINTF(EthernetSM
, "the txDescCnt == 0, done with descriptor\n");
2000 if (cmdsts
& CMDSTS_MORE
) {
2001 DPRINTF(EthernetSM
, "there are more descriptors to come\n");
2002 txState
= txDescWrite
;
2004 cmdsts
&= ~CMDSTS_OWN
;
2006 txDmaAddr
= regs
.txdp
& 0x3fffffff;
2007 txDmaData
= &cmdsts
;
2009 txDmaAddr
+= offsetof(ns_desc64
, cmdsts
);
2010 txDmaLen
= sizeof(txDesc64
.cmdsts
);
2012 txDmaAddr
+= offsetof(ns_desc32
, cmdsts
);
2013 txDmaLen
= sizeof(txDesc32
.cmdsts
);
2015 txDmaFree
= dmaDescFree
;
2020 } else { /* this packet is totally done */
2021 DPRINTF(EthernetSM
, "This packet is done, let's wrap it up\n");
2022 /* deal with the packet that just finished */
2023 if ((regs
.vtcr
& VTCR_PPCHK
) && extstsEnable
) {
2025 if (extsts
& EXTSTS_UDPPKT
) {
2028 udp
->sum(cksum(udp
));
2030 } else if (extsts
& EXTSTS_TCPPKT
) {
2033 tcp
->sum(cksum(tcp
));
2036 if (extsts
& EXTSTS_IPPKT
) {
2043 txPacket
->length
= txPacketBufPtr
- txPacket
->data
;
2044 // this is just because the receive can't handle a
2045 // packet bigger; want to make sure
2046 if (txPacket
->length
> 1514)
2047 panic("transmit packet too large, %s > 1514\n",
2053 txFifo
.push(txPacket
);
2057 * the following section is not to spec, but
2058 * functionally shouldn't be any different. normally,
2059 * the chip will wait til the transmit has occurred
2060 * before writing back the descriptor because it has
2061 * to wait to see that it was successfully transmitted
2062 * to decide whether to set CMDSTS_OK or not.
2063 * however, in the simulator since it is always
2064 * successfully transmitted, and writing it exactly to
2065 * spec would complicate the code, we just do it here
2068 cmdsts
&= ~CMDSTS_OWN
;
2069 cmdsts
|= CMDSTS_OK
;
2071 DPRINTF(EthernetDesc
,
2072 "txDesc writeback: cmdsts=%08x extsts=%08x\n",
2075 txDmaFree
= dmaDescFree
;
2076 txDmaAddr
= regs
.txdp
& 0x3fffffff;
2077 txDmaData
= &cmdsts
;
2079 txDmaAddr
+= offsetof(ns_desc64
, cmdsts
);
2081 sizeof(txDesc64
.cmdsts
) + sizeof(txDesc64
.extsts
);
2083 txDmaAddr
+= offsetof(ns_desc32
, cmdsts
);
2085 sizeof(txDesc32
.cmdsts
) + sizeof(txDesc32
.extsts
);
2089 descDmaWrBytes
+= txDmaLen
;
2095 DPRINTF(EthernetSM
, "halting TX state machine\n");
2099 txState
= txAdvance
;
2105 DPRINTF(EthernetSM
, "this descriptor isn't done yet\n");
2106 if (!txFifo
.full()) {
2107 txState
= txFragRead
;
2110 * The number of bytes transferred is either whatever
2111 * is left in the descriptor (txDescCnt), or if there
2112 * is not enough room in the fifo, just whatever room
2113 * is left in the fifo
2115 txXferLen
= min
<uint32_t>(txDescCnt
, txFifo
.avail());
2117 txDmaAddr
= txFragPtr
& 0x3fffffff;
2118 txDmaData
= txPacketBufPtr
;
2119 txDmaLen
= txXferLen
;
2120 txDmaFree
= dmaDataFree
;
2125 txState
= txFifoBlock
;
2135 if (txDmaState
!= dmaIdle
)
2138 txPacketBufPtr
+= txXferLen
;
2139 txFragPtr
+= txXferLen
;
2140 txDescCnt
-= txXferLen
;
2141 txFifo
.reserve(txXferLen
);
2143 txState
= txFifoBlock
;
2147 if (txDmaState
!= dmaIdle
)
2150 if (cmdsts
& CMDSTS_INTR
)
2151 devIntrPost(ISR_TXDESC
);
2154 DPRINTF(EthernetSM
, "halting TX state machine\n");
2158 txState
= txAdvance
;
2163 devIntrPost(ISR_TXIDLE
);
2167 if (txDmaState
!= dmaIdle
)
2169 txState
= txDescRead
;
2173 txDmaAddr
= link
& 0x3fffffff;
2174 txDmaData
= is64bit
? (void *)&txDesc64
: (void *)&txDesc32
;
2175 txDmaLen
= is64bit
? sizeof(txDesc64
) : sizeof(txDesc32
);
2176 txDmaFree
= dmaDescFree
;
2184 panic("invalid state");
2187 DPRINTF(EthernetSM
, "entering next txState=%s\n",
2188 NsTxStateStrings
[txState
]);
2193 * @todo do we want to schedule a future kick?
2195 DPRINTF(EthernetSM
, "tx state machine exited txState=%s\n",
2196 NsTxStateStrings
[txState
]);
2198 if (clock
&& !txKickEvent
.scheduled())
2199 txKickEvent
.schedule(txKickTick
);
2203 * Advance the EEPROM state machine
2204 * Called on rising edge of EEPROM clock bit in MEAR
2207 NSGigE::eepromKick()
2209 switch (eepromState
) {
2213 // Wait for start bit
2214 if (regs
.mear
& MEAR_EEDI
) {
2215 // Set up to get 2 opcode bits
2216 eepromState
= eepromGetOpcode
;
2222 case eepromGetOpcode
:
2224 eepromOpcode
+= (regs
.mear
& MEAR_EEDI
) ? 1 : 0;
2227 // Done getting opcode
2228 if (eepromBitsToRx
== 0) {
2229 if (eepromOpcode
!= EEPROM_READ
)
2230 panic("only EEPROM reads are implemented!");
2232 // Set up to get address
2233 eepromState
= eepromGetAddress
;
2239 case eepromGetAddress
:
2240 eepromAddress
<<= 1;
2241 eepromAddress
+= (regs
.mear
& MEAR_EEDI
) ? 1 : 0;
2244 // Done getting address
2245 if (eepromBitsToRx
== 0) {
2247 if (eepromAddress
>= EEPROM_SIZE
)
2248 panic("EEPROM read access out of range!");
2250 switch (eepromAddress
) {
2252 case EEPROM_PMATCH2_ADDR
:
2253 eepromData
= rom
.perfectMatch
[5];
2255 eepromData
+= rom
.perfectMatch
[4];
2258 case EEPROM_PMATCH1_ADDR
:
2259 eepromData
= rom
.perfectMatch
[3];
2261 eepromData
+= rom
.perfectMatch
[2];
2264 case EEPROM_PMATCH0_ADDR
:
2265 eepromData
= rom
.perfectMatch
[1];
2267 eepromData
+= rom
.perfectMatch
[0];
2271 panic("FreeBSD driver only uses EEPROM to read PMATCH!");
2273 // Set up to read data
2274 eepromState
= eepromRead
;
2275 eepromBitsToRx
= 16;
2277 // Clear data in bit
2278 regs
.mear
&= ~MEAR_EEDI
;
2283 // Clear Data Out bit
2284 regs
.mear
&= ~MEAR_EEDO
;
2285 // Set bit to value of current EEPROM bit
2286 regs
.mear
|= (eepromData
& 0x8000) ? MEAR_EEDO
: 0x0;
2292 if (eepromBitsToRx
== 0) {
2293 eepromState
= eepromStart
;
2298 panic("invalid EEPROM state");
2304 NSGigE::transferDone()
2306 if (txFifo
.empty()) {
2307 DPRINTF(Ethernet
, "transfer complete: txFifo empty...nothing to do\n");
2311 DPRINTF(Ethernet
, "transfer complete: data in txFifo...schedule xmit\n");
2313 txEvent
.reschedule(curTick
+ cycles(1), true);
2317 NSGigE::rxFilter(const EthPacketPtr
&packet
)
2319 EthPtr eth
= packet
;
2323 const EthAddr
&dst
= eth
->dst();
2324 if (dst
.unicast()) {
2325 // If we're accepting all unicast addresses
2329 // If we make a perfect match
2330 if (acceptPerfect
&& dst
== rom
.perfectMatch
)
2333 if (acceptArp
&& eth
->type() == ETH_TYPE_ARP
)
2336 } else if (dst
.broadcast()) {
2337 // if we're accepting broadcasts
2338 if (acceptBroadcast
)
2341 } else if (dst
.multicast()) {
2342 // if we're accepting all multicasts
2343 if (acceptMulticast
)
2346 // Multicast hashing faked - all packets accepted
2347 if (multicastHashEnable
)
2352 DPRINTF(Ethernet
, "rxFilter drop\n");
2353 DDUMP(EthernetData
, packet
->data
, packet
->length
);
2360 NSGigE::recvPacket(EthPacketPtr packet
)
2362 rxBytes
+= packet
->length
;
2365 DPRINTF(Ethernet
, "Receiving packet from wire, rxFifoAvail=%d\n",
2369 DPRINTF(Ethernet
, "receive disabled...packet dropped\n");
2373 if (!rxFilterEnable
) {
2375 "receive packet filtering disabled . . . packet dropped\n");
2379 if (rxFilter(packet
)) {
2380 DPRINTF(Ethernet
, "packet filtered...dropped\n");
2384 if (rxFifo
.avail() < packet
->length
) {
2390 "packet won't fit in receive buffer...pkt ID %d dropped\n",
2393 DPRINTF(Ethernet
, "Seq=%d\n", tcp
->seq());
2398 devIntrPost(ISR_RXORN
);
2402 rxFifo
.push(packet
);
2412 SimObject::resume();
2414 // During drain we could have left the state machines in a waiting state and
2415 // they wouldn't get out until some other event occurred to kick them.
2416 // This way they'll get out immediately
2422 //=====================================================================
2426 NSGigE::serialize(ostream
&os
)
2428 // Serialize the PciDev base class
2429 PciDev::serialize(os
);
2432 * Finalize any DMA events now.
2434 // @todo will mem system save pending dma?
2437 * Serialize the device registers
2439 SERIALIZE_SCALAR(regs
.command
);
2440 SERIALIZE_SCALAR(regs
.config
);
2441 SERIALIZE_SCALAR(regs
.mear
);
2442 SERIALIZE_SCALAR(regs
.ptscr
);
2443 SERIALIZE_SCALAR(regs
.isr
);
2444 SERIALIZE_SCALAR(regs
.imr
);
2445 SERIALIZE_SCALAR(regs
.ier
);
2446 SERIALIZE_SCALAR(regs
.ihr
);
2447 SERIALIZE_SCALAR(regs
.txdp
);
2448 SERIALIZE_SCALAR(regs
.txdp_hi
);
2449 SERIALIZE_SCALAR(regs
.txcfg
);
2450 SERIALIZE_SCALAR(regs
.gpior
);
2451 SERIALIZE_SCALAR(regs
.rxdp
);
2452 SERIALIZE_SCALAR(regs
.rxdp_hi
);
2453 SERIALIZE_SCALAR(regs
.rxcfg
);
2454 SERIALIZE_SCALAR(regs
.pqcr
);
2455 SERIALIZE_SCALAR(regs
.wcsr
);
2456 SERIALIZE_SCALAR(regs
.pcr
);
2457 SERIALIZE_SCALAR(regs
.rfcr
);
2458 SERIALIZE_SCALAR(regs
.rfdr
);
2459 SERIALIZE_SCALAR(regs
.brar
);
2460 SERIALIZE_SCALAR(regs
.brdr
);
2461 SERIALIZE_SCALAR(regs
.srr
);
2462 SERIALIZE_SCALAR(regs
.mibc
);
2463 SERIALIZE_SCALAR(regs
.vrcr
);
2464 SERIALIZE_SCALAR(regs
.vtcr
);
2465 SERIALIZE_SCALAR(regs
.vdr
);
2466 SERIALIZE_SCALAR(regs
.ccsr
);
2467 SERIALIZE_SCALAR(regs
.tbicr
);
2468 SERIALIZE_SCALAR(regs
.tbisr
);
2469 SERIALIZE_SCALAR(regs
.tanar
);
2470 SERIALIZE_SCALAR(regs
.tanlpar
);
2471 SERIALIZE_SCALAR(regs
.taner
);
2472 SERIALIZE_SCALAR(regs
.tesr
);
2474 SERIALIZE_ARRAY(rom
.perfectMatch
, ETH_ADDR_LEN
);
2475 SERIALIZE_ARRAY(rom
.filterHash
, FHASH_SIZE
);
2477 SERIALIZE_SCALAR(ioEnable
);
2480 * Serialize the data Fifos
2482 rxFifo
.serialize("rxFifo", os
);
2483 txFifo
.serialize("txFifo", os
);
2486 * Serialize the various helper variables
2488 bool txPacketExists
= txPacket
;
2489 SERIALIZE_SCALAR(txPacketExists
);
2490 if (txPacketExists
) {
2491 txPacket
->length
= txPacketBufPtr
- txPacket
->data
;
2492 txPacket
->serialize("txPacket", os
);
2493 uint32_t txPktBufPtr
= (uint32_t) (txPacketBufPtr
- txPacket
->data
);
2494 SERIALIZE_SCALAR(txPktBufPtr
);
2497 bool rxPacketExists
= rxPacket
;
2498 SERIALIZE_SCALAR(rxPacketExists
);
2499 if (rxPacketExists
) {
2500 rxPacket
->serialize("rxPacket", os
);
2501 uint32_t rxPktBufPtr
= (uint32_t) (rxPacketBufPtr
- rxPacket
->data
);
2502 SERIALIZE_SCALAR(rxPktBufPtr
);
2505 SERIALIZE_SCALAR(txXferLen
);
2506 SERIALIZE_SCALAR(rxXferLen
);
2509 * Serialize Cached Descriptors
2511 SERIALIZE_SCALAR(rxDesc64
.link
);
2512 SERIALIZE_SCALAR(rxDesc64
.bufptr
);
2513 SERIALIZE_SCALAR(rxDesc64
.cmdsts
);
2514 SERIALIZE_SCALAR(rxDesc64
.extsts
);
2515 SERIALIZE_SCALAR(txDesc64
.link
);
2516 SERIALIZE_SCALAR(txDesc64
.bufptr
);
2517 SERIALIZE_SCALAR(txDesc64
.cmdsts
);
2518 SERIALIZE_SCALAR(txDesc64
.extsts
);
2519 SERIALIZE_SCALAR(rxDesc32
.link
);
2520 SERIALIZE_SCALAR(rxDesc32
.bufptr
);
2521 SERIALIZE_SCALAR(rxDesc32
.cmdsts
);
2522 SERIALIZE_SCALAR(rxDesc32
.extsts
);
2523 SERIALIZE_SCALAR(txDesc32
.link
);
2524 SERIALIZE_SCALAR(txDesc32
.bufptr
);
2525 SERIALIZE_SCALAR(txDesc32
.cmdsts
);
2526 SERIALIZE_SCALAR(txDesc32
.extsts
);
2527 SERIALIZE_SCALAR(extstsEnable
);
2530 * Serialize tx state machine
2532 int txState
= this->txState
;
2533 SERIALIZE_SCALAR(txState
);
2534 SERIALIZE_SCALAR(txEnable
);
2535 SERIALIZE_SCALAR(CTDD
);
2536 SERIALIZE_SCALAR(txFragPtr
);
2537 SERIALIZE_SCALAR(txDescCnt
);
2538 int txDmaState
= this->txDmaState
;
2539 SERIALIZE_SCALAR(txDmaState
);
2540 SERIALIZE_SCALAR(txKickTick
);
2543 * Serialize rx state machine
2545 int rxState
= this->rxState
;
2546 SERIALIZE_SCALAR(rxState
);
2547 SERIALIZE_SCALAR(rxEnable
);
2548 SERIALIZE_SCALAR(CRDD
);
2549 SERIALIZE_SCALAR(rxPktBytes
);
2550 SERIALIZE_SCALAR(rxFragPtr
);
2551 SERIALIZE_SCALAR(rxDescCnt
);
2552 int rxDmaState
= this->rxDmaState
;
2553 SERIALIZE_SCALAR(rxDmaState
);
2554 SERIALIZE_SCALAR(rxKickTick
);
2557 * Serialize EEPROM state machine
2559 int eepromState
= this->eepromState
;
2560 SERIALIZE_SCALAR(eepromState
);
2561 SERIALIZE_SCALAR(eepromClk
);
2562 SERIALIZE_SCALAR(eepromBitsToRx
);
2563 SERIALIZE_SCALAR(eepromOpcode
);
2564 SERIALIZE_SCALAR(eepromAddress
);
2565 SERIALIZE_SCALAR(eepromData
);
2568 * If there's a pending transmit, store the time so we can
2569 * reschedule it later
2571 Tick transmitTick
= txEvent
.scheduled() ? txEvent
.when() - curTick
: 0;
2572 SERIALIZE_SCALAR(transmitTick
);
2575 * receive address filter settings
2577 SERIALIZE_SCALAR(rxFilterEnable
);
2578 SERIALIZE_SCALAR(acceptBroadcast
);
2579 SERIALIZE_SCALAR(acceptMulticast
);
2580 SERIALIZE_SCALAR(acceptUnicast
);
2581 SERIALIZE_SCALAR(acceptPerfect
);
2582 SERIALIZE_SCALAR(acceptArp
);
2583 SERIALIZE_SCALAR(multicastHashEnable
);
2586 * Keep track of pending interrupt status.
2588 SERIALIZE_SCALAR(intrTick
);
2589 SERIALIZE_SCALAR(cpuPendingIntr
);
2590 Tick intrEventTick
= 0;
2592 intrEventTick
= intrEvent
->when();
2593 SERIALIZE_SCALAR(intrEventTick
);
2598 NSGigE::unserialize(Checkpoint
*cp
, const std::string
§ion
)
2600 // Unserialize the PciDev base class
2601 PciDev::unserialize(cp
, section
);
2603 UNSERIALIZE_SCALAR(regs
.command
);
2604 UNSERIALIZE_SCALAR(regs
.config
);
2605 UNSERIALIZE_SCALAR(regs
.mear
);
2606 UNSERIALIZE_SCALAR(regs
.ptscr
);
2607 UNSERIALIZE_SCALAR(regs
.isr
);
2608 UNSERIALIZE_SCALAR(regs
.imr
);
2609 UNSERIALIZE_SCALAR(regs
.ier
);
2610 UNSERIALIZE_SCALAR(regs
.ihr
);
2611 UNSERIALIZE_SCALAR(regs
.txdp
);
2612 UNSERIALIZE_SCALAR(regs
.txdp_hi
);
2613 UNSERIALIZE_SCALAR(regs
.txcfg
);
2614 UNSERIALIZE_SCALAR(regs
.gpior
);
2615 UNSERIALIZE_SCALAR(regs
.rxdp
);
2616 UNSERIALIZE_SCALAR(regs
.rxdp_hi
);
2617 UNSERIALIZE_SCALAR(regs
.rxcfg
);
2618 UNSERIALIZE_SCALAR(regs
.pqcr
);
2619 UNSERIALIZE_SCALAR(regs
.wcsr
);
2620 UNSERIALIZE_SCALAR(regs
.pcr
);
2621 UNSERIALIZE_SCALAR(regs
.rfcr
);
2622 UNSERIALIZE_SCALAR(regs
.rfdr
);
2623 UNSERIALIZE_SCALAR(regs
.brar
);
2624 UNSERIALIZE_SCALAR(regs
.brdr
);
2625 UNSERIALIZE_SCALAR(regs
.srr
);
2626 UNSERIALIZE_SCALAR(regs
.mibc
);
2627 UNSERIALIZE_SCALAR(regs
.vrcr
);
2628 UNSERIALIZE_SCALAR(regs
.vtcr
);
2629 UNSERIALIZE_SCALAR(regs
.vdr
);
2630 UNSERIALIZE_SCALAR(regs
.ccsr
);
2631 UNSERIALIZE_SCALAR(regs
.tbicr
);
2632 UNSERIALIZE_SCALAR(regs
.tbisr
);
2633 UNSERIALIZE_SCALAR(regs
.tanar
);
2634 UNSERIALIZE_SCALAR(regs
.tanlpar
);
2635 UNSERIALIZE_SCALAR(regs
.taner
);
2636 UNSERIALIZE_SCALAR(regs
.tesr
);
2638 UNSERIALIZE_ARRAY(rom
.perfectMatch
, ETH_ADDR_LEN
);
2639 UNSERIALIZE_ARRAY(rom
.filterHash
, FHASH_SIZE
);
2641 UNSERIALIZE_SCALAR(ioEnable
);
2644 * unserialize the data fifos
2646 rxFifo
.unserialize("rxFifo", cp
, section
);
2647 txFifo
.unserialize("txFifo", cp
, section
);
2650 * unserialize the various helper variables
2652 bool txPacketExists
;
2653 UNSERIALIZE_SCALAR(txPacketExists
);
2654 if (txPacketExists
) {
2655 txPacket
= new EthPacketData(16384);
2656 txPacket
->unserialize("txPacket", cp
, section
);
2657 uint32_t txPktBufPtr
;
2658 UNSERIALIZE_SCALAR(txPktBufPtr
);
2659 txPacketBufPtr
= (uint8_t *) txPacket
->data
+ txPktBufPtr
;
2663 bool rxPacketExists
;
2664 UNSERIALIZE_SCALAR(rxPacketExists
);
2666 if (rxPacketExists
) {
2667 rxPacket
= new EthPacketData(16384);
2668 rxPacket
->unserialize("rxPacket", cp
, section
);
2669 uint32_t rxPktBufPtr
;
2670 UNSERIALIZE_SCALAR(rxPktBufPtr
);
2671 rxPacketBufPtr
= (uint8_t *) rxPacket
->data
+ rxPktBufPtr
;
2675 UNSERIALIZE_SCALAR(txXferLen
);
2676 UNSERIALIZE_SCALAR(rxXferLen
);
2679 * Unserialize Cached Descriptors
2681 UNSERIALIZE_SCALAR(rxDesc64
.link
);
2682 UNSERIALIZE_SCALAR(rxDesc64
.bufptr
);
2683 UNSERIALIZE_SCALAR(rxDesc64
.cmdsts
);
2684 UNSERIALIZE_SCALAR(rxDesc64
.extsts
);
2685 UNSERIALIZE_SCALAR(txDesc64
.link
);
2686 UNSERIALIZE_SCALAR(txDesc64
.bufptr
);
2687 UNSERIALIZE_SCALAR(txDesc64
.cmdsts
);
2688 UNSERIALIZE_SCALAR(txDesc64
.extsts
);
2689 UNSERIALIZE_SCALAR(rxDesc32
.link
);
2690 UNSERIALIZE_SCALAR(rxDesc32
.bufptr
);
2691 UNSERIALIZE_SCALAR(rxDesc32
.cmdsts
);
2692 UNSERIALIZE_SCALAR(rxDesc32
.extsts
);
2693 UNSERIALIZE_SCALAR(txDesc32
.link
);
2694 UNSERIALIZE_SCALAR(txDesc32
.bufptr
);
2695 UNSERIALIZE_SCALAR(txDesc32
.cmdsts
);
2696 UNSERIALIZE_SCALAR(txDesc32
.extsts
);
2697 UNSERIALIZE_SCALAR(extstsEnable
);
2700 * unserialize tx state machine
2703 UNSERIALIZE_SCALAR(txState
);
2704 this->txState
= (TxState
) txState
;
2705 UNSERIALIZE_SCALAR(txEnable
);
2706 UNSERIALIZE_SCALAR(CTDD
);
2707 UNSERIALIZE_SCALAR(txFragPtr
);
2708 UNSERIALIZE_SCALAR(txDescCnt
);
2710 UNSERIALIZE_SCALAR(txDmaState
);
2711 this->txDmaState
= (DmaState
) txDmaState
;
2712 UNSERIALIZE_SCALAR(txKickTick
);
2714 txKickEvent
.schedule(txKickTick
);
2717 * unserialize rx state machine
2720 UNSERIALIZE_SCALAR(rxState
);
2721 this->rxState
= (RxState
) rxState
;
2722 UNSERIALIZE_SCALAR(rxEnable
);
2723 UNSERIALIZE_SCALAR(CRDD
);
2724 UNSERIALIZE_SCALAR(rxPktBytes
);
2725 UNSERIALIZE_SCALAR(rxFragPtr
);
2726 UNSERIALIZE_SCALAR(rxDescCnt
);
2728 UNSERIALIZE_SCALAR(rxDmaState
);
2729 this->rxDmaState
= (DmaState
) rxDmaState
;
2730 UNSERIALIZE_SCALAR(rxKickTick
);
2732 rxKickEvent
.schedule(rxKickTick
);
2735 * Unserialize EEPROM state machine
2738 UNSERIALIZE_SCALAR(eepromState
);
2739 this->eepromState
= (EEPROMState
) eepromState
;
2740 UNSERIALIZE_SCALAR(eepromClk
);
2741 UNSERIALIZE_SCALAR(eepromBitsToRx
);
2742 UNSERIALIZE_SCALAR(eepromOpcode
);
2743 UNSERIALIZE_SCALAR(eepromAddress
);
2744 UNSERIALIZE_SCALAR(eepromData
);
2747 * If there's a pending transmit, reschedule it now
2750 UNSERIALIZE_SCALAR(transmitTick
);
2752 txEvent
.schedule(curTick
+ transmitTick
);
2755 * unserialize receive address filter settings
2757 UNSERIALIZE_SCALAR(rxFilterEnable
);
2758 UNSERIALIZE_SCALAR(acceptBroadcast
);
2759 UNSERIALIZE_SCALAR(acceptMulticast
);
2760 UNSERIALIZE_SCALAR(acceptUnicast
);
2761 UNSERIALIZE_SCALAR(acceptPerfect
);
2762 UNSERIALIZE_SCALAR(acceptArp
);
2763 UNSERIALIZE_SCALAR(multicastHashEnable
);
2766 * Keep track of pending interrupt status.
2768 UNSERIALIZE_SCALAR(intrTick
);
2769 UNSERIALIZE_SCALAR(cpuPendingIntr
);
2771 UNSERIALIZE_SCALAR(intrEventTick
);
2772 if (intrEventTick
) {
2773 intrEvent
= new IntrEvent(this, intrEventTick
, true);
2778 NSGigEIntParams::create()
2780 NSGigEInt
*dev_int
= new NSGigEInt(name
, device
);
2782 EtherInt
*p
= (EtherInt
*)peer
;
2784 dev_int
->setPeer(p
);
2785 p
->setPeer(dev_int
);
2792 NSGigEParams::create()
2794 return new NSGigE(this);