2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 * Authors: Nathan Binkert
33 * Device module for modelling the National Semiconductor
34 * DP83820 ethernet controller. Does not support priority queueing
39 #include "arch/alpha/ev5.hh"
40 #include "base/inet.hh"
41 #include "cpu/thread_context.hh"
42 #include "dev/etherlink.hh"
43 #include "dev/ns_gige.hh"
44 #include "dev/pciconfigall.hh"
45 #include "mem/packet.hh"
46 #include "sim/builder.hh"
47 #include "sim/debug.hh"
48 #include "sim/host.hh"
49 #include "sim/stats.hh"
50 #include "sim/system.hh"
52 const char *NsRxStateStrings
[] =
63 const char *NsTxStateStrings
[] =
74 const char *NsDmaState
[] =
85 using namespace TheISA
;
87 ///////////////////////////////////////////////////////////////////////
91 NSGigE::NSGigE(Params
*p
)
92 : PciDev(p
), ioEnable(false),
93 txFifo(p
->tx_fifo_size
), rxFifo(p
->rx_fifo_size
),
94 txPacket(0), rxPacket(0), txPacketBufPtr(NULL
), rxPacketBufPtr(NULL
),
95 txXferLen(0), rxXferLen(0), clock(p
->clock
),
96 txState(txIdle
), txEnable(false), CTDD(false),
97 txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle
), rxState(rxIdle
),
98 rxEnable(false), CRDD(false), rxPktBytes(0),
99 rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle
), extstsEnable(false),
100 eepromState(eepromStart
), rxDmaReadEvent(this), rxDmaWriteEvent(this),
101 txDmaReadEvent(this), txDmaWriteEvent(this),
102 dmaDescFree(p
->dma_desc_free
), dmaDataFree(p
->dma_data_free
),
103 txDelay(p
->tx_delay
), rxDelay(p
->rx_delay
),
104 rxKickTick(0), rxKickEvent(this), txKickTick(0), txKickEvent(this),
105 txEvent(this), rxFilterEnable(p
->rx_filter
), acceptBroadcast(false),
106 acceptMulticast(false), acceptUnicast(false),
107 acceptPerfect(false), acceptArp(false), multicastHashEnable(false),
108 intrTick(0), cpuPendingIntr(false),
109 intrEvent(0), interface(0)
112 intrDelay
= p
->intr_delay
;
113 dmaReadDelay
= p
->dma_read_delay
;
114 dmaWriteDelay
= p
->dma_write_delay
;
115 dmaReadFactor
= p
->dma_read_factor
;
116 dmaWriteFactor
= p
->dma_write_factor
;
119 memcpy(&rom
.perfectMatch
, p
->eaddr
.bytes(), ETH_ADDR_LEN
);
121 memset(&rxDesc32
, 0, sizeof(rxDesc32
));
122 memset(&txDesc32
, 0, sizeof(txDesc32
));
123 memset(&rxDesc64
, 0, sizeof(rxDesc64
));
124 memset(&txDesc64
, 0, sizeof(txDesc64
));
134 .name(name() + ".txBytes")
135 .desc("Bytes Transmitted")
140 .name(name() + ".rxBytes")
141 .desc("Bytes Received")
146 .name(name() + ".txPackets")
147 .desc("Number of Packets Transmitted")
152 .name(name() + ".rxPackets")
153 .desc("Number of Packets Received")
158 .name(name() + ".txIpChecksums")
159 .desc("Number of tx IP Checksums done by device")
165 .name(name() + ".rxIpChecksums")
166 .desc("Number of rx IP Checksums done by device")
172 .name(name() + ".txTcpChecksums")
173 .desc("Number of tx TCP Checksums done by device")
179 .name(name() + ".rxTcpChecksums")
180 .desc("Number of rx TCP Checksums done by device")
186 .name(name() + ".txUdpChecksums")
187 .desc("Number of tx UDP Checksums done by device")
193 .name(name() + ".rxUdpChecksums")
194 .desc("Number of rx UDP Checksums done by device")
200 .name(name() + ".descDMAReads")
201 .desc("Number of descriptors the device read w/ DMA")
206 .name(name() + ".descDMAWrites")
207 .desc("Number of descriptors the device wrote w/ DMA")
212 .name(name() + ".descDmaReadBytes")
213 .desc("number of descriptor bytes read w/ DMA")
218 .name(name() + ".descDmaWriteBytes")
219 .desc("number of descriptor bytes write w/ DMA")
224 .name(name() + ".txBandwidth")
225 .desc("Transmit Bandwidth (bits/s)")
231 .name(name() + ".rxBandwidth")
232 .desc("Receive Bandwidth (bits/s)")
238 .name(name() + ".totBandwidth")
239 .desc("Total Bandwidth (bits/s)")
245 .name(name() + ".totPackets")
246 .desc("Total Packets")
252 .name(name() + ".totBytes")
259 .name(name() + ".totPPS")
260 .desc("Total Tranmission Rate (packets/s)")
266 .name(name() + ".txPPS")
267 .desc("Packet Tranmission Rate (packets/s)")
273 .name(name() + ".rxPPS")
274 .desc("Packet Reception Rate (packets/s)")
280 .name(name() + ".postedSwi")
281 .desc("number of software interrupts posted to CPU")
286 .name(name() + ".totalSwi")
287 .desc("total number of Swi written to ISR")
292 .name(name() + ".coalescedSwi")
293 .desc("average number of Swi's coalesced into each post")
298 .name(name() + ".postedRxIdle")
299 .desc("number of rxIdle interrupts posted to CPU")
304 .name(name() + ".totalRxIdle")
305 .desc("total number of RxIdle written to ISR")
310 .name(name() + ".coalescedRxIdle")
311 .desc("average number of RxIdle's coalesced into each post")
316 .name(name() + ".postedRxOk")
317 .desc("number of RxOk interrupts posted to CPU")
322 .name(name() + ".totalRxOk")
323 .desc("total number of RxOk written to ISR")
328 .name(name() + ".coalescedRxOk")
329 .desc("average number of RxOk's coalesced into each post")
334 .name(name() + ".postedRxDesc")
335 .desc("number of RxDesc interrupts posted to CPU")
340 .name(name() + ".totalRxDesc")
341 .desc("total number of RxDesc written to ISR")
346 .name(name() + ".coalescedRxDesc")
347 .desc("average number of RxDesc's coalesced into each post")
352 .name(name() + ".postedTxOk")
353 .desc("number of TxOk interrupts posted to CPU")
358 .name(name() + ".totalTxOk")
359 .desc("total number of TxOk written to ISR")
364 .name(name() + ".coalescedTxOk")
365 .desc("average number of TxOk's coalesced into each post")
370 .name(name() + ".postedTxIdle")
371 .desc("number of TxIdle interrupts posted to CPU")
376 .name(name() + ".totalTxIdle")
377 .desc("total number of TxIdle written to ISR")
382 .name(name() + ".coalescedTxIdle")
383 .desc("average number of TxIdle's coalesced into each post")
388 .name(name() + ".postedTxDesc")
389 .desc("number of TxDesc interrupts posted to CPU")
394 .name(name() + ".totalTxDesc")
395 .desc("total number of TxDesc written to ISR")
400 .name(name() + ".coalescedTxDesc")
401 .desc("average number of TxDesc's coalesced into each post")
406 .name(name() + ".postedRxOrn")
407 .desc("number of RxOrn posted to CPU")
412 .name(name() + ".totalRxOrn")
413 .desc("total number of RxOrn written to ISR")
418 .name(name() + ".coalescedRxOrn")
419 .desc("average number of RxOrn's coalesced into each post")
424 .name(name() + ".coalescedTotal")
425 .desc("average number of interrupts coalesced into each post")
430 .name(name() + ".postedInterrupts")
431 .desc("number of posts to CPU")
436 .name(name() + ".droppedPackets")
437 .desc("number of packets dropped")
441 coalescedSwi
= totalSwi
/ postedInterrupts
;
442 coalescedRxIdle
= totalRxIdle
/ postedInterrupts
;
443 coalescedRxOk
= totalRxOk
/ postedInterrupts
;
444 coalescedRxDesc
= totalRxDesc
/ postedInterrupts
;
445 coalescedTxOk
= totalTxOk
/ postedInterrupts
;
446 coalescedTxIdle
= totalTxIdle
/ postedInterrupts
;
447 coalescedTxDesc
= totalTxDesc
/ postedInterrupts
;
448 coalescedRxOrn
= totalRxOrn
/ postedInterrupts
;
450 coalescedTotal
= (totalSwi
+ totalRxIdle
+ totalRxOk
+ totalRxDesc
+
451 totalTxOk
+ totalTxIdle
+ totalTxDesc
+
452 totalRxOrn
) / postedInterrupts
;
454 txBandwidth
= txBytes
* Stats::constant(8) / simSeconds
;
455 rxBandwidth
= rxBytes
* Stats::constant(8) / simSeconds
;
456 totBandwidth
= txBandwidth
+ rxBandwidth
;
457 totBytes
= txBytes
+ rxBytes
;
458 totPackets
= txPackets
+ rxPackets
;
460 txPacketRate
= txPackets
/ simSeconds
;
461 rxPacketRate
= rxPackets
/ simSeconds
;
466 * This is to write to the PCI general configuration registers
469 NSGigE::writeConfig(Packet
*pkt
)
471 int offset
= pkt
->getAddr() & PCI_CONFIG_SIZE
;
472 if (offset
< PCI_DEVICE_SPECIFIC
)
473 PciDev::writeConfig(pkt
);
475 panic("Device specific PCI config space not implemented!\n");
478 // seems to work fine without all these PCI settings, but i
479 // put in the IO to double check, an assertion will fail if we
480 // need to properly implement it
482 if (config
.data
[offset
] & PCI_CMD_IOSE
)
488 pkt
->result
= Packet::Success
;
493 * This reads the device registers, which are detailed in the NS83820
497 NSGigE::read(Packet
*pkt
)
503 //The mask is to give you only the offset into the device register file
504 Addr daddr
= pkt
->getAddr() & 0xfff;
505 DPRINTF(EthernetPIO
, "read da=%#x pa=%#x size=%d\n",
506 daddr
, pkt
->getAddr(), pkt
->getSize());
509 // there are some reserved registers, you can see ns_gige_reg.h and
510 // the spec sheet for details
511 if (daddr
> LAST
&& daddr
<= RESERVED
) {
512 panic("Accessing reserved register");
513 } else if (daddr
> RESERVED
&& daddr
<= 0x3FC) {
514 return readConfig(pkt
);
515 } else if (daddr
>= MIB_START
&& daddr
<= MIB_END
) {
516 // don't implement all the MIB's. hopefully the kernel
517 // doesn't actually DEPEND upon their values
518 // MIB are just hardware stats keepers
519 pkt
->set
<uint32_t>(0);
520 pkt
->result
= Packet::Success
;
522 } else if (daddr
> 0x3FC)
523 panic("Something is messed up!\n");
525 assert(pkt
->getSize() == sizeof(uint32_t));
526 uint32_t ®
= *pkt
->getPtr
<uint32_t>();
532 //these are supposed to be cleared on a read
533 reg
&= ~(CR_RXD
| CR_TXD
| CR_TXR
| CR_RXR
);
550 devIntrClear(ISR_ALL
);
605 // see the spec sheet for how RFCR and RFDR work
606 // basically, you write to RFCR to tell the machine
607 // what you want to do next, then you act upon RFDR,
608 // and the device will be prepared b/c of what you
615 rfaddr
= (uint16_t)(regs
.rfcr
& RFCR_RFADDR
);
617 // Read from perfect match ROM octets
619 reg
= rom
.perfectMatch
[1];
621 reg
+= rom
.perfectMatch
[0];
624 reg
= rom
.perfectMatch
[3] << 8;
625 reg
+= rom
.perfectMatch
[2];
628 reg
= rom
.perfectMatch
[5] << 8;
629 reg
+= rom
.perfectMatch
[4];
632 // Read filter hash table
633 if (rfaddr
>= FHASH_ADDR
&&
634 rfaddr
< FHASH_ADDR
+ FHASH_SIZE
) {
636 // Only word-aligned reads supported
638 panic("unaligned read from filter hash table!");
640 reg
= rom
.filterHash
[rfaddr
- FHASH_ADDR
+ 1] << 8;
641 reg
+= rom
.filterHash
[rfaddr
- FHASH_ADDR
];
645 panic("reading RFDR for something other than pattern"
646 " matching or hashing! %#x\n", rfaddr
);
656 reg
&= ~(MIBC_MIBS
| MIBC_ACLR
);
701 if (params()->rx_thread
)
702 reg
|= M5REG_RX_THREAD
;
703 if (params()->tx_thread
)
704 reg
|= M5REG_TX_THREAD
;
710 panic("reading unimplemented register: addr=%#x", daddr
);
713 DPRINTF(EthernetPIO
, "read from %#x: data=%d data=%#x\n",
716 pkt
->result
= Packet::Success
;
721 NSGigE::write(Packet
*pkt
)
725 Addr daddr
= pkt
->getAddr() & 0xfff;
726 DPRINTF(EthernetPIO
, "write da=%#x pa=%#x size=%d\n",
727 daddr
, pkt
->getAddr(), pkt
->getSize());
729 if (daddr
> LAST
&& daddr
<= RESERVED
) {
730 panic("Accessing reserved register");
731 } else if (daddr
> RESERVED
&& daddr
<= 0x3FC) {
732 return writeConfig(pkt
);
733 } else if (daddr
> 0x3FC)
734 panic("Something is messed up!\n");
736 if (pkt
->getSize() == sizeof(uint32_t)) {
737 uint32_t reg
= pkt
->get
<uint32_t>();
740 DPRINTF(EthernetPIO
, "write data=%d data=%#x\n", reg
, reg
);
747 } else if (reg
& CR_TXE
) {
750 // the kernel is enabling the transmit machine
751 if (txState
== txIdle
)
757 } else if (reg
& CR_RXE
) {
760 if (rxState
== rxIdle
)
771 devIntrPost(ISR_SWI
);
782 if (reg
& CFGR_LNKSTS
||
785 reg
& CFGR_RESERVED
||
786 reg
& CFGR_T64ADDR
||
787 reg
& CFGR_PCI64_DET
)
789 // First clear all writable bits
790 regs
.config
&= CFGR_LNKSTS
| CFGR_SPDSTS
| CFGR_DUPSTS
|
791 CFGR_RESERVED
| CFGR_T64ADDR
|
793 // Now set the appropriate writable bits
794 regs
.config
|= reg
& ~(CFGR_LNKSTS
| CFGR_SPDSTS
| CFGR_DUPSTS
|
795 CFGR_RESERVED
| CFGR_T64ADDR
|
798 // all these #if 0's are because i don't THINK the kernel needs to
799 // have these implemented. if there is a problem relating to one of
800 // these, you may need to add functionality in.
801 if (reg
& CFGR_TBI_EN
) ;
802 if (reg
& CFGR_MODE_1000
) ;
804 if (reg
& CFGR_AUTO_1000
)
805 panic("CFGR_AUTO_1000 not implemented!\n");
807 if (reg
& CFGR_PINT_DUPSTS
||
808 reg
& CFGR_PINT_LNKSTS
||
809 reg
& CFGR_PINT_SPDSTS
)
812 if (reg
& CFGR_TMRTEST
) ;
813 if (reg
& CFGR_MRM_DIS
) ;
814 if (reg
& CFGR_MWI_DIS
) ;
816 if (reg
& CFGR_T64ADDR
) ;
817 // panic("CFGR_T64ADDR is read only register!\n");
819 if (reg
& CFGR_PCI64_DET
)
820 panic("CFGR_PCI64_DET is read only register!\n");
822 if (reg
& CFGR_DATA64_EN
) ;
823 if (reg
& CFGR_M64ADDR
) ;
824 if (reg
& CFGR_PHY_RST
) ;
825 if (reg
& CFGR_PHY_DIS
) ;
827 if (reg
& CFGR_EXTSTS_EN
)
830 extstsEnable
= false;
832 if (reg
& CFGR_REQALG
) ;
834 if (reg
& CFGR_POW
) ;
835 if (reg
& CFGR_EXD
) ;
836 if (reg
& CFGR_PESEL
) ;
837 if (reg
& CFGR_BROM_DIS
) ;
838 if (reg
& CFGR_EXT_125
) ;
839 if (reg
& CFGR_BEM
) ;
843 // Clear writable bits
844 regs
.mear
&= MEAR_EEDO
;
845 // Set appropriate writable bits
846 regs
.mear
|= reg
& ~MEAR_EEDO
;
848 // FreeBSD uses the EEPROM to read PMATCH (for the MAC address)
849 // even though it could get it through RFDR
850 if (reg
& MEAR_EESEL
) {
851 // Rising edge of clock
852 if (reg
& MEAR_EECLK
&& !eepromClk
)
856 eepromState
= eepromStart
;
857 regs
.mear
&= ~MEAR_EEDI
;
860 eepromClk
= reg
& MEAR_EECLK
;
862 // since phy is completely faked, MEAR_MD* don't matter
863 if (reg
& MEAR_MDIO
) ;
864 if (reg
& MEAR_MDDIR
) ;
865 if (reg
& MEAR_MDC
) ;
869 regs
.ptscr
= reg
& ~(PTSCR_RBIST_RDONLY
);
870 // these control BISTs for various parts of chip - we
871 // don't care or do just fake that the BIST is done
872 if (reg
& PTSCR_RBIST_EN
)
873 regs
.ptscr
|= PTSCR_RBIST_DONE
;
874 if (reg
& PTSCR_EEBIST_EN
)
875 regs
.ptscr
&= ~PTSCR_EEBIST_EN
;
876 if (reg
& PTSCR_EELOAD_EN
)
877 regs
.ptscr
&= ~PTSCR_EELOAD_EN
;
880 case ISR
: /* writing to the ISR has no effect */
881 panic("ISR is a read only register!\n");
894 /* not going to implement real interrupt holdoff */
898 regs
.txdp
= (reg
& 0xFFFFFFFC);
899 assert(txState
== txIdle
);
910 if (reg
& TX_CFG_CSI
) ;
911 if (reg
& TX_CFG_HBI
) ;
912 if (reg
& TX_CFG_MLB
) ;
913 if (reg
& TX_CFG_ATP
) ;
914 if (reg
& TX_CFG_ECRETRY
) {
916 * this could easily be implemented, but considering
917 * the network is just a fake pipe, wouldn't make
922 if (reg
& TX_CFG_BRST_DIS
) ;
926 /* we handle our own DMA, ignore the kernel's exhortations */
927 if (reg
& TX_CFG_MXDMA
) ;
930 // also, we currently don't care about fill/drain
931 // thresholds though this may change in the future with
932 // more realistic networks or a driver which changes it
933 // according to feedback
938 // Only write writable bits
939 regs
.gpior
&= GPIOR_UNUSED
| GPIOR_GP5_IN
| GPIOR_GP4_IN
940 | GPIOR_GP3_IN
| GPIOR_GP2_IN
| GPIOR_GP1_IN
;
941 regs
.gpior
|= reg
& ~(GPIOR_UNUSED
| GPIOR_GP5_IN
| GPIOR_GP4_IN
942 | GPIOR_GP3_IN
| GPIOR_GP2_IN
| GPIOR_GP1_IN
);
943 /* these just control general purpose i/o pins, don't matter */
958 if (reg
& RX_CFG_AEP
) ;
959 if (reg
& RX_CFG_ARP
) ;
960 if (reg
& RX_CFG_STRIPCRC
) ;
961 if (reg
& RX_CFG_RX_RD
) ;
962 if (reg
& RX_CFG_ALP
) ;
963 if (reg
& RX_CFG_AIRL
) ;
965 /* we handle our own DMA, ignore what kernel says about it */
966 if (reg
& RX_CFG_MXDMA
) ;
968 //also, we currently don't care about fill/drain thresholds
969 //though this may change in the future with more realistic
970 //networks or a driver which changes it according to feedback
971 if (reg
& (RX_CFG_DRTH
| RX_CFG_DRTH0
)) ;
976 /* there is no priority queueing used in the linux 2.6 driver */
981 /* not going to implement wake on LAN */
986 /* not going to implement pause control */
993 rxFilterEnable
= (reg
& RFCR_RFEN
) ? true : false;
994 acceptBroadcast
= (reg
& RFCR_AAB
) ? true : false;
995 acceptMulticast
= (reg
& RFCR_AAM
) ? true : false;
996 acceptUnicast
= (reg
& RFCR_AAU
) ? true : false;
997 acceptPerfect
= (reg
& RFCR_APM
) ? true : false;
998 acceptArp
= (reg
& RFCR_AARP
) ? true : false;
999 multicastHashEnable
= (reg
& RFCR_MHEN
) ? true : false;
1002 if (reg
& RFCR_APAT
)
1003 panic("RFCR_APAT not implemented!\n");
1005 if (reg
& RFCR_UHEN
)
1006 panic("Unicast hash filtering not used by drivers!\n");
1009 panic("RFCR_ULM not implemented!\n");
1014 rfaddr
= (uint16_t)(regs
.rfcr
& RFCR_RFADDR
);
1017 rom
.perfectMatch
[0] = (uint8_t)reg
;
1018 rom
.perfectMatch
[1] = (uint8_t)(reg
>> 8);
1021 rom
.perfectMatch
[2] = (uint8_t)reg
;
1022 rom
.perfectMatch
[3] = (uint8_t)(reg
>> 8);
1025 rom
.perfectMatch
[4] = (uint8_t)reg
;
1026 rom
.perfectMatch
[5] = (uint8_t)(reg
>> 8);
1030 if (rfaddr
>= FHASH_ADDR
&&
1031 rfaddr
< FHASH_ADDR
+ FHASH_SIZE
) {
1033 // Only word-aligned writes supported
1035 panic("unaligned write to filter hash table!");
1037 rom
.filterHash
[rfaddr
- FHASH_ADDR
] = (uint8_t)reg
;
1038 rom
.filterHash
[rfaddr
- FHASH_ADDR
+ 1]
1039 = (uint8_t)(reg
>> 8);
1042 panic("writing RFDR for something other than pattern matching\
1043 or hashing! %#x\n", rfaddr
);
1051 panic("the driver never uses BRDR, something is wrong!\n");
1054 panic("SRR is read only register!\n");
1057 panic("the driver never uses MIBC, something is wrong!\n");
1068 panic("the driver never uses VDR, something is wrong!\n");
1071 /* not going to implement clockrun stuff */
1077 if (reg
& TBICR_MR_LOOPBACK
)
1078 panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
1080 if (reg
& TBICR_MR_AN_ENABLE
) {
1081 regs
.tanlpar
= regs
.tanar
;
1082 regs
.tbisr
|= (TBISR_MR_AN_COMPLETE
| TBISR_MR_LINK_STATUS
);
1086 if (reg
& TBICR_MR_RESTART_AN
) ;
1092 panic("TBISR is read only register!\n");
1095 // Only write the writable bits
1096 regs
.tanar
&= TANAR_RF1
| TANAR_RF2
| TANAR_UNUSED
;
1097 regs
.tanar
|= reg
& ~(TANAR_RF1
| TANAR_RF2
| TANAR_UNUSED
);
1099 // Pause capability unimplemented
1101 if (reg
& TANAR_PS2
) ;
1102 if (reg
& TANAR_PS1
) ;
1108 panic("this should only be written to by the fake phy!\n");
1111 panic("TANER is read only register!\n");
1118 panic("invalid register access daddr=%#x", daddr
);
1121 panic("Invalid Request Size");
1123 pkt
->result
= Packet::Success
;
1128 NSGigE::devIntrPost(uint32_t interrupts
)
1130 if (interrupts
& ISR_RESERVE
)
1131 panic("Cannot set a reserved interrupt");
1133 if (interrupts
& ISR_NOIMPL
)
1134 warn("interrupt not implemented %#x\n", interrupts
);
1136 interrupts
&= ISR_IMPL
;
1137 regs
.isr
|= interrupts
;
1139 if (interrupts
& regs
.imr
) {
1140 if (interrupts
& ISR_SWI
) {
1143 if (interrupts
& ISR_RXIDLE
) {
1146 if (interrupts
& ISR_RXOK
) {
1149 if (interrupts
& ISR_RXDESC
) {
1152 if (interrupts
& ISR_TXOK
) {
1155 if (interrupts
& ISR_TXIDLE
) {
1158 if (interrupts
& ISR_TXDESC
) {
1161 if (interrupts
& ISR_RXORN
) {
1166 DPRINTF(EthernetIntr
,
1167 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
1168 interrupts
, regs
.isr
, regs
.imr
);
1170 if ((regs
.isr
& regs
.imr
)) {
1171 Tick when
= curTick
;
1172 if ((regs
.isr
& regs
.imr
& ISR_NODELAY
) == 0)
1178 /* writing this interrupt counting stats inside this means that this function
1179 is now limited to being used to clear all interrupts upon the kernel
1180 reading isr and servicing. just telling you in case you were thinking
1184 NSGigE::devIntrClear(uint32_t interrupts
)
1186 if (interrupts
& ISR_RESERVE
)
1187 panic("Cannot clear a reserved interrupt");
1189 if (regs
.isr
& regs
.imr
& ISR_SWI
) {
1192 if (regs
.isr
& regs
.imr
& ISR_RXIDLE
) {
1195 if (regs
.isr
& regs
.imr
& ISR_RXOK
) {
1198 if (regs
.isr
& regs
.imr
& ISR_RXDESC
) {
1201 if (regs
.isr
& regs
.imr
& ISR_TXOK
) {
1204 if (regs
.isr
& regs
.imr
& ISR_TXIDLE
) {
1207 if (regs
.isr
& regs
.imr
& ISR_TXDESC
) {
1210 if (regs
.isr
& regs
.imr
& ISR_RXORN
) {
1214 if (regs
.isr
& regs
.imr
& ISR_IMPL
)
1217 interrupts
&= ~ISR_NOIMPL
;
1218 regs
.isr
&= ~interrupts
;
1220 DPRINTF(EthernetIntr
,
1221 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
1222 interrupts
, regs
.isr
, regs
.imr
);
1224 if (!(regs
.isr
& regs
.imr
))
1229 NSGigE::devIntrChangeMask()
1231 DPRINTF(EthernetIntr
, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
1232 regs
.isr
, regs
.imr
, regs
.isr
& regs
.imr
);
1234 if (regs
.isr
& regs
.imr
)
1235 cpuIntrPost(curTick
);
1241 NSGigE::cpuIntrPost(Tick when
)
1243 // If the interrupt you want to post is later than an interrupt
1244 // already scheduled, just let it post in the coming one and don't
1245 // schedule another.
1246 // HOWEVER, must be sure that the scheduled intrTick is in the
1247 // future (this was formerly the source of a bug)
1249 * @todo this warning should be removed and the intrTick code should
1252 assert(when
>= curTick
);
1253 assert(intrTick
>= curTick
|| intrTick
== 0);
1254 if (when
> intrTick
&& intrTick
!= 0) {
1255 DPRINTF(EthernetIntr
, "don't need to schedule event...intrTick=%d\n",
1261 if (intrTick
< curTick
) {
1266 DPRINTF(EthernetIntr
, "going to schedule an interrupt for intrTick=%d\n",
1270 intrEvent
->squash();
1271 intrEvent
= new IntrEvent(this, true);
1272 intrEvent
->schedule(intrTick
);
1276 NSGigE::cpuInterrupt()
1278 assert(intrTick
== curTick
);
1280 // Whether or not there's a pending interrupt, we don't care about
1285 // Don't send an interrupt if there's already one
1286 if (cpuPendingIntr
) {
1287 DPRINTF(EthernetIntr
,
1288 "would send an interrupt now, but there's already pending\n");
1291 cpuPendingIntr
= true;
1293 DPRINTF(EthernetIntr
, "posting interrupt\n");
1299 NSGigE::cpuIntrClear()
1301 if (!cpuPendingIntr
)
1305 intrEvent
->squash();
1311 cpuPendingIntr
= false;
1313 DPRINTF(EthernetIntr
, "clearing interrupt\n");
1318 NSGigE::cpuIntrPending() const
1319 { return cpuPendingIntr
; }
1325 DPRINTF(Ethernet
, "transmit reset\n");
1330 assert(txDescCnt
== 0);
1333 assert(txDmaState
== dmaIdle
);
1339 DPRINTF(Ethernet
, "receive reset\n");
1342 assert(rxPktBytes
== 0);
1345 assert(rxDescCnt
== 0);
1346 assert(rxDmaState
== dmaIdle
);
1354 memset(®s
, 0, sizeof(regs
));
1355 regs
.config
= (CFGR_LNKSTS
| CFGR_TBI_EN
| CFGR_MODE_1000
);
1357 regs
.txcfg
= 0x120; // set drain threshold to 1024 bytes and
1358 // fill threshold to 32 bytes
1359 regs
.rxcfg
= 0x4; // set drain threshold to 16 bytes
1360 regs
.srr
= 0x0103; // set the silicon revision to rev B or 0x103
1361 regs
.mibc
= MIBC_FRZ
;
1362 regs
.vdr
= 0x81; // set the vlan tag type to 802.1q
1363 regs
.tesr
= 0xc000; // TBI capable of both full and half duplex
1364 regs
.brar
= 0xffffffff;
1366 extstsEnable
= false;
1367 acceptBroadcast
= false;
1368 acceptMulticast
= false;
1369 acceptUnicast
= false;
1370 acceptPerfect
= false;
1375 NSGigE::doRxDmaRead()
1377 assert(rxDmaState
== dmaIdle
|| rxDmaState
== dmaReadWaiting
);
1378 rxDmaState
= dmaReading
;
1380 if (dmaPending() || getState() != Running
)
1381 rxDmaState
= dmaReadWaiting
;
1383 dmaRead(rxDmaAddr
, rxDmaLen
, &rxDmaReadEvent
, (uint8_t*)rxDmaData
);
1389 NSGigE::rxDmaReadDone()
1391 assert(rxDmaState
== dmaReading
);
1392 rxDmaState
= dmaIdle
;
1394 DPRINTF(EthernetDMA
, "rx dma read paddr=%#x len=%d\n",
1395 rxDmaAddr
, rxDmaLen
);
1396 DDUMP(EthernetDMA
, rxDmaData
, rxDmaLen
);
1398 // If the transmit state machine has a pending DMA, let it go first
1399 if (txDmaState
== dmaReadWaiting
|| txDmaState
== dmaWriteWaiting
)
1406 NSGigE::doRxDmaWrite()
1408 assert(rxDmaState
== dmaIdle
|| rxDmaState
== dmaWriteWaiting
);
1409 rxDmaState
= dmaWriting
;
1411 if (dmaPending() || getState() != Running
)
1412 rxDmaState
= dmaWriteWaiting
;
1414 dmaWrite(rxDmaAddr
, rxDmaLen
, &rxDmaWriteEvent
, (uint8_t*)rxDmaData
);
1419 NSGigE::rxDmaWriteDone()
1421 assert(rxDmaState
== dmaWriting
);
1422 rxDmaState
= dmaIdle
;
1424 DPRINTF(EthernetDMA
, "rx dma write paddr=%#x len=%d\n",
1425 rxDmaAddr
, rxDmaLen
);
1426 DDUMP(EthernetDMA
, rxDmaData
, rxDmaLen
);
1428 // If the transmit state machine has a pending DMA, let it go first
1429 if (txDmaState
== dmaReadWaiting
|| txDmaState
== dmaWriteWaiting
)
1438 bool is64bit
= (bool)(regs
.config
& CFGR_M64ADDR
);
1441 "receive kick rxState=%s (rxBuf.size=%d) %d-bit\n",
1442 NsRxStateStrings
[rxState
], rxFifo
.size(), is64bit
? 64 : 32);
1445 uint32_t &cmdsts
= is64bit
? rxDesc64
.cmdsts
: rxDesc32
.cmdsts
;
1446 uint32_t &extsts
= is64bit
? rxDesc64
.extsts
: rxDesc32
.extsts
;
1450 if (rxKickTick
> curTick
) {
1451 DPRINTF(EthernetSM
, "receive kick exiting, can't run till %d\n",
1457 // Go to the next state machine clock tick.
1458 rxKickTick
= curTick
+ cycles(1);
1461 switch(rxDmaState
) {
1462 case dmaReadWaiting
:
1466 case dmaWriteWaiting
:
1474 link
= is64bit
? (Addr
)rxDesc64
.link
: (Addr
)rxDesc32
.link
;
1475 bufptr
= is64bit
? (Addr
)rxDesc64
.bufptr
: (Addr
)rxDesc32
.bufptr
;
1477 // see state machine from spec for details
1478 // the way this works is, if you finish work on one state and can
1479 // go directly to another, you do that through jumping to the
1480 // label "next". however, if you have intermediate work, like DMA
1481 // so that you can't go to the next state yet, you go to exit and
1482 // exit the loop. however, when the DMA is done it will trigger
1483 // an event and come back to this loop.
1487 DPRINTF(EthernetSM
, "Receive Disabled! Nothing to do.\n");
1492 rxState
= rxDescRefr
;
1494 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1496 is64bit
? (void *)&rxDesc64
.link
: (void *)&rxDesc32
.link
;
1497 rxDmaLen
= is64bit
? sizeof(rxDesc64
.link
) : sizeof(rxDesc32
.link
);
1498 rxDmaFree
= dmaDescFree
;
1501 descDmaRdBytes
+= rxDmaLen
;
1506 rxState
= rxDescRead
;
1508 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1509 rxDmaData
= is64bit
? (void *)&rxDesc64
: (void *)&rxDesc32
;
1510 rxDmaLen
= is64bit
? sizeof(rxDesc64
) : sizeof(rxDesc32
);
1511 rxDmaFree
= dmaDescFree
;
1514 descDmaRdBytes
+= rxDmaLen
;
1522 if (rxDmaState
!= dmaIdle
)
1525 rxState
= rxAdvance
;
1529 if (rxDmaState
!= dmaIdle
)
1532 DPRINTF(EthernetDesc
, "rxDesc: addr=%08x read descriptor\n",
1533 regs
.rxdp
& 0x3fffffff);
1534 DPRINTF(EthernetDesc
,
1535 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
1536 link
, bufptr
, cmdsts
, extsts
);
1538 if (cmdsts
& CMDSTS_OWN
) {
1539 devIntrPost(ISR_RXIDLE
);
1543 rxState
= rxFifoBlock
;
1545 rxDescCnt
= cmdsts
& CMDSTS_LEN_MASK
;
1552 * @todo in reality, we should be able to start processing
1553 * the packet as it arrives, and not have to wait for the
1554 * full packet ot be in the receive fifo.
1559 DPRINTF(EthernetSM
, "****processing receive of new packet****\n");
1561 // If we don't have a packet, grab a new one from the fifo.
1562 rxPacket
= rxFifo
.front();
1563 rxPktBytes
= rxPacket
->length
;
1564 rxPacketBufPtr
= rxPacket
->data
;
1567 if (DTRACE(Ethernet
)) {
1570 DPRINTF(Ethernet
, "ID is %d\n", ip
->id());
1574 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1575 tcp
->sport(), tcp
->dport(), tcp
->seq(),
1582 // sanity check - i think the driver behaves like this
1583 assert(rxDescCnt
>= rxPktBytes
);
1588 // dont' need the && rxDescCnt > 0 if driver sanity check
1590 if (rxPktBytes
> 0) {
1591 rxState
= rxFragWrite
;
1592 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
1594 rxXferLen
= rxPktBytes
;
1596 rxDmaAddr
= rxFragPtr
& 0x3fffffff;
1597 rxDmaData
= rxPacketBufPtr
;
1598 rxDmaLen
= rxXferLen
;
1599 rxDmaFree
= dmaDataFree
;
1605 rxState
= rxDescWrite
;
1607 //if (rxPktBytes == 0) { /* packet is done */
1608 assert(rxPktBytes
== 0);
1609 DPRINTF(EthernetSM
, "done with receiving packet\n");
1611 cmdsts
|= CMDSTS_OWN
;
1612 cmdsts
&= ~CMDSTS_MORE
;
1613 cmdsts
|= CMDSTS_OK
;
1614 cmdsts
&= 0xffff0000;
1615 cmdsts
+= rxPacket
->length
; //i.e. set CMDSTS_SIZE
1619 * all the driver uses these are for its own stats keeping
1620 * which we don't care about, aren't necessary for
1621 * functionality and doing this would just slow us down.
1622 * if they end up using this in a later version for
1623 * functional purposes, just undef
1625 if (rxFilterEnable
) {
1626 cmdsts
&= ~CMDSTS_DEST_MASK
;
1627 const EthAddr
&dst
= rxFifoFront()->dst();
1629 cmdsts
|= CMDSTS_DEST_SELF
;
1630 if (dst
->multicast())
1631 cmdsts
|= CMDSTS_DEST_MULTI
;
1632 if (dst
->broadcast())
1633 cmdsts
|= CMDSTS_DEST_MASK
;
1638 if (extstsEnable
&& ip
) {
1639 extsts
|= EXTSTS_IPPKT
;
1641 if (cksum(ip
) != 0) {
1642 DPRINTF(EthernetCksum
, "Rx IP Checksum Error\n");
1643 extsts
|= EXTSTS_IPERR
;
1648 extsts
|= EXTSTS_TCPPKT
;
1650 if (cksum(tcp
) != 0) {
1651 DPRINTF(EthernetCksum
, "Rx TCP Checksum Error\n");
1652 extsts
|= EXTSTS_TCPERR
;
1656 extsts
|= EXTSTS_UDPPKT
;
1658 if (cksum(udp
) != 0) {
1659 DPRINTF(EthernetCksum
, "Rx UDP Checksum Error\n");
1660 extsts
|= EXTSTS_UDPERR
;
1667 * the driver seems to always receive into desc buffers
1668 * of size 1514, so you never have a pkt that is split
1669 * into multiple descriptors on the receive side, so
1670 * i don't implement that case, hence the assert above.
1673 DPRINTF(EthernetDesc
,
1674 "rxDesc: addr=%08x writeback cmdsts extsts\n",
1675 regs
.rxdp
& 0x3fffffff);
1676 DPRINTF(EthernetDesc
,
1677 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
1678 link
, bufptr
, cmdsts
, extsts
);
1680 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1681 rxDmaData
= &cmdsts
;
1683 rxDmaAddr
+= offsetof(ns_desc64
, cmdsts
);
1684 rxDmaLen
= sizeof(rxDesc64
.cmdsts
) + sizeof(rxDesc64
.extsts
);
1686 rxDmaAddr
+= offsetof(ns_desc32
, cmdsts
);
1687 rxDmaLen
= sizeof(rxDesc32
.cmdsts
) + sizeof(rxDesc32
.extsts
);
1689 rxDmaFree
= dmaDescFree
;
1692 descDmaWrBytes
+= rxDmaLen
;
1700 if (rxDmaState
!= dmaIdle
)
1703 rxPacketBufPtr
+= rxXferLen
;
1704 rxFragPtr
+= rxXferLen
;
1705 rxPktBytes
-= rxXferLen
;
1707 rxState
= rxFifoBlock
;
1711 if (rxDmaState
!= dmaIdle
)
1714 assert(cmdsts
& CMDSTS_OWN
);
1716 assert(rxPacket
== 0);
1717 devIntrPost(ISR_RXOK
);
1719 if (cmdsts
& CMDSTS_INTR
)
1720 devIntrPost(ISR_RXDESC
);
1723 DPRINTF(EthernetSM
, "Halting the RX state machine\n");
1727 rxState
= rxAdvance
;
1732 devIntrPost(ISR_RXIDLE
);
1737 if (rxDmaState
!= dmaIdle
)
1739 rxState
= rxDescRead
;
1743 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1744 rxDmaData
= is64bit
? (void *)&rxDesc64
: (void *)&rxDesc32
;
1745 rxDmaLen
= is64bit
? sizeof(rxDesc64
) : sizeof(rxDesc32
);
1746 rxDmaFree
= dmaDescFree
;
1754 panic("Invalid rxState!");
1757 DPRINTF(EthernetSM
, "entering next rxState=%s\n",
1758 NsRxStateStrings
[rxState
]);
1763 * @todo do we want to schedule a future kick?
1765 DPRINTF(EthernetSM
, "rx state machine exited rxState=%s\n",
1766 NsRxStateStrings
[rxState
]);
1768 if (clock
&& !rxKickEvent
.scheduled())
1769 rxKickEvent
.schedule(rxKickTick
);
1775 if (txFifo
.empty()) {
1776 DPRINTF(Ethernet
, "nothing to transmit\n");
1780 DPRINTF(Ethernet
, "Attempt Pkt Transmit: txFifo length=%d\n",
1782 if (interface
->sendPacket(txFifo
.front())) {
1784 if (DTRACE(Ethernet
)) {
1785 IpPtr
ip(txFifo
.front());
1787 DPRINTF(Ethernet
, "ID is %d\n", ip
->id());
1791 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1792 tcp
->sport(), tcp
->dport(), tcp
->seq(),
1799 DDUMP(EthernetData
, txFifo
.front()->data
, txFifo
.front()->length
);
1800 txBytes
+= txFifo
.front()->length
;
1803 DPRINTF(Ethernet
, "Successful Xmit! now txFifoAvail is %d\n",
1808 * normally do a writeback of the descriptor here, and ONLY
1809 * after that is done, send this interrupt. but since our
1810 * stuff never actually fails, just do this interrupt here,
1811 * otherwise the code has to stray from this nice format.
1812 * besides, it's functionally the same.
1814 devIntrPost(ISR_TXOK
);
1817 if (!txFifo
.empty() && !txEvent
.scheduled()) {
1818 DPRINTF(Ethernet
, "reschedule transmit\n");
1819 txEvent
.schedule(curTick
+ retryTime
);
1824 NSGigE::doTxDmaRead()
1826 assert(txDmaState
== dmaIdle
|| txDmaState
== dmaReadWaiting
);
1827 txDmaState
= dmaReading
;
1829 if (dmaPending() || getState() != Running
)
1830 txDmaState
= dmaReadWaiting
;
1832 dmaRead(txDmaAddr
, txDmaLen
, &txDmaReadEvent
, (uint8_t*)txDmaData
);
1838 NSGigE::txDmaReadDone()
1840 assert(txDmaState
== dmaReading
);
1841 txDmaState
= dmaIdle
;
1843 DPRINTF(EthernetDMA
, "tx dma read paddr=%#x len=%d\n",
1844 txDmaAddr
, txDmaLen
);
1845 DDUMP(EthernetDMA
, txDmaData
, txDmaLen
);
1847 // If the receive state machine has a pending DMA, let it go first
1848 if (rxDmaState
== dmaReadWaiting
|| rxDmaState
== dmaWriteWaiting
)
1855 NSGigE::doTxDmaWrite()
1857 assert(txDmaState
== dmaIdle
|| txDmaState
== dmaWriteWaiting
);
1858 txDmaState
= dmaWriting
;
1860 if (dmaPending() || getState() != Running
)
1861 txDmaState
= dmaWriteWaiting
;
1863 dmaWrite(txDmaAddr
, txDmaLen
, &txDmaWriteEvent
, (uint8_t*)txDmaData
);
1868 NSGigE::txDmaWriteDone()
1870 assert(txDmaState
== dmaWriting
);
1871 txDmaState
= dmaIdle
;
1873 DPRINTF(EthernetDMA
, "tx dma write paddr=%#x len=%d\n",
1874 txDmaAddr
, txDmaLen
);
1875 DDUMP(EthernetDMA
, txDmaData
, txDmaLen
);
1877 // If the receive state machine has a pending DMA, let it go first
1878 if (rxDmaState
== dmaReadWaiting
|| rxDmaState
== dmaWriteWaiting
)
1887 bool is64bit
= (bool)(regs
.config
& CFGR_M64ADDR
);
1889 DPRINTF(EthernetSM
, "transmit kick txState=%s %d-bit\n",
1890 NsTxStateStrings
[txState
], is64bit
? 64 : 32);
1893 uint32_t &cmdsts
= is64bit
? txDesc64
.cmdsts
: txDesc32
.cmdsts
;
1894 uint32_t &extsts
= is64bit
? txDesc64
.extsts
: txDesc32
.extsts
;
1898 if (txKickTick
> curTick
) {
1899 DPRINTF(EthernetSM
, "transmit kick exiting, can't run till %d\n",
1904 // Go to the next state machine clock tick.
1905 txKickTick
= curTick
+ cycles(1);
1908 switch(txDmaState
) {
1909 case dmaReadWaiting
:
1913 case dmaWriteWaiting
:
1921 link
= is64bit
? (Addr
)txDesc64
.link
: (Addr
)txDesc32
.link
;
1922 bufptr
= is64bit
? (Addr
)txDesc64
.bufptr
: (Addr
)txDesc32
.bufptr
;
1926 DPRINTF(EthernetSM
, "Transmit disabled. Nothing to do.\n");
1931 txState
= txDescRefr
;
1933 txDmaAddr
= regs
.txdp
& 0x3fffffff;
1935 is64bit
? (void *)&txDesc64
.link
: (void *)&txDesc32
.link
;
1936 txDmaLen
= is64bit
? sizeof(txDesc64
.link
) : sizeof(txDesc32
.link
);
1937 txDmaFree
= dmaDescFree
;
1940 descDmaRdBytes
+= txDmaLen
;
1946 txState
= txDescRead
;
1948 txDmaAddr
= regs
.txdp
& 0x3fffffff;
1949 txDmaData
= is64bit
? (void *)&txDesc64
: (void *)&txDesc32
;
1950 txDmaLen
= is64bit
? sizeof(txDesc64
) : sizeof(txDesc32
);
1951 txDmaFree
= dmaDescFree
;
1954 descDmaRdBytes
+= txDmaLen
;
1962 if (txDmaState
!= dmaIdle
)
1965 txState
= txAdvance
;
1969 if (txDmaState
!= dmaIdle
)
1972 DPRINTF(EthernetDesc
, "txDesc: addr=%08x read descriptor\n",
1973 regs
.txdp
& 0x3fffffff);
1974 DPRINTF(EthernetDesc
,
1975 "txDesc: link=%#x bufptr=%#x cmdsts=%#08x extsts=%#08x\n",
1976 link
, bufptr
, cmdsts
, extsts
);
1978 if (cmdsts
& CMDSTS_OWN
) {
1979 txState
= txFifoBlock
;
1981 txDescCnt
= cmdsts
& CMDSTS_LEN_MASK
;
1983 devIntrPost(ISR_TXIDLE
);
1991 DPRINTF(EthernetSM
, "****starting the tx of a new packet****\n");
1992 txPacket
= new EthPacketData(16384);
1993 txPacketBufPtr
= txPacket
->data
;
1996 if (txDescCnt
== 0) {
1997 DPRINTF(EthernetSM
, "the txDescCnt == 0, done with descriptor\n");
1998 if (cmdsts
& CMDSTS_MORE
) {
1999 DPRINTF(EthernetSM
, "there are more descriptors to come\n");
2000 txState
= txDescWrite
;
2002 cmdsts
&= ~CMDSTS_OWN
;
2004 txDmaAddr
= regs
.txdp
& 0x3fffffff;
2005 txDmaData
= &cmdsts
;
2007 txDmaAddr
+= offsetof(ns_desc64
, cmdsts
);
2008 txDmaLen
= sizeof(txDesc64
.cmdsts
);
2010 txDmaAddr
+= offsetof(ns_desc32
, cmdsts
);
2011 txDmaLen
= sizeof(txDesc32
.cmdsts
);
2013 txDmaFree
= dmaDescFree
;
2018 } else { /* this packet is totally done */
2019 DPRINTF(EthernetSM
, "This packet is done, let's wrap it up\n");
2020 /* deal with the packet that just finished */
2021 if ((regs
.vtcr
& VTCR_PPCHK
) && extstsEnable
) {
2023 if (extsts
& EXTSTS_UDPPKT
) {
2026 udp
->sum(cksum(udp
));
2028 } else if (extsts
& EXTSTS_TCPPKT
) {
2031 tcp
->sum(cksum(tcp
));
2034 if (extsts
& EXTSTS_IPPKT
) {
2041 txPacket
->length
= txPacketBufPtr
- txPacket
->data
;
2042 // this is just because the receive can't handle a
2043 // packet bigger than 1514 bytes; want to make sure
2044 if (txPacket
->length
> 1514)
2045 panic("transmit packet too large, %s > 1514\n",
2051 txFifo
.push(txPacket
);
2055 * the following section is not to spec, but
2056 * functionally shouldn't be any different. normally,
2057 * the chip will wait til the transmit has occurred
2058 * before writing back the descriptor because it has
2059 * to wait to see that it was successfully transmitted
2060 * to decide whether to set CMDSTS_OK or not.
2061 * however, in the simulator since it is always
2062 * successfully transmitted, and writing it exactly to
2063 * spec would complicate the code, we just do it here
2066 cmdsts
&= ~CMDSTS_OWN
;
2067 cmdsts
|= CMDSTS_OK
;
2069 DPRINTF(EthernetDesc
,
2070 "txDesc writeback: cmdsts=%08x extsts=%08x\n",
2073 txDmaFree
= dmaDescFree
;
2074 txDmaAddr
= regs
.txdp
& 0x3fffffff;
2075 txDmaData
= &cmdsts
;
2077 txDmaAddr
+= offsetof(ns_desc64
, cmdsts
);
2079 sizeof(txDesc64
.cmdsts
) + sizeof(txDesc64
.extsts
);
2081 txDmaAddr
+= offsetof(ns_desc32
, cmdsts
);
2083 sizeof(txDesc32
.cmdsts
) + sizeof(txDesc32
.extsts
);
2087 descDmaWrBytes
+= txDmaLen
;
2093 DPRINTF(EthernetSM
, "halting TX state machine\n");
2097 txState
= txAdvance
;
2103 DPRINTF(EthernetSM
, "this descriptor isn't done yet\n");
2104 if (!txFifo
.full()) {
2105 txState
= txFragRead
;
2108 * The number of bytes transferred is either whatever
2109 * is left in the descriptor (txDescCnt), or if there
2110 * is not enough room in the fifo, just whatever room
2111 * is left in the fifo
2113 txXferLen
= min
<uint32_t>(txDescCnt
, txFifo
.avail());
2115 txDmaAddr
= txFragPtr
& 0x3fffffff;
2116 txDmaData
= txPacketBufPtr
;
2117 txDmaLen
= txXferLen
;
2118 txDmaFree
= dmaDataFree
;
2123 txState
= txFifoBlock
;
2133 if (txDmaState
!= dmaIdle
)
2136 txPacketBufPtr
+= txXferLen
;
2137 txFragPtr
+= txXferLen
;
2138 txDescCnt
-= txXferLen
;
2139 txFifo
.reserve(txXferLen
);
2141 txState
= txFifoBlock
;
2145 if (txDmaState
!= dmaIdle
)
2148 if (cmdsts
& CMDSTS_INTR
)
2149 devIntrPost(ISR_TXDESC
);
2152 DPRINTF(EthernetSM
, "halting TX state machine\n");
2156 txState
= txAdvance
;
2161 devIntrPost(ISR_TXIDLE
);
2165 if (txDmaState
!= dmaIdle
)
2167 txState
= txDescRead
;
2171 txDmaAddr
= link
& 0x3fffffff;
2172 txDmaData
= is64bit
? (void *)&txDesc64
: (void *)&txDesc32
;
2173 txDmaLen
= is64bit
? sizeof(txDesc64
) : sizeof(txDesc32
);
2174 txDmaFree
= dmaDescFree
;
2182 panic("invalid state");
2185 DPRINTF(EthernetSM
, "entering next txState=%s\n",
2186 NsTxStateStrings
[txState
]);
2191 * @todo do we want to schedule a future kick?
2193 DPRINTF(EthernetSM
, "tx state machine exited txState=%s\n",
2194 NsTxStateStrings
[txState
]);
2196 if (clock
&& !txKickEvent
.scheduled())
2197 txKickEvent
.schedule(txKickTick
);
2201 * Advance the EEPROM state machine
2202 * Called on rising edge of EEPROM clock bit in MEAR
2205 NSGigE::eepromKick()
2207 switch (eepromState
) {
2211 // Wait for start bit
2212 if (regs
.mear
& MEAR_EEDI
) {
2213 // Set up to get 2 opcode bits
2214 eepromState
= eepromGetOpcode
;
2220 case eepromGetOpcode
:
2222 eepromOpcode
+= (regs
.mear
& MEAR_EEDI
) ? 1 : 0;
2225 // Done getting opcode
2226 if (eepromBitsToRx
== 0) {
2227 if (eepromOpcode
!= EEPROM_READ
)
2228 panic("only EEPROM reads are implemented!");
2230 // Set up to get address
2231 eepromState
= eepromGetAddress
;
2237 case eepromGetAddress
:
2238 eepromAddress
<<= 1;
2239 eepromAddress
+= (regs
.mear
& MEAR_EEDI
) ? 1 : 0;
2242 // Done getting address
2243 if (eepromBitsToRx
== 0) {
2245 if (eepromAddress
>= EEPROM_SIZE
)
2246 panic("EEPROM read access out of range!");
2248 switch (eepromAddress
) {
2250 case EEPROM_PMATCH2_ADDR
:
2251 eepromData
= rom
.perfectMatch
[5];
2253 eepromData
+= rom
.perfectMatch
[4];
2256 case EEPROM_PMATCH1_ADDR
:
2257 eepromData
= rom
.perfectMatch
[3];
2259 eepromData
+= rom
.perfectMatch
[2];
2262 case EEPROM_PMATCH0_ADDR
:
2263 eepromData
= rom
.perfectMatch
[1];
2265 eepromData
+= rom
.perfectMatch
[0];
2269 panic("FreeBSD driver only uses EEPROM to read PMATCH!");
2271 // Set up to read data
2272 eepromState
= eepromRead
;
2273 eepromBitsToRx
= 16;
2275 // Clear data in bit
2276 regs
.mear
&= ~MEAR_EEDI
;
2281 // Clear Data Out bit
2282 regs
.mear
&= ~MEAR_EEDO
;
2283 // Set bit to value of current EEPROM bit
2284 regs
.mear
|= (eepromData
& 0x8000) ? MEAR_EEDO
: 0x0;
2290 if (eepromBitsToRx
== 0) {
2291 eepromState
= eepromStart
;
2296 panic("invalid EEPROM state");
2302 NSGigE::transferDone()
2304 if (txFifo
.empty()) {
2305 DPRINTF(Ethernet
, "transfer complete: txFifo empty...nothing to do\n");
2309 DPRINTF(Ethernet
, "transfer complete: data in txFifo...schedule xmit\n");
2311 if (txEvent
.scheduled())
2312 txEvent
.reschedule(curTick
+ cycles(1));
2314 txEvent
.schedule(curTick
+ cycles(1));
2318 NSGigE::rxFilter(const EthPacketPtr
&packet
)
2320 EthPtr eth
= packet
;
2324 const EthAddr
&dst
= eth
->dst();
2325 if (dst
.unicast()) {
2326 // If we're accepting all unicast addresses
2330 // If we make a perfect match
2331 if (acceptPerfect
&& dst
== rom
.perfectMatch
)
2334 if (acceptArp
&& eth
->type() == ETH_TYPE_ARP
)
2337 } else if (dst
.broadcast()) {
2338 // if we're accepting broadcasts
2339 if (acceptBroadcast
)
2342 } else if (dst
.multicast()) {
2343 // if we're accepting all multicasts
2344 if (acceptMulticast
)
2347 // Multicast hashing faked - all packets accepted
2348 if (multicastHashEnable
)
2353 DPRINTF(Ethernet
, "rxFilter drop\n");
2354 DDUMP(EthernetData
, packet
->data
, packet
->length
);
2361 NSGigE::recvPacket(EthPacketPtr packet
)
2363 rxBytes
+= packet
->length
;
2366 DPRINTF(Ethernet
, "Receiving packet from wire, rxFifoAvail=%d\n",
2370 DPRINTF(Ethernet
, "receive disabled...packet dropped\n");
2374 if (!rxFilterEnable
) {
2376 "receive packet filtering disabled . . . packet dropped\n");
2380 if (rxFilter(packet
)) {
2381 DPRINTF(Ethernet
, "packet filtered...dropped\n");
2385 if (rxFifo
.avail() < packet
->length
) {
2391 "packet won't fit in receive buffer...pkt ID %d dropped\n",
2394 DPRINTF(Ethernet
, "Seq=%d\n", tcp
->seq());
2399 devIntrPost(ISR_RXORN
);
2403 rxFifo
.push(packet
);
2413 SimObject::resume();
2415 // During drain we could have left the state machines in a waiting state and
2416 // they wouldn't get out until some other event occurred to kick them.
2417 // This way they'll get out immediately
2423 //=====================================================================
2427 NSGigE::serialize(ostream
&os
)
2429 // Serialize the PciDev base class
2430 PciDev::serialize(os
);
2433 * Finalize any DMA events now.
2435 // @todo will mem system save pending dma?
2438 * Serialize the device registers
2440 SERIALIZE_SCALAR(regs
.command
);
2441 SERIALIZE_SCALAR(regs
.config
);
2442 SERIALIZE_SCALAR(regs
.mear
);
2443 SERIALIZE_SCALAR(regs
.ptscr
);
2444 SERIALIZE_SCALAR(regs
.isr
);
2445 SERIALIZE_SCALAR(regs
.imr
);
2446 SERIALIZE_SCALAR(regs
.ier
);
2447 SERIALIZE_SCALAR(regs
.ihr
);
2448 SERIALIZE_SCALAR(regs
.txdp
);
2449 SERIALIZE_SCALAR(regs
.txdp_hi
);
2450 SERIALIZE_SCALAR(regs
.txcfg
);
2451 SERIALIZE_SCALAR(regs
.gpior
);
2452 SERIALIZE_SCALAR(regs
.rxdp
);
2453 SERIALIZE_SCALAR(regs
.rxdp_hi
);
2454 SERIALIZE_SCALAR(regs
.rxcfg
);
2455 SERIALIZE_SCALAR(regs
.pqcr
);
2456 SERIALIZE_SCALAR(regs
.wcsr
);
2457 SERIALIZE_SCALAR(regs
.pcr
);
2458 SERIALIZE_SCALAR(regs
.rfcr
);
2459 SERIALIZE_SCALAR(regs
.rfdr
);
2460 SERIALIZE_SCALAR(regs
.brar
);
2461 SERIALIZE_SCALAR(regs
.brdr
);
2462 SERIALIZE_SCALAR(regs
.srr
);
2463 SERIALIZE_SCALAR(regs
.mibc
);
2464 SERIALIZE_SCALAR(regs
.vrcr
);
2465 SERIALIZE_SCALAR(regs
.vtcr
);
2466 SERIALIZE_SCALAR(regs
.vdr
);
2467 SERIALIZE_SCALAR(regs
.ccsr
);
2468 SERIALIZE_SCALAR(regs
.tbicr
);
2469 SERIALIZE_SCALAR(regs
.tbisr
);
2470 SERIALIZE_SCALAR(regs
.tanar
);
2471 SERIALIZE_SCALAR(regs
.tanlpar
);
2472 SERIALIZE_SCALAR(regs
.taner
);
2473 SERIALIZE_SCALAR(regs
.tesr
);
2475 SERIALIZE_ARRAY(rom
.perfectMatch
, ETH_ADDR_LEN
);
2476 SERIALIZE_ARRAY(rom
.filterHash
, FHASH_SIZE
);
2478 SERIALIZE_SCALAR(ioEnable
);
2481 * Serialize the data Fifos
2483 rxFifo
.serialize("rxFifo", os
);
2484 txFifo
.serialize("txFifo", os
);
2487 * Serialize the various helper variables
2489 bool txPacketExists
= txPacket
;
2490 SERIALIZE_SCALAR(txPacketExists
);
2491 if (txPacketExists
) {
2492 txPacket
->length
= txPacketBufPtr
- txPacket
->data
;
2493 txPacket
->serialize("txPacket", os
);
2494 uint32_t txPktBufPtr
= (uint32_t) (txPacketBufPtr
- txPacket
->data
);
2495 SERIALIZE_SCALAR(txPktBufPtr
);
2498 bool rxPacketExists
= rxPacket
;
2499 SERIALIZE_SCALAR(rxPacketExists
);
2500 if (rxPacketExists
) {
2501 rxPacket
->serialize("rxPacket", os
);
2502 uint32_t rxPktBufPtr
= (uint32_t) (rxPacketBufPtr
- rxPacket
->data
);
2503 SERIALIZE_SCALAR(rxPktBufPtr
);
2506 SERIALIZE_SCALAR(txXferLen
);
2507 SERIALIZE_SCALAR(rxXferLen
);
2510 * Serialize Cached Descriptors
2512 SERIALIZE_SCALAR(rxDesc64
.link
);
2513 SERIALIZE_SCALAR(rxDesc64
.bufptr
);
2514 SERIALIZE_SCALAR(rxDesc64
.cmdsts
);
2515 SERIALIZE_SCALAR(rxDesc64
.extsts
);
2516 SERIALIZE_SCALAR(txDesc64
.link
);
2517 SERIALIZE_SCALAR(txDesc64
.bufptr
);
2518 SERIALIZE_SCALAR(txDesc64
.cmdsts
);
2519 SERIALIZE_SCALAR(txDesc64
.extsts
);
2520 SERIALIZE_SCALAR(rxDesc32
.link
);
2521 SERIALIZE_SCALAR(rxDesc32
.bufptr
);
2522 SERIALIZE_SCALAR(rxDesc32
.cmdsts
);
2523 SERIALIZE_SCALAR(rxDesc32
.extsts
);
2524 SERIALIZE_SCALAR(txDesc32
.link
);
2525 SERIALIZE_SCALAR(txDesc32
.bufptr
);
2526 SERIALIZE_SCALAR(txDesc32
.cmdsts
);
2527 SERIALIZE_SCALAR(txDesc32
.extsts
);
2528 SERIALIZE_SCALAR(extstsEnable
);
2531 * Serialize tx state machine
2533 int txState
= this->txState
;
2534 SERIALIZE_SCALAR(txState
);
2535 SERIALIZE_SCALAR(txEnable
);
2536 SERIALIZE_SCALAR(CTDD
);
2537 SERIALIZE_SCALAR(txFragPtr
);
2538 SERIALIZE_SCALAR(txDescCnt
);
2539 int txDmaState
= this->txDmaState
;
2540 SERIALIZE_SCALAR(txDmaState
);
2541 SERIALIZE_SCALAR(txKickTick
);
2544 * Serialize rx state machine
2546 int rxState
= this->rxState
;
2547 SERIALIZE_SCALAR(rxState
);
2548 SERIALIZE_SCALAR(rxEnable
);
2549 SERIALIZE_SCALAR(CRDD
);
2550 SERIALIZE_SCALAR(rxPktBytes
);
2551 SERIALIZE_SCALAR(rxFragPtr
);
2552 SERIALIZE_SCALAR(rxDescCnt
);
2553 int rxDmaState
= this->rxDmaState
;
2554 SERIALIZE_SCALAR(rxDmaState
);
2555 SERIALIZE_SCALAR(rxKickTick
);
2558 * Serialize EEPROM state machine
2560 int eepromState
= this->eepromState
;
2561 SERIALIZE_SCALAR(eepromState
);
2562 SERIALIZE_SCALAR(eepromClk
);
2563 SERIALIZE_SCALAR(eepromBitsToRx
);
2564 SERIALIZE_SCALAR(eepromOpcode
);
2565 SERIALIZE_SCALAR(eepromAddress
);
2566 SERIALIZE_SCALAR(eepromData
);
2569 * If there's a pending transmit, store the time so we can
2570 * reschedule it later
2572 Tick transmitTick
= txEvent
.scheduled() ? txEvent
.when() - curTick
: 0;
2573 SERIALIZE_SCALAR(transmitTick
);
2576 * receive address filter settings
2578 SERIALIZE_SCALAR(rxFilterEnable
);
2579 SERIALIZE_SCALAR(acceptBroadcast
);
2580 SERIALIZE_SCALAR(acceptMulticast
);
2581 SERIALIZE_SCALAR(acceptUnicast
);
2582 SERIALIZE_SCALAR(acceptPerfect
);
2583 SERIALIZE_SCALAR(acceptArp
);
2584 SERIALIZE_SCALAR(multicastHashEnable
);
2587 * Keep track of pending interrupt status.
2589 SERIALIZE_SCALAR(intrTick
);
2590 SERIALIZE_SCALAR(cpuPendingIntr
);
2591 Tick intrEventTick
= 0;
2593 intrEventTick
= intrEvent
->when();
2594 SERIALIZE_SCALAR(intrEventTick
);
2599 NSGigE::unserialize(Checkpoint
*cp
, const std::string
§ion
)
2601 // Unserialize the PciDev base class
2602 PciDev::unserialize(cp
, section
);
2604 UNSERIALIZE_SCALAR(regs
.command
);
2605 UNSERIALIZE_SCALAR(regs
.config
);
2606 UNSERIALIZE_SCALAR(regs
.mear
);
2607 UNSERIALIZE_SCALAR(regs
.ptscr
);
2608 UNSERIALIZE_SCALAR(regs
.isr
);
2609 UNSERIALIZE_SCALAR(regs
.imr
);
2610 UNSERIALIZE_SCALAR(regs
.ier
);
2611 UNSERIALIZE_SCALAR(regs
.ihr
);
2612 UNSERIALIZE_SCALAR(regs
.txdp
);
2613 UNSERIALIZE_SCALAR(regs
.txdp_hi
);
2614 UNSERIALIZE_SCALAR(regs
.txcfg
);
2615 UNSERIALIZE_SCALAR(regs
.gpior
);
2616 UNSERIALIZE_SCALAR(regs
.rxdp
);
2617 UNSERIALIZE_SCALAR(regs
.rxdp_hi
);
2618 UNSERIALIZE_SCALAR(regs
.rxcfg
);
2619 UNSERIALIZE_SCALAR(regs
.pqcr
);
2620 UNSERIALIZE_SCALAR(regs
.wcsr
);
2621 UNSERIALIZE_SCALAR(regs
.pcr
);
2622 UNSERIALIZE_SCALAR(regs
.rfcr
);
2623 UNSERIALIZE_SCALAR(regs
.rfdr
);
2624 UNSERIALIZE_SCALAR(regs
.brar
);
2625 UNSERIALIZE_SCALAR(regs
.brdr
);
2626 UNSERIALIZE_SCALAR(regs
.srr
);
2627 UNSERIALIZE_SCALAR(regs
.mibc
);
2628 UNSERIALIZE_SCALAR(regs
.vrcr
);
2629 UNSERIALIZE_SCALAR(regs
.vtcr
);
2630 UNSERIALIZE_SCALAR(regs
.vdr
);
2631 UNSERIALIZE_SCALAR(regs
.ccsr
);
2632 UNSERIALIZE_SCALAR(regs
.tbicr
);
2633 UNSERIALIZE_SCALAR(regs
.tbisr
);
2634 UNSERIALIZE_SCALAR(regs
.tanar
);
2635 UNSERIALIZE_SCALAR(regs
.tanlpar
);
2636 UNSERIALIZE_SCALAR(regs
.taner
);
2637 UNSERIALIZE_SCALAR(regs
.tesr
);
2639 UNSERIALIZE_ARRAY(rom
.perfectMatch
, ETH_ADDR_LEN
);
2640 UNSERIALIZE_ARRAY(rom
.filterHash
, FHASH_SIZE
);
2642 UNSERIALIZE_SCALAR(ioEnable
);
2645 * unserialize the data fifos
2647 rxFifo
.unserialize("rxFifo", cp
, section
);
2648 txFifo
.unserialize("txFifo", cp
, section
);
2651 * unserialize the various helper variables
2653 bool txPacketExists
;
2654 UNSERIALIZE_SCALAR(txPacketExists
);
2655 if (txPacketExists
) {
2656 txPacket
= new EthPacketData(16384);
2657 txPacket
->unserialize("txPacket", cp
, section
);
2658 uint32_t txPktBufPtr
;
2659 UNSERIALIZE_SCALAR(txPktBufPtr
);
2660 txPacketBufPtr
= (uint8_t *) txPacket
->data
+ txPktBufPtr
;
2664 bool rxPacketExists
;
2665 UNSERIALIZE_SCALAR(rxPacketExists
);
2667 if (rxPacketExists
) {
2668 rxPacket
= new EthPacketData(16384);
2669 rxPacket
->unserialize("rxPacket", cp
, section
);
2670 uint32_t rxPktBufPtr
;
2671 UNSERIALIZE_SCALAR(rxPktBufPtr
);
2672 rxPacketBufPtr
= (uint8_t *) rxPacket
->data
+ rxPktBufPtr
;
2676 UNSERIALIZE_SCALAR(txXferLen
);
2677 UNSERIALIZE_SCALAR(rxXferLen
);
2680 * Unserialize Cached Descriptors
2682 UNSERIALIZE_SCALAR(rxDesc64
.link
);
2683 UNSERIALIZE_SCALAR(rxDesc64
.bufptr
);
2684 UNSERIALIZE_SCALAR(rxDesc64
.cmdsts
);
2685 UNSERIALIZE_SCALAR(rxDesc64
.extsts
);
2686 UNSERIALIZE_SCALAR(txDesc64
.link
);
2687 UNSERIALIZE_SCALAR(txDesc64
.bufptr
);
2688 UNSERIALIZE_SCALAR(txDesc64
.cmdsts
);
2689 UNSERIALIZE_SCALAR(txDesc64
.extsts
);
2690 UNSERIALIZE_SCALAR(rxDesc32
.link
);
2691 UNSERIALIZE_SCALAR(rxDesc32
.bufptr
);
2692 UNSERIALIZE_SCALAR(rxDesc32
.cmdsts
);
2693 UNSERIALIZE_SCALAR(rxDesc32
.extsts
);
2694 UNSERIALIZE_SCALAR(txDesc32
.link
);
2695 UNSERIALIZE_SCALAR(txDesc32
.bufptr
);
2696 UNSERIALIZE_SCALAR(txDesc32
.cmdsts
);
2697 UNSERIALIZE_SCALAR(txDesc32
.extsts
);
2698 UNSERIALIZE_SCALAR(extstsEnable
);
2701 * unserialize tx state machine
2704 UNSERIALIZE_SCALAR(txState
);
2705 this->txState
= (TxState
) txState
;
2706 UNSERIALIZE_SCALAR(txEnable
);
2707 UNSERIALIZE_SCALAR(CTDD
);
2708 UNSERIALIZE_SCALAR(txFragPtr
);
2709 UNSERIALIZE_SCALAR(txDescCnt
);
2711 UNSERIALIZE_SCALAR(txDmaState
);
2712 this->txDmaState
= (DmaState
) txDmaState
;
2713 UNSERIALIZE_SCALAR(txKickTick
);
2715 txKickEvent
.schedule(txKickTick
);
2718 * unserialize rx state machine
2721 UNSERIALIZE_SCALAR(rxState
);
2722 this->rxState
= (RxState
) rxState
;
2723 UNSERIALIZE_SCALAR(rxEnable
);
2724 UNSERIALIZE_SCALAR(CRDD
);
2725 UNSERIALIZE_SCALAR(rxPktBytes
);
2726 UNSERIALIZE_SCALAR(rxFragPtr
);
2727 UNSERIALIZE_SCALAR(rxDescCnt
);
2729 UNSERIALIZE_SCALAR(rxDmaState
);
2730 this->rxDmaState
= (DmaState
) rxDmaState
;
2731 UNSERIALIZE_SCALAR(rxKickTick
);
2733 rxKickEvent
.schedule(rxKickTick
);
2736 * Unserialize EEPROM state machine
2739 UNSERIALIZE_SCALAR(eepromState
);
2740 this->eepromState
= (EEPROMState
) eepromState
;
2741 UNSERIALIZE_SCALAR(eepromClk
);
2742 UNSERIALIZE_SCALAR(eepromBitsToRx
);
2743 UNSERIALIZE_SCALAR(eepromOpcode
);
2744 UNSERIALIZE_SCALAR(eepromAddress
);
2745 UNSERIALIZE_SCALAR(eepromData
);
2748 * If there's a pending transmit, reschedule it now
2751 UNSERIALIZE_SCALAR(transmitTick
);
2753 txEvent
.schedule(curTick
+ transmitTick
);
2756 * unserialize receive address filter settings
2758 UNSERIALIZE_SCALAR(rxFilterEnable
);
2759 UNSERIALIZE_SCALAR(acceptBroadcast
);
2760 UNSERIALIZE_SCALAR(acceptMulticast
);
2761 UNSERIALIZE_SCALAR(acceptUnicast
);
2762 UNSERIALIZE_SCALAR(acceptPerfect
);
2763 UNSERIALIZE_SCALAR(acceptArp
);
2764 UNSERIALIZE_SCALAR(multicastHashEnable
);
2767 * Keep track of pending interrupt status.
2769 UNSERIALIZE_SCALAR(intrTick
);
2770 UNSERIALIZE_SCALAR(cpuPendingIntr
);
2772 UNSERIALIZE_SCALAR(intrEventTick
);
2773 if (intrEventTick
) {
2774 intrEvent
= new IntrEvent(this, true);
2775 intrEvent
->schedule(intrEventTick
);
2779 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt
)
2781 SimObjectParam
<EtherInt
*> peer
;
2782 SimObjectParam
<NSGigE
*> device
;
2784 END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt
)
2786 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt
)
2788 INIT_PARAM_DFLT(peer
, "peer interface", NULL
),
2789 INIT_PARAM(device
, "Ethernet device of this interface")
2791 END_INIT_SIM_OBJECT_PARAMS(NSGigEInt
)
2793 CREATE_SIM_OBJECT(NSGigEInt
)
2795 NSGigEInt
*dev_int
= new NSGigEInt(getInstanceName(), device
);
2797 EtherInt
*p
= (EtherInt
*)peer
;
2799 dev_int
->setPeer(p
);
2800 p
->setPeer(dev_int
);
2806 REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt
)
2809 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE
)
2811 SimObjectParam
<System
*> system
;
2812 SimObjectParam
<Platform
*> platform
;
2813 SimObjectParam
<PciConfigData
*> configdata
;
2814 Param
<uint32_t> pci_bus
;
2815 Param
<uint32_t> pci_dev
;
2816 Param
<uint32_t> pci_func
;
2817 Param
<Tick
> pio_latency
;
2818 Param
<Tick
> config_latency
;
2821 Param
<bool> dma_desc_free
;
2822 Param
<bool> dma_data_free
;
2823 Param
<Tick
> dma_read_delay
;
2824 Param
<Tick
> dma_write_delay
;
2825 Param
<Tick
> dma_read_factor
;
2826 Param
<Tick
> dma_write_factor
;
2827 Param
<bool> dma_no_allocate
;
2828 Param
<Tick
> intr_delay
;
2830 Param
<Tick
> rx_delay
;
2831 Param
<Tick
> tx_delay
;
2832 Param
<uint32_t> rx_fifo_size
;
2833 Param
<uint32_t> tx_fifo_size
;
2835 Param
<bool> rx_filter
;
2836 Param
<string
> hardware_address
;
2837 Param
<bool> rx_thread
;
2838 Param
<bool> tx_thread
;
2841 END_DECLARE_SIM_OBJECT_PARAMS(NSGigE
)
2843 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE
)
2845 INIT_PARAM(system
, "System pointer"),
2846 INIT_PARAM(platform
, "Platform pointer"),
2847 INIT_PARAM(configdata
, "PCI Config data"),
2848 INIT_PARAM(pci_bus
, "PCI bus ID"),
2849 INIT_PARAM(pci_dev
, "PCI device number"),
2850 INIT_PARAM(pci_func
, "PCI function code"),
2851 INIT_PARAM_DFLT(pio_latency
, "Programmed IO latency in bus cycles", 1),
2852 INIT_PARAM(config_latency
, "Number of cycles for a config read or write"),
2853 INIT_PARAM(clock
, "State machine cycle time"),
2855 INIT_PARAM(dma_desc_free
, "DMA of Descriptors is free"),
2856 INIT_PARAM(dma_data_free
, "DMA of Data is free"),
2857 INIT_PARAM(dma_read_delay
, "fixed delay for dma reads"),
2858 INIT_PARAM(dma_write_delay
, "fixed delay for dma writes"),
2859 INIT_PARAM(dma_read_factor
, "multiplier for dma reads"),
2860 INIT_PARAM(dma_write_factor
, "multiplier for dma writes"),
2861 INIT_PARAM(dma_no_allocate
, "Should DMA reads allocate cache lines"),
2862 INIT_PARAM(intr_delay
, "Interrupt Delay in microseconds"),
2864 INIT_PARAM(rx_delay
, "Receive Delay"),
2865 INIT_PARAM(tx_delay
, "Transmit Delay"),
2866 INIT_PARAM(rx_fifo_size
, "max size in bytes of rxFifo"),
2867 INIT_PARAM(tx_fifo_size
, "max size in bytes of txFifo"),
2869 INIT_PARAM(rx_filter
, "Enable Receive Filter"),
2870 INIT_PARAM(hardware_address
, "Ethernet Hardware Address"),
2871 INIT_PARAM(rx_thread
, ""),
2872 INIT_PARAM(tx_thread
, ""),
2875 END_INIT_SIM_OBJECT_PARAMS(NSGigE
)
2878 CREATE_SIM_OBJECT(NSGigE
)
2880 NSGigE::Params
*params
= new NSGigE::Params
;
2882 params
->name
= getInstanceName();
2883 params
->platform
= platform
;
2884 params
->system
= system
;
2885 params
->configData
= configdata
;
2886 params
->busNum
= pci_bus
;
2887 params
->deviceNum
= pci_dev
;
2888 params
->functionNum
= pci_func
;
2889 params
->pio_delay
= pio_latency
;
2890 params
->config_delay
= config_latency
;
2892 params
->clock
= clock
;
2893 params
->dma_desc_free
= dma_desc_free
;
2894 params
->dma_data_free
= dma_data_free
;
2895 params
->dma_read_delay
= dma_read_delay
;
2896 params
->dma_write_delay
= dma_write_delay
;
2897 params
->dma_read_factor
= dma_read_factor
;
2898 params
->dma_write_factor
= dma_write_factor
;
2899 params
->dma_no_allocate
= dma_no_allocate
;
2900 params
->pio_delay
= pio_latency
;
2901 params
->intr_delay
= intr_delay
;
2903 params
->rx_delay
= rx_delay
;
2904 params
->tx_delay
= tx_delay
;
2905 params
->rx_fifo_size
= rx_fifo_size
;
2906 params
->tx_fifo_size
= tx_fifo_size
;
2908 params
->rx_filter
= rx_filter
;
2909 params
->eaddr
= hardware_address
;
2910 params
->rx_thread
= rx_thread
;
2911 params
->tx_thread
= tx_thread
;
2914 return new NSGigE(params
);
2917 REGISTER_SIM_OBJECT("NSGigE", NSGigE
)