2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 * Device module for modelling the National Semiconductor
31 * DP83820 ethernet controller. Does not support priority queueing
37 #include "base/inet.hh"
38 #include "cpu/exec_context.hh"
39 #include "dev/etherlink.hh"
40 #include "dev/ns_gige.hh"
41 #include "dev/pciconfigall.hh"
42 #include "mem/bus/bus.hh"
43 #include "mem/bus/dma_interface.hh"
44 #include "mem/bus/pio_interface.hh"
45 #include "mem/bus/pio_interface_impl.hh"
46 #include "mem/functional/memory_control.hh"
47 #include "mem/functional/physical.hh"
48 #include "sim/builder.hh"
49 #include "sim/debug.hh"
50 #include "sim/host.hh"
51 #include "sim/stats.hh"
52 #include "arch/vtophys.hh"
// Printable names for the receive/transmit state machines and the DMA
// engine states; indexed by rxState/txState/dmaState in the DPRINTF
// trace calls later in this file (e.g. NsRxStateStrings[rxState]).
// NOTE(review): the initializer lists (original lines 55-64, 66-75,
// 77-86) are missing from this extracted fragment -- restore from the
// full source.
54 const char *NsRxStateStrings
[] =
65 const char *NsTxStateStrings
[] =
76 const char *NsDmaState
[] =
// Bring the target-ISA helpers (e.g. vtophys, EV5 address macros used
// below) into scope.
87 using namespace TheISA
;
89 ///////////////////////////////////////////////////////////////////////
// NSGigE constructor: forwards the shared parameter struct to the
// PciDev base, then initializes the TX/RX FIFOs, both state machines
// (txState/rxState start Idle), DMA bookkeeping, EEPROM emulation
// state, all kick/DMA/interrupt events, and the RX filter flags from
// the Params fields.  NOTE(review): this is a lossy fragment -- many
// original lines are missing, so braces and several statements are
// absent here.
93 NSGigE::NSGigE(Params
*p
)
94 : PciDev(p
), ioEnable(false),
95 txFifo(p
->tx_fifo_size
), rxFifo(p
->rx_fifo_size
),
96 txPacket(0), rxPacket(0), txPacketBufPtr(NULL
), rxPacketBufPtr(NULL
),
97 txXferLen(0), rxXferLen(0), clock(p
->clock
),
98 txState(txIdle
), txEnable(false), CTDD(false),
99 txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle
), rxState(rxIdle
),
100 rxEnable(false), CRDD(false), rxPktBytes(0),
101 rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle
), extstsEnable(false),
102 eepromState(eepromStart
), rxDmaReadEvent(this), rxDmaWriteEvent(this),
103 txDmaReadEvent(this), txDmaWriteEvent(this),
104 dmaDescFree(p
->dma_desc_free
), dmaDataFree(p
->dma_data_free
),
105 txDelay(p
->tx_delay
), rxDelay(p
->rx_delay
),
106 rxKickTick(0), rxKickEvent(this), txKickTick(0), txKickEvent(this),
107 txEvent(this), rxFilterEnable(p
->rx_filter
), acceptBroadcast(false),
108 acceptMulticast(false), acceptUnicast(false),
109 acceptPerfect(false), acceptArp(false), multicastHashEnable(false),
110 physmem(p
->pmem
), intrTick(0), cpuPendingIntr(false),
111 intrEvent(0), interface(0)
// Create the programmed-I/O interface; register accesses arrive via
// NSGigE::cacheAccess.  PIO latency is scaled by the PIO bus clock.
114 pioInterface
= newPioInterface(name() + ".pio", p
->hier
,
116 &NSGigE::cacheAccess
)
;
117 pioLatency
= p
->pio_latency
* p
->pio_bus
->clockRate
;
// Build DMA interfaces for device-initiated memory traffic.
// NOTE(review): the surrounding branch structure (header vs. payload
// bus selection, original lines 118-130) is only partially visible.
122 dmaInterface
= new DMAInterface
<Bus
>(name() + ".dma",
127 dmaInterface
= new DMAInterface
<Bus
>(name() + ".dma",
131 } else if (p
->payload_bus
)
132 panic("Must define a header bus if defining a payload bus");
// Modelled timing knobs: interrupt posting delay and per-DMA fixed
// delay plus per-64-byte-chunk factor (see doRxDmaRead/Write below).
134 intrDelay
= p
->intr_delay
;
135 dmaReadDelay
= p
->dma_read_delay
;
136 dmaWriteDelay
= p
->dma_write_delay
;
137 dmaReadFactor
= p
->dma_read_factor
;
138 dmaWriteFactor
= p
->dma_write_factor
;
// Seed the perfect-match filter ROM with the device MAC address.
141 memcpy(&rom
.perfectMatch
, p
->eaddr
.bytes(), ETH_ADDR_LEN
);
// Zero the 32-bit and 64-bit descriptor shadow copies.
143 memset(&rxDesc32
, 0, sizeof(rxDesc32
));
144 memset(&txDesc32
, 0, sizeof(txDesc32
));
145 memset(&rxDesc64
, 0, sizeof(rxDesc64
));
146 memset(&txDesc64
, 0, sizeof(txDesc64
));
// Statistics registration fragment (the enclosing method's signature,
// original line ~150, is not visible in this extract).  Each counter
// is registered under this object's name() with a description; the
// stat variable declarations and .precision()/.prereq() calls from
// the full file are missing here.
156 .name(name() + ".txBytes")
157 .desc("Bytes Transmitted")
162 .name(name() + ".rxBytes")
163 .desc("Bytes Received")
168 .name(name() + ".txPackets")
169 .desc("Number of Packets Transmitted")
174 .name(name() + ".rxPackets")
175 .desc("Number of Packets Received")
180 .name(name() + ".txIpChecksums")
181 .desc("Number of tx IP Checksums done by device")
187 .name(name() + ".rxIpChecksums")
188 .desc("Number of rx IP Checksums done by device")
194 .name(name() + ".txTcpChecksums")
195 .desc("Number of tx TCP Checksums done by device")
201 .name(name() + ".rxTcpChecksums")
202 .desc("Number of rx TCP Checksums done by device")
208 .name(name() + ".txUdpChecksums")
209 .desc("Number of tx UDP Checksums done by device")
215 .name(name() + ".rxUdpChecksums")
216 .desc("Number of rx UDP Checksums done by device")
222 .name(name() + ".descDMAReads")
223 .desc("Number of descriptors the device read w/ DMA")
228 .name(name() + ".descDMAWrites")
229 .desc("Number of descriptors the device wrote w/ DMA")
234 .name(name() + ".descDmaReadBytes")
235 .desc("number of descriptor bytes read w/ DMA")
240 .name(name() + ".descDmaWriteBytes")
241 .desc("number of descriptor bytes write w/ DMA")
246 .name(name() + ".txBandwidth")
247 .desc("Transmit Bandwidth (bits/s)")
253 .name(name() + ".rxBandwidth")
254 .desc("Receive Bandwidth (bits/s)")
260 .name(name() + ".totBandwidth")
261 .desc("Total Bandwidth (bits/s)")
267 .name(name() + ".totPackets")
268 .desc("Total Packets")
274 .name(name() + ".totBytes")
281 .name(name() + ".totPPS")
282 .desc("Total Tranmission Rate (packets/s)")
288 .name(name() + ".txPPS")
289 .desc("Packet Tranmission Rate (packets/s)")
295 .name(name() + ".rxPPS")
296 .desc("Packet Reception Rate (packets/s)")
// Per-interrupt-cause counters: "posted" = CPU posts, "total" = bits
// written into the ISR, "coalesced" = average total/posted (formulas
// defined below).
302 .name(name() + ".postedSwi")
303 .desc("number of software interrupts posted to CPU")
308 .name(name() + ".totalSwi")
309 .desc("total number of Swi written to ISR")
314 .name(name() + ".coalescedSwi")
315 .desc("average number of Swi's coalesced into each post")
320 .name(name() + ".postedRxIdle")
321 .desc("number of rxIdle interrupts posted to CPU")
326 .name(name() + ".totalRxIdle")
327 .desc("total number of RxIdle written to ISR")
332 .name(name() + ".coalescedRxIdle")
333 .desc("average number of RxIdle's coalesced into each post")
338 .name(name() + ".postedRxOk")
339 .desc("number of RxOk interrupts posted to CPU")
344 .name(name() + ".totalRxOk")
345 .desc("total number of RxOk written to ISR")
350 .name(name() + ".coalescedRxOk")
351 .desc("average number of RxOk's coalesced into each post")
356 .name(name() + ".postedRxDesc")
357 .desc("number of RxDesc interrupts posted to CPU")
362 .name(name() + ".totalRxDesc")
363 .desc("total number of RxDesc written to ISR")
368 .name(name() + ".coalescedRxDesc")
369 .desc("average number of RxDesc's coalesced into each post")
374 .name(name() + ".postedTxOk")
375 .desc("number of TxOk interrupts posted to CPU")
380 .name(name() + ".totalTxOk")
381 .desc("total number of TxOk written to ISR")
386 .name(name() + ".coalescedTxOk")
387 .desc("average number of TxOk's coalesced into each post")
392 .name(name() + ".postedTxIdle")
393 .desc("number of TxIdle interrupts posted to CPU")
398 .name(name() + ".totalTxIdle")
399 .desc("total number of TxIdle written to ISR")
404 .name(name() + ".coalescedTxIdle")
405 .desc("average number of TxIdle's coalesced into each post")
410 .name(name() + ".postedTxDesc")
411 .desc("number of TxDesc interrupts posted to CPU")
416 .name(name() + ".totalTxDesc")
417 .desc("total number of TxDesc written to ISR")
422 .name(name() + ".coalescedTxDesc")
423 .desc("average number of TxDesc's coalesced into each post")
428 .name(name() + ".postedRxOrn")
429 .desc("number of RxOrn posted to CPU")
434 .name(name() + ".totalRxOrn")
435 .desc("total number of RxOrn written to ISR")
440 .name(name() + ".coalescedRxOrn")
441 .desc("average number of RxOrn's coalesced into each post")
446 .name(name() + ".coalescedTotal")
447 .desc("average number of interrupts coalesced into each post")
452 .name(name() + ".postedInterrupts")
453 .desc("number of posts to CPU")
458 .name(name() + ".droppedPackets")
459 .desc("number of packets dropped")
// Derived formulas: average number of each ISR cause folded into a
// single CPU interrupt post.
463 coalescedSwi
= totalSwi
/ postedInterrupts
;
464 coalescedRxIdle
= totalRxIdle
/ postedInterrupts
;
465 coalescedRxOk
= totalRxOk
/ postedInterrupts
;
466 coalescedRxDesc
= totalRxDesc
/ postedInterrupts
;
467 coalescedTxOk
= totalTxOk
/ postedInterrupts
;
468 coalescedTxIdle
= totalTxIdle
/ postedInterrupts
;
469 coalescedTxDesc
= totalTxDesc
/ postedInterrupts
;
470 coalescedRxOrn
= totalRxOrn
/ postedInterrupts
;
472 coalescedTotal
= (totalSwi
+ totalRxIdle
+ totalRxOk
+ totalRxDesc
+
473 totalTxOk
+ totalTxIdle
+ totalTxDesc
+
474 totalRxOrn
) / postedInterrupts
;
// Bandwidth and packet-rate formulas in terms of simulated seconds.
476 txBandwidth
= txBytes
* Stats::constant(8) / simSeconds
;
477 rxBandwidth
= rxBytes
* Stats::constant(8) / simSeconds
;
478 totBandwidth
= txBandwidth
+ rxBandwidth
;
479 totBytes
= txBytes
+ rxBytes
;
480 totPackets
= txPackets
+ rxPackets
;
482 txPacketRate
= txPackets
/ simSeconds
;
483 rxPacketRate
= rxPackets
/ simSeconds
;
487 * This is to read the PCI general configuration registers
// Reads below PCI_DEVICE_SPECIFIC are delegated to the generic PciDev
// implementation; device-specific PCI config space is unimplemented
// and panics.  (Return type line and braces are missing from this
// extracted fragment.)
490 NSGigE::readConfig(int offset
, int size
, uint8_t *data
)
492 if (offset
< PCI_DEVICE_SPECIFIC
)
493 PciDev::readConfig(offset
, size
, data
);
495 panic("Device specific PCI config space not implemented!\n");
499 * This is to write to the PCI general configuration registers
// Delegates generic config writes to PciDev, then reacts to the side
// effects: command-register bits (IOSE/BME/MSE) and BAR0/BAR1 updates,
// re-registering the PIO address range and masking the BAR to an EV5
// uncached physical address.  (This is a lossy fragment; the switch
// header and several statements are missing.)
502 NSGigE::writeConfig(int offset
, int size
, const uint8_t* data
)
504 if (offset
< PCI_DEVICE_SPECIFIC
)
505 PciDev::writeConfig(offset
, size
, data
);
507 panic("Device specific PCI config space not implemented!\n");
509 // Need to catch writes to BARs to update the PIO interface
511 // seems to work fine without all these PCI settings, but i
512 // put in the IO to double check, an assertion will fail if we
513 // need to properly implement it
// Command-register bits: IO space enable, bus-master enable, memory
// space enable.  The bodies of these branches are not visible here.
515 if (config
.data
[offset
] & PCI_CMD_IOSE
)
521 if (config
.data
[offset
] & PCI_CMD_BME
) {
528 if (config
.data
[offset
] & PCI_CMD_MSE
) {
// BAR0: if programmed, register the new address range with the PIO
// interface and strip the EV5 uncached-space bits.
537 case PCI0_BASE_ADDR0
:
538 if (BARAddrs
[0] != 0) {
540 pioInterface
->addAddrRange(RangeSize(BARAddrs
[0], BARSize
[0]));
542 BARAddrs
[0] &= EV5::PAddrUncachedMask
;
// BAR1: same handling as BAR0.
545 case PCI0_BASE_ADDR1
:
546 if (BARAddrs
[1] != 0) {
548 pioInterface
->addAddrRange(RangeSize(BARAddrs
[1], BARSize
[1]));
550 BARAddrs
[1] &= EV5::PAddrUncachedMask
;
557 * This reads the device registers, which are detailed in the NS83820
// PIO read handler: decodes the low 12 bits of the physical address
// into a register offset, rejects reserved offsets, forwards config
// offsets to readConfig(), fakes the MIB counters, and services the
// per-register cases (only 32-bit accesses; the switch header and
// most case labels are missing from this fragment).
561 NSGigE::read(MemReqPtr
&req
, uint8_t *data
)
565 //The mask is to give you only the offset into the device register file
566 Addr daddr
= req
->paddr
& 0xfff;
567 DPRINTF(EthernetPIO
, "read da=%#x pa=%#x va=%#x size=%d\n",
568 daddr
, req
->paddr
, req
->vaddr
, req
->size
);
571 // there are some reserved registers, you can see ns_gige_reg.h and
572 // the spec sheet for details
573 if (daddr
> LAST
&& daddr
<= RESERVED
) {
574 panic("Accessing reserved register");
575 } else if (daddr
> RESERVED
&& daddr
<= 0x3FC) {
576 readConfig(daddr
& 0xff, req
->size
, data
);
578 } else if (daddr
>= MIB_START
&& daddr
<= MIB_END
) {
579 // don't implement all the MIB's. hopefully the kernel
580 // doesn't actually DEPEND upon their values
581 // MIB are just hardware stats keepers
// NOTE(review): '®' below is an HTML-entity mangling of '&reg' --
// the original declared a reference 'uint32_t &reg'; restore from
// the unmangled source.
582 uint32_t ®
= *(uint32_t *) data
;
585 } else if (daddr
> 0x3FC)
586 panic("Something is messed up!\n");
589 case sizeof(uint32_t):
// NOTE(review): '®' here is also a mangled 'uint32_t &reg'.
591 uint32_t ®
= *(uint32_t *)data
;
// CR register: the RXD/TXD/TXR/RXR command bits read back as zero.
597 //these are supposed to be cleared on a read
598 reg
&= ~(CR_RXD
| CR_TXD
| CR_TXR
| CR_RXR
);
// ISR read clears all implemented interrupt causes.
615 devIntrClear(ISR_ALL
);
670 // see the spec sheet for how RFCR and RFDR work
671 // basically, you write to RFCR to tell the machine
672 // what you want to do next, then you act upon RFDR,
673 // and the device will be prepared b/c of what you
680 rfaddr
= (uint16_t)(regs
.rfcr
& RFCR_RFADDR
);
682 // Read from perfect match ROM octets
684 reg
= rom
.perfectMatch
[1];
686 reg
+= rom
.perfectMatch
[0];
689 reg
= rom
.perfectMatch
[3] << 8;
690 reg
+= rom
.perfectMatch
[2];
693 reg
= rom
.perfectMatch
[5] << 8;
694 reg
+= rom
.perfectMatch
[4];
697 // Read filter hash table
698 if (rfaddr
>= FHASH_ADDR
&&
699 rfaddr
< FHASH_ADDR
+ FHASH_SIZE
) {
701 // Only word-aligned reads supported
703 panic("unaligned read from filter hash table!");
705 reg
= rom
.filterHash
[rfaddr
- FHASH_ADDR
+ 1] << 8;
706 reg
+= rom
.filterHash
[rfaddr
- FHASH_ADDR
];
710 panic("reading RFDR for something other than pattern"
711 " matching or hashing! %#x\n", rfaddr
);
// MIBC read: the MIBS/ACLR control bits read back as zero.
721 reg
&= ~(MIBC_MIBS
| MIBC_ACLR
);
// M5-specific register: report the configured rx/tx thread flags.
766 if (params()->rx_thread
)
767 reg
|= M5REG_RX_THREAD
;
768 if (params()->tx_thread
)
769 reg
|= M5REG_TX_THREAD
;
775 panic("reading unimplemented register: addr=%#x", daddr
);
778 DPRINTF(EthernetPIO
, "read from %#x: data=%d data=%#x\n",
784 panic("accessing register with invalid size: addr=%#x, size=%d",
// PIO write handler: same address decode as read() (low 12 bits),
// then a large per-register switch for 32-bit writes.  Only the
// side effects the Linux/FreeBSD drivers rely on are modelled; most
// "don't care" bits are acknowledged with empty statements, and
// writes the drivers never perform panic.  This is a lossy fragment:
// the switch header, case labels, braces and many statements are
// missing.
792 NSGigE::write(MemReqPtr
&req
, const uint8_t *data
)
796 Addr daddr
= req
->paddr
& 0xfff;
797 DPRINTF(EthernetPIO
, "write da=%#x pa=%#x va=%#x size=%d\n",
798 daddr
, req
->paddr
, req
->vaddr
, req
->size
);
800 if (daddr
> LAST
&& daddr
<= RESERVED
) {
801 panic("Accessing reserved register");
802 } else if (daddr
> RESERVED
&& daddr
<= 0x3FC) {
803 writeConfig(daddr
& 0xff, req
->size
, data
);
805 } else if (daddr
> 0x3FC)
806 panic("Something is messed up!\n");
808 if (req
->size
== sizeof(uint32_t)) {
809 uint32_t reg
= *(uint32_t *)data
;
812 DPRINTF(EthernetPIO
, "write data=%d data=%#x\n", reg
, reg
);
// CR register: enabling TX/RX kicks the corresponding idle state
// machine; a software-interrupt request posts ISR_SWI.
819 } else if (reg
& CR_TXE
) {
822 // the kernel is enabling the transmit machine
823 if (txState
== txIdle
)
829 } else if (reg
& CR_RXE
) {
832 if (rxState
== rxIdle
)
843 devIntrPost(ISR_SWI
);
// CFGR: read-only bits must not be written; then clear-and-set the
// writable bits.
854 if (reg
& CFGR_LNKSTS
||
857 reg
& CFGR_RESERVED
||
858 reg
& CFGR_T64ADDR
||
859 reg
& CFGR_PCI64_DET
)
861 // First clear all writable bits
862 regs
.config
&= CFGR_LNKSTS
| CFGR_SPDSTS
| CFGR_DUPSTS
|
863 CFGR_RESERVED
| CFGR_T64ADDR
|
865 // Now set the appropriate writable bits
866 regs
.config
|= reg
& ~(CFGR_LNKSTS
| CFGR_SPDSTS
| CFGR_DUPSTS
|
867 CFGR_RESERVED
| CFGR_T64ADDR
|
870 // all these #if 0's are because i don't THINK the kernel needs to
871 // have these implemented. if there is a problem relating to one of
872 // these, you may need to add functionality in.
873 if (reg
& CFGR_TBI_EN
) ;
874 if (reg
& CFGR_MODE_1000
) ;
876 if (reg
& CFGR_AUTO_1000
)
877 panic("CFGR_AUTO_1000 not implemented!\n");
879 if (reg
& CFGR_PINT_DUPSTS
||
880 reg
& CFGR_PINT_LNKSTS
||
881 reg
& CFGR_PINT_SPDSTS
)
884 if (reg
& CFGR_TMRTEST
) ;
885 if (reg
& CFGR_MRM_DIS
) ;
886 if (reg
& CFGR_MWI_DIS
) ;
888 if (reg
& CFGR_T64ADDR
) ;
889 // panic("CFGR_T64ADDR is read only register!\n");
891 if (reg
& CFGR_PCI64_DET
)
892 panic("CFGR_PCI64_DET is read only register!\n");
894 if (reg
& CFGR_DATA64_EN
) ;
895 if (reg
& CFGR_M64ADDR
) ;
896 if (reg
& CFGR_PHY_RST
) ;
897 if (reg
& CFGR_PHY_DIS
) ;
// EXTSTS_EN toggles extended-status descriptor processing (the
// enable branch is not visible in this fragment).
899 if (reg
& CFGR_EXTSTS_EN
)
902 extstsEnable
= false;
904 if (reg
& CFGR_REQALG
) ;
906 if (reg
& CFGR_POW
) ;
907 if (reg
& CFGR_EXD
) ;
908 if (reg
& CFGR_PESEL
) ;
909 if (reg
& CFGR_BROM_DIS
) ;
910 if (reg
& CFGR_EXT_125
) ;
911 if (reg
& CFGR_BEM
) ;
// MEAR: EEPROM/MII management pins.  Only EEDO is read-only; the
// bit-banged EEPROM state machine is clocked on rising EECLK edges.
915 // Clear writable bits
916 regs
.mear
&= MEAR_EEDO
;
917 // Set appropriate writable bits
918 regs
.mear
|= reg
& ~MEAR_EEDO
;
920 // FreeBSD uses the EEPROM to read PMATCH (for the MAC address)
921 // even though it could get it through RFDR
922 if (reg
& MEAR_EESEL
) {
923 // Rising edge of clock
924 if (reg
& MEAR_EECLK
&& !eepromClk
)
928 eepromState
= eepromStart
;
929 regs
.mear
&= ~MEAR_EEDI
;
932 eepromClk
= reg
& MEAR_EECLK
;
934 // since phy is completely faked, MEAR_MD* don't matter
935 if (reg
& MEAR_MDIO
) ;
936 if (reg
& MEAR_MDDIR
) ;
937 if (reg
& MEAR_MDC
) ;
// PTSCR: fake the built-in self tests as instantly complete.
941 regs
.ptscr
= reg
& ~(PTSCR_RBIST_RDONLY
);
942 // these control BISTs for various parts of chip - we
943 // don't care or do just fake that the BIST is done
944 if (reg
& PTSCR_RBIST_EN
)
945 regs
.ptscr
|= PTSCR_RBIST_DONE
;
946 if (reg
& PTSCR_EEBIST_EN
)
947 regs
.ptscr
&= ~PTSCR_EEBIST_EN
;
948 if (reg
& PTSCR_EELOAD_EN
)
949 regs
.ptscr
&= ~PTSCR_EELOAD_EN
;
952 case ISR
: /* writing to the ISR has no effect */
953 panic("ISR is a read only register!\n");
966 /* not going to implement real interrupt holdoff */
// TXDP: transmit descriptor pointer, word-aligned; only legal while
// the TX machine is idle.
970 regs
.txdp
= (reg
& 0xFFFFFFFC);
971 assert(txState
== txIdle
);
// TXCFG: most bits are irrelevant to the simulated network.
982 if (reg
& TX_CFG_CSI
) ;
983 if (reg
& TX_CFG_HBI
) ;
984 if (reg
& TX_CFG_MLB
) ;
985 if (reg
& TX_CFG_ATP
) ;
986 if (reg
& TX_CFG_ECRETRY
) {
988 * this could easily be implemented, but considering
989 * the network is just a fake pipe, wouldn't make
994 if (reg
& TX_CFG_BRST_DIS
) ;
998 /* we handle our own DMA, ignore the kernel's exhortations */
999 if (reg
& TX_CFG_MXDMA
) ;
1002 // also, we currently don't care about fill/drain
1003 // thresholds though this may change in the future with
1004 // more realistic networks or a driver which changes it
1005 // according to feedback
// GPIOR: clear-and-set, preserving the read-only GP*_IN bits.
1010 // Only write writable bits
1011 regs
.gpior
&= GPIOR_UNUSED
| GPIOR_GP5_IN
| GPIOR_GP4_IN
1012 | GPIOR_GP3_IN
| GPIOR_GP2_IN
| GPIOR_GP1_IN
;
1013 regs
.gpior
|= reg
& ~(GPIOR_UNUSED
| GPIOR_GP5_IN
| GPIOR_GP4_IN
1014 | GPIOR_GP3_IN
| GPIOR_GP2_IN
| GPIOR_GP1_IN
);
1015 /* these just control general purpose i/o pins, don't matter */
// RXCFG: like TXCFG, mostly don't-care bits for this model.
1030 if (reg
& RX_CFG_AEP
) ;
1031 if (reg
& RX_CFG_ARP
) ;
1032 if (reg
& RX_CFG_STRIPCRC
) ;
1033 if (reg
& RX_CFG_RX_RD
) ;
1034 if (reg
& RX_CFG_ALP
) ;
1035 if (reg
& RX_CFG_AIRL
) ;
1037 /* we handle our own DMA, ignore what kernel says about it */
1038 if (reg
& RX_CFG_MXDMA
) ;
1040 //also, we currently don't care about fill/drain thresholds
1041 //though this may change in the future with more realistic
1042 //networks or a driver which changes it according to feedback
1043 if (reg
& (RX_CFG_DRTH
| RX_CFG_DRTH0
)) ;
1048 /* there is no priority queueing used in the linux 2.6 driver */
1053 /* not going to implement wake on LAN */
1058 /* not going to implement pause control */
// RFCR: latch the receive-filter mode flags used by the RX path.
1065 rxFilterEnable
= (reg
& RFCR_RFEN
) ? true : false;
1066 acceptBroadcast
= (reg
& RFCR_AAB
) ? true : false;
1067 acceptMulticast
= (reg
& RFCR_AAM
) ? true : false;
1068 acceptUnicast
= (reg
& RFCR_AAU
) ? true : false;
1069 acceptPerfect
= (reg
& RFCR_APM
) ? true : false;
1070 acceptArp
= (reg
& RFCR_AARP
) ? true : false;
1071 multicastHashEnable
= (reg
& RFCR_MHEN
) ? true : false;
1074 if (reg
& RFCR_APAT
)
1075 panic("RFCR_APAT not implemented!\n");
1077 if (reg
& RFCR_UHEN
)
1078 panic("Unicast hash filtering not used by drivers!\n");
1081 panic("RFCR_ULM not implemented!\n");
// RFDR: write perfect-match octets or filter-hash bytes at the
// address previously latched in RFCR.
1086 rfaddr
= (uint16_t)(regs
.rfcr
& RFCR_RFADDR
);
1089 rom
.perfectMatch
[0] = (uint8_t)reg
;
1090 rom
.perfectMatch
[1] = (uint8_t)(reg
>> 8);
1093 rom
.perfectMatch
[2] = (uint8_t)reg
;
1094 rom
.perfectMatch
[3] = (uint8_t)(reg
>> 8);
1097 rom
.perfectMatch
[4] = (uint8_t)reg
;
1098 rom
.perfectMatch
[5] = (uint8_t)(reg
>> 8);
1102 if (rfaddr
>= FHASH_ADDR
&&
1103 rfaddr
< FHASH_ADDR
+ FHASH_SIZE
) {
1105 // Only word-aligned writes supported
1107 panic("unaligned write to filter hash table!");
1109 rom
.filterHash
[rfaddr
- FHASH_ADDR
] = (uint8_t)reg
;
1110 rom
.filterHash
[rfaddr
- FHASH_ADDR
+ 1]
1111 = (uint8_t)(reg
>> 8);
1114 panic("writing RFDR for something other than pattern matching\
1115 or hashing! %#x\n", rfaddr
);
1123 panic("the driver never uses BRDR, something is wrong!\n");
1126 panic("SRR is read only register!\n");
1129 panic("the driver never uses MIBC, something is wrong!\n");
1140 panic("the driver never uses VDR, something is wrong!\n");
1143 /* not going to implement clockrun stuff */
// TBICR: fake TBI auto-negotiation by completing it immediately.
1149 if (reg
& TBICR_MR_LOOPBACK
)
1150 panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
1152 if (reg
& TBICR_MR_AN_ENABLE
) {
1153 regs
.tanlpar
= regs
.tanar
;
1154 regs
.tbisr
|= (TBISR_MR_AN_COMPLETE
| TBISR_MR_LINK_STATUS
);
1158 if (reg
& TBICR_MR_RESTART_AN
) ;
1164 panic("TBISR is read only register!\n");
// TANAR: clear-and-set the writable bits only.
1167 // Only write the writable bits
1168 regs
.tanar
&= TANAR_RF1
| TANAR_RF2
| TANAR_UNUSED
;
1169 regs
.tanar
|= reg
& ~(TANAR_RF1
| TANAR_RF2
| TANAR_UNUSED
);
1171 // Pause capability unimplemented
1173 if (reg
& TANAR_PS2
) ;
1174 if (reg
& TANAR_PS1
) ;
1180 panic("this should only be written to by the fake phy!\n");
1183 panic("TANER is read only register!\n");
1190 panic("invalid register access daddr=%#x", daddr
);
1193 panic("Invalid Request Size");
// Posts device interrupt cause bits: rejects reserved bits, warns on
// unimplemented ones, masks to the implemented set, ORs them into the
// ISR, counts per-cause "total" stats for causes that are unmasked,
// and if any unmasked cause is now pending schedules a CPU interrupt
// (immediately for ISR_NODELAY causes, otherwise after intrDelay --
// the delayed branch is missing from this fragment).
1200 NSGigE::devIntrPost(uint32_t interrupts
)
1202 if (interrupts
& ISR_RESERVE
)
1203 panic("Cannot set a reserved interrupt");
1205 if (interrupts
& ISR_NOIMPL
)
1206 warn("interrupt not implemented %#x\n", interrupts
);
1208 interrupts
&= ISR_IMPL
;
1209 regs
.isr
|= interrupts
;
// Per-cause statistics bumps (bodies elided in this extract).
1211 if (interrupts
& regs
.imr
) {
1212 if (interrupts
& ISR_SWI
) {
1215 if (interrupts
& ISR_RXIDLE
) {
1218 if (interrupts
& ISR_RXOK
) {
1221 if (interrupts
& ISR_RXDESC
) {
1224 if (interrupts
& ISR_TXOK
) {
1227 if (interrupts
& ISR_TXIDLE
) {
1230 if (interrupts
& ISR_TXDESC
) {
1233 if (interrupts
& ISR_RXORN
) {
1238 DPRINTF(EthernetIntr
,
1239 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
1240 interrupts
, regs
.isr
, regs
.imr
);
1242 if ((regs
.isr
& regs
.imr
)) {
1243 Tick when
= curTick
;
1244 if ((regs
.isr
& regs
.imr
& ISR_NODELAY
) == 0)
1250 /* writing this interrupt counting stats inside this means that this function
1251 is now limited to being used to clear all interrupts upon the kernel
1252 reading isr and servicing. just telling you in case you were thinking
// Clears interrupt cause bits from the ISR: rejects reserved bits,
// bumps the "posted" stat for each cause that was pending and
// unmasked, masks off the unimplemented bits, removes the causes from
// the ISR, and (in the missing tail) drops the CPU interrupt line if
// nothing unmasked remains.
1256 NSGigE::devIntrClear(uint32_t interrupts
)
1258 if (interrupts
& ISR_RESERVE
)
1259 panic("Cannot clear a reserved interrupt");
// Per-cause "posted" statistics (bodies elided in this extract).
1261 if (regs
.isr
& regs
.imr
& ISR_SWI
) {
1264 if (regs
.isr
& regs
.imr
& ISR_RXIDLE
) {
1267 if (regs
.isr
& regs
.imr
& ISR_RXOK
) {
1270 if (regs
.isr
& regs
.imr
& ISR_RXDESC
) {
1273 if (regs
.isr
& regs
.imr
& ISR_TXOK
) {
1276 if (regs
.isr
& regs
.imr
& ISR_TXIDLE
) {
1279 if (regs
.isr
& regs
.imr
& ISR_TXDESC
) {
1282 if (regs
.isr
& regs
.imr
& ISR_RXORN
) {
1286 if (regs
.isr
& regs
.imr
& ISR_IMPL
)
1289 interrupts
&= ~ISR_NOIMPL
;
1290 regs
.isr
&= ~interrupts
;
1292 DPRINTF(EthernetIntr
,
1293 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
1294 interrupts
, regs
.isr
, regs
.imr
);
1296 if (!(regs
.isr
& regs
.imr
))
// Called when the IMR changes: if any pending ISR cause is now
// unmasked, post a CPU interrupt immediately (the else branch that
// clears the line is missing from this fragment).
1301 NSGigE::devIntrChangeMask()
1303 DPRINTF(EthernetIntr
, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
1304 regs
.isr
, regs
.imr
, regs
.isr
& regs
.imr
);
1306 if (regs
.isr
& regs
.imr
)
1307 cpuIntrPost(curTick
);
// Schedules the CPU-visible interrupt for tick 'when'.  If an earlier
// interrupt is already scheduled the new request is absorbed by it;
// otherwise any stale event is squashed and a fresh IntrEvent is
// scheduled at intrTick.
1313 NSGigE::cpuIntrPost(Tick when
)
1315 // If the interrupt you want to post is later than an interrupt
1316 // already scheduled, just let it post in the coming one and don't
1317 // schedule another.
1318 // HOWEVER, must be sure that the scheduled intrTick is in the
1319 // future (this was formerly the source of a bug)
1321 * @todo this warning should be removed and the intrTick code should
1324 assert(when
>= curTick
);
1325 assert(intrTick
>= curTick
|| intrTick
== 0);
1326 if (when
> intrTick
&& intrTick
!= 0) {
1327 DPRINTF(EthernetIntr
, "don't need to schedule event...intrTick=%d\n",
1333 if (intrTick
< curTick
) {
1338 DPRINTF(EthernetIntr
, "going to schedule an interrupt for intrTick=%d\n",
// Replace any previously scheduled event with a new one at intrTick.
1342 intrEvent
->squash();
1343 intrEvent
= new IntrEvent(this, true);
1344 intrEvent
->schedule(intrTick
);
// Fires when the scheduled interrupt tick arrives: if an interrupt is
// already pending at the CPU it does nothing more; otherwise it marks
// one pending and raises the line (the platform post call is missing
// from this fragment).
1348 NSGigE::cpuInterrupt()
1350 assert(intrTick
== curTick
);
1352 // Whether or not there's a pending interrupt, we don't care about
1357 // Don't send an interrupt if there's already one
1358 if (cpuPendingIntr
) {
1359 DPRINTF(EthernetIntr
,
1360 "would send an interrupt now, but there's already pending\n");
1363 cpuPendingIntr
= true;
1365 DPRINTF(EthernetIntr
, "posting interrupt\n");
// Deasserts the CPU interrupt: no-op if none is pending; otherwise
// squashes any outstanding IntrEvent, clears the pending flag, and
// (in lines missing here) lowers the platform interrupt line.
1371 NSGigE::cpuIntrClear()
1373 if (!cpuPendingIntr
)
1377 intrEvent
->squash();
1383 cpuPendingIntr
= false;
1385 DPRINTF(EthernetIntr
, "clearing interrupt\n");
// Returns whether an interrupt is currently asserted toward the CPU.
1390 NSGigE::cpuIntrPending() const
1391 { return cpuPendingIntr
; }
// Transmit-reset fragment (the enclosing function's signature is not
// visible here): after the reset actions in the missing lines, the
// TX descriptor count must be zero and the TX DMA engine idle.
1397 DPRINTF(Ethernet
, "transmit reset\n");
1402 assert(txDescCnt
== 0);
1405 assert(txDmaState
== dmaIdle
);
// Receive-reset fragment (the enclosing function's signature is not
// visible here): after reset, no packet bytes may remain, the RX
// descriptor count must be zero and the RX DMA engine idle.
1411 DPRINTF(Ethernet
, "receive reset\n");
1414 assert(rxPktBytes
== 0);
1417 assert(rxDescCnt
== 0);
1418 assert(rxDmaState
== dmaIdle
);
// Register-reset fragment (the enclosing function's signature is not
// visible here): zeroes the whole register file, then restores the
// non-zero power-on defaults documented inline, and clears the
// software filter/extended-status flags.
// NOTE(review): '®s' on the next line is an HTML-entity mangling of
// '&regs' -- the original read 'memset(&regs, 0, sizeof(regs));'.
1426 memset(®s
, 0, sizeof(regs
));
1427 regs
.config
= (CFGR_LNKSTS
| CFGR_TBI_EN
| CFGR_MODE_1000
);
1429 regs
.txcfg
= 0x120; // set drain threshold to 1024 bytes and
1430 // fill threshold to 32 bytes
1431 regs
.rxcfg
= 0x4; // set drain threshold to 16 bytes
1432 regs
.srr
= 0x0103; // set the silicon revision to rev B or 0x103
1433 regs
.mibc
= MIBC_FRZ
;
1434 regs
.vdr
= 0x81; // set the vlan tag type to 802.1q
1435 regs
.tesr
= 0xc000; // TBI capable of both full and half duplex
1436 regs
.brar
= 0xffffffff;
1438 extstsEnable
= false;
1439 acceptBroadcast
= false;
1440 acceptMulticast
= false;
1441 acceptUnicast
= false;
1442 acceptPerfect
= false;
// Performs the actual functional copy for a pending RX DMA read:
// pulls rxDmaLen bytes at rxDmaAddr from physical memory into
// rxDmaData, returns the engine to idle, and traces/dumps the data.
1447 NSGigE::rxDmaReadCopy()
1449 assert(rxDmaState
== dmaReading
);
1451 physmem
->dma_read((uint8_t *)rxDmaData
, rxDmaAddr
, rxDmaLen
);
1452 rxDmaState
= dmaIdle
;
1454 DPRINTF(EthernetDMA
, "rx dma read paddr=%#x len=%d\n",
1455 rxDmaAddr
, rxDmaLen
);
1456 DDUMP(EthernetDMA
, rxDmaData
, rxDmaLen
);
// Starts an RX DMA read.  With a modelled DMA interface the request
// is queued on the bus (or the engine parks in dmaReadWaiting if the
// interface is busy); otherwise the copy completes after a delay of
// dmaReadDelay plus dmaReadFactor per 64-byte chunk (zero delay means
// the immediate-copy path in the missing lines runs instead).
1460 NSGigE::doRxDmaRead()
1462 assert(rxDmaState
== dmaIdle
|| rxDmaState
== dmaReadWaiting
);
1463 rxDmaState
= dmaReading
;
1465 if (dmaInterface
&& !rxDmaFree
) {
1466 if (dmaInterface
->busy())
1467 rxDmaState
= dmaReadWaiting
;
1469 dmaInterface
->doDMA(Read
, rxDmaAddr
, rxDmaLen
, curTick
,
1470 &rxDmaReadEvent
, true);
1474 if (dmaReadDelay
== 0 && dmaReadFactor
== 0) {
// Delay model: fixed latency + per-64-byte-chunk factor.
1479 Tick factor
= ((rxDmaLen
+ ULL(63)) >> ULL(6)) * dmaReadFactor
;
1480 Tick start
= curTick
+ dmaReadDelay
+ factor
;
1481 rxDmaReadEvent
.schedule(start
);
// Completion handler for an RX DMA read: finishes the copy (missing
// lines) and, if the transmit machine is stalled waiting on DMA,
// gives it priority before re-kicking the receive machine.
1486 NSGigE::rxDmaReadDone()
1488 assert(rxDmaState
== dmaReading
);
1491 // If the transmit state machine has a pending DMA, let it go first
1492 if (txDmaState
== dmaReadWaiting
|| txDmaState
== dmaWriteWaiting
)
// Performs the actual functional copy for a pending RX DMA write:
// pushes rxDmaLen bytes from rxDmaData to physical memory at
// rxDmaAddr, returns the engine to idle, and traces/dumps the data.
1499 NSGigE::rxDmaWriteCopy()
1501 assert(rxDmaState
== dmaWriting
);
1503 physmem
->dma_write(rxDmaAddr
, (uint8_t *)rxDmaData
, rxDmaLen
);
1504 rxDmaState
= dmaIdle
;
1506 DPRINTF(EthernetDMA
, "rx dma write paddr=%#x len=%d\n",
1507 rxDmaAddr
, rxDmaLen
);
1508 DDUMP(EthernetDMA
, rxDmaData
, rxDmaLen
);
// Starts an RX DMA write (mirror of doRxDmaRead, issued as a
// WriteInvalidate on the modelled bus).  Parks in dmaWriteWaiting if
// the interface is busy; otherwise completes after dmaWriteDelay plus
// dmaWriteFactor per 64-byte chunk (zero delay takes the immediate
// path in the missing lines).
1512 NSGigE::doRxDmaWrite()
1514 assert(rxDmaState
== dmaIdle
|| rxDmaState
== dmaWriteWaiting
);
1515 rxDmaState
= dmaWriting
;
1517 if (dmaInterface
&& !rxDmaFree
) {
1518 if (dmaInterface
->busy())
1519 rxDmaState
= dmaWriteWaiting
;
1521 dmaInterface
->doDMA(WriteInvalidate
, rxDmaAddr
, rxDmaLen
, curTick
,
1522 &rxDmaWriteEvent
, true);
1526 if (dmaWriteDelay
== 0 && dmaWriteFactor
== 0) {
// Delay model: fixed latency + per-64-byte-chunk factor.
1531 Tick factor
= ((rxDmaLen
+ ULL(63)) >> ULL(6)) * dmaWriteFactor
;
1532 Tick start
= curTick
+ dmaWriteDelay
+ factor
;
1533 rxDmaWriteEvent
.schedule(start
);
// Completion handler for an RX DMA write: finishes the copy (missing
// lines) and lets a DMA-stalled transmit machine run first before
// re-kicking the receive machine.
1538 NSGigE::rxDmaWriteDone()
1540 assert(rxDmaState
== dmaWriting
);
1543 // If the transmit state machine has a pending DMA, let it go first
1544 if (txDmaState
== dmaReadWaiting
|| txDmaState
== dmaWriteWaiting
)
// Receive state-machine kick fragment (the enclosing function's
// signature, original ~1550, is not visible; the DPRINTF text
// "receive kick" indicates this is the RX kick routine).  Selects
// 32-bit vs 64-bit descriptor format from CFGR_M64ADDR, then loops
// over rxState: refresh/read a descriptor via DMA, pull a packet from
// the RX FIFO, DMA its fragments to the host buffer, write back
// cmdsts/extsts (ownership, status, checksum results), and post
// RXOK/RXDESC/RXIDLE interrupts as appropriate.  Many case labels,
// braces and statements are missing from this extract.
1553 bool is64bit
= (bool)(regs
.config
& CFGR_M64ADDR
);
1556 "receive kick rxState=%s (rxBuf.size=%d) %d-bit\n",
1557 NsRxStateStrings
[rxState
], rxFifo
.size(), is64bit
? 64 : 32);
// cmdsts/extsts alias whichever descriptor shadow is active.
1560 uint32_t &cmdsts
= is64bit
? rxDesc64
.cmdsts
: rxDesc32
.cmdsts
;
1561 uint32_t &extsts
= is64bit
? rxDesc64
.extsts
: rxDesc32
.extsts
;
// Rate-limit: don't run before the scheduled kick tick.
1565 if (rxKickTick
> curTick
) {
1566 DPRINTF(EthernetSM
, "receive kick exiting, can't run till %d\n",
1572 // Go to the next state machine clock tick.
1573 rxKickTick
= curTick
+ cycles(1);
// If a DMA is waiting, retry it before touching the state machine.
1576 switch(rxDmaState
) {
1577 case dmaReadWaiting
:
1581 case dmaWriteWaiting
:
1589 link
= is64bit
? (Addr
)rxDesc64
.link
: (Addr
)rxDesc32
.link
;
1590 bufptr
= is64bit
? (Addr
)rxDesc64
.bufptr
: (Addr
)rxDesc32
.bufptr
;
1592 // see state machine from spec for details
1593 // the way this works is, if you finish work on one state and can
1594 // go directly to another, you do that through jumping to the
1595 // label "next". however, if you have intermediate work, like DMA
1596 // so that you can't go to the next state yet, you go to exit and
1597 // exit the loop. however, when the DMA is done it will trigger
1598 // an event and come back to this loop.
1602 DPRINTF(EthernetSM
, "Receive Disabled! Nothing to do.\n");
// rxDescRefr: re-fetch only the link field of the current descriptor.
1607 rxState
= rxDescRefr
;
1609 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1611 is64bit
? (void *)&rxDesc64
.link
: (void *)&rxDesc32
.link
;
1612 rxDmaLen
= is64bit
? sizeof(rxDesc64
.link
) : sizeof(rxDesc32
.link
);
1613 rxDmaFree
= dmaDescFree
;
1616 descDmaRdBytes
+= rxDmaLen
;
// rxDescRead: fetch the whole descriptor at rxdp.
1621 rxState
= rxDescRead
;
1623 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1624 rxDmaData
= is64bit
? (void *)&rxDesc64
: (void *)&rxDesc32
;
1625 rxDmaLen
= is64bit
? sizeof(rxDesc64
) : sizeof(rxDesc32
);
1626 rxDmaFree
= dmaDescFree
;
1629 descDmaRdBytes
+= rxDmaLen
;
1637 if (rxDmaState
!= dmaIdle
)
1640 rxState
= rxAdvance
;
1644 if (rxDmaState
!= dmaIdle
)
1647 DPRINTF(EthernetDesc
, "rxDesc: addr=%08x read descriptor\n",
1648 regs
.rxdp
& 0x3fffffff);
1649 DPRINTF(EthernetDesc
,
1650 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
1651 link
, bufptr
, cmdsts
, extsts
);
// Device already owns this descriptor: the ring is full -> RXIDLE.
1653 if (cmdsts
& CMDSTS_OWN
) {
1654 devIntrPost(ISR_RXIDLE
);
1658 rxState
= rxFifoBlock
;
1660 rxDescCnt
= cmdsts
& CMDSTS_LEN_MASK
;
1667 * @todo in reality, we should be able to start processing
1668 * the packet as it arrives, and not have to wait for the
1669 * full packet ot be in the receive fifo.
1674 DPRINTF(EthernetSM
, "****processing receive of new packet****\n");
1676 // If we don't have a packet, grab a new one from the fifo.
1677 rxPacket
= rxFifo
.front();
1678 rxPktBytes
= rxPacket
->length
;
1679 rxPacketBufPtr
= rxPacket
->data
;
// Trace-only header decode (IP/TCP fields) when Ethernet tracing on.
1682 if (DTRACE(Ethernet
)) {
1685 DPRINTF(Ethernet
, "ID is %d\n", ip
->id());
1689 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1690 tcp
->sport(), tcp
->dport(), tcp
->seq(),
1697 // sanity check - i think the driver behaves like this
1698 assert(rxDescCnt
>= rxPktBytes
);
1703 // dont' need the && rxDescCnt > 0 if driver sanity check
// Packet bytes remain: DMA the next fragment into the host buffer.
1705 if (rxPktBytes
> 0) {
1706 rxState
= rxFragWrite
;
1707 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
1709 rxXferLen
= rxPktBytes
;
1711 rxDmaAddr
= rxFragPtr
& 0x3fffffff;
1712 rxDmaData
= rxPacketBufPtr
;
1713 rxDmaLen
= rxXferLen
;
1714 rxDmaFree
= dmaDataFree
;
// Packet fully copied: build the writeback cmdsts value.
1720 rxState
= rxDescWrite
;
1722 //if (rxPktBytes == 0) { /* packet is done */
1723 assert(rxPktBytes
== 0);
1724 DPRINTF(EthernetSM
, "done with receiving packet\n");
1726 cmdsts
|= CMDSTS_OWN
;
1727 cmdsts
&= ~CMDSTS_MORE
;
1728 cmdsts
|= CMDSTS_OK
;
1729 cmdsts
&= 0xffff0000;
1730 cmdsts
+= rxPacket
->length
; //i.e. set CMDSTS_SIZE
1734 * all the driver uses these are for its own stats keeping
1735 * which we don't care about, aren't necessary for
1736 * functionality and doing this would just slow us down.
1737 * if they end up using this in a later version for
1738 * functional purposes, just undef
// Destination-class bits for the filter (self/multicast/broadcast).
1740 if (rxFilterEnable
) {
1741 cmdsts
&= ~CMDSTS_DEST_MASK
;
1742 const EthAddr
&dst
= rxFifoFront()->dst();
1744 cmdsts
|= CMDSTS_DEST_SELF
;
1745 if (dst
->multicast())
1746 cmdsts
|= CMDSTS_DEST_MULTI
;
1747 if (dst
->broadcast())
1748 cmdsts
|= CMDSTS_DEST_MASK
;
// Extended status: verify IP/TCP/UDP checksums and flag errors.
1753 if (extstsEnable
&& ip
) {
1754 extsts
|= EXTSTS_IPPKT
;
1756 if (cksum(ip
) != 0) {
1757 DPRINTF(EthernetCksum
, "Rx IP Checksum Error\n");
1758 extsts
|= EXTSTS_IPERR
;
1763 extsts
|= EXTSTS_TCPPKT
;
1765 if (cksum(tcp
) != 0) {
1766 DPRINTF(EthernetCksum
, "Rx TCP Checksum Error\n");
1767 extsts
|= EXTSTS_TCPERR
;
1771 extsts
|= EXTSTS_UDPPKT
;
1773 if (cksum(udp
) != 0) {
1774 DPRINTF(EthernetCksum
, "Rx UDP Checksum Error\n");
1775 extsts
|= EXTSTS_UDPERR
;
1782 * the driver seems to always receive into desc buffers
1783 * of size 1514, so you never have a pkt that is split
1784 * into multiple descriptors on the receive side, so
1785 * i don't implement that case, hence the assert above.
1788 DPRINTF(EthernetDesc
,
1789 "rxDesc: addr=%08x writeback cmdsts extsts\n",
1790 regs
.rxdp
& 0x3fffffff);
1791 DPRINTF(EthernetDesc
,
1792 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
1793 link
, bufptr
, cmdsts
, extsts
);
// Write cmdsts+extsts back into the in-memory descriptor; the field
// offset depends on the 32- vs 64-bit descriptor layout.
1795 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1796 rxDmaData
= &cmdsts
;
1798 rxDmaAddr
+= offsetof(ns_desc64
, cmdsts
);
1799 rxDmaLen
= sizeof(rxDesc64
.cmdsts
) + sizeof(rxDesc64
.extsts
);
1801 rxDmaAddr
+= offsetof(ns_desc32
, cmdsts
);
1802 rxDmaLen
= sizeof(rxDesc32
.cmdsts
) + sizeof(rxDesc32
.extsts
);
1804 rxDmaFree
= dmaDescFree
;
1807 descDmaWrBytes
+= rxDmaLen
;
// rxFragWrite done: advance the buffer/fragment cursors.
1815 if (rxDmaState
!= dmaIdle
)
1818 rxPacketBufPtr
+= rxXferLen
;
1819 rxFragPtr
+= rxXferLen
;
1820 rxPktBytes
-= rxXferLen
;
1822 rxState
= rxFifoBlock
;
// rxDescWrite done: ownership handed back; post RXOK, and RXDESC if
// the descriptor requested a per-descriptor interrupt.
1826 if (rxDmaState
!= dmaIdle
)
1829 assert(cmdsts
& CMDSTS_OWN
);
1831 assert(rxPacket
== 0);
1832 devIntrPost(ISR_RXOK
);
1834 if (cmdsts
& CMDSTS_INTR
)
1835 devIntrPost(ISR_RXDESC
);
1838 DPRINTF(EthernetSM
, "Halting the RX state machine\n");
1842 rxState
= rxAdvance
;
1847 devIntrPost(ISR_RXIDLE
);
// rxAdvance: follow the link pointer and fetch the next descriptor.
1852 if (rxDmaState
!= dmaIdle
)
1854 rxState
= rxDescRead
;
1858 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1859 rxDmaData
= is64bit
? (void *)&rxDesc64
: (void *)&rxDesc32
;
1860 rxDmaLen
= is64bit
? sizeof(rxDesc64
) : sizeof(rxDesc32
);
1861 rxDmaFree
= dmaDescFree
;
1869 panic("Invalid rxState!");
1872 DPRINTF(EthernetSM
, "entering next rxState=%s\n",
1873 NsRxStateStrings
[rxState
]);
1878 * @todo do we want to schedule a future kick?
1880 DPRINTF(EthernetSM
, "rx state machine exited rxState=%s\n",
1881 NsRxStateStrings
[rxState
]);
// Re-arm the kick event if the device is clocked and none is queued.
1883 if (clock
&& !rxKickEvent
.scheduled())
1884 rxKickEvent
.schedule(rxKickTick
)
;
1890 if (txFifo
.empty()) {
1891 DPRINTF(Ethernet
, "nothing to transmit\n");
1895 DPRINTF(Ethernet
, "Attempt Pkt Transmit: txFifo length=%d\n",
1897 if (interface
->sendPacket(txFifo
.front())) {
1899 if (DTRACE(Ethernet
)) {
1900 IpPtr
ip(txFifo
.front());
1902 DPRINTF(Ethernet
, "ID is %d\n", ip
->id());
1906 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1907 tcp
->sport(), tcp
->dport(), tcp
->seq(),
1914 DDUMP(EthernetData
, txFifo
.front()->data
, txFifo
.front()->length
);
1915 txBytes
+= txFifo
.front()->length
;
1918 DPRINTF(Ethernet
, "Successful Xmit! now txFifoAvail is %d\n",
1923 * normally do a writeback of the descriptor here, and ONLY
1924 * after that is done, send this interrupt. but since our
1925 * stuff never actually fails, just do this interrupt here,
1926 * otherwise the code has to stray from this nice format.
1927 * besides, it's functionally the same.
1929 devIntrPost(ISR_TXOK
);
1932 if (!txFifo
.empty() && !txEvent
.scheduled()) {
1933 DPRINTF(Ethernet
, "reschedule transmit\n");
1934 txEvent
.schedule(curTick
+ retryTime
);
1939 NSGigE::txDmaReadCopy()
1941 assert(txDmaState
== dmaReading
);
1943 physmem
->dma_read((uint8_t *)txDmaData
, txDmaAddr
, txDmaLen
);
1944 txDmaState
= dmaIdle
;
1946 DPRINTF(EthernetDMA
, "tx dma read paddr=%#x len=%d\n",
1947 txDmaAddr
, txDmaLen
);
1948 DDUMP(EthernetDMA
, txDmaData
, txDmaLen
);
1952 NSGigE::doTxDmaRead()
1954 assert(txDmaState
== dmaIdle
|| txDmaState
== dmaReadWaiting
);
1955 txDmaState
= dmaReading
;
1957 if (dmaInterface
&& !txDmaFree
) {
1958 if (dmaInterface
->busy())
1959 txDmaState
= dmaReadWaiting
;
1961 dmaInterface
->doDMA(Read
, txDmaAddr
, txDmaLen
, curTick
,
1962 &txDmaReadEvent
, true);
1966 if (dmaReadDelay
== 0 && dmaReadFactor
== 0.0) {
1971 Tick factor
= ((txDmaLen
+ ULL(63)) >> ULL(6)) * dmaReadFactor
;
1972 Tick start
= curTick
+ dmaReadDelay
+ factor
;
1973 txDmaReadEvent
.schedule(start
);
1978 NSGigE::txDmaReadDone()
1980 assert(txDmaState
== dmaReading
);
1983 // If the receive state machine has a pending DMA, let it go first
1984 if (rxDmaState
== dmaReadWaiting
|| rxDmaState
== dmaWriteWaiting
)
1991 NSGigE::txDmaWriteCopy()
1993 assert(txDmaState
== dmaWriting
);
1995 physmem
->dma_write(txDmaAddr
, (uint8_t *)txDmaData
, txDmaLen
);
1996 txDmaState
= dmaIdle
;
1998 DPRINTF(EthernetDMA
, "tx dma write paddr=%#x len=%d\n",
1999 txDmaAddr
, txDmaLen
);
2000 DDUMP(EthernetDMA
, txDmaData
, txDmaLen
);
2004 NSGigE::doTxDmaWrite()
2006 assert(txDmaState
== dmaIdle
|| txDmaState
== dmaWriteWaiting
);
2007 txDmaState
= dmaWriting
;
2009 if (dmaInterface
&& !txDmaFree
) {
2010 if (dmaInterface
->busy())
2011 txDmaState
= dmaWriteWaiting
;
2013 dmaInterface
->doDMA(WriteInvalidate
, txDmaAddr
, txDmaLen
, curTick
,
2014 &txDmaWriteEvent
, true);
2018 if (dmaWriteDelay
== 0 && dmaWriteFactor
== 0.0) {
2023 Tick factor
= ((txDmaLen
+ ULL(63)) >> ULL(6)) * dmaWriteFactor
;
2024 Tick start
= curTick
+ dmaWriteDelay
+ factor
;
2025 txDmaWriteEvent
.schedule(start
);
2030 NSGigE::txDmaWriteDone()
2032 assert(txDmaState
== dmaWriting
);
2035 // If the receive state machine has a pending DMA, let it go first
2036 if (rxDmaState
== dmaReadWaiting
|| rxDmaState
== dmaWriteWaiting
)
2045 bool is64bit
= (bool)(regs
.config
& CFGR_M64ADDR
);
2047 DPRINTF(EthernetSM
, "transmit kick txState=%s %d-bit\n",
2048 NsTxStateStrings
[txState
], is64bit
? 64 : 32);
2051 uint32_t &cmdsts
= is64bit
? txDesc64
.cmdsts
: txDesc32
.cmdsts
;
2052 uint32_t &extsts
= is64bit
? txDesc64
.extsts
: txDesc32
.extsts
;
2056 if (txKickTick
> curTick
) {
2057 DPRINTF(EthernetSM
, "transmit kick exiting, can't run till %d\n",
2062 // Go to the next state machine clock tick.
2063 txKickTick
= curTick
+ cycles(1);
2066 switch(txDmaState
) {
2067 case dmaReadWaiting
:
2071 case dmaWriteWaiting
:
2079 link
= is64bit
? (Addr
)txDesc64
.link
: (Addr
)txDesc32
.link
;
2080 bufptr
= is64bit
? (Addr
)txDesc64
.bufptr
: (Addr
)txDesc32
.bufptr
;
2084 DPRINTF(EthernetSM
, "Transmit disabled. Nothing to do.\n");
2089 txState
= txDescRefr
;
2091 txDmaAddr
= regs
.txdp
& 0x3fffffff;
2093 is64bit
? (void *)&txDesc64
.link
: (void *)&txDesc32
.link
;
2094 txDmaLen
= is64bit
? sizeof(txDesc64
.link
) : sizeof(txDesc32
.link
);
2095 txDmaFree
= dmaDescFree
;
2098 descDmaRdBytes
+= txDmaLen
;
2104 txState
= txDescRead
;
2106 txDmaAddr
= regs
.txdp
& 0x3fffffff;
2107 txDmaData
= is64bit
? (void *)&txDesc64
: (void *)&txDesc32
;
2108 txDmaLen
= is64bit
? sizeof(txDesc64
) : sizeof(txDesc32
);
2109 txDmaFree
= dmaDescFree
;
2112 descDmaRdBytes
+= txDmaLen
;
2120 if (txDmaState
!= dmaIdle
)
2123 txState
= txAdvance
;
2127 if (txDmaState
!= dmaIdle
)
2130 DPRINTF(EthernetDesc
, "txDesc: addr=%08x read descriptor\n",
2131 regs
.txdp
& 0x3fffffff);
2132 DPRINTF(EthernetDesc
,
2133 "txDesc: link=%#x bufptr=%#x cmdsts=%#08x extsts=%#08x\n",
2134 link
, bufptr
, cmdsts
, extsts
);
2136 if (cmdsts
& CMDSTS_OWN
) {
2137 txState
= txFifoBlock
;
2139 txDescCnt
= cmdsts
& CMDSTS_LEN_MASK
;
2141 devIntrPost(ISR_TXIDLE
);
2149 DPRINTF(EthernetSM
, "****starting the tx of a new packet****\n");
2150 txPacket
= new PacketData(16384);
2151 txPacketBufPtr
= txPacket
->data
;
2154 if (txDescCnt
== 0) {
2155 DPRINTF(EthernetSM
, "the txDescCnt == 0, done with descriptor\n");
2156 if (cmdsts
& CMDSTS_MORE
) {
2157 DPRINTF(EthernetSM
, "there are more descriptors to come\n");
2158 txState
= txDescWrite
;
2160 cmdsts
&= ~CMDSTS_OWN
;
2162 txDmaAddr
= regs
.txdp
& 0x3fffffff;
2163 txDmaData
= &cmdsts
;
2165 txDmaAddr
+= offsetof(ns_desc64
, cmdsts
);
2166 txDmaLen
= sizeof(txDesc64
.cmdsts
);
2168 txDmaAddr
+= offsetof(ns_desc32
, cmdsts
);
2169 txDmaLen
= sizeof(txDesc32
.cmdsts
);
2171 txDmaFree
= dmaDescFree
;
2176 } else { /* this packet is totally done */
2177 DPRINTF(EthernetSM
, "This packet is done, let's wrap it up\n");
2178 /* deal with the the packet that just finished */
2179 if ((regs
.vtcr
& VTCR_PPCHK
) && extstsEnable
) {
2181 if (extsts
& EXTSTS_UDPPKT
) {
2184 udp
->sum(cksum(udp
));
2186 } else if (extsts
& EXTSTS_TCPPKT
) {
2189 tcp
->sum(cksum(tcp
));
2192 if (extsts
& EXTSTS_IPPKT
) {
2199 txPacket
->length
= txPacketBufPtr
- txPacket
->data
;
2200 // this is just because the receive can't handle a
2201 // packet bigger want to make sure
2202 if (txPacket
->length
> 1514)
2203 panic("transmit packet too large, %s > 1514\n",
2209 txFifo
.push(txPacket
);
2213 * this following section is not tqo spec, but
2214 * functionally shouldn't be any different. normally,
2215 * the chip will wait til the transmit has occurred
2216 * before writing back the descriptor because it has
2217 * to wait to see that it was successfully transmitted
2218 * to decide whether to set CMDSTS_OK or not.
2219 * however, in the simulator since it is always
2220 * successfully transmitted, and writing it exactly to
2221 * spec would complicate the code, we just do it here
2224 cmdsts
&= ~CMDSTS_OWN
;
2225 cmdsts
|= CMDSTS_OK
;
2227 DPRINTF(EthernetDesc
,
2228 "txDesc writeback: cmdsts=%08x extsts=%08x\n",
2231 txDmaFree
= dmaDescFree
;
2232 txDmaAddr
= regs
.txdp
& 0x3fffffff;
2233 txDmaData
= &cmdsts
;
2235 txDmaAddr
+= offsetof(ns_desc64
, cmdsts
);
2237 sizeof(txDesc64
.cmdsts
) + sizeof(txDesc64
.extsts
);
2239 txDmaAddr
+= offsetof(ns_desc32
, cmdsts
);
2241 sizeof(txDesc32
.cmdsts
) + sizeof(txDesc32
.extsts
);
2245 descDmaWrBytes
+= txDmaLen
;
2251 DPRINTF(EthernetSM
, "halting TX state machine\n");
2255 txState
= txAdvance
;
2261 DPRINTF(EthernetSM
, "this descriptor isn't done yet\n");
2262 if (!txFifo
.full()) {
2263 txState
= txFragRead
;
2266 * The number of bytes transferred is either whatever
2267 * is left in the descriptor (txDescCnt), or if there
2268 * is not enough room in the fifo, just whatever room
2269 * is left in the fifo
2271 txXferLen
= min
<uint32_t>(txDescCnt
, txFifo
.avail());
2273 txDmaAddr
= txFragPtr
& 0x3fffffff;
2274 txDmaData
= txPacketBufPtr
;
2275 txDmaLen
= txXferLen
;
2276 txDmaFree
= dmaDataFree
;
2281 txState
= txFifoBlock
;
2291 if (txDmaState
!= dmaIdle
)
2294 txPacketBufPtr
+= txXferLen
;
2295 txFragPtr
+= txXferLen
;
2296 txDescCnt
-= txXferLen
;
2297 txFifo
.reserve(txXferLen
);
2299 txState
= txFifoBlock
;
2303 if (txDmaState
!= dmaIdle
)
2306 if (cmdsts
& CMDSTS_INTR
)
2307 devIntrPost(ISR_TXDESC
);
2310 DPRINTF(EthernetSM
, "halting TX state machine\n");
2314 txState
= txAdvance
;
2319 devIntrPost(ISR_TXIDLE
);
2323 if (txDmaState
!= dmaIdle
)
2325 txState
= txDescRead
;
2329 txDmaAddr
= link
& 0x3fffffff;
2330 txDmaData
= is64bit
? (void *)&txDesc64
: (void *)&txDesc32
;
2331 txDmaLen
= is64bit
? sizeof(txDesc64
) : sizeof(txDesc32
);
2332 txDmaFree
= dmaDescFree
;
2340 panic("invalid state");
2343 DPRINTF(EthernetSM
, "entering next txState=%s\n",
2344 NsTxStateStrings
[txState
]);
2349 * @todo do we want to schedule a future kick?
2351 DPRINTF(EthernetSM
, "tx state machine exited txState=%s\n",
2352 NsTxStateStrings
[txState
]);
2354 if (clock
&& !txKickEvent
.scheduled())
2355 txKickEvent
.schedule(txKickTick
);
2359 * Advance the EEPROM state machine
2360 * Called on rising edge of EEPROM clock bit in MEAR
2363 NSGigE::eepromKick()
2365 switch (eepromState
) {
2369 // Wait for start bit
2370 if (regs
.mear
& MEAR_EEDI
) {
2371 // Set up to get 2 opcode bits
2372 eepromState
= eepromGetOpcode
;
2378 case eepromGetOpcode
:
2380 eepromOpcode
+= (regs
.mear
& MEAR_EEDI
) ? 1 : 0;
2383 // Done getting opcode
2384 if (eepromBitsToRx
== 0) {
2385 if (eepromOpcode
!= EEPROM_READ
)
2386 panic("only EEPROM reads are implemented!");
2388 // Set up to get address
2389 eepromState
= eepromGetAddress
;
2395 case eepromGetAddress
:
2396 eepromAddress
<<= 1;
2397 eepromAddress
+= (regs
.mear
& MEAR_EEDI
) ? 1 : 0;
2400 // Done getting address
2401 if (eepromBitsToRx
== 0) {
2403 if (eepromAddress
>= EEPROM_SIZE
)
2404 panic("EEPROM read access out of range!");
2406 switch (eepromAddress
) {
2408 case EEPROM_PMATCH2_ADDR
:
2409 eepromData
= rom
.perfectMatch
[5];
2411 eepromData
+= rom
.perfectMatch
[4];
2414 case EEPROM_PMATCH1_ADDR
:
2415 eepromData
= rom
.perfectMatch
[3];
2417 eepromData
+= rom
.perfectMatch
[2];
2420 case EEPROM_PMATCH0_ADDR
:
2421 eepromData
= rom
.perfectMatch
[1];
2423 eepromData
+= rom
.perfectMatch
[0];
2427 panic("FreeBSD driver only uses EEPROM to read PMATCH!");
2429 // Set up to read data
2430 eepromState
= eepromRead
;
2431 eepromBitsToRx
= 16;
2433 // Clear data in bit
2434 regs
.mear
&= ~MEAR_EEDI
;
2439 // Clear Data Out bit
2440 regs
.mear
&= ~MEAR_EEDO
;
2441 // Set bit to value of current EEPROM bit
2442 regs
.mear
|= (eepromData
& 0x8000) ? MEAR_EEDO
: 0x0;
2448 if (eepromBitsToRx
== 0) {
2449 eepromState
= eepromStart
;
2454 panic("invalid EEPROM state");
2460 NSGigE::transferDone()
2462 if (txFifo
.empty()) {
2463 DPRINTF(Ethernet
, "transfer complete: txFifo empty...nothing to do\n");
2467 DPRINTF(Ethernet
, "transfer complete: data in txFifo...schedule xmit\n");
2469 if (txEvent
.scheduled())
2470 txEvent
.reschedule(curTick
+ cycles(1));
2472 txEvent
.schedule(curTick
+ cycles(1));
2476 NSGigE::rxFilter(const PacketPtr
&packet
)
2478 EthPtr eth
= packet
;
2482 const EthAddr
&dst
= eth
->dst();
2483 if (dst
.unicast()) {
2484 // If we're accepting all unicast addresses
2488 // If we make a perfect match
2489 if (acceptPerfect
&& dst
== rom
.perfectMatch
)
2492 if (acceptArp
&& eth
->type() == ETH_TYPE_ARP
)
2495 } else if (dst
.broadcast()) {
2496 // if we're accepting broadcasts
2497 if (acceptBroadcast
)
2500 } else if (dst
.multicast()) {
2501 // if we're accepting all multicasts
2502 if (acceptMulticast
)
2505 // Multicast hashing faked - all packets accepted
2506 if (multicastHashEnable
)
2511 DPRINTF(Ethernet
, "rxFilter drop\n");
2512 DDUMP(EthernetData
, packet
->data
, packet
->length
);
2519 NSGigE::recvPacket(PacketPtr packet
)
2521 rxBytes
+= packet
->length
;
2524 DPRINTF(Ethernet
, "Receiving packet from wire, rxFifoAvail=%d\n",
2528 DPRINTF(Ethernet
, "receive disabled...packet dropped\n");
2532 if (!rxFilterEnable
) {
2534 "receive packet filtering disabled . . . packet dropped\n");
2538 if (rxFilter(packet
)) {
2539 DPRINTF(Ethernet
, "packet filtered...dropped\n");
2543 if (rxFifo
.avail() < packet
->length
) {
2549 "packet won't fit in receive buffer...pkt ID %d dropped\n",
2552 DPRINTF(Ethernet
, "Seq=%d\n", tcp
->seq());
2557 devIntrPost(ISR_RXORN
);
2561 rxFifo
.push(packet
);
2567 //=====================================================================
2571 NSGigE::serialize(ostream
&os
)
2573 // Serialize the PciDev base class
2574 PciDev::serialize(os
);
2577 * Finalize any DMA events now.
2579 if (rxDmaReadEvent
.scheduled())
2581 if (rxDmaWriteEvent
.scheduled())
2583 if (txDmaReadEvent
.scheduled())
2585 if (txDmaWriteEvent
.scheduled())
2589 * Serialize the device registers
2591 SERIALIZE_SCALAR(regs
.command
);
2592 SERIALIZE_SCALAR(regs
.config
);
2593 SERIALIZE_SCALAR(regs
.mear
);
2594 SERIALIZE_SCALAR(regs
.ptscr
);
2595 SERIALIZE_SCALAR(regs
.isr
);
2596 SERIALIZE_SCALAR(regs
.imr
);
2597 SERIALIZE_SCALAR(regs
.ier
);
2598 SERIALIZE_SCALAR(regs
.ihr
);
2599 SERIALIZE_SCALAR(regs
.txdp
);
2600 SERIALIZE_SCALAR(regs
.txdp_hi
);
2601 SERIALIZE_SCALAR(regs
.txcfg
);
2602 SERIALIZE_SCALAR(regs
.gpior
);
2603 SERIALIZE_SCALAR(regs
.rxdp
);
2604 SERIALIZE_SCALAR(regs
.rxdp_hi
);
2605 SERIALIZE_SCALAR(regs
.rxcfg
);
2606 SERIALIZE_SCALAR(regs
.pqcr
);
2607 SERIALIZE_SCALAR(regs
.wcsr
);
2608 SERIALIZE_SCALAR(regs
.pcr
);
2609 SERIALIZE_SCALAR(regs
.rfcr
);
2610 SERIALIZE_SCALAR(regs
.rfdr
);
2611 SERIALIZE_SCALAR(regs
.brar
);
2612 SERIALIZE_SCALAR(regs
.brdr
);
2613 SERIALIZE_SCALAR(regs
.srr
);
2614 SERIALIZE_SCALAR(regs
.mibc
);
2615 SERIALIZE_SCALAR(regs
.vrcr
);
2616 SERIALIZE_SCALAR(regs
.vtcr
);
2617 SERIALIZE_SCALAR(regs
.vdr
);
2618 SERIALIZE_SCALAR(regs
.ccsr
);
2619 SERIALIZE_SCALAR(regs
.tbicr
);
2620 SERIALIZE_SCALAR(regs
.tbisr
);
2621 SERIALIZE_SCALAR(regs
.tanar
);
2622 SERIALIZE_SCALAR(regs
.tanlpar
);
2623 SERIALIZE_SCALAR(regs
.taner
);
2624 SERIALIZE_SCALAR(regs
.tesr
);
2626 SERIALIZE_ARRAY(rom
.perfectMatch
, ETH_ADDR_LEN
);
2627 SERIALIZE_ARRAY(rom
.filterHash
, FHASH_SIZE
);
2629 SERIALIZE_SCALAR(ioEnable
);
2632 * Serialize the data Fifos
2634 rxFifo
.serialize("rxFifo", os
);
2635 txFifo
.serialize("txFifo", os
);
2638 * Serialize the various helper variables
2640 bool txPacketExists
= txPacket
;
2641 SERIALIZE_SCALAR(txPacketExists
);
2642 if (txPacketExists
) {
2643 txPacket
->length
= txPacketBufPtr
- txPacket
->data
;
2644 txPacket
->serialize("txPacket", os
);
2645 uint32_t txPktBufPtr
= (uint32_t) (txPacketBufPtr
- txPacket
->data
);
2646 SERIALIZE_SCALAR(txPktBufPtr
);
2649 bool rxPacketExists
= rxPacket
;
2650 SERIALIZE_SCALAR(rxPacketExists
);
2651 if (rxPacketExists
) {
2652 rxPacket
->serialize("rxPacket", os
);
2653 uint32_t rxPktBufPtr
= (uint32_t) (rxPacketBufPtr
- rxPacket
->data
);
2654 SERIALIZE_SCALAR(rxPktBufPtr
);
2657 SERIALIZE_SCALAR(txXferLen
);
2658 SERIALIZE_SCALAR(rxXferLen
);
2661 * Serialize Cached Descriptors
2663 SERIALIZE_SCALAR(rxDesc64
.link
);
2664 SERIALIZE_SCALAR(rxDesc64
.bufptr
);
2665 SERIALIZE_SCALAR(rxDesc64
.cmdsts
);
2666 SERIALIZE_SCALAR(rxDesc64
.extsts
);
2667 SERIALIZE_SCALAR(txDesc64
.link
);
2668 SERIALIZE_SCALAR(txDesc64
.bufptr
);
2669 SERIALIZE_SCALAR(txDesc64
.cmdsts
);
2670 SERIALIZE_SCALAR(txDesc64
.extsts
);
2671 SERIALIZE_SCALAR(rxDesc32
.link
);
2672 SERIALIZE_SCALAR(rxDesc32
.bufptr
);
2673 SERIALIZE_SCALAR(rxDesc32
.cmdsts
);
2674 SERIALIZE_SCALAR(rxDesc32
.extsts
);
2675 SERIALIZE_SCALAR(txDesc32
.link
);
2676 SERIALIZE_SCALAR(txDesc32
.bufptr
);
2677 SERIALIZE_SCALAR(txDesc32
.cmdsts
);
2678 SERIALIZE_SCALAR(txDesc32
.extsts
);
2679 SERIALIZE_SCALAR(extstsEnable
);
2682 * Serialize tx state machine
2684 int txState
= this->txState
;
2685 SERIALIZE_SCALAR(txState
);
2686 SERIALIZE_SCALAR(txEnable
);
2687 SERIALIZE_SCALAR(CTDD
);
2688 SERIALIZE_SCALAR(txFragPtr
);
2689 SERIALIZE_SCALAR(txDescCnt
);
2690 int txDmaState
= this->txDmaState
;
2691 SERIALIZE_SCALAR(txDmaState
);
2692 SERIALIZE_SCALAR(txKickTick
);
2695 * Serialize rx state machine
2697 int rxState
= this->rxState
;
2698 SERIALIZE_SCALAR(rxState
);
2699 SERIALIZE_SCALAR(rxEnable
);
2700 SERIALIZE_SCALAR(CRDD
);
2701 SERIALIZE_SCALAR(rxPktBytes
);
2702 SERIALIZE_SCALAR(rxFragPtr
);
2703 SERIALIZE_SCALAR(rxDescCnt
);
2704 int rxDmaState
= this->rxDmaState
;
2705 SERIALIZE_SCALAR(rxDmaState
);
2706 SERIALIZE_SCALAR(rxKickTick
);
2709 * Serialize EEPROM state machine
2711 int eepromState
= this->eepromState
;
2712 SERIALIZE_SCALAR(eepromState
);
2713 SERIALIZE_SCALAR(eepromClk
);
2714 SERIALIZE_SCALAR(eepromBitsToRx
);
2715 SERIALIZE_SCALAR(eepromOpcode
);
2716 SERIALIZE_SCALAR(eepromAddress
);
2717 SERIALIZE_SCALAR(eepromData
);
2720 * If there's a pending transmit, store the time so we can
2721 * reschedule it later
2723 Tick transmitTick
= txEvent
.scheduled() ? txEvent
.when() - curTick
: 0;
2724 SERIALIZE_SCALAR(transmitTick
);
2727 * receive address filter settings
2729 SERIALIZE_SCALAR(rxFilterEnable
);
2730 SERIALIZE_SCALAR(acceptBroadcast
);
2731 SERIALIZE_SCALAR(acceptMulticast
);
2732 SERIALIZE_SCALAR(acceptUnicast
);
2733 SERIALIZE_SCALAR(acceptPerfect
);
2734 SERIALIZE_SCALAR(acceptArp
);
2735 SERIALIZE_SCALAR(multicastHashEnable
);
2738 * Keep track of pending interrupt status.
2740 SERIALIZE_SCALAR(intrTick
);
2741 SERIALIZE_SCALAR(cpuPendingIntr
);
2742 Tick intrEventTick
= 0;
2744 intrEventTick
= intrEvent
->when();
2745 SERIALIZE_SCALAR(intrEventTick
);
2750 NSGigE::unserialize(Checkpoint
*cp
, const std::string
§ion
)
2752 // Unserialize the PciDev base class
2753 PciDev::unserialize(cp
, section
);
2755 UNSERIALIZE_SCALAR(regs
.command
);
2756 UNSERIALIZE_SCALAR(regs
.config
);
2757 UNSERIALIZE_SCALAR(regs
.mear
);
2758 UNSERIALIZE_SCALAR(regs
.ptscr
);
2759 UNSERIALIZE_SCALAR(regs
.isr
);
2760 UNSERIALIZE_SCALAR(regs
.imr
);
2761 UNSERIALIZE_SCALAR(regs
.ier
);
2762 UNSERIALIZE_SCALAR(regs
.ihr
);
2763 UNSERIALIZE_SCALAR(regs
.txdp
);
2764 UNSERIALIZE_SCALAR(regs
.txdp_hi
);
2765 UNSERIALIZE_SCALAR(regs
.txcfg
);
2766 UNSERIALIZE_SCALAR(regs
.gpior
);
2767 UNSERIALIZE_SCALAR(regs
.rxdp
);
2768 UNSERIALIZE_SCALAR(regs
.rxdp_hi
);
2769 UNSERIALIZE_SCALAR(regs
.rxcfg
);
2770 UNSERIALIZE_SCALAR(regs
.pqcr
);
2771 UNSERIALIZE_SCALAR(regs
.wcsr
);
2772 UNSERIALIZE_SCALAR(regs
.pcr
);
2773 UNSERIALIZE_SCALAR(regs
.rfcr
);
2774 UNSERIALIZE_SCALAR(regs
.rfdr
);
2775 UNSERIALIZE_SCALAR(regs
.brar
);
2776 UNSERIALIZE_SCALAR(regs
.brdr
);
2777 UNSERIALIZE_SCALAR(regs
.srr
);
2778 UNSERIALIZE_SCALAR(regs
.mibc
);
2779 UNSERIALIZE_SCALAR(regs
.vrcr
);
2780 UNSERIALIZE_SCALAR(regs
.vtcr
);
2781 UNSERIALIZE_SCALAR(regs
.vdr
);
2782 UNSERIALIZE_SCALAR(regs
.ccsr
);
2783 UNSERIALIZE_SCALAR(regs
.tbicr
);
2784 UNSERIALIZE_SCALAR(regs
.tbisr
);
2785 UNSERIALIZE_SCALAR(regs
.tanar
);
2786 UNSERIALIZE_SCALAR(regs
.tanlpar
);
2787 UNSERIALIZE_SCALAR(regs
.taner
);
2788 UNSERIALIZE_SCALAR(regs
.tesr
);
2790 UNSERIALIZE_ARRAY(rom
.perfectMatch
, ETH_ADDR_LEN
);
2791 UNSERIALIZE_ARRAY(rom
.filterHash
, FHASH_SIZE
);
2793 UNSERIALIZE_SCALAR(ioEnable
);
2796 * unserialize the data fifos
2798 rxFifo
.unserialize("rxFifo", cp
, section
);
2799 txFifo
.unserialize("txFifo", cp
, section
);
2802 * unserialize the various helper variables
2804 bool txPacketExists
;
2805 UNSERIALIZE_SCALAR(txPacketExists
);
2806 if (txPacketExists
) {
2807 txPacket
= new PacketData(16384);
2808 txPacket
->unserialize("txPacket", cp
, section
);
2809 uint32_t txPktBufPtr
;
2810 UNSERIALIZE_SCALAR(txPktBufPtr
);
2811 txPacketBufPtr
= (uint8_t *) txPacket
->data
+ txPktBufPtr
;
2815 bool rxPacketExists
;
2816 UNSERIALIZE_SCALAR(rxPacketExists
);
2818 if (rxPacketExists
) {
2819 rxPacket
= new PacketData(16384);
2820 rxPacket
->unserialize("rxPacket", cp
, section
);
2821 uint32_t rxPktBufPtr
;
2822 UNSERIALIZE_SCALAR(rxPktBufPtr
);
2823 rxPacketBufPtr
= (uint8_t *) rxPacket
->data
+ rxPktBufPtr
;
2827 UNSERIALIZE_SCALAR(txXferLen
);
2828 UNSERIALIZE_SCALAR(rxXferLen
);
2831 * Unserialize Cached Descriptors
2833 UNSERIALIZE_SCALAR(rxDesc64
.link
);
2834 UNSERIALIZE_SCALAR(rxDesc64
.bufptr
);
2835 UNSERIALIZE_SCALAR(rxDesc64
.cmdsts
);
2836 UNSERIALIZE_SCALAR(rxDesc64
.extsts
);
2837 UNSERIALIZE_SCALAR(txDesc64
.link
);
2838 UNSERIALIZE_SCALAR(txDesc64
.bufptr
);
2839 UNSERIALIZE_SCALAR(txDesc64
.cmdsts
);
2840 UNSERIALIZE_SCALAR(txDesc64
.extsts
);
2841 UNSERIALIZE_SCALAR(rxDesc32
.link
);
2842 UNSERIALIZE_SCALAR(rxDesc32
.bufptr
);
2843 UNSERIALIZE_SCALAR(rxDesc32
.cmdsts
);
2844 UNSERIALIZE_SCALAR(rxDesc32
.extsts
);
2845 UNSERIALIZE_SCALAR(txDesc32
.link
);
2846 UNSERIALIZE_SCALAR(txDesc32
.bufptr
);
2847 UNSERIALIZE_SCALAR(txDesc32
.cmdsts
);
2848 UNSERIALIZE_SCALAR(txDesc32
.extsts
);
2849 UNSERIALIZE_SCALAR(extstsEnable
);
2852 * unserialize tx state machine
2855 UNSERIALIZE_SCALAR(txState
);
2856 this->txState
= (TxState
) txState
;
2857 UNSERIALIZE_SCALAR(txEnable
);
2858 UNSERIALIZE_SCALAR(CTDD
);
2859 UNSERIALIZE_SCALAR(txFragPtr
);
2860 UNSERIALIZE_SCALAR(txDescCnt
);
2862 UNSERIALIZE_SCALAR(txDmaState
);
2863 this->txDmaState
= (DmaState
) txDmaState
;
2864 UNSERIALIZE_SCALAR(txKickTick
);
2866 txKickEvent
.schedule(txKickTick
);
2869 * unserialize rx state machine
2872 UNSERIALIZE_SCALAR(rxState
);
2873 this->rxState
= (RxState
) rxState
;
2874 UNSERIALIZE_SCALAR(rxEnable
);
2875 UNSERIALIZE_SCALAR(CRDD
);
2876 UNSERIALIZE_SCALAR(rxPktBytes
);
2877 UNSERIALIZE_SCALAR(rxFragPtr
);
2878 UNSERIALIZE_SCALAR(rxDescCnt
);
2880 UNSERIALIZE_SCALAR(rxDmaState
);
2881 this->rxDmaState
= (DmaState
) rxDmaState
;
2882 UNSERIALIZE_SCALAR(rxKickTick
);
2884 rxKickEvent
.schedule(rxKickTick
);
2887 * Unserialize EEPROM state machine
2890 UNSERIALIZE_SCALAR(eepromState
);
2891 this->eepromState
= (EEPROMState
) eepromState
;
2892 UNSERIALIZE_SCALAR(eepromClk
);
2893 UNSERIALIZE_SCALAR(eepromBitsToRx
);
2894 UNSERIALIZE_SCALAR(eepromOpcode
);
2895 UNSERIALIZE_SCALAR(eepromAddress
);
2896 UNSERIALIZE_SCALAR(eepromData
);
2899 * If there's a pending transmit, reschedule it now
2902 UNSERIALIZE_SCALAR(transmitTick
);
2904 txEvent
.schedule(curTick
+ transmitTick
);
2907 * unserialize receive address filter settings
2909 UNSERIALIZE_SCALAR(rxFilterEnable
);
2910 UNSERIALIZE_SCALAR(acceptBroadcast
);
2911 UNSERIALIZE_SCALAR(acceptMulticast
);
2912 UNSERIALIZE_SCALAR(acceptUnicast
);
2913 UNSERIALIZE_SCALAR(acceptPerfect
);
2914 UNSERIALIZE_SCALAR(acceptArp
);
2915 UNSERIALIZE_SCALAR(multicastHashEnable
);
2918 * Keep track of pending interrupt status.
2920 UNSERIALIZE_SCALAR(intrTick
);
2921 UNSERIALIZE_SCALAR(cpuPendingIntr
);
2923 UNSERIALIZE_SCALAR(intrEventTick
);
2924 if (intrEventTick
) {
2925 intrEvent
= new IntrEvent(this, true);
2926 intrEvent
->schedule(intrEventTick
);
2930 * re-add addrRanges to bus bridges
2933 pioInterface
->addAddrRange(RangeSize(BARAddrs
[0], BARSize
[0]));
2934 pioInterface
->addAddrRange(RangeSize(BARAddrs
[1], BARSize
[1]));
2939 NSGigE::cacheAccess(MemReqPtr
&req
)
2941 DPRINTF(EthernetPIO
, "timing access to paddr=%#x (daddr=%#x)\n",
2942 req
->paddr
, req
->paddr
& 0xfff);
2944 return curTick
+ pioLatency
;
2947 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt
)
2949 SimObjectParam
<EtherInt
*> peer
;
2950 SimObjectParam
<NSGigE
*> device
;
2952 END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt
)
2954 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt
)
2956 INIT_PARAM_DFLT(peer
, "peer interface", NULL
),
2957 INIT_PARAM(device
, "Ethernet device of this interface")
2959 END_INIT_SIM_OBJECT_PARAMS(NSGigEInt
)
2961 CREATE_SIM_OBJECT(NSGigEInt
)
2963 NSGigEInt
*dev_int
= new NSGigEInt(getInstanceName(), device
);
2965 EtherInt
*p
= (EtherInt
*)peer
;
2967 dev_int
->setPeer(p
);
2968 p
->setPeer(dev_int
);
2974 REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt
)
2977 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE
)
2982 SimObjectParam
<MemoryController
*> mmu
;
2983 SimObjectParam
<PhysicalMemory
*> physmem
;
2984 SimObjectParam
<PciConfigAll
*> configspace
;
2985 SimObjectParam
<PciConfigData
*> configdata
;
2986 SimObjectParam
<Platform
*> platform
;
2987 Param
<uint32_t> pci_bus
;
2988 Param
<uint32_t> pci_dev
;
2989 Param
<uint32_t> pci_func
;
2991 SimObjectParam
<HierParams
*> hier
;
2992 SimObjectParam
<Bus
*> pio_bus
;
2993 SimObjectParam
<Bus
*> dma_bus
;
2994 SimObjectParam
<Bus
*> payload_bus
;
2995 Param
<bool> dma_desc_free
;
2996 Param
<bool> dma_data_free
;
2997 Param
<Tick
> dma_read_delay
;
2998 Param
<Tick
> dma_write_delay
;
2999 Param
<Tick
> dma_read_factor
;
3000 Param
<Tick
> dma_write_factor
;
3001 Param
<bool> dma_no_allocate
;
3002 Param
<Tick
> pio_latency
;
3003 Param
<Tick
> intr_delay
;
3005 Param
<Tick
> rx_delay
;
3006 Param
<Tick
> tx_delay
;
3007 Param
<uint32_t> rx_fifo_size
;
3008 Param
<uint32_t> tx_fifo_size
;
3010 Param
<bool> rx_filter
;
3011 Param
<string
> hardware_address
;
3012 Param
<bool> rx_thread
;
3013 Param
<bool> tx_thread
;
3016 END_DECLARE_SIM_OBJECT_PARAMS(NSGigE
)
3018 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE
)
3020 INIT_PARAM(clock
, "State machine processor frequency"),
3022 INIT_PARAM(addr
, "Device Address"),
3023 INIT_PARAM(mmu
, "Memory Controller"),
3024 INIT_PARAM(physmem
, "Physical Memory"),
3025 INIT_PARAM(configspace
, "PCI Configspace"),
3026 INIT_PARAM(configdata
, "PCI Config data"),
3027 INIT_PARAM(platform
, "Platform"),
3028 INIT_PARAM(pci_bus
, "PCI bus"),
3029 INIT_PARAM(pci_dev
, "PCI device number"),
3030 INIT_PARAM(pci_func
, "PCI function code"),
3032 INIT_PARAM(hier
, "Hierarchy global variables"),
3033 INIT_PARAM(pio_bus
, ""),
3034 INIT_PARAM(dma_bus
, ""),
3035 INIT_PARAM(payload_bus
, "The IO Bus to attach to for payload"),
3036 INIT_PARAM(dma_desc_free
, "DMA of Descriptors is free"),
3037 INIT_PARAM(dma_data_free
, "DMA of Data is free"),
3038 INIT_PARAM(dma_read_delay
, "fixed delay for dma reads"),
3039 INIT_PARAM(dma_write_delay
, "fixed delay for dma writes"),
3040 INIT_PARAM(dma_read_factor
, "multiplier for dma reads"),
3041 INIT_PARAM(dma_write_factor
, "multiplier for dma writes"),
3042 INIT_PARAM(dma_no_allocate
, "Should DMA reads allocate cache lines"),
3043 INIT_PARAM(pio_latency
, "Programmed IO latency in bus cycles"),
3044 INIT_PARAM(intr_delay
, "Interrupt Delay in microseconds"),
3046 INIT_PARAM(rx_delay
, "Receive Delay"),
3047 INIT_PARAM(tx_delay
, "Transmit Delay"),
3048 INIT_PARAM(rx_fifo_size
, "max size in bytes of rxFifo"),
3049 INIT_PARAM(tx_fifo_size
, "max size in bytes of txFifo"),
3051 INIT_PARAM(rx_filter
, "Enable Receive Filter"),
3052 INIT_PARAM(hardware_address
, "Ethernet Hardware Address"),
3053 INIT_PARAM(rx_thread
, ""),
3054 INIT_PARAM(tx_thread
, ""),
3057 END_INIT_SIM_OBJECT_PARAMS(NSGigE
)
3060 CREATE_SIM_OBJECT(NSGigE
)
3062 NSGigE::Params
*params
= new NSGigE::Params
;
3064 params
->name
= getInstanceName();
3066 params
->clock
= clock
;
3069 params
->pmem
= physmem
;
3070 params
->configSpace
= configspace
;
3071 params
->configData
= configdata
;
3072 params
->plat
= platform
;
3073 params
->busNum
= pci_bus
;
3074 params
->deviceNum
= pci_dev
;
3075 params
->functionNum
= pci_func
;
3077 params
->hier
= hier
;
3078 params
->pio_bus
= pio_bus
;
3079 params
->header_bus
= dma_bus
;
3080 params
->payload_bus
= payload_bus
;
3081 params
->dma_desc_free
= dma_desc_free
;
3082 params
->dma_data_free
= dma_data_free
;
3083 params
->dma_read_delay
= dma_read_delay
;
3084 params
->dma_write_delay
= dma_write_delay
;
3085 params
->dma_read_factor
= dma_read_factor
;
3086 params
->dma_write_factor
= dma_write_factor
;
3087 params
->dma_no_allocate
= dma_no_allocate
;
3088 params
->pio_latency
= pio_latency
;
3089 params
->intr_delay
= intr_delay
;
3091 params
->rx_delay
= rx_delay
;
3092 params
->tx_delay
= tx_delay
;
3093 params
->rx_fifo_size
= rx_fifo_size
;
3094 params
->tx_fifo_size
= tx_fifo_size
;
3096 params
->rx_filter
= rx_filter
;
3097 params
->eaddr
= hardware_address
;
3098 params
->rx_thread
= rx_thread
;
3099 params
->tx_thread
= tx_thread
;
3102 return new NSGigE(params
);
3105 REGISTER_SIM_OBJECT("NSGigE", NSGigE
)