9010850ab8a67cc1e09c497d768cbf1540cc0022
2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 * Device module for modelling the National Semiconductor
31 * DP83820 ethernet controller. Does not support priority queueing
37 #include "base/inet.hh"
38 #include "cpu/exec_context.hh"
39 #include "dev/etherlink.hh"
40 #include "dev/ns_gige.hh"
41 #include "dev/pciconfigall.hh"
42 #include "mem/bus/bus.hh"
43 #include "mem/bus/dma_interface.hh"
44 #include "mem/bus/pio_interface.hh"
45 #include "mem/bus/pio_interface_impl.hh"
46 #include "mem/functional/memory_control.hh"
47 #include "mem/functional/physical.hh"
48 #include "sim/builder.hh"
49 #include "sim/debug.hh"
50 #include "sim/host.hh"
51 #include "sim/stats.hh"
52 #include "targetarch/vtophys.hh"
54 const char *NsRxStateStrings
[] =
65 const char *NsTxStateStrings
[] =
76 const char *NsDmaState
[] =
88 ///////////////////////////////////////////////////////////////////////
92 NSGigE::NSGigE(Params
*p
)
93 : PciDev(p
), ioEnable(false),
94 txFifo(p
->tx_fifo_size
), rxFifo(p
->rx_fifo_size
),
95 txPacket(0), rxPacket(0), txPacketBufPtr(NULL
), rxPacketBufPtr(NULL
),
96 txXferLen(0), rxXferLen(0), clock(p
->clock
),
97 txState(txIdle
), txEnable(false), CTDD(false),
98 txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle
), rxState(rxIdle
),
99 rxEnable(false), CRDD(false), rxPktBytes(0),
100 rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle
), extstsEnable(false),
101 eepromState(eepromStart
), rxDmaReadEvent(this), rxDmaWriteEvent(this),
102 txDmaReadEvent(this), txDmaWriteEvent(this),
103 dmaDescFree(p
->dma_desc_free
), dmaDataFree(p
->dma_data_free
),
104 txDelay(p
->tx_delay
), rxDelay(p
->rx_delay
),
105 rxKickTick(0), rxKickEvent(this), txKickTick(0), txKickEvent(this),
106 txEvent(this), rxFilterEnable(p
->rx_filter
), acceptBroadcast(false),
107 acceptMulticast(false), acceptUnicast(false),
108 acceptPerfect(false), acceptArp(false), multicastHashEnable(false),
109 physmem(p
->pmem
), intrTick(0), cpuPendingIntr(false),
110 intrEvent(0), interface(0)
113 pioInterface
= newPioInterface(name() + ".pio", p
->hier
,
115 &NSGigE::cacheAccess
);
116 pioLatency
= p
->pio_latency
* p
->pio_bus
->clockRate
;
121 dmaInterface
= new DMAInterface
<Bus
>(name() + ".dma",
126 dmaInterface
= new DMAInterface
<Bus
>(name() + ".dma",
130 } else if (p
->payload_bus
)
131 panic("Must define a header bus if defining a payload bus");
133 pioDelayWrite
= p
->pio_delay_write
&& pioInterface
;
135 intrDelay
= p
->intr_delay
;
136 dmaReadDelay
= p
->dma_read_delay
;
137 dmaWriteDelay
= p
->dma_write_delay
;
138 dmaReadFactor
= p
->dma_read_factor
;
139 dmaWriteFactor
= p
->dma_write_factor
;
142 memcpy(&rom
.perfectMatch
, p
->eaddr
.bytes(), ETH_ADDR_LEN
);
144 memset(&rxDesc32
, 0, sizeof(rxDesc32
));
145 memset(&txDesc32
, 0, sizeof(txDesc32
));
146 memset(&rxDesc64
, 0, sizeof(rxDesc64
));
147 memset(&txDesc64
, 0, sizeof(txDesc64
));
157 .name(name() + ".txBytes")
158 .desc("Bytes Transmitted")
163 .name(name() + ".rxBytes")
164 .desc("Bytes Received")
169 .name(name() + ".txPackets")
170 .desc("Number of Packets Transmitted")
175 .name(name() + ".rxPackets")
176 .desc("Number of Packets Received")
181 .name(name() + ".txIpChecksums")
182 .desc("Number of tx IP Checksums done by device")
188 .name(name() + ".rxIpChecksums")
189 .desc("Number of rx IP Checksums done by device")
195 .name(name() + ".txTcpChecksums")
196 .desc("Number of tx TCP Checksums done by device")
202 .name(name() + ".rxTcpChecksums")
203 .desc("Number of rx TCP Checksums done by device")
209 .name(name() + ".txUdpChecksums")
210 .desc("Number of tx UDP Checksums done by device")
216 .name(name() + ".rxUdpChecksums")
217 .desc("Number of rx UDP Checksums done by device")
223 .name(name() + ".descDMAReads")
224 .desc("Number of descriptors the device read w/ DMA")
229 .name(name() + ".descDMAWrites")
230 .desc("Number of descriptors the device wrote w/ DMA")
235 .name(name() + ".descDmaReadBytes")
236 .desc("number of descriptor bytes read w/ DMA")
241 .name(name() + ".descDmaWriteBytes")
242 .desc("number of descriptor bytes write w/ DMA")
247 .name(name() + ".txBandwidth")
248 .desc("Transmit Bandwidth (bits/s)")
254 .name(name() + ".rxBandwidth")
255 .desc("Receive Bandwidth (bits/s)")
261 .name(name() + ".totBandwidth")
262 .desc("Total Bandwidth (bits/s)")
268 .name(name() + ".totPackets")
269 .desc("Total Packets")
275 .name(name() + ".totBytes")
282 .name(name() + ".totPPS")
283 .desc("Total Tranmission Rate (packets/s)")
289 .name(name() + ".txPPS")
290 .desc("Packet Tranmission Rate (packets/s)")
296 .name(name() + ".rxPPS")
297 .desc("Packet Reception Rate (packets/s)")
303 .name(name() + ".postedSwi")
304 .desc("number of software interrupts posted to CPU")
309 .name(name() + ".totalSwi")
310 .desc("total number of Swi written to ISR")
315 .name(name() + ".coalescedSwi")
316 .desc("average number of Swi's coalesced into each post")
321 .name(name() + ".postedRxIdle")
322 .desc("number of rxIdle interrupts posted to CPU")
327 .name(name() + ".totalRxIdle")
328 .desc("total number of RxIdle written to ISR")
333 .name(name() + ".coalescedRxIdle")
334 .desc("average number of RxIdle's coalesced into each post")
339 .name(name() + ".postedRxOk")
340 .desc("number of RxOk interrupts posted to CPU")
345 .name(name() + ".totalRxOk")
346 .desc("total number of RxOk written to ISR")
351 .name(name() + ".coalescedRxOk")
352 .desc("average number of RxOk's coalesced into each post")
357 .name(name() + ".postedRxDesc")
358 .desc("number of RxDesc interrupts posted to CPU")
363 .name(name() + ".totalRxDesc")
364 .desc("total number of RxDesc written to ISR")
369 .name(name() + ".coalescedRxDesc")
370 .desc("average number of RxDesc's coalesced into each post")
375 .name(name() + ".postedTxOk")
376 .desc("number of TxOk interrupts posted to CPU")
381 .name(name() + ".totalTxOk")
382 .desc("total number of TxOk written to ISR")
387 .name(name() + ".coalescedTxOk")
388 .desc("average number of TxOk's coalesced into each post")
393 .name(name() + ".postedTxIdle")
394 .desc("number of TxIdle interrupts posted to CPU")
399 .name(name() + ".totalTxIdle")
400 .desc("total number of TxIdle written to ISR")
405 .name(name() + ".coalescedTxIdle")
406 .desc("average number of TxIdle's coalesced into each post")
411 .name(name() + ".postedTxDesc")
412 .desc("number of TxDesc interrupts posted to CPU")
417 .name(name() + ".totalTxDesc")
418 .desc("total number of TxDesc written to ISR")
423 .name(name() + ".coalescedTxDesc")
424 .desc("average number of TxDesc's coalesced into each post")
429 .name(name() + ".postedRxOrn")
430 .desc("number of RxOrn posted to CPU")
435 .name(name() + ".totalRxOrn")
436 .desc("total number of RxOrn written to ISR")
441 .name(name() + ".coalescedRxOrn")
442 .desc("average number of RxOrn's coalesced into each post")
447 .name(name() + ".coalescedTotal")
448 .desc("average number of interrupts coalesced into each post")
453 .name(name() + ".postedInterrupts")
454 .desc("number of posts to CPU")
459 .name(name() + ".droppedPackets")
460 .desc("number of packets dropped")
464 coalescedSwi
= totalSwi
/ postedInterrupts
;
465 coalescedRxIdle
= totalRxIdle
/ postedInterrupts
;
466 coalescedRxOk
= totalRxOk
/ postedInterrupts
;
467 coalescedRxDesc
= totalRxDesc
/ postedInterrupts
;
468 coalescedTxOk
= totalTxOk
/ postedInterrupts
;
469 coalescedTxIdle
= totalTxIdle
/ postedInterrupts
;
470 coalescedTxDesc
= totalTxDesc
/ postedInterrupts
;
471 coalescedRxOrn
= totalRxOrn
/ postedInterrupts
;
473 coalescedTotal
= (totalSwi
+ totalRxIdle
+ totalRxOk
+ totalRxDesc
+
474 totalTxOk
+ totalTxIdle
+ totalTxDesc
+
475 totalRxOrn
) / postedInterrupts
;
477 txBandwidth
= txBytes
* Stats::constant(8) / simSeconds
;
478 rxBandwidth
= rxBytes
* Stats::constant(8) / simSeconds
;
479 totBandwidth
= txBandwidth
+ rxBandwidth
;
480 totBytes
= txBytes
+ rxBytes
;
481 totPackets
= txPackets
+ rxPackets
;
483 txPacketRate
= txPackets
/ simSeconds
;
484 rxPacketRate
= rxPackets
/ simSeconds
;
488 * This is to read the PCI general configuration registers
491 NSGigE::readConfig(int offset
, int size
, uint8_t *data
)
493 if (offset
< PCI_DEVICE_SPECIFIC
)
494 PciDev::readConfig(offset
, size
, data
);
496 panic("Device specific PCI config space not implemented!\n");
500 * This is to write to the PCI general configuration registers
503 NSGigE::writeConfig(int offset
, int size
, const uint8_t* data
)
505 if (offset
< PCI_DEVICE_SPECIFIC
)
506 PciDev::writeConfig(offset
, size
, data
);
508 panic("Device specific PCI config space not implemented!\n");
510 // Need to catch writes to BARs to update the PIO interface
512 // seems to work fine without all these PCI settings, but i
513 // put in the IO to double check, an assertion will fail if we
514 // need to properly implement it
516 if (config
.data
[offset
] & PCI_CMD_IOSE
)
522 if (config
.data
[offset
] & PCI_CMD_BME
) {
529 if (config
.data
[offset
] & PCI_CMD_MSE
) {
538 case PCI0_BASE_ADDR0
:
539 if (BARAddrs
[0] != 0) {
541 pioInterface
->addAddrRange(RangeSize(BARAddrs
[0], BARSize
[0]));
543 BARAddrs
[0] &= EV5::PAddrUncachedMask
;
546 case PCI0_BASE_ADDR1
:
547 if (BARAddrs
[1] != 0) {
549 pioInterface
->addAddrRange(RangeSize(BARAddrs
[1], BARSize
[1]));
551 BARAddrs
[1] &= EV5::PAddrUncachedMask
;
558 * This reads the device registers, which are detailed in the NS83820
562 NSGigE::read(MemReqPtr
&req
, uint8_t *data
)
566 //The mask is to give you only the offset into the device register file
567 Addr daddr
= req
->paddr
& 0xfff;
568 DPRINTF(EthernetPIO
, "read da=%#x pa=%#x va=%#x size=%d\n",
569 daddr
, req
->paddr
, req
->vaddr
, req
->size
);
572 // there are some reserved registers, you can see ns_gige_reg.h and
573 // the spec sheet for details
574 if (daddr
> LAST
&& daddr
<= RESERVED
) {
575 panic("Accessing reserved register");
576 } else if (daddr
> RESERVED
&& daddr
<= 0x3FC) {
577 readConfig(daddr
& 0xff, req
->size
, data
);
579 } else if (daddr
>= MIB_START
&& daddr
<= MIB_END
) {
580 // don't implement all the MIB's. hopefully the kernel
581 // doesn't actually DEPEND upon their values
582 // MIB are just hardware stats keepers
583 uint32_t ®
= *(uint32_t *) data
;
586 } else if (daddr
> 0x3FC)
587 panic("Something is messed up!\n");
590 case sizeof(uint32_t):
592 uint32_t ®
= *(uint32_t *)data
;
598 //these are supposed to be cleared on a read
599 reg
&= ~(CR_RXD
| CR_TXD
| CR_TXR
| CR_RXR
);
616 devIntrClear(ISR_ALL
);
671 // see the spec sheet for how RFCR and RFDR work
672 // basically, you write to RFCR to tell the machine
673 // what you want to do next, then you act upon RFDR,
674 // and the device will be prepared b/c of what you
681 rfaddr
= (uint16_t)(regs
.rfcr
& RFCR_RFADDR
);
683 // Read from perfect match ROM octets
685 reg
= rom
.perfectMatch
[1];
687 reg
+= rom
.perfectMatch
[0];
690 reg
= rom
.perfectMatch
[3] << 8;
691 reg
+= rom
.perfectMatch
[2];
694 reg
= rom
.perfectMatch
[5] << 8;
695 reg
+= rom
.perfectMatch
[4];
698 // Read filter hash table
699 if (rfaddr
>= FHASH_ADDR
&&
700 rfaddr
< FHASH_ADDR
+ FHASH_SIZE
) {
702 // Only word-aligned reads supported
704 panic("unaligned read from filter hash table!");
706 reg
= rom
.filterHash
[rfaddr
- FHASH_ADDR
+ 1] << 8;
707 reg
+= rom
.filterHash
[rfaddr
- FHASH_ADDR
];
711 panic("reading RFDR for something other than pattern"
712 " matching or hashing! %#x\n", rfaddr
);
722 reg
&= ~(MIBC_MIBS
| MIBC_ACLR
);
767 if (params()->rx_thread
)
768 reg
|= M5REG_RX_THREAD
;
769 if (params()->tx_thread
)
770 reg
|= M5REG_TX_THREAD
;
774 panic("reading unimplemented register: addr=%#x", daddr
);
777 DPRINTF(EthernetPIO
, "read from %#x: data=%d data=%#x\n",
783 panic("accessing register with invalid size: addr=%#x, size=%d",
791 NSGigE::write(MemReqPtr
&req
, const uint8_t *data
)
795 Addr daddr
= req
->paddr
& 0xfff;
796 DPRINTF(EthernetPIO
, "write da=%#x pa=%#x va=%#x size=%d\n",
797 daddr
, req
->paddr
, req
->vaddr
, req
->size
);
799 if (daddr
> LAST
&& daddr
<= RESERVED
) {
800 panic("Accessing reserved register");
801 } else if (daddr
> RESERVED
&& daddr
<= 0x3FC) {
802 writeConfig(daddr
& 0xff, req
->size
, data
);
804 } else if (daddr
> 0x3FC)
805 panic("Something is messed up!\n");
808 int cpu
= (req
->xc
->regs
.ipr
[TheISA::IPR_PALtemp16
] >> 8) & 0xff;
809 if (cpu
>= writeQueue
.size())
810 writeQueue
.resize(cpu
+ 1);
811 writeQueue
[cpu
].push_back(RegWriteData(daddr
, *(uint32_t *)data
));
814 if (req
->size
== sizeof(uint32_t)) {
815 uint32_t reg
= *(uint32_t *)data
;
818 DPRINTF(EthernetPIO
, "write data=%d data=%#x\n", reg
, reg
);
825 } else if (reg
& CR_TXE
) {
826 if (!pioDelayWrite
) {
829 // the kernel is enabling the transmit machine
830 if (txState
== txIdle
)
837 } else if (reg
& CR_RXE
) {
838 if (!pioDelayWrite
) {
841 if (rxState
== rxIdle
)
853 devIntrPost(ISR_SWI
);
864 if (reg
& CFGR_LNKSTS
||
867 reg
& CFGR_RESERVED
||
868 reg
& CFGR_T64ADDR
||
869 reg
& CFGR_PCI64_DET
)
871 // First clear all writable bits
872 regs
.config
&= CFGR_LNKSTS
| CFGR_SPDSTS
| CFGR_DUPSTS
|
873 CFGR_RESERVED
| CFGR_T64ADDR
|
875 // Now set the appropriate writable bits
876 regs
.config
|= reg
& ~(CFGR_LNKSTS
| CFGR_SPDSTS
| CFGR_DUPSTS
|
877 CFGR_RESERVED
| CFGR_T64ADDR
|
880 // all these #if 0's are because i don't THINK the kernel needs to
881 // have these implemented. if there is a problem relating to one of
882 // these, you may need to add functionality in.
883 if (reg
& CFGR_TBI_EN
) ;
884 if (reg
& CFGR_MODE_1000
) ;
886 if (reg
& CFGR_AUTO_1000
)
887 panic("CFGR_AUTO_1000 not implemented!\n");
889 if (reg
& CFGR_PINT_DUPSTS
||
890 reg
& CFGR_PINT_LNKSTS
||
891 reg
& CFGR_PINT_SPDSTS
)
894 if (reg
& CFGR_TMRTEST
) ;
895 if (reg
& CFGR_MRM_DIS
) ;
896 if (reg
& CFGR_MWI_DIS
) ;
898 if (reg
& CFGR_T64ADDR
) ;
899 // panic("CFGR_T64ADDR is read only register!\n");
901 if (reg
& CFGR_PCI64_DET
)
902 panic("CFGR_PCI64_DET is read only register!\n");
904 if (reg
& CFGR_DATA64_EN
) ;
905 if (reg
& CFGR_M64ADDR
) ;
906 if (reg
& CFGR_PHY_RST
) ;
907 if (reg
& CFGR_PHY_DIS
) ;
909 if (reg
& CFGR_EXTSTS_EN
)
912 extstsEnable
= false;
914 if (reg
& CFGR_REQALG
) ;
916 if (reg
& CFGR_POW
) ;
917 if (reg
& CFGR_EXD
) ;
918 if (reg
& CFGR_PESEL
) ;
919 if (reg
& CFGR_BROM_DIS
) ;
920 if (reg
& CFGR_EXT_125
) ;
921 if (reg
& CFGR_BEM
) ;
925 // Clear writable bits
926 regs
.mear
&= MEAR_EEDO
;
927 // Set appropriate writable bits
928 regs
.mear
|= reg
& ~MEAR_EEDO
;
930 // FreeBSD uses the EEPROM to read PMATCH (for the MAC address)
931 // even though it could get it through RFDR
932 if (reg
& MEAR_EESEL
) {
933 // Rising edge of clock
934 if (reg
& MEAR_EECLK
&& !eepromClk
)
938 eepromState
= eepromStart
;
939 regs
.mear
&= ~MEAR_EEDI
;
942 eepromClk
= reg
& MEAR_EECLK
;
944 // since phy is completely faked, MEAR_MD* don't matter
945 if (reg
& MEAR_MDIO
) ;
946 if (reg
& MEAR_MDDIR
) ;
947 if (reg
& MEAR_MDC
) ;
951 regs
.ptscr
= reg
& ~(PTSCR_RBIST_RDONLY
);
952 // these control BISTs for various parts of chip - we
953 // don't care or do just fake that the BIST is done
954 if (reg
& PTSCR_RBIST_EN
)
955 regs
.ptscr
|= PTSCR_RBIST_DONE
;
956 if (reg
& PTSCR_EEBIST_EN
)
957 regs
.ptscr
&= ~PTSCR_EEBIST_EN
;
958 if (reg
& PTSCR_EELOAD_EN
)
959 regs
.ptscr
&= ~PTSCR_EELOAD_EN
;
962 case ISR
: /* writing to the ISR has no effect */
963 panic("ISR is a read only register!\n");
976 /* not going to implement real interrupt holdoff */
980 regs
.txdp
= (reg
& 0xFFFFFFFC);
981 assert(txState
== txIdle
);
992 if (reg
& TX_CFG_CSI
) ;
993 if (reg
& TX_CFG_HBI
) ;
994 if (reg
& TX_CFG_MLB
) ;
995 if (reg
& TX_CFG_ATP
) ;
996 if (reg
& TX_CFG_ECRETRY
) {
998 * this could easily be implemented, but considering
999 * the network is just a fake pipe, wouldn't make
1004 if (reg
& TX_CFG_BRST_DIS
) ;
1008 /* we handle our own DMA, ignore the kernel's exhortations */
1009 if (reg
& TX_CFG_MXDMA
) ;
1012 // also, we currently don't care about fill/drain
1013 // thresholds though this may change in the future with
1014 // more realistic networks or a driver which changes it
1015 // according to feedback
1020 // Only write writable bits
1021 regs
.gpior
&= GPIOR_UNUSED
| GPIOR_GP5_IN
| GPIOR_GP4_IN
1022 | GPIOR_GP3_IN
| GPIOR_GP2_IN
| GPIOR_GP1_IN
;
1023 regs
.gpior
|= reg
& ~(GPIOR_UNUSED
| GPIOR_GP5_IN
| GPIOR_GP4_IN
1024 | GPIOR_GP3_IN
| GPIOR_GP2_IN
| GPIOR_GP1_IN
);
1025 /* these just control general purpose i/o pins, don't matter */
1040 if (reg
& RX_CFG_AEP
) ;
1041 if (reg
& RX_CFG_ARP
) ;
1042 if (reg
& RX_CFG_STRIPCRC
) ;
1043 if (reg
& RX_CFG_RX_RD
) ;
1044 if (reg
& RX_CFG_ALP
) ;
1045 if (reg
& RX_CFG_AIRL
) ;
1047 /* we handle our own DMA, ignore what kernel says about it */
1048 if (reg
& RX_CFG_MXDMA
) ;
1050 //also, we currently don't care about fill/drain thresholds
1051 //though this may change in the future with more realistic
1052 //networks or a driver which changes it according to feedback
1053 if (reg
& (RX_CFG_DRTH
| RX_CFG_DRTH0
)) ;
1058 /* there is no priority queueing used in the linux 2.6 driver */
1063 /* not going to implement wake on LAN */
1068 /* not going to implement pause control */
1075 rxFilterEnable
= (reg
& RFCR_RFEN
) ? true : false;
1076 acceptBroadcast
= (reg
& RFCR_AAB
) ? true : false;
1077 acceptMulticast
= (reg
& RFCR_AAM
) ? true : false;
1078 acceptUnicast
= (reg
& RFCR_AAU
) ? true : false;
1079 acceptPerfect
= (reg
& RFCR_APM
) ? true : false;
1080 acceptArp
= (reg
& RFCR_AARP
) ? true : false;
1081 multicastHashEnable
= (reg
& RFCR_MHEN
) ? true : false;
1084 if (reg
& RFCR_APAT
)
1085 panic("RFCR_APAT not implemented!\n");
1087 if (reg
& RFCR_UHEN
)
1088 panic("Unicast hash filtering not used by drivers!\n");
1091 panic("RFCR_ULM not implemented!\n");
1096 rfaddr
= (uint16_t)(regs
.rfcr
& RFCR_RFADDR
);
1099 rom
.perfectMatch
[0] = (uint8_t)reg
;
1100 rom
.perfectMatch
[1] = (uint8_t)(reg
>> 8);
1103 rom
.perfectMatch
[2] = (uint8_t)reg
;
1104 rom
.perfectMatch
[3] = (uint8_t)(reg
>> 8);
1107 rom
.perfectMatch
[4] = (uint8_t)reg
;
1108 rom
.perfectMatch
[5] = (uint8_t)(reg
>> 8);
1112 if (rfaddr
>= FHASH_ADDR
&&
1113 rfaddr
< FHASH_ADDR
+ FHASH_SIZE
) {
1115 // Only word-aligned writes supported
1117 panic("unaligned write to filter hash table!");
1119 rom
.filterHash
[rfaddr
- FHASH_ADDR
] = (uint8_t)reg
;
1120 rom
.filterHash
[rfaddr
- FHASH_ADDR
+ 1]
1121 = (uint8_t)(reg
>> 8);
1124 panic("writing RFDR for something other than pattern matching\
1125 or hashing! %#x\n", rfaddr
);
1133 panic("the driver never uses BRDR, something is wrong!\n");
1136 panic("SRR is read only register!\n");
1139 panic("the driver never uses MIBC, something is wrong!\n");
1150 panic("the driver never uses VDR, something is wrong!\n");
1153 /* not going to implement clockrun stuff */
1159 if (reg
& TBICR_MR_LOOPBACK
)
1160 panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
1162 if (reg
& TBICR_MR_AN_ENABLE
) {
1163 regs
.tanlpar
= regs
.tanar
;
1164 regs
.tbisr
|= (TBISR_MR_AN_COMPLETE
| TBISR_MR_LINK_STATUS
);
1168 if (reg
& TBICR_MR_RESTART_AN
) ;
1174 panic("TBISR is read only register!\n");
1177 // Only write the writable bits
1178 regs
.tanar
&= TANAR_RF1
| TANAR_RF2
| TANAR_UNUSED
;
1179 regs
.tanar
|= reg
& ~(TANAR_RF1
| TANAR_RF2
| TANAR_UNUSED
);
1181 // Pause capability unimplemented
1183 if (reg
& TANAR_PS2
) ;
1184 if (reg
& TANAR_PS1
) ;
1190 panic("this should only be written to by the fake phy!\n");
1193 panic("TANER is read only register!\n");
1200 panic("invalid register access daddr=%#x", daddr
);
1203 panic("Invalid Request Size");
1210 NSGigE::devIntrPost(uint32_t interrupts
)
1212 if (interrupts
& ISR_RESERVE
)
1213 panic("Cannot set a reserved interrupt");
1215 if (interrupts
& ISR_NOIMPL
)
1216 warn("interrupt not implemented %#x\n", interrupts
);
1218 interrupts
&= ISR_IMPL
;
1219 regs
.isr
|= interrupts
;
1221 if (interrupts
& regs
.imr
) {
1222 if (interrupts
& ISR_SWI
) {
1225 if (interrupts
& ISR_RXIDLE
) {
1228 if (interrupts
& ISR_RXOK
) {
1231 if (interrupts
& ISR_RXDESC
) {
1234 if (interrupts
& ISR_TXOK
) {
1237 if (interrupts
& ISR_TXIDLE
) {
1240 if (interrupts
& ISR_TXDESC
) {
1243 if (interrupts
& ISR_RXORN
) {
1248 DPRINTF(EthernetIntr
,
1249 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
1250 interrupts
, regs
.isr
, regs
.imr
);
1252 if ((regs
.isr
& regs
.imr
)) {
1253 Tick when
= curTick
;
1254 if ((regs
.isr
& regs
.imr
& ISR_NODELAY
) == 0)
1260 /* writing this interrupt counting stats inside this means that this function
1261 is now limited to being used to clear all interrupts upon the kernel
1262 reading isr and servicing. just telling you in case you were thinking
1266 NSGigE::devIntrClear(uint32_t interrupts
)
1268 if (interrupts
& ISR_RESERVE
)
1269 panic("Cannot clear a reserved interrupt");
1271 if (regs
.isr
& regs
.imr
& ISR_SWI
) {
1274 if (regs
.isr
& regs
.imr
& ISR_RXIDLE
) {
1277 if (regs
.isr
& regs
.imr
& ISR_RXOK
) {
1280 if (regs
.isr
& regs
.imr
& ISR_RXDESC
) {
1283 if (regs
.isr
& regs
.imr
& ISR_TXOK
) {
1286 if (regs
.isr
& regs
.imr
& ISR_TXIDLE
) {
1289 if (regs
.isr
& regs
.imr
& ISR_TXDESC
) {
1292 if (regs
.isr
& regs
.imr
& ISR_RXORN
) {
1296 if (regs
.isr
& regs
.imr
& ISR_IMPL
)
1299 interrupts
&= ~ISR_NOIMPL
;
1300 regs
.isr
&= ~interrupts
;
1302 DPRINTF(EthernetIntr
,
1303 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
1304 interrupts
, regs
.isr
, regs
.imr
);
1306 if (!(regs
.isr
& regs
.imr
))
1311 NSGigE::devIntrChangeMask()
1313 DPRINTF(EthernetIntr
, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
1314 regs
.isr
, regs
.imr
, regs
.isr
& regs
.imr
);
1316 if (regs
.isr
& regs
.imr
)
1317 cpuIntrPost(curTick
);
1323 NSGigE::cpuIntrPost(Tick when
)
1325 // If the interrupt you want to post is later than an interrupt
1326 // already scheduled, just let it post in the coming one and don't
1327 // schedule another.
1328 // HOWEVER, must be sure that the scheduled intrTick is in the
1329 // future (this was formerly the source of a bug)
1331 * @todo this warning should be removed and the intrTick code should
1334 assert(when
>= curTick
);
1335 assert(intrTick
>= curTick
|| intrTick
== 0);
1336 if (when
> intrTick
&& intrTick
!= 0) {
1337 DPRINTF(EthernetIntr
, "don't need to schedule event...intrTick=%d\n",
1343 if (intrTick
< curTick
) {
1348 DPRINTF(EthernetIntr
, "going to schedule an interrupt for intrTick=%d\n",
1352 intrEvent
->squash();
1353 intrEvent
= new IntrEvent(this, true);
1354 intrEvent
->schedule(intrTick
);
1358 NSGigE::cpuInterrupt()
1360 assert(intrTick
== curTick
);
1362 // Whether or not there's a pending interrupt, we don't care about
1367 // Don't send an interrupt if there's already one
1368 if (cpuPendingIntr
) {
1369 DPRINTF(EthernetIntr
,
1370 "would send an interrupt now, but there's already pending\n");
1373 cpuPendingIntr
= true;
1375 DPRINTF(EthernetIntr
, "posting interrupt\n");
1381 NSGigE::cpuIntrClear()
1383 if (!cpuPendingIntr
)
1387 intrEvent
->squash();
1393 cpuPendingIntr
= false;
1395 DPRINTF(EthernetIntr
, "clearing interrupt\n");
1400 NSGigE::cpuIntrPending() const
1401 { return cpuPendingIntr
; }
1407 DPRINTF(Ethernet
, "transmit reset\n");
1412 assert(txDescCnt
== 0);
1415 assert(txDmaState
== dmaIdle
);
1421 DPRINTF(Ethernet
, "receive reset\n");
1424 assert(rxPktBytes
== 0);
1427 assert(rxDescCnt
== 0);
1428 assert(rxDmaState
== dmaIdle
);
1436 memset(®s
, 0, sizeof(regs
));
1437 regs
.config
= (CFGR_LNKSTS
| CFGR_TBI_EN
| CFGR_MODE_1000
);
1439 regs
.txcfg
= 0x120; // set drain threshold to 1024 bytes and
1440 // fill threshold to 32 bytes
1441 regs
.rxcfg
= 0x4; // set drain threshold to 16 bytes
1442 regs
.srr
= 0x0103; // set the silicon revision to rev B or 0x103
1443 regs
.mibc
= MIBC_FRZ
;
1444 regs
.vdr
= 0x81; // set the vlan tag type to 802.1q
1445 regs
.tesr
= 0xc000; // TBI capable of both full and half duplex
1446 regs
.brar
= 0xffffffff;
1448 extstsEnable
= false;
1449 acceptBroadcast
= false;
1450 acceptMulticast
= false;
1451 acceptUnicast
= false;
1452 acceptPerfect
= false;
1457 NSGigE::rxDmaReadCopy()
1459 assert(rxDmaState
== dmaReading
);
1461 physmem
->dma_read((uint8_t *)rxDmaData
, rxDmaAddr
, rxDmaLen
);
1462 rxDmaState
= dmaIdle
;
1464 DPRINTF(EthernetDMA
, "rx dma read paddr=%#x len=%d\n",
1465 rxDmaAddr
, rxDmaLen
);
1466 DDUMP(EthernetDMA
, rxDmaData
, rxDmaLen
);
1470 NSGigE::doRxDmaRead()
1472 assert(rxDmaState
== dmaIdle
|| rxDmaState
== dmaReadWaiting
);
1473 rxDmaState
= dmaReading
;
1475 if (dmaInterface
&& !rxDmaFree
) {
1476 if (dmaInterface
->busy())
1477 rxDmaState
= dmaReadWaiting
;
1479 dmaInterface
->doDMA(Read
, rxDmaAddr
, rxDmaLen
, curTick
,
1480 &rxDmaReadEvent
, true);
1484 if (dmaReadDelay
== 0 && dmaReadFactor
== 0) {
1489 Tick factor
= ((rxDmaLen
+ ULL(63)) >> ULL(6)) * dmaReadFactor
;
1490 Tick start
= curTick
+ dmaReadDelay
+ factor
;
1491 rxDmaReadEvent
.schedule(start
);
1496 NSGigE::rxDmaReadDone()
1498 assert(rxDmaState
== dmaReading
);
1501 // If the transmit state machine has a pending DMA, let it go first
1502 if (txDmaState
== dmaReadWaiting
|| txDmaState
== dmaWriteWaiting
)
1509 NSGigE::rxDmaWriteCopy()
1511 assert(rxDmaState
== dmaWriting
);
1513 physmem
->dma_write(rxDmaAddr
, (uint8_t *)rxDmaData
, rxDmaLen
);
1514 rxDmaState
= dmaIdle
;
1516 DPRINTF(EthernetDMA
, "rx dma write paddr=%#x len=%d\n",
1517 rxDmaAddr
, rxDmaLen
);
1518 DDUMP(EthernetDMA
, rxDmaData
, rxDmaLen
);
1522 NSGigE::doRxDmaWrite()
1524 assert(rxDmaState
== dmaIdle
|| rxDmaState
== dmaWriteWaiting
);
1525 rxDmaState
= dmaWriting
;
1527 if (dmaInterface
&& !rxDmaFree
) {
1528 if (dmaInterface
->busy())
1529 rxDmaState
= dmaWriteWaiting
;
1531 dmaInterface
->doDMA(WriteInvalidate
, rxDmaAddr
, rxDmaLen
, curTick
,
1532 &rxDmaWriteEvent
, true);
1536 if (dmaWriteDelay
== 0 && dmaWriteFactor
== 0) {
1541 Tick factor
= ((rxDmaLen
+ ULL(63)) >> ULL(6)) * dmaWriteFactor
;
1542 Tick start
= curTick
+ dmaWriteDelay
+ factor
;
1543 rxDmaWriteEvent
.schedule(start
);
1548 NSGigE::rxDmaWriteDone()
1550 assert(rxDmaState
== dmaWriting
);
1553 // If the transmit state machine has a pending DMA, let it go first
1554 if (txDmaState
== dmaReadWaiting
|| txDmaState
== dmaWriteWaiting
)
1563 bool is64bit
= (bool)(regs
.config
& CFGR_M64ADDR
);
1566 "receive kick rxState=%s (rxBuf.size=%d) %d-bit\n",
1567 NsRxStateStrings
[rxState
], rxFifo
.size(), is64bit
? 64 : 32);
1570 uint32_t &cmdsts
= is64bit
? rxDesc64
.cmdsts
: rxDesc32
.cmdsts
;
1571 uint32_t &extsts
= is64bit
? rxDesc64
.extsts
: rxDesc32
.extsts
;
1575 if (rxKickTick
> curTick
) {
1576 DPRINTF(EthernetSM
, "receive kick exiting, can't run till %d\n",
1582 // Go to the next state machine clock tick.
1583 rxKickTick
= curTick
+ cycles(1);
1586 switch(rxDmaState
) {
1587 case dmaReadWaiting
:
1591 case dmaWriteWaiting
:
1599 link
= is64bit
? (Addr
)rxDesc64
.link
: (Addr
)rxDesc32
.link
;
1600 bufptr
= is64bit
? (Addr
)rxDesc64
.bufptr
: (Addr
)rxDesc32
.bufptr
;
1602 // see state machine from spec for details
1603 // the way this works is, if you finish work on one state and can
1604 // go directly to another, you do that through jumping to the
1605 // label "next". however, if you have intermediate work, like DMA
1606 // so that you can't go to the next state yet, you go to exit and
1607 // exit the loop. however, when the DMA is done it will trigger
1608 // an event and come back to this loop.
1612 DPRINTF(EthernetSM
, "Receive Disabled! Nothing to do.\n");
1617 rxState
= rxDescRefr
;
1619 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1621 is64bit
? (void *)&rxDesc64
.link
: (void *)&rxDesc32
.link
;
1622 rxDmaLen
= is64bit
? sizeof(rxDesc64
.link
) : sizeof(rxDesc32
.link
);
1623 rxDmaFree
= dmaDescFree
;
1626 descDmaRdBytes
+= rxDmaLen
;
1631 rxState
= rxDescRead
;
1633 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1634 rxDmaData
= is64bit
? (void *)&rxDesc64
: (void *)&rxDesc32
;
1635 rxDmaLen
= is64bit
? sizeof(rxDesc64
) : sizeof(rxDesc32
);
1636 rxDmaFree
= dmaDescFree
;
1639 descDmaRdBytes
+= rxDmaLen
;
1647 if (rxDmaState
!= dmaIdle
)
1650 rxState
= rxAdvance
;
1654 if (rxDmaState
!= dmaIdle
)
1657 DPRINTF(EthernetDesc
, "rxDesc: addr=%08x read descriptor\n",
1658 regs
.rxdp
& 0x3fffffff);
1659 DPRINTF(EthernetDesc
,
1660 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
1661 link
, bufptr
, cmdsts
, extsts
);
1663 if (cmdsts
& CMDSTS_OWN
) {
1664 devIntrPost(ISR_RXIDLE
);
1668 rxState
= rxFifoBlock
;
1670 rxDescCnt
= cmdsts
& CMDSTS_LEN_MASK
;
1677 * @todo in reality, we should be able to start processing
1678 * the packet as it arrives, and not have to wait for the
1679 * full packet ot be in the receive fifo.
1684 DPRINTF(EthernetSM
, "****processing receive of new packet****\n");
1686 // If we don't have a packet, grab a new one from the fifo.
1687 rxPacket
= rxFifo
.front();
1688 rxPktBytes
= rxPacket
->length
;
1689 rxPacketBufPtr
= rxPacket
->data
;
1692 if (DTRACE(Ethernet
)) {
1695 DPRINTF(Ethernet
, "ID is %d\n", ip
->id());
1699 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1700 tcp
->sport(), tcp
->dport(), tcp
->seq(),
1707 // sanity check - i think the driver behaves like this
1708 assert(rxDescCnt
>= rxPktBytes
);
1713 // dont' need the && rxDescCnt > 0 if driver sanity check
1715 if (rxPktBytes
> 0) {
1716 rxState
= rxFragWrite
;
1717 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
1719 rxXferLen
= rxPktBytes
;
1721 rxDmaAddr
= rxFragPtr
& 0x3fffffff;
1722 rxDmaData
= rxPacketBufPtr
;
1723 rxDmaLen
= rxXferLen
;
1724 rxDmaFree
= dmaDataFree
;
1730 rxState
= rxDescWrite
;
1732 //if (rxPktBytes == 0) { /* packet is done */
1733 assert(rxPktBytes
== 0);
1734 DPRINTF(EthernetSM
, "done with receiving packet\n");
1736 cmdsts
|= CMDSTS_OWN
;
1737 cmdsts
&= ~CMDSTS_MORE
;
1738 cmdsts
|= CMDSTS_OK
;
1739 cmdsts
&= 0xffff0000;
1740 cmdsts
+= rxPacket
->length
; //i.e. set CMDSTS_SIZE
1744 * all the driver uses these are for its own stats keeping
1745 * which we don't care about, aren't necessary for
1746 * functionality and doing this would just slow us down.
1747 * if they end up using this in a later version for
1748 * functional purposes, just undef
1750 if (rxFilterEnable
) {
1751 cmdsts
&= ~CMDSTS_DEST_MASK
;
1752 const EthAddr
&dst
= rxFifoFront()->dst();
1754 cmdsts
|= CMDSTS_DEST_SELF
;
1755 if (dst
->multicast())
1756 cmdsts
|= CMDSTS_DEST_MULTI
;
1757 if (dst
->broadcast())
1758 cmdsts
|= CMDSTS_DEST_MASK
;
1763 if (extstsEnable
&& ip
) {
1764 extsts
|= EXTSTS_IPPKT
;
1766 if (cksum(ip
) != 0) {
1767 DPRINTF(EthernetCksum
, "Rx IP Checksum Error\n");
1768 extsts
|= EXTSTS_IPERR
;
1773 extsts
|= EXTSTS_TCPPKT
;
1775 if (cksum(tcp
) != 0) {
1776 DPRINTF(EthernetCksum
, "Rx TCP Checksum Error\n");
1777 extsts
|= EXTSTS_TCPERR
;
1781 extsts
|= EXTSTS_UDPPKT
;
1783 if (cksum(udp
) != 0) {
1784 DPRINTF(EthernetCksum
, "Rx UDP Checksum Error\n");
1785 extsts
|= EXTSTS_UDPERR
;
1792 * the driver seems to always receive into desc buffers
1793 * of size 1514, so you never have a pkt that is split
1794 * into multiple descriptors on the receive side, so
1795 * i don't implement that case, hence the assert above.
1798 DPRINTF(EthernetDesc
,
1799 "rxDesc: addr=%08x writeback cmdsts extsts\n",
1800 regs
.rxdp
& 0x3fffffff);
1801 DPRINTF(EthernetDesc
,
1802 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
1803 link
, bufptr
, cmdsts
, extsts
);
1805 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1806 rxDmaData
= &cmdsts
;
1808 rxDmaAddr
+= offsetof(ns_desc64
, cmdsts
);
1809 rxDmaLen
= sizeof(rxDesc64
.cmdsts
) + sizeof(rxDesc64
.extsts
);
1811 rxDmaAddr
+= offsetof(ns_desc32
, cmdsts
);
1812 rxDmaLen
= sizeof(rxDesc32
.cmdsts
) + sizeof(rxDesc32
.extsts
);
1814 rxDmaFree
= dmaDescFree
;
1817 descDmaWrBytes
+= rxDmaLen
;
1825 if (rxDmaState
!= dmaIdle
)
1828 rxPacketBufPtr
+= rxXferLen
;
1829 rxFragPtr
+= rxXferLen
;
1830 rxPktBytes
-= rxXferLen
;
1832 rxState
= rxFifoBlock
;
1836 if (rxDmaState
!= dmaIdle
)
1839 assert(cmdsts
& CMDSTS_OWN
);
1841 assert(rxPacket
== 0);
1842 devIntrPost(ISR_RXOK
);
1844 if (cmdsts
& CMDSTS_INTR
)
1845 devIntrPost(ISR_RXDESC
);
1848 DPRINTF(EthernetSM
, "Halting the RX state machine\n");
1852 rxState
= rxAdvance
;
1857 devIntrPost(ISR_RXIDLE
);
1862 if (rxDmaState
!= dmaIdle
)
1864 rxState
= rxDescRead
;
1868 rxDmaAddr
= regs
.rxdp
& 0x3fffffff;
1869 rxDmaData
= is64bit
? (void *)&rxDesc64
: (void *)&rxDesc32
;
1870 rxDmaLen
= is64bit
? sizeof(rxDesc64
) : sizeof(rxDesc32
);
1871 rxDmaFree
= dmaDescFree
;
1879 panic("Invalid rxState!");
1882 DPRINTF(EthernetSM
, "entering next rxState=%s\n",
1883 NsRxStateStrings
[rxState
]);
1888 * @todo do we want to schedule a future kick?
1890 DPRINTF(EthernetSM
, "rx state machine exited rxState=%s\n",
1891 NsRxStateStrings
[rxState
]);
1893 if (clock
&& !rxKickEvent
.scheduled())
1894 rxKickEvent
.schedule(rxKickTick
);
1900 if (txFifo
.empty()) {
1901 DPRINTF(Ethernet
, "nothing to transmit\n");
1905 DPRINTF(Ethernet
, "Attempt Pkt Transmit: txFifo length=%d\n",
1907 if (interface
->sendPacket(txFifo
.front())) {
1909 if (DTRACE(Ethernet
)) {
1910 IpPtr
ip(txFifo
.front());
1912 DPRINTF(Ethernet
, "ID is %d\n", ip
->id());
1916 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1917 tcp
->sport(), tcp
->dport(), tcp
->seq(),
1924 DDUMP(EthernetData
, txFifo
.front()->data
, txFifo
.front()->length
);
1925 txBytes
+= txFifo
.front()->length
;
1928 DPRINTF(Ethernet
, "Successful Xmit! now txFifoAvail is %d\n",
1933 * normally do a writeback of the descriptor here, and ONLY
1934 * after that is done, send this interrupt. but since our
1935 * stuff never actually fails, just do this interrupt here,
1936 * otherwise the code has to stray from this nice format.
1937 * besides, it's functionally the same.
1939 devIntrPost(ISR_TXOK
);
1942 if (!txFifo
.empty() && !txEvent
.scheduled()) {
1943 DPRINTF(Ethernet
, "reschedule transmit\n");
1944 txEvent
.schedule(curTick
+ retryTime
);
1949 NSGigE::txDmaReadCopy()
1951 assert(txDmaState
== dmaReading
);
1953 physmem
->dma_read((uint8_t *)txDmaData
, txDmaAddr
, txDmaLen
);
1954 txDmaState
= dmaIdle
;
1956 DPRINTF(EthernetDMA
, "tx dma read paddr=%#x len=%d\n",
1957 txDmaAddr
, txDmaLen
);
1958 DDUMP(EthernetDMA
, txDmaData
, txDmaLen
);
1962 NSGigE::doTxDmaRead()
1964 assert(txDmaState
== dmaIdle
|| txDmaState
== dmaReadWaiting
);
1965 txDmaState
= dmaReading
;
1967 if (dmaInterface
&& !txDmaFree
) {
1968 if (dmaInterface
->busy())
1969 txDmaState
= dmaReadWaiting
;
1971 dmaInterface
->doDMA(Read
, txDmaAddr
, txDmaLen
, curTick
,
1972 &txDmaReadEvent
, true);
1976 if (dmaReadDelay
== 0 && dmaReadFactor
== 0.0) {
1981 Tick factor
= ((txDmaLen
+ ULL(63)) >> ULL(6)) * dmaReadFactor
;
1982 Tick start
= curTick
+ dmaReadDelay
+ factor
;
1983 txDmaReadEvent
.schedule(start
);
1988 NSGigE::txDmaReadDone()
1990 assert(txDmaState
== dmaReading
);
1993 // If the receive state machine has a pending DMA, let it go first
1994 if (rxDmaState
== dmaReadWaiting
|| rxDmaState
== dmaWriteWaiting
)
2001 NSGigE::txDmaWriteCopy()
2003 assert(txDmaState
== dmaWriting
);
2005 physmem
->dma_write(txDmaAddr
, (uint8_t *)txDmaData
, txDmaLen
);
2006 txDmaState
= dmaIdle
;
2008 DPRINTF(EthernetDMA
, "tx dma write paddr=%#x len=%d\n",
2009 txDmaAddr
, txDmaLen
);
2010 DDUMP(EthernetDMA
, txDmaData
, txDmaLen
);
2014 NSGigE::doTxDmaWrite()
2016 assert(txDmaState
== dmaIdle
|| txDmaState
== dmaWriteWaiting
);
2017 txDmaState
= dmaWriting
;
2019 if (dmaInterface
&& !txDmaFree
) {
2020 if (dmaInterface
->busy())
2021 txDmaState
= dmaWriteWaiting
;
2023 dmaInterface
->doDMA(WriteInvalidate
, txDmaAddr
, txDmaLen
, curTick
,
2024 &txDmaWriteEvent
, true);
2028 if (dmaWriteDelay
== 0 && dmaWriteFactor
== 0.0) {
2033 Tick factor
= ((txDmaLen
+ ULL(63)) >> ULL(6)) * dmaWriteFactor
;
2034 Tick start
= curTick
+ dmaWriteDelay
+ factor
;
2035 txDmaWriteEvent
.schedule(start
);
2040 NSGigE::txDmaWriteDone()
2042 assert(txDmaState
== dmaWriting
);
2045 // If the receive state machine has a pending DMA, let it go first
2046 if (rxDmaState
== dmaReadWaiting
|| rxDmaState
== dmaWriteWaiting
)
2055 bool is64bit
= (bool)(regs
.config
& CFGR_M64ADDR
);
2057 DPRINTF(EthernetSM
, "transmit kick txState=%s %d-bit\n",
2058 NsTxStateStrings
[txState
], is64bit
? 64 : 32);
2061 uint32_t &cmdsts
= is64bit
? txDesc64
.cmdsts
: txDesc32
.cmdsts
;
2062 uint32_t &extsts
= is64bit
? txDesc64
.extsts
: txDesc32
.extsts
;
2066 if (txKickTick
> curTick
) {
2067 DPRINTF(EthernetSM
, "transmit kick exiting, can't run till %d\n",
2072 // Go to the next state machine clock tick.
2073 txKickTick
= curTick
+ cycles(1);
2076 switch(txDmaState
) {
2077 case dmaReadWaiting
:
2081 case dmaWriteWaiting
:
2089 link
= is64bit
? (Addr
)txDesc64
.link
: (Addr
)txDesc32
.link
;
2090 bufptr
= is64bit
? (Addr
)txDesc64
.bufptr
: (Addr
)txDesc32
.bufptr
;
2094 DPRINTF(EthernetSM
, "Transmit disabled. Nothing to do.\n");
2099 txState
= txDescRefr
;
2101 txDmaAddr
= regs
.txdp
& 0x3fffffff;
2103 is64bit
? (void *)&txDesc64
.link
: (void *)&txDesc32
.link
;
2104 txDmaLen
= is64bit
? sizeof(txDesc64
.link
) : sizeof(txDesc32
.link
);
2105 txDmaFree
= dmaDescFree
;
2108 descDmaRdBytes
+= txDmaLen
;
2114 txState
= txDescRead
;
2116 txDmaAddr
= regs
.txdp
& 0x3fffffff;
2117 txDmaData
= is64bit
? (void *)&txDesc64
: (void *)&txDesc32
;
2118 txDmaLen
= is64bit
? sizeof(txDesc64
) : sizeof(txDesc32
);
2119 txDmaFree
= dmaDescFree
;
2122 descDmaRdBytes
+= txDmaLen
;
2130 if (txDmaState
!= dmaIdle
)
2133 txState
= txAdvance
;
2137 if (txDmaState
!= dmaIdle
)
2140 DPRINTF(EthernetDesc
, "txDesc: addr=%08x read descriptor\n",
2141 regs
.txdp
& 0x3fffffff);
2142 DPRINTF(EthernetDesc
,
2143 "txDesc: link=%#x bufptr=%#x cmdsts=%#08x extsts=%#08x\n",
2144 link
, bufptr
, cmdsts
, extsts
);
2146 if (cmdsts
& CMDSTS_OWN
) {
2147 txState
= txFifoBlock
;
2149 txDescCnt
= cmdsts
& CMDSTS_LEN_MASK
;
2151 devIntrPost(ISR_TXIDLE
);
2159 DPRINTF(EthernetSM
, "****starting the tx of a new packet****\n");
2160 txPacket
= new PacketData(16384);
2161 txPacketBufPtr
= txPacket
->data
;
2164 if (txDescCnt
== 0) {
2165 DPRINTF(EthernetSM
, "the txDescCnt == 0, done with descriptor\n");
2166 if (cmdsts
& CMDSTS_MORE
) {
2167 DPRINTF(EthernetSM
, "there are more descriptors to come\n");
2168 txState
= txDescWrite
;
2170 cmdsts
&= ~CMDSTS_OWN
;
2172 txDmaAddr
= regs
.txdp
& 0x3fffffff;
2173 txDmaData
= &cmdsts
;
2175 txDmaAddr
+= offsetof(ns_desc64
, cmdsts
);
2176 txDmaLen
= sizeof(txDesc64
.cmdsts
);
2178 txDmaAddr
+= offsetof(ns_desc32
, cmdsts
);
2179 txDmaLen
= sizeof(txDesc32
.cmdsts
);
2181 txDmaFree
= dmaDescFree
;
2186 } else { /* this packet is totally done */
2187 DPRINTF(EthernetSM
, "This packet is done, let's wrap it up\n");
2188 /* deal with the the packet that just finished */
2189 if ((regs
.vtcr
& VTCR_PPCHK
) && extstsEnable
) {
2191 if (extsts
& EXTSTS_UDPPKT
) {
2194 udp
->sum(cksum(udp
));
2196 } else if (extsts
& EXTSTS_TCPPKT
) {
2199 tcp
->sum(cksum(tcp
));
2202 if (extsts
& EXTSTS_IPPKT
) {
2209 txPacket
->length
= txPacketBufPtr
- txPacket
->data
;
2210 // this is just because the receive side can't handle a packet
2211 // bigger than 1514 bytes, so make sure of that here
2212 if (txPacket
->length
> 1514)
2213 panic("transmit packet too large, %s > 1514\n",
2219 txFifo
.push(txPacket
);
2223 * the following section is not to spec, but
2224 * functionally shouldn't be any different. normally,
2225 * the chip will wait til the transmit has occurred
2226 * before writing back the descriptor because it has
2227 * to wait to see that it was successfully transmitted
2228 * to decide whether to set CMDSTS_OK or not.
2229 * however, in the simulator since it is always
2230 * successfully transmitted, and writing it exactly to
2231 * spec would complicate the code, we just do it here
2234 cmdsts
&= ~CMDSTS_OWN
;
2235 cmdsts
|= CMDSTS_OK
;
2237 DPRINTF(EthernetDesc
,
2238 "txDesc writeback: cmdsts=%08x extsts=%08x\n",
2241 txDmaFree
= dmaDescFree
;
2242 txDmaAddr
= regs
.txdp
& 0x3fffffff;
2243 txDmaData
= &cmdsts
;
2245 txDmaAddr
+= offsetof(ns_desc64
, cmdsts
);
2247 sizeof(txDesc64
.cmdsts
) + sizeof(txDesc64
.extsts
);
2249 txDmaAddr
+= offsetof(ns_desc32
, cmdsts
);
2251 sizeof(txDesc32
.cmdsts
) + sizeof(txDesc32
.extsts
);
2255 descDmaWrBytes
+= txDmaLen
;
2261 DPRINTF(EthernetSM
, "halting TX state machine\n");
2265 txState
= txAdvance
;
2271 DPRINTF(EthernetSM
, "this descriptor isn't done yet\n");
2272 if (!txFifo
.full()) {
2273 txState
= txFragRead
;
2276 * The number of bytes transferred is either whatever
2277 * is left in the descriptor (txDescCnt), or if there
2278 * is not enough room in the fifo, just whatever room
2279 * is left in the fifo
2281 txXferLen
= min
<uint32_t>(txDescCnt
, txFifo
.avail());
2283 txDmaAddr
= txFragPtr
& 0x3fffffff;
2284 txDmaData
= txPacketBufPtr
;
2285 txDmaLen
= txXferLen
;
2286 txDmaFree
= dmaDataFree
;
2291 txState
= txFifoBlock
;
2301 if (txDmaState
!= dmaIdle
)
2304 txPacketBufPtr
+= txXferLen
;
2305 txFragPtr
+= txXferLen
;
2306 txDescCnt
-= txXferLen
;
2307 txFifo
.reserve(txXferLen
);
2309 txState
= txFifoBlock
;
2313 if (txDmaState
!= dmaIdle
)
2316 if (cmdsts
& CMDSTS_INTR
)
2317 devIntrPost(ISR_TXDESC
);
2320 DPRINTF(EthernetSM
, "halting TX state machine\n");
2324 txState
= txAdvance
;
2329 devIntrPost(ISR_TXIDLE
);
2333 if (txDmaState
!= dmaIdle
)
2335 txState
= txDescRead
;
2339 txDmaAddr
= link
& 0x3fffffff;
2340 txDmaData
= is64bit
? (void *)&txDesc64
: (void *)&txDesc32
;
2341 txDmaLen
= is64bit
? sizeof(txDesc64
) : sizeof(txDesc32
);
2342 txDmaFree
= dmaDescFree
;
2350 panic("invalid state");
2353 DPRINTF(EthernetSM
, "entering next txState=%s\n",
2354 NsTxStateStrings
[txState
]);
2359 * @todo do we want to schedule a future kick?
2361 DPRINTF(EthernetSM
, "tx state machine exited txState=%s\n",
2362 NsTxStateStrings
[txState
]);
2364 if (clock
&& !txKickEvent
.scheduled())
2365 txKickEvent
.schedule(txKickTick
);
2369 * Advance the EEPROM state machine
2370 * Called on rising edge of EEPROM clock bit in MEAR
2373 NSGigE::eepromKick()
2375 switch (eepromState
) {
2379 // Wait for start bit
2380 if (regs
.mear
& MEAR_EEDI
) {
2381 // Set up to get 2 opcode bits
2382 eepromState
= eepromGetOpcode
;
2388 case eepromGetOpcode
:
2390 eepromOpcode
+= (regs
.mear
& MEAR_EEDI
) ? 1 : 0;
2393 // Done getting opcode
2394 if (eepromBitsToRx
== 0) {
2395 if (eepromOpcode
!= EEPROM_READ
)
2396 panic("only EEPROM reads are implemented!");
2398 // Set up to get address
2399 eepromState
= eepromGetAddress
;
2405 case eepromGetAddress
:
2406 eepromAddress
<<= 1;
2407 eepromAddress
+= (regs
.mear
& MEAR_EEDI
) ? 1 : 0;
2410 // Done getting address
2411 if (eepromBitsToRx
== 0) {
2413 if (eepromAddress
>= EEPROM_SIZE
)
2414 panic("EEPROM read access out of range!");
2416 switch (eepromAddress
) {
2418 case EEPROM_PMATCH2_ADDR
:
2419 eepromData
= rom
.perfectMatch
[5];
2421 eepromData
+= rom
.perfectMatch
[4];
2424 case EEPROM_PMATCH1_ADDR
:
2425 eepromData
= rom
.perfectMatch
[3];
2427 eepromData
+= rom
.perfectMatch
[2];
2430 case EEPROM_PMATCH0_ADDR
:
2431 eepromData
= rom
.perfectMatch
[1];
2433 eepromData
+= rom
.perfectMatch
[0];
2437 panic("FreeBSD driver only uses EEPROM to read PMATCH!");
2439 // Set up to read data
2440 eepromState
= eepromRead
;
2441 eepromBitsToRx
= 16;
2443 // Clear data in bit
2444 regs
.mear
&= ~MEAR_EEDI
;
2449 // Clear Data Out bit
2450 regs
.mear
&= ~MEAR_EEDO
;
2451 // Set bit to value of current EEPROM bit
2452 regs
.mear
|= (eepromData
& 0x8000) ? MEAR_EEDO
: 0x0;
2458 if (eepromBitsToRx
== 0) {
2459 eepromState
= eepromStart
;
2464 panic("invalid EEPROM state");
2470 NSGigE::transferDone()
2472 if (txFifo
.empty()) {
2473 DPRINTF(Ethernet
, "transfer complete: txFifo empty...nothing to do\n");
2477 DPRINTF(Ethernet
, "transfer complete: data in txFifo...schedule xmit\n");
2479 if (txEvent
.scheduled())
2480 txEvent
.reschedule(curTick
+ cycles(1));
2482 txEvent
.schedule(curTick
+ cycles(1));
2486 NSGigE::rxFilter(const PacketPtr
&packet
)
2488 EthPtr eth
= packet
;
2492 const EthAddr
&dst
= eth
->dst();
2493 if (dst
.unicast()) {
2494 // If we're accepting all unicast addresses
2498 // If we make a perfect match
2499 if (acceptPerfect
&& dst
== rom
.perfectMatch
)
2502 if (acceptArp
&& eth
->type() == ETH_TYPE_ARP
)
2505 } else if (dst
.broadcast()) {
2506 // if we're accepting broadcasts
2507 if (acceptBroadcast
)
2510 } else if (dst
.multicast()) {
2511 // if we're accepting all multicasts
2512 if (acceptMulticast
)
2515 // Multicast hashing faked - all packets accepted
2516 if (multicastHashEnable
)
2521 DPRINTF(Ethernet
, "rxFilter drop\n");
2522 DDUMP(EthernetData
, packet
->data
, packet
->length
);
2529 NSGigE::recvPacket(PacketPtr packet
)
2531 rxBytes
+= packet
->length
;
2534 DPRINTF(Ethernet
, "Receiving packet from wire, rxFifoAvail=%d\n",
2538 DPRINTF(Ethernet
, "receive disabled...packet dropped\n");
2542 if (!rxFilterEnable
) {
2544 "receive packet filtering disabled . . . packet dropped\n");
2548 if (rxFilter(packet
)) {
2549 DPRINTF(Ethernet
, "packet filtered...dropped\n");
2553 if (rxFifo
.avail() < packet
->length
) {
2559 "packet won't fit in receive buffer...pkt ID %d dropped\n",
2562 DPRINTF(Ethernet
, "Seq=%d\n", tcp
->seq());
2567 devIntrPost(ISR_RXORN
);
2571 rxFifo
.push(packet
);
2577 //=====================================================================
2581 NSGigE::serialize(ostream
&os
)
2583 // Serialize the PciDev base class
2584 PciDev::serialize(os
);
2587 * Finalize any DMA events now.
2589 if (rxDmaReadEvent
.scheduled())
2591 if (rxDmaWriteEvent
.scheduled())
2593 if (txDmaReadEvent
.scheduled())
2595 if (txDmaWriteEvent
.scheduled())
2599 * Serialize the device registers
2601 SERIALIZE_SCALAR(regs
.command
);
2602 SERIALIZE_SCALAR(regs
.config
);
2603 SERIALIZE_SCALAR(regs
.mear
);
2604 SERIALIZE_SCALAR(regs
.ptscr
);
2605 SERIALIZE_SCALAR(regs
.isr
);
2606 SERIALIZE_SCALAR(regs
.imr
);
2607 SERIALIZE_SCALAR(regs
.ier
);
2608 SERIALIZE_SCALAR(regs
.ihr
);
2609 SERIALIZE_SCALAR(regs
.txdp
);
2610 SERIALIZE_SCALAR(regs
.txdp_hi
);
2611 SERIALIZE_SCALAR(regs
.txcfg
);
2612 SERIALIZE_SCALAR(regs
.gpior
);
2613 SERIALIZE_SCALAR(regs
.rxdp
);
2614 SERIALIZE_SCALAR(regs
.rxdp_hi
);
2615 SERIALIZE_SCALAR(regs
.rxcfg
);
2616 SERIALIZE_SCALAR(regs
.pqcr
);
2617 SERIALIZE_SCALAR(regs
.wcsr
);
2618 SERIALIZE_SCALAR(regs
.pcr
);
2619 SERIALIZE_SCALAR(regs
.rfcr
);
2620 SERIALIZE_SCALAR(regs
.rfdr
);
2621 SERIALIZE_SCALAR(regs
.brar
);
2622 SERIALIZE_SCALAR(regs
.brdr
);
2623 SERIALIZE_SCALAR(regs
.srr
);
2624 SERIALIZE_SCALAR(regs
.mibc
);
2625 SERIALIZE_SCALAR(regs
.vrcr
);
2626 SERIALIZE_SCALAR(regs
.vtcr
);
2627 SERIALIZE_SCALAR(regs
.vdr
);
2628 SERIALIZE_SCALAR(regs
.ccsr
);
2629 SERIALIZE_SCALAR(regs
.tbicr
);
2630 SERIALIZE_SCALAR(regs
.tbisr
);
2631 SERIALIZE_SCALAR(regs
.tanar
);
2632 SERIALIZE_SCALAR(regs
.tanlpar
);
2633 SERIALIZE_SCALAR(regs
.taner
);
2634 SERIALIZE_SCALAR(regs
.tesr
);
2636 SERIALIZE_ARRAY(rom
.perfectMatch
, ETH_ADDR_LEN
);
2637 SERIALIZE_ARRAY(rom
.filterHash
, FHASH_SIZE
);
2639 SERIALIZE_SCALAR(ioEnable
);
2642 * Serialize the data Fifos
2644 rxFifo
.serialize("rxFifo", os
);
2645 txFifo
.serialize("txFifo", os
);
2648 * Serialize the various helper variables
2650 bool txPacketExists
= txPacket
;
2651 SERIALIZE_SCALAR(txPacketExists
);
2652 if (txPacketExists
) {
2653 txPacket
->length
= txPacketBufPtr
- txPacket
->data
;
2654 txPacket
->serialize("txPacket", os
);
2655 uint32_t txPktBufPtr
= (uint32_t) (txPacketBufPtr
- txPacket
->data
);
2656 SERIALIZE_SCALAR(txPktBufPtr
);
2659 bool rxPacketExists
= rxPacket
;
2660 SERIALIZE_SCALAR(rxPacketExists
);
2661 if (rxPacketExists
) {
2662 rxPacket
->serialize("rxPacket", os
);
2663 uint32_t rxPktBufPtr
= (uint32_t) (rxPacketBufPtr
- rxPacket
->data
);
2664 SERIALIZE_SCALAR(rxPktBufPtr
);
2667 SERIALIZE_SCALAR(txXferLen
);
2668 SERIALIZE_SCALAR(rxXferLen
);
2671 * Serialize Cached Descriptors
2673 SERIALIZE_SCALAR(rxDesc64
.link
);
2674 SERIALIZE_SCALAR(rxDesc64
.bufptr
);
2675 SERIALIZE_SCALAR(rxDesc64
.cmdsts
);
2676 SERIALIZE_SCALAR(rxDesc64
.extsts
);
2677 SERIALIZE_SCALAR(txDesc64
.link
);
2678 SERIALIZE_SCALAR(txDesc64
.bufptr
);
2679 SERIALIZE_SCALAR(txDesc64
.cmdsts
);
2680 SERIALIZE_SCALAR(txDesc64
.extsts
);
2681 SERIALIZE_SCALAR(rxDesc32
.link
);
2682 SERIALIZE_SCALAR(rxDesc32
.bufptr
);
2683 SERIALIZE_SCALAR(rxDesc32
.cmdsts
);
2684 SERIALIZE_SCALAR(rxDesc32
.extsts
);
2685 SERIALIZE_SCALAR(txDesc32
.link
);
2686 SERIALIZE_SCALAR(txDesc32
.bufptr
);
2687 SERIALIZE_SCALAR(txDesc32
.cmdsts
);
2688 SERIALIZE_SCALAR(txDesc32
.extsts
);
2689 SERIALIZE_SCALAR(extstsEnable
);
2692 * Serialize tx state machine
2694 int txState
= this->txState
;
2695 SERIALIZE_SCALAR(txState
);
2696 SERIALIZE_SCALAR(txEnable
);
2697 SERIALIZE_SCALAR(CTDD
);
2698 SERIALIZE_SCALAR(txFragPtr
);
2699 SERIALIZE_SCALAR(txDescCnt
);
2700 int txDmaState
= this->txDmaState
;
2701 SERIALIZE_SCALAR(txDmaState
);
2702 SERIALIZE_SCALAR(txKickTick
);
2705 * Serialize rx state machine
2707 int rxState
= this->rxState
;
2708 SERIALIZE_SCALAR(rxState
);
2709 SERIALIZE_SCALAR(rxEnable
);
2710 SERIALIZE_SCALAR(CRDD
);
2711 SERIALIZE_SCALAR(rxPktBytes
);
2712 SERIALIZE_SCALAR(rxFragPtr
);
2713 SERIALIZE_SCALAR(rxDescCnt
);
2714 int rxDmaState
= this->rxDmaState
;
2715 SERIALIZE_SCALAR(rxDmaState
);
2716 SERIALIZE_SCALAR(rxKickTick
);
2719 * Serialize EEPROM state machine
2721 int eepromState
= this->eepromState
;
2722 SERIALIZE_SCALAR(eepromState
);
2723 SERIALIZE_SCALAR(eepromClk
);
2724 SERIALIZE_SCALAR(eepromBitsToRx
);
2725 SERIALIZE_SCALAR(eepromOpcode
);
2726 SERIALIZE_SCALAR(eepromAddress
);
2727 SERIALIZE_SCALAR(eepromData
);
2730 * If there's a pending transmit, store the time so we can
2731 * reschedule it later
2733 Tick transmitTick
= txEvent
.scheduled() ? txEvent
.when() - curTick
: 0;
2734 SERIALIZE_SCALAR(transmitTick
);
2737 * receive address filter settings
2739 SERIALIZE_SCALAR(rxFilterEnable
);
2740 SERIALIZE_SCALAR(acceptBroadcast
);
2741 SERIALIZE_SCALAR(acceptMulticast
);
2742 SERIALIZE_SCALAR(acceptUnicast
);
2743 SERIALIZE_SCALAR(acceptPerfect
);
2744 SERIALIZE_SCALAR(acceptArp
);
2745 SERIALIZE_SCALAR(multicastHashEnable
);
2748 * Keep track of pending interrupt status.
2750 SERIALIZE_SCALAR(intrTick
);
2751 SERIALIZE_SCALAR(cpuPendingIntr
);
2752 Tick intrEventTick
= 0;
2754 intrEventTick
= intrEvent
->when();
2755 SERIALIZE_SCALAR(intrEventTick
);
2760 NSGigE::unserialize(Checkpoint
*cp
, const std::string
§ion
)
2762 // Unserialize the PciDev base class
2763 PciDev::unserialize(cp
, section
);
2765 UNSERIALIZE_SCALAR(regs
.command
);
2766 UNSERIALIZE_SCALAR(regs
.config
);
2767 UNSERIALIZE_SCALAR(regs
.mear
);
2768 UNSERIALIZE_SCALAR(regs
.ptscr
);
2769 UNSERIALIZE_SCALAR(regs
.isr
);
2770 UNSERIALIZE_SCALAR(regs
.imr
);
2771 UNSERIALIZE_SCALAR(regs
.ier
);
2772 UNSERIALIZE_SCALAR(regs
.ihr
);
2773 UNSERIALIZE_SCALAR(regs
.txdp
);
2774 UNSERIALIZE_SCALAR(regs
.txdp_hi
);
2775 UNSERIALIZE_SCALAR(regs
.txcfg
);
2776 UNSERIALIZE_SCALAR(regs
.gpior
);
2777 UNSERIALIZE_SCALAR(regs
.rxdp
);
2778 UNSERIALIZE_SCALAR(regs
.rxdp_hi
);
2779 UNSERIALIZE_SCALAR(regs
.rxcfg
);
2780 UNSERIALIZE_SCALAR(regs
.pqcr
);
2781 UNSERIALIZE_SCALAR(regs
.wcsr
);
2782 UNSERIALIZE_SCALAR(regs
.pcr
);
2783 UNSERIALIZE_SCALAR(regs
.rfcr
);
2784 UNSERIALIZE_SCALAR(regs
.rfdr
);
2785 UNSERIALIZE_SCALAR(regs
.brar
);
2786 UNSERIALIZE_SCALAR(regs
.brdr
);
2787 UNSERIALIZE_SCALAR(regs
.srr
);
2788 UNSERIALIZE_SCALAR(regs
.mibc
);
2789 UNSERIALIZE_SCALAR(regs
.vrcr
);
2790 UNSERIALIZE_SCALAR(regs
.vtcr
);
2791 UNSERIALIZE_SCALAR(regs
.vdr
);
2792 UNSERIALIZE_SCALAR(regs
.ccsr
);
2793 UNSERIALIZE_SCALAR(regs
.tbicr
);
2794 UNSERIALIZE_SCALAR(regs
.tbisr
);
2795 UNSERIALIZE_SCALAR(regs
.tanar
);
2796 UNSERIALIZE_SCALAR(regs
.tanlpar
);
2797 UNSERIALIZE_SCALAR(regs
.taner
);
2798 UNSERIALIZE_SCALAR(regs
.tesr
);
2800 UNSERIALIZE_ARRAY(rom
.perfectMatch
, ETH_ADDR_LEN
);
2801 UNSERIALIZE_ARRAY(rom
.filterHash
, FHASH_SIZE
);
2803 UNSERIALIZE_SCALAR(ioEnable
);
2806 * unserialize the data fifos
2808 rxFifo
.unserialize("rxFifo", cp
, section
);
2809 txFifo
.unserialize("txFifo", cp
, section
);
2812 * unserialize the various helper variables
2814 bool txPacketExists
;
2815 UNSERIALIZE_SCALAR(txPacketExists
);
2816 if (txPacketExists
) {
2817 txPacket
= new PacketData(16384);
2818 txPacket
->unserialize("txPacket", cp
, section
);
2819 uint32_t txPktBufPtr
;
2820 UNSERIALIZE_SCALAR(txPktBufPtr
);
2821 txPacketBufPtr
= (uint8_t *) txPacket
->data
+ txPktBufPtr
;
2825 bool rxPacketExists
;
2826 UNSERIALIZE_SCALAR(rxPacketExists
);
2828 if (rxPacketExists
) {
2829 rxPacket
= new PacketData(16384);
2830 rxPacket
->unserialize("rxPacket", cp
, section
);
2831 uint32_t rxPktBufPtr
;
2832 UNSERIALIZE_SCALAR(rxPktBufPtr
);
2833 rxPacketBufPtr
= (uint8_t *) rxPacket
->data
+ rxPktBufPtr
;
2837 UNSERIALIZE_SCALAR(txXferLen
);
2838 UNSERIALIZE_SCALAR(rxXferLen
);
2841 * Unserialize Cached Descriptors
2843 UNSERIALIZE_SCALAR(rxDesc64
.link
);
2844 UNSERIALIZE_SCALAR(rxDesc64
.bufptr
);
2845 UNSERIALIZE_SCALAR(rxDesc64
.cmdsts
);
2846 UNSERIALIZE_SCALAR(rxDesc64
.extsts
);
2847 UNSERIALIZE_SCALAR(txDesc64
.link
);
2848 UNSERIALIZE_SCALAR(txDesc64
.bufptr
);
2849 UNSERIALIZE_SCALAR(txDesc64
.cmdsts
);
2850 UNSERIALIZE_SCALAR(txDesc64
.extsts
);
2851 UNSERIALIZE_SCALAR(rxDesc32
.link
);
2852 UNSERIALIZE_SCALAR(rxDesc32
.bufptr
);
2853 UNSERIALIZE_SCALAR(rxDesc32
.cmdsts
);
2854 UNSERIALIZE_SCALAR(rxDesc32
.extsts
);
2855 UNSERIALIZE_SCALAR(txDesc32
.link
);
2856 UNSERIALIZE_SCALAR(txDesc32
.bufptr
);
2857 UNSERIALIZE_SCALAR(txDesc32
.cmdsts
);
2858 UNSERIALIZE_SCALAR(txDesc32
.extsts
);
2859 UNSERIALIZE_SCALAR(extstsEnable
);
2862 * unserialize tx state machine
2865 UNSERIALIZE_SCALAR(txState
);
2866 this->txState
= (TxState
) txState
;
2867 UNSERIALIZE_SCALAR(txEnable
);
2868 UNSERIALIZE_SCALAR(CTDD
);
2869 UNSERIALIZE_SCALAR(txFragPtr
);
2870 UNSERIALIZE_SCALAR(txDescCnt
);
2872 UNSERIALIZE_SCALAR(txDmaState
);
2873 this->txDmaState
= (DmaState
) txDmaState
;
2874 UNSERIALIZE_SCALAR(txKickTick
);
2876 txKickEvent
.schedule(txKickTick
);
2879 * unserialize rx state machine
2882 UNSERIALIZE_SCALAR(rxState
);
2883 this->rxState
= (RxState
) rxState
;
2884 UNSERIALIZE_SCALAR(rxEnable
);
2885 UNSERIALIZE_SCALAR(CRDD
);
2886 UNSERIALIZE_SCALAR(rxPktBytes
);
2887 UNSERIALIZE_SCALAR(rxFragPtr
);
2888 UNSERIALIZE_SCALAR(rxDescCnt
);
2890 UNSERIALIZE_SCALAR(rxDmaState
);
2891 this->rxDmaState
= (DmaState
) rxDmaState
;
2892 UNSERIALIZE_SCALAR(rxKickTick
);
2894 rxKickEvent
.schedule(rxKickTick
);
2897 * Unserialize EEPROM state machine
2900 UNSERIALIZE_SCALAR(eepromState
);
2901 this->eepromState
= (EEPROMState
) eepromState
;
2902 UNSERIALIZE_SCALAR(eepromClk
);
2903 UNSERIALIZE_SCALAR(eepromBitsToRx
);
2904 UNSERIALIZE_SCALAR(eepromOpcode
);
2905 UNSERIALIZE_SCALAR(eepromAddress
);
2906 UNSERIALIZE_SCALAR(eepromData
);
2909 * If there's a pending transmit, reschedule it now
2912 UNSERIALIZE_SCALAR(transmitTick
);
2914 txEvent
.schedule(curTick
+ transmitTick
);
2917 * unserialize receive address filter settings
2919 UNSERIALIZE_SCALAR(rxFilterEnable
);
2920 UNSERIALIZE_SCALAR(acceptBroadcast
);
2921 UNSERIALIZE_SCALAR(acceptMulticast
);
2922 UNSERIALIZE_SCALAR(acceptUnicast
);
2923 UNSERIALIZE_SCALAR(acceptPerfect
);
2924 UNSERIALIZE_SCALAR(acceptArp
);
2925 UNSERIALIZE_SCALAR(multicastHashEnable
);
2928 * Keep track of pending interrupt status.
2930 UNSERIALIZE_SCALAR(intrTick
);
2931 UNSERIALIZE_SCALAR(cpuPendingIntr
);
2933 UNSERIALIZE_SCALAR(intrEventTick
);
2934 if (intrEventTick
) {
2935 intrEvent
= new IntrEvent(this, true);
2936 intrEvent
->schedule(intrEventTick
);
2940 * re-add addrRanges to bus bridges
2943 pioInterface
->addAddrRange(RangeSize(BARAddrs
[0], BARSize
[0]));
2944 pioInterface
->addAddrRange(RangeSize(BARAddrs
[1], BARSize
[1]));
2949 NSGigE::cacheAccess(MemReqPtr
&req
)
2951 Addr daddr
= req
->paddr
& 0xfff;
2952 DPRINTF(EthernetPIO
, "timing access to paddr=%#x (daddr=%#x)\n",
2955 if (!pioDelayWrite
|| !req
->cmd
.isWrite())
2956 return curTick
+ pioLatency
;
2958 int cpu
= (req
->xc
->regs
.ipr
[TheISA::IPR_PALtemp16
] >> 8) & 0xff;
2959 std::list
<RegWriteData
> &wq
= writeQueue
[cpu
];
2961 panic("WriteQueue for cpu %d empty timing daddr=%#x", cpu
, daddr
);
2963 const RegWriteData
&data
= wq
.front();
2964 if (data
.daddr
!= daddr
)
2965 panic("read mismatch on cpu %d, daddr functional=%#x timing=%#x",
2966 cpu
, data
.daddr
, daddr
);
2969 if ((data
.value
& (CR_TXD
| CR_TXE
)) == CR_TXE
) {
2971 if (txState
== txIdle
)
2975 if ((data
.value
& (CR_RXD
| CR_RXE
)) == CR_RXE
) {
2977 if (rxState
== rxIdle
)
2983 return curTick
+ pioLatency
;
2986 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt
)
2988 SimObjectParam
<EtherInt
*> peer
;
2989 SimObjectParam
<NSGigE
*> device
;
2991 END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt
)
2993 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt
)
2995 INIT_PARAM_DFLT(peer
, "peer interface", NULL
),
2996 INIT_PARAM(device
, "Ethernet device of this interface")
2998 END_INIT_SIM_OBJECT_PARAMS(NSGigEInt
)
3000 CREATE_SIM_OBJECT(NSGigEInt
)
3002 NSGigEInt
*dev_int
= new NSGigEInt(getInstanceName(), device
);
3004 EtherInt
*p
= (EtherInt
*)peer
;
3006 dev_int
->setPeer(p
);
3007 p
->setPeer(dev_int
);
3013 REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt
)
3016 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE
)
3021 SimObjectParam
<MemoryController
*> mmu
;
3022 SimObjectParam
<PhysicalMemory
*> physmem
;
3023 SimObjectParam
<PciConfigAll
*> configspace
;
3024 SimObjectParam
<PciConfigData
*> configdata
;
3025 SimObjectParam
<Platform
*> platform
;
3026 Param
<uint32_t> pci_bus
;
3027 Param
<uint32_t> pci_dev
;
3028 Param
<uint32_t> pci_func
;
3030 SimObjectParam
<HierParams
*> hier
;
3031 SimObjectParam
<Bus
*> pio_bus
;
3032 SimObjectParam
<Bus
*> dma_bus
;
3033 SimObjectParam
<Bus
*> payload_bus
;
3034 Param
<bool> dma_desc_free
;
3035 Param
<bool> dma_data_free
;
3036 Param
<Tick
> dma_read_delay
;
3037 Param
<Tick
> dma_write_delay
;
3038 Param
<Tick
> dma_read_factor
;
3039 Param
<Tick
> dma_write_factor
;
3040 Param
<bool> dma_no_allocate
;
3041 Param
<Tick
> pio_latency
;
3042 Param
<bool> pio_delay_write
;
3043 Param
<Tick
> intr_delay
;
3045 Param
<Tick
> rx_delay
;
3046 Param
<Tick
> tx_delay
;
3047 Param
<uint32_t> rx_fifo_size
;
3048 Param
<uint32_t> tx_fifo_size
;
3050 Param
<bool> rx_filter
;
3051 Param
<string
> hardware_address
;
3052 Param
<bool> rx_thread
;
3053 Param
<bool> tx_thread
;
3055 END_DECLARE_SIM_OBJECT_PARAMS(NSGigE
)
3057 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE
)
3059 INIT_PARAM(clock
, "State machine processor frequency"),
3061 INIT_PARAM(addr
, "Device Address"),
3062 INIT_PARAM(mmu
, "Memory Controller"),
3063 INIT_PARAM(physmem
, "Physical Memory"),
3064 INIT_PARAM(configspace
, "PCI Configspace"),
3065 INIT_PARAM(configdata
, "PCI Config data"),
3066 INIT_PARAM(platform
, "Platform"),
3067 INIT_PARAM(pci_bus
, "PCI bus"),
3068 INIT_PARAM(pci_dev
, "PCI device number"),
3069 INIT_PARAM(pci_func
, "PCI function code"),
3071 INIT_PARAM(hier
, "Hierarchy global variables"),
3072 INIT_PARAM(pio_bus
, ""),
3073 INIT_PARAM(dma_bus
, ""),
3074 INIT_PARAM(payload_bus
, "The IO Bus to attach to for payload"),
3075 INIT_PARAM(dma_desc_free
, "DMA of Descriptors is free"),
3076 INIT_PARAM(dma_data_free
, "DMA of Data is free"),
3077 INIT_PARAM(dma_read_delay
, "fixed delay for dma reads"),
3078 INIT_PARAM(dma_write_delay
, "fixed delay for dma writes"),
3079 INIT_PARAM(dma_read_factor
, "multiplier for dma reads"),
3080 INIT_PARAM(dma_write_factor
, "multiplier for dma writes"),
3081 INIT_PARAM(dma_no_allocate
, "Should DMA reads allocate cache lines"),
3082 INIT_PARAM(pio_latency
, "Programmed IO latency in bus cycles"),
3083 INIT_PARAM(pio_delay_write
, ""),
3084 INIT_PARAM(intr_delay
, "Interrupt Delay in microseconds"),
3086 INIT_PARAM(rx_delay
, "Receive Delay"),
3087 INIT_PARAM(tx_delay
, "Transmit Delay"),
3088 INIT_PARAM(rx_fifo_size
, "max size in bytes of rxFifo"),
3089 INIT_PARAM(tx_fifo_size
, "max size in bytes of txFifo"),
3091 INIT_PARAM(rx_filter
, "Enable Receive Filter"),
3092 INIT_PARAM(hardware_address
, "Ethernet Hardware Address"),
3093 INIT_PARAM(rx_thread
, ""),
3094 INIT_PARAM(tx_thread
, "")
3096 END_INIT_SIM_OBJECT_PARAMS(NSGigE
)
3099 CREATE_SIM_OBJECT(NSGigE
)
3101 NSGigE::Params
*params
= new NSGigE::Params
;
3103 params
->name
= getInstanceName();
3105 params
->clock
= clock
;
3108 params
->pmem
= physmem
;
3109 params
->configSpace
= configspace
;
3110 params
->configData
= configdata
;
3111 params
->plat
= platform
;
3112 params
->busNum
= pci_bus
;
3113 params
->deviceNum
= pci_dev
;
3114 params
->functionNum
= pci_func
;
3116 params
->hier
= hier
;
3117 params
->pio_bus
= pio_bus
;
3118 params
->header_bus
= dma_bus
;
3119 params
->payload_bus
= payload_bus
;
3120 params
->dma_desc_free
= dma_desc_free
;
3121 params
->dma_data_free
= dma_data_free
;
3122 params
->dma_read_delay
= dma_read_delay
;
3123 params
->dma_write_delay
= dma_write_delay
;
3124 params
->dma_read_factor
= dma_read_factor
;
3125 params
->dma_write_factor
= dma_write_factor
;
3126 params
->dma_no_allocate
= dma_no_allocate
;
3127 params
->pio_latency
= pio_latency
;
3128 params
->pio_delay_write
= pio_delay_write
;
3129 params
->intr_delay
= intr_delay
;
3131 params
->rx_delay
= rx_delay
;
3132 params
->tx_delay
= tx_delay
;
3133 params
->rx_fifo_size
= rx_fifo_size
;
3134 params
->tx_fifo_size
= tx_fifo_size
;
3136 params
->rx_filter
= rx_filter
;
3137 params
->eaddr
= hardware_address
;
3138 params
->rx_thread
= rx_thread
;
3139 params
->tx_thread
= tx_thread
;
3141 return new NSGigE(params
);
3144 REGISTER_SIM_OBJECT("NSGigE", NSGigE
)