/*
 * Copyright (c) 2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* @file
 * Device model for Intel's 8254x line of gigabit ethernet controllers.
 * In particular an 82547 revision 2 (82547GI) MAC because it seems to have the
 * fewest workarounds in the driver. It will probably work with most of the
 * other MACs with slight modifications.
 *
 * @todo really there are multiple dma engines.. we should implement them.
 */
45 #include "base/inet.hh"
46 #include "base/trace.hh"
47 #include "dev/i8254xGBe.hh"
48 #include "mem/packet.hh"
49 #include "mem/packet_access.hh"
50 #include "params/IGbE.hh"
51 #include "sim/stats.hh"
52 #include "sim/system.hh"
54 using namespace iGbReg
;
57 IGbE::IGbE(const Params
*p
)
58 : EtherDevice(p
), etherInt(NULL
), drainEvent(NULL
), useFlowControl(p
->use_flow_control
),
59 rxFifo(p
->rx_fifo_size
), txFifo(p
->tx_fifo_size
), rxTick(false),
60 txTick(false), txFifoTick(false), rxDmaPacket(false), pktOffset(0),
61 fetchDelay(p
->fetch_delay
), wbDelay(p
->wb_delay
),
62 fetchCompDelay(p
->fetch_comp_delay
), wbCompDelay(p
->wb_comp_delay
),
63 rxWriteDelay(p
->rx_write_delay
), txReadDelay(p
->tx_read_delay
),
64 rdtrEvent(this), radvEvent(this),
65 tadvEvent(this), tidvEvent(this), tickEvent(this), interEvent(this),
66 rxDescCache(this, name()+".RxDesc", p
->rx_desc_cache_size
),
67 txDescCache(this, name()+".TxDesc", p
->tx_desc_cache_size
),
68 clock(p
->clock
), lastInterrupt(0)
70 etherInt
= new IGbEInt(name() + ".int", this);
72 // Initialized internal registers per Intel documentation
73 // All registers intialized to 0 by per register constructor
78 regs
.sts
.speed(3); // Say we're 1000Mbps
79 regs
.sts
.fd(1); // full duplex
80 regs
.sts
.lu(1); // link up
86 regs
.rxdctl
.wthresh(1);
100 // clear all 64 16 bit words of the eeprom
101 memset(&flash
, 0, EEPROM_SIZE
*2);
103 // Set the MAC address
104 memcpy(flash
, p
->hardware_address
.bytes(), ETH_ADDR_LEN
);
105 for (int x
= 0; x
< ETH_ADDR_LEN
/2; x
++)
106 flash
[x
] = htobe(flash
[x
]);
109 for (int x
= 0; x
< EEPROM_SIZE
; x
++)
110 csum
+= htobe(flash
[x
]);
113 // Magic happy checksum value
114 flash
[EEPROM_SIZE
-1] = htobe((uint16_t)(EEPROM_CSUM
- csum
));
116 // Store the MAC address as queue ID
117 macAddr
= p
->hardware_address
;
131 IGbE::getEthPort(const std::string
&if_name
, int idx
)
134 if (if_name
== "interface") {
135 if (etherInt
->getPeer())
136 panic("Port already connected to\n");
143 IGbE::writeConfig(PacketPtr pkt
)
145 int offset
= pkt
->getAddr() & PCI_CONFIG_SIZE
;
146 if (offset
< PCI_DEVICE_SPECIFIC
)
147 PciDev::writeConfig(pkt
);
149 panic("Device specific PCI config space not implemented.\n");
152 /// Some work may need to be done here based for the pci COMMAND bits.
159 IGbE::read(PacketPtr pkt
)
164 if (!getBAR(pkt
->getAddr(), bar
, daddr
))
165 panic("Invalid PCI memory access to unmapped memory.\n");
167 // Only Memory register BAR is allowed
170 // Only 32bit accesses allowed
171 assert(pkt
->getSize() == 4);
173 DPRINTF(Ethernet
, "Read device register %#X\n", daddr
);
178 /// Handle read of register here
184 pkt
->set
<uint32_t>(regs
.ctrl());
187 pkt
->set
<uint32_t>(regs
.sts());
190 pkt
->set
<uint32_t>(regs
.eecd());
193 pkt
->set
<uint32_t>(regs
.eerd());
196 pkt
->set
<uint32_t>(regs
.ctrl_ext());
199 pkt
->set
<uint32_t>(regs
.mdic());
202 DPRINTF(Ethernet
, "Reading ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n", regs
.icr(),
203 regs
.imr
, regs
.iam
, regs
.ctrl_ext
.iame());
204 pkt
->set
<uint32_t>(regs
.icr());
205 if (regs
.icr
.int_assert() || regs
.imr
== 0) {
206 regs
.icr
= regs
.icr() & ~mask(30);
207 DPRINTF(Ethernet
, "Cleared ICR. ICR=%#x\n", regs
.icr());
209 if (regs
.ctrl_ext
.iame() && regs
.icr
.int_assert())
210 regs
.imr
&= ~regs
.iam
;
214 // This is only useful for MSI, but the driver reads it every time
215 // Just don't do anything
216 pkt
->set
<uint32_t>(0);
219 pkt
->set
<uint32_t>(regs
.itr());
222 pkt
->set
<uint32_t>(regs
.rctl());
225 pkt
->set
<uint32_t>(regs
.fcttv());
228 pkt
->set
<uint32_t>(regs
.tctl());
231 pkt
->set
<uint32_t>(regs
.pba());
235 pkt
->set
<uint32_t>(0); // We don't care, so just return 0
238 pkt
->set
<uint32_t>(regs
.fcrtl());
241 pkt
->set
<uint32_t>(regs
.fcrth());
244 pkt
->set
<uint32_t>(regs
.rdba
.rdbal());
247 pkt
->set
<uint32_t>(regs
.rdba
.rdbah());
250 pkt
->set
<uint32_t>(regs
.rdlen());
253 pkt
->set
<uint32_t>(regs
.srrctl());
256 pkt
->set
<uint32_t>(regs
.rdh());
259 pkt
->set
<uint32_t>(regs
.rdt());
262 pkt
->set
<uint32_t>(regs
.rdtr());
263 if (regs
.rdtr
.fpd()) {
264 rxDescCache
.writeback(0);
265 DPRINTF(EthernetIntr
, "Posting interrupt because of RDTR.FPD write\n");
266 postInterrupt(IT_RXT
);
271 pkt
->set
<uint32_t>(regs
.rxdctl());
274 pkt
->set
<uint32_t>(regs
.radv());
277 pkt
->set
<uint32_t>(regs
.tdba
.tdbal());
280 pkt
->set
<uint32_t>(regs
.tdba
.tdbah());
283 pkt
->set
<uint32_t>(regs
.tdlen());
286 pkt
->set
<uint32_t>(regs
.tdh());
289 pkt
->set
<uint32_t>(regs
.txdca_ctl());
292 pkt
->set
<uint32_t>(regs
.tdt());
295 pkt
->set
<uint32_t>(regs
.tidv());
298 pkt
->set
<uint32_t>(regs
.txdctl());
301 pkt
->set
<uint32_t>(regs
.tadv());
304 pkt
->set
<uint32_t>(regs
.tdwba
& mask(32));
307 pkt
->set
<uint32_t>(regs
.tdwba
>> 32);
310 pkt
->set
<uint32_t>(regs
.rxcsum());
313 pkt
->set
<uint32_t>(regs
.rlpml
);
316 pkt
->set
<uint32_t>(regs
.rfctl());
319 pkt
->set
<uint32_t>(regs
.manc());
322 pkt
->set
<uint32_t>(regs
.swsm());
326 pkt
->set
<uint32_t>(regs
.fwsm());
329 pkt
->set
<uint32_t>(regs
.sw_fw_sync
);
332 if (!(daddr
>= REG_VFTA
&& daddr
< (REG_VFTA
+ VLAN_FILTER_TABLE_SIZE
*4)) &&
333 !(daddr
>= REG_RAL
&& daddr
< (REG_RAL
+ RCV_ADDRESS_TABLE_SIZE
*8)) &&
334 !(daddr
>= REG_MTA
&& daddr
< (REG_MTA
+ MULTICAST_TABLE_SIZE
*4)) &&
335 !(daddr
>= REG_CRCERRS
&& daddr
< (REG_CRCERRS
+ STATS_REGS_SIZE
)))
336 panic("Read request to unknown register number: %#x\n", daddr
);
338 pkt
->set
<uint32_t>(0);
341 pkt
->makeAtomicResponse();
346 IGbE::write(PacketPtr pkt
)
352 if (!getBAR(pkt
->getAddr(), bar
, daddr
))
353 panic("Invalid PCI memory access to unmapped memory.\n");
355 // Only Memory register BAR is allowed
358 // Only 32bit accesses allowed
359 assert(pkt
->getSize() == sizeof(uint32_t));
361 DPRINTF(Ethernet
, "Wrote device register %#X value %#X\n", daddr
, pkt
->get
<uint32_t>());
364 /// Handle write of register here
366 uint32_t val
= pkt
->get
<uint32_t>();
374 if (regs
.ctrl
.tfce())
375 warn("TX Flow control enabled, should implement\n");
376 if (regs
.ctrl
.rfce())
377 warn("RX Flow control enabled, should implement\n");
387 oldClk
= regs
.eecd
.sk();
389 // See if this is a eeprom access and emulate accordingly
390 if (!oldClk
&& regs
.eecd
.sk()) {
392 eeOpcode
= eeOpcode
<< 1 | regs
.eecd
.din();
394 } else if (eeAddrBits
< 8 && eeOpcode
== EEPROM_READ_OPCODE_SPI
) {
395 eeAddr
= eeAddr
<< 1 | regs
.eecd
.din();
397 } else if (eeDataBits
< 16 && eeOpcode
== EEPROM_READ_OPCODE_SPI
) {
398 assert(eeAddr
>>1 < EEPROM_SIZE
);
399 DPRINTF(EthernetEEPROM
, "EEPROM bit read: %d word: %#X\n",
400 flash
[eeAddr
>>1] >> eeDataBits
& 0x1, flash
[eeAddr
>>1]);
401 regs
.eecd
.dout((flash
[eeAddr
>>1] >> (15-eeDataBits
)) & 0x1);
403 } else if (eeDataBits
< 8 && eeOpcode
== EEPROM_RDSR_OPCODE_SPI
) {
407 panic("What's going on with eeprom interface? opcode:"
408 " %#x:%d addr: %#x:%d, data: %d\n", (uint32_t)eeOpcode
,
409 (uint32_t)eeOpBits
, (uint32_t)eeAddr
,
410 (uint32_t)eeAddrBits
, (uint32_t)eeDataBits
);
412 // Reset everything for the next command
413 if ((eeDataBits
== 16 && eeOpcode
== EEPROM_READ_OPCODE_SPI
) ||
414 (eeDataBits
== 8 && eeOpcode
== EEPROM_RDSR_OPCODE_SPI
)) {
422 DPRINTF(EthernetEEPROM
, "EEPROM: opcode: %#X:%d addr: %#X:%d\n",
423 (uint32_t)eeOpcode
, (uint32_t) eeOpBits
,
424 (uint32_t)eeAddr
>>1, (uint32_t)eeAddrBits
);
425 if (eeOpBits
== 8 && !(eeOpcode
== EEPROM_READ_OPCODE_SPI
||
426 eeOpcode
== EEPROM_RDSR_OPCODE_SPI
))
427 panic("Unknown eeprom opcode: %#X:%d\n", (uint32_t)eeOpcode
,
432 // If driver requests eeprom access, immediately give it to it
433 regs
.eecd
.ee_gnt(regs
.eecd
.ee_req());
437 if (regs
.eerd
.start()) {
439 assert(regs
.eerd
.addr() < EEPROM_SIZE
);
440 regs
.eerd
.data(flash
[regs
.eerd
.addr()]);
442 DPRINTF(EthernetEEPROM
, "EEPROM: read addr: %#X data %#x\n",
443 regs
.eerd
.addr(), regs
.eerd
.data());
449 panic("No support for interrupt on mdic complete\n");
450 if (regs
.mdic
.phyadd() != 1)
451 panic("No support for reading anything but phy\n");
452 DPRINTF(Ethernet
, "%s phy address %x\n", regs
.mdic
.op() == 1 ? "Writing"
453 : "Reading", regs
.mdic
.regadd());
454 switch (regs
.mdic
.regadd()) {
456 regs
.mdic
.data(0x796D); // link up
459 regs
.mdic
.data(params()->phy_pid
);
462 regs
.mdic
.data(params()->phy_epid
);
465 regs
.mdic
.data(0x7C00);
468 regs
.mdic
.data(0x3000);
471 regs
.mdic
.data(0x180); // some random length
479 DPRINTF(Ethernet
, "Writing ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n", regs
.icr(),
480 regs
.imr
, regs
.iam
, regs
.ctrl_ext
.iame());
481 if (regs
.ctrl_ext
.iame())
482 regs
.imr
&= ~regs
.iam
;
483 regs
.icr
= ~bits(val
,30,0) & regs
.icr();
490 DPRINTF(EthernetIntr
, "Posting interrupt because of ICS write\n");
491 postInterrupt((IntTypes
)val
);
507 if (regs
.rctl
.rst()) {
509 DPRINTF(EthernetSM
, "RXS: Got RESET!\n");
527 if (regs
.tctl
.en() && !oldtctl
.en()) {
533 regs
.pba
.txa(64 - regs
.pba
.rxa());
543 ; // We don't care, so don't store anything
546 warn("Writing to IVAR0, ignoring...\n");
555 regs
.rdba
.rdbal( val
& ~mask(4));
556 rxDescCache
.areaChanged();
559 regs
.rdba
.rdbah(val
);
560 rxDescCache
.areaChanged();
563 regs
.rdlen
= val
& ~mask(7);
564 rxDescCache
.areaChanged();
571 rxDescCache
.areaChanged();
575 DPRINTF(EthernetSM
, "RXS: RDT Updated.\n");
576 if (getState() == SimObject::Running
) {
577 DPRINTF(EthernetSM
, "RXS: RDT Fetching Descriptors!\n");
578 rxDescCache
.fetchDescriptors();
580 DPRINTF(EthernetSM
, "RXS: RDT NOT Fetching Desc b/c draining!\n");
593 regs
.tdba
.tdbal( val
& ~mask(4));
594 txDescCache
.areaChanged();
597 regs
.tdba
.tdbah(val
);
598 txDescCache
.areaChanged();
601 regs
.tdlen
= val
& ~mask(7);
602 txDescCache
.areaChanged();
606 txDescCache
.areaChanged();
609 regs
.txdca_ctl
= val
;
610 if (regs
.txdca_ctl
.enabled())
611 panic("No support for DCA\n");
615 DPRINTF(EthernetSM
, "TXS: TX Tail pointer updated\n");
616 if (getState() == SimObject::Running
) {
617 DPRINTF(EthernetSM
, "TXS: TDT Fetching Descriptors!\n");
618 txDescCache
.fetchDescriptors();
620 DPRINTF(EthernetSM
, "TXS: TDT NOT Fetching Desc b/c draining!\n");
633 regs
.tdwba
&= ~mask(32);
635 txDescCache
.completionWriteback(regs
.tdwba
& ~mask(1), regs
.tdwba
& mask(1));
638 regs
.tdwba
&= mask(32);
639 regs
.tdwba
|= (uint64_t)val
<< 32;
640 txDescCache
.completionWriteback(regs
.tdwba
& ~mask(1), regs
.tdwba
& mask(1));
650 if (regs
.rfctl
.exsten())
651 panic("Extended RX descriptors not implemented\n");
658 if (regs
.fwsm
.eep_fw_semaphore())
659 regs
.swsm
.swesmbi(0);
662 regs
.sw_fw_sync
= val
;
665 if (!(daddr
>= REG_VFTA
&& daddr
< (REG_VFTA
+ VLAN_FILTER_TABLE_SIZE
*4)) &&
666 !(daddr
>= REG_RAL
&& daddr
< (REG_RAL
+ RCV_ADDRESS_TABLE_SIZE
*8)) &&
667 !(daddr
>= REG_MTA
&& daddr
< (REG_MTA
+ MULTICAST_TABLE_SIZE
*4)))
668 panic("Write request to unknown register number: %#x\n", daddr
);
671 pkt
->makeAtomicResponse();
676 IGbE::postInterrupt(IntTypes t
, bool now
)
680 // Interrupt is already pending
681 if (t
& regs
.icr() && !now
)
684 regs
.icr
= regs
.icr() | t
;
686 Tick itr_interval
= Clock::Int::ns
* 256 * regs
.itr
.interval();
687 DPRINTF(EthernetIntr
, "EINT: postInterrupt() curTick: %d itr: %d interval: %d\n",
688 curTick
, regs
.itr
.interval(), itr_interval
);
690 if (regs
.itr
.interval() == 0 || now
|| lastInterrupt
+ itr_interval
<= curTick
) {
691 if (interEvent
.scheduled()) {
692 deschedule(interEvent
);
696 Tick int_time
= lastInterrupt
+ itr_interval
;
697 assert(int_time
> 0);
698 DPRINTF(EthernetIntr
, "EINT: Scheduling timer interrupt for tick %d\n",
700 if (!interEvent
.scheduled()) {
701 schedule(interEvent
, int_time
);
707 IGbE::delayIntEvent()
719 if (!(regs
.icr() & regs
.imr
)) {
720 DPRINTF(Ethernet
, "Interrupt Masked. Not Posting\n");
724 DPRINTF(Ethernet
, "Posting Interrupt\n");
727 if (interEvent
.scheduled()) {
728 deschedule(interEvent
);
731 if (rdtrEvent
.scheduled()) {
733 deschedule(rdtrEvent
);
735 if (radvEvent
.scheduled()) {
737 deschedule(radvEvent
);
739 if (tadvEvent
.scheduled()) {
741 deschedule(tadvEvent
);
743 if (tidvEvent
.scheduled()) {
745 deschedule(tidvEvent
);
748 regs
.icr
.int_assert(1);
749 DPRINTF(EthernetIntr
, "EINT: Posting interrupt to CPU now. Vector %#x\n",
754 lastInterrupt
= curTick
;
760 if (regs
.icr
.int_assert()) {
761 regs
.icr
.int_assert(0);
762 DPRINTF(EthernetIntr
, "EINT: Clearing interrupt to CPU now. Vector %#x\n",
771 DPRINTF(Ethernet
, "Checking interrupts icr: %#x imr: %#x\n", regs
.icr(),
773 // Check if we need to clear the cpu interrupt
774 if (!(regs
.icr() & regs
.imr
)) {
775 DPRINTF(Ethernet
, "Mask cleaned all interrupts\n");
776 if (interEvent
.scheduled())
777 deschedule(interEvent
);
778 if (regs
.icr
.int_assert())
781 DPRINTF(Ethernet
, "ITR = %#X itr.interval = %#X\n", regs
.itr(), regs
.itr
.interval());
783 if (regs
.icr() & regs
.imr
) {
784 if (regs
.itr
.interval() == 0) {
787 DPRINTF(Ethernet
, "Possibly scheduling interrupt because of imr write\n");
788 if (!interEvent
.scheduled()) {
789 DPRINTF(Ethernet
, "Scheduling for %d\n", curTick
+ Clock::Int::ns
790 * 256 * regs
.itr
.interval());
792 curTick
+ Clock::Int::ns
* 256 * regs
.itr
.interval());
801 IGbE::RxDescCache::RxDescCache(IGbE
*i
, const std::string n
, int s
)
802 : DescCache
<RxDesc
>(i
, n
, s
), pktDone(false), splitCount(0),
803 pktEvent(this), pktHdrEvent(this), pktDataEvent(this)
806 annSmFetch
= "RX Desc Fetch";
807 annSmWb
= "RX Desc Writeback";
808 annUnusedDescQ
= "RX Unused Descriptors";
809 annUnusedCacheQ
= "RX Unused Descriptor Cache";
810 annUsedCacheQ
= "RX Used Descriptor Cache";
811 annUsedDescQ
= "RX Used Descriptors";
812 annDescQ
= "RX Descriptors";
816 IGbE::RxDescCache::pktSplitDone()
819 DPRINTF(EthernetDesc
, "Part of split packet done: splitcount now %d\n", splitCount
);
820 assert(splitCount
<= 2);
824 DPRINTF(EthernetDesc
, "Part of split packet done: calling pktComplete()\n");
829 IGbE::RxDescCache::writePacket(EthPacketPtr packet
, int pkt_offset
)
831 assert(unusedCache
.size());
832 //if (!unusedCache.size())
837 int buf_len
, hdr_len
;
839 RxDesc
*desc
= unusedCache
.front();
840 switch (igbe
->regs
.srrctl
.desctype()) {
842 assert(pkt_offset
== 0);
843 bytesCopied
= packet
->length
;
844 DPRINTF(EthernetDesc
, "Packet Length: %d Desc Size: %d\n",
845 packet
->length
, igbe
->regs
.rctl
.descSize());
846 assert(packet
->length
< igbe
->regs
.rctl
.descSize());
847 igbe
->dmaWrite(igbe
->platform
->pciToDma(desc
->legacy
.buf
), packet
->length
, &pktEvent
,
848 packet
->data
, igbe
->rxWriteDelay
);
850 case RXDT_ADV_ONEBUF
:
851 assert(pkt_offset
== 0);
852 bytesCopied
= packet
->length
;
853 buf_len
= igbe
->regs
.rctl
.lpe() ? igbe
->regs
.srrctl
.bufLen() :
854 igbe
->regs
.rctl
.descSize();
855 DPRINTF(EthernetDesc
, "Packet Length: %d srrctl: %#x Desc Size: %d\n",
856 packet
->length
, igbe
->regs
.srrctl(), buf_len
);
857 assert(packet
->length
< buf_len
);
858 igbe
->dmaWrite(igbe
->platform
->pciToDma(desc
->adv_read
.pkt
), packet
->length
, &pktEvent
,
859 packet
->data
, igbe
->rxWriteDelay
);
860 desc
->adv_wb
.header_len
= htole(0);
861 desc
->adv_wb
.sph
= htole(0);
862 desc
->adv_wb
.pkt_len
= htole((uint16_t)(pktPtr
->length
));
864 case RXDT_ADV_SPLIT_A
:
867 buf_len
= igbe
->regs
.rctl
.lpe() ? igbe
->regs
.srrctl
.bufLen() :
868 igbe
->regs
.rctl
.descSize();
869 hdr_len
= igbe
->regs
.rctl
.lpe() ? igbe
->regs
.srrctl
.hdrLen() : 0;
870 DPRINTF(EthernetDesc
, "lpe: %d Packet Length: %d offset: %d srrctl: %#x hdr addr: %#x Hdr Size: %d desc addr: %#x Desc Size: %d\n",
871 igbe
->regs
.rctl
.lpe(), packet
->length
, pkt_offset
, igbe
->regs
.srrctl(), desc
->adv_read
.hdr
, hdr_len
, desc
->adv_read
.pkt
, buf_len
);
873 split_point
= hsplit(pktPtr
);
875 if (packet
->length
<= hdr_len
) {
876 bytesCopied
= packet
->length
;
877 assert(pkt_offset
== 0);
878 DPRINTF(EthernetDesc
, "Header Splitting: Entire packet being placed in header\n");
879 igbe
->dmaWrite(igbe
->platform
->pciToDma(desc
->adv_read
.hdr
), packet
->length
, &pktEvent
,
880 packet
->data
, igbe
->rxWriteDelay
);
881 desc
->adv_wb
.header_len
= htole((uint16_t)packet
->length
);
882 desc
->adv_wb
.sph
= htole(0);
883 desc
->adv_wb
.pkt_len
= htole(0);
884 } else if (split_point
) {
886 // we are only copying some data, header/data has already been
888 int max_to_copy
= std::min(packet
->length
- pkt_offset
, buf_len
);
889 bytesCopied
+= max_to_copy
;
890 DPRINTF(EthernetDesc
, "Header Splitting: Continuing data buffer copy\n");
891 igbe
->dmaWrite(igbe
->platform
->pciToDma(desc
->adv_read
.pkt
),max_to_copy
, &pktEvent
,
892 packet
->data
+ pkt_offset
, igbe
->rxWriteDelay
);
893 desc
->adv_wb
.header_len
= htole(0);
894 desc
->adv_wb
.pkt_len
= htole((uint16_t)max_to_copy
);
895 desc
->adv_wb
.sph
= htole(0);
897 int max_to_copy
= std::min(packet
->length
- split_point
, buf_len
);
898 bytesCopied
+= max_to_copy
+ split_point
;
900 DPRINTF(EthernetDesc
, "Header Splitting: splitting at %d\n",
902 igbe
->dmaWrite(igbe
->platform
->pciToDma(desc
->adv_read
.hdr
), split_point
, &pktHdrEvent
,
903 packet
->data
, igbe
->rxWriteDelay
);
904 igbe
->dmaWrite(igbe
->platform
->pciToDma(desc
->adv_read
.pkt
),
905 max_to_copy
, &pktDataEvent
, packet
->data
+ split_point
, igbe
->rxWriteDelay
);
906 desc
->adv_wb
.header_len
= htole(split_point
);
907 desc
->adv_wb
.sph
= 1;
908 desc
->adv_wb
.pkt_len
= htole((uint16_t)(max_to_copy
));
911 panic("Header split not fitting within header buffer or undecodable"
912 " packet not fitting in header unsupported\n");
916 panic("Unimplemnted RX receive buffer type: %d\n",
917 igbe
->regs
.srrctl
.desctype());
924 IGbE::RxDescCache::pktComplete()
926 assert(unusedCache
.size());
928 desc
= unusedCache
.front();
930 igbe
->anBegin("RXS", "Update Desc");
932 uint16_t crcfixup
= igbe
->regs
.rctl
.secrc() ? 0 : 4 ;
933 DPRINTF(EthernetDesc
, "pktPtr->length: %d bytesCopied: %d stripcrc offset: %d value written: %d %d\n",
934 pktPtr
->length
, bytesCopied
, crcfixup
,
935 htole((uint16_t)(pktPtr
->length
+ crcfixup
)),
936 (uint16_t)(pktPtr
->length
+ crcfixup
));
938 // no support for anything but starting at 0
939 assert(igbe
->regs
.rxcsum
.pcss() == 0);
941 DPRINTF(EthernetDesc
, "Packet written to memory updating Descriptor\n");
943 uint16_t status
= RXDS_DD
;
945 uint16_t ext_err
= 0;
950 assert(bytesCopied
<= pktPtr
->length
);
951 if (bytesCopied
== pktPtr
->length
)
957 DPRINTF(EthernetDesc
, "Proccesing Ip packet with Id=%d\n", ip
->id());
961 if (igbe
->regs
.rxcsum
.ipofld()) {
962 DPRINTF(EthernetDesc
, "Checking IP checksum\n");
964 csum
= htole(cksum(ip
));
965 igbe
->rxIpChecksums
++;
966 if (cksum(ip
) != 0) {
968 ext_err
|= RXDEE_IPE
;
969 DPRINTF(EthernetDesc
, "Checksum is bad!!\n");
973 if (tcp
&& igbe
->regs
.rxcsum
.tuofld()) {
974 DPRINTF(EthernetDesc
, "Checking TCP checksum\n");
975 status
|= RXDS_TCPCS
;
977 csum
= htole(cksum(tcp
));
978 igbe
->rxTcpChecksums
++;
979 if (cksum(tcp
) != 0) {
980 DPRINTF(EthernetDesc
, "Checksum is bad!!\n");
982 ext_err
|= RXDEE_TCPE
;
987 if (udp
&& igbe
->regs
.rxcsum
.tuofld()) {
988 DPRINTF(EthernetDesc
, "Checking UDP checksum\n");
989 status
|= RXDS_UDPCS
;
991 csum
= htole(cksum(udp
));
992 igbe
->rxUdpChecksums
++;
993 if (cksum(udp
) != 0) {
994 DPRINTF(EthernetDesc
, "Checksum is bad!!\n");
995 ext_err
|= RXDEE_TCPE
;
1000 DPRINTF(EthernetSM
, "Proccesing Non-Ip packet\n");
1003 switch (igbe
->regs
.srrctl
.desctype()) {
1005 desc
->legacy
.len
= htole((uint16_t)(pktPtr
->length
+ crcfixup
));
1006 desc
->legacy
.status
= htole(status
);
1007 desc
->legacy
.errors
= htole(err
);
1008 // No vlan support at this point... just set it to 0
1009 desc
->legacy
.vlan
= 0;
1011 case RXDT_ADV_SPLIT_A
:
1012 case RXDT_ADV_ONEBUF
:
1013 desc
->adv_wb
.rss_type
= htole(0);
1014 desc
->adv_wb
.pkt_type
= htole(ptype
);
1015 if (igbe
->regs
.rxcsum
.pcsd()) {
1016 // no rss support right now
1017 desc
->adv_wb
.rss_hash
= htole(0);
1019 desc
->adv_wb
.id
= htole(ip_id
);
1020 desc
->adv_wb
.csum
= htole(csum
);
1022 desc
->adv_wb
.status
= htole(status
);
1023 desc
->adv_wb
.errors
= htole(ext_err
);
1025 desc
->adv_wb
.vlan_tag
= htole(0);
1028 panic("Unimplemnted RX receive buffer type %d\n",
1029 igbe
->regs
.srrctl
.desctype());
1032 DPRINTF(EthernetDesc
, "Descriptor complete w0: %#x w1: %#x\n",
1033 desc
->adv_read
.pkt
, desc
->adv_read
.hdr
);
1035 if (bytesCopied
== pktPtr
->length
) {
1036 DPRINTF(EthernetDesc
, "Packet completely written to descriptor buffers\n");
1037 // Deal with the rx timer interrupts
1038 if (igbe
->regs
.rdtr
.delay()) {
1039 DPRINTF(EthernetSM
, "RXS: Scheduling DTR for %d\n",
1040 igbe
->regs
.rdtr
.delay() * igbe
->intClock());
1041 igbe
->reschedule(igbe
->rdtrEvent
,
1042 curTick
+ igbe
->regs
.rdtr
.delay() * igbe
->intClock(), true);
1045 if (igbe
->regs
.radv
.idv()) {
1046 DPRINTF(EthernetSM
, "RXS: Scheduling ADV for %d\n",
1047 igbe
->regs
.radv
.idv() * igbe
->intClock());
1048 if (!igbe
->radvEvent
.scheduled()) {
1049 igbe
->schedule(igbe
->radvEvent
,
1050 curTick
+ igbe
->regs
.radv
.idv() * igbe
->intClock());
1054 // if neither radv or rdtr, maybe itr is set...
1055 if (!igbe
->regs
.rdtr
.delay() && !igbe
->regs
.radv
.idv()) {
1056 DPRINTF(EthernetSM
, "RXS: Receive interrupt delay disabled, posting IT_RXT\n");
1057 igbe
->postInterrupt(IT_RXT
);
1060 // If the packet is small enough, interrupt appropriately
1061 // I wonder if this is delayed or not?!
1062 if (pktPtr
->length
<= igbe
->regs
.rsrpd
.idv()) {
1063 DPRINTF(EthernetSM
, "RXS: Posting IT_SRPD beacuse small packet received\n");
1064 igbe
->postInterrupt(IT_SRPD
);
1074 igbe
->anBegin("RXS", "Done Updating Desc");
1075 DPRINTF(EthernetDesc
, "Processing of this descriptor complete\n");
1076 igbe
->anDq("RXS", annUnusedCacheQ
);
1077 unusedCache
.pop_front();
1078 igbe
->anQ("RXS", annUsedCacheQ
);
1079 usedCache
.push_back(desc
);
1083 IGbE::RxDescCache::enableSm()
1085 if (!igbe
->drainEvent
) {
1086 igbe
->rxTick
= true;
1087 igbe
->restartClock();
1092 IGbE::RxDescCache::packetDone()
1102 IGbE::RxDescCache::hasOutstandingEvents()
1104 return pktEvent
.scheduled() || wbEvent
.scheduled() ||
1105 fetchEvent
.scheduled() || pktHdrEvent
.scheduled() ||
1106 pktDataEvent
.scheduled();
1111 IGbE::RxDescCache::serialize(std::ostream
&os
)
1113 DescCache
<RxDesc
>::serialize(os
);
1114 SERIALIZE_SCALAR(pktDone
);
1115 SERIALIZE_SCALAR(splitCount
);
1116 SERIALIZE_SCALAR(bytesCopied
);
1120 IGbE::RxDescCache::unserialize(Checkpoint
*cp
, const std::string
§ion
)
1122 DescCache
<RxDesc
>::unserialize(cp
, section
);
1123 UNSERIALIZE_SCALAR(pktDone
);
1124 UNSERIALIZE_SCALAR(splitCount
);
1125 UNSERIALIZE_SCALAR(bytesCopied
);
///////////////////////////////////// IGbE::TxDesc /////////////////////////////////
1131 IGbE::TxDescCache::TxDescCache(IGbE
*i
, const std::string n
, int s
)
1132 : DescCache
<TxDesc
>(i
,n
, s
), pktDone(false), isTcp(false), pktWaiting(false),
1133 completionAddress(0), completionEnabled(false),
1134 useTso(false), pktEvent(this), headerEvent(this), nullEvent(this)
1137 annSmFetch
= "TX Desc Fetch";
1138 annSmWb
= "TX Desc Writeback";
1139 annUnusedDescQ
= "TX Unused Descriptors";
1140 annUnusedCacheQ
= "TX Unused Descriptor Cache";
1141 annUsedCacheQ
= "TX Used Descriptor Cache";
1142 annUsedDescQ
= "TX Used Descriptors";
1143 annDescQ
= "TX Descriptors";
1147 IGbE::TxDescCache::processContextDesc()
1149 assert(unusedCache
.size());
1152 DPRINTF(EthernetDesc
, "Checking and processing context descriptors\n");
1154 while (!useTso
&& unusedCache
.size() && TxdOp::isContext(unusedCache
.front())) {
1155 DPRINTF(EthernetDesc
, "Got context descriptor type...\n");
1157 desc
= unusedCache
.front();
1158 DPRINTF(EthernetDesc
, "Descriptor upper: %#x lower: %#X\n",
1159 desc
->d1
, desc
->d2
);
1162 // is this going to be a tcp or udp packet?
1163 isTcp
= TxdOp::tcp(desc
) ? true : false;
1165 // setup all the TSO variables, they'll be ignored if we don't use
1166 // tso for this connection
1167 tsoHeaderLen
= TxdOp::hdrlen(desc
);
1168 tsoMss
= TxdOp::mss(desc
);
1170 if (TxdOp::isType(desc
, TxdOp::TXD_CNXT
) && TxdOp::tse(desc
)) {
1171 DPRINTF(EthernetDesc
, "TCP offload enabled for packet hdrlen: %d mss: %d paylen %d\n",
1172 TxdOp::hdrlen(desc
), TxdOp::mss(desc
), TxdOp::getLen(desc
));
1174 tsoTotalLen
= TxdOp::getLen(desc
);
1175 tsoLoadedHeader
= false;
1176 tsoDescBytesUsed
= 0;
1179 tsoPktHasHeader
= false;
1185 unusedCache
.pop_front();
1186 igbe
->anDq("TXS", annUnusedCacheQ
);
1187 usedCache
.push_back(desc
);
1188 igbe
->anQ("TXS", annUsedCacheQ
);
1191 if (!unusedCache
.size())
1194 desc
= unusedCache
.front();
1195 if (!useTso
&& TxdOp::isType(desc
, TxdOp::TXD_ADVDATA
) && TxdOp::tse(desc
)) {
1196 DPRINTF(EthernetDesc
, "TCP offload(adv) enabled for packet hdrlen: %d mss: %d paylen %d\n",
1197 tsoHeaderLen
, tsoMss
, TxdOp::getTsoLen(desc
));
1199 tsoTotalLen
= TxdOp::getTsoLen(desc
);
1200 tsoLoadedHeader
= false;
1201 tsoDescBytesUsed
= 0;
1204 tsoPktHasHeader
= false;
1208 if (useTso
&& !tsoLoadedHeader
) {
1209 // we need to fetch a header
1210 DPRINTF(EthernetDesc
, "Starting DMA of TSO header\n");
1211 assert(TxdOp::isData(desc
) && TxdOp::getLen(desc
) >= tsoHeaderLen
);
1213 assert(tsoHeaderLen
<= 256);
1214 igbe
->dmaRead(igbe
->platform
->pciToDma(TxdOp::getBuf(desc
)),
1215 tsoHeaderLen
, &headerEvent
, tsoHeader
, 0);
1220 IGbE::TxDescCache::headerComplete()
1222 DPRINTF(EthernetDesc
, "TSO: Fetching TSO header complete\n");
1225 assert(unusedCache
.size());
1226 TxDesc
*desc
= unusedCache
.front();
1227 DPRINTF(EthernetDesc
, "TSO: len: %d tsoHeaderLen: %d\n",
1228 TxdOp::getLen(desc
), tsoHeaderLen
);
1230 if (TxdOp::getLen(desc
) == tsoHeaderLen
) {
1231 tsoDescBytesUsed
= 0;
1232 tsoLoadedHeader
= true;
1233 unusedCache
.pop_front();
1234 usedCache
.push_back(desc
);
1236 // I don't think this case happens, I think the headrer is always
1237 // it's own packet, if it wasn't it might be as simple as just
1238 // incrementing descBytesUsed by the header length, but I'm not
1240 panic("TSO header part of bigger packet, not implemented\n");
1247 IGbE::TxDescCache::getPacketSize(EthPacketPtr p
)
1252 if (!unusedCache
.size())
1255 DPRINTF(EthernetDesc
, "Starting processing of descriptor\n");
1257 assert(!useTso
|| tsoLoadedHeader
);
1258 desc
= unusedCache
.front();
1262 DPRINTF(EthernetDesc
, "getPacket(): TxDescriptor data d1: %#llx d2: %#llx\n", desc
->d1
, desc
->d2
);
1263 DPRINTF(EthernetDesc
, "TSO: use: %d hdrlen: %d mss: %d total: %d used: %d loaded hdr: %d\n",
1264 useTso
, tsoHeaderLen
, tsoMss
, tsoTotalLen
, tsoUsedLen
, tsoLoadedHeader
);
1265 DPRINTF(EthernetDesc
, "TSO: descBytesUsed: %d copyBytes: %d this descLen: %d\n",
1266 tsoDescBytesUsed
, tsoCopyBytes
, TxdOp::getLen(desc
));
1267 DPRINTF(EthernetDesc
, "TSO: pktHasHeader: %d\n", tsoPktHasHeader
);
1269 if (tsoPktHasHeader
)
1270 tsoCopyBytes
= std::min((tsoMss
+ tsoHeaderLen
) - p
->length
, TxdOp::getLen(desc
) - tsoDescBytesUsed
);
1272 tsoCopyBytes
= std::min(tsoMss
, TxdOp::getLen(desc
) - tsoDescBytesUsed
);
1273 Addr pkt_size
= tsoCopyBytes
+ (tsoPktHasHeader
? 0 : tsoHeaderLen
);
1274 DPRINTF(EthernetDesc
, "TSO: Next packet is %d bytes\n", pkt_size
);
1278 DPRINTF(EthernetDesc
, "Next TX packet is %d bytes\n",
1279 TxdOp::getLen(unusedCache
.front()));
1280 return TxdOp::getLen(desc
);
1284 IGbE::TxDescCache::getPacketData(EthPacketPtr p
)
1286 assert(unusedCache
.size());
1289 desc
= unusedCache
.front();
1291 DPRINTF(EthernetDesc
, "getPacketData(): TxDescriptor data d1: %#llx d2: %#llx\n", desc
->d1
, desc
->d2
);
1292 assert((TxdOp::isLegacy(desc
) || TxdOp::isData(desc
)) && TxdOp::getLen(desc
));
1298 DPRINTF(EthernetDesc
, "Starting DMA of packet at offset %d\n", p
->length
);
1301 assert(tsoLoadedHeader
);
1302 if (!tsoPktHasHeader
) {
1303 DPRINTF(EthernetDesc
, "Loading TSO header (%d bytes) into start of packet\n",
1305 memcpy(p
->data
, &tsoHeader
,tsoHeaderLen
);
1306 p
->length
+=tsoHeaderLen
;
1307 tsoPktHasHeader
= true;
1312 tsoDescBytesUsed
+= tsoCopyBytes
;
1313 assert(tsoDescBytesUsed
<= TxdOp::getLen(desc
));
1314 DPRINTF(EthernetDesc
, "Starting DMA of packet at offset %d length: %d\n",
1315 p
->length
, tsoCopyBytes
);
1316 igbe
->dmaRead(igbe
->platform
->pciToDma(TxdOp::getBuf(desc
)) + tsoDescBytesUsed
,
1317 tsoCopyBytes
, &pktEvent
, p
->data
+ p
->length
, igbe
->txReadDelay
);
1319 igbe
->dmaRead(igbe
->platform
->pciToDma(TxdOp::getBuf(desc
)),
1320 TxdOp::getLen(desc
), &pktEvent
, p
->data
+ p
->length
, igbe
->txReadDelay
);
1325 IGbE::TxDescCache::pktComplete()
1329 assert(unusedCache
.size());
1332 igbe
->anBegin("TXS", "Update Desc");
1334 DPRINTF(EthernetDesc
, "DMA of packet complete\n");
1337 desc
= unusedCache
.front();
1338 assert((TxdOp::isLegacy(desc
) || TxdOp::isData(desc
)) && TxdOp::getLen(desc
));
1340 DPRINTF(EthernetDesc
, "TxDescriptor data d1: %#llx d2: %#llx\n", desc
->d1
, desc
->d2
);
1341 DPRINTF(EthernetDesc
, "TSO: use: %d hdrlen: %d mss: %d total: %d used: %d loaded hdr: %d\n",
1342 useTso
, tsoHeaderLen
, tsoMss
, tsoTotalLen
, tsoUsedLen
, tsoLoadedHeader
);
1344 // Set the length of the data in the EtherPacket
1346 pktPtr
->length
+= tsoCopyBytes
;
1347 tsoUsedLen
+= tsoCopyBytes
;
1349 pktPtr
->length
+= TxdOp::getLen(desc
);
1351 DPRINTF(EthernetDesc
, "TSO: descBytesUsed: %d copyBytes: %d\n",
1352 tsoDescBytesUsed
, tsoCopyBytes
);
1355 if ((!TxdOp::eop(desc
) && !useTso
) ||
1356 (pktPtr
->length
< ( tsoMss
+ tsoHeaderLen
) &&
1357 tsoTotalLen
!= tsoUsedLen
&& useTso
)) {
1358 assert(!useTso
|| (tsoDescBytesUsed
== TxdOp::getLen(desc
)));
1359 igbe
->anDq("TXS", annUnusedCacheQ
);
1360 unusedCache
.pop_front();
1361 igbe
->anQ("TXS", annUsedCacheQ
);
1362 usedCache
.push_back(desc
);
1364 tsoDescBytesUsed
= 0;
1367 pktMultiDesc
= true;
1369 DPRINTF(EthernetDesc
, "Partial Packet Descriptor of %d bytes Done\n",
1379 pktMultiDesc
= false;
1380 // no support for vlans
1381 assert(!TxdOp::vle(desc
));
1383 // we only support single packet descriptors at this point
1385 assert(TxdOp::eop(desc
));
1387 // set that this packet is done
1388 if (TxdOp::rs(desc
))
1391 DPRINTF(EthernetDesc
, "TxDescriptor data d1: %#llx d2: %#llx\n", desc
->d1
, desc
->d2
);
1396 DPRINTF(EthernetDesc
, "TSO: Modifying IP header. Id + %d\n",
1398 ip
->id(ip
->id() + tsoPkts
++);
1399 ip
->len(pktPtr
->length
- EthPtr(pktPtr
)->size());
1403 DPRINTF(EthernetDesc
, "TSO: Modifying TCP header. old seq %d + %d\n",
1404 tcp
->seq(), tsoPrevSeq
);
1405 tcp
->seq(tcp
->seq() + tsoPrevSeq
);
1406 if (tsoUsedLen
!= tsoTotalLen
)
1407 tcp
->flags(tcp
->flags() & ~9); // clear fin & psh
1411 DPRINTF(EthernetDesc
, "TSO: Modifying UDP header.\n");
1412 udp
->len(pktPtr
->length
- EthPtr(pktPtr
)->size());
1415 tsoPrevSeq
= tsoUsedLen
;
1418 if (DTRACE(EthernetDesc
)) {
1421 DPRINTF(EthernetDesc
, "Proccesing Ip packet with Id=%d\n",
1424 DPRINTF(EthernetSM
, "Proccesing Non-Ip packet\n");
1427 // Checksums are only ofloaded for new descriptor types
1428 if (TxdOp::isData(desc
) && ( TxdOp::ixsm(desc
) || TxdOp::txsm(desc
)) ) {
1429 DPRINTF(EthernetDesc
, "Calculating checksums for packet\n");
1432 if (TxdOp::ixsm(desc
)) {
1435 igbe
->txIpChecksums
++;
1436 DPRINTF(EthernetDesc
, "Calculated IP checksum\n");
1438 if (TxdOp::txsm(desc
)) {
1443 tcp
->sum(cksum(tcp
));
1444 igbe
->txTcpChecksums
++;
1445 DPRINTF(EthernetDesc
, "Calculated TCP checksum\n");
1449 udp
->sum(cksum(udp
));
1450 igbe
->txUdpChecksums
++;
1451 DPRINTF(EthernetDesc
, "Calculated UDP checksum\n");
1453 panic("Told to checksum, but don't know how\n");
1458 if (TxdOp::ide(desc
)) {
1459 // Deal with the rx timer interrupts
1460 DPRINTF(EthernetDesc
, "Descriptor had IDE set\n");
1461 if (igbe
->regs
.tidv
.idv()) {
1462 DPRINTF(EthernetDesc
, "setting tidv\n");
1463 igbe
->reschedule(igbe
->tidvEvent
,
1464 curTick
+ igbe
->regs
.tidv
.idv() * igbe
->intClock(), true);
1467 if (igbe
->regs
.tadv
.idv() && igbe
->regs
.tidv
.idv()) {
1468 DPRINTF(EthernetDesc
, "setting tadv\n");
1469 if (!igbe
->tadvEvent
.scheduled()) {
1470 igbe
->schedule(igbe
->tadvEvent
,
1471 curTick
+ igbe
->regs
.tadv
.idv() * igbe
->intClock());
1477 if (!useTso
|| TxdOp::getLen(desc
) == tsoDescBytesUsed
) {
1478 DPRINTF(EthernetDesc
, "Descriptor Done\n");
1479 igbe
->anDq("TXS", annUnusedCacheQ
);
1480 unusedCache
.pop_front();
1481 igbe
->anQ("TXS", annUsedCacheQ
);
1482 usedCache
.push_back(desc
);
1483 tsoDescBytesUsed
= 0;
1486 if (useTso
&& tsoUsedLen
== tsoTotalLen
)
1490 DPRINTF(EthernetDesc
, "------Packet of %d bytes ready for transmission-------\n",
1495 tsoPktHasHeader
= false;
1497 if (igbe
->regs
.txdctl
.wthresh() == 0) {
1498 igbe
->anBegin("TXS", "Desc Writeback");
1499 DPRINTF(EthernetDesc
, "WTHRESH == 0, writing back descriptor\n");
1501 } else if (igbe
->regs
.txdctl
.gran() && igbe
->regs
.txdctl
.wthresh() >=
1502 descInBlock(usedCache
.size())) {
1503 DPRINTF(EthernetDesc
, "used > WTHRESH, writing back descriptor\n");
1504 igbe
->anBegin("TXS", "Desc Writeback");
1505 writeback((igbe
->cacheBlockSize()-1)>>4);
1506 } else if (igbe
->regs
.txdctl
.wthresh() >= usedCache
.size()) {
1507 DPRINTF(EthernetDesc
, "used > WTHRESH, writing back descriptor\n");
1508 igbe
->anBegin("TXS", "Desc Writeback");
1509 writeback((igbe
->cacheBlockSize()-1)>>4);
1517 IGbE::TxDescCache::actionAfterWb()
1519 DPRINTF(EthernetDesc
, "actionAfterWb() completionEnabled: %d\n",
1521 igbe
->postInterrupt(iGbReg::IT_TXDW
);
1522 if (completionEnabled
) {
1523 descEnd
= igbe
->regs
.tdh();
1524 DPRINTF(EthernetDesc
, "Completion writing back value: %d to addr: %#x\n", descEnd
,
1526 igbe
->dmaWrite(igbe
->platform
->pciToDma(mbits(completionAddress
, 63, 2)),
1527 sizeof(descEnd
), &nullEvent
, (uint8_t*)&descEnd
, 0);
1532 IGbE::TxDescCache::serialize(std::ostream
&os
)
1534 DescCache
<TxDesc
>::serialize(os
);
1535 SERIALIZE_SCALAR(pktDone
);
1536 SERIALIZE_SCALAR(isTcp
);
1537 SERIALIZE_SCALAR(pktWaiting
);
1538 SERIALIZE_SCALAR(pktMultiDesc
);
1540 SERIALIZE_SCALAR(useTso
);
1541 SERIALIZE_SCALAR(tsoHeaderLen
);
1542 SERIALIZE_SCALAR(tsoMss
);
1543 SERIALIZE_SCALAR(tsoTotalLen
);
1544 SERIALIZE_SCALAR(tsoUsedLen
);
1545 SERIALIZE_SCALAR(tsoPrevSeq
);;
1546 SERIALIZE_SCALAR(tsoPktPayloadBytes
);
1547 SERIALIZE_SCALAR(tsoLoadedHeader
);
1548 SERIALIZE_SCALAR(tsoPktHasHeader
);
1549 SERIALIZE_ARRAY(tsoHeader
, 256);
1550 SERIALIZE_SCALAR(tsoDescBytesUsed
);
1551 SERIALIZE_SCALAR(tsoCopyBytes
);
1552 SERIALIZE_SCALAR(tsoPkts
);
1554 SERIALIZE_SCALAR(completionAddress
);
1555 SERIALIZE_SCALAR(completionEnabled
);
1556 SERIALIZE_SCALAR(descEnd
);
1560 IGbE::TxDescCache::unserialize(Checkpoint
*cp
, const std::string
§ion
)
1562 DescCache
<TxDesc
>::unserialize(cp
, section
);
1563 UNSERIALIZE_SCALAR(pktDone
);
1564 UNSERIALIZE_SCALAR(isTcp
);
1565 UNSERIALIZE_SCALAR(pktWaiting
);
1566 UNSERIALIZE_SCALAR(pktMultiDesc
);
1568 UNSERIALIZE_SCALAR(useTso
);
1569 UNSERIALIZE_SCALAR(tsoHeaderLen
);
1570 UNSERIALIZE_SCALAR(tsoMss
);
1571 UNSERIALIZE_SCALAR(tsoTotalLen
);
1572 UNSERIALIZE_SCALAR(tsoUsedLen
);
1573 UNSERIALIZE_SCALAR(tsoPrevSeq
);;
1574 UNSERIALIZE_SCALAR(tsoPktPayloadBytes
);
1575 UNSERIALIZE_SCALAR(tsoLoadedHeader
);
1576 UNSERIALIZE_SCALAR(tsoPktHasHeader
);
1577 UNSERIALIZE_ARRAY(tsoHeader
, 256);
1578 UNSERIALIZE_SCALAR(tsoDescBytesUsed
);
1579 UNSERIALIZE_SCALAR(tsoCopyBytes
);
1580 UNSERIALIZE_SCALAR(tsoPkts
);
1582 UNSERIALIZE_SCALAR(completionAddress
);
1583 UNSERIALIZE_SCALAR(completionEnabled
);
1584 UNSERIALIZE_SCALAR(descEnd
);
1588 IGbE::TxDescCache::packetAvailable()
1598 IGbE::TxDescCache::enableSm()
1600 if (!igbe
->drainEvent
) {
1601 igbe
->txTick
= true;
1602 igbe
->restartClock();
1607 IGbE::TxDescCache::hasOutstandingEvents()
1609 return pktEvent
.scheduled() || wbEvent
.scheduled() ||
1610 fetchEvent
.scheduled();
1614 ///////////////////////////////////// IGbE /////////////////////////////////
1617 IGbE::restartClock()
1619 if (!tickEvent
.scheduled() && (rxTick
|| txTick
|| txFifoTick
) &&
1620 getState() == SimObject::Running
)
1621 schedule(tickEvent
, (curTick
/ ticks(1)) * ticks(1) + ticks(1));
1625 IGbE::drain(Event
*de
)
1628 count
= pioPort
->drain(de
) + dmaPort
->drain(de
);
1629 if (rxDescCache
.hasOutstandingEvents() ||
1630 txDescCache
.hasOutstandingEvents()) {
1639 if (tickEvent
.scheduled())
1640 deschedule(tickEvent
);
1643 changeState(Draining
);
1645 changeState(Drained
);
1647 DPRINTF(EthernetSM
, "got drain() returning %d", count
);
1654 SimObject::resume();
1661 DPRINTF(EthernetSM
, "resuming from drain");
1670 DPRINTF(EthernetSM
, "checkDrain() in drain\n");
1674 if (!rxDescCache
.hasOutstandingEvents() &&
1675 !txDescCache
.hasOutstandingEvents()) {
1676 drainEvent
->process();
1682 IGbE::txStateMachine()
1684 if (!regs
.tctl
.en()) {
1686 DPRINTF(EthernetSM
, "TXS: TX disabled, stopping ticking\n");
1690 // If we have a packet available and it's length is not 0 (meaning it's not
1691 // a multidescriptor packet) put it in the fifo, otherwise an the next
1692 // iteration we'll get the rest of the data
1693 if (txPacket
&& txDescCache
.packetAvailable()
1694 && !txDescCache
.packetMultiDesc() && txPacket
->length
) {
1697 anQ("TXS", "TX FIFO Q");
1698 DPRINTF(EthernetSM
, "TXS: packet placed in TX FIFO\n");
1699 success
= txFifo
.push(txPacket
);
1700 txFifoTick
= true && !drainEvent
;
1703 anBegin("TXS", "Desc Writeback");
1704 txDescCache
.writeback((cacheBlockSize()-1)>>4);
1708 // Only support descriptor granularity
1709 if (regs
.txdctl
.lwthresh() && txDescCache
.descLeft() < (regs
.txdctl
.lwthresh() * 8)) {
1710 DPRINTF(EthernetSM
, "TXS: LWTHRESH caused posting of TXDLOW\n");
1711 postInterrupt(IT_TXDLOW
);
1715 txPacket
= new EthPacketData(16384);
1718 if (!txDescCache
.packetWaiting()) {
1719 if (txDescCache
.descLeft() == 0) {
1720 postInterrupt(IT_TXQE
);
1721 anBegin("TXS", "Desc Writeback");
1722 txDescCache
.writeback(0);
1723 anBegin("TXS", "Desc Fetch");
1724 anWe("TXS", txDescCache
.annUnusedCacheQ
);
1725 txDescCache
.fetchDescriptors();
1726 DPRINTF(EthernetSM
, "TXS: No descriptors left in ring, forcing "
1727 "writeback stopping ticking and posting TXQE\n");
1733 if (!(txDescCache
.descUnused())) {
1734 anBegin("TXS", "Desc Fetch");
1735 txDescCache
.fetchDescriptors();
1736 anWe("TXS", txDescCache
.annUnusedCacheQ
);
1737 DPRINTF(EthernetSM
, "TXS: No descriptors available in cache, fetching and stopping ticking\n");
1741 anPq("TXS", txDescCache
.annUnusedCacheQ
);
1744 txDescCache
.processContextDesc();
1745 if (txDescCache
.packetWaiting()) {
1746 DPRINTF(EthernetSM
, "TXS: Fetching TSO header, stopping ticking\n");
1752 size
= txDescCache
.getPacketSize(txPacket
);
1753 if (size
> 0 && txFifo
.avail() > size
) {
1754 anRq("TXS", "TX FIFO Q");
1755 anBegin("TXS", "DMA Packet");
1756 DPRINTF(EthernetSM
, "TXS: Reserving %d bytes in FIFO and begining "
1757 "DMA of next packet\n", size
);
1758 txFifo
.reserve(size
);
1759 txDescCache
.getPacketData(txPacket
);
1760 } else if (size
<= 0) {
1761 DPRINTF(EthernetSM
, "TXS: getPacketSize returned: %d\n", size
);
1762 DPRINTF(EthernetSM
, "TXS: No packets to get, writing back used descriptors\n");
1763 anBegin("TXS", "Desc Writeback");
1764 txDescCache
.writeback(0);
1766 anWf("TXS", "TX FIFO Q");
1767 DPRINTF(EthernetSM
, "TXS: FIFO full, stopping ticking until space "
1768 "available in FIFO\n");
1775 DPRINTF(EthernetSM
, "TXS: Nothing to do, stopping ticking\n");
1780 IGbE::ethRxPkt(EthPacketPtr pkt
)
1782 rxBytes
+= pkt
->length
;
1785 DPRINTF(Ethernet
, "RxFIFO: Receiving pcakte from wire\n");
1786 anBegin("RXQ", "Wire Recv");
1789 if (!regs
.rctl
.en()) {
1790 DPRINTF(Ethernet
, "RxFIFO: RX not enabled, dropping\n");
1791 anBegin("RXQ", "FIFO Drop", CPA::FL_BAD
);
1795 // restart the state machines if they are stopped
1796 rxTick
= true && !drainEvent
;
1797 if ((rxTick
|| txTick
) && !tickEvent
.scheduled()) {
1798 DPRINTF(EthernetSM
, "RXS: received packet into fifo, starting ticking\n");
1802 if (!rxFifo
.push(pkt
)) {
1803 DPRINTF(Ethernet
, "RxFIFO: Packet won't fit in fifo... dropped\n");
1804 postInterrupt(IT_RXO
, true);
1805 anBegin("RXQ", "FIFO Drop", CPA::FL_BAD
);
1809 if (CPA::available() && cpa
->enabled()) {
1810 assert(sys
->numSystemsRunning
<= 2);
1812 if (sys
->systemList
[0] == sys
)
1813 other_sys
= sys
->systemList
[1];
1815 other_sys
= sys
->systemList
[0];
1817 cpa
->hwDq(CPA::FL_NONE
, sys
, macAddr
, "RXQ", "WireQ", 0, other_sys
);
1818 anQ("RXQ", "RX FIFO Q");
1819 cpa
->hwWe(CPA::FL_NONE
, sys
, macAddr
, "RXQ", "WireQ", 0, other_sys
);
1827 IGbE::rxStateMachine()
1829 if (!regs
.rctl
.en()) {
1831 DPRINTF(EthernetSM
, "RXS: RX disabled, stopping ticking\n");
1835 // If the packet is done check for interrupts/descriptors/etc
1836 if (rxDescCache
.packetDone()) {
1837 rxDmaPacket
= false;
1838 DPRINTF(EthernetSM
, "RXS: Packet completed DMA to memory\n");
1839 int descLeft
= rxDescCache
.descLeft();
1840 DPRINTF(EthernetSM
, "RXS: descLeft: %d rdmts: %d rdlen: %d\n",
1841 descLeft
, regs
.rctl
.rdmts(), regs
.rdlen());
1842 switch (regs
.rctl
.rdmts()) {
1843 case 2: if (descLeft
> .125 * regs
.rdlen()) break;
1844 case 1: if (descLeft
> .250 * regs
.rdlen()) break;
1845 case 0: if (descLeft
> .500 * regs
.rdlen()) break;
1846 DPRINTF(Ethernet
, "RXS: Interrupting (RXDMT) because of descriptors left\n");
1847 postInterrupt(IT_RXDMT
);
1852 rxDescCache
.writeback(0);
1854 if (descLeft
== 0) {
1855 anBegin("RXS", "Writeback Descriptors");
1856 rxDescCache
.writeback(0);
1857 DPRINTF(EthernetSM
, "RXS: No descriptors left in ring, forcing"
1858 " writeback and stopping ticking\n");
1862 // only support descriptor granulaties
1863 assert(regs
.rxdctl
.gran());
1865 if (regs
.rxdctl
.wthresh() >= rxDescCache
.descUsed()) {
1866 DPRINTF(EthernetSM
, "RXS: Writing back because WTHRESH >= descUsed\n");
1867 anBegin("RXS", "Writeback Descriptors");
1868 if (regs
.rxdctl
.wthresh() < (cacheBlockSize()>>4))
1869 rxDescCache
.writeback(regs
.rxdctl
.wthresh()-1);
1871 rxDescCache
.writeback((cacheBlockSize()-1)>>4);
1874 if ((rxDescCache
.descUnused() < regs
.rxdctl
.pthresh()) &&
1875 ((rxDescCache
.descLeft() - rxDescCache
.descUnused()) > regs
.rxdctl
.hthresh())) {
1876 DPRINTF(EthernetSM
, "RXS: Fetching descriptors because descUnused < PTHRESH\n");
1877 anBegin("RXS", "Fetch Descriptors");
1878 rxDescCache
.fetchDescriptors();
1881 if (rxDescCache
.descUnused() == 0) {
1882 anBegin("RXS", "Fetch Descriptors");
1883 rxDescCache
.fetchDescriptors();
1884 anWe("RXS", rxDescCache
.annUnusedCacheQ
);
1885 DPRINTF(EthernetSM
, "RXS: No descriptors available in cache, "
1886 "fetching descriptors and stopping ticking\n");
1893 DPRINTF(EthernetSM
, "RXS: stopping ticking until packet DMA completes\n");
1898 if (!rxDescCache
.descUnused()) {
1899 anBegin("RXS", "Fetch Descriptors");
1900 rxDescCache
.fetchDescriptors();
1901 anWe("RXS", rxDescCache
.annUnusedCacheQ
);
1902 DPRINTF(EthernetSM
, "RXS: No descriptors available in cache, stopping ticking\n");
1904 DPRINTF(EthernetSM
, "RXS: No descriptors available, fetching\n");
1907 anPq("RXS", rxDescCache
.annUnusedCacheQ
);
1909 if (rxFifo
.empty()) {
1910 anWe("RXS", "RX FIFO Q");
1911 DPRINTF(EthernetSM
, "RXS: RxFIFO empty, stopping ticking\n");
1915 anPq("RXS", "RX FIFO Q");
1916 anBegin("RXS", "Get Desc");
1919 pkt
= rxFifo
.front();
1922 pktOffset
= rxDescCache
.writePacket(pkt
, pktOffset
);
1923 DPRINTF(EthernetSM
, "RXS: Writing packet into memory\n");
1924 if (pktOffset
== pkt
->length
) {
1925 anBegin( "RXS", "FIFO Dequeue");
1926 DPRINTF(EthernetSM
, "RXS: Removing packet from FIFO\n");
1928 anDq("RXS", "RX FIFO Q");
1932 DPRINTF(EthernetSM
, "RXS: stopping ticking until packet DMA completes\n");
1935 anBegin("RXS", "DMA Packet");
1941 if (txFifo
.empty()) {
1942 anWe("TXQ", "TX FIFO Q");
1948 anPq("TXQ", "TX FIFO Q");
1949 if (etherInt
->sendPacket(txFifo
.front())) {
1950 cpa
->hwQ(CPA::FL_NONE
, sys
, macAddr
, "TXQ", "WireQ", 0);
1951 if (DTRACE(EthernetSM
)) {
1952 IpPtr
ip(txFifo
.front());
1954 DPRINTF(EthernetSM
, "Transmitting Ip packet with Id=%d\n",
1957 DPRINTF(EthernetSM
, "Transmitting Non-Ip packet\n");
1959 anDq("TXQ", "TX FIFO Q");
1960 anBegin("TXQ", "Wire Send");
1961 DPRINTF(EthernetSM
, "TxFIFO: Successful transmit, bytes available in fifo: %d\n",
1964 txBytes
+= txFifo
.front()->length
;
1970 // We'll get woken up when the packet ethTxDone() gets called
1978 DPRINTF(EthernetSM
, "IGbE: -------------- Cycle --------------\n");
1990 if (rxTick
|| txTick
|| txFifoTick
)
1991 schedule(tickEvent
, curTick
+ ticks(1));
1997 anBegin("TXQ", "Send Done");
1998 // restart the tx state machines if they are stopped
1999 // fifo to send another packet
2000 // tx sm to put more data into the fifo
2001 txFifoTick
= true && !drainEvent
;
2002 if (txDescCache
.descLeft() != 0 && !drainEvent
)
2007 DPRINTF(EthernetSM
, "TxFIFO: Transmission complete\n");
2011 IGbE::serialize(std::ostream
&os
)
2013 PciDev::serialize(os
);
2016 SERIALIZE_SCALAR(eeOpBits
);
2017 SERIALIZE_SCALAR(eeAddrBits
);
2018 SERIALIZE_SCALAR(eeDataBits
);
2019 SERIALIZE_SCALAR(eeOpcode
);
2020 SERIALIZE_SCALAR(eeAddr
);
2021 SERIALIZE_SCALAR(lastInterrupt
);
2022 SERIALIZE_ARRAY(flash
,iGbReg::EEPROM_SIZE
);
2024 rxFifo
.serialize("rxfifo", os
);
2025 txFifo
.serialize("txfifo", os
);
2027 bool txPktExists
= txPacket
;
2028 SERIALIZE_SCALAR(txPktExists
);
2030 txPacket
->serialize("txpacket", os
);
2032 Tick rdtr_time
= 0, radv_time
= 0, tidv_time
= 0, tadv_time
= 0,
2035 if (rdtrEvent
.scheduled())
2036 rdtr_time
= rdtrEvent
.when();
2037 SERIALIZE_SCALAR(rdtr_time
);
2039 if (radvEvent
.scheduled())
2040 radv_time
= radvEvent
.when();
2041 SERIALIZE_SCALAR(radv_time
);
2043 if (tidvEvent
.scheduled())
2044 tidv_time
= tidvEvent
.when();
2045 SERIALIZE_SCALAR(tidv_time
);
2047 if (tadvEvent
.scheduled())
2048 tadv_time
= tadvEvent
.when();
2049 SERIALIZE_SCALAR(tadv_time
);
2051 if (interEvent
.scheduled())
2052 inter_time
= interEvent
.when();
2053 SERIALIZE_SCALAR(inter_time
);
2055 SERIALIZE_SCALAR(pktOffset
);
2057 nameOut(os
, csprintf("%s.TxDescCache", name()));
2058 txDescCache
.serialize(os
);
2060 nameOut(os
, csprintf("%s.RxDescCache", name()));
2061 rxDescCache
.serialize(os
);
2065 IGbE::unserialize(Checkpoint
*cp
, const std::string
§ion
)
2067 PciDev::unserialize(cp
, section
);
2069 regs
.unserialize(cp
, section
);
2070 UNSERIALIZE_SCALAR(eeOpBits
);
2071 UNSERIALIZE_SCALAR(eeAddrBits
);
2072 UNSERIALIZE_SCALAR(eeDataBits
);
2073 UNSERIALIZE_SCALAR(eeOpcode
);
2074 UNSERIALIZE_SCALAR(eeAddr
);
2075 UNSERIALIZE_SCALAR(lastInterrupt
);
2076 UNSERIALIZE_ARRAY(flash
,iGbReg::EEPROM_SIZE
);
2078 rxFifo
.unserialize("rxfifo", cp
, section
);
2079 txFifo
.unserialize("txfifo", cp
, section
);
2082 UNSERIALIZE_SCALAR(txPktExists
);
2084 txPacket
= new EthPacketData(16384);
2085 txPacket
->unserialize("txpacket", cp
, section
);
2092 Tick rdtr_time
, radv_time
, tidv_time
, tadv_time
, inter_time
;
2093 UNSERIALIZE_SCALAR(rdtr_time
);
2094 UNSERIALIZE_SCALAR(radv_time
);
2095 UNSERIALIZE_SCALAR(tidv_time
);
2096 UNSERIALIZE_SCALAR(tadv_time
);
2097 UNSERIALIZE_SCALAR(inter_time
);
2100 schedule(rdtrEvent
, rdtr_time
);
2103 schedule(radvEvent
, radv_time
);
2106 schedule(tidvEvent
, tidv_time
);
2109 schedule(tadvEvent
, tadv_time
);
2112 schedule(interEvent
, inter_time
);
2114 UNSERIALIZE_SCALAR(pktOffset
);
2116 txDescCache
.unserialize(cp
, csprintf("%s.TxDescCache", section
));
2118 rxDescCache
.unserialize(cp
, csprintf("%s.RxDescCache", section
));
2122 IGbEParams::create()
2124 return new IGbE(this);