2 * Copyright (c) 2006 The Regents of The University of Michigan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * Device model for Intel's 8254x line of gigabit ethernet controllers.
33 * In particular an 82547 revision 2 (82547GI) MAC because it seems to have the
34 * fewest workarounds in the driver. It will probably work with most of the
35 * other MACs with slight modifications.
38 #include "dev/net/i8254xGBe.hh"
41 * @todo really there are multiple dma engines.. we should implement them.
47 #include "base/inet.hh"
48 #include "base/trace.hh"
49 #include "debug/Drain.hh"
50 #include "debug/EthernetAll.hh"
51 #include "mem/packet.hh"
52 #include "mem/packet_access.hh"
53 #include "params/IGbE.hh"
54 #include "sim/stats.hh"
55 #include "sim/system.hh"
57 using namespace iGbReg
;
// Constructor: wires up FIFOs, DMA timing parameters, event callbacks and
// the RX/TX descriptor caches from the Params object, then seeds the
// status registers and the emulated EEPROM (flash) image.
// NOTE(review): this listing is a lossy extract — some original source
// lines are elided between the fragments below.
60 IGbE::IGbE(const Params
*p
)
61 : EtherDevice(p
), etherInt(NULL
), cpa(NULL
),
62 rxFifo(p
->rx_fifo_size
), txFifo(p
->tx_fifo_size
), inTick(false),
63 rxTick(false), txTick(false), txFifoTick(false), rxDmaPacket(false),
64 pktOffset(0), fetchDelay(p
->fetch_delay
), wbDelay(p
->wb_delay
),
65 fetchCompDelay(p
->fetch_comp_delay
), wbCompDelay(p
->wb_comp_delay
),
66 rxWriteDelay(p
->rx_write_delay
), txReadDelay(p
->tx_read_delay
),
// Each timer/tick event dispatches to the matching member function
// when it fires.
67 rdtrEvent([this]{ rdtrProcess(); }, name()),
68 radvEvent([this]{ radvProcess(); }, name()),
69 tadvEvent([this]{ tadvProcess(); }, name()),
70 tidvEvent([this]{ tidvProcess(); }, name()),
71 tickEvent([this]{ tick(); }, name()),
72 interEvent([this]{ delayIntEvent(); }, name()),
73 rxDescCache(this, name()+".RxDesc", p
->rx_desc_cache_size
),
74 txDescCache(this, name()+".TxDesc", p
->tx_desc_cache_size
),
77 etherInt
= new IGbEInt(name() + ".int", this);
79 // Initialized internal registers per Intel documentation
80 // All registers initialized to 0 by per register constructor
85 regs
.sts
.speed(3); // Say we're 1000Mbps
86 regs
.sts
.fd(1); // full duplex
87 regs
.sts
.lu(1); // link up
93 regs
.rxdctl
.wthresh(1);
107 // clear all 64 16 bit words of the eeprom
108 memset(&flash
, 0, EEPROM_SIZE
*2);
110 // Set the MAC address
111 memcpy(flash
, p
->hardware_address
.bytes(), ETH_ADDR_LEN
);
// Byte-swap the MAC words into big-endian order for the EEPROM image.
112 for (int x
= 0; x
< ETH_ADDR_LEN
/2; x
++)
113 flash
[x
] = htobe(flash
[x
]);
// Sum the EEPROM words so the last word can force the expected
// checksum value below.
116 for (int x
= 0; x
< EEPROM_SIZE
; x
++)
117 csum
+= htobe(flash
[x
]);
120 // Magic happy checksum value
121 flash
[EEPROM_SIZE
-1] = htobe((uint16_t)(EEPROM_CSUM
- csum
));
123 // Store the MAC address as queue ID
124 macAddr
= p
->hardware_address
;
143 IGbE::getPort(const std::string
&if_name
, PortID idx
)
145 if (if_name
== "interface")
147 return EtherDevice::getPort(if_name
, idx
);
// Handle a write to PCI config space. Writes into the standard PCI
// header are delegated to PciDevice; device-specific config space is
// not implemented and panics.
151 IGbE::writeConfig(PacketPtr pkt
)
153 int offset
= pkt
->getAddr() & PCI_CONFIG_SIZE
;
154 if (offset
< PCI_DEVICE_SPECIFIC
)
155 PciDevice::writeConfig(pkt
);
157 panic("Device specific PCI config space not implemented.\n");
160 // Some work may need to be done here based on the PCI COMMAND bits.
166 // Handy macro for range-testing register access addresses
167 #define IN_RANGE(val, base, len) (val >= base && val < (base + len))
170 IGbE::read(PacketPtr pkt
)
175 if (!getBAR(pkt
->getAddr(), bar
, daddr
))
176 panic("Invalid PCI memory access to unmapped memory.\n");
178 // Only Memory register BAR is allowed
181 // Only 32bit accesses allowed
182 assert(pkt
->getSize() == 4);
184 DPRINTF(Ethernet
, "Read device register %#X\n", daddr
);
187 // Handle read of register here
193 pkt
->setLE
<uint32_t>(regs
.ctrl());
196 pkt
->setLE
<uint32_t>(regs
.sts());
199 pkt
->setLE
<uint32_t>(regs
.eecd());
202 pkt
->setLE
<uint32_t>(regs
.eerd());
205 pkt
->setLE
<uint32_t>(regs
.ctrl_ext());
208 pkt
->setLE
<uint32_t>(regs
.mdic());
211 DPRINTF(Ethernet
, "Reading ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n",
212 regs
.icr(), regs
.imr
, regs
.iam
, regs
.ctrl_ext
.iame());
213 pkt
->setLE
<uint32_t>(regs
.icr());
214 if (regs
.icr
.int_assert() || regs
.imr
== 0) {
215 regs
.icr
= regs
.icr() & ~mask(30);
216 DPRINTF(Ethernet
, "Cleared ICR. ICR=%#x\n", regs
.icr());
218 if (regs
.ctrl_ext
.iame() && regs
.icr
.int_assert())
219 regs
.imr
&= ~regs
.iam
;
223 // This is only useful for MSI, but the driver reads it every time
224 // Just don't do anything
225 pkt
->setLE
<uint32_t>(0);
228 pkt
->setLE
<uint32_t>(regs
.itr());
231 pkt
->setLE
<uint32_t>(regs
.rctl());
234 pkt
->setLE
<uint32_t>(regs
.fcttv());
237 pkt
->setLE
<uint32_t>(regs
.tctl());
240 pkt
->setLE
<uint32_t>(regs
.pba());
246 pkt
->setLE
<uint32_t>(0); // We don't care, so just return 0
249 pkt
->setLE
<uint32_t>(regs
.fcrtl());
252 pkt
->setLE
<uint32_t>(regs
.fcrth());
255 pkt
->setLE
<uint32_t>(regs
.rdba
.rdbal());
258 pkt
->setLE
<uint32_t>(regs
.rdba
.rdbah());
261 pkt
->setLE
<uint32_t>(regs
.rdlen());
264 pkt
->setLE
<uint32_t>(regs
.srrctl());
267 pkt
->setLE
<uint32_t>(regs
.rdh());
270 pkt
->setLE
<uint32_t>(regs
.rdt());
273 pkt
->setLE
<uint32_t>(regs
.rdtr());
274 if (regs
.rdtr
.fpd()) {
275 rxDescCache
.writeback(0);
276 DPRINTF(EthernetIntr
,
277 "Posting interrupt because of RDTR.FPD write\n");
278 postInterrupt(IT_RXT
);
283 pkt
->setLE
<uint32_t>(regs
.rxdctl());
286 pkt
->setLE
<uint32_t>(regs
.radv());
289 pkt
->setLE
<uint32_t>(regs
.tdba
.tdbal());
292 pkt
->setLE
<uint32_t>(regs
.tdba
.tdbah());
295 pkt
->setLE
<uint32_t>(regs
.tdlen());
298 pkt
->setLE
<uint32_t>(regs
.tdh());
301 pkt
->setLE
<uint32_t>(regs
.txdca_ctl());
304 pkt
->setLE
<uint32_t>(regs
.tdt());
307 pkt
->setLE
<uint32_t>(regs
.tidv());
310 pkt
->setLE
<uint32_t>(regs
.txdctl());
313 pkt
->setLE
<uint32_t>(regs
.tadv());
316 pkt
->setLE
<uint32_t>(regs
.tdwba
& mask(32));
319 pkt
->setLE
<uint32_t>(regs
.tdwba
>> 32);
322 pkt
->setLE
<uint32_t>(regs
.rxcsum());
325 pkt
->setLE
<uint32_t>(regs
.rlpml
);
328 pkt
->setLE
<uint32_t>(regs
.rfctl());
331 pkt
->setLE
<uint32_t>(regs
.manc());
334 pkt
->setLE
<uint32_t>(regs
.swsm());
338 pkt
->setLE
<uint32_t>(regs
.fwsm());
341 pkt
->setLE
<uint32_t>(regs
.sw_fw_sync
);
344 if (!IN_RANGE(daddr
, REG_VFTA
, VLAN_FILTER_TABLE_SIZE
*4) &&
345 !IN_RANGE(daddr
, REG_RAL
, RCV_ADDRESS_TABLE_SIZE
*8) &&
346 !IN_RANGE(daddr
, REG_MTA
, MULTICAST_TABLE_SIZE
*4) &&
347 !IN_RANGE(daddr
, REG_CRCERRS
, STATS_REGS_SIZE
))
348 panic("Read request to unknown register number: %#x\n", daddr
);
350 pkt
->setLE
<uint32_t>(0);
353 pkt
->makeAtomicResponse();
358 IGbE::write(PacketPtr pkt
)
364 if (!getBAR(pkt
->getAddr(), bar
, daddr
))
365 panic("Invalid PCI memory access to unmapped memory.\n");
367 // Only Memory register BAR is allowed
370 // Only 32bit accesses allowed
371 assert(pkt
->getSize() == sizeof(uint32_t));
373 DPRINTF(Ethernet
, "Wrote device register %#X value %#X\n",
374 daddr
, pkt
->getLE
<uint32_t>());
377 // Handle write of register here
379 uint32_t val
= pkt
->getLE
<uint32_t>();
387 if (regs
.ctrl
.tfce())
388 warn("TX Flow control enabled, should implement\n");
389 if (regs
.ctrl
.rfce())
390 warn("RX Flow control enabled, should implement\n");
400 oldClk
= regs
.eecd
.sk();
402 // See if this is a eeprom access and emulate accordingly
403 if (!oldClk
&& regs
.eecd
.sk()) {
405 eeOpcode
= eeOpcode
<< 1 | regs
.eecd
.din();
407 } else if (eeAddrBits
< 8 && eeOpcode
== EEPROM_READ_OPCODE_SPI
) {
408 eeAddr
= eeAddr
<< 1 | regs
.eecd
.din();
410 } else if (eeDataBits
< 16 && eeOpcode
== EEPROM_READ_OPCODE_SPI
) {
411 assert(eeAddr
>>1 < EEPROM_SIZE
);
412 DPRINTF(EthernetEEPROM
, "EEPROM bit read: %d word: %#X\n",
413 flash
[eeAddr
>>1] >> eeDataBits
& 0x1,
415 regs
.eecd
.dout((flash
[eeAddr
>>1] >> (15-eeDataBits
)) & 0x1);
417 } else if (eeDataBits
< 8 && eeOpcode
== EEPROM_RDSR_OPCODE_SPI
) {
421 panic("What's going on with eeprom interface? opcode:"
422 " %#x:%d addr: %#x:%d, data: %d\n", (uint32_t)eeOpcode
,
423 (uint32_t)eeOpBits
, (uint32_t)eeAddr
,
424 (uint32_t)eeAddrBits
, (uint32_t)eeDataBits
);
426 // Reset everything for the next command
427 if ((eeDataBits
== 16 && eeOpcode
== EEPROM_READ_OPCODE_SPI
) ||
428 (eeDataBits
== 8 && eeOpcode
== EEPROM_RDSR_OPCODE_SPI
)) {
436 DPRINTF(EthernetEEPROM
, "EEPROM: opcode: %#X:%d addr: %#X:%d\n",
437 (uint32_t)eeOpcode
, (uint32_t) eeOpBits
,
438 (uint32_t)eeAddr
>>1, (uint32_t)eeAddrBits
);
439 if (eeOpBits
== 8 && !(eeOpcode
== EEPROM_READ_OPCODE_SPI
||
440 eeOpcode
== EEPROM_RDSR_OPCODE_SPI
))
441 panic("Unknown eeprom opcode: %#X:%d\n", (uint32_t)eeOpcode
,
446 // If driver requests eeprom access, immediately give it to it
447 regs
.eecd
.ee_gnt(regs
.eecd
.ee_req());
451 if (regs
.eerd
.start()) {
453 assert(regs
.eerd
.addr() < EEPROM_SIZE
);
454 regs
.eerd
.data(flash
[regs
.eerd
.addr()]);
456 DPRINTF(EthernetEEPROM
, "EEPROM: read addr: %#X data %#x\n",
457 regs
.eerd
.addr(), regs
.eerd
.data());
463 panic("No support for interrupt on mdic complete\n");
464 if (regs
.mdic
.phyadd() != 1)
465 panic("No support for reading anything but phy\n");
466 DPRINTF(Ethernet
, "%s phy address %x\n",
467 regs
.mdic
.op() == 1 ? "Writing" : "Reading",
469 switch (regs
.mdic
.regadd()) {
471 regs
.mdic
.data(0x796D); // link up
474 regs
.mdic
.data(params()->phy_pid
);
477 regs
.mdic
.data(params()->phy_epid
);
480 regs
.mdic
.data(0x7C00);
483 regs
.mdic
.data(0x3000);
486 regs
.mdic
.data(0x180); // some random length
494 DPRINTF(Ethernet
, "Writing ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n",
495 regs
.icr(), regs
.imr
, regs
.iam
, regs
.ctrl_ext
.iame());
496 if (regs
.ctrl_ext
.iame())
497 regs
.imr
&= ~regs
.iam
;
498 regs
.icr
= ~bits(val
,30,0) & regs
.icr();
505 DPRINTF(EthernetIntr
, "Posting interrupt because of ICS write\n");
506 postInterrupt((IntTypes
)val
);
522 if (regs
.rctl
.rst()) {
524 DPRINTF(EthernetSM
, "RXS: Got RESET!\n");
542 if (regs
.tctl
.en() && !oldtctl
.en()) {
548 regs
.pba
.txa(64 - regs
.pba
.rxa());
560 ; // We don't care, so don't store anything
563 warn("Writing to IVAR0, ignoring...\n");
572 regs
.rdba
.rdbal( val
& ~mask(4));
573 rxDescCache
.areaChanged();
576 regs
.rdba
.rdbah(val
);
577 rxDescCache
.areaChanged();
580 regs
.rdlen
= val
& ~mask(7);
581 rxDescCache
.areaChanged();
588 rxDescCache
.areaChanged();
592 DPRINTF(EthernetSM
, "RXS: RDT Updated.\n");
593 if (drainState() == DrainState::Running
) {
594 DPRINTF(EthernetSM
, "RXS: RDT Fetching Descriptors!\n");
595 rxDescCache
.fetchDescriptors();
597 DPRINTF(EthernetSM
, "RXS: RDT NOT Fetching Desc b/c draining!\n");
610 regs
.tdba
.tdbal( val
& ~mask(4));
611 txDescCache
.areaChanged();
614 regs
.tdba
.tdbah(val
);
615 txDescCache
.areaChanged();
618 regs
.tdlen
= val
& ~mask(7);
619 txDescCache
.areaChanged();
623 txDescCache
.areaChanged();
626 regs
.txdca_ctl
= val
;
627 if (regs
.txdca_ctl
.enabled())
628 panic("No support for DCA\n");
632 DPRINTF(EthernetSM
, "TXS: TX Tail pointer updated\n");
633 if (drainState() == DrainState::Running
) {
634 DPRINTF(EthernetSM
, "TXS: TDT Fetching Descriptors!\n");
635 txDescCache
.fetchDescriptors();
637 DPRINTF(EthernetSM
, "TXS: TDT NOT Fetching Desc b/c draining!\n");
650 regs
.tdwba
&= ~mask(32);
652 txDescCache
.completionWriteback(regs
.tdwba
& ~mask(1),
653 regs
.tdwba
& mask(1));
656 regs
.tdwba
&= mask(32);
657 regs
.tdwba
|= (uint64_t)val
<< 32;
658 txDescCache
.completionWriteback(regs
.tdwba
& ~mask(1),
659 regs
.tdwba
& mask(1));
669 if (regs
.rfctl
.exsten())
670 panic("Extended RX descriptors not implemented\n");
677 if (regs
.fwsm
.eep_fw_semaphore())
678 regs
.swsm
.swesmbi(0);
681 regs
.sw_fw_sync
= val
;
684 if (!IN_RANGE(daddr
, REG_VFTA
, VLAN_FILTER_TABLE_SIZE
*4) &&
685 !IN_RANGE(daddr
, REG_RAL
, RCV_ADDRESS_TABLE_SIZE
*8) &&
686 !IN_RANGE(daddr
, REG_MTA
, MULTICAST_TABLE_SIZE
*4))
687 panic("Write request to unknown register number: %#x\n", daddr
);
690 pkt
->makeAtomicResponse();
// Record interrupt cause 't' in ICR and either raise the interrupt
// right away or defer it via interEvent to honor the ITR moderation
// interval; 'now' bypasses moderation.
695 IGbE::postInterrupt(IntTypes t
, bool now
)
699 // Interrupt is already pending
700 if (t
& regs
.icr() && !now
)
703 regs
.icr
= regs
.icr() | t
;
// The ITR interval field is expressed in 256 ns units.
705 Tick itr_interval
= SimClock::Int::ns
* 256 * regs
.itr
.interval();
706 DPRINTF(EthernetIntr
,
707 "EINT: postInterrupt() curTick(): %d itr: %d interval: %d\n",
708 curTick(), regs
.itr
.interval(), itr_interval
);
// Fire immediately if moderation is disabled, the caller forced it,
// or the moderation window since the last interrupt has elapsed.
710 if (regs
.itr
.interval() == 0 || now
||
711 lastInterrupt
+ itr_interval
<= curTick()) {
712 if (interEvent
.scheduled()) {
713 deschedule(interEvent
);
// Otherwise schedule the deferred interrupt at the end of the window.
717 Tick int_time
= lastInterrupt
+ itr_interval
;
718 assert(int_time
> 0);
719 DPRINTF(EthernetIntr
, "EINT: Scheduling timer interrupt for tick %d\n",
721 if (!interEvent
.scheduled()) {
722 schedule(interEvent
, int_time
);
728 IGbE::delayIntEvent()
740 if (!(regs
.icr() & regs
.imr
)) {
741 DPRINTF(Ethernet
, "Interrupt Masked. Not Posting\n");
745 DPRINTF(Ethernet
, "Posting Interrupt\n");
748 if (interEvent
.scheduled()) {
749 deschedule(interEvent
);
752 if (rdtrEvent
.scheduled()) {
754 deschedule(rdtrEvent
);
756 if (radvEvent
.scheduled()) {
758 deschedule(radvEvent
);
760 if (tadvEvent
.scheduled()) {
762 deschedule(tadvEvent
);
764 if (tidvEvent
.scheduled()) {
766 deschedule(tidvEvent
);
769 regs
.icr
.int_assert(1);
770 DPRINTF(EthernetIntr
, "EINT: Posting interrupt to CPU now. Vector %#x\n",
775 lastInterrupt
= curTick();
781 if (regs
.icr
.int_assert()) {
782 regs
.icr
.int_assert(0);
783 DPRINTF(EthernetIntr
,
784 "EINT: Clearing interrupt to CPU now. Vector %#x\n",
793 DPRINTF(Ethernet
, "Checking interrupts icr: %#x imr: %#x\n", regs
.icr(),
795 // Check if we need to clear the cpu interrupt
796 if (!(regs
.icr() & regs
.imr
)) {
797 DPRINTF(Ethernet
, "Mask cleaned all interrupts\n");
798 if (interEvent
.scheduled())
799 deschedule(interEvent
);
800 if (regs
.icr
.int_assert())
803 DPRINTF(Ethernet
, "ITR = %#X itr.interval = %#X\n",
804 regs
.itr(), regs
.itr
.interval());
806 if (regs
.icr() & regs
.imr
) {
807 if (regs
.itr
.interval() == 0) {
811 "Possibly scheduling interrupt because of imr write\n");
812 if (!interEvent
.scheduled()) {
813 Tick t
= curTick() + SimClock::Int::ns
* 256 * regs
.itr
.interval();
814 DPRINTF(Ethernet
, "Scheduling for %d\n", t
);
815 schedule(interEvent
, t
);
822 ///////////////////////////// IGbE::DescCache //////////////////////////////
// Base descriptor-cache constructor: record the owning device, name and
// capacity, zero the bookkeeping state, and allocate the DMA fetch buffer.
825 IGbE::DescCache
<T
>::DescCache(IGbE
*i
, const std::string n
, int s
)
826 : igbe(i
), _name(n
), cachePnt(0), size(s
), curFetching(0),
827 wbOut(0), moreToWb(false), wbAlignment(0), pktPtr(NULL
),
// The *Delay events dispatch to the second-phase helpers; fetch/wb
// events fire when the corresponding DMA completes.
828 wbDelayEvent([this]{ writeback1(); }, n
),
829 fetchDelayEvent([this]{ fetchDescriptors1(); }, n
),
830 fetchEvent([this]{ fetchComplete(); }, n
),
831 wbEvent([this]{ wbComplete(); }, n
)
833 fetchBuf
= new T
[size
];
838 IGbE::DescCache
<T
>::~DescCache()
// Called when the descriptor base/length/head registers change; any
// change while descriptors are cached or a fetch/writeback DMA is in
// flight is fatal.
847 IGbE::DescCache
<T
>::areaChanged()
849 if (usedCache
.size() > 0 || curFetching
|| wbOut
)
850 panic("Descriptor Address, Length or Head changed. Bad\n");
857 IGbE::DescCache
<T
>::writeback(Addr aMask
)
859 int curHead
= descHead();
860 int max_to_wb
= usedCache
.size();
862 // Check if this writeback is less restrictive that the previous
863 // and if so setup another one immediately following it
865 if (aMask
< wbAlignment
) {
869 DPRINTF(EthernetDesc
,
870 "Writing back already in process, returning\n");
878 DPRINTF(EthernetDesc
, "Writing back descriptors head: %d tail: "
879 "%d len: %d cachePnt: %d max_to_wb: %d descleft: %d\n",
880 curHead
, descTail(), descLen(), cachePnt
, max_to_wb
,
883 if (max_to_wb
+ curHead
>= descLen()) {
884 max_to_wb
= descLen() - curHead
;
886 // this is by definition aligned correctly
887 } else if (wbAlignment
!= 0) {
888 // align the wb point to the mask
889 max_to_wb
= max_to_wb
& ~wbAlignment
;
892 DPRINTF(EthernetDesc
, "Writing back %d descriptors\n", max_to_wb
);
894 if (max_to_wb
<= 0) {
895 if (usedCache
.size())
896 igbe
->anBegin(annSmWb
, "Wait Alignment", CPA::FL_WAIT
);
898 igbe
->anWe(annSmWb
, annUsedCacheQ
);
904 assert(!wbDelayEvent
.scheduled());
905 igbe
->schedule(wbDelayEvent
, curTick() + igbe
->wbDelay
);
906 igbe
->anBegin(annSmWb
, "Prepare Writeback Desc");
// Second phase of descriptor writeback: stage the used descriptors into
// wbBuf and start the DMA write back to guest memory.
911 IGbE::DescCache
<T
>::writeback1()
913 // If we're draining delay issuing this DMA
914 if (igbe
->drainState() != DrainState::Running
) {
915 igbe
->schedule(wbDelayEvent
, curTick() + igbe
->wbDelay
);
919 DPRINTF(EthernetDesc
, "Begining DMA of %d descriptors\n", wbOut
);
// Copy the first wbOut used descriptors into the writeback buffer.
921 for (int x
= 0; x
< wbOut
; x
++) {
922 assert(usedCache
.size());
923 memcpy(&wbBuf
[x
], usedCache
[x
], sizeof(T
));
924 igbe
->anPq(annSmWb
, annUsedCacheQ
);
925 igbe
->anPq(annSmWb
, annDescQ
);
926 igbe
->anQ(annSmWb
, annUsedDescQ
);
930 igbe
->anBegin(annSmWb
, "Writeback Desc DMA");
// DMA the staged descriptors to descBase() + descHead(); wbEvent
// fires on completion.
933 igbe
->dmaWrite(pciToDma(descBase() + descHead() * sizeof(T
)),
934 wbOut
* sizeof(T
), &wbEvent
, (uint8_t*)wbBuf
,
940 IGbE::DescCache
<T
>::fetchDescriptors()
945 DPRINTF(EthernetDesc
,
946 "Currently fetching %d descriptors, returning\n",
951 if (descTail() >= cachePnt
)
952 max_to_fetch
= descTail() - cachePnt
;
954 max_to_fetch
= descLen() - cachePnt
;
956 size_t free_cache
= size
- usedCache
.size() - unusedCache
.size();
959 igbe
->anWe(annSmFetch
, annUnusedDescQ
);
961 igbe
->anPq(annSmFetch
, annUnusedDescQ
, max_to_fetch
);
965 igbe
->anWf(annSmFetch
, annDescQ
);
967 igbe
->anRq(annSmFetch
, annDescQ
, free_cache
);
970 max_to_fetch
= std::min(max_to_fetch
, free_cache
);
973 DPRINTF(EthernetDesc
, "Fetching descriptors head: %d tail: "
974 "%d len: %d cachePnt: %d max_to_fetch: %d descleft: %d\n",
975 descHead(), descTail(), descLen(), cachePnt
,
976 max_to_fetch
, descLeft());
979 if (max_to_fetch
== 0)
982 // So we don't have two descriptor fetches going on at once
983 curFetching
= max_to_fetch
;
985 assert(!fetchDelayEvent
.scheduled());
986 igbe
->schedule(fetchDelayEvent
, curTick() + igbe
->fetchDelay
);
987 igbe
->anBegin(annSmFetch
, "Prepare Fetch Desc");
992 IGbE::DescCache
<T
>::fetchDescriptors1()
994 // If we're draining delay issuing this DMA
995 if (igbe
->drainState() != DrainState::Running
) {
996 igbe
->schedule(fetchDelayEvent
, curTick() + igbe
->fetchDelay
);
1000 igbe
->anBegin(annSmFetch
, "Fetch Desc");
1002 DPRINTF(EthernetDesc
, "Fetching descriptors at %#x (%#x), size: %#x\n",
1003 descBase() + cachePnt
* sizeof(T
),
1004 pciToDma(descBase() + cachePnt
* sizeof(T
)),
1005 curFetching
* sizeof(T
));
1006 assert(curFetching
);
1007 igbe
->dmaRead(pciToDma(descBase() + cachePnt
* sizeof(T
)),
1008 curFetching
* sizeof(T
), &fetchEvent
, (uint8_t*)fetchBuf
,
1009 igbe
->fetchCompDelay
);
1014 IGbE::DescCache
<T
>::fetchComplete()
1017 igbe
->anBegin(annSmFetch
, "Fetch Complete");
1018 for (int x
= 0; x
< curFetching
; x
++) {
1020 memcpy(newDesc
, &fetchBuf
[x
], sizeof(T
));
1021 unusedCache
.push_back(newDesc
);
1022 igbe
->anDq(annSmFetch
, annUnusedDescQ
);
1023 igbe
->anQ(annSmFetch
, annUnusedCacheQ
);
1024 igbe
->anQ(annSmFetch
, annDescQ
);
1029 int oldCp
= cachePnt
;
1032 cachePnt
+= curFetching
;
1033 assert(cachePnt
<= descLen());
1034 if (cachePnt
== descLen())
1039 DPRINTF(EthernetDesc
, "Fetching complete cachePnt %d -> %d\n",
1042 if ((descTail() >= cachePnt
? (descTail() - cachePnt
) : (descLen() -
1045 igbe
->anWe(annSmFetch
, annUnusedDescQ
);
1046 } else if (!(size
- usedCache
.size() - unusedCache
.size())) {
1047 igbe
->anWf(annSmFetch
, annDescQ
);
1049 igbe
->anBegin(annSmFetch
, "Wait", CPA::FL_WAIT
);
1058 IGbE::DescCache
<T
>::wbComplete()
1061 igbe
->anBegin(annSmWb
, "Finish Writeback");
1063 long curHead
= descHead();
1065 long oldHead
= curHead
;
1068 for (int x
= 0; x
< wbOut
; x
++) {
1069 assert(usedCache
.size());
1070 delete usedCache
[0];
1071 usedCache
.pop_front();
1073 igbe
->anDq(annSmWb
, annUsedCacheQ
);
1074 igbe
->anDq(annSmWb
, annDescQ
);
1080 if (curHead
>= descLen())
1081 curHead
-= descLen();
1084 updateHead(curHead
);
1086 DPRINTF(EthernetDesc
, "Writeback complete curHead %d -> %d\n",
1089 // If we still have more to wb, call wb now
1093 DPRINTF(EthernetDesc
, "Writeback has more todo\n");
1094 writeback(wbAlignment
);
1099 if (usedCache
.size())
1100 igbe
->anBegin(annSmWb
, "Wait", CPA::FL_WAIT
);
1102 igbe
->anWe(annSmWb
, annUsedCacheQ
);
// Drop every cached descriptor (used and unused) and free its memory.
1109 IGbE::DescCache
<T
>::reset()
1111 DPRINTF(EthernetDesc
, "Reseting descriptor cache\n");
1112 for (typename
CacheType::size_type x
= 0; x
< usedCache
.size(); x
++)
1113 delete usedCache
[x
];
1114 for (typename
CacheType::size_type x
= 0; x
< unusedCache
.size(); x
++)
1115 delete unusedCache
[x
];
1118 unusedCache
.clear();
// Checkpoint the descriptor cache: scalar bookkeeping, the raw bytes of
// each cached descriptor, and the wakeup times of any pending delay
// events.
1126 IGbE::DescCache
<T
>::serialize(CheckpointOut
&cp
) const
1128 SERIALIZE_SCALAR(cachePnt
);
1129 SERIALIZE_SCALAR(curFetching
);
1130 SERIALIZE_SCALAR(wbOut
);
1131 SERIALIZE_SCALAR(moreToWb
);
1132 SERIALIZE_SCALAR(wbAlignment
);
// Descriptors are written out as raw byte arrays, one per entry.
1134 typename
CacheType::size_type usedCacheSize
= usedCache
.size();
1135 SERIALIZE_SCALAR(usedCacheSize
);
1136 for (typename
CacheType::size_type x
= 0; x
< usedCacheSize
; x
++) {
1137 arrayParamOut(cp
, csprintf("usedCache_%d", x
),
1138 (uint8_t*)usedCache
[x
],sizeof(T
));
1141 typename
CacheType::size_type unusedCacheSize
= unusedCache
.size();
1142 SERIALIZE_SCALAR(unusedCacheSize
);
1143 for (typename
CacheType::size_type x
= 0; x
< unusedCacheSize
; x
++) {
1144 arrayParamOut(cp
, csprintf("unusedCache_%d", x
),
1145 (uint8_t*)unusedCache
[x
],sizeof(T
));
// A stored delay of 0 means the corresponding event was not scheduled.
1148 Tick fetch_delay
= 0, wb_delay
= 0;
1149 if (fetchDelayEvent
.scheduled())
1150 fetch_delay
= fetchDelayEvent
.when();
1151 SERIALIZE_SCALAR(fetch_delay
);
1152 if (wbDelayEvent
.scheduled())
1153 wb_delay
= wbDelayEvent
.when();
1154 SERIALIZE_SCALAR(wb_delay
);
// Restore the descriptor cache from a checkpoint: scalar bookkeeping,
// the cached descriptor bytes, and any pending delay-event times.
1161 IGbE::DescCache
<T
>::unserialize(CheckpointIn
&cp
)
1163 UNSERIALIZE_SCALAR(cachePnt
);
1164 UNSERIALIZE_SCALAR(curFetching
);
1165 UNSERIALIZE_SCALAR(wbOut
);
1166 UNSERIALIZE_SCALAR(moreToWb
);
1167 UNSERIALIZE_SCALAR(wbAlignment
);
1169 typename
CacheType::size_type usedCacheSize
;
1170 UNSERIALIZE_SCALAR(usedCacheSize
);
// Rebuild each cached descriptor from its raw byte array.
1172 for (typename
CacheType::size_type x
= 0; x
< usedCacheSize
; x
++) {
1174 arrayParamIn(cp
, csprintf("usedCache_%d", x
),
1175 (uint8_t*)temp
,sizeof(T
));
1176 usedCache
.push_back(temp
);
1179 typename
CacheType::size_type unusedCacheSize
;
1180 UNSERIALIZE_SCALAR(unusedCacheSize
);
1181 for (typename
CacheType::size_type x
= 0; x
< unusedCacheSize
; x
++) {
1183 arrayParamIn(cp
, csprintf("unusedCache_%d", x
),
1184 (uint8_t*)temp
,sizeof(T
));
1185 unusedCache
.push_back(temp
);
// Re-schedule the delay events if they were pending at checkpoint time.
1187 Tick fetch_delay
= 0, wb_delay
= 0;
1188 UNSERIALIZE_SCALAR(fetch_delay
);
1189 UNSERIALIZE_SCALAR(wb_delay
);
1191 igbe
->schedule(fetchDelayEvent
, fetch_delay
);
1193 igbe
->schedule(wbDelayEvent
, wb_delay
);
1198 ///////////////////////////// IGbE::RxDescCache //////////////////////////////
// RX descriptor cache constructor: forward to the DescCache base and
// set the annotation labels used by the tracing/annotation hooks.
1200 IGbE::RxDescCache::RxDescCache(IGbE
*i
, const std::string n
, int s
)
1201 : DescCache
<RxDesc
>(i
, n
, s
), pktDone(false), splitCount(0),
// pktEvent fires when a whole-packet DMA finishes; the hdr/data events
// fire for the two halves of a header-split write.
1202 pktEvent([this]{ pktComplete(); }, n
),
1203 pktHdrEvent([this]{ pktSplitDone(); }, n
),
1204 pktDataEvent([this]{ pktSplitDone(); }, n
)
1207 annSmFetch
= "RX Desc Fetch";
1208 annSmWb
= "RX Desc Writeback";
1209 annUnusedDescQ
= "RX Unused Descriptors";
1210 annUnusedCacheQ
= "RX Unused Descriptor Cache";
1211 annUsedCacheQ
= "RX Used Descriptor Cache";
1212 annUsedDescQ
= "RX Used Descriptors";
1213 annDescQ
= "RX Descriptors";
// Called when one half of a header-split packet DMA finishes; the
// packet is only treated as complete once both halves are done
// (splitCount == 2), at which point pktComplete() is invoked.
1217 IGbE::RxDescCache::pktSplitDone()
1220 DPRINTF(EthernetDesc
,
1221 "Part of split packet done: splitcount now %d\n", splitCount
);
1222 assert(splitCount
<= 2);
1223 if (splitCount
!= 2)
1226 DPRINTF(EthernetDesc
,
1227 "Part of split packet done: calling pktComplete()\n");
1232 IGbE::RxDescCache::writePacket(EthPacketPtr packet
, int pkt_offset
)
1234 assert(unusedCache
.size());
1235 //if (!unusedCache.size())
1240 unsigned buf_len
, hdr_len
;
1242 RxDesc
*desc
= unusedCache
.front();
1243 switch (igbe
->regs
.srrctl
.desctype()) {
1245 assert(pkt_offset
== 0);
1246 bytesCopied
= packet
->length
;
1247 DPRINTF(EthernetDesc
, "Packet Length: %d Desc Size: %d\n",
1248 packet
->length
, igbe
->regs
.rctl
.descSize());
1249 assert(packet
->length
< igbe
->regs
.rctl
.descSize());
1250 igbe
->dmaWrite(pciToDma(desc
->legacy
.buf
),
1251 packet
->length
, &pktEvent
, packet
->data
,
1252 igbe
->rxWriteDelay
);
1254 case RXDT_ADV_ONEBUF
:
1255 assert(pkt_offset
== 0);
1256 bytesCopied
= packet
->length
;
1257 buf_len
= igbe
->regs
.rctl
.lpe() ? igbe
->regs
.srrctl
.bufLen() :
1258 igbe
->regs
.rctl
.descSize();
1259 DPRINTF(EthernetDesc
, "Packet Length: %d srrctl: %#x Desc Size: %d\n",
1260 packet
->length
, igbe
->regs
.srrctl(), buf_len
);
1261 assert(packet
->length
< buf_len
);
1262 igbe
->dmaWrite(pciToDma(desc
->adv_read
.pkt
),
1263 packet
->length
, &pktEvent
, packet
->data
,
1264 igbe
->rxWriteDelay
);
1265 desc
->adv_wb
.header_len
= htole(0);
1266 desc
->adv_wb
.sph
= htole(0);
1267 desc
->adv_wb
.pkt_len
= htole((uint16_t)(pktPtr
->length
));
1269 case RXDT_ADV_SPLIT_A
:
1272 buf_len
= igbe
->regs
.rctl
.lpe() ? igbe
->regs
.srrctl
.bufLen() :
1273 igbe
->regs
.rctl
.descSize();
1274 hdr_len
= igbe
->regs
.rctl
.lpe() ? igbe
->regs
.srrctl
.hdrLen() : 0;
1275 DPRINTF(EthernetDesc
,
1276 "lpe: %d Packet Length: %d offset: %d srrctl: %#x "
1277 "hdr addr: %#x Hdr Size: %d desc addr: %#x Desc Size: %d\n",
1278 igbe
->regs
.rctl
.lpe(), packet
->length
, pkt_offset
,
1279 igbe
->regs
.srrctl(), desc
->adv_read
.hdr
, hdr_len
,
1280 desc
->adv_read
.pkt
, buf_len
);
1282 split_point
= hsplit(pktPtr
);
1284 if (packet
->length
<= hdr_len
) {
1285 bytesCopied
= packet
->length
;
1286 assert(pkt_offset
== 0);
1287 DPRINTF(EthernetDesc
, "Hdr split: Entire packet in header\n");
1288 igbe
->dmaWrite(pciToDma(desc
->adv_read
.hdr
),
1289 packet
->length
, &pktEvent
, packet
->data
,
1290 igbe
->rxWriteDelay
);
1291 desc
->adv_wb
.header_len
= htole((uint16_t)packet
->length
);
1292 desc
->adv_wb
.sph
= htole(0);
1293 desc
->adv_wb
.pkt_len
= htole(0);
1294 } else if (split_point
) {
1296 // we are only copying some data, header/data has already been
1299 std::min(packet
->length
- pkt_offset
, buf_len
);
1300 bytesCopied
+= max_to_copy
;
1301 DPRINTF(EthernetDesc
,
1302 "Hdr split: Continuing data buffer copy\n");
1303 igbe
->dmaWrite(pciToDma(desc
->adv_read
.pkt
),
1304 max_to_copy
, &pktEvent
,
1305 packet
->data
+ pkt_offset
, igbe
->rxWriteDelay
);
1306 desc
->adv_wb
.header_len
= htole(0);
1307 desc
->adv_wb
.pkt_len
= htole((uint16_t)max_to_copy
);
1308 desc
->adv_wb
.sph
= htole(0);
1311 std::min(packet
->length
- split_point
, buf_len
);
1312 bytesCopied
+= max_to_copy
+ split_point
;
1314 DPRINTF(EthernetDesc
, "Hdr split: splitting at %d\n",
1316 igbe
->dmaWrite(pciToDma(desc
->adv_read
.hdr
),
1317 split_point
, &pktHdrEvent
,
1318 packet
->data
, igbe
->rxWriteDelay
);
1319 igbe
->dmaWrite(pciToDma(desc
->adv_read
.pkt
),
1320 max_to_copy
, &pktDataEvent
,
1321 packet
->data
+ split_point
, igbe
->rxWriteDelay
);
1322 desc
->adv_wb
.header_len
= htole(split_point
);
1323 desc
->adv_wb
.sph
= 1;
1324 desc
->adv_wb
.pkt_len
= htole((uint16_t)(max_to_copy
));
1327 panic("Header split not fitting within header buffer or "
1328 "undecodable packet not fitting in header unsupported\n");
1332 panic("Unimplemnted RX receive buffer type: %d\n",
1333 igbe
->regs
.srrctl
.desctype());
1340 IGbE::RxDescCache::pktComplete()
1342 assert(unusedCache
.size());
1344 desc
= unusedCache
.front();
1346 igbe
->anBegin("RXS", "Update Desc");
1348 uint16_t crcfixup
= igbe
->regs
.rctl
.secrc() ? 0 : 4 ;
1349 DPRINTF(EthernetDesc
, "pktPtr->length: %d bytesCopied: %d "
1350 "stripcrc offset: %d value written: %d %d\n",
1351 pktPtr
->length
, bytesCopied
, crcfixup
,
1352 htole((uint16_t)(pktPtr
->length
+ crcfixup
)),
1353 (uint16_t)(pktPtr
->length
+ crcfixup
));
1355 // no support for anything but starting at 0
1356 assert(igbe
->regs
.rxcsum
.pcss() == 0);
1358 DPRINTF(EthernetDesc
, "Packet written to memory updating Descriptor\n");
1360 uint16_t status
= RXDS_DD
;
1362 uint16_t ext_err
= 0;
1367 assert(bytesCopied
<= pktPtr
->length
);
1368 if (bytesCopied
== pktPtr
->length
)
1376 DPRINTF(EthernetDesc
, "Proccesing Ip packet with Id=%d\n",
1384 if (ip
&& igbe
->regs
.rxcsum
.ipofld()) {
1385 DPRINTF(EthernetDesc
, "Checking IP checksum\n");
1386 status
|= RXDS_IPCS
;
1387 csum
= htole(cksum(ip
));
1388 igbe
->rxIpChecksums
++;
1389 if (cksum(ip
) != 0) {
1391 ext_err
|= RXDEE_IPE
;
1392 DPRINTF(EthernetDesc
, "Checksum is bad!!\n");
1395 TcpPtr tcp
= ip
? TcpPtr(ip
) : TcpPtr(ip6
);
1396 if (tcp
&& igbe
->regs
.rxcsum
.tuofld()) {
1397 DPRINTF(EthernetDesc
, "Checking TCP checksum\n");
1398 status
|= RXDS_TCPCS
;
1400 csum
= htole(cksum(tcp
));
1401 igbe
->rxTcpChecksums
++;
1402 if (cksum(tcp
) != 0) {
1403 DPRINTF(EthernetDesc
, "Checksum is bad!!\n");
1405 ext_err
|= RXDEE_TCPE
;
1409 UdpPtr udp
= ip
? UdpPtr(ip
) : UdpPtr(ip6
);
1410 if (udp
&& igbe
->regs
.rxcsum
.tuofld()) {
1411 DPRINTF(EthernetDesc
, "Checking UDP checksum\n");
1412 status
|= RXDS_UDPCS
;
1414 csum
= htole(cksum(udp
));
1415 igbe
->rxUdpChecksums
++;
1416 if (cksum(udp
) != 0) {
1417 DPRINTF(EthernetDesc
, "Checksum is bad!!\n");
1418 ext_err
|= RXDEE_TCPE
;
1423 DPRINTF(EthernetSM
, "Proccesing Non-Ip packet\n");
1426 switch (igbe
->regs
.srrctl
.desctype()) {
1428 desc
->legacy
.len
= htole((uint16_t)(pktPtr
->length
+ crcfixup
));
1429 desc
->legacy
.status
= htole(status
);
1430 desc
->legacy
.errors
= htole(err
);
1431 // No vlan support at this point... just set it to 0
1432 desc
->legacy
.vlan
= 0;
1434 case RXDT_ADV_SPLIT_A
:
1435 case RXDT_ADV_ONEBUF
:
1436 desc
->adv_wb
.rss_type
= htole(0);
1437 desc
->adv_wb
.pkt_type
= htole(ptype
);
1438 if (igbe
->regs
.rxcsum
.pcsd()) {
1439 // no rss support right now
1440 desc
->adv_wb
.rss_hash
= htole(0);
1442 desc
->adv_wb
.id
= htole(ip_id
);
1443 desc
->adv_wb
.csum
= htole(csum
);
1445 desc
->adv_wb
.status
= htole(status
);
1446 desc
->adv_wb
.errors
= htole(ext_err
);
1448 desc
->adv_wb
.vlan_tag
= htole(0);
1451 panic("Unimplemnted RX receive buffer type %d\n",
1452 igbe
->regs
.srrctl
.desctype());
1455 DPRINTF(EthernetDesc
, "Descriptor complete w0: %#x w1: %#x\n",
1456 desc
->adv_read
.pkt
, desc
->adv_read
.hdr
);
1458 if (bytesCopied
== pktPtr
->length
) {
1459 DPRINTF(EthernetDesc
,
1460 "Packet completely written to descriptor buffers\n");
1461 // Deal with the rx timer interrupts
1462 if (igbe
->regs
.rdtr
.delay()) {
1463 Tick delay
= igbe
->regs
.rdtr
.delay() * igbe
->intClock();
1464 DPRINTF(EthernetSM
, "RXS: Scheduling DTR for %d\n", delay
);
1465 igbe
->reschedule(igbe
->rdtrEvent
, curTick() + delay
);
1468 if (igbe
->regs
.radv
.idv()) {
1469 Tick delay
= igbe
->regs
.radv
.idv() * igbe
->intClock();
1470 DPRINTF(EthernetSM
, "RXS: Scheduling ADV for %d\n", delay
);
1471 if (!igbe
->radvEvent
.scheduled()) {
1472 igbe
->schedule(igbe
->radvEvent
, curTick() + delay
);
1476 // if neither radv or rdtr, maybe itr is set...
1477 if (!igbe
->regs
.rdtr
.delay() && !igbe
->regs
.radv
.idv()) {
1479 "RXS: Receive interrupt delay disabled, posting IT_RXT\n");
1480 igbe
->postInterrupt(IT_RXT
);
1483 // If the packet is small enough, interrupt appropriately
1484 // I wonder if this is delayed or not?!
1485 if (pktPtr
->length
<= igbe
->regs
.rsrpd
.idv()) {
1487 "RXS: Posting IT_SRPD beacuse small packet received\n");
1488 igbe
->postInterrupt(IT_SRPD
);
1498 igbe
->anBegin("RXS", "Done Updating Desc");
1499 DPRINTF(EthernetDesc
, "Processing of this descriptor complete\n");
1500 igbe
->anDq("RXS", annUnusedCacheQ
);
1501 unusedCache
.pop_front();
1502 igbe
->anQ("RXS", annUsedCacheQ
);
1503 usedCache
.push_back(desc
);
1507 IGbE::RxDescCache::enableSm()
1509 if (igbe
->drainState() != DrainState::Draining
) {
1510 igbe
->rxTick
= true;
1511 igbe
->restartClock();
1516 IGbE::RxDescCache::packetDone()
1526 IGbE::RxDescCache::hasOutstandingEvents()
1528 return pktEvent
.scheduled() || wbEvent
.scheduled() ||
1529 fetchEvent
.scheduled() || pktHdrEvent
.scheduled() ||
1530 pktDataEvent
.scheduled();
1535 IGbE::RxDescCache::serialize(CheckpointOut
&cp
) const
1537 DescCache
<RxDesc
>::serialize(cp
);
1538 SERIALIZE_SCALAR(pktDone
);
1539 SERIALIZE_SCALAR(splitCount
);
1540 SERIALIZE_SCALAR(bytesCopied
);
1544 IGbE::RxDescCache::unserialize(CheckpointIn
&cp
)
1546 DescCache
<RxDesc
>::unserialize(cp
);
1547 UNSERIALIZE_SCALAR(pktDone
);
1548 UNSERIALIZE_SCALAR(splitCount
);
1549 UNSERIALIZE_SCALAR(bytesCopied
);
1553 ///////////////////////////// IGbE::TxDescCache //////////////////////////////
1555 IGbE::TxDescCache::TxDescCache(IGbE
*i
, const std::string n
, int s
)
1556 : DescCache
<TxDesc
>(i
,n
, s
), pktDone(false), isTcp(false),
1557 pktWaiting(false), pktMultiDesc(false),
1558 completionAddress(0), completionEnabled(false),
1559 useTso(false), tsoHeaderLen(0), tsoMss(0), tsoTotalLen(0), tsoUsedLen(0),
1560 tsoPrevSeq(0), tsoPktPayloadBytes(0), tsoLoadedHeader(false),
1561 tsoPktHasHeader(false), tsoDescBytesUsed(0), tsoCopyBytes(0), tsoPkts(0),
1562 pktEvent([this]{ pktComplete(); }, n
),
1563 headerEvent([this]{ headerComplete(); }, n
),
1564 nullEvent([this]{ nullCallback(); }, n
)
1566 annSmFetch
= "TX Desc Fetch";
1567 annSmWb
= "TX Desc Writeback";
1568 annUnusedDescQ
= "TX Unused Descriptors";
1569 annUnusedCacheQ
= "TX Unused Descriptor Cache";
1570 annUsedCacheQ
= "TX Used Descriptor Cache";
1571 annUsedDescQ
= "TX Used Descriptors";
1572 annDescQ
= "TX Descriptors";
1576 IGbE::TxDescCache::processContextDesc()
1578 assert(unusedCache
.size());
1581 DPRINTF(EthernetDesc
, "Checking and processing context descriptors\n");
1583 while (!useTso
&& unusedCache
.size() &&
1584 TxdOp::isContext(unusedCache
.front())) {
1585 DPRINTF(EthernetDesc
, "Got context descriptor type...\n");
1587 desc
= unusedCache
.front();
1588 DPRINTF(EthernetDesc
, "Descriptor upper: %#x lower: %#X\n",
1589 desc
->d1
, desc
->d2
);
1592 // is this going to be a tcp or udp packet?
1593 isTcp
= TxdOp::tcp(desc
) ? true : false;
1595 // setup all the TSO variables, they'll be ignored if we don't use
1596 // tso for this connection
1597 tsoHeaderLen
= TxdOp::hdrlen(desc
);
1598 tsoMss
= TxdOp::mss(desc
);
1600 if (TxdOp::isType(desc
, TxdOp::TXD_CNXT
) && TxdOp::tse(desc
)) {
1601 DPRINTF(EthernetDesc
, "TCP offload enabled for packet hdrlen: "
1602 "%d mss: %d paylen %d\n", TxdOp::hdrlen(desc
),
1603 TxdOp::mss(desc
), TxdOp::getLen(desc
));
1605 tsoTotalLen
= TxdOp::getLen(desc
);
1606 tsoLoadedHeader
= false;
1607 tsoDescBytesUsed
= 0;
1610 tsoPktHasHeader
= false;
1616 unusedCache
.pop_front();
1617 igbe
->anDq("TXS", annUnusedCacheQ
);
1618 usedCache
.push_back(desc
);
1619 igbe
->anQ("TXS", annUsedCacheQ
);
1622 if (!unusedCache
.size())
1625 desc
= unusedCache
.front();
1626 if (!useTso
&& TxdOp::isType(desc
, TxdOp::TXD_ADVDATA
) &&
1628 DPRINTF(EthernetDesc
, "TCP offload(adv) enabled for packet "
1629 "hdrlen: %d mss: %d paylen %d\n",
1630 tsoHeaderLen
, tsoMss
, TxdOp::getTsoLen(desc
));
1632 tsoTotalLen
= TxdOp::getTsoLen(desc
);
1633 tsoLoadedHeader
= false;
1634 tsoDescBytesUsed
= 0;
1637 tsoPktHasHeader
= false;
1641 if (useTso
&& !tsoLoadedHeader
) {
1642 // we need to fetch a header
1643 DPRINTF(EthernetDesc
, "Starting DMA of TSO header\n");
1644 assert(TxdOp::isData(desc
) && TxdOp::getLen(desc
) >= tsoHeaderLen
);
1646 assert(tsoHeaderLen
<= 256);
1647 igbe
->dmaRead(pciToDma(TxdOp::getBuf(desc
)),
1648 tsoHeaderLen
, &headerEvent
, tsoHeader
, 0);
1653 IGbE::TxDescCache::headerComplete()
1655 DPRINTF(EthernetDesc
, "TSO: Fetching TSO header complete\n");
1658 assert(unusedCache
.size());
1659 TxDesc
*desc
= unusedCache
.front();
1660 DPRINTF(EthernetDesc
, "TSO: len: %d tsoHeaderLen: %d\n",
1661 TxdOp::getLen(desc
), tsoHeaderLen
);
1663 if (TxdOp::getLen(desc
) == tsoHeaderLen
) {
1664 tsoDescBytesUsed
= 0;
1665 tsoLoadedHeader
= true;
1666 unusedCache
.pop_front();
1667 usedCache
.push_back(desc
);
1669 DPRINTF(EthernetDesc
, "TSO: header part of larger payload\n");
1670 tsoDescBytesUsed
= tsoHeaderLen
;
1671 tsoLoadedHeader
= true;
1678 IGbE::TxDescCache::getPacketSize(EthPacketPtr p
)
1680 if (!unusedCache
.size())
1683 DPRINTF(EthernetDesc
, "Starting processing of descriptor\n");
1685 assert(!useTso
|| tsoLoadedHeader
);
1686 TxDesc
*desc
= unusedCache
.front();
1689 DPRINTF(EthernetDesc
, "getPacket(): TxDescriptor data "
1690 "d1: %#llx d2: %#llx\n", desc
->d1
, desc
->d2
);
1691 DPRINTF(EthernetDesc
, "TSO: use: %d hdrlen: %d mss: %d total: %d "
1692 "used: %d loaded hdr: %d\n", useTso
, tsoHeaderLen
, tsoMss
,
1693 tsoTotalLen
, tsoUsedLen
, tsoLoadedHeader
);
1695 if (tsoPktHasHeader
)
1696 tsoCopyBytes
= std::min((tsoMss
+ tsoHeaderLen
) - p
->length
,
1697 TxdOp::getLen(desc
) - tsoDescBytesUsed
);
1699 tsoCopyBytes
= std::min(tsoMss
,
1700 TxdOp::getLen(desc
) - tsoDescBytesUsed
);
1702 tsoCopyBytes
+ (tsoPktHasHeader
? 0 : tsoHeaderLen
);
1704 DPRINTF(EthernetDesc
, "TSO: descBytesUsed: %d copyBytes: %d "
1705 "this descLen: %d\n",
1706 tsoDescBytesUsed
, tsoCopyBytes
, TxdOp::getLen(desc
));
1707 DPRINTF(EthernetDesc
, "TSO: pktHasHeader: %d\n", tsoPktHasHeader
);
1708 DPRINTF(EthernetDesc
, "TSO: Next packet is %d bytes\n", pkt_size
);
1712 DPRINTF(EthernetDesc
, "Next TX packet is %d bytes\n",
1713 TxdOp::getLen(unusedCache
.front()));
1714 return TxdOp::getLen(desc
);
1718 IGbE::TxDescCache::getPacketData(EthPacketPtr p
)
1720 assert(unusedCache
.size());
1723 desc
= unusedCache
.front();
1725 DPRINTF(EthernetDesc
, "getPacketData(): TxDescriptor data "
1726 "d1: %#llx d2: %#llx\n", desc
->d1
, desc
->d2
);
1727 assert((TxdOp::isLegacy(desc
) || TxdOp::isData(desc
)) &&
1728 TxdOp::getLen(desc
));
1734 DPRINTF(EthernetDesc
, "Starting DMA of packet at offset %d\n", p
->length
);
1737 assert(tsoLoadedHeader
);
1738 if (!tsoPktHasHeader
) {
1739 DPRINTF(EthernetDesc
,
1740 "Loading TSO header (%d bytes) into start of packet\n",
1742 memcpy(p
->data
, &tsoHeader
,tsoHeaderLen
);
1743 p
->length
+=tsoHeaderLen
;
1744 tsoPktHasHeader
= true;
1749 DPRINTF(EthernetDesc
,
1750 "Starting DMA of packet at offset %d length: %d\n",
1751 p
->length
, tsoCopyBytes
);
1752 igbe
->dmaRead(pciToDma(TxdOp::getBuf(desc
))
1754 tsoCopyBytes
, &pktEvent
, p
->data
+ p
->length
,
1756 tsoDescBytesUsed
+= tsoCopyBytes
;
1757 assert(tsoDescBytesUsed
<= TxdOp::getLen(desc
));
1759 igbe
->dmaRead(pciToDma(TxdOp::getBuf(desc
)),
1760 TxdOp::getLen(desc
), &pktEvent
, p
->data
+ p
->length
,
1766 IGbE::TxDescCache::pktComplete()
1770 assert(unusedCache
.size());
1773 igbe
->anBegin("TXS", "Update Desc");
1775 DPRINTF(EthernetDesc
, "DMA of packet complete\n");
1778 desc
= unusedCache
.front();
1779 assert((TxdOp::isLegacy(desc
) || TxdOp::isData(desc
)) &&
1780 TxdOp::getLen(desc
));
1782 DPRINTF(EthernetDesc
, "TxDescriptor data d1: %#llx d2: %#llx\n",
1783 desc
->d1
, desc
->d2
);
1785 // Set the length of the data in the EtherPacket
1787 DPRINTF(EthernetDesc
, "TSO: use: %d hdrlen: %d mss: %d total: %d "
1788 "used: %d loaded hdr: %d\n", useTso
, tsoHeaderLen
, tsoMss
,
1789 tsoTotalLen
, tsoUsedLen
, tsoLoadedHeader
);
1790 pktPtr
->simLength
+= tsoCopyBytes
;
1791 pktPtr
->length
+= tsoCopyBytes
;
1792 tsoUsedLen
+= tsoCopyBytes
;
1793 DPRINTF(EthernetDesc
, "TSO: descBytesUsed: %d copyBytes: %d\n",
1794 tsoDescBytesUsed
, tsoCopyBytes
);
1796 pktPtr
->simLength
+= TxdOp::getLen(desc
);
1797 pktPtr
->length
+= TxdOp::getLen(desc
);
1802 if ((!TxdOp::eop(desc
) && !useTso
) ||
1803 (pktPtr
->length
< ( tsoMss
+ tsoHeaderLen
) &&
1804 tsoTotalLen
!= tsoUsedLen
&& useTso
)) {
1805 assert(!useTso
|| (tsoDescBytesUsed
== TxdOp::getLen(desc
)));
1806 igbe
->anDq("TXS", annUnusedCacheQ
);
1807 unusedCache
.pop_front();
1808 igbe
->anQ("TXS", annUsedCacheQ
);
1809 usedCache
.push_back(desc
);
1811 tsoDescBytesUsed
= 0;
1814 pktMultiDesc
= true;
1816 DPRINTF(EthernetDesc
, "Partial Packet Descriptor of %d bytes Done\n",
1826 pktMultiDesc
= false;
1827 // no support for vlans
1828 assert(!TxdOp::vle(desc
));
1830 // we only support single packet descriptors at this point
1832 assert(TxdOp::eop(desc
));
1834 // set that this packet is done
1835 if (TxdOp::rs(desc
))
1838 DPRINTF(EthernetDesc
, "TxDescriptor data d1: %#llx d2: %#llx\n",
1839 desc
->d1
, desc
->d2
);
1845 DPRINTF(EthernetDesc
, "TSO: Modifying IP header. Id + %d\n",
1847 ip
->id(ip
->id() + tsoPkts
++);
1848 ip
->len(pktPtr
->length
- EthPtr(pktPtr
)->size());
1851 ip6
->plen(pktPtr
->length
- EthPtr(pktPtr
)->size());
1852 TcpPtr tcp
= ip
? TcpPtr(ip
) : TcpPtr(ip6
);
1854 DPRINTF(EthernetDesc
,
1855 "TSO: Modifying TCP header. old seq %d + %d\n",
1856 tcp
->seq(), tsoPrevSeq
);
1857 tcp
->seq(tcp
->seq() + tsoPrevSeq
);
1858 if (tsoUsedLen
!= tsoTotalLen
)
1859 tcp
->flags(tcp
->flags() & ~9); // clear fin & psh
1861 UdpPtr udp
= ip
? UdpPtr(ip
) : UdpPtr(ip6
);
1863 DPRINTF(EthernetDesc
, "TSO: Modifying UDP header.\n");
1864 udp
->len(pktPtr
->length
- EthPtr(pktPtr
)->size());
1866 tsoPrevSeq
= tsoUsedLen
;
1869 if (DTRACE(EthernetDesc
)) {
1872 DPRINTF(EthernetDesc
, "Proccesing Ip packet with Id=%d\n",
1875 DPRINTF(EthernetSM
, "Proccesing Non-Ip packet\n");
1878 // Checksums are only ofloaded for new descriptor types
1879 if (TxdOp::isData(desc
) && (TxdOp::ixsm(desc
) || TxdOp::txsm(desc
))) {
1880 DPRINTF(EthernetDesc
, "Calculating checksums for packet\n");
1884 if (ip
&& TxdOp::ixsm(desc
)) {
1887 igbe
->txIpChecksums
++;
1888 DPRINTF(EthernetDesc
, "Calculated IP checksum\n");
1890 if (TxdOp::txsm(desc
)) {
1891 TcpPtr tcp
= ip
? TcpPtr(ip
) : TcpPtr(ip6
);
1892 UdpPtr udp
= ip
? UdpPtr(ip
) : UdpPtr(ip6
);
1895 tcp
->sum(cksum(tcp
));
1896 igbe
->txTcpChecksums
++;
1897 DPRINTF(EthernetDesc
, "Calculated TCP checksum\n");
1901 udp
->sum(cksum(udp
));
1902 igbe
->txUdpChecksums
++;
1903 DPRINTF(EthernetDesc
, "Calculated UDP checksum\n");
1905 panic("Told to checksum, but don't know how\n");
1910 if (TxdOp::ide(desc
)) {
1911 // Deal with the rx timer interrupts
1912 DPRINTF(EthernetDesc
, "Descriptor had IDE set\n");
1913 if (igbe
->regs
.tidv
.idv()) {
1914 Tick delay
= igbe
->regs
.tidv
.idv() * igbe
->intClock();
1915 DPRINTF(EthernetDesc
, "setting tidv\n");
1916 igbe
->reschedule(igbe
->tidvEvent
, curTick() + delay
, true);
1919 if (igbe
->regs
.tadv
.idv() && igbe
->regs
.tidv
.idv()) {
1920 Tick delay
= igbe
->regs
.tadv
.idv() * igbe
->intClock();
1921 DPRINTF(EthernetDesc
, "setting tadv\n");
1922 if (!igbe
->tadvEvent
.scheduled()) {
1923 igbe
->schedule(igbe
->tadvEvent
, curTick() + delay
);
1929 if (!useTso
|| TxdOp::getLen(desc
) == tsoDescBytesUsed
) {
1930 DPRINTF(EthernetDesc
, "Descriptor Done\n");
1931 igbe
->anDq("TXS", annUnusedCacheQ
);
1932 unusedCache
.pop_front();
1933 igbe
->anQ("TXS", annUsedCacheQ
);
1934 usedCache
.push_back(desc
);
1935 tsoDescBytesUsed
= 0;
1938 if (useTso
&& tsoUsedLen
== tsoTotalLen
)
1942 DPRINTF(EthernetDesc
,
1943 "------Packet of %d bytes ready for transmission-------\n",
1948 tsoPktHasHeader
= false;
1950 if (igbe
->regs
.txdctl
.wthresh() == 0) {
1951 igbe
->anBegin("TXS", "Desc Writeback");
1952 DPRINTF(EthernetDesc
, "WTHRESH == 0, writing back descriptor\n");
1954 } else if (!igbe
->regs
.txdctl
.gran() && igbe
->regs
.txdctl
.wthresh() <=
1955 descInBlock(usedCache
.size())) {
1956 DPRINTF(EthernetDesc
, "used > WTHRESH, writing back descriptor\n");
1957 igbe
->anBegin("TXS", "Desc Writeback");
1958 writeback((igbe
->cacheBlockSize()-1)>>4);
1959 } else if (igbe
->regs
.txdctl
.wthresh() <= usedCache
.size()) {
1960 DPRINTF(EthernetDesc
, "used > WTHRESH, writing back descriptor\n");
1961 igbe
->anBegin("TXS", "Desc Writeback");
1962 writeback((igbe
->cacheBlockSize()-1)>>4);
1970 IGbE::TxDescCache::actionAfterWb()
1972 DPRINTF(EthernetDesc
, "actionAfterWb() completionEnabled: %d\n",
1974 igbe
->postInterrupt(iGbReg::IT_TXDW
);
1975 if (completionEnabled
) {
1976 descEnd
= igbe
->regs
.tdh();
1977 DPRINTF(EthernetDesc
,
1978 "Completion writing back value: %d to addr: %#x\n", descEnd
,
1980 igbe
->dmaWrite(pciToDma(mbits(completionAddress
, 63, 2)),
1981 sizeof(descEnd
), &nullEvent
, (uint8_t*)&descEnd
, 0);
1986 IGbE::TxDescCache::serialize(CheckpointOut
&cp
) const
1988 DescCache
<TxDesc
>::serialize(cp
);
1990 SERIALIZE_SCALAR(pktDone
);
1991 SERIALIZE_SCALAR(isTcp
);
1992 SERIALIZE_SCALAR(pktWaiting
);
1993 SERIALIZE_SCALAR(pktMultiDesc
);
1995 SERIALIZE_SCALAR(useTso
);
1996 SERIALIZE_SCALAR(tsoHeaderLen
);
1997 SERIALIZE_SCALAR(tsoMss
);
1998 SERIALIZE_SCALAR(tsoTotalLen
);
1999 SERIALIZE_SCALAR(tsoUsedLen
);
2000 SERIALIZE_SCALAR(tsoPrevSeq
);;
2001 SERIALIZE_SCALAR(tsoPktPayloadBytes
);
2002 SERIALIZE_SCALAR(tsoLoadedHeader
);
2003 SERIALIZE_SCALAR(tsoPktHasHeader
);
2004 SERIALIZE_ARRAY(tsoHeader
, 256);
2005 SERIALIZE_SCALAR(tsoDescBytesUsed
);
2006 SERIALIZE_SCALAR(tsoCopyBytes
);
2007 SERIALIZE_SCALAR(tsoPkts
);
2009 SERIALIZE_SCALAR(completionAddress
);
2010 SERIALIZE_SCALAR(completionEnabled
);
2011 SERIALIZE_SCALAR(descEnd
);
2015 IGbE::TxDescCache::unserialize(CheckpointIn
&cp
)
2017 DescCache
<TxDesc
>::unserialize(cp
);
2019 UNSERIALIZE_SCALAR(pktDone
);
2020 UNSERIALIZE_SCALAR(isTcp
);
2021 UNSERIALIZE_SCALAR(pktWaiting
);
2022 UNSERIALIZE_SCALAR(pktMultiDesc
);
2024 UNSERIALIZE_SCALAR(useTso
);
2025 UNSERIALIZE_SCALAR(tsoHeaderLen
);
2026 UNSERIALIZE_SCALAR(tsoMss
);
2027 UNSERIALIZE_SCALAR(tsoTotalLen
);
2028 UNSERIALIZE_SCALAR(tsoUsedLen
);
2029 UNSERIALIZE_SCALAR(tsoPrevSeq
);;
2030 UNSERIALIZE_SCALAR(tsoPktPayloadBytes
);
2031 UNSERIALIZE_SCALAR(tsoLoadedHeader
);
2032 UNSERIALIZE_SCALAR(tsoPktHasHeader
);
2033 UNSERIALIZE_ARRAY(tsoHeader
, 256);
2034 UNSERIALIZE_SCALAR(tsoDescBytesUsed
);
2035 UNSERIALIZE_SCALAR(tsoCopyBytes
);
2036 UNSERIALIZE_SCALAR(tsoPkts
);
2038 UNSERIALIZE_SCALAR(completionAddress
);
2039 UNSERIALIZE_SCALAR(completionEnabled
);
2040 UNSERIALIZE_SCALAR(descEnd
);
2044 IGbE::TxDescCache::packetAvailable()
2054 IGbE::TxDescCache::enableSm()
2056 if (igbe
->drainState() != DrainState::Draining
) {
2057 igbe
->txTick
= true;
2058 igbe
->restartClock();
2063 IGbE::TxDescCache::hasOutstandingEvents()
2065 return pktEvent
.scheduled() || wbEvent
.scheduled() ||
2066 fetchEvent
.scheduled();
2070 ///////////////////////////////////// IGbE /////////////////////////////////
2073 IGbE::restartClock()
2075 if (!tickEvent
.scheduled() && (rxTick
|| txTick
|| txFifoTick
) &&
2076 drainState() == DrainState::Running
)
2077 schedule(tickEvent
, clockEdge(Cycles(1)));
2083 unsigned int count(0);
2084 if (rxDescCache
.hasOutstandingEvents() ||
2085 txDescCache
.hasOutstandingEvents()) {
2093 if (tickEvent
.scheduled())
2094 deschedule(tickEvent
);
2097 DPRINTF(Drain
, "IGbE not drained\n");
2098 return DrainState::Draining
;
2100 return DrainState::Drained
;
2106 Drainable::drainResume();
2113 DPRINTF(EthernetSM
, "resuming from drain");
2119 if (drainState() != DrainState::Draining
)
2125 if (!rxDescCache
.hasOutstandingEvents() &&
2126 !txDescCache
.hasOutstandingEvents()) {
2127 DPRINTF(Drain
, "IGbE done draining, processing drain event\n");
2133 IGbE::txStateMachine()
2135 if (!regs
.tctl
.en()) {
2137 DPRINTF(EthernetSM
, "TXS: TX disabled, stopping ticking\n");
2141 // If we have a packet available and it's length is not 0 (meaning it's not
2142 // a multidescriptor packet) put it in the fifo, otherwise an the next
2143 // iteration we'll get the rest of the data
2144 if (txPacket
&& txDescCache
.packetAvailable()
2145 && !txDescCache
.packetMultiDesc() && txPacket
->length
) {
2146 anQ("TXS", "TX FIFO Q");
2147 DPRINTF(EthernetSM
, "TXS: packet placed in TX FIFO\n");
2151 txFifo
.push(txPacket
);
2152 txFifoTick
= true && drainState() != DrainState::Draining
;
2155 anBegin("TXS", "Desc Writeback");
2156 txDescCache
.writeback((cacheBlockSize()-1)>>4);
2160 // Only support descriptor granularity
2161 if (regs
.txdctl
.lwthresh() &&
2162 txDescCache
.descLeft() < (regs
.txdctl
.lwthresh() * 8)) {
2163 DPRINTF(EthernetSM
, "TXS: LWTHRESH caused posting of TXDLOW\n");
2164 postInterrupt(IT_TXDLOW
);
2168 txPacket
= std::make_shared
<EthPacketData
>(16384);
2171 if (!txDescCache
.packetWaiting()) {
2172 if (txDescCache
.descLeft() == 0) {
2173 postInterrupt(IT_TXQE
);
2174 anBegin("TXS", "Desc Writeback");
2175 txDescCache
.writeback(0);
2176 anBegin("TXS", "Desc Fetch");
2177 anWe("TXS", txDescCache
.annUnusedCacheQ
);
2178 txDescCache
.fetchDescriptors();
2179 DPRINTF(EthernetSM
, "TXS: No descriptors left in ring, forcing "
2180 "writeback stopping ticking and posting TXQE\n");
2186 if (!(txDescCache
.descUnused())) {
2187 anBegin("TXS", "Desc Fetch");
2188 txDescCache
.fetchDescriptors();
2189 anWe("TXS", txDescCache
.annUnusedCacheQ
);
2190 DPRINTF(EthernetSM
, "TXS: No descriptors available in cache, "
2191 "fetching and stopping ticking\n");
2195 anPq("TXS", txDescCache
.annUnusedCacheQ
);
2198 txDescCache
.processContextDesc();
2199 if (txDescCache
.packetWaiting()) {
2201 "TXS: Fetching TSO header, stopping ticking\n");
2206 unsigned size
= txDescCache
.getPacketSize(txPacket
);
2207 if (size
> 0 && txFifo
.avail() > size
) {
2208 anRq("TXS", "TX FIFO Q");
2209 anBegin("TXS", "DMA Packet");
2210 DPRINTF(EthernetSM
, "TXS: Reserving %d bytes in FIFO and "
2211 "beginning DMA of next packet\n", size
);
2212 txFifo
.reserve(size
);
2213 txDescCache
.getPacketData(txPacket
);
2214 } else if (size
== 0) {
2215 DPRINTF(EthernetSM
, "TXS: getPacketSize returned: %d\n", size
);
2217 "TXS: No packets to get, writing back used descriptors\n");
2218 anBegin("TXS", "Desc Writeback");
2219 txDescCache
.writeback(0);
2221 anWf("TXS", "TX FIFO Q");
2222 DPRINTF(EthernetSM
, "TXS: FIFO full, stopping ticking until space "
2223 "available in FIFO\n");
2230 DPRINTF(EthernetSM
, "TXS: Nothing to do, stopping ticking\n");
2235 IGbE::ethRxPkt(EthPacketPtr pkt
)
2237 rxBytes
+= pkt
->length
;
2240 DPRINTF(Ethernet
, "RxFIFO: Receiving pcakte from wire\n");
2241 anBegin("RXQ", "Wire Recv");
2244 if (!regs
.rctl
.en()) {
2245 DPRINTF(Ethernet
, "RxFIFO: RX not enabled, dropping\n");
2246 anBegin("RXQ", "FIFO Drop", CPA::FL_BAD
);
2250 // restart the state machines if they are stopped
2251 rxTick
= true && drainState() != DrainState::Draining
;
2252 if ((rxTick
|| txTick
) && !tickEvent
.scheduled()) {
2254 "RXS: received packet into fifo, starting ticking\n");
2258 if (!rxFifo
.push(pkt
)) {
2259 DPRINTF(Ethernet
, "RxFIFO: Packet won't fit in fifo... dropped\n");
2260 postInterrupt(IT_RXO
, true);
2261 anBegin("RXQ", "FIFO Drop", CPA::FL_BAD
);
2265 if (CPA::available() && cpa
->enabled()) {
2266 assert(sys
->numSystemsRunning
<= 2);
2268 if (sys
->systemList
[0] == sys
)
2269 other_sys
= sys
->systemList
[1];
2271 other_sys
= sys
->systemList
[0];
2273 cpa
->hwDq(CPA::FL_NONE
, sys
, macAddr
, "RXQ", "WireQ", 0, other_sys
);
2274 anQ("RXQ", "RX FIFO Q");
2275 cpa
->hwWe(CPA::FL_NONE
, sys
, macAddr
, "RXQ", "WireQ", 0, other_sys
);
2283 IGbE::rxStateMachine()
2285 if (!regs
.rctl
.en()) {
2287 DPRINTF(EthernetSM
, "RXS: RX disabled, stopping ticking\n");
2291 // If the packet is done check for interrupts/descriptors/etc
2292 if (rxDescCache
.packetDone()) {
2293 rxDmaPacket
= false;
2294 DPRINTF(EthernetSM
, "RXS: Packet completed DMA to memory\n");
2295 int descLeft
= rxDescCache
.descLeft();
2296 DPRINTF(EthernetSM
, "RXS: descLeft: %d rdmts: %d rdlen: %d\n",
2297 descLeft
, regs
.rctl
.rdmts(), regs
.rdlen());
2299 // rdmts 2->1/8, 1->1/4, 0->1/2
2300 int ratio
= (1ULL << (regs
.rctl
.rdmts() + 1));
2301 if (descLeft
* ratio
<= regs
.rdlen()) {
2302 DPRINTF(Ethernet
, "RXS: Interrupting (RXDMT) "
2303 "because of descriptors left\n");
2304 postInterrupt(IT_RXDMT
);
2308 rxDescCache
.writeback(0);
2310 if (descLeft
== 0) {
2311 anBegin("RXS", "Writeback Descriptors");
2312 rxDescCache
.writeback(0);
2313 DPRINTF(EthernetSM
, "RXS: No descriptors left in ring, forcing"
2314 " writeback and stopping ticking\n");
2318 // only support descriptor granulaties
2319 assert(regs
.rxdctl
.gran());
2321 if (regs
.rxdctl
.wthresh() >= rxDescCache
.descUsed()) {
2323 "RXS: Writing back because WTHRESH >= descUsed\n");
2324 anBegin("RXS", "Writeback Descriptors");
2325 if (regs
.rxdctl
.wthresh() < (cacheBlockSize()>>4))
2326 rxDescCache
.writeback(regs
.rxdctl
.wthresh()-1);
2328 rxDescCache
.writeback((cacheBlockSize()-1)>>4);
2331 if ((rxDescCache
.descUnused() < regs
.rxdctl
.pthresh()) &&
2332 ((rxDescCache
.descLeft() - rxDescCache
.descUnused()) >
2333 regs
.rxdctl
.hthresh())) {
2334 DPRINTF(EthernetSM
, "RXS: Fetching descriptors because "
2335 "descUnused < PTHRESH\n");
2336 anBegin("RXS", "Fetch Descriptors");
2337 rxDescCache
.fetchDescriptors();
2340 if (rxDescCache
.descUnused() == 0) {
2341 anBegin("RXS", "Fetch Descriptors");
2342 rxDescCache
.fetchDescriptors();
2343 anWe("RXS", rxDescCache
.annUnusedCacheQ
);
2344 DPRINTF(EthernetSM
, "RXS: No descriptors available in cache, "
2345 "fetching descriptors and stopping ticking\n");
2353 "RXS: stopping ticking until packet DMA completes\n");
2358 if (!rxDescCache
.descUnused()) {
2359 anBegin("RXS", "Fetch Descriptors");
2360 rxDescCache
.fetchDescriptors();
2361 anWe("RXS", rxDescCache
.annUnusedCacheQ
);
2362 DPRINTF(EthernetSM
, "RXS: No descriptors available in cache, "
2363 "stopping ticking\n");
2365 DPRINTF(EthernetSM
, "RXS: No descriptors available, fetching\n");
2368 anPq("RXS", rxDescCache
.annUnusedCacheQ
);
2370 if (rxFifo
.empty()) {
2371 anWe("RXS", "RX FIFO Q");
2372 DPRINTF(EthernetSM
, "RXS: RxFIFO empty, stopping ticking\n");
2376 anPq("RXS", "RX FIFO Q");
2377 anBegin("RXS", "Get Desc");
2380 pkt
= rxFifo
.front();
2383 pktOffset
= rxDescCache
.writePacket(pkt
, pktOffset
);
2384 DPRINTF(EthernetSM
, "RXS: Writing packet into memory\n");
2385 if (pktOffset
== pkt
->length
) {
2386 anBegin( "RXS", "FIFO Dequeue");
2387 DPRINTF(EthernetSM
, "RXS: Removing packet from FIFO\n");
2389 anDq("RXS", "RX FIFO Q");
2393 DPRINTF(EthernetSM
, "RXS: stopping ticking until packet DMA completes\n");
2396 anBegin("RXS", "DMA Packet");
2404 if (txFifo
.empty()) {
2405 anWe("TXQ", "TX FIFO Q");
2410 anPq("TXQ", "TX FIFO Q");
2411 if (etherInt
->sendPacket(txFifo
.front())) {
2412 anQ("TXQ", "WireQ");
2413 if (DTRACE(EthernetSM
)) {
2414 IpPtr
ip(txFifo
.front());
2416 DPRINTF(EthernetSM
, "Transmitting Ip packet with Id=%d\n",
2419 DPRINTF(EthernetSM
, "Transmitting Non-Ip packet\n");
2421 anDq("TXQ", "TX FIFO Q");
2422 anBegin("TXQ", "Wire Send");
2424 "TxFIFO: Successful transmit, bytes available in fifo: %d\n",
2427 txBytes
+= txFifo
.front()->length
;
2437 DPRINTF(EthernetSM
, "IGbE: -------------- Cycle --------------\n");
2447 // If txWire returns and txFifoTick is still set, that means the data we
2448 // sent to the other end was already accepted and we can send another
2449 // frame right away. This is consistent with the previous behavior which
2450 // would send another frame if one was ready in ethTxDone. This version
2451 // avoids growing the stack with each frame sent which can cause stack
2456 if (rxTick
|| txTick
|| txFifoTick
)
2457 schedule(tickEvent
, curTick() + clockPeriod());
2465 anBegin("TXQ", "Send Done");
2466 // restart the tx state machines if they are stopped
2467 // fifo to send another packet
2468 // tx sm to put more data into the fifo
2469 txFifoTick
= true && drainState() != DrainState::Draining
;
2470 if (txDescCache
.descLeft() != 0 && drainState() != DrainState::Draining
)
2475 DPRINTF(EthernetSM
, "TxFIFO: Transmission complete\n");
2479 IGbE::serialize(CheckpointOut
&cp
) const
2481 PciDevice::serialize(cp
);
2484 SERIALIZE_SCALAR(eeOpBits
);
2485 SERIALIZE_SCALAR(eeAddrBits
);
2486 SERIALIZE_SCALAR(eeDataBits
);
2487 SERIALIZE_SCALAR(eeOpcode
);
2488 SERIALIZE_SCALAR(eeAddr
);
2489 SERIALIZE_SCALAR(lastInterrupt
);
2490 SERIALIZE_ARRAY(flash
,iGbReg::EEPROM_SIZE
);
2492 rxFifo
.serialize("rxfifo", cp
);
2493 txFifo
.serialize("txfifo", cp
);
2495 bool txPktExists
= txPacket
!= nullptr;
2496 SERIALIZE_SCALAR(txPktExists
);
2498 txPacket
->serialize("txpacket", cp
);
2500 Tick rdtr_time
= 0, radv_time
= 0, tidv_time
= 0, tadv_time
= 0,
2503 if (rdtrEvent
.scheduled())
2504 rdtr_time
= rdtrEvent
.when();
2505 SERIALIZE_SCALAR(rdtr_time
);
2507 if (radvEvent
.scheduled())
2508 radv_time
= radvEvent
.when();
2509 SERIALIZE_SCALAR(radv_time
);
2511 if (tidvEvent
.scheduled())
2512 tidv_time
= tidvEvent
.when();
2513 SERIALIZE_SCALAR(tidv_time
);
2515 if (tadvEvent
.scheduled())
2516 tadv_time
= tadvEvent
.when();
2517 SERIALIZE_SCALAR(tadv_time
);
2519 if (interEvent
.scheduled())
2520 inter_time
= interEvent
.when();
2521 SERIALIZE_SCALAR(inter_time
);
2523 SERIALIZE_SCALAR(pktOffset
);
2525 txDescCache
.serializeSection(cp
, "TxDescCache");
2526 rxDescCache
.serializeSection(cp
, "RxDescCache");
2530 IGbE::unserialize(CheckpointIn
&cp
)
2532 PciDevice::unserialize(cp
);
2534 regs
.unserialize(cp
);
2535 UNSERIALIZE_SCALAR(eeOpBits
);
2536 UNSERIALIZE_SCALAR(eeAddrBits
);
2537 UNSERIALIZE_SCALAR(eeDataBits
);
2538 UNSERIALIZE_SCALAR(eeOpcode
);
2539 UNSERIALIZE_SCALAR(eeAddr
);
2540 UNSERIALIZE_SCALAR(lastInterrupt
);
2541 UNSERIALIZE_ARRAY(flash
,iGbReg::EEPROM_SIZE
);
2543 rxFifo
.unserialize("rxfifo", cp
);
2544 txFifo
.unserialize("txfifo", cp
);
2547 UNSERIALIZE_SCALAR(txPktExists
);
2549 txPacket
= std::make_shared
<EthPacketData
>(16384);
2550 txPacket
->unserialize("txpacket", cp
);
2557 Tick rdtr_time
, radv_time
, tidv_time
, tadv_time
, inter_time
;
2558 UNSERIALIZE_SCALAR(rdtr_time
);
2559 UNSERIALIZE_SCALAR(radv_time
);
2560 UNSERIALIZE_SCALAR(tidv_time
);
2561 UNSERIALIZE_SCALAR(tadv_time
);
2562 UNSERIALIZE_SCALAR(inter_time
);
2565 schedule(rdtrEvent
, rdtr_time
);
2568 schedule(radvEvent
, radv_time
);
2571 schedule(tidvEvent
, tidv_time
);
2574 schedule(tadvEvent
, tadv_time
);
2577 schedule(interEvent
, inter_time
);
2579 UNSERIALIZE_SCALAR(pktOffset
);
2581 txDescCache
.unserializeSection(cp
, "TxDescCache");
2582 rxDescCache
.unserializeSection(cp
, "RxDescCache");
2586 IGbEParams::create()
2588 return new IGbE(this);