2 * Copyright (c) 2006 The Regents of The University of Michigan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * Device model for Intel's 8254x line of gigabit ethernet controllers.
33 * In particular an 82547 revision 2 (82547GI) MAC because it seems to have the
34 * fewest workarounds in the driver. It will probably work with most of the
35 * other MACs with slight modifications.
40 * @todo really there are multiple dma engines.. we should implement them.
45 #include "base/inet.hh"
46 #include "base/trace.hh"
47 #include "debug/EthernetAll.hh"
48 #include "dev/i8254xGBe.hh"
49 #include "mem/packet.hh"
50 #include "mem/packet_access.hh"
51 #include "params/IGbE.hh"
52 #include "sim/stats.hh"
53 #include "sim/system.hh"
55 using namespace iGbReg
;
58 IGbE::IGbE(const Params
*p
)
59 : EtherDevice(p
), etherInt(NULL
), drainEvent(NULL
),
60 useFlowControl(p
->use_flow_control
),
61 rxFifo(p
->rx_fifo_size
), txFifo(p
->tx_fifo_size
), rxTick(false),
62 txTick(false), txFifoTick(false), rxDmaPacket(false), pktOffset(0),
63 fetchDelay(p
->fetch_delay
), wbDelay(p
->wb_delay
),
64 fetchCompDelay(p
->fetch_comp_delay
), wbCompDelay(p
->wb_comp_delay
),
65 rxWriteDelay(p
->rx_write_delay
), txReadDelay(p
->tx_read_delay
),
66 rdtrEvent(this), radvEvent(this),
67 tadvEvent(this), tidvEvent(this), tickEvent(this), interEvent(this),
68 rxDescCache(this, name()+".RxDesc", p
->rx_desc_cache_size
),
69 txDescCache(this, name()+".TxDesc", p
->tx_desc_cache_size
),
70 clock(p
->clock
), lastInterrupt(0)
72 etherInt
= new IGbEInt(name() + ".int", this);
74 // Initialized internal registers per Intel documentation
75 // All registers intialized to 0 by per register constructor
80 regs
.sts
.speed(3); // Say we're 1000Mbps
81 regs
.sts
.fd(1); // full duplex
82 regs
.sts
.lu(1); // link up
88 regs
.rxdctl
.wthresh(1);
102 // clear all 64 16 bit words of the eeprom
103 memset(&flash
, 0, EEPROM_SIZE
*2);
105 // Set the MAC address
106 memcpy(flash
, p
->hardware_address
.bytes(), ETH_ADDR_LEN
);
107 for (int x
= 0; x
< ETH_ADDR_LEN
/2; x
++)
108 flash
[x
] = htobe(flash
[x
]);
111 for (int x
= 0; x
< EEPROM_SIZE
; x
++)
112 csum
+= htobe(flash
[x
]);
115 // Magic happy checksum value
116 flash
[EEPROM_SIZE
-1] = htobe((uint16_t)(EEPROM_CSUM
- csum
));
118 // Store the MAC address as queue ID
119 macAddr
= p
->hardware_address
;
138 IGbE::getEthPort(const std::string
&if_name
, int idx
)
141 if (if_name
== "interface") {
142 if (etherInt
->getPeer())
143 panic("Port already connected to\n");
150 IGbE::writeConfig(PacketPtr pkt
)
152 int offset
= pkt
->getAddr() & PCI_CONFIG_SIZE
;
153 if (offset
< PCI_DEVICE_SPECIFIC
)
154 PciDev::writeConfig(pkt
);
156 panic("Device specific PCI config space not implemented.\n");
159 // Some work may need to be done here based for the pci COMMAND bits.
165 // Handy macro for range-testing register access addresses
166 #define IN_RANGE(val, base, len) (val >= base && val < (base + len))
169 IGbE::read(PacketPtr pkt
)
174 if (!getBAR(pkt
->getAddr(), bar
, daddr
))
175 panic("Invalid PCI memory access to unmapped memory.\n");
177 // Only Memory register BAR is allowed
180 // Only 32bit accesses allowed
181 assert(pkt
->getSize() == 4);
183 DPRINTF(Ethernet
, "Read device register %#X\n", daddr
);
188 // Handle read of register here
194 pkt
->set
<uint32_t>(regs
.ctrl());
197 pkt
->set
<uint32_t>(regs
.sts());
200 pkt
->set
<uint32_t>(regs
.eecd());
203 pkt
->set
<uint32_t>(regs
.eerd());
206 pkt
->set
<uint32_t>(regs
.ctrl_ext());
209 pkt
->set
<uint32_t>(regs
.mdic());
212 DPRINTF(Ethernet
, "Reading ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n",
213 regs
.icr(), regs
.imr
, regs
.iam
, regs
.ctrl_ext
.iame());
214 pkt
->set
<uint32_t>(regs
.icr());
215 if (regs
.icr
.int_assert() || regs
.imr
== 0) {
216 regs
.icr
= regs
.icr() & ~mask(30);
217 DPRINTF(Ethernet
, "Cleared ICR. ICR=%#x\n", regs
.icr());
219 if (regs
.ctrl_ext
.iame() && regs
.icr
.int_assert())
220 regs
.imr
&= ~regs
.iam
;
224 // This is only useful for MSI, but the driver reads it every time
225 // Just don't do anything
226 pkt
->set
<uint32_t>(0);
229 pkt
->set
<uint32_t>(regs
.itr());
232 pkt
->set
<uint32_t>(regs
.rctl());
235 pkt
->set
<uint32_t>(regs
.fcttv());
238 pkt
->set
<uint32_t>(regs
.tctl());
241 pkt
->set
<uint32_t>(regs
.pba());
245 pkt
->set
<uint32_t>(0); // We don't care, so just return 0
248 pkt
->set
<uint32_t>(regs
.fcrtl());
251 pkt
->set
<uint32_t>(regs
.fcrth());
254 pkt
->set
<uint32_t>(regs
.rdba
.rdbal());
257 pkt
->set
<uint32_t>(regs
.rdba
.rdbah());
260 pkt
->set
<uint32_t>(regs
.rdlen());
263 pkt
->set
<uint32_t>(regs
.srrctl());
266 pkt
->set
<uint32_t>(regs
.rdh());
269 pkt
->set
<uint32_t>(regs
.rdt());
272 pkt
->set
<uint32_t>(regs
.rdtr());
273 if (regs
.rdtr
.fpd()) {
274 rxDescCache
.writeback(0);
275 DPRINTF(EthernetIntr
,
276 "Posting interrupt because of RDTR.FPD write\n");
277 postInterrupt(IT_RXT
);
282 pkt
->set
<uint32_t>(regs
.rxdctl());
285 pkt
->set
<uint32_t>(regs
.radv());
288 pkt
->set
<uint32_t>(regs
.tdba
.tdbal());
291 pkt
->set
<uint32_t>(regs
.tdba
.tdbah());
294 pkt
->set
<uint32_t>(regs
.tdlen());
297 pkt
->set
<uint32_t>(regs
.tdh());
300 pkt
->set
<uint32_t>(regs
.txdca_ctl());
303 pkt
->set
<uint32_t>(regs
.tdt());
306 pkt
->set
<uint32_t>(regs
.tidv());
309 pkt
->set
<uint32_t>(regs
.txdctl());
312 pkt
->set
<uint32_t>(regs
.tadv());
315 pkt
->set
<uint32_t>(regs
.tdwba
& mask(32));
318 pkt
->set
<uint32_t>(regs
.tdwba
>> 32);
321 pkt
->set
<uint32_t>(regs
.rxcsum());
324 pkt
->set
<uint32_t>(regs
.rlpml
);
327 pkt
->set
<uint32_t>(regs
.rfctl());
330 pkt
->set
<uint32_t>(regs
.manc());
333 pkt
->set
<uint32_t>(regs
.swsm());
337 pkt
->set
<uint32_t>(regs
.fwsm());
340 pkt
->set
<uint32_t>(regs
.sw_fw_sync
);
343 if (!IN_RANGE(daddr
, REG_VFTA
, VLAN_FILTER_TABLE_SIZE
*4) &&
344 !IN_RANGE(daddr
, REG_RAL
, RCV_ADDRESS_TABLE_SIZE
*8) &&
345 !IN_RANGE(daddr
, REG_MTA
, MULTICAST_TABLE_SIZE
*4) &&
346 !IN_RANGE(daddr
, REG_CRCERRS
, STATS_REGS_SIZE
))
347 panic("Read request to unknown register number: %#x\n", daddr
);
349 pkt
->set
<uint32_t>(0);
352 pkt
->makeAtomicResponse();
357 IGbE::write(PacketPtr pkt
)
363 if (!getBAR(pkt
->getAddr(), bar
, daddr
))
364 panic("Invalid PCI memory access to unmapped memory.\n");
366 // Only Memory register BAR is allowed
369 // Only 32bit accesses allowed
370 assert(pkt
->getSize() == sizeof(uint32_t));
372 DPRINTF(Ethernet
, "Wrote device register %#X value %#X\n",
373 daddr
, pkt
->get
<uint32_t>());
376 // Handle write of register here
378 uint32_t val
= pkt
->get
<uint32_t>();
386 if (regs
.ctrl
.tfce())
387 warn("TX Flow control enabled, should implement\n");
388 if (regs
.ctrl
.rfce())
389 warn("RX Flow control enabled, should implement\n");
399 oldClk
= regs
.eecd
.sk();
401 // See if this is a eeprom access and emulate accordingly
402 if (!oldClk
&& regs
.eecd
.sk()) {
404 eeOpcode
= eeOpcode
<< 1 | regs
.eecd
.din();
406 } else if (eeAddrBits
< 8 && eeOpcode
== EEPROM_READ_OPCODE_SPI
) {
407 eeAddr
= eeAddr
<< 1 | regs
.eecd
.din();
409 } else if (eeDataBits
< 16 && eeOpcode
== EEPROM_READ_OPCODE_SPI
) {
410 assert(eeAddr
>>1 < EEPROM_SIZE
);
411 DPRINTF(EthernetEEPROM
, "EEPROM bit read: %d word: %#X\n",
412 flash
[eeAddr
>>1] >> eeDataBits
& 0x1,
414 regs
.eecd
.dout((flash
[eeAddr
>>1] >> (15-eeDataBits
)) & 0x1);
416 } else if (eeDataBits
< 8 && eeOpcode
== EEPROM_RDSR_OPCODE_SPI
) {
420 panic("What's going on with eeprom interface? opcode:"
421 " %#x:%d addr: %#x:%d, data: %d\n", (uint32_t)eeOpcode
,
422 (uint32_t)eeOpBits
, (uint32_t)eeAddr
,
423 (uint32_t)eeAddrBits
, (uint32_t)eeDataBits
);
425 // Reset everything for the next command
426 if ((eeDataBits
== 16 && eeOpcode
== EEPROM_READ_OPCODE_SPI
) ||
427 (eeDataBits
== 8 && eeOpcode
== EEPROM_RDSR_OPCODE_SPI
)) {
435 DPRINTF(EthernetEEPROM
, "EEPROM: opcode: %#X:%d addr: %#X:%d\n",
436 (uint32_t)eeOpcode
, (uint32_t) eeOpBits
,
437 (uint32_t)eeAddr
>>1, (uint32_t)eeAddrBits
);
438 if (eeOpBits
== 8 && !(eeOpcode
== EEPROM_READ_OPCODE_SPI
||
439 eeOpcode
== EEPROM_RDSR_OPCODE_SPI
))
440 panic("Unknown eeprom opcode: %#X:%d\n", (uint32_t)eeOpcode
,
445 // If driver requests eeprom access, immediately give it to it
446 regs
.eecd
.ee_gnt(regs
.eecd
.ee_req());
450 if (regs
.eerd
.start()) {
452 assert(regs
.eerd
.addr() < EEPROM_SIZE
);
453 regs
.eerd
.data(flash
[regs
.eerd
.addr()]);
455 DPRINTF(EthernetEEPROM
, "EEPROM: read addr: %#X data %#x\n",
456 regs
.eerd
.addr(), regs
.eerd
.data());
462 panic("No support for interrupt on mdic complete\n");
463 if (regs
.mdic
.phyadd() != 1)
464 panic("No support for reading anything but phy\n");
465 DPRINTF(Ethernet
, "%s phy address %x\n",
466 regs
.mdic
.op() == 1 ? "Writing" : "Reading",
468 switch (regs
.mdic
.regadd()) {
470 regs
.mdic
.data(0x796D); // link up
473 regs
.mdic
.data(params()->phy_pid
);
476 regs
.mdic
.data(params()->phy_epid
);
479 regs
.mdic
.data(0x7C00);
482 regs
.mdic
.data(0x3000);
485 regs
.mdic
.data(0x180); // some random length
493 DPRINTF(Ethernet
, "Writing ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n",
494 regs
.icr(), regs
.imr
, regs
.iam
, regs
.ctrl_ext
.iame());
495 if (regs
.ctrl_ext
.iame())
496 regs
.imr
&= ~regs
.iam
;
497 regs
.icr
= ~bits(val
,30,0) & regs
.icr();
504 DPRINTF(EthernetIntr
, "Posting interrupt because of ICS write\n");
505 postInterrupt((IntTypes
)val
);
521 if (regs
.rctl
.rst()) {
523 DPRINTF(EthernetSM
, "RXS: Got RESET!\n");
541 if (regs
.tctl
.en() && !oldtctl
.en()) {
547 regs
.pba
.txa(64 - regs
.pba
.rxa());
557 ; // We don't care, so don't store anything
560 warn("Writing to IVAR0, ignoring...\n");
569 regs
.rdba
.rdbal( val
& ~mask(4));
570 rxDescCache
.areaChanged();
573 regs
.rdba
.rdbah(val
);
574 rxDescCache
.areaChanged();
577 regs
.rdlen
= val
& ~mask(7);
578 rxDescCache
.areaChanged();
585 rxDescCache
.areaChanged();
589 DPRINTF(EthernetSM
, "RXS: RDT Updated.\n");
590 if (getState() == SimObject::Running
) {
591 DPRINTF(EthernetSM
, "RXS: RDT Fetching Descriptors!\n");
592 rxDescCache
.fetchDescriptors();
594 DPRINTF(EthernetSM
, "RXS: RDT NOT Fetching Desc b/c draining!\n");
607 regs
.tdba
.tdbal( val
& ~mask(4));
608 txDescCache
.areaChanged();
611 regs
.tdba
.tdbah(val
);
612 txDescCache
.areaChanged();
615 regs
.tdlen
= val
& ~mask(7);
616 txDescCache
.areaChanged();
620 txDescCache
.areaChanged();
623 regs
.txdca_ctl
= val
;
624 if (regs
.txdca_ctl
.enabled())
625 panic("No support for DCA\n");
629 DPRINTF(EthernetSM
, "TXS: TX Tail pointer updated\n");
630 if (getState() == SimObject::Running
) {
631 DPRINTF(EthernetSM
, "TXS: TDT Fetching Descriptors!\n");
632 txDescCache
.fetchDescriptors();
634 DPRINTF(EthernetSM
, "TXS: TDT NOT Fetching Desc b/c draining!\n");
647 regs
.tdwba
&= ~mask(32);
649 txDescCache
.completionWriteback(regs
.tdwba
& ~mask(1),
650 regs
.tdwba
& mask(1));
653 regs
.tdwba
&= mask(32);
654 regs
.tdwba
|= (uint64_t)val
<< 32;
655 txDescCache
.completionWriteback(regs
.tdwba
& ~mask(1),
656 regs
.tdwba
& mask(1));
666 if (regs
.rfctl
.exsten())
667 panic("Extended RX descriptors not implemented\n");
674 if (regs
.fwsm
.eep_fw_semaphore())
675 regs
.swsm
.swesmbi(0);
678 regs
.sw_fw_sync
= val
;
681 if (!IN_RANGE(daddr
, REG_VFTA
, VLAN_FILTER_TABLE_SIZE
*4) &&
682 !IN_RANGE(daddr
, REG_RAL
, RCV_ADDRESS_TABLE_SIZE
*8) &&
683 !IN_RANGE(daddr
, REG_MTA
, MULTICAST_TABLE_SIZE
*4))
684 panic("Write request to unknown register number: %#x\n", daddr
);
687 pkt
->makeAtomicResponse();
692 IGbE::postInterrupt(IntTypes t
, bool now
)
696 // Interrupt is already pending
697 if (t
& regs
.icr() && !now
)
700 regs
.icr
= regs
.icr() | t
;
702 Tick itr_interval
= SimClock::Int::ns
* 256 * regs
.itr
.interval();
703 DPRINTF(EthernetIntr
,
704 "EINT: postInterrupt() curTick(): %d itr: %d interval: %d\n",
705 curTick(), regs
.itr
.interval(), itr_interval
);
707 if (regs
.itr
.interval() == 0 || now
||
708 lastInterrupt
+ itr_interval
<= curTick()) {
709 if (interEvent
.scheduled()) {
710 deschedule(interEvent
);
714 Tick int_time
= lastInterrupt
+ itr_interval
;
715 assert(int_time
> 0);
716 DPRINTF(EthernetIntr
, "EINT: Scheduling timer interrupt for tick %d\n",
718 if (!interEvent
.scheduled()) {
719 schedule(interEvent
, int_time
);
725 IGbE::delayIntEvent()
737 if (!(regs
.icr() & regs
.imr
)) {
738 DPRINTF(Ethernet
, "Interrupt Masked. Not Posting\n");
742 DPRINTF(Ethernet
, "Posting Interrupt\n");
745 if (interEvent
.scheduled()) {
746 deschedule(interEvent
);
749 if (rdtrEvent
.scheduled()) {
751 deschedule(rdtrEvent
);
753 if (radvEvent
.scheduled()) {
755 deschedule(radvEvent
);
757 if (tadvEvent
.scheduled()) {
759 deschedule(tadvEvent
);
761 if (tidvEvent
.scheduled()) {
763 deschedule(tidvEvent
);
766 regs
.icr
.int_assert(1);
767 DPRINTF(EthernetIntr
, "EINT: Posting interrupt to CPU now. Vector %#x\n",
772 lastInterrupt
= curTick();
778 if (regs
.icr
.int_assert()) {
779 regs
.icr
.int_assert(0);
780 DPRINTF(EthernetIntr
,
781 "EINT: Clearing interrupt to CPU now. Vector %#x\n",
790 DPRINTF(Ethernet
, "Checking interrupts icr: %#x imr: %#x\n", regs
.icr(),
792 // Check if we need to clear the cpu interrupt
793 if (!(regs
.icr() & regs
.imr
)) {
794 DPRINTF(Ethernet
, "Mask cleaned all interrupts\n");
795 if (interEvent
.scheduled())
796 deschedule(interEvent
);
797 if (regs
.icr
.int_assert())
800 DPRINTF(Ethernet
, "ITR = %#X itr.interval = %#X\n",
801 regs
.itr(), regs
.itr
.interval());
803 if (regs
.icr() & regs
.imr
) {
804 if (regs
.itr
.interval() == 0) {
808 "Possibly scheduling interrupt because of imr write\n");
809 if (!interEvent
.scheduled()) {
810 Tick t
= curTick() + SimClock::Int::ns
* 256 * regs
.itr
.interval();
811 DPRINTF(Ethernet
, "Scheduling for %d\n", t
);
812 schedule(interEvent
, t
);
819 ///////////////////////////// IGbE::DescCache //////////////////////////////
822 IGbE::DescCache
<T
>::DescCache(IGbE
*i
, const std::string n
, int s
)
823 : igbe(i
), _name(n
), cachePnt(0), size(s
), curFetching(0),
824 wbOut(0), pktPtr(NULL
), wbDelayEvent(this),
825 fetchDelayEvent(this), fetchEvent(this), wbEvent(this)
827 fetchBuf
= new T
[size
];
832 IGbE::DescCache
<T
>::~DescCache()
841 IGbE::DescCache
<T
>::areaChanged()
843 if (usedCache
.size() > 0 || curFetching
|| wbOut
)
844 panic("Descriptor Address, Length or Head changed. Bad\n");
851 IGbE::DescCache
<T
>::writeback(Addr aMask
)
853 int curHead
= descHead();
854 int max_to_wb
= usedCache
.size();
856 // Check if this writeback is less restrictive that the previous
857 // and if so setup another one immediately following it
859 if (aMask
< wbAlignment
) {
863 DPRINTF(EthernetDesc
,
864 "Writing back already in process, returning\n");
872 DPRINTF(EthernetDesc
, "Writing back descriptors head: %d tail: "
873 "%d len: %d cachePnt: %d max_to_wb: %d descleft: %d\n",
874 curHead
, descTail(), descLen(), cachePnt
, max_to_wb
,
877 if (max_to_wb
+ curHead
>= descLen()) {
878 max_to_wb
= descLen() - curHead
;
880 // this is by definition aligned correctly
881 } else if (wbAlignment
!= 0) {
882 // align the wb point to the mask
883 max_to_wb
= max_to_wb
& ~wbAlignment
;
886 DPRINTF(EthernetDesc
, "Writing back %d descriptors\n", max_to_wb
);
888 if (max_to_wb
<= 0) {
889 if (usedCache
.size())
890 igbe
->anBegin(annSmWb
, "Wait Alignment", CPA::FL_WAIT
);
892 igbe
->anWe(annSmWb
, annUsedCacheQ
);
898 assert(!wbDelayEvent
.scheduled());
899 igbe
->schedule(wbDelayEvent
, curTick() + igbe
->wbDelay
);
900 igbe
->anBegin(annSmWb
, "Prepare Writeback Desc");
905 IGbE::DescCache
<T
>::writeback1()
907 // If we're draining delay issuing this DMA
908 if (igbe
->getState() != SimObject::Running
) {
909 igbe
->schedule(wbDelayEvent
, curTick() + igbe
->wbDelay
);
913 DPRINTF(EthernetDesc
, "Begining DMA of %d descriptors\n", wbOut
);
915 for (int x
= 0; x
< wbOut
; x
++) {
916 assert(usedCache
.size());
917 memcpy(&wbBuf
[x
], usedCache
[x
], sizeof(T
));
918 igbe
->anPq(annSmWb
, annUsedCacheQ
);
919 igbe
->anPq(annSmWb
, annDescQ
);
920 igbe
->anQ(annSmWb
, annUsedDescQ
);
924 igbe
->anBegin(annSmWb
, "Writeback Desc DMA");
927 igbe
->dmaWrite(pciToDma(descBase() + descHead() * sizeof(T
)),
928 wbOut
* sizeof(T
), &wbEvent
, (uint8_t*)wbBuf
,
934 IGbE::DescCache
<T
>::fetchDescriptors()
939 DPRINTF(EthernetDesc
,
940 "Currently fetching %d descriptors, returning\n",
945 if (descTail() >= cachePnt
)
946 max_to_fetch
= descTail() - cachePnt
;
948 max_to_fetch
= descLen() - cachePnt
;
950 size_t free_cache
= size
- usedCache
.size() - unusedCache
.size();
953 igbe
->anWe(annSmFetch
, annUnusedDescQ
);
955 igbe
->anPq(annSmFetch
, annUnusedDescQ
, max_to_fetch
);
959 igbe
->anWf(annSmFetch
, annDescQ
);
961 igbe
->anRq(annSmFetch
, annDescQ
, free_cache
);
964 max_to_fetch
= std::min(max_to_fetch
, free_cache
);
967 DPRINTF(EthernetDesc
, "Fetching descriptors head: %d tail: "
968 "%d len: %d cachePnt: %d max_to_fetch: %d descleft: %d\n",
969 descHead(), descTail(), descLen(), cachePnt
,
970 max_to_fetch
, descLeft());
973 if (max_to_fetch
== 0)
976 // So we don't have two descriptor fetches going on at once
977 curFetching
= max_to_fetch
;
979 assert(!fetchDelayEvent
.scheduled());
980 igbe
->schedule(fetchDelayEvent
, curTick() + igbe
->fetchDelay
);
981 igbe
->anBegin(annSmFetch
, "Prepare Fetch Desc");
986 IGbE::DescCache
<T
>::fetchDescriptors1()
988 // If we're draining delay issuing this DMA
989 if (igbe
->getState() != SimObject::Running
) {
990 igbe
->schedule(fetchDelayEvent
, curTick() + igbe
->fetchDelay
);
994 igbe
->anBegin(annSmFetch
, "Fetch Desc");
996 DPRINTF(EthernetDesc
, "Fetching descriptors at %#x (%#x), size: %#x\n",
997 descBase() + cachePnt
* sizeof(T
),
998 pciToDma(descBase() + cachePnt
* sizeof(T
)),
999 curFetching
* sizeof(T
));
1000 assert(curFetching
);
1001 igbe
->dmaRead(pciToDma(descBase() + cachePnt
* sizeof(T
)),
1002 curFetching
* sizeof(T
), &fetchEvent
, (uint8_t*)fetchBuf
,
1003 igbe
->fetchCompDelay
);
1008 IGbE::DescCache
<T
>::fetchComplete()
1011 igbe
->anBegin(annSmFetch
, "Fetch Complete");
1012 for (int x
= 0; x
< curFetching
; x
++) {
1014 memcpy(newDesc
, &fetchBuf
[x
], sizeof(T
));
1015 unusedCache
.push_back(newDesc
);
1016 igbe
->anDq(annSmFetch
, annUnusedDescQ
);
1017 igbe
->anQ(annSmFetch
, annUnusedCacheQ
);
1018 igbe
->anQ(annSmFetch
, annDescQ
);
1023 int oldCp
= cachePnt
;
1026 cachePnt
+= curFetching
;
1027 assert(cachePnt
<= descLen());
1028 if (cachePnt
== descLen())
1033 DPRINTF(EthernetDesc
, "Fetching complete cachePnt %d -> %d\n",
1036 if ((descTail() >= cachePnt
? (descTail() - cachePnt
) : (descLen() -
1039 igbe
->anWe(annSmFetch
, annUnusedDescQ
);
1040 } else if (!(size
- usedCache
.size() - unusedCache
.size())) {
1041 igbe
->anWf(annSmFetch
, annDescQ
);
1043 igbe
->anBegin(annSmFetch
, "Wait", CPA::FL_WAIT
);
1052 IGbE::DescCache
<T
>::wbComplete()
1055 igbe
->anBegin(annSmWb
, "Finish Writeback");
1057 long curHead
= descHead();
1059 long oldHead
= curHead
;
1062 for (int x
= 0; x
< wbOut
; x
++) {
1063 assert(usedCache
.size());
1064 delete usedCache
[0];
1065 usedCache
.pop_front();
1067 igbe
->anDq(annSmWb
, annUsedCacheQ
);
1068 igbe
->anDq(annSmWb
, annDescQ
);
1074 if (curHead
>= descLen())
1075 curHead
-= descLen();
1078 updateHead(curHead
);
1080 DPRINTF(EthernetDesc
, "Writeback complete curHead %d -> %d\n",
1083 // If we still have more to wb, call wb now
1087 DPRINTF(EthernetDesc
, "Writeback has more todo\n");
1088 writeback(wbAlignment
);
1093 if (usedCache
.size())
1094 igbe
->anBegin(annSmWb
, "Wait", CPA::FL_WAIT
);
1096 igbe
->anWe(annSmWb
, annUsedCacheQ
);
1103 IGbE::DescCache
<T
>::reset()
1105 DPRINTF(EthernetDesc
, "Reseting descriptor cache\n");
1106 for (typename
CacheType::size_type x
= 0; x
< usedCache
.size(); x
++)
1107 delete usedCache
[x
];
1108 for (typename
CacheType::size_type x
= 0; x
< unusedCache
.size(); x
++)
1109 delete unusedCache
[x
];
1112 unusedCache
.clear();
1120 IGbE::DescCache
<T
>::serialize(std::ostream
&os
)
1122 SERIALIZE_SCALAR(cachePnt
);
1123 SERIALIZE_SCALAR(curFetching
);
1124 SERIALIZE_SCALAR(wbOut
);
1125 SERIALIZE_SCALAR(moreToWb
);
1126 SERIALIZE_SCALAR(wbAlignment
);
1128 typename
CacheType::size_type usedCacheSize
= usedCache
.size();
1129 SERIALIZE_SCALAR(usedCacheSize
);
1130 for (typename
CacheType::size_type x
= 0; x
< usedCacheSize
; x
++) {
1131 arrayParamOut(os
, csprintf("usedCache_%d", x
),
1132 (uint8_t*)usedCache
[x
],sizeof(T
));
1135 typename
CacheType::size_type unusedCacheSize
= unusedCache
.size();
1136 SERIALIZE_SCALAR(unusedCacheSize
);
1137 for (typename
CacheType::size_type x
= 0; x
< unusedCacheSize
; x
++) {
1138 arrayParamOut(os
, csprintf("unusedCache_%d", x
),
1139 (uint8_t*)unusedCache
[x
],sizeof(T
));
1142 Tick fetch_delay
= 0, wb_delay
= 0;
1143 if (fetchDelayEvent
.scheduled())
1144 fetch_delay
= fetchDelayEvent
.when();
1145 SERIALIZE_SCALAR(fetch_delay
);
1146 if (wbDelayEvent
.scheduled())
1147 wb_delay
= wbDelayEvent
.when();
1148 SERIALIZE_SCALAR(wb_delay
);
1155 IGbE::DescCache
<T
>::unserialize(Checkpoint
*cp
, const std::string
§ion
)
1157 UNSERIALIZE_SCALAR(cachePnt
);
1158 UNSERIALIZE_SCALAR(curFetching
);
1159 UNSERIALIZE_SCALAR(wbOut
);
1160 UNSERIALIZE_SCALAR(moreToWb
);
1161 UNSERIALIZE_SCALAR(wbAlignment
);
1163 typename
CacheType::size_type usedCacheSize
;
1164 UNSERIALIZE_SCALAR(usedCacheSize
);
1166 for (typename
CacheType::size_type x
= 0; x
< usedCacheSize
; x
++) {
1168 arrayParamIn(cp
, section
, csprintf("usedCache_%d", x
),
1169 (uint8_t*)temp
,sizeof(T
));
1170 usedCache
.push_back(temp
);
1173 typename
CacheType::size_type unusedCacheSize
;
1174 UNSERIALIZE_SCALAR(unusedCacheSize
);
1175 for (typename
CacheType::size_type x
= 0; x
< unusedCacheSize
; x
++) {
1177 arrayParamIn(cp
, section
, csprintf("unusedCache_%d", x
),
1178 (uint8_t*)temp
,sizeof(T
));
1179 unusedCache
.push_back(temp
);
1181 Tick fetch_delay
= 0, wb_delay
= 0;
1182 UNSERIALIZE_SCALAR(fetch_delay
);
1183 UNSERIALIZE_SCALAR(wb_delay
);
1185 igbe
->schedule(fetchDelayEvent
, fetch_delay
);
1187 igbe
->schedule(wbDelayEvent
, wb_delay
);
1192 ///////////////////////////// IGbE::RxDescCache //////////////////////////////
1194 IGbE::RxDescCache::RxDescCache(IGbE
*i
, const std::string n
, int s
)
1195 : DescCache
<RxDesc
>(i
, n
, s
), pktDone(false), splitCount(0),
1196 pktEvent(this), pktHdrEvent(this), pktDataEvent(this)
1199 annSmFetch
= "RX Desc Fetch";
1200 annSmWb
= "RX Desc Writeback";
1201 annUnusedDescQ
= "RX Unused Descriptors";
1202 annUnusedCacheQ
= "RX Unused Descriptor Cache";
1203 annUsedCacheQ
= "RX Used Descriptor Cache";
1204 annUsedDescQ
= "RX Used Descriptors";
1205 annDescQ
= "RX Descriptors";
1209 IGbE::RxDescCache::pktSplitDone()
1212 DPRINTF(EthernetDesc
,
1213 "Part of split packet done: splitcount now %d\n", splitCount
);
1214 assert(splitCount
<= 2);
1215 if (splitCount
!= 2)
1218 DPRINTF(EthernetDesc
,
1219 "Part of split packet done: calling pktComplete()\n");
1224 IGbE::RxDescCache::writePacket(EthPacketPtr packet
, int pkt_offset
)
1226 assert(unusedCache
.size());
1227 //if (!unusedCache.size())
1232 unsigned buf_len
, hdr_len
;
1234 RxDesc
*desc
= unusedCache
.front();
1235 switch (igbe
->regs
.srrctl
.desctype()) {
1237 assert(pkt_offset
== 0);
1238 bytesCopied
= packet
->length
;
1239 DPRINTF(EthernetDesc
, "Packet Length: %d Desc Size: %d\n",
1240 packet
->length
, igbe
->regs
.rctl
.descSize());
1241 assert(packet
->length
< igbe
->regs
.rctl
.descSize());
1242 igbe
->dmaWrite(pciToDma(desc
->legacy
.buf
),
1243 packet
->length
, &pktEvent
, packet
->data
,
1244 igbe
->rxWriteDelay
);
1246 case RXDT_ADV_ONEBUF
:
1247 assert(pkt_offset
== 0);
1248 bytesCopied
= packet
->length
;
1249 buf_len
= igbe
->regs
.rctl
.lpe() ? igbe
->regs
.srrctl
.bufLen() :
1250 igbe
->regs
.rctl
.descSize();
1251 DPRINTF(EthernetDesc
, "Packet Length: %d srrctl: %#x Desc Size: %d\n",
1252 packet
->length
, igbe
->regs
.srrctl(), buf_len
);
1253 assert(packet
->length
< buf_len
);
1254 igbe
->dmaWrite(pciToDma(desc
->adv_read
.pkt
),
1255 packet
->length
, &pktEvent
, packet
->data
,
1256 igbe
->rxWriteDelay
);
1257 desc
->adv_wb
.header_len
= htole(0);
1258 desc
->adv_wb
.sph
= htole(0);
1259 desc
->adv_wb
.pkt_len
= htole((uint16_t)(pktPtr
->length
));
1261 case RXDT_ADV_SPLIT_A
:
1264 buf_len
= igbe
->regs
.rctl
.lpe() ? igbe
->regs
.srrctl
.bufLen() :
1265 igbe
->regs
.rctl
.descSize();
1266 hdr_len
= igbe
->regs
.rctl
.lpe() ? igbe
->regs
.srrctl
.hdrLen() : 0;
1267 DPRINTF(EthernetDesc
,
1268 "lpe: %d Packet Length: %d offset: %d srrctl: %#x "
1269 "hdr addr: %#x Hdr Size: %d desc addr: %#x Desc Size: %d\n",
1270 igbe
->regs
.rctl
.lpe(), packet
->length
, pkt_offset
,
1271 igbe
->regs
.srrctl(), desc
->adv_read
.hdr
, hdr_len
,
1272 desc
->adv_read
.pkt
, buf_len
);
1274 split_point
= hsplit(pktPtr
);
1276 if (packet
->length
<= hdr_len
) {
1277 bytesCopied
= packet
->length
;
1278 assert(pkt_offset
== 0);
1279 DPRINTF(EthernetDesc
, "Hdr split: Entire packet in header\n");
1280 igbe
->dmaWrite(pciToDma(desc
->adv_read
.hdr
),
1281 packet
->length
, &pktEvent
, packet
->data
,
1282 igbe
->rxWriteDelay
);
1283 desc
->adv_wb
.header_len
= htole((uint16_t)packet
->length
);
1284 desc
->adv_wb
.sph
= htole(0);
1285 desc
->adv_wb
.pkt_len
= htole(0);
1286 } else if (split_point
) {
1288 // we are only copying some data, header/data has already been
1291 std::min(packet
->length
- pkt_offset
, buf_len
);
1292 bytesCopied
+= max_to_copy
;
1293 DPRINTF(EthernetDesc
,
1294 "Hdr split: Continuing data buffer copy\n");
1295 igbe
->dmaWrite(pciToDma(desc
->adv_read
.pkt
),
1296 max_to_copy
, &pktEvent
,
1297 packet
->data
+ pkt_offset
, igbe
->rxWriteDelay
);
1298 desc
->adv_wb
.header_len
= htole(0);
1299 desc
->adv_wb
.pkt_len
= htole((uint16_t)max_to_copy
);
1300 desc
->adv_wb
.sph
= htole(0);
1303 std::min(packet
->length
- split_point
, buf_len
);
1304 bytesCopied
+= max_to_copy
+ split_point
;
1306 DPRINTF(EthernetDesc
, "Hdr split: splitting at %d\n",
1308 igbe
->dmaWrite(pciToDma(desc
->adv_read
.hdr
),
1309 split_point
, &pktHdrEvent
,
1310 packet
->data
, igbe
->rxWriteDelay
);
1311 igbe
->dmaWrite(pciToDma(desc
->adv_read
.pkt
),
1312 max_to_copy
, &pktDataEvent
,
1313 packet
->data
+ split_point
, igbe
->rxWriteDelay
);
1314 desc
->adv_wb
.header_len
= htole(split_point
);
1315 desc
->adv_wb
.sph
= 1;
1316 desc
->adv_wb
.pkt_len
= htole((uint16_t)(max_to_copy
));
1319 panic("Header split not fitting within header buffer or "
1320 "undecodable packet not fitting in header unsupported\n");
1324 panic("Unimplemnted RX receive buffer type: %d\n",
1325 igbe
->regs
.srrctl
.desctype());
1332 IGbE::RxDescCache::pktComplete()
1334 assert(unusedCache
.size());
1336 desc
= unusedCache
.front();
1338 igbe
->anBegin("RXS", "Update Desc");
1340 uint16_t crcfixup
= igbe
->regs
.rctl
.secrc() ? 0 : 4 ;
1341 DPRINTF(EthernetDesc
, "pktPtr->length: %d bytesCopied: %d "
1342 "stripcrc offset: %d value written: %d %d\n",
1343 pktPtr
->length
, bytesCopied
, crcfixup
,
1344 htole((uint16_t)(pktPtr
->length
+ crcfixup
)),
1345 (uint16_t)(pktPtr
->length
+ crcfixup
));
1347 // no support for anything but starting at 0
1348 assert(igbe
->regs
.rxcsum
.pcss() == 0);
1350 DPRINTF(EthernetDesc
, "Packet written to memory updating Descriptor\n");
1352 uint16_t status
= RXDS_DD
;
1354 uint16_t ext_err
= 0;
1359 assert(bytesCopied
<= pktPtr
->length
);
1360 if (bytesCopied
== pktPtr
->length
)
1366 DPRINTF(EthernetDesc
, "Proccesing Ip packet with Id=%d\n", ip
->id());
1370 if (igbe
->regs
.rxcsum
.ipofld()) {
1371 DPRINTF(EthernetDesc
, "Checking IP checksum\n");
1372 status
|= RXDS_IPCS
;
1373 csum
= htole(cksum(ip
));
1374 igbe
->rxIpChecksums
++;
1375 if (cksum(ip
) != 0) {
1377 ext_err
|= RXDEE_IPE
;
1378 DPRINTF(EthernetDesc
, "Checksum is bad!!\n");
1382 if (tcp
&& igbe
->regs
.rxcsum
.tuofld()) {
1383 DPRINTF(EthernetDesc
, "Checking TCP checksum\n");
1384 status
|= RXDS_TCPCS
;
1386 csum
= htole(cksum(tcp
));
1387 igbe
->rxTcpChecksums
++;
1388 if (cksum(tcp
) != 0) {
1389 DPRINTF(EthernetDesc
, "Checksum is bad!!\n");
1391 ext_err
|= RXDEE_TCPE
;
1396 if (udp
&& igbe
->regs
.rxcsum
.tuofld()) {
1397 DPRINTF(EthernetDesc
, "Checking UDP checksum\n");
1398 status
|= RXDS_UDPCS
;
1400 csum
= htole(cksum(udp
));
1401 igbe
->rxUdpChecksums
++;
1402 if (cksum(udp
) != 0) {
1403 DPRINTF(EthernetDesc
, "Checksum is bad!!\n");
1404 ext_err
|= RXDEE_TCPE
;
1409 DPRINTF(EthernetSM
, "Proccesing Non-Ip packet\n");
1412 switch (igbe
->regs
.srrctl
.desctype()) {
1414 desc
->legacy
.len
= htole((uint16_t)(pktPtr
->length
+ crcfixup
));
1415 desc
->legacy
.status
= htole(status
);
1416 desc
->legacy
.errors
= htole(err
);
1417 // No vlan support at this point... just set it to 0
1418 desc
->legacy
.vlan
= 0;
1420 case RXDT_ADV_SPLIT_A
:
1421 case RXDT_ADV_ONEBUF
:
1422 desc
->adv_wb
.rss_type
= htole(0);
1423 desc
->adv_wb
.pkt_type
= htole(ptype
);
1424 if (igbe
->regs
.rxcsum
.pcsd()) {
1425 // no rss support right now
1426 desc
->adv_wb
.rss_hash
= htole(0);
1428 desc
->adv_wb
.id
= htole(ip_id
);
1429 desc
->adv_wb
.csum
= htole(csum
);
1431 desc
->adv_wb
.status
= htole(status
);
1432 desc
->adv_wb
.errors
= htole(ext_err
);
1434 desc
->adv_wb
.vlan_tag
= htole(0);
1437 panic("Unimplemnted RX receive buffer type %d\n",
1438 igbe
->regs
.srrctl
.desctype());
1441 DPRINTF(EthernetDesc
, "Descriptor complete w0: %#x w1: %#x\n",
1442 desc
->adv_read
.pkt
, desc
->adv_read
.hdr
);
1444 if (bytesCopied
== pktPtr
->length
) {
1445 DPRINTF(EthernetDesc
,
1446 "Packet completely written to descriptor buffers\n");
1447 // Deal with the rx timer interrupts
1448 if (igbe
->regs
.rdtr
.delay()) {
1449 Tick delay
= igbe
->regs
.rdtr
.delay() * igbe
->intClock();
1450 DPRINTF(EthernetSM
, "RXS: Scheduling DTR for %d\n", delay
);
1451 igbe
->reschedule(igbe
->rdtrEvent
, curTick() + delay
);
1454 if (igbe
->regs
.radv
.idv()) {
1455 Tick delay
= igbe
->regs
.radv
.idv() * igbe
->intClock();
1456 DPRINTF(EthernetSM
, "RXS: Scheduling ADV for %d\n", delay
);
1457 if (!igbe
->radvEvent
.scheduled()) {
1458 igbe
->schedule(igbe
->radvEvent
, curTick() + delay
);
1462 // if neither radv or rdtr, maybe itr is set...
1463 if (!igbe
->regs
.rdtr
.delay() && !igbe
->regs
.radv
.idv()) {
1465 "RXS: Receive interrupt delay disabled, posting IT_RXT\n");
1466 igbe
->postInterrupt(IT_RXT
);
1469 // If the packet is small enough, interrupt appropriately
1470 // I wonder if this is delayed or not?!
1471 if (pktPtr
->length
<= igbe
->regs
.rsrpd
.idv()) {
1473 "RXS: Posting IT_SRPD beacuse small packet received\n");
1474 igbe
->postInterrupt(IT_SRPD
);
1484 igbe
->anBegin("RXS", "Done Updating Desc");
1485 DPRINTF(EthernetDesc
, "Processing of this descriptor complete\n");
1486 igbe
->anDq("RXS", annUnusedCacheQ
);
1487 unusedCache
.pop_front();
1488 igbe
->anQ("RXS", annUsedCacheQ
);
1489 usedCache
.push_back(desc
);
1493 IGbE::RxDescCache::enableSm()
1495 if (!igbe
->drainEvent
) {
1496 igbe
->rxTick
= true;
1497 igbe
->restartClock();
// NOTE(review): only the signature of packetDone() survives in this
// extraction — its body (original lines 1503-1511) was lost.  Presumably it
// reports and/or clears the pktDone flag that serialize() below checkpoints;
// confirm against the complete source before relying on this.
1502 IGbE::RxDescCache::packetDone()
1512 IGbE::RxDescCache::hasOutstandingEvents()
1514 return pktEvent
.scheduled() || wbEvent
.scheduled() ||
1515 fetchEvent
.scheduled() || pktHdrEvent
.scheduled() ||
1516 pktDataEvent
.scheduled();
1521 IGbE::RxDescCache::serialize(std::ostream
&os
)
1523 DescCache
<RxDesc
>::serialize(os
);
1524 SERIALIZE_SCALAR(pktDone
);
1525 SERIALIZE_SCALAR(splitCount
);
1526 SERIALIZE_SCALAR(bytesCopied
);
1530 IGbE::RxDescCache::unserialize(Checkpoint
*cp
, const std::string
§ion
)
1532 DescCache
<RxDesc
>::unserialize(cp
, section
);
1533 UNSERIALIZE_SCALAR(pktDone
);
1534 UNSERIALIZE_SCALAR(splitCount
);
1535 UNSERIALIZE_SCALAR(bytesCopied
);
1539 ///////////////////////////// IGbE::TxDescCache //////////////////////////////
1541 IGbE::TxDescCache::TxDescCache(IGbE
*i
, const std::string n
, int s
)
1542 : DescCache
<TxDesc
>(i
,n
, s
), pktDone(false), isTcp(false),
1543 pktWaiting(false), completionAddress(0), completionEnabled(false),
1544 useTso(false), tsoHeaderLen(0), tsoMss(0), tsoTotalLen(0), tsoUsedLen(0),
1545 tsoPrevSeq(0), tsoPktPayloadBytes(0), tsoLoadedHeader(false),
1546 tsoPktHasHeader(false), tsoDescBytesUsed(0), tsoCopyBytes(0), tsoPkts(0),
1547 pktEvent(this), headerEvent(this), nullEvent(this)
1549 annSmFetch
= "TX Desc Fetch";
1550 annSmWb
= "TX Desc Writeback";
1551 annUnusedDescQ
= "TX Unused Descriptors";
1552 annUnusedCacheQ
= "TX Unused Descriptor Cache";
1553 annUsedCacheQ
= "TX Used Descriptor Cache";
1554 annUsedDescQ
= "TX Used Descriptors";
1555 annDescQ
= "TX Descriptors";
1559 IGbE::TxDescCache::processContextDesc()
1561 assert(unusedCache
.size());
1564 DPRINTF(EthernetDesc
, "Checking and processing context descriptors\n");
1566 while (!useTso
&& unusedCache
.size() &&
1567 TxdOp::isContext(unusedCache
.front())) {
1568 DPRINTF(EthernetDesc
, "Got context descriptor type...\n");
1570 desc
= unusedCache
.front();
1571 DPRINTF(EthernetDesc
, "Descriptor upper: %#x lower: %#X\n",
1572 desc
->d1
, desc
->d2
);
1575 // is this going to be a tcp or udp packet?
1576 isTcp
= TxdOp::tcp(desc
) ? true : false;
1578 // setup all the TSO variables, they'll be ignored if we don't use
1579 // tso for this connection
1580 tsoHeaderLen
= TxdOp::hdrlen(desc
);
1581 tsoMss
= TxdOp::mss(desc
);
1583 if (TxdOp::isType(desc
, TxdOp::TXD_CNXT
) && TxdOp::tse(desc
)) {
1584 DPRINTF(EthernetDesc
, "TCP offload enabled for packet hdrlen: "
1585 "%d mss: %d paylen %d\n", TxdOp::hdrlen(desc
),
1586 TxdOp::mss(desc
), TxdOp::getLen(desc
));
1588 tsoTotalLen
= TxdOp::getLen(desc
);
1589 tsoLoadedHeader
= false;
1590 tsoDescBytesUsed
= 0;
1593 tsoPktHasHeader
= false;
1599 unusedCache
.pop_front();
1600 igbe
->anDq("TXS", annUnusedCacheQ
);
1601 usedCache
.push_back(desc
);
1602 igbe
->anQ("TXS", annUsedCacheQ
);
1605 if (!unusedCache
.size())
1608 desc
= unusedCache
.front();
1609 if (!useTso
&& TxdOp::isType(desc
, TxdOp::TXD_ADVDATA
) &&
1611 DPRINTF(EthernetDesc
, "TCP offload(adv) enabled for packet "
1612 "hdrlen: %d mss: %d paylen %d\n",
1613 tsoHeaderLen
, tsoMss
, TxdOp::getTsoLen(desc
));
1615 tsoTotalLen
= TxdOp::getTsoLen(desc
);
1616 tsoLoadedHeader
= false;
1617 tsoDescBytesUsed
= 0;
1620 tsoPktHasHeader
= false;
1624 if (useTso
&& !tsoLoadedHeader
) {
1625 // we need to fetch a header
1626 DPRINTF(EthernetDesc
, "Starting DMA of TSO header\n");
1627 assert(TxdOp::isData(desc
) && TxdOp::getLen(desc
) >= tsoHeaderLen
);
1629 assert(tsoHeaderLen
<= 256);
1630 igbe
->dmaRead(pciToDma(TxdOp::getBuf(desc
)),
1631 tsoHeaderLen
, &headerEvent
, tsoHeader
, 0);
1636 IGbE::TxDescCache::headerComplete()
1638 DPRINTF(EthernetDesc
, "TSO: Fetching TSO header complete\n");
1641 assert(unusedCache
.size());
1642 TxDesc
*desc
= unusedCache
.front();
1643 DPRINTF(EthernetDesc
, "TSO: len: %d tsoHeaderLen: %d\n",
1644 TxdOp::getLen(desc
), tsoHeaderLen
);
1646 if (TxdOp::getLen(desc
) == tsoHeaderLen
) {
1647 tsoDescBytesUsed
= 0;
1648 tsoLoadedHeader
= true;
1649 unusedCache
.pop_front();
1650 usedCache
.push_back(desc
);
1652 // I don't think this case happens, I think the headrer is always
1653 // it's own packet, if it wasn't it might be as simple as just
1654 // incrementing descBytesUsed by the header length, but I'm not
1656 panic("TSO header part of bigger packet, not implemented\n");
1663 IGbE::TxDescCache::getPacketSize(EthPacketPtr p
)
1665 if (!unusedCache
.size())
1668 DPRINTF(EthernetDesc
, "Starting processing of descriptor\n");
1670 assert(!useTso
|| tsoLoadedHeader
);
1671 TxDesc
*desc
= unusedCache
.front();
1674 DPRINTF(EthernetDesc
, "getPacket(): TxDescriptor data "
1675 "d1: %#llx d2: %#llx\n", desc
->d1
, desc
->d2
);
1676 DPRINTF(EthernetDesc
, "TSO: use: %d hdrlen: %d mss: %d total: %d "
1677 "used: %d loaded hdr: %d\n", useTso
, tsoHeaderLen
, tsoMss
,
1678 tsoTotalLen
, tsoUsedLen
, tsoLoadedHeader
);
1680 if (tsoPktHasHeader
)
1681 tsoCopyBytes
= std::min((tsoMss
+ tsoHeaderLen
) - p
->length
,
1682 TxdOp::getLen(desc
) - tsoDescBytesUsed
);
1684 tsoCopyBytes
= std::min(tsoMss
,
1685 TxdOp::getLen(desc
) - tsoDescBytesUsed
);
1687 tsoCopyBytes
+ (tsoPktHasHeader
? 0 : tsoHeaderLen
);
1689 DPRINTF(EthernetDesc
, "TSO: descBytesUsed: %d copyBytes: %d "
1690 "this descLen: %d\n",
1691 tsoDescBytesUsed
, tsoCopyBytes
, TxdOp::getLen(desc
));
1692 DPRINTF(EthernetDesc
, "TSO: pktHasHeader: %d\n", tsoPktHasHeader
);
1693 DPRINTF(EthernetDesc
, "TSO: Next packet is %d bytes\n", pkt_size
);
1697 DPRINTF(EthernetDesc
, "Next TX packet is %d bytes\n",
1698 TxdOp::getLen(unusedCache
.front()));
1699 return TxdOp::getLen(desc
);
1703 IGbE::TxDescCache::getPacketData(EthPacketPtr p
)
1705 assert(unusedCache
.size());
1708 desc
= unusedCache
.front();
1710 DPRINTF(EthernetDesc
, "getPacketData(): TxDescriptor data "
1711 "d1: %#llx d2: %#llx\n", desc
->d1
, desc
->d2
);
1712 assert((TxdOp::isLegacy(desc
) || TxdOp::isData(desc
)) &&
1713 TxdOp::getLen(desc
));
1719 DPRINTF(EthernetDesc
, "Starting DMA of packet at offset %d\n", p
->length
);
1722 assert(tsoLoadedHeader
);
1723 if (!tsoPktHasHeader
) {
1724 DPRINTF(EthernetDesc
,
1725 "Loading TSO header (%d bytes) into start of packet\n",
1727 memcpy(p
->data
, &tsoHeader
,tsoHeaderLen
);
1728 p
->length
+=tsoHeaderLen
;
1729 tsoPktHasHeader
= true;
1734 DPRINTF(EthernetDesc
,
1735 "Starting DMA of packet at offset %d length: %d\n",
1736 p
->length
, tsoCopyBytes
);
1737 igbe
->dmaRead(pciToDma(TxdOp::getBuf(desc
))
1739 tsoCopyBytes
, &pktEvent
, p
->data
+ p
->length
,
1741 tsoDescBytesUsed
+= tsoCopyBytes
;
1742 assert(tsoDescBytesUsed
<= TxdOp::getLen(desc
));
1744 igbe
->dmaRead(pciToDma(TxdOp::getBuf(desc
)),
1745 TxdOp::getLen(desc
), &pktEvent
, p
->data
+ p
->length
,
1751 IGbE::TxDescCache::pktComplete()
1755 assert(unusedCache
.size());
1758 igbe
->anBegin("TXS", "Update Desc");
1760 DPRINTF(EthernetDesc
, "DMA of packet complete\n");
1763 desc
= unusedCache
.front();
1764 assert((TxdOp::isLegacy(desc
) || TxdOp::isData(desc
)) &&
1765 TxdOp::getLen(desc
));
1767 DPRINTF(EthernetDesc
, "TxDescriptor data d1: %#llx d2: %#llx\n",
1768 desc
->d1
, desc
->d2
);
1770 // Set the length of the data in the EtherPacket
1772 DPRINTF(EthernetDesc
, "TSO: use: %d hdrlen: %d mss: %d total: %d "
1773 "used: %d loaded hdr: %d\n", useTso
, tsoHeaderLen
, tsoMss
,
1774 tsoTotalLen
, tsoUsedLen
, tsoLoadedHeader
);
1775 pktPtr
->length
+= tsoCopyBytes
;
1776 tsoUsedLen
+= tsoCopyBytes
;
1777 DPRINTF(EthernetDesc
, "TSO: descBytesUsed: %d copyBytes: %d\n",
1778 tsoDescBytesUsed
, tsoCopyBytes
);
1780 pktPtr
->length
+= TxdOp::getLen(desc
);
1784 if ((!TxdOp::eop(desc
) && !useTso
) ||
1785 (pktPtr
->length
< ( tsoMss
+ tsoHeaderLen
) &&
1786 tsoTotalLen
!= tsoUsedLen
&& useTso
)) {
1787 assert(!useTso
|| (tsoDescBytesUsed
== TxdOp::getLen(desc
)));
1788 igbe
->anDq("TXS", annUnusedCacheQ
);
1789 unusedCache
.pop_front();
1790 igbe
->anQ("TXS", annUsedCacheQ
);
1791 usedCache
.push_back(desc
);
1793 tsoDescBytesUsed
= 0;
1796 pktMultiDesc
= true;
1798 DPRINTF(EthernetDesc
, "Partial Packet Descriptor of %d bytes Done\n",
1808 pktMultiDesc
= false;
1809 // no support for vlans
1810 assert(!TxdOp::vle(desc
));
1812 // we only support single packet descriptors at this point
1814 assert(TxdOp::eop(desc
));
1816 // set that this packet is done
1817 if (TxdOp::rs(desc
))
1820 DPRINTF(EthernetDesc
, "TxDescriptor data d1: %#llx d2: %#llx\n",
1821 desc
->d1
, desc
->d2
);
1826 DPRINTF(EthernetDesc
, "TSO: Modifying IP header. Id + %d\n",
1828 ip
->id(ip
->id() + tsoPkts
++);
1829 ip
->len(pktPtr
->length
- EthPtr(pktPtr
)->size());
1833 DPRINTF(EthernetDesc
,
1834 "TSO: Modifying TCP header. old seq %d + %d\n",
1835 tcp
->seq(), tsoPrevSeq
);
1836 tcp
->seq(tcp
->seq() + tsoPrevSeq
);
1837 if (tsoUsedLen
!= tsoTotalLen
)
1838 tcp
->flags(tcp
->flags() & ~9); // clear fin & psh
1842 DPRINTF(EthernetDesc
, "TSO: Modifying UDP header.\n");
1843 udp
->len(pktPtr
->length
- EthPtr(pktPtr
)->size());
1846 tsoPrevSeq
= tsoUsedLen
;
1849 if (DTRACE(EthernetDesc
)) {
1852 DPRINTF(EthernetDesc
, "Proccesing Ip packet with Id=%d\n",
1855 DPRINTF(EthernetSM
, "Proccesing Non-Ip packet\n");
1858 // Checksums are only ofloaded for new descriptor types
1859 if (TxdOp::isData(desc
) && ( TxdOp::ixsm(desc
) || TxdOp::txsm(desc
)) ) {
1860 DPRINTF(EthernetDesc
, "Calculating checksums for packet\n");
1863 if (TxdOp::ixsm(desc
)) {
1866 igbe
->txIpChecksums
++;
1867 DPRINTF(EthernetDesc
, "Calculated IP checksum\n");
1869 if (TxdOp::txsm(desc
)) {
1874 tcp
->sum(cksum(tcp
));
1875 igbe
->txTcpChecksums
++;
1876 DPRINTF(EthernetDesc
, "Calculated TCP checksum\n");
1880 udp
->sum(cksum(udp
));
1881 igbe
->txUdpChecksums
++;
1882 DPRINTF(EthernetDesc
, "Calculated UDP checksum\n");
1884 panic("Told to checksum, but don't know how\n");
1889 if (TxdOp::ide(desc
)) {
1890 // Deal with the rx timer interrupts
1891 DPRINTF(EthernetDesc
, "Descriptor had IDE set\n");
1892 if (igbe
->regs
.tidv
.idv()) {
1893 Tick delay
= igbe
->regs
.tidv
.idv() * igbe
->intClock();
1894 DPRINTF(EthernetDesc
, "setting tidv\n");
1895 igbe
->reschedule(igbe
->tidvEvent
, curTick() + delay
, true);
1898 if (igbe
->regs
.tadv
.idv() && igbe
->regs
.tidv
.idv()) {
1899 Tick delay
= igbe
->regs
.tadv
.idv() * igbe
->intClock();
1900 DPRINTF(EthernetDesc
, "setting tadv\n");
1901 if (!igbe
->tadvEvent
.scheduled()) {
1902 igbe
->schedule(igbe
->tadvEvent
, curTick() + delay
);
1908 if (!useTso
|| TxdOp::getLen(desc
) == tsoDescBytesUsed
) {
1909 DPRINTF(EthernetDesc
, "Descriptor Done\n");
1910 igbe
->anDq("TXS", annUnusedCacheQ
);
1911 unusedCache
.pop_front();
1912 igbe
->anQ("TXS", annUsedCacheQ
);
1913 usedCache
.push_back(desc
);
1914 tsoDescBytesUsed
= 0;
1917 if (useTso
&& tsoUsedLen
== tsoTotalLen
)
1921 DPRINTF(EthernetDesc
,
1922 "------Packet of %d bytes ready for transmission-------\n",
1927 tsoPktHasHeader
= false;
1929 if (igbe
->regs
.txdctl
.wthresh() == 0) {
1930 igbe
->anBegin("TXS", "Desc Writeback");
1931 DPRINTF(EthernetDesc
, "WTHRESH == 0, writing back descriptor\n");
1933 } else if (!igbe
->regs
.txdctl
.gran() && igbe
->regs
.txdctl
.wthresh() <=
1934 descInBlock(usedCache
.size())) {
1935 DPRINTF(EthernetDesc
, "used > WTHRESH, writing back descriptor\n");
1936 igbe
->anBegin("TXS", "Desc Writeback");
1937 writeback((igbe
->cacheBlockSize()-1)>>4);
1938 } else if (igbe
->regs
.txdctl
.wthresh() <= usedCache
.size()) {
1939 DPRINTF(EthernetDesc
, "used > WTHRESH, writing back descriptor\n");
1940 igbe
->anBegin("TXS", "Desc Writeback");
1941 writeback((igbe
->cacheBlockSize()-1)>>4);
// Runs after a TX descriptor writeback completes: posts the TXDW
// (descriptor written back) interrupt and, when head writeback is enabled,
// DMAs the current head index back to the driver-supplied address.
1949 IGbE::TxDescCache::actionAfterWb()
1951 DPRINTF(EthernetDesc
, "actionAfterWb() completionEnabled: %d\n",
// Always raise the descriptor-writeback interrupt.
1953 igbe
->postInterrupt(iGbReg::IT_TXDW
);
1954 if (completionEnabled
) {
// Snapshot the TX head register; this is the value written back.
1955 descEnd
= igbe
->regs
.tdh();
1956 DPRINTF(EthernetDesc
,
1957 "Completion writing back value: %d to addr: %#x\n", descEnd
,
// completionAddress bits [1:0] are masked off (mbits 63..2): the
// writeback target must be 4-byte aligned.  nullEvent: fire-and-forget DMA.
1959 igbe
->dmaWrite(pciToDma(mbits(completionAddress
, 63, 2)),
1960 sizeof(descEnd
), &nullEvent
, (uint8_t*)&descEnd
, 0);
1965 IGbE::TxDescCache::serialize(std::ostream
&os
)
1967 DescCache
<TxDesc
>::serialize(os
);
1968 SERIALIZE_SCALAR(pktDone
);
1969 SERIALIZE_SCALAR(isTcp
);
1970 SERIALIZE_SCALAR(pktWaiting
);
1971 SERIALIZE_SCALAR(pktMultiDesc
);
1973 SERIALIZE_SCALAR(useTso
);
1974 SERIALIZE_SCALAR(tsoHeaderLen
);
1975 SERIALIZE_SCALAR(tsoMss
);
1976 SERIALIZE_SCALAR(tsoTotalLen
);
1977 SERIALIZE_SCALAR(tsoUsedLen
);
1978 SERIALIZE_SCALAR(tsoPrevSeq
);;
1979 SERIALIZE_SCALAR(tsoPktPayloadBytes
);
1980 SERIALIZE_SCALAR(tsoLoadedHeader
);
1981 SERIALIZE_SCALAR(tsoPktHasHeader
);
1982 SERIALIZE_ARRAY(tsoHeader
, 256);
1983 SERIALIZE_SCALAR(tsoDescBytesUsed
);
1984 SERIALIZE_SCALAR(tsoCopyBytes
);
1985 SERIALIZE_SCALAR(tsoPkts
);
1987 SERIALIZE_SCALAR(completionAddress
);
1988 SERIALIZE_SCALAR(completionEnabled
);
1989 SERIALIZE_SCALAR(descEnd
);
1993 IGbE::TxDescCache::unserialize(Checkpoint
*cp
, const std::string
§ion
)
1995 DescCache
<TxDesc
>::unserialize(cp
, section
);
1996 UNSERIALIZE_SCALAR(pktDone
);
1997 UNSERIALIZE_SCALAR(isTcp
);
1998 UNSERIALIZE_SCALAR(pktWaiting
);
1999 UNSERIALIZE_SCALAR(pktMultiDesc
);
2001 UNSERIALIZE_SCALAR(useTso
);
2002 UNSERIALIZE_SCALAR(tsoHeaderLen
);
2003 UNSERIALIZE_SCALAR(tsoMss
);
2004 UNSERIALIZE_SCALAR(tsoTotalLen
);
2005 UNSERIALIZE_SCALAR(tsoUsedLen
);
2006 UNSERIALIZE_SCALAR(tsoPrevSeq
);;
2007 UNSERIALIZE_SCALAR(tsoPktPayloadBytes
);
2008 UNSERIALIZE_SCALAR(tsoLoadedHeader
);
2009 UNSERIALIZE_SCALAR(tsoPktHasHeader
);
2010 UNSERIALIZE_ARRAY(tsoHeader
, 256);
2011 UNSERIALIZE_SCALAR(tsoDescBytesUsed
);
2012 UNSERIALIZE_SCALAR(tsoCopyBytes
);
2013 UNSERIALIZE_SCALAR(tsoPkts
);
2015 UNSERIALIZE_SCALAR(completionAddress
);
2016 UNSERIALIZE_SCALAR(completionEnabled
);
2017 UNSERIALIZE_SCALAR(descEnd
);
// NOTE(review): only the signature of packetAvailable() survives here — its
// body (original lines 2022-2029) was lost in extraction.  txStateMachine()
// below calls it as a predicate alongside packetMultiDesc(); presumably it
// reports/clears pktDone.  Verify against the complete source.
2021 IGbE::TxDescCache::packetAvailable()
2031 IGbE::TxDescCache::enableSm()
2033 if (!igbe
->drainEvent
) {
2034 igbe
->txTick
= true;
2035 igbe
->restartClock();
2040 IGbE::TxDescCache::hasOutstandingEvents()
2042 return pktEvent
.scheduled() || wbEvent
.scheduled() ||
2043 fetchEvent
.scheduled();
2047 ///////////////////////////////////// IGbE /////////////////////////////////
2050 IGbE::restartClock()
2052 if (!tickEvent
.scheduled() && (rxTick
|| txTick
|| txFifoTick
) &&
2053 getState() == SimObject::Running
)
2054 schedule(tickEvent
, (curTick() / ticks(1)) * ticks(1) + ticks(1));
2058 IGbE::drain(Event
*de
)
2061 count
= pioPort
.drain(de
) + dmaPort
.drain(de
);
2062 if (rxDescCache
.hasOutstandingEvents() ||
2063 txDescCache
.hasOutstandingEvents()) {
2072 if (tickEvent
.scheduled())
2073 deschedule(tickEvent
);
2076 changeState(Draining
);
2078 changeState(Drained
);
2080 DPRINTF(EthernetSM
, "got drain() returning %d", count
);
2087 SimObject::resume();
2094 DPRINTF(EthernetSM
, "resuming from drain");
2103 DPRINTF(EthernetSM
, "checkDrain() in drain\n");
2107 if (!rxDescCache
.hasOutstandingEvents() &&
2108 !txDescCache
.hasOutstandingEvents()) {
2109 drainEvent
->process();
2115 IGbE::txStateMachine()
2117 if (!regs
.tctl
.en()) {
2119 DPRINTF(EthernetSM
, "TXS: TX disabled, stopping ticking\n");
2123 // If we have a packet available and it's length is not 0 (meaning it's not
2124 // a multidescriptor packet) put it in the fifo, otherwise an the next
2125 // iteration we'll get the rest of the data
2126 if (txPacket
&& txDescCache
.packetAvailable()
2127 && !txDescCache
.packetMultiDesc() && txPacket
->length
) {
2128 anQ("TXS", "TX FIFO Q");
2129 DPRINTF(EthernetSM
, "TXS: packet placed in TX FIFO\n");
2133 txFifo
.push(txPacket
);
2134 txFifoTick
= true && !drainEvent
;
2137 anBegin("TXS", "Desc Writeback");
2138 txDescCache
.writeback((cacheBlockSize()-1)>>4);
2142 // Only support descriptor granularity
2143 if (regs
.txdctl
.lwthresh() &&
2144 txDescCache
.descLeft() < (regs
.txdctl
.lwthresh() * 8)) {
2145 DPRINTF(EthernetSM
, "TXS: LWTHRESH caused posting of TXDLOW\n");
2146 postInterrupt(IT_TXDLOW
);
2150 txPacket
= new EthPacketData(16384);
2153 if (!txDescCache
.packetWaiting()) {
2154 if (txDescCache
.descLeft() == 0) {
2155 postInterrupt(IT_TXQE
);
2156 anBegin("TXS", "Desc Writeback");
2157 txDescCache
.writeback(0);
2158 anBegin("TXS", "Desc Fetch");
2159 anWe("TXS", txDescCache
.annUnusedCacheQ
);
2160 txDescCache
.fetchDescriptors();
2161 DPRINTF(EthernetSM
, "TXS: No descriptors left in ring, forcing "
2162 "writeback stopping ticking and posting TXQE\n");
2168 if (!(txDescCache
.descUnused())) {
2169 anBegin("TXS", "Desc Fetch");
2170 txDescCache
.fetchDescriptors();
2171 anWe("TXS", txDescCache
.annUnusedCacheQ
);
2172 DPRINTF(EthernetSM
, "TXS: No descriptors available in cache, "
2173 "fetching and stopping ticking\n");
2177 anPq("TXS", txDescCache
.annUnusedCacheQ
);
2180 txDescCache
.processContextDesc();
2181 if (txDescCache
.packetWaiting()) {
2183 "TXS: Fetching TSO header, stopping ticking\n");
2188 unsigned size
= txDescCache
.getPacketSize(txPacket
);
2189 if (size
> 0 && txFifo
.avail() > size
) {
2190 anRq("TXS", "TX FIFO Q");
2191 anBegin("TXS", "DMA Packet");
2192 DPRINTF(EthernetSM
, "TXS: Reserving %d bytes in FIFO and "
2193 "beginning DMA of next packet\n", size
);
2194 txFifo
.reserve(size
);
2195 txDescCache
.getPacketData(txPacket
);
2196 } else if (size
== 0) {
2197 DPRINTF(EthernetSM
, "TXS: getPacketSize returned: %d\n", size
);
2199 "TXS: No packets to get, writing back used descriptors\n");
2200 anBegin("TXS", "Desc Writeback");
2201 txDescCache
.writeback(0);
2203 anWf("TXS", "TX FIFO Q");
2204 DPRINTF(EthernetSM
, "TXS: FIFO full, stopping ticking until space "
2205 "available in FIFO\n");
2212 DPRINTF(EthernetSM
, "TXS: Nothing to do, stopping ticking\n");
2217 IGbE::ethRxPkt(EthPacketPtr pkt
)
2219 rxBytes
+= pkt
->length
;
2222 DPRINTF(Ethernet
, "RxFIFO: Receiving pcakte from wire\n");
2223 anBegin("RXQ", "Wire Recv");
2226 if (!regs
.rctl
.en()) {
2227 DPRINTF(Ethernet
, "RxFIFO: RX not enabled, dropping\n");
2228 anBegin("RXQ", "FIFO Drop", CPA::FL_BAD
);
2232 // restart the state machines if they are stopped
2233 rxTick
= true && !drainEvent
;
2234 if ((rxTick
|| txTick
) && !tickEvent
.scheduled()) {
2236 "RXS: received packet into fifo, starting ticking\n");
2240 if (!rxFifo
.push(pkt
)) {
2241 DPRINTF(Ethernet
, "RxFIFO: Packet won't fit in fifo... dropped\n");
2242 postInterrupt(IT_RXO
, true);
2243 anBegin("RXQ", "FIFO Drop", CPA::FL_BAD
);
2247 if (CPA::available() && cpa
->enabled()) {
2248 assert(sys
->numSystemsRunning
<= 2);
2250 if (sys
->systemList
[0] == sys
)
2251 other_sys
= sys
->systemList
[1];
2253 other_sys
= sys
->systemList
[0];
2255 cpa
->hwDq(CPA::FL_NONE
, sys
, macAddr
, "RXQ", "WireQ", 0, other_sys
);
2256 anQ("RXQ", "RX FIFO Q");
2257 cpa
->hwWe(CPA::FL_NONE
, sys
, macAddr
, "RXQ", "WireQ", 0, other_sys
);
2265 IGbE::rxStateMachine()
2267 if (!regs
.rctl
.en()) {
2269 DPRINTF(EthernetSM
, "RXS: RX disabled, stopping ticking\n");
2273 // If the packet is done check for interrupts/descriptors/etc
2274 if (rxDescCache
.packetDone()) {
2275 rxDmaPacket
= false;
2276 DPRINTF(EthernetSM
, "RXS: Packet completed DMA to memory\n");
2277 int descLeft
= rxDescCache
.descLeft();
2278 DPRINTF(EthernetSM
, "RXS: descLeft: %d rdmts: %d rdlen: %d\n",
2279 descLeft
, regs
.rctl
.rdmts(), regs
.rdlen());
2280 switch (regs
.rctl
.rdmts()) {
2281 case 2: if (descLeft
> .125 * regs
.rdlen()) break;
2282 case 1: if (descLeft
> .250 * regs
.rdlen()) break;
2283 case 0: if (descLeft
> .500 * regs
.rdlen()) break;
2284 DPRINTF(Ethernet
, "RXS: Interrupting (RXDMT) "
2285 "because of descriptors left\n");
2286 postInterrupt(IT_RXDMT
);
2291 rxDescCache
.writeback(0);
2293 if (descLeft
== 0) {
2294 anBegin("RXS", "Writeback Descriptors");
2295 rxDescCache
.writeback(0);
2296 DPRINTF(EthernetSM
, "RXS: No descriptors left in ring, forcing"
2297 " writeback and stopping ticking\n");
2301 // only support descriptor granulaties
2302 assert(regs
.rxdctl
.gran());
2304 if (regs
.rxdctl
.wthresh() >= rxDescCache
.descUsed()) {
2306 "RXS: Writing back because WTHRESH >= descUsed\n");
2307 anBegin("RXS", "Writeback Descriptors");
2308 if (regs
.rxdctl
.wthresh() < (cacheBlockSize()>>4))
2309 rxDescCache
.writeback(regs
.rxdctl
.wthresh()-1);
2311 rxDescCache
.writeback((cacheBlockSize()-1)>>4);
2314 if ((rxDescCache
.descUnused() < regs
.rxdctl
.pthresh()) &&
2315 ((rxDescCache
.descLeft() - rxDescCache
.descUnused()) >
2316 regs
.rxdctl
.hthresh())) {
2317 DPRINTF(EthernetSM
, "RXS: Fetching descriptors because "
2318 "descUnused < PTHRESH\n");
2319 anBegin("RXS", "Fetch Descriptors");
2320 rxDescCache
.fetchDescriptors();
2323 if (rxDescCache
.descUnused() == 0) {
2324 anBegin("RXS", "Fetch Descriptors");
2325 rxDescCache
.fetchDescriptors();
2326 anWe("RXS", rxDescCache
.annUnusedCacheQ
);
2327 DPRINTF(EthernetSM
, "RXS: No descriptors available in cache, "
2328 "fetching descriptors and stopping ticking\n");
2336 "RXS: stopping ticking until packet DMA completes\n");
2341 if (!rxDescCache
.descUnused()) {
2342 anBegin("RXS", "Fetch Descriptors");
2343 rxDescCache
.fetchDescriptors();
2344 anWe("RXS", rxDescCache
.annUnusedCacheQ
);
2345 DPRINTF(EthernetSM
, "RXS: No descriptors available in cache, "
2346 "stopping ticking\n");
2348 DPRINTF(EthernetSM
, "RXS: No descriptors available, fetching\n");
2351 anPq("RXS", rxDescCache
.annUnusedCacheQ
);
2353 if (rxFifo
.empty()) {
2354 anWe("RXS", "RX FIFO Q");
2355 DPRINTF(EthernetSM
, "RXS: RxFIFO empty, stopping ticking\n");
2359 anPq("RXS", "RX FIFO Q");
2360 anBegin("RXS", "Get Desc");
2363 pkt
= rxFifo
.front();
2366 pktOffset
= rxDescCache
.writePacket(pkt
, pktOffset
);
2367 DPRINTF(EthernetSM
, "RXS: Writing packet into memory\n");
2368 if (pktOffset
== pkt
->length
) {
2369 anBegin( "RXS", "FIFO Dequeue");
2370 DPRINTF(EthernetSM
, "RXS: Removing packet from FIFO\n");
2372 anDq("RXS", "RX FIFO Q");
2376 DPRINTF(EthernetSM
, "RXS: stopping ticking until packet DMA completes\n");
2379 anBegin("RXS", "DMA Packet");
2385 if (txFifo
.empty()) {
2386 anWe("TXQ", "TX FIFO Q");
2392 anPq("TXQ", "TX FIFO Q");
2393 if (etherInt
->sendPacket(txFifo
.front())) {
2394 cpa
->hwQ(CPA::FL_NONE
, sys
, macAddr
, "TXQ", "WireQ", 0);
2395 if (DTRACE(EthernetSM
)) {
2396 IpPtr
ip(txFifo
.front());
2398 DPRINTF(EthernetSM
, "Transmitting Ip packet with Id=%d\n",
2401 DPRINTF(EthernetSM
, "Transmitting Non-Ip packet\n");
2403 anDq("TXQ", "TX FIFO Q");
2404 anBegin("TXQ", "Wire Send");
2406 "TxFIFO: Successful transmit, bytes available in fifo: %d\n",
2409 txBytes
+= txFifo
.front()->length
;
2415 // We'll get woken up when the packet ethTxDone() gets called
2423 DPRINTF(EthernetSM
, "IGbE: -------------- Cycle --------------\n");
2435 if (rxTick
|| txTick
|| txFifoTick
)
2436 schedule(tickEvent
, curTick() + ticks(1));
2442 anBegin("TXQ", "Send Done");
2443 // restart the tx state machines if they are stopped
2444 // fifo to send another packet
2445 // tx sm to put more data into the fifo
2446 txFifoTick
= true && !drainEvent
;
2447 if (txDescCache
.descLeft() != 0 && !drainEvent
)
2452 DPRINTF(EthernetSM
, "TxFIFO: Transmission complete\n");
2456 IGbE::serialize(std::ostream
&os
)
2458 PciDev::serialize(os
);
2461 SERIALIZE_SCALAR(eeOpBits
);
2462 SERIALIZE_SCALAR(eeAddrBits
);
2463 SERIALIZE_SCALAR(eeDataBits
);
2464 SERIALIZE_SCALAR(eeOpcode
);
2465 SERIALIZE_SCALAR(eeAddr
);
2466 SERIALIZE_SCALAR(lastInterrupt
);
2467 SERIALIZE_ARRAY(flash
,iGbReg::EEPROM_SIZE
);
2469 rxFifo
.serialize("rxfifo", os
);
2470 txFifo
.serialize("txfifo", os
);
2472 bool txPktExists
= txPacket
;
2473 SERIALIZE_SCALAR(txPktExists
);
2475 txPacket
->serialize("txpacket", os
);
2477 Tick rdtr_time
= 0, radv_time
= 0, tidv_time
= 0, tadv_time
= 0,
2480 if (rdtrEvent
.scheduled())
2481 rdtr_time
= rdtrEvent
.when();
2482 SERIALIZE_SCALAR(rdtr_time
);
2484 if (radvEvent
.scheduled())
2485 radv_time
= radvEvent
.when();
2486 SERIALIZE_SCALAR(radv_time
);
2488 if (tidvEvent
.scheduled())
2489 tidv_time
= tidvEvent
.when();
2490 SERIALIZE_SCALAR(tidv_time
);
2492 if (tadvEvent
.scheduled())
2493 tadv_time
= tadvEvent
.when();
2494 SERIALIZE_SCALAR(tadv_time
);
2496 if (interEvent
.scheduled())
2497 inter_time
= interEvent
.when();
2498 SERIALIZE_SCALAR(inter_time
);
2500 SERIALIZE_SCALAR(pktOffset
);
2502 nameOut(os
, csprintf("%s.TxDescCache", name()));
2503 txDescCache
.serialize(os
);
2505 nameOut(os
, csprintf("%s.RxDescCache", name()));
2506 rxDescCache
.serialize(os
);
2510 IGbE::unserialize(Checkpoint
*cp
, const std::string
§ion
)
2512 PciDev::unserialize(cp
, section
);
2514 regs
.unserialize(cp
, section
);
2515 UNSERIALIZE_SCALAR(eeOpBits
);
2516 UNSERIALIZE_SCALAR(eeAddrBits
);
2517 UNSERIALIZE_SCALAR(eeDataBits
);
2518 UNSERIALIZE_SCALAR(eeOpcode
);
2519 UNSERIALIZE_SCALAR(eeAddr
);
2520 UNSERIALIZE_SCALAR(lastInterrupt
);
2521 UNSERIALIZE_ARRAY(flash
,iGbReg::EEPROM_SIZE
);
2523 rxFifo
.unserialize("rxfifo", cp
, section
);
2524 txFifo
.unserialize("txfifo", cp
, section
);
2527 UNSERIALIZE_SCALAR(txPktExists
);
2529 txPacket
= new EthPacketData(16384);
2530 txPacket
->unserialize("txpacket", cp
, section
);
2537 Tick rdtr_time
, radv_time
, tidv_time
, tadv_time
, inter_time
;
2538 UNSERIALIZE_SCALAR(rdtr_time
);
2539 UNSERIALIZE_SCALAR(radv_time
);
2540 UNSERIALIZE_SCALAR(tidv_time
);
2541 UNSERIALIZE_SCALAR(tadv_time
);
2542 UNSERIALIZE_SCALAR(inter_time
);
2545 schedule(rdtrEvent
, rdtr_time
);
2548 schedule(radvEvent
, radv_time
);
2551 schedule(tidvEvent
, tidv_time
);
2554 schedule(tadvEvent
, tadv_time
);
2557 schedule(interEvent
, inter_time
);
2559 UNSERIALIZE_SCALAR(pktOffset
);
2561 txDescCache
.unserialize(cp
, csprintf("%s.TxDescCache", section
));
2563 rxDescCache
.unserialize(cp
, csprintf("%s.RxDescCache", section
));
2567 IGbEParams::create()
2569 return new IGbE(this);