IGbE: Clean up debug printing and properly account for copied bytes.
[gem5.git] / src / dev / i8254xGBe.cc
1 /*
2 * Copyright (c) 2006 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Ali Saidi
29 */
30
31 /* @file
32 * Device model for Intel's 8254x line of gigabit ethernet controllers.
33 * In particular an 82547 revision 2 (82547GI) MAC because it seems to have the
34 * fewest workarounds in the driver. It will probably work with most of the
35 * other MACs with slight modifications.
36 */
37
38
39 /*
40 * @todo really there are multiple dma engines.. we should implement them.
41 */
42
43 #include <algorithm>
44
45 #include "base/inet.hh"
46 #include "base/trace.hh"
47 #include "debug/EthernetAll.hh"
48 #include "dev/i8254xGBe.hh"
49 #include "mem/packet.hh"
50 #include "mem/packet_access.hh"
51 #include "params/IGbE.hh"
52 #include "sim/stats.hh"
53 #include "sim/system.hh"
54
55 using namespace iGbReg;
56 using namespace Net;
57
// Construct the 8254x MAC model: size the RX/TX FIFOs and descriptor
// caches from the Python params, set the post-reset register state the
// Intel documentation specifies, and build a fake EEPROM image whose
// first words hold the MAC address and whose last word fixes the
// checksum so driver EEPROM validation passes.
IGbE::IGbE(const Params *p)
    : EtherDevice(p), etherInt(NULL), drainEvent(NULL),
      useFlowControl(p->use_flow_control),
      rxFifo(p->rx_fifo_size), txFifo(p->tx_fifo_size), rxTick(false),
      txTick(false), txFifoTick(false), rxDmaPacket(false), pktOffset(0),
      fetchDelay(p->fetch_delay), wbDelay(p->wb_delay),
      fetchCompDelay(p->fetch_comp_delay), wbCompDelay(p->wb_comp_delay),
      rxWriteDelay(p->rx_write_delay), txReadDelay(p->tx_read_delay),
      rdtrEvent(this), radvEvent(this),
      tadvEvent(this), tidvEvent(this), tickEvent(this), interEvent(this),
      rxDescCache(this, name()+".RxDesc", p->rx_desc_cache_size),
      txDescCache(this, name()+".TxDesc", p->tx_desc_cache_size),
      clock(p->clock), lastInterrupt(0)
{
    etherInt = new IGbEInt(name() + ".int", this);

    // Initialized internal registers per Intel documentation
    // All registers intialized to 0 by per register constructor
    regs.ctrl.fd(1);
    regs.ctrl.lrst(1);
    regs.ctrl.speed(2);
    regs.ctrl.frcspd(1);
    regs.sts.speed(3); // Say we're 1000Mbps
    regs.sts.fd(1); // full duplex
    regs.sts.lu(1); // link up
    regs.eecd.fwe(1);
    regs.eecd.ee_type(1);
    regs.imr = 0;
    regs.iam = 0;
    regs.rxdctl.gran(1);
    regs.rxdctl.wthresh(1);
    regs.fcrth(1);
    regs.tdwba = 0;
    regs.rlpml = 0;
    regs.sw_fw_sync = 0;

    regs.pba.rxa(0x30);
    regs.pba.txa(0x10);

    // Serial EEPROM bit-bang interface state (see REG_EECD handling)
    eeOpBits = 0;
    eeAddrBits = 0;
    eeDataBits = 0;
    eeOpcode = 0;

    // clear all 64 16 bit words of the eeprom
    memset(&flash, 0, EEPROM_SIZE*2);

    // Set the MAC address
    memcpy(flash, p->hardware_address.bytes(), ETH_ADDR_LEN);
    // Byte-swap the MAC words into the EEPROM's big-endian layout
    for (int x = 0; x < ETH_ADDR_LEN/2; x++)
        flash[x] = htobe(flash[x]);

    uint16_t csum = 0;
    for (int x = 0; x < EEPROM_SIZE; x++)
        csum += htobe(flash[x]);


    // Magic happy checksum value: the last word is chosen so the sum of
    // all EEPROM words equals EEPROM_CSUM, as the driver expects.
    flash[EEPROM_SIZE-1] = htobe((uint16_t)(EEPROM_CSUM - csum));

    // Store the MAC address as queue ID
    macAddr = p->hardware_address;

    rxFifo.clear();
    txFifo.clear();
}
124
void
IGbE::init()
{
    // Grab the CPA annotation singleton before running the normal PCI
    // device initialization.
    cpa = CPA::cpa();
    PciDev::init();
}
131
132 EtherInt*
133 IGbE::getEthPort(const std::string &if_name, int idx)
134 {
135
136 if (if_name == "interface") {
137 if (etherInt->getPeer())
138 panic("Port already connected to\n");
139 return etherInt;
140 }
141 return NULL;
142 }
143
144 Tick
145 IGbE::writeConfig(PacketPtr pkt)
146 {
147 int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
148 if (offset < PCI_DEVICE_SPECIFIC)
149 PciDev::writeConfig(pkt);
150 else
151 panic("Device specific PCI config space not implemented.\n");
152
153 //
154 // Some work may need to be done here based for the pci COMMAND bits.
155 //
156
157 return pioDelay;
158 }
159
160 // Handy macro for range-testing register access addresses
161 #define IN_RANGE(val, base, len) (val >= base && val < (base + len))
162
// Handle a 32-bit MMIO read of a device register. Some reads have side
// effects: reading ICR may clear pending causes and auto-mask, reading
// RDTR with FPD set forces an RX writeback and interrupt, and reading
// SWSM sets the semaphore-taken bit.
Tick
IGbE::read(PacketPtr pkt)
{
    int bar;
    Addr daddr;

    if (!getBAR(pkt->getAddr(), bar, daddr))
        panic("Invalid PCI memory access to unmapped memory.\n");

    // Only Memory register BAR is allowed
    assert(bar == 0);

    // Only 32bit accesses allowed
    assert(pkt->getSize() == 4);

    DPRINTF(Ethernet, "Read device register %#X\n", daddr);

    pkt->allocate();

    //
    // Handle read of register here
    //


    switch (daddr) {
      case REG_CTRL:
        pkt->set<uint32_t>(regs.ctrl());
        break;
      case REG_STATUS:
        pkt->set<uint32_t>(regs.sts());
        break;
      case REG_EECD:
        pkt->set<uint32_t>(regs.eecd());
        break;
      case REG_EERD:
        pkt->set<uint32_t>(regs.eerd());
        break;
      case REG_CTRL_EXT:
        pkt->set<uint32_t>(regs.ctrl_ext());
        break;
      case REG_MDIC:
        pkt->set<uint32_t>(regs.mdic());
        break;
      case REG_ICR:
        DPRINTF(Ethernet, "Reading ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n",
                regs.icr(), regs.imr, regs.iam, regs.ctrl_ext.iame());
        pkt->set<uint32_t>(regs.icr());
        // Read-to-clear semantics: clear the cause bits if the interrupt
        // line is asserted or all interrupts are masked.
        if (regs.icr.int_assert() || regs.imr == 0) {
            regs.icr = regs.icr() & ~mask(30);
            DPRINTF(Ethernet, "Cleared ICR. ICR=%#x\n", regs.icr());
        }
        // IAME: auto-mask the IAM bits on an asserted-ICR read.
        if (regs.ctrl_ext.iame() && regs.icr.int_assert())
            regs.imr &= ~regs.iam;
        chkInterrupt();
        break;
      case REG_EICR:
        // This is only useful for MSI, but the driver reads it every time
        // Just don't do anything
        pkt->set<uint32_t>(0);
        break;
      case REG_ITR:
        pkt->set<uint32_t>(regs.itr());
        break;
      case REG_RCTL:
        pkt->set<uint32_t>(regs.rctl());
        break;
      case REG_FCTTV:
        pkt->set<uint32_t>(regs.fcttv());
        break;
      case REG_TCTL:
        pkt->set<uint32_t>(regs.tctl());
        break;
      case REG_PBA:
        pkt->set<uint32_t>(regs.pba());
        break;
      case REG_WUC:
      case REG_LEDCTL:
        pkt->set<uint32_t>(0); // We don't care, so just return 0
        break;
      case REG_FCRTL:
        pkt->set<uint32_t>(regs.fcrtl());
        break;
      case REG_FCRTH:
        pkt->set<uint32_t>(regs.fcrth());
        break;
      case REG_RDBAL:
        pkt->set<uint32_t>(regs.rdba.rdbal());
        break;
      case REG_RDBAH:
        pkt->set<uint32_t>(regs.rdba.rdbah());
        break;
      case REG_RDLEN:
        pkt->set<uint32_t>(regs.rdlen());
        break;
      case REG_SRRCTL:
        pkt->set<uint32_t>(regs.srrctl());
        break;
      case REG_RDH:
        pkt->set<uint32_t>(regs.rdh());
        break;
      case REG_RDT:
        pkt->set<uint32_t>(regs.rdt());
        break;
      case REG_RDTR:
        pkt->set<uint32_t>(regs.rdtr());
        // FPD (flush partial descriptor): write back cached RX
        // descriptors immediately and raise an RX timer interrupt.
        if (regs.rdtr.fpd()) {
            rxDescCache.writeback(0);
            DPRINTF(EthernetIntr,
                    "Posting interrupt because of RDTR.FPD write\n");
            postInterrupt(IT_RXT);
            regs.rdtr.fpd(0);
        }
        break;
      case REG_RXDCTL:
        pkt->set<uint32_t>(regs.rxdctl());
        break;
      case REG_RADV:
        pkt->set<uint32_t>(regs.radv());
        break;
      case REG_TDBAL:
        pkt->set<uint32_t>(regs.tdba.tdbal());
        break;
      case REG_TDBAH:
        pkt->set<uint32_t>(regs.tdba.tdbah());
        break;
      case REG_TDLEN:
        pkt->set<uint32_t>(regs.tdlen());
        break;
      case REG_TDH:
        pkt->set<uint32_t>(regs.tdh());
        break;
      case REG_TXDCA_CTL:
        pkt->set<uint32_t>(regs.txdca_ctl());
        break;
      case REG_TDT:
        pkt->set<uint32_t>(regs.tdt());
        break;
      case REG_TIDV:
        pkt->set<uint32_t>(regs.tidv());
        break;
      case REG_TXDCTL:
        pkt->set<uint32_t>(regs.txdctl());
        break;
      case REG_TADV:
        pkt->set<uint32_t>(regs.tadv());
        break;
      case REG_TDWBAL:
        // tdwba is one 64-bit value split over two 32-bit registers
        pkt->set<uint32_t>(regs.tdwba & mask(32));
        break;
      case REG_TDWBAH:
        pkt->set<uint32_t>(regs.tdwba >> 32);
        break;
      case REG_RXCSUM:
        pkt->set<uint32_t>(regs.rxcsum());
        break;
      case REG_RLPML:
        pkt->set<uint32_t>(regs.rlpml);
        break;
      case REG_RFCTL:
        pkt->set<uint32_t>(regs.rfctl());
        break;
      case REG_MANC:
        pkt->set<uint32_t>(regs.manc());
        break;
      case REG_SWSM:
        // Reading SWSM also sets the semaphore-taken bit.
        pkt->set<uint32_t>(regs.swsm());
        regs.swsm.smbi(1);
        break;
      case REG_FWSM:
        pkt->set<uint32_t>(regs.fwsm());
        break;
      case REG_SWFWSYNC:
        pkt->set<uint32_t>(regs.sw_fw_sync);
        break;
      default:
        // Table and statistics regions read as zero; anything else is a
        // register we don't model.
        if (!IN_RANGE(daddr, REG_VFTA, VLAN_FILTER_TABLE_SIZE*4) &&
            !IN_RANGE(daddr, REG_RAL, RCV_ADDRESS_TABLE_SIZE*8) &&
            !IN_RANGE(daddr, REG_MTA, MULTICAST_TABLE_SIZE*4) &&
            !IN_RANGE(daddr, REG_CRCERRS, STATS_REGS_SIZE))
            panic("Read request to unknown register number: %#x\n", daddr);
        else
            pkt->set<uint32_t>(0);
    };

    pkt->makeAtomicResponse();
    return pioDelay;
}
350
351 Tick
352 IGbE::write(PacketPtr pkt)
353 {
354 int bar;
355 Addr daddr;
356
357
358 if (!getBAR(pkt->getAddr(), bar, daddr))
359 panic("Invalid PCI memory access to unmapped memory.\n");
360
361 // Only Memory register BAR is allowed
362 assert(bar == 0);
363
364 // Only 32bit accesses allowed
365 assert(pkt->getSize() == sizeof(uint32_t));
366
367 DPRINTF(Ethernet, "Wrote device register %#X value %#X\n",
368 daddr, pkt->get<uint32_t>());
369
370 //
371 // Handle write of register here
372 //
373 uint32_t val = pkt->get<uint32_t>();
374
375 Regs::RCTL oldrctl;
376 Regs::TCTL oldtctl;
377
378 switch (daddr) {
379 case REG_CTRL:
380 regs.ctrl = val;
381 if (regs.ctrl.tfce())
382 warn("TX Flow control enabled, should implement\n");
383 if (regs.ctrl.rfce())
384 warn("RX Flow control enabled, should implement\n");
385 break;
386 case REG_CTRL_EXT:
387 regs.ctrl_ext = val;
388 break;
389 case REG_STATUS:
390 regs.sts = val;
391 break;
392 case REG_EECD:
393 int oldClk;
394 oldClk = regs.eecd.sk();
395 regs.eecd = val;
396 // See if this is a eeprom access and emulate accordingly
397 if (!oldClk && regs.eecd.sk()) {
398 if (eeOpBits < 8) {
399 eeOpcode = eeOpcode << 1 | regs.eecd.din();
400 eeOpBits++;
401 } else if (eeAddrBits < 8 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
402 eeAddr = eeAddr << 1 | regs.eecd.din();
403 eeAddrBits++;
404 } else if (eeDataBits < 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
405 assert(eeAddr>>1 < EEPROM_SIZE);
406 DPRINTF(EthernetEEPROM, "EEPROM bit read: %d word: %#X\n",
407 flash[eeAddr>>1] >> eeDataBits & 0x1,
408 flash[eeAddr>>1]);
409 regs.eecd.dout((flash[eeAddr>>1] >> (15-eeDataBits)) & 0x1);
410 eeDataBits++;
411 } else if (eeDataBits < 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI) {
412 regs.eecd.dout(0);
413 eeDataBits++;
414 } else
415 panic("What's going on with eeprom interface? opcode:"
416 " %#x:%d addr: %#x:%d, data: %d\n", (uint32_t)eeOpcode,
417 (uint32_t)eeOpBits, (uint32_t)eeAddr,
418 (uint32_t)eeAddrBits, (uint32_t)eeDataBits);
419
420 // Reset everything for the next command
421 if ((eeDataBits == 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) ||
422 (eeDataBits == 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI)) {
423 eeOpBits = 0;
424 eeAddrBits = 0;
425 eeDataBits = 0;
426 eeOpcode = 0;
427 eeAddr = 0;
428 }
429
430 DPRINTF(EthernetEEPROM, "EEPROM: opcode: %#X:%d addr: %#X:%d\n",
431 (uint32_t)eeOpcode, (uint32_t) eeOpBits,
432 (uint32_t)eeAddr>>1, (uint32_t)eeAddrBits);
433 if (eeOpBits == 8 && !(eeOpcode == EEPROM_READ_OPCODE_SPI ||
434 eeOpcode == EEPROM_RDSR_OPCODE_SPI ))
435 panic("Unknown eeprom opcode: %#X:%d\n", (uint32_t)eeOpcode,
436 (uint32_t)eeOpBits);
437
438
439 }
440 // If driver requests eeprom access, immediately give it to it
441 regs.eecd.ee_gnt(regs.eecd.ee_req());
442 break;
443 case REG_EERD:
444 regs.eerd = val;
445 if (regs.eerd.start()) {
446 regs.eerd.done(1);
447 assert(regs.eerd.addr() < EEPROM_SIZE);
448 regs.eerd.data(flash[regs.eerd.addr()]);
449 regs.eerd.start(0);
450 DPRINTF(EthernetEEPROM, "EEPROM: read addr: %#X data %#x\n",
451 regs.eerd.addr(), regs.eerd.data());
452 }
453 break;
454 case REG_MDIC:
455 regs.mdic = val;
456 if (regs.mdic.i())
457 panic("No support for interrupt on mdic complete\n");
458 if (regs.mdic.phyadd() != 1)
459 panic("No support for reading anything but phy\n");
460 DPRINTF(Ethernet, "%s phy address %x\n",
461 regs.mdic.op() == 1 ? "Writing" : "Reading",
462 regs.mdic.regadd());
463 switch (regs.mdic.regadd()) {
464 case PHY_PSTATUS:
465 regs.mdic.data(0x796D); // link up
466 break;
467 case PHY_PID:
468 regs.mdic.data(params()->phy_pid);
469 break;
470 case PHY_EPID:
471 regs.mdic.data(params()->phy_epid);
472 break;
473 case PHY_GSTATUS:
474 regs.mdic.data(0x7C00);
475 break;
476 case PHY_EPSTATUS:
477 regs.mdic.data(0x3000);
478 break;
479 case PHY_AGC:
480 regs.mdic.data(0x180); // some random length
481 break;
482 default:
483 regs.mdic.data(0);
484 }
485 regs.mdic.r(1);
486 break;
487 case REG_ICR:
488 DPRINTF(Ethernet, "Writing ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n",
489 regs.icr(), regs.imr, regs.iam, regs.ctrl_ext.iame());
490 if (regs.ctrl_ext.iame())
491 regs.imr &= ~regs.iam;
492 regs.icr = ~bits(val,30,0) & regs.icr();
493 chkInterrupt();
494 break;
495 case REG_ITR:
496 regs.itr = val;
497 break;
498 case REG_ICS:
499 DPRINTF(EthernetIntr, "Posting interrupt because of ICS write\n");
500 postInterrupt((IntTypes)val);
501 break;
502 case REG_IMS:
503 regs.imr |= val;
504 chkInterrupt();
505 break;
506 case REG_IMC:
507 regs.imr &= ~val;
508 chkInterrupt();
509 break;
510 case REG_IAM:
511 regs.iam = val;
512 break;
513 case REG_RCTL:
514 oldrctl = regs.rctl;
515 regs.rctl = val;
516 if (regs.rctl.rst()) {
517 rxDescCache.reset();
518 DPRINTF(EthernetSM, "RXS: Got RESET!\n");
519 rxFifo.clear();
520 regs.rctl.rst(0);
521 }
522 if (regs.rctl.en())
523 rxTick = true;
524 restartClock();
525 break;
526 case REG_FCTTV:
527 regs.fcttv = val;
528 break;
529 case REG_TCTL:
530 regs.tctl = val;
531 oldtctl = regs.tctl;
532 regs.tctl = val;
533 if (regs.tctl.en())
534 txTick = true;
535 restartClock();
536 if (regs.tctl.en() && !oldtctl.en()) {
537 txDescCache.reset();
538 }
539 break;
540 case REG_PBA:
541 regs.pba.rxa(val);
542 regs.pba.txa(64 - regs.pba.rxa());
543 break;
544 case REG_WUC:
545 case REG_LEDCTL:
546 case REG_FCAL:
547 case REG_FCAH:
548 case REG_FCT:
549 case REG_VET:
550 case REG_AIFS:
551 case REG_TIPG:
552 ; // We don't care, so don't store anything
553 break;
554 case REG_IVAR0:
555 warn("Writing to IVAR0, ignoring...\n");
556 break;
557 case REG_FCRTL:
558 regs.fcrtl = val;
559 break;
560 case REG_FCRTH:
561 regs.fcrth = val;
562 break;
563 case REG_RDBAL:
564 regs.rdba.rdbal( val & ~mask(4));
565 rxDescCache.areaChanged();
566 break;
567 case REG_RDBAH:
568 regs.rdba.rdbah(val);
569 rxDescCache.areaChanged();
570 break;
571 case REG_RDLEN:
572 regs.rdlen = val & ~mask(7);
573 rxDescCache.areaChanged();
574 break;
575 case REG_SRRCTL:
576 regs.srrctl = val;
577 break;
578 case REG_RDH:
579 regs.rdh = val;
580 rxDescCache.areaChanged();
581 break;
582 case REG_RDT:
583 regs.rdt = val;
584 DPRINTF(EthernetSM, "RXS: RDT Updated.\n");
585 if (getState() == SimObject::Running) {
586 DPRINTF(EthernetSM, "RXS: RDT Fetching Descriptors!\n");
587 rxDescCache.fetchDescriptors();
588 } else {
589 DPRINTF(EthernetSM, "RXS: RDT NOT Fetching Desc b/c draining!\n");
590 }
591 break;
592 case REG_RDTR:
593 regs.rdtr = val;
594 break;
595 case REG_RADV:
596 regs.radv = val;
597 break;
598 case REG_RXDCTL:
599 regs.rxdctl = val;
600 break;
601 case REG_TDBAL:
602 regs.tdba.tdbal( val & ~mask(4));
603 txDescCache.areaChanged();
604 break;
605 case REG_TDBAH:
606 regs.tdba.tdbah(val);
607 txDescCache.areaChanged();
608 break;
609 case REG_TDLEN:
610 regs.tdlen = val & ~mask(7);
611 txDescCache.areaChanged();
612 break;
613 case REG_TDH:
614 regs.tdh = val;
615 txDescCache.areaChanged();
616 break;
617 case REG_TXDCA_CTL:
618 regs.txdca_ctl = val;
619 if (regs.txdca_ctl.enabled())
620 panic("No support for DCA\n");
621 break;
622 case REG_TDT:
623 regs.tdt = val;
624 DPRINTF(EthernetSM, "TXS: TX Tail pointer updated\n");
625 if (getState() == SimObject::Running) {
626 DPRINTF(EthernetSM, "TXS: TDT Fetching Descriptors!\n");
627 txDescCache.fetchDescriptors();
628 } else {
629 DPRINTF(EthernetSM, "TXS: TDT NOT Fetching Desc b/c draining!\n");
630 }
631 break;
632 case REG_TIDV:
633 regs.tidv = val;
634 break;
635 case REG_TXDCTL:
636 regs.txdctl = val;
637 break;
638 case REG_TADV:
639 regs.tadv = val;
640 break;
641 case REG_TDWBAL:
642 regs.tdwba &= ~mask(32);
643 regs.tdwba |= val;
644 txDescCache.completionWriteback(regs.tdwba & ~mask(1),
645 regs.tdwba & mask(1));
646 break;
647 case REG_TDWBAH:
648 regs.tdwba &= mask(32);
649 regs.tdwba |= (uint64_t)val << 32;
650 txDescCache.completionWriteback(regs.tdwba & ~mask(1),
651 regs.tdwba & mask(1));
652 break;
653 case REG_RXCSUM:
654 regs.rxcsum = val;
655 break;
656 case REG_RLPML:
657 regs.rlpml = val;
658 break;
659 case REG_RFCTL:
660 regs.rfctl = val;
661 if (regs.rfctl.exsten())
662 panic("Extended RX descriptors not implemented\n");
663 break;
664 case REG_MANC:
665 regs.manc = val;
666 break;
667 case REG_SWSM:
668 regs.swsm = val;
669 if (regs.fwsm.eep_fw_semaphore())
670 regs.swsm.swesmbi(0);
671 break;
672 case REG_SWFWSYNC:
673 regs.sw_fw_sync = val;
674 break;
675 default:
676 if (!IN_RANGE(daddr, REG_VFTA, VLAN_FILTER_TABLE_SIZE*4) &&
677 !IN_RANGE(daddr, REG_RAL, RCV_ADDRESS_TABLE_SIZE*8) &&
678 !IN_RANGE(daddr, REG_MTA, MULTICAST_TABLE_SIZE*4))
679 panic("Write request to unknown register number: %#x\n", daddr);
680 };
681
682 pkt->makeAtomicResponse();
683 return pioDelay;
684 }
685
// Record interrupt cause t in ICR and deliver it to the CPU, honoring
// the ITR moderation timer: if no throttling applies (interval 0,
// 'now' forced, or enough time since the last interrupt) post
// immediately, otherwise schedule delivery for when the interval
// expires.
void
IGbE::postInterrupt(IntTypes t, bool now)
{
    assert(t);

    // Interrupt is already pending
    if (t & regs.icr() && !now)
        return;

    regs.icr = regs.icr() | t;

    // ITR interval is in units of 256 ns
    Tick itr_interval = SimClock::Int::ns * 256 * regs.itr.interval();
    DPRINTF(EthernetIntr,
            "EINT: postInterrupt() curTick(): %d itr: %d interval: %d\n",
            curTick(), regs.itr.interval(), itr_interval);

    if (regs.itr.interval() == 0 || now ||
        lastInterrupt + itr_interval <= curTick()) {
        if (interEvent.scheduled()) {
            deschedule(interEvent);
        }
        cpuPostInt();
    } else {
        Tick int_time = lastInterrupt + itr_interval;
        assert(int_time > 0);
        DPRINTF(EthernetIntr, "EINT: Scheduling timer interrupt for tick %d\n",
                int_time);
        if (!interEvent.scheduled()) {
            schedule(interEvent, int_time);
        }
    }
}
718
// Callback for interEvent: the ITR moderation delay has elapsed, so
// post the pending interrupt to the CPU now.
void
IGbE::delayIntEvent()
{
    cpuPostInt();
}
724
725
// Actually assert the interrupt line to the CPU (if unmasked). Any
// pending coalescing timers (RDTR/RADV/TIDV/TADV) are folded into this
// interrupt: their cause bits are set and the events cancelled, so the
// driver sees everything in one service pass.
void
IGbE::cpuPostInt()
{

    postedInterrupts++;

    if (!(regs.icr() & regs.imr)) {
        DPRINTF(Ethernet, "Interrupt Masked. Not Posting\n");
        return;
    }

    DPRINTF(Ethernet, "Posting Interrupt\n");


    if (interEvent.scheduled()) {
        deschedule(interEvent);
    }

    if (rdtrEvent.scheduled()) {
        regs.icr.rxt0(1);
        deschedule(rdtrEvent);
    }
    if (radvEvent.scheduled()) {
        regs.icr.rxt0(1);
        deschedule(radvEvent);
    }
    if (tadvEvent.scheduled()) {
        regs.icr.txdw(1);
        deschedule(tadvEvent);
    }
    if (tidvEvent.scheduled()) {
        regs.icr.txdw(1);
        deschedule(tidvEvent);
    }

    regs.icr.int_assert(1);
    DPRINTF(EthernetIntr, "EINT: Posting interrupt to CPU now. Vector %#x\n",
            regs.icr());

    intrPost();

    // Remember when we fired for ITR throttling in postInterrupt()
    lastInterrupt = curTick();
}
769
770 void
771 IGbE::cpuClearInt()
772 {
773 if (regs.icr.int_assert()) {
774 regs.icr.int_assert(0);
775 DPRINTF(EthernetIntr,
776 "EINT: Clearing interrupt to CPU now. Vector %#x\n",
777 regs.icr());
778 intrClear();
779 }
780 }
781
// Re-evaluate the interrupt line after ICR/IMR changed: clear the CPU
// interrupt if everything is now masked, or (re)post/schedule one if an
// unmasked cause is pending, respecting the ITR moderation interval.
void
IGbE::chkInterrupt()
{
    DPRINTF(Ethernet, "Checking interrupts icr: %#x imr: %#x\n", regs.icr(),
            regs.imr);
    // Check if we need to clear the cpu interrupt
    if (!(regs.icr() & regs.imr)) {
        DPRINTF(Ethernet, "Mask cleaned all interrupts\n");
        if (interEvent.scheduled())
            deschedule(interEvent);
        if (regs.icr.int_assert())
            cpuClearInt();
    }
    DPRINTF(Ethernet, "ITR = %#X itr.interval = %#X\n",
            regs.itr(), regs.itr.interval());

    if (regs.icr() & regs.imr) {
        if (regs.itr.interval() == 0) {
            cpuPostInt();
        } else {
            DPRINTF(Ethernet,
                    "Possibly scheduling interrupt because of imr write\n");
            if (!interEvent.scheduled()) {
                // ITR interval is in units of 256 ns
                Tick t = curTick() + SimClock::Int::ns * 256 * regs.itr.interval();
                DPRINTF(Ethernet, "Scheduling for %d\n", t);
                schedule(interEvent, t);
            }
        }
    }
}
812
813
814 ///////////////////////////// IGbE::DescCache //////////////////////////////
815
// Construct a descriptor cache holding up to s descriptors of type T,
// with staging buffers for DMA fetches and writebacks.
template<class T>
IGbE::DescCache<T>::DescCache(IGbE *i, const std::string n, int s)
    : igbe(i), _name(n), cachePnt(0), size(s), curFetching(0),
      wbOut(0), pktPtr(NULL), wbDelayEvent(this),
      fetchDelayEvent(this), fetchEvent(this), wbEvent(this)
{
    // Staging buffers shared by all fetch/writeback DMAs
    fetchBuf = new T[size];
    wbBuf = new T[size];
}
825
826 template<class T>
827 IGbE::DescCache<T>::~DescCache()
828 {
829 reset();
830 }
831
832 template<class T>
833 void
834 IGbE::DescCache<T>::areaChanged()
835 {
836 if (usedCache.size() > 0 || curFetching || wbOut)
837 panic("Descriptor Address, Length or Head changed. Bad\n");
838 reset();
839
840 }
841
// Write used descriptors back to guest memory. aMask is an alignment
// mask: only a multiple of (aMask+1) descriptors is written unless the
// burst wraps the end of the ring. If a writeback is already in flight
// we just remember the (possibly less restrictive) mask and retry when
// it completes.
template<class T>
void
IGbE::DescCache<T>::writeback(Addr aMask)
{
    int curHead = descHead();
    int max_to_wb = usedCache.size();

    // Check if this writeback is less restrictive that the previous
    // and if so setup another one immediately following it
    if (wbOut) {
        if (aMask < wbAlignment) {
            moreToWb = true;
            wbAlignment = aMask;
        }
        DPRINTF(EthernetDesc,
                "Writing back already in process, returning\n");
        return;
    }

    moreToWb = false;
    wbAlignment = aMask;


    DPRINTF(EthernetDesc, "Writing back descriptors head: %d tail: "
            "%d len: %d cachePnt: %d max_to_wb: %d descleft: %d\n",
            curHead, descTail(), descLen(), cachePnt, max_to_wb,
            descLeft());

    if (max_to_wb + curHead >= descLen()) {
        // Burst wraps the ring: write up to the end now, rest later
        max_to_wb = descLen() - curHead;
        moreToWb = true;
        // this is by definition aligned correctly
    } else if (wbAlignment != 0) {
        // align the wb point to the mask
        max_to_wb = max_to_wb & ~wbAlignment;
    }

    DPRINTF(EthernetDesc, "Writing back %d descriptors\n", max_to_wb);

    if (max_to_wb <= 0) {
        // Nothing writable yet; annotate why we're waiting
        if (usedCache.size())
            igbe->anBegin(annSmWb, "Wait Alignment", CPA::FL_WAIT);
        else
            igbe->anWe(annSmWb, annUsedCacheQ);
        return;
    }

    wbOut = max_to_wb;

    // Model the writeback preparation latency before starting the DMA
    assert(!wbDelayEvent.scheduled());
    igbe->schedule(wbDelayEvent, curTick() + igbe->wbDelay);
    igbe->anBegin(annSmWb, "Prepare Writeback Desc");
}
895
// Second phase of writeback (after wbDelay): copy the descriptors into
// the staging buffer and start the DMA write. Re-arms itself while the
// simulator is draining so no new DMA is issued then.
template<class T>
void
IGbE::DescCache<T>::writeback1()
{
    // If we're draining delay issuing this DMA
    if (igbe->getState() != SimObject::Running) {
        igbe->schedule(wbDelayEvent, curTick() + igbe->wbDelay);
        return;
    }

    DPRINTF(EthernetDesc, "Begining DMA of %d descriptors\n", wbOut);

    for (int x = 0; x < wbOut; x++) {
        assert(usedCache.size());
        memcpy(&wbBuf[x], usedCache[x], sizeof(T));
        igbe->anPq(annSmWb, annUsedCacheQ);
        igbe->anPq(annSmWb, annDescQ);
        igbe->anQ(annSmWb, annUsedDescQ);
    }


    igbe->anBegin(annSmWb, "Writeback Desc DMA");

    // wbEvent fires wbComplete() when the DMA write finishes
    assert(wbOut);
    igbe->dmaWrite(pciToDma(descBase() + descHead() * sizeof(T)),
                   wbOut * sizeof(T), &wbEvent, (uint8_t*)wbBuf,
                   igbe->wbCompDelay);
}
924
// Start fetching descriptors from guest memory into the cache. Fetches
// at most up to the ring tail (or the ring end, whichever is first) and
// no more than the free cache space. No-op if a fetch is already in
// flight or there is nothing to fetch.
template<class T>
void
IGbE::DescCache<T>::fetchDescriptors()
{
    size_t max_to_fetch;

    if (curFetching) {
        DPRINTF(EthernetDesc,
                "Currently fetching %d descriptors, returning\n",
                curFetching);
        return;
    }

    // Fetch only the contiguous run up to the tail or the ring end
    if (descTail() >= cachePnt)
        max_to_fetch = descTail() - cachePnt;
    else
        max_to_fetch = descLen() - cachePnt;

    size_t free_cache = size - usedCache.size() - unusedCache.size();

    // CPA annotations describing why we may be stalled
    if (!max_to_fetch)
        igbe->anWe(annSmFetch, annUnusedDescQ);
    else
        igbe->anPq(annSmFetch, annUnusedDescQ, max_to_fetch);

    if (max_to_fetch) {
        if (!free_cache)
            igbe->anWf(annSmFetch, annDescQ);
        else
            igbe->anRq(annSmFetch, annDescQ, free_cache);
    }

    max_to_fetch = std::min(max_to_fetch, free_cache);


    DPRINTF(EthernetDesc, "Fetching descriptors head: %d tail: "
            "%d len: %d cachePnt: %d max_to_fetch: %d descleft: %d\n",
            descHead(), descTail(), descLen(), cachePnt,
            max_to_fetch, descLeft());

    // Nothing to do
    if (max_to_fetch == 0)
        return;

    // So we don't have two descriptor fetches going on at once
    curFetching = max_to_fetch;

    // Model the fetch preparation latency before starting the DMA
    assert(!fetchDelayEvent.scheduled());
    igbe->schedule(fetchDelayEvent, curTick() + igbe->fetchDelay);
    igbe->anBegin(annSmFetch, "Prepare Fetch Desc");
}
976
// Second phase of the fetch (after fetchDelay): issue the DMA read of
// curFetching descriptors into the staging buffer. Re-arms itself while
// the simulator is draining.
template<class T>
void
IGbE::DescCache<T>::fetchDescriptors1()
{
    // If we're draining delay issuing this DMA
    if (igbe->getState() != SimObject::Running) {
        igbe->schedule(fetchDelayEvent, curTick() + igbe->fetchDelay);
        return;
    }

    igbe->anBegin(annSmFetch, "Fetch Desc");

    DPRINTF(EthernetDesc, "Fetching descriptors at %#x (%#x), size: %#x\n",
            descBase() + cachePnt * sizeof(T),
            pciToDma(descBase() + cachePnt * sizeof(T)),
            curFetching * sizeof(T));
    // fetchEvent fires fetchComplete() when the DMA read finishes
    assert(curFetching);
    igbe->dmaRead(pciToDma(descBase() + cachePnt * sizeof(T)),
                  curFetching * sizeof(T), &fetchEvent, (uint8_t*)fetchBuf,
                  igbe->fetchCompDelay);
}
998
// DMA-read completion: copy each fetched descriptor out of the staging
// buffer into the unused cache, advance the cache pointer (wrapping at
// the ring length), and wake the owning state machine.
template<class T>
void
IGbE::DescCache<T>::fetchComplete()
{
    T *newDesc;
    igbe->anBegin(annSmFetch, "Fetch Complete");
    for (int x = 0; x < curFetching; x++) {
        newDesc = new T;
        memcpy(newDesc, &fetchBuf[x], sizeof(T));
        unusedCache.push_back(newDesc);
        igbe->anDq(annSmFetch, annUnusedDescQ);
        igbe->anQ(annSmFetch, annUnusedCacheQ);
        igbe->anQ(annSmFetch, annDescQ);
    }


#ifndef NDEBUG
    int oldCp = cachePnt;
#endif

    cachePnt += curFetching;
    assert(cachePnt <= descLen());
    if (cachePnt == descLen())
        cachePnt = 0;

    curFetching = 0;

    DPRINTF(EthernetDesc, "Fetching complete cachePnt %d -> %d\n",
            oldCp, cachePnt);

    // Annotate the new state: out of descriptors, cache full, or waiting
    if ((descTail() >= cachePnt ? (descTail() - cachePnt) : (descLen() -
                                                             cachePnt)) == 0)
    {
        igbe->anWe(annSmFetch, annUnusedDescQ);
    } else if (!(size - usedCache.size() - unusedCache.size())) {
        igbe->anWf(annSmFetch, annDescQ);
    } else {
        igbe->anBegin(annSmFetch, "Wait", CPA::FL_WAIT);
    }

    enableSm();
    igbe->checkDrain();
}
1042
// DMA-write completion: retire the written-back descriptors from the
// used cache, advance the guest-visible head pointer (wrapping at the
// ring length), run the post-writeback hook, and restart any deferred
// writeback or fetch.
template<class T>
void
IGbE::DescCache<T>::wbComplete()
{

    igbe->anBegin(annSmWb, "Finish Writeback");

    long curHead = descHead();
#ifndef NDEBUG
    long oldHead = curHead;
#endif

    for (int x = 0; x < wbOut; x++) {
        assert(usedCache.size());
        delete usedCache[0];
        usedCache.pop_front();

        igbe->anDq(annSmWb, annUsedCacheQ);
        igbe->anDq(annSmWb, annDescQ);
    }

    curHead += wbOut;
    wbOut = 0;

    if (curHead >= descLen())
        curHead -= descLen();

    // Update the head
    updateHead(curHead);

    DPRINTF(EthernetDesc, "Writeback complete curHead %d -> %d\n",
            oldHead, curHead);

    // If we still have more to wb, call wb now
    actionAfterWb();
    if (moreToWb) {
        moreToWb = false;
        DPRINTF(EthernetDesc, "Writeback has more todo\n");
        writeback(wbAlignment);
    }

    if (!wbOut) {
        igbe->checkDrain();
        if (usedCache.size())
            igbe->anBegin(annSmWb, "Wait", CPA::FL_WAIT);
        else
            igbe->anWe(annSmWb, annUsedCacheQ);
    }
    fetchAfterWb();
}
1093
1094 template<class T>
1095 void
1096 IGbE::DescCache<T>::reset()
1097 {
1098 DPRINTF(EthernetDesc, "Reseting descriptor cache\n");
1099 for (typename CacheType::size_type x = 0; x < usedCache.size(); x++)
1100 delete usedCache[x];
1101 for (typename CacheType::size_type x = 0; x < unusedCache.size(); x++)
1102 delete unusedCache[x];
1103
1104 usedCache.clear();
1105 unusedCache.clear();
1106
1107 cachePnt = 0;
1108
1109 }
1110
// Checkpoint the descriptor cache: scalar state, the raw bytes of every
// cached descriptor, and the absolute ticks of any pending delay events
// (0 means not scheduled).
template<class T>
void
IGbE::DescCache<T>::serialize(std::ostream &os)
{
    SERIALIZE_SCALAR(cachePnt);
    SERIALIZE_SCALAR(curFetching);
    SERIALIZE_SCALAR(wbOut);
    SERIALIZE_SCALAR(moreToWb);
    SERIALIZE_SCALAR(wbAlignment);

    typename CacheType::size_type usedCacheSize = usedCache.size();
    SERIALIZE_SCALAR(usedCacheSize);
    for (typename CacheType::size_type x = 0; x < usedCacheSize; x++) {
        arrayParamOut(os, csprintf("usedCache_%d", x),
                      (uint8_t*)usedCache[x],sizeof(T));
    }

    typename CacheType::size_type unusedCacheSize = unusedCache.size();
    SERIALIZE_SCALAR(unusedCacheSize);
    for (typename CacheType::size_type x = 0; x < unusedCacheSize; x++) {
        arrayParamOut(os, csprintf("unusedCache_%d", x),
                      (uint8_t*)unusedCache[x],sizeof(T));
    }

    Tick fetch_delay = 0, wb_delay = 0;
    if (fetchDelayEvent.scheduled())
        fetch_delay = fetchDelayEvent.when();
    SERIALIZE_SCALAR(fetch_delay);
    if (wbDelayEvent.scheduled())
        wb_delay = wbDelayEvent.when();
    SERIALIZE_SCALAR(wb_delay);


}
1145
// Restore the descriptor cache from a checkpoint: mirror image of
// serialize(). Delay events are rescheduled at their saved absolute
// ticks; a saved tick of 0 means the event was not pending.
template<class T>
void
IGbE::DescCache<T>::unserialize(Checkpoint *cp, const std::string &section)
{
    UNSERIALIZE_SCALAR(cachePnt);
    UNSERIALIZE_SCALAR(curFetching);
    UNSERIALIZE_SCALAR(wbOut);
    UNSERIALIZE_SCALAR(moreToWb);
    UNSERIALIZE_SCALAR(wbAlignment);

    typename CacheType::size_type usedCacheSize;
    UNSERIALIZE_SCALAR(usedCacheSize);
    T *temp;
    for (typename CacheType::size_type x = 0; x < usedCacheSize; x++) {
        temp = new T;
        arrayParamIn(cp, section, csprintf("usedCache_%d", x),
                     (uint8_t*)temp,sizeof(T));
        usedCache.push_back(temp);
    }

    typename CacheType::size_type unusedCacheSize;
    UNSERIALIZE_SCALAR(unusedCacheSize);
    for (typename CacheType::size_type x = 0; x < unusedCacheSize; x++) {
        temp = new T;
        arrayParamIn(cp, section, csprintf("unusedCache_%d", x),
                     (uint8_t*)temp,sizeof(T));
        unusedCache.push_back(temp);
    }
    Tick fetch_delay = 0, wb_delay = 0;
    UNSERIALIZE_SCALAR(fetch_delay);
    UNSERIALIZE_SCALAR(wb_delay);
    if (fetch_delay)
        igbe->schedule(fetchDelayEvent, fetch_delay);
    if (wb_delay)
        igbe->schedule(wbDelayEvent, wb_delay);


}
1184
1185 ///////////////////////////// IGbE::RxDescCache //////////////////////////////
1186
// RX specialization of the descriptor cache: sets up the CPA annotation
// labels used by the RX fetch/writeback state machines.
IGbE::RxDescCache::RxDescCache(IGbE *i, const std::string n, int s)
    : DescCache<RxDesc>(i, n, s), pktDone(false), splitCount(0),
      pktEvent(this), pktHdrEvent(this), pktDataEvent(this)

{
    annSmFetch = "RX Desc Fetch";
    annSmWb = "RX Desc Writeback";
    annUnusedDescQ = "RX Unused Descriptors";
    annUnusedCacheQ = "RX Unused Descriptor Cache";
    annUsedCacheQ = "RX Used Descriptor Cache";
    annUsedDescQ = "RX Used Descriptors";
    annDescQ = "RX Descriptors";
}
1200
1201 void
1202 IGbE::RxDescCache::pktSplitDone()
1203 {
1204 splitCount++;
1205 DPRINTF(EthernetDesc,
1206 "Part of split packet done: splitcount now %d\n", splitCount);
1207 assert(splitCount <= 2);
1208 if (splitCount != 2)
1209 return;
1210 splitCount = 0;
1211 DPRINTF(EthernetDesc,
1212 "Part of split packet done: calling pktComplete()\n");
1213 pktComplete();
1214 }
1215
int
IGbE::RxDescCache::writePacket(EthPacketPtr packet, int pkt_offset)
{
    // DMA a received packet (or the next chunk of it, for header-split
    // descriptors) into the buffer(s) named by the front unused
    // descriptor.  pkt_offset is the number of packet bytes already
    // copied by previous descriptors; returns the running byte count
    // copied so far (bytesCopied).
    assert(unusedCache.size());
    //if (!unusedCache.size())
    //    return false;

    pktPtr = packet;
    pktDone = false;
    unsigned buf_len, hdr_len;

    RxDesc *desc = unusedCache.front();
    switch (igbe->regs.srrctl.desctype()) {
      case RXDT_LEGACY:
        // Legacy descriptor: single buffer, whole packet in one shot.
        assert(pkt_offset == 0);
        bytesCopied = packet->length;
        DPRINTF(EthernetDesc, "Packet Length: %d Desc Size: %d\n",
                packet->length, igbe->regs.rctl.descSize());
        assert(packet->length < igbe->regs.rctl.descSize());
        igbe->dmaWrite(pciToDma(desc->legacy.buf),
                       packet->length, &pktEvent, packet->data,
                       igbe->rxWriteDelay);
        break;
      case RXDT_ADV_ONEBUF:
        // Advanced one-buffer descriptor: like legacy, but the writeback
        // format carries explicit header/packet lengths.
        assert(pkt_offset == 0);
        bytesCopied = packet->length;
        // With long-packet-enable the buffer size comes from SRRCTL,
        // otherwise from RCTL's descriptor size.
        buf_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.bufLen() :
            igbe->regs.rctl.descSize();
        DPRINTF(EthernetDesc, "Packet Length: %d srrctl: %#x Desc Size: %d\n",
                packet->length, igbe->regs.srrctl(), buf_len);
        assert(packet->length < buf_len);
        igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
                       packet->length, &pktEvent, packet->data,
                       igbe->rxWriteDelay);
        desc->adv_wb.header_len = htole(0);
        desc->adv_wb.sph = htole(0);
        desc->adv_wb.pkt_len = htole((uint16_t)(pktPtr->length));
        break;
      case RXDT_ADV_SPLIT_A:
        // Advanced header-split descriptor: protocol headers go to the
        // header buffer, payload to the packet buffer; large packets may
        // span several descriptors (tracked via pkt_offset).
        int split_point;

        buf_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.bufLen() :
            igbe->regs.rctl.descSize();
        hdr_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.hdrLen() : 0;
        DPRINTF(EthernetDesc,
                "lpe: %d Packet Length: %d offset: %d srrctl: %#x "
                "hdr addr: %#x Hdr Size: %d desc addr: %#x Desc Size: %d\n",
                igbe->regs.rctl.lpe(), packet->length, pkt_offset,
                igbe->regs.srrctl(), desc->adv_read.hdr, hdr_len,
                desc->adv_read.pkt, buf_len);

        // hsplit() finds where the protocol headers end; 0 means the
        // packet could not be split.
        split_point = hsplit(pktPtr);

        if (packet->length <= hdr_len) {
            // Entire packet fits in the header buffer.
            bytesCopied = packet->length;
            assert(pkt_offset == 0);
            DPRINTF(EthernetDesc, "Hdr split: Entire packet in header\n");
            igbe->dmaWrite(pciToDma(desc->adv_read.hdr),
                           packet->length, &pktEvent, packet->data,
                           igbe->rxWriteDelay);
            desc->adv_wb.header_len = htole((uint16_t)packet->length);
            desc->adv_wb.sph = htole(0);
            desc->adv_wb.pkt_len = htole(0);
        } else if (split_point) {
            if (pkt_offset) {
                // we are only copying some data, header/data has already been
                // copied
                int max_to_copy =
                    std::min(packet->length - pkt_offset, buf_len);
                bytesCopied += max_to_copy;
                DPRINTF(EthernetDesc,
                        "Hdr split: Continuing data buffer copy\n");
                igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
                               max_to_copy, &pktEvent,
                               packet->data + pkt_offset, igbe->rxWriteDelay);
                desc->adv_wb.header_len = htole(0);
                desc->adv_wb.pkt_len = htole((uint16_t)max_to_copy);
                desc->adv_wb.sph = htole(0);
            } else {
                // First descriptor of a split packet: issue two DMAs, one
                // for the headers and one for (as much as fits of) the
                // payload.  pktComplete() runs only after both finish
                // (see pktSplitDone()).
                int max_to_copy =
                    std::min(packet->length - split_point, buf_len);
                bytesCopied += max_to_copy + split_point;

                DPRINTF(EthernetDesc, "Hdr split: splitting at %d\n",
                        split_point);
                igbe->dmaWrite(pciToDma(desc->adv_read.hdr),
                               split_point, &pktHdrEvent,
                               packet->data, igbe->rxWriteDelay);
                igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
                               max_to_copy, &pktDataEvent,
                               packet->data + split_point, igbe->rxWriteDelay);
                desc->adv_wb.header_len = htole(split_point);
                desc->adv_wb.sph = 1;
                desc->adv_wb.pkt_len = htole((uint16_t)(max_to_copy));
            }
        } else {
            panic("Header split not fitting within header buffer or "
                  "undecodable packet not fitting in header unsupported\n");
        }
        break;
      default:
        panic("Unimplemnted RX receive buffer type: %d\n",
              igbe->regs.srrctl.desctype());
    }
    return bytesCopied;

}
1323
void
IGbE::RxDescCache::pktComplete()
{
    // Called when the DMA write(s) of a received packet into the front
    // descriptor's buffer(s) have finished.  Fills in the descriptor
    // writeback fields (length, status, errors, checksum offload results),
    // retires the descriptor, and raises the appropriate RX interrupts
    // once the whole packet has been copied.
    assert(unusedCache.size());
    RxDesc *desc;
    desc = unusedCache.front();

    igbe->anBegin("RXS", "Update Desc");

    // Unless the device strips the CRC (RCTL.SECRC), the reported length
    // includes the 4-byte Ethernet FCS.
    uint16_t crcfixup = igbe->regs.rctl.secrc() ? 0 : 4 ;
    DPRINTF(EthernetDesc, "pktPtr->length: %d bytesCopied: %d "
            "stripcrc offset: %d value written: %d %d\n",
            pktPtr->length, bytesCopied, crcfixup,
            htole((uint16_t)(pktPtr->length + crcfixup)),
            (uint16_t)(pktPtr->length + crcfixup));

    // no support for anything but starting at 0
    assert(igbe->regs.rxcsum.pcss() == 0);

    DPRINTF(EthernetDesc, "Packet written to memory updating Descriptor\n");

    uint16_t status = RXDS_DD;
    uint8_t err = 0;
    uint16_t ext_err = 0;
    uint16_t csum = 0;
    uint16_t ptype = 0;
    uint16_t ip_id = 0;

    // EOP is only set on the descriptor that receives the last byte of
    // the packet (split packets span several descriptors).
    assert(bytesCopied <= pktPtr->length);
    if (bytesCopied == pktPtr->length)
        status |= RXDS_EOP;

    IpPtr ip(pktPtr);

    if (ip) {
        DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n", ip->id());
        ptype |= RXDP_IPV4;
        ip_id = ip->id();

        // RX checksum offload: verify the IP header checksum if enabled.
        if (igbe->regs.rxcsum.ipofld()) {
            DPRINTF(EthernetDesc, "Checking IP checksum\n");
            status |= RXDS_IPCS;
            csum = htole(cksum(ip));
            igbe->rxIpChecksums++;
            // A correct checksum re-sums to 0; anything else is an error.
            if (cksum(ip) != 0) {
                err |= RXDE_IPE;
                ext_err |= RXDEE_IPE;
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
            }
        }
        TcpPtr tcp(ip);
        if (tcp && igbe->regs.rxcsum.tuofld()) {
            DPRINTF(EthernetDesc, "Checking TCP checksum\n");
            status |= RXDS_TCPCS;
            ptype |= RXDP_TCP;
            csum = htole(cksum(tcp));
            igbe->rxTcpChecksums++;
            if (cksum(tcp) != 0) {
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
                err |= RXDE_TCPE;
                ext_err |= RXDEE_TCPE;
            }
        }

        UdpPtr udp(ip);
        if (udp && igbe->regs.rxcsum.tuofld()) {
            DPRINTF(EthernetDesc, "Checking UDP checksum\n");
            status |= RXDS_UDPCS;
            ptype |= RXDP_UDP;
            csum = htole(cksum(udp));
            igbe->rxUdpChecksums++;
            if (cksum(udp) != 0) {
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
                // NOTE(review): a bad UDP checksum reuses the TCP error
                // bits (RXDEE_TCPE/RXDE_TCPE); no UDP-specific error bit
                // is defined here -- presumably intentional, but worth
                // confirming against the 8254x manual.
                ext_err |= RXDEE_TCPE;
                err |= RXDE_TCPE;
            }
        }
    } else { // if ip
        DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n");
    }

    // Write the computed fields back in the format the active descriptor
    // type expects.
    switch (igbe->regs.srrctl.desctype()) {
      case RXDT_LEGACY:
        desc->legacy.len = htole((uint16_t)(pktPtr->length + crcfixup));
        desc->legacy.status = htole(status);
        desc->legacy.errors = htole(err);
        // No vlan support at this point... just set it to 0
        desc->legacy.vlan = 0;
        break;
      case RXDT_ADV_SPLIT_A:
      case RXDT_ADV_ONEBUF:
        desc->adv_wb.rss_type = htole(0);
        desc->adv_wb.pkt_type = htole(ptype);
        if (igbe->regs.rxcsum.pcsd()) {
            // no rss support right now
            desc->adv_wb.rss_hash = htole(0);
        } else {
            desc->adv_wb.id = htole(ip_id);
            desc->adv_wb.csum = htole(csum);
        }
        desc->adv_wb.status = htole(status);
        desc->adv_wb.errors = htole(ext_err);
        // no vlan support
        desc->adv_wb.vlan_tag = htole(0);
        break;
      default:
        panic("Unimplemnted RX receive buffer type %d\n",
              igbe->regs.srrctl.desctype());
    }

    // Debug-only: dumps the first two descriptor words (read-format
    // union members) regardless of descriptor type.
    DPRINTF(EthernetDesc, "Descriptor complete w0: %#x w1: %#x\n",
            desc->adv_read.pkt, desc->adv_read.hdr);

    if (bytesCopied == pktPtr->length) {
        // Whole packet delivered: handle interrupt moderation.
        DPRINTF(EthernetDesc,
                "Packet completely written to descriptor buffers\n");
        // Deal with the rx timer interrupts
        if (igbe->regs.rdtr.delay()) {
            Tick delay = igbe->regs.rdtr.delay() * igbe->intClock();
            DPRINTF(EthernetSM, "RXS: Scheduling DTR for %d\n", delay);
            igbe->reschedule(igbe->rdtrEvent, curTick() + delay);
        }

        if (igbe->regs.radv.idv()) {
            Tick delay = igbe->regs.radv.idv() * igbe->intClock();
            DPRINTF(EthernetSM, "RXS: Scheduling ADV for %d\n", delay);
            if (!igbe->radvEvent.scheduled()) {
                igbe->schedule(igbe->radvEvent, curTick() + delay);
            }
        }

        // if neither radv or rdtr, maybe itr is set...
        if (!igbe->regs.rdtr.delay() && !igbe->regs.radv.idv()) {
            DPRINTF(EthernetSM,
                    "RXS: Receive interrupt delay disabled, posting IT_RXT\n");
            igbe->postInterrupt(IT_RXT);
        }

        // If the packet is small enough, interrupt appropriately
        // I wonder if this is delayed or not?!
        if (pktPtr->length <= igbe->regs.rsrpd.idv()) {
            DPRINTF(EthernetSM,
                    "RXS: Posting IT_SRPD beacuse small packet received\n");
            igbe->postInterrupt(IT_SRPD);
        }
        bytesCopied = 0;
    }

    // Retire the descriptor: move it from the unused to the used cache so
    // it is written back to memory later.
    pktPtr = NULL;
    igbe->checkDrain();
    enableSm();
    pktDone = true;

    igbe->anBegin("RXS", "Done Updating Desc");
    DPRINTF(EthernetDesc, "Processing of this descriptor complete\n");
    igbe->anDq("RXS", annUnusedCacheQ);
    unusedCache.pop_front();
    igbe->anQ("RXS", annUsedCacheQ);
    usedCache.push_back(desc);
}
1484
1485 void
1486 IGbE::RxDescCache::enableSm()
1487 {
1488 if (!igbe->drainEvent) {
1489 igbe->rxTick = true;
1490 igbe->restartClock();
1491 }
1492 }
1493
1494 bool
1495 IGbE::RxDescCache::packetDone()
1496 {
1497 if (pktDone) {
1498 pktDone = false;
1499 return true;
1500 }
1501 return false;
1502 }
1503
1504 bool
1505 IGbE::RxDescCache::hasOutstandingEvents()
1506 {
1507 return pktEvent.scheduled() || wbEvent.scheduled() ||
1508 fetchEvent.scheduled() || pktHdrEvent.scheduled() ||
1509 pktDataEvent.scheduled();
1510
1511 }
1512
void
IGbE::RxDescCache::serialize(std::ostream &os)
{
    // Checkpoint RX-specific state on top of the base descriptor-cache
    // state; unserialize() must read these back in the same order.
    DescCache<RxDesc>::serialize(os);
    SERIALIZE_SCALAR(pktDone);
    SERIALIZE_SCALAR(splitCount);
    SERIALIZE_SCALAR(bytesCopied);
}
1521
void
IGbE::RxDescCache::unserialize(Checkpoint *cp, const std::string &section)
{
    // Restore RX-specific state; order mirrors serialize() above.
    DescCache<RxDesc>::unserialize(cp, section);
    UNSERIALIZE_SCALAR(pktDone);
    UNSERIALIZE_SCALAR(splitCount);
    UNSERIALIZE_SCALAR(bytesCopied);
}
1530
1531
1532 ///////////////////////////// IGbE::TxDescCache //////////////////////////////
1533
// TX descriptor cache: all TSO (TCP segmentation offload) bookkeeping
// starts zeroed/disabled; it is armed by a context descriptor in
// processContextDesc().
IGbE::TxDescCache::TxDescCache(IGbE *i, const std::string n, int s)
    : DescCache<TxDesc>(i,n, s), pktDone(false), isTcp(false),
      pktWaiting(false), completionAddress(0), completionEnabled(false),
      useTso(false), tsoHeaderLen(0), tsoMss(0), tsoTotalLen(0), tsoUsedLen(0),
      tsoPrevSeq(0), tsoPktPayloadBytes(0), tsoLoadedHeader(false),
      tsoPktHasHeader(false), tsoDescBytesUsed(0), tsoCopyBytes(0), tsoPkts(0),
      pktEvent(this), headerEvent(this), nullEvent(this)
{
    // Labels used by the CPA annotation framework for the TX descriptor
    // state machines and queues.
    annSmFetch = "TX Desc Fetch";
    annSmWb = "TX Desc Writeback";
    annUnusedDescQ = "TX Unused Descriptors";
    annUnusedCacheQ = "TX Unused Descriptor Cache";
    annUsedCacheQ = "TX Used Descriptor Cache";
    annUsedDescQ = "TX Used Descriptors";
    annDescQ = "TX Descriptors";
}
1550
void
IGbE::TxDescCache::processContextDesc()
{
    // Consume any context descriptors at the head of the unused cache.
    // Context descriptors carry no packet data; they configure checksum
    // offload and TSO parameters for the data descriptors that follow.
    assert(unusedCache.size());
    TxDesc *desc;

    DPRINTF(EthernetDesc, "Checking and processing context descriptors\n");

    while (!useTso && unusedCache.size() &&
           TxdOp::isContext(unusedCache.front())) {
        DPRINTF(EthernetDesc, "Got context descriptor type...\n");

        desc = unusedCache.front();
        DPRINTF(EthernetDesc, "Descriptor upper: %#x lower: %#X\n",
                desc->d1, desc->d2);


        // is this going to be a tcp or udp packet?
        isTcp = TxdOp::tcp(desc) ? true : false;

        // setup all the TSO variables, they'll be ignored if we don't use
        // tso for this connection
        tsoHeaderLen = TxdOp::hdrlen(desc);
        tsoMss  = TxdOp::mss(desc);

        if (TxdOp::isType(desc, TxdOp::TXD_CNXT) && TxdOp::tse(desc)) {
            // Legacy-style TSO: the context descriptor itself enables
            // segmentation and gives the total payload length.
            DPRINTF(EthernetDesc, "TCP offload enabled for packet hdrlen: "
                    "%d mss: %d paylen %d\n", TxdOp::hdrlen(desc),
                    TxdOp::mss(desc), TxdOp::getLen(desc));
            useTso = true;
            tsoTotalLen = TxdOp::getLen(desc);
            tsoLoadedHeader = false;
            tsoDescBytesUsed = 0;
            tsoUsedLen = 0;
            tsoPrevSeq = 0;
            tsoPktHasHeader = false;
            tsoPkts = 0;
            tsoCopyBytes = 0;
        }

        // Retire the context descriptor (it produces no data).
        TxdOp::setDd(desc);
        unusedCache.pop_front();
        igbe->anDq("TXS", annUnusedCacheQ);
        usedCache.push_back(desc);
        igbe->anQ("TXS", annUsedCacheQ);
    }

    if (!unusedCache.size())
        return;

    // Advanced-format TSO: the TSE bit lives on the data descriptor, with
    // header length/MSS taken from the previously seen context descriptor.
    desc = unusedCache.front();
    if (!useTso && TxdOp::isType(desc, TxdOp::TXD_ADVDATA) &&
        TxdOp::tse(desc)) {
        DPRINTF(EthernetDesc, "TCP offload(adv) enabled for packet "
                "hdrlen: %d mss: %d paylen %d\n",
                tsoHeaderLen, tsoMss, TxdOp::getTsoLen(desc));
        useTso = true;
        tsoTotalLen = TxdOp::getTsoLen(desc);
        tsoLoadedHeader = false;
        tsoDescBytesUsed = 0;
        tsoUsedLen = 0;
        tsoPrevSeq = 0;
        tsoPktHasHeader = false;
        tsoPkts = 0;
    }

    if (useTso && !tsoLoadedHeader) {
        // we need to fetch a header
        DPRINTF(EthernetDesc, "Starting DMA of TSO header\n");
        assert(TxdOp::isData(desc) && TxdOp::getLen(desc) >= tsoHeaderLen);
        pktWaiting = true;
        // tsoHeader is a fixed 256-byte staging buffer.
        assert(tsoHeaderLen <= 256);
        igbe->dmaRead(pciToDma(TxdOp::getBuf(desc)),
                      tsoHeaderLen, &headerEvent, tsoHeader, 0);
    }
}
1627
void
IGbE::TxDescCache::headerComplete()
{
    // DMA of the TSO header into the staging buffer finished.
    DPRINTF(EthernetDesc, "TSO: Fetching TSO header complete\n");
    pktWaiting = false;

    assert(unusedCache.size());
    TxDesc *desc = unusedCache.front();
    DPRINTF(EthernetDesc, "TSO: len: %d tsoHeaderLen: %d\n",
            TxdOp::getLen(desc), tsoHeaderLen);

    if (TxdOp::getLen(desc) == tsoHeaderLen) {
        // The descriptor contained only the header; retire it and start
        // pulling payload from the following descriptors.
        tsoDescBytesUsed = 0;
        tsoLoadedHeader = true;
        unusedCache.pop_front();
        usedCache.push_back(desc);
    } else {
        // I don't think this case happens, I think the headrer is always
        // it's own packet, if it wasn't it might be as simple as just
        // incrementing descBytesUsed by the header length, but I'm not
        // completely sure
        panic("TSO header part of bigger packet, not implemented\n");
    }
    enableSm();
    igbe->checkDrain();
}
1654
unsigned
IGbE::TxDescCache::getPacketSize(EthPacketPtr p)
{
    // Compute how many bytes the next DMA into packet p will add, so the
    // caller can reserve FIFO space.  Returns 0 when no descriptor is
    // available.  For TSO this also records the payload chunk size in
    // tsoCopyBytes for getPacketData()/pktComplete() to use.
    if (!unusedCache.size())
        return 0;

    DPRINTF(EthernetDesc, "Starting processing of descriptor\n");

    assert(!useTso || tsoLoadedHeader);
    TxDesc *desc = unusedCache.front();

    if (useTso) {
        DPRINTF(EthernetDesc, "getPacket(): TxDescriptor data "
                "d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
        DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %d mss: %d total: %d "
                "used: %d loaded hdr: %d\n", useTso, tsoHeaderLen, tsoMss,
                tsoTotalLen, tsoUsedLen, tsoLoadedHeader);

        // Copy whichever is smaller: the room left in this MSS-sized
        // segment, or the unread remainder of the current descriptor.
        if (tsoPktHasHeader)
            tsoCopyBytes =  std::min((tsoMss + tsoHeaderLen) - p->length,
                                     TxdOp::getLen(desc) - tsoDescBytesUsed);
        else
            tsoCopyBytes =  std::min(tsoMss,
                                     TxdOp::getLen(desc) - tsoDescBytesUsed);
        // A fresh segment also needs the replicated protocol header.
        unsigned pkt_size =
            tsoCopyBytes + (tsoPktHasHeader ? 0 : tsoHeaderLen);

        DPRINTF(EthernetDesc, "TSO: descBytesUsed: %d copyBytes: %d "
                "this descLen: %d\n",
                tsoDescBytesUsed, tsoCopyBytes, TxdOp::getLen(desc));
        DPRINTF(EthernetDesc, "TSO: pktHasHeader: %d\n", tsoPktHasHeader);
        DPRINTF(EthernetDesc, "TSO: Next packet is %d bytes\n", pkt_size);
        return pkt_size;
    }

    // Non-TSO: the whole descriptor buffer is copied.
    DPRINTF(EthernetDesc, "Next TX packet is %d bytes\n",
            TxdOp::getLen(unusedCache.front()));
    return TxdOp::getLen(desc);
}
1694
void
IGbE::TxDescCache::getPacketData(EthPacketPtr p)
{
    // Start the DMA read that appends the next descriptor's data (sized
    // by the preceding getPacketSize() call) to packet p.  pktComplete()
    // fires when the DMA finishes.
    assert(unusedCache.size());

    TxDesc *desc;
    desc = unusedCache.front();

    DPRINTF(EthernetDesc, "getPacketData(): TxDescriptor data "
            "d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
    assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) &&
           TxdOp::getLen(desc));

    pktPtr = p;

    pktWaiting = true;

    DPRINTF(EthernetDesc, "Starting DMA of packet at offset %d\n", p->length);

    if (useTso) {
        assert(tsoLoadedHeader);
        if (!tsoPktHasHeader) {
            // Each TSO segment starts with a copy of the previously
            // fetched protocol header from the staging buffer.
            DPRINTF(EthernetDesc,
                    "Loading TSO header (%d bytes) into start of packet\n",
                    tsoHeaderLen);
            memcpy(p->data, &tsoHeader,tsoHeaderLen);
            p->length +=tsoHeaderLen;
            tsoPktHasHeader = true;
        }
    }

    if (useTso) {
        // TSO: copy only this segment's slice of the descriptor buffer,
        // resuming from tsoDescBytesUsed within the buffer.
        DPRINTF(EthernetDesc,
                "Starting DMA of packet at offset %d length: %d\n",
                p->length, tsoCopyBytes);
        igbe->dmaRead(pciToDma(TxdOp::getBuf(desc))
                      + tsoDescBytesUsed,
                      tsoCopyBytes, &pktEvent, p->data + p->length,
                      igbe->txReadDelay);
        tsoDescBytesUsed += tsoCopyBytes;
        assert(tsoDescBytesUsed <= TxdOp::getLen(desc));
    } else {
        // Non-TSO: copy the descriptor's entire buffer.
        igbe->dmaRead(pciToDma(TxdOp::getBuf(desc)),
                      TxdOp::getLen(desc), &pktEvent, p->data + p->length,
                      igbe->txReadDelay);
    }
}
1742
void
IGbE::TxDescCache::pktComplete()
{
    // Called when a packet-data DMA read finishes.  Accounts the copied
    // bytes, retires descriptors, applies TSO/checksum-offload fixups to
    // the assembled packet, and handles writeback thresholds and TX
    // interrupt moderation.

    TxDesc *desc;
    assert(unusedCache.size());
    assert(pktPtr);

    igbe->anBegin("TXS", "Update Desc");

    DPRINTF(EthernetDesc, "DMA of packet complete\n");


    desc = unusedCache.front();
    assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) &&
           TxdOp::getLen(desc));

    DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n",
            desc->d1, desc->d2);

    // Set the length of the data in the EtherPacket
    if (useTso) {
        DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %d mss: %d total: %d "
                "used: %d loaded hdr: %d\n", useTso, tsoHeaderLen, tsoMss,
                tsoTotalLen, tsoUsedLen, tsoLoadedHeader);
        pktPtr->length += tsoCopyBytes;
        tsoUsedLen += tsoCopyBytes;
        DPRINTF(EthernetDesc, "TSO: descBytesUsed: %d copyBytes: %d\n",
                tsoDescBytesUsed, tsoCopyBytes);
    } else
        pktPtr->length += TxdOp::getLen(desc);



    // Not the final descriptor of this packet (no EOP for non-TSO, or a
    // TSO segment that still needs more payload): retire the descriptor
    // and report a partial (multi-descriptor) packet.
    if ((!TxdOp::eop(desc) && !useTso) ||
        (pktPtr->length < ( tsoMss  + tsoHeaderLen) &&
         tsoTotalLen != tsoUsedLen && useTso)) {
        assert(!useTso || (tsoDescBytesUsed == TxdOp::getLen(desc)));
        igbe->anDq("TXS", annUnusedCacheQ);
        unusedCache.pop_front();
        igbe->anQ("TXS", annUsedCacheQ);
        usedCache.push_back(desc);

        tsoDescBytesUsed = 0;
        pktDone = true;
        pktWaiting = false;
        pktMultiDesc = true;

        DPRINTF(EthernetDesc, "Partial Packet Descriptor of %d bytes Done\n",
                pktPtr->length);
        pktPtr = NULL;

        enableSm();
        igbe->checkDrain();
        return;
    }


    pktMultiDesc = false;
    // no support for vlans
    assert(!TxdOp::vle(desc));

    // we only support single packet descriptors at this point
    if (!useTso)
        assert(TxdOp::eop(desc));

    // set that this packet is done
    if (TxdOp::rs(desc))
        TxdOp::setDd(desc);

    DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n",
            desc->d1, desc->d2);

    if (useTso) {
        // TSO segment fixups: bump the IP id per emitted segment, rewrite
        // the IP/UDP length fields, and advance the TCP sequence number.
        IpPtr ip(pktPtr);
        if (ip) {
            DPRINTF(EthernetDesc, "TSO: Modifying IP header. Id + %d\n",
                    tsoPkts);
            ip->id(ip->id() + tsoPkts++);
            ip->len(pktPtr->length - EthPtr(pktPtr)->size());

            TcpPtr tcp(ip);
            if (tcp) {
                DPRINTF(EthernetDesc,
                        "TSO: Modifying TCP header. old seq %d + %d\n",
                        tcp->seq(), tsoPrevSeq);
                tcp->seq(tcp->seq() + tsoPrevSeq);
                // Only the final segment may carry FIN/PSH.
                if (tsoUsedLen != tsoTotalLen)
                    tcp->flags(tcp->flags() & ~9); // clear fin & psh
            }
            UdpPtr udp(ip);
            if (udp) {
                DPRINTF(EthernetDesc, "TSO: Modifying UDP header.\n");
                udp->len(pktPtr->length - EthPtr(pktPtr)->size());
            }
        }
        // Next segment's sequence number advances by the payload sent so
        // far.
        tsoPrevSeq = tsoUsedLen;
    }

    if (DTRACE(EthernetDesc)) {
        IpPtr ip(pktPtr);
        if (ip)
            DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n",
                    ip->id());
        else
            DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n");
    }

    // Checksums are only ofloaded for new descriptor types
    if (TxdOp::isData(desc) && ( TxdOp::ixsm(desc) || TxdOp::txsm(desc)) ) {
        DPRINTF(EthernetDesc, "Calculating checksums for packet\n");
        IpPtr ip(pktPtr);
        assert(ip);
        if (TxdOp::ixsm(desc)) {
            // IP header checksum: zero the field, then store the sum.
            ip->sum(0);
            ip->sum(cksum(ip));
            igbe->txIpChecksums++;
            DPRINTF(EthernetDesc, "Calculated IP checksum\n");
        }
        if (TxdOp::txsm(desc)) {
            TcpPtr tcp(ip);
            UdpPtr udp(ip);
            if (tcp) {
                tcp->sum(0);
                tcp->sum(cksum(tcp));
                igbe->txTcpChecksums++;
                DPRINTF(EthernetDesc, "Calculated TCP checksum\n");
            } else if (udp) {
                assert(udp);
                udp->sum(0);
                udp->sum(cksum(udp));
                igbe->txUdpChecksums++;
                DPRINTF(EthernetDesc, "Calculated UDP checksum\n");
            } else {
                panic("Told to checksum, but don't know how\n");
            }
        }
    }

    if (TxdOp::ide(desc)) {
        // Deal with the rx timer interrupts
        DPRINTF(EthernetDesc, "Descriptor had IDE set\n");
        if (igbe->regs.tidv.idv()) {
            Tick delay = igbe->regs.tidv.idv() * igbe->intClock();
            DPRINTF(EthernetDesc, "setting tidv\n");
            igbe->reschedule(igbe->tidvEvent, curTick() + delay, true);
        }

        // TADV (absolute delay) only applies when TIDV is also enabled.
        if (igbe->regs.tadv.idv() && igbe->regs.tidv.idv()) {
            Tick delay = igbe->regs.tadv.idv() * igbe->intClock();
            DPRINTF(EthernetDesc, "setting tadv\n");
            if (!igbe->tadvEvent.scheduled()) {
                igbe->schedule(igbe->tadvEvent, curTick() + delay);
            }
        }
    }


    // Retire the descriptor once it is fully consumed (always for
    // non-TSO; for TSO only when all of its buffer bytes were used).
    if (!useTso || TxdOp::getLen(desc) == tsoDescBytesUsed) {
        DPRINTF(EthernetDesc, "Descriptor Done\n");
        igbe->anDq("TXS", annUnusedCacheQ);
        unusedCache.pop_front();
        igbe->anQ("TXS", annUsedCacheQ);
        usedCache.push_back(desc);
        tsoDescBytesUsed = 0;
    }

    // All TSO payload sent: segmentation is finished for this flow.
    if (useTso && tsoUsedLen == tsoTotalLen)
        useTso = false;


    DPRINTF(EthernetDesc,
            "------Packet of %d bytes ready for transmission-------\n",
            pktPtr->length);
    pktDone = true;
    pktWaiting = false;
    pktPtr = NULL;
    tsoPktHasHeader = false;

    // Writeback policy per TXDCTL: immediate when WTHRESH == 0,
    // otherwise once enough used descriptors have accumulated.
    if (igbe->regs.txdctl.wthresh() == 0) {
        igbe->anBegin("TXS", "Desc Writeback");
        DPRINTF(EthernetDesc, "WTHRESH == 0, writing back descriptor\n");
        writeback(0);
    } else if (igbe->regs.txdctl.gran() && igbe->regs.txdctl.wthresh() >=
               descInBlock(usedCache.size())) {
        DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n");
        igbe->anBegin("TXS", "Desc Writeback");
        writeback((igbe->cacheBlockSize()-1)>>4);
    } else if (igbe->regs.txdctl.wthresh() >= usedCache.size()) {
        DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n");
        igbe->anBegin("TXS", "Desc Writeback");
        writeback((igbe->cacheBlockSize()-1)>>4);
    }

    enableSm();
    igbe->checkDrain();
}
1940
1941 void
1942 IGbE::TxDescCache::actionAfterWb()
1943 {
1944 DPRINTF(EthernetDesc, "actionAfterWb() completionEnabled: %d\n",
1945 completionEnabled);
1946 igbe->postInterrupt(iGbReg::IT_TXDW);
1947 if (completionEnabled) {
1948 descEnd = igbe->regs.tdh();
1949 DPRINTF(EthernetDesc,
1950 "Completion writing back value: %d to addr: %#x\n", descEnd,
1951 completionAddress);
1952 igbe->dmaWrite(pciToDma(mbits(completionAddress, 63, 2)),
1953 sizeof(descEnd), &nullEvent, (uint8_t*)&descEnd, 0);
1954 }
1955 }
1956
1957 void
1958 IGbE::TxDescCache::serialize(std::ostream &os)
1959 {
1960 DescCache<TxDesc>::serialize(os);
1961 SERIALIZE_SCALAR(pktDone);
1962 SERIALIZE_SCALAR(isTcp);
1963 SERIALIZE_SCALAR(pktWaiting);
1964 SERIALIZE_SCALAR(pktMultiDesc);
1965
1966 SERIALIZE_SCALAR(useTso);
1967 SERIALIZE_SCALAR(tsoHeaderLen);
1968 SERIALIZE_SCALAR(tsoMss);
1969 SERIALIZE_SCALAR(tsoTotalLen);
1970 SERIALIZE_SCALAR(tsoUsedLen);
1971 SERIALIZE_SCALAR(tsoPrevSeq);;
1972 SERIALIZE_SCALAR(tsoPktPayloadBytes);
1973 SERIALIZE_SCALAR(tsoLoadedHeader);
1974 SERIALIZE_SCALAR(tsoPktHasHeader);
1975 SERIALIZE_ARRAY(tsoHeader, 256);
1976 SERIALIZE_SCALAR(tsoDescBytesUsed);
1977 SERIALIZE_SCALAR(tsoCopyBytes);
1978 SERIALIZE_SCALAR(tsoPkts);
1979
1980 SERIALIZE_SCALAR(completionAddress);
1981 SERIALIZE_SCALAR(completionEnabled);
1982 SERIALIZE_SCALAR(descEnd);
1983 }
1984
1985 void
1986 IGbE::TxDescCache::unserialize(Checkpoint *cp, const std::string &section)
1987 {
1988 DescCache<TxDesc>::unserialize(cp, section);
1989 UNSERIALIZE_SCALAR(pktDone);
1990 UNSERIALIZE_SCALAR(isTcp);
1991 UNSERIALIZE_SCALAR(pktWaiting);
1992 UNSERIALIZE_SCALAR(pktMultiDesc);
1993
1994 UNSERIALIZE_SCALAR(useTso);
1995 UNSERIALIZE_SCALAR(tsoHeaderLen);
1996 UNSERIALIZE_SCALAR(tsoMss);
1997 UNSERIALIZE_SCALAR(tsoTotalLen);
1998 UNSERIALIZE_SCALAR(tsoUsedLen);
1999 UNSERIALIZE_SCALAR(tsoPrevSeq);;
2000 UNSERIALIZE_SCALAR(tsoPktPayloadBytes);
2001 UNSERIALIZE_SCALAR(tsoLoadedHeader);
2002 UNSERIALIZE_SCALAR(tsoPktHasHeader);
2003 UNSERIALIZE_ARRAY(tsoHeader, 256);
2004 UNSERIALIZE_SCALAR(tsoDescBytesUsed);
2005 UNSERIALIZE_SCALAR(tsoCopyBytes);
2006 UNSERIALIZE_SCALAR(tsoPkts);
2007
2008 UNSERIALIZE_SCALAR(completionAddress);
2009 UNSERIALIZE_SCALAR(completionEnabled);
2010 UNSERIALIZE_SCALAR(descEnd);
2011 }
2012
2013 bool
2014 IGbE::TxDescCache::packetAvailable()
2015 {
2016 if (pktDone) {
2017 pktDone = false;
2018 return true;
2019 }
2020 return false;
2021 }
2022
2023 void
2024 IGbE::TxDescCache::enableSm()
2025 {
2026 if (!igbe->drainEvent) {
2027 igbe->txTick = true;
2028 igbe->restartClock();
2029 }
2030 }
2031
2032 bool
2033 IGbE::TxDescCache::hasOutstandingEvents()
2034 {
2035 return pktEvent.scheduled() || wbEvent.scheduled() ||
2036 fetchEvent.scheduled();
2037 }
2038
2039
2040 ///////////////////////////////////// IGbE /////////////////////////////////
2041
2042 void
2043 IGbE::restartClock()
2044 {
2045 if (!tickEvent.scheduled() && (rxTick || txTick || txFifoTick) &&
2046 getState() == SimObject::Running)
2047 schedule(tickEvent, (curTick() / ticks(1)) * ticks(1) + ticks(1));
2048 }
2049
2050 unsigned int
2051 IGbE::drain(Event *de)
2052 {
2053 unsigned int count;
2054 count = pioPort->drain(de) + dmaPort->drain(de);
2055 if (rxDescCache.hasOutstandingEvents() ||
2056 txDescCache.hasOutstandingEvents()) {
2057 count++;
2058 drainEvent = de;
2059 }
2060
2061 txFifoTick = false;
2062 txTick = false;
2063 rxTick = false;
2064
2065 if (tickEvent.scheduled())
2066 deschedule(tickEvent);
2067
2068 if (count)
2069 changeState(Draining);
2070 else
2071 changeState(Drained);
2072
2073 DPRINTF(EthernetSM, "got drain() returning %d", count);
2074 return count;
2075 }
2076
2077 void
2078 IGbE::resume()
2079 {
2080 SimObject::resume();
2081
2082 txFifoTick = true;
2083 txTick = true;
2084 rxTick = true;
2085
2086 restartClock();
2087 DPRINTF(EthernetSM, "resuming from drain");
2088 }
2089
2090 void
2091 IGbE::checkDrain()
2092 {
2093 if (!drainEvent)
2094 return;
2095
2096 DPRINTF(EthernetSM, "checkDrain() in drain\n");
2097 txFifoTick = false;
2098 txTick = false;
2099 rxTick = false;
2100 if (!rxDescCache.hasOutstandingEvents() &&
2101 !txDescCache.hasOutstandingEvents()) {
2102 drainEvent->process();
2103 drainEvent = NULL;
2104 }
2105 }
2106
void
IGbE::txStateMachine()
{
    // One step of the TX state machine: move a finished packet into the
    // TX FIFO, or fetch/process descriptors and start the next packet
    // DMA.  Clears txTick (stopping the clock for this SM) whenever there
    // is nothing further to do.
    if (!regs.tctl.en()) {
        txTick = false;
        DPRINTF(EthernetSM, "TXS: TX disabled, stopping ticking\n");
        return;
    }

    // If we have a packet available and it's length is not 0 (meaning it's not
    // a multidescriptor packet) put it in the fifo, otherwise an the next
    // iteration we'll get the rest of the data
    if (txPacket && txDescCache.packetAvailable()
        && !txDescCache.packetMultiDesc() && txPacket->length) {
        bool success;

        anQ("TXS", "TX FIFO Q");
        DPRINTF(EthernetSM, "TXS: packet placed in TX FIFO\n");
        success = txFifo.push(txPacket);
        // FIFO draining stays paused while a drain() is pending.
        txFifoTick = true && !drainEvent;
        // Space was reserved before the DMA started, so push can't fail.
        assert(success);
        txPacket = NULL;
        anBegin("TXS", "Desc Writeback");
        txDescCache.writeback((cacheBlockSize()-1)>>4);
        return;
    }

    // Only support descriptor granularity
    if (regs.txdctl.lwthresh() &&
        txDescCache.descLeft() < (regs.txdctl.lwthresh() * 8)) {
        DPRINTF(EthernetSM, "TXS: LWTHRESH caused posting of TXDLOW\n");
        postInterrupt(IT_TXDLOW);
    }

    // Allocate a fresh (maximum-sized) packet buffer to assemble into.
    if (!txPacket) {
        txPacket = new EthPacketData(16384);
    }

    if (!txDescCache.packetWaiting()) {
        if (txDescCache.descLeft() == 0) {
            // Ring empty: force a writeback, prefetch, and sleep until
            // the driver provides more descriptors.
            postInterrupt(IT_TXQE);
            anBegin("TXS", "Desc Writeback");
            txDescCache.writeback(0);
            anBegin("TXS", "Desc Fetch");
            anWe("TXS", txDescCache.annUnusedCacheQ);
            txDescCache.fetchDescriptors();
            DPRINTF(EthernetSM, "TXS: No descriptors left in ring, forcing "
                    "writeback stopping ticking and posting TXQE\n");
            txTick = false;
            return;
        }


        if (!(txDescCache.descUnused())) {
            // Cache empty: start a fetch and sleep until it completes.
            anBegin("TXS", "Desc Fetch");
            txDescCache.fetchDescriptors();
            anWe("TXS", txDescCache.annUnusedCacheQ);
            DPRINTF(EthernetSM, "TXS: No descriptors available in cache, "
                    "fetching and stopping ticking\n");
            txTick = false;
            return;
        }
        anPq("TXS", txDescCache.annUnusedCacheQ);


        // Consume any leading context descriptors (may kick off a TSO
        // header fetch, in which case we wait for it).
        txDescCache.processContextDesc();
        if (txDescCache.packetWaiting()) {
            DPRINTF(EthernetSM,
                    "TXS: Fetching TSO header, stopping ticking\n");
            txTick = false;
            return;
        }

        unsigned size = txDescCache.getPacketSize(txPacket);
        if (size > 0 && txFifo.avail() > size) {
            // Reserve FIFO space up front so the post-DMA push can't fail.
            anRq("TXS", "TX FIFO Q");
            anBegin("TXS", "DMA Packet");
            DPRINTF(EthernetSM, "TXS: Reserving %d bytes in FIFO and "
                    "beginning DMA of next packet\n", size);
            txFifo.reserve(size);
            txDescCache.getPacketData(txPacket);
        } else if (size == 0) {
            DPRINTF(EthernetSM, "TXS: getPacketSize returned: %d\n", size);
            DPRINTF(EthernetSM,
                    "TXS: No packets to get, writing back used descriptors\n");
            anBegin("TXS", "Desc Writeback");
            txDescCache.writeback(0);
        } else {
            // FIFO full: sleep until transmitted packets free space.
            anWf("TXS", "TX FIFO Q");
            DPRINTF(EthernetSM, "TXS: FIFO full, stopping ticking until space "
                    "available in FIFO\n");
            txTick = false;
        }


        return;
    }
    DPRINTF(EthernetSM, "TXS: Nothing to do, stopping ticking\n");
    txTick = false;
}
2207
2208 bool
2209 IGbE::ethRxPkt(EthPacketPtr pkt)
2210 {
2211 rxBytes += pkt->length;
2212 rxPackets++;
2213
2214 DPRINTF(Ethernet, "RxFIFO: Receiving pcakte from wire\n");
2215 anBegin("RXQ", "Wire Recv");
2216
2217
2218 if (!regs.rctl.en()) {
2219 DPRINTF(Ethernet, "RxFIFO: RX not enabled, dropping\n");
2220 anBegin("RXQ", "FIFO Drop", CPA::FL_BAD);
2221 return true;
2222 }
2223
2224 // restart the state machines if they are stopped
2225 rxTick = true && !drainEvent;
2226 if ((rxTick || txTick) && !tickEvent.scheduled()) {
2227 DPRINTF(EthernetSM,
2228 "RXS: received packet into fifo, starting ticking\n");
2229 restartClock();
2230 }
2231
2232 if (!rxFifo.push(pkt)) {
2233 DPRINTF(Ethernet, "RxFIFO: Packet won't fit in fifo... dropped\n");
2234 postInterrupt(IT_RXO, true);
2235 anBegin("RXQ", "FIFO Drop", CPA::FL_BAD);
2236 return false;
2237 }
2238
2239 if (CPA::available() && cpa->enabled()) {
2240 assert(sys->numSystemsRunning <= 2);
2241 System *other_sys;
2242 if (sys->systemList[0] == sys)
2243 other_sys = sys->systemList[1];
2244 else
2245 other_sys = sys->systemList[0];
2246
2247 cpa->hwDq(CPA::FL_NONE, sys, macAddr, "RXQ", "WireQ", 0, other_sys);
2248 anQ("RXQ", "RX FIFO Q");
2249 cpa->hwWe(CPA::FL_NONE, sys, macAddr, "RXQ", "WireQ", 0, other_sys);
2250 }
2251
2252 return true;
2253 }
2254
2255
// Advance the receive-side state machine by one tick: finish bookkeeping
// for a completed packet DMA, or start/continue copying the packet at the
// head of the RX FIFO into guest memory via the descriptor cache.
void
IGbE::rxStateMachine()
{
    if (!regs.rctl.en()) {
        rxTick = false;
        DPRINTF(EthernetSM, "RXS: RX disabled, stopping ticking\n");
        return;
    }

    // If the packet is done check for interrupts/descriptors/etc
    if (rxDescCache.packetDone()) {
        rxDmaPacket = false;
        DPRINTF(EthernetSM, "RXS: Packet completed DMA to memory\n");
        int descLeft = rxDescCache.descLeft();
        DPRINTF(EthernetSM, "RXS: descLeft: %d rdmts: %d rdlen: %d\n",
                descLeft, regs.rctl.rdmts(), regs.rdlen());
        // Post RXDMT when the free descriptor count drops below the
        // RDMTS threshold (a fraction of RDLEN). The cases intentionally
        // fall through: if the strictest matching threshold is not
        // exceeded, no looser one is checked and the interrupt fires.
        switch (regs.rctl.rdmts()) {
          case 2: if (descLeft > .125 * regs.rdlen()) break;
          case 1: if (descLeft > .250 * regs.rdlen()) break;
          case 0: if (descLeft > .500 * regs.rdlen()) break;
            DPRINTF(Ethernet, "RXS: Interrupting (RXDMT) "
                    "because of descriptors left\n");
            postInterrupt(IT_RXDMT);
            break;
        }

        // Nothing left to receive: flush any used descriptors now.
        if (rxFifo.empty())
            rxDescCache.writeback(0);

        if (descLeft == 0) {
            anBegin("RXS", "Writeback Descriptors");
            rxDescCache.writeback(0);
            DPRINTF(EthernetSM, "RXS: No descriptors left in ring, forcing"
                    " writeback and stopping ticking\n");
            rxTick = false;
        }

        // only support descriptor granularities
        assert(regs.rxdctl.gran());

        if (regs.rxdctl.wthresh() >= rxDescCache.descUsed()) {
            DPRINTF(EthernetSM,
                    "RXS: Writing back because WTHRESH >= descUsed\n");
            anBegin("RXS", "Writeback Descriptors");
            // Cap the writeback burst at one cache block worth of
            // descriptors (each descriptor is 16 bytes, hence >>4).
            if (regs.rxdctl.wthresh() < (cacheBlockSize()>>4))
                rxDescCache.writeback(regs.rxdctl.wthresh()-1);
            else
                rxDescCache.writeback((cacheBlockSize()-1)>>4);
        }

        // Prefetch more descriptors when the cache is below PTHRESH and
        // the ring still has more than HTHRESH descriptors to hand out.
        if ((rxDescCache.descUnused() < regs.rxdctl.pthresh()) &&
            ((rxDescCache.descLeft() - rxDescCache.descUnused()) >
             regs.rxdctl.hthresh())) {
            DPRINTF(EthernetSM, "RXS: Fetching descriptors because "
                    "descUnused < PTHRESH\n");
            anBegin("RXS", "Fetch Descriptors");
            rxDescCache.fetchDescriptors();
        }

        if (rxDescCache.descUnused() == 0) {
            anBegin("RXS", "Fetch Descriptors");
            rxDescCache.fetchDescriptors();
            anWe("RXS", rxDescCache.annUnusedCacheQ);
            DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
                    "fetching descriptors and stopping ticking\n");
            rxTick = false;
        }
        return;
    }

    // A packet DMA is in flight; sleep until its completion restarts us.
    if (rxDmaPacket) {
        DPRINTF(EthernetSM,
                "RXS: stopping ticking until packet DMA completes\n");
        rxTick = false;
        return;
    }

    if (!rxDescCache.descUnused()) {
        anBegin("RXS", "Fetch Descriptors");
        rxDescCache.fetchDescriptors();
        anWe("RXS", rxDescCache.annUnusedCacheQ);
        DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
                "stopping ticking\n");
        rxTick = false;
        DPRINTF(EthernetSM, "RXS: No descriptors available, fetching\n");
        return;
    }
    anPq("RXS", rxDescCache.annUnusedCacheQ);

    if (rxFifo.empty()) {
        anWe("RXS", "RX FIFO Q");
        DPRINTF(EthernetSM, "RXS: RxFIFO empty, stopping ticking\n");
        rxTick = false;
        return;
    }
    anPq("RXS", "RX FIFO Q");
    anBegin("RXS", "Get Desc");

    EthPacketPtr pkt;
    pkt = rxFifo.front();


    // Continue (or begin) DMAing the head packet into guest memory;
    // writePacket() returns how many bytes of the packet have been
    // consumed so far, so a large packet may take several passes.
    pktOffset = rxDescCache.writePacket(pkt, pktOffset);
    DPRINTF(EthernetSM, "RXS: Writing packet into memory\n");
    if (pktOffset == pkt->length) {
        // Whole packet handed to the descriptor cache; dequeue it and
        // reset the resume offset for the next packet.
        anBegin( "RXS", "FIFO Dequeue");
        DPRINTF(EthernetSM, "RXS: Removing packet from FIFO\n");
        pktOffset = 0;
        anDq("RXS", "RX FIFO Q");
        rxFifo.pop();
    }

    DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n");
    rxTick = false;
    rxDmaPacket = true;
    anBegin("RXS", "DMA Packet");
}
2373
2374 void
2375 IGbE::txWire()
2376 {
2377 if (txFifo.empty()) {
2378 anWe("TXQ", "TX FIFO Q");
2379 txFifoTick = false;
2380 return;
2381 }
2382
2383
2384 anPq("TXQ", "TX FIFO Q");
2385 if (etherInt->sendPacket(txFifo.front())) {
2386 cpa->hwQ(CPA::FL_NONE, sys, macAddr, "TXQ", "WireQ", 0);
2387 if (DTRACE(EthernetSM)) {
2388 IpPtr ip(txFifo.front());
2389 if (ip)
2390 DPRINTF(EthernetSM, "Transmitting Ip packet with Id=%d\n",
2391 ip->id());
2392 else
2393 DPRINTF(EthernetSM, "Transmitting Non-Ip packet\n");
2394 }
2395 anDq("TXQ", "TX FIFO Q");
2396 anBegin("TXQ", "Wire Send");
2397 DPRINTF(EthernetSM,
2398 "TxFIFO: Successful transmit, bytes available in fifo: %d\n",
2399 txFifo.avail());
2400
2401 txBytes += txFifo.front()->length;
2402 txPackets++;
2403 txFifoTick = false;
2404
2405 txFifo.pop();
2406 } else {
2407 // We'll get woken up when the packet ethTxDone() gets called
2408 txFifoTick = false;
2409 }
2410 }
2411
2412 void
2413 IGbE::tick()
2414 {
2415 DPRINTF(EthernetSM, "IGbE: -------------- Cycle --------------\n");
2416
2417 if (rxTick)
2418 rxStateMachine();
2419
2420 if (txTick)
2421 txStateMachine();
2422
2423 if (txFifoTick)
2424 txWire();
2425
2426
2427 if (rxTick || txTick || txFifoTick)
2428 schedule(tickEvent, curTick() + ticks(1));
2429 }
2430
2431 void
2432 IGbE::ethTxDone()
2433 {
2434 anBegin("TXQ", "Send Done");
2435 // restart the tx state machines if they are stopped
2436 // fifo to send another packet
2437 // tx sm to put more data into the fifo
2438 txFifoTick = true && !drainEvent;
2439 if (txDescCache.descLeft() != 0 && !drainEvent)
2440 txTick = true;
2441
2442 restartClock();
2443 txWire();
2444 DPRINTF(EthernetSM, "TxFIFO: Transmission complete\n");
2445 }
2446
// Write the device's state to a checkpoint stream. NOTE: the local
// variable names below (rdtr_time, etc.) are stringified by the
// SERIALIZE_SCALAR macros and become checkpoint keys, so they must match
// the names used in unserialize().
void
IGbE::serialize(std::ostream &os)
{
    PciDev::serialize(os);

    regs.serialize(os);
    SERIALIZE_SCALAR(eeOpBits);
    SERIALIZE_SCALAR(eeAddrBits);
    SERIALIZE_SCALAR(eeDataBits);
    SERIALIZE_SCALAR(eeOpcode);
    SERIALIZE_SCALAR(eeAddr);
    SERIALIZE_SCALAR(lastInterrupt);
    SERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);

    rxFifo.serialize("rxfifo", os);
    txFifo.serialize("txfifo", os);

    // txPacket may be null; record its existence so unserialize() knows
    // whether a packet payload follows.
    bool txPktExists = txPacket;
    SERIALIZE_SCALAR(txPktExists);
    if (txPktExists)
        txPacket->serialize("txpacket", os);

    // Record pending event times (0 means "not scheduled").
    Tick rdtr_time = 0, radv_time = 0, tidv_time = 0, tadv_time = 0,
        inter_time = 0;

    if (rdtrEvent.scheduled())
        rdtr_time = rdtrEvent.when();
    SERIALIZE_SCALAR(rdtr_time);

    if (radvEvent.scheduled())
        radv_time = radvEvent.when();
    SERIALIZE_SCALAR(radv_time);

    if (tidvEvent.scheduled())
        tidv_time = tidvEvent.when();
    SERIALIZE_SCALAR(tidv_time);

    if (tadvEvent.scheduled())
        tadv_time = tadvEvent.when();
    SERIALIZE_SCALAR(tadv_time);

    if (interEvent.scheduled())
        inter_time = interEvent.when();
    SERIALIZE_SCALAR(inter_time);

    // Resume offset of a partially-DMAed RX packet.
    SERIALIZE_SCALAR(pktOffset);

    nameOut(os, csprintf("%s.TxDescCache", name()));
    txDescCache.serialize(os);

    nameOut(os, csprintf("%s.RxDescCache", name()));
    rxDescCache.serialize(os);
}
2500
// Restore the device's state from a checkpoint. Field names and order
// must mirror serialize() exactly (the UNSERIALIZE_SCALAR macros look up
// keys by the stringified local variable name).
void
IGbE::unserialize(Checkpoint *cp, const std::string &section)
{
    PciDev::unserialize(cp, section);

    regs.unserialize(cp, section);
    UNSERIALIZE_SCALAR(eeOpBits);
    UNSERIALIZE_SCALAR(eeAddrBits);
    UNSERIALIZE_SCALAR(eeDataBits);
    UNSERIALIZE_SCALAR(eeOpcode);
    UNSERIALIZE_SCALAR(eeAddr);
    UNSERIALIZE_SCALAR(lastInterrupt);
    UNSERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);

    rxFifo.unserialize("rxfifo", cp, section);
    txFifo.unserialize("txfifo", cp, section);

    // Rebuild the in-flight TX packet only if one was checkpointed.
    bool txPktExists;
    UNSERIALIZE_SCALAR(txPktExists);
    if (txPktExists) {
        txPacket = new EthPacketData(16384);
        txPacket->unserialize("txpacket", cp, section);
    }

    // Start all state machines ticking; they will put themselves back
    // to sleep on the first tick if there is nothing to do.
    rxTick = true;
    txTick = true;
    txFifoTick = true;

    // Reschedule any timer events that were pending at checkpoint time
    // (a stored time of 0 means the event was not scheduled).
    Tick rdtr_time, radv_time, tidv_time, tadv_time, inter_time;
    UNSERIALIZE_SCALAR(rdtr_time);
    UNSERIALIZE_SCALAR(radv_time);
    UNSERIALIZE_SCALAR(tidv_time);
    UNSERIALIZE_SCALAR(tadv_time);
    UNSERIALIZE_SCALAR(inter_time);

    if (rdtr_time)
        schedule(rdtrEvent, rdtr_time);

    if (radv_time)
        schedule(radvEvent, radv_time);

    if (tidv_time)
        schedule(tidvEvent, tidv_time);

    if (tadv_time)
        schedule(tadvEvent, tadv_time);

    if (inter_time)
        schedule(interEvent, inter_time);

    // Resume offset of a partially-DMAed RX packet.
    UNSERIALIZE_SCALAR(pktOffset);

    txDescCache.unserialize(cp, csprintf("%s.TxDescCache", section));

    rxDescCache.unserialize(cp, csprintf("%s.RxDescCache", section));
}
2557
// Factory hook invoked by the Python configuration system to construct
// the device model from its parameter object.
IGbE *
IGbEParams::create()
{
    return new IGbE(this);
}