X86: Extend mov2int and mov2fp so they can support insert and extract instructions.
[gem5.git] / src / dev / i8254xGBe.cc
1 /*
2 * Copyright (c) 2006 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Ali Saidi
29 */
30
31 /* @file
32 * Device model for Intel's 8254x line of gigabit ethernet controllers.
33 * In particular an 82547 revision 2 (82547GI) MAC because it seems to have the
34 * fewest workarounds in the driver. It will probably work with most of the
35 * other MACs with slight modifications.
36 */
37
38
39 /*
40 * @todo really there are multiple dma engines.. we should implement them.
41 */
42
43 #include <algorithm>
44
45 #include "base/inet.hh"
46 #include "base/trace.hh"
47 #include "dev/i8254xGBe.hh"
48 #include "mem/packet.hh"
49 #include "mem/packet_access.hh"
50 #include "params/IGbE.hh"
51 #include "sim/stats.hh"
52 #include "sim/system.hh"
53
54 using namespace iGbReg;
55 using namespace Net;
56
// Construct the device model and bring the simulated MAC/EEPROM to their
// documented power-on state.
IGbE::IGbE(const Params *p)
    : EtherDevice(p), etherInt(NULL), drainEvent(NULL),
      useFlowControl(p->use_flow_control),
      rxFifo(p->rx_fifo_size), txFifo(p->tx_fifo_size), rxTick(false),
      txTick(false), txFifoTick(false), rxDmaPacket(false), pktOffset(0),
      fetchDelay(p->fetch_delay), wbDelay(p->wb_delay),
      fetchCompDelay(p->fetch_comp_delay), wbCompDelay(p->wb_comp_delay),
      rxWriteDelay(p->rx_write_delay), txReadDelay(p->tx_read_delay),
      rdtrEvent(this), radvEvent(this),
      tadvEvent(this), tidvEvent(this), tickEvent(this), interEvent(this),
      rxDescCache(this, name()+".RxDesc", p->rx_desc_cache_size),
      txDescCache(this, name()+".TxDesc", p->tx_desc_cache_size),
      clock(p->clock), lastInterrupt(0)
{
    etherInt = new IGbEInt(name() + ".int", this);

    // Initialize internal registers per Intel documentation
    // All registers initialized to 0 by per register constructor
    regs.ctrl.fd(1);
    regs.ctrl.lrst(1);
    regs.ctrl.speed(2);
    regs.ctrl.frcspd(1);
    regs.sts.speed(3); // Say we're 1000Mbps
    regs.sts.fd(1); // full duplex
    regs.sts.lu(1); // link up
    regs.eecd.fwe(1);
    regs.eecd.ee_type(1);
    regs.imr = 0;
    regs.iam = 0;
    regs.rxdctl.gran(1);
    regs.rxdctl.wthresh(1);
    regs.fcrth(1);
    regs.tdwba = 0;
    regs.rlpml = 0;
    regs.sw_fw_sync = 0;

    regs.pba.rxa(0x30);
    regs.pba.txa(0x10);

    // State for the bit-banged SPI EEPROM interface emulated in write()
    eeOpBits = 0;
    eeAddrBits = 0;
    eeDataBits = 0;
    eeOpcode = 0;

    // clear all 64 16 bit words of the eeprom
    memset(&flash, 0, EEPROM_SIZE*2);

    // Set the MAC address
    memcpy(flash, p->hardware_address.bytes(), ETH_ADDR_LEN);
    for (int x = 0; x < ETH_ADDR_LEN/2; x++)
        flash[x] = htobe(flash[x]);

    // Compute the word checksum over the whole EEPROM image...
    uint16_t csum = 0;
    for (int x = 0; x < EEPROM_SIZE; x++)
        csum += htobe(flash[x]);


    // ...and store the value that makes the total sum to the
    // magic happy checksum value the driver expects.
    flash[EEPROM_SIZE-1] = htobe((uint16_t)(EEPROM_CSUM - csum));

    // Store the MAC address as queue ID
    macAddr = p->hardware_address;

    rxFifo.clear();
    txFifo.clear();
}
123
// Late initialization: grab the CPA annotation singleton used by the
// an* annotation calls, then run the base PCI device init.
void
IGbE::init()
{
    cpa = CPA::cpa();
    PciDev::init();
}
130
131 EtherInt*
132 IGbE::getEthPort(const std::string &if_name, int idx)
133 {
134
135 if (if_name == "interface") {
136 if (etherInt->getPeer())
137 panic("Port already connected to\n");
138 return etherInt;
139 }
140 return NULL;
141 }
142
// Handle a write to PCI configuration space. Device-specific config
// space is not modeled.
Tick
IGbE::writeConfig(PacketPtr pkt)
{
    // NOTE(review): PCI_CONFIG_SIZE is used here as an offset mask
    // (size - 1 style); confirm it is a power-of-two-minus-one value.
    int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
    if (offset < PCI_DEVICE_SPECIFIC)
        PciDev::writeConfig(pkt);
    else
        panic("Device specific PCI config space not implemented.\n");

    //
    // Some work may need to be done here based for the pci COMMAND bits.
    //

    return pioDelay;
}
158
// Handy macro for range-testing register access addresses.
// All arguments and the whole expansion are parenthesized so the macro
// expands safely when passed compound expressions (e.g. `a + b`).
#define IN_RANGE(val, base, len) (((val) >= (base)) && ((val) < ((base) + (len))))
161
// Handle a 32-bit MMIO read of a device register (BAR 0 only). Most
// registers simply return their stored value; a few (ICR, RDTR, SWSM)
// have read side effects per the 8254x programming model.
Tick
IGbE::read(PacketPtr pkt)
{
    int bar;
    Addr daddr;

    if (!getBAR(pkt->getAddr(), bar, daddr))
        panic("Invalid PCI memory access to unmapped memory.\n");

    // Only Memory register BAR is allowed
    assert(bar == 0);

    // Only 32bit accesses allowed
    assert(pkt->getSize() == 4);

    DPRINTF(Ethernet, "Read device register %#X\n", daddr);

    pkt->allocate();

    //
    // Handle read of register here
    //


    switch (daddr) {
      case REG_CTRL:
        pkt->set<uint32_t>(regs.ctrl());
        break;
      case REG_STATUS:
        pkt->set<uint32_t>(regs.sts());
        break;
      case REG_EECD:
        pkt->set<uint32_t>(regs.eecd());
        break;
      case REG_EERD:
        pkt->set<uint32_t>(regs.eerd());
        break;
      case REG_CTRL_EXT:
        pkt->set<uint32_t>(regs.ctrl_ext());
        break;
      case REG_MDIC:
        pkt->set<uint32_t>(regs.mdic());
        break;
      case REG_ICR:
        // ICR is read-to-clear: reading may clear the cause bits and,
        // with IAME set, auto-mask via IAM.
        DPRINTF(Ethernet, "Reading ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n",
                regs.icr(), regs.imr, regs.iam, regs.ctrl_ext.iame());
        pkt->set<uint32_t>(regs.icr());
        if (regs.icr.int_assert() || regs.imr == 0) {
            regs.icr = regs.icr() & ~mask(30);
            DPRINTF(Ethernet, "Cleared ICR. ICR=%#x\n", regs.icr());
        }
        if (regs.ctrl_ext.iame() && regs.icr.int_assert())
            regs.imr &= ~regs.iam;
        chkInterrupt();
        break;
      case REG_EICR:
        // This is only useful for MSI, but the driver reads it every time
        // Just don't do anything
        pkt->set<uint32_t>(0);
        break;
      case REG_ITR:
        pkt->set<uint32_t>(regs.itr());
        break;
      case REG_RCTL:
        pkt->set<uint32_t>(regs.rctl());
        break;
      case REG_FCTTV:
        pkt->set<uint32_t>(regs.fcttv());
        break;
      case REG_TCTL:
        pkt->set<uint32_t>(regs.tctl());
        break;
      case REG_PBA:
        pkt->set<uint32_t>(regs.pba());
        break;
      case REG_WUC:
      case REG_LEDCTL:
        pkt->set<uint32_t>(0); // We don't care, so just return 0
        break;
      case REG_FCRTL:
        pkt->set<uint32_t>(regs.fcrtl());
        break;
      case REG_FCRTH:
        pkt->set<uint32_t>(regs.fcrth());
        break;
      case REG_RDBAL:
        pkt->set<uint32_t>(regs.rdba.rdbal());
        break;
      case REG_RDBAH:
        pkt->set<uint32_t>(regs.rdba.rdbah());
        break;
      case REG_RDLEN:
        pkt->set<uint32_t>(regs.rdlen());
        break;
      case REG_SRRCTL:
        pkt->set<uint32_t>(regs.srrctl());
        break;
      case REG_RDH:
        pkt->set<uint32_t>(regs.rdh());
        break;
      case REG_RDT:
        pkt->set<uint32_t>(regs.rdt());
        break;
      case REG_RDTR:
        pkt->set<uint32_t>(regs.rdtr());
        // Reading RDTR with the flush-partial-descriptor bit set forces
        // an immediate writeback and receive-timer interrupt.
        if (regs.rdtr.fpd()) {
            rxDescCache.writeback(0);
            DPRINTF(EthernetIntr,
                    "Posting interrupt because of RDTR.FPD write\n");
            postInterrupt(IT_RXT);
            regs.rdtr.fpd(0);
        }
        break;
      case REG_RXDCTL:
        pkt->set<uint32_t>(regs.rxdctl());
        break;
      case REG_RADV:
        pkt->set<uint32_t>(regs.radv());
        break;
      case REG_TDBAL:
        pkt->set<uint32_t>(regs.tdba.tdbal());
        break;
      case REG_TDBAH:
        pkt->set<uint32_t>(regs.tdba.tdbah());
        break;
      case REG_TDLEN:
        pkt->set<uint32_t>(regs.tdlen());
        break;
      case REG_TDH:
        pkt->set<uint32_t>(regs.tdh());
        break;
      case REG_TXDCA_CTL:
        pkt->set<uint32_t>(regs.txdca_ctl());
        break;
      case REG_TDT:
        pkt->set<uint32_t>(regs.tdt());
        break;
      case REG_TIDV:
        pkt->set<uint32_t>(regs.tidv());
        break;
      case REG_TXDCTL:
        pkt->set<uint32_t>(regs.txdctl());
        break;
      case REG_TADV:
        pkt->set<uint32_t>(regs.tadv());
        break;
      case REG_TDWBAL:
        pkt->set<uint32_t>(regs.tdwba & mask(32));
        break;
      case REG_TDWBAH:
        pkt->set<uint32_t>(regs.tdwba >> 32);
        break;
      case REG_RXCSUM:
        pkt->set<uint32_t>(regs.rxcsum());
        break;
      case REG_RLPML:
        pkt->set<uint32_t>(regs.rlpml);
        break;
      case REG_RFCTL:
        pkt->set<uint32_t>(regs.rfctl());
        break;
      case REG_MANC:
        pkt->set<uint32_t>(regs.manc());
        break;
      case REG_SWSM:
        // Reading SWSM sets the semaphore-taken bit for the next reader
        pkt->set<uint32_t>(regs.swsm());
        regs.swsm.smbi(1);
        break;
      case REG_FWSM:
        pkt->set<uint32_t>(regs.fwsm());
        break;
      case REG_SWFWSYNC:
        pkt->set<uint32_t>(regs.sw_fw_sync);
        break;
      default:
        // Filter/stats table regions read as zero; anything else is fatal
        if (!IN_RANGE(daddr, REG_VFTA, VLAN_FILTER_TABLE_SIZE*4) &&
            !IN_RANGE(daddr, REG_RAL, RCV_ADDRESS_TABLE_SIZE*8) &&
            !IN_RANGE(daddr, REG_MTA, MULTICAST_TABLE_SIZE*4) &&
            !IN_RANGE(daddr, REG_CRCERRS, STATS_REGS_SIZE))
            panic("Read request to unknown register number: %#x\n", daddr);
        else
            pkt->set<uint32_t>(0);
    };

    pkt->makeAtomicResponse();
    return pioDelay;
}
349
350 Tick
351 IGbE::write(PacketPtr pkt)
352 {
353 int bar;
354 Addr daddr;
355
356
357 if (!getBAR(pkt->getAddr(), bar, daddr))
358 panic("Invalid PCI memory access to unmapped memory.\n");
359
360 // Only Memory register BAR is allowed
361 assert(bar == 0);
362
363 // Only 32bit accesses allowed
364 assert(pkt->getSize() == sizeof(uint32_t));
365
366 DPRINTF(Ethernet, "Wrote device register %#X value %#X\n",
367 daddr, pkt->get<uint32_t>());
368
369 //
370 // Handle write of register here
371 //
372 uint32_t val = pkt->get<uint32_t>();
373
374 Regs::RCTL oldrctl;
375 Regs::TCTL oldtctl;
376
377 switch (daddr) {
378 case REG_CTRL:
379 regs.ctrl = val;
380 if (regs.ctrl.tfce())
381 warn("TX Flow control enabled, should implement\n");
382 if (regs.ctrl.rfce())
383 warn("RX Flow control enabled, should implement\n");
384 break;
385 case REG_CTRL_EXT:
386 regs.ctrl_ext = val;
387 break;
388 case REG_STATUS:
389 regs.sts = val;
390 break;
391 case REG_EECD:
392 int oldClk;
393 oldClk = regs.eecd.sk();
394 regs.eecd = val;
395 // See if this is a eeprom access and emulate accordingly
396 if (!oldClk && regs.eecd.sk()) {
397 if (eeOpBits < 8) {
398 eeOpcode = eeOpcode << 1 | regs.eecd.din();
399 eeOpBits++;
400 } else if (eeAddrBits < 8 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
401 eeAddr = eeAddr << 1 | regs.eecd.din();
402 eeAddrBits++;
403 } else if (eeDataBits < 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
404 assert(eeAddr>>1 < EEPROM_SIZE);
405 DPRINTF(EthernetEEPROM, "EEPROM bit read: %d word: %#X\n",
406 flash[eeAddr>>1] >> eeDataBits & 0x1,
407 flash[eeAddr>>1]);
408 regs.eecd.dout((flash[eeAddr>>1] >> (15-eeDataBits)) & 0x1);
409 eeDataBits++;
410 } else if (eeDataBits < 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI) {
411 regs.eecd.dout(0);
412 eeDataBits++;
413 } else
414 panic("What's going on with eeprom interface? opcode:"
415 " %#x:%d addr: %#x:%d, data: %d\n", (uint32_t)eeOpcode,
416 (uint32_t)eeOpBits, (uint32_t)eeAddr,
417 (uint32_t)eeAddrBits, (uint32_t)eeDataBits);
418
419 // Reset everything for the next command
420 if ((eeDataBits == 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) ||
421 (eeDataBits == 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI)) {
422 eeOpBits = 0;
423 eeAddrBits = 0;
424 eeDataBits = 0;
425 eeOpcode = 0;
426 eeAddr = 0;
427 }
428
429 DPRINTF(EthernetEEPROM, "EEPROM: opcode: %#X:%d addr: %#X:%d\n",
430 (uint32_t)eeOpcode, (uint32_t) eeOpBits,
431 (uint32_t)eeAddr>>1, (uint32_t)eeAddrBits);
432 if (eeOpBits == 8 && !(eeOpcode == EEPROM_READ_OPCODE_SPI ||
433 eeOpcode == EEPROM_RDSR_OPCODE_SPI ))
434 panic("Unknown eeprom opcode: %#X:%d\n", (uint32_t)eeOpcode,
435 (uint32_t)eeOpBits);
436
437
438 }
439 // If driver requests eeprom access, immediately give it to it
440 regs.eecd.ee_gnt(regs.eecd.ee_req());
441 break;
442 case REG_EERD:
443 regs.eerd = val;
444 if (regs.eerd.start()) {
445 regs.eerd.done(1);
446 assert(regs.eerd.addr() < EEPROM_SIZE);
447 regs.eerd.data(flash[regs.eerd.addr()]);
448 regs.eerd.start(0);
449 DPRINTF(EthernetEEPROM, "EEPROM: read addr: %#X data %#x\n",
450 regs.eerd.addr(), regs.eerd.data());
451 }
452 break;
453 case REG_MDIC:
454 regs.mdic = val;
455 if (regs.mdic.i())
456 panic("No support for interrupt on mdic complete\n");
457 if (regs.mdic.phyadd() != 1)
458 panic("No support for reading anything but phy\n");
459 DPRINTF(Ethernet, "%s phy address %x\n",
460 regs.mdic.op() == 1 ? "Writing" : "Reading",
461 regs.mdic.regadd());
462 switch (regs.mdic.regadd()) {
463 case PHY_PSTATUS:
464 regs.mdic.data(0x796D); // link up
465 break;
466 case PHY_PID:
467 regs.mdic.data(params()->phy_pid);
468 break;
469 case PHY_EPID:
470 regs.mdic.data(params()->phy_epid);
471 break;
472 case PHY_GSTATUS:
473 regs.mdic.data(0x7C00);
474 break;
475 case PHY_EPSTATUS:
476 regs.mdic.data(0x3000);
477 break;
478 case PHY_AGC:
479 regs.mdic.data(0x180); // some random length
480 break;
481 default:
482 regs.mdic.data(0);
483 }
484 regs.mdic.r(1);
485 break;
486 case REG_ICR:
487 DPRINTF(Ethernet, "Writing ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n",
488 regs.icr(), regs.imr, regs.iam, regs.ctrl_ext.iame());
489 if (regs.ctrl_ext.iame())
490 regs.imr &= ~regs.iam;
491 regs.icr = ~bits(val,30,0) & regs.icr();
492 chkInterrupt();
493 break;
494 case REG_ITR:
495 regs.itr = val;
496 break;
497 case REG_ICS:
498 DPRINTF(EthernetIntr, "Posting interrupt because of ICS write\n");
499 postInterrupt((IntTypes)val);
500 break;
501 case REG_IMS:
502 regs.imr |= val;
503 chkInterrupt();
504 break;
505 case REG_IMC:
506 regs.imr &= ~val;
507 chkInterrupt();
508 break;
509 case REG_IAM:
510 regs.iam = val;
511 break;
512 case REG_RCTL:
513 oldrctl = regs.rctl;
514 regs.rctl = val;
515 if (regs.rctl.rst()) {
516 rxDescCache.reset();
517 DPRINTF(EthernetSM, "RXS: Got RESET!\n");
518 rxFifo.clear();
519 regs.rctl.rst(0);
520 }
521 if (regs.rctl.en())
522 rxTick = true;
523 restartClock();
524 break;
525 case REG_FCTTV:
526 regs.fcttv = val;
527 break;
528 case REG_TCTL:
529 regs.tctl = val;
530 oldtctl = regs.tctl;
531 regs.tctl = val;
532 if (regs.tctl.en())
533 txTick = true;
534 restartClock();
535 if (regs.tctl.en() && !oldtctl.en()) {
536 txDescCache.reset();
537 }
538 break;
539 case REG_PBA:
540 regs.pba.rxa(val);
541 regs.pba.txa(64 - regs.pba.rxa());
542 break;
543 case REG_WUC:
544 case REG_LEDCTL:
545 case REG_FCAL:
546 case REG_FCAH:
547 case REG_FCT:
548 case REG_VET:
549 case REG_AIFS:
550 case REG_TIPG:
551 ; // We don't care, so don't store anything
552 break;
553 case REG_IVAR0:
554 warn("Writing to IVAR0, ignoring...\n");
555 break;
556 case REG_FCRTL:
557 regs.fcrtl = val;
558 break;
559 case REG_FCRTH:
560 regs.fcrth = val;
561 break;
562 case REG_RDBAL:
563 regs.rdba.rdbal( val & ~mask(4));
564 rxDescCache.areaChanged();
565 break;
566 case REG_RDBAH:
567 regs.rdba.rdbah(val);
568 rxDescCache.areaChanged();
569 break;
570 case REG_RDLEN:
571 regs.rdlen = val & ~mask(7);
572 rxDescCache.areaChanged();
573 break;
574 case REG_SRRCTL:
575 regs.srrctl = val;
576 break;
577 case REG_RDH:
578 regs.rdh = val;
579 rxDescCache.areaChanged();
580 break;
581 case REG_RDT:
582 regs.rdt = val;
583 DPRINTF(EthernetSM, "RXS: RDT Updated.\n");
584 if (getState() == SimObject::Running) {
585 DPRINTF(EthernetSM, "RXS: RDT Fetching Descriptors!\n");
586 rxDescCache.fetchDescriptors();
587 } else {
588 DPRINTF(EthernetSM, "RXS: RDT NOT Fetching Desc b/c draining!\n");
589 }
590 break;
591 case REG_RDTR:
592 regs.rdtr = val;
593 break;
594 case REG_RADV:
595 regs.radv = val;
596 break;
597 case REG_RXDCTL:
598 regs.rxdctl = val;
599 break;
600 case REG_TDBAL:
601 regs.tdba.tdbal( val & ~mask(4));
602 txDescCache.areaChanged();
603 break;
604 case REG_TDBAH:
605 regs.tdba.tdbah(val);
606 txDescCache.areaChanged();
607 break;
608 case REG_TDLEN:
609 regs.tdlen = val & ~mask(7);
610 txDescCache.areaChanged();
611 break;
612 case REG_TDH:
613 regs.tdh = val;
614 txDescCache.areaChanged();
615 break;
616 case REG_TXDCA_CTL:
617 regs.txdca_ctl = val;
618 if (regs.txdca_ctl.enabled())
619 panic("No support for DCA\n");
620 break;
621 case REG_TDT:
622 regs.tdt = val;
623 DPRINTF(EthernetSM, "TXS: TX Tail pointer updated\n");
624 if (getState() == SimObject::Running) {
625 DPRINTF(EthernetSM, "TXS: TDT Fetching Descriptors!\n");
626 txDescCache.fetchDescriptors();
627 } else {
628 DPRINTF(EthernetSM, "TXS: TDT NOT Fetching Desc b/c draining!\n");
629 }
630 break;
631 case REG_TIDV:
632 regs.tidv = val;
633 break;
634 case REG_TXDCTL:
635 regs.txdctl = val;
636 break;
637 case REG_TADV:
638 regs.tadv = val;
639 break;
640 case REG_TDWBAL:
641 regs.tdwba &= ~mask(32);
642 regs.tdwba |= val;
643 txDescCache.completionWriteback(regs.tdwba & ~mask(1),
644 regs.tdwba & mask(1));
645 break;
646 case REG_TDWBAH:
647 regs.tdwba &= mask(32);
648 regs.tdwba |= (uint64_t)val << 32;
649 txDescCache.completionWriteback(regs.tdwba & ~mask(1),
650 regs.tdwba & mask(1));
651 break;
652 case REG_RXCSUM:
653 regs.rxcsum = val;
654 break;
655 case REG_RLPML:
656 regs.rlpml = val;
657 break;
658 case REG_RFCTL:
659 regs.rfctl = val;
660 if (regs.rfctl.exsten())
661 panic("Extended RX descriptors not implemented\n");
662 break;
663 case REG_MANC:
664 regs.manc = val;
665 break;
666 case REG_SWSM:
667 regs.swsm = val;
668 if (regs.fwsm.eep_fw_semaphore())
669 regs.swsm.swesmbi(0);
670 break;
671 case REG_SWFWSYNC:
672 regs.sw_fw_sync = val;
673 break;
674 default:
675 if (!IN_RANGE(daddr, REG_VFTA, VLAN_FILTER_TABLE_SIZE*4) &&
676 !IN_RANGE(daddr, REG_RAL, RCV_ADDRESS_TABLE_SIZE*8) &&
677 !IN_RANGE(daddr, REG_MTA, MULTICAST_TABLE_SIZE*4))
678 panic("Write request to unknown register number: %#x\n", daddr);
679 };
680
681 pkt->makeAtomicResponse();
682 return pioDelay;
683 }
684
// Record an interrupt cause in ICR and either raise the CPU interrupt
// immediately or defer it according to the ITR throttle interval.
void
IGbE::postInterrupt(IntTypes t, bool now)
{
    assert(t);

    // Interrupt is already pending
    if (t & regs.icr() && !now)
        return;

    regs.icr = regs.icr() | t;

    // ITR interval is expressed in 256ns increments
    Tick itr_interval = Clock::Int::ns * 256 * regs.itr.interval();
    DPRINTF(EthernetIntr,
            "EINT: postInterrupt() curTick: %d itr: %d interval: %d\n",
            curTick, regs.itr.interval(), itr_interval);

    if (regs.itr.interval() == 0 || now ||
        lastInterrupt + itr_interval <= curTick) {
        // Throttle window already elapsed (or throttling disabled/forced):
        // post right away
        if (interEvent.scheduled()) {
            deschedule(interEvent);
        }
        cpuPostInt();
    } else {
        // Defer until the throttle window expires
        Tick int_time = lastInterrupt + itr_interval;
        assert(int_time > 0);
        DPRINTF(EthernetIntr, "EINT: Scheduling timer interrupt for tick %d\n",
                int_time);
        if (!interEvent.scheduled()) {
            schedule(interEvent, int_time);
        }
    }
}
717
// Callback for the deferred (ITR-throttled) interrupt event: actually
// post the interrupt to the CPU.
void
IGbE::delayIntEvent()
{
    cpuPostInt();
}
723
724
// Assert the interrupt line to the CPU, folding any pending delayed
// timer causes (RDTR/RADV/TADV/TIDV) into ICR first.
void
IGbE::cpuPostInt()
{

    postedInterrupts++;

    // All pending causes are masked off; nothing to deliver
    if (!(regs.icr() & regs.imr)) {
        DPRINTF(Ethernet, "Interrupt Masked. Not Posting\n");
        return;
    }

    DPRINTF(Ethernet, "Posting Interrupt\n");


    if (interEvent.scheduled()) {
        deschedule(interEvent);
    }

    // Any scheduled coalescing timer is superseded by this interrupt:
    // set its cause bit now and cancel the event
    if (rdtrEvent.scheduled()) {
        regs.icr.rxt0(1);
        deschedule(rdtrEvent);
    }
    if (radvEvent.scheduled()) {
        regs.icr.rxt0(1);
        deschedule(radvEvent);
    }
    if (tadvEvent.scheduled()) {
        regs.icr.txdw(1);
        deschedule(tadvEvent);
    }
    if (tidvEvent.scheduled()) {
        regs.icr.txdw(1);
        deschedule(tidvEvent);
    }

    regs.icr.int_assert(1);
    DPRINTF(EthernetIntr, "EINT: Posting interrupt to CPU now. Vector %#x\n",
            regs.icr());

    intrPost();

    // Remember when we last interrupted for ITR throttling
    lastInterrupt = curTick;
}
768
769 void
770 IGbE::cpuClearInt()
771 {
772 if (regs.icr.int_assert()) {
773 regs.icr.int_assert(0);
774 DPRINTF(EthernetIntr,
775 "EINT: Clearing interrupt to CPU now. Vector %#x\n",
776 regs.icr());
777 intrClear();
778 }
779 }
780
// Re-evaluate the interrupt state after ICR/IMR changes: clear the CPU
// interrupt if everything is masked, or (re)post/schedule one if an
// unmasked cause is pending.
void
IGbE::chkInterrupt()
{
    DPRINTF(Ethernet, "Checking interrupts icr: %#x imr: %#x\n", regs.icr(),
            regs.imr);
    // Check if we need to clear the cpu interrupt
    if (!(regs.icr() & regs.imr)) {
        DPRINTF(Ethernet, "Mask cleaned all interrupts\n");
        if (interEvent.scheduled())
            deschedule(interEvent);
        if (regs.icr.int_assert())
            cpuClearInt();
    }
    DPRINTF(Ethernet, "ITR = %#X itr.interval = %#X\n",
            regs.itr(), regs.itr.interval());

    if (regs.icr() & regs.imr) {
        if (regs.itr.interval() == 0) {
            // No throttling: deliver immediately
            cpuPostInt();
        } else {
            DPRINTF(Ethernet,
                    "Possibly scheduling interrupt because of imr write\n");
            if (!interEvent.scheduled()) {
                // ITR interval is in 256ns units
                Tick t = curTick + Clock::Int::ns * 256 * regs.itr.interval();
                DPRINTF(Ethernet, "Scheduling for %d\n", t);
                schedule(interEvent, t);
            }
        }
    }
}
811
812
813 ///////////////////////////// IGbE::DescCache //////////////////////////////
814
// Construct a descriptor cache of up to `s` descriptors, allocating the
// staging buffers used for DMA fetches and writebacks.
template<class T>
IGbE::DescCache<T>::DescCache(IGbE *i, const std::string n, int s)
    : igbe(i), _name(n), cachePnt(0), size(s), curFetching(0),
      wbOut(0), pktPtr(NULL), wbDelayEvent(this),
      fetchDelayEvent(this), fetchEvent(this), wbEvent(this)
{
    fetchBuf = new T[size];
    wbBuf = new T[size];
}
824
825 template<class T>
826 IGbE::DescCache<T>::~DescCache()
827 {
828 reset();
829 }
830
831 template<class T>
832 void
833 IGbE::DescCache<T>::areaChanged()
834 {
835 if (usedCache.size() > 0 || curFetching || wbOut)
836 panic("Descriptor Address, Length or Head changed. Bad\n");
837 reset();
838
839 }
840
// Begin writing back used descriptors to guest memory. aMask constrains
// how many descriptors may be written (an alignment mask); a writeback
// already in flight is tightened rather than duplicated.
template<class T>
void
IGbE::DescCache<T>::writeback(Addr aMask)
{
    int curHead = descHead();
    int max_to_wb = usedCache.size();

    // Check if this writeback is less restrictive that the previous
    // and if so setup another one immediately following it
    if (wbOut) {
        if (aMask < wbAlignment) {
            moreToWb = true;
            wbAlignment = aMask;
        }
        DPRINTF(EthernetDesc,
                "Writing back already in process, returning\n");
        return;
    }

    moreToWb = false;
    wbAlignment = aMask;


    DPRINTF(EthernetDesc, "Writing back descriptors head: %d tail: "
            "%d len: %d cachePnt: %d max_to_wb: %d descleft: %d\n",
            curHead, descTail(), descLen(), cachePnt, max_to_wb,
            descLeft());

    if (max_to_wb + curHead >= descLen()) {
        // Would wrap the ring: write up to the end now and schedule a
        // follow-up writeback for the remainder
        max_to_wb = descLen() - curHead;
        moreToWb = true;
        // this is by definition aligned correctly
    } else if (wbAlignment != 0) {
        // align the wb point to the mask
        max_to_wb = max_to_wb & ~wbAlignment;
    }

    DPRINTF(EthernetDesc, "Writing back %d descriptors\n", max_to_wb);

    if (max_to_wb <= 0) {
        // Nothing writable yet (alignment or empty cache) — annotate only
        if (usedCache.size())
            igbe->anBegin(annSmWb, "Wait Alignment", CPA::FL_WAIT);
        else
            igbe->anWe(annSmWb, annUsedCacheQ);
        return;
    }

    wbOut = max_to_wb;

    assert(!wbDelayEvent.scheduled());
    igbe->schedule(wbDelayEvent, curTick + igbe->wbDelay);
    igbe->anBegin(annSmWb, "Prepare Writeback Desc");
}
894
// Second stage of a writeback: copy the used descriptors into the
// staging buffer and start the DMA write. Retries later if draining.
template<class T>
void
IGbE::DescCache<T>::writeback1()
{
    // If we're draining delay issuing this DMA
    if (igbe->getState() != SimObject::Running) {
        igbe->schedule(wbDelayEvent, curTick + igbe->wbDelay);
        return;
    }

    DPRINTF(EthernetDesc, "Begining DMA of %d descriptors\n", wbOut);

    for (int x = 0; x < wbOut; x++) {
        assert(usedCache.size());
        memcpy(&wbBuf[x], usedCache[x], sizeof(T));
        igbe->anPq(annSmWb, annUsedCacheQ);
        igbe->anPq(annSmWb, annDescQ);
        igbe->anQ(annSmWb, annUsedDescQ);
    }


    igbe->anBegin(annSmWb, "Writeback Desc DMA");

    assert(wbOut);
    igbe->dmaWrite(pciToDma(descBase() + descHead() * sizeof(T)),
                   wbOut * sizeof(T), &wbEvent, (uint8_t*)wbBuf,
                   igbe->wbCompDelay);
}
923
// Begin fetching descriptors from the guest's ring into the cache.
// Fetches up to the tail pointer (without wrapping past the ring end in
// one DMA) and no more than the free cache space allows.
template<class T>
void
IGbE::DescCache<T>::fetchDescriptors()
{
    size_t max_to_fetch;

    // Only one fetch in flight at a time
    if (curFetching) {
        DPRINTF(EthernetDesc,
                "Currently fetching %d descriptors, returning\n",
                curFetching);
        return;
    }

    if (descTail() >= cachePnt)
        max_to_fetch = descTail() - cachePnt;
    else
        // Wrap case: only fetch up to the end of the ring this time
        max_to_fetch = descLen() - cachePnt;

    size_t free_cache = size - usedCache.size() - unusedCache.size();

    // CPA annotations describing why we may be stalled
    if (!max_to_fetch)
        igbe->anWe(annSmFetch, annUnusedDescQ);
    else
        igbe->anPq(annSmFetch, annUnusedDescQ, max_to_fetch);

    if (max_to_fetch) {
        if (!free_cache)
            igbe->anWf(annSmFetch, annDescQ);
        else
            igbe->anRq(annSmFetch, annDescQ, free_cache);
    }

    max_to_fetch = std::min(max_to_fetch, free_cache);


    DPRINTF(EthernetDesc, "Fetching descriptors head: %d tail: "
            "%d len: %d cachePnt: %d max_to_fetch: %d descleft: %d\n",
            descHead(), descTail(), descLen(), cachePnt,
            max_to_fetch, descLeft());

    // Nothing to do
    if (max_to_fetch == 0)
        return;

    // So we don't have two descriptor fetches going on at once
    curFetching = max_to_fetch;

    assert(!fetchDelayEvent.scheduled());
    igbe->schedule(fetchDelayEvent, curTick + igbe->fetchDelay);
    igbe->anBegin(annSmFetch, "Prepare Fetch Desc");
}
975
// Second stage of a fetch: issue the actual DMA read of the descriptors
// into the staging buffer. Retries later if draining.
template<class T>
void
IGbE::DescCache<T>::fetchDescriptors1()
{
    // If we're draining delay issuing this DMA
    if (igbe->getState() != SimObject::Running) {
        igbe->schedule(fetchDelayEvent, curTick + igbe->fetchDelay);
        return;
    }

    igbe->anBegin(annSmFetch, "Fetch Desc");

    DPRINTF(EthernetDesc, "Fetching descriptors at %#x (%#x), size: %#x\n",
            descBase() + cachePnt * sizeof(T),
            pciToDma(descBase() + cachePnt * sizeof(T)),
            curFetching * sizeof(T));
    assert(curFetching);
    igbe->dmaRead(pciToDma(descBase() + cachePnt * sizeof(T)),
                  curFetching * sizeof(T), &fetchEvent, (uint8_t*)fetchBuf,
                  igbe->fetchCompDelay);
}
997
// DMA-read completion: copy the fetched descriptors out of the staging
// buffer into the unused cache, advance (and wrap) the cache pointer,
// and kick the state machine.
template<class T>
void
IGbE::DescCache<T>::fetchComplete()
{
    T *newDesc;
    igbe->anBegin(annSmFetch, "Fetch Complete");
    for (int x = 0; x < curFetching; x++) {
        newDesc = new T;
        memcpy(newDesc, &fetchBuf[x], sizeof(T));
        unusedCache.push_back(newDesc);
        igbe->anDq(annSmFetch, annUnusedDescQ);
        igbe->anQ(annSmFetch, annUnusedCacheQ);
        igbe->anQ(annSmFetch, annDescQ);
    }


#ifndef NDEBUG
    int oldCp = cachePnt;
#endif

    // Advance the cache pointer around the ring
    cachePnt += curFetching;
    assert(cachePnt <= descLen());
    if (cachePnt == descLen())
        cachePnt = 0;

    curFetching = 0;

    DPRINTF(EthernetDesc, "Fetching complete cachePnt %d -> %d\n",
            oldCp, cachePnt);

    // Annotate why we might be stalling: no descriptors left, cache full,
    // or simply waiting for more work
    if ((descTail() >= cachePnt ? (descTail() - cachePnt) : (descLen() -
                                                             cachePnt)) == 0)
    {
        igbe->anWe(annSmFetch, annUnusedDescQ);
    } else if (!(size - usedCache.size() - unusedCache.size())) {
        igbe->anWf(annSmFetch, annDescQ);
    } else {
        igbe->anBegin(annSmFetch, "Wait", CPA::FL_WAIT);
    }

    enableSm();
    igbe->checkDrain();
}
1041
// DMA-write completion: release the written-back descriptors, advance
// (and wrap) the guest-visible head pointer, and start any follow-up
// writeback that was requested while this one was in flight.
template<class T>
void
IGbE::DescCache<T>::wbComplete()
{

    igbe->anBegin(annSmWb, "Finish Writeback");

    long curHead = descHead();
#ifndef NDEBUG
    long oldHead = curHead;
#endif

    for (int x = 0; x < wbOut; x++) {
        assert(usedCache.size());
        delete usedCache[0];
        usedCache.pop_front();

        igbe->anDq(annSmWb, annUsedCacheQ);
        igbe->anDq(annSmWb, annDescQ);
    }

    curHead += wbOut;
    wbOut = 0;

    // Wrap the head pointer around the ring if needed
    if (curHead >= descLen())
        curHead -= descLen();

    // Update the head
    updateHead(curHead);

    DPRINTF(EthernetDesc, "Writeback complete curHead %d -> %d\n",
            oldHead, curHead);

    // If we still have more to wb, call wb now
    actionAfterWb();
    if (moreToWb) {
        moreToWb = false;
        DPRINTF(EthernetDesc, "Writeback has more todo\n");
        writeback(wbAlignment);
    }

    if (!wbOut) {
        igbe->checkDrain();
        if (usedCache.size())
            igbe->anBegin(annSmWb, "Wait", CPA::FL_WAIT);
        else
            igbe->anWe(annSmWb, annUsedCacheQ);
    }
    fetchAfterWb();
}
1092
1093 template<class T>
1094 void
1095 IGbE::DescCache<T>::reset()
1096 {
1097 DPRINTF(EthernetDesc, "Reseting descriptor cache\n");
1098 for (typename CacheType::size_type x = 0; x < usedCache.size(); x++)
1099 delete usedCache[x];
1100 for (typename CacheType::size_type x = 0; x < unusedCache.size(); x++)
1101 delete unusedCache[x];
1102
1103 usedCache.clear();
1104 unusedCache.clear();
1105
1106 cachePnt = 0;
1107
1108 }
1109
// Checkpoint the descriptor cache: scalar state, the raw bytes of every
// cached descriptor, and the pending delay-event times (0 == not
// scheduled).
template<class T>
void
IGbE::DescCache<T>::serialize(std::ostream &os)
{
    SERIALIZE_SCALAR(cachePnt);
    SERIALIZE_SCALAR(curFetching);
    SERIALIZE_SCALAR(wbOut);
    SERIALIZE_SCALAR(moreToWb);
    SERIALIZE_SCALAR(wbAlignment);

    typename CacheType::size_type usedCacheSize = usedCache.size();
    SERIALIZE_SCALAR(usedCacheSize);
    for (typename CacheType::size_type x = 0; x < usedCacheSize; x++) {
        arrayParamOut(os, csprintf("usedCache_%d", x),
                      (uint8_t*)usedCache[x],sizeof(T));
    }

    typename CacheType::size_type unusedCacheSize = unusedCache.size();
    SERIALIZE_SCALAR(unusedCacheSize);
    for (typename CacheType::size_type x = 0; x < unusedCacheSize; x++) {
        arrayParamOut(os, csprintf("unusedCache_%d", x),
                      (uint8_t*)unusedCache[x],sizeof(T));
    }

    // Record when the delay events would have fired, so unserialize can
    // reschedule them; 0 means the event was not scheduled
    Tick fetch_delay = 0, wb_delay = 0;
    if (fetchDelayEvent.scheduled())
        fetch_delay = fetchDelayEvent.when();
    SERIALIZE_SCALAR(fetch_delay);
    if (wbDelayEvent.scheduled())
        wb_delay = wbDelayEvent.when();
    SERIALIZE_SCALAR(wb_delay);


}
1144
// Restore the descriptor cache from a checkpoint; mirror of serialize().
template<class T>
void
IGbE::DescCache<T>::unserialize(Checkpoint *cp, const std::string &section)
{
    UNSERIALIZE_SCALAR(cachePnt);
    UNSERIALIZE_SCALAR(curFetching);
    UNSERIALIZE_SCALAR(wbOut);
    UNSERIALIZE_SCALAR(moreToWb);
    UNSERIALIZE_SCALAR(wbAlignment);

    typename CacheType::size_type usedCacheSize;
    UNSERIALIZE_SCALAR(usedCacheSize);
    T *temp;
    for (typename CacheType::size_type x = 0; x < usedCacheSize; x++) {
        temp = new T;
        arrayParamIn(cp, section, csprintf("usedCache_%d", x),
                     (uint8_t*)temp,sizeof(T));
        usedCache.push_back(temp);
    }

    typename CacheType::size_type unusedCacheSize;
    UNSERIALIZE_SCALAR(unusedCacheSize);
    for (typename CacheType::size_type x = 0; x < unusedCacheSize; x++) {
        temp = new T;
        arrayParamIn(cp, section, csprintf("unusedCache_%d", x),
                     (uint8_t*)temp,sizeof(T));
        unusedCache.push_back(temp);
    }
    // Reschedule the delay events if they were pending at checkpoint time
    // (0 means the event was not scheduled)
    Tick fetch_delay = 0, wb_delay = 0;
    UNSERIALIZE_SCALAR(fetch_delay);
    UNSERIALIZE_SCALAR(wb_delay);
    if (fetch_delay)
        igbe->schedule(fetchDelayEvent, fetch_delay);
    if (wb_delay)
        igbe->schedule(wbDelayEvent, wb_delay);


}
1183
///////////////////////////// IGbE::RxDescCache //////////////////////////////

// Construct the RX descriptor cache and name the CPA annotation
// queues/state machines used for visualization.
IGbE::RxDescCache::RxDescCache(IGbE *i, const std::string n, int s)
    : DescCache<RxDesc>(i, n, s), pktDone(false), splitCount(0),
      pktEvent(this), pktHdrEvent(this), pktDataEvent(this)

{
    annSmFetch = "RX Desc Fetch";
    annSmWb = "RX Desc Writeback";
    annUnusedDescQ = "RX Unused Descriptors";
    annUnusedCacheQ = "RX Unused Descriptor Cache";
    annUsedCacheQ = "RX Used Descriptor Cache";
    annUsedDescQ = "RX Used Descriptors";
    annDescQ = "RX Descriptors";
}
1199
1200 void
1201 IGbE::RxDescCache::pktSplitDone()
1202 {
1203 splitCount++;
1204 DPRINTF(EthernetDesc,
1205 "Part of split packet done: splitcount now %d\n", splitCount);
1206 assert(splitCount <= 2);
1207 if (splitCount != 2)
1208 return;
1209 splitCount = 0;
1210 DPRINTF(EthernetDesc,
1211 "Part of split packet done: calling pktComplete()\n");
1212 pktComplete();
1213 }
1214
/* Copy a received packet into the buffer(s) described by the next unused
 * RX descriptor and start the DMA write(s).  pkt_offset is non-zero only
 * when a header-split packet is being continued across descriptors.
 * Returns the running total of packet bytes copied (bytesCopied).
 */
int
IGbE::RxDescCache::writePacket(EthPacketPtr packet, int pkt_offset)
{
    // Caller must guarantee a descriptor is available.
    assert(unusedCache.size());
    //if (!unusedCache.size())
    //    return false;

    pktPtr = packet;
    pktDone = false;
    unsigned buf_len, hdr_len;

    RxDesc *desc = unusedCache.front();
    switch (igbe->regs.srrctl.desctype()) {
      case RXDT_LEGACY:
        // Legacy descriptor: the whole packet goes into a single buffer.
        assert(pkt_offset == 0);
        bytesCopied = packet->length;
        DPRINTF(EthernetDesc, "Packet Length: %d Desc Size: %d\n",
                packet->length, igbe->regs.rctl.descSize());
        assert(packet->length < igbe->regs.rctl.descSize());
        igbe->dmaWrite(pciToDma(desc->legacy.buf),
                       packet->length, &pktEvent, packet->data,
                       igbe->rxWriteDelay);
        break;
      case RXDT_ADV_ONEBUF:
        // Advanced one-buffer descriptor: no header split, whole packet
        // into the packet buffer.  With long-packet-enable the buffer
        // size comes from SRRCTL, otherwise from RCTL's descriptor size.
        assert(pkt_offset == 0);
        bytesCopied = packet->length;
        buf_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.bufLen() :
            igbe->regs.rctl.descSize();
        DPRINTF(EthernetDesc, "Packet Length: %d srrctl: %#x Desc Size: %d\n",
                packet->length, igbe->regs.srrctl(), buf_len);
        assert(packet->length < buf_len);
        igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
                       packet->length, &pktEvent, packet->data,
                       igbe->rxWriteDelay);
        desc->adv_wb.header_len = htole(0);
        desc->adv_wb.sph = htole(0);
        desc->adv_wb.pkt_len = htole((uint16_t)(pktPtr->length));
        break;
      case RXDT_ADV_SPLIT_A:
        // Advanced header-split descriptor: protocol headers go into the
        // header buffer, payload into the packet buffer.
        int split_point;

        buf_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.bufLen() :
            igbe->regs.rctl.descSize();
        hdr_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.hdrLen() : 0;
        DPRINTF(EthernetDesc,
                "lpe: %d Packet Length: %d offset: %d srrctl: %#x "
                "hdr addr: %#x Hdr Size: %d desc addr: %#x Desc Size: %d\n",
                igbe->regs.rctl.lpe(), packet->length, pkt_offset,
                igbe->regs.srrctl(), desc->adv_read.hdr, hdr_len,
                desc->adv_read.pkt, buf_len);

        // Offset where the protocol headers end (0 if undecodable).
        split_point = hsplit(pktPtr);

        if (packet->length <= hdr_len) {
            // The entire packet fits in the header buffer.
            bytesCopied = packet->length;
            assert(pkt_offset == 0);
            DPRINTF(EthernetDesc, "Hdr split: Entire packet in header\n");
            igbe->dmaWrite(pciToDma(desc->adv_read.hdr),
                           packet->length, &pktEvent, packet->data,
                           igbe->rxWriteDelay);
            desc->adv_wb.header_len = htole((uint16_t)packet->length);
            desc->adv_wb.sph = htole(0);
            desc->adv_wb.pkt_len = htole(0);
        } else if (split_point) {
            if (pkt_offset) {
                // we are only copying some data, header/data has already
                // been copied on a previous descriptor; continue copying
                // payload only into this descriptor's packet buffer.
                int max_to_copy =
                    std::min(packet->length - pkt_offset, buf_len);
                bytesCopied += max_to_copy;
                DPRINTF(EthernetDesc,
                        "Hdr split: Continuing data buffer copy\n");
                igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
                               max_to_copy, &pktEvent,
                               packet->data + pkt_offset, igbe->rxWriteDelay);
                desc->adv_wb.header_len = htole(0);
                desc->adv_wb.pkt_len = htole((uint16_t)max_to_copy);
                desc->adv_wb.sph = htole(0);
            } else {
                // First descriptor of a split packet: DMA headers and as
                // much payload as fits as two separate writes;
                // pktSplitDone() runs pktComplete() after both finish.
                int max_to_copy =
                    std::min(packet->length - split_point, buf_len);
                bytesCopied += max_to_copy + split_point;

                DPRINTF(EthernetDesc, "Hdr split: splitting at %d\n",
                        split_point);
                igbe->dmaWrite(pciToDma(desc->adv_read.hdr),
                               split_point, &pktHdrEvent,
                               packet->data, igbe->rxWriteDelay);
                igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
                               max_to_copy, &pktDataEvent,
                               packet->data + split_point, igbe->rxWriteDelay);
                desc->adv_wb.header_len = htole(split_point);
                // NOTE(review): sph is assigned without htole() here,
                // unlike every other writeback field -- presumably
                // harmless for a flag on little-endian hosts, but worth
                // confirming for consistency.
                desc->adv_wb.sph = 1;
                desc->adv_wb.pkt_len = htole((uint16_t)(max_to_copy));
            }
        } else {
            panic("Header split not fitting within header buffer or "
                  "undecodable packet not fitting in header unsupported\n");
        }
        break;
      default:
        panic("Unimplemnted RX receive buffer type: %d\n",
              igbe->regs.srrctl.desctype());
    }
    return bytesCopied;

}
1322
void
IGbE::RxDescCache::pktComplete()
{
    // The DMA write(s) of the received packet have finished: fill in the
    // descriptor writeback fields (length, status, errors, checksum),
    // post any receive interrupts, and retire the descriptor from the
    // unused to the used list so it can be written back to memory.
    assert(unusedCache.size());
    RxDesc *desc;
    desc = unusedCache.front();

    igbe->anBegin("RXS", "Update Desc");

    // Unless strip-ethernet-CRC (SECRC) is set, the length reported to
    // the driver includes the 4-byte FCS.
    uint16_t crcfixup = igbe->regs.rctl.secrc() ? 0 : 4 ;
    DPRINTF(EthernetDesc, "pktPtr->length: %d bytesCopied: %d "
            "stripcrc offset: %d value written: %d %d\n",
            pktPtr->length, bytesCopied, crcfixup,
            htole((uint16_t)(pktPtr->length + crcfixup)),
            (uint16_t)(pktPtr->length + crcfixup));

    // no support for anything but starting at 0
    assert(igbe->regs.rxcsum.pcss() == 0);

    DPRINTF(EthernetDesc, "Packet written to memory updating Descriptor\n");

    // Accumulate status/error/checksum values; written into the
    // type-specific descriptor layout in the switch below.
    uint16_t status = RXDS_DD;
    uint8_t err = 0;
    uint16_t ext_err = 0;
    uint16_t csum = 0;
    uint16_t ptype = 0;
    uint16_t ip_id = 0;

    // EOP is set only once the whole packet has been copied; a
    // header-split packet may span several descriptors.
    assert(bytesCopied <= pktPtr->length);
    if (bytesCopied == pktPtr->length)
        status |= RXDS_EOP;

    IpPtr ip(pktPtr);

    if (ip) {
        DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n", ip->id());
        ptype |= RXDP_IPV4;
        ip_id = ip->id();

        // IP receive-checksum offload: verify and flag errors.
        if (igbe->regs.rxcsum.ipofld()) {
            DPRINTF(EthernetDesc, "Checking IP checksum\n");
            status |= RXDS_IPCS;
            csum = htole(cksum(ip));
            igbe->rxIpChecksums++;
            if (cksum(ip) != 0) {
                err |= RXDE_IPE;
                ext_err |= RXDEE_IPE;
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
            }
        }
        // TCP/UDP receive-checksum offload (TUOFLD covers both).
        TcpPtr tcp(ip);
        if (tcp && igbe->regs.rxcsum.tuofld()) {
            DPRINTF(EthernetDesc, "Checking TCP checksum\n");
            status |= RXDS_TCPCS;
            ptype |= RXDP_TCP;
            csum = htole(cksum(tcp));
            igbe->rxTcpChecksums++;
            if (cksum(tcp) != 0) {
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
                err |= RXDE_TCPE;
                ext_err |= RXDEE_TCPE;
            }
        }

        UdpPtr udp(ip);
        if (udp && igbe->regs.rxcsum.tuofld()) {
            DPRINTF(EthernetDesc, "Checking UDP checksum\n");
            status |= RXDS_UDPCS;
            ptype |= RXDP_UDP;
            csum = htole(cksum(udp));
            igbe->rxUdpChecksums++;
            if (cksum(udp) != 0) {
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
                // NOTE(review): UDP checksum errors are reported with the
                // TCP error bits -- confirm against the 8254x manual
                // whether a distinct UDP error encoding exists.
                ext_err |= RXDEE_TCPE;
                err |= RXDE_TCPE;
            }
        }
    } else { // if ip
        DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n");
    }

    // Write the gathered values into the layout for the descriptor type
    // currently configured in SRRCTL.
    switch (igbe->regs.srrctl.desctype()) {
      case RXDT_LEGACY:
        desc->legacy.len = htole((uint16_t)(pktPtr->length + crcfixup));
        desc->legacy.status = htole(status);
        desc->legacy.errors = htole(err);
        // No vlan support at this point... just set it to 0
        desc->legacy.vlan = 0;
        break;
      case RXDT_ADV_SPLIT_A:
      case RXDT_ADV_ONEBUF:
        desc->adv_wb.rss_type = htole(0);
        desc->adv_wb.pkt_type = htole(ptype);
        if (igbe->regs.rxcsum.pcsd()) {
            // no rss support right now
            desc->adv_wb.rss_hash = htole(0);
        } else {
            desc->adv_wb.id = htole(ip_id);
            desc->adv_wb.csum = htole(csum);
        }
        desc->adv_wb.status = htole(status);
        desc->adv_wb.errors = htole(ext_err);
        // no vlan support
        desc->adv_wb.vlan_tag = htole(0);
        break;
      default:
        panic("Unimplemnted RX receive buffer type %d\n",
              igbe->regs.srrctl.desctype());
    }

    DPRINTF(EthernetDesc, "Descriptor complete w0: %#x w1: %#x\n",
            desc->adv_read.pkt, desc->adv_read.hdr);

    // Interrupt handling happens only once the full packet has landed.
    if (bytesCopied == pktPtr->length) {
        DPRINTF(EthernetDesc,
                "Packet completely written to descriptor buffers\n");
        // Deal with the rx timer interrupts
        if (igbe->regs.rdtr.delay()) {
            Tick delay = igbe->regs.rdtr.delay() * igbe->intClock();
            DPRINTF(EthernetSM, "RXS: Scheduling DTR for %d\n", delay);
            igbe->reschedule(igbe->rdtrEvent, curTick + delay);
        }

        if (igbe->regs.radv.idv()) {
            Tick delay = igbe->regs.radv.idv() * igbe->intClock();
            DPRINTF(EthernetSM, "RXS: Scheduling ADV for %d\n", delay);
            if (!igbe->radvEvent.scheduled()) {
                igbe->schedule(igbe->radvEvent, curTick + delay);
            }
        }

        // if neither radv or rdtr, maybe itr is set...
        if (!igbe->regs.rdtr.delay() && !igbe->regs.radv.idv()) {
            DPRINTF(EthernetSM,
                    "RXS: Receive interrupt delay disabled, posting IT_RXT\n");
            igbe->postInterrupt(IT_RXT);
        }

        // If the packet is small enough, interrupt appropriately
        // I wonder if this is delayed or not?!
        if (pktPtr->length <= igbe->regs.rsrpd.idv()) {
            DPRINTF(EthernetSM,
                    "RXS: Posting IT_SRPD beacuse small packet received\n");
            igbe->postInterrupt(IT_SRPD);
        }
        bytesCopied = 0;
    }

    // Clear per-packet state, wake the state machine and retire the
    // descriptor onto the used list for later writeback.
    pktPtr = NULL;
    igbe->checkDrain();
    enableSm();
    pktDone = true;

    igbe->anBegin("RXS", "Done Updating Desc");
    DPRINTF(EthernetDesc, "Processing of this descriptor complete\n");
    igbe->anDq("RXS", annUnusedCacheQ);
    unusedCache.pop_front();
    igbe->anQ("RXS", annUsedCacheQ);
    usedCache.push_back(desc);
}
1483
1484 void
1485 IGbE::RxDescCache::enableSm()
1486 {
1487 if (!igbe->drainEvent) {
1488 igbe->rxTick = true;
1489 igbe->restartClock();
1490 }
1491 }
1492
1493 bool
1494 IGbE::RxDescCache::packetDone()
1495 {
1496 if (pktDone) {
1497 pktDone = false;
1498 return true;
1499 }
1500 return false;
1501 }
1502
1503 bool
1504 IGbE::RxDescCache::hasOutstandingEvents()
1505 {
1506 return pktEvent.scheduled() || wbEvent.scheduled() ||
1507 fetchEvent.scheduled() || pktHdrEvent.scheduled() ||
1508 pktDataEvent.scheduled();
1509
1510 }
1511
void
IGbE::RxDescCache::serialize(std::ostream &os)
{
    // Checkpoint the RX-specific state on top of the base descriptor
    // cache state.
    DescCache<RxDesc>::serialize(os);
    SERIALIZE_SCALAR(pktDone);
    SERIALIZE_SCALAR(splitCount);
    SERIALIZE_SCALAR(bytesCopied);
}
1520
void
IGbE::RxDescCache::unserialize(Checkpoint *cp, const std::string &section)
{
    // Restore the RX-specific state; mirrors serialize() above.
    DescCache<RxDesc>::unserialize(cp, section);
    UNSERIALIZE_SCALAR(pktDone);
    UNSERIALIZE_SCALAR(splitCount);
    UNSERIALIZE_SCALAR(bytesCopied);
}
1529
1530
1531 ///////////////////////////// IGbE::TxDescCache //////////////////////////////
1532
1533 IGbE::TxDescCache::TxDescCache(IGbE *i, const std::string n, int s)
1534 : DescCache<TxDesc>(i,n, s), pktDone(false), isTcp(false),
1535 pktWaiting(false), completionAddress(0), completionEnabled(false),
1536 useTso(false), pktEvent(this), headerEvent(this), nullEvent(this)
1537 {
1538 annSmFetch = "TX Desc Fetch";
1539 annSmWb = "TX Desc Writeback";
1540 annUnusedDescQ = "TX Unused Descriptors";
1541 annUnusedCacheQ = "TX Unused Descriptor Cache";
1542 annUsedCacheQ = "TX Used Descriptor Cache";
1543 annUsedDescQ = "TX Used Descriptors";
1544 annDescQ = "TX Descriptors";
1545 }
1546
void
IGbE::TxDescCache::processContextDesc()
{
    // Consume any context descriptors at the head of the cache (they
    // carry offload parameters rather than packet data), latch the TSO
    // settings they describe, and start the TSO header fetch if needed.
    assert(unusedCache.size());
    TxDesc *desc;

    DPRINTF(EthernetDesc, "Checking and processing context descriptors\n");

    while (!useTso && unusedCache.size() &&
           TxdOp::isContext(unusedCache.front())) {
        DPRINTF(EthernetDesc, "Got context descriptor type...\n");

        desc = unusedCache.front();
        DPRINTF(EthernetDesc, "Descriptor upper: %#x lower: %#X\n",
                desc->d1, desc->d2);


        // is this going to be a tcp or udp packet?
        isTcp = TxdOp::tcp(desc) ? true : false;

        // setup all the TSO variables, they'll be ignored if we don't use
        // tso for this connection
        tsoHeaderLen = TxdOp::hdrlen(desc);
        tsoMss = TxdOp::mss(desc);

        // Legacy-style TSO: a TCP context descriptor with TSE set resets
        // all the per-connection segmentation state.
        if (TxdOp::isType(desc, TxdOp::TXD_CNXT) && TxdOp::tse(desc)) {
            DPRINTF(EthernetDesc, "TCP offload enabled for packet hdrlen: "
                    "%d mss: %d paylen %d\n", TxdOp::hdrlen(desc),
                    TxdOp::mss(desc), TxdOp::getLen(desc));
            useTso = true;
            tsoTotalLen = TxdOp::getLen(desc);
            tsoLoadedHeader = false;
            tsoDescBytesUsed = 0;
            tsoUsedLen = 0;
            tsoPrevSeq = 0;
            tsoPktHasHeader = false;
            tsoPkts = 0;

        }

        // Context descriptors complete immediately: mark done and retire.
        TxdOp::setDd(desc);
        unusedCache.pop_front();
        igbe->anDq("TXS", annUnusedCacheQ);
        usedCache.push_back(desc);
        igbe->anQ("TXS", annUsedCacheQ);
    }

    if (!unusedCache.size())
        return;

    // Advanced data descriptors can also enable TSO directly via TSE.
    desc = unusedCache.front();
    if (!useTso && TxdOp::isType(desc, TxdOp::TXD_ADVDATA) &&
        TxdOp::tse(desc)) {
        DPRINTF(EthernetDesc, "TCP offload(adv) enabled for packet "
                "hdrlen: %d mss: %d paylen %d\n",
                tsoHeaderLen, tsoMss, TxdOp::getTsoLen(desc));
        useTso = true;
        tsoTotalLen = TxdOp::getTsoLen(desc);
        tsoLoadedHeader = false;
        tsoDescBytesUsed = 0;
        tsoUsedLen = 0;
        tsoPrevSeq = 0;
        tsoPktHasHeader = false;
        tsoPkts = 0;
    }

    if (useTso && !tsoLoadedHeader) {
        // we need to fetch a header
        DPRINTF(EthernetDesc, "Starting DMA of TSO header\n");
        assert(TxdOp::isData(desc) && TxdOp::getLen(desc) >= tsoHeaderLen);
        pktWaiting = true;
        // tsoHeader is a fixed 256-byte buffer; the header must fit.
        assert(tsoHeaderLen <= 256);
        igbe->dmaRead(pciToDma(TxdOp::getBuf(desc)),
                      tsoHeaderLen, &headerEvent, tsoHeader, 0);
    }
}
1623
void
IGbE::TxDescCache::headerComplete()
{
    // Callback for the DMA read of the TSO header started in
    // processContextDesc().
    DPRINTF(EthernetDesc, "TSO: Fetching TSO header complete\n");
    pktWaiting = false;

    assert(unusedCache.size());
    TxDesc *desc = unusedCache.front();
    DPRINTF(EthernetDesc, "TSO: len: %d tsoHeaderLen: %d\n",
            TxdOp::getLen(desc), tsoHeaderLen);

    if (TxdOp::getLen(desc) == tsoHeaderLen) {
        // The descriptor held only the header; retire it and mark the
        // header as loaded.
        tsoDescBytesUsed = 0;
        tsoLoadedHeader = true;
        unusedCache.pop_front();
        usedCache.push_back(desc);
    } else {
        // I don't think this case happens, I think the header is always
        // its own packet, if it wasn't it might be as simple as just
        // incrementing descBytesUsed by the header length, but I'm not
        // completely sure
        panic("TSO header part of bigger packet, not implemented\n");
    }
    enableSm();
    igbe->checkDrain();
}
1650
/* Return the number of bytes the next transmit packet will need, or 0 if
 * no descriptor is available.  For TSO this is one segment's worth of
 * payload (bounded by the MSS and by what is left in the current
 * descriptor), plus the prepended header for a segment's first chunk.
 * Also updates tsoCopyBytes as a side effect, which getPacketData() uses.
 */
unsigned
IGbE::TxDescCache::getPacketSize(EthPacketPtr p)
{
    if (!unusedCache.size())
        return 0;

    DPRINTF(EthernetDesc, "Starting processing of descriptor\n");

    assert(!useTso || tsoLoadedHeader);
    TxDesc *desc = unusedCache.front();

    if (useTso) {
        DPRINTF(EthernetDesc, "getPacket(): TxDescriptor data "
                "d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
        DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %d mss: %d total: %d "
                "used: %d loaded hdr: %d\n", useTso, tsoHeaderLen, tsoMss,
                tsoTotalLen, tsoUsedLen, tsoLoadedHeader);
        DPRINTF(EthernetDesc, "TSO: descBytesUsed: %d copyBytes: %d "
                "this descLen: %d\n",
                tsoDescBytesUsed, tsoCopyBytes, TxdOp::getLen(desc));
        DPRINTF(EthernetDesc, "TSO: pktHasHeader: %d\n", tsoPktHasHeader);

        // Limit the copy to what remains of this segment and to what
        // remains of the current descriptor's buffer.
        if (tsoPktHasHeader)
            tsoCopyBytes = std::min((tsoMss + tsoHeaderLen) - p->length,
                                    TxdOp::getLen(desc) - tsoDescBytesUsed);
        else
            tsoCopyBytes = std::min(tsoMss,
                                    TxdOp::getLen(desc) - tsoDescBytesUsed);
        unsigned pkt_size =
            tsoCopyBytes + (tsoPktHasHeader ? 0 : tsoHeaderLen);
        DPRINTF(EthernetDesc, "TSO: Next packet is %d bytes\n", pkt_size);
        return pkt_size;
    }

    DPRINTF(EthernetDesc, "Next TX packet is %d bytes\n",
            TxdOp::getLen(unusedCache.front()));
    return TxdOp::getLen(desc);
}
1689
/* Start the DMA read that appends this descriptor's data to packet p.
 * pktComplete() runs when the DMA finishes.  For TSO the prefetched
 * header is first copied to the front of a fresh segment.
 */
void
IGbE::TxDescCache::getPacketData(EthPacketPtr p)
{
    assert(unusedCache.size());

    TxDesc *desc;
    desc = unusedCache.front();

    DPRINTF(EthernetDesc, "getPacketData(): TxDescriptor data "
            "d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
    assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) &&
           TxdOp::getLen(desc));

    pktPtr = p;

    pktWaiting = true;

    DPRINTF(EthernetDesc, "Starting DMA of packet at offset %d\n", p->length);

    if (useTso) {
        assert(tsoLoadedHeader);
        if (!tsoPktHasHeader) {
            // Each TSO segment begins with a copy of the prefetched
            // header.
            DPRINTF(EthernetDesc,
                    "Loading TSO header (%d bytes) into start of packet\n",
                    tsoHeaderLen);
            memcpy(p->data, &tsoHeader,tsoHeaderLen);
            p->length +=tsoHeaderLen;
            tsoPktHasHeader = true;
        }
    }

    if (useTso) {
        // NOTE(review): tsoDescBytesUsed is advanced BEFORE being used as
        // the DMA source offset, so the read starts tsoCopyBytes into the
        // buffer -- verify against getPacketSize()'s accounting that this
        // is the intended offset and not an off-by-one-chunk.
        tsoDescBytesUsed += tsoCopyBytes;
        assert(tsoDescBytesUsed <= TxdOp::getLen(desc));
        DPRINTF(EthernetDesc,
                "Starting DMA of packet at offset %d length: %d\n",
                p->length, tsoCopyBytes);
        igbe->dmaRead(pciToDma(TxdOp::getBuf(desc))
                      + tsoDescBytesUsed,
                      tsoCopyBytes, &pktEvent, p->data + p->length,
                      igbe->txReadDelay);
    } else {
        // Non-TSO: read the whole descriptor's buffer in one go.
        igbe->dmaRead(pciToDma(TxdOp::getBuf(desc)),
                      TxdOp::getLen(desc), &pktEvent, p->data + p->length,
                      igbe->txReadDelay);
    }
}
1737
void
IGbE::TxDescCache::pktComplete()
{
    // The DMA read of descriptor data into the packet has finished.
    // Either more descriptors are needed to complete the packet
    // (multi-descriptor or TSO mid-segment), or the packet is complete:
    // apply TSO header fixups and checksum offload, handle interrupt
    // timers, retire the descriptor and apply the writeback policy.

    TxDesc *desc;
    assert(unusedCache.size());
    assert(pktPtr);

    igbe->anBegin("TXS", "Update Desc");

    DPRINTF(EthernetDesc, "DMA of packet complete\n");


    desc = unusedCache.front();
    assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) &&
           TxdOp::getLen(desc));

    DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n",
            desc->d1, desc->d2);
    DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %d mss: %d total: %d "
            "used: %d loaded hdr: %d\n", useTso, tsoHeaderLen, tsoMss,
            tsoTotalLen, tsoUsedLen, tsoLoadedHeader);

    // Set the length of the data in the EtherPacket
    if (useTso) {
        pktPtr->length += tsoCopyBytes;
        tsoUsedLen += tsoCopyBytes;
    } else
        pktPtr->length += TxdOp::getLen(desc);

    DPRINTF(EthernetDesc, "TSO: descBytesUsed: %d copyBytes: %d\n",
            tsoDescBytesUsed, tsoCopyBytes);


    // Packet not yet complete: either a non-EOP descriptor (non-TSO), or
    // a TSO segment that still needs data from further descriptors.
    if ((!TxdOp::eop(desc) && !useTso) ||
        (pktPtr->length < ( tsoMss + tsoHeaderLen) &&
         tsoTotalLen != tsoUsedLen && useTso)) {
        assert(!useTso || (tsoDescBytesUsed == TxdOp::getLen(desc)));
        igbe->anDq("TXS", annUnusedCacheQ);
        unusedCache.pop_front();
        igbe->anQ("TXS", annUsedCacheQ);
        usedCache.push_back(desc);

        tsoDescBytesUsed = 0;
        pktDone = true;
        pktWaiting = false;
        pktMultiDesc = true;

        DPRINTF(EthernetDesc, "Partial Packet Descriptor of %d bytes Done\n",
                pktPtr->length);
        pktPtr = NULL;

        enableSm();
        igbe->checkDrain();
        return;
    }


    pktMultiDesc = false;
    // no support for vlans
    assert(!TxdOp::vle(desc));

    // we only support single packet descriptors at this point
    if (!useTso)
        assert(TxdOp::eop(desc));

    // set that this packet is done
    if (TxdOp::rs(desc))
        TxdOp::setDd(desc);

    DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n",
            desc->d1, desc->d2);

    if (useTso) {
        // Fix up the IP/TCP/UDP headers of this TSO segment so each
        // segment is a valid standalone packet: bump the IP id per
        // segment, patch lengths and advance the TCP sequence number.
        IpPtr ip(pktPtr);
        if (ip) {
            DPRINTF(EthernetDesc, "TSO: Modifying IP header. Id + %d\n",
                    tsoPkts);
            ip->id(ip->id() + tsoPkts++);
            ip->len(pktPtr->length - EthPtr(pktPtr)->size());

            TcpPtr tcp(ip);
            if (tcp) {
                DPRINTF(EthernetDesc,
                        "TSO: Modifying TCP header. old seq %d + %d\n",
                        tcp->seq(), tsoPrevSeq);
                tcp->seq(tcp->seq() + tsoPrevSeq);
                // FIN/PSH belong only on the final segment.
                if (tsoUsedLen != tsoTotalLen)
                    tcp->flags(tcp->flags() & ~9); // clear fin & psh
            }
            UdpPtr udp(ip);
            if (udp) {
                DPRINTF(EthernetDesc, "TSO: Modifying UDP header.\n");
                udp->len(pktPtr->length - EthPtr(pktPtr)->size());
            }
        }
        tsoPrevSeq = tsoUsedLen;
    }

    if (DTRACE(EthernetDesc)) {
        IpPtr ip(pktPtr);
        if (ip)
            DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n",
                    ip->id());
        else
            DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n");
    }

    // Checksums are only ofloaded for new descriptor types
    if (TxdOp::isData(desc) && ( TxdOp::ixsm(desc) || TxdOp::txsm(desc)) ) {
        DPRINTF(EthernetDesc, "Calculating checksums for packet\n");
        IpPtr ip(pktPtr);
        assert(ip);
        if (TxdOp::ixsm(desc)) {
            // IP checksum insertion offload.
            ip->sum(0);
            ip->sum(cksum(ip));
            igbe->txIpChecksums++;
            DPRINTF(EthernetDesc, "Calculated IP checksum\n");
        }
        if (TxdOp::txsm(desc)) {
            // TCP/UDP checksum insertion offload.
            TcpPtr tcp(ip);
            UdpPtr udp(ip);
            if (tcp) {
                tcp->sum(0);
                tcp->sum(cksum(tcp));
                igbe->txTcpChecksums++;
                DPRINTF(EthernetDesc, "Calculated TCP checksum\n");
            } else if (udp) {
                assert(udp);
                udp->sum(0);
                udp->sum(cksum(udp));
                igbe->txUdpChecksums++;
                DPRINTF(EthernetDesc, "Calculated UDP checksum\n");
            } else {
                panic("Told to checksum, but don't know how\n");
            }
        }
    }

    if (TxdOp::ide(desc)) {
        // Deal with the rx timer interrupts
        DPRINTF(EthernetDesc, "Descriptor had IDE set\n");
        if (igbe->regs.tidv.idv()) {
            Tick delay = igbe->regs.tidv.idv() * igbe->intClock();
            DPRINTF(EthernetDesc, "setting tidv\n");
            igbe->reschedule(igbe->tidvEvent, curTick + delay, true);
        }

        // NOTE(review): the absolute timer (tadv) is armed only when the
        // packet timer (tidv) is also non-zero -- confirm this matches
        // the intended TADV semantics in the 8254x manual.
        if (igbe->regs.tadv.idv() && igbe->regs.tidv.idv()) {
            Tick delay = igbe->regs.tadv.idv() * igbe->intClock();
            DPRINTF(EthernetDesc, "setting tadv\n");
            if (!igbe->tadvEvent.scheduled()) {
                igbe->schedule(igbe->tadvEvent, curTick + delay);
            }
        }
    }


    // Retire the descriptor once its buffer is fully consumed (non-TSO
    // descriptors always are at this point).
    if (!useTso || TxdOp::getLen(desc) == tsoDescBytesUsed) {
        DPRINTF(EthernetDesc, "Descriptor Done\n");
        igbe->anDq("TXS", annUnusedCacheQ);
        unusedCache.pop_front();
        igbe->anQ("TXS", annUsedCacheQ);
        usedCache.push_back(desc);
        tsoDescBytesUsed = 0;
    }

    // All segments sent: leave TSO mode.
    if (useTso && tsoUsedLen == tsoTotalLen)
        useTso = false;


    DPRINTF(EthernetDesc,
            "------Packet of %d bytes ready for transmission-------\n",
            pktPtr->length);
    pktDone = true;
    pktWaiting = false;
    pktPtr = NULL;
    tsoPktHasHeader = false;

    // Descriptor writeback policy driven by TXDCTL's WTHRESH field.
    // NOTE(review): the last two branches issue the same writeback and
    // differ only in using descInBlock() vs. the raw cache size depending
    // on the granularity bit -- confirm the duplication is intentional.
    if (igbe->regs.txdctl.wthresh() == 0) {
        igbe->anBegin("TXS", "Desc Writeback");
        DPRINTF(EthernetDesc, "WTHRESH == 0, writing back descriptor\n");
        writeback(0);
    } else if (igbe->regs.txdctl.gran() && igbe->regs.txdctl.wthresh() >=
               descInBlock(usedCache.size())) {
        DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n");
        igbe->anBegin("TXS", "Desc Writeback");
        writeback((igbe->cacheBlockSize()-1)>>4);
    } else if (igbe->regs.txdctl.wthresh() >= usedCache.size()) {
        DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n");
        igbe->anBegin("TXS", "Desc Writeback");
        writeback((igbe->cacheBlockSize()-1)>>4);
    }

    enableSm();
    igbe->checkDrain();
}
1935
1936 void
1937 IGbE::TxDescCache::actionAfterWb()
1938 {
1939 DPRINTF(EthernetDesc, "actionAfterWb() completionEnabled: %d\n",
1940 completionEnabled);
1941 igbe->postInterrupt(iGbReg::IT_TXDW);
1942 if (completionEnabled) {
1943 descEnd = igbe->regs.tdh();
1944 DPRINTF(EthernetDesc,
1945 "Completion writing back value: %d to addr: %#x\n", descEnd,
1946 completionAddress);
1947 igbe->dmaWrite(pciToDma(mbits(completionAddress, 63, 2)),
1948 sizeof(descEnd), &nullEvent, (uint8_t*)&descEnd, 0);
1949 }
1950 }
1951
1952 void
1953 IGbE::TxDescCache::serialize(std::ostream &os)
1954 {
1955 DescCache<TxDesc>::serialize(os);
1956 SERIALIZE_SCALAR(pktDone);
1957 SERIALIZE_SCALAR(isTcp);
1958 SERIALIZE_SCALAR(pktWaiting);
1959 SERIALIZE_SCALAR(pktMultiDesc);
1960
1961 SERIALIZE_SCALAR(useTso);
1962 SERIALIZE_SCALAR(tsoHeaderLen);
1963 SERIALIZE_SCALAR(tsoMss);
1964 SERIALIZE_SCALAR(tsoTotalLen);
1965 SERIALIZE_SCALAR(tsoUsedLen);
1966 SERIALIZE_SCALAR(tsoPrevSeq);;
1967 SERIALIZE_SCALAR(tsoPktPayloadBytes);
1968 SERIALIZE_SCALAR(tsoLoadedHeader);
1969 SERIALIZE_SCALAR(tsoPktHasHeader);
1970 SERIALIZE_ARRAY(tsoHeader, 256);
1971 SERIALIZE_SCALAR(tsoDescBytesUsed);
1972 SERIALIZE_SCALAR(tsoCopyBytes);
1973 SERIALIZE_SCALAR(tsoPkts);
1974
1975 SERIALIZE_SCALAR(completionAddress);
1976 SERIALIZE_SCALAR(completionEnabled);
1977 SERIALIZE_SCALAR(descEnd);
1978 }
1979
1980 void
1981 IGbE::TxDescCache::unserialize(Checkpoint *cp, const std::string &section)
1982 {
1983 DescCache<TxDesc>::unserialize(cp, section);
1984 UNSERIALIZE_SCALAR(pktDone);
1985 UNSERIALIZE_SCALAR(isTcp);
1986 UNSERIALIZE_SCALAR(pktWaiting);
1987 UNSERIALIZE_SCALAR(pktMultiDesc);
1988
1989 UNSERIALIZE_SCALAR(useTso);
1990 UNSERIALIZE_SCALAR(tsoHeaderLen);
1991 UNSERIALIZE_SCALAR(tsoMss);
1992 UNSERIALIZE_SCALAR(tsoTotalLen);
1993 UNSERIALIZE_SCALAR(tsoUsedLen);
1994 UNSERIALIZE_SCALAR(tsoPrevSeq);;
1995 UNSERIALIZE_SCALAR(tsoPktPayloadBytes);
1996 UNSERIALIZE_SCALAR(tsoLoadedHeader);
1997 UNSERIALIZE_SCALAR(tsoPktHasHeader);
1998 UNSERIALIZE_ARRAY(tsoHeader, 256);
1999 UNSERIALIZE_SCALAR(tsoDescBytesUsed);
2000 UNSERIALIZE_SCALAR(tsoCopyBytes);
2001 UNSERIALIZE_SCALAR(tsoPkts);
2002
2003 UNSERIALIZE_SCALAR(completionAddress);
2004 UNSERIALIZE_SCALAR(completionEnabled);
2005 UNSERIALIZE_SCALAR(descEnd);
2006 }
2007
2008 bool
2009 IGbE::TxDescCache::packetAvailable()
2010 {
2011 if (pktDone) {
2012 pktDone = false;
2013 return true;
2014 }
2015 return false;
2016 }
2017
2018 void
2019 IGbE::TxDescCache::enableSm()
2020 {
2021 if (!igbe->drainEvent) {
2022 igbe->txTick = true;
2023 igbe->restartClock();
2024 }
2025 }
2026
2027 bool
2028 IGbE::TxDescCache::hasOutstandingEvents()
2029 {
2030 return pktEvent.scheduled() || wbEvent.scheduled() ||
2031 fetchEvent.scheduled();
2032 }
2033
2034
2035 ///////////////////////////////////// IGbE /////////////////////////////////
2036
2037 void
2038 IGbE::restartClock()
2039 {
2040 if (!tickEvent.scheduled() && (rxTick || txTick || txFifoTick) &&
2041 getState() == SimObject::Running)
2042 schedule(tickEvent, (curTick / ticks(1)) * ticks(1) + ticks(1));
2043 }
2044
2045 unsigned int
2046 IGbE::drain(Event *de)
2047 {
2048 unsigned int count;
2049 count = pioPort->drain(de) + dmaPort->drain(de);
2050 if (rxDescCache.hasOutstandingEvents() ||
2051 txDescCache.hasOutstandingEvents()) {
2052 count++;
2053 drainEvent = de;
2054 }
2055
2056 txFifoTick = false;
2057 txTick = false;
2058 rxTick = false;
2059
2060 if (tickEvent.scheduled())
2061 deschedule(tickEvent);
2062
2063 if (count)
2064 changeState(Draining);
2065 else
2066 changeState(Drained);
2067
2068 DPRINTF(EthernetSM, "got drain() returning %d", count);
2069 return count;
2070 }
2071
2072 void
2073 IGbE::resume()
2074 {
2075 SimObject::resume();
2076
2077 txFifoTick = true;
2078 txTick = true;
2079 rxTick = true;
2080
2081 restartClock();
2082 DPRINTF(EthernetSM, "resuming from drain");
2083 }
2084
2085 void
2086 IGbE::checkDrain()
2087 {
2088 if (!drainEvent)
2089 return;
2090
2091 DPRINTF(EthernetSM, "checkDrain() in drain\n");
2092 txFifoTick = false;
2093 txTick = false;
2094 rxTick = false;
2095 if (!rxDescCache.hasOutstandingEvents() &&
2096 !txDescCache.hasOutstandingEvents()) {
2097 drainEvent->process();
2098 drainEvent = NULL;
2099 }
2100 }
2101
void
IGbE::txStateMachine()
{
    // One step of the transmit state machine: either move a finished
    // packet into the TX FIFO, or fetch/process descriptors and start
    // DMA of the next packet.  Clearing txTick stops further ticks until
    // some event re-enables the machine.
    if (!regs.tctl.en()) {
        txTick = false;
        DPRINTF(EthernetSM, "TXS: TX disabled, stopping ticking\n");
        return;
    }

    // If we have a packet available and it's length is not 0 (meaning it's not
    // a multidescriptor packet) put it in the fifo, otherwise an the next
    // iteration we'll get the rest of the data
    if (txPacket && txDescCache.packetAvailable()
        && !txDescCache.packetMultiDesc() && txPacket->length) {
        bool success;

        anQ("TXS", "TX FIFO Q");
        DPRINTF(EthernetSM, "TXS: packet placed in TX FIFO\n");
        success = txFifo.push(txPacket);
        // Keep the FIFO draining unless a device drain is in progress.
        txFifoTick = true && !drainEvent;
        assert(success);
        txPacket = NULL;
        anBegin("TXS", "Desc Writeback");
        txDescCache.writeback((cacheBlockSize()-1)>>4);
        return;
    }

    // Only support descriptor granularity
    if (regs.txdctl.lwthresh() &&
        txDescCache.descLeft() < (regs.txdctl.lwthresh() * 8)) {
        DPRINTF(EthernetSM, "TXS: LWTHRESH caused posting of TXDLOW\n");
        postInterrupt(IT_TXDLOW);
    }

    // Allocate a maximum-sized buffer for the packet being assembled.
    if (!txPacket) {
        txPacket = new EthPacketData(16384);
    }

    if (!txDescCache.packetWaiting()) {
        // Ring empty: force a writeback, start a fetch, raise TXQE and
        // stop ticking until descriptors arrive.
        if (txDescCache.descLeft() == 0) {
            postInterrupt(IT_TXQE);
            anBegin("TXS", "Desc Writeback");
            txDescCache.writeback(0);
            anBegin("TXS", "Desc Fetch");
            anWe("TXS", txDescCache.annUnusedCacheQ);
            txDescCache.fetchDescriptors();
            DPRINTF(EthernetSM, "TXS: No descriptors left in ring, forcing "
                    "writeback stopping ticking and posting TXQE\n");
            txTick = false;
            return;
        }


        // Cache empty: fetch more descriptors and wait for them.
        if (!(txDescCache.descUnused())) {
            anBegin("TXS", "Desc Fetch");
            txDescCache.fetchDescriptors();
            anWe("TXS", txDescCache.annUnusedCacheQ);
            DPRINTF(EthernetSM, "TXS: No descriptors available in cache, "
                    "fetching and stopping ticking\n");
            txTick = false;
            return;
        }
        anPq("TXS", txDescCache.annUnusedCacheQ);


        // Consume any context descriptors; this may start a TSO header
        // fetch, in which case we wait for it to complete.
        txDescCache.processContextDesc();
        if (txDescCache.packetWaiting()) {
            DPRINTF(EthernetSM,
                    "TXS: Fetching TSO header, stopping ticking\n");
            txTick = false;
            return;
        }

        // Reserve FIFO space and start DMA of the next packet's data.
        unsigned size = txDescCache.getPacketSize(txPacket);
        if (size > 0 && txFifo.avail() > size) {
            anRq("TXS", "TX FIFO Q");
            anBegin("TXS", "DMA Packet");
            DPRINTF(EthernetSM, "TXS: Reserving %d bytes in FIFO and "
                    "beginning DMA of next packet\n", size);
            txFifo.reserve(size);
            txDescCache.getPacketData(txPacket);
        } else if (size == 0) {
            DPRINTF(EthernetSM, "TXS: getPacketSize returned: %d\n", size);
            DPRINTF(EthernetSM,
                    "TXS: No packets to get, writing back used descriptors\n");
            anBegin("TXS", "Desc Writeback");
            txDescCache.writeback(0);
        } else {
            anWf("TXS", "TX FIFO Q");
            DPRINTF(EthernetSM, "TXS: FIFO full, stopping ticking until space "
                    "available in FIFO\n");
            txTick = false;
        }


        return;
    }
    DPRINTF(EthernetSM, "TXS: Nothing to do, stopping ticking\n");
    txTick = false;
}
2202
2203 bool
2204 IGbE::ethRxPkt(EthPacketPtr pkt)
2205 {
2206 rxBytes += pkt->length;
2207 rxPackets++;
2208
2209 DPRINTF(Ethernet, "RxFIFO: Receiving pcakte from wire\n");
2210 anBegin("RXQ", "Wire Recv");
2211
2212
2213 if (!regs.rctl.en()) {
2214 DPRINTF(Ethernet, "RxFIFO: RX not enabled, dropping\n");
2215 anBegin("RXQ", "FIFO Drop", CPA::FL_BAD);
2216 return true;
2217 }
2218
2219 // restart the state machines if they are stopped
2220 rxTick = true && !drainEvent;
2221 if ((rxTick || txTick) && !tickEvent.scheduled()) {
2222 DPRINTF(EthernetSM,
2223 "RXS: received packet into fifo, starting ticking\n");
2224 restartClock();
2225 }
2226
2227 if (!rxFifo.push(pkt)) {
2228 DPRINTF(Ethernet, "RxFIFO: Packet won't fit in fifo... dropped\n");
2229 postInterrupt(IT_RXO, true);
2230 anBegin("RXQ", "FIFO Drop", CPA::FL_BAD);
2231 return false;
2232 }
2233
2234 if (CPA::available() && cpa->enabled()) {
2235 assert(sys->numSystemsRunning <= 2);
2236 System *other_sys;
2237 if (sys->systemList[0] == sys)
2238 other_sys = sys->systemList[1];
2239 else
2240 other_sys = sys->systemList[0];
2241
2242 cpa->hwDq(CPA::FL_NONE, sys, macAddr, "RXQ", "WireQ", 0, other_sys);
2243 anQ("RXQ", "RX FIFO Q");
2244 cpa->hwWe(CPA::FL_NONE, sys, macAddr, "RXQ", "WireQ", 0, other_sys);
2245 }
2246
2247 return true;
2248 }
2249
2250
void
IGbE::rxStateMachine()
{
    // RX side of the device state machine: moves packets from the RX
    // FIFO into memory via the descriptor cache, manages descriptor
    // fetch/writeback thresholds, and posts RX interrupts.
    if (!regs.rctl.en()) {
        rxTick = false;
        DPRINTF(EthernetSM, "RXS: RX disabled, stopping ticking\n");
        return;
    }

    // If the packet is done check for interrupts/descriptors/etc
    if (rxDescCache.packetDone()) {
        rxDmaPacket = false;
        DPRINTF(EthernetSM, "RXS: Packet completed DMA to memory\n");
        int descLeft = rxDescCache.descLeft();
        DPRINTF(EthernetSM, "RXS: descLeft: %d rdmts: %d rdlen: %d\n",
                descLeft, regs.rctl.rdmts(), regs.rdlen());
        // RCTL.RDMTS selects the minimum-threshold fraction of the ring
        // (1/8, 1/4, 1/2).  The cases deliberately fall through: each
        // case only breaks out early when there are still enough free
        // descriptors; otherwise execution reaches the interrupt post.
        switch (regs.rctl.rdmts()) {
            case 2: if (descLeft > .125 * regs.rdlen()) break;
            case 1: if (descLeft > .250 * regs.rdlen()) break;
            case 0: if (descLeft > .500 * regs.rdlen()) break;
                DPRINTF(Ethernet, "RXS: Interrupting (RXDMT) "
                        "because of descriptors left\n");
                postInterrupt(IT_RXDMT);
                break;
        }

        // No more packets queued: flush any completed descriptors now.
        if (rxFifo.empty())
            rxDescCache.writeback(0);

        if (descLeft == 0) {
            anBegin("RXS", "Writeback Descriptors");
            rxDescCache.writeback(0);
            DPRINTF(EthernetSM, "RXS: No descriptors left in ring, forcing"
                    " writeback and stopping ticking\n");
            rxTick = false;
        }

        // only support descriptor granularities
        assert(regs.rxdctl.gran());

        // WTHRESH reached: write back used descriptors, but never more
        // than fits in one cache block (each descriptor is 16 bytes,
        // hence the >>4 shifts).
        if (regs.rxdctl.wthresh() >= rxDescCache.descUsed()) {
            DPRINTF(EthernetSM,
                    "RXS: Writing back because WTHRESH >= descUsed\n");
            anBegin("RXS", "Writeback Descriptors");
            if (regs.rxdctl.wthresh() < (cacheBlockSize()>>4))
                rxDescCache.writeback(regs.rxdctl.wthresh()-1);
            else
                rxDescCache.writeback((cacheBlockSize()-1)>>4);
        }

        // Prefetch more descriptors when the cache is below PTHRESH and
        // the host has made more than HTHRESH descriptors available.
        if ((rxDescCache.descUnused() < regs.rxdctl.pthresh()) &&
            ((rxDescCache.descLeft() - rxDescCache.descUnused()) >
             regs.rxdctl.hthresh())) {
            DPRINTF(EthernetSM, "RXS: Fetching descriptors because "
                    "descUnused < PTHRESH\n");
            anBegin("RXS", "Fetch Descriptors");
            rxDescCache.fetchDescriptors();
        }

        if (rxDescCache.descUnused() == 0) {
            anBegin("RXS", "Fetch Descriptors");
            rxDescCache.fetchDescriptors();
            anWe("RXS", rxDescCache.annUnusedCacheQ);
            DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
                    "fetching descriptors and stopping ticking\n");
            rxTick = false;
        }
        return;
    }

    // A packet DMA is in flight; wait for its completion callback.
    if (rxDmaPacket) {
        DPRINTF(EthernetSM,
                "RXS: stopping ticking until packet DMA completes\n");
        rxTick = false;
        return;
    }

    // Can't start a new packet without a free descriptor in the cache.
    if (!rxDescCache.descUnused()) {
        anBegin("RXS", "Fetch Descriptors");
        rxDescCache.fetchDescriptors();
        anWe("RXS", rxDescCache.annUnusedCacheQ);
        DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
                "stopping ticking\n");
        rxTick = false;
        DPRINTF(EthernetSM, "RXS: No descriptors available, fetching\n");
        return;
    }
    anPq("RXS", rxDescCache.annUnusedCacheQ);

    if (rxFifo.empty()) {
        anWe("RXS", "RX FIFO Q");
        DPRINTF(EthernetSM, "RXS: RxFIFO empty, stopping ticking\n");
        rxTick = false;
        return;
    }
    anPq("RXS", "RX FIFO Q");
    anBegin("RXS", "Get Desc");

    EthPacketPtr pkt;
    pkt = rxFifo.front();

    // Write the next chunk of the packet; pktOffset tracks progress for
    // packets spanning multiple descriptors, and only reaches
    // pkt->length once the whole packet has been handed to the cache.
    pktOffset = rxDescCache.writePacket(pkt, pktOffset);
    DPRINTF(EthernetSM, "RXS: Writing packet into memory\n");
    if (pktOffset == pkt->length) {
        anBegin( "RXS", "FIFO Dequeue");
        DPRINTF(EthernetSM, "RXS: Removing packet from FIFO\n");
        pktOffset = 0;
        anDq("RXS", "RX FIFO Q");
        rxFifo.pop();
    }

    // Sleep until packetDone() flags completion on a later tick.
    DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n");
    rxTick = false;
    rxDmaPacket = true;
    anBegin("RXS", "DMA Packet");
}
2368
2369 void
2370 IGbE::txWire()
2371 {
2372 if (txFifo.empty()) {
2373 anWe("TXQ", "TX FIFO Q");
2374 txFifoTick = false;
2375 return;
2376 }
2377
2378
2379 anPq("TXQ", "TX FIFO Q");
2380 if (etherInt->sendPacket(txFifo.front())) {
2381 cpa->hwQ(CPA::FL_NONE, sys, macAddr, "TXQ", "WireQ", 0);
2382 if (DTRACE(EthernetSM)) {
2383 IpPtr ip(txFifo.front());
2384 if (ip)
2385 DPRINTF(EthernetSM, "Transmitting Ip packet with Id=%d\n",
2386 ip->id());
2387 else
2388 DPRINTF(EthernetSM, "Transmitting Non-Ip packet\n");
2389 }
2390 anDq("TXQ", "TX FIFO Q");
2391 anBegin("TXQ", "Wire Send");
2392 DPRINTF(EthernetSM,
2393 "TxFIFO: Successful transmit, bytes available in fifo: %d\n",
2394 txFifo.avail());
2395
2396 txBytes += txFifo.front()->length;
2397 txPackets++;
2398 txFifoTick = false;
2399
2400 txFifo.pop();
2401 } else {
2402 // We'll get woken up when the packet ethTxDone() gets called
2403 txFifoTick = false;
2404 }
2405 }
2406
2407 void
2408 IGbE::tick()
2409 {
2410 DPRINTF(EthernetSM, "IGbE: -------------- Cycle --------------\n");
2411
2412 if (rxTick)
2413 rxStateMachine();
2414
2415 if (txTick)
2416 txStateMachine();
2417
2418 if (txFifoTick)
2419 txWire();
2420
2421
2422 if (rxTick || txTick || txFifoTick)
2423 schedule(tickEvent, curTick + ticks(1));
2424 }
2425
2426 void
2427 IGbE::ethTxDone()
2428 {
2429 anBegin("TXQ", "Send Done");
2430 // restart the tx state machines if they are stopped
2431 // fifo to send another packet
2432 // tx sm to put more data into the fifo
2433 txFifoTick = true && !drainEvent;
2434 if (txDescCache.descLeft() != 0 && !drainEvent)
2435 txTick = true;
2436
2437 restartClock();
2438 txWire();
2439 DPRINTF(EthernetSM, "TxFIFO: Transmission complete\n");
2440 }
2441
void
IGbE::serialize(std::ostream &os)
{
    // Checkpoint the device state.  The order of the SERIALIZE_* calls
    // defines the checkpoint format and must match unserialize() below.
    PciDev::serialize(os);

    regs.serialize(os);
    SERIALIZE_SCALAR(eeOpBits);
    SERIALIZE_SCALAR(eeAddrBits);
    SERIALIZE_SCALAR(eeDataBits);
    SERIALIZE_SCALAR(eeOpcode);
    SERIALIZE_SCALAR(eeAddr);
    SERIALIZE_SCALAR(lastInterrupt);
    SERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);

    rxFifo.serialize("rxfifo", os);
    txFifo.serialize("txfifo", os);

    // txPacket may be null; record its existence so unserialize knows
    // whether to reconstruct it.
    bool txPktExists = txPacket;
    SERIALIZE_SCALAR(txPktExists);
    if (txPktExists)
        txPacket->serialize("txpacket", os);

    // Pending events are saved as their absolute scheduled ticks; an
    // unscheduled event is recorded as 0 (which is never rescheduled).
    Tick rdtr_time = 0, radv_time = 0, tidv_time = 0, tadv_time = 0,
        inter_time = 0;

    if (rdtrEvent.scheduled())
        rdtr_time = rdtrEvent.when();
    SERIALIZE_SCALAR(rdtr_time);

    if (radvEvent.scheduled())
        radv_time = radvEvent.when();
    SERIALIZE_SCALAR(radv_time);

    if (tidvEvent.scheduled())
        tidv_time = tidvEvent.when();
    SERIALIZE_SCALAR(tidv_time);

    if (tadvEvent.scheduled())
        tadv_time = tadvEvent.when();
    SERIALIZE_SCALAR(tadv_time);

    if (interEvent.scheduled())
        inter_time = interEvent.when();
    SERIALIZE_SCALAR(inter_time);

    SERIALIZE_SCALAR(pktOffset);

    // Descriptor caches serialize into their own named sections.
    nameOut(os, csprintf("%s.TxDescCache", name()));
    txDescCache.serialize(os);

    nameOut(os, csprintf("%s.RxDescCache", name()));
    rxDescCache.serialize(os);
}
2495
void
IGbE::unserialize(Checkpoint *cp, const std::string &section)
{
    // Restore device state from a checkpoint; must mirror serialize()
    // above exactly, field for field and in the same order.
    PciDev::unserialize(cp, section);

    regs.unserialize(cp, section);
    UNSERIALIZE_SCALAR(eeOpBits);
    UNSERIALIZE_SCALAR(eeAddrBits);
    UNSERIALIZE_SCALAR(eeDataBits);
    UNSERIALIZE_SCALAR(eeOpcode);
    UNSERIALIZE_SCALAR(eeAddr);
    UNSERIALIZE_SCALAR(lastInterrupt);
    UNSERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);

    rxFifo.unserialize("rxfifo", cp, section);
    txFifo.unserialize("txfifo", cp, section);

    // Rebuild the in-flight TX packet only if one existed at
    // checkpoint time (16384-byte buffer matches the allocation used
    // elsewhere in this device -- verify if that size ever changes).
    bool txPktExists;
    UNSERIALIZE_SCALAR(txPktExists);
    if (txPktExists) {
        txPacket = new EthPacketData(16384);
        txPacket->unserialize("txpacket", cp, section);
    }

    // Conservatively re-enable all the state machines; they will shut
    // themselves off again on the first tick if there is nothing to do.
    rxTick = true;
    txTick = true;
    txFifoTick = true;

    // Re-schedule any event whose saved tick is non-zero (0 means it
    // was not scheduled when the checkpoint was taken).
    Tick rdtr_time, radv_time, tidv_time, tadv_time, inter_time;
    UNSERIALIZE_SCALAR(rdtr_time);
    UNSERIALIZE_SCALAR(radv_time);
    UNSERIALIZE_SCALAR(tidv_time);
    UNSERIALIZE_SCALAR(tadv_time);
    UNSERIALIZE_SCALAR(inter_time);

    if (rdtr_time)
        schedule(rdtrEvent, rdtr_time);

    if (radv_time)
        schedule(radvEvent, radv_time);

    if (tidv_time)
        schedule(tidvEvent, tidv_time);

    if (tadv_time)
        schedule(tadvEvent, tadv_time);

    if (inter_time)
        schedule(interEvent, inter_time);

    UNSERIALIZE_SCALAR(pktOffset);

    txDescCache.unserialize(cp, csprintf("%s.TxDescCache", section));

    rxDescCache.unserialize(cp, csprintf("%s.RxDescCache", section));
}
2552
2553 IGbE *
2554 IGbEParams::create()
2555 {
2556 return new IGbE(this);
2557 }