Fix: Address a few benign memory leaks
[gem5.git] / src / dev / i8254xGBe.cc
1 /*
2 * Copyright (c) 2006 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Ali Saidi
29 */
30
31 /* @file
32 * Device model for Intel's 8254x line of gigabit ethernet controllers.
33 * In particular an 82547 revision 2 (82547GI) MAC because it seems to have the
34 * fewest workarounds in the driver. It will probably work with most of the
35 * other MACs with slight modifications.
36 */
37
38
39 /*
40 * @todo really there are multiple dma engines.. we should implement them.
41 */
42
43 #include <algorithm>
44
45 #include "base/inet.hh"
46 #include "base/trace.hh"
47 #include "debug/EthernetAll.hh"
48 #include "dev/i8254xGBe.hh"
49 #include "mem/packet.hh"
50 #include "mem/packet_access.hh"
51 #include "params/IGbE.hh"
52 #include "sim/stats.hh"
53 #include "sim/system.hh"
54
55 using namespace iGbReg;
56 using namespace Net;
57
IGbE::IGbE(const Params *p)
    : EtherDevice(p), etherInt(NULL), drainEvent(NULL),
      useFlowControl(p->use_flow_control),
      rxFifo(p->rx_fifo_size), txFifo(p->tx_fifo_size), rxTick(false),
      txTick(false), txFifoTick(false), rxDmaPacket(false), pktOffset(0),
      fetchDelay(p->fetch_delay), wbDelay(p->wb_delay),
      fetchCompDelay(p->fetch_comp_delay), wbCompDelay(p->wb_comp_delay),
      rxWriteDelay(p->rx_write_delay), txReadDelay(p->tx_read_delay),
      rdtrEvent(this), radvEvent(this),
      tadvEvent(this), tidvEvent(this), tickEvent(this), interEvent(this),
      rxDescCache(this, name()+".RxDesc", p->rx_desc_cache_size),
      txDescCache(this, name()+".TxDesc", p->tx_desc_cache_size),
      clock(p->clock), lastInterrupt(0)
{
    // Owned by this device; released in ~IGbE().
    etherInt = new IGbEInt(name() + ".int", this);

    // Initialized internal registers per Intel documentation
    // All registers intialized to 0 by per register constructor
    regs.ctrl.fd(1);
    regs.ctrl.lrst(1);
    regs.ctrl.speed(2);
    regs.ctrl.frcspd(1);
    regs.sts.speed(3); // Say we're 1000Mbps
    regs.sts.fd(1); // full duplex
    regs.sts.lu(1); // link up
    regs.eecd.fwe(1);
    regs.eecd.ee_type(1);
    regs.imr = 0;
    regs.iam = 0;
    regs.rxdctl.gran(1);
    regs.rxdctl.wthresh(1);
    regs.fcrth(1);
    regs.tdwba = 0;
    regs.rlpml = 0;
    regs.sw_fw_sync = 0;

    regs.pba.rxa(0x30);
    regs.pba.txa(0x10);

    // Bit-serial EEPROM interface state machine starts idle.
    eeOpBits = 0;
    eeAddrBits = 0;
    eeDataBits = 0;
    eeOpcode = 0;

    // clear all 64 16 bit words of the eeprom
    memset(&flash, 0, EEPROM_SIZE*2);

    // Set the MAC address
    memcpy(flash, p->hardware_address.bytes(), ETH_ADDR_LEN);
    // EEPROM words are stored big-endian, so swap the copied MAC bytes.
    for (int x = 0; x < ETH_ADDR_LEN/2; x++)
        flash[x] = htobe(flash[x]);

    uint16_t csum = 0;
    for (int x = 0; x < EEPROM_SIZE; x++)
        csum += htobe(flash[x]);


    // Magic happy checksum value: the final word is chosen so that the
    // sum of all EEPROM words equals the value drivers verify.
    flash[EEPROM_SIZE-1] = htobe((uint16_t)(EEPROM_CSUM - csum));

    // Store the MAC address as queue ID
    macAddr = p->hardware_address;

    rxFifo.clear();
    txFifo.clear();
}
124
// Free the interface object allocated in the constructor (part of the
// "benign memory leak" cleanup this change addresses).
IGbE::~IGbE()
{
    delete etherInt;
}
129
// Late initialization: cache the CPA annotation singleton, then let the
// PCI base class finish its own setup.
void
IGbE::init()
{
    cpa = CPA::cpa();
    PciDev::init();
}
136
137 EtherInt*
138 IGbE::getEthPort(const std::string &if_name, int idx)
139 {
140
141 if (if_name == "interface") {
142 if (etherInt->getPeer())
143 panic("Port already connected to\n");
144 return etherInt;
145 }
146 return NULL;
147 }
148
149 Tick
150 IGbE::writeConfig(PacketPtr pkt)
151 {
152 int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
153 if (offset < PCI_DEVICE_SPECIFIC)
154 PciDev::writeConfig(pkt);
155 else
156 panic("Device specific PCI config space not implemented.\n");
157
158 //
159 // Some work may need to be done here based for the pci COMMAND bits.
160 //
161
162 return pioDelay;
163 }
164
165 // Handy macro for range-testing register access addresses
166 #define IN_RANGE(val, base, len) (val >= base && val < (base + len))
167
// Handle a 32-bit MMIO read of a device register (BAR 0 only). Most
// registers simply return their stored value; a few (ICR, RDTR, SWSM)
// have read side effects mandated by the 8254x programming model.
Tick
IGbE::read(PacketPtr pkt)
{
    int bar;
    Addr daddr;

    if (!getBAR(pkt->getAddr(), bar, daddr))
        panic("Invalid PCI memory access to unmapped memory.\n");

    // Only Memory register BAR is allowed
    assert(bar == 0);

    // Only 32bit accesses allowed
    assert(pkt->getSize() == 4);

    DPRINTF(Ethernet, "Read device register %#X\n", daddr);

    pkt->allocate();

    //
    // Handle read of register here
    //


    switch (daddr) {
      case REG_CTRL:
        pkt->set<uint32_t>(regs.ctrl());
        break;
      case REG_STATUS:
        pkt->set<uint32_t>(regs.sts());
        break;
      case REG_EECD:
        pkt->set<uint32_t>(regs.eecd());
        break;
      case REG_EERD:
        pkt->set<uint32_t>(regs.eerd());
        break;
      case REG_CTRL_EXT:
        pkt->set<uint32_t>(regs.ctrl_ext());
        break;
      case REG_MDIC:
        pkt->set<uint32_t>(regs.mdic());
        break;
      case REG_ICR:
        DPRINTF(Ethernet, "Reading ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n",
                regs.icr(), regs.imr, regs.iam, regs.ctrl_ext.iame());
        pkt->set<uint32_t>(regs.icr());
        // Reading ICR clears the cause bits when the interrupt line is
        // asserted or everything is masked (read-to-clear semantics).
        if (regs.icr.int_assert() || regs.imr == 0) {
            regs.icr = regs.icr() & ~mask(30);
            DPRINTF(Ethernet, "Cleared ICR. ICR=%#x\n", regs.icr());
        }
        // Interrupt Acknowledge Auto Mask: reading ICR also masks the
        // IAM bits when IAME is enabled.
        if (regs.ctrl_ext.iame() && regs.icr.int_assert())
            regs.imr &= ~regs.iam;
        chkInterrupt();
        break;
      case REG_EICR:
        // This is only useful for MSI, but the driver reads it every time
        // Just don't do anything
        pkt->set<uint32_t>(0);
        break;
      case REG_ITR:
        pkt->set<uint32_t>(regs.itr());
        break;
      case REG_RCTL:
        pkt->set<uint32_t>(regs.rctl());
        break;
      case REG_FCTTV:
        pkt->set<uint32_t>(regs.fcttv());
        break;
      case REG_TCTL:
        pkt->set<uint32_t>(regs.tctl());
        break;
      case REG_PBA:
        pkt->set<uint32_t>(regs.pba());
        break;
      case REG_WUC:
      case REG_LEDCTL:
        pkt->set<uint32_t>(0); // We don't care, so just return 0
        break;
      case REG_FCRTL:
        pkt->set<uint32_t>(regs.fcrtl());
        break;
      case REG_FCRTH:
        pkt->set<uint32_t>(regs.fcrth());
        break;
      case REG_RDBAL:
        pkt->set<uint32_t>(regs.rdba.rdbal());
        break;
      case REG_RDBAH:
        pkt->set<uint32_t>(regs.rdba.rdbah());
        break;
      case REG_RDLEN:
        pkt->set<uint32_t>(regs.rdlen());
        break;
      case REG_SRRCTL:
        pkt->set<uint32_t>(regs.srrctl());
        break;
      case REG_RDH:
        pkt->set<uint32_t>(regs.rdh());
        break;
      case REG_RDT:
        pkt->set<uint32_t>(regs.rdt());
        break;
      case REG_RDTR:
        pkt->set<uint32_t>(regs.rdtr());
        // FPD (Flush Partial Descriptor) set: write descriptors back and
        // raise a receive-timer interrupt, then self-clear the bit.
        if (regs.rdtr.fpd()) {
            rxDescCache.writeback(0);
            DPRINTF(EthernetIntr,
                    "Posting interrupt because of RDTR.FPD write\n");
            postInterrupt(IT_RXT);
            regs.rdtr.fpd(0);
        }
        break;
      case REG_RXDCTL:
        pkt->set<uint32_t>(regs.rxdctl());
        break;
      case REG_RADV:
        pkt->set<uint32_t>(regs.radv());
        break;
      case REG_TDBAL:
        pkt->set<uint32_t>(regs.tdba.tdbal());
        break;
      case REG_TDBAH:
        pkt->set<uint32_t>(regs.tdba.tdbah());
        break;
      case REG_TDLEN:
        pkt->set<uint32_t>(regs.tdlen());
        break;
      case REG_TDH:
        pkt->set<uint32_t>(regs.tdh());
        break;
      case REG_TXDCA_CTL:
        pkt->set<uint32_t>(regs.txdca_ctl());
        break;
      case REG_TDT:
        pkt->set<uint32_t>(regs.tdt());
        break;
      case REG_TIDV:
        pkt->set<uint32_t>(regs.tidv());
        break;
      case REG_TXDCTL:
        pkt->set<uint32_t>(regs.txdctl());
        break;
      case REG_TADV:
        pkt->set<uint32_t>(regs.tadv());
        break;
      case REG_TDWBAL:
        // tdwba is stored as one 64-bit value split over two registers.
        pkt->set<uint32_t>(regs.tdwba & mask(32));
        break;
      case REG_TDWBAH:
        pkt->set<uint32_t>(regs.tdwba >> 32);
        break;
      case REG_RXCSUM:
        pkt->set<uint32_t>(regs.rxcsum());
        break;
      case REG_RLPML:
        pkt->set<uint32_t>(regs.rlpml);
        break;
      case REG_RFCTL:
        pkt->set<uint32_t>(regs.rfctl());
        break;
      case REG_MANC:
        pkt->set<uint32_t>(regs.manc());
        break;
      case REG_SWSM:
        // Software semaphore: reading returns the current value and
        // atomically sets the SMBI bit (hardware-defined behavior).
        pkt->set<uint32_t>(regs.swsm());
        regs.swsm.smbi(1);
        break;
      case REG_FWSM:
        pkt->set<uint32_t>(regs.fwsm());
        break;
      case REG_SWFWSYNC:
        pkt->set<uint32_t>(regs.sw_fw_sync);
        break;
      default:
        // Filter/statistics table ranges read as 0; anything else is an
        // access to an unmodeled register.
        if (!IN_RANGE(daddr, REG_VFTA, VLAN_FILTER_TABLE_SIZE*4) &&
            !IN_RANGE(daddr, REG_RAL, RCV_ADDRESS_TABLE_SIZE*8) &&
            !IN_RANGE(daddr, REG_MTA, MULTICAST_TABLE_SIZE*4) &&
            !IN_RANGE(daddr, REG_CRCERRS, STATS_REGS_SIZE))
            panic("Read request to unknown register number: %#x\n", daddr);
        else
            pkt->set<uint32_t>(0);
    };

    pkt->makeAtomicResponse();
    return pioDelay;
}
355
356 Tick
357 IGbE::write(PacketPtr pkt)
358 {
359 int bar;
360 Addr daddr;
361
362
363 if (!getBAR(pkt->getAddr(), bar, daddr))
364 panic("Invalid PCI memory access to unmapped memory.\n");
365
366 // Only Memory register BAR is allowed
367 assert(bar == 0);
368
369 // Only 32bit accesses allowed
370 assert(pkt->getSize() == sizeof(uint32_t));
371
372 DPRINTF(Ethernet, "Wrote device register %#X value %#X\n",
373 daddr, pkt->get<uint32_t>());
374
375 //
376 // Handle write of register here
377 //
378 uint32_t val = pkt->get<uint32_t>();
379
380 Regs::RCTL oldrctl;
381 Regs::TCTL oldtctl;
382
383 switch (daddr) {
384 case REG_CTRL:
385 regs.ctrl = val;
386 if (regs.ctrl.tfce())
387 warn("TX Flow control enabled, should implement\n");
388 if (regs.ctrl.rfce())
389 warn("RX Flow control enabled, should implement\n");
390 break;
391 case REG_CTRL_EXT:
392 regs.ctrl_ext = val;
393 break;
394 case REG_STATUS:
395 regs.sts = val;
396 break;
397 case REG_EECD:
398 int oldClk;
399 oldClk = regs.eecd.sk();
400 regs.eecd = val;
401 // See if this is a eeprom access and emulate accordingly
402 if (!oldClk && regs.eecd.sk()) {
403 if (eeOpBits < 8) {
404 eeOpcode = eeOpcode << 1 | regs.eecd.din();
405 eeOpBits++;
406 } else if (eeAddrBits < 8 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
407 eeAddr = eeAddr << 1 | regs.eecd.din();
408 eeAddrBits++;
409 } else if (eeDataBits < 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
410 assert(eeAddr>>1 < EEPROM_SIZE);
411 DPRINTF(EthernetEEPROM, "EEPROM bit read: %d word: %#X\n",
412 flash[eeAddr>>1] >> eeDataBits & 0x1,
413 flash[eeAddr>>1]);
414 regs.eecd.dout((flash[eeAddr>>1] >> (15-eeDataBits)) & 0x1);
415 eeDataBits++;
416 } else if (eeDataBits < 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI) {
417 regs.eecd.dout(0);
418 eeDataBits++;
419 } else
420 panic("What's going on with eeprom interface? opcode:"
421 " %#x:%d addr: %#x:%d, data: %d\n", (uint32_t)eeOpcode,
422 (uint32_t)eeOpBits, (uint32_t)eeAddr,
423 (uint32_t)eeAddrBits, (uint32_t)eeDataBits);
424
425 // Reset everything for the next command
426 if ((eeDataBits == 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) ||
427 (eeDataBits == 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI)) {
428 eeOpBits = 0;
429 eeAddrBits = 0;
430 eeDataBits = 0;
431 eeOpcode = 0;
432 eeAddr = 0;
433 }
434
435 DPRINTF(EthernetEEPROM, "EEPROM: opcode: %#X:%d addr: %#X:%d\n",
436 (uint32_t)eeOpcode, (uint32_t) eeOpBits,
437 (uint32_t)eeAddr>>1, (uint32_t)eeAddrBits);
438 if (eeOpBits == 8 && !(eeOpcode == EEPROM_READ_OPCODE_SPI ||
439 eeOpcode == EEPROM_RDSR_OPCODE_SPI ))
440 panic("Unknown eeprom opcode: %#X:%d\n", (uint32_t)eeOpcode,
441 (uint32_t)eeOpBits);
442
443
444 }
445 // If driver requests eeprom access, immediately give it to it
446 regs.eecd.ee_gnt(regs.eecd.ee_req());
447 break;
448 case REG_EERD:
449 regs.eerd = val;
450 if (regs.eerd.start()) {
451 regs.eerd.done(1);
452 assert(regs.eerd.addr() < EEPROM_SIZE);
453 regs.eerd.data(flash[regs.eerd.addr()]);
454 regs.eerd.start(0);
455 DPRINTF(EthernetEEPROM, "EEPROM: read addr: %#X data %#x\n",
456 regs.eerd.addr(), regs.eerd.data());
457 }
458 break;
459 case REG_MDIC:
460 regs.mdic = val;
461 if (regs.mdic.i())
462 panic("No support for interrupt on mdic complete\n");
463 if (regs.mdic.phyadd() != 1)
464 panic("No support for reading anything but phy\n");
465 DPRINTF(Ethernet, "%s phy address %x\n",
466 regs.mdic.op() == 1 ? "Writing" : "Reading",
467 regs.mdic.regadd());
468 switch (regs.mdic.regadd()) {
469 case PHY_PSTATUS:
470 regs.mdic.data(0x796D); // link up
471 break;
472 case PHY_PID:
473 regs.mdic.data(params()->phy_pid);
474 break;
475 case PHY_EPID:
476 regs.mdic.data(params()->phy_epid);
477 break;
478 case PHY_GSTATUS:
479 regs.mdic.data(0x7C00);
480 break;
481 case PHY_EPSTATUS:
482 regs.mdic.data(0x3000);
483 break;
484 case PHY_AGC:
485 regs.mdic.data(0x180); // some random length
486 break;
487 default:
488 regs.mdic.data(0);
489 }
490 regs.mdic.r(1);
491 break;
492 case REG_ICR:
493 DPRINTF(Ethernet, "Writing ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n",
494 regs.icr(), regs.imr, regs.iam, regs.ctrl_ext.iame());
495 if (regs.ctrl_ext.iame())
496 regs.imr &= ~regs.iam;
497 regs.icr = ~bits(val,30,0) & regs.icr();
498 chkInterrupt();
499 break;
500 case REG_ITR:
501 regs.itr = val;
502 break;
503 case REG_ICS:
504 DPRINTF(EthernetIntr, "Posting interrupt because of ICS write\n");
505 postInterrupt((IntTypes)val);
506 break;
507 case REG_IMS:
508 regs.imr |= val;
509 chkInterrupt();
510 break;
511 case REG_IMC:
512 regs.imr &= ~val;
513 chkInterrupt();
514 break;
515 case REG_IAM:
516 regs.iam = val;
517 break;
518 case REG_RCTL:
519 oldrctl = regs.rctl;
520 regs.rctl = val;
521 if (regs.rctl.rst()) {
522 rxDescCache.reset();
523 DPRINTF(EthernetSM, "RXS: Got RESET!\n");
524 rxFifo.clear();
525 regs.rctl.rst(0);
526 }
527 if (regs.rctl.en())
528 rxTick = true;
529 restartClock();
530 break;
531 case REG_FCTTV:
532 regs.fcttv = val;
533 break;
534 case REG_TCTL:
535 regs.tctl = val;
536 oldtctl = regs.tctl;
537 regs.tctl = val;
538 if (regs.tctl.en())
539 txTick = true;
540 restartClock();
541 if (regs.tctl.en() && !oldtctl.en()) {
542 txDescCache.reset();
543 }
544 break;
545 case REG_PBA:
546 regs.pba.rxa(val);
547 regs.pba.txa(64 - regs.pba.rxa());
548 break;
549 case REG_WUC:
550 case REG_LEDCTL:
551 case REG_FCAL:
552 case REG_FCAH:
553 case REG_FCT:
554 case REG_VET:
555 case REG_AIFS:
556 case REG_TIPG:
557 ; // We don't care, so don't store anything
558 break;
559 case REG_IVAR0:
560 warn("Writing to IVAR0, ignoring...\n");
561 break;
562 case REG_FCRTL:
563 regs.fcrtl = val;
564 break;
565 case REG_FCRTH:
566 regs.fcrth = val;
567 break;
568 case REG_RDBAL:
569 regs.rdba.rdbal( val & ~mask(4));
570 rxDescCache.areaChanged();
571 break;
572 case REG_RDBAH:
573 regs.rdba.rdbah(val);
574 rxDescCache.areaChanged();
575 break;
576 case REG_RDLEN:
577 regs.rdlen = val & ~mask(7);
578 rxDescCache.areaChanged();
579 break;
580 case REG_SRRCTL:
581 regs.srrctl = val;
582 break;
583 case REG_RDH:
584 regs.rdh = val;
585 rxDescCache.areaChanged();
586 break;
587 case REG_RDT:
588 regs.rdt = val;
589 DPRINTF(EthernetSM, "RXS: RDT Updated.\n");
590 if (getState() == SimObject::Running) {
591 DPRINTF(EthernetSM, "RXS: RDT Fetching Descriptors!\n");
592 rxDescCache.fetchDescriptors();
593 } else {
594 DPRINTF(EthernetSM, "RXS: RDT NOT Fetching Desc b/c draining!\n");
595 }
596 break;
597 case REG_RDTR:
598 regs.rdtr = val;
599 break;
600 case REG_RADV:
601 regs.radv = val;
602 break;
603 case REG_RXDCTL:
604 regs.rxdctl = val;
605 break;
606 case REG_TDBAL:
607 regs.tdba.tdbal( val & ~mask(4));
608 txDescCache.areaChanged();
609 break;
610 case REG_TDBAH:
611 regs.tdba.tdbah(val);
612 txDescCache.areaChanged();
613 break;
614 case REG_TDLEN:
615 regs.tdlen = val & ~mask(7);
616 txDescCache.areaChanged();
617 break;
618 case REG_TDH:
619 regs.tdh = val;
620 txDescCache.areaChanged();
621 break;
622 case REG_TXDCA_CTL:
623 regs.txdca_ctl = val;
624 if (regs.txdca_ctl.enabled())
625 panic("No support for DCA\n");
626 break;
627 case REG_TDT:
628 regs.tdt = val;
629 DPRINTF(EthernetSM, "TXS: TX Tail pointer updated\n");
630 if (getState() == SimObject::Running) {
631 DPRINTF(EthernetSM, "TXS: TDT Fetching Descriptors!\n");
632 txDescCache.fetchDescriptors();
633 } else {
634 DPRINTF(EthernetSM, "TXS: TDT NOT Fetching Desc b/c draining!\n");
635 }
636 break;
637 case REG_TIDV:
638 regs.tidv = val;
639 break;
640 case REG_TXDCTL:
641 regs.txdctl = val;
642 break;
643 case REG_TADV:
644 regs.tadv = val;
645 break;
646 case REG_TDWBAL:
647 regs.tdwba &= ~mask(32);
648 regs.tdwba |= val;
649 txDescCache.completionWriteback(regs.tdwba & ~mask(1),
650 regs.tdwba & mask(1));
651 break;
652 case REG_TDWBAH:
653 regs.tdwba &= mask(32);
654 regs.tdwba |= (uint64_t)val << 32;
655 txDescCache.completionWriteback(regs.tdwba & ~mask(1),
656 regs.tdwba & mask(1));
657 break;
658 case REG_RXCSUM:
659 regs.rxcsum = val;
660 break;
661 case REG_RLPML:
662 regs.rlpml = val;
663 break;
664 case REG_RFCTL:
665 regs.rfctl = val;
666 if (regs.rfctl.exsten())
667 panic("Extended RX descriptors not implemented\n");
668 break;
669 case REG_MANC:
670 regs.manc = val;
671 break;
672 case REG_SWSM:
673 regs.swsm = val;
674 if (regs.fwsm.eep_fw_semaphore())
675 regs.swsm.swesmbi(0);
676 break;
677 case REG_SWFWSYNC:
678 regs.sw_fw_sync = val;
679 break;
680 default:
681 if (!IN_RANGE(daddr, REG_VFTA, VLAN_FILTER_TABLE_SIZE*4) &&
682 !IN_RANGE(daddr, REG_RAL, RCV_ADDRESS_TABLE_SIZE*8) &&
683 !IN_RANGE(daddr, REG_MTA, MULTICAST_TABLE_SIZE*4))
684 panic("Write request to unknown register number: %#x\n", daddr);
685 };
686
687 pkt->makeAtomicResponse();
688 return pioDelay;
689 }
690
// Record an interrupt cause and either raise the CPU interrupt line
// immediately or defer it according to the ITR throttling interval.
// @param t   cause bit(s) to set in ICR (must be non-zero)
// @param now bypass ITR throttling and post immediately
void
IGbE::postInterrupt(IntTypes t, bool now)
{
    assert(t);

    // Interrupt is already pending
    if (t & regs.icr() && !now)
        return;

    regs.icr = regs.icr() | t;

    // ITR interval is programmed in units of 256 ns.
    Tick itr_interval = SimClock::Int::ns * 256 * regs.itr.interval();
    DPRINTF(EthernetIntr,
            "EINT: postInterrupt() curTick(): %d itr: %d interval: %d\n",
            curTick(), regs.itr.interval(), itr_interval);

    // Post right away if throttling is off, the caller insists, or the
    // throttle interval since the last interrupt has already elapsed.
    if (regs.itr.interval() == 0 || now ||
        lastInterrupt + itr_interval <= curTick()) {
        if (interEvent.scheduled()) {
            deschedule(interEvent);
        }
        cpuPostInt();
    } else {
        // Otherwise schedule delivery for the end of the throttle window
        // (if not already scheduled).
        Tick int_time = lastInterrupt + itr_interval;
        assert(int_time > 0);
        DPRINTF(EthernetIntr, "EINT: Scheduling timer interrupt for tick %d\n",
                int_time);
        if (!interEvent.scheduled()) {
            schedule(interEvent, int_time);
        }
    }
}
723
// Callback for the throttled-interrupt timer: deliver the pending
// interrupt to the CPU.
void
IGbE::delayIntEvent()
{
    cpuPostInt();
}
729
730
// Actually assert the interrupt line to the CPU. Cancels any pending
// delay-timer events whose causes are being folded into this delivery,
// and records the delivery time for ITR throttling.
void
IGbE::cpuPostInt()
{

    postedInterrupts++;

    // All pending causes are masked; nothing to deliver.
    if (!(regs.icr() & regs.imr)) {
        DPRINTF(Ethernet, "Interrupt Masked. Not Posting\n");
        return;
    }

    DPRINTF(Ethernet, "Posting Interrupt\n");


    if (interEvent.scheduled()) {
        deschedule(interEvent);
    }

    // Fold any pending RX/TX delay-timer interrupts into this one so
    // they aren't delivered redundantly later.
    if (rdtrEvent.scheduled()) {
        regs.icr.rxt0(1);
        deschedule(rdtrEvent);
    }
    if (radvEvent.scheduled()) {
        regs.icr.rxt0(1);
        deschedule(radvEvent);
    }
    if (tadvEvent.scheduled()) {
        regs.icr.txdw(1);
        deschedule(tadvEvent);
    }
    if (tidvEvent.scheduled()) {
        regs.icr.txdw(1);
        deschedule(tidvEvent);
    }

    regs.icr.int_assert(1);
    DPRINTF(EthernetIntr, "EINT: Posting interrupt to CPU now. Vector %#x\n",
            regs.icr());

    intrPost();

    lastInterrupt = curTick();
}
774
775 void
776 IGbE::cpuClearInt()
777 {
778 if (regs.icr.int_assert()) {
779 regs.icr.int_assert(0);
780 DPRINTF(EthernetIntr,
781 "EINT: Clearing interrupt to CPU now. Vector %#x\n",
782 regs.icr());
783 intrClear();
784 }
785 }
786
// Re-evaluate interrupt state after ICR/IMR changes: clear the CPU
// interrupt if everything became masked, or (re)post/schedule one if an
// unmasked cause is now pending.
void
IGbE::chkInterrupt()
{
    DPRINTF(Ethernet, "Checking interrupts icr: %#x imr: %#x\n", regs.icr(),
            regs.imr);
    // Check if we need to clear the cpu interrupt
    if (!(regs.icr() & regs.imr)) {
        DPRINTF(Ethernet, "Mask cleaned all interrupts\n");
        if (interEvent.scheduled())
            deschedule(interEvent);
        if (regs.icr.int_assert())
            cpuClearInt();
    }
    DPRINTF(Ethernet, "ITR = %#X itr.interval = %#X\n",
            regs.itr(), regs.itr.interval());

    if (regs.icr() & regs.imr) {
        if (regs.itr.interval() == 0) {
            // Throttling disabled: deliver immediately.
            cpuPostInt();
        } else {
            DPRINTF(Ethernet,
                    "Possibly scheduling interrupt because of imr write\n");
            // Honor the ITR throttle interval (units of 256 ns).
            if (!interEvent.scheduled()) {
                Tick t = curTick() + SimClock::Int::ns * 256 * regs.itr.interval();
                DPRINTF(Ethernet, "Scheduling for %d\n", t);
                schedule(interEvent, t);
            }
        }
    }
}
817
818
819 ///////////////////////////// IGbE::DescCache //////////////////////////////
820
// Construct a descriptor cache of capacity 's' descriptors, with
// staging buffers for in-flight fetch and writeback DMAs (freed in the
// destructor).
template<class T>
IGbE::DescCache<T>::DescCache(IGbE *i, const std::string n, int s)
    : igbe(i), _name(n), cachePnt(0), size(s), curFetching(0),
      wbOut(0), pktPtr(NULL), wbDelayEvent(this),
      fetchDelayEvent(this), fetchEvent(this), wbEvent(this)
{
    fetchBuf = new T[size];
    wbBuf = new T[size];
}
830
// Release all cached descriptors (via reset()) and the DMA staging
// buffers allocated in the constructor.
template<class T>
IGbE::DescCache<T>::~DescCache()
{
    reset();
    delete[] fetchBuf;
    delete[] wbBuf;
}
838
839 template<class T>
840 void
841 IGbE::DescCache<T>::areaChanged()
842 {
843 if (usedCache.size() > 0 || curFetching || wbOut)
844 panic("Descriptor Address, Length or Head changed. Bad\n");
845 reset();
846
847 }
848
// Write processed (used) descriptors back to guest memory. aMask is an
// alignment mask restricting how many descriptors may be written in one
// burst; the actual DMA is issued after wbDelay via wbDelayEvent.
template<class T>
void
IGbE::DescCache<T>::writeback(Addr aMask)
{
    int curHead = descHead();
    int max_to_wb = usedCache.size();

    // Check if this writeback is less restrictive that the previous
    // and if so setup another one immediately following it
    if (wbOut) {
        if (aMask < wbAlignment) {
            moreToWb = true;
            wbAlignment = aMask;
        }
        DPRINTF(EthernetDesc,
                "Writing back already in process, returning\n");
        return;
    }

    moreToWb = false;
    wbAlignment = aMask;


    DPRINTF(EthernetDesc, "Writing back descriptors head: %d tail: "
            "%d len: %d cachePnt: %d max_to_wb: %d descleft: %d\n",
            curHead, descTail(), descLen(), cachePnt, max_to_wb,
            descLeft());

    // Clamp the burst at the end of the ring; a wrapped writeback is
    // done as a second pass (moreToWb).
    if (max_to_wb + curHead >= descLen()) {
        max_to_wb = descLen() - curHead;
        moreToWb = true;
        // this is by definition aligned correctly
    } else if (wbAlignment != 0) {
        // align the wb point to the mask
        max_to_wb = max_to_wb & ~wbAlignment;
    }

    DPRINTF(EthernetDesc, "Writing back %d descriptors\n", max_to_wb);

    // Nothing writable right now — just emit the appropriate annotation.
    if (max_to_wb <= 0) {
        if (usedCache.size())
            igbe->anBegin(annSmWb, "Wait Alignment", CPA::FL_WAIT);
        else
            igbe->anWe(annSmWb, annUsedCacheQ);
        return;
    }

    wbOut = max_to_wb;

    assert(!wbDelayEvent.scheduled());
    igbe->schedule(wbDelayEvent, curTick() + igbe->wbDelay);
    igbe->anBegin(annSmWb, "Prepare Writeback Desc");
}
902
// Second stage of writeback: copy the used descriptors into the staging
// buffer and kick off the actual DMA write. Re-arms itself while the
// simulator is draining.
template<class T>
void
IGbE::DescCache<T>::writeback1()
{
    // If we're draining delay issuing this DMA
    if (igbe->getState() != SimObject::Running) {
        igbe->schedule(wbDelayEvent, curTick() + igbe->wbDelay);
        return;
    }

    DPRINTF(EthernetDesc, "Begining DMA of %d descriptors\n", wbOut);

    // Stage the first wbOut used descriptors into the contiguous DMA
    // buffer (they are freed in wbComplete(), not here).
    for (int x = 0; x < wbOut; x++) {
        assert(usedCache.size());
        memcpy(&wbBuf[x], usedCache[x], sizeof(T));
        igbe->anPq(annSmWb, annUsedCacheQ);
        igbe->anPq(annSmWb, annDescQ);
        igbe->anQ(annSmWb, annUsedDescQ);
    }


    igbe->anBegin(annSmWb, "Writeback Desc DMA");

    assert(wbOut);
    igbe->dmaWrite(pciToDma(descBase() + descHead() * sizeof(T)),
                   wbOut * sizeof(T), &wbEvent, (uint8_t*)wbBuf,
                   igbe->wbCompDelay);
}
931
// Compute how many descriptors can be fetched from the ring (bounded by
// tail position, ring wrap, and free cache slots) and schedule the
// fetch DMA after fetchDelay.
template<class T>
void
IGbE::DescCache<T>::fetchDescriptors()
{
    size_t max_to_fetch;

    // Only one fetch in flight at a time.
    if (curFetching) {
        DPRINTF(EthernetDesc,
                "Currently fetching %d descriptors, returning\n",
                curFetching);
        return;
    }

    // Fetch up to the tail, or to the end of the ring if the tail has
    // wrapped (the remainder is picked up by a later fetch).
    if (descTail() >= cachePnt)
        max_to_fetch = descTail() - cachePnt;
    else
        max_to_fetch = descLen() - cachePnt;

    size_t free_cache = size - usedCache.size() - unusedCache.size();

    // CPA annotations describing why we may be waiting.
    if (!max_to_fetch)
        igbe->anWe(annSmFetch, annUnusedDescQ);
    else
        igbe->anPq(annSmFetch, annUnusedDescQ, max_to_fetch);

    if (max_to_fetch) {
        if (!free_cache)
            igbe->anWf(annSmFetch, annDescQ);
        else
            igbe->anRq(annSmFetch, annDescQ, free_cache);
    }

    max_to_fetch = std::min(max_to_fetch, free_cache);


    DPRINTF(EthernetDesc, "Fetching descriptors head: %d tail: "
            "%d len: %d cachePnt: %d max_to_fetch: %d descleft: %d\n",
            descHead(), descTail(), descLen(), cachePnt,
            max_to_fetch, descLeft());

    // Nothing to do
    if (max_to_fetch == 0)
        return;

    // So we don't have two descriptor fetches going on at once
    curFetching = max_to_fetch;

    assert(!fetchDelayEvent.scheduled());
    igbe->schedule(fetchDelayEvent, curTick() + igbe->fetchDelay);
    igbe->anBegin(annSmFetch, "Prepare Fetch Desc");
}
983
// Second stage of the fetch: issue the DMA read of curFetching
// descriptors into fetchBuf. Re-arms itself while the simulator is
// draining.
template<class T>
void
IGbE::DescCache<T>::fetchDescriptors1()
{
    // If we're draining delay issuing this DMA
    if (igbe->getState() != SimObject::Running) {
        igbe->schedule(fetchDelayEvent, curTick() + igbe->fetchDelay);
        return;
    }

    igbe->anBegin(annSmFetch, "Fetch Desc");

    DPRINTF(EthernetDesc, "Fetching descriptors at %#x (%#x), size: %#x\n",
            descBase() + cachePnt * sizeof(T),
            pciToDma(descBase() + cachePnt * sizeof(T)),
            curFetching * sizeof(T));
    assert(curFetching);
    igbe->dmaRead(pciToDma(descBase() + cachePnt * sizeof(T)),
                  curFetching * sizeof(T), &fetchEvent, (uint8_t*)fetchBuf,
                  igbe->fetchCompDelay);
}
1005
// DMA-read completion: copy fetched descriptors out of the staging
// buffer into heap-allocated cache entries (freed in wbComplete()/
// reset()), advance the fetch pointer, and restart the state machine.
template<class T>
void
IGbE::DescCache<T>::fetchComplete()
{
    T *newDesc;
    igbe->anBegin(annSmFetch, "Fetch Complete");
    for (int x = 0; x < curFetching; x++) {
        newDesc = new T;
        memcpy(newDesc, &fetchBuf[x], sizeof(T));
        unusedCache.push_back(newDesc);
        igbe->anDq(annSmFetch, annUnusedDescQ);
        igbe->anQ(annSmFetch, annUnusedCacheQ);
        igbe->anQ(annSmFetch, annDescQ);
    }


#ifndef NDEBUG
    int oldCp = cachePnt;
#endif

    // Advance the ring pointer, wrapping at the end of the ring.
    cachePnt += curFetching;
    assert(cachePnt <= descLen());
    if (cachePnt == descLen())
        cachePnt = 0;

    curFetching = 0;

    DPRINTF(EthernetDesc, "Fetching complete cachePnt %d -> %d\n",
            oldCp, cachePnt);

    // Annotate the new wait state: out of descriptors, cache full, or
    // simply waiting for more work.
    if ((descTail() >= cachePnt ? (descTail() - cachePnt) : (descLen() -
                                                             cachePnt)) == 0)
    {
        igbe->anWe(annSmFetch, annUnusedDescQ);
    } else if (!(size - usedCache.size() - unusedCache.size())) {
        igbe->anWf(annSmFetch, annDescQ);
    } else {
        igbe->anBegin(annSmFetch, "Wait", CPA::FL_WAIT);
    }

    enableSm();
    igbe->checkDrain();
}
1049
// DMA-write completion: free the written-back descriptors, advance the
// ring head (with wrap), and chain any follow-up writeback that was
// requested while this one was in flight.
template<class T>
void
IGbE::DescCache<T>::wbComplete()
{

    igbe->anBegin(annSmWb, "Finish Writeback");

    long curHead = descHead();
#ifndef NDEBUG
    long oldHead = curHead;
#endif

    // Free exactly the wbOut descriptors that writeback1() staged.
    for (int x = 0; x < wbOut; x++) {
        assert(usedCache.size());
        delete usedCache[0];
        usedCache.pop_front();

        igbe->anDq(annSmWb, annUsedCacheQ);
        igbe->anDq(annSmWb, annDescQ);
    }

    curHead += wbOut;
    wbOut = 0;

    // Wrap the head pointer around the ring if needed.
    if (curHead >= descLen())
        curHead -= descLen();

    // Update the head
    updateHead(curHead);

    DPRINTF(EthernetDesc, "Writeback complete curHead %d -> %d\n",
            oldHead, curHead);

    // If we still have more to wb, call wb now
    actionAfterWb();
    if (moreToWb) {
        moreToWb = false;
        DPRINTF(EthernetDesc, "Writeback has more todo\n");
        writeback(wbAlignment);
    }

    // Note: writeback() above may have set wbOut again, so only declare
    // ourselves idle if no new writeback was started.
    if (!wbOut) {
        igbe->checkDrain();
        if (usedCache.size())
            igbe->anBegin(annSmWb, "Wait", CPA::FL_WAIT);
        else
            igbe->anWe(annSmWb, annUsedCacheQ);
    }
    fetchAfterWb();
}
1100
1101 template<class T>
1102 void
1103 IGbE::DescCache<T>::reset()
1104 {
1105 DPRINTF(EthernetDesc, "Reseting descriptor cache\n");
1106 for (typename CacheType::size_type x = 0; x < usedCache.size(); x++)
1107 delete usedCache[x];
1108 for (typename CacheType::size_type x = 0; x < unusedCache.size(); x++)
1109 delete unusedCache[x];
1110
1111 usedCache.clear();
1112 unusedCache.clear();
1113
1114 cachePnt = 0;
1115
1116 }
1117
// Checkpoint the descriptor cache: scalar state, the raw bytes of every
// cached descriptor, and the pending fetch/writeback delay event times
// (0 means "not scheduled").
template<class T>
void
IGbE::DescCache<T>::serialize(std::ostream &os)
{
    SERIALIZE_SCALAR(cachePnt);
    SERIALIZE_SCALAR(curFetching);
    SERIALIZE_SCALAR(wbOut);
    SERIALIZE_SCALAR(moreToWb);
    SERIALIZE_SCALAR(wbAlignment);

    typename CacheType::size_type usedCacheSize = usedCache.size();
    SERIALIZE_SCALAR(usedCacheSize);
    for (typename CacheType::size_type x = 0; x < usedCacheSize; x++) {
        arrayParamOut(os, csprintf("usedCache_%d", x),
                      (uint8_t*)usedCache[x],sizeof(T));
    }

    typename CacheType::size_type unusedCacheSize = unusedCache.size();
    SERIALIZE_SCALAR(unusedCacheSize);
    for (typename CacheType::size_type x = 0; x < unusedCacheSize; x++) {
        arrayParamOut(os, csprintf("unusedCache_%d", x),
                      (uint8_t*)unusedCache[x],sizeof(T));
    }

    Tick fetch_delay = 0, wb_delay = 0;
    if (fetchDelayEvent.scheduled())
        fetch_delay = fetchDelayEvent.when();
    SERIALIZE_SCALAR(fetch_delay);
    if (wbDelayEvent.scheduled())
        wb_delay = wbDelayEvent.when();
    SERIALIZE_SCALAR(wb_delay);


}
1152
// Restore the descriptor cache from a checkpoint: scalar state, the
// cached descriptors (allocated fresh on the heap), and the pending
// fetch/writeback delay events (rescheduled only if non-zero).
template<class T>
void
IGbE::DescCache<T>::unserialize(Checkpoint *cp, const std::string &section)
{
    UNSERIALIZE_SCALAR(cachePnt);
    UNSERIALIZE_SCALAR(curFetching);
    UNSERIALIZE_SCALAR(wbOut);
    UNSERIALIZE_SCALAR(moreToWb);
    UNSERIALIZE_SCALAR(wbAlignment);

    typename CacheType::size_type usedCacheSize;
    UNSERIALIZE_SCALAR(usedCacheSize);
    T *temp;
    for (typename CacheType::size_type x = 0; x < usedCacheSize; x++) {
        // Ownership transfers to usedCache; freed in wbComplete()/reset().
        temp = new T;
        arrayParamIn(cp, section, csprintf("usedCache_%d", x),
                     (uint8_t*)temp,sizeof(T));
        usedCache.push_back(temp);
    }

    typename CacheType::size_type unusedCacheSize;
    UNSERIALIZE_SCALAR(unusedCacheSize);
    for (typename CacheType::size_type x = 0; x < unusedCacheSize; x++) {
        temp = new T;
        arrayParamIn(cp, section, csprintf("unusedCache_%d", x),
                     (uint8_t*)temp,sizeof(T));
        unusedCache.push_back(temp);
    }
    Tick fetch_delay = 0, wb_delay = 0;
    UNSERIALIZE_SCALAR(fetch_delay);
    UNSERIALIZE_SCALAR(wb_delay);
    if (fetch_delay)
        igbe->schedule(fetchDelayEvent, fetch_delay);
    if (wb_delay)
        igbe->schedule(wbDelayEvent, wb_delay);


}
1191
1192 ///////////////////////////// IGbE::RxDescCache //////////////////////////////
1193
// RX specialization of the descriptor cache; sets up the CPA annotation
// labels used by the annotation calls in the base class.
IGbE::RxDescCache::RxDescCache(IGbE *i, const std::string n, int s)
    : DescCache<RxDesc>(i, n, s), pktDone(false), splitCount(0),
    pktEvent(this), pktHdrEvent(this), pktDataEvent(this)

{
    annSmFetch = "RX Desc Fetch";
    annSmWb = "RX Desc Writeback";
    annUnusedDescQ = "RX Unused Descriptors";
    annUnusedCacheQ = "RX Unused Descriptor Cache";
    annUsedCacheQ = "RX Used Descriptor Cache";
    annUsedDescQ = "RX Used Descriptors";
    annDescQ = "RX Descriptors";
}
1207
1208 void
1209 IGbE::RxDescCache::pktSplitDone()
1210 {
1211 splitCount++;
1212 DPRINTF(EthernetDesc,
1213 "Part of split packet done: splitcount now %d\n", splitCount);
1214 assert(splitCount <= 2);
1215 if (splitCount != 2)
1216 return;
1217 splitCount = 0;
1218 DPRINTF(EthernetDesc,
1219 "Part of split packet done: calling pktComplete()\n");
1220 pktComplete();
1221 }
1222
// DMA (part of) a received packet into the buffer(s) described by the
// descriptor at the front of the unused cache. pkt_offset is the number of
// bytes of this packet already copied by earlier descriptors (only non-zero
// for header-split continuation). Returns the cumulative number of bytes of
// the packet copied so far (bytesCopied); completion is signalled
// asynchronously via pktEvent / pktHdrEvent / pktDataEvent.
int
IGbE::RxDescCache::writePacket(EthPacketPtr packet, int pkt_offset)
{
    assert(unusedCache.size());
    //if (!unusedCache.size())
    //    return false;

    pktPtr = packet;
    pktDone = false;
    unsigned buf_len, hdr_len;

    RxDesc *desc = unusedCache.front();
    switch (igbe->regs.srrctl.desctype()) {
      case RXDT_LEGACY:
        // Legacy descriptor: single buffer, whole packet must fit.
        assert(pkt_offset == 0);
        bytesCopied = packet->length;
        DPRINTF(EthernetDesc, "Packet Length: %d Desc Size: %d\n",
                packet->length, igbe->regs.rctl.descSize());
        assert(packet->length < igbe->regs.rctl.descSize());
        igbe->dmaWrite(pciToDma(desc->legacy.buf),
                       packet->length, &pktEvent, packet->data,
                       igbe->rxWriteDelay);
        break;
      case RXDT_ADV_ONEBUF:
        // Advanced one-buffer descriptor: like legacy but the writeback
        // format carries separate header/packet length fields.
        assert(pkt_offset == 0);
        bytesCopied = packet->length;
        // Buffer size comes from SRRCTL when long-packet-enable is set,
        // otherwise from the RCTL descriptor size.
        buf_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.bufLen() :
            igbe->regs.rctl.descSize();
        DPRINTF(EthernetDesc, "Packet Length: %d srrctl: %#x Desc Size: %d\n",
                packet->length, igbe->regs.srrctl(), buf_len);
        assert(packet->length < buf_len);
        igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
                       packet->length, &pktEvent, packet->data,
                       igbe->rxWriteDelay);
        desc->adv_wb.header_len = htole(0);
        desc->adv_wb.sph = htole(0);
        desc->adv_wb.pkt_len = htole((uint16_t)(pktPtr->length));
        break;
      case RXDT_ADV_SPLIT_A:
        // Advanced header-split descriptor: protocol headers go to the
        // header buffer, payload to the packet buffer.
        int split_point;

        buf_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.bufLen() :
            igbe->regs.rctl.descSize();
        hdr_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.hdrLen() : 0;
        DPRINTF(EthernetDesc,
                "lpe: %d Packet Length: %d offset: %d srrctl: %#x "
                "hdr addr: %#x Hdr Size: %d desc addr: %#x Desc Size: %d\n",
                igbe->regs.rctl.lpe(), packet->length, pkt_offset,
                igbe->regs.srrctl(), desc->adv_read.hdr, hdr_len,
                desc->adv_read.pkt, buf_len);

        // Where the protocol headers end (0 if the packet is undecodable).
        split_point = hsplit(pktPtr);

        if (packet->length <= hdr_len) {
            // Whole packet fits in the header buffer.
            bytesCopied = packet->length;
            assert(pkt_offset == 0);
            DPRINTF(EthernetDesc, "Hdr split: Entire packet in header\n");
            igbe->dmaWrite(pciToDma(desc->adv_read.hdr),
                           packet->length, &pktEvent, packet->data,
                           igbe->rxWriteDelay);
            desc->adv_wb.header_len = htole((uint16_t)packet->length);
            desc->adv_wb.sph = htole(0);
            desc->adv_wb.pkt_len = htole(0);
        } else if (split_point) {
            if (pkt_offset) {
                // we are only copying some data, header/data has already been
                // copied
                int max_to_copy =
                    std::min(packet->length - pkt_offset, buf_len);
                bytesCopied += max_to_copy;
                DPRINTF(EthernetDesc,
                        "Hdr split: Continuing data buffer copy\n");
                igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
                               max_to_copy, &pktEvent,
                               packet->data + pkt_offset, igbe->rxWriteDelay);
                desc->adv_wb.header_len = htole(0);
                desc->adv_wb.pkt_len = htole((uint16_t)max_to_copy);
                desc->adv_wb.sph = htole(0);
            } else {
                // First descriptor of a split: two DMAs in flight (header
                // and payload); pktSplitDone() fires pktComplete() once
                // both pktHdrEvent and pktDataEvent have completed.
                int max_to_copy =
                    std::min(packet->length - split_point, buf_len);
                bytesCopied += max_to_copy + split_point;

                DPRINTF(EthernetDesc, "Hdr split: splitting at %d\n",
                        split_point);
                igbe->dmaWrite(pciToDma(desc->adv_read.hdr),
                               split_point, &pktHdrEvent,
                               packet->data, igbe->rxWriteDelay);
                igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
                               max_to_copy, &pktDataEvent,
                               packet->data + split_point, igbe->rxWriteDelay);
                desc->adv_wb.header_len = htole(split_point);
                // NOTE(review): other sph assignments use htole(); this one
                // stores raw 1 — likely equivalent for a 1-bit field, but
                // worth confirming.
                desc->adv_wb.sph = 1;
                desc->adv_wb.pkt_len = htole((uint16_t)(max_to_copy));
            }
        } else {
            panic("Header split not fitting within header buffer or "
                  "undecodable packet not fitting in header unsupported\n");
        }
        break;
      default:
        panic("Unimplemnted RX receive buffer type: %d\n",
              igbe->regs.srrctl.desctype());
    }
    return bytesCopied;

}
1330
// Called when the DMA(s) for the current receive descriptor finish: fill in
// the descriptor writeback fields (length, status, errors, checksums),
// schedule/post the appropriate receive interrupts, and move the descriptor
// from the unused to the used cache so it can be written back to memory.
void
IGbE::RxDescCache::pktComplete()
{
    assert(unusedCache.size());
    RxDesc *desc;
    desc = unusedCache.front();

    igbe->anBegin("RXS", "Update Desc");

    // Unless "strip ethernet CRC" is enabled, the reported length includes
    // the 4 CRC bytes that the model never actually copied.
    uint16_t crcfixup = igbe->regs.rctl.secrc() ? 0 : 4 ;
    DPRINTF(EthernetDesc, "pktPtr->length: %d bytesCopied: %d "
            "stripcrc offset: %d value written: %d %d\n",
            pktPtr->length, bytesCopied, crcfixup,
            htole((uint16_t)(pktPtr->length + crcfixup)),
            (uint16_t)(pktPtr->length + crcfixup));

    // no support for anything but starting at 0
    assert(igbe->regs.rxcsum.pcss() == 0);

    DPRINTF(EthernetDesc, "Packet written to memory updating Descriptor\n");

    uint16_t status = RXDS_DD;
    uint8_t err = 0;
    uint16_t ext_err = 0;
    uint16_t csum = 0;
    uint16_t ptype = 0;
    uint16_t ip_id = 0;

    // EOP is only set on the descriptor that receives the packet's tail.
    assert(bytesCopied <= pktPtr->length);
    if (bytesCopied == pktPtr->length)
        status |= RXDS_EOP;

    IpPtr ip(pktPtr);

    if (ip) {
        DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n", ip->id());
        ptype |= RXDP_IPV4;
        ip_id = ip->id();

        // IP receive-checksum offload: verify and report in the descriptor.
        if (igbe->regs.rxcsum.ipofld()) {
            DPRINTF(EthernetDesc, "Checking IP checksum\n");
            status |= RXDS_IPCS;
            csum = htole(cksum(ip));
            igbe->rxIpChecksums++;
            if (cksum(ip) != 0) {
                err |= RXDE_IPE;
                ext_err |= RXDEE_IPE;
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
            }
        }
        TcpPtr tcp(ip);
        if (tcp && igbe->regs.rxcsum.tuofld()) {
            DPRINTF(EthernetDesc, "Checking TCP checksum\n");
            status |= RXDS_TCPCS;
            ptype |= RXDP_TCP;
            csum = htole(cksum(tcp));
            igbe->rxTcpChecksums++;
            if (cksum(tcp) != 0) {
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
                err |= RXDE_TCPE;
                ext_err |= RXDEE_TCPE;
            }
        }

        UdpPtr udp(ip);
        if (udp && igbe->regs.rxcsum.tuofld()) {
            DPRINTF(EthernetDesc, "Checking UDP checksum\n");
            status |= RXDS_UDPCS;
            ptype |= RXDP_UDP;
            csum = htole(cksum(udp));
            igbe->rxUdpChecksums++;
            if (cksum(udp) != 0) {
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
                // NOTE(review): UDP errors reuse the TCP error bits here —
                // presumably intentional (shared TU error flag); confirm
                // against the 8254x manual.
                ext_err |= RXDEE_TCPE;
                err |= RXDE_TCPE;
            }
        }
    } else { // if ip
        DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n");
    }

    // Write the computed fields into whichever descriptor format is active.
    switch (igbe->regs.srrctl.desctype()) {
      case RXDT_LEGACY:
        desc->legacy.len = htole((uint16_t)(pktPtr->length + crcfixup));
        desc->legacy.status = htole(status);
        desc->legacy.errors = htole(err);
        // No vlan support at this point... just set it to 0
        desc->legacy.vlan = 0;
        break;
      case RXDT_ADV_SPLIT_A:
      case RXDT_ADV_ONEBUF:
        desc->adv_wb.rss_type = htole(0);
        desc->adv_wb.pkt_type = htole(ptype);
        if (igbe->regs.rxcsum.pcsd()) {
            // no rss support right now
            desc->adv_wb.rss_hash = htole(0);
        } else {
            desc->adv_wb.id = htole(ip_id);
            desc->adv_wb.csum = htole(csum);
        }
        desc->adv_wb.status = htole(status);
        desc->adv_wb.errors = htole(ext_err);
        // no vlan support
        desc->adv_wb.vlan_tag = htole(0);
        break;
      default:
        panic("Unimplemnted RX receive buffer type %d\n",
              igbe->regs.srrctl.desctype());
    }

    DPRINTF(EthernetDesc, "Descriptor complete w0: %#x w1: %#x\n",
            desc->adv_read.pkt, desc->adv_read.hdr);

    if (bytesCopied == pktPtr->length) {
        DPRINTF(EthernetDesc,
                "Packet completely written to descriptor buffers\n");
        // Deal with the rx timer interrupts
        if (igbe->regs.rdtr.delay()) {
            Tick delay = igbe->regs.rdtr.delay() * igbe->intClock();
            DPRINTF(EthernetSM, "RXS: Scheduling DTR for %d\n", delay);
            igbe->reschedule(igbe->rdtrEvent, curTick() + delay);
        }

        // Absolute delay timer: schedule only if not already pending.
        if (igbe->regs.radv.idv()) {
            Tick delay = igbe->regs.radv.idv() * igbe->intClock();
            DPRINTF(EthernetSM, "RXS: Scheduling ADV for %d\n", delay);
            if (!igbe->radvEvent.scheduled()) {
                igbe->schedule(igbe->radvEvent, curTick() + delay);
            }
        }

        // if neither radv or rdtr, maybe itr is set...
        if (!igbe->regs.rdtr.delay() && !igbe->regs.radv.idv()) {
            DPRINTF(EthernetSM,
                    "RXS: Receive interrupt delay disabled, posting IT_RXT\n");
            igbe->postInterrupt(IT_RXT);
        }

        // If the packet is small enough, interrupt appropriately
        // I wonder if this is delayed or not?!
        if (pktPtr->length <= igbe->regs.rsrpd.idv()) {
            DPRINTF(EthernetSM,
                    "RXS: Posting IT_SRPD beacuse small packet received\n");
            igbe->postInterrupt(IT_SRPD);
        }
        bytesCopied = 0;
    }

    pktPtr = NULL;
    igbe->checkDrain();
    enableSm();
    pktDone = true;

    // Retire the descriptor: unused cache -> used cache (awaiting writeback).
    igbe->anBegin("RXS", "Done Updating Desc");
    DPRINTF(EthernetDesc, "Processing of this descriptor complete\n");
    igbe->anDq("RXS", annUnusedCacheQ);
    unusedCache.pop_front();
    igbe->anQ("RXS", annUsedCacheQ);
    usedCache.push_back(desc);
}
1491
1492 void
1493 IGbE::RxDescCache::enableSm()
1494 {
1495 if (!igbe->drainEvent) {
1496 igbe->rxTick = true;
1497 igbe->restartClock();
1498 }
1499 }
1500
1501 bool
1502 IGbE::RxDescCache::packetDone()
1503 {
1504 if (pktDone) {
1505 pktDone = false;
1506 return true;
1507 }
1508 return false;
1509 }
1510
1511 bool
1512 IGbE::RxDescCache::hasOutstandingEvents()
1513 {
1514 return pktEvent.scheduled() || wbEvent.scheduled() ||
1515 fetchEvent.scheduled() || pktHdrEvent.scheduled() ||
1516 pktDataEvent.scheduled();
1517
1518 }
1519
// Checkpoint RX-specific state on top of the base descriptor-cache state.
void
IGbE::RxDescCache::serialize(std::ostream &os)
{
    DescCache<RxDesc>::serialize(os);
    SERIALIZE_SCALAR(pktDone);
    SERIALIZE_SCALAR(splitCount);
    SERIALIZE_SCALAR(bytesCopied);
}
1528
// Restore RX-specific state; mirrors RxDescCache::serialize().
void
IGbE::RxDescCache::unserialize(Checkpoint *cp, const std::string &section)
{
    DescCache<RxDesc>::unserialize(cp, section);
    UNSERIALIZE_SCALAR(pktDone);
    UNSERIALIZE_SCALAR(splitCount);
    UNSERIALIZE_SCALAR(bytesCopied);
}
1537
1538
1539 ///////////////////////////// IGbE::TxDescCache //////////////////////////////
1540
// Construct the transmit-side descriptor cache. All TSO (TCP segmentation
// offload) bookkeeping starts zeroed/disabled; the ann* strings label the
// queues for CPA annotation/tracing.
IGbE::TxDescCache::TxDescCache(IGbE *i, const std::string n, int s)
    : DescCache<TxDesc>(i,n, s), pktDone(false), isTcp(false),
      pktWaiting(false), completionAddress(0), completionEnabled(false),
      useTso(false), tsoHeaderLen(0), tsoMss(0), tsoTotalLen(0), tsoUsedLen(0),
      tsoPrevSeq(0), tsoPktPayloadBytes(0), tsoLoadedHeader(false),
      tsoPktHasHeader(false), tsoDescBytesUsed(0), tsoCopyBytes(0), tsoPkts(0),
      pktEvent(this), headerEvent(this), nullEvent(this)
{
    annSmFetch = "TX Desc Fetch";
    annSmWb = "TX Desc Writeback";
    annUnusedDescQ = "TX Unused Descriptors";
    annUnusedCacheQ = "TX Unused Descriptor Cache";
    annUsedCacheQ = "TX Used Descriptor Cache";
    annUsedDescQ = "TX Used Descriptors";
    annDescQ = "TX Descriptors";
}
1557
// Consume any context descriptors at the front of the unused cache, latching
// their TSO/checksum parameters, then — if the following data descriptor
// requires TSO and the header is not yet loaded — start the DMA that fetches
// the TSO header (completion handled by headerComplete()).
void
IGbE::TxDescCache::processContextDesc()
{
    assert(unusedCache.size());
    TxDesc *desc;

    DPRINTF(EthernetDesc, "Checking and processing context descriptors\n");

    // Context descriptors carry setup only; retire each one immediately
    // after latching its parameters. Stop once TSO is armed.
    while (!useTso && unusedCache.size() &&
           TxdOp::isContext(unusedCache.front())) {
        DPRINTF(EthernetDesc, "Got context descriptor type...\n");

        desc = unusedCache.front();
        DPRINTF(EthernetDesc, "Descriptor upper: %#x lower: %#X\n",
                desc->d1, desc->d2);


        // is this going to be a tcp or udp packet?
        isTcp = TxdOp::tcp(desc) ? true : false;

        // setup all the TSO variables, they'll be ignored if we don't use
        // tso for this connection
        tsoHeaderLen = TxdOp::hdrlen(desc);
        tsoMss  = TxdOp::mss(desc);

        if (TxdOp::isType(desc, TxdOp::TXD_CNXT) && TxdOp::tse(desc)) {
            DPRINTF(EthernetDesc, "TCP offload enabled for packet hdrlen: "
                    "%d mss: %d paylen %d\n", TxdOp::hdrlen(desc),
                    TxdOp::mss(desc), TxdOp::getLen(desc));
            useTso = true;
            tsoTotalLen = TxdOp::getLen(desc);
            tsoLoadedHeader = false;
            tsoDescBytesUsed = 0;
            tsoUsedLen = 0;
            tsoPrevSeq = 0;
            tsoPktHasHeader = false;
            tsoPkts = 0;
            tsoCopyBytes = 0;
        }

        TxdOp::setDd(desc);
        unusedCache.pop_front();
        igbe->anDq("TXS", annUnusedCacheQ);
        usedCache.push_back(desc);
        igbe->anQ("TXS", annUsedCacheQ);
    }

    if (!unusedCache.size())
        return;

    // Advanced data descriptors can also request TSO directly.
    desc = unusedCache.front();
    if (!useTso && TxdOp::isType(desc, TxdOp::TXD_ADVDATA) &&
        TxdOp::tse(desc)) {
        DPRINTF(EthernetDesc, "TCP offload(adv) enabled for packet "
                "hdrlen: %d mss: %d paylen %d\n",
                tsoHeaderLen, tsoMss, TxdOp::getTsoLen(desc));
        useTso = true;
        tsoTotalLen = TxdOp::getTsoLen(desc);
        tsoLoadedHeader = false;
        tsoDescBytesUsed = 0;
        tsoUsedLen = 0;
        tsoPrevSeq = 0;
        tsoPktHasHeader = false;
        tsoPkts = 0;
    }

    if (useTso && !tsoLoadedHeader) {
        // we need to fetch a header
        DPRINTF(EthernetDesc, "Starting DMA of TSO header\n");
        assert(TxdOp::isData(desc) && TxdOp::getLen(desc) >= tsoHeaderLen);
        pktWaiting = true;
        // tsoHeader is a fixed 256-byte buffer; see SERIALIZE_ARRAY below.
        assert(tsoHeaderLen <= 256);
        igbe->dmaRead(pciToDma(TxdOp::getBuf(desc)),
                      tsoHeaderLen, &headerEvent, tsoHeader, 0);
    }
}
1634
1635 void
1636 IGbE::TxDescCache::headerComplete()
1637 {
1638 DPRINTF(EthernetDesc, "TSO: Fetching TSO header complete\n");
1639 pktWaiting = false;
1640
1641 assert(unusedCache.size());
1642 TxDesc *desc = unusedCache.front();
1643 DPRINTF(EthernetDesc, "TSO: len: %d tsoHeaderLen: %d\n",
1644 TxdOp::getLen(desc), tsoHeaderLen);
1645
1646 if (TxdOp::getLen(desc) == tsoHeaderLen) {
1647 tsoDescBytesUsed = 0;
1648 tsoLoadedHeader = true;
1649 unusedCache.pop_front();
1650 usedCache.push_back(desc);
1651 } else {
1652 // I don't think this case happens, I think the headrer is always
1653 // it's own packet, if it wasn't it might be as simple as just
1654 // incrementing descBytesUsed by the header length, but I'm not
1655 // completely sure
1656 panic("TSO header part of bigger packet, not implemented\n");
1657 }
1658 enableSm();
1659 igbe->checkDrain();
1660 }
1661
// Compute how many bytes the next transmit packet (or packet segment) will
// occupy, based on the descriptor at the front of the unused cache. For TSO
// this also latches tsoCopyBytes, the payload bytes the next DMA will copy.
// Returns 0 when no descriptor is available.
unsigned
IGbE::TxDescCache::getPacketSize(EthPacketPtr p)
{
    if (!unusedCache.size())
        return 0;

    DPRINTF(EthernetDesc, "Starting processing of descriptor\n");

    assert(!useTso || tsoLoadedHeader);
    TxDesc *desc = unusedCache.front();

    if (useTso) {
        DPRINTF(EthernetDesc, "getPacket(): TxDescriptor data "
                "d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
        DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %d mss: %d total: %d "
                "used: %d loaded hdr: %d\n", useTso, tsoHeaderLen, tsoMss,
                tsoTotalLen, tsoUsedLen, tsoLoadedHeader);

        // Copy up to one MSS of payload, limited by what remains in this
        // descriptor. If the header is already in the packet, p->length
        // counts it, so subtract accordingly.
        if (tsoPktHasHeader)
            tsoCopyBytes =  std::min((tsoMss + tsoHeaderLen) - p->length,
                                     TxdOp::getLen(desc) - tsoDescBytesUsed);
        else
            tsoCopyBytes =  std::min(tsoMss,
                                     TxdOp::getLen(desc) - tsoDescBytesUsed);
        // A fresh segment also needs room for the prepended header.
        unsigned pkt_size =
            tsoCopyBytes + (tsoPktHasHeader ? 0 : tsoHeaderLen);

        DPRINTF(EthernetDesc, "TSO: descBytesUsed: %d copyBytes: %d "
                "this descLen: %d\n",
                tsoDescBytesUsed, tsoCopyBytes, TxdOp::getLen(desc));
        DPRINTF(EthernetDesc, "TSO: pktHasHeader: %d\n", tsoPktHasHeader);
        DPRINTF(EthernetDesc, "TSO: Next packet is %d bytes\n", pkt_size);
        return pkt_size;
    }

    // Non-TSO: the descriptor length is the packet (segment) size.
    DPRINTF(EthernetDesc, "Next TX packet is %d bytes\n",
            TxdOp::getLen(unusedCache.front()));
    return TxdOp::getLen(desc);
}
1701
// Start the DMA that copies the next descriptor's payload into packet p
// (appending at p->length). For TSO the previously fetched header is
// prepended first. Completion is signalled asynchronously via pktEvent
// (handled in pktComplete()).
void
IGbE::TxDescCache::getPacketData(EthPacketPtr p)
{
    assert(unusedCache.size());

    TxDesc *desc;
    desc = unusedCache.front();

    DPRINTF(EthernetDesc, "getPacketData(): TxDescriptor data "
            "d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
    assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) &&
           TxdOp::getLen(desc));

    pktPtr = p;

    pktWaiting = true;

    DPRINTF(EthernetDesc, "Starting DMA of packet at offset %d\n", p->length);

    if (useTso) {
        assert(tsoLoadedHeader);
        if (!tsoPktHasHeader) {
            // Each TSO segment begins with a copy of the cached header.
            DPRINTF(EthernetDesc,
                    "Loading TSO header (%d bytes) into start of packet\n",
                    tsoHeaderLen);
            memcpy(p->data, &tsoHeader,tsoHeaderLen);
            p->length +=tsoHeaderLen;
            tsoPktHasHeader = true;
        }
    }

    if (useTso) {
        // Resume copying from where the previous segment left off within
        // this descriptor's buffer.
        DPRINTF(EthernetDesc,
                "Starting DMA of packet at offset %d length: %d\n",
                p->length, tsoCopyBytes);
        igbe->dmaRead(pciToDma(TxdOp::getBuf(desc))
                      + tsoDescBytesUsed,
                      tsoCopyBytes, &pktEvent, p->data + p->length,
                      igbe->txReadDelay);
        tsoDescBytesUsed += tsoCopyBytes;
        assert(tsoDescBytesUsed <= TxdOp::getLen(desc));
    } else {
        igbe->dmaRead(pciToDma(TxdOp::getBuf(desc)),
                      TxdOp::getLen(desc), &pktEvent, p->data + p->length,
                      igbe->txReadDelay);
    }
}
1749
// Called when the payload DMA for the current transmit descriptor finishes:
// account the copied bytes, retire the descriptor when exhausted, and — when
// the packet (or TSO segment) is complete — fix up IP/TCP/UDP headers,
// compute offloaded checksums, handle interrupt-delay timers, and trigger
// descriptor writeback per WTHRESH policy.
void
IGbE::TxDescCache::pktComplete()
{

    TxDesc *desc;
    assert(unusedCache.size());
    assert(pktPtr);

    igbe->anBegin("TXS", "Update Desc");

    DPRINTF(EthernetDesc, "DMA of packet complete\n");


    desc = unusedCache.front();
    assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) &&
           TxdOp::getLen(desc));

    DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n",
            desc->d1, desc->d2);

    // Set the length of the data in the EtherPacket
    if (useTso) {
        DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %d mss: %d total: %d "
                "used: %d loaded hdr: %d\n", useTso, tsoHeaderLen, tsoMss,
                tsoTotalLen, tsoUsedLen, tsoLoadedHeader);
        pktPtr->length += tsoCopyBytes;
        tsoUsedLen += tsoCopyBytes;
        DPRINTF(EthernetDesc, "TSO: descBytesUsed: %d copyBytes: %d\n",
                tsoDescBytesUsed, tsoCopyBytes);
    } else
        pktPtr->length += TxdOp::getLen(desc);



    // Multi-descriptor packet (or TSO segment not yet a full MSS): retire
    // this descriptor and signal the state machine to fetch more data
    // before the packet is transmitted.
    if ((!TxdOp::eop(desc) && !useTso) ||
        (pktPtr->length < ( tsoMss + tsoHeaderLen) &&
         tsoTotalLen != tsoUsedLen && useTso)) {
        assert(!useTso || (tsoDescBytesUsed == TxdOp::getLen(desc)));
        igbe->anDq("TXS", annUnusedCacheQ);
        unusedCache.pop_front();
        igbe->anQ("TXS", annUsedCacheQ);
        usedCache.push_back(desc);

        tsoDescBytesUsed = 0;
        pktDone = true;
        pktWaiting = false;
        pktMultiDesc = true;

        DPRINTF(EthernetDesc, "Partial Packet Descriptor of %d bytes Done\n",
                pktPtr->length);
        pktPtr = NULL;

        enableSm();
        igbe->checkDrain();
        return;
    }


    pktMultiDesc = false;
    // no support for vlans
    assert(!TxdOp::vle(desc));

    // we only support single packet descriptors at this point
    if (!useTso)
        assert(TxdOp::eop(desc));

    // set that this packet is done
    if (TxdOp::rs(desc))
        TxdOp::setDd(desc);

    DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n",
            desc->d1, desc->d2);

    // TSO: each emitted segment needs a fresh IP id, adjusted lengths and a
    // continued TCP sequence number; FIN/PSH are cleared on all but the
    // final segment.
    if (useTso) {
        IpPtr ip(pktPtr);
        if (ip) {
            DPRINTF(EthernetDesc, "TSO: Modifying IP header. Id + %d\n",
                    tsoPkts);
            ip->id(ip->id() + tsoPkts++);
            ip->len(pktPtr->length - EthPtr(pktPtr)->size());

            TcpPtr tcp(ip);
            if (tcp) {
                DPRINTF(EthernetDesc,
                        "TSO: Modifying TCP header. old seq %d + %d\n",
                        tcp->seq(), tsoPrevSeq);
                tcp->seq(tcp->seq() + tsoPrevSeq);
                if (tsoUsedLen != tsoTotalLen)
                    tcp->flags(tcp->flags() & ~9); // clear fin & psh
            }
            UdpPtr udp(ip);
            if (udp) {
                DPRINTF(EthernetDesc, "TSO: Modifying UDP header.\n");
                udp->len(pktPtr->length - EthPtr(pktPtr)->size());
            }
        }
        tsoPrevSeq = tsoUsedLen;
    }

    if (DTRACE(EthernetDesc)) {
        IpPtr ip(pktPtr);
        if (ip)
            DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n",
                    ip->id());
        else
            DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n");
    }

    // Checksums are only ofloaded for new descriptor types
    if (TxdOp::isData(desc) && ( TxdOp::ixsm(desc) || TxdOp::txsm(desc)) ) {
        DPRINTF(EthernetDesc, "Calculating checksums for packet\n");
        IpPtr ip(pktPtr);
        assert(ip);
        if (TxdOp::ixsm(desc)) {
            // Zero the field first so the computed sum is over a clean
            // header.
            ip->sum(0);
            ip->sum(cksum(ip));
            igbe->txIpChecksums++;
            DPRINTF(EthernetDesc, "Calculated IP checksum\n");
        }
        if (TxdOp::txsm(desc)) {
            TcpPtr tcp(ip);
            UdpPtr udp(ip);
            if (tcp) {
                tcp->sum(0);
                tcp->sum(cksum(tcp));
                igbe->txTcpChecksums++;
                DPRINTF(EthernetDesc, "Calculated TCP checksum\n");
            } else if (udp) {
                assert(udp);
                udp->sum(0);
                udp->sum(cksum(udp));
                igbe->txUdpChecksums++;
                DPRINTF(EthernetDesc, "Calculated UDP checksum\n");
            } else {
                panic("Told to checksum, but don't know how\n");
            }
        }
    }

    if (TxdOp::ide(desc)) {
        // Deal with the rx timer interrupts
        DPRINTF(EthernetDesc, "Descriptor had IDE set\n");
        if (igbe->regs.tidv.idv()) {
            Tick delay = igbe->regs.tidv.idv() * igbe->intClock();
            DPRINTF(EthernetDesc, "setting tidv\n");
            igbe->reschedule(igbe->tidvEvent, curTick() + delay, true);
        }

        if (igbe->regs.tadv.idv() && igbe->regs.tidv.idv()) {
            Tick delay = igbe->regs.tadv.idv() * igbe->intClock();
            DPRINTF(EthernetDesc, "setting tadv\n");
            if (!igbe->tadvEvent.scheduled()) {
                igbe->schedule(igbe->tadvEvent, curTick() + delay);
            }
        }
    }


    // Retire the descriptor once fully consumed (always, for non-TSO).
    if (!useTso || TxdOp::getLen(desc) == tsoDescBytesUsed) {
        DPRINTF(EthernetDesc, "Descriptor Done\n");
        igbe->anDq("TXS", annUnusedCacheQ);
        unusedCache.pop_front();
        igbe->anQ("TXS", annUsedCacheQ);
        usedCache.push_back(desc);
        tsoDescBytesUsed = 0;
    }

    // All TSO payload emitted: disarm TSO for the next packet.
    if (useTso && tsoUsedLen == tsoTotalLen)
        useTso = false;


    DPRINTF(EthernetDesc,
            "------Packet of %d bytes ready for transmission-------\n",
            pktPtr->length);
    pktDone = true;
    pktWaiting = false;
    pktPtr = NULL;
    tsoPktHasHeader = false;

    // Writeback policy: WTHRESH == 0 writes back immediately; otherwise
    // write back once enough used descriptors have accumulated.
    if (igbe->regs.txdctl.wthresh() == 0) {
        igbe->anBegin("TXS", "Desc Writeback");
        DPRINTF(EthernetDesc, "WTHRESH == 0, writing back descriptor\n");
        writeback(0);
    } else if (!igbe->regs.txdctl.gran() && igbe->regs.txdctl.wthresh() <=
               descInBlock(usedCache.size())) {
        DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n");
        igbe->anBegin("TXS", "Desc Writeback");
        writeback((igbe->cacheBlockSize()-1)>>4);
    } else if (igbe->regs.txdctl.wthresh() <= usedCache.size()) {
        DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n");
        igbe->anBegin("TXS", "Desc Writeback");
        writeback((igbe->cacheBlockSize()-1)>>4);
    }

    enableSm();
    igbe->checkDrain();
}
1947
// After a descriptor writeback completes: post the TX-descriptor-written
// interrupt, and if the head-writeback completion feature is enabled, DMA
// the current head pointer to the driver-supplied completion address.
void
IGbE::TxDescCache::actionAfterWb()
{
    DPRINTF(EthernetDesc, "actionAfterWb() completionEnabled: %d\n",
            completionEnabled);
    igbe->postInterrupt(iGbReg::IT_TXDW);
    if (completionEnabled) {
        descEnd = igbe->regs.tdh();
        DPRINTF(EthernetDesc,
                "Completion writing back value: %d to addr: %#x\n", descEnd,
                completionAddress);
        // Completion address is 4-byte aligned; mask off the low bits.
        // nullEvent: no action is needed when this write finishes.
        igbe->dmaWrite(pciToDma(mbits(completionAddress, 63, 2)),
                       sizeof(descEnd), &nullEvent, (uint8_t*)&descEnd, 0);
    }
}
1963
1964 void
1965 IGbE::TxDescCache::serialize(std::ostream &os)
1966 {
1967 DescCache<TxDesc>::serialize(os);
1968 SERIALIZE_SCALAR(pktDone);
1969 SERIALIZE_SCALAR(isTcp);
1970 SERIALIZE_SCALAR(pktWaiting);
1971 SERIALIZE_SCALAR(pktMultiDesc);
1972
1973 SERIALIZE_SCALAR(useTso);
1974 SERIALIZE_SCALAR(tsoHeaderLen);
1975 SERIALIZE_SCALAR(tsoMss);
1976 SERIALIZE_SCALAR(tsoTotalLen);
1977 SERIALIZE_SCALAR(tsoUsedLen);
1978 SERIALIZE_SCALAR(tsoPrevSeq);;
1979 SERIALIZE_SCALAR(tsoPktPayloadBytes);
1980 SERIALIZE_SCALAR(tsoLoadedHeader);
1981 SERIALIZE_SCALAR(tsoPktHasHeader);
1982 SERIALIZE_ARRAY(tsoHeader, 256);
1983 SERIALIZE_SCALAR(tsoDescBytesUsed);
1984 SERIALIZE_SCALAR(tsoCopyBytes);
1985 SERIALIZE_SCALAR(tsoPkts);
1986
1987 SERIALIZE_SCALAR(completionAddress);
1988 SERIALIZE_SCALAR(completionEnabled);
1989 SERIALIZE_SCALAR(descEnd);
1990 }
1991
1992 void
1993 IGbE::TxDescCache::unserialize(Checkpoint *cp, const std::string &section)
1994 {
1995 DescCache<TxDesc>::unserialize(cp, section);
1996 UNSERIALIZE_SCALAR(pktDone);
1997 UNSERIALIZE_SCALAR(isTcp);
1998 UNSERIALIZE_SCALAR(pktWaiting);
1999 UNSERIALIZE_SCALAR(pktMultiDesc);
2000
2001 UNSERIALIZE_SCALAR(useTso);
2002 UNSERIALIZE_SCALAR(tsoHeaderLen);
2003 UNSERIALIZE_SCALAR(tsoMss);
2004 UNSERIALIZE_SCALAR(tsoTotalLen);
2005 UNSERIALIZE_SCALAR(tsoUsedLen);
2006 UNSERIALIZE_SCALAR(tsoPrevSeq);;
2007 UNSERIALIZE_SCALAR(tsoPktPayloadBytes);
2008 UNSERIALIZE_SCALAR(tsoLoadedHeader);
2009 UNSERIALIZE_SCALAR(tsoPktHasHeader);
2010 UNSERIALIZE_ARRAY(tsoHeader, 256);
2011 UNSERIALIZE_SCALAR(tsoDescBytesUsed);
2012 UNSERIALIZE_SCALAR(tsoCopyBytes);
2013 UNSERIALIZE_SCALAR(tsoPkts);
2014
2015 UNSERIALIZE_SCALAR(completionAddress);
2016 UNSERIALIZE_SCALAR(completionEnabled);
2017 UNSERIALIZE_SCALAR(descEnd);
2018 }
2019
2020 bool
2021 IGbE::TxDescCache::packetAvailable()
2022 {
2023 if (pktDone) {
2024 pktDone = false;
2025 return true;
2026 }
2027 return false;
2028 }
2029
2030 void
2031 IGbE::TxDescCache::enableSm()
2032 {
2033 if (!igbe->drainEvent) {
2034 igbe->txTick = true;
2035 igbe->restartClock();
2036 }
2037 }
2038
2039 bool
2040 IGbE::TxDescCache::hasOutstandingEvents()
2041 {
2042 return pktEvent.scheduled() || wbEvent.scheduled() ||
2043 fetchEvent.scheduled();
2044 }
2045
2046
2047 ///////////////////////////////////// IGbE /////////////////////////////////
2048
2049 void
2050 IGbE::restartClock()
2051 {
2052 if (!tickEvent.scheduled() && (rxTick || txTick || txFifoTick) &&
2053 getState() == SimObject::Running)
2054 schedule(tickEvent, (curTick() / ticks(1)) * ticks(1) + ticks(1));
2055 }
2056
// Begin draining the device for checkpointing. Returns the number of
// outstanding event sources that must finish before the drain completes
// (ports plus, if either descriptor cache still has events in flight, this
// device itself — in which case 'de' is saved and signalled later from
// checkDrain()).
unsigned int
IGbE::drain(Event *de)
{
    unsigned int count;
    count = pioPort.drain(de) + dmaPort.drain(de);
    if (rxDescCache.hasOutstandingEvents() ||
        txDescCache.hasOutstandingEvents()) {
        count++;
        drainEvent = de;
    }

    // Stop all state machines; resume() re-enables them.
    txFifoTick = false;
    txTick = false;
    rxTick = false;

    if (tickEvent.scheduled())
        deschedule(tickEvent);

    if (count)
        changeState(Draining);
    else
        changeState(Drained);

    DPRINTF(EthernetSM, "got drain() returning %d", count);
    return count;
}
2083
// Resume after a drain: re-enable all state machines and restart the clock.
void
IGbE::resume()
{
    SimObject::resume();

    txFifoTick = true;
    txTick = true;
    rxTick = true;

    restartClock();
    DPRINTF(EthernetSM, "resuming from drain");
}
2096
2097 void
2098 IGbE::checkDrain()
2099 {
2100 if (!drainEvent)
2101 return;
2102
2103 DPRINTF(EthernetSM, "checkDrain() in drain\n");
2104 txFifoTick = false;
2105 txTick = false;
2106 rxTick = false;
2107 if (!rxDescCache.hasOutstandingEvents() &&
2108 !txDescCache.hasOutstandingEvents()) {
2109 drainEvent->process();
2110 drainEvent = NULL;
2111 }
2112 }
2113
// One step of the transmit state machine: move a completed packet into the
// TX FIFO, or advance descriptor processing (fetch, context handling, DMA of
// packet data). Sets txTick = false whenever there is nothing to do so the
// tick loop stops; external events re-enable it.
void
IGbE::txStateMachine()
{
    if (!regs.tctl.en()) {
        txTick = false;
        DPRINTF(EthernetSM, "TXS: TX disabled, stopping ticking\n");
        return;
    }

    // If we have a packet available and it's length is not 0 (meaning it's not
    // a multidescriptor packet) put it in the fifo, otherwise an the next
    // iteration we'll get the rest of the data
    if (txPacket && txDescCache.packetAvailable()
        && !txDescCache.packetMultiDesc() && txPacket->length) {
        anQ("TXS", "TX FIFO Q");
        DPRINTF(EthernetSM, "TXS: packet placed in TX FIFO\n");
#ifndef NDEBUG
        bool success =
#endif
            txFifo.push(txPacket);
        txFifoTick = true && !drainEvent;
        assert(success);
        txPacket = NULL;
        anBegin("TXS", "Desc Writeback");
        txDescCache.writeback((cacheBlockSize()-1)>>4);
        return;
    }

    // Only support descriptor granularity
    if (regs.txdctl.lwthresh() &&
        txDescCache.descLeft() < (regs.txdctl.lwthresh() * 8)) {
        DPRINTF(EthernetSM, "TXS: LWTHRESH caused posting of TXDLOW\n");
        postInterrupt(IT_TXDLOW);
    }

    // Allocate a fresh (max-size) packet buffer if we don't have one in
    // progress.
    if (!txPacket) {
        txPacket = new EthPacketData(16384);
    }

    if (!txDescCache.packetWaiting()) {
        // Ring empty: force a writeback, start a fetch, and stop until
        // descriptors arrive.
        if (txDescCache.descLeft() == 0) {
            postInterrupt(IT_TXQE);
            anBegin("TXS", "Desc Writeback");
            txDescCache.writeback(0);
            anBegin("TXS", "Desc Fetch");
            anWe("TXS", txDescCache.annUnusedCacheQ);
            txDescCache.fetchDescriptors();
            DPRINTF(EthernetSM, "TXS: No descriptors left in ring, forcing "
                    "writeback stopping ticking and posting TXQE\n");
            txTick = false;
            return;
        }


        // Cache empty: fetch more descriptors and stop until they arrive.
        if (!(txDescCache.descUnused())) {
            anBegin("TXS", "Desc Fetch");
            txDescCache.fetchDescriptors();
            anWe("TXS", txDescCache.annUnusedCacheQ);
            DPRINTF(EthernetSM, "TXS: No descriptors available in cache, "
                    "fetching and stopping ticking\n");
            txTick = false;
            return;
        }
        anPq("TXS", txDescCache.annUnusedCacheQ);


        // Consume any context descriptors; this may start a TSO header DMA,
        // in which case we wait for it.
        txDescCache.processContextDesc();
        if (txDescCache.packetWaiting()) {
            DPRINTF(EthernetSM,
                    "TXS: Fetching TSO header, stopping ticking\n");
            txTick = false;
            return;
        }

        unsigned size = txDescCache.getPacketSize(txPacket);
        if (size > 0 && txFifo.avail() > size) {
            // Room in the FIFO: reserve it and start the payload DMA.
            anRq("TXS", "TX FIFO Q");
            anBegin("TXS", "DMA Packet");
            DPRINTF(EthernetSM, "TXS: Reserving %d bytes in FIFO and "
                    "beginning DMA of next packet\n", size);
            txFifo.reserve(size);
            txDescCache.getPacketData(txPacket);
        } else if (size == 0) {
            DPRINTF(EthernetSM, "TXS: getPacketSize returned: %d\n", size);
            DPRINTF(EthernetSM,
                    "TXS: No packets to get, writing back used descriptors\n");
            anBegin("TXS", "Desc Writeback");
            txDescCache.writeback(0);
        } else {
            anWf("TXS", "TX FIFO Q");
            DPRINTF(EthernetSM, "TXS: FIFO full, stopping ticking until space "
                    "available in FIFO\n");
            txTick = false;
        }


        return;
    }
    DPRINTF(EthernetSM, "TXS: Nothing to do, stopping ticking\n");
    txTick = false;
}
2215
// Receive a packet from the wire. Returns true if the packet was accepted
// (including the case where RX is disabled and the packet is silently
// dropped); returns false only when the RX FIFO is full, which also posts
// an overrun interrupt.
bool
IGbE::ethRxPkt(EthPacketPtr pkt)
{
    rxBytes += pkt->length;
    rxPackets++;

    DPRINTF(Ethernet, "RxFIFO: Receiving pcakte from wire\n");
    anBegin("RXQ", "Wire Recv");


    if (!regs.rctl.en()) {
        DPRINTF(Ethernet, "RxFIFO: RX not enabled, dropping\n");
        anBegin("RXQ", "FIFO Drop", CPA::FL_BAD);
        return true;
    }

    // restart the state machines if they are stopped
    rxTick = true && !drainEvent;
    if ((rxTick || txTick) && !tickEvent.scheduled()) {
        DPRINTF(EthernetSM,
                "RXS: received packet into fifo, starting ticking\n");
        restartClock();
    }

    if (!rxFifo.push(pkt)) {
        DPRINTF(Ethernet, "RxFIFO: Packet won't fit in fifo... dropped\n");
        postInterrupt(IT_RXO, true);
        anBegin("RXQ", "FIFO Drop", CPA::FL_BAD);
        return false;
    }

    // Cross-system CPA annotation: when two systems are simulated, note the
    // hand-off from the other system's wire queue into our RX FIFO.
    if (CPA::available() && cpa->enabled()) {
        assert(sys->numSystemsRunning <= 2);
        System *other_sys;
        if (sys->systemList[0] == sys)
            other_sys = sys->systemList[1];
        else
            other_sys = sys->systemList[0];

        cpa->hwDq(CPA::FL_NONE, sys, macAddr, "RXQ", "WireQ", 0, other_sys);
        anQ("RXQ", "RX FIFO Q");
        cpa->hwWe(CPA::FL_NONE, sys, macAddr, "RXQ", "WireQ", 0, other_sys);
    }

    return true;
}
2262
2263
void
IGbE::rxStateMachine()
{
    // One step of the receive state machine: retire a completed packet
    // DMA, manage descriptor writeback/fetch against the RXDCTL/RCTL
    // thresholds, and begin DMA of the next packet from the RX FIFO.
    if (!regs.rctl.en()) {
        rxTick = false;
        DPRINTF(EthernetSM, "RXS: RX disabled, stopping ticking\n");
        return;
    }

    // If the packet is done check for interrupts/descriptors/etc
    if (rxDescCache.packetDone()) {
        rxDmaPacket = false;
        DPRINTF(EthernetSM, "RXS: Packet completed DMA to memory\n");
        int descLeft = rxDescCache.descLeft();
        DPRINTF(EthernetSM, "RXS: descLeft: %d rdmts: %d rdlen: %d\n",
                descLeft, regs.rctl.rdmts(), regs.rdlen());
        // RCTL.RDMTS selects the minimum-descriptor threshold as a
        // fraction of the ring (2 -> 1/8, 1 -> 1/4, 0 -> 1/2). The case
        // fallthrough is intentional: each case breaks out only if its
        // threshold is NOT yet reached; otherwise control falls to the
        // interrupt post.
        switch (regs.rctl.rdmts()) {
          case 2: if (descLeft > .125 * regs.rdlen()) break;
          case 1: if (descLeft > .250 * regs.rdlen()) break;
          case 0: if (descLeft > .500 * regs.rdlen()) break;
            DPRINTF(Ethernet, "RXS: Interrupting (RXDMT) "
                    "because of descriptors left\n");
            postInterrupt(IT_RXDMT);
            break;
        }

        // No pending RX work: opportunistically write back used
        // descriptors.
        if (rxFifo.empty())
            rxDescCache.writeback(0);

        if (descLeft == 0) {
            anBegin("RXS", "Writeback Descriptors");
            rxDescCache.writeback(0);
            DPRINTF(EthernetSM, "RXS: No descriptors left in ring, forcing"
                    " writeback and stopping ticking\n");
            rxTick = false;
        }

        // only support descriptor granulaties
        assert(regs.rxdctl.gran());

        if (regs.rxdctl.wthresh() >= rxDescCache.descUsed()) {
            DPRINTF(EthernetSM,
                    "RXS: Writing back because WTHRESH >= descUsed\n");
            anBegin("RXS", "Writeback Descriptors");
            // Write back in cache-block-sized batches; descriptors are 16
            // bytes, hence the >>4 to convert bytes to descriptor counts.
            if (regs.rxdctl.wthresh() < (cacheBlockSize()>>4))
                rxDescCache.writeback(regs.rxdctl.wthresh()-1);
            else
                rxDescCache.writeback((cacheBlockSize()-1)>>4);
        }

        // Prefetch more descriptors when below PTHRESH, provided the ring
        // still has more than HTHRESH unfetched descriptors available.
        if ((rxDescCache.descUnused() < regs.rxdctl.pthresh()) &&
            ((rxDescCache.descLeft() - rxDescCache.descUnused()) >
             regs.rxdctl.hthresh())) {
            DPRINTF(EthernetSM, "RXS: Fetching descriptors because "
                    "descUnused < PTHRESH\n");
            anBegin("RXS", "Fetch Descriptors");
            rxDescCache.fetchDescriptors();
        }

        if (rxDescCache.descUnused() == 0) {
            anBegin("RXS", "Fetch Descriptors");
            rxDescCache.fetchDescriptors();
            anWe("RXS", rxDescCache.annUnusedCacheQ);
            DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
                    "fetching descriptors and stopping ticking\n");
            rxTick = false;
        }
        return;
    }

    // A packet DMA is in flight; pause until its completion callback
    // re-enables ticking.
    if (rxDmaPacket) {
        DPRINTF(EthernetSM,
                "RXS: stopping ticking until packet DMA completes\n");
        rxTick = false;
        return;
    }

    if (!rxDescCache.descUnused()) {
        anBegin("RXS", "Fetch Descriptors");
        rxDescCache.fetchDescriptors();
        anWe("RXS", rxDescCache.annUnusedCacheQ);
        DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
                "stopping ticking\n");
        rxTick = false;
        DPRINTF(EthernetSM, "RXS: No descriptors available, fetching\n");
        return;
    }
    anPq("RXS", rxDescCache.annUnusedCacheQ);

    if (rxFifo.empty()) {
        anWe("RXS", "RX FIFO Q");
        DPRINTF(EthernetSM, "RXS: RxFIFO empty, stopping ticking\n");
        rxTick = false;
        return;
    }
    anPq("RXS", "RX FIFO Q");
    anBegin("RXS", "Get Desc");

    EthPacketPtr pkt;
    pkt = rxFifo.front();

    // Start (or continue) DMA of the front packet; pktOffset tracks how
    // far into a packet that spans multiple descriptors we have written.
    pktOffset = rxDescCache.writePacket(pkt, pktOffset);
    DPRINTF(EthernetSM, "RXS: Writing packet into memory\n");
    if (pktOffset == pkt->length) {
        // Whole packet handed to the descriptor cache; remove from FIFO.
        anBegin( "RXS", "FIFO Dequeue");
        DPRINTF(EthernetSM, "RXS: Removing packet from FIFO\n");
        pktOffset = 0;
        anDq("RXS", "RX FIFO Q");
        rxFifo.pop();
    }

    DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n");
    rxTick = false;
    rxDmaPacket = true;
    anBegin("RXS", "DMA Packet");
}
2381
2382 void
2383 IGbE::txWire()
2384 {
2385 if (txFifo.empty()) {
2386 anWe("TXQ", "TX FIFO Q");
2387 txFifoTick = false;
2388 return;
2389 }
2390
2391
2392 anPq("TXQ", "TX FIFO Q");
2393 if (etherInt->sendPacket(txFifo.front())) {
2394 cpa->hwQ(CPA::FL_NONE, sys, macAddr, "TXQ", "WireQ", 0);
2395 if (DTRACE(EthernetSM)) {
2396 IpPtr ip(txFifo.front());
2397 if (ip)
2398 DPRINTF(EthernetSM, "Transmitting Ip packet with Id=%d\n",
2399 ip->id());
2400 else
2401 DPRINTF(EthernetSM, "Transmitting Non-Ip packet\n");
2402 }
2403 anDq("TXQ", "TX FIFO Q");
2404 anBegin("TXQ", "Wire Send");
2405 DPRINTF(EthernetSM,
2406 "TxFIFO: Successful transmit, bytes available in fifo: %d\n",
2407 txFifo.avail());
2408
2409 txBytes += txFifo.front()->length;
2410 txPackets++;
2411 txFifoTick = false;
2412
2413 txFifo.pop();
2414 } else {
2415 // We'll get woken up when the packet ethTxDone() gets called
2416 txFifoTick = false;
2417 }
2418 }
2419
2420 void
2421 IGbE::tick()
2422 {
2423 DPRINTF(EthernetSM, "IGbE: -------------- Cycle --------------\n");
2424
2425 if (rxTick)
2426 rxStateMachine();
2427
2428 if (txTick)
2429 txStateMachine();
2430
2431 if (txFifoTick)
2432 txWire();
2433
2434
2435 if (rxTick || txTick || txFifoTick)
2436 schedule(tickEvent, curTick() + ticks(1));
2437 }
2438
2439 void
2440 IGbE::ethTxDone()
2441 {
2442 anBegin("TXQ", "Send Done");
2443 // restart the tx state machines if they are stopped
2444 // fifo to send another packet
2445 // tx sm to put more data into the fifo
2446 txFifoTick = true && !drainEvent;
2447 if (txDescCache.descLeft() != 0 && !drainEvent)
2448 txTick = true;
2449
2450 restartClock();
2451 txWire();
2452 DPRINTF(EthernetSM, "TxFIFO: Transmission complete\n");
2453 }
2454
void
IGbE::serialize(std::ostream &os)
{
    // Checkpoint the device state. NOTE: the order of every serialize
    // call here must exactly mirror IGbE::unserialize().
    PciDev::serialize(os);

    regs.serialize(os);
    SERIALIZE_SCALAR(eeOpBits);
    SERIALIZE_SCALAR(eeAddrBits);
    SERIALIZE_SCALAR(eeDataBits);
    SERIALIZE_SCALAR(eeOpcode);
    SERIALIZE_SCALAR(eeAddr);
    SERIALIZE_SCALAR(lastInterrupt);
    SERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);

    rxFifo.serialize("rxfifo", os);
    txFifo.serialize("txfifo", os);

    // txPacket may be null; record an existence flag so unserialize knows
    // whether to recreate and fill the buffer.
    bool txPktExists = txPacket;
    SERIALIZE_SCALAR(txPktExists);
    if (txPktExists)
        txPacket->serialize("txpacket", os);

    // Record the absolute tick of each pending timer event; 0 means the
    // event was not scheduled at checkpoint time.
    Tick rdtr_time = 0, radv_time = 0, tidv_time = 0, tadv_time = 0,
        inter_time = 0;

    if (rdtrEvent.scheduled())
        rdtr_time = rdtrEvent.when();
    SERIALIZE_SCALAR(rdtr_time);

    if (radvEvent.scheduled())
        radv_time = radvEvent.when();
    SERIALIZE_SCALAR(radv_time);

    if (tidvEvent.scheduled())
        tidv_time = tidvEvent.when();
    SERIALIZE_SCALAR(tidv_time);

    if (tadvEvent.scheduled())
        tadv_time = tadvEvent.when();
    SERIALIZE_SCALAR(tadv_time);

    if (interEvent.scheduled())
        inter_time = interEvent.when();
    SERIALIZE_SCALAR(inter_time);

    SERIALIZE_SCALAR(pktOffset);

    // Descriptor caches serialize into their own named subsections.
    nameOut(os, csprintf("%s.TxDescCache", name()));
    txDescCache.serialize(os);

    nameOut(os, csprintf("%s.RxDescCache", name()));
    rxDescCache.serialize(os);
}
2508
void
IGbE::unserialize(Checkpoint *cp, const std::string &section)
{
    // Restore device state from a checkpoint. NOTE: the order of every
    // unserialize call here must exactly mirror IGbE::serialize().
    PciDev::unserialize(cp, section);

    regs.unserialize(cp, section);
    UNSERIALIZE_SCALAR(eeOpBits);
    UNSERIALIZE_SCALAR(eeAddrBits);
    UNSERIALIZE_SCALAR(eeDataBits);
    UNSERIALIZE_SCALAR(eeOpcode);
    UNSERIALIZE_SCALAR(eeAddr);
    UNSERIALIZE_SCALAR(lastInterrupt);
    UNSERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);

    rxFifo.unserialize("rxfifo", cp, section);
    txFifo.unserialize("txfifo", cp, section);

    // Recreate the in-flight TX packet buffer only if one existed at
    // checkpoint time (16384 matches the allocation used elsewhere in
    // this model for a maximum-size packet buffer).
    bool txPktExists;
    UNSERIALIZE_SCALAR(txPktExists);
    if (txPktExists) {
        txPacket = new EthPacketData(16384);
        txPacket->unserialize("txpacket", cp, section);
    }

    // Restart all state machines; any that have no work will stop
    // themselves on the first tick.
    rxTick = true;
    txTick = true;
    txFifoTick = true;

    // Reschedule any timer events that were pending at checkpoint time
    // (a stored time of 0 means the event was not scheduled).
    Tick rdtr_time, radv_time, tidv_time, tadv_time, inter_time;
    UNSERIALIZE_SCALAR(rdtr_time);
    UNSERIALIZE_SCALAR(radv_time);
    UNSERIALIZE_SCALAR(tidv_time);
    UNSERIALIZE_SCALAR(tadv_time);
    UNSERIALIZE_SCALAR(inter_time);

    if (rdtr_time)
        schedule(rdtrEvent, rdtr_time);

    if (radv_time)
        schedule(radvEvent, radv_time);

    if (tidv_time)
        schedule(tidvEvent, tidv_time);

    if (tadv_time)
        schedule(tadvEvent, tadv_time);

    if (inter_time)
        schedule(interEvent, inter_time);

    UNSERIALIZE_SCALAR(pktOffset);

    txDescCache.unserialize(cp, csprintf("%s.TxDescCache", section));

    rxDescCache.unserialize(cp, csprintf("%s.RxDescCache", section));
}
2565
2566 IGbE *
2567 IGbEParams::create()
2568 {
2569 return new IGbE(this);
2570 }