// IGbE: Fix two e1000 driver bugs that I missed before.
// [gem5.git] / src / dev / i8254xGBe.cc
1 /*
2 * Copyright (c) 2006 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Ali Saidi
29 */
30
31 /* @file
32 * Device model for Intel's 8254x line of gigabit ethernet controllers.
33 * In particular an 82547 revision 2 (82547GI) MAC because it seems to have the
34 * fewest workarounds in the driver. It will probably work with most of the
35 * other MACs with slight modifications.
36 */
37
38
39 /*
40 * @todo really there are multiple dma engines.. we should implement them.
41 */
42
43 #include <algorithm>
44
45 #include "base/inet.hh"
46 #include "base/trace.hh"
47 #include "dev/i8254xGBe.hh"
48 #include "mem/packet.hh"
49 #include "mem/packet_access.hh"
50 #include "params/IGbE.hh"
51 #include "sim/stats.hh"
52 #include "sim/system.hh"
53
54 using namespace iGbReg;
55 using namespace Net;
56
// Construct the device model: wire up state-machine events and descriptor
// caches, then initialize the device registers and the emulated EEPROM
// (including the MAC address and its checksum word).
IGbE::IGbE(const Params *p)
    : EtherDevice(p), etherInt(NULL), drainEvent(NULL), useFlowControl(p->use_flow_control),
      rxFifo(p->rx_fifo_size), txFifo(p->tx_fifo_size), rxTick(false),
      txTick(false), txFifoTick(false), rxDmaPacket(false), pktOffset(0),
      fetchDelay(p->fetch_delay), wbDelay(p->wb_delay),
      fetchCompDelay(p->fetch_comp_delay), wbCompDelay(p->wb_comp_delay),
      rxWriteDelay(p->rx_write_delay), txReadDelay(p->tx_read_delay),
      rdtrEvent(this), radvEvent(this),
      tadvEvent(this), tidvEvent(this), tickEvent(this), interEvent(this),
      rxDescCache(this, name()+".RxDesc", p->rx_desc_cache_size),
      txDescCache(this, name()+".TxDesc", p->tx_desc_cache_size),
      clock(p->clock), lastInterrupt(0)
{
    // The port object the simulated network connects to (see getEthPort()).
    etherInt = new IGbEInt(name() + ".int", this);

    // Initialized internal registers per Intel documentation
    // All registers intialized to 0 by per register constructor
    regs.ctrl.fd(1);
    regs.ctrl.lrst(1);
    regs.ctrl.speed(2);
    regs.ctrl.frcspd(1);
    regs.sts.speed(3); // Say we're 1000Mbps
    regs.sts.fd(1); // full duplex
    regs.sts.lu(1); // link up
    regs.eecd.fwe(1);
    regs.eecd.ee_type(1);
    regs.imr = 0;
    regs.iam = 0;
    regs.rxdctl.gran(1);
    regs.rxdctl.wthresh(1);
    regs.fcrth(1);
    regs.tdwba = 0;
    regs.rlpml = 0;
    regs.sw_fw_sync = 0;

    // Default packet-buffer allocation split between RX and TX.
    regs.pba.rxa(0x30);
    regs.pba.txa(0x10);

    // Bit-banged SPI EEPROM interface state (driven via EECD writes).
    eeOpBits = 0;
    eeAddrBits = 0;
    eeDataBits = 0;
    eeOpcode = 0;

    // clear all 64 16 bit words of the eeprom
    memset(&flash, 0, EEPROM_SIZE*2);

    // Set the MAC address
    memcpy(flash, p->hardware_address.bytes(), ETH_ADDR_LEN);
    for (int x = 0; x < ETH_ADDR_LEN/2; x++)
        flash[x] = htobe(flash[x]);

    // Sum every word so the final word can be chosen to make the whole
    // EEPROM sum to the value drivers expect (EEPROM_CSUM).
    uint16_t csum = 0;
    for (int x = 0; x < EEPROM_SIZE; x++)
        csum += htobe(flash[x]);


    // Magic happy checksum value
    flash[EEPROM_SIZE-1] = htobe((uint16_t)(EEPROM_CSUM - csum));

    rxFifo.clear();
    txFifo.clear();
}
119
120 EtherInt*
121 IGbE::getEthPort(const std::string &if_name, int idx)
122 {
123
124 if (if_name == "interface") {
125 if (etherInt->getPeer())
126 panic("Port already connected to\n");
127 return etherInt;
128 }
129 return NULL;
130 }
131
132 Tick
133 IGbE::writeConfig(PacketPtr pkt)
134 {
135 int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
136 if (offset < PCI_DEVICE_SPECIFIC)
137 PciDev::writeConfig(pkt);
138 else
139 panic("Device specific PCI config space not implemented.\n");
140
141 ///
142 /// Some work may need to be done here based for the pci COMMAND bits.
143 ///
144
145 return pioDelay;
146 }
147
// Handle a PIO read of a memory-mapped device register.  Note that some
// reads have side effects (ICR clear-on-read / auto-masking, RDTR.FPD
// flush, SWSM semaphore set), mirroring real 8254x behavior.  Returns
// the PIO delay for the access.
Tick
IGbE::read(PacketPtr pkt)
{
    int bar;
    Addr daddr;

    if (!getBAR(pkt->getAddr(), bar, daddr))
        panic("Invalid PCI memory access to unmapped memory.\n");

    // Only Memory register BAR is allowed
    assert(bar == 0);

    // Only 32bit accesses allowed
    assert(pkt->getSize() == 4);

    DPRINTF(Ethernet, "Read device register %#X\n", daddr);

    pkt->allocate();

    ///
    /// Handle read of register here
    ///


    switch (daddr) {
      case REG_CTRL:
        pkt->set<uint32_t>(regs.ctrl());
        break;
      case REG_STATUS:
        pkt->set<uint32_t>(regs.sts());
        break;
      case REG_EECD:
        pkt->set<uint32_t>(regs.eecd());
        break;
      case REG_EERD:
        pkt->set<uint32_t>(regs.eerd());
        break;
      case REG_CTRL_EXT:
        pkt->set<uint32_t>(regs.ctrl_ext());
        break;
      case REG_MDIC:
        pkt->set<uint32_t>(regs.mdic());
        break;
      case REG_ICR:
        DPRINTF(Ethernet, "Reading ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n", regs.icr(),
                regs.imr, regs.iam, regs.ctrl_ext.iame());
        pkt->set<uint32_t>(regs.icr());
        // Reading ICR clears the cause bits when either an interrupt is
        // asserted or all interrupts are masked (IMR == 0).
        if (regs.icr.int_assert() || regs.imr == 0) {
            regs.icr = regs.icr() & ~mask(30);
            DPRINTF(Ethernet, "Cleared ICR. ICR=%#x\n", regs.icr());
        }
        // Interrupt-acknowledge auto-mask: on read-with-assert, clear the
        // IAM bits out of IMR.
        if (regs.ctrl_ext.iame() && regs.icr.int_assert())
            regs.imr &= ~regs.iam;
        chkInterrupt();
        break;
      case REG_EICR:
        // This is only useful for MSI, but the driver reads it every time
        // Just don't do anything
        pkt->set<uint32_t>(0);
        break;
      case REG_ITR:
        pkt->set<uint32_t>(regs.itr());
        break;
      case REG_RCTL:
        pkt->set<uint32_t>(regs.rctl());
        break;
      case REG_FCTTV:
        pkt->set<uint32_t>(regs.fcttv());
        break;
      case REG_TCTL:
        pkt->set<uint32_t>(regs.tctl());
        break;
      case REG_PBA:
        pkt->set<uint32_t>(regs.pba());
        break;
      case REG_WUC:
      case REG_LEDCTL:
        pkt->set<uint32_t>(0); // We don't care, so just return 0
        break;
      case REG_FCRTL:
        pkt->set<uint32_t>(regs.fcrtl());
        break;
      case REG_FCRTH:
        pkt->set<uint32_t>(regs.fcrth());
        break;
      case REG_RDBAL:
        pkt->set<uint32_t>(regs.rdba.rdbal());
        break;
      case REG_RDBAH:
        pkt->set<uint32_t>(regs.rdba.rdbah());
        break;
      case REG_RDLEN:
        pkt->set<uint32_t>(regs.rdlen());
        break;
      case REG_SRRCTL:
        pkt->set<uint32_t>(regs.srrctl());
        break;
      case REG_RDH:
        pkt->set<uint32_t>(regs.rdh());
        break;
      case REG_RDT:
        pkt->set<uint32_t>(regs.rdt());
        break;
      case REG_RDTR:
        pkt->set<uint32_t>(regs.rdtr());
        // FPD (flush partial descriptor): write back cached RX
        // descriptors and post a receive-timer interrupt, then
        // self-clear the bit.
        if (regs.rdtr.fpd()) {
            rxDescCache.writeback(0);
            DPRINTF(EthernetIntr, "Posting interrupt because of RDTR.FPD write\n");
            postInterrupt(IT_RXT);
            regs.rdtr.fpd(0);
        }
        break;
      case REG_RXDCTL:
        pkt->set<uint32_t>(regs.rxdctl());
        break;
      case REG_RADV:
        pkt->set<uint32_t>(regs.radv());
        break;
      case REG_TDBAL:
        pkt->set<uint32_t>(regs.tdba.tdbal());
        break;
      case REG_TDBAH:
        pkt->set<uint32_t>(regs.tdba.tdbah());
        break;
      case REG_TDLEN:
        pkt->set<uint32_t>(regs.tdlen());
        break;
      case REG_TDH:
        pkt->set<uint32_t>(regs.tdh());
        break;
      case REG_TXDCA_CTL:
        pkt->set<uint32_t>(regs.txdca_ctl());
        break;
      case REG_TDT:
        pkt->set<uint32_t>(regs.tdt());
        break;
      case REG_TIDV:
        pkt->set<uint32_t>(regs.tidv());
        break;
      case REG_TXDCTL:
        pkt->set<uint32_t>(regs.txdctl());
        break;
      case REG_TADV:
        pkt->set<uint32_t>(regs.tadv());
        break;
      case REG_TDWBAL:
        // tdwba is stored as a single 64-bit value; return low/high half.
        pkt->set<uint32_t>(regs.tdwba & mask(32));
        break;
      case REG_TDWBAH:
        pkt->set<uint32_t>(regs.tdwba >> 32);
        break;
      case REG_RXCSUM:
        pkt->set<uint32_t>(regs.rxcsum());
        break;
      case REG_RLPML:
        pkt->set<uint32_t>(regs.rlpml);
        break;
      case REG_RFCTL:
        pkt->set<uint32_t>(regs.rfctl());
        break;
      case REG_MANC:
        pkt->set<uint32_t>(regs.manc());
        break;
      case REG_SWSM:
        // Software semaphore: reading returns the current value and then
        // sets the SMBI bit, as the hardware semaphore protocol requires.
        pkt->set<uint32_t>(regs.swsm());
        regs.swsm.smbi(1);
        break;
      case REG_FWSM:
        pkt->set<uint32_t>(regs.fwsm());
        break;
      case REG_SWFWSYNC:
        pkt->set<uint32_t>(regs.sw_fw_sync);
        break;
      default:
        // Reads of the VLAN filter, receive-address, multicast, and
        // statistics tables are accepted and return 0; anything else is
        // an unknown register.
        if (!(daddr >= REG_VFTA && daddr < (REG_VFTA + VLAN_FILTER_TABLE_SIZE*4)) &&
            !(daddr >= REG_RAL && daddr < (REG_RAL + RCV_ADDRESS_TABLE_SIZE*8)) &&
            !(daddr >= REG_MTA && daddr < (REG_MTA + MULTICAST_TABLE_SIZE*4)) &&
            !(daddr >= REG_CRCERRS && daddr < (REG_CRCERRS + STATS_REGS_SIZE)))
            panic("Read request to unknown register number: %#x\n", daddr);
        else
            pkt->set<uint32_t>(0);
    };

    pkt->makeAtomicResponse();
    return pioDelay;
}
334
335 Tick
336 IGbE::write(PacketPtr pkt)
337 {
338 int bar;
339 Addr daddr;
340
341
342 if (!getBAR(pkt->getAddr(), bar, daddr))
343 panic("Invalid PCI memory access to unmapped memory.\n");
344
345 // Only Memory register BAR is allowed
346 assert(bar == 0);
347
348 // Only 32bit accesses allowed
349 assert(pkt->getSize() == sizeof(uint32_t));
350
351 DPRINTF(Ethernet, "Wrote device register %#X value %#X\n", daddr, pkt->get<uint32_t>());
352
353 ///
354 /// Handle write of register here
355 ///
356 uint32_t val = pkt->get<uint32_t>();
357
358 Regs::RCTL oldrctl;
359 Regs::TCTL oldtctl;
360
361 switch (daddr) {
362 case REG_CTRL:
363 regs.ctrl = val;
364 if (regs.ctrl.tfce())
365 warn("TX Flow control enabled, should implement\n");
366 if (regs.ctrl.rfce())
367 warn("RX Flow control enabled, should implement\n");
368 break;
369 case REG_CTRL_EXT:
370 regs.ctrl_ext = val;
371 break;
372 case REG_STATUS:
373 regs.sts = val;
374 break;
375 case REG_EECD:
376 int oldClk;
377 oldClk = regs.eecd.sk();
378 regs.eecd = val;
379 // See if this is a eeprom access and emulate accordingly
380 if (!oldClk && regs.eecd.sk()) {
381 if (eeOpBits < 8) {
382 eeOpcode = eeOpcode << 1 | regs.eecd.din();
383 eeOpBits++;
384 } else if (eeAddrBits < 8 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
385 eeAddr = eeAddr << 1 | regs.eecd.din();
386 eeAddrBits++;
387 } else if (eeDataBits < 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
388 assert(eeAddr>>1 < EEPROM_SIZE);
389 DPRINTF(EthernetEEPROM, "EEPROM bit read: %d word: %#X\n",
390 flash[eeAddr>>1] >> eeDataBits & 0x1, flash[eeAddr>>1]);
391 regs.eecd.dout((flash[eeAddr>>1] >> (15-eeDataBits)) & 0x1);
392 eeDataBits++;
393 } else if (eeDataBits < 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI) {
394 regs.eecd.dout(0);
395 eeDataBits++;
396 } else
397 panic("What's going on with eeprom interface? opcode:"
398 " %#x:%d addr: %#x:%d, data: %d\n", (uint32_t)eeOpcode,
399 (uint32_t)eeOpBits, (uint32_t)eeAddr,
400 (uint32_t)eeAddrBits, (uint32_t)eeDataBits);
401
402 // Reset everything for the next command
403 if ((eeDataBits == 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) ||
404 (eeDataBits == 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI)) {
405 eeOpBits = 0;
406 eeAddrBits = 0;
407 eeDataBits = 0;
408 eeOpcode = 0;
409 eeAddr = 0;
410 }
411
412 DPRINTF(EthernetEEPROM, "EEPROM: opcode: %#X:%d addr: %#X:%d\n",
413 (uint32_t)eeOpcode, (uint32_t) eeOpBits,
414 (uint32_t)eeAddr>>1, (uint32_t)eeAddrBits);
415 if (eeOpBits == 8 && !(eeOpcode == EEPROM_READ_OPCODE_SPI ||
416 eeOpcode == EEPROM_RDSR_OPCODE_SPI ))
417 panic("Unknown eeprom opcode: %#X:%d\n", (uint32_t)eeOpcode,
418 (uint32_t)eeOpBits);
419
420
421 }
422 // If driver requests eeprom access, immediately give it to it
423 regs.eecd.ee_gnt(regs.eecd.ee_req());
424 break;
425 case REG_EERD:
426 regs.eerd = val;
427 if (regs.eerd.start()) {
428 regs.eerd.done(1);
429 assert(regs.eerd.addr() < EEPROM_SIZE);
430 regs.eerd.data(flash[regs.eerd.addr()]);
431 regs.eerd.start(0);
432 DPRINTF(EthernetEEPROM, "EEPROM: read addr: %#X data %#x\n",
433 regs.eerd.addr(), regs.eerd.data());
434 }
435 break;
436 case REG_MDIC:
437 regs.mdic = val;
438 if (regs.mdic.i())
439 panic("No support for interrupt on mdic complete\n");
440 if (regs.mdic.phyadd() != 1)
441 panic("No support for reading anything but phy\n");
442 DPRINTF(Ethernet, "%s phy address %x\n", regs.mdic.op() == 1 ? "Writing"
443 : "Reading", regs.mdic.regadd());
444 switch (regs.mdic.regadd()) {
445 case PHY_PSTATUS:
446 regs.mdic.data(0x796D); // link up
447 break;
448 case PHY_PID:
449 regs.mdic.data(params()->phy_pid);
450 break;
451 case PHY_EPID:
452 regs.mdic.data(params()->phy_epid);
453 break;
454 case PHY_GSTATUS:
455 regs.mdic.data(0x7C00);
456 break;
457 case PHY_EPSTATUS:
458 regs.mdic.data(0x3000);
459 break;
460 case PHY_AGC:
461 regs.mdic.data(0x180); // some random length
462 break;
463 default:
464 regs.mdic.data(0);
465 }
466 regs.mdic.r(1);
467 break;
468 case REG_ICR:
469 DPRINTF(Ethernet, "Writing ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n", regs.icr(),
470 regs.imr, regs.iam, regs.ctrl_ext.iame());
471 if (regs.ctrl_ext.iame())
472 regs.imr &= ~regs.iam;
473 regs.icr = ~bits(val,30,0) & regs.icr();
474 chkInterrupt();
475 break;
476 case REG_ITR:
477 regs.itr = val;
478 break;
479 case REG_ICS:
480 DPRINTF(EthernetIntr, "Posting interrupt because of ICS write\n");
481 postInterrupt((IntTypes)val);
482 break;
483 case REG_IMS:
484 regs.imr |= val;
485 chkInterrupt();
486 break;
487 case REG_IMC:
488 regs.imr &= ~val;
489 chkInterrupt();
490 break;
491 case REG_IAM:
492 regs.iam = val;
493 break;
494 case REG_RCTL:
495 oldrctl = regs.rctl;
496 regs.rctl = val;
497 if (regs.rctl.rst()) {
498 rxDescCache.reset();
499 DPRINTF(EthernetSM, "RXS: Got RESET!\n");
500 rxFifo.clear();
501 regs.rctl.rst(0);
502 }
503 if (regs.rctl.en())
504 rxTick = true;
505 restartClock();
506 break;
507 case REG_FCTTV:
508 regs.fcttv = val;
509 break;
510 case REG_TCTL:
511 regs.tctl = val;
512 oldtctl = regs.tctl;
513 regs.tctl = val;
514 if (regs.tctl.en())
515 txTick = true;
516 restartClock();
517 if (regs.tctl.en() && !oldtctl.en()) {
518 txDescCache.reset();
519 }
520 break;
521 case REG_PBA:
522 regs.pba.rxa(val);
523 regs.pba.txa(64 - regs.pba.rxa());
524 break;
525 case REG_WUC:
526 case REG_LEDCTL:
527 case REG_FCAL:
528 case REG_FCAH:
529 case REG_FCT:
530 case REG_VET:
531 case REG_AIFS:
532 case REG_TIPG:
533 ; // We don't care, so don't store anything
534 break;
535 case REG_IVAR0:
536 warn("Writing to IVAR0, ignoring...\n");
537 break;
538 case REG_FCRTL:
539 regs.fcrtl = val;
540 break;
541 case REG_FCRTH:
542 regs.fcrth = val;
543 break;
544 case REG_RDBAL:
545 regs.rdba.rdbal( val & ~mask(4));
546 rxDescCache.areaChanged();
547 break;
548 case REG_RDBAH:
549 regs.rdba.rdbah(val);
550 rxDescCache.areaChanged();
551 break;
552 case REG_RDLEN:
553 regs.rdlen = val & ~mask(7);
554 rxDescCache.areaChanged();
555 break;
556 case REG_SRRCTL:
557 regs.srrctl = val;
558 break;
559 case REG_RDH:
560 regs.rdh = val;
561 rxDescCache.areaChanged();
562 break;
563 case REG_RDT:
564 regs.rdt = val;
565 DPRINTF(EthernetSM, "RXS: RDT Updated.\n");
566 if (getState() == SimObject::Running) {
567 DPRINTF(EthernetSM, "RXS: RDT Fetching Descriptors!\n");
568 rxDescCache.fetchDescriptors();
569 } else {
570 DPRINTF(EthernetSM, "RXS: RDT NOT Fetching Desc b/c draining!\n");
571 }
572 break;
573 case REG_RDTR:
574 regs.rdtr = val;
575 break;
576 case REG_RADV:
577 regs.radv = val;
578 break;
579 case REG_RXDCTL:
580 regs.rxdctl = val;
581 break;
582 case REG_TDBAL:
583 regs.tdba.tdbal( val & ~mask(4));
584 txDescCache.areaChanged();
585 break;
586 case REG_TDBAH:
587 regs.tdba.tdbah(val);
588 txDescCache.areaChanged();
589 break;
590 case REG_TDLEN:
591 regs.tdlen = val & ~mask(7);
592 txDescCache.areaChanged();
593 break;
594 case REG_TDH:
595 regs.tdh = val;
596 txDescCache.areaChanged();
597 break;
598 case REG_TXDCA_CTL:
599 regs.txdca_ctl = val;
600 if (regs.txdca_ctl.enabled())
601 panic("No support for DCA\n");
602 break;
603 case REG_TDT:
604 regs.tdt = val;
605 DPRINTF(EthernetSM, "TXS: TX Tail pointer updated\n");
606 if (getState() == SimObject::Running) {
607 DPRINTF(EthernetSM, "TXS: TDT Fetching Descriptors!\n");
608 txDescCache.fetchDescriptors();
609 } else {
610 DPRINTF(EthernetSM, "TXS: TDT NOT Fetching Desc b/c draining!\n");
611 }
612 break;
613 case REG_TIDV:
614 regs.tidv = val;
615 break;
616 case REG_TXDCTL:
617 regs.txdctl = val;
618 break;
619 case REG_TADV:
620 regs.tadv = val;
621 break;
622 case REG_TDWBAL:
623 regs.tdwba &= ~mask(32);
624 regs.tdwba |= val;
625 txDescCache.completionWriteback(regs.tdwba & ~mask(1), regs.tdwba & mask(1));
626 break;
627 case REG_TDWBAH:
628 regs.tdwba &= mask(32);
629 regs.tdwba |= (uint64_t)val << 32;
630 txDescCache.completionWriteback(regs.tdwba & ~mask(1), regs.tdwba & mask(1));
631 break;
632 case REG_RXCSUM:
633 regs.rxcsum = val;
634 break;
635 case REG_RLPML:
636 regs.rlpml = val;
637 break;
638 case REG_RFCTL:
639 regs.rfctl = val;
640 if (regs.rfctl.exsten())
641 panic("Extended RX descriptors not implemented\n");
642 break;
643 case REG_MANC:
644 regs.manc = val;
645 break;
646 case REG_SWSM:
647 regs.swsm = val;
648 if (regs.fwsm.eep_fw_semaphore())
649 regs.swsm.swesmbi(0);
650 break;
651 case REG_SWFWSYNC:
652 regs.sw_fw_sync = val;
653 break;
654 default:
655 if (!(daddr >= REG_VFTA && daddr < (REG_VFTA + VLAN_FILTER_TABLE_SIZE*4)) &&
656 !(daddr >= REG_RAL && daddr < (REG_RAL + RCV_ADDRESS_TABLE_SIZE*8)) &&
657 !(daddr >= REG_MTA && daddr < (REG_MTA + MULTICAST_TABLE_SIZE*4)))
658 panic("Write request to unknown register number: %#x\n", daddr);
659 };
660
661 pkt->makeAtomicResponse();
662 return pioDelay;
663 }
664
// Record an interrupt cause in ICR and either raise it immediately or
// throttle it per the ITR (interrupt throttling) register.  If 'now' is
// set, the ITR throttle is bypassed.
void
IGbE::postInterrupt(IntTypes t, bool now)
{
    assert(t);

    // Interrupt is already pending
    if (t & regs.icr() && !now)
        return;

    regs.icr = regs.icr() | t;

    // ITR interval is specified in units of 256 ns.
    Tick itr_interval = Clock::Int::ns * 256 * regs.itr.interval();
    DPRINTF(EthernetIntr, "EINT: postInterrupt() curTick: %d itr: %d interval: %d\n",
            curTick, regs.itr.interval(), itr_interval);

    // Fire now if throttling is off, the caller forced it, or enough time
    // has passed since the last interrupt; otherwise schedule for the
    // earliest tick the throttle allows.
    if (regs.itr.interval() == 0 || now || lastInterrupt + itr_interval <= curTick) {
        if (interEvent.scheduled()) {
            deschedule(interEvent);
        }
        cpuPostInt();
    } else {
        Tick int_time = lastInterrupt + itr_interval;
        assert(int_time > 0);
        DPRINTF(EthernetIntr, "EINT: Scheduling timer interrupt for tick %d\n",
                int_time);
        if (!interEvent.scheduled()) {
            schedule(interEvent, int_time);
        }
    }
}
695
// Callback for the throttled-interrupt timer: deliver the pending
// interrupt to the CPU.
void
IGbE::delayIntEvent()
{
    cpuPostInt();
}
701
702
// Actually assert the interrupt line to the CPU.  Any pending delayed
// interrupt timers (RDTR/RADV/TIDV/TADV) are collapsed into this
// interrupt: their cause bits are set and the timers cancelled.
void
IGbE::cpuPostInt()
{

    postedInterrupts++;

    // All causes masked off -- nothing to deliver.
    if (!(regs.icr() & regs.imr)) {
        DPRINTF(Ethernet, "Interrupt Masked. Not Posting\n");
        return;
    }

    DPRINTF(Ethernet, "Posting Interrupt\n");


    // Cancel the throttle timer; we're firing now.
    if (interEvent.scheduled()) {
        deschedule(interEvent);
    }

    // Fold any armed delay timers into this interrupt.
    if (rdtrEvent.scheduled()) {
        regs.icr.rxt0(1);
        deschedule(rdtrEvent);
    }
    if (radvEvent.scheduled()) {
        regs.icr.rxt0(1);
        deschedule(radvEvent);
    }
    if (tadvEvent.scheduled()) {
        regs.icr.txdw(1);
        deschedule(tadvEvent);
    }
    if (tidvEvent.scheduled()) {
        regs.icr.txdw(1);
        deschedule(tidvEvent);
    }

    regs.icr.int_assert(1);
    DPRINTF(EthernetIntr, "EINT: Posting interrupt to CPU now. Vector %#x\n",
            regs.icr());

    intrPost();

    // Remember when we fired for ITR throttling (see postInterrupt()).
    lastInterrupt = curTick;
}
746
747 void
748 IGbE::cpuClearInt()
749 {
750 if (regs.icr.int_assert()) {
751 regs.icr.int_assert(0);
752 DPRINTF(EthernetIntr, "EINT: Clearing interrupt to CPU now. Vector %#x\n",
753 regs.icr());
754 intrClear();
755 }
756 }
757
// Re-evaluate the interrupt state after ICR/IMR changed: clear the CPU
// interrupt if everything is now masked, or (re)post/schedule one if an
// unmasked cause remains, honoring ITR throttling.
void
IGbE::chkInterrupt()
{
    DPRINTF(Ethernet, "Checking interrupts icr: %#x imr: %#x\n", regs.icr(),
            regs.imr);
    // Check if we need to clear the cpu interrupt
    if (!(regs.icr() & regs.imr)) {
        DPRINTF(Ethernet, "Mask cleaned all interrupts\n");
        if (interEvent.scheduled())
            deschedule(interEvent);
        if (regs.icr.int_assert())
            cpuClearInt();
    }
    DPRINTF(Ethernet, "ITR = %#X itr.interval = %#X\n", regs.itr(), regs.itr.interval());

    if (regs.icr() & regs.imr) {
        // Unmasked cause pending: deliver now, or schedule per ITR
        // (interval units are 256 ns).
        if (regs.itr.interval() == 0) {
            cpuPostInt();
        } else {
            DPRINTF(Ethernet, "Possibly scheduling interrupt because of imr write\n");
            if (!interEvent.scheduled()) {
                DPRINTF(Ethernet, "Scheduling for %d\n", curTick + Clock::Int::ns
                        * 256 * regs.itr.interval());
                schedule(interEvent,
                         curTick + Clock::Int::ns * 256 * regs.itr.interval());
            }
        }
    }


}
789
790
// RX descriptor cache: caches up to 's' receive descriptors and tracks
// the DMA events used to write received packets into guest memory.
IGbE::RxDescCache::RxDescCache(IGbE *i, const std::string n, int s)
    : DescCache<RxDesc>(i, n, s), pktDone(false), splitCount(0),
      pktEvent(this), pktHdrEvent(this), pktDataEvent(this)

{
}
797
798 void
799 IGbE::RxDescCache::pktSplitDone()
800 {
801 splitCount++;
802 DPRINTF(EthernetDesc, "Part of split packet done: splitcount now %d\n", splitCount);
803 assert(splitCount <= 2);
804 if (splitCount != 2)
805 return;
806 splitCount = 0;
807 DPRINTF(EthernetDesc, "Part of split packet done: calling pktComplete()\n");
808 pktComplete();
809 }
810
// Start DMAing a received packet into the buffer(s) of the next unused
// RX descriptor.  Supports legacy, advanced one-buffer, and advanced
// header-split descriptor formats.  'pkt_offset' is non-zero only when
// continuing a packet that spans multiple (header-split) descriptors.
// Returns the total number of packet bytes copied so far (bytesCopied);
// completion is signalled asynchronously via pktEvent / pktSplitDone.
int
IGbE::RxDescCache::writePacket(EthPacketPtr packet, int pkt_offset)
{
    assert(unusedCache.size());
    //if (!unusedCache.size())
    //    return false;

    pktPtr = packet;
    pktDone = false;
    int buf_len, hdr_len;

    RxDesc *desc = unusedCache.front();
    switch (igbe->regs.srrctl.desctype()) {
      case RXDT_LEGACY:
        // Legacy descriptor: whole packet into a single buffer.
        assert(pkt_offset == 0);
        bytesCopied = packet->length;
        DPRINTF(EthernetDesc, "Packet Length: %d Desc Size: %d\n",
                packet->length, igbe->regs.rctl.descSize());
        assert(packet->length < igbe->regs.rctl.descSize());
        igbe->dmaWrite(igbe->platform->pciToDma(desc->legacy.buf), packet->length, &pktEvent,
                       packet->data, igbe->rxWriteDelay);
        break;
      case RXDT_ADV_ONEBUF:
        // Advanced one-buffer descriptor: whole packet into the pkt
        // buffer; write-back fields note no header was split off.
        assert(pkt_offset == 0);
        bytesCopied = packet->length;
        buf_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.bufLen() :
            igbe->regs.rctl.descSize();
        DPRINTF(EthernetDesc, "Packet Length: %d srrctl: %#x Desc Size: %d\n",
                packet->length, igbe->regs.srrctl(), buf_len);
        assert(packet->length < buf_len);
        igbe->dmaWrite(igbe->platform->pciToDma(desc->adv_read.pkt), packet->length, &pktEvent,
                       packet->data, igbe->rxWriteDelay);
        desc->adv_wb.header_len = htole(0);
        desc->adv_wb.sph = htole(0);
        desc->adv_wb.pkt_len = htole((uint16_t)(pktPtr->length));
        break;
      case RXDT_ADV_SPLIT_A:
        // Advanced header-split descriptor: protocol headers go into the
        // hdr buffer, payload into the pkt buffer, possibly continuing
        // into further descriptors (pkt_offset != 0 on continuation).
        int split_point;

        buf_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.bufLen() :
            igbe->regs.rctl.descSize();
        hdr_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.hdrLen() : 0;
        DPRINTF(EthernetDesc, "lpe: %d Packet Length: %d offset: %d srrctl: %#x hdr addr: %#x Hdr Size: %d desc addr: %#x Desc Size: %d\n",
                igbe->regs.rctl.lpe(), packet->length, pkt_offset, igbe->regs.srrctl(), desc->adv_read.hdr, hdr_len, desc->adv_read.pkt, buf_len);

        // Where the protocol headers end (0 if packet is undecodable).
        split_point = hsplit(pktPtr);

        if (packet->length <= hdr_len) {
            // Entire packet fits in the header buffer.
            bytesCopied = packet->length;
            assert(pkt_offset == 0);
            DPRINTF(EthernetDesc, "Header Splitting: Entire packet being placed in header\n");
            igbe->dmaWrite(igbe->platform->pciToDma(desc->adv_read.hdr), packet->length, &pktEvent,
                           packet->data, igbe->rxWriteDelay);
            desc->adv_wb.header_len = htole((uint16_t)packet->length);
            desc->adv_wb.sph = htole(0);
            desc->adv_wb.pkt_len = htole(0);
        } else if (split_point) {
            if (pkt_offset) {
                // we are only copying some data, header/data has already been
                // copied
                int max_to_copy = std::min(packet->length - pkt_offset, buf_len);
                bytesCopied += max_to_copy;
                DPRINTF(EthernetDesc, "Header Splitting: Continuing data buffer copy\n");
                igbe->dmaWrite(igbe->platform->pciToDma(desc->adv_read.pkt),max_to_copy, &pktEvent,
                               packet->data + pkt_offset, igbe->rxWriteDelay);
                desc->adv_wb.header_len = htole(0);
                desc->adv_wb.pkt_len = htole((uint16_t)max_to_copy);
                desc->adv_wb.sph = htole(0);
            } else {
                // First descriptor of a split packet: two DMAs (header +
                // data); pktSplitDone() fires pktComplete() after both.
                int max_to_copy = std::min(packet->length - split_point, buf_len);
                bytesCopied += max_to_copy + split_point;

                DPRINTF(EthernetDesc, "Header Splitting: splitting at %d\n",
                        split_point);
                igbe->dmaWrite(igbe->platform->pciToDma(desc->adv_read.hdr), split_point, &pktHdrEvent,
                               packet->data, igbe->rxWriteDelay);
                igbe->dmaWrite(igbe->platform->pciToDma(desc->adv_read.pkt),
                               max_to_copy, &pktDataEvent, packet->data + split_point, igbe->rxWriteDelay);
                desc->adv_wb.header_len = htole(split_point);
                desc->adv_wb.sph = 1;
                desc->adv_wb.pkt_len = htole((uint16_t)(max_to_copy));
            }
        } else {
            panic("Header split not fitting within header buffer or undecodable"
                  " packet not fitting in header unsupported\n");
        }
        break;
      default:
        panic("Unimplemnted RX receive buffer type: %d\n",
              igbe->regs.srrctl.desctype());
    }
    return bytesCopied;

}
905
// Called when the DMA(s) for the current descriptor finish: fill in the
// descriptor's write-back fields (length, status, errors, checksum
// offload results), move it to the used list, and — if the whole packet
// has now been written — arm/post the appropriate receive interrupts.
void
IGbE::RxDescCache::pktComplete()
{
    assert(unusedCache.size());
    RxDesc *desc;
    desc = unusedCache.front();

    // Unless the driver asked us to strip the CRC, the reported length
    // includes the 4-byte ethernet FCS.
    uint16_t crcfixup = igbe->regs.rctl.secrc() ? 0 : 4 ;
    DPRINTF(EthernetDesc, "pktPtr->length: %d bytesCopied: %d stripcrc offset: %d value written: %d %d\n",
            pktPtr->length, bytesCopied, crcfixup,
            htole((uint16_t)(pktPtr->length + crcfixup)),
            (uint16_t)(pktPtr->length + crcfixup));

    // no support for anything but starting at 0
    assert(igbe->regs.rxcsum.pcss() == 0);

    DPRINTF(EthernetDesc, "Packet written to memory updating Descriptor\n");

    uint16_t status = RXDS_DD;
    uint8_t err = 0;
    uint16_t ext_err = 0;
    uint16_t csum = 0;
    uint16_t ptype = 0;
    uint16_t ip_id = 0;

    // EOP is set only on the descriptor holding the packet's last byte.
    assert(bytesCopied <= pktPtr->length);
    if (bytesCopied == pktPtr->length)
        status |= RXDS_EOP;

    IpPtr ip(pktPtr);

    if (ip) {
        // Perform the receive checksum offloads the driver enabled and
        // record per-protocol status/error bits.
        DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n", ip->id());
        ptype |= RXDP_IPV4;
        ip_id = ip->id();

        if (igbe->regs.rxcsum.ipofld()) {
            DPRINTF(EthernetDesc, "Checking IP checksum\n");
            status |= RXDS_IPCS;
            csum = htole(cksum(ip));
            igbe->rxIpChecksums++;
            if (cksum(ip) != 0) {
                err |= RXDE_IPE;
                ext_err |= RXDEE_IPE;
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
            }
        }
        TcpPtr tcp(ip);
        if (tcp && igbe->regs.rxcsum.tuofld()) {
            DPRINTF(EthernetDesc, "Checking TCP checksum\n");
            status |= RXDS_TCPCS;
            ptype |= RXDP_TCP;
            csum = htole(cksum(tcp));
            igbe->rxTcpChecksums++;
            if (cksum(tcp) != 0) {
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
                err |= RXDE_TCPE;
                ext_err |= RXDEE_TCPE;
            }
        }

        UdpPtr udp(ip);
        if (udp && igbe->regs.rxcsum.tuofld()) {
            DPRINTF(EthernetDesc, "Checking UDP checksum\n");
            status |= RXDS_UDPCS;
            ptype |= RXDP_UDP;
            csum = htole(cksum(udp));
            igbe->rxUdpChecksums++;
            if (cksum(udp) != 0) {
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
                // NOTE(review): UDP errors reuse the TCP error bits here;
                // presumably intentional since no UDP-specific bit exists
                // in this model -- confirm against the 8254x manual.
                ext_err |= RXDEE_TCPE;
                err |= RXDE_TCPE;
            }
        }
    } else { // if ip
        DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n");
    }

    // Write the format-specific write-back layout.
    switch (igbe->regs.srrctl.desctype()) {
      case RXDT_LEGACY:
        desc->legacy.len = htole((uint16_t)(pktPtr->length + crcfixup));
        desc->legacy.status = htole(status);
        desc->legacy.errors = htole(err);
        // No vlan support at this point... just set it to 0
        desc->legacy.vlan = 0;
        break;
      case RXDT_ADV_SPLIT_A:
      case RXDT_ADV_ONEBUF:
        desc->adv_wb.rss_type = htole(0);
        desc->adv_wb.pkt_type = htole(ptype);
        if (igbe->regs.rxcsum.pcsd()) {
            // no rss support right now
            desc->adv_wb.rss_hash = htole(0);
        } else {
            desc->adv_wb.id = htole(ip_id);
            desc->adv_wb.csum = htole(csum);
        }
        desc->adv_wb.status = htole(status);
        desc->adv_wb.errors = htole(ext_err);
        // no vlan support
        desc->adv_wb.vlan_tag = htole(0);
        break;
      default:
        panic("Unimplemnted RX receive buffer type %d\n",
              igbe->regs.srrctl.desctype());
    }

    DPRINTF(EthernetDesc, "Descriptor complete w0: %#x w1: %#x\n",
            desc->adv_read.pkt, desc->adv_read.hdr);

    if (bytesCopied == pktPtr->length) {
        DPRINTF(EthernetDesc, "Packet completely written to descriptor buffers\n");
        // Deal with the rx timer interrupts
        if (igbe->regs.rdtr.delay()) {
            DPRINTF(EthernetSM, "RXS: Scheduling DTR for %d\n",
                    igbe->regs.rdtr.delay() * igbe->intClock());
            igbe->reschedule(igbe->rdtrEvent,
                             curTick + igbe->regs.rdtr.delay() * igbe->intClock(), true);
        }

        if (igbe->regs.radv.idv()) {
            DPRINTF(EthernetSM, "RXS: Scheduling ADV for %d\n",
                    igbe->regs.radv.idv() * igbe->intClock());
            if (!igbe->radvEvent.scheduled()) {
                igbe->schedule(igbe->radvEvent,
                               curTick + igbe->regs.radv.idv() * igbe->intClock());
            }
        }

        // if neither radv or rdtr, maybe itr is set...
        if (!igbe->regs.rdtr.delay() && !igbe->regs.radv.idv()) {
            DPRINTF(EthernetSM, "RXS: Receive interrupt delay disabled, posting IT_RXT\n");
            igbe->postInterrupt(IT_RXT);
        }

        // If the packet is small enough, interrupt appropriately
        // I wonder if this is delayed or not?!
        if (pktPtr->length <= igbe->regs.rsrpd.idv()) {
            DPRINTF(EthernetSM, "RXS: Posting IT_SRPD beacuse small packet received\n");
            igbe->postInterrupt(IT_SRPD);
        }
        bytesCopied = 0;
    }

    pktPtr = NULL;
    igbe->checkDrain();
    enableSm();
    pktDone = true;

    DPRINTF(EthernetDesc, "Processing of this descriptor complete\n");
    unusedCache.pop_front();
    usedCache.push_back(desc);
}
1059
1060 void
1061 IGbE::RxDescCache::enableSm()
1062 {
1063 if (!igbe->drainEvent) {
1064 igbe->rxTick = true;
1065 igbe->restartClock();
1066 }
1067 }
1068
1069 bool
1070 IGbE::RxDescCache::packetDone()
1071 {
1072 if (pktDone) {
1073 pktDone = false;
1074 return true;
1075 }
1076 return false;
1077 }
1078
1079 bool
1080 IGbE::RxDescCache::hasOutstandingEvents()
1081 {
1082 return pktEvent.scheduled() || wbEvent.scheduled() ||
1083 fetchEvent.scheduled() || pktHdrEvent.scheduled() ||
1084 pktDataEvent.scheduled();
1085
1086 }
1087
// Checkpoint the RX descriptor cache: base-class state plus the
// per-packet progress flags.
void
IGbE::RxDescCache::serialize(std::ostream &os)
{
    DescCache<RxDesc>::serialize(os);
    SERIALIZE_SCALAR(pktDone);
    SERIALIZE_SCALAR(splitCount);
    SERIALIZE_SCALAR(bytesCopied);
}
1096
// Restore the RX descriptor cache from a checkpoint; mirror image of
// serialize() above.
void
IGbE::RxDescCache::unserialize(Checkpoint *cp, const std::string &section)
{
    DescCache<RxDesc>::unserialize(cp, section);
    UNSERIALIZE_SCALAR(pktDone);
    UNSERIALIZE_SCALAR(splitCount);
    UNSERIALIZE_SCALAR(bytesCopied);
}
1105
1106
1107 ///////////////////////////////////// IGbE::TxDesc /////////////////////////////////
1108
// TX descriptor cache: caches up to 's' transmit descriptors and tracks
// TSO (TCP segmentation offload) and completion-writeback state.
IGbE::TxDescCache::TxDescCache(IGbE *i, const std::string n, int s)
    : DescCache<TxDesc>(i,n, s), pktDone(false), isTcp(false), pktWaiting(false),
      completionAddress(0), completionEnabled(false),
      useTso(false), pktEvent(this), headerEvent(this), nullEvent(this)

{
}
1116
void
IGbE::TxDescCache::processContextDesc()
{
    // Consume any context descriptors at the head of the unused cache,
    // latching their offload parameters (TCP vs UDP, TSO header length,
    // MSS). If segmentation offload is requested -- either by a legacy
    // context descriptor or an advanced data descriptor with TSE set --
    // arm the TSO state and start the DMA of the packet header.
    assert(unusedCache.size());
    TxDesc *desc;

    DPRINTF(EthernetDesc, "Checking and processing context descriptors\n");

    while (!useTso && unusedCache.size() && TxdOp::isContext(unusedCache.front())) {
        DPRINTF(EthernetDesc, "Got context descriptor type...\n");

        desc = unusedCache.front();
        DPRINTF(EthernetDesc, "Descriptor upper: %#x lower: %#X\n",
                desc->d1, desc->d2);


        // is this going to be a tcp or udp packet?
        isTcp = TxdOp::tcp(desc) ? true : false;

        // setup all the TSO variables, they'll be ignored if we don't use
        // tso for this connection
        tsoHeaderLen = TxdOp::hdrlen(desc);
        tsoMss  = TxdOp::mss(desc);

        if (TxdOp::isType(desc, TxdOp::TXD_CNXT) && TxdOp::tse(desc)) {
            DPRINTF(EthernetDesc, "TCP offload enabled for packet hdrlen: %d mss: %d paylen %d\n",
                    TxdOp::hdrlen(desc), TxdOp::mss(desc), TxdOp::getLen(desc));
            // Reset all per-segmentation counters for the new TSO burst.
            useTso = true;
            tsoTotalLen = TxdOp::getLen(desc);
            tsoLoadedHeader = false;
            tsoDescBytesUsed = 0;
            tsoUsedLen = 0;
            tsoPrevSeq = 0;
            tsoPktHasHeader = false;
            tsoPkts = 0;

        }

        // Context descriptors consume no packet data: mark done and
        // retire them immediately.
        TxdOp::setDd(desc);
        unusedCache.pop_front();
        usedCache.push_back(desc);
    }

    if (!unusedCache.size())
        return;

    desc = unusedCache.front();
    // Advanced data descriptors can also request TSO directly; same
    // counter reset as above but with the TSO length taken from the
    // data descriptor itself.
    if (!useTso && TxdOp::isType(desc, TxdOp::TXD_ADVDATA) && TxdOp::tse(desc)) {
        DPRINTF(EthernetDesc, "TCP offload(adv) enabled for packet hdrlen: %d mss: %d paylen %d\n",
                tsoHeaderLen, tsoMss, TxdOp::getTsoLen(desc));
        useTso = true;
        tsoTotalLen = TxdOp::getTsoLen(desc);
        tsoLoadedHeader = false;
        tsoDescBytesUsed = 0;
        tsoUsedLen = 0;
        tsoPrevSeq = 0;
        tsoPktHasHeader = false;
        tsoPkts = 0;
    }

    if (useTso && !tsoLoadedHeader) {
        // we need to fetch a header
        DPRINTF(EthernetDesc, "Starting DMA of TSO header\n");
        assert(TxdOp::isData(desc) && TxdOp::getLen(desc) >= tsoHeaderLen);
        pktWaiting = true;
        // tsoHeader is a fixed 256-byte buffer (see serialize()).
        assert(tsoHeaderLen <= 256);
        igbe->dmaRead(igbe->platform->pciToDma(TxdOp::getBuf(desc)),
                tsoHeaderLen, &headerEvent, tsoHeader, 0);
    }
}
1187
void
IGbE::TxDescCache::headerComplete()
{
    // DMA callback: the TSO header has been read into tsoHeader. If the
    // descriptor contained exactly the header, retire it and restart the
    // TX state machine; a header embedded in a larger data descriptor is
    // not supported.
    DPRINTF(EthernetDesc, "TSO: Fetching TSO header complete\n");
    pktWaiting = false;

    assert(unusedCache.size());
    TxDesc *desc = unusedCache.front();
    DPRINTF(EthernetDesc, "TSO: len: %d tsoHeaderLen: %d\n",
            TxdOp::getLen(desc), tsoHeaderLen);

    if (TxdOp::getLen(desc) == tsoHeaderLen) {
        tsoDescBytesUsed = 0;
        tsoLoadedHeader = true;
        unusedCache.pop_front();
        usedCache.push_back(desc);
    } else {
        // I don't think this case happens, I think the header is always
        // its own packet, if it wasn't it might be as simple as just
        // incrementing descBytesUsed by the header length, but I'm not
        // completely sure
        panic("TSO header part of bigger packet, not implemented\n");
    }
    enableSm();
    igbe->checkDrain();
}
1214
int
IGbE::TxDescCache::getPacketSize(EthPacketPtr p)
{
    // Return the number of bytes the next DMA will contribute to packet
    // p, or -1 if no descriptor is cached. For TSO this also computes
    // tsoCopyBytes (payload bytes to copy this round, capped by the MSS
    // and by what is left in the current descriptor) which getPacketData()
    // consumes.
    TxDesc *desc;


    if (!unusedCache.size())
        return -1;

    DPRINTF(EthernetDesc, "Starting processing of descriptor\n");

    assert(!useTso || tsoLoadedHeader);
    desc = unusedCache.front();


    if (useTso) {
        DPRINTF(EthernetDesc, "getPacket(): TxDescriptor data d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
        DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %d mss: %d total: %d used: %d loaded hdr: %d\n",
                useTso, tsoHeaderLen, tsoMss, tsoTotalLen, tsoUsedLen, tsoLoadedHeader);
        // NOTE(review): this trace prints tsoCopyBytes before it is
        // recomputed below, so it shows the previous call's value --
        // harmless, but potentially confusing when reading traces.
        DPRINTF(EthernetDesc, "TSO: descBytesUsed: %d copyBytes: %d this descLen: %d\n",
                tsoDescBytesUsed, tsoCopyBytes, TxdOp::getLen(desc));
        DPRINTF(EthernetDesc, "TSO: pktHasHeader: %d\n", tsoPktHasHeader);

        // If the header is already in the packet, fill up to one MSS past
        // it; otherwise the segment starts fresh and gets up to one MSS.
        if (tsoPktHasHeader)
            tsoCopyBytes = std::min((tsoMss + tsoHeaderLen) - p->length, TxdOp::getLen(desc) - tsoDescBytesUsed);
        else
            tsoCopyBytes = std::min(tsoMss, TxdOp::getLen(desc) - tsoDescBytesUsed);
        // The reported size includes the header only if it still needs
        // to be prepended.
        Addr pkt_size = tsoCopyBytes + (tsoPktHasHeader ? 0 : tsoHeaderLen);
        DPRINTF(EthernetDesc, "TSO: Next packet is %d bytes\n", pkt_size);
        return pkt_size;
    }

    DPRINTF(EthernetDesc, "Next TX packet is %d bytes\n",
            TxdOp::getLen(unusedCache.front()));
    return TxdOp::getLen(desc);
}
1251
void
IGbE::TxDescCache::getPacketData(EthPacketPtr p)
{
    // Start the DMA that appends this descriptor's data to packet p at
    // offset p->length. For TSO the previously-fetched header is copied
    // in first (once per segment) and only tsoCopyBytes of payload are
    // read; otherwise the whole descriptor buffer is read. pktComplete()
    // fires when the DMA finishes.
    assert(unusedCache.size());

    TxDesc *desc;
    desc = unusedCache.front();

    DPRINTF(EthernetDesc, "getPacketData(): TxDescriptor data d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
    assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) && TxdOp::getLen(desc));

    pktPtr = p;

    pktWaiting = true;

    DPRINTF(EthernetDesc, "Starting DMA of packet at offset %d\n", p->length);

    if (useTso) {
        assert(tsoLoadedHeader);
        if (!tsoPktHasHeader) {
            // Prepend the cached TSO header to the start of this segment.
            DPRINTF(EthernetDesc, "Loading TSO header (%d bytes) into start of packet\n",
                    tsoHeaderLen);
            memcpy(p->data, &tsoHeader,tsoHeaderLen);
            p->length +=tsoHeaderLen;
            tsoPktHasHeader = true;
        }
    }

    if (useTso) {
        // Account for the bytes this DMA will consume from the current
        // descriptor before the read is issued.
        tsoDescBytesUsed += tsoCopyBytes;
        assert(tsoDescBytesUsed <= TxdOp::getLen(desc));
        DPRINTF(EthernetDesc, "Starting DMA of packet at offset %d length: %d\n",
                p->length, tsoCopyBytes);
        igbe->dmaRead(igbe->platform->pciToDma(TxdOp::getBuf(desc)) + tsoDescBytesUsed,
                tsoCopyBytes, &pktEvent, p->data + p->length, igbe->txReadDelay);
    } else {
        igbe->dmaRead(igbe->platform->pciToDma(TxdOp::getBuf(desc)),
                TxdOp::getLen(desc), &pktEvent, p->data + p->length, igbe->txReadDelay);
    }
}
1292
void
IGbE::TxDescCache::pktComplete()
{
    // DMA callback for a packet-data read started in getPacketData().
    // Updates the packet length, decides whether the packet still needs
    // more descriptors, and -- for a finished packet -- applies TSO
    // header rewriting, checksum offload, delayed-interrupt timers, and
    // descriptor write-back policy before handing the packet to the TX
    // state machine.

    TxDesc *desc;
    assert(unusedCache.size());
    assert(pktPtr);

    DPRINTF(EthernetDesc, "DMA of packet complete\n");


    desc = unusedCache.front();
    assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) && TxdOp::getLen(desc));

    DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
    DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %d mss: %d total: %d used: %d loaded hdr: %d\n",
            useTso, tsoHeaderLen, tsoMss, tsoTotalLen, tsoUsedLen, tsoLoadedHeader);

    // Set the length of the data in the EtherPacket
    if (useTso) {
        pktPtr->length += tsoCopyBytes;
        tsoUsedLen += tsoCopyBytes;
    } else
        pktPtr->length += TxdOp::getLen(desc);

    DPRINTF(EthernetDesc, "TSO: descBytesUsed: %d copyBytes: %d\n",
            tsoDescBytesUsed, tsoCopyBytes);


    // Packet not finished yet: either a multi-descriptor packet whose
    // EOP descriptor hasn't been reached, or a TSO segment that hasn't
    // filled an MSS and still has payload left. Retire the descriptor
    // and signal a partial completion.
    if ((!TxdOp::eop(desc) && !useTso) ||
        (pktPtr->length < ( tsoMss + tsoHeaderLen) &&
         tsoTotalLen != tsoUsedLen && useTso)) {
        assert(!useTso || (tsoDescBytesUsed == TxdOp::getLen(desc)));
        unusedCache.pop_front();
        usedCache.push_back(desc);

        tsoDescBytesUsed = 0;
        pktDone = true;
        pktWaiting = false;
        pktMultiDesc = true;

        DPRINTF(EthernetDesc, "Partial Packet Descriptor of %d bytes Done\n",
                pktPtr->length);
        pktPtr = NULL;

        enableSm();
        igbe->checkDrain();
        return;
    }


    pktMultiDesc = false;
    // no support for vlans
    assert(!TxdOp::vle(desc));

    // we only support single packet descriptors at this point
    if (!useTso)
        assert(TxdOp::eop(desc));

    // set that this packet is done
    if (TxdOp::rs(desc))
        TxdOp::setDd(desc);

    DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n", desc->d1, desc->d2);

    if (useTso) {
        // Rewrite per-segment header fields: bump the IP id, recompute
        // the IP/UDP total length, advance the TCP sequence number by
        // the payload already sent, and clear FIN/PSH on all but the
        // final segment.
        IpPtr ip(pktPtr);
        if (ip) {
            DPRINTF(EthernetDesc, "TSO: Modifying IP header. Id + %d\n",
                    tsoPkts);
            ip->id(ip->id() + tsoPkts++);
            ip->len(pktPtr->length - EthPtr(pktPtr)->size());

            TcpPtr tcp(ip);
            if (tcp) {
                DPRINTF(EthernetDesc, "TSO: Modifying TCP header. old seq %d + %d\n",
                        tcp->seq(), tsoPrevSeq);
                tcp->seq(tcp->seq() + tsoPrevSeq);
                if (tsoUsedLen != tsoTotalLen)
                    tcp->flags(tcp->flags() & ~9); // clear fin & psh
            }
            UdpPtr udp(ip);
            if (udp) {
                DPRINTF(EthernetDesc, "TSO: Modifying UDP header.\n");
                udp->len(pktPtr->length - EthPtr(pktPtr)->size());
            }
        }
        tsoPrevSeq = tsoUsedLen;
    }

    if (DTRACE(EthernetDesc)) {
        IpPtr ip(pktPtr);
        if (ip)
            DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n",
                    ip->id());
        else
            DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n");
    }

    // Checksums are only ofloaded for new descriptor types
    if (TxdOp::isData(desc) && ( TxdOp::ixsm(desc) || TxdOp::txsm(desc)) ) {
        DPRINTF(EthernetDesc, "Calculating checksums for packet\n");
        IpPtr ip(pktPtr);
        assert(ip);
        if (TxdOp::ixsm(desc)) {
            // Zero the checksum field before computing, per standard
            // Internet checksum procedure.
            ip->sum(0);
            ip->sum(cksum(ip));
            igbe->txIpChecksums++;
            DPRINTF(EthernetDesc, "Calculated IP checksum\n");
        }
        if (TxdOp::txsm(desc)) {
            TcpPtr tcp(ip);
            UdpPtr udp(ip);
            if (tcp) {
                tcp->sum(0);
                tcp->sum(cksum(tcp));
                igbe->txTcpChecksums++;
                DPRINTF(EthernetDesc, "Calculated TCP checksum\n");
            } else if (udp) {
                assert(udp);
                udp->sum(0);
                udp->sum(cksum(udp));
                igbe->txUdpChecksums++;
                DPRINTF(EthernetDesc, "Calculated UDP checksum\n");
            } else {
                panic("Told to checksum, but don't know how\n");
            }
        }
    }

    if (TxdOp::ide(desc)) {
        // Deal with the rx timer interrupts
        DPRINTF(EthernetDesc, "Descriptor had IDE set\n");
        if (igbe->regs.tidv.idv()) {
            DPRINTF(EthernetDesc, "setting tidv\n");
            // TIDV is re-armed on every completion with IDE set.
            igbe->reschedule(igbe->tidvEvent,
                curTick + igbe->regs.tidv.idv() * igbe->intClock(), true);
        }

        if (igbe->regs.tadv.idv() && igbe->regs.tidv.idv()) {
            DPRINTF(EthernetDesc, "setting tadv\n");
            // TADV is an absolute cap: only armed if not already pending.
            if (!igbe->tadvEvent.scheduled()) {
                igbe->schedule(igbe->tadvEvent,
                    curTick + igbe->regs.tadv.idv() * igbe->intClock());
            }
        }
    }


    // Retire the descriptor unless TSO still has bytes left in it for
    // the next segment.
    if (!useTso || TxdOp::getLen(desc) == tsoDescBytesUsed) {
        DPRINTF(EthernetDesc, "Descriptor Done\n");
        unusedCache.pop_front();
        usedCache.push_back(desc);
        tsoDescBytesUsed = 0;
    }

    if (useTso && tsoUsedLen == tsoTotalLen)
        useTso = false;


    DPRINTF(EthernetDesc, "------Packet of %d bytes ready for transmission-------\n",
            pktPtr->length);
    pktDone = true;
    pktWaiting = false;
    pktPtr = NULL;
    tsoPktHasHeader = false;

    // Descriptor write-back policy per TXDCTL.WTHRESH.
    if (igbe->regs.txdctl.wthresh() == 0) {
        DPRINTF(EthernetDesc, "WTHRESH == 0, writing back descriptor\n");
        writeback(0);
    } else if (igbe->regs.txdctl.gran() && igbe->regs.txdctl.wthresh() >=
            descInBlock(usedCache.size())) {
        DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n");
        writeback((igbe->cacheBlockSize()-1)>>4);
    // NOTE(review): the message says "used > WTHRESH" but the condition
    // triggers when wthresh >= used -- confirm whether the comparison or
    // the message reflects the intended threshold semantics.
    } else if (igbe->regs.txdctl.wthresh() >= usedCache.size()) {
        DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n");
        writeback((igbe->cacheBlockSize()-1)>>4);
    }

    enableSm();
    igbe->checkDrain();
}
1475
void
IGbE::TxDescCache::actionAfterWb()
{
    // Post-write-back hook: raise the descriptor-written-back interrupt
    // and, if head write-back is enabled, DMA the current TDH value to
    // the driver-supplied completion address (low 2 bits masked off).
    DPRINTF(EthernetDesc, "actionAfterWb() completionEnabled: %d\n",
            completionEnabled);
    igbe->postInterrupt(iGbReg::IT_TXDW);
    if (completionEnabled) {
        descEnd = igbe->regs.tdh();
        DPRINTF(EthernetDesc, "Completion writing back value: %d to addr: %#x\n", descEnd,
                completionAddress);
        // nullEvent: no action is needed when this write completes.
        igbe->dmaWrite(igbe->platform->pciToDma(mbits(completionAddress, 63, 2)),
                    sizeof(descEnd), &nullEvent, (uint8_t*)&descEnd, 0);
    }
}
1490
1491 void
1492 IGbE::TxDescCache::serialize(std::ostream &os)
1493 {
1494 DescCache<TxDesc>::serialize(os);
1495 SERIALIZE_SCALAR(pktDone);
1496 SERIALIZE_SCALAR(isTcp);
1497 SERIALIZE_SCALAR(pktWaiting);
1498 SERIALIZE_SCALAR(pktMultiDesc);
1499
1500 SERIALIZE_SCALAR(useTso);
1501 SERIALIZE_SCALAR(tsoHeaderLen);
1502 SERIALIZE_SCALAR(tsoMss);
1503 SERIALIZE_SCALAR(tsoTotalLen);
1504 SERIALIZE_SCALAR(tsoUsedLen);
1505 SERIALIZE_SCALAR(tsoPrevSeq);;
1506 SERIALIZE_SCALAR(tsoPktPayloadBytes);
1507 SERIALIZE_SCALAR(tsoLoadedHeader);
1508 SERIALIZE_SCALAR(tsoPktHasHeader);
1509 SERIALIZE_ARRAY(tsoHeader, 256);
1510 SERIALIZE_SCALAR(tsoDescBytesUsed);
1511 SERIALIZE_SCALAR(tsoCopyBytes);
1512 SERIALIZE_SCALAR(tsoPkts);
1513
1514 SERIALIZE_SCALAR(completionAddress);
1515 SERIALIZE_SCALAR(completionEnabled);
1516 SERIALIZE_SCALAR(descEnd);
1517 }
1518
1519 void
1520 IGbE::TxDescCache::unserialize(Checkpoint *cp, const std::string &section)
1521 {
1522 DescCache<TxDesc>::unserialize(cp, section);
1523 UNSERIALIZE_SCALAR(pktDone);
1524 UNSERIALIZE_SCALAR(isTcp);
1525 UNSERIALIZE_SCALAR(pktWaiting);
1526 UNSERIALIZE_SCALAR(pktMultiDesc);
1527
1528 UNSERIALIZE_SCALAR(useTso);
1529 UNSERIALIZE_SCALAR(tsoHeaderLen);
1530 UNSERIALIZE_SCALAR(tsoMss);
1531 UNSERIALIZE_SCALAR(tsoTotalLen);
1532 UNSERIALIZE_SCALAR(tsoUsedLen);
1533 UNSERIALIZE_SCALAR(tsoPrevSeq);;
1534 UNSERIALIZE_SCALAR(tsoPktPayloadBytes);
1535 UNSERIALIZE_SCALAR(tsoLoadedHeader);
1536 UNSERIALIZE_SCALAR(tsoPktHasHeader);
1537 UNSERIALIZE_ARRAY(tsoHeader, 256);
1538 UNSERIALIZE_SCALAR(tsoDescBytesUsed);
1539 UNSERIALIZE_SCALAR(tsoCopyBytes);
1540 UNSERIALIZE_SCALAR(tsoPkts);
1541
1542 UNSERIALIZE_SCALAR(completionAddress);
1543 UNSERIALIZE_SCALAR(completionEnabled);
1544 UNSERIALIZE_SCALAR(descEnd);
1545 }
1546
1547 bool
1548 IGbE::TxDescCache::packetAvailable()
1549 {
1550 if (pktDone) {
1551 pktDone = false;
1552 return true;
1553 }
1554 return false;
1555 }
1556
1557 void
1558 IGbE::TxDescCache::enableSm()
1559 {
1560 if (!igbe->drainEvent) {
1561 igbe->txTick = true;
1562 igbe->restartClock();
1563 }
1564 }
1565
1566 bool
1567 IGbE::TxDescCache::hasOutstandingEvents()
1568 {
1569 return pktEvent.scheduled() || wbEvent.scheduled() ||
1570 fetchEvent.scheduled();
1571 }
1572
1573
1574 ///////////////////////////////////// IGbE /////////////////////////////////
1575
void
IGbE::restartClock()
{
    // (Re)schedule the main tick event at the next clock edge, but only
    // if some state machine wants to run, no tick is already pending,
    // and the simulation isn't draining/drained. The arithmetic rounds
    // curTick down to a clock boundary then advances one period.
    if (!tickEvent.scheduled() && (rxTick || txTick || txFifoTick) &&
        getState() == SimObject::Running)
        schedule(tickEvent, (curTick / ticks(1)) * ticks(1) + ticks(1));
}
1583
unsigned int
IGbE::drain(Event *de)
{
    // Drain protocol: count outstanding activity in the PIO/DMA ports
    // and descriptor caches, stop all state machines, and report how
    // many drain acknowledgements are still pending. Returns 0 when the
    // device is already quiescent.
    unsigned int count;
    count = pioPort->drain(de) + dmaPort->drain(de);
    if (rxDescCache.hasOutstandingEvents() ||
            txDescCache.hasOutstandingEvents()) {
        count++;
        // Remember the event so checkDrain() can signal completion later.
        drainEvent = de;
    }

    txFifoTick = false;
    txTick = false;
    rxTick = false;

    if (tickEvent.scheduled())
        deschedule(tickEvent);

    if (count)
        changeState(Draining);
    else
        changeState(Drained);

    return count;
}
1609
void
IGbE::resume()
{
    // Leaving the drained state: re-enable all three state machines and
    // restart the device clock.
    SimObject::resume();

    txFifoTick = true;
    txTick = true;
    rxTick = true;

    restartClock();
}
1621
1622 void
1623 IGbE::checkDrain()
1624 {
1625 if (!drainEvent)
1626 return;
1627
1628 txFifoTick = false;
1629 txTick = false;
1630 rxTick = false;
1631 if (!rxDescCache.hasOutstandingEvents() &&
1632 !txDescCache.hasOutstandingEvents()) {
1633 drainEvent->process();
1634 drainEvent = NULL;
1635 }
1636 }
1637
void
IGbE::txStateMachine()
{
    // One step of the transmit state machine, run each tick while
    // txTick is set. Clears txTick whenever there is nothing productive
    // to do; other events (DMA completion, register writes) set it
    // again and restart the clock.
    if (!regs.tctl.en()) {
        txTick = false;
        DPRINTF(EthernetSM, "TXS: TX disabled, stopping ticking\n");
        return;
    }

    // If we have a packet available and it's length is not 0 (meaning it's not
    // a multidescriptor packet) put it in the fifo, otherwise an the next
    // iteration we'll get the rest of the data
    if (txPacket && txDescCache.packetAvailable()
                 && !txDescCache.packetMultiDesc() && txPacket->length) {
        bool success;

        DPRINTF(EthernetSM, "TXS: packet placed in TX FIFO\n");
        success = txFifo.push(txPacket);
        // Only wake the wire-transmit machine if we're not draining.
        txFifoTick = true && !drainEvent;
        assert(success);
        txPacket = NULL;
        txDescCache.writeback((cacheBlockSize()-1)>>4);
        return;
    }

    // Only support descriptor granularity
    if (regs.txdctl.lwthresh() && txDescCache.descLeft() < (regs.txdctl.lwthresh() * 8)) {
        DPRINTF(EthernetSM, "TXS: LWTHRESH caused posting of TXDLOW\n");
        postInterrupt(IT_TXDLOW);
    }

    if (!txPacket) {
        // Allocate a buffer large enough for a max-size TSO segment.
        txPacket = new EthPacketData(16384);
    }

    if (!txDescCache.packetWaiting()) {
        if (txDescCache.descLeft() == 0) {
            postInterrupt(IT_TXQE);
            txDescCache.writeback(0);
            txDescCache.fetchDescriptors();
            DPRINTF(EthernetSM, "TXS: No descriptors left in ring, forcing "
                    "writeback stopping ticking and posting TXQE\n");
            txTick = false;
            return;
        }


        if (!(txDescCache.descUnused())) {
            txDescCache.fetchDescriptors();
            DPRINTF(EthernetSM, "TXS: No descriptors available in cache, fetching and stopping ticking\n");
            txTick = false;
            return;
        }


        txDescCache.processContextDesc();
        if (txDescCache.packetWaiting()) {
            DPRINTF(EthernetSM, "TXS: Fetching TSO header, stopping ticking\n");
            txTick = false;
            return;
        }

        int size;
        size = txDescCache.getPacketSize(txPacket);
        if (size > 0 && txFifo.avail() > size) {
            DPRINTF(EthernetSM, "TXS: Reserving %d bytes in FIFO and begining "
                    "DMA of next packet\n", size);
            txFifo.reserve(size);
            txDescCache.getPacketData(txPacket);
        } else if (size <= 0) {
            // size == 0: descriptor contributed nothing (e.g. consumed
            // context); size < 0: no descriptors cached. Flush used ones.
            DPRINTF(EthernetSM, "TXS: getPacketSize returned: %d\n", size);
            DPRINTF(EthernetSM, "TXS: No packets to get, writing back used descriptors\n");
            txDescCache.writeback(0);
        } else {
            DPRINTF(EthernetSM, "TXS: FIFO full, stopping ticking until space "
                    "available in FIFO\n");
            txTick = false;
        }


        return;
    }
    DPRINTF(EthernetSM, "TXS: Nothing to do, stopping ticking\n");
    txTick = false;
}
1723
bool
IGbE::ethRxPkt(EthPacketPtr pkt)
{
    // Wire-side receive entry point. Returns true if the packet was
    // accepted (or deliberately dropped because RX is disabled), false
    // if the RX FIFO overflowed -- in which case the overrun interrupt
    // is posted and the link layer should retry.
    rxBytes += pkt->length;
    rxPackets++;

    DPRINTF(Ethernet, "RxFIFO: Receiving pcakte from wire\n");

    if (!regs.rctl.en()) {
        DPRINTF(Ethernet, "RxFIFO: RX not enabled, dropping\n");
        // Accepted-and-dropped: don't signal backpressure to the sender.
        return true;
    }

    // restart the state machines if they are stopped
    rxTick = true && !drainEvent;
    if ((rxTick || txTick) && !tickEvent.scheduled()) {
        DPRINTF(EthernetSM, "RXS: received packet into fifo, starting ticking\n");
        restartClock();
    }

    if (!rxFifo.push(pkt)) {
        DPRINTF(Ethernet, "RxFIFO: Packet won't fit in fifo... dropped\n");
        postInterrupt(IT_RXO, true);
        return false;
    }

    return true;
}
1752
1753
void
IGbE::rxStateMachine()
{
    // One step of the receive state machine, run each tick while rxTick
    // is set. Handles post-DMA bookkeeping for a completed packet, then
    // starts the DMA of the next FIFO packet into guest memory.
    if (!regs.rctl.en()) {
        rxTick = false;
        DPRINTF(EthernetSM, "RXS: RX disabled, stopping ticking\n");
        return;
    }

    // If the packet is done check for interrupts/descriptors/etc
    if (rxDescCache.packetDone()) {
        rxDmaPacket = false;
        DPRINTF(EthernetSM, "RXS: Packet completed DMA to memory\n");
        int descLeft = rxDescCache.descLeft();
        DPRINTF(EthernetSM, "RXS: descLeft: %d rdmts: %d rdlen: %d\n",
                descLeft, regs.rctl.rdmts(), regs.rdlen());
        // RDMTS selects the minimum-threshold fraction of the ring;
        // the fall-through between cases is deliberate: once a stricter
        // threshold is crossed the interrupt fires.
        switch (regs.rctl.rdmts()) {
            case 2: if (descLeft > .125 * regs.rdlen()) break;
            case 1: if (descLeft > .250 * regs.rdlen()) break;
            case 0: if (descLeft > .500 * regs.rdlen()) break;
                DPRINTF(Ethernet, "RXS: Interrupting (RXDMT) because of descriptors left\n");
                postInterrupt(IT_RXDMT);
                break;
        }

        // Flush descriptors eagerly when there's no more RX work queued.
        if (rxFifo.empty())
            rxDescCache.writeback(0);

        if (descLeft == 0) {
            rxDescCache.writeback(0);
            DPRINTF(EthernetSM, "RXS: No descriptors left in ring, forcing"
                    " writeback and stopping ticking\n");
            rxTick = false;
        }

        // only support descriptor granulaties
        assert(regs.rxdctl.gran());

        if (regs.rxdctl.wthresh() >= rxDescCache.descUsed()) {
            DPRINTF(EthernetSM, "RXS: Writing back because WTHRESH >= descUsed\n");
            // Write back in cache-block-sized chunks unless WTHRESH is
            // smaller than one block's worth of descriptors (16 bytes each).
            if (regs.rxdctl.wthresh() < (cacheBlockSize()>>4))
                rxDescCache.writeback(regs.rxdctl.wthresh()-1);
            else
                rxDescCache.writeback((cacheBlockSize()-1)>>4);
        }

        // Prefetch more descriptors per PTHRESH/HTHRESH policy.
        if ((rxDescCache.descUnused() < regs.rxdctl.pthresh()) &&
            ((rxDescCache.descLeft() - rxDescCache.descUnused()) > regs.rxdctl.hthresh())) {
            DPRINTF(EthernetSM, "RXS: Fetching descriptors because descUnused < PTHRESH\n");
            rxDescCache.fetchDescriptors();
        }

        if (rxDescCache.descUnused() == 0) {
            rxDescCache.fetchDescriptors();
            DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
                    "fetching descriptors and stopping ticking\n");
            rxTick = false;
        }
        return;
    }

    if (rxDmaPacket) {
        // A packet DMA is in flight; nothing to do until it completes.
        DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n");
        rxTick = false;
        return;
    }

    if (!rxDescCache.descUnused()) {
        rxDescCache.fetchDescriptors();
        DPRINTF(EthernetSM, "RXS: No descriptors available in cache, stopping ticking\n");
        rxTick = false;
        DPRINTF(EthernetSM, "RXS: No descriptors available, fetching\n");
        return;
    }

    if (rxFifo.empty()) {
        DPRINTF(EthernetSM, "RXS: RxFIFO empty, stopping ticking\n");
        rxTick = false;
        return;
    }

    EthPacketPtr pkt;
    pkt = rxFifo.front();


    // Start (or continue, via pktOffset) DMA of the packet into the
    // guest buffer; multi-descriptor packets keep pktOffset partial.
    pktOffset = rxDescCache.writePacket(pkt, pktOffset);
    DPRINTF(EthernetSM, "RXS: Writing packet into memory\n");
    if (pktOffset == pkt->length) {
        DPRINTF(EthernetSM, "RXS: Removing packet from FIFO\n");
        pktOffset = 0;
        rxFifo.pop();
    }

    DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n");
    rxTick = false;
    rxDmaPacket = true;
}
1851
void
IGbE::txWire()
{
    // Attempt to put the head of the TX FIFO on the wire. In every exit
    // path txFifoTick is cleared: on success ethTxDone() will re-arm it,
    // on failure the interface's completion callback wakes us up.
    if (txFifo.empty()) {
        txFifoTick = false;
        return;
    }


    if (etherInt->sendPacket(txFifo.front())) {
        if (DTRACE(EthernetSM)) {
            IpPtr ip(txFifo.front());
            if (ip)
                DPRINTF(EthernetSM, "Transmitting Ip packet with Id=%d\n",
                        ip->id());
            else
                DPRINTF(EthernetSM, "Transmitting Non-Ip packet\n");
        }
        DPRINTF(EthernetSM, "TxFIFO: Successful transmit, bytes available in fifo: %d\n",
                txFifo.avail());

        txBytes += txFifo.front()->length;
        txPackets++;
        txFifoTick = false;

        txFifo.pop();
    } else {
        // We'll get woken up when the packet ethTxDone() gets called
        txFifoTick = false;
    }
}
1883
void
IGbE::tick()
{
    // Main device clock: advance whichever state machines are enabled
    // (RX first, then TX, then the wire transmitter) and reschedule
    // ourselves while any of them still wants to run. Note the state
    // machines may clear their own tick flags during this call.
    DPRINTF(EthernetSM, "IGbE: -------------- Cycle --------------\n");

    if (rxTick)
        rxStateMachine();

    if (txTick)
        txStateMachine();

    if (txFifoTick)
        txWire();


    if (rxTick || txTick || txFifoTick)
        schedule(tickEvent, curTick + ticks(1));
}
1902
void
IGbE::ethTxDone()
{
    // restart the tx state machines if they are stopped
    // fifo to send another packet
    // tx sm to put more data into the fifo
    txFifoTick = true && !drainEvent;
    // Only tick the descriptor state machine if there is still work in
    // the ring and we're not draining.
    if (txDescCache.descLeft() != 0 && !drainEvent)
        txTick = true;

    restartClock();
    // Try to push the next packet immediately rather than waiting a tick.
    txWire();
    DPRINTF(EthernetSM, "TxFIFO: Transmission complete\n");
}
1917
void
IGbE::serialize(std::ostream &os)
{
    // Checkpoint the device: PCI config state, register file, EEPROM
    // state machine, FIFOs, the in-progress TX packet (if any), the
    // absolute firing times of all pending timer events (0 == not
    // scheduled), and finally both descriptor caches in their own
    // sub-sections.
    PciDev::serialize(os);

    regs.serialize(os);
    SERIALIZE_SCALAR(eeOpBits);
    SERIALIZE_SCALAR(eeAddrBits);
    SERIALIZE_SCALAR(eeDataBits);
    SERIALIZE_SCALAR(eeOpcode);
    SERIALIZE_SCALAR(eeAddr);
    SERIALIZE_SCALAR(lastInterrupt);
    SERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);

    rxFifo.serialize("rxfifo", os);
    txFifo.serialize("txfifo", os);

    // txPacket may be NULL; record its existence so unserialize() knows
    // whether to reconstruct it.
    bool txPktExists = txPacket;
    SERIALIZE_SCALAR(txPktExists);
    if (txPktExists)
        txPacket->serialize("txpacket", os);

    Tick rdtr_time = 0, radv_time = 0, tidv_time = 0, tadv_time = 0,
         inter_time = 0;

    if (rdtrEvent.scheduled())
        rdtr_time = rdtrEvent.when();
    SERIALIZE_SCALAR(rdtr_time);

    if (radvEvent.scheduled())
        radv_time = radvEvent.when();
    SERIALIZE_SCALAR(radv_time);

    if (tidvEvent.scheduled())
        tidv_time = tidvEvent.when();
    SERIALIZE_SCALAR(tidv_time);

    if (tadvEvent.scheduled())
        tadv_time = tadvEvent.when();
    SERIALIZE_SCALAR(tadv_time);

    if (interEvent.scheduled())
        inter_time = interEvent.when();
    SERIALIZE_SCALAR(inter_time);

    SERIALIZE_SCALAR(pktOffset);

    nameOut(os, csprintf("%s.TxDescCache", name()));
    txDescCache.serialize(os);

    nameOut(os, csprintf("%s.RxDescCache", name()));
    rxDescCache.serialize(os);
}
1971
void
IGbE::unserialize(Checkpoint *cp, const std::string &section)
{
    // Restore the device from a checkpoint, mirroring serialize():
    // PCI/register/EEPROM state, FIFOs, the optional in-progress TX
    // packet, pending timer events (a 0 time means the event was not
    // scheduled), and the descriptor caches.
    PciDev::unserialize(cp, section);

    regs.unserialize(cp, section);
    UNSERIALIZE_SCALAR(eeOpBits);
    UNSERIALIZE_SCALAR(eeAddrBits);
    UNSERIALIZE_SCALAR(eeDataBits);
    UNSERIALIZE_SCALAR(eeOpcode);
    UNSERIALIZE_SCALAR(eeAddr);
    UNSERIALIZE_SCALAR(lastInterrupt);
    UNSERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);

    rxFifo.unserialize("rxfifo", cp, section);
    txFifo.unserialize("txfifo", cp, section);

    bool txPktExists;
    UNSERIALIZE_SCALAR(txPktExists);
    if (txPktExists) {
        // Same buffer size as txStateMachine() allocates.
        txPacket = new EthPacketData(16384);
        txPacket->unserialize("txpacket", cp, section);
    }

    // Conservatively re-enable all state machines; they'll shut
    // themselves off on the first tick if there is nothing to do.
    rxTick = true;
    txTick = true;
    txFifoTick = true;

    Tick rdtr_time, radv_time, tidv_time, tadv_time, inter_time;
    UNSERIALIZE_SCALAR(rdtr_time);
    UNSERIALIZE_SCALAR(radv_time);
    UNSERIALIZE_SCALAR(tidv_time);
    UNSERIALIZE_SCALAR(tadv_time);
    UNSERIALIZE_SCALAR(inter_time);

    if (rdtr_time)
        schedule(rdtrEvent, rdtr_time);

    if (radv_time)
        schedule(radvEvent, radv_time);

    if (tidv_time)
        schedule(tidvEvent, tidv_time);

    if (tadv_time)
        schedule(tadvEvent, tadv_time);

    if (inter_time)
        schedule(interEvent, inter_time);

    UNSERIALIZE_SCALAR(pktOffset);

    txDescCache.unserialize(cp, csprintf("%s.TxDescCache", section));

    rxDescCache.unserialize(cp, csprintf("%s.RxDescCache", section));
}
2028
2029 IGbE *
2030 IGbEParams::create()
2031 {
2032 return new IGbE(this);
2033 }