1 /*
2 * Copyright (c) 2006 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Ali Saidi
29 */
30
31 /* @file
32 * Device model for Intel's 8254x line of gigabit ethernet controllers.
33  * In particular it models an 82547 revision 2 (82547GI) MAC, since that part
34  * seems to require the fewest workarounds in the driver. It will probably work
35  * with most of the other MACs in the line with slight modifications.
36 */
37
38
39 /*
40  * @todo There are really multiple DMA engines; we should implement them.
41 */
42
43 #include <algorithm>
44
45 #include "base/inet.hh"
46 #include "base/trace.hh"
47 #include "dev/i8254xGBe.hh"
48 #include "mem/packet.hh"
49 #include "mem/packet_access.hh"
50 #include "params/IGbE.hh"
51 #include "params/IGbEInt.hh"
52 #include "sim/stats.hh"
53 #include "sim/system.hh"
54
55 using namespace iGbReg;
56 using namespace Net;
57
58 IGbE::IGbE(Params *p)
59 : PciDev(p), etherInt(NULL), drainEvent(NULL), useFlowControl(p->use_flow_control),
60 rxFifo(p->rx_fifo_size), txFifo(p->tx_fifo_size), rxTick(false),
61      txTick(false), txFifoTick(false), rxDmaPacket(false), rdtrEvent(this), radvEvent(this),
62 tadvEvent(this), tidvEvent(this), tickEvent(this), interEvent(this),
63 rxDescCache(this, name()+".RxDesc", p->rx_desc_cache_size),
64 txDescCache(this, name()+".TxDesc", p->tx_desc_cache_size), clock(p->clock)
65 {
66    // Initialize internal registers per Intel documentation.
67    // All registers are initialized to 0 by the per-register constructors.
68 regs.ctrl.fd(1);
69 regs.ctrl.lrst(1);
70 regs.ctrl.speed(2);
71 regs.ctrl.frcspd(1);
72 regs.sts.speed(3); // Say we're 1000Mbps
73 regs.sts.fd(1); // full duplex
74 regs.sts.lu(1); // link up
75 regs.eecd.fwe(1);
76 regs.eecd.ee_type(1);
77 regs.imr = 0;
78 regs.iam = 0;
79 regs.rxdctl.gran(1);
80 regs.rxdctl.wthresh(1);
81 regs.fcrth(1);
82
83 regs.pba.rxa(0x30);
84 regs.pba.txa(0x10);
85
86 eeOpBits = 0;
87 eeAddrBits = 0;
88 eeDataBits = 0;
89 eeOpcode = 0;
90
91 // clear all 64 16 bit words of the eeprom
92 memset(&flash, 0, EEPROM_SIZE*2);
93
94 // Set the MAC address
95 memcpy(flash, p->hardware_address.bytes(), ETH_ADDR_LEN);
96 for (int x = 0; x < ETH_ADDR_LEN/2; x++)
97 flash[x] = htobe(flash[x]);
98
99 uint16_t csum = 0;
100 for (int x = 0; x < EEPROM_SIZE; x++)
101 csum += htobe(flash[x]);
102
103
104    // Fix up the last word so the EEPROM contents sum to the expected checksum
105 flash[EEPROM_SIZE-1] = htobe((uint16_t)(EEPROM_CSUM - csum));
106
107 rxFifo.clear();
108 txFifo.clear();
109 }
110
111
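// Handle writes to PCI configuration space; only the standard PCI header is
// implemented, device-specific config registers are not.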
112 Tick
113 IGbE::writeConfig(PacketPtr pkt)
114 {
115 int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
116 if (offset < PCI_DEVICE_SPECIFIC)
117 PciDev::writeConfig(pkt);
118 else
119 panic("Device specific PCI config space not implemented.\n");
120
121 ///
122    /// Some work may need to be done here based on the PCI COMMAND bits.
123 ///
124
125 return pioDelay;
126 }
127
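// Handle reads of the memory-mapped device registers (32-bit accesses to BAR0).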
128 Tick
129 IGbE::read(PacketPtr pkt)
130 {
131 int bar;
132 Addr daddr;
133
134 if (!getBAR(pkt->getAddr(), bar, daddr))
135 panic("Invalid PCI memory access to unmapped memory.\n");
136
137 // Only Memory register BAR is allowed
138 assert(bar == 0);
139
140 // Only 32bit accesses allowed
141 assert(pkt->getSize() == 4);
142
143 DPRINTF(Ethernet, "Read device register %#X\n", daddr);
144
145 pkt->allocate();
146
147 ///
148 /// Handle read of register here
149 ///
150
151
152 switch (daddr) {
153 case REG_CTRL:
154 pkt->set<uint32_t>(regs.ctrl());
155 break;
156 case REG_STATUS:
157 pkt->set<uint32_t>(regs.sts());
158 break;
159 case REG_EECD:
160 pkt->set<uint32_t>(regs.eecd());
161 break;
162 case REG_EERD:
163 pkt->set<uint32_t>(regs.eerd());
164 break;
165 case REG_CTRL_EXT:
166 pkt->set<uint32_t>(regs.ctrl_ext());
167 break;
168 case REG_MDIC:
169 pkt->set<uint32_t>(regs.mdic());
170 break;
171 case REG_ICR:
172 DPRINTF(Ethernet, "Reading ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n", regs.icr(),
173 regs.imr, regs.iam, regs.ctrl_ext.iame());
174 pkt->set<uint32_t>(regs.icr());
175 if (regs.icr.int_assert() || regs.imr == 0) {
176 regs.icr = regs.icr() & ~mask(30);
177 DPRINTF(Ethernet, "Cleared ICR. ICR=%#x\n", regs.icr());
178 }
179 if (regs.ctrl_ext.iame() && regs.icr.int_assert())
180 regs.imr &= ~regs.iam;
181 chkInterrupt();
182 break;
183 case REG_ITR:
184 pkt->set<uint32_t>(regs.itr());
185 break;
186 case REG_RCTL:
187 pkt->set<uint32_t>(regs.rctl());
188 break;
189 case REG_FCTTV:
190 pkt->set<uint32_t>(regs.fcttv());
191 break;
192 case REG_TCTL:
193 pkt->set<uint32_t>(regs.tctl());
194 break;
195 case REG_PBA:
196 pkt->set<uint32_t>(regs.pba());
197 break;
198 case REG_WUC:
199 case REG_LEDCTL:
200 pkt->set<uint32_t>(0); // We don't care, so just return 0
201 break;
202 case REG_FCRTL:
203 pkt->set<uint32_t>(regs.fcrtl());
204 break;
205 case REG_FCRTH:
206 pkt->set<uint32_t>(regs.fcrth());
207 break;
208 case REG_RDBAL:
209 pkt->set<uint32_t>(regs.rdba.rdbal());
210 break;
211 case REG_RDBAH:
212 pkt->set<uint32_t>(regs.rdba.rdbah());
213 break;
214 case REG_RDLEN:
215 pkt->set<uint32_t>(regs.rdlen());
216 break;
217 case REG_RDH:
218 pkt->set<uint32_t>(regs.rdh());
219 break;
220 case REG_RDT:
221 pkt->set<uint32_t>(regs.rdt());
222 break;
223 case REG_RDTR:
224 pkt->set<uint32_t>(regs.rdtr());
225 if (regs.rdtr.fpd()) {
226 rxDescCache.writeback(0);
227 DPRINTF(EthernetIntr, "Posting interrupt because of RDTR.FPD write\n");
228 postInterrupt(IT_RXT);
229 regs.rdtr.fpd(0);
230 }
231 break;
232 case REG_RADV:
233 pkt->set<uint32_t>(regs.radv());
234 break;
235 case REG_TDBAL:
236 pkt->set<uint32_t>(regs.tdba.tdbal());
237 break;
238 case REG_TDBAH:
239 pkt->set<uint32_t>(regs.tdba.tdbah());
240 break;
241 case REG_TDLEN:
242 pkt->set<uint32_t>(regs.tdlen());
243 break;
244 case REG_TDH:
245 pkt->set<uint32_t>(regs.tdh());
246 break;
247 case REG_TDT:
248 pkt->set<uint32_t>(regs.tdt());
249 break;
250 case REG_TIDV:
251 pkt->set<uint32_t>(regs.tidv());
252 break;
253 case REG_TXDCTL:
254 pkt->set<uint32_t>(regs.txdctl());
255 break;
256 case REG_TADV:
257 pkt->set<uint32_t>(regs.tadv());
258 break;
259 case REG_RXCSUM:
260 pkt->set<uint32_t>(regs.rxcsum());
261 break;
262 case REG_MANC:
263 pkt->set<uint32_t>(regs.manc());
264 break;
265 default:
266 if (!(daddr >= REG_VFTA && daddr < (REG_VFTA + VLAN_FILTER_TABLE_SIZE*4)) &&
267 !(daddr >= REG_RAL && daddr < (REG_RAL + RCV_ADDRESS_TABLE_SIZE*8)) &&
268 !(daddr >= REG_MTA && daddr < (REG_MTA + MULTICAST_TABLE_SIZE*4)) &&
269 !(daddr >= REG_CRCERRS && daddr < (REG_CRCERRS + STATS_REGS_SIZE)))
270 panic("Read request to unknown register number: %#x\n", daddr);
271 else
272 pkt->set<uint32_t>(0);
273 };
274
275 pkt->makeAtomicResponse();
276 return pioDelay;
277 }
278
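// Handle writes to the memory-mapped device registers (32-bit accesses to BAR0).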
279 Tick
280 IGbE::write(PacketPtr pkt)
281 {
282 int bar;
283 Addr daddr;
284
285
286 if (!getBAR(pkt->getAddr(), bar, daddr))
287 panic("Invalid PCI memory access to unmapped memory.\n");
288
289 // Only Memory register BAR is allowed
290 assert(bar == 0);
291
292 // Only 32bit accesses allowed
293 assert(pkt->getSize() == sizeof(uint32_t));
294
295 DPRINTF(Ethernet, "Wrote device register %#X value %#X\n", daddr, pkt->get<uint32_t>());
296
297 ///
298 /// Handle write of register here
299 ///
300 uint32_t val = pkt->get<uint32_t>();
301
302 Regs::RCTL oldrctl;
303 Regs::TCTL oldtctl;
304
305 switch (daddr) {
306 case REG_CTRL:
307 regs.ctrl = val;
308 if (regs.ctrl.tfce())
309 warn("TX Flow control enabled, should implement\n");
310 if (regs.ctrl.rfce())
311 warn("RX Flow control enabled, should implement\n");
312 break;
313 case REG_CTRL_EXT:
314 regs.ctrl_ext = val;
315 break;
316 case REG_STATUS:
317 regs.sts = val;
318 break;
319 case REG_EECD:
320 int oldClk;
321 oldClk = regs.eecd.sk();
322 regs.eecd = val;
323        // See if this is an EEPROM access and emulate it accordingly
324 if (!oldClk && regs.eecd.sk()) {
325 if (eeOpBits < 8) {
326 eeOpcode = eeOpcode << 1 | regs.eecd.din();
327 eeOpBits++;
328 } else if (eeAddrBits < 8 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
329 eeAddr = eeAddr << 1 | regs.eecd.din();
330 eeAddrBits++;
331 } else if (eeDataBits < 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
332 assert(eeAddr>>1 < EEPROM_SIZE);
333 DPRINTF(EthernetEEPROM, "EEPROM bit read: %d word: %#X\n",
334 flash[eeAddr>>1] >> eeDataBits & 0x1, flash[eeAddr>>1]);
335 regs.eecd.dout((flash[eeAddr>>1] >> (15-eeDataBits)) & 0x1);
336 eeDataBits++;
337 } else if (eeDataBits < 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI) {
338 regs.eecd.dout(0);
339 eeDataBits++;
340 } else
341 panic("What's going on with eeprom interface? opcode:"
342 " %#x:%d addr: %#x:%d, data: %d\n", (uint32_t)eeOpcode,
343 (uint32_t)eeOpBits, (uint32_t)eeAddr,
344 (uint32_t)eeAddrBits, (uint32_t)eeDataBits);
345
346 // Reset everything for the next command
347 if ((eeDataBits == 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) ||
348 (eeDataBits == 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI)) {
349 eeOpBits = 0;
350 eeAddrBits = 0;
351 eeDataBits = 0;
352 eeOpcode = 0;
353 eeAddr = 0;
354 }
355
356 DPRINTF(EthernetEEPROM, "EEPROM: opcode: %#X:%d addr: %#X:%d\n",
357 (uint32_t)eeOpcode, (uint32_t) eeOpBits,
358 (uint32_t)eeAddr>>1, (uint32_t)eeAddrBits);
359 if (eeOpBits == 8 && !(eeOpcode == EEPROM_READ_OPCODE_SPI ||
360 eeOpcode == EEPROM_RDSR_OPCODE_SPI ))
361 panic("Unknown eeprom opcode: %#X:%d\n", (uint32_t)eeOpcode,
362 (uint32_t)eeOpBits);
363
364
365 }
366        // If the driver requests EEPROM access, grant it immediately
367 regs.eecd.ee_gnt(regs.eecd.ee_req());
368 break;
369 case REG_EERD:
370 regs.eerd = val;
371 break;
372 case REG_MDIC:
373 regs.mdic = val;
374 if (regs.mdic.i())
375 panic("No support for interrupt on mdic complete\n");
376 if (regs.mdic.phyadd() != 1)
377 panic("No support for reading anything but phy\n");
378 DPRINTF(Ethernet, "%s phy address %x\n", regs.mdic.op() == 1 ? "Writing"
379 : "Reading", regs.mdic.regadd());
380 switch (regs.mdic.regadd()) {
381 case PHY_PSTATUS:
382 regs.mdic.data(0x796D); // link up
383 break;
384 case PHY_PID:
385 regs.mdic.data(0x02A8);
386 break;
387 case PHY_EPID:
388 regs.mdic.data(0x0380);
389 break;
390 case PHY_GSTATUS:
391 regs.mdic.data(0x7C00);
392 break;
393 case PHY_EPSTATUS:
394 regs.mdic.data(0x3000);
395 break;
396 case PHY_AGC:
397 regs.mdic.data(0x180); // some random length
398 break;
399 default:
400 regs.mdic.data(0);
401 }
402 regs.mdic.r(1);
403 break;
404 case REG_ICR:
405 DPRINTF(Ethernet, "Writing ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n", regs.icr(),
406 regs.imr, regs.iam, regs.ctrl_ext.iame());
407 if (regs.ctrl_ext.iame())
408 regs.imr &= ~regs.iam;
409 regs.icr = ~bits(val,30,0) & regs.icr();
410 chkInterrupt();
411 break;
412 case REG_ITR:
413 regs.itr = val;
414 break;
415 case REG_ICS:
416 DPRINTF(EthernetIntr, "Posting interrupt because of ICS write\n");
417 postInterrupt((IntTypes)val);
418 break;
419 case REG_IMS:
420 regs.imr |= val;
421 chkInterrupt();
422 break;
423 case REG_IMC:
424 regs.imr &= ~val;
425 chkInterrupt();
426 break;
427 case REG_IAM:
428 regs.iam = val;
429 break;
430 case REG_RCTL:
431 oldrctl = regs.rctl;
432 regs.rctl = val;
433 if (regs.rctl.rst()) {
434 rxDescCache.reset();
435 DPRINTF(EthernetSM, "RXS: Got RESET!\n");
436 rxFifo.clear();
437 regs.rctl.rst(0);
438 }
439 if (regs.rctl.en())
440 rxTick = true;
441 restartClock();
442 break;
443 case REG_FCTTV:
444 regs.fcttv = val;
445 break;
446 case REG_TCTL:
447          // capture the old value first so the enable transition below is detected
448          oldtctl = regs.tctl;
449          regs.tctl = val;
450 if (regs.tctl.en())
451 txTick = true;
452 restartClock();
453 if (regs.tctl.en() && !oldtctl.en()) {
454 txDescCache.reset();
455 }
456 break;
457 case REG_PBA:
458 regs.pba.rxa(val);
459 regs.pba.txa(64 - regs.pba.rxa());
460 break;
461 case REG_WUC:
462 case REG_LEDCTL:
463 case REG_FCAL:
464 case REG_FCAH:
465 case REG_FCT:
466 case REG_VET:
467 case REG_AIFS:
468 case REG_TIPG:
469 ; // We don't care, so don't store anything
470 break;
471 case REG_FCRTL:
472 regs.fcrtl = val;
473 break;
474 case REG_FCRTH:
475 regs.fcrth = val;
476 break;
477 case REG_RDBAL:
478 regs.rdba.rdbal( val & ~mask(4));
479 rxDescCache.areaChanged();
480 break;
481 case REG_RDBAH:
482 regs.rdba.rdbah(val);
483 rxDescCache.areaChanged();
484 break;
485 case REG_RDLEN:
486 regs.rdlen = val & ~mask(7);
487 rxDescCache.areaChanged();
488 break;
489 case REG_RDH:
490 regs.rdh = val;
491 rxDescCache.areaChanged();
492 break;
493 case REG_RDT:
494 regs.rdt = val;
495 rxTick = true;
496 restartClock();
497 break;
498 case REG_RDTR:
499 regs.rdtr = val;
500 break;
501 case REG_RADV:
502 regs.radv = val;
503 break;
504 case REG_TDBAL:
505 regs.tdba.tdbal( val & ~mask(4));
506 txDescCache.areaChanged();
507 break;
508 case REG_TDBAH:
509 regs.tdba.tdbah(val);
510 txDescCache.areaChanged();
511 break;
512 case REG_TDLEN:
513 regs.tdlen = val & ~mask(7);
514 txDescCache.areaChanged();
515 break;
516 case REG_TDH:
517 regs.tdh = val;
518 txDescCache.areaChanged();
519 break;
520 case REG_TDT:
521 regs.tdt = val;
522 txTick = true;
523 restartClock();
524 break;
525 case REG_TIDV:
526 regs.tidv = val;
527 break;
528 case REG_TXDCTL:
529 regs.txdctl = val;
530 break;
531 case REG_TADV:
532 regs.tadv = val;
533 break;
534 case REG_RXCSUM:
535 regs.rxcsum = val;
536 break;
537 case REG_MANC:
538 regs.manc = val;
539 break;
540 default:
541 if (!(daddr >= REG_VFTA && daddr < (REG_VFTA + VLAN_FILTER_TABLE_SIZE*4)) &&
542 !(daddr >= REG_RAL && daddr < (REG_RAL + RCV_ADDRESS_TABLE_SIZE*8)) &&
543 !(daddr >= REG_MTA && daddr < (REG_MTA + MULTICAST_TABLE_SIZE*4)))
544 panic("Write request to unknown register number: %#x\n", daddr);
545 };
546
547 pkt->makeAtomicResponse();
548 return pioDelay;
549 }
550
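// Record an interrupt cause in ICR and either post the interrupt to the CPU
// immediately or schedule it according to the ITR throttling interval.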
551 void
552 IGbE::postInterrupt(IntTypes t, bool now)
553 {
554 assert(t);
555
556 // Interrupt is already pending
557 if (t & regs.icr())
558 return;
559
560 if (regs.icr() & regs.imr)
561 {
562 regs.icr = regs.icr() | t;
563 if (!interEvent.scheduled())
564 interEvent.schedule(curTick + Clock::Int::ns * 256 *
565 regs.itr.interval());
566 } else {
567 regs.icr = regs.icr() | t;
568 if (regs.itr.interval() == 0 || now) {
569 if (interEvent.scheduled())
570 interEvent.deschedule();
571 cpuPostInt();
572 } else {
573 DPRINTF(EthernetIntr, "EINT: Scheduling timer interrupt for %d ticks\n",
574 Clock::Int::ns * 256 * regs.itr.interval());
575 if (!interEvent.scheduled())
576 interEvent.schedule(curTick + Clock::Int::ns * 256 * regs.itr.interval());
577 }
578 }
579 }
580
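// Actually assert the interrupt to the CPU, folding any pending delay-timer
// causes into ICR and cancelling those timers.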
581 void
582 IGbE::cpuPostInt()
583 {
584 if (rdtrEvent.scheduled()) {
585 regs.icr.rxt0(1);
586 rdtrEvent.deschedule();
587 }
588 if (radvEvent.scheduled()) {
589 regs.icr.rxt0(1);
590 radvEvent.deschedule();
591 }
592 if (tadvEvent.scheduled()) {
593 regs.icr.txdw(1);
594 tadvEvent.deschedule();
595 }
596 if (tidvEvent.scheduled()) {
597 regs.icr.txdw(1);
598 tidvEvent.deschedule();
599 }
600
601 regs.icr.int_assert(1);
602 DPRINTF(EthernetIntr, "EINT: Posting interrupt to CPU now. Vector %#x\n",
603 regs.icr());
604 intrPost();
605 }
606
607 void
608 IGbE::cpuClearInt()
609 {
610 if (regs.icr.int_assert()) {
611 regs.icr.int_assert(0);
612 DPRINTF(EthernetIntr, "EINT: Clearing interrupt to CPU now. Vector %#x\n",
613 regs.icr());
614 intrClear();
615 }
616 }
617
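// Raise or clear the CPU interrupt based on the current ICR/IMR state,
// honoring the ITR throttling interval.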
618 void
619 IGbE::chkInterrupt()
620 {
621 // Check if we need to clear the cpu interrupt
622 if (!(regs.icr() & regs.imr)) {
623 if (interEvent.scheduled())
624 interEvent.deschedule();
625 if (regs.icr.int_assert())
626 cpuClearInt();
627 }
628
629 if (regs.icr() & regs.imr) {
630 if (regs.itr.interval() == 0) {
631 cpuPostInt();
632 } else {
633 if (!interEvent.scheduled())
634 interEvent.schedule(curTick + Clock::Int::ns * 256 * regs.itr.interval());
635 }
636 }
637
638
639 }
640
641
642 IGbE::RxDescCache::RxDescCache(IGbE *i, const std::string n, int s)
643 : DescCache<RxDesc>(i, n, s), pktDone(false), pktEvent(this)
644
645 {
646 }
647
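// DMA a received packet into the buffer of the next unused receive descriptor;
// returns false if no unused descriptor is currently cached.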
648 bool
649 IGbE::RxDescCache::writePacket(EthPacketPtr packet)
650 {
651    // Multi-descriptor receive packets aren't handled yet, so the packet must fit in one buffer
652 DPRINTF(EthernetDesc, "Packet Length: %d Desc Size: %d\n",
653 packet->length, igbe->regs.rctl.descSize());
654 assert(packet->length < igbe->regs.rctl.descSize());
655
656 if (!unusedCache.size())
657 return false;
658
659 pktPtr = packet;
660 pktDone = false;
661 igbe->dmaWrite(igbe->platform->pciToDma(unusedCache.front()->buf),
662 packet->length, &pktEvent, packet->data);
663 return true;
664 }
665
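// Called when the packet DMA completes: fill in the descriptor's length,
// checksum and error fields, then handle receive delay interrupts.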
666 void
667 IGbE::RxDescCache::pktComplete()
668 {
669 assert(unusedCache.size());
670 RxDesc *desc;
671 desc = unusedCache.front();
672
673 uint16_t crcfixup = igbe->regs.rctl.secrc() ? 0 : 4 ;
674 desc->len = htole((uint16_t)(pktPtr->length + crcfixup));
675 DPRINTF(EthernetDesc, "pktPtr->length: %d stripcrc offset: %d value written: %d %d\n",
676 pktPtr->length, crcfixup,
677 htole((uint16_t)(pktPtr->length + crcfixup)),
678 (uint16_t)(pktPtr->length + crcfixup));
679
680 // no support for anything but starting at 0
681 assert(igbe->regs.rxcsum.pcss() == 0);
682
683 DPRINTF(EthernetDesc, "Packet written to memory updating Descriptor\n");
684
685 uint8_t status = RXDS_DD | RXDS_EOP;
686 uint8_t err = 0;
687
688 IpPtr ip(pktPtr);
689
690 if (ip) {
691 DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n", ip->id());
692
693 if (igbe->regs.rxcsum.ipofld()) {
694 DPRINTF(EthernetDesc, "Checking IP checksum\n");
695 status |= RXDS_IPCS;
696 desc->csum = htole(cksum(ip));
697 if (cksum(ip) != 0) {
698 err |= RXDE_IPE;
699 DPRINTF(EthernetDesc, "Checksum is bad!!\n");
700 }
701 }
702 TcpPtr tcp(ip);
703 if (tcp && igbe->regs.rxcsum.tuofld()) {
704 DPRINTF(EthernetDesc, "Checking TCP checksum\n");
705 status |= RXDS_TCPCS;
706 desc->csum = htole(cksum(tcp));
707 if (cksum(tcp) != 0) {
708 DPRINTF(EthernetDesc, "Checksum is bad!!\n");
709 err |= RXDE_TCPE;
710 }
711 }
712
713 UdpPtr udp(ip);
714 if (udp && igbe->regs.rxcsum.tuofld()) {
715 DPRINTF(EthernetDesc, "Checking UDP checksum\n");
716 status |= RXDS_UDPCS;
717 desc->csum = htole(cksum(udp));
718 if (cksum(udp) != 0) {
719 DPRINTF(EthernetDesc, "Checksum is bad!!\n");
720 err |= RXDE_TCPE;
721 }
722 }
723 } else { // if ip
724 DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n");
725 }
726
727
728 desc->status = htole(status);
729 desc->errors = htole(err);
730
731 // No vlan support at this point... just set it to 0
732 desc->vlan = 0;
733
734 // Deal with the rx timer interrupts
735 if (igbe->regs.rdtr.delay()) {
736 DPRINTF(EthernetSM, "RXS: Scheduling DTR for %d\n",
737 igbe->regs.rdtr.delay() * igbe->intClock());
738 igbe->rdtrEvent.reschedule(curTick + igbe->regs.rdtr.delay() *
739 igbe->intClock(),true);
740 }
741
742 if (igbe->regs.radv.idv() && igbe->regs.rdtr.delay()) {
743 DPRINTF(EthernetSM, "RXS: Scheduling ADV for %d\n",
744 igbe->regs.radv.idv() * igbe->intClock());
745 if (!igbe->radvEvent.scheduled())
746 igbe->radvEvent.schedule(curTick + igbe->regs.radv.idv() *
747 igbe->intClock());
748 }
749
750    // if neither radv nor rdtr is set, maybe itr is set...
751 if (!igbe->regs.rdtr.delay()) {
752 DPRINTF(EthernetSM, "RXS: Receive interrupt delay disabled, posting IT_RXT\n");
753 igbe->postInterrupt(IT_RXT);
754 }
755
756 // If the packet is small enough, interrupt appropriately
757    // it is unclear from the spec whether this interrupt should be delayed
758 if (pktPtr->length <= igbe->regs.rsrpd.idv()) {
759 DPRINTF(EthernetSM, "RXS: Posting IT_SRPD beacuse small packet received\n");
760 igbe->postInterrupt(IT_SRPD);
761 }
762
763 DPRINTF(EthernetDesc, "Processing of this descriptor complete\n");
764 unusedCache.pop_front();
765 usedCache.push_back(desc);
766 pktPtr = NULL;
767 enableSm();
768 pktDone = true;
769 igbe->checkDrain();
770 }
771
772 void
773 IGbE::RxDescCache::enableSm()
774 {
775 igbe->rxTick = true;
776 igbe->restartClock();
777 }
778
779 bool
780 IGbE::RxDescCache::packetDone()
781 {
782 if (pktDone) {
783 pktDone = false;
784 return true;
785 }
786 return false;
787 }
788
789 bool
790 IGbE::RxDescCache::hasOutstandingEvents()
791 {
792 return pktEvent.scheduled() || wbEvent.scheduled() ||
793 fetchEvent.scheduled();
794 }
795
796 void
797 IGbE::RxDescCache::serialize(std::ostream &os)
798 {
799 DescCache<RxDesc>::serialize(os);
800 SERIALIZE_SCALAR(pktDone);
801 }
802
803 void
804 IGbE::RxDescCache::unserialize(Checkpoint *cp, const std::string &section)
805 {
806 DescCache<RxDesc>::unserialize(cp, section);
807 UNSERIALIZE_SCALAR(pktDone);
808 }
809
810
811 ///////////////////////////////////// IGbE::TxDesc /////////////////////////////////
812
813 IGbE::TxDescCache::TxDescCache(IGbE *i, const std::string n, int s)
814 : DescCache<TxDesc>(i,n, s), pktDone(false), isTcp(false), pktWaiting(false),
815 pktEvent(this)
816
817 {
818 }
819
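// Skip any context descriptors (recording whether the payload is TCP) and
// return the length of the next data/legacy descriptor, or -1 if none is cached.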
820 int
821 IGbE::TxDescCache::getPacketSize()
822 {
823 assert(unusedCache.size());
824
825 TxDesc *desc;
826
827 DPRINTF(EthernetDesc, "Starting processing of descriptor\n");
828
829 while (unusedCache.size() && TxdOp::isContext(unusedCache.front())) {
830 DPRINTF(EthernetDesc, "Got context descriptor type... skipping\n");
831
832        // We only record whether the payload is TCP; the rest of the context descriptor is ignored for now
833 desc = unusedCache.front();
834 // is this going to be a tcp or udp packet?
835 isTcp = TxdOp::tcp(desc) ? true : false;
836
837 // make sure it's ipv4
838 assert(TxdOp::ip(desc));
839
840 TxdOp::setDd(desc);
841 unusedCache.pop_front();
842 usedCache.push_back(desc);
843 }
844
845 if (!unusedCache.size())
846 return -1;
847
848 DPRINTF(EthernetDesc, "Next TX packet is %d bytes\n",
849 TxdOp::getLen(unusedCache.front()));
850
851 return TxdOp::getLen(unusedCache.front());
852 }
853
854 void
855 IGbE::TxDescCache::getPacketData(EthPacketPtr p)
856 {
857 assert(unusedCache.size());
858
859 TxDesc *desc;
860 desc = unusedCache.front();
861
862 assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) && TxdOp::getLen(desc));
863
864 pktPtr = p;
865
866 pktWaiting = true;
867
868 DPRINTF(EthernetDesc, "Starting DMA of packet\n");
869 igbe->dmaRead(igbe->platform->pciToDma(TxdOp::getBuf(desc)),
870 TxdOp::getLen(desc), &pktEvent, p->data + p->length);
871
872
873 }
874
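// Called when the DMA of a transmit descriptor's data completes: update the
// packet length, perform any requested checksum offload, mark the descriptor
// done, and handle writeback and transmit delay interrupts.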
875 void
876 IGbE::TxDescCache::pktComplete()
877 {
878
879 TxDesc *desc;
880 assert(unusedCache.size());
881 assert(pktPtr);
882
883 DPRINTF(EthernetDesc, "DMA of packet complete\n");
884
885
886 desc = unusedCache.front();
887 assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) && TxdOp::getLen(desc));
888
889 DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
890
891 if (!TxdOp::eop(desc)) {
892 // This only supports two descriptors per tx packet
893 assert(pktPtr->length == 0);
894 pktPtr->length = TxdOp::getLen(desc);
895 unusedCache.pop_front();
896 usedCache.push_back(desc);
897 pktDone = true;
898 pktWaiting = false;
899 pktPtr = NULL;
900
901 DPRINTF(EthernetDesc, "Partial Packet Descriptor Done\n");
902 enableSm();
903 return;
904 }
905
906 // Set the length of the data in the EtherPacket
907 pktPtr->length += TxdOp::getLen(desc);
908
909 // no support for vlans
910 assert(!TxdOp::vle(desc));
911
912        // we always report status
913 assert(TxdOp::rs(desc));
914
915 // we only support single packet descriptors at this point
916 assert(TxdOp::eop(desc));
917
918 // set that this packet is done
919 TxdOp::setDd(desc);
920
921 DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
922
923 if (DTRACE(EthernetDesc)) {
924 IpPtr ip(pktPtr);
925 if (ip)
926 DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n",
927 ip->id());
928 else
929 DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n");
930 }
931
932        // Checksums are only offloaded for the newer descriptor types
933 if (TxdOp::isData(desc) && ( TxdOp::ixsm(desc) || TxdOp::txsm(desc)) ) {
934 DPRINTF(EthernetDesc, "Calculating checksums for packet\n");
935 IpPtr ip(pktPtr);
936
937 if (TxdOp::ixsm(desc)) {
938 ip->sum(0);
939 ip->sum(cksum(ip));
940 DPRINTF(EthernetDesc, "Calculated IP checksum\n");
941 }
942 if (TxdOp::txsm(desc)) {
943 if (isTcp) {
944 TcpPtr tcp(ip);
945 assert(tcp);
946 tcp->sum(0);
947 tcp->sum(cksum(tcp));
948 DPRINTF(EthernetDesc, "Calculated TCP checksum\n");
949 } else {
950 UdpPtr udp(ip);
951 assert(udp);
952 udp->sum(0);
953 udp->sum(cksum(udp));
954 DPRINTF(EthernetDesc, "Calculated UDP checksum\n");
955 }
956 }
957 }
958
959 if (TxdOp::ide(desc)) {
960            // Deal with the tx timer interrupts
961 DPRINTF(EthernetDesc, "Descriptor had IDE set\n");
962 if (igbe->regs.tidv.idv()) {
963 DPRINTF(EthernetDesc, "setting tidv\n");
964 igbe->tidvEvent.reschedule(curTick + igbe->regs.tidv.idv() *
965 igbe->intClock(), true);
966 }
967
968 if (igbe->regs.tadv.idv() && igbe->regs.tidv.idv()) {
969 DPRINTF(EthernetDesc, "setting tadv\n");
970 if (!igbe->tadvEvent.scheduled())
971 igbe->tadvEvent.schedule(curTick + igbe->regs.tadv.idv() *
972 igbe->intClock());
973 }
974 }
975
976
977
978 unusedCache.pop_front();
979 usedCache.push_back(desc);
980 pktDone = true;
981 pktWaiting = false;
982 pktPtr = NULL;
983
984 DPRINTF(EthernetDesc, "Descriptor Done\n");
985
986 if (igbe->regs.txdctl.wthresh() == 0) {
987 DPRINTF(EthernetDesc, "WTHRESH == 0, writing back descriptor\n");
988 writeback(0);
989 } else if (igbe->regs.txdctl.wthresh() >= usedCache.size()) {
990 DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n");
991 writeback((igbe->cacheBlockSize()-1)>>4);
992 }
993 enableSm();
994 igbe->checkDrain();
995 }
996
997 void
998 IGbE::TxDescCache::serialize(std::ostream &os)
999 {
1000 DescCache<TxDesc>::serialize(os);
1001 SERIALIZE_SCALAR(pktDone);
1002 SERIALIZE_SCALAR(isTcp);
1003 SERIALIZE_SCALAR(pktWaiting);
1004 }
1005
1006 void
1007 IGbE::TxDescCache::unserialize(Checkpoint *cp, const std::string &section)
1008 {
1009 DescCache<TxDesc>::unserialize(cp, section);
1010 UNSERIALIZE_SCALAR(pktDone);
1011 UNSERIALIZE_SCALAR(isTcp);
1012 UNSERIALIZE_SCALAR(pktWaiting);
1013 }
1014
1015 bool
1016 IGbE::TxDescCache::packetAvailable()
1017 {
1018 if (pktDone) {
1019 pktDone = false;
1020 return true;
1021 }
1022 return false;
1023 }
1024
1025 void
1026 IGbE::TxDescCache::enableSm()
1027 {
1028 igbe->txTick = true;
1029 igbe->restartClock();
1030 }
1031
1032 bool
1033 IGbE::TxDescCache::hasOutstandingEvents()
1034 {
1035 return pktEvent.scheduled() || wbEvent.scheduled() ||
1036 fetchEvent.scheduled();
1037 }
1038
1039
1040 ///////////////////////////////////// IGbE /////////////////////////////////
1041
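// Restart the main tick event if any state machine needs to run and the
// device is not draining.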
1042 void
1043 IGbE::restartClock()
1044 {
1045 if (!tickEvent.scheduled() && (rxTick || txTick || txFifoTick) && getState() ==
1046 SimObject::Running)
1047 tickEvent.schedule((curTick/cycles(1)) * cycles(1) + cycles(1));
1048 }
1049
1050 unsigned int
1051 IGbE::drain(Event *de)
1052 {
1053 unsigned int count;
1054 count = pioPort->drain(de) + dmaPort->drain(de);
1055 if (rxDescCache.hasOutstandingEvents() ||
1056 txDescCache.hasOutstandingEvents()) {
1057 count++;
1058 drainEvent = de;
1059 }
1060
1061 txFifoTick = false;
1062 txTick = false;
1063 rxTick = false;
1064
1065 if (tickEvent.scheduled())
1066 tickEvent.deschedule();
1067
1068 if (count)
1069 changeState(Draining);
1070 else
1071 changeState(Drained);
1072
1073 return count;
1074 }
1075
1076 void
1077 IGbE::resume()
1078 {
1079 SimObject::resume();
1080
1081 txFifoTick = true;
1082 txTick = true;
1083 rxTick = true;
1084
1085 restartClock();
1086 }
1087
1088 void
1089 IGbE::checkDrain()
1090 {
1091 if (!drainEvent)
1092 return;
1093
1094 if (rxDescCache.hasOutstandingEvents() ||
1095 txDescCache.hasOutstandingEvents()) {
1096 drainEvent->process();
1097 drainEvent = NULL;
1098 }
1099 }
1100
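// Transmit state machine: moves completed packets into the TX FIFO, fetches
// and writes back descriptors, and starts DMA of the next packet's data.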
1101 void
1102 IGbE::txStateMachine()
1103 {
1104 if (!regs.tctl.en()) {
1105 txTick = false;
1106 DPRINTF(EthernetSM, "TXS: TX disabled, stopping ticking\n");
1107 return;
1108 }
1109
1110    // If we have a packet available and its length is not 0 (meaning it's not
1111    // a multi-descriptor packet), put it in the FIFO; otherwise on the next
1112    // iteration we'll get the rest of the data
1113 if (txPacket && txDescCache.packetAvailable() && txPacket->length) {
1114 bool success;
1115 DPRINTF(EthernetSM, "TXS: packet placed in TX FIFO\n");
1116 success = txFifo.push(txPacket);
1117 txFifoTick = true;
1118 assert(success);
1119 txPacket = NULL;
1120 txDescCache.writeback((cacheBlockSize()-1)>>4);
1121 return;
1122 }
1123
1124 // Only support descriptor granularity
1125 assert(regs.txdctl.gran());
1126 if (regs.txdctl.lwthresh() && txDescCache.descLeft() < (regs.txdctl.lwthresh() * 8)) {
1127 DPRINTF(EthernetSM, "TXS: LWTHRESH caused posting of TXDLOW\n");
1128 postInterrupt(IT_TXDLOW);
1129 }
1130
1131 if (!txPacket) {
1132 txPacket = new EthPacketData(16384);
1133 }
1134
1135 if (!txDescCache.packetWaiting()) {
1136 if (txDescCache.descLeft() == 0) {
1137 DPRINTF(EthernetSM, "TXS: No descriptors left in ring, forcing "
1138 "writeback stopping ticking and posting TXQE\n");
1139 txDescCache.writeback(0);
1140 txTick = false;
1141 postInterrupt(IT_TXQE, true);
1142 return;
1143 }
1144
1145
1146 if (!(txDescCache.descUnused())) {
1147 DPRINTF(EthernetSM, "TXS: No descriptors available in cache, fetching and stopping ticking\n");
1148 txTick = false;
1149 txDescCache.fetchDescriptors();
1150 return;
1151 }
1152
1153 int size;
1154 size = txDescCache.getPacketSize();
1155 if (size > 0 && txFifo.avail() > size) {
1156 DPRINTF(EthernetSM, "TXS: Reserving %d bytes in FIFO and begining "
1157 "DMA of next packet\n", size);
1158 txFifo.reserve(size);
1159 txDescCache.getPacketData(txPacket);
1160 } else if (size <= 0) {
1161 DPRINTF(EthernetSM, "TXS: No packets to get, writing back used descriptors\n");
1162 txDescCache.writeback(0);
1163 } else {
1164 DPRINTF(EthernetSM, "TXS: FIFO full, stopping ticking until space "
1165 "available in FIFO\n");
1166 txDescCache.writeback((cacheBlockSize()-1)>>4);
1167 txTick = false;
1168 }
1169
1170
1171 return;
1172 }
1173 DPRINTF(EthernetSM, "TXS: Nothing to do, stopping ticking\n");
1174 txTick = false;
1175 }
1176
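// Called by the peer interface when a packet arrives from the wire; push it
// into the RX FIFO, dropping it (and posting an overflow interrupt) if the
// FIFO is full.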
1177 bool
1178 IGbE::ethRxPkt(EthPacketPtr pkt)
1179 {
1180 DPRINTF(Ethernet, "RxFIFO: Receiving pcakte from wire\n");
1181 if (!regs.rctl.en()) {
1182 DPRINTF(Ethernet, "RxFIFO: RX not enabled, dropping\n");
1183 return true;
1184 }
1185
1186 // restart the state machines if they are stopped
1187 rxTick = true;
1188 if ((rxTick || txTick) && !tickEvent.scheduled()) {
1189 DPRINTF(EthernetSM, "RXS: received packet into fifo, starting ticking\n");
1190 restartClock();
1191 }
1192
1193 if (!rxFifo.push(pkt)) {
1194 DPRINTF(Ethernet, "RxFIFO: Packet won't fit in fifo... dropped\n");
1195 postInterrupt(IT_RXO, true);
1196 return false;
1197 }
1198 return true;
1199 }
1200
1201
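// Receive state machine: writes packets from the RX FIFO to memory through
// the descriptor cache and manages descriptor fetch, writeback, and receive
// interrupts.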
1202 void
1203 IGbE::rxStateMachine()
1204 {
1205 if (!regs.rctl.en()) {
1206 rxTick = false;
1207 DPRINTF(EthernetSM, "RXS: RX disabled, stopping ticking\n");
1208 return;
1209 }
1210
1211 // If the packet is done check for interrupts/descriptors/etc
1212 if (rxDescCache.packetDone()) {
1213 rxDmaPacket = false;
1214 DPRINTF(EthernetSM, "RXS: Packet completed DMA to memory\n");
1215 int descLeft = rxDescCache.descLeft();
1216 switch (regs.rctl.rdmts()) {
1217 case 2: if (descLeft > .125 * regs.rdlen()) break;
1218 case 1: if (descLeft > .250 * regs.rdlen()) break;
1219 case 0: if (descLeft > .500 * regs.rdlen()) break;
1220 DPRINTF(Ethernet, "RXS: Interrupting (RXDMT) because of descriptors left\n");
1221 postInterrupt(IT_RXDMT);
1222 break;
1223 }
1224
1225 if (descLeft == 0) {
1226 DPRINTF(EthernetSM, "RXS: No descriptors left in ring, forcing"
1227 " writeback and stopping ticking\n");
1228 rxDescCache.writeback(0);
1229 rxTick = false;
1230 }
1231
1232        // only support descriptor granularities
1233 assert(regs.rxdctl.gran());
1234
1235 if (regs.rxdctl.wthresh() >= rxDescCache.descUsed()) {
1236 DPRINTF(EthernetSM, "RXS: Writing back because WTHRESH >= descUsed\n");
1237 if (regs.rxdctl.wthresh() < (cacheBlockSize()>>4))
1238 rxDescCache.writeback(regs.rxdctl.wthresh()-1);
1239 else
1240 rxDescCache.writeback((cacheBlockSize()-1)>>4);
1241 }
1242
1243 if ((rxDescCache.descUnused() < regs.rxdctl.pthresh()) &&
1244 ((rxDescCache.descLeft() - rxDescCache.descUnused()) > regs.rxdctl.hthresh())) {
1245 DPRINTF(EthernetSM, "RXS: Fetching descriptors because descUnused < PTHRESH\n");
1246 rxDescCache.fetchDescriptors();
1247 }
1248
1249 if (rxDescCache.descUnused() == 0) {
1250 DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
1251 "fetching descriptors and stopping ticking\n");
1252 rxTick = false;
1253 rxDescCache.fetchDescriptors();
1254 }
1255 return;
1256 }
1257
1258 if (rxDmaPacket) {
1259 DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n");
1260 rxTick = false;
1261 return;
1262 }
1263
1264 if (!rxDescCache.descUnused()) {
1265 DPRINTF(EthernetSM, "RXS: No descriptors available in cache, stopping ticking\n");
1266 rxTick = false;
1267 DPRINTF(EthernetSM, "RXS: No descriptors available, fetching\n");
1268 rxDescCache.fetchDescriptors();
1269 return;
1270 }
1271
1272 if (rxFifo.empty()) {
1273 DPRINTF(EthernetSM, "RXS: RxFIFO empty, stopping ticking\n");
1274 rxTick = false;
1275 return;
1276 }
1277
1278 EthPacketPtr pkt;
1279 pkt = rxFifo.front();
1280
1281 DPRINTF(EthernetSM, "RXS: Writing packet into memory\n");
1282 if (!rxDescCache.writePacket(pkt)) {
1283 return;
1284 }
1285
1286 DPRINTF(EthernetSM, "RXS: Removing packet from FIFO\n");
1287 rxFifo.pop();
1288 DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n");
1289 rxTick = false;
1290 rxDmaPacket = true;
1291 }
1292
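// Try to send the packet at the head of the TX FIFO out to the wire.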
1293 void
1294 IGbE::txWire()
1295 {
1296 if (txFifo.empty()) {
1297 txFifoTick = false;
1298 return;
1299 }
1300
1301
1302 if (etherInt->sendPacket(txFifo.front())) {
1303 DPRINTF(EthernetSM, "TxFIFO: Successful transmit, bytes available in fifo: %d\n",
1304 txFifo.avail());
1305 txFifo.pop();
1306 } else {
1307        // We'll get woken up when ethTxDone() gets called for this packet
1308 txFifoTick = false;
1309 }
1310
1311 }
1312
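// Main clock tick: run whichever state machines are active and reschedule
// if any of them still need to run.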
1313 void
1314 IGbE::tick()
1315 {
1316 DPRINTF(EthernetSM, "IGbE: -------------- Cycle --------------\n");
1317
1318 if (rxTick)
1319 rxStateMachine();
1320
1321 if (txTick)
1322 txStateMachine();
1323
1324 if (txFifoTick)
1325 txWire();
1326
1327
1328 if (rxTick || txTick || txFifoTick)
1329 tickEvent.schedule(curTick + cycles(1));
1330 }
1331
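// Called when the peer has finished transmitting our packet; restart the
// transmit state machines.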
1332 void
1333 IGbE::ethTxDone()
1334 {
1335 // restart the tx state machines if they are stopped
1336 // fifo to send another packet
1337 // tx sm to put more data into the fifo
1338 txFifoTick = true;
1339 txTick = true;
1340
1341 restartClock();
1342 DPRINTF(EthernetSM, "TxFIFO: Transmission complete\n");
1343 }
1344
1345 void
1346 IGbE::serialize(std::ostream &os)
1347 {
1348 PciDev::serialize(os);
1349
1350 regs.serialize(os);
1351 SERIALIZE_SCALAR(eeOpBits);
1352 SERIALIZE_SCALAR(eeAddrBits);
1353 SERIALIZE_SCALAR(eeDataBits);
1354 SERIALIZE_SCALAR(eeOpcode);
1355 SERIALIZE_SCALAR(eeAddr);
1356 SERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);
1357
1358 rxFifo.serialize("rxfifo", os);
1359 txFifo.serialize("txfifo", os);
1360
1361 bool txPktExists = txPacket;
1362 SERIALIZE_SCALAR(txPktExists);
1363 if (txPktExists)
1364 txPacket->serialize("txpacket", os);
1365
1366 Tick rdtr_time = 0, radv_time = 0, tidv_time = 0, tadv_time = 0,
1367 inter_time = 0;
1368
1369 if (rdtrEvent.scheduled())
1370 rdtr_time = rdtrEvent.when();
1371 SERIALIZE_SCALAR(rdtr_time);
1372
1373 if (radvEvent.scheduled())
1374 radv_time = radvEvent.when();
1375 SERIALIZE_SCALAR(radv_time);
1376
1377 if (tidvEvent.scheduled())
1378        tidv_time = tidvEvent.when();
1379 SERIALIZE_SCALAR(tidv_time);
1380
1381 if (tadvEvent.scheduled())
1382        tadv_time = tadvEvent.when();
1383 SERIALIZE_SCALAR(tadv_time);
1384
1385 if (interEvent.scheduled())
1386        inter_time = interEvent.when();
1387 SERIALIZE_SCALAR(inter_time);
1388
1389 nameOut(os, csprintf("%s.TxDescCache", name()));
1390 txDescCache.serialize(os);
1391
1392 nameOut(os, csprintf("%s.RxDescCache", name()));
1393 rxDescCache.serialize(os);
1394 }
1395
1396 void
1397 IGbE::unserialize(Checkpoint *cp, const std::string &section)
1398 {
1399 PciDev::unserialize(cp, section);
1400
1401 regs.unserialize(cp, section);
1402 UNSERIALIZE_SCALAR(eeOpBits);
1403 UNSERIALIZE_SCALAR(eeAddrBits);
1404 UNSERIALIZE_SCALAR(eeDataBits);
1405 UNSERIALIZE_SCALAR(eeOpcode);
1406 UNSERIALIZE_SCALAR(eeAddr);
1407 UNSERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);
1408
1409 rxFifo.unserialize("rxfifo", cp, section);
1410 txFifo.unserialize("txfifo", cp, section);
1411
1412 bool txPktExists;
1413 UNSERIALIZE_SCALAR(txPktExists);
1414 if (txPktExists) {
1415 txPacket = new EthPacketData(16384);
1416 txPacket->unserialize("txpacket", cp, section);
1417 }
1418
1419 rxTick = true;
1420 txTick = true;
1421 txFifoTick = true;
1422
1423 Tick rdtr_time, radv_time, tidv_time, tadv_time, inter_time;
1424 UNSERIALIZE_SCALAR(rdtr_time);
1425 UNSERIALIZE_SCALAR(radv_time);
1426 UNSERIALIZE_SCALAR(tidv_time);
1427 UNSERIALIZE_SCALAR(tadv_time);
1428 UNSERIALIZE_SCALAR(inter_time);
1429
1430 if (rdtr_time)
1431 rdtrEvent.schedule(rdtr_time);
1432
1433 if (radv_time)
1434 radvEvent.schedule(radv_time);
1435
1436 if (tidv_time)
1437 tidvEvent.schedule(tidv_time);
1438
1439 if (tadv_time)
1440 tadvEvent.schedule(tadv_time);
1441
1442 if (inter_time)
1443 interEvent.schedule(inter_time);
1444
1445 txDescCache.unserialize(cp, csprintf("%s.TxDescCache", section));
1446
1447 rxDescCache.unserialize(cp, csprintf("%s.RxDescCache", section));
1448 }
1449
1450 IGbEInt *
1451 IGbEIntParams::create()
1452 {
1453 IGbEInt *dev_int = new IGbEInt(name, device);
1454
1455 EtherInt *p = (EtherInt *)peer;
1456 if (p) {
1457 dev_int->setPeer(p);
1458 p->setPeer(dev_int);
1459 }
1460
1461 return dev_int;
1462 }
1463
1464 IGbE *
1465 IGbEParams::create()
1466 {
1467 return new IGbE(this);
1468 }