/*
 * Copyright (c) 2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 */

/* @file
 * Device model for Intel's 8254x line of gigabit ethernet controllers.
 * In particular an 82547 revision 2 (82547GI) MAC because it seems to have the
 * fewest workarounds in the driver. It will probably work with most of the
 * other MACs with slight modifications.
 */


/*
 * @todo really there are multiple dma engines.. we should implement them.
 */

#include "base/inet.hh"
#include "base/trace.hh"
#include "dev/i8254xGBe.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "sim/builder.hh"
#include "sim/stats.hh"
#include "sim/system.hh"

#include <algorithm>

using namespace iGbReg;
using namespace Net;

IGbE::IGbE(Params *p)
    : PciDev(p), etherInt(NULL), drainEvent(NULL),
      useFlowControl(p->use_flow_control),
      rxFifo(p->rx_fifo_size), txFifo(p->tx_fifo_size), rxTick(false),
      txTick(false), txFifoTick(false), rdtrEvent(this), radvEvent(this),
      tadvEvent(this), tidvEvent(this), tickEvent(this), interEvent(this),
      rxDescCache(this, name()+".RxDesc", p->rx_desc_cache_size),
      txDescCache(this, name()+".TxDesc", p->tx_desc_cache_size),
      clock(p->clock)
{
    // Initialize internal registers per Intel documentation
    // All registers are initialized to 0 by the per-register constructors
    regs.ctrl.fd(1);
    regs.ctrl.lrst(1);
    regs.ctrl.speed(2);
    regs.ctrl.frcspd(1);
    regs.sts.speed(3); // Say we're 1000Mbps
    regs.sts.fd(1); // full duplex
    regs.sts.lu(1); // link up
    regs.eecd.fwe(1);
    regs.eecd.ee_type(1);
    regs.imr = 0;
    regs.iam = 0;
    regs.rxdctl.gran(1);
    regs.rxdctl.wthresh(1);
    regs.fcrth(1);

    regs.pba.rxa(0x30);
    regs.pba.txa(0x10);

    eeOpBits = 0;
    eeAddrBits = 0;
    eeDataBits = 0;
    eeOpcode = 0;

    // clear all 64 16 bit words of the eeprom
    memset(&flash, 0, EEPROM_SIZE*2);

    // Set the MAC address
    memcpy(flash, p->hardware_address.bytes(), ETH_ADDR_LEN);
    for (int x = 0; x < ETH_ADDR_LEN/2; x++)
        flash[x] = htobe(flash[x]);

    uint16_t csum = 0;
    for (int x = 0; x < EEPROM_SIZE; x++)
        csum += htobe(flash[x]);

    // Set the checksum word so that the 16-bit sum of the whole EEPROM
    // equals the expected magic value EEPROM_CSUM
    flash[EEPROM_SIZE-1] = htobe((uint16_t)(EEPROM_CSUM - csum));

    rxFifo.clear();
    txFifo.clear();
}


Tick
IGbE::writeConfig(PacketPtr pkt)
{
    int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
    if (offset < PCI_DEVICE_SPECIFIC)
        PciDev::writeConfig(pkt);
    else
        panic("Device specific PCI config space not implemented.\n");

    ///
    /// Some work may need to be done here based on the PCI COMMAND bits.
    ///

    return pioDelay;
}

Tick
IGbE::read(PacketPtr pkt)
{
    int bar;
    Addr daddr;

    if (!getBAR(pkt->getAddr(), bar, daddr))
        panic("Invalid PCI memory access to unmapped memory.\n");

    // Only Memory register BAR is allowed
    assert(bar == 0);

    // Only 32bit accesses allowed
    assert(pkt->getSize() == 4);

    DPRINTF(Ethernet, "Read device register %#X\n", daddr);

    pkt->allocate();

    ///
    /// Handle read of register here
    ///

    switch (daddr) {
      case REG_CTRL:
        pkt->set<uint32_t>(regs.ctrl());
        break;
      case REG_STATUS:
        pkt->set<uint32_t>(regs.sts());
        break;
      case REG_EECD:
        pkt->set<uint32_t>(regs.eecd());
        break;
      case REG_EERD:
        pkt->set<uint32_t>(regs.eerd());
        break;
      case REG_CTRL_EXT:
        pkt->set<uint32_t>(regs.ctrl_ext());
        break;
      case REG_MDIC:
        pkt->set<uint32_t>(regs.mdic());
        break;
      case REG_ICR:
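        // Reading ICR is one of the ways the driver acknowledges
        // interrupts: the cause bits are cleared on read when either no
        // interrupts are masked in (IMR == 0) or an interrupt is currently
        // asserted, and with CTRL_EXT.IAME set the IAM bits are also
        // removed from IMR so the same causes are auto-masked.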
        DPRINTF(Ethernet, "Reading ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n",
                regs.icr(), regs.imr, regs.iam, regs.ctrl_ext.iame());
        pkt->set<uint32_t>(regs.icr());
        if (regs.icr.int_assert() || regs.imr == 0) {
            regs.icr = regs.icr() & ~mask(30);
            DPRINTF(Ethernet, "Cleared ICR. ICR=%#x\n", regs.icr());
        }
        if (regs.ctrl_ext.iame() && regs.icr.int_assert())
            regs.imr &= ~regs.iam;
        chkInterrupt();
        break;
      case REG_ITR:
        pkt->set<uint32_t>(regs.itr());
        break;
      case REG_RCTL:
        pkt->set<uint32_t>(regs.rctl());
        break;
      case REG_FCTTV:
        pkt->set<uint32_t>(regs.fcttv());
        break;
      case REG_TCTL:
        pkt->set<uint32_t>(regs.tctl());
        break;
      case REG_PBA:
        pkt->set<uint32_t>(regs.pba());
        break;
      case REG_WUC:
      case REG_LEDCTL:
        pkt->set<uint32_t>(0); // We don't care, so just return 0
        break;
      case REG_FCRTL:
        pkt->set<uint32_t>(regs.fcrtl());
        break;
      case REG_FCRTH:
        pkt->set<uint32_t>(regs.fcrth());
        break;
      case REG_RDBAL:
        pkt->set<uint32_t>(regs.rdba.rdbal());
        break;
      case REG_RDBAH:
        pkt->set<uint32_t>(regs.rdba.rdbah());
        break;
      case REG_RDLEN:
        pkt->set<uint32_t>(regs.rdlen());
        break;
      case REG_RDH:
        pkt->set<uint32_t>(regs.rdh());
        break;
      case REG_RDT:
        pkt->set<uint32_t>(regs.rdt());
        break;
      case REG_RDTR:
        pkt->set<uint32_t>(regs.rdtr());
        if (regs.rdtr.fpd()) {
            rxDescCache.writeback(0);
            DPRINTF(EthernetIntr, "Posting interrupt because of RDTR.FPD write\n");
            postInterrupt(IT_RXT);
            regs.rdtr.fpd(0);
        }
        break;
      case REG_RADV:
        pkt->set<uint32_t>(regs.radv());
        break;
      case REG_TDBAL:
        pkt->set<uint32_t>(regs.tdba.tdbal());
        break;
      case REG_TDBAH:
        pkt->set<uint32_t>(regs.tdba.tdbah());
        break;
      case REG_TDLEN:
        pkt->set<uint32_t>(regs.tdlen());
        break;
      case REG_TDH:
        pkt->set<uint32_t>(regs.tdh());
        break;
      case REG_TDT:
        pkt->set<uint32_t>(regs.tdt());
        break;
      case REG_TIDV:
        pkt->set<uint32_t>(regs.tidv());
        break;
      case REG_TXDCTL:
        pkt->set<uint32_t>(regs.txdctl());
        break;
      case REG_TADV:
        pkt->set<uint32_t>(regs.tadv());
        break;
      case REG_RXCSUM:
        pkt->set<uint32_t>(regs.rxcsum());
        break;
      case REG_MANC:
        pkt->set<uint32_t>(regs.manc());
        break;
      default:
        if (!(daddr >= REG_VFTA && daddr < (REG_VFTA + VLAN_FILTER_TABLE_SIZE*4)) &&
            !(daddr >= REG_RAL && daddr < (REG_RAL + RCV_ADDRESS_TABLE_SIZE*8)) &&
            !(daddr >= REG_MTA && daddr < (REG_MTA + MULTICAST_TABLE_SIZE*4)) &&
            !(daddr >= REG_CRCERRS && daddr < (REG_CRCERRS + STATS_REGS_SIZE)))
            panic("Read request to unknown register number: %#x\n", daddr);
        else
            pkt->set<uint32_t>(0);
    };

    pkt->makeAtomicResponse();
    return pioDelay;
}

Tick
IGbE::write(PacketPtr pkt)
{
    int bar;
    Addr daddr;

    if (!getBAR(pkt->getAddr(), bar, daddr))
        panic("Invalid PCI memory access to unmapped memory.\n");

    // Only Memory register BAR is allowed
    assert(bar == 0);

    // Only 32bit accesses allowed
    assert(pkt->getSize() == sizeof(uint32_t));

    DPRINTF(Ethernet, "Wrote device register %#X value %#X\n",
            daddr, pkt->get<uint32_t>());

    ///
    /// Handle write of register here
    ///
    uint32_t val = pkt->get<uint32_t>();

    Regs::RCTL oldrctl;
    Regs::TCTL oldtctl;

    switch (daddr) {
      case REG_CTRL:
        regs.ctrl = val;
        if (regs.ctrl.tfce())
            warn("TX Flow control enabled, should implement\n");
        if (regs.ctrl.rfce())
            warn("RX Flow control enabled, should implement\n");
        break;
      case REG_CTRL_EXT:
        regs.ctrl_ext = val;
        break;
      case REG_STATUS:
        regs.sts = val;
        break;
      case REG_EECD:
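        // The driver bit-bangs the serial EEPROM through EECD: on every
        // rising edge of the SK clock one bit is shifted in from DI (first
        // the 8-bit opcode, then the 8-bit address) or shifted out on DO
        // (16 data bits for a READ, 8 status bits for RDSR). Only the SPI
        // READ and RDSR opcodes are emulated here.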
        int oldClk;
        oldClk = regs.eecd.sk();
        regs.eecd = val;
        // See if this is an EEPROM access and emulate accordingly
        if (!oldClk && regs.eecd.sk()) {
            if (eeOpBits < 8) {
                eeOpcode = eeOpcode << 1 | regs.eecd.din();
                eeOpBits++;
            } else if (eeAddrBits < 8 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
                eeAddr = eeAddr << 1 | regs.eecd.din();
                eeAddrBits++;
            } else if (eeDataBits < 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
                assert(eeAddr>>1 < EEPROM_SIZE);
                DPRINTF(EthernetEEPROM, "EEPROM bit read: %d word: %#X\n",
                        flash[eeAddr>>1] >> eeDataBits & 0x1, flash[eeAddr>>1]);
                regs.eecd.dout((flash[eeAddr>>1] >> (15-eeDataBits)) & 0x1);
                eeDataBits++;
            } else if (eeDataBits < 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI) {
                regs.eecd.dout(0);
                eeDataBits++;
            } else
                panic("What's going on with eeprom interface? opcode:"
                      " %#x:%d addr: %#x:%d, data: %d\n", (uint32_t)eeOpcode,
                      (uint32_t)eeOpBits, (uint32_t)eeAddr,
                      (uint32_t)eeAddrBits, (uint32_t)eeDataBits);

            // Reset everything for the next command
            if ((eeDataBits == 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) ||
                (eeDataBits == 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI)) {
                eeOpBits = 0;
                eeAddrBits = 0;
                eeDataBits = 0;
                eeOpcode = 0;
                eeAddr = 0;
            }

            DPRINTF(EthernetEEPROM, "EEPROM: opcode: %#X:%d addr: %#X:%d\n",
                    (uint32_t)eeOpcode, (uint32_t)eeOpBits,
                    (uint32_t)eeAddr>>1, (uint32_t)eeAddrBits);
            if (eeOpBits == 8 && !(eeOpcode == EEPROM_READ_OPCODE_SPI ||
                                   eeOpcode == EEPROM_RDSR_OPCODE_SPI))
                panic("Unknown eeprom opcode: %#X:%d\n", (uint32_t)eeOpcode,
                      (uint32_t)eeOpBits);
        }
        // If the driver requests EEPROM access, grant it immediately
        regs.eecd.ee_gnt(regs.eecd.ee_req());
        break;
      case REG_EERD:
        regs.eerd = val;
        break;
      case REG_MDIC:
        regs.mdic = val;
        if (regs.mdic.i())
            panic("No support for interrupt on mdic complete\n");
        if (regs.mdic.phyadd() != 1)
            panic("No support for reading anything but phy\n");
        DPRINTF(Ethernet, "%s phy address %x\n",
                regs.mdic.op() == 1 ? "Writing" : "Reading", regs.mdic.regadd());
        switch (regs.mdic.regadd()) {
          case PHY_PSTATUS:
            regs.mdic.data(0x796D); // link up
            break;
          case PHY_PID:
            regs.mdic.data(0x02A8);
            break;
          case PHY_EPID:
            regs.mdic.data(0x0380);
            break;
          case PHY_GSTATUS:
            regs.mdic.data(0x7C00);
            break;
          case PHY_EPSTATUS:
            regs.mdic.data(0x3000);
            break;
          case PHY_AGC:
            regs.mdic.data(0x180); // some random length
            break;
          default:
            regs.mdic.data(0);
        }
        regs.mdic.r(1);
        break;
      case REG_ICR:
        DPRINTF(Ethernet, "Writing ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n",
                regs.icr(), regs.imr, regs.iam, regs.ctrl_ext.iame());
        if (regs.ctrl_ext.iame())
            regs.imr &= ~regs.iam;
        regs.icr = ~bits(val,30,0) & regs.icr();
        chkInterrupt();
        break;
      case REG_ITR:
        regs.itr = val;
        break;
      case REG_ICS:
        DPRINTF(EthernetIntr, "Posting interrupt because of ICS write\n");
        postInterrupt((IntTypes)val);
        break;
      case REG_IMS:
        regs.imr |= val;
        chkInterrupt();
        break;
      case REG_IMC:
        regs.imr &= ~val;
        chkInterrupt();
        break;
      case REG_IAM:
        regs.iam = val;
        break;
      case REG_RCTL:
        oldrctl = regs.rctl;
        regs.rctl = val;
        if (regs.rctl.rst()) {
            rxDescCache.reset();
            DPRINTF(EthernetSM, "RXS: Got RESET!\n");
            rxFifo.clear();
            regs.rctl.rst(0);
        }
        if (regs.rctl.en())
            rxTick = true;
        restartClock();
        break;
      case REG_FCTTV:
        regs.fcttv = val;
        break;
      case REG_TCTL:
        oldtctl = regs.tctl;
        regs.tctl = val;
        if (regs.tctl.en())
            txTick = true;
        restartClock();
        if (regs.tctl.en() && !oldtctl.en()) {
            txDescCache.reset();
        }
        break;
      case REG_PBA:
        regs.pba.rxa(val);
        regs.pba.txa(64 - regs.pba.rxa());
        break;
      case REG_WUC:
      case REG_LEDCTL:
      case REG_FCAL:
      case REG_FCAH:
      case REG_FCT:
      case REG_VET:
      case REG_AIFS:
      case REG_TIPG:
        ; // We don't care, so don't store anything
        break;
      case REG_FCRTL:
        regs.fcrtl = val;
        break;
      case REG_FCRTH:
        regs.fcrth = val;
        break;
      case REG_RDBAL:
        regs.rdba.rdbal(val & ~mask(4));
        rxDescCache.areaChanged();
        break;
      case REG_RDBAH:
        regs.rdba.rdbah(val);
        rxDescCache.areaChanged();
        break;
      case REG_RDLEN:
        regs.rdlen = val & ~mask(7);
        rxDescCache.areaChanged();
        break;
      case REG_RDH:
        regs.rdh = val;
        rxDescCache.areaChanged();
        break;
      case REG_RDT:
        regs.rdt = val;
        rxTick = true;
        restartClock();
        break;
      case REG_RDTR:
        regs.rdtr = val;
        break;
      case REG_RADV:
        regs.radv = val;
        break;
      case REG_TDBAL:
        regs.tdba.tdbal(val & ~mask(4));
        txDescCache.areaChanged();
        break;
      case REG_TDBAH:
        regs.tdba.tdbah(val);
        txDescCache.areaChanged();
        break;
      case REG_TDLEN:
        regs.tdlen = val & ~mask(7);
        txDescCache.areaChanged();
        break;
      case REG_TDH:
        regs.tdh = val;
        txDescCache.areaChanged();
        break;
      case REG_TDT:
        regs.tdt = val;
        txTick = true;
        restartClock();
        break;
      case REG_TIDV:
        regs.tidv = val;
        break;
      case REG_TXDCTL:
        regs.txdctl = val;
        break;
      case REG_TADV:
        regs.tadv = val;
        break;
      case REG_RXCSUM:
        regs.rxcsum = val;
        break;
      case REG_MANC:
        regs.manc = val;
        break;
      default:
        if (!(daddr >= REG_VFTA && daddr < (REG_VFTA + VLAN_FILTER_TABLE_SIZE*4)) &&
            !(daddr >= REG_RAL && daddr < (REG_RAL + RCV_ADDRESS_TABLE_SIZE*8)) &&
            !(daddr >= REG_MTA && daddr < (REG_MTA + MULTICAST_TABLE_SIZE*4)))
            panic("Write request to unknown register number: %#x\n", daddr);
    };

    pkt->makeAtomicResponse();
    return pioDelay;
}

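// Record an interrupt cause in ICR and either raise the interrupt line
// immediately or, if an ITR throttling interval is programmed, schedule it.
// The ITR interval is expressed in 256 ns increments, which is why the
// scheduling below scales it by Clock::Int::ns * 256.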
void
IGbE::postInterrupt(IntTypes t, bool now)
{
    assert(t);

    // Interrupt is already pending
    if (t & regs.icr())
        return;

    if (regs.icr() & regs.imr) {
        regs.icr = regs.icr() | t;
        if (!interEvent.scheduled())
            interEvent.schedule(curTick + Clock::Int::ns * 256 *
                    regs.itr.interval());
    } else {
        regs.icr = regs.icr() | t;
        if (regs.itr.interval() == 0 || now) {
            if (interEvent.scheduled())
                interEvent.deschedule();
            cpuPostInt();
        } else {
            DPRINTF(EthernetIntr, "EINT: Scheduling timer interrupt for %d ticks\n",
                    Clock::Int::ns * 256 * regs.itr.interval());
            if (!interEvent.scheduled())
                interEvent.schedule(curTick + Clock::Int::ns * 256 * regs.itr.interval());
        }
    }
}

void
IGbE::cpuPostInt()
{
    if (rdtrEvent.scheduled()) {
        regs.icr.rxt0(1);
        rdtrEvent.deschedule();
    }
    if (radvEvent.scheduled()) {
        regs.icr.rxt0(1);
        radvEvent.deschedule();
    }
    if (tadvEvent.scheduled()) {
        regs.icr.txdw(1);
        tadvEvent.deschedule();
    }
    if (tidvEvent.scheduled()) {
        regs.icr.txdw(1);
        tidvEvent.deschedule();
    }

    regs.icr.int_assert(1);
    DPRINTF(EthernetIntr, "EINT: Posting interrupt to CPU now. Vector %#x\n",
            regs.icr());
    intrPost();
}

void
IGbE::cpuClearInt()
{
    if (regs.icr.int_assert()) {
        regs.icr.int_assert(0);
        DPRINTF(EthernetIntr, "EINT: Clearing interrupt to CPU now. Vector %#x\n",
                regs.icr());
        intrClear();
    }
}

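// Re-evaluate the interrupt state after ICR or IMR changes: drop the CPU
// interrupt line if no enabled causes remain, otherwise post immediately
// (or after the programmed ITR interval).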
void
IGbE::chkInterrupt()
{
    // Check if we need to clear the cpu interrupt
    if (!(regs.icr() & regs.imr)) {
        if (interEvent.scheduled())
            interEvent.deschedule();
        if (regs.icr.int_assert())
            cpuClearInt();
    }

    if (regs.icr() & regs.imr) {
        if (regs.itr.interval() == 0) {
            cpuPostInt();
        } else {
            if (!interEvent.scheduled())
                interEvent.schedule(curTick + Clock::Int::ns * 256 * regs.itr.interval());
        }
    }
}


IGbE::RxDescCache::RxDescCache(IGbE *i, const std::string n, int s)
    : DescCache<RxDesc>(i, n, s), pktDone(false), pktEvent(this)
{
}

bool
IGbE::RxDescCache::writePacket(EthPacketPtr packet)
{
    // We shouldn't have to deal with any of these yet
    DPRINTF(EthernetDesc, "Packet Length: %d Desc Size: %d\n",
            packet->length, igbe->regs.rctl.descSize());
    assert(packet->length < igbe->regs.rctl.descSize());

    if (!unusedCache.size())
        return false;

    pktPtr = packet;
    pktDone = false;
    igbe->dmaWrite(igbe->platform->pciToDma(unusedCache.front()->buf),
                   packet->length, &pktEvent, packet->data);
    return true;
}

void
IGbE::RxDescCache::pktComplete()
{
    assert(unusedCache.size());
    RxDesc *desc;
    desc = unusedCache.front();

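    // Unless the driver asked for the Ethernet CRC to be stripped (SECRC),
    // report a length that includes the 4-byte CRC the real hardware would
    // have appended (the model never actually adds those bytes).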
    uint16_t crcfixup = igbe->regs.rctl.secrc() ? 0 : 4 ;
    desc->len = htole((uint16_t)(pktPtr->length + crcfixup));
    DPRINTF(EthernetDesc, "pktPtr->length: %d stripcrc offset: %d value written: %d %d\n",
            pktPtr->length, crcfixup,
            htole((uint16_t)(pktPtr->length + crcfixup)),
            (uint16_t)(pktPtr->length + crcfixup));

    // no support for anything but starting at 0
    assert(igbe->regs.rxcsum.pcss() == 0);

    DPRINTF(EthernetDesc, "Packet written to memory updating Descriptor\n");

    uint8_t status = RXDS_DD | RXDS_EOP;
    uint8_t err = 0;

    IpPtr ip(pktPtr);

    if (ip) {
        DPRINTF(EthernetDesc, "Processing IP packet with Id=%d\n", ip->id());

        if (igbe->regs.rxcsum.ipofld()) {
            DPRINTF(EthernetDesc, "Checking IP checksum\n");
            status |= RXDS_IPCS;
            desc->csum = htole(cksum(ip));
            if (cksum(ip) != 0) {
                err |= RXDE_IPE;
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
            }
        }
        TcpPtr tcp(ip);
        if (tcp && igbe->regs.rxcsum.tuofld()) {
            DPRINTF(EthernetDesc, "Checking TCP checksum\n");
            status |= RXDS_TCPCS;
            desc->csum = htole(cksum(tcp));
            if (cksum(tcp) != 0) {
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
                err |= RXDE_TCPE;
            }
        }

        UdpPtr udp(ip);
        if (udp && igbe->regs.rxcsum.tuofld()) {
            DPRINTF(EthernetDesc, "Checking UDP checksum\n");
            status |= RXDS_UDPCS;
            desc->csum = htole(cksum(udp));
            if (cksum(udp) != 0) {
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
                err |= RXDE_TCPE;
            }
        }
    } else { // if ip
        DPRINTF(EthernetSM, "Processing non-IP packet\n");
    }

    desc->status = htole(status);
    desc->errors = htole(err);

    // No vlan support at this point... just set it to 0
    desc->vlan = 0;

    // Deal with the rx timer interrupts
    if (igbe->regs.rdtr.delay()) {
        DPRINTF(EthernetSM, "RXS: Scheduling DTR for %d\n",
                igbe->regs.rdtr.delay() * igbe->intClock());
        igbe->rdtrEvent.reschedule(curTick + igbe->regs.rdtr.delay() *
                igbe->intClock(), true);
    }

    if (igbe->regs.radv.idv() && igbe->regs.rdtr.delay()) {
        DPRINTF(EthernetSM, "RXS: Scheduling ADV for %d\n",
                igbe->regs.radv.idv() * igbe->intClock());
        if (!igbe->radvEvent.scheduled())
            igbe->radvEvent.schedule(curTick + igbe->regs.radv.idv() *
                    igbe->intClock());
    }

    // if neither radv nor rdtr is set, maybe itr is set...
    if (!igbe->regs.rdtr.delay()) {
        DPRINTF(EthernetSM, "RXS: Receive interrupt delay disabled, posting IT_RXT\n");
        igbe->postInterrupt(IT_RXT);
    }

    // If the packet is small enough, interrupt appropriately
    // I wonder if this is delayed or not?!
    if (pktPtr->length <= igbe->regs.rsrpd.idv()) {
        DPRINTF(EthernetSM, "RXS: Posting IT_SRPD because a small packet was received\n");
        igbe->postInterrupt(IT_SRPD);
    }

    DPRINTF(EthernetDesc, "Processing of this descriptor complete\n");
    unusedCache.pop_front();
    usedCache.push_back(desc);
    pktPtr = NULL;
    enableSm();
    pktDone = true;
    igbe->checkDrain();
}

void
IGbE::RxDescCache::enableSm()
{
    igbe->rxTick = true;
    igbe->restartClock();
}

bool
IGbE::RxDescCache::packetDone()
{
    if (pktDone) {
        pktDone = false;
        return true;
    }
    return false;
}

bool
IGbE::RxDescCache::hasOutstandingEvents()
{
    return pktEvent.scheduled() || wbEvent.scheduled() ||
        fetchEvent.scheduled();
}

void
IGbE::RxDescCache::serialize(std::ostream &os)
{
    DescCache<RxDesc>::serialize(os);
    SERIALIZE_SCALAR(pktDone);
}

void
IGbE::RxDescCache::unserialize(Checkpoint *cp, const std::string &section)
{
    DescCache<RxDesc>::unserialize(cp, section);
    UNSERIALIZE_SCALAR(pktDone);
}


///////////////////////////////////// IGbE::TxDesc /////////////////////////////////

IGbE::TxDescCache::TxDescCache(IGbE *i, const std::string n, int s)
    : DescCache<TxDesc>(i,n, s), pktDone(false), isTcp(false), pktWaiting(false),
      pktEvent(this)
{
}

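// Skip over any context descriptors at the head of the cached ring (noting
// whether the coming payload is TCP), then return the byte length of the
// next data descriptor, or -1 if no usable descriptor is cached yet.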
int
IGbE::TxDescCache::getPacketSize()
{
    assert(unusedCache.size());

    TxDesc *desc;

    DPRINTF(EthernetDesc, "Starting processing of descriptor\n");

    while (unusedCache.size() && TxdOp::isContext(unusedCache.front())) {
        DPRINTF(EthernetDesc, "Got context descriptor type... skipping\n");

        // I think we can just ignore these for now?
        desc = unusedCache.front();
        // is this going to be a tcp or udp packet?
        isTcp = TxdOp::tcp(desc) ? true : false;

        // make sure it's ipv4
        assert(TxdOp::ip(desc));

        TxdOp::setDd(desc);
        unusedCache.pop_front();
        usedCache.push_back(desc);
    }

    if (!unusedCache.size())
        return -1;

    DPRINTF(EthernetDesc, "Next TX packet is %d bytes\n",
            TxdOp::getLen(unusedCache.front()));

    return TxdOp::getLen(unusedCache.front());
}

void
IGbE::TxDescCache::getPacketData(EthPacketPtr p)
{
    assert(unusedCache.size());

    TxDesc *desc;
    desc = unusedCache.front();

    assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) && TxdOp::getLen(desc));

    pktPtr = p;

    pktWaiting = true;

    DPRINTF(EthernetDesc, "Starting DMA of packet\n");
    igbe->dmaRead(igbe->platform->pciToDma(TxdOp::getBuf(desc)),
                  TxdOp::getLen(desc), &pktEvent, p->data + p->length);
}

void
IGbE::TxDescCache::pktComplete()
{
    TxDesc *desc;
    assert(unusedCache.size());
    assert(pktPtr);

    DPRINTF(EthernetDesc, "DMA of packet complete\n");

    desc = unusedCache.front();
    assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) && TxdOp::getLen(desc));

    DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n", desc->d1, desc->d2);

    if (!TxdOp::eop(desc)) {
        // This only supports two descriptors per tx packet
        assert(pktPtr->length == 0);
        pktPtr->length = TxdOp::getLen(desc);
        unusedCache.pop_front();
        usedCache.push_back(desc);
        pktDone = true;
        pktWaiting = false;
        pktPtr = NULL;

        DPRINTF(EthernetDesc, "Partial Packet Descriptor Done\n");
        enableSm();
        return;
    }

    // Set the length of the data in the EtherPacket
    pktPtr->length += TxdOp::getLen(desc);

    // no support for vlans
    assert(!TxdOp::vle(desc));

    // we always report status
    assert(TxdOp::rs(desc));

    // we only support single packet descriptors at this point
    assert(TxdOp::eop(desc));

    // set that this packet is done
    TxdOp::setDd(desc);

    DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n", desc->d1, desc->d2);

    if (DTRACE(EthernetDesc)) {
        IpPtr ip(pktPtr);
        if (ip)
            DPRINTF(EthernetDesc, "Processing IP packet with Id=%d\n",
                    ip->id());
        else
            DPRINTF(EthernetSM, "Processing non-IP packet\n");
    }

    // Checksums are only offloaded for the new descriptor types
    if (TxdOp::isData(desc) && ( TxdOp::ixsm(desc) || TxdOp::txsm(desc)) ) {
        DPRINTF(EthernetDesc, "Calculating checksums for packet\n");
        IpPtr ip(pktPtr);

        if (TxdOp::ixsm(desc)) {
            ip->sum(0);
            ip->sum(cksum(ip));
            DPRINTF(EthernetDesc, "Calculated IP checksum\n");
        }
        if (TxdOp::txsm(desc)) {
            if (isTcp) {
                TcpPtr tcp(ip);
                assert(tcp);
                tcp->sum(0);
                tcp->sum(cksum(tcp));
                DPRINTF(EthernetDesc, "Calculated TCP checksum\n");
            } else {
                UdpPtr udp(ip);
                assert(udp);
                udp->sum(0);
                udp->sum(cksum(udp));
                DPRINTF(EthernetDesc, "Calculated UDP checksum\n");
            }
        }
    }

    if (TxdOp::ide(desc)) {
        // Deal with the tx timer interrupts
        DPRINTF(EthernetDesc, "Descriptor had IDE set\n");
        if (igbe->regs.tidv.idv()) {
            DPRINTF(EthernetDesc, "setting tidv\n");
            igbe->tidvEvent.reschedule(curTick + igbe->regs.tidv.idv() *
                    igbe->intClock(), true);
        }

        if (igbe->regs.tadv.idv() && igbe->regs.tidv.idv()) {
            DPRINTF(EthernetDesc, "setting tadv\n");
            if (!igbe->tadvEvent.scheduled())
                igbe->tadvEvent.schedule(curTick + igbe->regs.tadv.idv() *
                        igbe->intClock());
        }
    }

    unusedCache.pop_front();
    usedCache.push_back(desc);
    pktDone = true;
    pktWaiting = false;
    pktPtr = NULL;

    DPRINTF(EthernetDesc, "Descriptor Done\n");

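    // TXDCTL.WTHRESH controls write-back coalescing of completed
    // descriptors: a value of 0 writes this descriptor back right away,
    // while a non-zero threshold lets them accumulate and writes them back
    // in cache-block-sized bursts (the mask below assumes 16-byte
    // descriptors).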
    if (igbe->regs.txdctl.wthresh() == 0) {
        DPRINTF(EthernetDesc, "WTHRESH == 0, writing back descriptor\n");
        writeback(0);
    } else if (igbe->regs.txdctl.wthresh() >= usedCache.size()) {
        DPRINTF(EthernetDesc, "WTHRESH >= descriptors used, writing back descriptor\n");
        writeback((igbe->cacheBlockSize()-1)>>4);
    }
    enableSm();
    igbe->checkDrain();
}

void
IGbE::TxDescCache::serialize(std::ostream &os)
{
    DescCache<TxDesc>::serialize(os);
    SERIALIZE_SCALAR(pktDone);
    SERIALIZE_SCALAR(isTcp);
    SERIALIZE_SCALAR(pktWaiting);
}

void
IGbE::TxDescCache::unserialize(Checkpoint *cp, const std::string &section)
{
    DescCache<TxDesc>::unserialize(cp, section);
    UNSERIALIZE_SCALAR(pktDone);
    UNSERIALIZE_SCALAR(isTcp);
    UNSERIALIZE_SCALAR(pktWaiting);
}

bool
IGbE::TxDescCache::packetAvailable()
{
    if (pktDone) {
        pktDone = false;
        return true;
    }
    return false;
}

void
IGbE::TxDescCache::enableSm()
{
    igbe->txTick = true;
    igbe->restartClock();
}

bool
IGbE::TxDescCache::hasOutstandingEvents()
{
    return pktEvent.scheduled() || wbEvent.scheduled() ||
        fetchEvent.scheduled();
}


///////////////////////////////////// IGbE /////////////////////////////////

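// Kick the main tick event back into action on the next device clock edge,
// but only if one of the state machines actually has work to do and the
// device is not in the middle of draining.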
void
IGbE::restartClock()
{
    if (!tickEvent.scheduled() && (rxTick || txTick || txFifoTick) &&
        getState() == SimObject::Running)
        tickEvent.schedule((curTick/cycles(1)) * cycles(1) + cycles(1));
}

unsigned int
IGbE::drain(Event *de)
{
    unsigned int count;
    count = pioPort->drain(de) + dmaPort->drain(de);
    if (rxDescCache.hasOutstandingEvents() ||
        txDescCache.hasOutstandingEvents()) {
        count++;
        drainEvent = de;
    }

    txFifoTick = false;
    txTick = false;
    rxTick = false;

    if (tickEvent.scheduled())
        tickEvent.deschedule();

    if (count)
        changeState(Draining);
    else
        changeState(Drained);

    return count;
}

void
IGbE::resume()
{
    SimObject::resume();

    txFifoTick = true;
    txTick = true;
    rxTick = true;

    restartClock();
}

void
IGbE::checkDrain()
{
    if (!drainEvent)
        return;

    // Signal the drain event once no descriptor cache events remain
    if (!rxDescCache.hasOutstandingEvents() &&
        !txDescCache.hasOutstandingEvents()) {
        drainEvent->process();
        drainEvent = NULL;
    }
}

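// Run one step of the transmit state machine: move a finished packet into
// the TX FIFO, post TXDLOW when the free-descriptor low-water mark is hit,
// and otherwise fetch descriptors or start DMA for the next packet. The
// machine stops ticking whenever it has nothing left to do.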
void
IGbE::txStateMachine()
{
    if (!regs.tctl.en()) {
        txTick = false;
        DPRINTF(EthernetSM, "TXS: TX disabled, stopping ticking\n");
        return;
    }

    // If we have a packet available and its length is not 0 (meaning it's not
    // a multidescriptor packet) put it in the fifo, otherwise on the next
    // iteration we'll get the rest of the data
    if (txPacket && txDescCache.packetAvailable() && txPacket->length) {
        bool success;
        DPRINTF(EthernetSM, "TXS: packet placed in TX FIFO\n");
        success = txFifo.push(txPacket);
        txFifoTick = true;
        assert(success);
        txPacket = NULL;
        txDescCache.writeback((cacheBlockSize()-1)>>4);
        return;
    }

    // Only support descriptor granularity
    assert(regs.txdctl.gran());
    if (regs.txdctl.lwthresh() && txDescCache.descLeft() < (regs.txdctl.lwthresh() * 8)) {
        DPRINTF(EthernetSM, "TXS: LWTHRESH caused posting of TXDLOW\n");
        postInterrupt(IT_TXDLOW);
    }

    if (!txPacket) {
        txPacket = new EthPacketData(16384);
    }

    if (!txDescCache.packetWaiting()) {
        if (txDescCache.descLeft() == 0) {
            DPRINTF(EthernetSM, "TXS: No descriptors left in ring, forcing "
                    "writeback, stopping ticking and posting TXQE\n");
            txDescCache.writeback(0);
            txTick = false;
            postInterrupt(IT_TXQE, true);
            return;
        }

        if (!(txDescCache.descUnused())) {
            DPRINTF(EthernetSM, "TXS: No descriptors available in cache, fetching and stopping ticking\n");
            txTick = false;
            txDescCache.fetchDescriptors();
            return;
        }

        int size;
        size = txDescCache.getPacketSize();
        if (size > 0 && txFifo.avail() > size) {
            DPRINTF(EthernetSM, "TXS: Reserving %d bytes in FIFO and beginning "
                    "DMA of next packet\n", size);
            txFifo.reserve(size);
            txDescCache.getPacketData(txPacket);
        } else if (size <= 0) {
            DPRINTF(EthernetSM, "TXS: No packets to get, writing back used descriptors\n");
            txDescCache.writeback(0);
        } else {
            DPRINTF(EthernetSM, "TXS: FIFO full, stopping ticking until space "
                    "available in FIFO\n");
            txDescCache.writeback((cacheBlockSize()-1)>>4);
            txTick = false;
        }

        return;
    }
    DPRINTF(EthernetSM, "TXS: Nothing to do, stopping ticking\n");
    txTick = false;
}

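// Called by the link when a packet arrives from the wire. Packets are
// silently dropped while RX is disabled; if the RX FIFO is full the packet
// is dropped and an RXO (overrun) interrupt is posted immediately.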
bool
IGbE::ethRxPkt(EthPacketPtr pkt)
{
    DPRINTF(Ethernet, "RxFIFO: Receiving packet from wire\n");
    if (!regs.rctl.en()) {
        DPRINTF(Ethernet, "RxFIFO: RX not enabled, dropping\n");
        return true;
    }

    // restart the state machines if they are stopped
    rxTick = true;
    if ((rxTick || txTick) && !tickEvent.scheduled()) {
        DPRINTF(EthernetSM, "RXS: received packet into fifo, starting ticking\n");
        restartClock();
    }

    if (!rxFifo.push(pkt)) {
        DPRINTF(Ethernet, "RxFIFO: Packet won't fit in fifo... dropped\n");
        postInterrupt(IT_RXO, true);
        return false;
    }
    return true;
}

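// Run one step of the receive state machine: after a packet finishes its
// DMA, update descriptor bookkeeping and post any threshold interrupts;
// otherwise start writing the next FIFO packet into a cached descriptor.
// The machine stops ticking when it is waiting on DMA, descriptors, or
// incoming packets.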
void
IGbE::rxStateMachine()
{
    if (!regs.rctl.en()) {
        rxTick = false;
        DPRINTF(EthernetSM, "RXS: RX disabled, stopping ticking\n");
        return;
    }

    // If the packet is done check for interrupts/descriptors/etc
    if (rxDescCache.packetDone()) {
        rxDmaPacket = false;
        DPRINTF(EthernetSM, "RXS: Packet completed DMA to memory\n");
        int descLeft = rxDescCache.descLeft();
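        // RCTL.RDMTS picks the free-descriptor threshold as 1/2, 1/4 or
        // 1/8 of the ring; the cases below intentionally fall through so
        // that RXDMT is posted once the remaining descriptors drop to the
        // selected fraction.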
        switch (regs.rctl.rdmts()) {
          case 2: if (descLeft > .125 * regs.rdlen()) break;
          case 1: if (descLeft > .250 * regs.rdlen()) break;
          case 0: if (descLeft > .500 * regs.rdlen()) break;
            DPRINTF(Ethernet, "RXS: Interrupting (RXDMT) because of descriptors left\n");
            postInterrupt(IT_RXDMT);
            break;
        }

        if (descLeft == 0) {
            DPRINTF(EthernetSM, "RXS: No descriptors left in ring, forcing"
                    " writeback and stopping ticking\n");
            rxDescCache.writeback(0);
            rxTick = false;
        }

        // only support descriptor granularities
        assert(regs.rxdctl.gran());

        if (regs.rxdctl.wthresh() >= rxDescCache.descUsed()) {
            DPRINTF(EthernetSM, "RXS: Writing back because WTHRESH >= descUsed\n");
            if (regs.rxdctl.wthresh() < (cacheBlockSize()>>4))
                rxDescCache.writeback(regs.rxdctl.wthresh()-1);
            else
                rxDescCache.writeback((cacheBlockSize()-1)>>4);
        }

        if ((rxDescCache.descUnused() < regs.rxdctl.pthresh()) &&
            ((rxDescCache.descLeft() - rxDescCache.descUnused()) > regs.rxdctl.hthresh())) {
            DPRINTF(EthernetSM, "RXS: Fetching descriptors because descUnused < PTHRESH\n");
            rxDescCache.fetchDescriptors();
        }

        if (rxDescCache.descUnused() == 0) {
            DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
                    "fetching descriptors and stopping ticking\n");
            rxTick = false;
            rxDescCache.fetchDescriptors();
        }
        return;
    }

    if (rxDmaPacket) {
        DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n");
        rxTick = false;
        return;
    }

    if (!rxDescCache.descUnused()) {
        DPRINTF(EthernetSM, "RXS: No descriptors available in cache, stopping ticking\n");
        rxTick = false;
        DPRINTF(EthernetSM, "RXS: No descriptors available, fetching\n");
        rxDescCache.fetchDescriptors();
        return;
    }

    if (rxFifo.empty()) {
        DPRINTF(EthernetSM, "RXS: RxFIFO empty, stopping ticking\n");
        rxTick = false;
        return;
    }

    EthPacketPtr pkt;
    pkt = rxFifo.front();

    DPRINTF(EthernetSM, "RXS: Writing packet into memory\n");
    if (!rxDescCache.writePacket(pkt)) {
        return;
    }

    DPRINTF(EthernetSM, "RXS: Removing packet from FIFO\n");
    rxFifo.pop();
    DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n");
    rxTick = false;
    rxDmaPacket = true;
}

void
IGbE::txWire()
{
    if (txFifo.empty()) {
        txFifoTick = false;
        return;
    }

    if (etherInt->sendPacket(txFifo.front())) {
        DPRINTF(EthernetSM, "TxFIFO: Successful transmit, bytes available in fifo: %d\n",
                txFifo.avail());
        txFifo.pop();
    } else {
        // We'll get woken up when ethTxDone() is called for this packet
        txFifoTick = false;
    }
}

void
IGbE::tick()
{
    DPRINTF(EthernetSM, "IGbE: -------------- Cycle --------------\n");

    if (rxTick)
        rxStateMachine();

    if (txTick)
        txStateMachine();

    if (txFifoTick)
        txWire();

    if (rxTick || txTick || txFifoTick)
        tickEvent.schedule(curTick + cycles(1));
}

void
IGbE::ethTxDone()
{
    // restart the tx state machines if they are stopped
    // fifo to send another packet
    // tx sm to put more data into the fifo
    txFifoTick = true;
    txTick = true;

    restartClock();
    DPRINTF(EthernetSM, "TxFIFO: Transmission complete\n");
}

void
IGbE::serialize(std::ostream &os)
{
    PciDev::serialize(os);

    regs.serialize(os);
    SERIALIZE_SCALAR(eeOpBits);
    SERIALIZE_SCALAR(eeAddrBits);
    SERIALIZE_SCALAR(eeDataBits);
    SERIALIZE_SCALAR(eeOpcode);
    SERIALIZE_SCALAR(eeAddr);
    SERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);

    rxFifo.serialize("rxfifo", os);
    txFifo.serialize("txfifo", os);

    bool txPktExists = txPacket;
    SERIALIZE_SCALAR(txPktExists);
    if (txPktExists)
        txPacket->serialize("txpacket", os);

    Tick rdtr_time = 0, radv_time = 0, tidv_time = 0, tadv_time = 0,
         inter_time = 0;

    if (rdtrEvent.scheduled())
        rdtr_time = rdtrEvent.when();
    SERIALIZE_SCALAR(rdtr_time);

    if (radvEvent.scheduled())
        radv_time = radvEvent.when();
    SERIALIZE_SCALAR(radv_time);

    if (tidvEvent.scheduled())
        tidv_time = tidvEvent.when();
    SERIALIZE_SCALAR(tidv_time);

    if (tadvEvent.scheduled())
        tadv_time = tadvEvent.when();
    SERIALIZE_SCALAR(tadv_time);

    if (interEvent.scheduled())
        inter_time = interEvent.when();
    SERIALIZE_SCALAR(inter_time);

    nameOut(os, csprintf("%s.TxDescCache", name()));
    txDescCache.serialize(os);

    nameOut(os, csprintf("%s.RxDescCache", name()));
    rxDescCache.serialize(os);
}

void
IGbE::unserialize(Checkpoint *cp, const std::string &section)
{
    PciDev::unserialize(cp, section);

    regs.unserialize(cp, section);
    UNSERIALIZE_SCALAR(eeOpBits);
    UNSERIALIZE_SCALAR(eeAddrBits);
    UNSERIALIZE_SCALAR(eeDataBits);
    UNSERIALIZE_SCALAR(eeOpcode);
    UNSERIALIZE_SCALAR(eeAddr);
    UNSERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);

    rxFifo.unserialize("rxfifo", cp, section);
    txFifo.unserialize("txfifo", cp, section);

    bool txPktExists;
    UNSERIALIZE_SCALAR(txPktExists);
    if (txPktExists) {
        txPacket = new EthPacketData(16384);
        txPacket->unserialize("txpacket", cp, section);
    }

    rxTick = true;
    txTick = true;
    txFifoTick = true;

    Tick rdtr_time, radv_time, tidv_time, tadv_time, inter_time;
    UNSERIALIZE_SCALAR(rdtr_time);
    UNSERIALIZE_SCALAR(radv_time);
    UNSERIALIZE_SCALAR(tidv_time);
    UNSERIALIZE_SCALAR(tadv_time);
    UNSERIALIZE_SCALAR(inter_time);

    if (rdtr_time)
        rdtrEvent.schedule(rdtr_time);

    if (radv_time)
        radvEvent.schedule(radv_time);

    if (tidv_time)
        tidvEvent.schedule(tidv_time);

    if (tadv_time)
        tadvEvent.schedule(tadv_time);

    if (inter_time)
        interEvent.schedule(inter_time);

    txDescCache.unserialize(cp, csprintf("%s.TxDescCache", section));

    rxDescCache.unserialize(cp, csprintf("%s.RxDescCache", section));
}


BEGIN_DECLARE_SIM_OBJECT_PARAMS(IGbEInt)

    SimObjectParam<EtherInt *> peer;
    SimObjectParam<IGbE *> device;

END_DECLARE_SIM_OBJECT_PARAMS(IGbEInt)

BEGIN_INIT_SIM_OBJECT_PARAMS(IGbEInt)

    INIT_PARAM_DFLT(peer, "peer interface", NULL),
    INIT_PARAM(device, "Ethernet device of this interface")

END_INIT_SIM_OBJECT_PARAMS(IGbEInt)

CREATE_SIM_OBJECT(IGbEInt)
{
    IGbEInt *dev_int = new IGbEInt(getInstanceName(), device);

    EtherInt *p = (EtherInt *)peer;
    if (p) {
        dev_int->setPeer(p);
        p->setPeer(dev_int);
    }

    return dev_int;
}

REGISTER_SIM_OBJECT("IGbEInt", IGbEInt)


BEGIN_DECLARE_SIM_OBJECT_PARAMS(IGbE)

    SimObjectParam<System *> system;
    SimObjectParam<Platform *> platform;
    Param<Tick> min_backoff_delay;
    Param<Tick> max_backoff_delay;
    SimObjectParam<PciConfigData *> configdata;
    Param<uint32_t> pci_bus;
    Param<uint32_t> pci_dev;
    Param<uint32_t> pci_func;
    Param<Tick> pio_latency;
    Param<Tick> config_latency;
    Param<std::string> hardware_address;
    Param<bool> use_flow_control;
    Param<int> rx_fifo_size;
    Param<int> tx_fifo_size;
    Param<int> rx_desc_cache_size;
    Param<int> tx_desc_cache_size;
    Param<Tick> clock;

END_DECLARE_SIM_OBJECT_PARAMS(IGbE)

BEGIN_INIT_SIM_OBJECT_PARAMS(IGbE)

    INIT_PARAM(system, "System pointer"),
    INIT_PARAM(platform, "Platform pointer"),
    INIT_PARAM(min_backoff_delay, "Minimum delay after receiving a nack packet"),
    INIT_PARAM(max_backoff_delay, "Maximum delay after receiving a nack packet"),
    INIT_PARAM(configdata, "PCI Config data"),
    INIT_PARAM(pci_bus, "PCI bus ID"),
    INIT_PARAM(pci_dev, "PCI device number"),
    INIT_PARAM(pci_func, "PCI function code"),
    INIT_PARAM_DFLT(pio_latency, "Programmed IO latency in bus cycles", 1),
    INIT_PARAM(config_latency, "Number of cycles for a config read or write"),
    INIT_PARAM(hardware_address, "Ethernet Hardware Address"),
    INIT_PARAM(use_flow_control, "Should the device use xon/xoff packets"),
    INIT_PARAM(rx_fifo_size, "Size of the RX FIFO"),
    INIT_PARAM(tx_fifo_size, "Size of the TX FIFO"),
    INIT_PARAM(rx_desc_cache_size, "Size of the RX descriptor cache"),
    INIT_PARAM(tx_desc_cache_size, "Size of the TX descriptor cache"),
    INIT_PARAM(clock, "Clock rate for the device to tick at")

END_INIT_SIM_OBJECT_PARAMS(IGbE)


CREATE_SIM_OBJECT(IGbE)
{
    IGbE::Params *params = new IGbE::Params;

    params->name = getInstanceName();
    params->platform = platform;
    params->system = system;
    params->min_backoff_delay = min_backoff_delay;
    params->max_backoff_delay = max_backoff_delay;
    params->configData = configdata;
    params->busNum = pci_bus;
    params->deviceNum = pci_dev;
    params->functionNum = pci_func;
    params->pio_delay = pio_latency;
    params->config_delay = config_latency;
    params->hardware_address = hardware_address;
    params->use_flow_control = use_flow_control;
    params->rx_fifo_size = rx_fifo_size;
    params->tx_fifo_size = tx_fifo_size;
    params->rx_desc_cache_size = rx_desc_cache_size;
    params->tx_desc_cache_size = tx_desc_cache_size;
    params->clock = clock;

    return new IGbE(params);
}

REGISTER_SIM_OBJECT("IGbE", IGbE)