1 /*
2 * Copyright (c) 2006 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Ali Saidi
29 */
30
31 /* @file
32 * Device model for Intel's 8254x line of gigabit ethernet controllers.
33  * In particular, it models an 82547 revision 2 (82547GI) MAC, since that part
34  * seems to require the fewest workarounds in the driver. It will probably work
35  * with most of the other MACs with slight modifications.
36 */
37
38
39 /*
40  * @todo There are really multiple DMA engines; we should implement them.
41 */
42
43 #include "base/inet.hh"
44 #include "base/trace.hh"
45 #include "dev/i8254xGBe.hh"
46 #include "mem/packet.hh"
47 #include "mem/packet_access.hh"
48 #include "sim/builder.hh"
49 #include "sim/stats.hh"
50 #include "sim/system.hh"
51
52 #include <algorithm>
53
54 using namespace iGbReg;
55 using namespace Net;
56
57 IGbE::IGbE(Params *p)
58 : PciDev(p), etherInt(NULL), drainEvent(NULL), useFlowControl(p->use_flow_control),
59 rxFifo(p->rx_fifo_size), txFifo(p->tx_fifo_size), rxTick(false),
60 txTick(false), txFifoTick(false), rdtrEvent(this), radvEvent(this),
61 tadvEvent(this), tidvEvent(this), tickEvent(this), interEvent(this),
62 rxDescCache(this, name()+".RxDesc", p->rx_desc_cache_size),
63 txDescCache(this, name()+".TxDesc", p->tx_desc_cache_size), clock(p->clock)
64 {
65     // Initialize internal registers per Intel documentation
66     // All registers are initialized to 0 by the per-register constructors
67 regs.ctrl.fd(1);
68 regs.ctrl.lrst(1);
69 regs.ctrl.speed(2);
70 regs.ctrl.frcspd(1);
71 regs.sts.speed(3); // Say we're 1000Mbps
72 regs.sts.fd(1); // full duplex
73 regs.sts.lu(1); // link up
74 regs.eecd.fwe(1);
75 regs.eecd.ee_type(1);
76 regs.imr = 0;
77 regs.iam = 0;
78 regs.rxdctl.gran(1);
79 regs.rxdctl.wthresh(1);
80 regs.fcrth(1);
81
82 regs.pba.rxa(0x30);
83 regs.pba.txa(0x10);
84
85 eeOpBits = 0;
86 eeAddrBits = 0;
87 eeDataBits = 0;
88 eeOpcode = 0;
89
90     // Clear all 64 16-bit words of the EEPROM
91 memset(&flash, 0, EEPROM_SIZE*2);
92
93 // Set the MAC address
94 memcpy(flash, p->hardware_address.bytes(), ETH_ADDR_LEN);
95 for (int x = 0; x < ETH_ADDR_LEN/2; x++)
96 flash[x] = htobe(flash[x]);
97
98 uint16_t csum = 0;
99 for (int x = 0; x < EEPROM_SIZE; x++)
100 csum += htobe(flash[x]);
101
102
103     // Write the value that makes the EEPROM words sum to the magic checksum (EEPROM_CSUM)
104 flash[EEPROM_SIZE-1] = htobe((uint16_t)(EEPROM_CSUM - csum));
105
106 rxFifo.clear();
107 txFifo.clear();
108 }
109
110
111 Tick
112 IGbE::writeConfig(PacketPtr pkt)
113 {
114 int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
115 if (offset < PCI_DEVICE_SPECIFIC)
116 PciDev::writeConfig(pkt);
117 else
118 panic("Device specific PCI config space not implemented.\n");
119
120 ///
121     /// Some work may need to be done here based on the PCI COMMAND bits.
122 ///
123
124 return pioDelay;
125 }
126
127 Tick
128 IGbE::read(PacketPtr pkt)
129 {
130 int bar;
131 Addr daddr;
132
133 if (!getBAR(pkt->getAddr(), bar, daddr))
134 panic("Invalid PCI memory access to unmapped memory.\n");
135
136 // Only Memory register BAR is allowed
137 assert(bar == 0);
138
139 // Only 32bit accesses allowed
140 assert(pkt->getSize() == 4);
141
142 DPRINTF(Ethernet, "Read device register %#X\n", daddr);
143
144 pkt->allocate();
145
146 ///
147 /// Handle read of register here
148 ///
149
150
151 switch (daddr) {
152 case REG_CTRL:
153 pkt->set<uint32_t>(regs.ctrl());
154 break;
155 case REG_STATUS:
156 pkt->set<uint32_t>(regs.sts());
157 break;
158 case REG_EECD:
159 pkt->set<uint32_t>(regs.eecd());
160 break;
161 case REG_EERD:
162 pkt->set<uint32_t>(regs.eerd());
163 break;
164 case REG_CTRL_EXT:
165 pkt->set<uint32_t>(regs.ctrl_ext());
166 break;
167 case REG_MDIC:
168 pkt->set<uint32_t>(regs.mdic());
169 break;
170 case REG_ICR:
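        // ICR is largely clear-on-read: if an interrupt is currently asserted
        // or no causes are unmasked, the read clears the cause bits, and with
        // CTRL_EXT.IAME set and an interrupt asserted, the IAM bits are also
        // cleared from IMR.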
171 DPRINTF(Ethernet, "Reading ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n", regs.icr(),
172 regs.imr, regs.iam, regs.ctrl_ext.iame());
173 pkt->set<uint32_t>(regs.icr());
174 if (regs.icr.int_assert() || regs.imr == 0) {
175 regs.icr = regs.icr() & ~mask(30);
176 DPRINTF(Ethernet, "Cleared ICR. ICR=%#x\n", regs.icr());
177 }
178 if (regs.ctrl_ext.iame() && regs.icr.int_assert())
179 regs.imr &= ~regs.iam;
180 chkInterrupt();
181 break;
182 case REG_ITR:
183 pkt->set<uint32_t>(regs.itr());
184 break;
185 case REG_RCTL:
186 pkt->set<uint32_t>(regs.rctl());
187 break;
188 case REG_FCTTV:
189 pkt->set<uint32_t>(regs.fcttv());
190 break;
191 case REG_TCTL:
192 pkt->set<uint32_t>(regs.tctl());
193 break;
194 case REG_PBA:
195 pkt->set<uint32_t>(regs.pba());
196 break;
197 case REG_WUC:
198 case REG_LEDCTL:
199 pkt->set<uint32_t>(0); // We don't care, so just return 0
200 break;
201 case REG_FCRTL:
202 pkt->set<uint32_t>(regs.fcrtl());
203 break;
204 case REG_FCRTH:
205 pkt->set<uint32_t>(regs.fcrth());
206 break;
207 case REG_RDBAL:
208 pkt->set<uint32_t>(regs.rdba.rdbal());
209 break;
210 case REG_RDBAH:
211 pkt->set<uint32_t>(regs.rdba.rdbah());
212 break;
213 case REG_RDLEN:
214 pkt->set<uint32_t>(regs.rdlen());
215 break;
216 case REG_RDH:
217 pkt->set<uint32_t>(regs.rdh());
218 break;
219 case REG_RDT:
220 pkt->set<uint32_t>(regs.rdt());
221 break;
222 case REG_RDTR:
223 pkt->set<uint32_t>(regs.rdtr());
224 if (regs.rdtr.fpd()) {
225 rxDescCache.writeback(0);
226 DPRINTF(EthernetIntr, "Posting interrupt because of RDTR.FPD write\n");
227 postInterrupt(IT_RXT);
228 regs.rdtr.fpd(0);
229 }
230 break;
231 case REG_RADV:
232 pkt->set<uint32_t>(regs.radv());
233 break;
234 case REG_TDBAL:
235 pkt->set<uint32_t>(regs.tdba.tdbal());
236 break;
237 case REG_TDBAH:
238 pkt->set<uint32_t>(regs.tdba.tdbah());
239 break;
240 case REG_TDLEN:
241 pkt->set<uint32_t>(regs.tdlen());
242 break;
243 case REG_TDH:
244 pkt->set<uint32_t>(regs.tdh());
245 break;
246 case REG_TDT:
247 pkt->set<uint32_t>(regs.tdt());
248 break;
249 case REG_TIDV:
250 pkt->set<uint32_t>(regs.tidv());
251 break;
252 case REG_TXDCTL:
253 pkt->set<uint32_t>(regs.txdctl());
254 break;
255 case REG_TADV:
256 pkt->set<uint32_t>(regs.tadv());
257 break;
258 case REG_RXCSUM:
259 pkt->set<uint32_t>(regs.rxcsum());
260 break;
261 case REG_MANC:
262 pkt->set<uint32_t>(regs.manc());
263 break;
264 default:
265 if (!(daddr >= REG_VFTA && daddr < (REG_VFTA + VLAN_FILTER_TABLE_SIZE*4)) &&
266 !(daddr >= REG_RAL && daddr < (REG_RAL + RCV_ADDRESS_TABLE_SIZE*8)) &&
267 !(daddr >= REG_MTA && daddr < (REG_MTA + MULTICAST_TABLE_SIZE*4)) &&
268 !(daddr >= REG_CRCERRS && daddr < (REG_CRCERRS + STATS_REGS_SIZE)))
269 panic("Read request to unknown register number: %#x\n", daddr);
270 else
271 pkt->set<uint32_t>(0);
272 };
273
274 pkt->result = Packet::Success;
275 return pioDelay;
276 }
277
278 Tick
279 IGbE::write(PacketPtr pkt)
280 {
281 int bar;
282 Addr daddr;
283
284
285 if (!getBAR(pkt->getAddr(), bar, daddr))
286 panic("Invalid PCI memory access to unmapped memory.\n");
287
288 // Only Memory register BAR is allowed
289 assert(bar == 0);
290
291 // Only 32bit accesses allowed
292 assert(pkt->getSize() == sizeof(uint32_t));
293
294 DPRINTF(Ethernet, "Wrote device register %#X value %#X\n", daddr, pkt->get<uint32_t>());
295
296 ///
297 /// Handle write of register here
298 ///
299 uint32_t val = pkt->get<uint32_t>();
300
301 Regs::RCTL oldrctl;
302 Regs::TCTL oldtctl;
303
304 switch (daddr) {
305 case REG_CTRL:
306 regs.ctrl = val;
307 if (regs.ctrl.tfce())
308 warn("TX Flow control enabled, should implement\n");
309 if (regs.ctrl.rfce())
310 warn("RX Flow control enabled, should implement\n");
311 break;
312 case REG_CTRL_EXT:
313 regs.ctrl_ext = val;
314 break;
315 case REG_STATUS:
316 regs.sts = val;
317 break;
318 case REG_EECD:
319 int oldClk;
320 oldClk = regs.eecd.sk();
321 regs.eecd = val;
322         // See if this is an EEPROM access and emulate it accordingly
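        // The driver bit-bangs the EEPROM's SPI interface: on each rising
        // edge of SK one DIN bit is shifted in. The first 8 bits form the
        // opcode, the next 8 the address (for a READ), and then up to 16
        // data bits are shifted out MSB-first on DOUT.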
323 if (!oldClk && regs.eecd.sk()) {
324 if (eeOpBits < 8) {
325 eeOpcode = eeOpcode << 1 | regs.eecd.din();
326 eeOpBits++;
327 } else if (eeAddrBits < 8 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
328 eeAddr = eeAddr << 1 | regs.eecd.din();
329 eeAddrBits++;
330 } else if (eeDataBits < 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
331 assert(eeAddr>>1 < EEPROM_SIZE);
332 DPRINTF(EthernetEEPROM, "EEPROM bit read: %d word: %#X\n",
333 flash[eeAddr>>1] >> eeDataBits & 0x1, flash[eeAddr>>1]);
334 regs.eecd.dout((flash[eeAddr>>1] >> (15-eeDataBits)) & 0x1);
335 eeDataBits++;
336 } else if (eeDataBits < 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI) {
337 regs.eecd.dout(0);
338 eeDataBits++;
339 } else
340 panic("What's going on with eeprom interface? opcode:"
341 " %#x:%d addr: %#x:%d, data: %d\n", (uint32_t)eeOpcode,
342 (uint32_t)eeOpBits, (uint32_t)eeAddr,
343 (uint32_t)eeAddrBits, (uint32_t)eeDataBits);
344
345 // Reset everything for the next command
346 if ((eeDataBits == 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) ||
347 (eeDataBits == 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI)) {
348 eeOpBits = 0;
349 eeAddrBits = 0;
350 eeDataBits = 0;
351 eeOpcode = 0;
352 eeAddr = 0;
353 }
354
355 DPRINTF(EthernetEEPROM, "EEPROM: opcode: %#X:%d addr: %#X:%d\n",
356 (uint32_t)eeOpcode, (uint32_t) eeOpBits,
357 (uint32_t)eeAddr>>1, (uint32_t)eeAddrBits);
358 if (eeOpBits == 8 && !(eeOpcode == EEPROM_READ_OPCODE_SPI ||
359 eeOpcode == EEPROM_RDSR_OPCODE_SPI ))
360 panic("Unknown eeprom opcode: %#X:%d\n", (uint32_t)eeOpcode,
361 (uint32_t)eeOpBits);
362
363
364 }
365         // If the driver requests EEPROM access, grant it immediately
366 regs.eecd.ee_gnt(regs.eecd.ee_req());
367 break;
368 case REG_EERD:
369 regs.eerd = val;
370 break;
371 case REG_MDIC:
372 regs.mdic = val;
373 if (regs.mdic.i())
374 panic("No support for interrupt on mdic complete\n");
375 if (regs.mdic.phyadd() != 1)
376 panic("No support for reading anything but phy\n");
377 DPRINTF(Ethernet, "%s phy address %x\n", regs.mdic.op() == 1 ? "Writing"
378 : "Reading", regs.mdic.regadd());
379 switch (regs.mdic.regadd()) {
380 case PHY_PSTATUS:
381 regs.mdic.data(0x796D); // link up
382 break;
383 case PHY_PID:
384 regs.mdic.data(0x02A8);
385 break;
386 case PHY_EPID:
387 regs.mdic.data(0x0380);
388 break;
389 case PHY_GSTATUS:
390 regs.mdic.data(0x7C00);
391 break;
392 case PHY_EPSTATUS:
393 regs.mdic.data(0x3000);
394 break;
395 case PHY_AGC:
396 regs.mdic.data(0x180); // some random length
397 break;
398 default:
399 regs.mdic.data(0);
400 }
401 regs.mdic.r(1);
402 break;
403 case REG_ICR:
404 DPRINTF(Ethernet, "Writing ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n", regs.icr(),
405 regs.imr, regs.iam, regs.ctrl_ext.iame());
406 if (regs.ctrl_ext.iame())
407 regs.imr &= ~regs.iam;
408 regs.icr = ~bits(val,30,0) & regs.icr();
409 chkInterrupt();
410 break;
411 case REG_ITR:
412 regs.itr = val;
413 break;
414 case REG_ICS:
415 DPRINTF(EthernetIntr, "Posting interrupt because of ICS write\n");
416 postInterrupt((IntTypes)val);
417 break;
418 case REG_IMS:
419 regs.imr |= val;
420 chkInterrupt();
421 break;
422 case REG_IMC:
423 regs.imr &= ~val;
424 chkInterrupt();
425 break;
426 case REG_IAM:
427 regs.iam = val;
428 break;
429 case REG_RCTL:
430 oldrctl = regs.rctl;
431 regs.rctl = val;
432 if (regs.rctl.rst()) {
433 rxDescCache.reset();
434 DPRINTF(EthernetSM, "RXS: Got RESET!\n");
435 rxFifo.clear();
436 regs.rctl.rst(0);
437 }
438 if (regs.rctl.en())
439 rxTick = true;
440 restartClock();
441 break;
442 case REG_FCTTV:
443 regs.fcttv = val;
444 break;
445 case REG_TCTL:
446         // Capture the old value first so the enable transition below is detected
447         oldtctl = regs.tctl;
448         regs.tctl = val;
449 if (regs.tctl.en())
450 txTick = true;
451 restartClock();
452 if (regs.tctl.en() && !oldtctl.en()) {
453 txDescCache.reset();
454 }
455 break;
456 case REG_PBA:
457 regs.pba.rxa(val);
458 regs.pba.txa(64 - regs.pba.rxa());
459 break;
460 case REG_WUC:
461 case REG_LEDCTL:
462 case REG_FCAL:
463 case REG_FCAH:
464 case REG_FCT:
465 case REG_VET:
466 case REG_AIFS:
467 case REG_TIPG:
468 ; // We don't care, so don't store anything
469 break;
470 case REG_FCRTL:
471 regs.fcrtl = val;
472 break;
473 case REG_FCRTH:
474 regs.fcrth = val;
475 break;
476 case REG_RDBAL:
477 regs.rdba.rdbal( val & ~mask(4));
478 rxDescCache.areaChanged();
479 break;
480 case REG_RDBAH:
481 regs.rdba.rdbah(val);
482 rxDescCache.areaChanged();
483 break;
484 case REG_RDLEN:
485 regs.rdlen = val & ~mask(7);
486 rxDescCache.areaChanged();
487 break;
488 case REG_RDH:
489 regs.rdh = val;
490 rxDescCache.areaChanged();
491 break;
492 case REG_RDT:
493 regs.rdt = val;
494 rxTick = true;
495 restartClock();
496 break;
497 case REG_RDTR:
498 regs.rdtr = val;
499 break;
500 case REG_RADV:
501 regs.radv = val;
502 break;
503 case REG_TDBAL:
504 regs.tdba.tdbal( val & ~mask(4));
505 txDescCache.areaChanged();
506 break;
507 case REG_TDBAH:
508 regs.tdba.tdbah(val);
509 txDescCache.areaChanged();
510 break;
511 case REG_TDLEN:
512 regs.tdlen = val & ~mask(7);
513 txDescCache.areaChanged();
514 break;
515 case REG_TDH:
516 regs.tdh = val;
517 txDescCache.areaChanged();
518 break;
519 case REG_TDT:
520 regs.tdt = val;
521 txTick = true;
522 restartClock();
523 break;
524 case REG_TIDV:
525 regs.tidv = val;
526 break;
527 case REG_TXDCTL:
528 regs.txdctl = val;
529 break;
530 case REG_TADV:
531 regs.tadv = val;
532 break;
533 case REG_RXCSUM:
534 regs.rxcsum = val;
535 break;
536 case REG_MANC:
537 regs.manc = val;
538 break;
539 default:
540 if (!(daddr >= REG_VFTA && daddr < (REG_VFTA + VLAN_FILTER_TABLE_SIZE*4)) &&
541 !(daddr >= REG_RAL && daddr < (REG_RAL + RCV_ADDRESS_TABLE_SIZE*8)) &&
542 !(daddr >= REG_MTA && daddr < (REG_MTA + MULTICAST_TABLE_SIZE*4)))
543 panic("Write request to unknown register number: %#x\n", daddr);
544 };
545
546 pkt->result = Packet::Success;
547 return pioDelay;
548 }
549
550 void
551 IGbE::postInterrupt(IntTypes t, bool now)
552 {
553 assert(t);
554
555 // Interrupt is already pending
556 if (t & regs.icr())
557 return;
558
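    // Interrupt moderation: the ITR interval is in 256 ns units. New causes
    // are accumulated in ICR, and the CPU interrupt is either posted right
    // away (interval == 0 or 'now') or deferred via interEvent.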
559 if (regs.icr() & regs.imr)
560 {
561 regs.icr = regs.icr() | t;
562 if (!interEvent.scheduled())
563 interEvent.schedule(curTick + Clock::Int::ns * 256 *
564 regs.itr.interval());
565 } else {
566 regs.icr = regs.icr() | t;
567 if (regs.itr.interval() == 0 || now) {
568 if (interEvent.scheduled())
569 interEvent.deschedule();
570 cpuPostInt();
571 } else {
572 DPRINTF(EthernetIntr, "EINT: Scheduling timer interrupt for %d ticks\n",
573 Clock::Int::ns * 256 * regs.itr.interval());
574 if (!interEvent.scheduled())
575 interEvent.schedule(curTick + Clock::Int::ns * 256 * regs.itr.interval());
576 }
577 }
578 }
579
580 void
581 IGbE::cpuPostInt()
582 {
583 if (rdtrEvent.scheduled()) {
584 regs.icr.rxt0(1);
585 rdtrEvent.deschedule();
586 }
587 if (radvEvent.scheduled()) {
588 regs.icr.rxt0(1);
589 radvEvent.deschedule();
590 }
591 if (tadvEvent.scheduled()) {
592 regs.icr.txdw(1);
593 tadvEvent.deschedule();
594 }
595 if (tidvEvent.scheduled()) {
596 regs.icr.txdw(1);
597 tidvEvent.deschedule();
598 }
599
600 regs.icr.int_assert(1);
601 DPRINTF(EthernetIntr, "EINT: Posting interrupt to CPU now. Vector %#x\n",
602 regs.icr());
603 intrPost();
604 }
605
606 void
607 IGbE::cpuClearInt()
608 {
609 if (regs.icr.int_assert()) {
610 regs.icr.int_assert(0);
611 DPRINTF(EthernetIntr, "EINT: Clearing interrupt to CPU now. Vector %#x\n",
612 regs.icr());
613 intrClear();
614 }
615 }
616
617 void
618 IGbE::chkInterrupt()
619 {
620 // Check if we need to clear the cpu interrupt
621 if (!(regs.icr() & regs.imr)) {
622 if (interEvent.scheduled())
623 interEvent.deschedule();
624 if (regs.icr.int_assert())
625 cpuClearInt();
626 }
627
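    // If any unmasked causes remain, either post immediately (ITR disabled)
    // or schedule the throttled interrupt event.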
628 if (regs.icr() & regs.imr) {
629 if (regs.itr.interval() == 0) {
630 cpuPostInt();
631 } else {
632 if (!interEvent.scheduled())
633 interEvent.schedule(curTick + Clock::Int::ns * 256 * regs.itr.interval());
634 }
635 }
636
637
638 }
639
640
641 IGbE::RxDescCache::RxDescCache(IGbE *i, const std::string n, int s)
642 : DescCache<RxDesc>(i, n, s), pktDone(false), pktEvent(this)
643
644 {
645 }
646
647 bool
648 IGbE::RxDescCache::writePacket(EthPacketPtr packet)
649 {
650 // We shouldn't have to deal with any of these yet
651 DPRINTF(EthernetDesc, "Packet Length: %d Desc Size: %d\n",
652 packet->length, igbe->regs.rctl.descSize());
653 assert(packet->length < igbe->regs.rctl.descSize());
654
655 if (!unusedCache.size())
656 return false;
657
658 pktPtr = packet;
659
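    // DMA the packet data into the host buffer pointed to by the next unused
    // descriptor; pktEvent (which runs pktComplete) fires once the write finishes.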
660 igbe->dmaWrite(igbe->platform->pciToDma(unusedCache.front()->buf),
661 packet->length, &pktEvent, packet->data);
662 return true;
663 }
664
665 void
666 IGbE::RxDescCache::pktComplete()
667 {
668 assert(unusedCache.size());
669 RxDesc *desc;
670 desc = unusedCache.front();
671
672 uint16_t crcfixup = igbe->regs.rctl.secrc() ? 0 : 4 ;
673 desc->len = htole((uint16_t)(pktPtr->length + crcfixup));
674 DPRINTF(EthernetDesc, "pktPtr->length: %d stripcrc offset: %d value written: %d %d\n",
675 pktPtr->length, crcfixup,
676 htole((uint16_t)(pktPtr->length + crcfixup)),
677 (uint16_t)(pktPtr->length + crcfixup));
678
679 // no support for anything but starting at 0
680 assert(igbe->regs.rxcsum.pcss() == 0);
681
682 DPRINTF(EthernetDesc, "Packet written to memory updating Descriptor\n");
683
684 uint8_t status = RXDS_DD | RXDS_EOP;
685 uint8_t err = 0;
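    // When RXCSUM offload is enabled, verify the IP/TCP/UDP checksums, record
    // the computed sum in the descriptor, and set the error bits if a checksum
    // doesn't verify.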
686 IpPtr ip(pktPtr);
687 if (ip) {
688 if (igbe->regs.rxcsum.ipofld()) {
689 DPRINTF(EthernetDesc, "Checking IP checksum\n");
690 status |= RXDS_IPCS;
691 desc->csum = htole(cksum(ip));
692 if (cksum(ip) != 0) {
693 err |= RXDE_IPE;
694 DPRINTF(EthernetDesc, "Checksum is bad!!\n");
695 }
696 }
697 TcpPtr tcp(ip);
698 if (tcp && igbe->regs.rxcsum.tuofld()) {
699 DPRINTF(EthernetDesc, "Checking TCP checksum\n");
700 status |= RXDS_TCPCS;
701 desc->csum = htole(cksum(tcp));
702 if (cksum(tcp) != 0) {
703 DPRINTF(EthernetDesc, "Checksum is bad!!\n");
704 err |= RXDE_TCPE;
705 }
706 }
707
708 UdpPtr udp(ip);
709 if (udp && igbe->regs.rxcsum.tuofld()) {
710 DPRINTF(EthernetDesc, "Checking UDP checksum\n");
711 status |= RXDS_UDPCS;
712 desc->csum = htole(cksum(udp));
713 if (cksum(udp) != 0) {
714 DPRINTF(EthernetDesc, "Checksum is bad!!\n");
715 err |= RXDE_TCPE;
716 }
717 }
718 } // if ip
719
720 desc->status = htole(status);
721 desc->errors = htole(err);
722
723 // No vlan support at this point... just set it to 0
724 desc->vlan = 0;
725
726 // Deal with the rx timer interrupts
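    // RDTR is the per-packet delay timer and is restarted on every received
    // packet; RADV is the absolute delay timer and is only scheduled if it
    // isn't already pending.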
727 if (igbe->regs.rdtr.delay()) {
728 DPRINTF(EthernetSM, "RXS: Scheduling DTR for %d\n",
729 igbe->regs.rdtr.delay() * igbe->intClock());
730 if (igbe->rdtrEvent.scheduled())
731 igbe->rdtrEvent.reschedule(curTick + igbe->regs.rdtr.delay() *
732 igbe->intClock());
733 else
734 igbe->rdtrEvent.schedule(curTick + igbe->regs.rdtr.delay() *
735 igbe->intClock());
736 }
737
738 if (igbe->regs.radv.idv() && igbe->regs.rdtr.delay()) {
739 DPRINTF(EthernetSM, "RXS: Scheduling ADV for %d\n",
740 igbe->regs.radv.idv() * igbe->intClock());
741 if (!igbe->radvEvent.scheduled())
742 igbe->radvEvent.schedule(curTick + igbe->regs.radv.idv() *
743 igbe->intClock());
744 }
745
746     // If neither the RDTR nor RADV delay timer is in use, post the interrupt now; ITR may still moderate it
747 if (!igbe->regs.rdtr.delay()) {
748 DPRINTF(EthernetSM, "RXS: Receive interrupt delay disabled, posting IT_RXT\n");
749 igbe->postInterrupt(IT_RXT);
750 }
751
752 // If the packet is small enough, interrupt appropriately
753 // I wonder if this is delayed or not?!
754 if (pktPtr->length <= igbe->regs.rsrpd.idv()) {
755 DPRINTF(EthernetSM, "RXS: Posting IT_SRPD beacuse small packet received\n");
756 igbe->postInterrupt(IT_SRPD);
757 }
758
759 DPRINTF(EthernetDesc, "Processing of this descriptor complete\n");
760 unusedCache.pop_front();
761 usedCache.push_back(desc);
762 pktPtr = NULL;
763 enableSm();
764 pktDone = true;
765 igbe->checkDrain();
766 }
767
768 void
769 IGbE::RxDescCache::enableSm()
770 {
771 igbe->rxTick = true;
772 igbe->restartClock();
773 }
774
775 bool
776 IGbE::RxDescCache::packetDone()
777 {
778 if (pktDone) {
779 pktDone = false;
780 return true;
781 }
782 return false;
783 }
784
785 bool
786 IGbE::RxDescCache::hasOutstandingEvents()
787 {
788 return pktEvent.scheduled() || wbEvent.scheduled() ||
789 fetchEvent.scheduled();
790 }
791
792 void
793 IGbE::RxDescCache::serialize(std::ostream &os)
794 {
795 DescCache<RxDesc>::serialize(os);
796 SERIALIZE_SCALAR(pktDone);
797 }
798
799 void
800 IGbE::RxDescCache::unserialize(Checkpoint *cp, const std::string &section)
801 {
802 DescCache<RxDesc>::unserialize(cp, section);
803 UNSERIALIZE_SCALAR(pktDone);
804 }
805
806
807 ///////////////////////////////////// IGbE::TxDesc /////////////////////////////////
808
809 IGbE::TxDescCache::TxDescCache(IGbE *i, const std::string n, int s)
810 : DescCache<TxDesc>(i,n, s), pktDone(false), isTcp(false), pktWaiting(false),
811 pktEvent(this)
812
813 {
814 }
815
816 int
817 IGbE::TxDescCache::getPacketSize()
818 {
819 assert(unusedCache.size());
820
821 TxDesc *desc;
822
823 DPRINTF(EthernetDesc, "Starting processing of descriptor\n");
824
825 while (unusedCache.size() && TxdOp::isContext(unusedCache.front())) {
826 DPRINTF(EthernetDesc, "Got context descriptor type... skipping\n");
827
828 // I think we can just ignore these for now?
829 desc = unusedCache.front();
830 // is this going to be a tcp or udp packet?
831 isTcp = TxdOp::tcp(desc) ? true : false;
832
833 // make sure it's ipv4
834 assert(TxdOp::ip(desc));
835
836 TxdOp::setDd(desc);
837 unusedCache.pop_front();
838 usedCache.push_back(desc);
839 }
840
841 if (!unusedCache.size())
842 return -1;
843
844 DPRINTF(EthernetDesc, "Next TX packet is %d bytes\n",
845 TxdOp::getLen(unusedCache.front()));
846
847 return TxdOp::getLen(unusedCache.front());
848 }
849
850 void
851 IGbE::TxDescCache::getPacketData(EthPacketPtr p)
852 {
853 assert(unusedCache.size());
854
855 TxDesc *desc;
856 desc = unusedCache.front();
857
858 assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) && TxdOp::getLen(desc));
859
860 pktPtr = p;
861
862 pktWaiting = true;
863
864 DPRINTF(EthernetDesc, "Starting DMA of packet\n");
865 igbe->dmaRead(igbe->platform->pciToDma(TxdOp::getBuf(desc)),
866 TxdOp::getLen(desc), &pktEvent, p->data + p->length);
867
868
869 }
870
871 void
872 IGbE::TxDescCache::pktComplete()
873 {
874
875 TxDesc *desc;
876 assert(unusedCache.size());
877 assert(pktPtr);
878
879 DPRINTF(EthernetDesc, "DMA of packet complete\n");
880
881
882 desc = unusedCache.front();
883 assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) && TxdOp::getLen(desc));
884
885 DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
886
887 if (!TxdOp::eop(desc)) {
888 // This only supports two descriptors per tx packet
889 assert(pktPtr->length == 0);
890 pktPtr->length = TxdOp::getLen(desc);
891 unusedCache.pop_front();
892 usedCache.push_back(desc);
893 pktDone = true;
894 pktWaiting = false;
895 pktPtr = NULL;
896
897 DPRINTF(EthernetDesc, "Partial Packet Descriptor Done\n");
898 return;
899 }
900
901 // Set the length of the data in the EtherPacket
902 pktPtr->length += TxdOp::getLen(desc);
903
904 // no support for vlans
905 assert(!TxdOp::vle(desc));
906
907     // we always report status
908 assert(TxdOp::rs(desc));
909
910 // we only support single packet descriptors at this point
911 assert(TxdOp::eop(desc));
912
913 // set that this packet is done
914 TxdOp::setDd(desc);
915
916 DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
917
918     // Checksums are only offloaded for the new descriptor types
919 if (TxdOp::isData(desc) && ( TxdOp::ixsm(desc) || TxdOp::txsm(desc)) ) {
920 DPRINTF(EthernetDesc, "Calculating checksums for packet\n");
921 IpPtr ip(pktPtr);
922 if (TxdOp::ixsm(desc)) {
923 ip->sum(0);
924 ip->sum(cksum(ip));
925 DPRINTF(EthernetDesc, "Calculated IP checksum\n");
926 }
927 if (TxdOp::txsm(desc)) {
928 if (isTcp) {
929 TcpPtr tcp(ip);
930 assert(tcp);
931 tcp->sum(0);
932 tcp->sum(cksum(tcp));
933 DPRINTF(EthernetDesc, "Calculated TCP checksum\n");
934 } else {
935 UdpPtr udp(ip);
936 assert(udp);
937 udp->sum(0);
938 udp->sum(cksum(udp));
939 DPRINTF(EthernetDesc, "Calculated UDP checksum\n");
940 }
941 }
942 }
943
944 if (TxdOp::ide(desc)) {
945         // Deal with the tx timer interrupts
946 DPRINTF(EthernetDesc, "Descriptor had IDE set\n");
947 if (igbe->regs.tidv.idv()) {
948 DPRINTF(EthernetDesc, "setting tidv\n");
949 if (igbe->tidvEvent.scheduled())
950 igbe->tidvEvent.reschedule(curTick + igbe->regs.tidv.idv() *
951 igbe->intClock());
952 else
953 igbe->tidvEvent.schedule(curTick + igbe->regs.tidv.idv() *
954 igbe->intClock());
955 }
956
957 if (igbe->regs.tadv.idv() && igbe->regs.tidv.idv()) {
958 DPRINTF(EthernetDesc, "setting tadv\n");
959 if (!igbe->tadvEvent.scheduled())
960 igbe->tadvEvent.schedule(curTick + igbe->regs.tadv.idv() *
961 igbe->intClock());
962 }
963 }
964
965
966
967 unusedCache.pop_front();
968 usedCache.push_back(desc);
969 pktDone = true;
970 pktWaiting = false;
971 pktPtr = NULL;
972
973 DPRINTF(EthernetDesc, "Descriptor Done\n");
974
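    // TXDCTL.WTHRESH controls write-back coalescing: 0 writes each completed
    // descriptor back immediately, otherwise write-back happens once enough
    // used descriptors have accumulated.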
975 if (igbe->regs.txdctl.wthresh() == 0) {
976 DPRINTF(EthernetDesc, "WTHRESH == 0, writing back descriptor\n");
977 writeback(0);
978     } else if (usedCache.size() >= igbe->regs.txdctl.wthresh()) {
979         DPRINTF(EthernetDesc, "used >= WTHRESH, writing back descriptor\n");
980 writeback((igbe->cacheBlockSize()-1)>>4);
981 }
982 igbe->checkDrain();
983 }
984
985 void
986 IGbE::TxDescCache::serialize(std::ostream &os)
987 {
988 DescCache<TxDesc>::serialize(os);
989 SERIALIZE_SCALAR(pktDone);
990 SERIALIZE_SCALAR(isTcp);
991 SERIALIZE_SCALAR(pktWaiting);
992 }
993
994 void
995 IGbE::TxDescCache::unserialize(Checkpoint *cp, const std::string &section)
996 {
997 DescCache<TxDesc>::unserialize(cp, section);
998 UNSERIALIZE_SCALAR(pktDone);
999 UNSERIALIZE_SCALAR(isTcp);
1000 UNSERIALIZE_SCALAR(pktWaiting);
1001 }
1002
1003 bool
1004 IGbE::TxDescCache::packetAvailable()
1005 {
1006 if (pktDone) {
1007 pktDone = false;
1008 return true;
1009 }
1010 return false;
1011 }
1012
1013 void
1014 IGbE::TxDescCache::enableSm()
1015 {
1016 igbe->txTick = true;
1017 igbe->restartClock();
1018 }
1019
1020 bool
1021 IGbE::TxDescCache::hasOutstandingEvents()
1022 {
1023 return pktEvent.scheduled() || wbEvent.scheduled() ||
1024 fetchEvent.scheduled();
1025 }
1026
1027
1028 ///////////////////////////////////// IGbE /////////////////////////////////
1029
1030 void
1031 IGbE::restartClock()
1032 {
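    // Only tick while one of the state machines has work and the device isn't
    // draining; align the next tick to the next device clock edge.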
1033 if (!tickEvent.scheduled() && (rxTick || txTick || txFifoTick) && getState() ==
1034 SimObject::Running)
1035 tickEvent.schedule((curTick/cycles(1)) * cycles(1) + cycles(1));
1036 }
1037
1038 unsigned int
1039 IGbE::drain(Event *de)
1040 {
1041 unsigned int count;
1042 count = pioPort->drain(de) + dmaPort->drain(de);
1043 if (rxDescCache.hasOutstandingEvents() ||
1044 txDescCache.hasOutstandingEvents()) {
1045 count++;
1046 drainEvent = de;
1047 }
1048
1049 txFifoTick = false;
1050 txTick = false;
1051 rxTick = false;
1052
1053 if (tickEvent.scheduled())
1054 tickEvent.deschedule();
1055
1056 if (count)
1057 changeState(Draining);
1058 else
1059 changeState(Drained);
1060
1061 return count;
1062 }
1063
1064 void
1065 IGbE::resume()
1066 {
1067 SimObject::resume();
1068
1069 txFifoTick = true;
1070 txTick = true;
1071 rxTick = true;
1072
1073 restartClock();
1074 }
1075
1076 void
1077 IGbE::checkDrain()
1078 {
1079 if (!drainEvent)
1080 return;
1081
1082 if (rxDescCache.hasOutstandingEvents() ||
1083 txDescCache.hasOutstandingEvents()) {
1084 drainEvent->process();
1085 drainEvent = NULL;
1086 }
1087 }
1088
1089 void
1090 IGbE::txStateMachine()
1091 {
1092 if (!regs.tctl.en()) {
1093 txTick = false;
1094 DPRINTF(EthernetSM, "TXS: TX disabled, stopping ticking\n");
1095 return;
1096 }
1097
1098     // If we have a packet available and its length is not 0 (meaning it's not
1099     // a multidescriptor packet) put it in the fifo, otherwise on the next
1100     // iteration we'll get the rest of the data
1101 if (txPacket && txDescCache.packetAvailable() && txPacket->length) {
1102 bool success;
1103 DPRINTF(EthernetSM, "TXS: packet placed in TX FIFO\n");
1104 success = txFifo.push(txPacket);
1105 txFifoTick = true;
1106 assert(success);
1107 txPacket = NULL;
1108 txDescCache.writeback((cacheBlockSize()-1)>>4);
1109 return;
1110 }
1111
1112 // Only support descriptor granularity
1113 assert(regs.txdctl.gran());
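    // TXDCTL.LWTHRESH is in units of 8 descriptors; post TXDLOW when the
    // number of descriptors left in the ring drops below that threshold.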
1114 if (regs.txdctl.lwthresh() && txDescCache.descLeft() < (regs.txdctl.lwthresh() * 8)) {
1115 DPRINTF(EthernetSM, "TXS: LWTHRESH caused posting of TXDLOW\n");
1116 postInterrupt(IT_TXDLOW);
1117 }
1118
1119 if (!txPacket) {
1120 txPacket = new EthPacketData(16384);
1121 }
1122
1123 if (!txDescCache.packetWaiting()) {
1124 if (txDescCache.descLeft() == 0) {
1125 DPRINTF(EthernetSM, "TXS: No descriptors left in ring, forcing "
1126 "writeback stopping ticking and posting TXQE\n");
1127 txDescCache.writeback(0);
1128 txTick = false;
1129 postInterrupt(IT_TXQE, true);
1130 return;
1131 }
1132
1133
1134 if (!(txDescCache.descUnused())) {
1135 DPRINTF(EthernetSM, "TXS: No descriptors available in cache, fetching and stopping ticking\n");
1136 txTick = false;
1137 txDescCache.fetchDescriptors();
1138 return;
1139 }
1140
1141 int size;
1142 size = txDescCache.getPacketSize();
1143 if (size > 0 && txFifo.avail() > size) {
1144 DPRINTF(EthernetSM, "TXS: Reserving %d bytes in FIFO and begining "
1145 "DMA of next packet\n", size);
1146 txFifo.reserve(size);
1147 txDescCache.getPacketData(txPacket);
1148 } else if (size <= 0) {
1149 DPRINTF(EthernetSM, "TXS: No packets to get, writing back used descriptors\n");
1150 txDescCache.writeback(0);
1151 } else {
1152 DPRINTF(EthernetSM, "TXS: FIFO full, stopping ticking until space "
1153 "available in FIFO\n");
1154 txDescCache.writeback((cacheBlockSize()-1)>>4);
1155 txTick = false;
1156 }
1157
1158
1159 return;
1160 }
1161 }
1162
1163 bool
1164 IGbE::ethRxPkt(EthPacketPtr pkt)
1165 {
1166 DPRINTF(Ethernet, "RxFIFO: Receiving pcakte from wire\n");
1167 if (!regs.rctl.en()) {
1168 DPRINTF(Ethernet, "RxFIFO: RX not enabled, dropping\n");
1169 return true;
1170 }
1171
1172 // restart the state machines if they are stopped
1173 rxTick = true;
1174 if ((rxTick || txTick) && !tickEvent.scheduled()) {
1175 DPRINTF(EthernetSM, "RXS: received packet into fifo, starting ticking\n");
1176 restartClock();
1177 }
1178
1179 if (!rxFifo.push(pkt)) {
1180 DPRINTF(Ethernet, "RxFIFO: Packet won't fit in fifo... dropped\n");
1181 postInterrupt(IT_RXO, true);
1182 return false;
1183 }
1184 return true;
1185 }
1186
1187
1188 void
1189 IGbE::rxStateMachine()
1190 {
1191 if (!regs.rctl.en()) {
1192 rxTick = false;
1193 DPRINTF(EthernetSM, "RXS: RX disabled, stopping ticking\n");
1194 return;
1195 }
1196
1197 // If the packet is done check for interrupts/descriptors/etc
1198 if (rxDescCache.packetDone()) {
1199 DPRINTF(EthernetSM, "RXS: Packet completed DMA to memory\n");
1200 int descLeft = rxDescCache.descLeft();
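        // RCTL.RDMTS selects the free-descriptor threshold as a fraction of
        // the ring (1/2, 1/4 or 1/8). The cases intentionally fall through to
        // post IT_RXDMT once the count drops below the selected fraction.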
1201 switch (regs.rctl.rdmts()) {
1202 case 2: if (descLeft > .125 * regs.rdlen()) break;
1203 case 1: if (descLeft > .250 * regs.rdlen()) break;
1204 case 0: if (descLeft > .500 * regs.rdlen()) break;
1205 DPRINTF(Ethernet, "RXS: Interrupting (RXDMT) because of descriptors left\n");
1206 postInterrupt(IT_RXDMT);
1207 break;
1208 }
1209
1210 if (descLeft == 0) {
1211 DPRINTF(EthernetSM, "RXS: No descriptors left in ring, forcing"
1212 " writeback and stopping ticking\n");
1213 rxDescCache.writeback(0);
1214 rxTick = false;
1215 }
1216
1217         // only support descriptor granularities
1218 assert(regs.rxdctl.gran());
1219
1220 if (regs.rxdctl.wthresh() >= rxDescCache.descUsed()) {
1221 DPRINTF(EthernetSM, "RXS: Writing back because WTHRESH >= descUsed\n");
1222 if (regs.rxdctl.wthresh() < (cacheBlockSize()>>4))
1223 rxDescCache.writeback(regs.rxdctl.wthresh()-1);
1224 else
1225 rxDescCache.writeback((cacheBlockSize()-1)>>4);
1226 }
1227
1228 if ((rxDescCache.descUnused() < regs.rxdctl.pthresh()) &&
1229 ((rxDescCache.descLeft() - rxDescCache.descUnused()) > regs.rxdctl.hthresh())) {
1230 DPRINTF(EthernetSM, "RXS: Fetching descriptors because descUnused < PTHRESH\n");
1231 rxDescCache.fetchDescriptors();
1232 }
1233
1234 if (rxDescCache.descUnused() == 0) {
1235 DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
1236 "fetching descriptors and stopping ticking\n");
1237 rxTick = false;
1238 rxDescCache.fetchDescriptors();
1239 }
1240 return;
1241 }
1242
1243 if (!rxDescCache.descUnused()) {
1244 DPRINTF(EthernetSM, "RXS: No descriptors available in cache, stopping ticking\n");
1245 rxTick = false;
1246 DPRINTF(EthernetSM, "RXS: No descriptors available, fetching\n");
1247 rxDescCache.fetchDescriptors();
1248 return;
1249 }
1250
1251 if (rxFifo.empty()) {
1252 DPRINTF(EthernetSM, "RXS: RxFIFO empty, stopping ticking\n");
1253 rxTick = false;
1254 return;
1255 }
1256
1257 EthPacketPtr pkt;
1258 pkt = rxFifo.front();
1259
1260 DPRINTF(EthernetSM, "RXS: Writing packet into memory\n");
1261 if (!rxDescCache.writePacket(pkt)) {
1262 return;
1263 }
1264
1265 DPRINTF(EthernetSM, "RXS: Removing packet from FIFO\n");
1266 rxFifo.pop();
1267 DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n");
1268 rxTick = false;
1269 }
1270
1271 void
1272 IGbE::txWire()
1273 {
1274 if (txFifo.empty()) {
1275 txFifoTick = false;
1276 return;
1277 }
1278
1279
1280 if (etherInt->sendPacket(txFifo.front())) {
1281 DPRINTF(EthernetSM, "TxFIFO: Successful transmit, bytes available in fifo: %d\n",
1282 txFifo.avail());
1283 txFifo.pop();
1284 } else {
1285         // We'll get woken up when ethTxDone() gets called for this packet
1286 txFifoTick = false;
1287 }
1288
1289 }
1290
1291 void
1292 IGbE::tick()
1293 {
1294 DPRINTF(EthernetSM, "IGbE: -------------- Cycle --------------\n");
1295
1296 if (rxTick)
1297 rxStateMachine();
1298
1299 if (txTick)
1300 txStateMachine();
1301
1302 if (txFifoTick)
1303 txWire();
1304
1305
1306 if (rxTick || txTick || txFifoTick)
1307 tickEvent.schedule(curTick + cycles(1));
1308 }
1309
1310 void
1311 IGbE::ethTxDone()
1312 {
1313 // restart the tx state machines if they are stopped
1314 // fifo to send another packet
1315 // tx sm to put more data into the fifo
1316 txFifoTick = true;
1317 txTick = true;
1318
1319 restartClock();
1320 DPRINTF(EthernetSM, "TxFIFO: Transmission complete\n");
1321 }
1322
1323 void
1324 IGbE::serialize(std::ostream &os)
1325 {
1326 PciDev::serialize(os);
1327
1328 regs.serialize(os);
1329 SERIALIZE_SCALAR(eeOpBits);
1330 SERIALIZE_SCALAR(eeAddrBits);
1331 SERIALIZE_SCALAR(eeDataBits);
1332 SERIALIZE_SCALAR(eeOpcode);
1333 SERIALIZE_SCALAR(eeAddr);
1334 SERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);
1335
1336 rxFifo.serialize("rxfifo", os);
1337 txFifo.serialize("txfifo", os);
1338
1339 bool txPktExists = txPacket;
1340 SERIALIZE_SCALAR(txPktExists);
1341 if (txPktExists)
1342 txPacket->serialize("txpacket", os);
1343
1344 Tick rdtr_time = 0, radv_time = 0, tidv_time = 0, tadv_time = 0,
1345 inter_time = 0;
1346
1347 if (rdtrEvent.scheduled())
1348 rdtr_time = rdtrEvent.when();
1349 SERIALIZE_SCALAR(rdtr_time);
1350
1351 if (radvEvent.scheduled())
1352 radv_time = radvEvent.when();
1353 SERIALIZE_SCALAR(radv_time);
1354
1355 if (tidvEvent.scheduled())
1356         tidv_time = tidvEvent.when();
1357 SERIALIZE_SCALAR(tidv_time);
1358
1359 if (tadvEvent.scheduled())
1360         tadv_time = tadvEvent.when();
1361 SERIALIZE_SCALAR(tadv_time);
1362
1363 if (interEvent.scheduled())
1364         inter_time = interEvent.when();
1365 SERIALIZE_SCALAR(inter_time);
1366
1367 nameOut(os, csprintf("%s.TxDescCache", name()));
1368 txDescCache.serialize(os);
1369
1370 nameOut(os, csprintf("%s.RxDescCache", name()));
1371 rxDescCache.serialize(os);
1372 }
1373
1374 void
1375 IGbE::unserialize(Checkpoint *cp, const std::string &section)
1376 {
1377 PciDev::unserialize(cp, section);
1378
1379 regs.unserialize(cp, section);
1380 UNSERIALIZE_SCALAR(eeOpBits);
1381 UNSERIALIZE_SCALAR(eeAddrBits);
1382 UNSERIALIZE_SCALAR(eeDataBits);
1383 UNSERIALIZE_SCALAR(eeOpcode);
1384 UNSERIALIZE_SCALAR(eeAddr);
1385 UNSERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);
1386
1387 rxFifo.unserialize("rxfifo", cp, section);
1388 txFifo.unserialize("txfifo", cp, section);
1389
1390 bool txPktExists;
1391 UNSERIALIZE_SCALAR(txPktExists);
1392 if (txPktExists) {
1393 txPacket = new EthPacketData(16384);
1394 txPacket->unserialize("txpacket", cp, section);
1395 }
1396
1397 rxTick = true;
1398 txTick = true;
1399 txFifoTick = true;
1400
1401 Tick rdtr_time, radv_time, tidv_time, tadv_time, inter_time;
1402 UNSERIALIZE_SCALAR(rdtr_time);
1403 UNSERIALIZE_SCALAR(radv_time);
1404 UNSERIALIZE_SCALAR(tidv_time);
1405 UNSERIALIZE_SCALAR(tadv_time);
1406 UNSERIALIZE_SCALAR(inter_time);
1407
1408 if (rdtr_time)
1409 rdtrEvent.schedule(rdtr_time);
1410
1411 if (radv_time)
1412 radvEvent.schedule(radv_time);
1413
1414 if (tidv_time)
1415 tidvEvent.schedule(tidv_time);
1416
1417 if (tadv_time)
1418 tadvEvent.schedule(tadv_time);
1419
1420 if (inter_time)
1421 interEvent.schedule(inter_time);
1422
1423 txDescCache.unserialize(cp, csprintf("%s.TxDescCache", section));
1424
1425 rxDescCache.unserialize(cp, csprintf("%s.RxDescCache", section));
1426 }
1427
1428
1429 BEGIN_DECLARE_SIM_OBJECT_PARAMS(IGbEInt)
1430
1431 SimObjectParam<EtherInt *> peer;
1432 SimObjectParam<IGbE *> device;
1433
1434 END_DECLARE_SIM_OBJECT_PARAMS(IGbEInt)
1435
1436 BEGIN_INIT_SIM_OBJECT_PARAMS(IGbEInt)
1437
1438 INIT_PARAM_DFLT(peer, "peer interface", NULL),
1439 INIT_PARAM(device, "Ethernet device of this interface")
1440
1441 END_INIT_SIM_OBJECT_PARAMS(IGbEInt)
1442
1443 CREATE_SIM_OBJECT(IGbEInt)
1444 {
1445 IGbEInt *dev_int = new IGbEInt(getInstanceName(), device);
1446
1447 EtherInt *p = (EtherInt *)peer;
1448 if (p) {
1449 dev_int->setPeer(p);
1450 p->setPeer(dev_int);
1451 }
1452
1453 return dev_int;
1454 }
1455
1456 REGISTER_SIM_OBJECT("IGbEInt", IGbEInt)
1457
1458
1459 BEGIN_DECLARE_SIM_OBJECT_PARAMS(IGbE)
1460
1461 SimObjectParam<System *> system;
1462 SimObjectParam<Platform *> platform;
1463 SimObjectParam<PciConfigData *> configdata;
1464 Param<uint32_t> pci_bus;
1465 Param<uint32_t> pci_dev;
1466 Param<uint32_t> pci_func;
1467 Param<Tick> pio_latency;
1468 Param<Tick> config_latency;
1469 Param<std::string> hardware_address;
1470 Param<bool> use_flow_control;
1471 Param<int> rx_fifo_size;
1472 Param<int> tx_fifo_size;
1473 Param<int> rx_desc_cache_size;
1474 Param<int> tx_desc_cache_size;
1475 Param<Tick> clock;
1476
1477
1478 END_DECLARE_SIM_OBJECT_PARAMS(IGbE)
1479
1480 BEGIN_INIT_SIM_OBJECT_PARAMS(IGbE)
1481
1482 INIT_PARAM(system, "System pointer"),
1483 INIT_PARAM(platform, "Platform pointer"),
1484 INIT_PARAM(configdata, "PCI Config data"),
1485 INIT_PARAM(pci_bus, "PCI bus ID"),
1486 INIT_PARAM(pci_dev, "PCI device number"),
1487 INIT_PARAM(pci_func, "PCI function code"),
1488 INIT_PARAM_DFLT(pio_latency, "Programmed IO latency in bus cycles", 1),
1489 INIT_PARAM(config_latency, "Number of cycles for a config read or write"),
1490 INIT_PARAM(hardware_address, "Ethernet Hardware Address"),
1491     INIT_PARAM(use_flow_control,"Should the device use xon/xoff packets"),
1492 INIT_PARAM(rx_fifo_size,"Size of the RX FIFO"),
1493 INIT_PARAM(tx_fifo_size,"Size of the TX FIFO"),
1494 INIT_PARAM(rx_desc_cache_size,"Size of the RX descriptor cache"),
1495 INIT_PARAM(tx_desc_cache_size,"Size of the TX descriptor cache"),
1496 INIT_PARAM(clock,"Clock rate for the device to tick at")
1497
1498 END_INIT_SIM_OBJECT_PARAMS(IGbE)
1499
1500
1501 CREATE_SIM_OBJECT(IGbE)
1502 {
1503 IGbE::Params *params = new IGbE::Params;
1504
1505 params->name = getInstanceName();
1506 params->platform = platform;
1507 params->system = system;
1508 params->configData = configdata;
1509 params->busNum = pci_bus;
1510 params->deviceNum = pci_dev;
1511 params->functionNum = pci_func;
1512 params->pio_delay = pio_latency;
1513 params->config_delay = config_latency;
1514 params->hardware_address = hardware_address;
1515 params->use_flow_control = use_flow_control;
1516 params->rx_fifo_size = rx_fifo_size;
1517 params->tx_fifo_size = tx_fifo_size;
1518 params->rx_desc_cache_size = rx_desc_cache_size;
1519 params->tx_desc_cache_size = tx_desc_cache_size;
1520 params->clock = clock;
1521
1522
1523 return new IGbE(params);
1524 }
1525
1526 REGISTER_SIM_OBJECT("IGbE", IGbE)