1 /*
2 * Copyright (c) 2006 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Ali Saidi
29 */
30
31 /* @file
32 * Device model for Intel's 8254x line of gigabit ethernet controllers.
33 * In particular it models an 82547 revision 2 (82547GI) MAC, because that part
34 * seems to need the fewest workarounds in the driver. It will probably work
35 * with most of the other MACs with slight modifications.
36 */
37
38
39 /*
40 * @todo Really there are multiple DMA engines; we should implement them.
41 */
42
43 #include "base/inet.hh"
44 #include "base/trace.hh"
45 #include "dev/i8254xGBe.hh"
46 #include "mem/packet.hh"
47 #include "mem/packet_access.hh"
48 #include "sim/builder.hh"
49 #include "sim/stats.hh"
50 #include "sim/system.hh"
51
52 #include <algorithm>
53
54 using namespace iGbReg;
55 using namespace Net;
56
57 IGbE::IGbE(Params *p)
58 : PciDev(p), etherInt(NULL), drainEvent(NULL), useFlowControl(p->use_flow_control),
59 rxFifo(p->rx_fifo_size), txFifo(p->tx_fifo_size), rxTick(false),
60 txTick(false), txFifoTick(false), rdtrEvent(this), radvEvent(this),
61 tadvEvent(this), tidvEvent(this), tickEvent(this), interEvent(this),
62 rxDescCache(this, name()+".RxDesc", p->rx_desc_cache_size),
63 txDescCache(this, name()+".TxDesc", p->tx_desc_cache_size), clock(p->clock)
64 {
65 // Initialize internal registers per Intel documentation
66 // All registers initialized to 0 by the per-register constructors
67 regs.ctrl.fd(1);
68 regs.ctrl.lrst(1);
69 regs.ctrl.speed(2);
70 regs.ctrl.frcspd(1);
71 regs.sts.speed(3); // Say we're 1000Mbps
72 regs.sts.fd(1); // full duplex
73 regs.sts.lu(1); // link up
74 regs.eecd.fwe(1);
75 regs.eecd.ee_type(1);
76 regs.imr = 0;
77 regs.iam = 0;
78 regs.rxdctl.gran(1);
79 regs.rxdctl.wthresh(1);
80 regs.fcrth(1);
81
82 regs.pba.rxa(0x30);
83 regs.pba.txa(0x10);
84
85 eeOpBits = 0;
86 eeAddrBits = 0;
87 eeDataBits = 0;
88 eeOpcode = 0;
89
90 // clear all 64 16-bit words of the EEPROM
91 memset(&flash, 0, EEPROM_SIZE*2);
92
93 // Set the MAC address
94 memcpy(flash, p->hardware_address.bytes(), ETH_ADDR_LEN);
95 for (int x = 0; x < ETH_ADDR_LEN/2; x++)
96 flash[x] = htobe(flash[x]);
97
98 uint16_t csum = 0;
99 for (int x = 0; x < EEPROM_SIZE; x++)
100 csum += htobe(flash[x]);
101
102
103 // Set the checksum word so that all EEPROM words sum to EEPROM_CSUM
104 flash[EEPROM_SIZE-1] = htobe((uint16_t)(EEPROM_CSUM - csum));
105
106 rxFifo.clear();
107 txFifo.clear();
108 }
109
110
111 Tick
112 IGbE::writeConfig(PacketPtr pkt)
113 {
114 int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
115 if (offset < PCI_DEVICE_SPECIFIC)
116 PciDev::writeConfig(pkt);
117 else
118 panic("Device specific PCI config space not implemented.\n");
119
120 ///
121 /// Some work may need to be done here based on the PCI COMMAND bits.
122 ///
123
124 return pioDelay;
125 }
126
127 Tick
128 IGbE::read(PacketPtr pkt)
129 {
130 int bar;
131 Addr daddr;
132
133 if (!getBAR(pkt->getAddr(), bar, daddr))
134 panic("Invalid PCI memory access to unmapped memory.\n");
135
136 // Only Memory register BAR is allowed
137 assert(bar == 0);
138
139 // Only 32bit accesses allowed
140 assert(pkt->getSize() == 4);
141
142 DPRINTF(Ethernet, "Read device register %#X\n", daddr);
143
144 pkt->allocate();
145
146 ///
147 /// Handle read of register here
148 ///
149
150
151 switch (daddr) {
152 case REG_CTRL:
153 pkt->set<uint32_t>(regs.ctrl());
154 break;
155 case REG_STATUS:
156 pkt->set<uint32_t>(regs.sts());
157 break;
158 case REG_EECD:
159 pkt->set<uint32_t>(regs.eecd());
160 break;
161 case REG_EERD:
162 pkt->set<uint32_t>(regs.eerd());
163 break;
164 case REG_CTRL_EXT:
165 pkt->set<uint32_t>(regs.ctrl_ext());
166 break;
167 case REG_MDIC:
168 pkt->set<uint32_t>(regs.mdic());
169 break;
170 case REG_ICR:
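// Note: reading ICR returns the pending cause bits; per the code below the
// read clears them when the interrupt line is asserted or IMR is zero, and
// with CTRL_EXT.IAME set an asserted read also applies the IAM auto-mask.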
171 DPRINTF(Ethernet, "Reading ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n", regs.icr(),
172 regs.imr, regs.iam, regs.ctrl_ext.iame());
173 pkt->set<uint32_t>(regs.icr());
174 if (regs.icr.int_assert() || regs.imr == 0) {
175 regs.icr = regs.icr() & ~mask(30);
176 DPRINTF(Ethernet, "Cleared ICR. ICR=%#x\n", regs.icr());
177 }
178 if (regs.ctrl_ext.iame() && regs.icr.int_assert())
179 regs.imr &= ~regs.iam;
180 chkInterrupt();
181 break;
182 case REG_ITR:
183 pkt->set<uint32_t>(regs.itr());
184 break;
185 case REG_RCTL:
186 pkt->set<uint32_t>(regs.rctl());
187 break;
188 case REG_FCTTV:
189 pkt->set<uint32_t>(regs.fcttv());
190 break;
191 case REG_TCTL:
192 pkt->set<uint32_t>(regs.tctl());
193 break;
194 case REG_PBA:
195 pkt->set<uint32_t>(regs.pba());
196 break;
197 case REG_WUC:
198 case REG_LEDCTL:
199 pkt->set<uint32_t>(0); // We don't care, so just return 0
200 break;
201 case REG_FCRTL:
202 pkt->set<uint32_t>(regs.fcrtl());
203 break;
204 case REG_FCRTH:
205 pkt->set<uint32_t>(regs.fcrth());
206 break;
207 case REG_RDBAL:
208 pkt->set<uint32_t>(regs.rdba.rdbal());
209 break;
210 case REG_RDBAH:
211 pkt->set<uint32_t>(regs.rdba.rdbah());
212 break;
213 case REG_RDLEN:
214 pkt->set<uint32_t>(regs.rdlen());
215 break;
216 case REG_RDH:
217 pkt->set<uint32_t>(regs.rdh());
218 break;
219 case REG_RDT:
220 pkt->set<uint32_t>(regs.rdt());
221 break;
222 case REG_RDTR:
223 pkt->set<uint32_t>(regs.rdtr());
224 if (regs.rdtr.fpd()) {
225 rxDescCache.writeback(0);
226 DPRINTF(EthernetIntr, "Posting interrupt because of RDTR.FPD write\n");
227 postInterrupt(IT_RXT);
228 regs.rdtr.fpd(0);
229 }
230 break;
231 case REG_RADV:
232 pkt->set<uint32_t>(regs.radv());
233 break;
234 case REG_TDBAL:
235 pkt->set<uint32_t>(regs.tdba.tdbal());
236 break;
237 case REG_TDBAH:
238 pkt->set<uint32_t>(regs.tdba.tdbah());
239 break;
240 case REG_TDLEN:
241 pkt->set<uint32_t>(regs.tdlen());
242 break;
243 case REG_TDH:
244 pkt->set<uint32_t>(regs.tdh());
245 break;
246 case REG_TDT:
247 pkt->set<uint32_t>(regs.tdt());
248 break;
249 case REG_TIDV:
250 pkt->set<uint32_t>(regs.tidv());
251 break;
252 case REG_TXDCTL:
253 pkt->set<uint32_t>(regs.txdctl());
254 break;
255 case REG_TADV:
256 pkt->set<uint32_t>(regs.tadv());
257 break;
258 case REG_RXCSUM:
259 pkt->set<uint32_t>(regs.rxcsum());
260 break;
261 case REG_MANC:
262 pkt->set<uint32_t>(regs.manc());
263 break;
264 default:
265 if (!(daddr >= REG_VFTA && daddr < (REG_VFTA + VLAN_FILTER_TABLE_SIZE*4)) &&
266 !(daddr >= REG_RAL && daddr < (REG_RAL + RCV_ADDRESS_TABLE_SIZE*8)) &&
267 !(daddr >= REG_MTA && daddr < (REG_MTA + MULTICAST_TABLE_SIZE*4)) &&
268 !(daddr >= REG_CRCERRS && daddr < (REG_CRCERRS + STATS_REGS_SIZE)))
269 panic("Read request to unknown register number: %#x\n", daddr);
270 else
271 pkt->set<uint32_t>(0);
272 };
273
274 pkt->result = Packet::Success;
275 return pioDelay;
276 }
277
278 Tick
279 IGbE::write(PacketPtr pkt)
280 {
281 int bar;
282 Addr daddr;
283
284
285 if (!getBAR(pkt->getAddr(), bar, daddr))
286 panic("Invalid PCI memory access to unmapped memory.\n");
287
288 // Only Memory register BAR is allowed
289 assert(bar == 0);
290
291 // Only 32bit accesses allowed
292 assert(pkt->getSize() == sizeof(uint32_t));
293
294 DPRINTF(Ethernet, "Wrote device register %#X value %#X\n", daddr, pkt->get<uint32_t>());
295
296 ///
297 /// Handle write of register here
298 ///
299 uint32_t val = pkt->get<uint32_t>();
300
301 Regs::RCTL oldrctl;
302 Regs::TCTL oldtctl;
303
304 switch (daddr) {
305 case REG_CTRL:
306 regs.ctrl = val;
307 if (regs.ctrl.tfce())
308 warn("TX Flow control enabled, should implement\n");
309 if (regs.ctrl.rfce())
310 warn("RX Flow control enabled, should implement\n");
311 break;
312 case REG_CTRL_EXT:
313 regs.ctrl_ext = val;
314 break;
315 case REG_STATUS:
316 regs.sts = val;
317 break;
318 case REG_EECD:
319 int oldClk;
320 oldClk = regs.eecd.sk();
321 regs.eecd = val;
322 // See if this is an EEPROM access and emulate it accordingly
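// The driver bit-bangs a serial (SPI-style) EEPROM here: on each rising
// edge of SK we shift in 8 opcode bits, then 8 address bits for a READ,
// and then shift out 16 data bits MSB-first (or 8 status bits for RDSR).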
323 if (!oldClk && regs.eecd.sk()) {
324 if (eeOpBits < 8) {
325 eeOpcode = eeOpcode << 1 | regs.eecd.din();
326 eeOpBits++;
327 } else if (eeAddrBits < 8 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
328 eeAddr = eeAddr << 1 | regs.eecd.din();
329 eeAddrBits++;
330 } else if (eeDataBits < 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
331 assert(eeAddr>>1 < EEPROM_SIZE);
332 DPRINTF(EthernetEEPROM, "EEPROM bit read: %d word: %#X\n",
333 flash[eeAddr>>1] >> eeDataBits & 0x1, flash[eeAddr>>1]);
334 regs.eecd.dout((flash[eeAddr>>1] >> (15-eeDataBits)) & 0x1);
335 eeDataBits++;
336 } else if (eeDataBits < 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI) {
337 regs.eecd.dout(0);
338 eeDataBits++;
339 } else
340 panic("What's going on with eeprom interface? opcode:"
341 " %#x:%d addr: %#x:%d, data: %d\n", (uint32_t)eeOpcode,
342 (uint32_t)eeOpBits, (uint32_t)eeAddr,
343 (uint32_t)eeAddrBits, (uint32_t)eeDataBits);
344
345 // Reset everything for the next command
346 if ((eeDataBits == 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) ||
347 (eeDataBits == 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI)) {
348 eeOpBits = 0;
349 eeAddrBits = 0;
350 eeDataBits = 0;
351 eeOpcode = 0;
352 eeAddr = 0;
353 }
354
355 DPRINTF(EthernetEEPROM, "EEPROM: opcode: %#X:%d addr: %#X:%d\n",
356 (uint32_t)eeOpcode, (uint32_t) eeOpBits,
357 (uint32_t)eeAddr>>1, (uint32_t)eeAddrBits);
358 if (eeOpBits == 8 && !(eeOpcode == EEPROM_READ_OPCODE_SPI ||
359 eeOpcode == EEPROM_RDSR_OPCODE_SPI ))
360 panic("Unknown eeprom opcode: %#X:%d\n", (uint32_t)eeOpcode,
361 (uint32_t)eeOpBits);
362
363
364 }
365 // If the driver requests EEPROM access, grant it immediately
366 regs.eecd.ee_gnt(regs.eecd.ee_req());
367 break;
368 case REG_EERD:
369 regs.eerd = val;
370 break;
371 case REG_MDIC:
372 regs.mdic = val;
373 if (regs.mdic.i())
374 panic("No support for interrupt on mdic complete\n");
375 if (regs.mdic.phyadd() != 1)
376 panic("No support for reading anything but phy\n");
377 DPRINTF(Ethernet, "%s phy address %x\n", regs.mdic.op() == 1 ? "Writing"
378 : "Reading", regs.mdic.regadd());
379 switch (regs.mdic.regadd()) {
380 case PHY_PSTATUS:
381 regs.mdic.data(0x796D); // link up
382 break;
383 case PHY_PID:
384 regs.mdic.data(0x02A8);
385 break;
386 case PHY_EPID:
387 regs.mdic.data(0x0380);
388 break;
389 case PHY_GSTATUS:
390 regs.mdic.data(0x7C00);
391 break;
392 case PHY_EPSTATUS:
393 regs.mdic.data(0x3000);
394 break;
395 case PHY_AGC:
396 regs.mdic.data(0x180); // some random length
397 break;
398 default:
399 regs.mdic.data(0);
400 }
401 regs.mdic.r(1);
402 break;
403 case REG_ICR:
404 DPRINTF(Ethernet, "Writing ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n", regs.icr(),
405 regs.imr, regs.iam, regs.ctrl_ext.iame());
406 if (regs.ctrl_ext.iame())
407 regs.imr &= ~regs.iam;
408 regs.icr = ~bits(val,30,0) & regs.icr();
409 chkInterrupt();
410 break;
411 case REG_ITR:
412 regs.itr = val;
413 break;
414 case REG_ICS:
415 DPRINTF(EthernetIntr, "Posting interrupt because of ICS write\n");
416 postInterrupt((IntTypes)val);
417 break;
418 case REG_IMS:
419 regs.imr |= val;
420 chkInterrupt();
421 break;
422 case REG_IMC:
423 regs.imr &= ~val;
424 chkInterrupt();
425 break;
426 case REG_IAM:
427 regs.iam = val;
428 break;
429 case REG_RCTL:
430 oldrctl = regs.rctl;
431 regs.rctl = val;
432 if (regs.rctl.rst()) {
433 rxDescCache.reset();
434 DPRINTF(EthernetSM, "RXS: Got RESET!\n");
435 rxFifo.clear();
436 regs.rctl.rst(0);
437 }
438 if (regs.rctl.en())
439 rxTick = true;
440 restartClock();
441 break;
442 case REG_FCTTV:
443 regs.fcttv = val;
444 break;
445 case REG_TCTL:
446 oldtctl = regs.tctl;
447 regs.tctl = val;
449 if (regs.tctl.en())
450 txTick = true;
451 restartClock();
452 if (regs.tctl.en() && !oldtctl.en()) {
453 txDescCache.reset();
454 }
455 break;
456 case REG_PBA:
457 regs.pba.rxa(val);
458 regs.pba.txa(64 - regs.pba.rxa());
459 break;
460 case REG_WUC:
461 case REG_LEDCTL:
462 case REG_FCAL:
463 case REG_FCAH:
464 case REG_FCT:
465 case REG_VET:
466 case REG_AIFS:
467 case REG_TIPG:
468 ; // We don't care, so don't store anything
469 break;
470 case REG_FCRTL:
471 regs.fcrtl = val;
472 break;
473 case REG_FCRTH:
474 regs.fcrth = val;
475 break;
476 case REG_RDBAL:
477 regs.rdba.rdbal( val & ~mask(4));
478 rxDescCache.areaChanged();
479 break;
480 case REG_RDBAH:
481 regs.rdba.rdbah(val);
482 rxDescCache.areaChanged();
483 break;
484 case REG_RDLEN:
485 regs.rdlen = val & ~mask(7);
486 rxDescCache.areaChanged();
487 break;
488 case REG_RDH:
489 regs.rdh = val;
490 rxDescCache.areaChanged();
491 break;
492 case REG_RDT:
493 regs.rdt = val;
494 rxTick = true;
495 restartClock();
496 break;
497 case REG_RDTR:
498 regs.rdtr = val;
499 break;
500 case REG_RADV:
501 regs.radv = val;
502 break;
503 case REG_TDBAL:
504 regs.tdba.tdbal( val & ~mask(4));
505 txDescCache.areaChanged();
506 break;
507 case REG_TDBAH:
508 regs.tdba.tdbah(val);
509 txDescCache.areaChanged();
510 break;
511 case REG_TDLEN:
512 regs.tdlen = val & ~mask(7);
513 txDescCache.areaChanged();
514 break;
515 case REG_TDH:
516 regs.tdh = val;
517 txDescCache.areaChanged();
518 break;
519 case REG_TDT:
520 regs.tdt = val;
521 txTick = true;
522 restartClock();
523 break;
524 case REG_TIDV:
525 regs.tidv = val;
526 break;
527 case REG_TXDCTL:
528 regs.txdctl = val;
529 break;
530 case REG_TADV:
531 regs.tadv = val;
532 break;
533 case REG_RXCSUM:
534 regs.rxcsum = val;
535 break;
536 case REG_MANC:
537 regs.manc = val;
538 break;
539 default:
540 if (!(daddr >= REG_VFTA && daddr < (REG_VFTA + VLAN_FILTER_TABLE_SIZE*4)) &&
541 !(daddr >= REG_RAL && daddr < (REG_RAL + RCV_ADDRESS_TABLE_SIZE*8)) &&
542 !(daddr >= REG_MTA && daddr < (REG_MTA + MULTICAST_TABLE_SIZE*4)))
543 panic("Write request to unknown register number: %#x\n", daddr);
544 };
545
546 pkt->result = Packet::Success;
547 return pioDelay;
548 }
549
550 void
551 IGbE::postInterrupt(IntTypes t, bool now)
552 {
553 assert(t);
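// The ITR interval is expressed in 256 ns units, hence the
// Clock::Int::ns * 256 * regs.itr.interval() delays used below.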
554
555 // Interrupt is already pending
556 if (t & regs.icr())
557 return;
558
559 if (regs.icr() & regs.imr)
560 {
561 regs.icr = regs.icr() | t;
562 if (!interEvent.scheduled())
563 interEvent.schedule(curTick + Clock::Int::ns * 256 *
564 regs.itr.interval());
565 } else {
566 regs.icr = regs.icr() | t;
567 if (regs.itr.interval() == 0 || now) {
568 if (interEvent.scheduled())
569 interEvent.deschedule();
570 cpuPostInt();
571 } else {
572 DPRINTF(EthernetIntr, "EINT: Scheduling timer interrupt for %d ticks\n",
573 Clock::Int::ns * 256 * regs.itr.interval());
574 if (!interEvent.scheduled())
575 interEvent.schedule(curTick + Clock::Int::ns * 256 * regs.itr.interval());
576 }
577 }
578 }
579
580 void
581 IGbE::cpuPostInt()
582 {
583 if (rdtrEvent.scheduled()) {
584 regs.icr.rxt0(1);
585 rdtrEvent.deschedule();
586 }
587 if (radvEvent.scheduled()) {
588 regs.icr.rxt0(1);
589 radvEvent.deschedule();
590 }
591 if (tadvEvent.scheduled()) {
592 regs.icr.txdw(1);
593 tadvEvent.deschedule();
594 }
595 if (tidvEvent.scheduled()) {
596 regs.icr.txdw(1);
597 tidvEvent.deschedule();
598 }
599
600 regs.icr.int_assert(1);
601 DPRINTF(EthernetIntr, "EINT: Posting interrupt to CPU now. Vector %#x\n",
602 regs.icr());
603 intrPost();
604 }
605
606 void
607 IGbE::cpuClearInt()
608 {
609 if (regs.icr.int_assert()) {
610 regs.icr.int_assert(0);
611 DPRINTF(EthernetIntr, "EINT: Clearing interrupt to CPU now. Vector %#x\n",
612 regs.icr());
613 intrClear();
614 }
615 }
616
617 void
618 IGbE::chkInterrupt()
619 {
620 // Check if we need to clear the cpu interrupt
621 if (!(regs.icr() & regs.imr)) {
622 if (interEvent.scheduled())
623 interEvent.deschedule();
624 if (regs.icr.int_assert())
625 cpuClearInt();
626 }
627
628 if (regs.icr() & regs.imr) {
629 if (regs.itr.interval() == 0) {
630 cpuPostInt();
631 } else {
632 if (!interEvent.scheduled())
633 interEvent.schedule(curTick + Clock::Int::ns * 256 * regs.itr.interval());
634 }
635 }
636
637
638 }
639
640
641 IGbE::RxDescCache::RxDescCache(IGbE *i, const std::string n, int s)
642 : DescCache<RxDesc>(i, n, s), pktDone(false), pktEvent(this)
643
644 {
645 }
646
647 bool
648 IGbE::RxDescCache::writePacket(EthPacketPtr packet)
649 {
650 // We shouldn't have to deal with any of these yet
651 DPRINTF(EthernetDesc, "Packet Length: %d Desc Size: %d\n",
652 packet->length, igbe->regs.rctl.descSize());
653 assert(packet->length < igbe->regs.rctl.descSize());
654
655 if (!unusedCache.size())
656 return false;
657
658 pktPtr = packet;
659
660 igbe->dmaWrite(igbe->platform->pciToDma(unusedCache.front()->buf),
661 packet->length, &pktEvent, packet->data);
662 return true;
663 }
664
665 void
666 IGbE::RxDescCache::pktComplete()
667 {
668 assert(unusedCache.size());
669 RxDesc *desc;
670 desc = unusedCache.front();
671
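// If RCTL.SECRC (strip CRC) is clear, the length written back includes the
// 4-byte Ethernet CRC that real hardware would have left on the packet.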
672 uint16_t crcfixup = igbe->regs.rctl.secrc() ? 0 : 4 ;
673 desc->len = htole((uint16_t)(pktPtr->length + crcfixup));
674 DPRINTF(EthernetDesc, "pktPtr->length: %d stripcrc offset: %d value written: %d %d\n",
675 pktPtr->length, crcfixup,
676 htole((uint16_t)(pktPtr->length + crcfixup)),
677 (uint16_t)(pktPtr->length + crcfixup));
678
679 // no support for a packet checksum start (RXCSUM.PCSS) other than 0
680 assert(igbe->regs.rxcsum.pcss() == 0);
681
682 DPRINTF(EthernetDesc, "Packet written to memory updating Descriptor\n");
683
684 uint8_t status = RXDS_DD | RXDS_EOP;
685 uint8_t err = 0;
686 IpPtr ip(pktPtr);
687 if (ip) {
688 if (igbe->regs.rxcsum.ipofld()) {
689 DPRINTF(EthernetDesc, "Checking IP checksum\n");
690 status |= RXDS_IPCS;
691 desc->csum = htole(cksum(ip));
692 if (cksum(ip) != 0) {
693 err |= RXDE_IPE;
694 DPRINTF(EthernetDesc, "Checksum is bad!!\n");
695 }
696 }
697 TcpPtr tcp(ip);
698 if (tcp && igbe->regs.rxcsum.tuofld()) {
699 DPRINTF(EthernetDesc, "Checking TCP checksum\n");
700 status |= RXDS_TCPCS;
701 desc->csum = htole(cksum(tcp));
702 if (cksum(tcp) != 0) {
703 DPRINTF(EthernetDesc, "Checksum is bad!!\n");
704 err |= RXDE_TCPE;
705 }
706 }
707
708 UdpPtr udp(ip);
709 if (udp && igbe->regs.rxcsum.tuofld()) {
710 DPRINTF(EthernetDesc, "Checking UDP checksum\n");
711 status |= RXDS_UDPCS;
712 desc->csum = htole(cksum(udp));
713 if (cksum(udp) != 0) {
714 DPRINTF(EthernetDesc, "Checksum is bad!!\n");
715 err |= RXDE_TCPE;
716 }
717 }
718 } // if ip
719
720 desc->status = htole(status);
721 desc->errors = htole(err);
722
723 // No vlan support at this point... just set it to 0
724 desc->vlan = 0;
725
726 // Deal with the rx timer interrupts
727 if (igbe->regs.rdtr.delay()) {
728 DPRINTF(EthernetSM, "RXS: Scheduling DTR for %d\n",
729 igbe->regs.rdtr.delay() * igbe->intClock());
730 if (igbe->rdtrEvent.scheduled())
731 igbe->rdtrEvent.reschedule(curTick + igbe->regs.rdtr.delay() *
732 igbe->intClock());
733 else
734 igbe->rdtrEvent.schedule(curTick + igbe->regs.rdtr.delay() *
735 igbe->intClock());
736 }
737
738 if (igbe->regs.radv.idv() && igbe->regs.rdtr.delay()) {
739 DPRINTF(EthernetSM, "RXS: Scheduling ADV for %d\n",
740 igbe->regs.radv.idv() * igbe->intClock());
741 if (!igbe->radvEvent.scheduled())
742 igbe->radvEvent.schedule(curTick + igbe->regs.radv.idv() *
743 igbe->intClock());
744 }
745
746 // if neither radv nor rdtr applies, post the interrupt now; any itr throttling happens in postInterrupt()
747 if (!igbe->regs.rdtr.delay()) {
748 DPRINTF(EthernetSM, "RXS: Receive interrupt delay disabled, posting IT_RXT\n");
749 igbe->postInterrupt(IT_RXT);
750 }
751
752 // If the packet is small enough, interrupt appropriately
753 // I wonder if this is delayed or not?!
754 if (pktPtr->length <= igbe->regs.rsrpd.idv()) {
755 DPRINTF(EthernetSM, "RXS: Posting IT_SRPD beacuse small packet received\n");
756 igbe->postInterrupt(IT_SRPD);
757 }
758
759 DPRINTF(EthernetDesc, "Processing of this descriptor complete\n");
760 unusedCache.pop_front();
761 usedCache.push_back(desc);
762 pktPtr = NULL;
763 enableSm();
764 pktDone = true;
765 igbe->checkDrain();
766 }
767
768 void
769 IGbE::RxDescCache::enableSm()
770 {
771 igbe->rxTick = true;
772 igbe->restartClock();
773 }
774
775 bool
776 IGbE::RxDescCache::packetDone()
777 {
778 if (pktDone) {
779 pktDone = false;
780 return true;
781 }
782 return false;
783 }
784
785 bool
786 IGbE::RxDescCache::hasOutstandingEvents()
787 {
788 return pktEvent.scheduled() || wbEvent.scheduled() ||
789 fetchEvent.scheduled();
790 }
791
792 void
793 IGbE::RxDescCache::serialize(std::ostream &os)
794 {
795 DescCache<RxDesc>::serialize(os);
796 SERIALIZE_SCALAR(pktDone);
797 }
798
799 void
800 IGbE::RxDescCache::unserialize(Checkpoint *cp, const std::string &section)
801 {
802 DescCache<RxDesc>::unserialize(cp, section);
803 UNSERIALIZE_SCALAR(pktDone);
804 }
805
806
807 ///////////////////////////////////// IGbE::TxDesc /////////////////////////////////
808
809 IGbE::TxDescCache::TxDescCache(IGbE *i, const std::string n, int s)
810 : DescCache<TxDesc>(i,n, s), pktDone(false), isTcp(false), pktWaiting(false),
811 pktEvent(this)
812
813 {
814 }
815
816 int
817 IGbE::TxDescCache::getPacketSize()
818 {
819 assert(unusedCache.size());
820
821 TxDesc *desc;
822
823 DPRINTF(EthernetDesc, "Starting processing of descriptor\n");
824
825 while (unusedCache.size() && TxdOp::isContext(unusedCache.front())) {
826 DPRINTF(EthernetDesc, "Got context descriptor type... skipping\n");
827
828 // I think we can just ignore these for now?
829 desc = unusedCache.front();
830 // is this going to be a tcp or udp packet?
831 isTcp = TxdOp::tcp(desc) ? true : false;
832
833 // make sure it's ipv4
834 assert(TxdOp::ip(desc));
835
836 TxdOp::setDd(desc);
837 unusedCache.pop_front();
838 usedCache.push_back(desc);
839 }
840
841 if (!unusedCache.size())
842 return -1;
843
844 DPRINTF(EthernetDesc, "Next TX packet is %d bytes\n",
845 TxdOp::getLen(unusedCache.front()));
846
847 return TxdOp::getLen(unusedCache.front());
848 }
849
850 void
851 IGbE::TxDescCache::getPacketData(EthPacketPtr p)
852 {
853 assert(unusedCache.size());
854
855 TxDesc *desc;
856 desc = unusedCache.front();
857
858 assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) && TxdOp::getLen(desc));
859
860 pktPtr = p;
861
862 pktWaiting = true;
863
864 DPRINTF(EthernetDesc, "Starting DMA of packet\n");
865 igbe->dmaRead(igbe->platform->pciToDma(TxdOp::getBuf(desc)),
866 TxdOp::getLen(desc), &pktEvent, p->data + p->length);
867
868
869 }
870
871 void
872 IGbE::TxDescCache::pktComplete()
873 {
874
875 TxDesc *desc;
876 assert(unusedCache.size());
877 assert(pktPtr);
878
879 DPRINTF(EthernetDesc, "DMA of packet complete\n");
880
881
882 desc = unusedCache.front();
883 assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) && TxdOp::getLen(desc));
884
885 DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
886
887 if (!TxdOp::eop(desc)) {
888 // This only supports two descriptors per tx packet
889 assert(pktPtr->length == 0);
890 pktPtr->length = TxdOp::getLen(desc);
891 unusedCache.pop_front();
892 usedCache.push_back(desc);
893 pktDone = true;
894 pktWaiting = false;
895 pktPtr = NULL;
896
897 DPRINTF(EthernetDesc, "Partial Packet Descriptor Done\n");
898 return;
899 }
900
901 // Set the length of the data in the EtherPacket
902 pktPtr->length += TxdOp::getLen(desc);
903
904 // no support for vlans
905 assert(!TxdOp::vle(desc));
906
907 // we always report status
908 assert(TxdOp::rs(desc));
909
910 // we only support single packet descriptors at this point
911 assert(TxdOp::eop(desc));
912
913 // set that this packet is done
914 TxdOp::setDd(desc);
915
916 DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
917
918 // Checksums are only offloaded for the newer descriptor types
919 if (TxdOp::isData(desc) && ( TxdOp::ixsm(desc) || TxdOp::txsm(desc)) ) {
920 DPRINTF(EthernetDesc, "Calculating checksums for packet\n");
921 IpPtr ip(pktPtr);
922 if (TxdOp::ixsm(desc)) {
923 ip->sum(0);
924 ip->sum(cksum(ip));
925 DPRINTF(EthernetDesc, "Calculated IP checksum\n");
926 }
927 if (TxdOp::txsm(desc)) {
928 if (isTcp) {
929 TcpPtr tcp(ip);
930 tcp->sum(0);
931 tcp->sum(cksum(tcp));
932 DPRINTF(EthernetDesc, "Calculated TCP checksum\n");
933 } else {
934 UdpPtr udp(ip);
935 udp->sum(0);
936 udp->sum(cksum(udp));
937 DPRINTF(EthernetDesc, "Calculated UDP checksum\n");
938 }
939 }
940 }
941
942 if (TxdOp::ide(desc)) {
943 // Deal with the tx timer interrupts
944 DPRINTF(EthernetDesc, "Descriptor had IDE set\n");
945 if (igbe->regs.tidv.idv()) {
946 DPRINTF(EthernetDesc, "setting tidv\n");
947 if (igbe->tidvEvent.scheduled())
948 igbe->tidvEvent.reschedule(curTick + igbe->regs.tidv.idv() *
949 igbe->intClock());
950 else
951 igbe->tidvEvent.schedule(curTick + igbe->regs.tidv.idv() *
952 igbe->intClock());
953 }
954
955 if (igbe->regs.tadv.idv() && igbe->regs.tidv.idv()) {
956 DPRINTF(EthernetDesc, "setting tadv\n");
957 if (!igbe->tadvEvent.scheduled())
958 igbe->tadvEvent.schedule(curTick + igbe->regs.tadv.idv() *
959 igbe->intClock());
960 }
961 }
962
963
964
965 unusedCache.pop_front();
966 usedCache.push_back(desc);
967 pktDone = true;
968 pktWaiting = false;
969 pktPtr = NULL;
970
971 DPRINTF(EthernetDesc, "Descriptor Done\n");
972
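// WTHRESH == 0 means write descriptors back immediately; otherwise defer
// until enough used descriptors have accumulated. (cacheBlockSize()-1)>>4
// converts a cache block into a count of 16-byte descriptors so writebacks
// stay roughly cache-block sized.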
973 if (igbe->regs.txdctl.wthresh() == 0) {
974 DPRINTF(EthernetDesc, "WTHRESH == 0, writing back descriptor\n");
975 writeback(0);
976 } else if (igbe->regs.txdctl.wthresh() <= usedCache.size()) {
977 DPRINTF(EthernetDesc, "used >= WTHRESH, writing back descriptor\n");
978 writeback((igbe->cacheBlockSize()-1)>>4);
979 }
980 igbe->checkDrain();
981 }
982
983 void
984 IGbE::TxDescCache::serialize(std::ostream &os)
985 {
986 DescCache<TxDesc>::serialize(os);
987 SERIALIZE_SCALAR(pktDone);
988 SERIALIZE_SCALAR(isTcp);
989 SERIALIZE_SCALAR(pktWaiting);
990 }
991
992 void
993 IGbE::TxDescCache::unserialize(Checkpoint *cp, const std::string &section)
994 {
995 DescCache<TxDesc>::unserialize(cp, section);
996 UNSERIALIZE_SCALAR(pktDone);
997 UNSERIALIZE_SCALAR(isTcp);
998 UNSERIALIZE_SCALAR(pktWaiting);
999 }
1000
1001 bool
1002 IGbE::TxDescCache::packetAvailable()
1003 {
1004 if (pktDone) {
1005 pktDone = false;
1006 return true;
1007 }
1008 return false;
1009 }
1010
1011 void
1012 IGbE::TxDescCache::enableSm()
1013 {
1014 igbe->txTick = true;
1015 igbe->restartClock();
1016 }
1017
1018 bool
1019 IGbE::TxDescCache::hasOutstandingEvents()
1020 {
1021 return pktEvent.scheduled() || wbEvent.scheduled() ||
1022 fetchEvent.scheduled();
1023 }
1024
1025
1026 ///////////////////////////////////// IGbE /////////////////////////////////
1027
1028 void
1029 IGbE::restartClock()
1030 {
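// Align the next tick to the device clock: round curTick down to a clock
// edge and schedule one cycle later.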
1031 if (!tickEvent.scheduled() && (rxTick || txTick) && getState() ==
1032 SimObject::Running)
1033 tickEvent.schedule((curTick/cycles(1)) * cycles(1) + cycles(1));
1034 }
1035
1036 unsigned int
1037 IGbE::drain(Event *de)
1038 {
1039 unsigned int count;
1040 count = pioPort->drain(de) + dmaPort->drain(de);
1041 if (rxDescCache.hasOutstandingEvents() ||
1042 txDescCache.hasOutstandingEvents()) {
1043 count++;
1044 drainEvent = de;
1045 }
1046
1047 txFifoTick = false;
1048 txTick = false;
1049 rxTick = false;
1050
1051 if (tickEvent.scheduled())
1052 tickEvent.deschedule();
1053
1054 if (count)
1055 changeState(Draining);
1056 else
1057 changeState(Drained);
1058
1059 return count;
1060 }
1061
1062 void
1063 IGbE::resume()
1064 {
1065 SimObject::resume();
1066
1067 txFifoTick = true;
1068 txTick = true;
1069 rxTick = true;
1070
1071 restartClock();
1072 }
1073
1074 void
1075 IGbE::checkDrain()
1076 {
1077 if (!drainEvent)
1078 return;
1079
1080 if (rxDescCache.hasOutstandingEvents() ||
1081 txDescCache.hasOutstandingEvents()) {
1082 drainEvent->process();
1083 drainEvent = NULL;
1084 }
1085 }
1086
1087 void
1088 IGbE::txStateMachine()
1089 {
1090 if (!regs.tctl.en()) {
1091 txTick = false;
1092 DPRINTF(EthernetSM, "TXS: TX disabled, stopping ticking\n");
1093 return;
1094 }
1095
1096 // If we have a packet available and its length is not 0 (meaning it's not
1097 // a multi-descriptor packet) put it in the FIFO; otherwise on the next
1098 // iteration we'll get the rest of the data
1099 if (txPacket && txDescCache.packetAvailable() && txPacket->length) {
1100 bool success;
1101 DPRINTF(EthernetSM, "TXS: packet placed in TX FIFO\n");
1102 success = txFifo.push(txPacket);
1103 txFifoTick = true;
1104 assert(success);
1105 txPacket = NULL;
1106 txDescCache.writeback((cacheBlockSize()-1)>>4);
1107 return;
1108 }
1109
1110 // Only support descriptor granularity
1111 assert(regs.txdctl.gran());
1112 if (regs.txdctl.lwthresh() && txDescCache.descLeft() < (regs.txdctl.lwthresh() * 8)) {
1113 DPRINTF(EthernetSM, "TXS: LWTHRESH caused posting of TXDLOW\n");
1114 postInterrupt(IT_TXDLOW);
1115 }
1116
1117 if (!txPacket) {
1118 txPacket = new EthPacketData(16384);
1119 }
1120
1121 if (!txDescCache.packetWaiting()) {
1122 if (txDescCache.descLeft() == 0) {
1123 DPRINTF(EthernetSM, "TXS: No descriptors left in ring, forcing "
1124 "writeback stopping ticking and posting TXQE\n");
1125 txDescCache.writeback(0);
1126 txTick = false;
1127 postInterrupt(IT_TXQE, true);
1128 return;
1129 }
1130
1131
1132 if (!(txDescCache.descUnused())) {
1133 DPRINTF(EthernetSM, "TXS: No descriptors available in cache, fetching and stopping ticking\n");
1134 txTick = false;
1135 txDescCache.fetchDescriptors();
1136 return;
1137 }
1138
1139 int size;
1140 size = txDescCache.getPacketSize();
1141 if (size > 0 && txFifo.avail() > size) {
1142 DPRINTF(EthernetSM, "TXS: Reserving %d bytes in FIFO and beginning "
1143 "DMA of next packet\n", size);
1144 txFifo.reserve(size);
1145 txDescCache.getPacketData(txPacket);
1146 } else if (size <= 0) {
1147 DPRINTF(EthernetSM, "TXS: No packets to get, writing back used descriptors\n");
1148 txDescCache.writeback(0);
1149 } else {
1150 DPRINTF(EthernetSM, "TXS: FIFO full, stopping ticking until space "
1151 "available in FIFO\n");
1152 txDescCache.writeback((cacheBlockSize()-1)>>4);
1153 txTick = false;
1154 }
1155
1156
1157 return;
1158 }
1159 }
1160
1161 bool
1162 IGbE::ethRxPkt(EthPacketPtr pkt)
1163 {
1164 DPRINTF(Ethernet, "RxFIFO: Receiving packet from wire\n");
1165 if (!regs.rctl.en()) {
1166 DPRINTF(Ethernet, "RxFIFO: RX not enabled, dropping\n");
1167 return true;
1168 }
1169
1170 // restart the state machines if they are stopped
1171 rxTick = true;
1172 if ((rxTick || txTick) && !tickEvent.scheduled()) {
1173 DPRINTF(EthernetSM, "RXS: received packet into fifo, starting ticking\n");
1174 restartClock();
1175 }
1176
1177 if (!rxFifo.push(pkt)) {
1178 DPRINTF(Ethernet, "RxFIFO: Packet won't fit in fifo... dropped\n");
1179 postInterrupt(IT_RXO, true);
1180 return false;
1181 }
1182 return true;
1183 }
1184
1185
1186 void
1187 IGbE::rxStateMachine()
1188 {
1189 if (!regs.rctl.en()) {
1190 rxTick = false;
1191 DPRINTF(EthernetSM, "RXS: RX disabled, stopping ticking\n");
1192 return;
1193 }
1194
1195 // If the packet is done check for interrupts/descriptors/etc
1196 if (rxDescCache.packetDone()) {
1197 DPRINTF(EthernetSM, "RXS: Packet completed DMA to memory\n");
1198 int descLeft = rxDescCache.descLeft();
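// Note: the fall-through in the switch below is intentional. RDMTS picks
// the free-descriptor threshold as a fraction of RDLEN (1/2, 1/4, or 1/8);
// only when descLeft drops to or below that fraction is RXDMT posted.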
1199 switch (regs.rctl.rdmts()) {
1200 case 2: if (descLeft > .125 * regs.rdlen()) break;
1201 case 1: if (descLeft > .250 * regs.rdlen()) break;
1202 case 0: if (descLeft > .500 * regs.rdlen()) break;
1203 DPRINTF(Ethernet, "RXS: Interrupting (RXDMT) because of descriptors left\n");
1204 postInterrupt(IT_RXDMT);
1205 break;
1206 }
1207
1208 if (descLeft == 0) {
1209 DPRINTF(EthernetSM, "RXS: No descriptors left in ring, forcing"
1210 " writeback and stopping ticking\n");
1211 rxDescCache.writeback(0);
1212 rxTick = false;
1213 }
1214
1215 // only support descriptor granularities
1216 assert(regs.rxdctl.gran());
1217
1218 if (regs.rxdctl.wthresh() >= rxDescCache.descUsed()) {
1219 DPRINTF(EthernetSM, "RXS: Writing back because WTHRESH >= descUsed\n");
1220 if (regs.rxdctl.wthresh() < (cacheBlockSize()>>4))
1221 rxDescCache.writeback(regs.rxdctl.wthresh()-1);
1222 else
1223 rxDescCache.writeback((cacheBlockSize()-1)>>4);
1224 }
1225
1226 if ((rxDescCache.descUnused() < regs.rxdctl.pthresh()) &&
1227 ((rxDescCache.descLeft() - rxDescCache.descUnused()) > regs.rxdctl.hthresh())) {
1228 DPRINTF(EthernetSM, "RXS: Fetching descriptors because descUnused < PTHRESH\n");
1229 rxDescCache.fetchDescriptors();
1230 }
1231
1232 if (rxDescCache.descUnused() == 0) {
1233 DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
1234 "fetching descriptors and stopping ticking\n");
1235 rxTick = false;
1236 rxDescCache.fetchDescriptors();
1237 }
1238 return;
1239 }
1240
1241 if (!rxDescCache.descUnused()) {
1242 DPRINTF(EthernetSM, "RXS: No descriptors available in cache, stopping ticking\n");
1243 rxTick = false;
1244 DPRINTF(EthernetSM, "RXS: No descriptors available, fetching\n");
1245 rxDescCache.fetchDescriptors();
1246 return;
1247 }
1248
1249 if (rxFifo.empty()) {
1250 DPRINTF(EthernetSM, "RXS: RxFIFO empty, stopping ticking\n");
1251 rxTick = false;
1252 return;
1253 }
1254
1255 EthPacketPtr pkt;
1256 pkt = rxFifo.front();
1257
1258 DPRINTF(EthernetSM, "RXS: Writing packet into memory\n");
1259 if (!rxDescCache.writePacket(pkt)) {
1260 return;
1261 }
1262
1263 DPRINTF(EthernetSM, "RXS: Removing packet from FIFO\n");
1264 rxFifo.pop();
1265 DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n");
1266 rxTick = false;
1267 }
1268
1269 void
1270 IGbE::txWire()
1271 {
1272 if (txFifo.empty()) {
1273 txFifoTick = false;
1274 return;
1275 }
1276
1277
1278 if (etherInt->sendPacket(txFifo.front())) {
1279 DPRINTF(EthernetSM, "TxFIFO: Successful transmit, bytes available in fifo: %d\n",
1280 txFifo.avail());
1281 txFifo.pop();
1282 } else {
1283 // We'll get woken up when ethTxDone() is called for this packet
1284 txFifoTick = false;
1285 }
1286
1287 }
1288
1289 void
1290 IGbE::tick()
1291 {
1292 DPRINTF(EthernetSM, "IGbE: -------------- Cycle --------------\n");
1293
1294 if (rxTick)
1295 rxStateMachine();
1296
1297 if (txTick)
1298 txStateMachine();
1299
1300 if (txFifoTick)
1301 txWire();
1302
1303
1304 if (rxTick || txTick || txFifoTick)
1305 tickEvent.schedule(curTick + cycles(1));
1306 }
1307
1308 void
1309 IGbE::ethTxDone()
1310 {
1311 // restart the tx state machines if they are stopped:
1312 // the fifo so it can send another packet, and
1313 // the tx sm so it can put more data into the fifo
1314 txFifoTick = true;
1315 txTick = true;
1316
1317 restartClock();
1318 DPRINTF(EthernetSM, "TxFIFO: Transmission complete\n");
1319 }
1320
1321 void
1322 IGbE::serialize(std::ostream &os)
1323 {
1324 PciDev::serialize(os);
1325
1326 regs.serialize(os);
1327 SERIALIZE_SCALAR(eeOpBits);
1328 SERIALIZE_SCALAR(eeAddrBits);
1329 SERIALIZE_SCALAR(eeDataBits);
1330 SERIALIZE_SCALAR(eeOpcode);
1331 SERIALIZE_SCALAR(eeAddr);
1332 SERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);
1333
1334 rxFifo.serialize("rxfifo", os);
1335 txFifo.serialize("txfifo", os);
1336
1337 bool txPktExists = txPacket;
1338 SERIALIZE_SCALAR(txPktExists);
1339 if (txPktExists)
1340 txPacket->serialize("txpacket", os);
1341
1342 Tick rdtr_time = 0, radv_time = 0, tidv_time = 0, tadv_time = 0,
1343 inter_time = 0;
1344
1345 if (rdtrEvent.scheduled())
1346 rdtr_time = rdtrEvent.when();
1347 SERIALIZE_SCALAR(rdtr_time);
1348
1349 if (radvEvent.scheduled())
1350 radv_time = radvEvent.when();
1351 SERIALIZE_SCALAR(radv_time);
1352
1353 if (tidvEvent.scheduled())
1354 tidv_time = tidvEvent.when();
1355 SERIALIZE_SCALAR(tidv_time);
1356
1357 if (tadvEvent.scheduled())
1358 tadv_time = tadvEvent.when();
1359 SERIALIZE_SCALAR(tadv_time);
1360
1361 if (interEvent.scheduled())
1362 inter_time = interEvent.when();
1363 SERIALIZE_SCALAR(inter_time);
1364
1365 nameOut(os, csprintf("%s.TxDescCache", name()));
1366 txDescCache.serialize(os);
1367
1368 nameOut(os, csprintf("%s.RxDescCache", name()));
1369 rxDescCache.serialize(os);
1370 }
1371
1372 void
1373 IGbE::unserialize(Checkpoint *cp, const std::string &section)
1374 {
1375 PciDev::unserialize(cp, section);
1376
1377 regs.unserialize(cp, section);
1378 UNSERIALIZE_SCALAR(eeOpBits);
1379 UNSERIALIZE_SCALAR(eeAddrBits);
1380 UNSERIALIZE_SCALAR(eeDataBits);
1381 UNSERIALIZE_SCALAR(eeOpcode);
1382 UNSERIALIZE_SCALAR(eeAddr);
1383 UNSERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);
1384
1385 rxFifo.unserialize("rxfifo", cp, section);
1386 txFifo.unserialize("txfifo", cp, section);
1387
1388 bool txPktExists;
1389 UNSERIALIZE_SCALAR(txPktExists);
1390 if (txPktExists) {
1391 txPacket = new EthPacketData(16384);
1392 txPacket->unserialize("txpacket", cp, section);
1393 }
1394
1395 rxTick = true;
1396 txTick = true;
1397 txFifoTick = true;
1398
1399 Tick rdtr_time, radv_time, tidv_time, tadv_time, inter_time;
1400 UNSERIALIZE_SCALAR(rdtr_time);
1401 UNSERIALIZE_SCALAR(radv_time);
1402 UNSERIALIZE_SCALAR(tidv_time);
1403 UNSERIALIZE_SCALAR(tadv_time);
1404 UNSERIALIZE_SCALAR(inter_time);
1405
1406 if (rdtr_time)
1407 rdtrEvent.schedule(rdtr_time);
1408
1409 if (radv_time)
1410 radvEvent.schedule(radv_time);
1411
1412 if (tidv_time)
1413 tidvEvent.schedule(tidv_time);
1414
1415 if (tadv_time)
1416 tadvEvent.schedule(tadv_time);
1417
1418 if (inter_time)
1419 interEvent.schedule(inter_time);
1420
1421 txDescCache.unserialize(cp, csprintf("%s.TxDescCache", section));
1422
1423 rxDescCache.unserialize(cp, csprintf("%s.RxDescCache", section));
1424 }
1425
1426
1427 BEGIN_DECLARE_SIM_OBJECT_PARAMS(IGbEInt)
1428
1429 SimObjectParam<EtherInt *> peer;
1430 SimObjectParam<IGbE *> device;
1431
1432 END_DECLARE_SIM_OBJECT_PARAMS(IGbEInt)
1433
1434 BEGIN_INIT_SIM_OBJECT_PARAMS(IGbEInt)
1435
1436 INIT_PARAM_DFLT(peer, "peer interface", NULL),
1437 INIT_PARAM(device, "Ethernet device of this interface")
1438
1439 END_INIT_SIM_OBJECT_PARAMS(IGbEInt)
1440
1441 CREATE_SIM_OBJECT(IGbEInt)
1442 {
1443 IGbEInt *dev_int = new IGbEInt(getInstanceName(), device);
1444
1445 EtherInt *p = (EtherInt *)peer;
1446 if (p) {
1447 dev_int->setPeer(p);
1448 p->setPeer(dev_int);
1449 }
1450
1451 return dev_int;
1452 }
1453
1454 REGISTER_SIM_OBJECT("IGbEInt", IGbEInt)
1455
1456
1457 BEGIN_DECLARE_SIM_OBJECT_PARAMS(IGbE)
1458
1459 SimObjectParam<System *> system;
1460 SimObjectParam<Platform *> platform;
1461 SimObjectParam<PciConfigData *> configdata;
1462 Param<uint32_t> pci_bus;
1463 Param<uint32_t> pci_dev;
1464 Param<uint32_t> pci_func;
1465 Param<Tick> pio_latency;
1466 Param<Tick> config_latency;
1467 Param<std::string> hardware_address;
1468 Param<bool> use_flow_control;
1469 Param<int> rx_fifo_size;
1470 Param<int> tx_fifo_size;
1471 Param<int> rx_desc_cache_size;
1472 Param<int> tx_desc_cache_size;
1473 Param<Tick> clock;
1474
1475
1476 END_DECLARE_SIM_OBJECT_PARAMS(IGbE)
1477
1478 BEGIN_INIT_SIM_OBJECT_PARAMS(IGbE)
1479
1480 INIT_PARAM(system, "System pointer"),
1481 INIT_PARAM(platform, "Platform pointer"),
1482 INIT_PARAM(configdata, "PCI Config data"),
1483 INIT_PARAM(pci_bus, "PCI bus ID"),
1484 INIT_PARAM(pci_dev, "PCI device number"),
1485 INIT_PARAM(pci_func, "PCI function code"),
1486 INIT_PARAM_DFLT(pio_latency, "Programmed IO latency in bus cycles", 1),
1487 INIT_PARAM(config_latency, "Number of cycles for a config read or write"),
1488 INIT_PARAM(hardware_address, "Ethernet Hardware Address"),
1489 INIT_PARAM(use_flow_control,"Should the device use xon/off packets"),
1490 INIT_PARAM(rx_fifo_size,"Size of the RX FIFO"),
1491 INIT_PARAM(tx_fifo_size,"Size of the TX FIFO"),
1492 INIT_PARAM(rx_desc_cache_size,"Size of the RX descriptor cache"),
1493 INIT_PARAM(tx_desc_cache_size,"Size of the TX descriptor cache"),
1494 INIT_PARAM(clock,"Clock rate for the device to tick at")
1495
1496 END_INIT_SIM_OBJECT_PARAMS(IGbE)
1497
1498
1499 CREATE_SIM_OBJECT(IGbE)
1500 {
1501 IGbE::Params *params = new IGbE::Params;
1502
1503 params->name = getInstanceName();
1504 params->platform = platform;
1505 params->system = system;
1506 params->configData = configdata;
1507 params->busNum = pci_bus;
1508 params->deviceNum = pci_dev;
1509 params->functionNum = pci_func;
1510 params->pio_delay = pio_latency;
1511 params->config_delay = config_latency;
1512 params->hardware_address = hardware_address;
1513 params->use_flow_control = use_flow_control;
1514 params->rx_fifo_size = rx_fifo_size;
1515 params->tx_fifo_size = tx_fifo_size;
1516 params->rx_desc_cache_size = rx_desc_cache_size;
1517 params->tx_desc_cache_size = tx_desc_cache_size;
1518 params->clock = clock;
1519
1520
1521 return new IGbE(params);
1522 }
1523
1524 REGISTER_SIM_OBJECT("IGbE", IGbE)