[gem5.git] / src / dev / i8254xGBe.cc
1 /*
2 * Copyright (c) 2006 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Ali Saidi
29 */
30
31 /* @file
32 * Device model for Intel's 8254x line of gigabit ethernet controllers.
33  * In particular it models an 82547 revision 2 (82547GI) MAC, because it seems to have the
34 * fewest workarounds in the driver. It will probably work with most of the
35 * other MACs with slight modifications.
36 */
37
38
39 /*
40  * @todo Really there are multiple DMA engines; we should implement them.
41 */
42
43 #include "base/inet.hh"
44 #include "base/trace.hh"
45 #include "dev/i8254xGBe.hh"
46 #include "mem/packet.hh"
47 #include "mem/packet_access.hh"
48 #include "sim/builder.hh"
49 #include "sim/stats.hh"
50 #include "sim/system.hh"
51
52 #include <algorithm>
53
54 using namespace iGbReg;
55 using namespace Net;
56
57 IGbE::IGbE(Params *p)
58 : PciDev(p), etherInt(NULL), drainEvent(NULL), useFlowControl(p->use_flow_control),
59 rxFifo(p->rx_fifo_size), txFifo(p->tx_fifo_size), rxTick(false),
60 txTick(false), txFifoTick(false), rdtrEvent(this), radvEvent(this),
61 tadvEvent(this), tidvEvent(this), tickEvent(this), interEvent(this),
62 rxDescCache(this, name()+".RxDesc", p->rx_desc_cache_size),
63 txDescCache(this, name()+".TxDesc", p->tx_desc_cache_size), clock(p->clock)
64 {
65     // Initialize internal registers per Intel documentation
66     // All registers are initialized to 0 by the per-register constructor
67 regs.ctrl.fd(1);
68 regs.ctrl.lrst(1);
69 regs.ctrl.speed(2);
70 regs.ctrl.frcspd(1);
71 regs.sts.speed(3); // Say we're 1000Mbps
72 regs.sts.fd(1); // full duplex
73 regs.sts.lu(1); // link up
74 regs.eecd.fwe(1);
75 regs.eecd.ee_type(1);
76 regs.imr = 0;
77 regs.iam = 0;
78 regs.rxdctl.gran(1);
79 regs.rxdctl.wthresh(1);
80 regs.fcrth(1);
81
82 regs.pba.rxa(0x30);
83 regs.pba.txa(0x10);
84
85 eeOpBits = 0;
86 eeAddrBits = 0;
87 eeDataBits = 0;
88 eeOpcode = 0;
89
90     // clear all 64 16-bit words of the eeprom
91 memset(&flash, 0, EEPROM_SIZE*2);
92
93 // Set the MAC address
94 memcpy(flash, p->hardware_address.bytes(), ETH_ADDR_LEN);
95 for (int x = 0; x < ETH_ADDR_LEN/2; x++)
96 flash[x] = htobe(flash[x]);
97
98 uint16_t csum = 0;
99 for (int x = 0; x < EEPROM_SIZE; x++)
100 csum += htobe(flash[x]);
101
102
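    // The driver considers the eeprom valid when the 16-bit sum of all
    // EEPROM_SIZE words equals EEPROM_CSUM, so the last word is set to
    // whatever value makes that sum come out right.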
103 // Magic happy checksum value
104 flash[EEPROM_SIZE-1] = htobe((uint16_t)(EEPROM_CSUM - csum));
105
106 rxFifo.clear();
107 txFifo.clear();
108 }
109
110
111 Tick
112 IGbE::writeConfig(PacketPtr pkt)
113 {
114 int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
115 if (offset < PCI_DEVICE_SPECIFIC)
116 PciDev::writeConfig(pkt);
117 else
118 panic("Device specific PCI config space not implemented.\n");
119
120 ///
121     /// Some work may need to be done here based on the PCI COMMAND bits.
122 ///
123
124 return pioDelay;
125 }
126
127 Tick
128 IGbE::read(PacketPtr pkt)
129 {
130 int bar;
131 Addr daddr;
132
133 if (!getBAR(pkt->getAddr(), bar, daddr))
134 panic("Invalid PCI memory access to unmapped memory.\n");
135
136 // Only Memory register BAR is allowed
137 assert(bar == 0);
138
139 // Only 32bit accesses allowed
140 assert(pkt->getSize() == 4);
141
142 DPRINTF(Ethernet, "Read device register %#X\n", daddr);
143
144 pkt->allocate();
145
146 ///
147 /// Handle read of register here
148 ///
149
150
151 switch (daddr) {
152 case REG_CTRL:
153 pkt->set<uint32_t>(regs.ctrl());
154 break;
155 case REG_STATUS:
156 pkt->set<uint32_t>(regs.sts());
157 break;
158 case REG_EECD:
159 pkt->set<uint32_t>(regs.eecd());
160 break;
161 case REG_EERD:
162 pkt->set<uint32_t>(regs.eerd());
163 break;
164 case REG_CTRL_EXT:
165 pkt->set<uint32_t>(regs.ctrl_ext());
166 break;
167 case REG_MDIC:
168 pkt->set<uint32_t>(regs.mdic());
169 break;
170 case REG_ICR:
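        // ICR is read-to-clear: the read returns the pending causes and, if an
        // interrupt is currently asserted (or the mask is empty), clears them;
        // with CTRL_EXT.IAME set and the interrupt asserted, the IAM bits are
        // also cleared from IMR.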
171 DPRINTF(Ethernet, "Reading ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n", regs.icr(),
172 regs.imr, regs.iam, regs.ctrl_ext.iame());
173 pkt->set<uint32_t>(regs.icr());
174 if (regs.icr.int_assert() || regs.imr == 0) {
175 regs.icr = regs.icr() & ~mask(30);
176 DPRINTF(Ethernet, "Cleared ICR. ICR=%#x\n", regs.icr());
177 }
178 if (regs.ctrl_ext.iame() && regs.icr.int_assert())
179 regs.imr &= ~regs.iam;
180 chkInterrupt();
181 break;
182 case REG_ITR:
183 pkt->set<uint32_t>(regs.itr());
184 break;
185 case REG_RCTL:
186 pkt->set<uint32_t>(regs.rctl());
187 break;
188 case REG_FCTTV:
189 pkt->set<uint32_t>(regs.fcttv());
190 break;
191 case REG_TCTL:
192 pkt->set<uint32_t>(regs.tctl());
193 break;
194 case REG_PBA:
195 pkt->set<uint32_t>(regs.pba());
196 break;
197 case REG_WUC:
198 case REG_LEDCTL:
199 pkt->set<uint32_t>(0); // We don't care, so just return 0
200 break;
201 case REG_FCRTL:
202 pkt->set<uint32_t>(regs.fcrtl());
203 break;
204 case REG_FCRTH:
205 pkt->set<uint32_t>(regs.fcrth());
206 break;
207 case REG_RDBAL:
208 pkt->set<uint32_t>(regs.rdba.rdbal());
209 break;
210 case REG_RDBAH:
211 pkt->set<uint32_t>(regs.rdba.rdbah());
212 break;
213 case REG_RDLEN:
214 pkt->set<uint32_t>(regs.rdlen());
215 break;
216 case REG_RDH:
217 pkt->set<uint32_t>(regs.rdh());
218 break;
219 case REG_RDT:
220 pkt->set<uint32_t>(regs.rdt());
221 break;
222 case REG_RDTR:
223 pkt->set<uint32_t>(regs.rdtr());
224 if (regs.rdtr.fpd()) {
225 rxDescCache.writeback(0);
226 DPRINTF(EthernetIntr, "Posting interrupt because of RDTR.FPD write\n");
227 postInterrupt(IT_RXT);
228 regs.rdtr.fpd(0);
229 }
230 break;
231 case REG_RADV:
232 pkt->set<uint32_t>(regs.radv());
233 break;
234 case REG_TDBAL:
235 pkt->set<uint32_t>(regs.tdba.tdbal());
236 break;
237 case REG_TDBAH:
238 pkt->set<uint32_t>(regs.tdba.tdbah());
239 break;
240 case REG_TDLEN:
241 pkt->set<uint32_t>(regs.tdlen());
242 break;
243 case REG_TDH:
244 pkt->set<uint32_t>(regs.tdh());
245 break;
246 case REG_TDT:
247 pkt->set<uint32_t>(regs.tdt());
248 break;
249 case REG_TIDV:
250 pkt->set<uint32_t>(regs.tidv());
251 break;
252 case REG_TXDCTL:
253 pkt->set<uint32_t>(regs.txdctl());
254 break;
255 case REG_TADV:
256 pkt->set<uint32_t>(regs.tadv());
257 break;
258 case REG_RXCSUM:
259 pkt->set<uint32_t>(regs.rxcsum());
260 break;
261 case REG_MANC:
262 pkt->set<uint32_t>(regs.manc());
263 break;
264 default:
265 if (!(daddr >= REG_VFTA && daddr < (REG_VFTA + VLAN_FILTER_TABLE_SIZE*4)) &&
266 !(daddr >= REG_RAL && daddr < (REG_RAL + RCV_ADDRESS_TABLE_SIZE*8)) &&
267 !(daddr >= REG_MTA && daddr < (REG_MTA + MULTICAST_TABLE_SIZE*4)) &&
268 !(daddr >= REG_CRCERRS && daddr < (REG_CRCERRS + STATS_REGS_SIZE)))
269 panic("Read request to unknown register number: %#x\n", daddr);
270 else
271 pkt->set<uint32_t>(0);
272 };
273
274 pkt->result = Packet::Success;
275 return pioDelay;
276 }
277
278 Tick
279 IGbE::write(PacketPtr pkt)
280 {
281 int bar;
282 Addr daddr;
283
284
285 if (!getBAR(pkt->getAddr(), bar, daddr))
286 panic("Invalid PCI memory access to unmapped memory.\n");
287
288 // Only Memory register BAR is allowed
289 assert(bar == 0);
290
291 // Only 32bit accesses allowed
292 assert(pkt->getSize() == sizeof(uint32_t));
293
294 DPRINTF(Ethernet, "Wrote device register %#X value %#X\n", daddr, pkt->get<uint32_t>());
295
296 ///
297 /// Handle write of register here
298 ///
299 uint32_t val = pkt->get<uint32_t>();
300
301 Regs::RCTL oldrctl;
302 Regs::TCTL oldtctl;
303
304 switch (daddr) {
305 case REG_CTRL:
306 regs.ctrl = val;
307 if (regs.ctrl.tfce())
308 warn("TX Flow control enabled, should implement\n");
309 if (regs.ctrl.rfce())
310 warn("RX Flow control enabled, should implement\n");
311 break;
312 case REG_CTRL_EXT:
313 regs.ctrl_ext = val;
314 break;
315 case REG_STATUS:
316 regs.sts = val;
317 break;
318 case REG_EECD:
319 int oldClk;
320 oldClk = regs.eecd.sk();
321 regs.eecd = val;
322         // See if this is an eeprom access and emulate accordingly
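        // The driver bit-bangs the SPI interface through EECD: on each rising
        // edge of SK the next DI bit is shifted into the 8-bit opcode, then (for
        // a read) into the 8-bit byte address, after which the 16 data bits of
        // the addressed word are shifted out MSB-first on DO.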
323 if (!oldClk && regs.eecd.sk()) {
324 if (eeOpBits < 8) {
325 eeOpcode = eeOpcode << 1 | regs.eecd.din();
326 eeOpBits++;
327 } else if (eeAddrBits < 8 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
328 eeAddr = eeAddr << 1 | regs.eecd.din();
329 eeAddrBits++;
330 } else if (eeDataBits < 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
331 assert(eeAddr>>1 < EEPROM_SIZE);
332 DPRINTF(EthernetEEPROM, "EEPROM bit read: %d word: %#X\n",
333 flash[eeAddr>>1] >> eeDataBits & 0x1, flash[eeAddr>>1]);
334 regs.eecd.dout((flash[eeAddr>>1] >> (15-eeDataBits)) & 0x1);
335 eeDataBits++;
336 } else if (eeDataBits < 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI) {
337 regs.eecd.dout(0);
338 eeDataBits++;
339 } else
340 panic("What's going on with eeprom interface? opcode:"
341 " %#x:%d addr: %#x:%d, data: %d\n", (uint32_t)eeOpcode,
342 (uint32_t)eeOpBits, (uint32_t)eeAddr,
343 (uint32_t)eeAddrBits, (uint32_t)eeDataBits);
344
345 // Reset everything for the next command
346 if ((eeDataBits == 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) ||
347 (eeDataBits == 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI)) {
348 eeOpBits = 0;
349 eeAddrBits = 0;
350 eeDataBits = 0;
351 eeOpcode = 0;
352 eeAddr = 0;
353 }
354
355 DPRINTF(EthernetEEPROM, "EEPROM: opcode: %#X:%d addr: %#X:%d\n",
356 (uint32_t)eeOpcode, (uint32_t) eeOpBits,
357 (uint32_t)eeAddr>>1, (uint32_t)eeAddrBits);
358 if (eeOpBits == 8 && !(eeOpcode == EEPROM_READ_OPCODE_SPI ||
359 eeOpcode == EEPROM_RDSR_OPCODE_SPI ))
360 panic("Unknown eeprom opcode: %#X:%d\n", (uint32_t)eeOpcode,
361 (uint32_t)eeOpBits);
362
363
364 }
365         // If the driver requests eeprom access, grant it immediately
366 regs.eecd.ee_gnt(regs.eecd.ee_req());
367 break;
368 case REG_EERD:
369 regs.eerd = val;
370 break;
371 case REG_MDIC:
372 regs.mdic = val;
373 if (regs.mdic.i())
374 panic("No support for interrupt on mdic complete\n");
375 if (regs.mdic.phyadd() != 1)
376 panic("No support for reading anything but phy\n");
377 DPRINTF(Ethernet, "%s phy address %x\n", regs.mdic.op() == 1 ? "Writing"
378 : "Reading", regs.mdic.regadd());
379 switch (regs.mdic.regadd()) {
380 case PHY_PSTATUS:
381 regs.mdic.data(0x796D); // link up
382 break;
383 case PHY_PID:
384 regs.mdic.data(0x02A8);
385 break;
386 case PHY_EPID:
387 regs.mdic.data(0x0380);
388 break;
389 case PHY_GSTATUS:
390 regs.mdic.data(0x7C00);
391 break;
392 case PHY_EPSTATUS:
393 regs.mdic.data(0x3000);
394 break;
395 case PHY_AGC:
396 regs.mdic.data(0x180); // some random length
397 break;
398 default:
399 regs.mdic.data(0);
400 }
401 regs.mdic.r(1);
402 break;
403 case REG_ICR:
404 DPRINTF(Ethernet, "Writing ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n", regs.icr(),
405 regs.imr, regs.iam, regs.ctrl_ext.iame());
406 if (regs.ctrl_ext.iame())
407 regs.imr &= ~regs.iam;
408 regs.icr = ~bits(val,30,0) & regs.icr();
409 chkInterrupt();
410 break;
411 case REG_ITR:
412 regs.itr = val;
413 break;
414 case REG_ICS:
415 DPRINTF(EthernetIntr, "Posting interrupt because of ICS write\n");
416 postInterrupt((IntTypes)val);
417 break;
418 case REG_IMS:
419 regs.imr |= val;
420 chkInterrupt();
421 break;
422 case REG_IMC:
423 regs.imr &= ~val;
424 chkInterrupt();
425 break;
426 case REG_IAM:
427 regs.iam = val;
428 break;
429 case REG_RCTL:
430 oldrctl = regs.rctl;
431 regs.rctl = val;
432 if (regs.rctl.rst()) {
433 rxDescCache.reset();
434 DPRINTF(EthernetSM, "RXS: Got RESET!\n");
435 rxFifo.clear();
436 regs.rctl.rst(0);
437 }
438 if (regs.rctl.en())
439 rxTick = true;
440 restartClock();
441 break;
442 case REG_FCTTV:
443 regs.fcttv = val;
444 break;
445 case REG_TCTL:
446         oldtctl = regs.tctl;
447         regs.tctl = val;
449 if (regs.tctl.en())
450 txTick = true;
451 restartClock();
452 if (regs.tctl.en() && !oldtctl.en()) {
453 txDescCache.reset();
454 }
455 break;
456 case REG_PBA:
457 regs.pba.rxa(val);
458 regs.pba.txa(64 - regs.pba.rxa());
459 break;
460 case REG_WUC:
461 case REG_LEDCTL:
462 case REG_FCAL:
463 case REG_FCAH:
464 case REG_FCT:
465 case REG_VET:
466 case REG_AIFS:
467 case REG_TIPG:
468 ; // We don't care, so don't store anything
469 break;
470 case REG_FCRTL:
471 regs.fcrtl = val;
472 break;
473 case REG_FCRTH:
474 regs.fcrth = val;
475 break;
476 case REG_RDBAL:
477 regs.rdba.rdbal( val & ~mask(4));
478 rxDescCache.areaChanged();
479 break;
480 case REG_RDBAH:
481 regs.rdba.rdbah(val);
482 rxDescCache.areaChanged();
483 break;
484 case REG_RDLEN:
485 regs.rdlen = val & ~mask(7);
486 rxDescCache.areaChanged();
487 break;
488 case REG_RDH:
489 regs.rdh = val;
490 rxDescCache.areaChanged();
491 break;
492 case REG_RDT:
493 regs.rdt = val;
494 rxTick = true;
495 restartClock();
496 break;
497 case REG_RDTR:
498 regs.rdtr = val;
499 break;
500 case REG_RADV:
501 regs.radv = val;
502 break;
503 case REG_TDBAL:
504 regs.tdba.tdbal( val & ~mask(4));
505 txDescCache.areaChanged();
506 break;
507 case REG_TDBAH:
508 regs.tdba.tdbah(val);
509 txDescCache.areaChanged();
510 break;
511 case REG_TDLEN:
512 regs.tdlen = val & ~mask(7);
513 txDescCache.areaChanged();
514 break;
515 case REG_TDH:
516 regs.tdh = val;
517 txDescCache.areaChanged();
518 break;
519 case REG_TDT:
520 regs.tdt = val;
521 txTick = true;
522 restartClock();
523 break;
524 case REG_TIDV:
525 regs.tidv = val;
526 break;
527 case REG_TXDCTL:
528 regs.txdctl = val;
529 break;
530 case REG_TADV:
531 regs.tadv = val;
532 break;
533 case REG_RXCSUM:
534 regs.rxcsum = val;
535 break;
536 case REG_MANC:
537 regs.manc = val;
538 break;
539 default:
540 if (!(daddr >= REG_VFTA && daddr < (REG_VFTA + VLAN_FILTER_TABLE_SIZE*4)) &&
541 !(daddr >= REG_RAL && daddr < (REG_RAL + RCV_ADDRESS_TABLE_SIZE*8)) &&
542 !(daddr >= REG_MTA && daddr < (REG_MTA + MULTICAST_TABLE_SIZE*4)))
543 panic("Write request to unknown register number: %#x\n", daddr);
544 };
545
546 pkt->result = Packet::Success;
547 return pioDelay;
548 }
549
550 void
551 IGbE::postInterrupt(IntTypes t, bool now)
552 {
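    // ITR throttles how often interrupts reach the CPU: a non-zero interval
    // delays delivery by interval * 256 ns, while interval == 0 (or now == true)
    // posts the interrupt immediately.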
553 assert(t);
554
555 // Interrupt is already pending
556 if (t & regs.icr())
557 return;
558
559 if (regs.icr() & regs.imr)
560 {
561 regs.icr = regs.icr() | t;
562 if (!interEvent.scheduled())
563 interEvent.schedule(curTick + Clock::Int::ns * 256 *
564 regs.itr.interval());
565 } else {
566 regs.icr = regs.icr() | t;
567 if (regs.itr.interval() == 0 || now) {
568 if (interEvent.scheduled())
569 interEvent.deschedule();
570 cpuPostInt();
571 } else {
572 DPRINTF(EthernetIntr, "EINT: Scheduling timer interrupt for %d ticks\n",
573 Clock::Int::ns * 256 * regs.itr.interval());
574 if (!interEvent.scheduled())
575 interEvent.schedule(curTick + Clock::Int::ns * 256 * regs.itr.interval());
576 }
577 }
578 }
579
580 void
581 IGbE::cpuPostInt()
582 {
583 if (rdtrEvent.scheduled()) {
584 regs.icr.rxt0(1);
585 rdtrEvent.deschedule();
586 }
587 if (radvEvent.scheduled()) {
588 regs.icr.rxt0(1);
589 radvEvent.deschedule();
590 }
591 if (tadvEvent.scheduled()) {
592 regs.icr.txdw(1);
593 tadvEvent.deschedule();
594 }
595 if (tidvEvent.scheduled()) {
596 regs.icr.txdw(1);
597 tidvEvent.deschedule();
598 }
599
600 regs.icr.int_assert(1);
601 DPRINTF(EthernetIntr, "EINT: Posting interrupt to CPU now. Vector %#x\n",
602 regs.icr());
603 intrPost();
604 }
605
606 void
607 IGbE::cpuClearInt()
608 {
609 if (regs.icr.int_assert()) {
610 regs.icr.int_assert(0);
611 DPRINTF(EthernetIntr, "EINT: Clearing interrupt to CPU now. Vector %#x\n",
612 regs.icr());
613 intrClear();
614 }
615 }
616
617 void
618 IGbE::chkInterrupt()
619 {
620 // Check if we need to clear the cpu interrupt
621 if (!(regs.icr() & regs.imr)) {
622 if (interEvent.scheduled())
623 interEvent.deschedule();
624 if (regs.icr.int_assert())
625 cpuClearInt();
626 }
627
628 if (regs.icr() & regs.imr) {
629 if (regs.itr.interval() == 0) {
630 cpuPostInt();
631 } else {
632 if (!interEvent.scheduled())
633 interEvent.schedule(curTick + Clock::Int::ns * 256 * regs.itr.interval());
634 }
635 }
636
637
638 }
639
640
641 IGbE::RxDescCache::RxDescCache(IGbE *i, const std::string n, int s)
642 : DescCache<RxDesc>(i, n, s), pktDone(false), pktEvent(this)
643
644 {
645 }
646
647 bool
648 IGbE::RxDescCache::writePacket(EthPacketPtr packet)
649 {
650 // We shouldn't have to deal with any of these yet
651 DPRINTF(EthernetDesc, "Packet Length: %d Desc Size: %d\n",
652 packet->length, igbe->regs.rctl.descSize());
653 assert(packet->length < igbe->regs.rctl.descSize());
654
655 if (!unusedCache.size())
656 return false;
657
658 pktPtr = packet;
659
660 igbe->dmaWrite(igbe->platform->pciToDma(unusedCache.front()->buf),
661 packet->length, &pktEvent, packet->data);
662 return true;
663 }
664
665 void
666 IGbE::RxDescCache::pktComplete()
667 {
668 assert(unusedCache.size());
669 RxDesc *desc;
670 desc = unusedCache.front();
671
672 uint16_t crcfixup = igbe->regs.rctl.secrc() ? 0 : 4 ;
673 desc->len = htole((uint16_t)(pktPtr->length + crcfixup));
674 DPRINTF(EthernetDesc, "pktPtr->length: %d stripcrc offset: %d value written: %d %d\n",
675 pktPtr->length, crcfixup,
676 htole((uint16_t)(pktPtr->length + crcfixup)),
677 (uint16_t)(pktPtr->length + crcfixup));
678
679 // no support for anything but starting at 0
680 assert(igbe->regs.rxcsum.pcss() == 0);
681
682 DPRINTF(EthernetDesc, "Packet written to memory updating Descriptor\n");
683
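    // Fill in the descriptor status/error fields: DD and EOP are always set;
    // if receive checksum offload is enabled the IP/TCP/UDP checksum bits are
    // set and the error bits flag any checksum that does not verify.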
684 uint8_t status = RXDS_DD | RXDS_EOP;
685 uint8_t err = 0;
686 IpPtr ip(pktPtr);
687 if (ip) {
688 if (igbe->regs.rxcsum.ipofld()) {
689 DPRINTF(EthernetDesc, "Checking IP checksum\n");
690 status |= RXDS_IPCS;
691 desc->csum = htole(cksum(ip));
692 if (cksum(ip) != 0) {
693 err |= RXDE_IPE;
694 DPRINTF(EthernetDesc, "Checksum is bad!!\n");
695 }
696 }
697 TcpPtr tcp(ip);
698 if (tcp && igbe->regs.rxcsum.tuofld()) {
699 DPRINTF(EthernetDesc, "Checking TCP checksum\n");
700 status |= RXDS_TCPCS;
701 desc->csum = htole(cksum(tcp));
702 if (cksum(tcp) != 0) {
703 DPRINTF(EthernetDesc, "Checksum is bad!!\n");
704 err |= RXDE_TCPE;
705 }
706 }
707
708 UdpPtr udp(ip);
709 if (udp && igbe->regs.rxcsum.tuofld()) {
710 DPRINTF(EthernetDesc, "Checking UDP checksum\n");
711 status |= RXDS_UDPCS;
712 desc->csum = htole(cksum(udp));
713 if (cksum(udp) != 0) {
714 DPRINTF(EthernetDesc, "Checksum is bad!!\n");
715 err |= RXDE_TCPE;
716 }
717 }
718 } // if ip
719
720 desc->status = htole(status);
721 desc->errors = htole(err);
722
723 // No vlan support at this point... just set it to 0
724 desc->vlan = 0;
725
726 // Deal with the rx timer interrupts
727 if (igbe->regs.rdtr.delay()) {
728 DPRINTF(EthernetSM, "RXS: Scheduling DTR for %d\n",
729 igbe->regs.rdtr.delay() * igbe->intClock());
730 igbe->rdtrEvent.reschedule(curTick + igbe->regs.rdtr.delay() *
731 igbe->intClock(),true);
732 }
733
734 if (igbe->regs.radv.idv() && igbe->regs.rdtr.delay()) {
735 DPRINTF(EthernetSM, "RXS: Scheduling ADV for %d\n",
736 igbe->regs.radv.idv() * igbe->intClock());
737 if (!igbe->radvEvent.scheduled())
738 igbe->radvEvent.schedule(curTick + igbe->regs.radv.idv() *
739 igbe->intClock());
740 }
741
742     // if neither radv nor rdtr, maybe itr is set...
743 if (!igbe->regs.rdtr.delay()) {
744 DPRINTF(EthernetSM, "RXS: Receive interrupt delay disabled, posting IT_RXT\n");
745 igbe->postInterrupt(IT_RXT);
746 }
747
748 // If the packet is small enough, interrupt appropriately
749 // I wonder if this is delayed or not?!
750 if (pktPtr->length <= igbe->regs.rsrpd.idv()) {
751         DPRINTF(EthernetSM, "RXS: Posting IT_SRPD because small packet received\n");
752 igbe->postInterrupt(IT_SRPD);
753 }
754
755 DPRINTF(EthernetDesc, "Processing of this descriptor complete\n");
756 unusedCache.pop_front();
757 usedCache.push_back(desc);
758 pktPtr = NULL;
759 enableSm();
760 pktDone = true;
761 igbe->checkDrain();
762 }
763
764 void
765 IGbE::RxDescCache::enableSm()
766 {
767 igbe->rxTick = true;
768 igbe->restartClock();
769 }
770
771 bool
772 IGbE::RxDescCache::packetDone()
773 {
774 if (pktDone) {
775 pktDone = false;
776 return true;
777 }
778 return false;
779 }
780
781 bool
782 IGbE::RxDescCache::hasOutstandingEvents()
783 {
784 return pktEvent.scheduled() || wbEvent.scheduled() ||
785 fetchEvent.scheduled();
786 }
787
788 void
789 IGbE::RxDescCache::serialize(std::ostream &os)
790 {
791 DescCache<RxDesc>::serialize(os);
792 SERIALIZE_SCALAR(pktDone);
793 }
794
795 void
796 IGbE::RxDescCache::unserialize(Checkpoint *cp, const std::string &section)
797 {
798 DescCache<RxDesc>::unserialize(cp, section);
799 UNSERIALIZE_SCALAR(pktDone);
800 }
801
802
803 ///////////////////////////////////// IGbE::TxDesc /////////////////////////////////
804
805 IGbE::TxDescCache::TxDescCache(IGbE *i, const std::string n, int s)
806 : DescCache<TxDesc>(i,n, s), pktDone(false), isTcp(false), pktWaiting(false),
807 pktEvent(this)
808
809 {
810 }
811
812 int
813 IGbE::TxDescCache::getPacketSize()
814 {
815 assert(unusedCache.size());
816
817 TxDesc *desc;
818
819 DPRINTF(EthernetDesc, "Starting processing of descriptor\n");
820
821 while (unusedCache.size() && TxdOp::isContext(unusedCache.front())) {
822 DPRINTF(EthernetDesc, "Got context descriptor type... skipping\n");
823
824 // I think we can just ignore these for now?
825 desc = unusedCache.front();
826 // is this going to be a tcp or udp packet?
827 isTcp = TxdOp::tcp(desc) ? true : false;
828
829 // make sure it's ipv4
830 assert(TxdOp::ip(desc));
831
832 TxdOp::setDd(desc);
833 unusedCache.pop_front();
834 usedCache.push_back(desc);
835 }
836
837 if (!unusedCache.size())
838 return -1;
839
840 DPRINTF(EthernetDesc, "Next TX packet is %d bytes\n",
841 TxdOp::getLen(unusedCache.front()));
842
843 return TxdOp::getLen(unusedCache.front());
844 }
845
846 void
847 IGbE::TxDescCache::getPacketData(EthPacketPtr p)
848 {
849 assert(unusedCache.size());
850
851 TxDesc *desc;
852 desc = unusedCache.front();
853
854 assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) && TxdOp::getLen(desc));
855
856 pktPtr = p;
857
858 pktWaiting = true;
859
860 DPRINTF(EthernetDesc, "Starting DMA of packet\n");
861 igbe->dmaRead(igbe->platform->pciToDma(TxdOp::getBuf(desc)),
862 TxdOp::getLen(desc), &pktEvent, p->data + p->length);
863
864
865 }
866
867 void
868 IGbE::TxDescCache::pktComplete()
869 {
870
871 TxDesc *desc;
872 assert(unusedCache.size());
873 assert(pktPtr);
874
875 DPRINTF(EthernetDesc, "DMA of packet complete\n");
876
877
878 desc = unusedCache.front();
879 assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) && TxdOp::getLen(desc));
880
881 DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
882
883 if (!TxdOp::eop(desc)) {
884 // This only supports two descriptors per tx packet
885 assert(pktPtr->length == 0);
886 pktPtr->length = TxdOp::getLen(desc);
887 unusedCache.pop_front();
888 usedCache.push_back(desc);
889 pktDone = true;
890 pktWaiting = false;
891 pktPtr = NULL;
892
893 DPRINTF(EthernetDesc, "Partial Packet Descriptor Done\n");
894 enableSm();
895 return;
896 }
897
898 // Set the length of the data in the EtherPacket
899 pktPtr->length += TxdOp::getLen(desc);
900
901 // no support for vlans
902 assert(!TxdOp::vle(desc));
903
904     // we always report status
905 assert(TxdOp::rs(desc));
906
907 // we only support single packet descriptors at this point
908 assert(TxdOp::eop(desc));
909
910 // set that this packet is done
911 TxdOp::setDd(desc);
912
913 DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
914
915     // Checksums are only offloaded for new descriptor types
916 if (TxdOp::isData(desc) && ( TxdOp::ixsm(desc) || TxdOp::txsm(desc)) ) {
917 DPRINTF(EthernetDesc, "Calculating checksums for packet\n");
918 IpPtr ip(pktPtr);
919 if (TxdOp::ixsm(desc)) {
920 ip->sum(0);
921 ip->sum(cksum(ip));
922 DPRINTF(EthernetDesc, "Calculated IP checksum\n");
923 }
924 if (TxdOp::txsm(desc)) {
925 if (isTcp) {
926 TcpPtr tcp(ip);
927 assert(tcp);
928 tcp->sum(0);
929 tcp->sum(cksum(tcp));
930 DPRINTF(EthernetDesc, "Calculated TCP checksum\n");
931 } else {
932 UdpPtr udp(ip);
933 assert(udp);
934 udp->sum(0);
935 udp->sum(cksum(udp));
936 DPRINTF(EthernetDesc, "Calculated UDP checksum\n");
937 }
938 }
939 }
940
941 if (TxdOp::ide(desc)) {
942 // Deal with the rx timer interrupts
943 DPRINTF(EthernetDesc, "Descriptor had IDE set\n");
944 if (igbe->regs.tidv.idv()) {
945 DPRINTF(EthernetDesc, "setting tidv\n");
946 igbe->tidvEvent.reschedule(curTick + igbe->regs.tidv.idv() *
947 igbe->intClock(), true);
948 }
949
950 if (igbe->regs.tadv.idv() && igbe->regs.tidv.idv()) {
951 DPRINTF(EthernetDesc, "setting tadv\n");
952 if (!igbe->tadvEvent.scheduled())
953 igbe->tadvEvent.schedule(curTick + igbe->regs.tadv.idv() *
954 igbe->intClock());
955 }
956 }
957
958
959
960 unusedCache.pop_front();
961 usedCache.push_back(desc);
962 pktDone = true;
963 pktWaiting = false;
964 pktPtr = NULL;
965
966 DPRINTF(EthernetDesc, "Descriptor Done\n");
967
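    // WTHRESH controls descriptor write-back: a value of 0 writes each
    // descriptor back immediately, otherwise write-back is deferred until
    // enough used descriptors accumulate. Descriptors are 16 bytes, so
    // (cacheBlockSize()-1)>>4 expresses a cache-block-sized group of descriptors.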
968 if (igbe->regs.txdctl.wthresh() == 0) {
969 DPRINTF(EthernetDesc, "WTHRESH == 0, writing back descriptor\n");
970 writeback(0);
971     } else if (igbe->regs.txdctl.wthresh() <= usedCache.size()) {
972         DPRINTF(EthernetDesc, "used >= WTHRESH, writing back descriptor\n");
973 writeback((igbe->cacheBlockSize()-1)>>4);
974 }
975 enableSm();
976 igbe->checkDrain();
977 }
978
979 void
980 IGbE::TxDescCache::serialize(std::ostream &os)
981 {
982 DescCache<TxDesc>::serialize(os);
983 SERIALIZE_SCALAR(pktDone);
984 SERIALIZE_SCALAR(isTcp);
985 SERIALIZE_SCALAR(pktWaiting);
986 }
987
988 void
989 IGbE::TxDescCache::unserialize(Checkpoint *cp, const std::string &section)
990 {
991 DescCache<TxDesc>::unserialize(cp, section);
992 UNSERIALIZE_SCALAR(pktDone);
993 UNSERIALIZE_SCALAR(isTcp);
994 UNSERIALIZE_SCALAR(pktWaiting);
995 }
996
997 bool
998 IGbE::TxDescCache::packetAvailable()
999 {
1000 if (pktDone) {
1001 pktDone = false;
1002 return true;
1003 }
1004 return false;
1005 }
1006
1007 void
1008 IGbE::TxDescCache::enableSm()
1009 {
1010 igbe->txTick = true;
1011 igbe->restartClock();
1012 }
1013
1014 bool
1015 IGbE::TxDescCache::hasOutstandingEvents()
1016 {
1017 return pktEvent.scheduled() || wbEvent.scheduled() ||
1018 fetchEvent.scheduled();
1019 }
1020
1021
1022 ///////////////////////////////////// IGbE /////////////////////////////////
1023
1024 void
1025 IGbE::restartClock()
1026 {
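    // Schedule the tick event on the next device clock edge: round curTick
    // down to the current edge and add one cycle. Only do so if one of the
    // state machines actually has work and the device isn't draining.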
1027 if (!tickEvent.scheduled() && (rxTick || txTick || txFifoTick) && getState() ==
1028 SimObject::Running)
1029 tickEvent.schedule((curTick/cycles(1)) * cycles(1) + cycles(1));
1030 }
1031
1032 unsigned int
1033 IGbE::drain(Event *de)
1034 {
1035 unsigned int count;
1036 count = pioPort->drain(de) + dmaPort->drain(de);
1037 if (rxDescCache.hasOutstandingEvents() ||
1038 txDescCache.hasOutstandingEvents()) {
1039 count++;
1040 drainEvent = de;
1041 }
1042
1043 txFifoTick = false;
1044 txTick = false;
1045 rxTick = false;
1046
1047 if (tickEvent.scheduled())
1048 tickEvent.deschedule();
1049
1050 if (count)
1051 changeState(Draining);
1052 else
1053 changeState(Drained);
1054
1055 return count;
1056 }
1057
1058 void
1059 IGbE::resume()
1060 {
1061 SimObject::resume();
1062
1063 txFifoTick = true;
1064 txTick = true;
1065 rxTick = true;
1066
1067 restartClock();
1068 }
1069
1070 void
1071 IGbE::checkDrain()
1072 {
1073 if (!drainEvent)
1074 return;
1075
1076 if (rxDescCache.hasOutstandingEvents() ||
1077 txDescCache.hasOutstandingEvents()) {
1078 drainEvent->process();
1079 drainEvent = NULL;
1080 }
1081 }
1082
1083 void
1084 IGbE::txStateMachine()
1085 {
1086 if (!regs.tctl.en()) {
1087 txTick = false;
1088 DPRINTF(EthernetSM, "TXS: TX disabled, stopping ticking\n");
1089 return;
1090 }
1091
1092     // If we have a packet available and its length is not 0 (meaning it's not
1093     // a multidescriptor packet) put it in the fifo; otherwise on the next
1094     // iteration we'll get the rest of the data
1095 if (txPacket && txDescCache.packetAvailable() && txPacket->length) {
1096 bool success;
1097 DPRINTF(EthernetSM, "TXS: packet placed in TX FIFO\n");
1098 success = txFifo.push(txPacket);
1099 txFifoTick = true;
1100 assert(success);
1101 txPacket = NULL;
1102 txDescCache.writeback((cacheBlockSize()-1)>>4);
1103 return;
1104 }
1105
1106 // Only support descriptor granularity
1107 assert(regs.txdctl.gran());
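    // LWTHRESH is specified in units of 8 descriptors; warn the driver with a
    // TXDLOW interrupt once the ring drops below that level.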
1108 if (regs.txdctl.lwthresh() && txDescCache.descLeft() < (regs.txdctl.lwthresh() * 8)) {
1109 DPRINTF(EthernetSM, "TXS: LWTHRESH caused posting of TXDLOW\n");
1110 postInterrupt(IT_TXDLOW);
1111 }
1112
1113 if (!txPacket) {
1114 txPacket = new EthPacketData(16384);
1115 }
1116
1117 if (!txDescCache.packetWaiting()) {
1118 if (txDescCache.descLeft() == 0) {
1119 DPRINTF(EthernetSM, "TXS: No descriptors left in ring, forcing "
1120                     "writeback, stopping ticking, and posting TXQE\n");
1121 txDescCache.writeback(0);
1122 txTick = false;
1123 postInterrupt(IT_TXQE, true);
1124 return;
1125 }
1126
1127
1128 if (!(txDescCache.descUnused())) {
1129 DPRINTF(EthernetSM, "TXS: No descriptors available in cache, fetching and stopping ticking\n");
1130 txTick = false;
1131 txDescCache.fetchDescriptors();
1132 return;
1133 }
1134
1135 int size;
1136 size = txDescCache.getPacketSize();
1137 if (size > 0 && txFifo.avail() > size) {
1138             DPRINTF(EthernetSM, "TXS: Reserving %d bytes in FIFO and beginning "
1139 "DMA of next packet\n", size);
1140 txFifo.reserve(size);
1141 txDescCache.getPacketData(txPacket);
1142 } else if (size <= 0) {
1143 DPRINTF(EthernetSM, "TXS: No packets to get, writing back used descriptors\n");
1144 txDescCache.writeback(0);
1145 } else {
1146 DPRINTF(EthernetSM, "TXS: FIFO full, stopping ticking until space "
1147 "available in FIFO\n");
1148 txDescCache.writeback((cacheBlockSize()-1)>>4);
1149 txTick = false;
1150 }
1151
1152
1153 return;
1154 }
1155 DPRINTF(EthernetSM, "TXS: Nothing to do, stopping ticking\n");
1156 txTick = false;
1157 }
1158
1159 bool
1160 IGbE::ethRxPkt(EthPacketPtr pkt)
1161 {
1162     DPRINTF(Ethernet, "RxFIFO: Receiving packet from wire\n");
1163 if (!regs.rctl.en()) {
1164 DPRINTF(Ethernet, "RxFIFO: RX not enabled, dropping\n");
1165 return true;
1166 }
1167
1168 // restart the state machines if they are stopped
1169 rxTick = true;
1170 if ((rxTick || txTick) && !tickEvent.scheduled()) {
1171 DPRINTF(EthernetSM, "RXS: received packet into fifo, starting ticking\n");
1172 restartClock();
1173 }
1174
1175 if (!rxFifo.push(pkt)) {
1176 DPRINTF(Ethernet, "RxFIFO: Packet won't fit in fifo... dropped\n");
1177 postInterrupt(IT_RXO, true);
1178 return false;
1179 }
1180 return true;
1181 }
1182
1183
1184 void
1185 IGbE::rxStateMachine()
1186 {
1187 if (!regs.rctl.en()) {
1188 rxTick = false;
1189 DPRINTF(EthernetSM, "RXS: RX disabled, stopping ticking\n");
1190 return;
1191 }
1192
1193 // If the packet is done check for interrupts/descriptors/etc
1194 if (rxDescCache.packetDone()) {
1195 DPRINTF(EthernetSM, "RXS: Packet completed DMA to memory\n");
1196 int descLeft = rxDescCache.descLeft();
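        // RDMTS selects the receive-descriptor minimum threshold (1/2, 1/4, or
        // 1/8 of RDLEN); the cases below intentionally fall through so RXDMT is
        // only posted once descLeft is at or below the selected threshold.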
1197 switch (regs.rctl.rdmts()) {
1198 case 2: if (descLeft > .125 * regs.rdlen()) break;
1199 case 1: if (descLeft > .250 * regs.rdlen()) break;
1200 case 0: if (descLeft > .500 * regs.rdlen()) break;
1201 DPRINTF(Ethernet, "RXS: Interrupting (RXDMT) because of descriptors left\n");
1202 postInterrupt(IT_RXDMT);
1203 break;
1204 }
1205
1206 if (descLeft == 0) {
1207 DPRINTF(EthernetSM, "RXS: No descriptors left in ring, forcing"
1208 " writeback and stopping ticking\n");
1209 rxDescCache.writeback(0);
1210 rxTick = false;
1211 }
1212
1213         // only support descriptor granularities
1214 assert(regs.rxdctl.gran());
1215
1216 if (regs.rxdctl.wthresh() >= rxDescCache.descUsed()) {
1217 DPRINTF(EthernetSM, "RXS: Writing back because WTHRESH >= descUsed\n");
1218 if (regs.rxdctl.wthresh() < (cacheBlockSize()>>4))
1219 rxDescCache.writeback(regs.rxdctl.wthresh()-1);
1220 else
1221 rxDescCache.writeback((cacheBlockSize()-1)>>4);
1222 }
1223
1224 if ((rxDescCache.descUnused() < regs.rxdctl.pthresh()) &&
1225 ((rxDescCache.descLeft() - rxDescCache.descUnused()) > regs.rxdctl.hthresh())) {
1226 DPRINTF(EthernetSM, "RXS: Fetching descriptors because descUnused < PTHRESH\n");
1227 rxDescCache.fetchDescriptors();
1228 }
1229
1230 if (rxDescCache.descUnused() == 0) {
1231 DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
1232 "fetching descriptors and stopping ticking\n");
1233 rxTick = false;
1234 rxDescCache.fetchDescriptors();
1235 }
1236 return;
1237 }
1238
1239 if (!rxDescCache.descUnused()) {
1240 DPRINTF(EthernetSM, "RXS: No descriptors available in cache, stopping ticking\n");
1241 rxTick = false;
1242 DPRINTF(EthernetSM, "RXS: No descriptors available, fetching\n");
1243 rxDescCache.fetchDescriptors();
1244 return;
1245 }
1246
1247 if (rxFifo.empty()) {
1248 DPRINTF(EthernetSM, "RXS: RxFIFO empty, stopping ticking\n");
1249 rxTick = false;
1250 return;
1251 }
1252
1253 EthPacketPtr pkt;
1254 pkt = rxFifo.front();
1255
1256 DPRINTF(EthernetSM, "RXS: Writing packet into memory\n");
1257 if (!rxDescCache.writePacket(pkt)) {
1258 return;
1259 }
1260
1261 DPRINTF(EthernetSM, "RXS: Removing packet from FIFO\n");
1262 rxFifo.pop();
1263 DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n");
1264 rxTick = false;
1265 }
1266
1267 void
1268 IGbE::txWire()
1269 {
1270 if (txFifo.empty()) {
1271 txFifoTick = false;
1272 return;
1273 }
1274
1275
1276 if (etherInt->sendPacket(txFifo.front())) {
1277 DPRINTF(EthernetSM, "TxFIFO: Successful transmit, bytes available in fifo: %d\n",
1278 txFifo.avail());
1279 txFifo.pop();
1280 } else {
1281         // We'll get woken up when ethTxDone() gets called for this packet
1282 txFifoTick = false;
1283 }
1284
1285 }
1286
1287 void
1288 IGbE::tick()
1289 {
1290 DPRINTF(EthernetSM, "IGbE: -------------- Cycle --------------\n");
1291
1292 if (rxTick)
1293 rxStateMachine();
1294
1295 if (txTick)
1296 txStateMachine();
1297
1298 if (txFifoTick)
1299 txWire();
1300
1301
1302 if (rxTick || txTick || txFifoTick)
1303 tickEvent.schedule(curTick + cycles(1));
1304 }
1305
1306 void
1307 IGbE::ethTxDone()
1308 {
1309 // restart the tx state machines if they are stopped
1310 // fifo to send another packet
1311 // tx sm to put more data into the fifo
1312 txFifoTick = true;
1313 txTick = true;
1314
1315 restartClock();
1316 DPRINTF(EthernetSM, "TxFIFO: Transmission complete\n");
1317 }
1318
1319 void
1320 IGbE::serialize(std::ostream &os)
1321 {
1322 PciDev::serialize(os);
1323
1324 regs.serialize(os);
1325 SERIALIZE_SCALAR(eeOpBits);
1326 SERIALIZE_SCALAR(eeAddrBits);
1327 SERIALIZE_SCALAR(eeDataBits);
1328 SERIALIZE_SCALAR(eeOpcode);
1329 SERIALIZE_SCALAR(eeAddr);
1330 SERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);
1331
1332 rxFifo.serialize("rxfifo", os);
1333 txFifo.serialize("txfifo", os);
1334
1335 bool txPktExists = txPacket;
1336 SERIALIZE_SCALAR(txPktExists);
1337 if (txPktExists)
1338 txPacket->serialize("txpacket", os);
1339
1340 Tick rdtr_time = 0, radv_time = 0, tidv_time = 0, tadv_time = 0,
1341 inter_time = 0;
1342
1343 if (rdtrEvent.scheduled())
1344 rdtr_time = rdtrEvent.when();
1345 SERIALIZE_SCALAR(rdtr_time);
1346
1347 if (radvEvent.scheduled())
1348 radv_time = radvEvent.when();
1349 SERIALIZE_SCALAR(radv_time);
1350
1351 if (tidvEvent.scheduled())
1352         tidv_time = tidvEvent.when();
1353 SERIALIZE_SCALAR(tidv_time);
1354
1355 if (tadvEvent.scheduled())
1356         tadv_time = tadvEvent.when();
1357 SERIALIZE_SCALAR(tadv_time);
1358
1359 if (interEvent.scheduled())
1360         inter_time = interEvent.when();
1361 SERIALIZE_SCALAR(inter_time);
1362
1363 nameOut(os, csprintf("%s.TxDescCache", name()));
1364 txDescCache.serialize(os);
1365
1366 nameOut(os, csprintf("%s.RxDescCache", name()));
1367 rxDescCache.serialize(os);
1368 }
1369
1370 void
1371 IGbE::unserialize(Checkpoint *cp, const std::string &section)
1372 {
1373 PciDev::unserialize(cp, section);
1374
1375 regs.unserialize(cp, section);
1376 UNSERIALIZE_SCALAR(eeOpBits);
1377 UNSERIALIZE_SCALAR(eeAddrBits);
1378 UNSERIALIZE_SCALAR(eeDataBits);
1379 UNSERIALIZE_SCALAR(eeOpcode);
1380 UNSERIALIZE_SCALAR(eeAddr);
1381 UNSERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);
1382
1383 rxFifo.unserialize("rxfifo", cp, section);
1384 txFifo.unserialize("txfifo", cp, section);
1385
1386 bool txPktExists;
1387 UNSERIALIZE_SCALAR(txPktExists);
1388 if (txPktExists) {
1389 txPacket = new EthPacketData(16384);
1390 txPacket->unserialize("txpacket", cp, section);
1391 }
1392
1393 rxTick = true;
1394 txTick = true;
1395 txFifoTick = true;
1396
1397 Tick rdtr_time, radv_time, tidv_time, tadv_time, inter_time;
1398 UNSERIALIZE_SCALAR(rdtr_time);
1399 UNSERIALIZE_SCALAR(radv_time);
1400 UNSERIALIZE_SCALAR(tidv_time);
1401 UNSERIALIZE_SCALAR(tadv_time);
1402 UNSERIALIZE_SCALAR(inter_time);
1403
1404 if (rdtr_time)
1405 rdtrEvent.schedule(rdtr_time);
1406
1407 if (radv_time)
1408 radvEvent.schedule(radv_time);
1409
1410 if (tidv_time)
1411 tidvEvent.schedule(tidv_time);
1412
1413 if (tadv_time)
1414 tadvEvent.schedule(tadv_time);
1415
1416 if (inter_time)
1417 interEvent.schedule(inter_time);
1418
1419 txDescCache.unserialize(cp, csprintf("%s.TxDescCache", section));
1420
1421 rxDescCache.unserialize(cp, csprintf("%s.RxDescCache", section));
1422 }
1423
1424
1425 BEGIN_DECLARE_SIM_OBJECT_PARAMS(IGbEInt)
1426
1427 SimObjectParam<EtherInt *> peer;
1428 SimObjectParam<IGbE *> device;
1429
1430 END_DECLARE_SIM_OBJECT_PARAMS(IGbEInt)
1431
1432 BEGIN_INIT_SIM_OBJECT_PARAMS(IGbEInt)
1433
1434 INIT_PARAM_DFLT(peer, "peer interface", NULL),
1435 INIT_PARAM(device, "Ethernet device of this interface")
1436
1437 END_INIT_SIM_OBJECT_PARAMS(IGbEInt)
1438
1439 CREATE_SIM_OBJECT(IGbEInt)
1440 {
1441 IGbEInt *dev_int = new IGbEInt(getInstanceName(), device);
1442
1443 EtherInt *p = (EtherInt *)peer;
1444 if (p) {
1445 dev_int->setPeer(p);
1446 p->setPeer(dev_int);
1447 }
1448
1449 return dev_int;
1450 }
1451
1452 REGISTER_SIM_OBJECT("IGbEInt", IGbEInt)
1453
1454
1455 BEGIN_DECLARE_SIM_OBJECT_PARAMS(IGbE)
1456
1457 SimObjectParam<System *> system;
1458 SimObjectParam<Platform *> platform;
1459 Param<Tick> min_backoff_delay;
1460 Param<Tick> max_backoff_delay;
1461 SimObjectParam<PciConfigData *> configdata;
1462 Param<uint32_t> pci_bus;
1463 Param<uint32_t> pci_dev;
1464 Param<uint32_t> pci_func;
1465 Param<Tick> pio_latency;
1466 Param<Tick> config_latency;
1467 Param<std::string> hardware_address;
1468 Param<bool> use_flow_control;
1469 Param<int> rx_fifo_size;
1470 Param<int> tx_fifo_size;
1471 Param<int> rx_desc_cache_size;
1472 Param<int> tx_desc_cache_size;
1473 Param<Tick> clock;
1474
1475
1476 END_DECLARE_SIM_OBJECT_PARAMS(IGbE)
1477
1478 BEGIN_INIT_SIM_OBJECT_PARAMS(IGbE)
1479
1480 INIT_PARAM(system, "System pointer"),
1481 INIT_PARAM(platform, "Platform pointer"),
1482     INIT_PARAM(min_backoff_delay, "Minimum delay after receiving a nack packet"),
1483     INIT_PARAM(max_backoff_delay, "Maximum delay after receiving a nack packet"),
1484 INIT_PARAM(configdata, "PCI Config data"),
1485 INIT_PARAM(pci_bus, "PCI bus ID"),
1486 INIT_PARAM(pci_dev, "PCI device number"),
1487 INIT_PARAM(pci_func, "PCI function code"),
1488 INIT_PARAM_DFLT(pio_latency, "Programmed IO latency in bus cycles", 1),
1489 INIT_PARAM(config_latency, "Number of cycles for a config read or write"),
1490 INIT_PARAM(hardware_address, "Ethernet Hardware Address"),
1491 INIT_PARAM(use_flow_control,"Should the device use xon/off packets"),
1492 INIT_PARAM(rx_fifo_size,"Size of the RX FIFO"),
1493 INIT_PARAM(tx_fifo_size,"Size of the TX FIFO"),
1494 INIT_PARAM(rx_desc_cache_size,"Size of the RX descriptor cache"),
1495 INIT_PARAM(tx_desc_cache_size,"Size of the TX descriptor cache"),
1496 INIT_PARAM(clock,"Clock rate for the device to tick at")
1497
1498 END_INIT_SIM_OBJECT_PARAMS(IGbE)
1499
1500
1501 CREATE_SIM_OBJECT(IGbE)
1502 {
1503 IGbE::Params *params = new IGbE::Params;
1504
1505 params->name = getInstanceName();
1506 params->platform = platform;
1507 params->system = system;
1508 params->min_backoff_delay = min_backoff_delay;
1509 params->max_backoff_delay = max_backoff_delay;
1510 params->configData = configdata;
1511 params->busNum = pci_bus;
1512 params->deviceNum = pci_dev;
1513 params->functionNum = pci_func;
1514 params->pio_delay = pio_latency;
1515 params->config_delay = config_latency;
1516 params->hardware_address = hardware_address;
1517 params->use_flow_control = use_flow_control;
1518 params->rx_fifo_size = rx_fifo_size;
1519 params->tx_fifo_size = tx_fifo_size;
1520 params->rx_desc_cache_size = rx_desc_cache_size;
1521 params->tx_desc_cache_size = tx_desc_cache_size;
1522 params->clock = clock;
1523
1524
1525 return new IGbE(params);
1526 }
1527
1528 REGISTER_SIM_OBJECT("IGbE", IGbE)