Merge with head
[gem5.git] / src / dev / i8254xGBe.cc
1 /*
2 * Copyright (c) 2006 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Ali Saidi
29 */
30
31 /* @file
32 * Device model for Intel's 8254x line of gigabit ethernet controllers.
33 * In particular an 82547 revision 2 (82547GI) MAC because it seems to have the
34 * fewest workarounds in the driver. It will probably work with most of the
35 * other MACs with slight modifications.
36 */
37
38
39 /*
40 * @todo really there are multiple dma engines.. we should implement them.
41 */
42
43 #include <algorithm>
44
45 #include "base/inet.hh"
46 #include "base/trace.hh"
47 #include "dev/i8254xGBe.hh"
48 #include "mem/packet.hh"
49 #include "mem/packet_access.hh"
50 #include "params/IGbE.hh"
51 #include "sim/stats.hh"
52 #include "sim/system.hh"
53
54 using namespace iGbReg;
55 using namespace Net;
56
// Construct the 8254x MAC model: wire up the ethernet interface, set the
// power-on register defaults described in the Intel documentation, and
// build an emulated EEPROM image containing the MAC address and a valid
// checksum word.
IGbE::IGbE(const Params *p)
    : EtherDevice(p), etherInt(NULL), drainEvent(NULL), useFlowControl(p->use_flow_control),
      rxFifo(p->rx_fifo_size), txFifo(p->tx_fifo_size), rxTick(false),
      txTick(false), txFifoTick(false), rxDmaPacket(false), rdtrEvent(this), radvEvent(this),
      tadvEvent(this), tidvEvent(this), tickEvent(this), interEvent(this),
      rxDescCache(this, name()+".RxDesc", p->rx_desc_cache_size),
      txDescCache(this, name()+".TxDesc", p->tx_desc_cache_size), clock(p->clock)
{
    etherInt = new IGbEInt(name() + ".int", this);

    // Initialized internal registers per Intel documentation
    // All registers intialized to 0 by per register constructor
    regs.ctrl.fd(1);
    regs.ctrl.lrst(1);
    regs.ctrl.speed(2);
    regs.ctrl.frcspd(1);
    regs.sts.speed(3); // Say we're 1000Mbps
    regs.sts.fd(1); // full duplex
    regs.sts.lu(1); // link up
    regs.eecd.fwe(1);
    regs.eecd.ee_type(1);
    regs.imr = 0;
    regs.iam = 0;
    regs.rxdctl.gran(1);
    regs.rxdctl.wthresh(1);
    regs.fcrth(1);

    // Packet buffer allocation: 48KB RX / 16KB TX (units of 1KB)
    regs.pba.rxa(0x30);
    regs.pba.txa(0x10);

    // Reset the bit-banged SPI EEPROM interface state machine
    eeOpBits = 0;
    eeAddrBits = 0;
    eeDataBits = 0;
    eeOpcode = 0;

    // clear all 64 16 bit words of the eeprom
    memset(&flash, 0, EEPROM_SIZE*2);

    // Set the MAC address in the first three EEPROM words (big-endian,
    // matching how the driver reads them out bit-serially)
    memcpy(flash, p->hardware_address.bytes(), ETH_ADDR_LEN);
    for (int x = 0; x < ETH_ADDR_LEN/2; x++)
        flash[x] = htobe(flash[x]);

    // The last EEPROM word is chosen so that the 16-bit sum of all words
    // equals the magic value the driver expects (EEPROM_CSUM).
    uint16_t csum = 0;
    for (int x = 0; x < EEPROM_SIZE; x++)
        csum += htobe(flash[x]);


    // Magic happy checksum value
    flash[EEPROM_SIZE-1] = htobe((uint16_t)(EEPROM_CSUM - csum));

    rxFifo.clear();
    txFifo.clear();
}
111
112 EtherInt*
113 IGbE::getEthPort(const std::string &if_name, int idx)
114 {
115
116 if (if_name == "interface") {
117 if (etherInt->getPeer())
118 panic("Port already connected to\n");
119 return etherInt;
120 }
121 return NULL;
122 }
123
// Handle a write to PCI configuration space. Standard config registers are
// delegated to the generic PciDev implementation; this device defines no
// device-specific config registers, so anything past PCI_DEVICE_SPECIFIC is
// a fatal error.
Tick
IGbE::writeConfig(PacketPtr pkt)
{
    int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
    if (offset < PCI_DEVICE_SPECIFIC)
        PciDev::writeConfig(pkt);
    else
        panic("Device specific PCI config space not implemented.\n");

    ///
    /// Some work may need to be done here based for the pci COMMAND bits.
    ///

    return pioDelay;
}
139
// Handle a 32-bit MMIO read of a device register (BAR0 only). Most
// registers simply return their stored value; ICR and RDTR have read side
// effects modeled per the 8254x documentation.
Tick
IGbE::read(PacketPtr pkt)
{
    int bar;
    Addr daddr;

    if (!getBAR(pkt->getAddr(), bar, daddr))
        panic("Invalid PCI memory access to unmapped memory.\n");

    // Only Memory register BAR is allowed
    assert(bar == 0);

    // Only 32bit accesses allowed
    assert(pkt->getSize() == 4);

    DPRINTF(Ethernet, "Read device register %#X\n", daddr);

    pkt->allocate();

    ///
    /// Handle read of register here
    ///


    switch (daddr) {
      case REG_CTRL:
        pkt->set<uint32_t>(regs.ctrl());
        break;
      case REG_STATUS:
        pkt->set<uint32_t>(regs.sts());
        break;
      case REG_EECD:
        pkt->set<uint32_t>(regs.eecd());
        break;
      case REG_EERD:
        pkt->set<uint32_t>(regs.eerd());
        break;
      case REG_CTRL_EXT:
        pkt->set<uint32_t>(regs.ctrl_ext());
        break;
      case REG_MDIC:
        pkt->set<uint32_t>(regs.mdic());
        break;
      case REG_ICR:
        // ICR is read-to-clear: reading clears the cause bits when the
        // interrupt is asserted or when all interrupts are masked, and
        // IAME additionally auto-masks IAM bits on read.
        DPRINTF(Ethernet, "Reading ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n", regs.icr(),
                regs.imr, regs.iam, regs.ctrl_ext.iame());
        pkt->set<uint32_t>(regs.icr());
        if (regs.icr.int_assert() || regs.imr == 0) {
            regs.icr = regs.icr() & ~mask(30);
            DPRINTF(Ethernet, "Cleared ICR. ICR=%#x\n", regs.icr());
        }
        if (regs.ctrl_ext.iame() && regs.icr.int_assert())
            regs.imr &= ~regs.iam;
        chkInterrupt();
        break;
      case REG_ITR:
        pkt->set<uint32_t>(regs.itr());
        break;
      case REG_RCTL:
        pkt->set<uint32_t>(regs.rctl());
        break;
      case REG_FCTTV:
        pkt->set<uint32_t>(regs.fcttv());
        break;
      case REG_TCTL:
        pkt->set<uint32_t>(regs.tctl());
        break;
      case REG_PBA:
        pkt->set<uint32_t>(regs.pba());
        break;
      case REG_WUC:
      case REG_LEDCTL:
        pkt->set<uint32_t>(0); // We don't care, so just return 0
        break;
      case REG_FCRTL:
        pkt->set<uint32_t>(regs.fcrtl());
        break;
      case REG_FCRTH:
        pkt->set<uint32_t>(regs.fcrth());
        break;
      case REG_RDBAL:
        pkt->set<uint32_t>(regs.rdba.rdbal());
        break;
      case REG_RDBAH:
        pkt->set<uint32_t>(regs.rdba.rdbah());
        break;
      case REG_RDLEN:
        pkt->set<uint32_t>(regs.rdlen());
        break;
      case REG_RDH:
        pkt->set<uint32_t>(regs.rdh());
        break;
      case REG_RDT:
        pkt->set<uint32_t>(regs.rdt());
        break;
      case REG_RDTR:
        pkt->set<uint32_t>(regs.rdtr());
        // Reading RDTR with the Flush Partial Descriptor bit set forces a
        // descriptor writeback and an immediate RX timer interrupt.
        if (regs.rdtr.fpd()) {
            rxDescCache.writeback(0);
            DPRINTF(EthernetIntr, "Posting interrupt because of RDTR.FPD write\n");
            postInterrupt(IT_RXT);
            regs.rdtr.fpd(0);
        }
        break;
      case REG_RADV:
        pkt->set<uint32_t>(regs.radv());
        break;
      case REG_TDBAL:
        pkt->set<uint32_t>(regs.tdba.tdbal());
        break;
      case REG_TDBAH:
        pkt->set<uint32_t>(regs.tdba.tdbah());
        break;
      case REG_TDLEN:
        pkt->set<uint32_t>(regs.tdlen());
        break;
      case REG_TDH:
        pkt->set<uint32_t>(regs.tdh());
        break;
      case REG_TDT:
        pkt->set<uint32_t>(regs.tdt());
        break;
      case REG_TIDV:
        pkt->set<uint32_t>(regs.tidv());
        break;
      case REG_TXDCTL:
        pkt->set<uint32_t>(regs.txdctl());
        break;
      case REG_TADV:
        pkt->set<uint32_t>(regs.tadv());
        break;
      case REG_RXCSUM:
        pkt->set<uint32_t>(regs.rxcsum());
        break;
      case REG_MANC:
        pkt->set<uint32_t>(regs.manc());
        break;
      default:
        // The filter/address/statistics register ranges are not modeled;
        // reads in those ranges return 0, anything else is fatal.
        if (!(daddr >= REG_VFTA && daddr < (REG_VFTA + VLAN_FILTER_TABLE_SIZE*4)) &&
            !(daddr >= REG_RAL && daddr < (REG_RAL + RCV_ADDRESS_TABLE_SIZE*8)) &&
            !(daddr >= REG_MTA && daddr < (REG_MTA + MULTICAST_TABLE_SIZE*4)) &&
            !(daddr >= REG_CRCERRS && daddr < (REG_CRCERRS + STATS_REGS_SIZE)))
            panic("Read request to unknown register number: %#x\n", daddr);
        else
            pkt->set<uint32_t>(0);
    };

    pkt->makeAtomicResponse();
    return pioDelay;
}
290
291 Tick
292 IGbE::write(PacketPtr pkt)
293 {
294 int bar;
295 Addr daddr;
296
297
298 if (!getBAR(pkt->getAddr(), bar, daddr))
299 panic("Invalid PCI memory access to unmapped memory.\n");
300
301 // Only Memory register BAR is allowed
302 assert(bar == 0);
303
304 // Only 32bit accesses allowed
305 assert(pkt->getSize() == sizeof(uint32_t));
306
307 DPRINTF(Ethernet, "Wrote device register %#X value %#X\n", daddr, pkt->get<uint32_t>());
308
309 ///
310 /// Handle write of register here
311 ///
312 uint32_t val = pkt->get<uint32_t>();
313
314 Regs::RCTL oldrctl;
315 Regs::TCTL oldtctl;
316
317 switch (daddr) {
318 case REG_CTRL:
319 regs.ctrl = val;
320 if (regs.ctrl.tfce())
321 warn("TX Flow control enabled, should implement\n");
322 if (regs.ctrl.rfce())
323 warn("RX Flow control enabled, should implement\n");
324 break;
325 case REG_CTRL_EXT:
326 regs.ctrl_ext = val;
327 break;
328 case REG_STATUS:
329 regs.sts = val;
330 break;
331 case REG_EECD:
332 int oldClk;
333 oldClk = regs.eecd.sk();
334 regs.eecd = val;
335 // See if this is a eeprom access and emulate accordingly
336 if (!oldClk && regs.eecd.sk()) {
337 if (eeOpBits < 8) {
338 eeOpcode = eeOpcode << 1 | regs.eecd.din();
339 eeOpBits++;
340 } else if (eeAddrBits < 8 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
341 eeAddr = eeAddr << 1 | regs.eecd.din();
342 eeAddrBits++;
343 } else if (eeDataBits < 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
344 assert(eeAddr>>1 < EEPROM_SIZE);
345 DPRINTF(EthernetEEPROM, "EEPROM bit read: %d word: %#X\n",
346 flash[eeAddr>>1] >> eeDataBits & 0x1, flash[eeAddr>>1]);
347 regs.eecd.dout((flash[eeAddr>>1] >> (15-eeDataBits)) & 0x1);
348 eeDataBits++;
349 } else if (eeDataBits < 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI) {
350 regs.eecd.dout(0);
351 eeDataBits++;
352 } else
353 panic("What's going on with eeprom interface? opcode:"
354 " %#x:%d addr: %#x:%d, data: %d\n", (uint32_t)eeOpcode,
355 (uint32_t)eeOpBits, (uint32_t)eeAddr,
356 (uint32_t)eeAddrBits, (uint32_t)eeDataBits);
357
358 // Reset everything for the next command
359 if ((eeDataBits == 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) ||
360 (eeDataBits == 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI)) {
361 eeOpBits = 0;
362 eeAddrBits = 0;
363 eeDataBits = 0;
364 eeOpcode = 0;
365 eeAddr = 0;
366 }
367
368 DPRINTF(EthernetEEPROM, "EEPROM: opcode: %#X:%d addr: %#X:%d\n",
369 (uint32_t)eeOpcode, (uint32_t) eeOpBits,
370 (uint32_t)eeAddr>>1, (uint32_t)eeAddrBits);
371 if (eeOpBits == 8 && !(eeOpcode == EEPROM_READ_OPCODE_SPI ||
372 eeOpcode == EEPROM_RDSR_OPCODE_SPI ))
373 panic("Unknown eeprom opcode: %#X:%d\n", (uint32_t)eeOpcode,
374 (uint32_t)eeOpBits);
375
376
377 }
378 // If driver requests eeprom access, immediately give it to it
379 regs.eecd.ee_gnt(regs.eecd.ee_req());
380 break;
381 case REG_EERD:
382 regs.eerd = val;
383 break;
384 case REG_MDIC:
385 regs.mdic = val;
386 if (regs.mdic.i())
387 panic("No support for interrupt on mdic complete\n");
388 if (regs.mdic.phyadd() != 1)
389 panic("No support for reading anything but phy\n");
390 DPRINTF(Ethernet, "%s phy address %x\n", regs.mdic.op() == 1 ? "Writing"
391 : "Reading", regs.mdic.regadd());
392 switch (regs.mdic.regadd()) {
393 case PHY_PSTATUS:
394 regs.mdic.data(0x796D); // link up
395 break;
396 case PHY_PID:
397 regs.mdic.data(0x02A8);
398 break;
399 case PHY_EPID:
400 regs.mdic.data(0x0380);
401 break;
402 case PHY_GSTATUS:
403 regs.mdic.data(0x7C00);
404 break;
405 case PHY_EPSTATUS:
406 regs.mdic.data(0x3000);
407 break;
408 case PHY_AGC:
409 regs.mdic.data(0x180); // some random length
410 break;
411 default:
412 regs.mdic.data(0);
413 }
414 regs.mdic.r(1);
415 break;
416 case REG_ICR:
417 DPRINTF(Ethernet, "Writing ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n", regs.icr(),
418 regs.imr, regs.iam, regs.ctrl_ext.iame());
419 if (regs.ctrl_ext.iame())
420 regs.imr &= ~regs.iam;
421 regs.icr = ~bits(val,30,0) & regs.icr();
422 chkInterrupt();
423 break;
424 case REG_ITR:
425 regs.itr = val;
426 break;
427 case REG_ICS:
428 DPRINTF(EthernetIntr, "Posting interrupt because of ICS write\n");
429 postInterrupt((IntTypes)val);
430 break;
431 case REG_IMS:
432 regs.imr |= val;
433 chkInterrupt();
434 break;
435 case REG_IMC:
436 regs.imr &= ~val;
437 chkInterrupt();
438 break;
439 case REG_IAM:
440 regs.iam = val;
441 break;
442 case REG_RCTL:
443 oldrctl = regs.rctl;
444 regs.rctl = val;
445 if (regs.rctl.rst()) {
446 rxDescCache.reset();
447 DPRINTF(EthernetSM, "RXS: Got RESET!\n");
448 rxFifo.clear();
449 regs.rctl.rst(0);
450 }
451 if (regs.rctl.en())
452 rxTick = true;
453 restartClock();
454 break;
455 case REG_FCTTV:
456 regs.fcttv = val;
457 break;
458 case REG_TCTL:
459 regs.tctl = val;
460 oldtctl = regs.tctl;
461 regs.tctl = val;
462 if (regs.tctl.en())
463 txTick = true;
464 restartClock();
465 if (regs.tctl.en() && !oldtctl.en()) {
466 txDescCache.reset();
467 }
468 break;
469 case REG_PBA:
470 regs.pba.rxa(val);
471 regs.pba.txa(64 - regs.pba.rxa());
472 break;
473 case REG_WUC:
474 case REG_LEDCTL:
475 case REG_FCAL:
476 case REG_FCAH:
477 case REG_FCT:
478 case REG_VET:
479 case REG_AIFS:
480 case REG_TIPG:
481 ; // We don't care, so don't store anything
482 break;
483 case REG_FCRTL:
484 regs.fcrtl = val;
485 break;
486 case REG_FCRTH:
487 regs.fcrth = val;
488 break;
489 case REG_RDBAL:
490 regs.rdba.rdbal( val & ~mask(4));
491 rxDescCache.areaChanged();
492 break;
493 case REG_RDBAH:
494 regs.rdba.rdbah(val);
495 rxDescCache.areaChanged();
496 break;
497 case REG_RDLEN:
498 regs.rdlen = val & ~mask(7);
499 rxDescCache.areaChanged();
500 break;
501 case REG_RDH:
502 regs.rdh = val;
503 rxDescCache.areaChanged();
504 break;
505 case REG_RDT:
506 regs.rdt = val;
507 DPRINTF(EthernetSM, "RXS: RDT Updated.\n");
508 if (getState() == SimObject::Running) {
509 DPRINTF(EthernetSM, "RXS: RDT Fetching Descriptors!\n");
510 rxDescCache.fetchDescriptors();
511 } else {
512 DPRINTF(EthernetSM, "RXS: RDT NOT Fetching Desc b/c draining!\n");
513 }
514 break;
515 case REG_RDTR:
516 regs.rdtr = val;
517 break;
518 case REG_RADV:
519 regs.radv = val;
520 break;
521 case REG_TDBAL:
522 regs.tdba.tdbal( val & ~mask(4));
523 txDescCache.areaChanged();
524 break;
525 case REG_TDBAH:
526 regs.tdba.tdbah(val);
527 txDescCache.areaChanged();
528 break;
529 case REG_TDLEN:
530 regs.tdlen = val & ~mask(7);
531 txDescCache.areaChanged();
532 break;
533 case REG_TDH:
534 regs.tdh = val;
535 txDescCache.areaChanged();
536 break;
537 case REG_TDT:
538 regs.tdt = val;
539 DPRINTF(EthernetSM, "TXS: TX Tail pointer updated\n");
540 if (getState() == SimObject::Running) {
541 DPRINTF(EthernetSM, "TXS: TDT Fetching Descriptors!\n");
542 txDescCache.fetchDescriptors();
543 } else {
544 DPRINTF(EthernetSM, "TXS: TDT NOT Fetching Desc b/c draining!\n");
545 }
546 break;
547 case REG_TIDV:
548 regs.tidv = val;
549 break;
550 case REG_TXDCTL:
551 regs.txdctl = val;
552 break;
553 case REG_TADV:
554 regs.tadv = val;
555 break;
556 case REG_RXCSUM:
557 regs.rxcsum = val;
558 break;
559 case REG_MANC:
560 regs.manc = val;
561 break;
562 default:
563 if (!(daddr >= REG_VFTA && daddr < (REG_VFTA + VLAN_FILTER_TABLE_SIZE*4)) &&
564 !(daddr >= REG_RAL && daddr < (REG_RAL + RCV_ADDRESS_TABLE_SIZE*8)) &&
565 !(daddr >= REG_MTA && daddr < (REG_MTA + MULTICAST_TABLE_SIZE*4)))
566 panic("Write request to unknown register number: %#x\n", daddr);
567 };
568
569 pkt->makeAtomicResponse();
570 return pioDelay;
571 }
572
// Record an interrupt cause in ICR and either raise the CPU interrupt
// immediately (now == true, or no throttling configured) or schedule it
// after the ITR moderation interval (in 256ns units per the datasheet).
void
IGbE::postInterrupt(IntTypes t, bool now)
{
    assert(t);

    // Interrupt is already pending
    if (t & regs.icr() && !now)
        return;

    regs.icr = regs.icr() | t;
    if (regs.itr.interval() == 0 || now) {
        // No throttling (or forced): deliver right away, cancelling any
        // pending delayed delivery.
        if (interEvent.scheduled()) {
            interEvent.deschedule();
        }
        cpuPostInt();
    } else {
        DPRINTF(EthernetIntr, "EINT: Scheduling timer interrupt for %d ticks\n",
                Clock::Int::ns * 256 * regs.itr.interval());
        if (!interEvent.scheduled()) {
            interEvent.schedule(curTick + Clock::Int::ns * 256 * regs.itr.interval());
        }
    }
}
596
// Callback for the ITR-delayed interrupt event: deliver the interrupt now.
void
IGbE::delayIntEvent()
{
    cpuPostInt();
}
602
603
// Actually assert the interrupt line to the CPU, provided at least one
// unmasked cause is pending. Any pending delayed-interrupt timers (RDTR,
// RADV, TIDV, TADV) are folded into this delivery: their cause bits are
// set and the timers cancelled, since this interrupt covers them.
void
IGbE::cpuPostInt()
{

    if (!(regs.icr() & regs.imr)) {
        DPRINTF(Ethernet, "Interrupt Masked. Not Posting\n");
        return;
    }

    DPRINTF(Ethernet, "Posting Interrupt\n");


    if (interEvent.scheduled()) {
        interEvent.deschedule();
    }

    if (rdtrEvent.scheduled()) {
        regs.icr.rxt0(1);
        rdtrEvent.deschedule();
    }
    if (radvEvent.scheduled()) {
        regs.icr.rxt0(1);
        radvEvent.deschedule();
    }
    if (tadvEvent.scheduled()) {
        regs.icr.txdw(1);
        tadvEvent.deschedule();
    }
    if (tidvEvent.scheduled()) {
        regs.icr.txdw(1);
        tidvEvent.deschedule();
    }

    regs.icr.int_assert(1);
    DPRINTF(EthernetIntr, "EINT: Posting interrupt to CPU now. Vector %#x\n",
            regs.icr());

    intrPost();

}
644
// Deassert the interrupt line to the CPU if it is currently asserted.
void
IGbE::cpuClearInt()
{
    if (regs.icr.int_assert()) {
        regs.icr.int_assert(0);
        DPRINTF(EthernetIntr, "EINT: Clearing interrupt to CPU now. Vector %#x\n",
                regs.icr());
        intrClear();
    }
}
655
// Re-evaluate interrupt state after ICR/IMR changed: clear the CPU
// interrupt if everything pending is now masked, or (re)deliver/schedule
// one if an unmasked cause remains, honoring ITR throttling.
void
IGbE::chkInterrupt()
{
    DPRINTF(Ethernet, "Checking interrupts icr: %#x imr: %#x\n", regs.icr(),
            regs.imr);
    // Check if we need to clear the cpu interrupt
    if (!(regs.icr() & regs.imr)) {
        DPRINTF(Ethernet, "Mask cleaned all interrupts\n");
        if (interEvent.scheduled())
            interEvent.deschedule();
        if (regs.icr.int_assert())
            cpuClearInt();
    }
    DPRINTF(Ethernet, "ITR = %#X itr.interval = %#X\n", regs.itr(), regs.itr.interval());

    if (regs.icr() & regs.imr) {
        if (regs.itr.interval() == 0) {
            cpuPostInt();
        } else {
            DPRINTF(Ethernet, "Possibly scheduling interrupt because of imr write\n");
            if (!interEvent.scheduled()) {
                // ITR interval is in 256ns increments
                DPRINTF(Ethernet, "Scheduling for %d\n", curTick + Clock::Int::ns
                        * 256 * regs.itr.interval());
                interEvent.schedule(curTick + Clock::Int::ns * 256 * regs.itr.interval());
            }
        }
    }


}
686
687
// Construct the RX descriptor cache: a DescCache specialized for RxDesc
// with no packet-write DMA in flight initially.
IGbE::RxDescCache::RxDescCache(IGbE *i, const std::string n, int s)
    : DescCache<RxDesc>(i, n, s), pktDone(false), pktEvent(this)

{
}
693
// Begin DMAing a received packet into the buffer of the next unused RX
// descriptor. Returns false (caller must retry later) if no descriptor is
// cached; otherwise starts the write and pktComplete() fires when done.
// Multi-descriptor packets are not supported: the packet must fit in one
// descriptor buffer.
bool
IGbE::RxDescCache::writePacket(EthPacketPtr packet)
{
    // We shouldn't have to deal with any of these yet
    DPRINTF(EthernetDesc, "Packet Length: %d Desc Size: %d\n",
            packet->length, igbe->regs.rctl.descSize());
    assert(packet->length < igbe->regs.rctl.descSize());

    if (!unusedCache.size())
        return false;

    pktPtr = packet;
    pktDone = false;
    igbe->dmaWrite(igbe->platform->pciToDma(unusedCache.front()->buf),
                   packet->length, &pktEvent, packet->data);
    return true;
}
711
// DMA-completion callback for writePacket(): fill in the RX descriptor
// (length, status, checksum offload results, error bits), move it to the
// used list, and arm the receive-delay interrupt timers per RDTR/RADV.
void
IGbE::RxDescCache::pktComplete()
{
    assert(unusedCache.size());
    RxDesc *desc;
    desc = unusedCache.front();


    // Reported length includes the 4-byte CRC unless stripping is enabled
    uint16_t crcfixup = igbe->regs.rctl.secrc() ? 0 : 4 ;
    desc->len = htole((uint16_t)(pktPtr->length + crcfixup));
    DPRINTF(EthernetDesc, "pktPtr->length: %d stripcrc offset: %d value written: %d %d\n",
            pktPtr->length, crcfixup,
            htole((uint16_t)(pktPtr->length + crcfixup)),
            (uint16_t)(pktPtr->length + crcfixup));

    // no support for anything but starting at 0
    assert(igbe->regs.rxcsum.pcss() == 0);

    DPRINTF(EthernetDesc, "Packet written to memory updating Descriptor\n");

    // Every packet is a complete single-descriptor packet here
    uint8_t status = RXDS_DD | RXDS_EOP;
    uint8_t err = 0;

    IpPtr ip(pktPtr);

    if (ip) {
        DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n", ip->id());

        // RX checksum offload: verify IP/TCP/UDP checksums when enabled
        // and record the result in the descriptor.
        if (igbe->regs.rxcsum.ipofld()) {
            DPRINTF(EthernetDesc, "Checking IP checksum\n");
            status |= RXDS_IPCS;
            desc->csum = htole(cksum(ip));
            if (cksum(ip) != 0) {
                err |= RXDE_IPE;
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
            }
        }
        TcpPtr tcp(ip);
        if (tcp && igbe->regs.rxcsum.tuofld()) {
            DPRINTF(EthernetDesc, "Checking TCP checksum\n");
            status |= RXDS_TCPCS;
            desc->csum = htole(cksum(tcp));
            if (cksum(tcp) != 0) {
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
                err |= RXDE_TCPE;
            }
        }

        UdpPtr udp(ip);
        if (udp && igbe->regs.rxcsum.tuofld()) {
            DPRINTF(EthernetDesc, "Checking UDP checksum\n");
            status |= RXDS_UDPCS;
            desc->csum = htole(cksum(udp));
            if (cksum(udp) != 0) {
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
                // NOTE(review): RXDE_TCPE is set for a bad UDP checksum
                // too — presumably the hardware shares one TCP/UDP error
                // bit; confirm against the 8254x manual.
                err |= RXDE_TCPE;
            }
        }
    } else { // if ip
        DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n");
    }


    desc->status = htole(status);
    desc->errors = htole(err);

    // No vlan support at this point... just set it to 0
    desc->vlan = 0;

    // Deal with the rx timer interrupts
    if (igbe->regs.rdtr.delay()) {
        // Packet timer: restarted on every received packet
        DPRINTF(EthernetSM, "RXS: Scheduling DTR for %d\n",
                igbe->regs.rdtr.delay() * igbe->intClock());
        igbe->rdtrEvent.reschedule(curTick + igbe->regs.rdtr.delay() *
                                   igbe->intClock(),true);
    }

    if (igbe->regs.radv.idv() && igbe->regs.rdtr.delay()) {
        // Absolute timer: only armed once, caps the packet-timer delay
        DPRINTF(EthernetSM, "RXS: Scheduling ADV for %d\n",
                igbe->regs.radv.idv() * igbe->intClock());
        if (!igbe->radvEvent.scheduled()) {
            igbe->radvEvent.schedule(curTick + igbe->regs.radv.idv() *
                                     igbe->intClock());
        }
    }

    // if neither radv or rdtr, maybe itr is set...
    if (!igbe->regs.rdtr.delay()) {
        DPRINTF(EthernetSM, "RXS: Receive interrupt delay disabled, posting IT_RXT\n");
        igbe->postInterrupt(IT_RXT);
    }

    // If the packet is small enough, interrupt appropriately
    // I wonder if this is delayed or not?!
    if (pktPtr->length <= igbe->regs.rsrpd.idv()) {
        DPRINTF(EthernetSM, "RXS: Posting IT_SRPD beacuse small packet received\n");
        igbe->postInterrupt(IT_SRPD);
    }

    DPRINTF(EthernetDesc, "Processing of this descriptor complete\n");
    unusedCache.pop_front();
    usedCache.push_back(desc);


    // Wake the RX state machine and report completion
    pktPtr = NULL;
    enableSm();
    pktDone = true;
    igbe->checkDrain();

}
822
// Re-enable the RX state machine and make sure the device clock is ticking.
void
IGbE::RxDescCache::enableSm()
{
    igbe->rxTick = true;
    igbe->restartClock();
}
829
830 bool
831 IGbE::RxDescCache::packetDone()
832 {
833 if (pktDone) {
834 pktDone = false;
835 return true;
836 }
837 return false;
838 }
839
// True while any RX-side DMA (packet write, descriptor writeback or fetch)
// is still in flight; used to decide whether draining must wait.
bool
IGbE::RxDescCache::hasOutstandingEvents()
{
    return pktEvent.scheduled() || wbEvent.scheduled() ||
        fetchEvent.scheduled();
}
846
// Checkpoint the RX descriptor cache: base-class state plus the
// packet-completion flag.
void
IGbE::RxDescCache::serialize(std::ostream &os)
{
    DescCache<RxDesc>::serialize(os);
    SERIALIZE_SCALAR(pktDone);
}
853
// Restore the RX descriptor cache from a checkpoint (mirror of serialize).
void
IGbE::RxDescCache::unserialize(Checkpoint *cp, const std::string &section)
{
    DescCache<RxDesc>::unserialize(cp, section);
    UNSERIALIZE_SCALAR(pktDone);
}
860
861
///////////////////////////////////// IGbE::TxDesc /////////////////////////////////

// Construct the TX descriptor cache with no DMA in flight and no packet
// pending.
IGbE::TxDescCache::TxDescCache(IGbE *i, const std::string n, int s)
    : DescCache<TxDesc>(i,n, s), pktDone(false), isTcp(false), pktWaiting(false),
      pktEvent(this)

{
}
870
// Return the byte length of the next TX data descriptor, consuming (and
// marking done) any context descriptors that precede it. Returns -1 if no
// data descriptor is currently cached.
int
IGbE::TxDescCache::getPacketSize()
{
    assert(unusedCache.size());

    TxDesc *desc;

    DPRINTF(EthernetDesc, "Starting processing of descriptor\n");

    // Context descriptors carry checksum-offload setup; we only record
    // whether the upcoming packet is TCP and otherwise skip them.
    while (unusedCache.size() && TxdOp::isContext(unusedCache.front())) {
        DPRINTF(EthernetDesc, "Got context descriptor type... skipping\n");

        // I think we can just ignore these for now?
        desc = unusedCache.front();
        DPRINTF(EthernetDesc, "Descriptor upper: %#x lower: %#X\n", desc->d1,
                desc->d2);
        // is this going to be a tcp or udp packet?
        isTcp = TxdOp::tcp(desc) ? true : false;

        // make sure it's ipv4
        //assert(TxdOp::ip(desc));

        TxdOp::setDd(desc);
        unusedCache.pop_front();
        usedCache.push_back(desc);
    }

    if (!unusedCache.size())
        return -1;

    DPRINTF(EthernetDesc, "Next TX packet is %d bytes\n",
            TxdOp::getLen(unusedCache.front()));

    return TxdOp::getLen(unusedCache.front());
}
906
// Start DMAing the payload described by the head TX descriptor into the
// supplied packet buffer (appending at p->length); pktComplete() fires
// when the read finishes. Caller must have reserved FIFO space.
void
IGbE::TxDescCache::getPacketData(EthPacketPtr p)
{
    assert(unusedCache.size());

    TxDesc *desc;
    desc = unusedCache.front();

    // Only legacy/data descriptors with a non-zero length reach here
    assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) && TxdOp::getLen(desc));

    pktPtr = p;

    pktWaiting = true;

    DPRINTF(EthernetDesc, "Starting DMA of packet\n");
    igbe->dmaRead(igbe->platform->pciToDma(TxdOp::getBuf(desc)),
                  TxdOp::getLen(desc), &pktEvent, p->data + p->length);


}
927
// DMA-completion callback for getPacketData(): finalize the TX descriptor.
// For a non-EOP descriptor (first half of a two-descriptor packet) just
// record the partial length and return; for EOP, perform any requested
// checksum offload on the assembled packet, mark the descriptor done, arm
// the TIDV/TADV delay timers if IDE is set, and possibly write back used
// descriptors per TXDCTL.WTHRESH.
void
IGbE::TxDescCache::pktComplete()
{

    TxDesc *desc;
    assert(unusedCache.size());
    assert(pktPtr);

    DPRINTF(EthernetDesc, "DMA of packet complete\n");

    desc = unusedCache.front();
    assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) && TxdOp::getLen(desc));

    DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n", desc->d1, desc->d2);

    if (!TxdOp::eop(desc)) {
        // This only supports two descriptors per tx packet
        assert(pktPtr->length == 0);
        pktPtr->length = TxdOp::getLen(desc);
        unusedCache.pop_front();
        usedCache.push_back(desc);
        pktDone = true;
        pktWaiting = false;
        pktPtr = NULL;

        DPRINTF(EthernetDesc, "Partial Packet Descriptor Done\n");
        enableSm();
        return;
    }

    // Set the length of the data in the EtherPacket
    pktPtr->length += TxdOp::getLen(desc);

    // no support for vlans
    assert(!TxdOp::vle(desc));

    // we alway report status
    assert(TxdOp::rs(desc));

    // we only support single packet descriptors at this point
    assert(TxdOp::eop(desc));

    // set that this packet is done
    TxdOp::setDd(desc);

    DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n", desc->d1, desc->d2);

    if (DTRACE(EthernetDesc)) {
        IpPtr ip(pktPtr);
        if (ip)
            DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n",
                    ip->id());
        else
            DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n");
    }

    // Checksums are only ofloaded for new descriptor types
    if (TxdOp::isData(desc) && ( TxdOp::ixsm(desc) || TxdOp::txsm(desc)) ) {
        DPRINTF(EthernetDesc, "Calculating checksums for packet\n");
        IpPtr ip(pktPtr);

        // IXSM: insert the IP header checksum
        if (TxdOp::ixsm(desc)) {
            ip->sum(0);
            ip->sum(cksum(ip));
            DPRINTF(EthernetDesc, "Calculated IP checksum\n");
        }
        // TXSM: insert the TCP or UDP checksum
        if (TxdOp::txsm(desc)) {
            TcpPtr tcp(ip);
            UdpPtr udp(ip);
            if (tcp) {
                tcp->sum(0);
                tcp->sum(cksum(tcp));
                DPRINTF(EthernetDesc, "Calculated TCP checksum\n");
            } else if (udp) {
                assert(udp);
                udp->sum(0);
                udp->sum(cksum(udp));
                DPRINTF(EthernetDesc, "Calculated UDP checksum\n");
            } else {
                panic("Told to checksum, but don't know how\n");
            }
        }
    }

    if (TxdOp::ide(desc)) {
        // Deal with the rx timer interrupts
        DPRINTF(EthernetDesc, "Descriptor had IDE set\n");
        if (igbe->regs.tidv.idv()) {
            DPRINTF(EthernetDesc, "setting tidv\n");
            igbe->tidvEvent.reschedule(curTick + igbe->regs.tidv.idv() *
                                       igbe->intClock(), true);
        }

        if (igbe->regs.tadv.idv() && igbe->regs.tidv.idv()) {
            DPRINTF(EthernetDesc, "setting tadv\n");
            if (!igbe->tadvEvent.scheduled()) {
                igbe->tadvEvent.schedule(curTick + igbe->regs.tadv.idv() *
                                         igbe->intClock());
            }
        }
    }



    unusedCache.pop_front();
    usedCache.push_back(desc);
    pktDone = true;
    pktWaiting = false;
    pktPtr = NULL;

    DPRINTF(EthernetDesc, "Descriptor Done\n");

    if (igbe->regs.txdctl.wthresh() == 0) {
        DPRINTF(EthernetDesc, "WTHRESH == 0, writing back descriptor\n");
        writeback(0);
    } else if (igbe->regs.txdctl.wthresh() >= usedCache.size()) {
        // NOTE(review): the condition here (wthresh >= used) looks inverted
        // relative to the message below ("used > WTHRESH") — confirm which
        // direction the threshold is meant to compare.
        DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n");
        writeback((igbe->cacheBlockSize()-1)>>4);
    }
    enableSm();
    igbe->checkDrain();
}
1050
// Checkpoint the TX descriptor cache: base-class state plus the packet
// progress flags.
void
IGbE::TxDescCache::serialize(std::ostream &os)
{
    DescCache<TxDesc>::serialize(os);
    SERIALIZE_SCALAR(pktDone);
    SERIALIZE_SCALAR(isTcp);
    SERIALIZE_SCALAR(pktWaiting);
}
1059
// Restore the TX descriptor cache from a checkpoint (mirror of serialize).
void
IGbE::TxDescCache::unserialize(Checkpoint *cp, const std::string &section)
{
    DescCache<TxDesc>::unserialize(cp, section);
    UNSERIALIZE_SCALAR(pktDone);
    UNSERIALIZE_SCALAR(isTcp);
    UNSERIALIZE_SCALAR(pktWaiting);
}
1068
1069 bool
1070 IGbE::TxDescCache::packetAvailable()
1071 {
1072 if (pktDone) {
1073 pktDone = false;
1074 return true;
1075 }
1076 return false;
1077 }
1078
// Re-enable the TX state machine and make sure the device clock is ticking.
void
IGbE::TxDescCache::enableSm()
{
    igbe->txTick = true;
    igbe->restartClock();
}
1085
// True while any TX-side DMA (packet read, descriptor writeback or fetch)
// is still in flight; used to decide whether draining must wait.
bool
IGbE::TxDescCache::hasOutstandingEvents()
{
    return pktEvent.scheduled() || wbEvent.scheduled() ||
        fetchEvent.scheduled();
}
1092
1093
1094 ///////////////////////////////////// IGbE /////////////////////////////////
1095
// (Re)start the device tick event if any state machine wants to run and
// the simulation is not draining, aligning the first tick to the next
// clock-cycle boundary.
void
IGbE::restartClock()
{
    if (!tickEvent.scheduled() && (rxTick || txTick || txFifoTick) && getState() ==
        SimObject::Running)
        tickEvent.schedule((curTick/cycles(1)) * cycles(1) + cycles(1));
}
1103
// Drain the device for checkpointing: stop all state machines, drain both
// ports, and report the number of outstanding activities that must finish
// before the drain completes (descriptor-cache DMA counts as one).
unsigned int
IGbE::drain(Event *de)
{
    unsigned int count;
    count = pioPort->drain(de) + dmaPort->drain(de);
    if (rxDescCache.hasOutstandingEvents() ||
        txDescCache.hasOutstandingEvents()) {
        count++;
        // Remember the event so checkDrain() can signal when DMA finishes
        drainEvent = de;
    }

    txFifoTick = false;
    txTick = false;
    rxTick = false;

    if (tickEvent.scheduled())
        tickEvent.deschedule();

    if (count)
        changeState(Draining);
    else
        changeState(Drained);

    return count;
}
1129
// Resume after a drain: restart all state machines and the device clock.
void
IGbE::resume()
{
    SimObject::resume();

    txFifoTick = true;
    txTick = true;
    rxTick = true;

    restartClock();
}
1141
// Called when DMA activity completes while draining: if no descriptor-cache
// events remain outstanding, signal the pending drain event.
void
IGbE::checkDrain()
{
    // Not draining
    if (!drainEvent)
        return;

    txFifoTick = false;
    txTick = false;
    rxTick = false;
    if (!rxDescCache.hasOutstandingEvents() &&
        !txDescCache.hasOutstandingEvents()) {
        drainEvent->process();
        drainEvent = NULL;
    }
}
1157
// One step of the TX state machine, run from the device tick. Moves a
// fully-assembled packet into the TX FIFO, posts low-descriptor interrupts,
// and otherwise advances descriptor processing: fetch descriptors when the
// cache is empty, start the next packet's DMA when FIFO space allows, or
// stop ticking when there is nothing to do.
void
IGbE::txStateMachine()
{
    if (!regs.tctl.en()) {
        txTick = false;
        DPRINTF(EthernetSM, "TXS: TX disabled, stopping ticking\n");
        return;
    }

    // If we have a packet available and it's length is not 0 (meaning it's not
    // a multidescriptor packet) put it in the fifo, otherwise an the next
    // iteration we'll get the rest of the data
    if (txPacket && txDescCache.packetAvailable() && txPacket->length) {
        bool success;

        DPRINTF(EthernetSM, "TXS: packet placed in TX FIFO\n");
        success = txFifo.push(txPacket);
        txFifoTick = true;
        assert(success);
        txPacket = NULL;
        txDescCache.writeback((cacheBlockSize()-1)>>4);
        return;
    }

    // Only support descriptor granularity
    assert(regs.txdctl.gran());
    // LWTHRESH is in units of 8 descriptors
    if (regs.txdctl.lwthresh() && txDescCache.descLeft() < (regs.txdctl.lwthresh() * 8)) {
        DPRINTF(EthernetSM, "TXS: LWTHRESH caused posting of TXDLOW\n");
        postInterrupt(IT_TXDLOW);
    }

    // Allocate a buffer for the next packet if one isn't in progress
    if (!txPacket) {
        txPacket = new EthPacketData(16384);
    }

    if (!txDescCache.packetWaiting()) {
        if (txDescCache.descLeft() == 0) {
            postInterrupt(IT_TXQE);
            txDescCache.writeback(0);
            DPRINTF(EthernetSM, "TXS: No descriptors left in ring, forcing "
                    "writeback stopping ticking and posting TXQE\n");
            txDescCache.fetchDescriptors();
            txTick = false;
            return;
        }


        if (!(txDescCache.descUnused())) {
            DPRINTF(EthernetSM, "TXS: No descriptors available in cache, fetching and stopping ticking\n");
            txTick = false;
            txDescCache.fetchDescriptors();
            return;
        }

        int size;
        size = txDescCache.getPacketSize();
        if (size > 0 && txFifo.avail() > size) {
            DPRINTF(EthernetSM, "TXS: Reserving %d bytes in FIFO and begining "
                    "DMA of next packet\n", size);
            txFifo.reserve(size);
            txDescCache.getPacketData(txPacket);
        } else if (size <= 0) {
            DPRINTF(EthernetSM, "TXS: getPacketSize returned: %d\n", size);
            DPRINTF(EthernetSM, "TXS: No packets to get, writing back used descriptors\n");
            txDescCache.writeback(0);
        } else {
            // FIFO full: write back and wait for space to free up
            txDescCache.writeback((cacheBlockSize()-1)>>4);
            DPRINTF(EthernetSM, "TXS: FIFO full, stopping ticking until space "
                    "available in FIFO\n");
            txTick = false;
        }


        return;
    }
    DPRINTF(EthernetSM, "TXS: Nothing to do, stopping ticking\n");
    txTick = false;
}
1236
1237 bool
1238 IGbE::ethRxPkt(EthPacketPtr pkt)
1239 {
1240 DPRINTF(Ethernet, "RxFIFO: Receiving pcakte from wire\n");
1241
1242 if (!regs.rctl.en()) {
1243 DPRINTF(Ethernet, "RxFIFO: RX not enabled, dropping\n");
1244 return true;
1245 }
1246
1247 // restart the state machines if they are stopped
1248 rxTick = true;
1249 if ((rxTick || txTick) && !tickEvent.scheduled()) {
1250 DPRINTF(EthernetSM, "RXS: received packet into fifo, starting ticking\n");
1251 restartClock();
1252 }
1253
1254 if (!rxFifo.push(pkt)) {
1255 DPRINTF(Ethernet, "RxFIFO: Packet won't fit in fifo... dropped\n");
1256 postInterrupt(IT_RXO, true);
1257 return false;
1258 }
1259 return true;
1260 }
1261
1262
/**
 * One step of the receive descriptor state machine. Clocked from tick()
 * only while rxTick is set; each exit path with no further work clears
 * rxTick to stop the clocking.
 */
void
IGbE::rxStateMachine()
{
    // RX must be enabled by the driver (RCTL.EN).
    if (!regs.rctl.en()) {
        rxTick = false;
        DPRINTF(EthernetSM, "RXS: RX disabled, stopping ticking\n");
        return;
    }

    // If the packet is done check for interrupts/descriptors/etc
    if (rxDescCache.packetDone()) {
        rxDmaPacket = false;
        DPRINTF(EthernetSM, "RXS: Packet completed DMA to memory\n");
        int descLeft = rxDescCache.descLeft();
        // RCTL.RDMTS selects the minimum-threshold fraction of the ring;
        // the fallthroughs are intentional: each case breaks out only if
        // its threshold is NOT yet reached, otherwise control falls into
        // the interrupt post.
        switch (regs.rctl.rdmts()) {
        case 2: if (descLeft > .125 * regs.rdlen()) break;
        case 1: if (descLeft > .250 * regs.rdlen()) break;
        case 0: if (descLeft > .500 * regs.rdlen()) break;
            DPRINTF(Ethernet, "RXS: Interrupting (RXDMT) because of descriptors left\n");
            postInterrupt(IT_RXDMT);
            break;
        }

        // Ring exhausted: flush everything and stop until the driver
        // provides more descriptors.
        if (descLeft == 0) {
            rxDescCache.writeback(0);
            rxTick = false;
        }

        // only support descriptor granularities
        assert(regs.rxdctl.gran());

        // RXDCTL.WTHRESH: write back used descriptors once enough have
        // accumulated.
        if (regs.rxdctl.wthresh() >= rxDescCache.descUsed()) {
            DPRINTF(EthernetSM, "RXS: Writing back because WTHRESH >= descUsed\n");
            // NOTE(review): if wthresh() is 0 this passes (unsigned)-1 to
            // writeback() — verify writeback() tolerates that.
            if (regs.rxdctl.wthresh() < (cacheBlockSize()>>4))
                rxDescCache.writeback(regs.rxdctl.wthresh()-1);
            else
                rxDescCache.writeback((cacheBlockSize()-1)>>4);
        }

        // RXDCTL.PTHRESH/HTHRESH: prefetch more descriptors when the
        // cached supply runs low and enough remain in host memory.
        if ((rxDescCache.descUnused() < regs.rxdctl.pthresh()) &&
            ((rxDescCache.descLeft() - rxDescCache.descUnused()) > regs.rxdctl.hthresh())) {
            DPRINTF(EthernetSM, "RXS: Fetching descriptors because descUnused < PTHRESH\n");
            rxDescCache.fetchDescriptors();
        }

        if (rxDescCache.descUnused() == 0) {
            DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
                    "fetching descriptors and stopping ticking\n");
            rxTick = false;
            rxDescCache.fetchDescriptors();
        }
        return;
    }

    // A packet DMA is still in flight; sleep until it completes.
    if (rxDmaPacket) {
        DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n");
        rxTick = false;
        return;
    }

    // No cached descriptors to receive into; fetch and sleep.
    if (!rxDescCache.descUnused()) {
        DPRINTF(EthernetSM, "RXS: No descriptors available in cache, stopping ticking\n");
        rxTick = false;
        DPRINTF(EthernetSM, "RXS: No descriptors available, fetching\n");
        rxDescCache.fetchDescriptors();
        return;
    }

    // Nothing waiting on the wire side.
    if (rxFifo.empty()) {
        DPRINTF(EthernetSM, "RXS: RxFIFO empty, stopping ticking\n");
        rxTick = false;
        return;
    }

    EthPacketPtr pkt;
    pkt = rxFifo.front();

    DPRINTF(EthernetSM, "RXS: Writing packet into memory\n");
    // writePacket() returning false means the DMA couldn't start yet;
    // leave the packet in the FIFO and try again next tick.
    if (!rxDescCache.writePacket(pkt)) {
        return;
    }

    DPRINTF(EthernetSM, "RXS: Removing packet from FIFO\n");
    rxFifo.pop();
    DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n");
    rxTick = false;
    rxDmaPacket = true;
}
1351
1352 void
1353 IGbE::txWire()
1354 {
1355 if (txFifo.empty()) {
1356 txFifoTick = false;
1357 return;
1358 }
1359
1360 if (etherInt->askBusy()) {
1361 // We'll get woken up when the packet ethTxDone() gets called
1362 txFifoTick = false;
1363 } else {
1364 if (DTRACE(EthernetSM)) {
1365 IpPtr ip(txFifo.front());
1366 if (ip)
1367 DPRINTF(EthernetSM, "Transmitting Ip packet with Id=%d\n",
1368 ip->id());
1369 else
1370 DPRINTF(EthernetSM, "Transmitting Non-Ip packet\n");
1371 }
1372
1373 bool r = etherInt->sendPacket(txFifo.front());
1374 assert(r);
1375 r += 1;
1376 DPRINTF(EthernetSM, "TxFIFO: Successful transmit, bytes available in fifo: %d\n",
1377 txFifo.avail());
1378 txFifo.pop();
1379 }
1380 }
1381
1382 void
1383 IGbE::tick()
1384 {
1385 DPRINTF(EthernetSM, "IGbE: -------------- Cycle --------------\n");
1386
1387 if (rxTick)
1388 rxStateMachine();
1389
1390 if (txTick)
1391 txStateMachine();
1392
1393 if (txFifoTick)
1394 txWire();
1395
1396
1397 if (rxTick || txTick || txFifoTick)
1398 tickEvent.schedule(curTick + cycles(1));
1399 }
1400
1401 void
1402 IGbE::ethTxDone()
1403 {
1404 // restart the tx state machines if they are stopped
1405 // fifo to send another packet
1406 // tx sm to put more data into the fifo
1407 txFifoTick = true;
1408 if (txDescCache.descLeft() != 0)
1409 txTick = true;
1410
1411 restartClock();
1412 DPRINTF(EthernetSM, "TxFIFO: Transmission complete\n");
1413 }
1414
/**
 * Checkpoint the device: PCI state, register file, EEPROM state machine,
 * FIFOs, any in-flight TX packet, pending timer-event times, and both
 * descriptor caches. unserialize() must read fields in the same order.
 *
 * NOTE(review): rxDmaPacket and the rx/tx tick flags are not saved;
 * unserialize() unconditionally re-enables ticking — verify this is safe
 * for checkpoints taken mid-DMA.
 */
void
IGbE::serialize(std::ostream &os)
{
    PciDev::serialize(os);

    regs.serialize(os);
    // EEPROM bit-banging interface state.
    SERIALIZE_SCALAR(eeOpBits);
    SERIALIZE_SCALAR(eeAddrBits);
    SERIALIZE_SCALAR(eeDataBits);
    SERIALIZE_SCALAR(eeOpcode);
    SERIALIZE_SCALAR(eeAddr);
    SERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);

    rxFifo.serialize("rxfifo", os);
    txFifo.serialize("txfifo", os);

    // txPacket may be NULL; record its existence so unserialize() knows
    // whether to reconstruct it.
    bool txPktExists = txPacket;
    SERIALIZE_SCALAR(txPktExists);
    if (txPktExists)
        txPacket->serialize("txpacket", os);

    // Record absolute fire times of pending timer events (0 = not
    // scheduled).
    Tick rdtr_time = 0, radv_time = 0, tidv_time = 0, tadv_time = 0,
        inter_time = 0;

    if (rdtrEvent.scheduled())
        rdtr_time = rdtrEvent.when();
    SERIALIZE_SCALAR(rdtr_time);

    if (radvEvent.scheduled())
        radv_time = radvEvent.when();
    SERIALIZE_SCALAR(radv_time);

    if (tidvEvent.scheduled())
        tidv_time = tidvEvent.when();
    SERIALIZE_SCALAR(tidv_time);

    if (tadvEvent.scheduled())
        tadv_time = tadvEvent.when();
    SERIALIZE_SCALAR(tadv_time);

    if (interEvent.scheduled())
        inter_time = interEvent.when();
    SERIALIZE_SCALAR(inter_time);

    // Descriptor caches get their own named checkpoint sections.
    nameOut(os, csprintf("%s.TxDescCache", name()));
    txDescCache.serialize(os);

    nameOut(os, csprintf("%s.RxDescCache", name()));
    rxDescCache.serialize(os);
}
1465
/**
 * Restore the device from a checkpoint; must read fields in exactly the
 * order serialize() wrote them.
 */
void
IGbE::unserialize(Checkpoint *cp, const std::string &section)
{
    PciDev::unserialize(cp, section);

    regs.unserialize(cp, section);
    // EEPROM bit-banging interface state.
    UNSERIALIZE_SCALAR(eeOpBits);
    UNSERIALIZE_SCALAR(eeAddrBits);
    UNSERIALIZE_SCALAR(eeDataBits);
    UNSERIALIZE_SCALAR(eeOpcode);
    UNSERIALIZE_SCALAR(eeAddr);
    UNSERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);

    rxFifo.unserialize("rxfifo", cp, section);
    txFifo.unserialize("txfifo", cp, section);

    // Reconstruct the in-flight TX packet only if one existed at
    // checkpoint time (same 16KB max size used by txStateMachine()).
    bool txPktExists;
    UNSERIALIZE_SCALAR(txPktExists);
    if (txPktExists) {
        txPacket = new EthPacketData(16384);
        txPacket->unserialize("txpacket", cp, section);
    }

    // Force all state machines active; any with no work will shut
    // themselves off on the first tick.
    rxTick = true;
    txTick = true;
    txFifoTick = true;

    // Re-schedule timer events that were pending (0 = was not scheduled).
    Tick rdtr_time, radv_time, tidv_time, tadv_time, inter_time;
    UNSERIALIZE_SCALAR(rdtr_time);
    UNSERIALIZE_SCALAR(radv_time);
    UNSERIALIZE_SCALAR(tidv_time);
    UNSERIALIZE_SCALAR(tadv_time);
    UNSERIALIZE_SCALAR(inter_time);

    if (rdtr_time)
        rdtrEvent.schedule(rdtr_time);

    if (radv_time)
        radvEvent.schedule(radv_time);

    if (tidv_time)
        tidvEvent.schedule(tidv_time);

    if (tadv_time)
        tadvEvent.schedule(tadv_time);

    if (inter_time)
        interEvent.schedule(inter_time);

    txDescCache.unserialize(cp, csprintf("%s.TxDescCache", section));

    rxDescCache.unserialize(cp, csprintf("%s.RxDescCache", section));
}
1519
1520 IGbE *
1521 IGbEParams::create()
1522 {
1523 return new IGbE(this);
1524 }