[gem5.git] / src / dev / i8254xGBe.cc
1 /*
2 * Copyright (c) 2006 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Ali Saidi
29 */
30
31 /* @file
32 * Device model for Intel's 8254x line of gigabit Ethernet controllers.
33 * In particular it models an 82547 revision 2 (82547GI) MAC, since that part
34 * seems to need the fewest workarounds in the driver. It will probably work
35 * with most of the other MACs with slight modifications.
36 */
37
38
39 /*
40 * @todo Really there are multiple DMA engines; we should implement them.
41 */
42
43 #include <algorithm>
44
45 #include "base/inet.hh"
46 #include "base/trace.hh"
47 #include "dev/i8254xGBe.hh"
48 #include "mem/packet.hh"
49 #include "mem/packet_access.hh"
50 #include "params/IGbE.hh"
51 #include "sim/stats.hh"
52 #include "sim/system.hh"
53
54 using namespace iGbReg;
55 using namespace Net;
56
57 IGbE::IGbE(const Params *p)
58 : EtherDevice(p), etherInt(NULL), drainEvent(NULL), useFlowControl(p->use_flow_control),
59 rxFifo(p->rx_fifo_size), txFifo(p->tx_fifo_size), rxTick(false),
60 txTick(false), txFifoTick(false), rxDmaPacket(false), rdtrEvent(this), radvEvent(this),
61 tadvEvent(this), tidvEvent(this), tickEvent(this), interEvent(this),
62 rxDescCache(this, name()+".RxDesc", p->rx_desc_cache_size),
63 txDescCache(this, name()+".TxDesc", p->tx_desc_cache_size), clock(p->clock)
64 {
65 etherInt = new IGbEInt(name() + ".int", this);
66
67 // Initialize internal registers per Intel documentation
68 // All registers are initialized to 0 by the per-register constructors
69 regs.ctrl.fd(1);
70 regs.ctrl.lrst(1);
71 regs.ctrl.speed(2);
72 regs.ctrl.frcspd(1);
73 regs.sts.speed(3); // Say we're 1000Mbps
74 regs.sts.fd(1); // full duplex
75 regs.sts.lu(1); // link up
76 regs.eecd.fwe(1);
77 regs.eecd.ee_type(1);
78 regs.imr = 0;
79 regs.iam = 0;
80 regs.rxdctl.gran(1);
81 regs.rxdctl.wthresh(1);
82 regs.fcrth(1);
83
84 regs.pba.rxa(0x30);
85 regs.pba.txa(0x10);
86
87 eeOpBits = 0;
88 eeAddrBits = 0;
89 eeDataBits = 0;
90 eeOpcode = 0;
91
92 // Clear all 64 16-bit words of the EEPROM
93 memset(&flash, 0, EEPROM_SIZE*2);
94
95 // Set the MAC address
96 memcpy(flash, p->hardware_address.bytes(), ETH_ADDR_LEN);
97 for (int x = 0; x < ETH_ADDR_LEN/2; x++)
98 flash[x] = htobe(flash[x]);
99
100 uint16_t csum = 0;
101 for (int x = 0; x < EEPROM_SIZE; x++)
102 csum += htobe(flash[x]);
103
104
105 // Write the value that makes the EEPROM words sum to the expected magic checksum (EEPROM_CSUM)
106 flash[EEPROM_SIZE-1] = htobe((uint16_t)(EEPROM_CSUM - csum));
107
108 rxFifo.clear();
109 txFifo.clear();
110 }
111
112 EtherInt*
113 IGbE::getEthPort(const std::string &if_name, int idx)
114 {
115
116 if (if_name == "interface") {
117 if (etherInt->getPeer())
118 panic("Port already connected to\n");
119 return etherInt;
120 }
121 return NULL;
122 }
123
124 Tick
125 IGbE::writeConfig(PacketPtr pkt)
126 {
127 int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
128 if (offset < PCI_DEVICE_SPECIFIC)
129 PciDev::writeConfig(pkt);
130 else
131 panic("Device specific PCI config space not implemented.\n");
132
133 ///
134 /// Some work may need to be done here based on the PCI COMMAND bits.
135 ///
136
137 return pioDelay;
138 }
139
140 Tick
141 IGbE::read(PacketPtr pkt)
142 {
143 int bar;
144 Addr daddr;
145
146 if (!getBAR(pkt->getAddr(), bar, daddr))
147 panic("Invalid PCI memory access to unmapped memory.\n");
148
149 // Only Memory register BAR is allowed
150 assert(bar == 0);
151
152 // Only 32-bit accesses allowed
153 assert(pkt->getSize() == 4);
154
155 DPRINTF(Ethernet, "Read device register %#X\n", daddr);
156
157 pkt->allocate();
158
159 ///
160 /// Handle read of register here
161 ///
162
163
164 switch (daddr) {
165 case REG_CTRL:
166 pkt->set<uint32_t>(regs.ctrl());
167 break;
168 case REG_STATUS:
169 pkt->set<uint32_t>(regs.sts());
170 break;
171 case REG_EECD:
172 pkt->set<uint32_t>(regs.eecd());
173 break;
174 case REG_EERD:
175 pkt->set<uint32_t>(regs.eerd());
176 break;
177 case REG_CTRL_EXT:
178 pkt->set<uint32_t>(regs.ctrl_ext());
179 break;
180 case REG_MDIC:
181 pkt->set<uint32_t>(regs.mdic());
182 break;
183 case REG_ICR:
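// ICR is read-to-clear: reading returns the pending causes, and they are cleared below when INT_ASSERTED is set or when all interrupts are masked (IMR == 0)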
184 DPRINTF(Ethernet, "Reading ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n", regs.icr(),
185 regs.imr, regs.iam, regs.ctrl_ext.iame());
186 pkt->set<uint32_t>(regs.icr());
187 if (regs.icr.int_assert() || regs.imr == 0) {
188 regs.icr = regs.icr() & ~mask(30);
189 DPRINTF(Ethernet, "Cleared ICR. ICR=%#x\n", regs.icr());
190 }
191 if (regs.ctrl_ext.iame() && regs.icr.int_assert())
192 regs.imr &= ~regs.iam;
193 chkInterrupt();
194 break;
195 case REG_ITR:
196 pkt->set<uint32_t>(regs.itr());
197 break;
198 case REG_RCTL:
199 pkt->set<uint32_t>(regs.rctl());
200 break;
201 case REG_FCTTV:
202 pkt->set<uint32_t>(regs.fcttv());
203 break;
204 case REG_TCTL:
205 pkt->set<uint32_t>(regs.tctl());
206 break;
207 case REG_PBA:
208 pkt->set<uint32_t>(regs.pba());
209 break;
210 case REG_WUC:
211 case REG_LEDCTL:
212 pkt->set<uint32_t>(0); // We don't care, so just return 0
213 break;
214 case REG_FCRTL:
215 pkt->set<uint32_t>(regs.fcrtl());
216 break;
217 case REG_FCRTH:
218 pkt->set<uint32_t>(regs.fcrth());
219 break;
220 case REG_RDBAL:
221 pkt->set<uint32_t>(regs.rdba.rdbal());
222 break;
223 case REG_RDBAH:
224 pkt->set<uint32_t>(regs.rdba.rdbah());
225 break;
226 case REG_RDLEN:
227 pkt->set<uint32_t>(regs.rdlen());
228 break;
229 case REG_RDH:
230 pkt->set<uint32_t>(regs.rdh());
231 break;
232 case REG_RDT:
233 pkt->set<uint32_t>(regs.rdt());
234 break;
235 case REG_RDTR:
236 pkt->set<uint32_t>(regs.rdtr());
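// If the Flush Partial Descriptor (FPD) bit is set, write back any cached RX descriptors and post an immediate receive-timer interrupt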
237 if (regs.rdtr.fpd()) {
238 rxDescCache.writeback(0);
239 DPRINTF(EthernetIntr, "Posting interrupt because of RDTR.FPD write\n");
240 postInterrupt(IT_RXT);
241 regs.rdtr.fpd(0);
242 }
243 break;
244 case REG_RADV:
245 pkt->set<uint32_t>(regs.radv());
246 break;
247 case REG_TDBAL:
248 pkt->set<uint32_t>(regs.tdba.tdbal());
249 break;
250 case REG_TDBAH:
251 pkt->set<uint32_t>(regs.tdba.tdbah());
252 break;
253 case REG_TDLEN:
254 pkt->set<uint32_t>(regs.tdlen());
255 break;
256 case REG_TDH:
257 pkt->set<uint32_t>(regs.tdh());
258 break;
259 case REG_TDT:
260 pkt->set<uint32_t>(regs.tdt());
261 break;
262 case REG_TIDV:
263 pkt->set<uint32_t>(regs.tidv());
264 break;
265 case REG_TXDCTL:
266 pkt->set<uint32_t>(regs.txdctl());
267 break;
268 case REG_TADV:
269 pkt->set<uint32_t>(regs.tadv());
270 break;
271 case REG_RXCSUM:
272 pkt->set<uint32_t>(regs.rxcsum());
273 break;
274 case REG_MANC:
275 pkt->set<uint32_t>(regs.manc());
276 break;
277 default:
278 if (!(daddr >= REG_VFTA && daddr < (REG_VFTA + VLAN_FILTER_TABLE_SIZE*4)) &&
279 !(daddr >= REG_RAL && daddr < (REG_RAL + RCV_ADDRESS_TABLE_SIZE*8)) &&
280 !(daddr >= REG_MTA && daddr < (REG_MTA + MULTICAST_TABLE_SIZE*4)) &&
281 !(daddr >= REG_CRCERRS && daddr < (REG_CRCERRS + STATS_REGS_SIZE)))
282 panic("Read request to unknown register number: %#x\n", daddr);
283 else
284 pkt->set<uint32_t>(0);
285 };
286
287 pkt->makeAtomicResponse();
288 return pioDelay;
289 }
290
291 Tick
292 IGbE::write(PacketPtr pkt)
293 {
294 int bar;
295 Addr daddr;
296
297
298 if (!getBAR(pkt->getAddr(), bar, daddr))
299 panic("Invalid PCI memory access to unmapped memory.\n");
300
301 // Only Memory register BAR is allowed
302 assert(bar == 0);
303
304 // Only 32-bit accesses allowed
305 assert(pkt->getSize() == sizeof(uint32_t));
306
307 DPRINTF(Ethernet, "Wrote device register %#X value %#X\n", daddr, pkt->get<uint32_t>());
308
309 ///
310 /// Handle write of register here
311 ///
312 uint32_t val = pkt->get<uint32_t>();
313
314 Regs::RCTL oldrctl;
315 Regs::TCTL oldtctl;
316
317 switch (daddr) {
318 case REG_CTRL:
319 regs.ctrl = val;
320 if (regs.ctrl.tfce())
321 warn("TX Flow control enabled, should implement\n");
322 if (regs.ctrl.rfce())
323 warn("RX Flow control enabled, should implement\n");
324 break;
325 case REG_CTRL_EXT:
326 regs.ctrl_ext = val;
327 break;
328 case REG_STATUS:
329 regs.sts = val;
330 break;
331 case REG_EECD:
332 int oldClk;
333 oldClk = regs.eecd.sk();
334 regs.eecd = val;
335 // See if this is an EEPROM access and emulate it accordingly
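// The driver bit-bangs the serial EEPROM through EECD: on each rising edge of SK one bit is shifted in from DI.
// The first 8 bits form the opcode, the next 8 the address, and for a read the addressed 16-bit word is then shifted out on DO, MSB first.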
336 if (!oldClk && regs.eecd.sk()) {
337 if (eeOpBits < 8) {
338 eeOpcode = eeOpcode << 1 | regs.eecd.din();
339 eeOpBits++;
340 } else if (eeAddrBits < 8 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
341 eeAddr = eeAddr << 1 | regs.eecd.din();
342 eeAddrBits++;
343 } else if (eeDataBits < 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
344 assert(eeAddr>>1 < EEPROM_SIZE);
345 DPRINTF(EthernetEEPROM, "EEPROM bit read: %d word: %#X\n",
346 flash[eeAddr>>1] >> eeDataBits & 0x1, flash[eeAddr>>1]);
347 regs.eecd.dout((flash[eeAddr>>1] >> (15-eeDataBits)) & 0x1);
348 eeDataBits++;
349 } else if (eeDataBits < 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI) {
350 regs.eecd.dout(0);
351 eeDataBits++;
352 } else
353 panic("What's going on with eeprom interface? opcode:"
354 " %#x:%d addr: %#x:%d, data: %d\n", (uint32_t)eeOpcode,
355 (uint32_t)eeOpBits, (uint32_t)eeAddr,
356 (uint32_t)eeAddrBits, (uint32_t)eeDataBits);
357
358 // Reset everything for the next command
359 if ((eeDataBits == 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) ||
360 (eeDataBits == 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI)) {
361 eeOpBits = 0;
362 eeAddrBits = 0;
363 eeDataBits = 0;
364 eeOpcode = 0;
365 eeAddr = 0;
366 }
367
368 DPRINTF(EthernetEEPROM, "EEPROM: opcode: %#X:%d addr: %#X:%d\n",
369 (uint32_t)eeOpcode, (uint32_t) eeOpBits,
370 (uint32_t)eeAddr>>1, (uint32_t)eeAddrBits);
371 if (eeOpBits == 8 && !(eeOpcode == EEPROM_READ_OPCODE_SPI ||
372 eeOpcode == EEPROM_RDSR_OPCODE_SPI ))
373 panic("Unknown eeprom opcode: %#X:%d\n", (uint32_t)eeOpcode,
374 (uint32_t)eeOpBits);
375
376
377 }
378 // If the driver requests EEPROM access, grant it immediately
379 regs.eecd.ee_gnt(regs.eecd.ee_req());
380 break;
381 case REG_EERD:
382 regs.eerd = val;
383 break;
384 case REG_MDIC:
385 regs.mdic = val;
386 if (regs.mdic.i())
387 panic("No support for interrupt on mdic complete\n");
388 if (regs.mdic.phyadd() != 1)
389 panic("No support for reading anything but phy\n");
390 DPRINTF(Ethernet, "%s phy address %x\n", regs.mdic.op() == 1 ? "Writing"
391 : "Reading", regs.mdic.regadd());
392 switch (regs.mdic.regadd()) {
393 case PHY_PSTATUS:
394 regs.mdic.data(0x796D); // link up
395 break;
396 case PHY_PID:
397 regs.mdic.data(0x02A8);
398 break;
399 case PHY_EPID:
400 regs.mdic.data(0x0380);
401 break;
402 case PHY_GSTATUS:
403 regs.mdic.data(0x7C00);
404 break;
405 case PHY_EPSTATUS:
406 regs.mdic.data(0x3000);
407 break;
408 case PHY_AGC:
409 regs.mdic.data(0x180); // some random length
410 break;
411 default:
412 regs.mdic.data(0);
413 }
414 regs.mdic.r(1);
415 break;
416 case REG_ICR:
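// ICR is write-one-to-clear: any cause bit set in the written value is cleared below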
417 DPRINTF(Ethernet, "Writing ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n", regs.icr(),
418 regs.imr, regs.iam, regs.ctrl_ext.iame());
419 if (regs.ctrl_ext.iame())
420 regs.imr &= ~regs.iam;
421 regs.icr = ~bits(val,30,0) & regs.icr();
422 chkInterrupt();
423 break;
424 case REG_ITR:
425 regs.itr = val;
426 break;
427 case REG_ICS:
428 DPRINTF(EthernetIntr, "Posting interrupt because of ICS write\n");
429 postInterrupt((IntTypes)val);
430 break;
431 case REG_IMS:
432 regs.imr |= val;
433 chkInterrupt();
434 break;
435 case REG_IMC:
436 regs.imr &= ~val;
437 chkInterrupt();
438 break;
439 case REG_IAM:
440 regs.iam = val;
441 break;
442 case REG_RCTL:
443 oldrctl = regs.rctl;
444 regs.rctl = val;
445 if (regs.rctl.rst()) {
446 rxDescCache.reset();
447 DPRINTF(EthernetSM, "RXS: Got RESET!\n");
448 rxFifo.clear();
449 regs.rctl.rst(0);
450 }
451 if (regs.rctl.en())
452 rxTick = true;
453 restartClock();
454 break;
455 case REG_FCTTV:
456 regs.fcttv = val;
457 break;
458 case REG_TCTL:
459 // Capture the old TCTL before overwriting it so the enable transition below is detected
460 oldtctl = regs.tctl;
461 regs.tctl = val;
462 if (regs.tctl.en())
463 txTick = true;
464 restartClock();
465 if (regs.tctl.en() && !oldtctl.en()) {
466 txDescCache.reset();
467 }
468 break;
469 case REG_PBA:
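// PBA splits the on-chip packet buffer between receive and transmit; whatever (in KB) is not allocated to RX out of the 64 KB total goes to TX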
470 regs.pba.rxa(val);
471 regs.pba.txa(64 - regs.pba.rxa());
472 break;
473 case REG_WUC:
474 case REG_LEDCTL:
475 case REG_FCAL:
476 case REG_FCAH:
477 case REG_FCT:
478 case REG_VET:
479 case REG_AIFS:
480 case REG_TIPG:
481 ; // We don't care, so don't store anything
482 break;
483 case REG_FCRTL:
484 regs.fcrtl = val;
485 break;
486 case REG_FCRTH:
487 regs.fcrth = val;
488 break;
489 case REG_RDBAL:
490 regs.rdba.rdbal( val & ~mask(4));
491 rxDescCache.areaChanged();
492 break;
493 case REG_RDBAH:
494 regs.rdba.rdbah(val);
495 rxDescCache.areaChanged();
496 break;
497 case REG_RDLEN:
498 regs.rdlen = val & ~mask(7);
499 rxDescCache.areaChanged();
500 break;
501 case REG_RDH:
502 regs.rdh = val;
503 rxDescCache.areaChanged();
504 break;
505 case REG_RDT:
506 regs.rdt = val;
507 DPRINTF(EthernetSM, "RXS: RDT Updated.\n");
508 if (getState() == SimObject::Running) {
509 DPRINTF(EthernetSM, "RXS: RDT Fetching Descriptors!\n");
510 rxDescCache.fetchDescriptors();
511 } else {
512 DPRINTF(EthernetSM, "RXS: RDT NOT Fetching Desc b/c draining!\n");
513 }
514 break;
515 case REG_RDTR:
516 regs.rdtr = val;
517 break;
518 case REG_RADV:
519 regs.radv = val;
520 break;
521 case REG_TDBAL:
522 regs.tdba.tdbal( val & ~mask(4));
523 txDescCache.areaChanged();
524 break;
525 case REG_TDBAH:
526 regs.tdba.tdbah(val);
527 txDescCache.areaChanged();
528 break;
529 case REG_TDLEN:
530 regs.tdlen = val & ~mask(7);
531 txDescCache.areaChanged();
532 break;
533 case REG_TDH:
534 regs.tdh = val;
535 txDescCache.areaChanged();
536 break;
537 case REG_TDT:
538 regs.tdt = val;
539 DPRINTF(EthernetSM, "TXS: TX Tail pointer updated\n");
540 if (getState() == SimObject::Running) {
541 DPRINTF(EthernetSM, "TXS: TDT Fetching Descriptors!\n");
542 txDescCache.fetchDescriptors();
543 } else {
544 DPRINTF(EthernetSM, "TXS: TDT NOT Fetching Desc b/c draining!\n");
545 }
546 break;
547 case REG_TIDV:
548 regs.tidv = val;
549 break;
550 case REG_TXDCTL:
551 regs.txdctl = val;
552 break;
553 case REG_TADV:
554 regs.tadv = val;
555 break;
556 case REG_RXCSUM:
557 regs.rxcsum = val;
558 break;
559 case REG_MANC:
560 regs.manc = val;
561 break;
562 default:
563 if (!(daddr >= REG_VFTA && daddr < (REG_VFTA + VLAN_FILTER_TABLE_SIZE*4)) &&
564 !(daddr >= REG_RAL && daddr < (REG_RAL + RCV_ADDRESS_TABLE_SIZE*8)) &&
565 !(daddr >= REG_MTA && daddr < (REG_MTA + MULTICAST_TABLE_SIZE*4)))
566 panic("Write request to unknown register number: %#x\n", daddr);
567 };
568
569 pkt->makeAtomicResponse();
570 return pioDelay;
571 }
572
573 void
574 IGbE::postInterrupt(IntTypes t, bool now)
575 {
576 assert(t);
577
578 // Interrupt is already pending
579 if (t & regs.icr() && !now)
580 return;
581
582 regs.icr = regs.icr() | t;
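// Apply ITR throttling: if an interval is programmed (and 'now' wasn't requested), delay the CPU interrupt by interval * 256 ns; otherwise post it immediately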
583 if (regs.itr.interval() == 0 || now) {
584 if (interEvent.scheduled()) {
585 interEvent.deschedule();
586 }
587 cpuPostInt();
588 } else {
589 DPRINTF(EthernetIntr, "EINT: Scheduling timer interrupt for %d ticks\n",
590 Clock::Int::ns * 256 * regs.itr.interval());
591 if (!interEvent.scheduled()) {
592 interEvent.schedule(curTick + Clock::Int::ns * 256 * regs.itr.interval());
593 }
594 }
595 }
596
597 void
598 IGbE::delayIntEvent()
599 {
600 cpuPostInt();
601 }
602
603
604 void
605 IGbE::cpuPostInt()
606 {
607
608 if (!(regs.icr() & regs.imr)) {
609 DPRINTF(Ethernet, "Interrupt Masked. Not Posting\n");
610 return;
611 }
612
613 DPRINTF(Ethernet, "Posting Interrupt\n");
614
615
616 if (interEvent.scheduled()) {
617 interEvent.deschedule();
618 }
619
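// Since we are interrupting the CPU now, fold any pending delayed RX/TX timer interrupts into this one and cancel their timers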
620 if (rdtrEvent.scheduled()) {
621 regs.icr.rxt0(1);
622 rdtrEvent.deschedule();
623 }
624 if (radvEvent.scheduled()) {
625 regs.icr.rxt0(1);
626 radvEvent.deschedule();
627 }
628 if (tadvEvent.scheduled()) {
629 regs.icr.txdw(1);
630 tadvEvent.deschedule();
631 }
632 if (tidvEvent.scheduled()) {
633 regs.icr.txdw(1);
634 tidvEvent.deschedule();
635 }
636
637 regs.icr.int_assert(1);
638 DPRINTF(EthernetIntr, "EINT: Posting interrupt to CPU now. Vector %#x\n",
639 regs.icr());
640
641 intrPost();
642
643 }
644
645 void
646 IGbE::cpuClearInt()
647 {
648 if (regs.icr.int_assert()) {
649 regs.icr.int_assert(0);
650 DPRINTF(EthernetIntr, "EINT: Clearing interrupt to CPU now. Vector %#x\n",
651 regs.icr());
652 intrClear();
653 }
654 }
655
656 void
657 IGbE::chkInterrupt()
658 {
659 DPRINTF(Ethernet, "Checking interrupts icr: %#x imr: %#x\n", regs.icr(),
660 regs.imr);
661 // Check if we need to clear the cpu interrupt
662 if (!(regs.icr() & regs.imr)) {
663 DPRINTF(Ethernet, "Mask cleaned all interrupts\n");
664 if (interEvent.scheduled())
665 interEvent.deschedule();
666 if (regs.icr.int_assert())
667 cpuClearInt();
668 }
669 DPRINTF(Ethernet, "ITR = %#X itr.interval = %#X\n", regs.itr(), regs.itr.interval());
670
671 if (regs.icr() & regs.imr) {
672 if (regs.itr.interval() == 0) {
673 cpuPostInt();
674 } else {
675 DPRINTF(Ethernet, "Possibly scheduling interrupt because of imr write\n");
676 if (!interEvent.scheduled()) {
677 DPRINTF(Ethernet, "Scheduling for %d\n", curTick + Clock::Int::ns
678 * 256 * regs.itr.interval());
679 interEvent.schedule(curTick + Clock::Int::ns * 256 * regs.itr.interval());
680 }
681 }
682 }
683
684
685 }
686
687
688 IGbE::RxDescCache::RxDescCache(IGbE *i, const std::string n, int s)
689 : DescCache<RxDesc>(i, n, s), pktDone(false), pktEvent(this)
690
691 {
692 }
693
694 void
695 IGbE::RxDescCache::writePacket(EthPacketPtr packet)
696 {
697 // We don't handle packets that span more than one RX descriptor buffer yet
698 DPRINTF(EthernetDesc, "Packet Length: %d Desc Size: %d\n",
699 packet->length, igbe->regs.rctl.descSize());
700 assert(packet->length < igbe->regs.rctl.descSize());
701
702 assert(unusedCache.size());
703 //if (!unusedCache.size())
704 // return false;
705
706 pktPtr = packet;
707 pktDone = false;
708 igbe->dmaWrite(igbe->platform->pciToDma(unusedCache.front()->buf),
709 packet->length, &pktEvent, packet->data);
710 }
711
712 void
713 IGbE::RxDescCache::pktComplete()
714 {
715 assert(unusedCache.size());
716 RxDesc *desc;
717 desc = unusedCache.front();
718
719 uint16_t crcfixup = igbe->regs.rctl.secrc() ? 0 : 4 ;
720 desc->len = htole((uint16_t)(pktPtr->length + crcfixup));
721 DPRINTF(EthernetDesc, "pktPtr->length: %d stripcrc offset: %d value written: %d %d\n",
722 pktPtr->length, crcfixup,
723 htole((uint16_t)(pktPtr->length + crcfixup)),
724 (uint16_t)(pktPtr->length + crcfixup));
725
726 // no support for anything but starting at 0
727 assert(igbe->regs.rxcsum.pcss() == 0);
728
729 DPRINTF(EthernetDesc, "Packet written to memory updating Descriptor\n");
730
731 uint8_t status = RXDS_DD | RXDS_EOP;
732 uint8_t err = 0;
733
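// Fill in the descriptor status and, when RX checksum offload is enabled, verify the IP/TCP/UDP checksums and record any errors in the descriptor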
734 IpPtr ip(pktPtr);
735
736 if (ip) {
737 DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n", ip->id());
738
739 if (igbe->regs.rxcsum.ipofld()) {
740 DPRINTF(EthernetDesc, "Checking IP checksum\n");
741 status |= RXDS_IPCS;
742 desc->csum = htole(cksum(ip));
743 if (cksum(ip) != 0) {
744 err |= RXDE_IPE;
745 DPRINTF(EthernetDesc, "Checksum is bad!!\n");
746 }
747 }
748 TcpPtr tcp(ip);
749 if (tcp && igbe->regs.rxcsum.tuofld()) {
750 DPRINTF(EthernetDesc, "Checking TCP checksum\n");
751 status |= RXDS_TCPCS;
752 desc->csum = htole(cksum(tcp));
753 if (cksum(tcp) != 0) {
754 DPRINTF(EthernetDesc, "Checksum is bad!!\n");
755 err |= RXDE_TCPE;
756 }
757 }
758
759 UdpPtr udp(ip);
760 if (udp && igbe->regs.rxcsum.tuofld()) {
761 DPRINTF(EthernetDesc, "Checking UDP checksum\n");
762 status |= RXDS_UDPCS;
763 desc->csum = htole(cksum(udp));
764 if (cksum(udp) != 0) {
765 DPRINTF(EthernetDesc, "Checksum is bad!!\n");
766 err |= RXDE_TCPE;
767 }
768 }
769 } else { // if ip
770 DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n");
771 }
772
773
774 desc->status = htole(status);
775 desc->errors = htole(err);
776
777 // No vlan support at this point... just set it to 0
778 desc->vlan = 0;
779
780 // Deal with the rx timer interrupts
781 if (igbe->regs.rdtr.delay()) {
782 DPRINTF(EthernetSM, "RXS: Scheduling DTR for %d\n",
783 igbe->regs.rdtr.delay() * igbe->intClock());
784 igbe->rdtrEvent.reschedule(curTick + igbe->regs.rdtr.delay() *
785 igbe->intClock(),true);
786 }
787
788 if (igbe->regs.radv.idv() && igbe->regs.rdtr.delay()) {
789 DPRINTF(EthernetSM, "RXS: Scheduling ADV for %d\n",
790 igbe->regs.radv.idv() * igbe->intClock());
791 if (!igbe->radvEvent.scheduled()) {
792 igbe->radvEvent.schedule(curTick + igbe->regs.radv.idv() *
793 igbe->intClock());
794 }
795 }
796
797 // If no receive interrupt delay (RDTR) is programmed, post the receive interrupt now; ITR throttling may still apply
798 if (!igbe->regs.rdtr.delay()) {
799 DPRINTF(EthernetSM, "RXS: Receive interrupt delay disabled, posting IT_RXT\n");
800 igbe->postInterrupt(IT_RXT);
801 }
802
803 // If the packet is small enough, interrupt appropriately
804 // I wonder if this is delayed or not?!
805 if (pktPtr->length <= igbe->regs.rsrpd.idv()) {
806 DPRINTF(EthernetSM, "RXS: Posting IT_SRPD beacuse small packet received\n");
807 igbe->postInterrupt(IT_SRPD);
808 }
809
810 DPRINTF(EthernetDesc, "Processing of this descriptor complete\n");
811 unusedCache.pop_front();
812 usedCache.push_back(desc);
813
814
815 pktPtr = NULL;
816 enableSm();
817 pktDone = true;
818 igbe->checkDrain();
819
820 }
821
822 void
823 IGbE::RxDescCache::enableSm()
824 {
825 if (!igbe->drainEvent) {
826 igbe->rxTick = true;
827 igbe->restartClock();
828 }
829 }
830
831 bool
832 IGbE::RxDescCache::packetDone()
833 {
834 if (pktDone) {
835 pktDone = false;
836 return true;
837 }
838 return false;
839 }
840
841 bool
842 IGbE::RxDescCache::hasOutstandingEvents()
843 {
844 return pktEvent.scheduled() || wbEvent.scheduled() ||
845 fetchEvent.scheduled();
846 }
847
848 void
849 IGbE::RxDescCache::serialize(std::ostream &os)
850 {
851 DescCache<RxDesc>::serialize(os);
852 SERIALIZE_SCALAR(pktDone);
853 }
854
855 void
856 IGbE::RxDescCache::unserialize(Checkpoint *cp, const std::string &section)
857 {
858 DescCache<RxDesc>::unserialize(cp, section);
859 UNSERIALIZE_SCALAR(pktDone);
860 }
861
862
863 ///////////////////////////////////// IGbE::TxDesc /////////////////////////////////
864
865 IGbE::TxDescCache::TxDescCache(IGbE *i, const std::string n, int s)
866 : DescCache<TxDesc>(i,n, s), pktDone(false), isTcp(false), pktWaiting(false),
867 pktEvent(this)
868
869 {
870 }
871
872 int
873 IGbE::TxDescCache::getPacketSize()
874 {
875 assert(unusedCache.size());
876
877 TxDesc *desc;
878
879 DPRINTF(EthernetDesc, "Starting processing of descriptor\n");
880
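// TCP/IP context descriptors carry offload context for the data descriptors that follow; this model only records whether the payload is TCP and marks the context descriptor done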
881 while (unusedCache.size() && TxdOp::isContext(unusedCache.front())) {
882 DPRINTF(EthernetDesc, "Got context descriptor type... skipping\n");
883
884 // I think we can just ignore these for now?
885 desc = unusedCache.front();
886 DPRINTF(EthernetDesc, "Descriptor upper: %#x lower: %#X\n", desc->d1,
887 desc->d2);
888 // is this going to be a tcp or udp packet?
889 isTcp = TxdOp::tcp(desc) ? true : false;
890
891 // make sure it's ipv4
892 //assert(TxdOp::ip(desc));
893
894 TxdOp::setDd(desc);
895 unusedCache.pop_front();
896 usedCache.push_back(desc);
897 }
898
899 if (!unusedCache.size())
900 return -1;
901
902 DPRINTF(EthernetDesc, "Next TX packet is %d bytes\n",
903 TxdOp::getLen(unusedCache.front()));
904
905 return TxdOp::getLen(unusedCache.front());
906 }
907
908 void
909 IGbE::TxDescCache::getPacketData(EthPacketPtr p)
910 {
911 assert(unusedCache.size());
912
913 TxDesc *desc;
914 desc = unusedCache.front();
915
916 assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) && TxdOp::getLen(desc));
917
918 pktPtr = p;
919
920 pktWaiting = true;
921
922 DPRINTF(EthernetDesc, "Starting DMA of packet at offset %d\n", p->length);
923 igbe->dmaRead(igbe->platform->pciToDma(TxdOp::getBuf(desc)),
924 TxdOp::getLen(desc), &pktEvent, p->data + p->length);
925
926
927 }
928
929 void
930 IGbE::TxDescCache::pktComplete()
931 {
932
933 TxDesc *desc;
934 assert(unusedCache.size());
935 assert(pktPtr);
936
937 DPRINTF(EthernetDesc, "DMA of packet complete\n");
938
939
940 desc = unusedCache.front();
941 assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) && TxdOp::getLen(desc));
942
943 DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
944
945 if (!TxdOp::eop(desc)) {
946 pktPtr->length += TxdOp::getLen(desc);
947 unusedCache.pop_front();
948 usedCache.push_back(desc);
949 pktDone = true;
950 pktWaiting = false;
951 pktMultiDesc = true;
952
953 DPRINTF(EthernetDesc, "Partial Packet Descriptor of %d bytes Done\n",
954 pktPtr->length);
955 pktPtr = NULL;
956
957 enableSm();
958 igbe->checkDrain();
959 return;
960 }
961 pktMultiDesc = false;
962
963 // Set the length of the data in the EtherPacket
964 pktPtr->length += TxdOp::getLen(desc);
965
966 // no support for vlans
967 assert(!TxdOp::vle(desc));
968
969 // we always report status
970 assert(TxdOp::rs(desc));
971
972 // we only support single packet descriptors at this point
973 assert(TxdOp::eop(desc));
974
975 // set that this packet is done
976 TxdOp::setDd(desc);
977
978 DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
979
980 if (DTRACE(EthernetDesc)) {
981 IpPtr ip(pktPtr);
982 if (ip)
983 DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n",
984 ip->id());
985 else
986 DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n");
987 }
988
989 // Checksums are only offloaded for the newer descriptor types
990 if (TxdOp::isData(desc) && ( TxdOp::ixsm(desc) || TxdOp::txsm(desc)) ) {
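// IXSM requests insertion of the IP header checksum and TXSM insertion of the TCP/UDP checksum; both are computed here over the assembled packet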
991 DPRINTF(EthernetDesc, "Calculating checksums for packet\n");
992 IpPtr ip(pktPtr);
993 assert(ip);
994 if (TxdOp::ixsm(desc)) {
995 ip->sum(0);
996 ip->sum(cksum(ip));
997 DPRINTF(EthernetDesc, "Calculated IP checksum\n");
998 }
999 if (TxdOp::txsm(desc)) {
1000 TcpPtr tcp(ip);
1001 UdpPtr udp(ip);
1002 if (tcp) {
1003 tcp->sum(0);
1004 tcp->sum(cksum(tcp));
1005 DPRINTF(EthernetDesc, "Calculated TCP checksum\n");
1006 } else if (udp) {
1007 assert(udp);
1008 udp->sum(0);
1009 udp->sum(cksum(udp));
1010 DPRINTF(EthernetDesc, "Calculated UDP checksum\n");
1011 } else {
1012 panic("Told to checksum, but don't know how\n");
1013 }
1014 }
1015 }
1016
1017 if (TxdOp::ide(desc)) {
1018 // Deal with the TX interrupt delay timers
1019 DPRINTF(EthernetDesc, "Descriptor had IDE set\n");
1020 if (igbe->regs.tidv.idv()) {
1021 DPRINTF(EthernetDesc, "setting tidv\n");
1022 igbe->tidvEvent.reschedule(curTick + igbe->regs.tidv.idv() *
1023 igbe->intClock(), true);
1024 }
1025
1026 if (igbe->regs.tadv.idv() && igbe->regs.tidv.idv()) {
1027 DPRINTF(EthernetDesc, "setting tadv\n");
1028 if (!igbe->tadvEvent.scheduled()) {
1029 igbe->tadvEvent.schedule(curTick + igbe->regs.tadv.idv() *
1030 igbe->intClock());
1031 }
1032 }
1033 }
1034
1035
1036
1037 unusedCache.pop_front();
1038 usedCache.push_back(desc);
1039 pktDone = true;
1040 pktWaiting = false;
1041 pktPtr = NULL;
1042
1043 DPRINTF(EthernetDesc, "Descriptor Done\n");
1044
1045 if (igbe->regs.txdctl.wthresh() == 0) {
1046 DPRINTF(EthernetDesc, "WTHRESH == 0, writing back descriptor\n");
1047 writeback(0);
1048 } else if (igbe->regs.txdctl.wthresh() >= usedCache.size()) {
1049 DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n");
1050 writeback((igbe->cacheBlockSize()-1)>>4);
1051 }
1052 enableSm();
1053 igbe->checkDrain();
1054 }
1055
1056 void
1057 IGbE::TxDescCache::serialize(std::ostream &os)
1058 {
1059 DescCache<TxDesc>::serialize(os);
1060 SERIALIZE_SCALAR(pktDone);
1061 SERIALIZE_SCALAR(isTcp);
1062 SERIALIZE_SCALAR(pktWaiting);
1063 SERIALIZE_SCALAR(pktMultiDesc);
1064 }
1065
1066 void
1067 IGbE::TxDescCache::unserialize(Checkpoint *cp, const std::string &section)
1068 {
1069 DescCache<TxDesc>::unserialize(cp, section);
1070 UNSERIALIZE_SCALAR(pktDone);
1071 UNSERIALIZE_SCALAR(isTcp);
1072 UNSERIALIZE_SCALAR(pktWaiting);
1073 UNSERIALIZE_SCALAR(pktMultiDesc);
1074 }
1075
1076 bool
1077 IGbE::TxDescCache::packetAvailable()
1078 {
1079 if (pktDone) {
1080 pktDone = false;
1081 return true;
1082 }
1083 return false;
1084 }
1085
1086 void
1087 IGbE::TxDescCache::enableSm()
1088 {
1089 if (!igbe->drainEvent) {
1090 igbe->txTick = true;
1091 igbe->restartClock();
1092 }
1093 }
1094
1095 bool
1096 IGbE::TxDescCache::hasOutstandingEvents()
1097 {
1098 return pktEvent.scheduled() || wbEvent.scheduled() ||
1099 fetchEvent.scheduled();
1100 }
1101
1102
1103 ///////////////////////////////////// IGbE /////////////////////////////////
1104
1105 void
1106 IGbE::restartClock()
1107 {
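// Round up to the next device clock edge so the state machines always tick on a clock boundary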
1108 if (!tickEvent.scheduled() && (rxTick || txTick || txFifoTick) && getState() ==
1109 SimObject::Running)
1110 tickEvent.schedule((curTick/ticks(1)) * ticks(1) + ticks(1));
1111 }
1112
1113 unsigned int
1114 IGbE::drain(Event *de)
1115 {
1116 unsigned int count;
1117 count = pioPort->drain(de) + dmaPort->drain(de);
1118 if (rxDescCache.hasOutstandingEvents() ||
1119 txDescCache.hasOutstandingEvents()) {
1120 count++;
1121 drainEvent = de;
1122 }
1123
1124 txFifoTick = false;
1125 txTick = false;
1126 rxTick = false;
1127
1128 if (tickEvent.scheduled())
1129 tickEvent.deschedule();
1130
1131 if (count)
1132 changeState(Draining);
1133 else
1134 changeState(Drained);
1135
1136 return count;
1137 }
1138
1139 void
1140 IGbE::resume()
1141 {
1142 SimObject::resume();
1143
1144 txFifoTick = true;
1145 txTick = true;
1146 rxTick = true;
1147
1148 restartClock();
1149 }
1150
1151 void
1152 IGbE::checkDrain()
1153 {
1154 if (!drainEvent)
1155 return;
1156
1157 txFifoTick = false;
1158 txTick = false;
1159 rxTick = false;
1160 if (!rxDescCache.hasOutstandingEvents() &&
1161 !txDescCache.hasOutstandingEvents()) {
1162 drainEvent->process();
1163 drainEvent = NULL;
1164 }
1165 }
1166
1167 void
1168 IGbE::txStateMachine()
1169 {
1170 if (!regs.tctl.en()) {
1171 txTick = false;
1172 DPRINTF(EthernetSM, "TXS: TX disabled, stopping ticking\n");
1173 return;
1174 }
1175
1176 // If we have a packet available and its length is not 0 (meaning it's not
1177 // a multidescriptor packet) put it in the fifo, otherwise on the next
1178 // iteration we'll get the rest of the data
1179 if (txPacket && txDescCache.packetAvailable()
1180 && !txDescCache.packetMultiDesc() && txPacket->length) {
1181 bool success;
1182
1183 DPRINTF(EthernetSM, "TXS: packet placed in TX FIFO\n");
1184 success = txFifo.push(txPacket);
1185 txFifoTick = true && !drainEvent;
1186 assert(success);
1187 txPacket = NULL;
1188 txDescCache.writeback((cacheBlockSize()-1)>>4);
1189 return;
1190 }
1191
1192 // Only support descriptor granularity
1193 assert(regs.txdctl.gran());
1194 if (regs.txdctl.lwthresh() && txDescCache.descLeft() < (regs.txdctl.lwthresh() * 8)) {
1195 DPRINTF(EthernetSM, "TXS: LWTHRESH caused posting of TXDLOW\n");
1196 postInterrupt(IT_TXDLOW);
1197 }
1198
1199 if (!txPacket) {
1200 txPacket = new EthPacketData(16384);
1201 }
1202
1203 if (!txDescCache.packetWaiting()) {
1204 if (txDescCache.descLeft() == 0) {
1205 postInterrupt(IT_TXQE);
1206 txDescCache.writeback(0);
1207 txDescCache.fetchDescriptors();
1208 DPRINTF(EthernetSM, "TXS: No descriptors left in ring, forcing "
1209 "writeback stopping ticking and posting TXQE\n");
1210 txTick = false;
1211 return;
1212 }
1213
1214
1215 if (!(txDescCache.descUnused())) {
1216 txDescCache.fetchDescriptors();
1217 DPRINTF(EthernetSM, "TXS: No descriptors available in cache, fetching and stopping ticking\n");
1218 txTick = false;
1219 return;
1220 }
1221
1222
1223 int size;
1224 size = txDescCache.getPacketSize();
1225 if (size > 0 && txFifo.avail() > size) {
1226 DPRINTF(EthernetSM, "TXS: Reserving %d bytes in FIFO and begining "
1227 "DMA of next packet\n", size);
1228 txFifo.reserve(size);
1229 txDescCache.getPacketData(txPacket);
1230 } else if (size <= 0) {
1231 DPRINTF(EthernetSM, "TXS: getPacketSize returned: %d\n", size);
1232 DPRINTF(EthernetSM, "TXS: No packets to get, writing back used descriptors\n");
1233 txDescCache.writeback(0);
1234 } else {
1235 DPRINTF(EthernetSM, "TXS: FIFO full, stopping ticking until space "
1236 "available in FIFO\n");
1237 txTick = false;
1238 }
1239
1240
1241 return;
1242 }
1243 DPRINTF(EthernetSM, "TXS: Nothing to do, stopping ticking\n");
1244 txTick = false;
1245 }
1246
1247 bool
1248 IGbE::ethRxPkt(EthPacketPtr pkt)
1249 {
1250 DPRINTF(Ethernet, "RxFIFO: Receiving pcakte from wire\n");
1251
1252 if (!regs.rctl.en()) {
1253 DPRINTF(Ethernet, "RxFIFO: RX not enabled, dropping\n");
1254 return true;
1255 }
1256
1257 // restart the state machines if they are stopped
1258 rxTick = true && !drainEvent;
1259 if ((rxTick || txTick) && !tickEvent.scheduled()) {
1260 DPRINTF(EthernetSM, "RXS: received packet into fifo, starting ticking\n");
1261 restartClock();
1262 }
1263
1264 if (!rxFifo.push(pkt)) {
1265 DPRINTF(Ethernet, "RxFIFO: Packet won't fit in fifo... dropped\n");
1266 postInterrupt(IT_RXO, true);
1267 return false;
1268 }
1269
1270 return true;
1271 }
1272
1273
1274 void
1275 IGbE::rxStateMachine()
1276 {
1277 if (!regs.rctl.en()) {
1278 rxTick = false;
1279 DPRINTF(EthernetSM, "RXS: RX disabled, stopping ticking\n");
1280 return;
1281 }
1282
1283 // If the packet is done check for interrupts/descriptors/etc
1284 if (rxDescCache.packetDone()) {
1285 rxDmaPacket = false;
1286 DPRINTF(EthernetSM, "RXS: Packet completed DMA to memory\n");
1287 int descLeft = rxDescCache.descLeft();
1288 switch (regs.rctl.rdmts()) {
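// RDMTS selects the free-descriptor threshold (1/2, 1/4, or 1/8 of RDLEN); the missing breaks are intentional, so once the selected threshold is reached control falls through and posts the RXDMT interrupt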
1289 case 2: if (descLeft > .125 * regs.rdlen()) break;
1290 case 1: if (descLeft > .250 * regs.rdlen()) break;
1291 case 0: if (descLeft > .500 * regs.rdlen()) break;
1292 DPRINTF(Ethernet, "RXS: Interrupting (RXDMT) because of descriptors left\n");
1293 postInterrupt(IT_RXDMT);
1294 break;
1295 }
1296
1297 if (descLeft == 0) {
1298 rxDescCache.writeback(0);
1299 DPRINTF(EthernetSM, "RXS: No descriptors left in ring, forcing"
1300 " writeback and stopping ticking\n");
1301 rxTick = false;
1302 }
1303
1304 // only support descriptor granularity
1305 assert(regs.rxdctl.gran());
1306
1307 if (regs.rxdctl.wthresh() >= rxDescCache.descUsed()) {
1308 DPRINTF(EthernetSM, "RXS: Writing back because WTHRESH >= descUsed\n");
1309 if (regs.rxdctl.wthresh() < (cacheBlockSize()>>4))
1310 rxDescCache.writeback(regs.rxdctl.wthresh()-1);
1311 else
1312 rxDescCache.writeback((cacheBlockSize()-1)>>4);
1313 }
1314
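// Prefetch more descriptors when fewer than PTHRESH unused descriptors remain in the cache, provided the host ring still has more than HTHRESH descriptors left to fetch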
1315 if ((rxDescCache.descUnused() < regs.rxdctl.pthresh()) &&
1316 ((rxDescCache.descLeft() - rxDescCache.descUnused()) > regs.rxdctl.hthresh())) {
1317 DPRINTF(EthernetSM, "RXS: Fetching descriptors because descUnused < PTHRESH\n");
1318 rxDescCache.fetchDescriptors();
1319 }
1320
1321 if (rxDescCache.descUnused() == 0) {
1322 rxDescCache.fetchDescriptors();
1323 DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
1324 "fetching descriptors and stopping ticking\n");
1325 rxTick = false;
1326 }
1327 return;
1328 }
1329
1330 if (rxDmaPacket) {
1331 DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n");
1332 rxTick = false;
1333 return;
1334 }
1335
1336 if (!rxDescCache.descUnused()) {
1337 rxDescCache.fetchDescriptors();
1338 DPRINTF(EthernetSM, "RXS: No descriptors available in cache, stopping ticking\n");
1339 rxTick = false;
1340 DPRINTF(EthernetSM, "RXS: No descriptors available, fetching\n");
1341 return;
1342 }
1343
1344 if (rxFifo.empty()) {
1345 DPRINTF(EthernetSM, "RXS: RxFIFO empty, stopping ticking\n");
1346 rxTick = false;
1347 return;
1348 }
1349
1350 EthPacketPtr pkt;
1351 pkt = rxFifo.front();
1352
1353
1354 rxDescCache.writePacket(pkt);
1355 DPRINTF(EthernetSM, "RXS: Writing packet into memory\n");
1356 DPRINTF(EthernetSM, "RXS: Removing packet from FIFO\n");
1357 rxFifo.pop();
1358 DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n");
1359 rxTick = false;
1360 rxDmaPacket = true;
1361 }
1362
1363 void
1364 IGbE::txWire()
1365 {
1366 if (txFifo.empty()) {
1367 txFifoTick = false;
1368 return;
1369 }
1370
1371
1372 if (etherInt->sendPacket(txFifo.front())) {
1373 if (DTRACE(EthernetSM)) {
1374 IpPtr ip(txFifo.front());
1375 if (ip)
1376 DPRINTF(EthernetSM, "Transmitting Ip packet with Id=%d\n",
1377 ip->id());
1378 else
1379 DPRINTF(EthernetSM, "Transmitting Non-Ip packet\n");
1380 }
1381 DPRINTF(EthernetSM, "TxFIFO: Successful transmit, bytes available in fifo: %d\n",
1382 txFifo.avail());
1383 txFifo.pop();
1384 } else {
1385 // We'll get woken up when ethTxDone() gets called for this packet
1386 txFifoTick = false;
1387 }
1388 }
1389
1390 void
1391 IGbE::tick()
1392 {
1393 DPRINTF(EthernetSM, "IGbE: -------------- Cycle --------------\n");
1394
1395 if (rxTick)
1396 rxStateMachine();
1397
1398 if (txTick)
1399 txStateMachine();
1400
1401 if (txFifoTick)
1402 txWire();
1403
1404
1405 if (rxTick || txTick || txFifoTick)
1406 tickEvent.schedule(curTick + ticks(1));
1407 }
1408
1409 void
1410 IGbE::ethTxDone()
1411 {
1412 // restart the tx state machines if they are stopped
1413 // fifo to send another packet
1414 // tx sm to put more data into the fifo
1415 txFifoTick = true && !drainEvent;
1416 if (txDescCache.descLeft() != 0 && !drainEvent)
1417 txTick = true;
1418
1419 restartClock();
1420 txWire();
1421 DPRINTF(EthernetSM, "TxFIFO: Transmission complete\n");
1422 }
1423
1424 void
1425 IGbE::serialize(std::ostream &os)
1426 {
1427 PciDev::serialize(os);
1428
1429 regs.serialize(os);
1430 SERIALIZE_SCALAR(eeOpBits);
1431 SERIALIZE_SCALAR(eeAddrBits);
1432 SERIALIZE_SCALAR(eeDataBits);
1433 SERIALIZE_SCALAR(eeOpcode);
1434 SERIALIZE_SCALAR(eeAddr);
1435 SERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);
1436
1437 rxFifo.serialize("rxfifo", os);
1438 txFifo.serialize("txfifo", os);
1439
1440 bool txPktExists = txPacket;
1441 SERIALIZE_SCALAR(txPktExists);
1442 if (txPktExists)
1443 txPacket->serialize("txpacket", os);
1444
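// Events can't be serialized directly, so record the absolute tick at which each delay/interrupt timer would fire (0 means not scheduled); unserialize() reschedules them from these values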
1445 Tick rdtr_time = 0, radv_time = 0, tidv_time = 0, tadv_time = 0,
1446 inter_time = 0;
1447
1448 if (rdtrEvent.scheduled())
1449 rdtr_time = rdtrEvent.when();
1450 SERIALIZE_SCALAR(rdtr_time);
1451
1452 if (radvEvent.scheduled())
1453 radv_time = radvEvent.when();
1454 SERIALIZE_SCALAR(radv_time);
1455
1456 if (tidvEvent.scheduled())
1457 tidv_time = tidvEvent.when();
1458 SERIALIZE_SCALAR(tidv_time);
1459
1460 if (tadvEvent.scheduled())
1461 tadv_time = tadvEvent.when();
1462 SERIALIZE_SCALAR(tadv_time);
1463
1464 if (interEvent.scheduled())
1465 inter_time = interEvent.when();
1466 SERIALIZE_SCALAR(inter_time);
1467
1468 nameOut(os, csprintf("%s.TxDescCache", name()));
1469 txDescCache.serialize(os);
1470
1471 nameOut(os, csprintf("%s.RxDescCache", name()));
1472 rxDescCache.serialize(os);
1473 }
1474
1475 void
1476 IGbE::unserialize(Checkpoint *cp, const std::string &section)
1477 {
1478 PciDev::unserialize(cp, section);
1479
1480 regs.unserialize(cp, section);
1481 UNSERIALIZE_SCALAR(eeOpBits);
1482 UNSERIALIZE_SCALAR(eeAddrBits);
1483 UNSERIALIZE_SCALAR(eeDataBits);
1484 UNSERIALIZE_SCALAR(eeOpcode);
1485 UNSERIALIZE_SCALAR(eeAddr);
1486 UNSERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);
1487
1488 rxFifo.unserialize("rxfifo", cp, section);
1489 txFifo.unserialize("txfifo", cp, section);
1490
1491 bool txPktExists;
1492 UNSERIALIZE_SCALAR(txPktExists);
1493 if (txPktExists) {
1494 txPacket = new EthPacketData(16384);
1495 txPacket->unserialize("txpacket", cp, section);
1496 }
1497
1498 rxTick = true;
1499 txTick = true;
1500 txFifoTick = true;
1501
1502 Tick rdtr_time, radv_time, tidv_time, tadv_time, inter_time;
1503 UNSERIALIZE_SCALAR(rdtr_time);
1504 UNSERIALIZE_SCALAR(radv_time);
1505 UNSERIALIZE_SCALAR(tidv_time);
1506 UNSERIALIZE_SCALAR(tadv_time);
1507 UNSERIALIZE_SCALAR(inter_time);
1508
1509 if (rdtr_time)
1510 rdtrEvent.schedule(rdtr_time);
1511
1512 if (radv_time)
1513 radvEvent.schedule(radv_time);
1514
1515 if (tidv_time)
1516 tidvEvent.schedule(tidv_time);
1517
1518 if (tadv_time)
1519 tadvEvent.schedule(tadv_time);
1520
1521 if (inter_time)
1522 interEvent.schedule(inter_time);
1523
1524 txDescCache.unserialize(cp, csprintf("%s.TxDescCache", section));
1525
1526 rxDescCache.unserialize(cp, csprintf("%s.RxDescCache", section));
1527 }
1528
1529 IGbE *
1530 IGbEParams::create()
1531 {
1532 return new IGbE(this);
1533 }