Ethernet: share statistics between all ethernet devices and apply some
[gem5.git] / src / dev / i8254xGBe.cc
1 /*
2 * Copyright (c) 2006 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Ali Saidi
29 */
30
31 /* @file
32 * Device model for Intel's 8254x line of gigabit ethernet controllers.
33 * In particular an 82547 revision 2 (82547GI) MAC because it seems to have the
34 * fewest workarounds in the driver. It will probably work with most of the
35 * other MACs with slight modifications.
36 */
37
38
39 /*
40 * @todo really there are multiple dma engines.. we should implement them.
41 */
42
43 #include <algorithm>
44
45 #include "base/inet.hh"
46 #include "base/trace.hh"
47 #include "dev/i8254xGBe.hh"
48 #include "mem/packet.hh"
49 #include "mem/packet_access.hh"
50 #include "params/IGbE.hh"
51 #include "sim/stats.hh"
52 #include "sim/system.hh"
53
54 using namespace iGbReg;
55 using namespace Net;
56
/**
 * Construct the 8254x MAC model: create the ethernet interface object,
 * load the power-on default register values, and build the 64-word
 * EEPROM image containing the MAC address and its checksum.
 */
IGbE::IGbE(const Params *p)
    : EtherDevice(p), etherInt(NULL), drainEvent(NULL), useFlowControl(p->use_flow_control),
      rxFifo(p->rx_fifo_size), txFifo(p->tx_fifo_size), rxTick(false),
      txTick(false), txFifoTick(false), rxDmaPacket(false), rdtrEvent(this), radvEvent(this),
      tadvEvent(this), tidvEvent(this), tickEvent(this), interEvent(this),
      rxDescCache(this, name()+".RxDesc", p->rx_desc_cache_size),
      txDescCache(this, name()+".TxDesc", p->tx_desc_cache_size), clock(p->clock)
{
    etherInt = new IGbEInt(name() + ".int", this);

    // Initialize internal registers per Intel documentation.
    // All registers are initialized to 0 by the per-register constructors,
    // so only the non-zero power-on defaults are set here.
    regs.ctrl.fd(1);
    regs.ctrl.lrst(1);
    regs.ctrl.speed(2);
    regs.ctrl.frcspd(1);
    regs.sts.speed(3); // Say we're 1000Mbps
    regs.sts.fd(1); // full duplex
    regs.sts.lu(1); // link up
    regs.eecd.fwe(1);
    regs.eecd.ee_type(1);
    regs.imr = 0;
    regs.iam = 0;
    regs.rxdctl.gran(1);
    regs.rxdctl.wthresh(1);
    regs.fcrth(1);

    // Packet buffer allocation: 48KB receive / 16KB transmit (in KB units).
    regs.pba.rxa(0x30);
    regs.pba.txa(0x10);

    // Reset the bit-banged SPI EEPROM interface state.
    eeOpBits = 0;
    eeAddrBits = 0;
    eeDataBits = 0;
    eeOpcode = 0;

    // clear all 64 16 bit words of the eeprom
    memset(&flash, 0, EEPROM_SIZE*2);

    // Set the MAC address in the first three EEPROM words
    memcpy(flash, p->hardware_address.bytes(), ETH_ADDR_LEN);
    for (int x = 0; x < ETH_ADDR_LEN/2; x++)
        flash[x] = htobe(flash[x]);

    // Compute the sum of all words so the final word can force the
    // whole image to add up to the magic checksum value.
    uint16_t csum = 0;
    for (int x = 0; x < EEPROM_SIZE; x++)
        csum += htobe(flash[x]);


    // Magic happy checksum value
    flash[EEPROM_SIZE-1] = htobe((uint16_t)(EEPROM_CSUM - csum));

    rxFifo.clear();
    txFifo.clear();
}
111
112 EtherInt*
113 IGbE::getEthPort(const std::string &if_name, int idx)
114 {
115
116 if (if_name == "interface") {
117 if (etherInt->getPeer())
118 panic("Port already connected to\n");
119 return etherInt;
120 }
121 return NULL;
122 }
123
124 Tick
125 IGbE::writeConfig(PacketPtr pkt)
126 {
127 int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
128 if (offset < PCI_DEVICE_SPECIFIC)
129 PciDev::writeConfig(pkt);
130 else
131 panic("Device specific PCI config space not implemented.\n");
132
133 ///
134 /// Some work may need to be done here based for the pci COMMAND bits.
135 ///
136
137 return pioDelay;
138 }
139
/**
 * Handle a memory-mapped register read from the device (BAR 0 only,
 * 32-bit accesses only). Most registers simply return their stored
 * value; ICR and RDTR reads have side effects, noted inline below.
 *
 * @param pkt the read request packet; the response value is stored in it
 * @return pioDelay ticks of latency for the access
 */
Tick
IGbE::read(PacketPtr pkt)
{
    int bar;
    Addr daddr;

    if (!getBAR(pkt->getAddr(), bar, daddr))
        panic("Invalid PCI memory access to unmapped memory.\n");

    // Only Memory register BAR is allowed
    assert(bar == 0);

    // Only 32bit accesses allowed
    assert(pkt->getSize() == 4);

    DPRINTF(Ethernet, "Read device register %#X\n", daddr);

    pkt->allocate();

    ///
    /// Handle read of register here
    ///


    switch (daddr) {
      case REG_CTRL:
        pkt->set<uint32_t>(regs.ctrl());
        break;
      case REG_STATUS:
        pkt->set<uint32_t>(regs.sts());
        break;
      case REG_EECD:
        pkt->set<uint32_t>(regs.eecd());
        break;
      case REG_EERD:
        pkt->set<uint32_t>(regs.eerd());
        break;
      case REG_CTRL_EXT:
        pkt->set<uint32_t>(regs.ctrl_ext());
        break;
      case REG_MDIC:
        pkt->set<uint32_t>(regs.mdic());
        break;
      case REG_ICR:
        DPRINTF(Ethernet, "Reading ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n", regs.icr(),
                regs.imr, regs.iam, regs.ctrl_ext.iame());
        pkt->set<uint32_t>(regs.icr());
        // Reading ICR clears the cause bits when an interrupt is
        // asserted or all interrupts are masked (read-to-clear).
        if (regs.icr.int_assert() || regs.imr == 0) {
            regs.icr = regs.icr() & ~mask(30);
            DPRINTF(Ethernet, "Cleared ICR. ICR=%#x\n", regs.icr());
        }
        // With IAME set, an ICR read also applies the auto-mask (IAM).
        if (regs.ctrl_ext.iame() && regs.icr.int_assert())
            regs.imr &= ~regs.iam;
        chkInterrupt();
        break;
      case REG_ITR:
        pkt->set<uint32_t>(regs.itr());
        break;
      case REG_RCTL:
        pkt->set<uint32_t>(regs.rctl());
        break;
      case REG_FCTTV:
        pkt->set<uint32_t>(regs.fcttv());
        break;
      case REG_TCTL:
        pkt->set<uint32_t>(regs.tctl());
        break;
      case REG_PBA:
        pkt->set<uint32_t>(regs.pba());
        break;
      case REG_WUC:
      case REG_LEDCTL:
        pkt->set<uint32_t>(0); // We don't care, so just return 0
        break;
      case REG_FCRTL:
        pkt->set<uint32_t>(regs.fcrtl());
        break;
      case REG_FCRTH:
        pkt->set<uint32_t>(regs.fcrth());
        break;
      case REG_RDBAL:
        pkt->set<uint32_t>(regs.rdba.rdbal());
        break;
      case REG_RDBAH:
        pkt->set<uint32_t>(regs.rdba.rdbah());
        break;
      case REG_RDLEN:
        pkt->set<uint32_t>(regs.rdlen());
        break;
      case REG_RDH:
        pkt->set<uint32_t>(regs.rdh());
        break;
      case REG_RDT:
        pkt->set<uint32_t>(regs.rdt());
        break;
      case REG_RDTR:
        pkt->set<uint32_t>(regs.rdtr());
        // A set FPD (flush partial descriptor) bit triggers an
        // immediate descriptor writeback and RX timer interrupt,
        // then self-clears.
        if (regs.rdtr.fpd()) {
            rxDescCache.writeback(0);
            DPRINTF(EthernetIntr, "Posting interrupt because of RDTR.FPD write\n");
            postInterrupt(IT_RXT);
            regs.rdtr.fpd(0);
        }
        break;
      case REG_RADV:
        pkt->set<uint32_t>(regs.radv());
        break;
      case REG_TDBAL:
        pkt->set<uint32_t>(regs.tdba.tdbal());
        break;
      case REG_TDBAH:
        pkt->set<uint32_t>(regs.tdba.tdbah());
        break;
      case REG_TDLEN:
        pkt->set<uint32_t>(regs.tdlen());
        break;
      case REG_TDH:
        pkt->set<uint32_t>(regs.tdh());
        break;
      case REG_TDT:
        pkt->set<uint32_t>(regs.tdt());
        break;
      case REG_TIDV:
        pkt->set<uint32_t>(regs.tidv());
        break;
      case REG_TXDCTL:
        pkt->set<uint32_t>(regs.txdctl());
        break;
      case REG_TADV:
        pkt->set<uint32_t>(regs.tadv());
        break;
      case REG_RXCSUM:
        pkt->set<uint32_t>(regs.rxcsum());
        break;
      case REG_MANC:
        pkt->set<uint32_t>(regs.manc());
        break;
      default:
        // Filter/statistics table ranges read as zero; anything else
        // is an unmodeled register and fatal.
        if (!(daddr >= REG_VFTA && daddr < (REG_VFTA + VLAN_FILTER_TABLE_SIZE*4)) &&
            !(daddr >= REG_RAL && daddr < (REG_RAL + RCV_ADDRESS_TABLE_SIZE*8)) &&
            !(daddr >= REG_MTA && daddr < (REG_MTA + MULTICAST_TABLE_SIZE*4)) &&
            !(daddr >= REG_CRCERRS && daddr < (REG_CRCERRS + STATS_REGS_SIZE)))
            panic("Read request to unknown register number: %#x\n", daddr);
        else
            pkt->set<uint32_t>(0);
    };

    pkt->makeAtomicResponse();
    return pioDelay;
}
290
291 Tick
292 IGbE::write(PacketPtr pkt)
293 {
294 int bar;
295 Addr daddr;
296
297
298 if (!getBAR(pkt->getAddr(), bar, daddr))
299 panic("Invalid PCI memory access to unmapped memory.\n");
300
301 // Only Memory register BAR is allowed
302 assert(bar == 0);
303
304 // Only 32bit accesses allowed
305 assert(pkt->getSize() == sizeof(uint32_t));
306
307 DPRINTF(Ethernet, "Wrote device register %#X value %#X\n", daddr, pkt->get<uint32_t>());
308
309 ///
310 /// Handle write of register here
311 ///
312 uint32_t val = pkt->get<uint32_t>();
313
314 Regs::RCTL oldrctl;
315 Regs::TCTL oldtctl;
316
317 switch (daddr) {
318 case REG_CTRL:
319 regs.ctrl = val;
320 if (regs.ctrl.tfce())
321 warn("TX Flow control enabled, should implement\n");
322 if (regs.ctrl.rfce())
323 warn("RX Flow control enabled, should implement\n");
324 break;
325 case REG_CTRL_EXT:
326 regs.ctrl_ext = val;
327 break;
328 case REG_STATUS:
329 regs.sts = val;
330 break;
331 case REG_EECD:
332 int oldClk;
333 oldClk = regs.eecd.sk();
334 regs.eecd = val;
335 // See if this is a eeprom access and emulate accordingly
336 if (!oldClk && regs.eecd.sk()) {
337 if (eeOpBits < 8) {
338 eeOpcode = eeOpcode << 1 | regs.eecd.din();
339 eeOpBits++;
340 } else if (eeAddrBits < 8 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
341 eeAddr = eeAddr << 1 | regs.eecd.din();
342 eeAddrBits++;
343 } else if (eeDataBits < 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
344 assert(eeAddr>>1 < EEPROM_SIZE);
345 DPRINTF(EthernetEEPROM, "EEPROM bit read: %d word: %#X\n",
346 flash[eeAddr>>1] >> eeDataBits & 0x1, flash[eeAddr>>1]);
347 regs.eecd.dout((flash[eeAddr>>1] >> (15-eeDataBits)) & 0x1);
348 eeDataBits++;
349 } else if (eeDataBits < 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI) {
350 regs.eecd.dout(0);
351 eeDataBits++;
352 } else
353 panic("What's going on with eeprom interface? opcode:"
354 " %#x:%d addr: %#x:%d, data: %d\n", (uint32_t)eeOpcode,
355 (uint32_t)eeOpBits, (uint32_t)eeAddr,
356 (uint32_t)eeAddrBits, (uint32_t)eeDataBits);
357
358 // Reset everything for the next command
359 if ((eeDataBits == 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) ||
360 (eeDataBits == 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI)) {
361 eeOpBits = 0;
362 eeAddrBits = 0;
363 eeDataBits = 0;
364 eeOpcode = 0;
365 eeAddr = 0;
366 }
367
368 DPRINTF(EthernetEEPROM, "EEPROM: opcode: %#X:%d addr: %#X:%d\n",
369 (uint32_t)eeOpcode, (uint32_t) eeOpBits,
370 (uint32_t)eeAddr>>1, (uint32_t)eeAddrBits);
371 if (eeOpBits == 8 && !(eeOpcode == EEPROM_READ_OPCODE_SPI ||
372 eeOpcode == EEPROM_RDSR_OPCODE_SPI ))
373 panic("Unknown eeprom opcode: %#X:%d\n", (uint32_t)eeOpcode,
374 (uint32_t)eeOpBits);
375
376
377 }
378 // If driver requests eeprom access, immediately give it to it
379 regs.eecd.ee_gnt(regs.eecd.ee_req());
380 break;
381 case REG_EERD:
382 regs.eerd = val;
383 break;
384 case REG_MDIC:
385 regs.mdic = val;
386 if (regs.mdic.i())
387 panic("No support for interrupt on mdic complete\n");
388 if (regs.mdic.phyadd() != 1)
389 panic("No support for reading anything but phy\n");
390 DPRINTF(Ethernet, "%s phy address %x\n", regs.mdic.op() == 1 ? "Writing"
391 : "Reading", regs.mdic.regadd());
392 switch (regs.mdic.regadd()) {
393 case PHY_PSTATUS:
394 regs.mdic.data(0x796D); // link up
395 break;
396 case PHY_PID:
397 regs.mdic.data(0x02A8);
398 break;
399 case PHY_EPID:
400 regs.mdic.data(0x0380);
401 break;
402 case PHY_GSTATUS:
403 regs.mdic.data(0x7C00);
404 break;
405 case PHY_EPSTATUS:
406 regs.mdic.data(0x3000);
407 break;
408 case PHY_AGC:
409 regs.mdic.data(0x180); // some random length
410 break;
411 default:
412 regs.mdic.data(0);
413 }
414 regs.mdic.r(1);
415 break;
416 case REG_ICR:
417 DPRINTF(Ethernet, "Writing ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n", regs.icr(),
418 regs.imr, regs.iam, regs.ctrl_ext.iame());
419 if (regs.ctrl_ext.iame())
420 regs.imr &= ~regs.iam;
421 regs.icr = ~bits(val,30,0) & regs.icr();
422 chkInterrupt();
423 break;
424 case REG_ITR:
425 regs.itr = val;
426 break;
427 case REG_ICS:
428 DPRINTF(EthernetIntr, "Posting interrupt because of ICS write\n");
429 postInterrupt((IntTypes)val);
430 break;
431 case REG_IMS:
432 regs.imr |= val;
433 chkInterrupt();
434 break;
435 case REG_IMC:
436 regs.imr &= ~val;
437 chkInterrupt();
438 break;
439 case REG_IAM:
440 regs.iam = val;
441 break;
442 case REG_RCTL:
443 oldrctl = regs.rctl;
444 regs.rctl = val;
445 if (regs.rctl.rst()) {
446 rxDescCache.reset();
447 DPRINTF(EthernetSM, "RXS: Got RESET!\n");
448 rxFifo.clear();
449 regs.rctl.rst(0);
450 }
451 if (regs.rctl.en())
452 rxTick = true;
453 restartClock();
454 break;
455 case REG_FCTTV:
456 regs.fcttv = val;
457 break;
458 case REG_TCTL:
459 regs.tctl = val;
460 oldtctl = regs.tctl;
461 regs.tctl = val;
462 if (regs.tctl.en())
463 txTick = true;
464 restartClock();
465 if (regs.tctl.en() && !oldtctl.en()) {
466 txDescCache.reset();
467 }
468 break;
469 case REG_PBA:
470 regs.pba.rxa(val);
471 regs.pba.txa(64 - regs.pba.rxa());
472 break;
473 case REG_WUC:
474 case REG_LEDCTL:
475 case REG_FCAL:
476 case REG_FCAH:
477 case REG_FCT:
478 case REG_VET:
479 case REG_AIFS:
480 case REG_TIPG:
481 ; // We don't care, so don't store anything
482 break;
483 case REG_FCRTL:
484 regs.fcrtl = val;
485 break;
486 case REG_FCRTH:
487 regs.fcrth = val;
488 break;
489 case REG_RDBAL:
490 regs.rdba.rdbal( val & ~mask(4));
491 rxDescCache.areaChanged();
492 break;
493 case REG_RDBAH:
494 regs.rdba.rdbah(val);
495 rxDescCache.areaChanged();
496 break;
497 case REG_RDLEN:
498 regs.rdlen = val & ~mask(7);
499 rxDescCache.areaChanged();
500 break;
501 case REG_RDH:
502 regs.rdh = val;
503 rxDescCache.areaChanged();
504 break;
505 case REG_RDT:
506 regs.rdt = val;
507 DPRINTF(EthernetSM, "RXS: RDT Updated.\n");
508 if (getState() == SimObject::Running) {
509 DPRINTF(EthernetSM, "RXS: RDT Fetching Descriptors!\n");
510 rxDescCache.fetchDescriptors();
511 } else {
512 DPRINTF(EthernetSM, "RXS: RDT NOT Fetching Desc b/c draining!\n");
513 }
514 break;
515 case REG_RDTR:
516 regs.rdtr = val;
517 break;
518 case REG_RADV:
519 regs.radv = val;
520 break;
521 case REG_TDBAL:
522 regs.tdba.tdbal( val & ~mask(4));
523 txDescCache.areaChanged();
524 break;
525 case REG_TDBAH:
526 regs.tdba.tdbah(val);
527 txDescCache.areaChanged();
528 break;
529 case REG_TDLEN:
530 regs.tdlen = val & ~mask(7);
531 txDescCache.areaChanged();
532 break;
533 case REG_TDH:
534 regs.tdh = val;
535 txDescCache.areaChanged();
536 break;
537 case REG_TDT:
538 regs.tdt = val;
539 DPRINTF(EthernetSM, "TXS: TX Tail pointer updated\n");
540 if (getState() == SimObject::Running) {
541 DPRINTF(EthernetSM, "TXS: TDT Fetching Descriptors!\n");
542 txDescCache.fetchDescriptors();
543 } else {
544 DPRINTF(EthernetSM, "TXS: TDT NOT Fetching Desc b/c draining!\n");
545 }
546 break;
547 case REG_TIDV:
548 regs.tidv = val;
549 break;
550 case REG_TXDCTL:
551 regs.txdctl = val;
552 break;
553 case REG_TADV:
554 regs.tadv = val;
555 break;
556 case REG_RXCSUM:
557 regs.rxcsum = val;
558 break;
559 case REG_MANC:
560 regs.manc = val;
561 break;
562 default:
563 if (!(daddr >= REG_VFTA && daddr < (REG_VFTA + VLAN_FILTER_TABLE_SIZE*4)) &&
564 !(daddr >= REG_RAL && daddr < (REG_RAL + RCV_ADDRESS_TABLE_SIZE*8)) &&
565 !(daddr >= REG_MTA && daddr < (REG_MTA + MULTICAST_TABLE_SIZE*4)))
566 panic("Write request to unknown register number: %#x\n", daddr);
567 };
568
569 pkt->makeAtomicResponse();
570 return pioDelay;
571 }
572
573 void
574 IGbE::postInterrupt(IntTypes t, bool now)
575 {
576 assert(t);
577
578 // Interrupt is already pending
579 if (t & regs.icr() && !now)
580 return;
581
582 regs.icr = regs.icr() | t;
583 if (regs.itr.interval() == 0 || now) {
584 if (interEvent.scheduled()) {
585 interEvent.deschedule();
586 }
587 postedInterrupts++;
588 cpuPostInt();
589 } else {
590 DPRINTF(EthernetIntr, "EINT: Scheduling timer interrupt for %d ticks\n",
591 Clock::Int::ns * 256 * regs.itr.interval());
592 if (!interEvent.scheduled()) {
593 interEvent.schedule(curTick + Clock::Int::ns * 256 * regs.itr.interval());
594 }
595 }
596 }
597
// Callback for the interrupt-throttling timer: the coalescing window
// has elapsed, so deliver the pending interrupt to the CPU.
void
IGbE::delayIntEvent()
{
    cpuPostInt();
}
603
604
/**
 * Actually assert the interrupt line to the CPU, provided at least one
 * unmasked cause is pending. Any still-scheduled delay timers (RDTR,
 * RADV, TIDV, TADV) are folded into this delivery: their cause bits
 * are set and the timers cancelled, coalescing them into one interrupt.
 */
void
IGbE::cpuPostInt()
{

    // Every pending cause is masked -- nothing to deliver.
    if (!(regs.icr() & regs.imr)) {
        DPRINTF(Ethernet, "Interrupt Masked. Not Posting\n");
        return;
    }

    DPRINTF(Ethernet, "Posting Interrupt\n");


    // The throttling timer is superseded by this delivery.
    if (interEvent.scheduled()) {
        interEvent.deschedule();
    }

    if (rdtrEvent.scheduled()) {
        regs.icr.rxt0(1);
        rdtrEvent.deschedule();
    }
    if (radvEvent.scheduled()) {
        regs.icr.rxt0(1);
        radvEvent.deschedule();
    }
    if (tadvEvent.scheduled()) {
        regs.icr.txdw(1);
        tadvEvent.deschedule();
    }
    if (tidvEvent.scheduled()) {
        regs.icr.txdw(1);
        tidvEvent.deschedule();
    }

    regs.icr.int_assert(1);
    DPRINTF(EthernetIntr, "EINT: Posting interrupt to CPU now. Vector %#x\n",
            regs.icr());

    intrPost();

}
645
646 void
647 IGbE::cpuClearInt()
648 {
649 if (regs.icr.int_assert()) {
650 regs.icr.int_assert(0);
651 DPRINTF(EthernetIntr, "EINT: Clearing interrupt to CPU now. Vector %#x\n",
652 regs.icr());
653 intrClear();
654 }
655 }
656
/**
 * Re-evaluate the interrupt state after ICR/IMR changed: clear the CPU
 * interrupt if no unmasked cause remains, or (re)deliver -- immediately
 * or via the ITR throttling timer -- if one does.
 */
void
IGbE::chkInterrupt()
{
    DPRINTF(Ethernet, "Checking interrupts icr: %#x imr: %#x\n", regs.icr(),
            regs.imr);
    // Check if we need to clear the cpu interrupt
    if (!(regs.icr() & regs.imr)) {
        DPRINTF(Ethernet, "Mask cleaned all interrupts\n");
        if (interEvent.scheduled())
            interEvent.deschedule();
        if (regs.icr.int_assert())
            cpuClearInt();
    }
    DPRINTF(Ethernet, "ITR = %#X itr.interval = %#X\n", regs.itr(), regs.itr.interval());

    if (regs.icr() & regs.imr) {
        if (regs.itr.interval() == 0) {
            // Throttling disabled: deliver right away.
            cpuPostInt();
        } else {
            DPRINTF(Ethernet, "Possibly scheduling interrupt because of imr write\n");
            // Throttled: arm the timer if it isn't already running.
            if (!interEvent.scheduled()) {
                DPRINTF(Ethernet, "Scheduling for %d\n", curTick + Clock::Int::ns
                        * 256 * regs.itr.interval());
                interEvent.schedule(curTick + Clock::Int::ns * 256 * regs.itr.interval());
            }
        }
    }


}
687
688
// Construct the receive descriptor cache: @p i is the owning device,
// @p n the object name, and @p s the cache size in descriptors.
IGbE::RxDescCache::RxDescCache(IGbE *i, const std::string n, int s)
    : DescCache<RxDesc>(i, n, s), pktDone(false), pktEvent(this)

{
}
694
/**
 * Start DMA of a received packet's payload into the buffer pointed to
 * by the next unused receive descriptor. pktComplete() runs when the
 * write finishes (via pktEvent). Requires a free descriptor and a
 * packet that fits a single descriptor buffer.
 */
void
IGbE::RxDescCache::writePacket(EthPacketPtr packet)
{
    // We shouldn't have to deal with any of these yet
    DPRINTF(EthernetDesc, "Packet Length: %d Desc Size: %d\n",
            packet->length, igbe->regs.rctl.descSize());
    assert(packet->length < igbe->regs.rctl.descSize());

    assert(unusedCache.size());
    //if (!unusedCache.size())
    //    return false;

    pktPtr = packet;
    pktDone = false;
    igbe->dmaWrite(igbe->platform->pciToDma(unusedCache.front()->buf),
                   packet->length, &pktEvent, packet->data);
}
712
/**
 * Completion handler for the receive-payload DMA started by
 * writePacket(): fill in the descriptor (length, checksum offload
 * results, status, errors), retire it to the used list, and schedule
 * the appropriate receive interrupts (RDTR/RADV delay timers, or an
 * immediate RXT when no delay is configured, plus SRPD for small
 * packets).
 */
void
IGbE::RxDescCache::pktComplete()
{
    assert(unusedCache.size());
    RxDesc *desc;
    desc = unusedCache.front();

    // Unless CRC stripping is enabled, the reported length includes
    // the 4-byte ethernet FCS that the simulated wire doesn't carry.
    uint16_t crcfixup = igbe->regs.rctl.secrc() ? 0 : 4 ;
    desc->len = htole((uint16_t)(pktPtr->length + crcfixup));
    DPRINTF(EthernetDesc, "pktPtr->length: %d stripcrc offset: %d value written: %d %d\n",
            pktPtr->length, crcfixup,
            htole((uint16_t)(pktPtr->length + crcfixup)),
            (uint16_t)(pktPtr->length + crcfixup));

    // no support for anything but starting at 0
    assert(igbe->regs.rxcsum.pcss() == 0);

    DPRINTF(EthernetDesc, "Packet written to memory updating Descriptor\n");

    // Every packet is a complete single-descriptor packet here.
    uint8_t status = RXDS_DD | RXDS_EOP;
    uint8_t err = 0;

    IpPtr ip(pktPtr);

    if (ip) {
        DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n", ip->id());

        // Checksum offload: verify each enabled layer's checksum and
        // record the result bits in the descriptor.
        if (igbe->regs.rxcsum.ipofld()) {
            DPRINTF(EthernetDesc, "Checking IP checksum\n");
            status |= RXDS_IPCS;
            desc->csum = htole(cksum(ip));
            igbe->rxIpChecksums++;
            if (cksum(ip) != 0) {
                err |= RXDE_IPE;
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
            }
        }
        TcpPtr tcp(ip);
        if (tcp && igbe->regs.rxcsum.tuofld()) {
            DPRINTF(EthernetDesc, "Checking TCP checksum\n");
            status |= RXDS_TCPCS;
            desc->csum = htole(cksum(tcp));
            igbe->rxTcpChecksums++;
            if (cksum(tcp) != 0) {
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
                err |= RXDE_TCPE;
            }
        }

        UdpPtr udp(ip);
        if (udp && igbe->regs.rxcsum.tuofld()) {
            DPRINTF(EthernetDesc, "Checking UDP checksum\n");
            status |= RXDS_UDPCS;
            desc->csum = htole(cksum(udp));
            igbe->rxUdpChecksums++;
            if (cksum(udp) != 0) {
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
                // NOTE: TCPE is also used for UDP checksum errors here
                // -- presumably matching the hardware's shared
                // TCP/UDP error bit; confirm against the 8254x manual.
                err |= RXDE_TCPE;
            }
        }
    } else { // if ip
        DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n");
    }


    desc->status = htole(status);
    desc->errors = htole(err);

    // No vlan support at this point... just set it to 0
    desc->vlan = 0;

    // Deal with the rx timer interrupts
    if (igbe->regs.rdtr.delay()) {
        DPRINTF(EthernetSM, "RXS: Scheduling DTR for %d\n",
                igbe->regs.rdtr.delay() * igbe->intClock());
        // Packet timer restarts on every packet received.
        igbe->rdtrEvent.reschedule(curTick + igbe->regs.rdtr.delay() *
                                   igbe->intClock(),true);
    }

    if (igbe->regs.radv.idv() && igbe->regs.rdtr.delay()) {
        DPRINTF(EthernetSM, "RXS: Scheduling ADV for %d\n",
                igbe->regs.radv.idv() * igbe->intClock());
        // Absolute timer is only armed once; it fires regardless of
        // further packet arrivals.
        if (!igbe->radvEvent.scheduled()) {
            igbe->radvEvent.schedule(curTick + igbe->regs.radv.idv() *
                                     igbe->intClock());
        }
    }

    // if neither radv or rdtr, maybe itr is set...
    if (!igbe->regs.rdtr.delay()) {
        DPRINTF(EthernetSM, "RXS: Receive interrupt delay disabled, posting IT_RXT\n");
        igbe->postInterrupt(IT_RXT);
    }

    // If the packet is small enough, interrupt appropriately
    // I wonder if this is delayed or not?!
    if (pktPtr->length <= igbe->regs.rsrpd.idv()) {
        DPRINTF(EthernetSM, "RXS: Posting IT_SRPD beacuse small packet received\n");
        igbe->postInterrupt(IT_SRPD);
    }

    // Retire the descriptor and let the RX state machine continue.
    DPRINTF(EthernetDesc, "Processing of this descriptor complete\n");
    unusedCache.pop_front();
    usedCache.push_back(desc);


    pktPtr = NULL;
    enableSm();
    pktDone = true;
    igbe->checkDrain();

}
825
826 void
827 IGbE::RxDescCache::enableSm()
828 {
829 if (!igbe->drainEvent) {
830 igbe->rxTick = true;
831 igbe->restartClock();
832 }
833 }
834
835 bool
836 IGbE::RxDescCache::packetDone()
837 {
838 if (pktDone) {
839 pktDone = false;
840 return true;
841 }
842 return false;
843 }
844
845 bool
846 IGbE::RxDescCache::hasOutstandingEvents()
847 {
848 return pktEvent.scheduled() || wbEvent.scheduled() ||
849 fetchEvent.scheduled();
850 }
851
// Checkpoint the RX descriptor cache: base-class state plus the
// packet-completion flag.
void
IGbE::RxDescCache::serialize(std::ostream &os)
{
    DescCache<RxDesc>::serialize(os);
    SERIALIZE_SCALAR(pktDone);
}
858
// Restore the RX descriptor cache from a checkpoint; mirror of
// serialize() above.
void
IGbE::RxDescCache::unserialize(Checkpoint *cp, const std::string &section)
{
    DescCache<RxDesc>::unserialize(cp, section);
    UNSERIALIZE_SCALAR(pktDone);
}
865
866
867 ///////////////////////////////////// IGbE::TxDesc /////////////////////////////////
868
// Construct the transmit descriptor cache: @p i is the owning device,
// @p n the object name, and @p s the cache size in descriptors.
IGbE::TxDescCache::TxDescCache(IGbE *i, const std::string n, int s)
    : DescCache<TxDesc>(i,n, s), pktDone(false), isTcp(false), pktWaiting(false),
      pktEvent(this)

{
}
875
/**
 * Return the payload length of the next data descriptor to transmit,
 * or -1 if no data descriptor is available. Context descriptors at
 * the head of the cache are consumed along the way (only their TCP/UDP
 * flag is remembered; the rest is ignored).
 */
int
IGbE::TxDescCache::getPacketSize()
{
    assert(unusedCache.size());

    TxDesc *desc;

    DPRINTF(EthernetDesc, "Starting processing of descriptor\n");

    while (unusedCache.size() && TxdOp::isContext(unusedCache.front())) {
        DPRINTF(EthernetDesc, "Got context descriptor type... skipping\n");

        // I think we can just ignore these for now?
        desc = unusedCache.front();
        DPRINTF(EthernetDesc, "Descriptor upper: %#x lower: %#X\n", desc->d1,
                desc->d2);
        // is this going to be a tcp or udp packet?
        isTcp = TxdOp::tcp(desc) ? true : false;

        // make sure it's ipv4
        //assert(TxdOp::ip(desc));

        // Mark the context descriptor done and retire it.
        TxdOp::setDd(desc);
        unusedCache.pop_front();
        usedCache.push_back(desc);
    }

    if (!unusedCache.size())
        return -1;

    DPRINTF(EthernetDesc, "Next TX packet is %d bytes\n",
            TxdOp::getLen(unusedCache.front()));

    return TxdOp::getLen(unusedCache.front());
}
911
/**
 * Start DMA of the head data descriptor's buffer into @p p, appending
 * at the packet's current length (multi-descriptor packets accumulate
 * across calls). pktComplete() runs when the read finishes.
 */
void
IGbE::TxDescCache::getPacketData(EthPacketPtr p)
{
    assert(unusedCache.size());

    TxDesc *desc;
    desc = unusedCache.front();

    assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) && TxdOp::getLen(desc));

    pktPtr = p;

    pktWaiting = true;

    DPRINTF(EthernetDesc, "Starting DMA of packet at offset %d\n", p->length);
    igbe->dmaRead(igbe->platform->pciToDma(TxdOp::getBuf(desc)),
                  TxdOp::getLen(desc), &pktEvent, p->data + p->length);


}
932
933 void
934 IGbE::TxDescCache::pktComplete()
935 {
936
937 TxDesc *desc;
938 assert(unusedCache.size());
939 assert(pktPtr);
940
941 DPRINTF(EthernetDesc, "DMA of packet complete\n");
942
943
944 desc = unusedCache.front();
945 assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) && TxdOp::getLen(desc));
946
947 DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
948
949 if (!TxdOp::eop(desc)) {
950 pktPtr->length += TxdOp::getLen(desc);
951 unusedCache.pop_front();
952 usedCache.push_back(desc);
953 pktDone = true;
954 pktWaiting = false;
955 pktMultiDesc = true;
956
957 DPRINTF(EthernetDesc, "Partial Packet Descriptor of %d bytes Done\n",
958 pktPtr->length);
959 pktPtr = NULL;
960
961 enableSm();
962 igbe->checkDrain();
963 return;
964 }
965 pktMultiDesc = false;
966
967 // Set the length of the data in the EtherPacket
968 pktPtr->length += TxdOp::getLen(desc);
969
970 // no support for vlans
971 assert(!TxdOp::vle(desc));
972
973 // we alway report status
974 assert(TxdOp::rs(desc));
975
976 // we only support single packet descriptors at this point
977 assert(TxdOp::eop(desc));
978
979 // set that this packet is done
980 TxdOp::setDd(desc);
981
982 DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
983
984 if (DTRACE(EthernetDesc)) {
985 IpPtr ip(pktPtr);
986 if (ip)
987 DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n",
988 ip->id());
989 else
990 DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n");
991 }
992
993 // Checksums are only ofloaded for new descriptor types
994 if (TxdOp::isData(desc) && ( TxdOp::ixsm(desc) || TxdOp::txsm(desc)) ) {
995 DPRINTF(EthernetDesc, "Calculating checksums for packet\n");
996 IpPtr ip(pktPtr);
997 assert(ip);
998 if (TxdOp::ixsm(desc)) {
999 ip->sum(0);
1000 ip->sum(cksum(ip));
1001 igbe->txIpChecksums++;
1002 DPRINTF(EthernetDesc, "Calculated IP checksum\n");
1003 }
1004 if (TxdOp::txsm(desc)) {
1005 TcpPtr tcp(ip);
1006 UdpPtr udp(ip);
1007 if (tcp) {
1008 tcp->sum(0);
1009 tcp->sum(cksum(tcp));
1010 igbe->txTcpChecksums++;
1011 DPRINTF(EthernetDesc, "Calculated TCP checksum\n");
1012 } else if (udp) {
1013 assert(udp);
1014 udp->sum(0);
1015 udp->sum(cksum(udp));
1016 igbe->txUdpChecksums++;
1017 DPRINTF(EthernetDesc, "Calculated UDP checksum\n");
1018 } else {
1019 panic("Told to checksum, but don't know how\n");
1020 }
1021 }
1022 }
1023
1024 if (TxdOp::ide(desc)) {
1025 // Deal with the rx timer interrupts
1026 DPRINTF(EthernetDesc, "Descriptor had IDE set\n");
1027 if (igbe->regs.tidv.idv()) {
1028 DPRINTF(EthernetDesc, "setting tidv\n");
1029 igbe->tidvEvent.reschedule(curTick + igbe->regs.tidv.idv() *
1030 igbe->intClock(), true);
1031 }
1032
1033 if (igbe->regs.tadv.idv() && igbe->regs.tidv.idv()) {
1034 DPRINTF(EthernetDesc, "setting tadv\n");
1035 if (!igbe->tadvEvent.scheduled()) {
1036 igbe->tadvEvent.schedule(curTick + igbe->regs.tadv.idv() *
1037 igbe->intClock());
1038 }
1039 }
1040 }
1041
1042
1043
1044 unusedCache.pop_front();
1045 usedCache.push_back(desc);
1046 pktDone = true;
1047 pktWaiting = false;
1048 pktPtr = NULL;
1049
1050 DPRINTF(EthernetDesc, "Descriptor Done\n");
1051
1052 if (igbe->regs.txdctl.wthresh() == 0) {
1053 DPRINTF(EthernetDesc, "WTHRESH == 0, writing back descriptor\n");
1054 writeback(0);
1055 } else if (igbe->regs.txdctl.wthresh() >= usedCache.size()) {
1056 DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n");
1057 writeback((igbe->cacheBlockSize()-1)>>4);
1058 }
1059 enableSm();
1060 igbe->checkDrain();
1061 }
1062
// Checkpoint the TX descriptor cache: base-class state plus the
// packet-progress flags.
void
IGbE::TxDescCache::serialize(std::ostream &os)
{
    DescCache<TxDesc>::serialize(os);
    SERIALIZE_SCALAR(pktDone);
    SERIALIZE_SCALAR(isTcp);
    SERIALIZE_SCALAR(pktWaiting);
    SERIALIZE_SCALAR(pktMultiDesc);
}
1072
// Restore the TX descriptor cache from a checkpoint; mirror of
// serialize() above.
void
IGbE::TxDescCache::unserialize(Checkpoint *cp, const std::string &section)
{
    DescCache<TxDesc>::unserialize(cp, section);
    UNSERIALIZE_SCALAR(pktDone);
    UNSERIALIZE_SCALAR(isTcp);
    UNSERIALIZE_SCALAR(pktWaiting);
    UNSERIALIZE_SCALAR(pktMultiDesc);
}
1082
1083 bool
1084 IGbE::TxDescCache::packetAvailable()
1085 {
1086 if (pktDone) {
1087 pktDone = false;
1088 return true;
1089 }
1090 return false;
1091 }
1092
1093 void
1094 IGbE::TxDescCache::enableSm()
1095 {
1096 if (!igbe->drainEvent) {
1097 igbe->txTick = true;
1098 igbe->restartClock();
1099 }
1100 }
1101
1102 bool
1103 IGbE::TxDescCache::hasOutstandingEvents()
1104 {
1105 return pktEvent.scheduled() || wbEvent.scheduled() ||
1106 fetchEvent.scheduled();
1107 }
1108
1109
1110 ///////////////////////////////////// IGbE /////////////////////////////////
1111
/**
 * (Re)start the device tick event if any state machine wants to run
 * and the simulator isn't draining. The schedule expression rounds
 * curTick down to the current clock edge, then advances one cycle, so
 * the event always fires on the next device clock edge.
 */
void
IGbE::restartClock()
{
    if (!tickEvent.scheduled() && (rxTick || txTick || txFifoTick) && getState() ==
        SimObject::Running)
        tickEvent.schedule((curTick/ticks(1)) * ticks(1) + ticks(1));
}
1119
/**
 * Begin draining the device for a checkpoint or CPU switch: stop all
 * state machines, cancel the tick event, and report how many
 * outstanding activities (ports plus any in-flight descriptor-cache
 * work) must complete before the drain finishes. A non-zero count
 * leaves the device in Draining state; checkDrain() signals
 * @p de when the last activity retires.
 */
unsigned int
IGbE::drain(Event *de)
{
    unsigned int count;
    count = pioPort->drain(de) + dmaPort->drain(de);
    if (rxDescCache.hasOutstandingEvents() ||
        txDescCache.hasOutstandingEvents()) {
        count++;
        drainEvent = de;
    }

    txFifoTick = false;
    txTick = false;
    rxTick = false;

    if (tickEvent.scheduled())
        tickEvent.deschedule();

    if (count)
        changeState(Draining);
    else
        changeState(Drained);

    return count;
}
1145
1146 void
1147 IGbE::resume()
1148 {
1149 SimObject::resume();
1150
1151 txFifoTick = true;
1152 txTick = true;
1153 rxTick = true;
1154
1155 restartClock();
1156 }
1157
1158 void
1159 IGbE::checkDrain()
1160 {
1161 if (!drainEvent)
1162 return;
1163
1164 txFifoTick = false;
1165 txTick = false;
1166 rxTick = false;
1167 if (!rxDescCache.hasOutstandingEvents() &&
1168 !txDescCache.hasOutstandingEvents()) {
1169 drainEvent->process();
1170 drainEvent = NULL;
1171 }
1172 }
1173
/**
 * One step of the transmit state machine, run from the device tick:
 * move a completed packet into the TX FIFO, post low-threshold/empty
 * interrupts, and start descriptor fetches or payload DMA as needed.
 * Sets txTick = false whenever there is nothing to do, which stops
 * this machine from being ticked until something re-enables it.
 */
void
IGbE::txStateMachine()
{
    if (!regs.tctl.en()) {
        txTick = false;
        DPRINTF(EthernetSM, "TXS: TX disabled, stopping ticking\n");
        return;
    }

    // If we have a packet available and it's length is not 0 (meaning it's not
    // a multidescriptor packet) put it in the fifo, otherwise an the next
    // iteration we'll get the rest of the data
    if (txPacket && txDescCache.packetAvailable()
        && !txDescCache.packetMultiDesc() && txPacket->length) {
        bool success;

        DPRINTF(EthernetSM, "TXS: packet placed in TX FIFO\n");
        success = txFifo.push(txPacket);
        // Wake the FIFO drainer too -- unless we're draining the sim.
        txFifoTick = true && !drainEvent;
        assert(success);
        txPacket = NULL;
        txDescCache.writeback((cacheBlockSize()-1)>>4);
        return;
    }

    // Only support descriptor granularity
    assert(regs.txdctl.gran());
    // LWTHRESH is in units of 8 descriptors.
    if (regs.txdctl.lwthresh() && txDescCache.descLeft() < (regs.txdctl.lwthresh() * 8)) {
        DPRINTF(EthernetSM, "TXS: LWTHRESH caused posting of TXDLOW\n");
        postInterrupt(IT_TXDLOW);
    }

    // Allocate a fresh packet buffer to assemble into.
    if (!txPacket) {
        txPacket = new EthPacketData(16384);
    }

    if (!txDescCache.packetWaiting()) {
        // Ring empty: force writeback, post TXQE, and stop until the
        // driver advances the tail pointer.
        if (txDescCache.descLeft() == 0) {
            postInterrupt(IT_TXQE);
            txDescCache.writeback(0);
            txDescCache.fetchDescriptors();
            DPRINTF(EthernetSM, "TXS: No descriptors left in ring, forcing "
                    "writeback stopping ticking and posting TXQE\n");
            txTick = false;
            return;
        }


        // Cache empty: kick off a fetch and stop until it completes.
        if (!(txDescCache.descUnused())) {
            txDescCache.fetchDescriptors();
            DPRINTF(EthernetSM, "TXS: No descriptors available in cache, fetching and stopping ticking\n");
            txTick = false;
            return;
        }


        int size;
        size = txDescCache.getPacketSize();
        if (size > 0 && txFifo.avail() > size) {
            DPRINTF(EthernetSM, "TXS: Reserving %d bytes in FIFO and begining "
                    "DMA of next packet\n", size);
            txFifo.reserve(size);
            txDescCache.getPacketData(txPacket);
        } else if (size <= 0) {
            DPRINTF(EthernetSM, "TXS: getPacketSize returned: %d\n", size);
            DPRINTF(EthernetSM, "TXS: No packets to get, writing back used descriptors\n");
            txDescCache.writeback(0);
        } else {
            DPRINTF(EthernetSM, "TXS: FIFO full, stopping ticking until space "
                    "available in FIFO\n");
            txTick = false;
        }


        return;
    }
    DPRINTF(EthernetSM, "TXS: Nothing to do, stopping ticking\n");
    txTick = false;
}
1253
1254 bool
1255 IGbE::ethRxPkt(EthPacketPtr pkt)
1256 {
1257 rxBytes += pkt->length;
1258 rxPackets++;
1259
1260 DPRINTF(Ethernet, "RxFIFO: Receiving pcakte from wire\n");
1261
1262 if (!regs.rctl.en()) {
1263 DPRINTF(Ethernet, "RxFIFO: RX not enabled, dropping\n");
1264 return true;
1265 }
1266
1267 // restart the state machines if they are stopped
1268 rxTick = true && !drainEvent;
1269 if ((rxTick || txTick) && !tickEvent.scheduled()) {
1270 DPRINTF(EthernetSM, "RXS: received packet into fifo, starting ticking\n");
1271 restartClock();
1272 }
1273
1274 if (!rxFifo.push(pkt)) {
1275 DPRINTF(Ethernet, "RxFIFO: Packet won't fit in fifo... dropped\n");
1276 postInterrupt(IT_RXO, true);
1277 return false;
1278 }
1279
1280 return true;
1281 }
1282
1283
void
IGbE::rxStateMachine()
{
    // RX disabled in the receive control register: stop ticking.
    if (!regs.rctl.en()) {
        rxTick = false;
        DPRINTF(EthernetSM, "RXS: RX disabled, stopping ticking\n");
        return;
    }

    // If the packet is done check for interrupts/descriptors/etc
    if (rxDescCache.packetDone()) {
        rxDmaPacket = false;
        DPRINTF(EthernetSM, "RXS: Packet completed DMA to memory\n");
        int descLeft = rxDescCache.descLeft();
        // RDMTS selects the free-descriptor threshold (1/8, 1/4 or 1/2 of
        // the ring length) below which RXDMT is posted. The case
        // fall-through is intentional: each case breaks out only when its
        // threshold has NOT been crossed.
        switch (regs.rctl.rdmts()) {
          case 2: if (descLeft > .125 * regs.rdlen()) break;
          case 1: if (descLeft > .250 * regs.rdlen()) break;
          case 0: if (descLeft > .500 * regs.rdlen()) break;
            DPRINTF(Ethernet, "RXS: Interrupting (RXDMT) because of descriptors left\n");
            postInterrupt(IT_RXDMT);
            break;
        }

        // Ring completely consumed: force a writeback and stop ticking
        // until the driver replenishes descriptors.
        if (descLeft == 0) {
            rxDescCache.writeback(0);
            DPRINTF(EthernetSM, "RXS: No descriptors left in ring, forcing"
                    " writeback and stopping ticking\n");
            rxTick = false;
        }

        // only support descriptor granularities
        assert(regs.rxdctl.gran());

        if (regs.rxdctl.wthresh() >= rxDescCache.descUsed()) {
            DPRINTF(EthernetSM, "RXS: Writing back because WTHRESH >= descUsed\n");
            // Write back at most a cache block's worth of descriptors
            // (16 bytes each, hence the >>4 shifts).
            if (regs.rxdctl.wthresh() < (cacheBlockSize()>>4))
                rxDescCache.writeback(regs.rxdctl.wthresh()-1);
            else
                rxDescCache.writeback((cacheBlockSize()-1)>>4);
        }

        // Prefetch more descriptors when the cache drops below PTHRESH,
        // provided enough host descriptors remain (HTHRESH).
        if ((rxDescCache.descUnused() < regs.rxdctl.pthresh()) &&
            ((rxDescCache.descLeft() - rxDescCache.descUnused()) > regs.rxdctl.hthresh())) {
            DPRINTF(EthernetSM, "RXS: Fetching descriptors because descUnused < PTHRESH\n");
            rxDescCache.fetchDescriptors();
        }

        if (rxDescCache.descUnused() == 0) {
            rxDescCache.fetchDescriptors();
            DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
                    "fetching descriptors and stopping ticking\n");
            rxTick = false;
        }
        return;
    }

    // A packet DMA is already in flight; sleep until it completes.
    if (rxDmaPacket) {
        DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n");
        rxTick = false;
        return;
    }

    if (!rxDescCache.descUnused()) {
        rxDescCache.fetchDescriptors();
        DPRINTF(EthernetSM, "RXS: No descriptors available in cache, stopping ticking\n");
        rxTick = false;
        DPRINTF(EthernetSM, "RXS: No descriptors available, fetching\n");
        return;
    }

    if (rxFifo.empty()) {
        DPRINTF(EthernetSM, "RXS: RxFIFO empty, stopping ticking\n");
        rxTick = false;
        return;
    }

    // Start DMA of the packet at the head of the FIFO into memory; the
    // machine sleeps until packetDone() (handled above) fires.
    EthPacketPtr pkt;
    pkt = rxFifo.front();


    rxDescCache.writePacket(pkt);
    DPRINTF(EthernetSM, "RXS: Writing packet into memory\n");
    DPRINTF(EthernetSM, "RXS: Removing packet from FIFO\n");
    rxFifo.pop();
    DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n");
    rxTick = false;
    rxDmaPacket = true;
}
1372
1373 void
1374 IGbE::txWire()
1375 {
1376 if (txFifo.empty()) {
1377 txFifoTick = false;
1378 return;
1379 }
1380
1381
1382 if (etherInt->sendPacket(txFifo.front())) {
1383 if (DTRACE(EthernetSM)) {
1384 IpPtr ip(txFifo.front());
1385 if (ip)
1386 DPRINTF(EthernetSM, "Transmitting Ip packet with Id=%d\n",
1387 ip->id());
1388 else
1389 DPRINTF(EthernetSM, "Transmitting Non-Ip packet\n");
1390 }
1391 DPRINTF(EthernetSM, "TxFIFO: Successful transmit, bytes available in fifo: %d\n",
1392 txFifo.avail());
1393
1394 txBytes += txFifo.front()->length;
1395 txPackets++;
1396
1397 txFifo.pop();
1398 } else {
1399 // We'll get woken up when the packet ethTxDone() gets called
1400 txFifoTick = false;
1401 }
1402 }
1403
1404 void
1405 IGbE::tick()
1406 {
1407 DPRINTF(EthernetSM, "IGbE: -------------- Cycle --------------\n");
1408
1409 if (rxTick)
1410 rxStateMachine();
1411
1412 if (txTick)
1413 txStateMachine();
1414
1415 if (txFifoTick)
1416 txWire();
1417
1418
1419 if (rxTick || txTick || txFifoTick)
1420 tickEvent.schedule(curTick + ticks(1));
1421 }
1422
1423 void
1424 IGbE::ethTxDone()
1425 {
1426 // restart the tx state machines if they are stopped
1427 // fifo to send another packet
1428 // tx sm to put more data into the fifo
1429 txFifoTick = true && !drainEvent;
1430 if (txDescCache.descLeft() != 0 && !drainEvent)
1431 txTick = true;
1432
1433 restartClock();
1434 txWire();
1435 DPRINTF(EthernetSM, "TxFIFO: Transmission complete\n");
1436 }
1437
void
IGbE::serialize(std::ostream &os)
{
    // Checkpoint the PCI device state first, then the NIC-specific state.
    PciDev::serialize(os);

    // Device registers and EEPROM interface state.
    regs.serialize(os);
    SERIALIZE_SCALAR(eeOpBits);
    SERIALIZE_SCALAR(eeAddrBits);
    SERIALIZE_SCALAR(eeDataBits);
    SERIALIZE_SCALAR(eeOpcode);
    SERIALIZE_SCALAR(eeAddr);
    SERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);

    rxFifo.serialize("rxfifo", os);
    txFifo.serialize("txfifo", os);

    // txPacket may be NULL; record its existence so unserialize() knows
    // whether to reconstruct it.
    bool txPktExists = txPacket;
    SERIALIZE_SCALAR(txPktExists);
    if (txPktExists)
        txPacket->serialize("txpacket", os);

    // Record the absolute tick of each scheduled timer event (0 means the
    // event was not scheduled at checkpoint time).
    Tick rdtr_time = 0, radv_time = 0, tidv_time = 0, tadv_time = 0,
         inter_time = 0;

    if (rdtrEvent.scheduled())
        rdtr_time = rdtrEvent.when();
    SERIALIZE_SCALAR(rdtr_time);

    if (radvEvent.scheduled())
        radv_time = radvEvent.when();
    SERIALIZE_SCALAR(radv_time);

    if (tidvEvent.scheduled())
        tidv_time = tidvEvent.when();
    SERIALIZE_SCALAR(tidv_time);

    if (tadvEvent.scheduled())
        tadv_time = tadvEvent.when();
    SERIALIZE_SCALAR(tadv_time);

    if (interEvent.scheduled())
        inter_time = interEvent.when();
    SERIALIZE_SCALAR(inter_time);

    // The descriptor caches get their own checkpoint sections.
    nameOut(os, csprintf("%s.TxDescCache", name()));
    txDescCache.serialize(os);

    nameOut(os, csprintf("%s.RxDescCache", name()));
    rxDescCache.serialize(os);
}
1488
void
IGbE::unserialize(Checkpoint *cp, const std::string &section)
{
    // Restore the PCI device state first, then the NIC-specific state,
    // mirroring the order used by serialize().
    PciDev::unserialize(cp, section);

    // Device registers and EEPROM interface state.
    regs.unserialize(cp, section);
    UNSERIALIZE_SCALAR(eeOpBits);
    UNSERIALIZE_SCALAR(eeAddrBits);
    UNSERIALIZE_SCALAR(eeDataBits);
    UNSERIALIZE_SCALAR(eeOpcode);
    UNSERIALIZE_SCALAR(eeAddr);
    UNSERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);

    rxFifo.unserialize("rxfifo", cp, section);
    txFifo.unserialize("txfifo", cp, section);

    // Reconstruct the in-flight TX packet only if one existed at
    // checkpoint time.
    bool txPktExists;
    UNSERIALIZE_SCALAR(txPktExists);
    if (txPktExists) {
        txPacket = new EthPacketData(16384);
        txPacket->unserialize("txpacket", cp, section);
    }

    // Re-enable all state machines after restore.
    rxTick = true;
    txTick = true;
    txFifoTick = true;

    // Timer events were saved as absolute ticks; 0 means "not scheduled".
    Tick rdtr_time, radv_time, tidv_time, tadv_time, inter_time;
    UNSERIALIZE_SCALAR(rdtr_time);
    UNSERIALIZE_SCALAR(radv_time);
    UNSERIALIZE_SCALAR(tidv_time);
    UNSERIALIZE_SCALAR(tadv_time);
    UNSERIALIZE_SCALAR(inter_time);

    if (rdtr_time)
        rdtrEvent.schedule(rdtr_time);

    if (radv_time)
        radvEvent.schedule(radv_time);

    if (tidv_time)
        tidvEvent.schedule(tidv_time);

    if (tadv_time)
        tadvEvent.schedule(tadv_time);

    if (inter_time)
        interEvent.schedule(inter_time);

    // Descriptor caches restore from their own checkpoint sections.
    txDescCache.unserialize(cp, csprintf("%s.TxDescCache", section));

    rxDescCache.unserialize(cp, csprintf("%s.RxDescCache", section));
}
1542
// Factory method used by the simulator's configuration system to
// instantiate the device from its parameter object.
IGbE *
IGbEParams::create()
{
    return new IGbE(this);
}