X86: Set the local APIC ID to something meaningful.
[gem5.git] / src / dev / i8254xGBe.cc
1 /*
2 * Copyright (c) 2006 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Ali Saidi
29 */
30
31 /* @file
32 * Device model for Intel's 8254x line of gigabit ethernet controllers.
33 * In particular an 82547 revision 2 (82547GI) MAC because it seems to have the
34 * fewest workarounds in the driver. It will probably work with most of the
35 * other MACs with slight modifications.
36 */
37
38
39 /*
40 * @todo really there are multiple dma engines.. we should implement them.
41 */
42
43 #include <algorithm>
44
45 #include "base/inet.hh"
46 #include "base/trace.hh"
47 #include "dev/i8254xGBe.hh"
48 #include "mem/packet.hh"
49 #include "mem/packet_access.hh"
50 #include "params/IGbE.hh"
51 #include "sim/stats.hh"
52 #include "sim/system.hh"
53
54 using namespace iGbReg;
55 using namespace Net;
56
/**
 * Construct the 8254x MAC model.  Sets up the ethernet interface object,
 * initializes the device registers to their documented power-on values,
 * builds the emulated EEPROM contents (MAC address + checksum) and clears
 * both packet FIFOs.
 */
IGbE::IGbE(const Params *p)
    : EtherDevice(p), etherInt(NULL), drainEvent(NULL), useFlowControl(p->use_flow_control),
      rxFifo(p->rx_fifo_size), txFifo(p->tx_fifo_size), rxTick(false),
      txTick(false), txFifoTick(false), rxDmaPacket(false), pktOffset(0),
      fetchDelay(p->fetch_delay), wbDelay(p->wb_delay),
      fetchCompDelay(p->fetch_comp_delay), wbCompDelay(p->wb_comp_delay),
      rxWriteDelay(p->rx_write_delay), txReadDelay(p->tx_read_delay),
      rdtrEvent(this), radvEvent(this),
      tadvEvent(this), tidvEvent(this), tickEvent(this), interEvent(this),
      rxDescCache(this, name()+".RxDesc", p->rx_desc_cache_size),
      txDescCache(this, name()+".TxDesc", p->tx_desc_cache_size),
      clock(p->clock), lastInterrupt(0)
{
    etherInt = new IGbEInt(name() + ".int", this);

    // Initialized internal registers per Intel documentation
    // All registers intialized to 0 by per register constructor
    regs.ctrl.fd(1);            // full duplex
    regs.ctrl.lrst(1);          // link reset
    regs.ctrl.speed(2);
    regs.ctrl.frcspd(1);        // force speed
    regs.sts.speed(3); // Say we're 1000Mbps
    regs.sts.fd(1); // full duplex
    regs.sts.lu(1); // link up
    regs.eecd.fwe(1);           // flash write enable
    regs.eecd.ee_type(1);       // SPI-style EEPROM
    regs.imr = 0;
    regs.iam = 0;
    regs.rxdctl.gran(1);
    regs.rxdctl.wthresh(1);
    regs.fcrth(1);
    regs.tdwba = 0;
    regs.rlpml = 0;
    regs.sw_fw_sync = 0;

    // Packet buffer allocation split between RX and TX
    regs.pba.rxa(0x30);
    regs.pba.txa(0x10);

    // Bit-banged EEPROM access state machine starts idle
    eeOpBits = 0;
    eeAddrBits = 0;
    eeDataBits = 0;
    eeOpcode = 0;

    // clear all 64 16 bit words of the eeprom
    memset(&flash, 0, EEPROM_SIZE*2);

    // Set the MAC address
    memcpy(flash, p->hardware_address.bytes(), ETH_ADDR_LEN);
    for (int x = 0; x < ETH_ADDR_LEN/2; x++)
        flash[x] = htobe(flash[x]);

    // Compute the checksum over the full (word-wise) EEPROM image so that
    // the final word can be patched to make the sum come out to EEPROM_CSUM.
    uint16_t csum = 0;
    for (int x = 0; x < EEPROM_SIZE; x++)
        csum += htobe(flash[x]);


    // Magic happy checksum value
    flash[EEPROM_SIZE-1] = htobe((uint16_t)(EEPROM_CSUM - csum));

    // Store the MAC address as queue ID
    macAddr = p->hardware_address;

    rxFifo.clear();
    txFifo.clear();
}
122
/**
 * SimObject init hook: grab the CPA annotation singleton, then run the
 * generic PCI device initialization.
 */
void
IGbE::init()
{
    cpa = CPA::cpa();
    PciDev::init();
}
129
130 EtherInt*
131 IGbE::getEthPort(const std::string &if_name, int idx)
132 {
133
134 if (if_name == "interface") {
135 if (etherInt->getPeer())
136 panic("Port already connected to\n");
137 return etherInt;
138 }
139 return NULL;
140 }
141
142 Tick
143 IGbE::writeConfig(PacketPtr pkt)
144 {
145 int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
146 if (offset < PCI_DEVICE_SPECIFIC)
147 PciDev::writeConfig(pkt);
148 else
149 panic("Device specific PCI config space not implemented.\n");
150
151 ///
152 /// Some work may need to be done here based for the pci COMMAND bits.
153 ///
154
155 return pioDelay;
156 }
157
/**
 * Handle a 32-bit read from the device's memory-mapped register BAR.
 *
 * Note that some reads have side effects: reading ICR may clear pending
 * interrupt causes (and apply IAM masking when IAME is set), reading RDTR
 * consumes the "flush partial descriptor" bit, and reading SWSM sets the
 * semaphore-busy bit.
 *
 * @param pkt the read request; response data is filled in here.
 * @return the programmed PIO delay for this access.
 */
Tick
IGbE::read(PacketPtr pkt)
{
    int bar;
    Addr daddr;

    if (!getBAR(pkt->getAddr(), bar, daddr))
        panic("Invalid PCI memory access to unmapped memory.\n");

    // Only Memory register BAR is allowed
    assert(bar == 0);

    // Only 32bit accesses allowed
    assert(pkt->getSize() == 4);

    DPRINTF(Ethernet, "Read device register %#X\n", daddr);

    pkt->allocate();

    ///
    /// Handle read of register here
    ///


    switch (daddr) {
      case REG_CTRL:
        pkt->set<uint32_t>(regs.ctrl());
        break;
      case REG_STATUS:
        pkt->set<uint32_t>(regs.sts());
        break;
      case REG_EECD:
        pkt->set<uint32_t>(regs.eecd());
        break;
      case REG_EERD:
        pkt->set<uint32_t>(regs.eerd());
        break;
      case REG_CTRL_EXT:
        pkt->set<uint32_t>(regs.ctrl_ext());
        break;
      case REG_MDIC:
        pkt->set<uint32_t>(regs.mdic());
        break;
      case REG_ICR:
        DPRINTF(Ethernet, "Reading ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n", regs.icr(),
                regs.imr, regs.iam, regs.ctrl_ext.iame());
        pkt->set<uint32_t>(regs.icr());
        // Reading ICR clears the cause bits when either the interrupt line
        // is asserted or all interrupts are masked (IMR == 0).
        if (regs.icr.int_assert() || regs.imr == 0) {
            regs.icr = regs.icr() & ~mask(30);
            DPRINTF(Ethernet, "Cleared ICR. ICR=%#x\n", regs.icr());
        }
        // With IAME set, an asserted-interrupt ICR read also applies the
        // interrupt acknowledge auto mask.
        if (regs.ctrl_ext.iame() && regs.icr.int_assert())
            regs.imr &= ~regs.iam;
        chkInterrupt();
        break;
      case REG_EICR:
        // This is only useful for MSI, but the driver reads it every time
        // Just don't do anything
        pkt->set<uint32_t>(0);
        break;
      case REG_ITR:
        pkt->set<uint32_t>(regs.itr());
        break;
      case REG_RCTL:
        pkt->set<uint32_t>(regs.rctl());
        break;
      case REG_FCTTV:
        pkt->set<uint32_t>(regs.fcttv());
        break;
      case REG_TCTL:
        pkt->set<uint32_t>(regs.tctl());
        break;
      case REG_PBA:
        pkt->set<uint32_t>(regs.pba());
        break;
      case REG_WUC:
      case REG_LEDCTL:
        pkt->set<uint32_t>(0); // We don't care, so just return 0
        break;
      case REG_FCRTL:
        pkt->set<uint32_t>(regs.fcrtl());
        break;
      case REG_FCRTH:
        pkt->set<uint32_t>(regs.fcrth());
        break;
      case REG_RDBAL:
        pkt->set<uint32_t>(regs.rdba.rdbal());
        break;
      case REG_RDBAH:
        pkt->set<uint32_t>(regs.rdba.rdbah());
        break;
      case REG_RDLEN:
        pkt->set<uint32_t>(regs.rdlen());
        break;
      case REG_SRRCTL:
        pkt->set<uint32_t>(regs.srrctl());
        break;
      case REG_RDH:
        pkt->set<uint32_t>(regs.rdh());
        break;
      case REG_RDT:
        pkt->set<uint32_t>(regs.rdt());
        break;
      case REG_RDTR:
        pkt->set<uint32_t>(regs.rdtr());
        // FPD (flush partial descriptor) is consumed by the read: write back
        // the cached descriptors and post a receive-timer interrupt.
        if (regs.rdtr.fpd()) {
            rxDescCache.writeback(0);
            DPRINTF(EthernetIntr, "Posting interrupt because of RDTR.FPD write\n");
            postInterrupt(IT_RXT);
            regs.rdtr.fpd(0);
        }
        break;
      case REG_RXDCTL:
        pkt->set<uint32_t>(regs.rxdctl());
        break;
      case REG_RADV:
        pkt->set<uint32_t>(regs.radv());
        break;
      case REG_TDBAL:
        pkt->set<uint32_t>(regs.tdba.tdbal());
        break;
      case REG_TDBAH:
        pkt->set<uint32_t>(regs.tdba.tdbah());
        break;
      case REG_TDLEN:
        pkt->set<uint32_t>(regs.tdlen());
        break;
      case REG_TDH:
        pkt->set<uint32_t>(regs.tdh());
        break;
      case REG_TXDCA_CTL:
        pkt->set<uint32_t>(regs.txdca_ctl());
        break;
      case REG_TDT:
        pkt->set<uint32_t>(regs.tdt());
        break;
      case REG_TIDV:
        pkt->set<uint32_t>(regs.tidv());
        break;
      case REG_TXDCTL:
        pkt->set<uint32_t>(regs.txdctl());
        break;
      case REG_TADV:
        pkt->set<uint32_t>(regs.tadv());
        break;
      case REG_TDWBAL:
        // TDWBA is a single 64-bit value split across two 32-bit registers.
        pkt->set<uint32_t>(regs.tdwba & mask(32));
        break;
      case REG_TDWBAH:
        pkt->set<uint32_t>(regs.tdwba >> 32);
        break;
      case REG_RXCSUM:
        pkt->set<uint32_t>(regs.rxcsum());
        break;
      case REG_RLPML:
        pkt->set<uint32_t>(regs.rlpml);
        break;
      case REG_RFCTL:
        pkt->set<uint32_t>(regs.rfctl());
        break;
      case REG_MANC:
        pkt->set<uint32_t>(regs.manc());
        break;
      case REG_SWSM:
        // Reading SWSM sets the semaphore-busy bit as a side effect.
        pkt->set<uint32_t>(regs.swsm());
        regs.swsm.smbi(1);
        break;
      case REG_FWSM:
        pkt->set<uint32_t>(regs.fwsm());
        break;
      case REG_SWFWSYNC:
        pkt->set<uint32_t>(regs.sw_fw_sync);
        break;
      default:
        // The VLAN filter, receive address, multicast and statistics tables
        // are not modeled; reads from those ranges return zero.
        if (!(daddr >= REG_VFTA && daddr < (REG_VFTA + VLAN_FILTER_TABLE_SIZE*4)) &&
            !(daddr >= REG_RAL && daddr < (REG_RAL + RCV_ADDRESS_TABLE_SIZE*8)) &&
            !(daddr >= REG_MTA && daddr < (REG_MTA + MULTICAST_TABLE_SIZE*4)) &&
            !(daddr >= REG_CRCERRS && daddr < (REG_CRCERRS + STATS_REGS_SIZE)))
            panic("Read request to unknown register number: %#x\n", daddr);
        else
            pkt->set<uint32_t>(0);
    };

    pkt->makeAtomicResponse();
    return pioDelay;
}
344
345 Tick
346 IGbE::write(PacketPtr pkt)
347 {
348 int bar;
349 Addr daddr;
350
351
352 if (!getBAR(pkt->getAddr(), bar, daddr))
353 panic("Invalid PCI memory access to unmapped memory.\n");
354
355 // Only Memory register BAR is allowed
356 assert(bar == 0);
357
358 // Only 32bit accesses allowed
359 assert(pkt->getSize() == sizeof(uint32_t));
360
361 DPRINTF(Ethernet, "Wrote device register %#X value %#X\n", daddr, pkt->get<uint32_t>());
362
363 ///
364 /// Handle write of register here
365 ///
366 uint32_t val = pkt->get<uint32_t>();
367
368 Regs::RCTL oldrctl;
369 Regs::TCTL oldtctl;
370
371 switch (daddr) {
372 case REG_CTRL:
373 regs.ctrl = val;
374 if (regs.ctrl.tfce())
375 warn("TX Flow control enabled, should implement\n");
376 if (regs.ctrl.rfce())
377 warn("RX Flow control enabled, should implement\n");
378 break;
379 case REG_CTRL_EXT:
380 regs.ctrl_ext = val;
381 break;
382 case REG_STATUS:
383 regs.sts = val;
384 break;
385 case REG_EECD:
386 int oldClk;
387 oldClk = regs.eecd.sk();
388 regs.eecd = val;
389 // See if this is a eeprom access and emulate accordingly
390 if (!oldClk && regs.eecd.sk()) {
391 if (eeOpBits < 8) {
392 eeOpcode = eeOpcode << 1 | regs.eecd.din();
393 eeOpBits++;
394 } else if (eeAddrBits < 8 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
395 eeAddr = eeAddr << 1 | regs.eecd.din();
396 eeAddrBits++;
397 } else if (eeDataBits < 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
398 assert(eeAddr>>1 < EEPROM_SIZE);
399 DPRINTF(EthernetEEPROM, "EEPROM bit read: %d word: %#X\n",
400 flash[eeAddr>>1] >> eeDataBits & 0x1, flash[eeAddr>>1]);
401 regs.eecd.dout((flash[eeAddr>>1] >> (15-eeDataBits)) & 0x1);
402 eeDataBits++;
403 } else if (eeDataBits < 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI) {
404 regs.eecd.dout(0);
405 eeDataBits++;
406 } else
407 panic("What's going on with eeprom interface? opcode:"
408 " %#x:%d addr: %#x:%d, data: %d\n", (uint32_t)eeOpcode,
409 (uint32_t)eeOpBits, (uint32_t)eeAddr,
410 (uint32_t)eeAddrBits, (uint32_t)eeDataBits);
411
412 // Reset everything for the next command
413 if ((eeDataBits == 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) ||
414 (eeDataBits == 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI)) {
415 eeOpBits = 0;
416 eeAddrBits = 0;
417 eeDataBits = 0;
418 eeOpcode = 0;
419 eeAddr = 0;
420 }
421
422 DPRINTF(EthernetEEPROM, "EEPROM: opcode: %#X:%d addr: %#X:%d\n",
423 (uint32_t)eeOpcode, (uint32_t) eeOpBits,
424 (uint32_t)eeAddr>>1, (uint32_t)eeAddrBits);
425 if (eeOpBits == 8 && !(eeOpcode == EEPROM_READ_OPCODE_SPI ||
426 eeOpcode == EEPROM_RDSR_OPCODE_SPI ))
427 panic("Unknown eeprom opcode: %#X:%d\n", (uint32_t)eeOpcode,
428 (uint32_t)eeOpBits);
429
430
431 }
432 // If driver requests eeprom access, immediately give it to it
433 regs.eecd.ee_gnt(regs.eecd.ee_req());
434 break;
435 case REG_EERD:
436 regs.eerd = val;
437 if (regs.eerd.start()) {
438 regs.eerd.done(1);
439 assert(regs.eerd.addr() < EEPROM_SIZE);
440 regs.eerd.data(flash[regs.eerd.addr()]);
441 regs.eerd.start(0);
442 DPRINTF(EthernetEEPROM, "EEPROM: read addr: %#X data %#x\n",
443 regs.eerd.addr(), regs.eerd.data());
444 }
445 break;
446 case REG_MDIC:
447 regs.mdic = val;
448 if (regs.mdic.i())
449 panic("No support for interrupt on mdic complete\n");
450 if (regs.mdic.phyadd() != 1)
451 panic("No support for reading anything but phy\n");
452 DPRINTF(Ethernet, "%s phy address %x\n", regs.mdic.op() == 1 ? "Writing"
453 : "Reading", regs.mdic.regadd());
454 switch (regs.mdic.regadd()) {
455 case PHY_PSTATUS:
456 regs.mdic.data(0x796D); // link up
457 break;
458 case PHY_PID:
459 regs.mdic.data(params()->phy_pid);
460 break;
461 case PHY_EPID:
462 regs.mdic.data(params()->phy_epid);
463 break;
464 case PHY_GSTATUS:
465 regs.mdic.data(0x7C00);
466 break;
467 case PHY_EPSTATUS:
468 regs.mdic.data(0x3000);
469 break;
470 case PHY_AGC:
471 regs.mdic.data(0x180); // some random length
472 break;
473 default:
474 regs.mdic.data(0);
475 }
476 regs.mdic.r(1);
477 break;
478 case REG_ICR:
479 DPRINTF(Ethernet, "Writing ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n", regs.icr(),
480 regs.imr, regs.iam, regs.ctrl_ext.iame());
481 if (regs.ctrl_ext.iame())
482 regs.imr &= ~regs.iam;
483 regs.icr = ~bits(val,30,0) & regs.icr();
484 chkInterrupt();
485 break;
486 case REG_ITR:
487 regs.itr = val;
488 break;
489 case REG_ICS:
490 DPRINTF(EthernetIntr, "Posting interrupt because of ICS write\n");
491 postInterrupt((IntTypes)val);
492 break;
493 case REG_IMS:
494 regs.imr |= val;
495 chkInterrupt();
496 break;
497 case REG_IMC:
498 regs.imr &= ~val;
499 chkInterrupt();
500 break;
501 case REG_IAM:
502 regs.iam = val;
503 break;
504 case REG_RCTL:
505 oldrctl = regs.rctl;
506 regs.rctl = val;
507 if (regs.rctl.rst()) {
508 rxDescCache.reset();
509 DPRINTF(EthernetSM, "RXS: Got RESET!\n");
510 rxFifo.clear();
511 regs.rctl.rst(0);
512 }
513 if (regs.rctl.en())
514 rxTick = true;
515 restartClock();
516 break;
517 case REG_FCTTV:
518 regs.fcttv = val;
519 break;
520 case REG_TCTL:
521 regs.tctl = val;
522 oldtctl = regs.tctl;
523 regs.tctl = val;
524 if (regs.tctl.en())
525 txTick = true;
526 restartClock();
527 if (regs.tctl.en() && !oldtctl.en()) {
528 txDescCache.reset();
529 }
530 break;
531 case REG_PBA:
532 regs.pba.rxa(val);
533 regs.pba.txa(64 - regs.pba.rxa());
534 break;
535 case REG_WUC:
536 case REG_LEDCTL:
537 case REG_FCAL:
538 case REG_FCAH:
539 case REG_FCT:
540 case REG_VET:
541 case REG_AIFS:
542 case REG_TIPG:
543 ; // We don't care, so don't store anything
544 break;
545 case REG_IVAR0:
546 warn("Writing to IVAR0, ignoring...\n");
547 break;
548 case REG_FCRTL:
549 regs.fcrtl = val;
550 break;
551 case REG_FCRTH:
552 regs.fcrth = val;
553 break;
554 case REG_RDBAL:
555 regs.rdba.rdbal( val & ~mask(4));
556 rxDescCache.areaChanged();
557 break;
558 case REG_RDBAH:
559 regs.rdba.rdbah(val);
560 rxDescCache.areaChanged();
561 break;
562 case REG_RDLEN:
563 regs.rdlen = val & ~mask(7);
564 rxDescCache.areaChanged();
565 break;
566 case REG_SRRCTL:
567 regs.srrctl = val;
568 break;
569 case REG_RDH:
570 regs.rdh = val;
571 rxDescCache.areaChanged();
572 break;
573 case REG_RDT:
574 regs.rdt = val;
575 DPRINTF(EthernetSM, "RXS: RDT Updated.\n");
576 if (getState() == SimObject::Running) {
577 DPRINTF(EthernetSM, "RXS: RDT Fetching Descriptors!\n");
578 rxDescCache.fetchDescriptors();
579 } else {
580 DPRINTF(EthernetSM, "RXS: RDT NOT Fetching Desc b/c draining!\n");
581 }
582 break;
583 case REG_RDTR:
584 regs.rdtr = val;
585 break;
586 case REG_RADV:
587 regs.radv = val;
588 break;
589 case REG_RXDCTL:
590 regs.rxdctl = val;
591 break;
592 case REG_TDBAL:
593 regs.tdba.tdbal( val & ~mask(4));
594 txDescCache.areaChanged();
595 break;
596 case REG_TDBAH:
597 regs.tdba.tdbah(val);
598 txDescCache.areaChanged();
599 break;
600 case REG_TDLEN:
601 regs.tdlen = val & ~mask(7);
602 txDescCache.areaChanged();
603 break;
604 case REG_TDH:
605 regs.tdh = val;
606 txDescCache.areaChanged();
607 break;
608 case REG_TXDCA_CTL:
609 regs.txdca_ctl = val;
610 if (regs.txdca_ctl.enabled())
611 panic("No support for DCA\n");
612 break;
613 case REG_TDT:
614 regs.tdt = val;
615 DPRINTF(EthernetSM, "TXS: TX Tail pointer updated\n");
616 if (getState() == SimObject::Running) {
617 DPRINTF(EthernetSM, "TXS: TDT Fetching Descriptors!\n");
618 txDescCache.fetchDescriptors();
619 } else {
620 DPRINTF(EthernetSM, "TXS: TDT NOT Fetching Desc b/c draining!\n");
621 }
622 break;
623 case REG_TIDV:
624 regs.tidv = val;
625 break;
626 case REG_TXDCTL:
627 regs.txdctl = val;
628 break;
629 case REG_TADV:
630 regs.tadv = val;
631 break;
632 case REG_TDWBAL:
633 regs.tdwba &= ~mask(32);
634 regs.tdwba |= val;
635 txDescCache.completionWriteback(regs.tdwba & ~mask(1), regs.tdwba & mask(1));
636 break;
637 case REG_TDWBAH:
638 regs.tdwba &= mask(32);
639 regs.tdwba |= (uint64_t)val << 32;
640 txDescCache.completionWriteback(regs.tdwba & ~mask(1), regs.tdwba & mask(1));
641 break;
642 case REG_RXCSUM:
643 regs.rxcsum = val;
644 break;
645 case REG_RLPML:
646 regs.rlpml = val;
647 break;
648 case REG_RFCTL:
649 regs.rfctl = val;
650 if (regs.rfctl.exsten())
651 panic("Extended RX descriptors not implemented\n");
652 break;
653 case REG_MANC:
654 regs.manc = val;
655 break;
656 case REG_SWSM:
657 regs.swsm = val;
658 if (regs.fwsm.eep_fw_semaphore())
659 regs.swsm.swesmbi(0);
660 break;
661 case REG_SWFWSYNC:
662 regs.sw_fw_sync = val;
663 break;
664 default:
665 if (!(daddr >= REG_VFTA && daddr < (REG_VFTA + VLAN_FILTER_TABLE_SIZE*4)) &&
666 !(daddr >= REG_RAL && daddr < (REG_RAL + RCV_ADDRESS_TABLE_SIZE*8)) &&
667 !(daddr >= REG_MTA && daddr < (REG_MTA + MULTICAST_TABLE_SIZE*4)))
668 panic("Write request to unknown register number: %#x\n", daddr);
669 };
670
671 pkt->makeAtomicResponse();
672 return pioDelay;
673 }
674
/**
 * Record an interrupt cause and either raise the interrupt immediately or
 * schedule it to honor the ITR (interrupt throttling) minimum interval.
 *
 * @param t the interrupt cause bit(s) to set; must be non-zero.
 * @param now bypass ITR throttling and post immediately.
 */
void
IGbE::postInterrupt(IntTypes t, bool now)
{
    assert(t);

    // Interrupt is already pending
    if (t & regs.icr() && !now)
        return;

    regs.icr = regs.icr() | t;

    // ITR interval is expressed in 256ns increments.
    Tick itr_interval = Clock::Int::ns * 256 * regs.itr.interval();
    DPRINTF(EthernetIntr, "EINT: postInterrupt() curTick: %d itr: %d interval: %d\n",
            curTick, regs.itr.interval(), itr_interval);

    if (regs.itr.interval() == 0 || now || lastInterrupt + itr_interval <= curTick) {
        // Throttling disabled, forced, or enough time has passed: fire now
        // and cancel any pending delayed posting.
        if (interEvent.scheduled()) {
            deschedule(interEvent);
        }
        cpuPostInt();
    } else {
        // Too soon since the last interrupt; defer to the throttle boundary.
        Tick int_time = lastInterrupt + itr_interval;
        assert(int_time > 0);
        DPRINTF(EthernetIntr, "EINT: Scheduling timer interrupt for tick %d\n",
                int_time);
        if (!interEvent.scheduled()) {
            schedule(interEvent, int_time);
        }
    }
}
705
/**
 * Callback for the delayed-interrupt event scheduled by postInterrupt();
 * simply posts the interrupt to the CPU.
 */
void
IGbE::delayIntEvent()
{
    cpuPostInt();
}
711
712
/**
 * Actually assert the interrupt line to the CPU, folding any pending
 * delayed-timer causes (RDTR/RADV/TADV/TIDV) into ICR first and cancelling
 * their events.  Does nothing if every pending cause is masked by IMR.
 */
void
IGbE::cpuPostInt()
{

    // Counted even when the interrupt turns out to be masked below.
    postedInterrupts++;

    if (!(regs.icr() & regs.imr)) {
        DPRINTF(Ethernet, "Interrupt Masked. Not Posting\n");
        return;
    }

    DPRINTF(Ethernet, "Posting Interrupt\n");


    // A pending throttled posting is superseded by this one.
    if (interEvent.scheduled()) {
        deschedule(interEvent);
    }

    // Fold any armed delay-timer interrupts into this posting so the driver
    // sees their causes now rather than via a second interrupt later.
    if (rdtrEvent.scheduled()) {
        regs.icr.rxt0(1);
        deschedule(rdtrEvent);
    }
    if (radvEvent.scheduled()) {
        regs.icr.rxt0(1);
        deschedule(radvEvent);
    }
    if (tadvEvent.scheduled()) {
        regs.icr.txdw(1);
        deschedule(tadvEvent);
    }
    if (tidvEvent.scheduled()) {
        regs.icr.txdw(1);
        deschedule(tidvEvent);
    }

    regs.icr.int_assert(1);
    DPRINTF(EthernetIntr, "EINT: Posting interrupt to CPU now. Vector %#x\n",
            regs.icr());

    intrPost();

    // Remember when we fired for ITR throttling in postInterrupt().
    lastInterrupt = curTick;
}
756
757 void
758 IGbE::cpuClearInt()
759 {
760 if (regs.icr.int_assert()) {
761 regs.icr.int_assert(0);
762 DPRINTF(EthernetIntr, "EINT: Clearing interrupt to CPU now. Vector %#x\n",
763 regs.icr());
764 intrClear();
765 }
766 }
767
/**
 * Re-evaluate the interrupt state after ICR/IMR changes: clear the CPU
 * interrupt if everything pending is now masked, or (re)post / schedule an
 * interrupt per the ITR throttle if unmasked causes remain.
 */
void
IGbE::chkInterrupt()
{
    DPRINTF(Ethernet, "Checking interrupts icr: %#x imr: %#x\n", regs.icr(),
            regs.imr);
    // Check if we need to clear the cpu interrupt
    if (!(regs.icr() & regs.imr)) {
        DPRINTF(Ethernet, "Mask cleaned all interrupts\n");
        if (interEvent.scheduled())
            deschedule(interEvent);
        if (regs.icr.int_assert())
            cpuClearInt();
    }
    DPRINTF(Ethernet, "ITR = %#X itr.interval = %#X\n", regs.itr(), regs.itr.interval());

    if (regs.icr() & regs.imr) {
        if (regs.itr.interval() == 0) {
            // No throttling configured: post straight away.
            cpuPostInt();
        } else {
            DPRINTF(Ethernet, "Possibly scheduling interrupt because of imr write\n");
            if (!interEvent.scheduled()) {
                // ITR interval is in 256ns units.
                DPRINTF(Ethernet, "Scheduling for %d\n", curTick + Clock::Int::ns
                        * 256 * regs.itr.interval());
                schedule(interEvent,
                         curTick + Clock::Int::ns * 256 * regs.itr.interval());
            }
        }
    }


}
799
800
/**
 * Construct the receive descriptor cache.
 *
 * @param i owning device.
 * @param n object name for the cache.
 * @param s number of descriptors the cache holds.
 */
IGbE::RxDescCache::RxDescCache(IGbE *i, const std::string n, int s)
    : DescCache<RxDesc>(i, n, s), pktDone(false), splitCount(0),
      pktEvent(this), pktHdrEvent(this), pktDataEvent(this)

{
    // Annotation strings used by the CPA queue/state tracking.
    annSmFetch = "RX Desc Fetch";
    annSmWb = "RX Desc Writeback";
    annUnusedDescQ = "RX Unused Descriptors";
    annUnusedCacheQ = "RX Unused Descriptor Cache";
    annUsedCacheQ = "RX Used Descriptor Cache";
    annUsedDescQ = "RX Used Descriptors";
    annDescQ = "RX Descriptors";
}
814
815 void
816 IGbE::RxDescCache::pktSplitDone()
817 {
818 splitCount++;
819 DPRINTF(EthernetDesc, "Part of split packet done: splitcount now %d\n", splitCount);
820 assert(splitCount <= 2);
821 if (splitCount != 2)
822 return;
823 splitCount = 0;
824 DPRINTF(EthernetDesc, "Part of split packet done: calling pktComplete()\n");
825 pktComplete();
826 }
827
/**
 * DMA (part of) a received packet into the buffer(s) named by the next
 * unused receive descriptor.  Supports legacy, advanced one-buffer and
 * advanced header-split descriptor formats.
 *
 * @param packet the received frame.
 * @param pkt_offset offset into the frame already copied by earlier
 *        descriptors (non-zero only for header-split continuation).
 * @return total bytes of this frame copied so far (bytesCopied).
 */
int
IGbE::RxDescCache::writePacket(EthPacketPtr packet, int pkt_offset)
{
    assert(unusedCache.size());
    //if (!unusedCache.size())
    //    return false;

    pktPtr = packet;
    pktDone = false;
    int buf_len, hdr_len;

    RxDesc *desc = unusedCache.front();
    switch (igbe->regs.srrctl.desctype()) {
      case RXDT_LEGACY:
        // One buffer per descriptor; whole frame must fit.
        assert(pkt_offset == 0);
        bytesCopied = packet->length;
        DPRINTF(EthernetDesc, "Packet Length: %d Desc Size: %d\n",
                packet->length, igbe->regs.rctl.descSize());
        assert(packet->length < igbe->regs.rctl.descSize());
        igbe->dmaWrite(igbe->platform->pciToDma(desc->legacy.buf), packet->length, &pktEvent,
                       packet->data, igbe->rxWriteDelay);
        break;
      case RXDT_ADV_ONEBUF:
        // Advanced descriptor, single data buffer, no header split.
        assert(pkt_offset == 0);
        bytesCopied = packet->length;
        buf_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.bufLen() :
            igbe->regs.rctl.descSize();
        DPRINTF(EthernetDesc, "Packet Length: %d srrctl: %#x Desc Size: %d\n",
                packet->length, igbe->regs.srrctl(), buf_len);
        assert(packet->length < buf_len);
        igbe->dmaWrite(igbe->platform->pciToDma(desc->adv_read.pkt), packet->length, &pktEvent,
                       packet->data, igbe->rxWriteDelay);
        desc->adv_wb.header_len = htole(0);
        desc->adv_wb.sph = htole(0);
        desc->adv_wb.pkt_len = htole((uint16_t)(pktPtr->length));
        break;
      case RXDT_ADV_SPLIT_A:
        // Advanced descriptor with header splitting: protocol headers go in
        // the header buffer, the remaining payload in the data buffer.
        int split_point;

        buf_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.bufLen() :
            igbe->regs.rctl.descSize();
        hdr_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.hdrLen() : 0;
        DPRINTF(EthernetDesc, "lpe: %d Packet Length: %d offset: %d srrctl: %#x hdr addr: %#x Hdr Size: %d desc addr: %#x Desc Size: %d\n",
                igbe->regs.rctl.lpe(), packet->length, pkt_offset, igbe->regs.srrctl(), desc->adv_read.hdr, hdr_len, desc->adv_read.pkt, buf_len);

        split_point = hsplit(pktPtr);

        if (packet->length <= hdr_len) {
            // Entire frame fits in the header buffer.
            bytesCopied = packet->length;
            assert(pkt_offset == 0);
            DPRINTF(EthernetDesc, "Header Splitting: Entire packet being placed in header\n");
            igbe->dmaWrite(igbe->platform->pciToDma(desc->adv_read.hdr), packet->length, &pktEvent,
                           packet->data, igbe->rxWriteDelay);
            desc->adv_wb.header_len = htole((uint16_t)packet->length);
            desc->adv_wb.sph = htole(0);
            desc->adv_wb.pkt_len = htole(0);
        } else if (split_point) {
            if (pkt_offset) {
                // we are only copying some data, header/data has already been
                // copied
                int max_to_copy = std::min(packet->length - pkt_offset, buf_len);
                bytesCopied += max_to_copy;
                DPRINTF(EthernetDesc, "Header Splitting: Continuing data buffer copy\n");
                igbe->dmaWrite(igbe->platform->pciToDma(desc->adv_read.pkt),max_to_copy, &pktEvent,
                               packet->data + pkt_offset, igbe->rxWriteDelay);
                desc->adv_wb.header_len = htole(0);
                desc->adv_wb.pkt_len = htole((uint16_t)max_to_copy);
                desc->adv_wb.sph = htole(0);
            } else {
                // First descriptor: DMA the header and as much payload as
                // fits, each into its own buffer; completion needs both
                // DMAs (pktHdrEvent + pktDataEvent -> pktSplitDone).
                int max_to_copy = std::min(packet->length - split_point, buf_len);
                bytesCopied += max_to_copy + split_point;

                DPRINTF(EthernetDesc, "Header Splitting: splitting at %d\n",
                        split_point);
                igbe->dmaWrite(igbe->platform->pciToDma(desc->adv_read.hdr), split_point, &pktHdrEvent,
                               packet->data, igbe->rxWriteDelay);
                igbe->dmaWrite(igbe->platform->pciToDma(desc->adv_read.pkt),
                               max_to_copy, &pktDataEvent, packet->data + split_point, igbe->rxWriteDelay);
                desc->adv_wb.header_len = htole(split_point);
                // NOTE(review): unlike the other fields, sph is set without
                // htole() here — harmless on little-endian hosts, but looks
                // inconsistent; confirm intended.
                desc->adv_wb.sph = 1;
                desc->adv_wb.pkt_len = htole((uint16_t)(max_to_copy));
            }
        } else {
            panic("Header split not fitting within header buffer or undecodable"
                  " packet not fitting in header unsupported\n");
        }
        break;
      default:
        panic("Unimplemnted RX receive buffer type: %d\n",
              igbe->regs.srrctl.desctype());
    }
    return bytesCopied;

}
922
/**
 * Called once a packet (or packet fragment) has been fully DMA'd into the
 * buffers of the current descriptor.  Performs checksum offload checks,
 * writes the completion fields back into the descriptor, arms/posts the
 * receive delay-timer interrupts when the whole frame is done, and moves
 * the descriptor from the unused to the used cache.
 */
void
IGbE::RxDescCache::pktComplete()
{
    assert(unusedCache.size());
    RxDesc *desc;
    desc = unusedCache.front();

    igbe->anBegin("RXS", "Update Desc");

    // Hardware normally strips the 4-byte CRC; if SECRC is clear the
    // reported length must include it.
    uint16_t crcfixup = igbe->regs.rctl.secrc() ? 0 : 4 ;
    DPRINTF(EthernetDesc, "pktPtr->length: %d bytesCopied: %d stripcrc offset: %d value written: %d %d\n",
            pktPtr->length, bytesCopied, crcfixup,
            htole((uint16_t)(pktPtr->length + crcfixup)),
            (uint16_t)(pktPtr->length + crcfixup));

    // no support for anything but starting at 0
    assert(igbe->regs.rxcsum.pcss() == 0);

    DPRINTF(EthernetDesc, "Packet written to memory updating Descriptor\n");

    uint16_t status = RXDS_DD;
    uint8_t err = 0;
    uint16_t ext_err = 0;
    uint16_t csum = 0;
    uint16_t ptype = 0;
    uint16_t ip_id = 0;

    // EOP is only set on the descriptor that holds the last byte of the
    // frame (header-split frames may span several descriptors).
    assert(bytesCopied <= pktPtr->length);
    if (bytesCopied == pktPtr->length)
        status |= RXDS_EOP;

    IpPtr ip(pktPtr);

    if (ip) {
        DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n", ip->id());
        ptype |= RXDP_IPV4;
        ip_id = ip->id();

        // IP checksum offload: verify and report errors via the descriptor.
        if (igbe->regs.rxcsum.ipofld()) {
            DPRINTF(EthernetDesc, "Checking IP checksum\n");
            status |= RXDS_IPCS;
            csum = htole(cksum(ip));
            igbe->rxIpChecksums++;
            if (cksum(ip) != 0) {
                err |= RXDE_IPE;
                ext_err |= RXDEE_IPE;
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
            }
        }
        TcpPtr tcp(ip);
        if (tcp && igbe->regs.rxcsum.tuofld()) {
            DPRINTF(EthernetDesc, "Checking TCP checksum\n");
            status |= RXDS_TCPCS;
            ptype |= RXDP_TCP;
            csum = htole(cksum(tcp));
            igbe->rxTcpChecksums++;
            if (cksum(tcp) != 0) {
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
                err |= RXDE_TCPE;
                ext_err |= RXDEE_TCPE;
            }
        }

        UdpPtr udp(ip);
        if (udp && igbe->regs.rxcsum.tuofld()) {
            DPRINTF(EthernetDesc, "Checking UDP checksum\n");
            status |= RXDS_UDPCS;
            ptype |= RXDP_UDP;
            csum = htole(cksum(udp));
            igbe->rxUdpChecksums++;
            if (cksum(udp) != 0) {
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
                // UDP errors are reported with the TCP error bits here —
                // NOTE(review): presumably intentional (shared TU error
                // bit), but worth confirming against the 8254x manual.
                ext_err |= RXDEE_TCPE;
                err |= RXDE_TCPE;
            }
        }
    } else { // if ip
        DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n");
    }

    // Write the completion/status fields in the layout the descriptor
    // format requires.
    switch (igbe->regs.srrctl.desctype()) {
      case RXDT_LEGACY:
        desc->legacy.len = htole((uint16_t)(pktPtr->length + crcfixup));
        desc->legacy.status = htole(status);
        desc->legacy.errors = htole(err);
        // No vlan support at this point... just set it to 0
        desc->legacy.vlan = 0;
        break;
      case RXDT_ADV_SPLIT_A:
      case RXDT_ADV_ONEBUF:
        desc->adv_wb.rss_type = htole(0);
        desc->adv_wb.pkt_type = htole(ptype);
        if (igbe->regs.rxcsum.pcsd()) {
            // no rss support right now
            desc->adv_wb.rss_hash = htole(0);
        } else {
            desc->adv_wb.id = htole(ip_id);
            desc->adv_wb.csum = htole(csum);
        }
        desc->adv_wb.status = htole(status);
        desc->adv_wb.errors = htole(ext_err);
        // no vlan support
        desc->adv_wb.vlan_tag = htole(0);
        break;
      default:
        panic("Unimplemnted RX receive buffer type %d\n",
              igbe->regs.srrctl.desctype());
    }

    DPRINTF(EthernetDesc, "Descriptor complete w0: %#x w1: %#x\n",
            desc->adv_read.pkt, desc->adv_read.hdr);

    if (bytesCopied == pktPtr->length) {
        DPRINTF(EthernetDesc, "Packet completely written to descriptor buffers\n");
        // Deal with the rx timer interrupts
        if (igbe->regs.rdtr.delay()) {
            DPRINTF(EthernetSM, "RXS: Scheduling DTR for %d\n",
                    igbe->regs.rdtr.delay() * igbe->intClock());
            igbe->reschedule(igbe->rdtrEvent,
                             curTick + igbe->regs.rdtr.delay() * igbe->intClock(), true);
        }

        if (igbe->regs.radv.idv()) {
            DPRINTF(EthernetSM, "RXS: Scheduling ADV for %d\n",
                    igbe->regs.radv.idv() * igbe->intClock());
            if (!igbe->radvEvent.scheduled()) {
                igbe->schedule(igbe->radvEvent,
                               curTick + igbe->regs.radv.idv() * igbe->intClock());
            }
        }

        // if neither radv or rdtr, maybe itr is set...
        if (!igbe->regs.rdtr.delay() && !igbe->regs.radv.idv()) {
            DPRINTF(EthernetSM, "RXS: Receive interrupt delay disabled, posting IT_RXT\n");
            igbe->postInterrupt(IT_RXT);
        }

        // If the packet is small enough, interrupt appropriately
        // I wonder if this is delayed or not?!
        if (pktPtr->length <= igbe->regs.rsrpd.idv()) {
            DPRINTF(EthernetSM, "RXS: Posting IT_SRPD beacuse small packet received\n");
            igbe->postInterrupt(IT_SRPD);
        }
        bytesCopied = 0;
    }

    pktPtr = NULL;
    igbe->checkDrain();
    enableSm();
    pktDone = true;

    // Retire the descriptor: move it from the unused to the used cache so
    // it gets written back to memory.
    igbe->anBegin("RXS", "Done Updating Desc");
    DPRINTF(EthernetDesc, "Processing of this descriptor complete\n");
    igbe->anDq("RXS", annUnusedCacheQ);
    unusedCache.pop_front();
    igbe->anQ("RXS", annUsedCacheQ);
    usedCache.push_back(desc);
}
1081
1082 void
1083 IGbE::RxDescCache::enableSm()
1084 {
1085 if (!igbe->drainEvent) {
1086 igbe->rxTick = true;
1087 igbe->restartClock();
1088 }
1089 }
1090
1091 bool
1092 IGbE::RxDescCache::packetDone()
1093 {
1094 if (pktDone) {
1095 pktDone = false;
1096 return true;
1097 }
1098 return false;
1099 }
1100
1101 bool
1102 IGbE::RxDescCache::hasOutstandingEvents()
1103 {
1104 return pktEvent.scheduled() || wbEvent.scheduled() ||
1105 fetchEvent.scheduled() || pktHdrEvent.scheduled() ||
1106 pktDataEvent.scheduled();
1107
1108 }
1109
/**
 * Checkpoint the RX descriptor cache: base-class state plus the
 * packet-completion flag, split-DMA counter and partial-copy progress.
 */
void
IGbE::RxDescCache::serialize(std::ostream &os)
{
    DescCache<RxDesc>::serialize(os);
    SERIALIZE_SCALAR(pktDone);
    SERIALIZE_SCALAR(splitCount);
    SERIALIZE_SCALAR(bytesCopied);
}
1118
/**
 * Restore the RX descriptor cache from a checkpoint; mirror of serialize().
 */
void
IGbE::RxDescCache::unserialize(Checkpoint *cp, const std::string &section)
{
    DescCache<RxDesc>::unserialize(cp, section);
    UNSERIALIZE_SCALAR(pktDone);
    UNSERIALIZE_SCALAR(splitCount);
    UNSERIALIZE_SCALAR(bytesCopied);
}
1127
1128
1129 ///////////////////////////////////// IGbE::TxDesc /////////////////////////////////
1130
1131 IGbE::TxDescCache::TxDescCache(IGbE *i, const std::string n, int s)
1132 : DescCache<TxDesc>(i,n, s), pktDone(false), isTcp(false), pktWaiting(false),
1133 completionAddress(0), completionEnabled(false),
1134 useTso(false), pktEvent(this), headerEvent(this), nullEvent(this)
1135
1136 {
1137 annSmFetch = "TX Desc Fetch";
1138 annSmWb = "TX Desc Writeback";
1139 annUnusedDescQ = "TX Unused Descriptors";
1140 annUnusedCacheQ = "TX Unused Descriptor Cache";
1141 annUsedCacheQ = "TX Used Descriptor Cache";
1142 annUsedDescQ = "TX Used Descriptors";
1143 annDescQ = "TX Descriptors";
1144 }
1145
void
IGbE::TxDescCache::processContextDesc()
{
    // Consume any leading context descriptors (which carry checksum/TSO
    // setup rather than packet data), then, if a TSO transmission is
    // starting, kick off the DMA of the packet header.
    assert(unusedCache.size());
    TxDesc *desc;

    DPRINTF(EthernetDesc, "Checking and processing context descriptors\n");

    // Drain context descriptors from the front of the cache. Stop once TSO
    // is active or a non-context descriptor is reached.
    while (!useTso && unusedCache.size() && TxdOp::isContext(unusedCache.front())) {
        DPRINTF(EthernetDesc, "Got context descriptor type...\n");

        desc = unusedCache.front();
        DPRINTF(EthernetDesc, "Descriptor upper: %#x lower: %#X\n",
                desc->d1, desc->d2);


        // is this going to be a tcp or udp packet?
        isTcp = TxdOp::tcp(desc) ? true : false;

        // setup all the TSO variables, they'll be ignored if we don't use
        // tso for this connection
        tsoHeaderLen = TxdOp::hdrlen(desc);
        tsoMss = TxdOp::mss(desc);

        // A context descriptor with TSE set starts a (legacy-style) TSO
        // transfer: reset all per-transfer TSO bookkeeping.
        if (TxdOp::isType(desc, TxdOp::TXD_CNXT) && TxdOp::tse(desc)) {
            DPRINTF(EthernetDesc, "TCP offload enabled for packet hdrlen: %d mss: %d paylen %d\n",
                    TxdOp::hdrlen(desc), TxdOp::mss(desc), TxdOp::getLen(desc));
            useTso = true;
            tsoTotalLen = TxdOp::getLen(desc);
            tsoLoadedHeader = false;
            tsoDescBytesUsed = 0;
            tsoUsedLen = 0;
            tsoPrevSeq = 0;
            tsoPktHasHeader = false;
            tsoPkts = 0;

        }

        // Context descriptors are complete immediately: mark done and move
        // them to the used queue for writeback.
        TxdOp::setDd(desc);
        unusedCache.pop_front();
        igbe->anDq("TXS", annUnusedCacheQ);
        usedCache.push_back(desc);
        igbe->anQ("TXS", annUsedCacheQ);
    }

    if (!unusedCache.size())
        return;

    desc = unusedCache.front();
    // Advanced data descriptors can also request TSO directly via TSE.
    if (!useTso && TxdOp::isType(desc, TxdOp::TXD_ADVDATA) && TxdOp::tse(desc)) {
        DPRINTF(EthernetDesc, "TCP offload(adv) enabled for packet hdrlen: %d mss: %d paylen %d\n",
                tsoHeaderLen, tsoMss, TxdOp::getTsoLen(desc));
        useTso = true;
        tsoTotalLen = TxdOp::getTsoLen(desc);
        tsoLoadedHeader = false;
        tsoDescBytesUsed = 0;
        tsoUsedLen = 0;
        tsoPrevSeq = 0;
        tsoPktHasHeader = false;
        tsoPkts = 0;
    }

    // For TSO the (replicated) packet header must be fetched first; while
    // the DMA is in flight pktWaiting stalls the TX state machine.
    if (useTso && !tsoLoadedHeader) {
        // we need to fetch a header
        DPRINTF(EthernetDesc, "Starting DMA of TSO header\n");
        assert(TxdOp::isData(desc) && TxdOp::getLen(desc) >= tsoHeaderLen);
        pktWaiting = true;
        assert(tsoHeaderLen <= 256);  // tsoHeader buffer is 256 bytes (see serialize)
        igbe->dmaRead(igbe->platform->pciToDma(TxdOp::getBuf(desc)),
                tsoHeaderLen, &headerEvent, tsoHeader, 0);
    }
}
1218
void
IGbE::TxDescCache::headerComplete()
{
    // DMA completion callback for the TSO header fetch started in
    // processContextDesc(); retires the header descriptor and restarts
    // the TX state machine.
    DPRINTF(EthernetDesc, "TSO: Fetching TSO header complete\n");
    pktWaiting = false;

    assert(unusedCache.size());
    TxDesc *desc = unusedCache.front();
    DPRINTF(EthernetDesc, "TSO: len: %d tsoHeaderLen: %d\n",
            TxdOp::getLen(desc), tsoHeaderLen);

    if (TxdOp::getLen(desc) == tsoHeaderLen) {
        // The descriptor covered exactly the header: retire it.
        tsoDescBytesUsed = 0;
        tsoLoadedHeader = true;
        unusedCache.pop_front();
        usedCache.push_back(desc);
    } else {
        // I don't think this case happens, I think the header is always
        // its own packet, if it wasn't it might be as simple as just
        // incrementing descBytesUsed by the header length, but I'm not
        // completely sure
        panic("TSO header part of bigger packet, not implemented\n");
    }
    enableSm();
    igbe->checkDrain();
}
1245
int
IGbE::TxDescCache::getPacketSize(EthPacketPtr p)
{
    // Return the number of bytes the next DMA will add to packet p, or -1
    // if no descriptor is available. For TSO this also computes
    // tsoCopyBytes, which getPacketData()/pktComplete() consume.
    TxDesc *desc;


    if (!unusedCache.size())
        return -1;

    DPRINTF(EthernetDesc, "Starting processing of descriptor\n");

    assert(!useTso || tsoLoadedHeader);
    desc = unusedCache.front();


    if (useTso) {
        DPRINTF(EthernetDesc, "getPacket(): TxDescriptor data d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
        DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %d mss: %d total: %d used: %d loaded hdr: %d\n",
                useTso, tsoHeaderLen, tsoMss, tsoTotalLen, tsoUsedLen, tsoLoadedHeader);
        DPRINTF(EthernetDesc, "TSO: descBytesUsed: %d copyBytes: %d this descLen: %d\n",
                tsoDescBytesUsed, tsoCopyBytes, TxdOp::getLen(desc));
        DPRINTF(EthernetDesc, "TSO: pktHasHeader: %d\n", tsoPktHasHeader);

        // Copy up to the segment limit (MSS, plus header if the packet
        // already contains one), bounded by what remains in this
        // descriptor's buffer.
        if (tsoPktHasHeader)
            tsoCopyBytes = std::min((tsoMss + tsoHeaderLen) - p->length, TxdOp::getLen(desc) - tsoDescBytesUsed);
        else
            tsoCopyBytes = std::min(tsoMss, TxdOp::getLen(desc) - tsoDescBytesUsed);
        // If the header still needs to be prepended it counts toward the
        // reported size.
        Addr pkt_size = tsoCopyBytes + (tsoPktHasHeader ? 0 : tsoHeaderLen);
        DPRINTF(EthernetDesc, "TSO: Next packet is %d bytes\n", pkt_size);
        return pkt_size;
    }

    // Non-TSO: the whole descriptor buffer is DMA'd in one go.
    DPRINTF(EthernetDesc, "Next TX packet is %d bytes\n",
            TxdOp::getLen(unusedCache.front()));
    return TxdOp::getLen(desc);
}
1282
void
IGbE::TxDescCache::getPacketData(EthPacketPtr p)
{
    // Start the DMA that appends the next chunk of descriptor data to
    // packet p; pktComplete() runs when the DMA finishes. Must be called
    // after getPacketSize() (which sets tsoCopyBytes in the TSO case).
    assert(unusedCache.size());

    TxDesc *desc;
    desc = unusedCache.front();

    DPRINTF(EthernetDesc, "getPacketData(): TxDescriptor data d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
    assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) && TxdOp::getLen(desc));

    pktPtr = p;

    // Stall the TX state machine until the DMA completes.
    pktWaiting = true;

    DPRINTF(EthernetDesc, "Starting DMA of packet at offset %d\n", p->length);

    if (useTso) {
        assert(tsoLoadedHeader);
        // Each TSO segment starts with a copy of the previously fetched
        // header.
        if (!tsoPktHasHeader) {
            DPRINTF(EthernetDesc, "Loading TSO header (%d bytes) into start of packet\n",
                    tsoHeaderLen);
            memcpy(p->data, &tsoHeader,tsoHeaderLen);
            p->length +=tsoHeaderLen;
            tsoPktHasHeader = true;
        }
    }

    if (useTso) {
        // Advance within the descriptor's buffer by the bytes already
        // consumed, reading only tsoCopyBytes this time.
        tsoDescBytesUsed += tsoCopyBytes;
        assert(tsoDescBytesUsed <= TxdOp::getLen(desc));
        DPRINTF(EthernetDesc, "Starting DMA of packet at offset %d length: %d\n",
                p->length, tsoCopyBytes);
        igbe->dmaRead(igbe->platform->pciToDma(TxdOp::getBuf(desc)) + tsoDescBytesUsed,
                tsoCopyBytes, &pktEvent, p->data + p->length, igbe->txReadDelay);
    } else {
        // Non-TSO: read the entire descriptor buffer.
        igbe->dmaRead(igbe->platform->pciToDma(TxdOp::getBuf(desc)),
                TxdOp::getLen(desc), &pktEvent, p->data + p->length, igbe->txReadDelay);
    }
}
1323
void
IGbE::TxDescCache::pktComplete()
{
    // DMA completion callback for getPacketData(): account the copied
    // bytes, handle partial (multi-descriptor / TSO) packets, patch
    // headers and checksums as requested by the descriptor, schedule
    // delayed interrupts, and decide when to write descriptors back.

    TxDesc *desc;
    assert(unusedCache.size());
    assert(pktPtr);

    igbe->anBegin("TXS", "Update Desc");

    DPRINTF(EthernetDesc, "DMA of packet complete\n");


    desc = unusedCache.front();
    assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) && TxdOp::getLen(desc));

    DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
    DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %d mss: %d total: %d used: %d loaded hdr: %d\n",
            useTso, tsoHeaderLen, tsoMss, tsoTotalLen, tsoUsedLen, tsoLoadedHeader);

    // Set the length of the data in the EtherPacket
    if (useTso) {
        pktPtr->length += tsoCopyBytes;
        tsoUsedLen += tsoCopyBytes;
    } else
        pktPtr->length += TxdOp::getLen(desc);

    DPRINTF(EthernetDesc, "TSO: descBytesUsed: %d copyBytes: %d\n",
            tsoDescBytesUsed, tsoCopyBytes);


    // Packet not finished yet: either a non-TSO descriptor without EOP,
    // or a TSO segment that hasn't reached its MSS and the transfer isn't
    // done. Retire the (fully consumed) descriptor and return early; the
    // state machine will continue the packet on the next descriptor.
    if ((!TxdOp::eop(desc) && !useTso) ||
        (pktPtr->length < ( tsoMss + tsoHeaderLen) &&
         tsoTotalLen != tsoUsedLen && useTso)) {
        assert(!useTso || (tsoDescBytesUsed == TxdOp::getLen(desc)));
        igbe->anDq("TXS", annUnusedCacheQ);
        unusedCache.pop_front();
        igbe->anQ("TXS", annUsedCacheQ);
        usedCache.push_back(desc);

        tsoDescBytesUsed = 0;
        pktDone = true;
        pktWaiting = false;
        pktMultiDesc = true;

        DPRINTF(EthernetDesc, "Partial Packet Descriptor of %d bytes Done\n",
                pktPtr->length);
        pktPtr = NULL;

        enableSm();
        igbe->checkDrain();
        return;
    }


    pktMultiDesc = false;
    // no support for vlans
    assert(!TxdOp::vle(desc));

    // we only support single packet descriptors at this point
    if (!useTso)
        assert(TxdOp::eop(desc));

    // set that this packet is done
    if (TxdOp::rs(desc))
        TxdOp::setDd(desc);

    DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n", desc->d1, desc->d2);

    // TSO segmentation: fix up the IP id/length and TCP sequence number
    // of this segment, and clear FIN/PSH on all but the last segment.
    if (useTso) {
        IpPtr ip(pktPtr);
        if (ip) {
            DPRINTF(EthernetDesc, "TSO: Modifying IP header. Id + %d\n",
                    tsoPkts);
            ip->id(ip->id() + tsoPkts++);
            ip->len(pktPtr->length - EthPtr(pktPtr)->size());

            TcpPtr tcp(ip);
            if (tcp) {
                DPRINTF(EthernetDesc, "TSO: Modifying TCP header. old seq %d + %d\n",
                        tcp->seq(), tsoPrevSeq);
                tcp->seq(tcp->seq() + tsoPrevSeq);
                if (tsoUsedLen != tsoTotalLen)
                    tcp->flags(tcp->flags() & ~9); // clear fin & psh
            }
            UdpPtr udp(ip);
            if (udp) {
                DPRINTF(EthernetDesc, "TSO: Modifying UDP header.\n");
                udp->len(pktPtr->length - EthPtr(pktPtr)->size());
            }
        }
        // Next segment's sequence number is offset by the payload sent
        // so far.
        tsoPrevSeq = tsoUsedLen;
    }

    if (DTRACE(EthernetDesc)) {
        IpPtr ip(pktPtr);
        if (ip)
            DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n",
                    ip->id());
        else
            DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n");
    }

    // Checksums are only ofloaded for new descriptor types
    if (TxdOp::isData(desc) && ( TxdOp::ixsm(desc) || TxdOp::txsm(desc)) ) {
        DPRINTF(EthernetDesc, "Calculating checksums for packet\n");
        IpPtr ip(pktPtr);
        assert(ip);
        if (TxdOp::ixsm(desc)) {
            // IP header checksum: zero the field before recomputing.
            ip->sum(0);
            ip->sum(cksum(ip));
            igbe->txIpChecksums++;
            DPRINTF(EthernetDesc, "Calculated IP checksum\n");
        }
        if (TxdOp::txsm(desc)) {
            TcpPtr tcp(ip);
            UdpPtr udp(ip);
            if (tcp) {
                tcp->sum(0);
                tcp->sum(cksum(tcp));
                igbe->txTcpChecksums++;
                DPRINTF(EthernetDesc, "Calculated TCP checksum\n");
            } else if (udp) {
                assert(udp);
                udp->sum(0);
                udp->sum(cksum(udp));
                igbe->txUdpChecksums++;
                DPRINTF(EthernetDesc, "Calculated UDP checksum\n");
            } else {
                panic("Told to checksum, but don't know how\n");
            }
        }
    }

    // IDE: delay the TX interrupt using the TIDV (and optionally TADV)
    // timers instead of posting it immediately.
    if (TxdOp::ide(desc)) {
        // Deal with the rx timer interrupts
        DPRINTF(EthernetDesc, "Descriptor had IDE set\n");
        if (igbe->regs.tidv.idv()) {
            DPRINTF(EthernetDesc, "setting tidv\n");
            igbe->reschedule(igbe->tidvEvent,
                curTick + igbe->regs.tidv.idv() * igbe->intClock(), true);
        }

        if (igbe->regs.tadv.idv() && igbe->regs.tidv.idv()) {
            DPRINTF(EthernetDesc, "setting tadv\n");
            if (!igbe->tadvEvent.scheduled()) {
                igbe->schedule(igbe->tadvEvent,
                    curTick + igbe->regs.tadv.idv() * igbe->intClock());
            }
        }
    }


    // Retire the descriptor unless TSO will take more segments from it.
    if (!useTso || TxdOp::getLen(desc) == tsoDescBytesUsed) {
        DPRINTF(EthernetDesc, "Descriptor Done\n");
        igbe->anDq("TXS", annUnusedCacheQ);
        unusedCache.pop_front();
        igbe->anQ("TXS", annUsedCacheQ);
        usedCache.push_back(desc);
        tsoDescBytesUsed = 0;
    }

    // End of the whole TSO transfer.
    if (useTso && tsoUsedLen == tsoTotalLen)
        useTso = false;


    DPRINTF(EthernetDesc, "------Packet of %d bytes ready for transmission-------\n",
            pktPtr->length);
    pktDone = true;
    pktWaiting = false;
    pktPtr = NULL;
    tsoPktHasHeader = false;

    // Writeback policy per TXDCTL: immediately if WTHRESH is 0, otherwise
    // once enough used descriptors accumulate (block or descriptor
    // granularity).
    if (igbe->regs.txdctl.wthresh() == 0) {
        igbe->anBegin("TXS", "Desc Writeback");
        DPRINTF(EthernetDesc, "WTHRESH == 0, writing back descriptor\n");
        writeback(0);
    } else if (igbe->regs.txdctl.gran() && igbe->regs.txdctl.wthresh() >=
               descInBlock(usedCache.size())) {
        DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n");
        igbe->anBegin("TXS", "Desc Writeback");
        writeback((igbe->cacheBlockSize()-1)>>4);
    } else if (igbe->regs.txdctl.wthresh() >= usedCache.size()) {
        DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n");
        igbe->anBegin("TXS", "Desc Writeback");
        writeback((igbe->cacheBlockSize()-1)>>4);
    }

    enableSm();
    igbe->checkDrain();
}
1515
1516 void
1517 IGbE::TxDescCache::actionAfterWb()
1518 {
1519 DPRINTF(EthernetDesc, "actionAfterWb() completionEnabled: %d\n",
1520 completionEnabled);
1521 igbe->postInterrupt(iGbReg::IT_TXDW);
1522 if (completionEnabled) {
1523 descEnd = igbe->regs.tdh();
1524 DPRINTF(EthernetDesc, "Completion writing back value: %d to addr: %#x\n", descEnd,
1525 completionAddress);
1526 igbe->dmaWrite(igbe->platform->pciToDma(mbits(completionAddress, 63, 2)),
1527 sizeof(descEnd), &nullEvent, (uint8_t*)&descEnd, 0);
1528 }
1529 }
1530
1531 void
1532 IGbE::TxDescCache::serialize(std::ostream &os)
1533 {
1534 DescCache<TxDesc>::serialize(os);
1535 SERIALIZE_SCALAR(pktDone);
1536 SERIALIZE_SCALAR(isTcp);
1537 SERIALIZE_SCALAR(pktWaiting);
1538 SERIALIZE_SCALAR(pktMultiDesc);
1539
1540 SERIALIZE_SCALAR(useTso);
1541 SERIALIZE_SCALAR(tsoHeaderLen);
1542 SERIALIZE_SCALAR(tsoMss);
1543 SERIALIZE_SCALAR(tsoTotalLen);
1544 SERIALIZE_SCALAR(tsoUsedLen);
1545 SERIALIZE_SCALAR(tsoPrevSeq);;
1546 SERIALIZE_SCALAR(tsoPktPayloadBytes);
1547 SERIALIZE_SCALAR(tsoLoadedHeader);
1548 SERIALIZE_SCALAR(tsoPktHasHeader);
1549 SERIALIZE_ARRAY(tsoHeader, 256);
1550 SERIALIZE_SCALAR(tsoDescBytesUsed);
1551 SERIALIZE_SCALAR(tsoCopyBytes);
1552 SERIALIZE_SCALAR(tsoPkts);
1553
1554 SERIALIZE_SCALAR(completionAddress);
1555 SERIALIZE_SCALAR(completionEnabled);
1556 SERIALIZE_SCALAR(descEnd);
1557 }
1558
1559 void
1560 IGbE::TxDescCache::unserialize(Checkpoint *cp, const std::string &section)
1561 {
1562 DescCache<TxDesc>::unserialize(cp, section);
1563 UNSERIALIZE_SCALAR(pktDone);
1564 UNSERIALIZE_SCALAR(isTcp);
1565 UNSERIALIZE_SCALAR(pktWaiting);
1566 UNSERIALIZE_SCALAR(pktMultiDesc);
1567
1568 UNSERIALIZE_SCALAR(useTso);
1569 UNSERIALIZE_SCALAR(tsoHeaderLen);
1570 UNSERIALIZE_SCALAR(tsoMss);
1571 UNSERIALIZE_SCALAR(tsoTotalLen);
1572 UNSERIALIZE_SCALAR(tsoUsedLen);
1573 UNSERIALIZE_SCALAR(tsoPrevSeq);;
1574 UNSERIALIZE_SCALAR(tsoPktPayloadBytes);
1575 UNSERIALIZE_SCALAR(tsoLoadedHeader);
1576 UNSERIALIZE_SCALAR(tsoPktHasHeader);
1577 UNSERIALIZE_ARRAY(tsoHeader, 256);
1578 UNSERIALIZE_SCALAR(tsoDescBytesUsed);
1579 UNSERIALIZE_SCALAR(tsoCopyBytes);
1580 UNSERIALIZE_SCALAR(tsoPkts);
1581
1582 UNSERIALIZE_SCALAR(completionAddress);
1583 UNSERIALIZE_SCALAR(completionEnabled);
1584 UNSERIALIZE_SCALAR(descEnd);
1585 }
1586
1587 bool
1588 IGbE::TxDescCache::packetAvailable()
1589 {
1590 if (pktDone) {
1591 pktDone = false;
1592 return true;
1593 }
1594 return false;
1595 }
1596
1597 void
1598 IGbE::TxDescCache::enableSm()
1599 {
1600 if (!igbe->drainEvent) {
1601 igbe->txTick = true;
1602 igbe->restartClock();
1603 }
1604 }
1605
1606 bool
1607 IGbE::TxDescCache::hasOutstandingEvents()
1608 {
1609 return pktEvent.scheduled() || wbEvent.scheduled() ||
1610 fetchEvent.scheduled();
1611 }
1612
1613
1614 ///////////////////////////////////// IGbE /////////////////////////////////
1615
1616 void
1617 IGbE::restartClock()
1618 {
1619 if (!tickEvent.scheduled() && (rxTick || txTick || txFifoTick) &&
1620 getState() == SimObject::Running)
1621 schedule(tickEvent, (curTick / ticks(1)) * ticks(1) + ticks(1));
1622 }
1623
1624 unsigned int
1625 IGbE::drain(Event *de)
1626 {
1627 unsigned int count;
1628 count = pioPort->drain(de) + dmaPort->drain(de);
1629 if (rxDescCache.hasOutstandingEvents() ||
1630 txDescCache.hasOutstandingEvents()) {
1631 count++;
1632 drainEvent = de;
1633 }
1634
1635 txFifoTick = false;
1636 txTick = false;
1637 rxTick = false;
1638
1639 if (tickEvent.scheduled())
1640 deschedule(tickEvent);
1641
1642 if (count)
1643 changeState(Draining);
1644 else
1645 changeState(Drained);
1646
1647 DPRINTF(EthernetSM, "got drain() returning %d", count);
1648 return count;
1649 }
1650
1651 void
1652 IGbE::resume()
1653 {
1654 SimObject::resume();
1655
1656 txFifoTick = true;
1657 txTick = true;
1658 rxTick = true;
1659
1660 restartClock();
1661 DPRINTF(EthernetSM, "resuming from drain");
1662 }
1663
1664 void
1665 IGbE::checkDrain()
1666 {
1667 if (!drainEvent)
1668 return;
1669
1670 DPRINTF(EthernetSM, "checkDrain() in drain\n");
1671 txFifoTick = false;
1672 txTick = false;
1673 rxTick = false;
1674 if (!rxDescCache.hasOutstandingEvents() &&
1675 !txDescCache.hasOutstandingEvents()) {
1676 drainEvent->process();
1677 drainEvent = NULL;
1678 }
1679 }
1680
void
IGbE::txStateMachine()
{
    // One step of the transmit state machine: move a completed packet to
    // the TX FIFO, or advance descriptor processing (fetch, context
    // handling, packet DMA). Clears txTick whenever it must wait for an
    // external event to make progress.
    if (!regs.tctl.en()) {
        txTick = false;
        DPRINTF(EthernetSM, "TXS: TX disabled, stopping ticking\n");
        return;
    }

    // If we have a packet available and its length is not 0 (meaning it's not
    // a multidescriptor packet) put it in the fifo, otherwise on the next
    // iteration we'll get the rest of the data
    if (txPacket && txDescCache.packetAvailable()
        && !txDescCache.packetMultiDesc() && txPacket->length) {
        bool success;

        anQ("TXS", "TX FIFO Q");
        DPRINTF(EthernetSM, "TXS: packet placed in TX FIFO\n");
        success = txFifo.push(txPacket);
        // Only wake the wire-side tick if no drain is pending.
        txFifoTick = true && !drainEvent;
        assert(success);
        txPacket = NULL;
        anBegin("TXS", "Desc Writeback");
        txDescCache.writeback((cacheBlockSize()-1)>>4);
        return;
    }

    // Only support descriptor granularity
    if (regs.txdctl.lwthresh() && txDescCache.descLeft() < (regs.txdctl.lwthresh() * 8)) {
        DPRINTF(EthernetSM, "TXS: LWTHRESH caused posting of TXDLOW\n");
        postInterrupt(IT_TXDLOW);
    }

    // Allocate the staging packet buffer on demand (16KB covers the
    // largest TSO segment this model builds).
    if (!txPacket) {
        txPacket = new EthPacketData(16384);
    }

    if (!txDescCache.packetWaiting()) {
        // Ring empty: force a writeback, start a fetch, sleep, and tell
        // the driver via TXQE.
        if (txDescCache.descLeft() == 0) {
            postInterrupt(IT_TXQE);
            anBegin("TXS", "Desc Writeback");
            txDescCache.writeback(0);
            anBegin("TXS", "Desc Fetch");
            anWe("TXS", txDescCache.annUnusedCacheQ);
            txDescCache.fetchDescriptors();
            DPRINTF(EthernetSM, "TXS: No descriptors left in ring, forcing "
                    "writeback stopping ticking and posting TXQE\n");
            txTick = false;
            return;
        }


        // Cache empty: fetch more descriptors and sleep until they arrive.
        if (!(txDescCache.descUnused())) {
            anBegin("TXS", "Desc Fetch");
            txDescCache.fetchDescriptors();
            anWe("TXS", txDescCache.annUnusedCacheQ);
            DPRINTF(EthernetSM, "TXS: No descriptors available in cache, fetching and stopping ticking\n");
            txTick = false;
            return;
        }
        anPq("TXS", txDescCache.annUnusedCacheQ);


        // Consume context descriptors; this may start a TSO header DMA,
        // in which case we sleep until headerComplete() wakes us.
        txDescCache.processContextDesc();
        if (txDescCache.packetWaiting()) {
            DPRINTF(EthernetSM, "TXS: Fetching TSO header, stopping ticking\n");
            txTick = false;
            return;
        }

        // Size the next chunk; start its DMA if the FIFO has room,
        // otherwise write back (nothing to send) or sleep (FIFO full).
        int size;
        size = txDescCache.getPacketSize(txPacket);
        if (size > 0 && txFifo.avail() > size) {
            anRq("TXS", "TX FIFO Q");
            anBegin("TXS", "DMA Packet");
            DPRINTF(EthernetSM, "TXS: Reserving %d bytes in FIFO and begining "
                    "DMA of next packet\n", size);
            txFifo.reserve(size);
            txDescCache.getPacketData(txPacket);
        } else if (size <= 0) {
            DPRINTF(EthernetSM, "TXS: getPacketSize returned: %d\n", size);
            DPRINTF(EthernetSM, "TXS: No packets to get, writing back used descriptors\n");
            anBegin("TXS", "Desc Writeback");
            txDescCache.writeback(0);
        } else {
            anWf("TXS", "TX FIFO Q");
            DPRINTF(EthernetSM, "TXS: FIFO full, stopping ticking until space "
                    "available in FIFO\n");
            txTick = false;
        }


        return;
    }
    DPRINTF(EthernetSM, "TXS: Nothing to do, stopping ticking\n");
    txTick = false;
}
1778
1779 bool
1780 IGbE::ethRxPkt(EthPacketPtr pkt)
1781 {
1782 rxBytes += pkt->length;
1783 rxPackets++;
1784
1785 DPRINTF(Ethernet, "RxFIFO: Receiving pcakte from wire\n");
1786 anBegin("RXQ", "Wire Recv");
1787
1788
1789 if (!regs.rctl.en()) {
1790 DPRINTF(Ethernet, "RxFIFO: RX not enabled, dropping\n");
1791 anBegin("RXQ", "FIFO Drop", CPA::FL_BAD);
1792 return true;
1793 }
1794
1795 // restart the state machines if they are stopped
1796 rxTick = true && !drainEvent;
1797 if ((rxTick || txTick) && !tickEvent.scheduled()) {
1798 DPRINTF(EthernetSM, "RXS: received packet into fifo, starting ticking\n");
1799 restartClock();
1800 }
1801
1802 if (!rxFifo.push(pkt)) {
1803 DPRINTF(Ethernet, "RxFIFO: Packet won't fit in fifo... dropped\n");
1804 postInterrupt(IT_RXO, true);
1805 anBegin("RXQ", "FIFO Drop", CPA::FL_BAD);
1806 return false;
1807 }
1808
1809 if (CPA::available() && cpa->enabled()) {
1810 assert(sys->numSystemsRunning <= 2);
1811 System *other_sys;
1812 if (sys->systemList[0] == sys)
1813 other_sys = sys->systemList[1];
1814 else
1815 other_sys = sys->systemList[0];
1816
1817 cpa->hwDq(CPA::FL_NONE, sys, macAddr, "RXQ", "WireQ", 0, other_sys);
1818 anQ("RXQ", "RX FIFO Q");
1819 cpa->hwWe(CPA::FL_NONE, sys, macAddr, "RXQ", "WireQ", 0, other_sys);
1820 }
1821
1822 return true;
1823 }
1824
1825
void
IGbE::rxStateMachine()
{
    // One step of the receive state machine: after a packet finishes its
    // DMA, handle interrupts/writeback/fetch policy; otherwise start
    // writing the next FIFO packet into guest memory. Clears rxTick
    // whenever it must wait for an external event.
    if (!regs.rctl.en()) {
        rxTick = false;
        DPRINTF(EthernetSM, "RXS: RX disabled, stopping ticking\n");
        return;
    }

    // If the packet is done check for interrupts/descriptors/etc
    if (rxDescCache.packetDone()) {
        rxDmaPacket = false;
        DPRINTF(EthernetSM, "RXS: Packet completed DMA to memory\n");
        int descLeft = rxDescCache.descLeft();
        DPRINTF(EthernetSM, "RXS: descLeft: %d rdmts: %d rdlen: %d\n",
                descLeft, regs.rctl.rdmts(), regs.rdlen());
        // RDMTS selects the minimum-threshold fraction of the ring; the
        // fallthrough is intentional: a stricter threshold that passes its
        // check breaks out, otherwise control falls into the interrupt.
        switch (regs.rctl.rdmts()) {
            case 2: if (descLeft > .125 * regs.rdlen()) break;
            case 1: if (descLeft > .250 * regs.rdlen()) break;
            case 0: if (descLeft > .500 * regs.rdlen()) break;
                DPRINTF(Ethernet, "RXS: Interrupting (RXDMT) because of descriptors left\n");
                postInterrupt(IT_RXDMT);
                break;
        }

        // Idle FIFO: opportunistically write back completed descriptors.
        if (rxFifo.empty())
            rxDescCache.writeback(0);

        if (descLeft == 0) {
            anBegin("RXS", "Writeback Descriptors");
            rxDescCache.writeback(0);
            DPRINTF(EthernetSM, "RXS: No descriptors left in ring, forcing"
                    " writeback and stopping ticking\n");
            rxTick = false;
        }

        // only support descriptor granulaties
        assert(regs.rxdctl.gran());

        // WTHRESH writeback policy from RXDCTL.
        if (regs.rxdctl.wthresh() >= rxDescCache.descUsed()) {
            DPRINTF(EthernetSM, "RXS: Writing back because WTHRESH >= descUsed\n");
            anBegin("RXS", "Writeback Descriptors");
            if (regs.rxdctl.wthresh() < (cacheBlockSize()>>4))
                rxDescCache.writeback(regs.rxdctl.wthresh()-1);
            else
                rxDescCache.writeback((cacheBlockSize()-1)>>4);
        }

        // PTHRESH/HTHRESH prefetch policy from RXDCTL.
        if ((rxDescCache.descUnused() < regs.rxdctl.pthresh()) &&
            ((rxDescCache.descLeft() - rxDescCache.descUnused()) > regs.rxdctl.hthresh())) {
            DPRINTF(EthernetSM, "RXS: Fetching descriptors because descUnused < PTHRESH\n");
            anBegin("RXS", "Fetch Descriptors");
            rxDescCache.fetchDescriptors();
        }

        if (rxDescCache.descUnused() == 0) {
            anBegin("RXS", "Fetch Descriptors");
            rxDescCache.fetchDescriptors();
            anWe("RXS", rxDescCache.annUnusedCacheQ);
            DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
                    "fetching descriptors and stopping ticking\n");
            rxTick = false;
        }
        return;
    }

    // A packet DMA is still in flight; wait for its completion event.
    if (rxDmaPacket) {
        DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n");
        rxTick = false;
        return;
    }

    if (!rxDescCache.descUnused()) {
        anBegin("RXS", "Fetch Descriptors");
        rxDescCache.fetchDescriptors();
        anWe("RXS", rxDescCache.annUnusedCacheQ);
        DPRINTF(EthernetSM, "RXS: No descriptors available in cache, stopping ticking\n");
        rxTick = false;
        DPRINTF(EthernetSM, "RXS: No descriptors available, fetching\n");
        return;
    }
    anPq("RXS", rxDescCache.annUnusedCacheQ);

    if (rxFifo.empty()) {
        anWe("RXS", "RX FIFO Q");
        DPRINTF(EthernetSM, "RXS: RxFIFO empty, stopping ticking\n");
        rxTick = false;
        return;
    }
    anPq("RXS", "RX FIFO Q");
    anBegin("RXS", "Get Desc");

    EthPacketPtr pkt;
    pkt = rxFifo.front();


    // Start (or continue) copying the packet into descriptor buffers;
    // pktOffset tracks progress for packets spanning multiple descriptors.
    pktOffset = rxDescCache.writePacket(pkt, pktOffset);
    DPRINTF(EthernetSM, "RXS: Writing packet into memory\n");
    if (pktOffset == pkt->length) {
        anBegin( "RXS", "FIFO Dequeue");
        DPRINTF(EthernetSM, "RXS: Removing packet from FIFO\n");
        pktOffset = 0;
        anDq("RXS", "RX FIFO Q");
        rxFifo.pop();
    }

    DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n");
    rxTick = false;
    rxDmaPacket = true;
    anBegin("RXS", "DMA Packet");
}
1937
void
IGbE::txWire()
{
    // Try to put the head of the TX FIFO onto the wire. On failure the
    // peer will call ethTxDone() when it's ready, which re-enables
    // txFifoTick.
    if (txFifo.empty()) {
        anWe("TXQ", "TX FIFO Q");
        txFifoTick = false;
        return;
    }


    anPq("TXQ", "TX FIFO Q");
    if (etherInt->sendPacket(txFifo.front())) {
        cpa->hwQ(CPA::FL_NONE, sys, macAddr, "TXQ", "WireQ", 0);
        if (DTRACE(EthernetSM)) {
            IpPtr ip(txFifo.front());
            if (ip)
                DPRINTF(EthernetSM, "Transmitting Ip packet with Id=%d\n",
                        ip->id());
            else
                DPRINTF(EthernetSM, "Transmitting Non-Ip packet\n");
        }
        anDq("TXQ", "TX FIFO Q");
        anBegin("TXQ", "Wire Send");
        DPRINTF(EthernetSM, "TxFIFO: Successful transmit, bytes available in fifo: %d\n",
                txFifo.avail());

        // Account the stats before popping, while front() is still the
        // packet that was just sent.
        txBytes += txFifo.front()->length;
        txPackets++;
        txFifoTick = false;

        txFifo.pop();
    } else {
        // We'll get woken up when the packet ethTxDone() gets called
        txFifoTick = false;
    }
}
1974
void
IGbE::tick()
{
    // Main device clock: run each enabled state machine, then reschedule
    // if any of them still wants to run. Note the flags are re-read after
    // the calls because the state machines clear them when they stall.
    DPRINTF(EthernetSM, "IGbE: -------------- Cycle --------------\n");

    if (rxTick)
        rxStateMachine();

    if (txTick)
        txStateMachine();

    if (txFifoTick)
        txWire();


    if (rxTick || txTick || txFifoTick)
        schedule(tickEvent, curTick + ticks(1));
}
1993
1994 void
1995 IGbE::ethTxDone()
1996 {
1997 anBegin("TXQ", "Send Done");
1998 // restart the tx state machines if they are stopped
1999 // fifo to send another packet
2000 // tx sm to put more data into the fifo
2001 txFifoTick = true && !drainEvent;
2002 if (txDescCache.descLeft() != 0 && !drainEvent)
2003 txTick = true;
2004
2005 restartClock();
2006 txWire();
2007 DPRINTF(EthernetSM, "TxFIFO: Transmission complete\n");
2008 }
2009
void
IGbE::serialize(std::ostream &os)
{
    // Checkpoint the whole device: PCI config, register file, EEPROM
    // state machine, FIFOs, in-flight TX packet, pending timer events,
    // and both descriptor caches (each in its own named section).
    PciDev::serialize(os);

    regs.serialize(os);
    SERIALIZE_SCALAR(eeOpBits);
    SERIALIZE_SCALAR(eeAddrBits);
    SERIALIZE_SCALAR(eeDataBits);
    SERIALIZE_SCALAR(eeOpcode);
    SERIALIZE_SCALAR(eeAddr);
    SERIALIZE_SCALAR(lastInterrupt);
    SERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);

    rxFifo.serialize("rxfifo", os);
    txFifo.serialize("txfifo", os);

    // txPacket may be NULL; record its existence so unserialize knows
    // whether to reconstruct it.
    bool txPktExists = txPacket;
    SERIALIZE_SCALAR(txPktExists);
    if (txPktExists)
        txPacket->serialize("txpacket", os);

    // Scheduled events are stored as their absolute ticks; 0 means the
    // event was not scheduled at checkpoint time.
    Tick rdtr_time = 0, radv_time = 0, tidv_time = 0, tadv_time = 0,
         inter_time = 0;

    if (rdtrEvent.scheduled())
        rdtr_time = rdtrEvent.when();
    SERIALIZE_SCALAR(rdtr_time);

    if (radvEvent.scheduled())
        radv_time = radvEvent.when();
    SERIALIZE_SCALAR(radv_time);

    if (tidvEvent.scheduled())
        tidv_time = tidvEvent.when();
    SERIALIZE_SCALAR(tidv_time);

    if (tadvEvent.scheduled())
        tadv_time = tadvEvent.when();
    SERIALIZE_SCALAR(tadv_time);

    if (interEvent.scheduled())
        inter_time = interEvent.when();
    SERIALIZE_SCALAR(inter_time);

    SERIALIZE_SCALAR(pktOffset);

    nameOut(os, csprintf("%s.TxDescCache", name()));
    txDescCache.serialize(os);

    nameOut(os, csprintf("%s.RxDescCache", name()));
    rxDescCache.serialize(os);
}
2063
void
IGbE::unserialize(Checkpoint *cp, const std::string &section)
{
    // Restore the device from a checkpoint, mirroring serialize():
    // registers, EEPROM state, FIFOs, in-flight TX packet, timer events
    // (0 == not scheduled), and the descriptor-cache subsections.
    PciDev::unserialize(cp, section);

    regs.unserialize(cp, section);
    UNSERIALIZE_SCALAR(eeOpBits);
    UNSERIALIZE_SCALAR(eeAddrBits);
    UNSERIALIZE_SCALAR(eeDataBits);
    UNSERIALIZE_SCALAR(eeOpcode);
    UNSERIALIZE_SCALAR(eeAddr);
    UNSERIALIZE_SCALAR(lastInterrupt);
    UNSERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);

    rxFifo.unserialize("rxfifo", cp, section);
    txFifo.unserialize("txfifo", cp, section);

    bool txPktExists;
    UNSERIALIZE_SCALAR(txPktExists);
    if (txPktExists) {
        // Same buffer size as txStateMachine() allocates.
        txPacket = new EthPacketData(16384);
        txPacket->unserialize("txpacket", cp, section);
    }

    // Re-enable all state machines; they stop themselves if idle.
    rxTick = true;
    txTick = true;
    txFifoTick = true;

    Tick rdtr_time, radv_time, tidv_time, tadv_time, inter_time;
    UNSERIALIZE_SCALAR(rdtr_time);
    UNSERIALIZE_SCALAR(radv_time);
    UNSERIALIZE_SCALAR(tidv_time);
    UNSERIALIZE_SCALAR(tadv_time);
    UNSERIALIZE_SCALAR(inter_time);

    if (rdtr_time)
        schedule(rdtrEvent, rdtr_time);

    if (radv_time)
        schedule(radvEvent, radv_time);

    if (tidv_time)
        schedule(tidvEvent, tidv_time);

    if (tadv_time)
        schedule(tadvEvent, tadv_time);

    if (inter_time)
        schedule(interEvent, inter_time);

    UNSERIALIZE_SCALAR(pktOffset);

    txDescCache.unserialize(cp, csprintf("%s.TxDescCache", section));

    rxDescCache.unserialize(cp, csprintf("%s.RxDescCache", section));
}
2120
IGbE *
IGbEParams::create()
{
    // Factory hook used by the Python configuration system to instantiate
    // the device from its parameter object.
    return new IGbE(this);
}