/*
 * Copyright (c) 2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 */

/** @file
 * Device model for Intel's 8254x line of gigabit ethernet controllers.
 */

#ifndef __DEV_I8254XGBE_HH__
#define __DEV_I8254XGBE_HH__

#include <deque>
#include <string>

#include "base/inet.hh"
#include "dev/etherdevice.hh"
#include "dev/etherint.hh"
#include "dev/etherpkt.hh"
#include "dev/i8254xGBe_defs.hh"
#include "dev/pcidev.hh"
#include "dev/pktfifo.hh"
#include "params/IGbE.hh"
#include "sim/eventq.hh"

class IGbEInt;

class IGbE : public EtherDevice
{
  private:
    IGbEInt *etherInt;

    // device registers
    iGbReg::Regs regs;

    // eeprom data, status and control bits
    int eeOpBits, eeAddrBits, eeDataBits;
    uint8_t eeOpcode, eeAddr;
    uint16_t flash[iGbReg::EEPROM_SIZE];

    // The drain event, if we have one
    Event *drainEvent;

    // cached parameters from the params struct
    bool useFlowControl;

    // packet fifos
    PacketFifo rxFifo;
    PacketFifo txFifo;

    // Packet that we are currently putting into the txFifo
    EthPacketPtr txPacket;

    // Should the Rx/Tx state machines tick?
    bool rxTick;
    bool txTick;
    bool txFifoTick;

    bool rxDmaPacket;

    // Delays in managing descriptors
    Tick fetchDelay, wbDelay;
    Tick fetchCompDelay, wbCompDelay;
    Tick rxWriteDelay, txReadDelay;

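    /* The 8254x moderates interrupts with four delay timers: RDTR and
     * RADV delay receive interrupts, while TIDV and TADV delay transmit
     * descriptor writeback interrupts. When a timer expires, the
     * handler below writes back the corresponding descriptor cache and
     * posts the interrupt.
     */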
    // Event and function to deal with RDTR timer expiring
    void rdtrProcess() {
        rxDescCache.writeback(0);
        DPRINTF(EthernetIntr,
                "Posting RXT interrupt because RDTR timer expired\n");
        postInterrupt(iGbReg::IT_RXT);
    }

    EventWrapper<IGbE, &IGbE::rdtrProcess> rdtrEvent;

    // Event and function to deal with RADV timer expiring
    void radvProcess() {
        rxDescCache.writeback(0);
        DPRINTF(EthernetIntr,
                "Posting RXT interrupt because RADV timer expired\n");
        postInterrupt(iGbReg::IT_RXT);
    }

    EventWrapper<IGbE, &IGbE::radvProcess> radvEvent;

    // Event and function to deal with TADV timer expiring
    void tadvProcess() {
        txDescCache.writeback(0);
        DPRINTF(EthernetIntr,
                "Posting TXDW interrupt because TADV timer expired\n");
        postInterrupt(iGbReg::IT_TXDW);
    }

    EventWrapper<IGbE, &IGbE::tadvProcess> tadvEvent;

    // Event and function to deal with TIDV timer expiring
    void tidvProcess() {
        txDescCache.writeback(0);
        DPRINTF(EthernetIntr,
                "Posting TXDW interrupt because TIDV timer expired\n");
        postInterrupt(iGbReg::IT_TXDW);
    }

    EventWrapper<IGbE, &IGbE::tidvProcess> tidvEvent;

    // Main event to tick the device
    void tick();
    EventWrapper<IGbE, &IGbE::tick> tickEvent;
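
    /* Note the idiom used throughout: EventWrapper<C, &C::fn> binds a
     * member function as an event callback, so scheduling tickEvent
     * runs tick() on this device at the scheduled time.
     */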

    uint64_t macAddr;

    void rxStateMachine();
    void txStateMachine();
    void txWire();

    /** Write an interrupt into the interrupt pending register and check
     * the mask and interrupt limit timer before sending the interrupt
     * to the CPU.
     * @param t the type of interrupt we are posting
     * @param now should we ignore the interrupt limiting timer
     */
    void postInterrupt(iGbReg::IntTypes t, bool now = false);

    /** Check whether changes to the mask register require an interrupt
     * to be sent, or have removed an interrupt cause.
     */
    void chkInterrupt();

    /** Send an interrupt to the CPU.
     */
    void delayIntEvent();
    void cpuPostInt();
    // Event to moderate interrupts
    EventWrapper<IGbE, &IGbE::delayIntEvent> interEvent;

    /** Clear the interrupt line to the CPU.
     */
    void cpuClearInt();

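    /* One interrupt-moderation clock tick every 1024 ns; this is the
     * model's approximation of the 8254x interrupt throttling
     * granularity. */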
    Tick intClock() { return Clock::Int::ns * 1024; }

    /** Restart the clock so things like draining and resuming can be
     * handled in one place. */
    void restartClock();

    /** Check whether everything required for draining has completed
     * and, if so, process the drain event.
     */
    void checkDrain();

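    /* DescCache models the device's on-chip cache of DMA descriptors
     * for one descriptor ring. Descriptors are fetched from the ring in
     * host memory into unusedCache, moved to usedCache as the device
     * consumes them, and eventually written back to memory (to update
     * their status fields) before the ring's head pointer advances. The
     * Rx and Tx subclasses below bind the pure-virtual accessors to the
     * appropriate device registers.
     */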
    template<class T>
    class DescCache
    {
      protected:
        virtual Addr descBase() const = 0;
        virtual long descHead() const = 0;
        virtual long descTail() const = 0;
        virtual long descLen() const = 0;
        virtual void updateHead(long h) = 0;
        virtual void enableSm() = 0;
        virtual void intAfterWb() const {}
        virtual void fetchAfterWb() = 0;

        std::deque<T*> usedCache;
        std::deque<T*> unusedCache;

        T *fetchBuf;
        T *wbBuf;

        // Pointer to the device we cache for
        IGbE *igbe;

        // Name of this descriptor cache
        std::string _name;

        // How far we've cached
        int cachePnt;

        // The size of the descriptor cache
        int size;

        // How many descriptors we are currently fetching
        int curFetching;

        // How many descriptors we are currently writing back
        int wbOut;

        // if we wrote back to the end of the descriptor ring and are
        // going to have to wrap and write more
        bool moreToWb;

        // What the alignment is of the next descriptor writeback
        Addr wbAlignment;

        /** The packet that is currently being DMA'd to memory, if any
         */
        EthPacketPtr pktPtr;

      public:
        DescCache(IGbE *i, const std::string n, int s)
            : igbe(i), _name(n), cachePnt(0), size(s), curFetching(0),
              wbOut(0), pktPtr(NULL), wbDelayEvent(this),
              fetchDelayEvent(this), fetchEvent(this), wbEvent(this)
        {
            fetchBuf = new T[size];
            wbBuf = new T[size];
        }

        virtual ~DescCache()
        {
            reset();
            delete [] fetchBuf;
            delete [] wbBuf;
        }

        std::string name() { return _name; }

        /** Changing the descriptor address, length, or head while we
         * hold descriptors that are dirty is very bad, so panic if that
         * happens; otherwise just reset the cache.
         */
        void areaChanged()
        {
            if (usedCache.size() > 0 || curFetching || wbOut)
                panic("Descriptor Address, Length or Head changed. Bad\n");
            reset();
        }

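        /** Write back the used descriptors to the ring in host memory.
         * @param aMask alignment mask for the writeback count: a
         * nonzero mask rounds the count down to a multiple of
         * (aMask + 1); 0 writes back everything.
         */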
        void writeback(Addr aMask)
        {
            int curHead = descHead();
            int max_to_wb = usedCache.size();

            // Check if this writeback is less restrictive than the
            // previous one and, if so, set up another writeback
            // immediately following it
            if (wbOut) {
                if (aMask < wbAlignment) {
                    moreToWb = true;
                    wbAlignment = aMask;
                }
                DPRINTF(EthernetDesc,
                        "Writeback already in progress, returning\n");
                return;
            }

            moreToWb = false;
            wbAlignment = aMask;

            DPRINTF(EthernetDesc, "Writing back descriptors head: %d tail: "
                    "%d len: %d cachePnt: %d max_to_wb: %d descleft: %d\n",
                    curHead, descTail(), descLen(), cachePnt, max_to_wb,
                    descLeft());

            if (max_to_wb + curHead >= descLen()) {
                max_to_wb = descLen() - curHead;
                moreToWb = true;
                // this is by definition aligned correctly
            } else if (wbAlignment != 0) {
                // align the wb point to the mask
                max_to_wb = max_to_wb & ~wbAlignment;
            }

            DPRINTF(EthernetDesc, "Writing back %d descriptors\n", max_to_wb);

            if (max_to_wb <= 0) {
                return;
            }

            wbOut = max_to_wb;

            assert(!wbDelayEvent.scheduled());
            igbe->schedule(wbDelayEvent, curTick + igbe->wbDelay);
        }
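
        /* Worked example of the alignment math above: with aMask = 3
         * and 10 dirty descriptors that do not cross the end of the
         * ring, max_to_wb = 10 & ~3 = 8, so two descriptors stay cached
         * until a later, less restrictive writeback.
         */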

        void writeback1()
        {
            // If we're draining, delay issuing this DMA
            if (igbe->drainEvent) {
                igbe->schedule(wbDelayEvent, curTick + igbe->wbDelay);
                return;
            }

            DPRINTF(EthernetDesc, "Beginning DMA of %d descriptors\n", wbOut);

            for (int x = 0; x < wbOut; x++) {
                assert(usedCache.size());
                memcpy(&wbBuf[x], usedCache[x], sizeof(T));
                // the cached descriptors are freed in wbComplete()
            }

            assert(wbOut);
            igbe->dmaWrite(
                igbe->platform->pciToDma(descBase() + descHead() * sizeof(T)),
                wbOut * sizeof(T), &wbEvent, (uint8_t*)wbBuf,
                igbe->wbCompDelay);
        }
        EventWrapper<DescCache, &DescCache::writeback1> wbDelayEvent;
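
        /* Both writeback1() and fetchDescriptors1() re-schedule
         * themselves instead of issuing a DMA while the device is
         * draining, so a checkpoint never sees descriptor traffic in
         * flight.
         */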

        /** Fetch a chunk of descriptors into the descriptor cache.
         * Calls fetchComplete when the memory system returns the data.
         */
        void fetchDescriptors()
        {
            size_t max_to_fetch;

            if (curFetching) {
                DPRINTF(EthernetDesc,
                        "Currently fetching %d descriptors, returning\n",
                        curFetching);
                return;
            }

            if (descTail() >= cachePnt)
                max_to_fetch = descTail() - cachePnt;
            else
                max_to_fetch = descLen() - cachePnt;

            size_t free_cache = size - usedCache.size() - unusedCache.size();

            max_to_fetch = std::min(max_to_fetch, free_cache);

            DPRINTF(EthernetDesc, "Fetching descriptors head: %d tail: "
                    "%d len: %d cachePnt: %d max_to_fetch: %d descleft: %d\n",
                    descHead(), descTail(), descLen(), cachePnt,
                    max_to_fetch, descLeft());

            // Nothing to do
            if (max_to_fetch == 0)
                return;

            // So we don't have two descriptor fetches going on at once
            curFetching = max_to_fetch;

            assert(!fetchDelayEvent.scheduled());
            igbe->schedule(fetchDelayEvent, curTick + igbe->fetchDelay);
        }

        void fetchDescriptors1()
        {
            // If we're draining, delay issuing this DMA
            if (igbe->drainEvent) {
                igbe->schedule(fetchDelayEvent, curTick + igbe->fetchDelay);
                return;
            }

            DPRINTF(EthernetDesc, "Fetching descriptors at %#x (%#x), "
                    "size: %#x\n", descBase() + cachePnt * sizeof(T),
                    igbe->platform->pciToDma(descBase() + cachePnt * sizeof(T)),
                    curFetching * sizeof(T));
            assert(curFetching);
            igbe->dmaRead(
                igbe->platform->pciToDma(descBase() + cachePnt * sizeof(T)),
                curFetching * sizeof(T), &fetchEvent, (uint8_t*)fetchBuf,
                igbe->fetchCompDelay);
        }

        EventWrapper<DescCache, &DescCache::fetchDescriptors1> fetchDelayEvent;

        /** Called by event when the DMA to read descriptors completes.
         */
        void fetchComplete()
        {
            T *newDesc;
            for (int x = 0; x < curFetching; x++) {
                newDesc = new T;
                memcpy(newDesc, &fetchBuf[x], sizeof(T));
                unusedCache.push_back(newDesc);
            }

#ifndef NDEBUG
            int oldCp = cachePnt;
#endif

            cachePnt += curFetching;
            assert(cachePnt <= descLen());
            if (cachePnt == descLen())
                cachePnt = 0;

            curFetching = 0;

            DPRINTF(EthernetDesc, "Fetching complete cachePnt %d -> %d\n",
                    oldCp, cachePnt);

            enableSm();
            igbe->checkDrain();
        }

        EventWrapper<DescCache, &DescCache::fetchComplete> fetchEvent;

        /** Called by event when the DMA to write back descriptors
         * completes.
         */
        void wbComplete()
        {
            long curHead = descHead();
#ifndef NDEBUG
            long oldHead = curHead;
#endif

            for (int x = 0; x < wbOut; x++) {
                assert(usedCache.size());
                delete usedCache[0];
                usedCache.pop_front();
            }

            curHead += wbOut;
            wbOut = 0;

            if (curHead >= descLen())
                curHead -= descLen();

            // Update the head
            updateHead(curHead);

            DPRINTF(EthernetDesc, "Writeback complete curHead %d -> %d\n",
                    oldHead, curHead);

            // If we still have more to write back, do it now
            intAfterWb();
            if (moreToWb) {
                moreToWb = false;
                DPRINTF(EthernetDesc, "Writeback has more to do\n");
                writeback(wbAlignment);
            }

            if (!wbOut) {
                igbe->checkDrain();
            }
            fetchAfterWb();
        }

        EventWrapper<DescCache, &DescCache::wbComplete> wbEvent;

        /** Return the number of descriptors left in the ring, so the
         * device has a way to figure out if it needs to interrupt.
         */
        int descLeft() const
        {
            int left = unusedCache.size();
            if (cachePnt - descTail() >= 0)
                left += (cachePnt - descTail());
            else
                left += (descTail() - cachePnt);

            return left;
        }

        /** Return the number of descriptors used and not written back.
         */
        int descUsed() const { return usedCache.size(); }

        /** Return the number of unused descriptors we have cached. */
        int descUnused() const { return unusedCache.size(); }

        /** Get into a state where the descriptor address/head/etc.
         * could be changed. */
        void reset()
        {
            DPRINTF(EthernetDesc, "Resetting descriptor cache\n");
            for (int x = 0; x < usedCache.size(); x++)
                delete usedCache[x];
            for (int x = 0; x < unusedCache.size(); x++)
                delete unusedCache[x];

            usedCache.clear();
            unusedCache.clear();

            cachePnt = 0;
        }

        virtual void serialize(std::ostream &os)
        {
            SERIALIZE_SCALAR(cachePnt);
            SERIALIZE_SCALAR(curFetching);
            SERIALIZE_SCALAR(wbOut);
            SERIALIZE_SCALAR(moreToWb);
            SERIALIZE_SCALAR(wbAlignment);

            int usedCacheSize = usedCache.size();
            SERIALIZE_SCALAR(usedCacheSize);
            for (int x = 0; x < usedCacheSize; x++) {
                arrayParamOut(os, csprintf("usedCache_%d", x),
                              (uint8_t*)usedCache[x], sizeof(T));
            }

            int unusedCacheSize = unusedCache.size();
            SERIALIZE_SCALAR(unusedCacheSize);
            for (int x = 0; x < unusedCacheSize; x++) {
                arrayParamOut(os, csprintf("unusedCache_%d", x),
                              (uint8_t*)unusedCache[x], sizeof(T));
            }

            Tick fetch_delay = 0, wb_delay = 0;
            if (fetchDelayEvent.scheduled())
                fetch_delay = fetchDelayEvent.when();
            SERIALIZE_SCALAR(fetch_delay);
            if (wbDelayEvent.scheduled())
                wb_delay = wbDelayEvent.when();
            SERIALIZE_SCALAR(wb_delay);
        }

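        /* The scheduled times of the two delay events are checkpointed
         * so that an in-flight fetch or writeback delay is re-scheduled
         * when the checkpoint is restored below.
         */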
        virtual void unserialize(Checkpoint *cp, const std::string &section)
        {
            UNSERIALIZE_SCALAR(cachePnt);
            UNSERIALIZE_SCALAR(curFetching);
            UNSERIALIZE_SCALAR(wbOut);
            UNSERIALIZE_SCALAR(moreToWb);
            UNSERIALIZE_SCALAR(wbAlignment);

            int usedCacheSize;
            UNSERIALIZE_SCALAR(usedCacheSize);
            T *temp;
            for (int x = 0; x < usedCacheSize; x++) {
                temp = new T;
                arrayParamIn(cp, section, csprintf("usedCache_%d", x),
                             (uint8_t*)temp, sizeof(T));
                usedCache.push_back(temp);
            }

            int unusedCacheSize;
            UNSERIALIZE_SCALAR(unusedCacheSize);
            for (int x = 0; x < unusedCacheSize; x++) {
                temp = new T;
                arrayParamIn(cp, section, csprintf("unusedCache_%d", x),
                             (uint8_t*)temp, sizeof(T));
                unusedCache.push_back(temp);
            }

            Tick fetch_delay = 0, wb_delay = 0;
            UNSERIALIZE_SCALAR(fetch_delay);
            UNSERIALIZE_SCALAR(wb_delay);
            if (fetch_delay)
                igbe->schedule(fetchDelayEvent, fetch_delay);
            if (wb_delay)
                igbe->schedule(wbDelayEvent, wb_delay);
        }

        virtual bool hasOutstandingEvents() {
            return wbEvent.scheduled() || fetchEvent.scheduled();
        }
    };

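    /* The Rx and Tx specializations below bind the ring accessors to
     * the receive registers (RDBA/RDH/RDT/RDLEN) and transmit registers
     * (TDBA/TDH/TDT/TDLEN) respectively; descLen() shifts RDLEN/TDLEN
     * right by 4 because those registers hold the ring size in bytes
     * and each descriptor is 16 bytes.
     */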
    class RxDescCache : public DescCache<iGbReg::RxDesc>
    {
      protected:
        virtual Addr descBase() const { return igbe->regs.rdba(); }
        virtual long descHead() const { return igbe->regs.rdh(); }
        virtual long descLen() const { return igbe->regs.rdlen() >> 4; }
        virtual long descTail() const { return igbe->regs.rdt(); }
        virtual void updateHead(long h) { igbe->regs.rdh(h); }
        virtual void enableSm();
        virtual void fetchAfterWb() {
            if (!igbe->rxTick && igbe->getState() == SimObject::Running)
                fetchDescriptors();
        }

        bool pktDone;

      public:
        RxDescCache(IGbE *i, std::string n, int s);

        /** Write the given packet into the buffer(s) pointed to by the
         * descriptor and update the bookkeeping. Should only be called
         * when there are no DMAs pending.
         * @param packet ethernet packet to write
         */
        void writePacket(EthPacketPtr packet);

        /** Called by event when the DMA to write the packet completes.
         */
        void pktComplete();

        /** Check if the DMA of the packet has completed.
         */
        bool packetDone();

        EventWrapper<RxDescCache, &RxDescCache::pktComplete> pktEvent;

        virtual bool hasOutstandingEvents();

        virtual void serialize(std::ostream &os);
        virtual void unserialize(Checkpoint *cp, const std::string &section);
    };
    friend class RxDescCache;

    RxDescCache rxDescCache;

    class TxDescCache : public DescCache<iGbReg::TxDesc>
    {
      protected:
        virtual Addr descBase() const { return igbe->regs.tdba(); }
        virtual long descHead() const { return igbe->regs.tdh(); }
        virtual long descTail() const { return igbe->regs.tdt(); }
        virtual long descLen() const { return igbe->regs.tdlen() >> 4; }
        virtual void updateHead(long h) { igbe->regs.tdh(h); }
        virtual void enableSm();
        virtual void intAfterWb() const {
            igbe->postInterrupt(iGbReg::IT_TXDW);
        }
        virtual void fetchAfterWb() {
            if (!igbe->txTick && igbe->getState() == SimObject::Running)
                fetchDescriptors();
        }

        bool pktDone;
        bool isTcp;
        bool pktWaiting;
        bool pktMultiDesc;

      public:
        TxDescCache(IGbE *i, std::string n, int s);

        /** Tell the cache to DMA a packet from main memory into its
         * buffer and return the size of the packet so space can be
         * reserved in the tx fifo.
         * @return size of the packet
         */
        int getPacketSize();
        void getPacketData(EthPacketPtr p);

        /** Ask if the packet has been transferred so the state machine
         * can give it to the fifo.
         * @return packet available in descriptor cache
         */
        bool packetAvailable();

        /** Ask if we are still waiting for the packet to be
         * transferred.
         * @return packet still in transit.
         */
        bool packetWaiting() { return pktWaiting; }

        /** Ask if this packet is composed of multiple descriptors, in
         * which case we need to wait for more data before we can send
         * it out even though we've got some already.
         * @return packet can't be sent out because it's a
         * multi-descriptor packet
         */
        bool packetMultiDesc() { return pktMultiDesc; }

        /** Called by event when the DMA to write the packet completes.
         */
        void pktComplete();
        EventWrapper<TxDescCache, &TxDescCache::pktComplete> pktEvent;

        virtual bool hasOutstandingEvents();

        virtual void serialize(std::ostream &os);
        virtual void unserialize(Checkpoint *cp, const std::string &section);
    };
    friend class TxDescCache;

    TxDescCache txDescCache;

  public:
    typedef IGbEParams Params;
    const Params *
    params() const
    {
        return dynamic_cast<const Params *>(_params);
    }
    IGbE(const Params *params);
    ~IGbE() {}

    virtual EtherInt *getEthPort(const std::string &if_name, int idx);

    Tick clock;
    Tick lastInterrupt;
    inline Tick ticks(int numCycles) const { return numCycles * clock; }

    virtual Tick read(PacketPtr pkt);
    virtual Tick write(PacketPtr pkt);

    virtual Tick writeConfig(PacketPtr pkt);

    bool ethRxPkt(EthPacketPtr packet);
    void ethTxDone();

    virtual void serialize(std::ostream &os);
    virtual void unserialize(Checkpoint *cp, const std::string &section);
    virtual unsigned int drain(Event *de);
    virtual void resume();
};
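
/* A minimal wiring sketch (hypothetical variable names): getEthPort()
 * hands back the device's IGbEInt, which is then peered with the other
 * end of the ethernet link using the EtherInt interface:
 *
 *     EtherInt *port = igbe->getEthPort("interface", 0);
 *     port->setPeer(otherEnd);
 *     otherEnd->setPeer(port);
 */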

class IGbEInt : public EtherInt
{
  private:
    IGbE *dev;

  public:
    IGbEInt(const std::string &name, IGbE *d)
        : EtherInt(name), dev(d)
    { }

    virtual bool recvPacket(EthPacketPtr pkt) { return dev->ethRxPkt(pkt); }
    virtual void sendDone() { dev->ethTxDone(); }
};

#endif //__DEV_I8254XGBE_HH__