2 * Copyright (c) 2006 The Regents of The University of Michigan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * Device model for Intel's 8254x line of gigabit ethernet controllers.
35 #ifndef __DEV_I8254XGBE_HH__
36 #define __DEV_I8254XGBE_HH__
41 #include "base/inet.hh"
42 #include "dev/etherdevice.hh"
43 #include "dev/etherint.hh"
44 #include "dev/etherpkt.hh"
45 #include "dev/i8254xGBe_defs.hh"
46 #include "dev/pcidev.hh"
47 #include "dev/pktfifo.hh"
48 #include "params/IGbE.hh"
49 #include "sim/eventq.hh"
53 class IGbE : public EtherDevice
61 // eeprom data, status and control bits
// Bit counters tracking progress through the serial EEPROM protocol
62 int eeOpBits, eeAddrBits, eeDataBits;
// Opcode and address currently being shifted in over the EEPROM interface
63 uint8_t eeOpcode, eeAddr;
// Backing store for the emulated EEPROM/flash contents
64 uint16_t flash[iGbReg::EEPROM_SIZE];
66 // The drain event if we have one
69 // cached parameters from params struct
76 // Packet that we are currently putting into the txFifo
77 EthPacketPtr txPacket;
79 // Should the Rx/Tx state machines tick?
// NOTE(review): several declarations between these comments are elided in
// this chunk of the file.
86 // Delays in managing descriptors
// Latencies charged before issuing a descriptor fetch/writeback DMA ...
87 Tick fetchDelay, wbDelay;
// ... and after the corresponding DMA completes
88 Tick fetchCompDelay, wbCompDelay;
// Latencies for writing rx data / reading tx data to/from guest memory
89 Tick rxWriteDelay, txReadDelay;
91 // Event and function to deal with RDTR timer expiring
// NOTE(review): the rdtrProcess() signature/brace lines are elided in this
// chunk. The visible body flushes pending rx descriptor writebacks and
// posts an RXT (receive timer) interrupt.
93 rxDescCache.writeback(0);
94 DPRINTF(EthernetIntr, "Posting RXT interrupt because RDTR timer expired\n");
95 postInterrupt(iGbReg::IT_RXT);
98 //friend class EventWrapper<IGbE, &IGbE::rdtrProcess>;
// Event that invokes rdtrProcess() when the RDTR (rx delay) timer fires
99 EventWrapper<IGbE, &IGbE::rdtrProcess> rdtrEvent;
101 // Event and function to deal with RADV timer expiring
// NOTE(review): the radvProcess() signature/brace lines are elided in this
// chunk. The visible body mirrors rdtrProcess(): flush rx writebacks and
// post an RXT interrupt (RADV = receive absolute delay timer).
103 rxDescCache.writeback(0);
104 DPRINTF(EthernetIntr, "Posting RXT interrupt because RADV timer expired\n");
105 postInterrupt(iGbReg::IT_RXT);
108 //friend class EventWrapper<IGbE, &IGbE::radvProcess>;
// Event that invokes radvProcess() when the RADV timer fires
109 EventWrapper<IGbE, &IGbE::radvProcess> radvEvent;
111 // Event and function to deal with TADV timer expiring
// NOTE(review): the tadvProcess() signature/brace lines are elided in this
// chunk. The visible body flushes pending tx descriptor writebacks and
// posts a TXDW (transmit descriptor written back) interrupt.
113 txDescCache.writeback(0);
114 DPRINTF(EthernetIntr, "Posting TXDW interrupt because TADV timer expired\n");
115 postInterrupt(iGbReg::IT_TXDW);
118 //friend class EventWrapper<IGbE, &IGbE::tadvProcess>;
// Event that invokes tadvProcess() when the TADV (tx absolute delay) timer fires
119 EventWrapper<IGbE, &IGbE::tadvProcess> tadvEvent;
121 // Event and function to deal with TIDV timer expiring
// NOTE(review): the tidvProcess() signature/brace lines are elided in this
// chunk. The visible body mirrors tadvProcess(): flush tx writebacks and
// post a TXDW interrupt (TIDV = transmit interrupt delay timer).
123 txDescCache.writeback(0);
124 DPRINTF(EthernetIntr, "Posting TXDW interrupt because TIDV timer expired\n");
125 postInterrupt(iGbReg::IT_TXDW);
127 //friend class EventWrapper<IGbE, &IGbE::tidvProcess>;
// Event that invokes tidvProcess() when the TIDV timer fires
128 EventWrapper<IGbE, &IGbE::tidvProcess> tidvEvent;
130 // Main event to tick the device
132 //friend class EventWrapper<IGbE, &IGbE::tick>;
// Recurring event driving tick(), which advances both state machines
133 EventWrapper<IGbE, &IGbE::tick> tickEvent;
// Per-tick drivers for the receive and transmit sides of the device
138 void rxStateMachine();
139 void txStateMachine();
142 /** Write an interrupt into the interrupt pending register and check mask
143 * and interrupt limit timer before sending interrupt to CPU
144 * @param t the type of interrupt we are posting
145 * @param now should we ignore the interrupt limiting timer
147 void postInterrupt(iGbReg::IntTypes t, bool now = false);
149 /** Check and see if changes to the mask register have caused an interrupt
150 * to need to be sent or perhaps removed an interrupt cause.
154 /** Send an interrupt to the cpu
156 void delayIntEvent();
158 // Event to moderate interrupts
// Fires delayIntEvent() to implement interrupt-rate moderation
159 EventWrapper<IGbE, &IGbE::delayIntEvent> interEvent;
161 /** Clear the interrupt line to the cpu
// Period of the interrupt-throttling clock: 1024 ns per interrupt-clock tick
165 Tick intClock() { return Clock::Int::ns * 1024; }
167 /** This function is used to restart the clock so it can handle things like
168 * draining and resume in one place. */
171 /** Check if all the draining things that need to occur have occurred and
172 * handle the drain event if so.
// NOTE(review): the DescCache template-class header is elided in this chunk;
// the pure-virtual interface below is what each subclass maps onto its own
// (rx or tx) register set.
// Guest-physical base address of the descriptor ring
180 virtual Addr descBase() const = 0;
// Hardware head/tail indices and ring length, in descriptors
181 virtual long descHead() const = 0;
182 virtual long descTail() const = 0;
183 virtual long descLen() const = 0;
// Publish an updated head index back to the register file
184 virtual void updateHead(long h) = 0;
// Re-enable the owning state machine
185 virtual void enableSm() = 0;
// Hook called after a writeback completes; default is a no-op
186 virtual void intAfterWb() const {}
// Hook to kick off another descriptor fetch after a writeback
187 virtual void fetchAfterWb() = 0;
// Descriptors consumed but not yet written back (used) and descriptors
// fetched from memory but not yet consumed (unused).
189 std::deque<T*> usedCache;
190 std::deque<T*> unusedCache;
195 // Pointer to the device we cache for
198 // Name of this descriptor cache
201 // How far we've cached
204 // The size of the descriptor cache
207 // How many descriptors we are currently fetching
210 // How many descriptors we are currently writing back
213 // if we wrote back to the end of the descriptor ring and are going
214 // to have to wrap and write more
217 // What the alignment is of the next descriptor writeback
220 /** The packet that is currently being dmad to memory if any
// Construct a descriptor cache of s entries belonging to device i.
// NOTE(review): member declarations between the comments above are elided
// in this chunk.
225 DescCache(IGbE *i, const std::string n, int s)
226 : igbe(i), _name(n), cachePnt(0), size(s), curFetching(0), wbOut(0),
227 pktPtr(NULL), wbDelayEvent(this), fetchDelayEvent(this),
228 fetchEvent(this), wbEvent(this)
// Staging buffer for descriptor DMA reads — presumably released in the
// (elided) destructor; confirm against the full source.
230 fetchBuf = new T[size];
239 std::string name() { return _name; }
241 /** If the address/len/head change when we've got descriptors that are
242 * dirty that is very bad. This function checks that we don't and if we
// Panic if the ring geometry changes while dirty/in-flight state exists.
247 if (usedCache.size() > 0 || curFetching || wbOut)
248 panic("Descriptor Address, Length or Head changed. Bad\n");
// Write used descriptors back to guest memory, honoring the alignment mask.
// NOTE(review): several body lines (braces, early returns) are elided in
// this chunk; comments below describe only the visible logic.
253 void writeback(Addr aMask)
255 int curHead = descHead();
256 int max_to_wb = usedCache.size();
258 // Check if this writeback is less restrictive than the previous
259 // and if so setup another one immediately following it
261 if (aMask < wbAlignment) {
265 DPRINTF(EthernetDesc, "Writing back already in process, returning\n");
273 DPRINTF(EthernetDesc, "Writing back descriptors head: %d tail: "
274 "%d len: %d cachePnt: %d max_to_wb: %d descleft: %d\n",
275 curHead, descTail(), descLen(), cachePnt, max_to_wb,
// Clamp the writeback so it does not run past the end of the ring ...
278 if (max_to_wb + curHead >= descLen()) {
279 max_to_wb = descLen() - curHead;
281 // this is by definition aligned correctly
282 } else if (wbAlignment != 0) {
283 // align the wb point to the mask
284 max_to_wb = max_to_wb & ~wbAlignment;
287 DPRINTF(EthernetDesc, "Writing back %d descriptors\n", max_to_wb);
289 if (max_to_wb <= 0) {
// Model the descriptor-writeback latency before issuing the DMA
295 assert(!wbDelayEvent.scheduled());
296 igbe->schedule(wbDelayEvent, curTick + igbe->wbDelay);
// Second stage of writeback(): runs after the modeled wbDelay and issues
// the actual DMA of wbOut descriptors to guest memory.
// NOTE(review): the writeback1() signature/brace lines are elided in this
// chunk; only the visible body is documented.
301 // If we're draining delay issuing this DMA
302 if (igbe->drainEvent) {
303 igbe->schedule(wbDelayEvent, curTick + igbe->wbDelay);
// Fixed log typo: "Beining" -> "Beginning"
307 DPRINTF(EthernetDesc, "Beginning DMA of %d descriptors\n", wbOut);
// Stage the used descriptors into the contiguous writeback buffer
309 for (int x = 0; x < wbOut; x++) {
310 assert(usedCache.size());
311 memcpy(&wbBuf[x], usedCache[x], sizeof(T));
312 //delete usedCache[0];
313 //usedCache.pop_front();
// DMA the staged descriptors; wbEvent fires when the write completes
317 igbe->dmaWrite(igbe->platform->pciToDma(descBase() + descHead() * sizeof(T)),
318 wbOut * sizeof(T), &wbEvent, (uint8_t*)wbBuf,
// Delayed-start event scheduled by writeback() to model wbDelay
321 EventWrapper<DescCache, &DescCache::writeback1> wbDelayEvent;
323 /** Fetch a chunk of descriptors into the descriptor cache.
324 * Calls fetchComplete when the memory system returns the data
// NOTE(review): several body lines (braces, else branches, early returns)
// are elided in this chunk; comments describe only the visible logic.
327 void fetchDescriptors()
332 DPRINTF(EthernetDesc, "Currently fetching %d descriptors, returning\n", curFetching);
// How many descriptors are available to fetch before hitting the tail
// (or, in the wrap case, the end of the ring)
336 if (descTail() >= cachePnt)
337 max_to_fetch = descTail() - cachePnt;
339 max_to_fetch = descLen() - cachePnt;
// Also bounded by the free space left in this cache
341 size_t free_cache = size - usedCache.size() - unusedCache.size();
343 max_to_fetch = std::min(max_to_fetch, free_cache);
346 DPRINTF(EthernetDesc, "Fetching descriptors head: %d tail: "
347 "%d len: %d cachePnt: %d max_to_fetch: %d descleft: %d\n",
348 descHead(), descTail(), descLen(), cachePnt,
349 max_to_fetch, descLeft());
352 if (max_to_fetch == 0)
355 // So we don't have two descriptor fetches going on at once
356 curFetching = max_to_fetch;
// Model the descriptor-fetch latency before issuing the DMA
358 assert(!fetchDelayEvent.scheduled());
359 igbe->schedule(fetchDelayEvent, curTick + igbe->fetchDelay);
// Second stage: runs after fetchDelay and issues the descriptor-read DMA
362 void fetchDescriptors1()
364 // If we're draining delay issuing this DMA
365 if (igbe->drainEvent) {
366 igbe->schedule(fetchDelayEvent, curTick + igbe->fetchDelay);
370 DPRINTF(EthernetDesc, "Fetching descriptors at %#x (%#x), size: %#x\n",
371 descBase() + cachePnt * sizeof(T),
372 igbe->platform->pciToDma(descBase() + cachePnt * sizeof(T)),
373 curFetching * sizeof(T));
// fetchEvent fires when the read completes (see fetchComplete)
375 igbe->dmaRead(igbe->platform->pciToDma(descBase() + cachePnt * sizeof(T)),
376 curFetching * sizeof(T), &fetchEvent, (uint8_t*)fetchBuf,
377 igbe->fetchCompDelay);
// Delayed-start event scheduled by fetchDescriptors() to model fetchDelay
380 EventWrapper<DescCache, &DescCache::fetchDescriptors1> fetchDelayEvent;
382 /** Called by event when dma to read descriptors is completed
// NOTE(review): the fetchComplete() signature and some body lines are
// elided in this chunk; comments describe only the visible logic.
// Copy each fetched descriptor out of the staging buffer into a
// heap-allocated entry on the unused list
387 for (int x = 0; x < curFetching; x++) {
389 memcpy(newDesc, &fetchBuf[x], sizeof(T));
390 unusedCache.push_back(newDesc);
395 int oldCp = cachePnt;
// Advance the cache pointer, wrapping at the end of the ring
398 cachePnt += curFetching;
399 assert(cachePnt <= descLen());
400 if (cachePnt == descLen())
405 DPRINTF(EthernetDesc, "Fetching complete cachePnt %d -> %d\n",
// Fires when the descriptor-read DMA issued by fetchDescriptors1 completes
412 EventWrapper<DescCache, &DescCache::fetchComplete> fetchEvent;
414 /** Called by event when dma to writeback descriptors is completed
// NOTE(review): the wbComplete() signature and some body lines are elided
// in this chunk; comments describe only the visible logic.
419 long curHead = descHead();
421 long oldHead = curHead;
// Retire the descriptors that were just written back
424 for (int x = 0; x < wbOut; x++) {
425 assert(usedCache.size());
427 usedCache.pop_front();
// Wrap the head index if it ran off the end of the ring
433 if (curHead >= descLen())
434 curHead -= descLen();
439 DPRINTF(EthernetDesc, "Writeback complete curHead %d -> %d\n",
442 // If we still have more to wb, call wb now
446 DPRINTF(EthernetDesc, "Writeback has more todo\n");
447 writeback(wbAlignment);
// Fires when the descriptor-writeback DMA issued by writeback1 completes
457 EventWrapper<DescCache, &DescCache::wbComplete> wbEvent;
459 /* Return the number of descriptors left in the ring, so the device has
460 * a way to figure out if it needs to interrupt.
// Count cached-but-unused descriptors plus those between the cache
// pointer and the tail (accounting for wrap).
464 int left = unusedCache.size();
465 if (cachePnt - descTail() >= 0)
466 left += (cachePnt - descTail());
468 left += (descTail() - cachePnt);
473 /* Return the number of descriptors used and not written back.
475 int descUsed() const { return usedCache.size(); }
477 /* Return the number of cache unused descriptors we have. */
478 int descUnused() const {return unusedCache.size(); }
480 /* Get into a state where the descriptor address/head/etc could be
// NOTE(review): parts of reset() (signature, the delete in the first loop)
// are elided in this chunk.
484 DPRINTF(EthernetDesc, "Reseting descriptor cache\n");
485 for (int x = 0; x < usedCache.size(); x++)
487 for (int x = 0; x < unusedCache.size(); x++)
488 delete unusedCache[x];
// Checkpoint the descriptor-cache state: scalar bookkeeping, the raw bytes
// of both descriptor lists, and the pending delay-event times (0 = not
// scheduled). Must stay in lockstep with unserialize().
497 virtual void serialize(std::ostream &os)
499 SERIALIZE_SCALAR(cachePnt);
500 SERIALIZE_SCALAR(curFetching);
501 SERIALIZE_SCALAR(wbOut);
502 SERIALIZE_SCALAR(moreToWb);
503 SERIALIZE_SCALAR(wbAlignment);
// Each cached descriptor is dumped as a raw byte array keyed by index
505 int usedCacheSize = usedCache.size();
506 SERIALIZE_SCALAR(usedCacheSize);
507 for(int x = 0; x < usedCacheSize; x++) {
508 arrayParamOut(os, csprintf("usedCache_%d", x),
509 (uint8_t*)usedCache[x],sizeof(T));
512 int unusedCacheSize = unusedCache.size();
513 SERIALIZE_SCALAR(unusedCacheSize);
514 for(int x = 0; x < unusedCacheSize; x++) {
515 arrayParamOut(os, csprintf("unusedCache_%d", x),
516 (uint8_t*)unusedCache[x],sizeof(T));
// Record absolute fire times of the delayed-start events, if scheduled
519 Tick fetch_delay = 0, wb_delay = 0;
520 if (fetchDelayEvent.scheduled())
521 fetch_delay = fetchDelayEvent.when();
522 SERIALIZE_SCALAR(fetch_delay);
523 if (wbDelayEvent.scheduled())
524 wb_delay = wbDelayEvent.when();
525 SERIALIZE_SCALAR(wb_delay);
// Restore the descriptor-cache state written by serialize(): scalar
// bookkeeping, both descriptor lists (raw bytes), and the delay-event
// fire times (rescheduled only if nonzero).
// Fixed mojibake: "&sect;" had been collapsed to "§" in the parameter
// declaration, restoring "const std::string &section".
530 virtual void unserialize(Checkpoint *cp, const std::string &section)
532 UNSERIALIZE_SCALAR(cachePnt);
533 UNSERIALIZE_SCALAR(curFetching);
534 UNSERIALIZE_SCALAR(wbOut);
535 UNSERIALIZE_SCALAR(moreToWb);
536 UNSERIALIZE_SCALAR(wbAlignment);
// Rebuild each descriptor list entry-by-entry from its raw byte dump
539 UNSERIALIZE_SCALAR(usedCacheSize);
541 for(int x = 0; x < usedCacheSize; x++) {
543 arrayParamIn(cp, section, csprintf("usedCache_%d", x),
544 (uint8_t*)temp,sizeof(T));
545 usedCache.push_back(temp);
549 UNSERIALIZE_SCALAR(unusedCacheSize);
550 for(int x = 0; x < unusedCacheSize; x++) {
552 arrayParamIn(cp, section, csprintf("unusedCache_%d", x),
553 (uint8_t*)temp,sizeof(T));
554 unusedCache.push_back(temp);
// Re-arm the delayed-start events at their checkpointed fire times
556 Tick fetch_delay = 0, wb_delay = 0;
557 UNSERIALIZE_SCALAR(fetch_delay);
558 UNSERIALIZE_SCALAR(wb_delay);
560 igbe->schedule(fetchDelayEvent, fetch_delay);
562 igbe->schedule(wbDelayEvent, wb_delay);
// True while a descriptor writeback or fetch DMA is still in flight
// (used by the drain logic). NOTE(review): closing brace elided in chunk.
566 virtual bool hasOutstandingEvents() {
567 return wbEvent.scheduled() || fetchEvent.scheduled();
// Descriptor cache for the receive ring: maps the DescCache interface onto
// the RDBA/RDH/RDT/RDLEN registers and handles writing received packets
// into guest memory. NOTE(review): several lines of this class are elided
// in this chunk.
573 class RxDescCache : public DescCache<iGbReg::RxDesc>
576 virtual Addr descBase() const { return igbe->regs.rdba(); }
577 virtual long descHead() const { return igbe->regs.rdh(); }
// RDLEN is in bytes; >> 4 converts to a descriptor count (16-byte entries)
578 virtual long descLen() const { return igbe->regs.rdlen() >> 4; }
579 virtual long descTail() const { return igbe->regs.rdt(); }
580 virtual void updateHead(long h) { igbe->regs.rdh(h); }
581 virtual void enableSm();
582 virtual void fetchAfterWb() {
583 if (!igbe->rxTick && igbe->getState() == SimObject::Running)
590 RxDescCache(IGbE *i, std::string n, int s);
592 /** Write the given packet into the buffer(s) pointed to by the
593 * descriptor and update the book keeping. Should only be called when
594 * there are no dma's pending.
595 * @param packet ethernet packet to write
596 * @return if the packet could be written (there was a free descriptor)
598 void writePacket(EthPacketPtr packet);
599 /** Called by event when dma to write packet is completed
603 /** Check if the dma on the packet has completed.
608 EventWrapper<RxDescCache, &RxDescCache::pktComplete> pktEvent;
610 virtual bool hasOutstandingEvents();
612 virtual void serialize(std::ostream &os);
// Fixed mojibake: "§ion" restored to "&section"
613 virtual void unserialize(Checkpoint *cp, const std::string &section);
615 friend class RxDescCache;
// The device's instance of the rx descriptor cache
617 RxDescCache rxDescCache;
// Descriptor cache for the transmit ring: maps the DescCache interface onto
// the TDBA/TDH/TDT/TDLEN registers, pulls packet data out of guest memory,
// and posts TXDW after writebacks. NOTE(review): several lines of this
// class are elided in this chunk.
619 class TxDescCache : public DescCache<iGbReg::TxDesc>
622 virtual Addr descBase() const { return igbe->regs.tdba(); }
623 virtual long descHead() const { return igbe->regs.tdh(); }
624 virtual long descTail() const { return igbe->regs.tdt(); }
// TDLEN is in bytes; >> 4 converts to a descriptor count (16-byte entries)
625 virtual long descLen() const { return igbe->regs.tdlen() >> 4; }
626 virtual void updateHead(long h) { igbe->regs.tdh(h); }
627 virtual void enableSm();
628 virtual void intAfterWb() const { igbe->postInterrupt(iGbReg::IT_TXDW); }
629 virtual void fetchAfterWb() {
630 if (!igbe->txTick && igbe->getState() == SimObject::Running)
640 TxDescCache(IGbE *i, std::string n, int s);
642 /** Tell the cache to DMA a packet from main memory into its buffer and
643 * return the size of the packet to reserve space in tx fifo.
644 * @return size of the packet
647 void getPacketData(EthPacketPtr p);
649 /** Ask if the packet has been transferred so the state machine can give
651 * @return packet available in descriptor cache
653 bool packetAvailable();
655 /** Ask if we are still waiting for the packet to be transferred.
656 * @return packet still in transit.
658 bool packetWaiting() { return pktWaiting; }
660 /** Ask if this packet is composed of multiple descriptors
661 * so even if we've got data, we need to wait for more before
662 * we can send it out.
663 * @return packet can't be sent out because it's a multi-descriptor
666 bool packetMultiDesc() { return pktMultiDesc;}
668 /** Called by event when dma to write packet is completed
671 EventWrapper<TxDescCache, &TxDescCache::pktComplete> pktEvent;
673 virtual bool hasOutstandingEvents();
675 virtual void serialize(std::ostream &os);
// Fixed mojibake: "§ion" restored to "&section"
676 virtual void unserialize(Checkpoint *cp, const std::string &section);
679 friend class TxDescCache;
// The device's instance of the tx descriptor cache
681 TxDescCache txDescCache;
// Public SimObject/PCI-device interface of the IGbE model.
// NOTE(review): surrounding access specifiers and some declarations are
// elided in this chunk.
684 typedef IGbEParams Params;
// Downcast the stored base-class params to this device's param type
688 return dynamic_cast<const Params *>(_params);
690 IGbE(const Params *params);
// Return the ethernet port named if_name (wiring to the peer interface)
693 virtual EtherInt *getEthPort(const std::string &if_name, int idx);
// Convert a cycle count to Ticks using the device clock period
697 inline Tick ticks(int numCycles) const { return numCycles * clock; }
// PIO register accesses from the CPU
699 virtual Tick read(PacketPtr pkt);
700 virtual Tick write(PacketPtr pkt);
// PCI configuration-space writes
702 virtual Tick writeConfig(PacketPtr pkt);
// Called by the ethernet interface when a packet arrives from the wire
704 bool ethRxPkt(EthPacketPtr packet);
707 virtual void serialize(std::ostream &os);
// Fixed mojibake: "§ion" restored to "&section"
708 virtual void unserialize(Checkpoint *cp, const std::string &section);
// Checkpoint drain/resume support
709 virtual unsigned int drain(Event *de);
710 virtual void resume();
// Ethernet interface glue: forwards wire-side events to the owning IGbE
// device. NOTE(review): member declarations and closing brace are elided
// in this chunk.
714 class IGbEInt : public EtherInt
720 IGbEInt(const std::string &name, IGbE *d)
721 : EtherInt(name), dev(d)
// Hand an incoming packet to the device's rx path
724 virtual bool recvPacket(EthPacketPtr pkt) { return dev->ethRxPkt(pkt); }
// Notify the device that its outgoing packet left the interface
725 virtual void sendDone() { dev->ethTxDone(); }
732 #endif //__DEV_I8254XGBE_HH__