2 * Copyright (c) 2006 The Regents of The University of Michigan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * Device model for Intel's 8254x line of gigabit ethernet controllers.
35 #ifndef __DEV_I8254XGBE_HH__
36 #define __DEV_I8254XGBE_HH__
41 #include "base/cp_annotate.hh"
42 #include "base/inet.hh"
43 #include "dev/etherdevice.hh"
44 #include "dev/etherint.hh"
45 #include "dev/etherpkt.hh"
46 #include "dev/i8254xGBe_defs.hh"
47 #include "dev/pcidev.hh"
48 #include "dev/pktfifo.hh"
49 #include "params/IGbE.hh"
50 #include "sim/eventq.hh"
// Device model for an Intel 8254x-family gigabit Ethernet controller.
// NOTE(review): this text is an extraction of the header with original file
// line numbers fused onto each line and some lines elided; code is kept
// byte-identical here.
54 class IGbE : public EtherDevice
63 // eeprom data, status and control bits
64 int eeOpBits, eeAddrBits, eeDataBits;
65 uint8_t eeOpcode, eeAddr;
66 uint16_t flash[iGbReg::EEPROM_SIZE];
68 // The drain event if we have one
71 // cached parameters from params struct
78 // Packet that we are currently putting into the txFifo
79 EthPacketPtr txPacket;
81 // Should the Rx/Tx state machine tick?
88 // Number of bytes copied from current RX packet
91 // Delays in managing descriptors
92 Tick fetchDelay, wbDelay;
93 Tick fetchCompDelay, wbCompDelay;
94 Tick rxWriteDelay, txReadDelay;
96 // Event and function to deal with RDTR timer expiring
// When the RDTR (receive delay) timer fires: flush dirty RX descriptors and
// post a receive-timer interrupt.
98 rxDescCache.writeback(0);
99 DPRINTF(EthernetIntr, "Posting RXT interrupt because RDTR timer expired\n");
100 postInterrupt(iGbReg::IT_RXT);
103 //friend class EventWrapper<IGbE, &IGbE::rdtrProcess>;
104 EventWrapper<IGbE, &IGbE::rdtrProcess> rdtrEvent;
106 // Event and function to deal with RADV timer expiring
// RADV (receive absolute delay) expiry: same action as RDTR.
108 rxDescCache.writeback(0);
109 DPRINTF(EthernetIntr, "Posting RXT interrupt because RADV timer expired\n");
110 postInterrupt(iGbReg::IT_RXT);
113 //friend class EventWrapper<IGbE, &IGbE::radvProcess>;
114 EventWrapper<IGbE, &IGbE::radvProcess> radvEvent;
116 // Event and function to deal with TADV timer expiring
// TADV (transmit absolute delay) expiry: flush TX descriptors, post TXDW.
118 txDescCache.writeback(0);
119 DPRINTF(EthernetIntr, "Posting TXDW interrupt because TADV timer expired\n");
120 postInterrupt(iGbReg::IT_TXDW);
123 //friend class EventWrapper<IGbE, &IGbE::tadvProcess>;
124 EventWrapper<IGbE, &IGbE::tadvProcess> tadvEvent;
126 // Event and function to deal with TIDV timer expiring
// TIDV (transmit interrupt delay) expiry: same action as TADV.
128 txDescCache.writeback(0);
129 DPRINTF(EthernetIntr, "Posting TXDW interrupt because TIDV timer expired\n");
130 postInterrupt(iGbReg::IT_TXDW);
132 //friend class EventWrapper<IGbE, &IGbE::tidvProcess>;
133 EventWrapper<IGbE, &IGbE::tidvProcess> tidvEvent;
135 // Main event to tick the device
137 //friend class EventWrapper<IGbE, &IGbE::tick>;
138 EventWrapper<IGbE, &IGbE::tick> tickEvent;
// Per-direction state machines advanced from tick().
143 void rxStateMachine();
144 void txStateMachine();
147 /** Write an interrupt into the interrupt pending register and check mask
148 * and interrupt limit timer before sending interrupt to CPU
149 * @param t the type of interrupt we are posting
150 * @param now should we ignore the interrupt limiting timer
152 void postInterrupt(iGbReg::IntTypes t, bool now = false);
154 /** Check and see if changes to the mask register have caused an interrupt
155 * to need to be sent or perhaps removed an interrupt cause.
159 /** Send an interrupt to the cpu
161 void delayIntEvent();
163 // Event to moderate interrupts
164 EventWrapper<IGbE, &IGbE::delayIntEvent> interEvent;
166 /** Clear the interrupt line to the cpu
// Interrupt throttle clock: 1024ns granularity, as in the 8254x ITR register.
170 Tick intClock() { return Clock::Int::ns * 1024; }
172 /** This function is used to restart the clock so it can handle things like
173 * draining and resume in one place. */
176 /** Check if all the draining things that need to occur have occured and
177 * handle the drain event if so.
// The an*() helpers below forward to the CPA (critical path annotation)
// hooks, keyed by this device's MAC address.
181 void anBegin(std::string sm, std::string st, int flags = CPA::FL_NONE) {
182 cpa->hwBegin((CPA::flags)flags, sys, macAddr, sm, st);
185 void anQ(std::string sm, std::string q) {
186 cpa->hwQ(CPA::FL_NONE, sys, macAddr, sm, q, macAddr);
189 void anDq(std::string sm, std::string q) {
190 cpa->hwDq(CPA::FL_NONE, sys, macAddr, sm, q, macAddr);
193 void anPq(std::string sm, std::string q, int num = 1) {
194 cpa->hwPq(CPA::FL_NONE, sys, macAddr, sm, q, macAddr, NULL, num);
/** Annotate a reserve of num entries in queue q of state machine sm.
 * Fix: the original body called cpa->hwPq(), byte-for-byte identical to
 * anPq() above — a copy-paste error.  A reserve annotation must go through
 * the CPA reserve hook, hwRq(), with the same argument list.
 */
197 void anRq(std::string sm, std::string q, int num = 1) {
198 cpa->hwRq(CPA::FL_NONE, sys, macAddr, sm, q, macAddr, NULL, num);
// Remaining CPA annotation helpers: wait-empty / wait-full on a queue.
201 void anWe(std::string sm, std::string q) {
202 cpa->hwWe(CPA::FL_NONE, sys, macAddr, sm, q, macAddr);
205 void anWf(std::string sm, std::string q) {
206 cpa->hwWf(CPA::FL_NONE, sys, macAddr, sm, q, macAddr);
// Ring-geometry hooks supplied by the Rx/Tx subclasses below: base address,
// head/tail/length registers, head update, and post-writeback actions.
214 virtual Addr descBase() const = 0;
215 virtual long descHead() const = 0;
216 virtual long descTail() const = 0;
217 virtual long descLen() const = 0;
218 virtual void updateHead(long h) = 0;
219 virtual void enableSm() = 0;
220 virtual void actionAfterWb() {}
221 virtual void fetchAfterWb() = 0;
// Descriptors the device has consumed but not yet written back (usedCache)
// and descriptors fetched from memory but not yet consumed (unusedCache).
223 std::deque<T*> usedCache;
224 std::deque<T*> unusedCache;
229 // Pointer to the device we cache for
232 // Name of this descriptor cache
235 // How far we've cached
238 // The size of the descriptor cache
241 // How many descriptors we are currently fetching
244 // How many descriptors we are currently writing back
247 // if we wrote back to the end of the descriptor ring and are going
248 // to have to wrap and write more
251 // What the alignment is of the next descriptor writeback
254 /** The packet that is currently being dmad to memory if any
// CPA state-machine/queue names used by the annotation calls below.
260 std::string annSmFetch, annSmWb, annUnusedDescQ, annUsedCacheQ,
261 annUsedDescQ, annUnusedCacheQ, annDescQ;
263 DescCache(IGbE *i, const std::string n, int s)
264 : igbe(i), _name(n), cachePnt(0), size(s), curFetching(0), wbOut(0),
265 pktPtr(NULL), wbDelayEvent(this), fetchDelayEvent(this),
266 fetchEvent(this), wbEvent(this)
// Staging buffer for descriptor fetch DMA, sized to the cache.
268 fetchBuf = new T[size];
277 std::string name() { return _name; }
279 /** If the address/len/head change when we've got descriptors that are
280 * dirty that is very bad. This function checks that we don't and if we
285 if (usedCache.size() > 0 || curFetching || wbOut)
286 panic("Descriptor Address, Length or Head changed. Bad\n");
/** Write used (consumed) descriptors back to guest memory via DMA.
 * @param aMask alignment mask for the writeback point; with a non-zero
 * wbAlignment the count is rounded down to the aligned boundary
 * (max_to_wb & ~wbAlignment), while 0 imposes no restriction.
 */
291 void writeback(Addr aMask)
293 int curHead = descHead();
294 int max_to_wb = usedCache.size();
296 // Check if this writeback is less restrictive than the previous
297 // and if so setup another one immediately following it
299 if (aMask < wbAlignment) {
303 DPRINTF(EthernetDesc, "Writing back already in process, returning\n");
311 DPRINTF(EthernetDesc, "Writing back descriptors head: %d tail: "
312 "%d len: %d cachePnt: %d max_to_wb: %d descleft: %d\n",
313 curHead, descTail(), descLen(), cachePnt, max_to_wb,
// Clamp at the end of the ring so a single DMA never wraps.
316 if (max_to_wb + curHead >= descLen()) {
317 max_to_wb = descLen() - curHead;
319 // this is by definition aligned correctly
320 } else if (wbAlignment != 0) {
321 // align the wb point to the mask
322 max_to_wb = max_to_wb & ~wbAlignment;
325 DPRINTF(EthernetDesc, "Writing back %d descriptors\n", max_to_wb);
// Nothing writable yet (alignment not reached): annotate and wait.
327 if (max_to_wb <= 0) {
328 if (usedCache.size())
329 igbe->anBegin(annSmWb, "Wait Alignment", CPA::FL_WAIT);
331 igbe->anWe(annSmWb, annUsedCacheQ);
// Model the descriptor-writeback latency before issuing the DMA.
337 assert(!wbDelayEvent.scheduled());
338 igbe->schedule(wbDelayEvent, curTick + igbe->wbDelay);
339 igbe->anBegin(annSmWb, "Prepare Writeback Desc");
// Second stage (writeback1): runs after wbDelay has elapsed.
344 // If we're draining delay issuing this DMA
345 if (igbe->getState() != SimObject::Running) {
346 igbe->schedule(wbDelayEvent, curTick + igbe->wbDelay);
350 DPRINTF(EthernetDesc, "Begining DMA of %d descriptors\n", wbOut);
// Copy the used descriptors into the DMA staging buffer.
352 for (int x = 0; x < wbOut; x++) {
353 assert(usedCache.size());
354 memcpy(&wbBuf[x], usedCache[x], sizeof(T));
355 igbe->anPq(annSmWb, annUsedCacheQ);
356 igbe->anPq(annSmWb, annDescQ);
357 igbe->anQ(annSmWb, annUsedDescQ);
361 igbe->anBegin(annSmWb, "Writeback Desc DMA");
// DMA to the ring slot at the current head; wbEvent fires on completion.
364 igbe->dmaWrite(igbe->platform->pciToDma(descBase() + descHead() * sizeof(T)),
365 wbOut * sizeof(T), &wbEvent, (uint8_t*)wbBuf,
368 EventWrapper<DescCache, &DescCache::writeback1> wbDelayEvent;
370 /** Fetch a chunk of descriptors into the descriptor cache.
371 * Calls fetchComplete when the memory system returns the data
374 void fetchDescriptors()
379 DPRINTF(EthernetDesc, "Currently fetching %d descriptors, returning\n", curFetching);
// How many descriptors are available between our fetch point (cachePnt)
// and the tail, stopping at the end of the ring if the tail has wrapped.
383 if (descTail() >= cachePnt)
384 max_to_fetch = descTail() - cachePnt;
386 max_to_fetch = descLen() - cachePnt;
// Remaining room in the cache limits the fetch size.
388 size_t free_cache = size - usedCache.size() - unusedCache.size();
391 igbe->anWe(annSmFetch, annUnusedDescQ);
393 igbe->anPq(annSmFetch, annUnusedDescQ, max_to_fetch);
397 igbe->anWf(annSmFetch, annDescQ);
399 igbe->anRq(annSmFetch, annDescQ, free_cache);
402 max_to_fetch = std::min(max_to_fetch, free_cache);
405 DPRINTF(EthernetDesc, "Fetching descriptors head: %d tail: "
406 "%d len: %d cachePnt: %d max_to_fetch: %d descleft: %d\n",
407 descHead(), descTail(), descLen(), cachePnt,
408 max_to_fetch, descLeft());
411 if (max_to_fetch == 0)
414 // So we don't have two descriptor fetches going on at once
415 curFetching = max_to_fetch;
// Model fetch latency, then issue the DMA in fetchDescriptors1.
417 assert(!fetchDelayEvent.scheduled());
418 igbe->schedule(fetchDelayEvent, curTick + igbe->fetchDelay);
419 igbe->anBegin(annSmFetch, "Prepare Fetch Desc");
// Second stage: issue the descriptor-read DMA after fetchDelay.
422 void fetchDescriptors1()
424 // If we're draining delay issuing this DMA
425 if (igbe->getState() != SimObject::Running) {
426 igbe->schedule(fetchDelayEvent, curTick + igbe->fetchDelay);
430 igbe->anBegin(annSmFetch, "Fetch Desc");
432 DPRINTF(EthernetDesc, "Fetching descriptors at %#x (%#x), size: %#x\n",
433 descBase() + cachePnt * sizeof(T),
434 igbe->platform->pciToDma(descBase() + cachePnt * sizeof(T)),
435 curFetching * sizeof(T));
// Read curFetching descriptors starting at cachePnt; fetchEvent fires
// when the memory system completes.
437 igbe->dmaRead(igbe->platform->pciToDma(descBase() + cachePnt * sizeof(T)),
438 curFetching * sizeof(T), &fetchEvent, (uint8_t*)fetchBuf,
439 igbe->fetchCompDelay);
442 EventWrapper<DescCache, &DescCache::fetchDescriptors1> fetchDelayEvent;
444 /** Called by event when dma to read descriptors is completed
449 igbe->anBegin(annSmFetch, "Fetch Complete");
// Move each fetched descriptor from the staging buffer into unusedCache.
450 for (int x = 0; x < curFetching; x++) {
452 memcpy(newDesc, &fetchBuf[x], sizeof(T));
453 unusedCache.push_back(newDesc);
454 igbe->anDq(annSmFetch, annUnusedDescQ);
455 igbe->anQ(annSmFetch, annUnusedCacheQ);
456 igbe->anQ(annSmFetch, annDescQ);
// Advance the fetch pointer, wrapping at the end of the ring.
461 int oldCp = cachePnt;
464 cachePnt += curFetching;
465 assert(cachePnt <= descLen());
466 if (cachePnt == descLen())
471 DPRINTF(EthernetDesc, "Fetching complete cachePnt %d -> %d\n",
// Annotate whether we are now waiting on the guest (ring empty) or on
// ourselves (cache full).
474 if ((descTail() >= cachePnt ? (descTail() - cachePnt) : (descLen() -
477 igbe->anWe(annSmFetch, annUnusedDescQ);
478 } else if (!(size - usedCache.size() - unusedCache.size())) {
479 igbe->anWf(annSmFetch, annDescQ);
481 igbe->anBegin(annSmFetch, "Wait", CPA::FL_WAIT);
488 EventWrapper<DescCache, &DescCache::fetchComplete> fetchEvent;
490 /** Called by event when dma to writeback descriptors is completed
495 igbe->anBegin(annSmWb, "Finish Writeback");
497 long curHead = descHead();
499 long oldHead = curHead;
// Retire the wbOut descriptors just written back.
502 for (int x = 0; x < wbOut; x++) {
503 assert(usedCache.size());
505 usedCache.pop_front();
507 igbe->anDq(annSmWb, annUsedCacheQ);
508 igbe->anDq(annSmWb, annDescQ);
// Advance the head register, wrapping at the end of the ring.
514 if (curHead >= descLen())
515 curHead -= descLen();
520 DPRINTF(EthernetDesc, "Writeback complete curHead %d -> %d\n",
523 // If we still have more to wb, call wb now
527 DPRINTF(EthernetDesc, "Writeback has more todo\n");
528 writeback(wbAlignment);
533 if (usedCache.size())
534 igbe->anBegin(annSmWb, "Wait", CPA::FL_WAIT);
536 igbe->anWe(annSmWb, annUsedCacheQ);
542 EventWrapper<DescCache, &DescCache::wbComplete> wbEvent;
544 /* Return the number of descriptors left in the ring, so the device has
545 * a way to figure out if it needs to interrupt.
549 int left = unusedCache.size();
550 if (cachePnt >= descTail())
551 left += (descLen() - cachePnt + descTail());
553 left += (descTail() - cachePnt);
558 /* Return the number of descriptors used and not written back.
560 int descUsed() const { return usedCache.size(); }
562 /* Return the number of cache unused descriptors we have. */
563 int descUnused() const {return unusedCache.size(); }
565 /* Get into a state where the descriptor address/head/etc could be
569 DPRINTF(EthernetDesc, "Reseting descriptor cache\n");
// Free both cached descriptor lists.
// NOTE(review): the body line deleting usedCache[x] (original line 571)
// is elided in this extraction.
570 for (int x = 0; x < usedCache.size(); x++)
572 for (int x = 0; x < unusedCache.size(); x++)
573 delete unusedCache[x];
// Checkpoint the cache state: scalars, both descriptor lists as raw bytes,
// and the pending delay-event times so they can be rescheduled on restore.
582 virtual void serialize(std::ostream &os)
584 SERIALIZE_SCALAR(cachePnt);
585 SERIALIZE_SCALAR(curFetching);
586 SERIALIZE_SCALAR(wbOut);
587 SERIALIZE_SCALAR(moreToWb);
588 SERIALIZE_SCALAR(wbAlignment);
590 int usedCacheSize = usedCache.size();
591 SERIALIZE_SCALAR(usedCacheSize);
592 for(int x = 0; x < usedCacheSize; x++) {
593 arrayParamOut(os, csprintf("usedCache_%d", x),
594 (uint8_t*)usedCache[x],sizeof(T));
597 int unusedCacheSize = unusedCache.size();
598 SERIALIZE_SCALAR(unusedCacheSize);
599 for(int x = 0; x < unusedCacheSize; x++) {
600 arrayParamOut(os, csprintf("unusedCache_%d", x),
601 (uint8_t*)unusedCache[x],sizeof(T));
// A delay time of 0 means "not scheduled" on unserialize.
604 Tick fetch_delay = 0, wb_delay = 0;
605 if (fetchDelayEvent.scheduled())
606 fetch_delay = fetchDelayEvent.when();
607 SERIALIZE_SCALAR(fetch_delay);
608 if (wbDelayEvent.scheduled())
609 wb_delay = wbDelayEvent.when();
610 SERIALIZE_SCALAR(wb_delay);
// Restore the cache from a checkpoint; mirror image of serialize() above.
// NOTE(review): '§ion' below is a mis-encoded '&section' (HTML entity
// '&sect;' artifact) — restore to 'const std::string &section'.
615 virtual void unserialize(Checkpoint *cp, const std::string §ion)
617 UNSERIALIZE_SCALAR(cachePnt);
618 UNSERIALIZE_SCALAR(curFetching);
619 UNSERIALIZE_SCALAR(wbOut);
620 UNSERIALIZE_SCALAR(moreToWb);
621 UNSERIALIZE_SCALAR(wbAlignment);
624 UNSERIALIZE_SCALAR(usedCacheSize);
626 for(int x = 0; x < usedCacheSize; x++) {
628 arrayParamIn(cp, section, csprintf("usedCache_%d", x),
629 (uint8_t*)temp,sizeof(T));
630 usedCache.push_back(temp);
634 UNSERIALIZE_SCALAR(unusedCacheSize);
635 for(int x = 0; x < unusedCacheSize; x++) {
637 arrayParamIn(cp, section, csprintf("unusedCache_%d", x),
638 (uint8_t*)temp,sizeof(T));
639 unusedCache.push_back(temp);
// Re-arm the delay events that were pending at checkpoint time.
641 Tick fetch_delay = 0, wb_delay = 0;
642 UNSERIALIZE_SCALAR(fetch_delay);
643 UNSERIALIZE_SCALAR(wb_delay);
645 igbe->schedule(fetchDelayEvent, fetch_delay);
647 igbe->schedule(wbDelayEvent, wb_delay);
// True while any writeback or fetch DMA completion is still pending.
651 virtual bool hasOutstandingEvents() {
652 return wbEvent.scheduled() || fetchEvent.scheduled();
// RX specialization: ring geometry comes from the RDBA/RDH/RDT/RDLEN
// registers.  RDLEN is in bytes; >> 4 converts to a descriptor count
// (16-byte RX descriptors).
658 class RxDescCache : public DescCache<iGbReg::RxDesc>
661 virtual Addr descBase() const { return igbe->regs.rdba(); }
662 virtual long descHead() const { return igbe->regs.rdh(); }
663 virtual long descLen() const { return igbe->regs.rdlen() >> 4; }
664 virtual long descTail() const { return igbe->regs.rdt(); }
665 virtual void updateHead(long h) { igbe->regs.rdh(h); }
666 virtual void enableSm();
667 virtual void fetchAfterWb() {
668 if (!igbe->rxTick && igbe->getState() == SimObject::Running)
674 /** Variable to head with header/data completion events */
677 /** Bytes of packet that have been copied, so we know when to set EOP */
681 RxDescCache(IGbE *i, std::string n, int s);
683 /** Write the given packet into the buffer(s) pointed to by the
684 * descriptor and update the book keeping. Should only be called when
685 * there are no dma's pending.
686 * @param packet ethernet packet to write
687 * @param pkt_offset bytes already copied from the packet to memory
688 * @return pkt_offset + number of bytes copied during this call
690 int writePacket(EthPacketPtr packet, int pkt_offset);
692 /** Called by event when dma to write packet is completed
696 /** Check if the dma on the packet has completed and RX state machine
701 EventWrapper<RxDescCache, &RxDescCache::pktComplete> pktEvent;
703 // Event to handle issuing header and data write at the same time
704 // and only calling pktComplete() when both are completed
706 EventWrapper<RxDescCache, &RxDescCache::pktSplitDone> pktHdrEvent;
707 EventWrapper<RxDescCache, &RxDescCache::pktSplitDone> pktDataEvent;
709 virtual bool hasOutstandingEvents();
// NOTE(review): '§ion' is a mis-encoded '&section' (garbled extraction).
711 virtual void serialize(std::ostream &os);
712 virtual void unserialize(Checkpoint *cp, const std::string §ion);
714 friend class RxDescCache;
716 RxDescCache rxDescCache;
// TX specialization: ring geometry from TDBA/TDH/TDT/TDLEN (TDLEN in bytes,
// >> 4 converts to 16-byte descriptor count).
718 class TxDescCache : public DescCache<iGbReg::TxDesc>
721 virtual Addr descBase() const { return igbe->regs.tdba(); }
722 virtual long descHead() const { return igbe->regs.tdh(); }
723 virtual long descTail() const { return igbe->regs.tdt(); }
724 virtual long descLen() const { return igbe->regs.tdlen() >> 4; }
725 virtual void updateHead(long h) { igbe->regs.tdh(h); }
726 virtual void enableSm();
727 virtual void actionAfterWb();
728 virtual void fetchAfterWb() {
729 if (!igbe->txTick && igbe->getState() == SimObject::Running)
// Descriptor-done completion writeback target (see completionWriteback()).
739 Addr completionAddress;
740 bool completionEnabled;
// TSO (TCP segmentation offload) bookkeeping for the packet in progress.
751 Addr tsoPktPayloadBytes;
752 bool tsoLoadedHeader;
753 bool tsoPktHasHeader;
754 uint8_t tsoHeader[256];
755 Addr tsoDescBytesUsed;
760 TxDescCache(IGbE *i, std::string n, int s);
762 /** Tell the cache to DMA a packet from main memory into its buffer and
763 * return the size the of the packet to reserve space in tx fifo.
764 * @return size of the packet
766 int getPacketSize(EthPacketPtr p);
767 void getPacketData(EthPacketPtr p);
768 void processContextDesc();
770 /** Return the number of descriptors in a cache block for threshold
/** Convert a descriptor count into the number of whole cache blocks it
 * occupies, for threshold operations.  Descriptors per block is
 * cacheBlockSize() / sizeof(TxDesc), so divide by that quotient.
 * Fix: the original evaluated left-to-right as
 * (num_desc / cacheBlockSize()) / sizeof(TxDesc), i.e. divided by the
 * PRODUCT of block size and descriptor size, grossly understating the
 * block count; parenthesize the divisor.
 */
773 int descInBlock(int num_desc) { return num_desc /
774 (igbe->cacheBlockSize() / sizeof(iGbReg::TxDesc)); }
775 /** Ask if the packet has been transferred so the state machine can give
777 * @return packet available in descriptor cache
779 bool packetAvailable();
781 /** Ask if we are still waiting for the packet to be transferred.
782 * @return packet still in transit.
784 bool packetWaiting() { return pktWaiting; }
786 /** Ask if this packet is composed of multiple descriptors
787 * so even if we've got data, we need to wait for more before
788 * we can send it out.
789 * @return packet can't be sent out because it's a multi-descriptor
792 bool packetMultiDesc() { return pktMultiDesc;}
794 /** Called by event when dma to write packet is completed
797 EventWrapper<TxDescCache, &TxDescCache::pktComplete> pktEvent;
799 void headerComplete();
800 EventWrapper<TxDescCache, &TxDescCache::headerComplete> headerEvent;
// Record where (and whether) descriptor-completion status should be
// written back; consumed elsewhere in the TX path.
803 void completionWriteback(Addr a, bool enabled) {
804 DPRINTF(EthernetDesc, "Completion writeback Addr: %#x enabled: %d\n",
806 completionAddress = a;
807 completionEnabled = enabled;
810 virtual bool hasOutstandingEvents();
// Callback for the completion-writeback DMA; nothing to do but log.
812 void nullCallback() { DPRINTF(EthernetDesc, "Completion writeback complete\n"); }
813 EventWrapper<TxDescCache, &TxDescCache::nullCallback> nullEvent;
// NOTE(review): '§ion' is a mis-encoded '&section' (garbled extraction).
815 virtual void serialize(std::ostream &os);
816 virtual void unserialize(Checkpoint *cp, const std::string §ion);
819 friend class TxDescCache;
821 TxDescCache txDescCache;
// Public SimObject / PciDev interface.
824 typedef IGbEParams Params;
828 return dynamic_cast<const Params *>(_params);
830 IGbE(const Params *params);
834 virtual EtherInt *getEthPort(const std::string &if_name, int idx);
// Convert a cycle count to simulator Ticks using the device clock.
838 inline Tick ticks(int numCycles) const { return numCycles * clock; }
// PIO register accesses and PCI config-space writes.
840 virtual Tick read(PacketPtr pkt);
841 virtual Tick write(PacketPtr pkt);
843 virtual Tick writeConfig(PacketPtr pkt);
// Called by the ethernet interface when a packet arrives from the wire.
845 bool ethRxPkt(EthPacketPtr packet);
// NOTE(review): '§ion' is a mis-encoded '&section' (garbled extraction).
848 virtual void serialize(std::ostream &os);
849 virtual void unserialize(Checkpoint *cp, const std::string §ion);
850 virtual unsigned int drain(Event *de);
851 virtual void resume();
// Thin EtherInt port that forwards wire events to the owning IGbE device.
855 class IGbEInt : public EtherInt
861 IGbEInt(const std::string &name, IGbE *d)
862 : EtherInt(name), dev(d)
// Delegate receive and transmit-done notifications to the device.
865 virtual bool recvPacket(EthPacketPtr pkt) { return dev->ethRxPkt(pkt); }
866 virtual void sendDone() { dev->ethTxDone(); }
873 #endif //__DEV_I8254XGBE_HH__