2 * Copyright (c) 2006 The Regents of The University of Michigan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * Device model for Intel's 8254x line of gigabit ethernet controllers.
35 #ifndef __DEV_I8254XGBE_HH__
36 #define __DEV_I8254XGBE_HH__
41 #include "base/inet.hh"
42 #include "base/statistics.hh"
43 #include "dev/etherint.hh"
44 #include "dev/etherpkt.hh"
45 #include "dev/i8254xGBe_defs.hh"
46 #include "dev/pcidev.hh"
47 #include "dev/pktfifo.hh"
48 #include "sim/eventq.hh"
52 class IGbE : public PciDev
60 // eeprom data, status and control bits
61 int eeOpBits, eeAddrBits, eeDataBits;
62 uint8_t eeOpcode, eeAddr;
63 uint16_t flash[iGbReg::EEPROM_SIZE];
65 // The drain event if we have one
68 // cached parameters from params struct
75 // Packet that we are currently putting into the txFifo
76 EthPacketPtr txPacket;
// Should the Rx/Tx state machine tick?
85 // Event and function to deal with RDTR timer expiring
87 rxDescCache.writeback(0);
88 DPRINTF(EthernetIntr, "Posting RXT interrupt because RDTR timer expired\n");
89 postInterrupt(iGbReg::IT_RXT, true);
92 //friend class EventWrapper<IGbE, &IGbE::rdtrProcess>;
93 EventWrapper<IGbE, &IGbE::rdtrProcess> rdtrEvent;
95 // Event and function to deal with RADV timer expiring
97 rxDescCache.writeback(0);
98 DPRINTF(EthernetIntr, "Posting RXT interrupt because RADV timer expired\n");
99 postInterrupt(iGbReg::IT_RXT, true);
102 //friend class EventWrapper<IGbE, &IGbE::radvProcess>;
103 EventWrapper<IGbE, &IGbE::radvProcess> radvEvent;
105 // Event and function to deal with TADV timer expiring
107 txDescCache.writeback(0);
108 DPRINTF(EthernetIntr, "Posting TXDW interrupt because TADV timer expired\n");
109 postInterrupt(iGbReg::IT_TXDW, true);
112 //friend class EventWrapper<IGbE, &IGbE::tadvProcess>;
113 EventWrapper<IGbE, &IGbE::tadvProcess> tadvEvent;
115 // Event and function to deal with TIDV timer expiring
117 txDescCache.writeback(0);
118 DPRINTF(EthernetIntr, "Posting TXDW interrupt because TIDV timer expired\n");
119 postInterrupt(iGbReg::IT_TXDW, true);
121 //friend class EventWrapper<IGbE, &IGbE::tidvProcess>;
122 EventWrapper<IGbE, &IGbE::tidvProcess> tidvEvent;
124 // Main event to tick the device
126 //friend class EventWrapper<IGbE, &IGbE::tick>;
127 EventWrapper<IGbE, &IGbE::tick> tickEvent;
130 void rxStateMachine();
131 void txStateMachine();
134 /** Write an interrupt into the interrupt pending register and check mask
135 * and interrupt limit timer before sending interrupt to CPU
136 * @param t the type of interrupt we are posting
137 * @param now should we ignore the interrupt limiting timer
139 void postInterrupt(iGbReg::IntTypes t, bool now = false);
141 /** Check and see if changes to the mask register have caused an interrupt
142 * to need to be sent or perhaps removed an interrupt cause.
146 /** Send an interrupt to the cpu
149 // Event to moderate interrupts
150 EventWrapper<IGbE, &IGbE::cpuPostInt> interEvent;
/** Clear the interrupt line to the cpu
156 Tick intClock() { return Clock::Int::ns * 1024; }
158 /** This function is used to restart the clock so it can handle things like
159 * draining and resume in one place. */
/** Check if all the draining things that need to occur have occurred and
163 * handle the drain event if so.
171 virtual Addr descBase() const = 0;
172 virtual long descHead() const = 0;
173 virtual long descTail() const = 0;
174 virtual long descLen() const = 0;
175 virtual void updateHead(long h) = 0;
176 virtual void enableSm() = 0;
177 virtual void intAfterWb() const {}
179 std::deque<T*> usedCache;
180 std::deque<T*> unusedCache;
185 // Pointer to the device we cache for
188 // Name of this descriptor cache
191 // How far we've cached
194 // The size of the descriptor cache
197 // How many descriptors we are currently fetching
200 // How many descriptors we are currently writing back
203 // if the we wrote back to the end of the descriptor ring and are going
204 // to have to wrap and write more
207 // What the alignment is of the next descriptor writeback
210 /** The packet that is currently being dmad to memory if any
/** Construct a descriptor cache of s entries for device i.
 * NOTE(review): lines are missing from this excerpt between the
 * initializer list and the body (at least the opening brace and,
 * presumably, the writeback-buffer allocation) — confirm against the
 * full source.
 * @param i owning device
 * @param n name used for tracing/serialization
 * @param s capacity of the cache, in descriptors
 */
DescCache(IGbE *i, const std::string n, int s)
    : igbe(i), _name(n), cachePnt(0), size(s), curFetching(0), wbOut(0),
      pktPtr(NULL), fetchEvent(this), wbEvent(this)
    // staging buffer that descriptor-fetch DMAs read into
    fetchBuf = new T[size];
228 std::string name() { return _name; }
230 /** If the address/len/head change when we've got descriptors that are
231 * dirty that is very bad. This function checks that we don't and if we
236 if (usedCache.size() > 0 || curFetching || wbOut)
237 panic("Descriptor Address, Length or Head changed. Bad\n");
/** DMA the used (processed) descriptors back to the ring in guest
 * memory, starting at the current head pointer, and retire them from
 * usedCache; wbEvent fires wbComplete() when the DMA finishes.
 * NOTE(review): several interior lines (braces, returns, wbOut
 * assignment) are missing from this excerpt — comments below annotate
 * only the visible statements.
 * @param aMask alignment mask for the writeback point; 0 = no
 *              alignment restriction
 */
void writeback(Addr aMask)
    int curHead = descHead();
    int max_to_wb = usedCache.size();

    DPRINTF(EthernetDesc, "Writing back descriptors head: %d tail: "
            "%d len: %d cachePnt: %d max_to_wb: %d descleft: %d\n",
            curHead, descTail(), descLen(), cachePnt, max_to_wb,

    // Check if this writeback is less restrictive than the previous
    // and if so setup another one immediately following it
    if (wbOut && (aMask < wbAlignment)) {
        DPRINTF(EthernetDesc, "Writing back already in process, returning\n");

    // clamp to the end of the ring; the wrapped remainder is handled by
    // a follow-up writeback (see moreToWb) — presumably; confirm in .cc
    if (max_to_wb + curHead >= descLen()) {
        max_to_wb = descLen() - curHead;
        // this is by definition aligned correctly
    } else if (aMask != 0) {
        // align the wb point to the mask
        max_to_wb = max_to_wb & ~aMask;

    DPRINTF(EthernetDesc, "Writing back %d descriptors\n", max_to_wb);

    // nothing to write back, or a writeback DMA is already outstanding
    if (max_to_wb <= 0 || wbOut)

    // copy the outgoing descriptors into the DMA staging buffer...
    for (int x = 0; x < wbOut; x++)
        memcpy(&wbBuf[x], usedCache[x], sizeof(T));

    // ...and pop them from the used cache
    for (int x = 0; x < wbOut; x++) {
        assert(usedCache.size());
        usedCache.pop_front();

    igbe->dmaWrite(igbe->platform->pciToDma(descBase() + curHead * sizeof(T)),
                   wbOut * sizeof(T), &wbEvent, (uint8_t*)wbBuf);
296 /** Fetch a chunk of descriptors into the descriptor cache.
297 * Calls fetchComplete when the memory system returns the data
/** Fetch a chunk of descriptors from the ring in guest memory into the
 * local cache via DMA; fetchEvent fires fetchComplete() when the data
 * arrives.
 * NOTE(review): interior lines (the declaration of max_to_fetch, the
 * else keyword, braces and returns) are missing from this excerpt.
 */
void fetchDescriptors()
    // descriptors available between cachePnt and the tail; the second
    // assignment is the (elided) else branch for the wrapped case
    if (descTail() >= cachePnt)
        max_to_fetch = descTail() - cachePnt;
        max_to_fetch = descLen() - cachePnt;

    // never fetch more than there is free room for in the cache
    max_to_fetch = std::min(max_to_fetch, (size - usedCache.size() -
                                           unusedCache.size()));

    DPRINTF(EthernetDesc, "Fetching descriptors head: %d tail: "
            "%d len: %d cachePnt: %d max_to_fetch: %d descleft: %d\n",
            descHead(), descTail(), descLen(), cachePnt,
            max_to_fetch, descLeft());

    // nothing to fetch, or a fetch DMA is already outstanding
    if (max_to_fetch == 0 || curFetching)

    // So we don't have two descriptor fetches going on at once
    curFetching = max_to_fetch;

    DPRINTF(EthernetDesc, "Fetching descriptors at %#x (%#x), size: %#x\n",
            descBase() + cachePnt * sizeof(T),
            igbe->platform->pciToDma(descBase() + cachePnt * sizeof(T)),
            curFetching * sizeof(T));

    igbe->dmaRead(igbe->platform->pciToDma(descBase() + cachePnt * sizeof(T)),
                  curFetching * sizeof(T), &fetchEvent, (uint8_t*)fetchBuf);
335 /** Called by event when dma to read descriptors is completed
340 for (int x = 0; x < curFetching; x++) {
342 memcpy(newDesc, &fetchBuf[x], sizeof(T));
343 unusedCache.push_back(newDesc);
347 int oldCp = cachePnt;
350 cachePnt += curFetching;
351 assert(cachePnt <= descLen());
352 if (cachePnt == descLen())
357 DPRINTF(EthernetDesc, "Fetching complete cachePnt %d -> %d\n",
364 EventWrapper<DescCache, &DescCache::fetchComplete> fetchEvent;
366 /** Called by event when dma to writeback descriptors is completed
370 long curHead = descHead();
372 long oldHead = curHead;
378 if (curHead >= descLen())
379 curHead -= descLen();
384 DPRINTF(EthernetDesc, "Writeback complete curHead %d -> %d\n",
387 // If we still have more to wb, call wb now
389 DPRINTF(EthernetDesc, "Writeback has more todo\n");
390 writeback(wbAlignment);
397 EventWrapper<DescCache, &DescCache::wbComplete> wbEvent;
399 /* Return the number of descriptors left in the ring, so the device has
400 * a way to figure out if it needs to interrupt.
404 int left = unusedCache.size();
405 if (cachePnt - descTail() >= 0)
406 left += (cachePnt - descTail());
408 left += (descTail() - cachePnt);
413 /* Return the number of descriptors used and not written back.
415 int descUsed() const { return usedCache.size(); }
417 /* Return the number of cache unused descriptors we have. */
418 int descUnused() const {return unusedCache.size(); }
/* Get into a state where the descriptor address/head/etc could be
424 DPRINTF(EthernetDesc, "Reseting descriptor cache\n");
425 for (int x = 0; x < usedCache.size(); x++)
427 for (int x = 0; x < unusedCache.size(); x++)
428 delete unusedCache[x];
/** Checkpoint the descriptor cache: scalar bookkeeping followed by a
 * raw byte dump of every cached (used and unused) descriptor.
 * NOTE(review): loop-closing braces (and possibly more state) are
 * among the lines missing from this excerpt.
 */
virtual void serialize(std::ostream &os)
    SERIALIZE_SCALAR(cachePnt);
    SERIALIZE_SCALAR(curFetching);
    SERIALIZE_SCALAR(wbOut);
    SERIALIZE_SCALAR(moreToWb);
    SERIALIZE_SCALAR(wbAlignment);

    // each cached descriptor is written as a raw byte array, keyed by index
    int usedCacheSize = usedCache.size();
    SERIALIZE_SCALAR(usedCacheSize);
    for(int x = 0; x < usedCacheSize; x++) {
        arrayParamOut(os, csprintf("usedCache_%d", x),
                      (uint8_t*)usedCache[x],sizeof(T));

    int unusedCacheSize = unusedCache.size();
    SERIALIZE_SCALAR(unusedCacheSize);
    for(int x = 0; x < unusedCacheSize; x++) {
        arrayParamOut(os, csprintf("unusedCache_%d", x),
                      (uint8_t*)unusedCache[x],sizeof(T));
/** Restore the state written by serialize(): scalar bookkeeping, then
 * the raw bytes of each cached descriptor.
 * NOTE(review): the declarations of usedCacheSize/unusedCacheSize and
 * of the per-entry temporary (temp), plus loop braces, are among the
 * lines missing from this excerpt. The signature's mangled "§ion" has
 * been restored to "&section" (encoding artifact).
 */
virtual void unserialize(Checkpoint *cp, const std::string &section)
    UNSERIALIZE_SCALAR(cachePnt);
    UNSERIALIZE_SCALAR(curFetching);
    UNSERIALIZE_SCALAR(wbOut);
    UNSERIALIZE_SCALAR(moreToWb);
    UNSERIALIZE_SCALAR(wbAlignment);

    UNSERIALIZE_SCALAR(usedCacheSize);
    // re-populate the used cache, one raw descriptor at a time
    for(int x = 0; x < usedCacheSize; x++) {
        arrayParamIn(cp, section, csprintf("usedCache_%d", x),
                     (uint8_t*)temp,sizeof(T));
        usedCache.push_back(temp);

    UNSERIALIZE_SCALAR(unusedCacheSize);
    // likewise for descriptors that were fetched but never used
    for(int x = 0; x < unusedCacheSize; x++) {
        arrayParamIn(cp, section, csprintf("unusedCache_%d", x),
                     (uint8_t*)temp,sizeof(T));
        unusedCache.push_back(temp);
487 virtual bool hasOutstandingEvents() {
488 return wbEvent.scheduled() || fetchEvent.scheduled();
494 class RxDescCache : public DescCache<iGbReg::RxDesc>
497 virtual Addr descBase() const { return igbe->regs.rdba(); }
498 virtual long descHead() const { return igbe->regs.rdh(); }
499 virtual long descLen() const { return igbe->regs.rdlen() >> 4; }
500 virtual long descTail() const { return igbe->regs.rdt(); }
501 virtual void updateHead(long h) { igbe->regs.rdh(h); }
502 virtual void enableSm();
507 RxDescCache(IGbE *i, std::string n, int s);
509 /** Write the given packet into the buffer(s) pointed to by the
510 * descriptor and update the book keeping. Should only be called when
511 * there are no dma's pending.
512 * @param packet ethernet packet to write
513 * @return if the packet could be written (there was a free descriptor)
515 bool writePacket(EthPacketPtr packet);
516 /** Called by event when dma to write packet is completed
520 /** Check if the dma on the packet has completed.
525 EventWrapper<RxDescCache, &RxDescCache::pktComplete> pktEvent;
/** @return whether this RX cache still has events pending — defined in
 * the .cc; presumably includes pktEvent in addition to the base
 * class's fetch/writeback events (confirm). */
virtual bool hasOutstandingEvents();

/** Checkpoint / restore the RX-specific descriptor cache state.
 * NOTE(review): "§ion" restored to "&section" (encoding artifact). */
virtual void serialize(std::ostream &os);
virtual void unserialize(Checkpoint *cp, const std::string &section);
532 friend class RxDescCache;
534 RxDescCache rxDescCache;
536 class TxDescCache : public DescCache<iGbReg::TxDesc>
539 virtual Addr descBase() const { return igbe->regs.tdba(); }
540 virtual long descHead() const { return igbe->regs.tdh(); }
541 virtual long descTail() const { return igbe->regs.tdt(); }
542 virtual long descLen() const { return igbe->regs.tdlen() >> 4; }
543 virtual void updateHead(long h) { igbe->regs.tdh(h); }
544 virtual void enableSm();
545 virtual void intAfterWb() const { igbe->postInterrupt(iGbReg::IT_TXDW);}
552 TxDescCache(IGbE *i, std::string n, int s);
554 /** Tell the cache to DMA a packet from main memory into its buffer and
* return the size of the packet to reserve space in the tx fifo.
556 * @return size of the packet
559 void getPacketData(EthPacketPtr p);
/** Ask if the packet has been transferred so the state machine can give
563 * @return packet available in descriptor cache
565 bool packetAvailable();
/** Ask if we are still waiting for the packet to be transferred.
568 * @return packet still in transit.
570 bool packetWaiting() { return pktWaiting; }
572 /** Called by event when dma to write packet is completed
575 EventWrapper<TxDescCache, &TxDescCache::pktComplete> pktEvent;
/** @return whether this TX cache still has events pending — defined in
 * the .cc; presumably includes pktEvent in addition to the base
 * class's fetch/writeback events (confirm). */
virtual bool hasOutstandingEvents();

/** Checkpoint / restore the TX-specific descriptor cache state.
 * NOTE(review): "§ion" restored to "&section" (encoding artifact). */
virtual void serialize(std::ostream &os);
virtual void unserialize(Checkpoint *cp, const std::string &section);
583 friend class TxDescCache;
585 TxDescCache txDescCache;
588 struct Params : public PciDev::Params
590 Net::EthAddr hardware_address;
591 bool use_flow_control;
594 int rx_desc_cache_size;
595 int tx_desc_cache_size;
599 IGbE(Params *params);
603 inline Tick cycles(int numCycles) const { return numCycles * clock; }
605 virtual Tick read(PacketPtr pkt);
606 virtual Tick write(PacketPtr pkt);
608 virtual Tick writeConfig(PacketPtr pkt);
610 bool ethRxPkt(EthPacketPtr packet);
613 void setEthInt(IGbEInt *i) { assert(!etherInt); etherInt = i; }
616 const Params *params() const {return (const Params *)_params; }
/** Checkpoint / restore the full device state (defined in the .cc).
 * NOTE(review): "§ion" restored to "&section" (encoding artifact). */
virtual void serialize(std::ostream &os);
virtual void unserialize(Checkpoint *cp, const std::string &section);

/** Drain/resume hooks for checkpointing and CPU switching.
 * NOTE(review): drain()'s return-value semantics (count of outstanding
 * events?) are defined in the .cc — confirm. */
virtual unsigned int drain(Event *de);
virtual void resume();
625 class IGbEInt : public EtherInt
/** Construct the interface and register it with its device; the device
 * keeps a back-pointer via setEthInt(), which asserts it is set only once.
 * @param name name of this interface
 * @param d device this interface belongs to
 */
IGbEInt(const std::string &name, IGbE *d)
    : EtherInt(name), dev(d)
{ dev->setEthInt(this); }
635 virtual bool recvPacket(EthPacketPtr pkt) { return dev->ethRxPkt(pkt); }
636 virtual void sendDone() { dev->ethTxDone(); }
643 #endif //__DEV_I8254XGBE_HH__