/*
 * Copyright (c) 2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 */

/* @file
 * Device model for Intel's 8254x line of gigabit ethernet controllers.
 */

#ifndef __DEV_I8254XGBE_HH__
#define __DEV_I8254XGBE_HH__

#include <deque>
#include <string>

#include "base/inet.hh"
#include "base/statistics.hh"
#include "dev/etherdevice.hh"
#include "dev/etherint.hh"
#include "dev/etherpkt.hh"
#include "dev/i8254xGBe_defs.hh"
#include "dev/pcidev.hh"
#include "dev/pktfifo.hh"
#include "params/IGbE.hh"
#include "sim/eventq.hh"

class IGbEInt;

class IGbE : public EtherDevice
{
  private:
    IGbEInt *etherInt;

    // device registers
    iGbReg::Regs regs;

    // eeprom data, status and control bits
    int eeOpBits, eeAddrBits, eeDataBits;
    uint8_t eeOpcode, eeAddr;
    uint16_t flash[iGbReg::EEPROM_SIZE];
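
    // These members appear to model the serial (bit-banged) EEPROM
    // interface: an opcode and address are shifted in bit by bit and the
    // corresponding word of flash[] is shifted back out. This is an
    // illustrative reading of the member names only; the authoritative
    // protocol handling lives in the register read/write code in the .cc.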

    // The drain event if we have one
    Event *drainEvent;

    // cached parameters from params struct
    bool useFlowControl;

    // packet fifos
    PacketFifo rxFifo;
    PacketFifo txFifo;

    // Packet that we are currently putting into the txFifo
    EthPacketPtr txPacket;

    // Should the Rx/Tx state machines tick?
    bool rxTick;
    bool txTick;
    bool txFifoTick;

    bool rxDmaPacket;

    // Event and function to deal with RDTR timer expiring
    void rdtrProcess() {
        rxDescCache.writeback(0);
        DPRINTF(EthernetIntr, "Posting RXT interrupt because RDTR timer expired\n");
        postInterrupt(iGbReg::IT_RXT, true);
    }

    //friend class EventWrapper<IGbE, &IGbE::rdtrProcess>;
    EventWrapper<IGbE, &IGbE::rdtrProcess> rdtrEvent;

    // Event and function to deal with RADV timer expiring
    void radvProcess() {
        rxDescCache.writeback(0);
        DPRINTF(EthernetIntr, "Posting RXT interrupt because RADV timer expired\n");
        postInterrupt(iGbReg::IT_RXT, true);
    }

    //friend class EventWrapper<IGbE, &IGbE::radvProcess>;
    EventWrapper<IGbE, &IGbE::radvProcess> radvEvent;

    // Event and function to deal with TADV timer expiring
    void tadvProcess() {
        txDescCache.writeback(0);
        DPRINTF(EthernetIntr, "Posting TXDW interrupt because TADV timer expired\n");
        postInterrupt(iGbReg::IT_TXDW, true);
    }

    //friend class EventWrapper<IGbE, &IGbE::tadvProcess>;
    EventWrapper<IGbE, &IGbE::tadvProcess> tadvEvent;

    // Event and function to deal with TIDV timer expiring
    void tidvProcess() {
        txDescCache.writeback(0);
        DPRINTF(EthernetIntr, "Posting TXDW interrupt because TIDV timer expired\n");
        postInterrupt(iGbReg::IT_TXDW, true);
    }
    //friend class EventWrapper<IGbE, &IGbE::tidvProcess>;
    EventWrapper<IGbE, &IGbE::tidvProcess> tidvEvent;

    // Main event to tick the device
    void tick();
    //friend class EventWrapper<IGbE, &IGbE::tick>;
    EventWrapper<IGbE, &IGbE::tick> tickEvent;


    void rxStateMachine();
    void txStateMachine();
    void txWire();

    /** Write an interrupt cause into the interrupt pending register and
     * check the mask and interrupt limit timer before sending the
     * interrupt to the CPU.
     * @param t the type of interrupt we are posting
     * @param now should we ignore the interrupt limiting timer
     */
    void postInterrupt(iGbReg::IntTypes t, bool now = false);
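    // Rough flow (a sketch; the authoritative logic is in the .cc file):
    // postInterrupt() records the cause, and unless 'now' is set delivery
    // may be deferred by the interrupt moderation machinery (interEvent /
    // intClock() below) before cpuPostInt() finally raises the line, e.g.
    //     postInterrupt(iGbReg::IT_RXT);        // moderated
    //     postInterrupt(iGbReg::IT_RXT, true);  // bypass the limit timer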

    /** Check whether changes to the interrupt mask register require an
     * interrupt to be sent, or whether they have removed a pending
     * interrupt cause.
     */
    void chkInterrupt();

    /** Send an interrupt to the CPU.
     */
    void delayIntEvent();
    void cpuPostInt();
    // Event to moderate interrupts
    EventWrapper<IGbE, &IGbE::delayIntEvent> interEvent;

    /** Clear the interrupt line to the CPU.
     */
    void cpuClearInt();

    Tick intClock() { return Clock::Int::ns * 1024; }
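    // Note: Clock::Int::ns * 1024 is 1.024 us worth of simulation ticks,
    // presumably mirroring the 1.024 us granularity the 8254x uses for its
    // interrupt delay timers (RDTR/RADV/TIDV/TADV). Treat the exact
    // correspondence as an assumption; the datasheet is the authority.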

    /** This function is used to restart the clock so it can handle things
     * like draining and resume in one place. */
    void restartClock();

    /** Check if all the draining things that need to occur have occurred
     * and handle the drain event if so.
     */
    void checkDrain();
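    // Sketch of the drain protocol as suggested by the declarations here
    // (details are in the .cc file): drain() counts outstanding DMA/events,
    // stores the caller's event in drainEvent, and checkDrain() signals
    // that event once the descriptor caches and FIFOs have quiesced.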

    template<class T>
    class DescCache
    {
      protected:
        virtual Addr descBase() const = 0;
        virtual long descHead() const = 0;
        virtual long descTail() const = 0;
        virtual long descLen() const = 0;
        virtual void updateHead(long h) = 0;
        virtual void enableSm() = 0;
        virtual void intAfterWb() const {}
        virtual void fetchAfterWb() = 0;

        std::deque<T*> usedCache;
        std::deque<T*> unusedCache;

        T *fetchBuf;
        T *wbBuf;

        // Pointer to the device we cache for
        IGbE *igbe;

        // Name of this descriptor cache
        std::string _name;

        // How far we've cached
        int cachePnt;

        // The size of the descriptor cache
        int size;

        // How many descriptors we are currently fetching
        int curFetching;

        // How many descriptors we are currently writing back
        int wbOut;

        // If we wrote back to the end of the descriptor ring and are going
        // to have to wrap and write more
        bool moreToWb;

        // What the alignment is of the next descriptor writeback
        Addr wbAlignment;

        /** The packet that is currently being DMA'd to memory, if any.
         */
        EthPacketPtr pktPtr;

      public:
        DescCache(IGbE *i, const std::string n, int s)
            : igbe(i), _name(n), cachePnt(0), size(s), curFetching(0),
              wbOut(0), pktPtr(NULL), fetchEvent(this), wbEvent(this)
        {
            fetchBuf = new T[size];
            wbBuf = new T[size];
        }

        virtual ~DescCache()
        {
            reset();
            // Free the fetch/writeback staging buffers allocated above
            delete [] fetchBuf;
            delete [] wbBuf;
        }

        std::string name() { return _name; }

        /** If the address, length, or head change while we have dirty
         * descriptors cached, something has gone badly wrong. This function
         * checks for that case and panics if it occurs.
         */
        void areaChanged()
        {
            if (usedCache.size() > 0 || curFetching || wbOut)
                panic("Descriptor Address, Length or Head changed. Bad\n");
            reset();

        }

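        /** Write back the used (dirty) descriptors to host memory, starting
         * at the current head. The mask argument appears to control how the
         * writeback is aligned: a mask of 0 writes back everything that is
         * dirty (up to the ring wrap point), while a mask of the form 2^n-1
         * rounds the count down to a multiple of 2^n descriptors. For
         * example (illustrative only), with 7 dirty descriptors a call with
         * aMask == 3 would write back 4 of them now and leave 3 for a later,
         * less restrictive writeback.
         */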
        void writeback(Addr aMask)
        {
            int curHead = descHead();
            int max_to_wb = usedCache.size();

            DPRINTF(EthernetDesc, "Writing back descriptors head: %d tail: "
                    "%d len: %d cachePnt: %d max_to_wb: %d descleft: %d\n",
                    curHead, descTail(), descLen(), cachePnt, max_to_wb,
                    descLeft());

            // Check if this writeback is less restrictive than the previous
            // and if so set up another one immediately following it
            if (wbOut && (aMask < wbAlignment)) {
                moreToWb = true;
                wbAlignment = aMask;
                DPRINTF(EthernetDesc, "Writeback already in progress, returning\n");
                return;
            }


            moreToWb = false;
            wbAlignment = aMask;

            if (max_to_wb + curHead >= descLen()) {
                max_to_wb = descLen() - curHead;
                moreToWb = true;
                // this is by definition aligned correctly
            } else if (aMask != 0) {
                // align the wb point to the mask
                max_to_wb = max_to_wb & ~aMask;
            }

            DPRINTF(EthernetDesc, "Writing back %d descriptors\n", max_to_wb);

            if (max_to_wb <= 0 || wbOut)
                return;

            wbOut = max_to_wb;

            for (int x = 0; x < wbOut; x++) {
                assert(usedCache.size());
                memcpy(&wbBuf[x], usedCache[0], sizeof(T));
                delete usedCache[0];
                usedCache.pop_front();
            }


            assert(wbOut);
            igbe->dmaWrite(igbe->platform->pciToDma(descBase() + curHead * sizeof(T)),
                    wbOut * sizeof(T), &wbEvent, (uint8_t*)wbBuf);
        }

        /** Fetch a chunk of descriptors into the descriptor cache.
         * Calls fetchComplete when the memory system returns the data.
         */
        void fetchDescriptors()
        {
            size_t max_to_fetch;

            if (curFetching)
                return;

            if (descTail() >= cachePnt)
                max_to_fetch = descTail() - cachePnt;
            else
                max_to_fetch = descLen() - cachePnt;
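            // Note (illustrative): if the tail has wrapped around the ring
            // past cachePnt, we only fetch up to the end of the ring here;
            // the wrapped portion is picked up by a later call once cachePnt
            // itself wraps in fetchComplete(). E.g. len == 256, cachePnt ==
            // 250, tail == 10 fetches at most 6 descriptors this time.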

            size_t free_cache = size - usedCache.size() - unusedCache.size();

            max_to_fetch = std::min(max_to_fetch, free_cache);

            DPRINTF(EthernetDesc, "Fetching descriptors head: %d tail: "
                    "%d len: %d cachePnt: %d max_to_fetch: %d descleft: %d\n",
                    descHead(), descTail(), descLen(), cachePnt,
                    max_to_fetch, descLeft());

            // Nothing to do
            if (max_to_fetch == 0)
                return;

            // So we don't have two descriptor fetches going on at once
            curFetching = max_to_fetch;

            DPRINTF(EthernetDesc, "Fetching descriptors at %#x (%#x), size: %#x\n",
                    descBase() + cachePnt * sizeof(T),
                    igbe->platform->pciToDma(descBase() + cachePnt * sizeof(T)),
                    curFetching * sizeof(T));
            assert(curFetching);
            igbe->dmaRead(igbe->platform->pciToDma(descBase() + cachePnt * sizeof(T)),
                    curFetching * sizeof(T), &fetchEvent, (uint8_t*)fetchBuf);
        }


        /** Called by event when dma to read descriptors is completed
         */
        void fetchComplete()
        {
            T *newDesc;
            for (int x = 0; x < curFetching; x++) {
                newDesc = new T;
                memcpy(newDesc, &fetchBuf[x], sizeof(T));
                unusedCache.push_back(newDesc);
            }

#ifndef NDEBUG
            int oldCp = cachePnt;
#endif

            cachePnt += curFetching;
            assert(cachePnt <= descLen());
            if (cachePnt == descLen())
                cachePnt = 0;

            curFetching = 0;

            DPRINTF(EthernetDesc, "Fetching complete cachePnt %d -> %d\n",
                    oldCp, cachePnt);

            enableSm();
            igbe->checkDrain();
        }

        EventWrapper<DescCache, &DescCache::fetchComplete> fetchEvent;

        /** Called by event when dma to writeback descriptors is completed
         */
        void wbComplete()
        {

            long curHead = descHead();
#ifndef NDEBUG
            long oldHead = curHead;
#endif

            curHead += wbOut;
            wbOut = 0;

            if (curHead >= descLen())
                curHead -= descLen();

            // Update the head
            updateHead(curHead);

            DPRINTF(EthernetDesc, "Writeback complete curHead %d -> %d\n",
                    oldHead, curHead);

            // If we still have more to wb, call wb now
            intAfterWb();
            if (moreToWb) {
                DPRINTF(EthernetDesc, "Writeback has more to do\n");
                writeback(wbAlignment);
            }

            if (!wbOut) {
                igbe->checkDrain();
            }
            fetchAfterWb();
        }


        EventWrapper<DescCache, &DescCache::wbComplete> wbEvent;

        /* Return the number of descriptors left in the ring, so the device
         * has a way to figure out if it needs to interrupt.
         */
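        /* Example (illustrative): with 4 prefetched-but-unused descriptors,
         * cachePnt == 10 and a tail of 16, descLeft() reports 4 + 6 = 10
         * descriptors that the device may still consume before it runs dry.
         */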
        int descLeft() const
        {
            int left = unusedCache.size();
            if (cachePnt - descTail() >= 0)
                left += (cachePnt - descTail());
            else
                left += (descTail() - cachePnt);

            return left;
        }

        /* Return the number of descriptors used and not written back.
         */
        int descUsed() const { return usedCache.size(); }

        /* Return the number of cached descriptors that are still unused. */
        int descUnused() const { return unusedCache.size(); }

        /* Get into a state where the descriptor address/head/etc. could be
         * changed. */
        void reset()
        {
            DPRINTF(EthernetDesc, "Resetting descriptor cache\n");
            for (int x = 0; x < usedCache.size(); x++)
                delete usedCache[x];
            for (int x = 0; x < unusedCache.size(); x++)
                delete unusedCache[x];

            usedCache.clear();
            unusedCache.clear();

            cachePnt = 0;

        }

        virtual void serialize(std::ostream &os)
        {
            SERIALIZE_SCALAR(cachePnt);
            SERIALIZE_SCALAR(curFetching);
            SERIALIZE_SCALAR(wbOut);
            SERIALIZE_SCALAR(moreToWb);
            SERIALIZE_SCALAR(wbAlignment);

            int usedCacheSize = usedCache.size();
            SERIALIZE_SCALAR(usedCacheSize);
            for (int x = 0; x < usedCacheSize; x++) {
                arrayParamOut(os, csprintf("usedCache_%d", x),
                        (uint8_t*)usedCache[x], sizeof(T));
            }

            int unusedCacheSize = unusedCache.size();
            SERIALIZE_SCALAR(unusedCacheSize);
            for (int x = 0; x < unusedCacheSize; x++) {
                arrayParamOut(os, csprintf("unusedCache_%d", x),
                        (uint8_t*)unusedCache[x], sizeof(T));
            }
        }

        virtual void unserialize(Checkpoint *cp, const std::string &section)
        {
            UNSERIALIZE_SCALAR(cachePnt);
            UNSERIALIZE_SCALAR(curFetching);
            UNSERIALIZE_SCALAR(wbOut);
            UNSERIALIZE_SCALAR(moreToWb);
            UNSERIALIZE_SCALAR(wbAlignment);

            int usedCacheSize;
            UNSERIALIZE_SCALAR(usedCacheSize);
            T *temp;
            for (int x = 0; x < usedCacheSize; x++) {
                temp = new T;
                arrayParamIn(cp, section, csprintf("usedCache_%d", x),
                        (uint8_t*)temp, sizeof(T));
                usedCache.push_back(temp);
            }

            int unusedCacheSize;
            UNSERIALIZE_SCALAR(unusedCacheSize);
            for (int x = 0; x < unusedCacheSize; x++) {
                temp = new T;
                arrayParamIn(cp, section, csprintf("unusedCache_%d", x),
                        (uint8_t*)temp, sizeof(T));
                unusedCache.push_back(temp);
            }
        }

        virtual bool hasOutstandingEvents() {
            return wbEvent.scheduled() || fetchEvent.scheduled();
        }

    };


    class RxDescCache : public DescCache<iGbReg::RxDesc>
    {
      protected:
        virtual Addr descBase() const { return igbe->regs.rdba(); }
        virtual long descHead() const { return igbe->regs.rdh(); }
        virtual long descLen() const { return igbe->regs.rdlen() >> 4; }
        virtual long descTail() const { return igbe->regs.rdt(); }
        virtual void updateHead(long h) { igbe->regs.rdh(h); }
        virtual void enableSm();
        virtual void fetchAfterWb() {
            if (!igbe->rxTick && igbe->getState() == SimObject::Running)
                fetchDescriptors();
        }

        bool pktDone;

      public:
        RxDescCache(IGbE *i, std::string n, int s);

        /** Write the given packet into the buffer(s) pointed to by the
         * descriptor and update the bookkeeping. Should only be called
         * when there are no DMAs pending.
         * @param packet ethernet packet to write
         * @return if the packet could be written (there was a free descriptor)
         */
        void writePacket(EthPacketPtr packet);

        /** Called by event when dma to write packet is completed
         */
        void pktComplete();

        /** Check if the dma on the packet has completed.
         */
        bool packetDone();
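
        // Rough receive flow (a sketch; the state machine in the .cc is
        // authoritative): igbe->rxStateMachine() calls writePacket() to DMA
        // a received frame into the buffers named by the next descriptor,
        // pktComplete() fires when that DMA finishes, and packetDone() is
        // polled to learn that the descriptor can be written back.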

        EventWrapper<RxDescCache, &RxDescCache::pktComplete> pktEvent;

        virtual bool hasOutstandingEvents();

        virtual void serialize(std::ostream &os);
        virtual void unserialize(Checkpoint *cp, const std::string &section);
    };
    friend class RxDescCache;

    RxDescCache rxDescCache;

    class TxDescCache : public DescCache<iGbReg::TxDesc>
    {
      protected:
        virtual Addr descBase() const { return igbe->regs.tdba(); }
        virtual long descHead() const { return igbe->regs.tdh(); }
        virtual long descTail() const { return igbe->regs.tdt(); }
        virtual long descLen() const { return igbe->regs.tdlen() >> 4; }
        virtual void updateHead(long h) { igbe->regs.tdh(h); }
        virtual void enableSm();
        virtual void intAfterWb() const { igbe->postInterrupt(iGbReg::IT_TXDW); }
        virtual void fetchAfterWb() {
            if (!igbe->txTick && igbe->getState() == SimObject::Running)
                fetchDescriptors();
        }

        bool pktDone;
        bool isTcp;
        bool pktWaiting;
        bool pktMultiDesc;

      public:
        TxDescCache(IGbE *i, std::string n, int s);

        /** Tell the cache to DMA a packet from main memory into its buffer
         * and return the size of the packet, so space can be reserved in
         * the tx fifo.
         * @return size of the packet
         */
        int getPacketSize();
        void getPacketData(EthPacketPtr p);

        /** Ask if the packet has been transferred so the state machine can
         * give it to the fifo.
         * @return packet available in descriptor cache
         */
        bool packetAvailable();

        /** Ask if we are still waiting for the packet to be transferred.
         * @return packet still in transit.
         */
        bool packetWaiting() { return pktWaiting; }

        /** Ask if this packet is composed of multiple descriptors, so even
         * if we've got data, we need to wait for more before we can send it
         * out.
         * @return packet can't be sent out because it's a multi-descriptor
         * packet
         */
        bool packetMultiDesc() { return pktMultiDesc; }
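
        // Rough transmit flow (a sketch; igbe->txStateMachine() in the .cc
        // is authoritative): getPacketSize() reserves room in the tx fifo,
        // getPacketData() DMAs the payload out of host memory, and the
        // state machine waits on packetWaiting()/packetAvailable() -- and,
        // for multi-descriptor packets, packetMultiDesc() -- before handing
        // the finished frame to the fifo for txWire() to send.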

        /** Called by event when dma to write packet is completed
         */
        void pktComplete();
        EventWrapper<TxDescCache, &TxDescCache::pktComplete> pktEvent;

        virtual bool hasOutstandingEvents();

        virtual void serialize(std::ostream &os);
        virtual void unserialize(Checkpoint *cp, const std::string &section);

    };
    friend class TxDescCache;

    TxDescCache txDescCache;

  public:
    typedef IGbEParams Params;
    const Params *
    params() const
    {
        return dynamic_cast<const Params *>(_params);
    }

    IGbE(const Params *params);
    ~IGbE() {}

    virtual EtherInt *getEthPort(const std::string &if_name, int idx);

    Tick clock;
    inline Tick ticks(int numCycles) const { return numCycles * clock; }
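    // ticks(n) converts n device-clock cycles into simulation Ticks using
    // the cached clock period above; presumably this is how the state
    // machine's tickEvent gets rescheduled each cycle in the .cc file.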

    virtual Tick read(PacketPtr pkt);
    virtual Tick write(PacketPtr pkt);

    virtual Tick writeConfig(PacketPtr pkt);

    bool ethRxPkt(EthPacketPtr packet);
    void ethTxDone();

    virtual void serialize(std::ostream &os);
    virtual void unserialize(Checkpoint *cp, const std::string &section);
    virtual unsigned int drain(Event *de);
    virtual void resume();

};

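/* IGbEInt is the glue between the simulated Ethernet link and the device
 * model above: recvPacket() hands incoming frames to IGbE::ethRxPkt(),
 * whose boolean return presumably signals whether the frame was accepted
 * (e.g. if the rx fifo had room), and sendDone() forwards transmit
 * completions to IGbE::ethTxDone().
 */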
class IGbEInt : public EtherInt
{
  private:
    IGbE *dev;

  public:
    IGbEInt(const std::string &name, IGbE *d)
        : EtherInt(name), dev(d)
    { }

    virtual bool recvPacket(EthPacketPtr pkt) { return dev->ethRxPkt(pkt); }
    virtual void sendDone() { dev->ethTxDone(); }
};

#endif //__DEV_I8254XGBE_HH__