make serialization at least seem to work
[gem5.git] / src / dev / i8254xGBe.hh
/*
 * Copyright (c) 2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 */

/* @file
 * Device model for Intel's 8254x line of gigabit ethernet controllers.
 */

#ifndef __DEV_I8254XGBE_HH__
#define __DEV_I8254XGBE_HH__

#include <deque>
#include <string>

#include "base/inet.hh"
#include "base/statistics.hh"
#include "dev/etherint.hh"
#include "dev/etherpkt.hh"
#include "dev/i8254xGBe_defs.hh"
#include "dev/pcidev.hh"
#include "dev/pktfifo.hh"
#include "sim/eventq.hh"

class IGbEInt;

class IGbE : public PciDev
{
  private:
    IGbEInt *etherInt;

    // device registers
    iGbReg::Regs regs;

    // eeprom data, status and control bits
    int eeOpBits, eeAddrBits, eeDataBits;
    uint8_t eeOpcode, eeAddr;
    uint16_t flash[iGbReg::EEPROM_SIZE];

    // The drain event if we have one
    Event *drainEvent;

    // cached parameters from params struct
    bool useFlowControl;

    // packet fifos
    PacketFifo rxFifo;
    PacketFifo txFifo;

    // Packet that we are currently putting into the txFifo
    EthPacketPtr txPacket;

    // Should the Rx/Tx state machines tick?
    bool rxTick;
    bool txTick;
    bool txFifoTick;

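    // The four timer callbacks below implement the controller's interrupt
    // moderation delays: RDTR/RADV delay receive interrupts, TIDV/TADV delay
    // transmit interrupts. Each handler writes back the cached descriptors
    // before posting the moderated interrupt.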
    // Event and function to deal with RDTR timer expiring
    void rdtrProcess() {
        rxDescCache.writeback(0);
        DPRINTF(EthernetIntr, "Posting RXT interrupt because RDTR timer expired\n");
        postInterrupt(iGbReg::IT_RXT, true);
    }

    //friend class EventWrapper<IGbE, &IGbE::rdtrProcess>;
    EventWrapper<IGbE, &IGbE::rdtrProcess> rdtrEvent;

    // Event and function to deal with RADV timer expiring
    void radvProcess() {
        rxDescCache.writeback(0);
        DPRINTF(EthernetIntr, "Posting RXT interrupt because RADV timer expired\n");
        postInterrupt(iGbReg::IT_RXT, true);
    }

    //friend class EventWrapper<IGbE, &IGbE::radvProcess>;
    EventWrapper<IGbE, &IGbE::radvProcess> radvEvent;

    // Event and function to deal with TADV timer expiring
    void tadvProcess() {
        txDescCache.writeback(0);
        DPRINTF(EthernetIntr, "Posting TXDW interrupt because TADV timer expired\n");
        postInterrupt(iGbReg::IT_TXDW, true);
    }

    //friend class EventWrapper<IGbE, &IGbE::tadvProcess>;
    EventWrapper<IGbE, &IGbE::tadvProcess> tadvEvent;

    // Event and function to deal with TIDV timer expiring
    void tidvProcess() {
        txDescCache.writeback(0);
        DPRINTF(EthernetIntr, "Posting TXDW interrupt because TIDV timer expired\n");
        postInterrupt(iGbReg::IT_TXDW, true);
    }
    //friend class EventWrapper<IGbE, &IGbE::tidvProcess>;
    EventWrapper<IGbE, &IGbE::tidvProcess> tidvEvent;

    // Main event to tick the device
    void tick();
    //friend class EventWrapper<IGbE, &IGbE::tick>;
    EventWrapper<IGbE, &IGbE::tick> tickEvent;


    void rxStateMachine();
    void txStateMachine();
    void txWire();

    /** Write an interrupt into the interrupt pending register and check the
     * mask and interrupt limit timer before sending the interrupt to the CPU.
     * @param t the type of interrupt we are posting
     * @param now should we ignore the interrupt limiting timer
     */
    void postInterrupt(iGbReg::IntTypes t, bool now = false);
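    // Rough sketch of the intended flow (based only on the declarations in
    // this header): postInterrupt() records the cause and, unless 'now' is
    // set, defers the actual CPU interrupt by scheduling interEvent at
    // intClock() granularity; cpuPostInt() then raises the line and
    // cpuClearInt() lowers it, while chkInterrupt() re-evaluates pending
    // causes whenever the mask register changes.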

    /** Check whether changes to the mask register require an interrupt to be
     * sent, or have removed a pending interrupt cause.
     */
    void chkInterrupt();

    /** Send an interrupt to the CPU.
     */
    void cpuPostInt();
    // Event to moderate interrupts
    EventWrapper<IGbE, &IGbE::cpuPostInt> interEvent;

    /** Clear the interrupt line to the CPU.
     */
    void cpuClearInt();

    Tick intClock() { return Clock::Int::ns * 1024; }

    /** Restart the clock so that draining and resume can be handled in one
     * place. */
    void restartClock();

    /** Check if everything required for draining has occurred and, if so,
     * handle the drain event.
     */
    void checkDrain();

    template<class T>
    class DescCache
    {
      protected:
        virtual Addr descBase() const = 0;
        virtual long descHead() const = 0;
        virtual long descTail() const = 0;
        virtual long descLen() const = 0;
        virtual void updateHead(long h) = 0;
        virtual void enableSm() = 0;
        virtual void intAfterWb() const {}

        std::deque<T*> usedCache;
        std::deque<T*> unusedCache;

        T *fetchBuf;
        T *wbBuf;

        // Pointer to the device we cache for
        IGbE *igbe;

        // Name of this descriptor cache
        std::string _name;

        // How far we've cached
        int cachePnt;

        // The size of the descriptor cache
        int size;

        // How many descriptors we are currently fetching
        int curFetching;

        // How many descriptors we are currently writing back
        int wbOut;

        // If we wrote back to the end of the descriptor ring and are going
        // to have to wrap and write more
        bool moreToWb;

        // The alignment of the next descriptor writeback
        Addr wbAlignment;

        /** The packet, if any, that is currently being DMA'd to memory.
         */
        EthPacketPtr pktPtr;

      public:
        DescCache(IGbE *i, const std::string n, int s)
            : igbe(i), _name(n), cachePnt(0), size(s), curFetching(0), wbOut(0),
              pktPtr(NULL), fetchEvent(this), wbEvent(this)
        {
            fetchBuf = new T[size];
            wbBuf = new T[size];
        }

        virtual ~DescCache()
        {
            reset();
            // Free the DMA staging buffers allocated in the constructor
            delete [] fetchBuf;
            delete [] wbBuf;
        }

        std::string name() { return _name; }

        /** Changing the descriptor address, length, or head while we still
         * have dirty descriptors outstanding is very bad. This function
         * checks for that case and panics if it happens.
         */
        void areaChanged()
        {
            if (usedCache.size() > 0 || curFetching || wbOut)
                panic("Descriptor Address, Length or Head changed. Bad\n");
            reset();
        }

        void writeback(Addr aMask)
        {
            int curHead = descHead();
            int max_to_wb = usedCache.size();

            DPRINTF(EthernetDesc, "Writing back descriptors head: %d tail: "
                    "%d len: %d cachePnt: %d max_to_wb: %d descleft: %d\n",
                    curHead, descTail(), descLen(), cachePnt, max_to_wb,
                    descLeft());

            // Check if this writeback is less restrictive than the previous
            // one and, if so, set up another one immediately following it
            if (wbOut && (aMask < wbAlignment)) {
                moreToWb = true;
                wbAlignment = aMask;
                DPRINTF(EthernetDesc, "Writing back already in process, returning\n");
                return;
            }

            moreToWb = false;
            wbAlignment = aMask;

            if (max_to_wb + curHead >= descLen()) {
                max_to_wb = descLen() - curHead;
                moreToWb = true;
                // this is by definition aligned correctly
            } else if (aMask != 0) {
                // align the wb point to the mask
                max_to_wb = max_to_wb & ~aMask;
            }

            DPRINTF(EthernetDesc, "Writing back %d descriptors\n", max_to_wb);

            if (max_to_wb <= 0 || wbOut)
                return;

            wbOut = max_to_wb;

            for (int x = 0; x < wbOut; x++)
                memcpy(&wbBuf[x], usedCache[x], sizeof(T));

            for (int x = 0; x < wbOut; x++) {
                assert(usedCache.size());
                delete usedCache[0];
                usedCache.pop_front();
            }

            assert(wbOut);
            igbe->dmaWrite(igbe->platform->pciToDma(descBase() + curHead * sizeof(T)),
                    wbOut * sizeof(T), &wbEvent, (uint8_t*)wbBuf);
        }
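        // Worked example of the alignment mask (illustrative numbers): with
        // ten descriptors in usedCache, curHead well inside the ring, and
        // aMask == 0x3, max_to_wb becomes 10 & ~0x3 == 8, so eight
        // descriptors are written back now and the remaining two stay cached
        // until a later writeback with a smaller mask (e.g. writeback(0))
        // flushes them.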

        /** Fetch a chunk of descriptors into the descriptor cache.
         * Calls fetchComplete when the memory system returns the data.
         */
        void fetchDescriptors()
        {
            size_t max_to_fetch;

            if (descTail() >= cachePnt)
                max_to_fetch = descTail() - cachePnt;
            else
                max_to_fetch = descLen() - cachePnt;

            max_to_fetch = std::min(max_to_fetch, (size - usedCache.size() -
                        unusedCache.size()));

            DPRINTF(EthernetDesc, "Fetching descriptors head: %d tail: "
                    "%d len: %d cachePnt: %d max_to_fetch: %d descleft: %d\n",
                    descHead(), descTail(), descLen(), cachePnt,
                    max_to_fetch, descLeft());

            // Nothing to do
            if (max_to_fetch == 0 || curFetching)
                return;

            // So we don't have two descriptor fetches going on at once
            curFetching = max_to_fetch;

            DPRINTF(EthernetDesc, "Fetching descriptors at %#x (%#x), size: %#x\n",
                    descBase() + cachePnt * sizeof(T),
                    igbe->platform->pciToDma(descBase() + cachePnt * sizeof(T)),
                    curFetching * sizeof(T));

            assert(curFetching);
            igbe->dmaRead(igbe->platform->pciToDma(descBase() + cachePnt * sizeof(T)),
                    curFetching * sizeof(T), &fetchEvent, (uint8_t*)fetchBuf);
        }
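        // Note that the fetch size is clamped to descLen() - cachePnt when the
        // tail has wrapped, so a single fetch never crosses the end of the
        // ring; fetchComplete() wraps cachePnt back to 0 and a later call
        // picks up the descriptors at the start of the ring.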


        /** Called by event when dma to read descriptors is completed
         */
        void fetchComplete()
        {
            T *newDesc;
            for (int x = 0; x < curFetching; x++) {
                newDesc = new T;
                memcpy(newDesc, &fetchBuf[x], sizeof(T));
                unusedCache.push_back(newDesc);
            }

#ifndef NDEBUG
            int oldCp = cachePnt;
#endif

            cachePnt += curFetching;
            assert(cachePnt <= descLen());
            if (cachePnt == descLen())
                cachePnt = 0;

            curFetching = 0;

            DPRINTF(EthernetDesc, "Fetching complete cachePnt %d -> %d\n",
                    oldCp, cachePnt);

            enableSm();
            igbe->checkDrain();
        }

        EventWrapper<DescCache, &DescCache::fetchComplete> fetchEvent;

        /** Called by event when dma to writeback descriptors is completed
         */
        void wbComplete()
        {
            long curHead = descHead();
#ifndef NDEBUG
            long oldHead = curHead;
#endif

            curHead += wbOut;
            wbOut = 0;

            if (curHead >= descLen())
                curHead -= descLen();

            // Update the head
            updateHead(curHead);

            DPRINTF(EthernetDesc, "Writeback complete curHead %d -> %d\n",
                    oldHead, curHead);

            // If we still have more to wb, call wb now
            if (moreToWb) {
                DPRINTF(EthernetDesc, "Writeback has more to do\n");
                writeback(wbAlignment);
            }
            intAfterWb();
            igbe->checkDrain();
        }

        EventWrapper<DescCache, &DescCache::wbComplete> wbEvent;

        /* Return the number of descriptors left in the ring, so the device has
         * a way to figure out if it needs to interrupt.
         */
        int descLeft() const
        {
            int left = unusedCache.size();
            if (cachePnt - descTail() >= 0)
                left += (cachePnt - descTail());
            else
                left += (descTail() - cachePnt);

            return left;
        }
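        // Illustrative arithmetic: with five descriptors in unusedCache,
        // cachePnt == 10 and descTail() == 200, this returns
        // 5 + (200 - 10) == 195 descriptors still available to the device.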

        /* Return the number of descriptors used and not written back.
         */
        int descUsed() const { return usedCache.size(); }

        /* Return the number of cached descriptors that are still unused. */
        int descUnused() const { return unusedCache.size(); }

        /* Get into a state where the descriptor address/head/etc. could be
         * changed. */
        void reset()
        {
            DPRINTF(EthernetDesc, "Resetting descriptor cache\n");
            for (int x = 0; x < usedCache.size(); x++)
                delete usedCache[x];
            for (int x = 0; x < unusedCache.size(); x++)
                delete unusedCache[x];

            usedCache.clear();
            unusedCache.clear();

            cachePnt = 0;
        }

        virtual void serialize(std::ostream &os)
        {
            SERIALIZE_SCALAR(cachePnt);
            SERIALIZE_SCALAR(curFetching);
            SERIALIZE_SCALAR(wbOut);
            SERIALIZE_SCALAR(moreToWb);
            SERIALIZE_SCALAR(wbAlignment);

            int usedCacheSize = usedCache.size();
            SERIALIZE_SCALAR(usedCacheSize);
            for (int x = 0; x < usedCacheSize; x++) {
                arrayParamOut(os, csprintf("usedCache_%d", x),
                        (uint8_t*)usedCache[x], sizeof(T));
            }

            int unusedCacheSize = unusedCache.size();
            SERIALIZE_SCALAR(unusedCacheSize);
            for (int x = 0; x < unusedCacheSize; x++) {
                arrayParamOut(os, csprintf("unusedCache_%d", x),
                        (uint8_t*)unusedCache[x], sizeof(T));
            }
        }
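        // Checkpoint layout, as written above: the ring-tracking scalars come
        // first, then each cached descriptor is dumped as a raw byte array
        // named usedCache_<n> or unusedCache_<n>, with usedCacheSize and
        // unusedCacheSize recording how many of each unserialize() must
        // rebuild.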

        virtual void unserialize(Checkpoint *cp, const std::string &section)
        {
            UNSERIALIZE_SCALAR(cachePnt);
            UNSERIALIZE_SCALAR(curFetching);
            UNSERIALIZE_SCALAR(wbOut);
            UNSERIALIZE_SCALAR(moreToWb);
            UNSERIALIZE_SCALAR(wbAlignment);

            int usedCacheSize;
            UNSERIALIZE_SCALAR(usedCacheSize);
            T *temp;
            for (int x = 0; x < usedCacheSize; x++) {
                temp = new T;
                arrayParamIn(cp, section, csprintf("usedCache_%d", x),
                        (uint8_t*)temp, sizeof(T));
                usedCache.push_back(temp);
            }

            int unusedCacheSize;
            UNSERIALIZE_SCALAR(unusedCacheSize);
            for (int x = 0; x < unusedCacheSize; x++) {
                temp = new T;
                arrayParamIn(cp, section, csprintf("unusedCache_%d", x),
                        (uint8_t*)temp, sizeof(T));
                unusedCache.push_back(temp);
            }
        }

        virtual bool hasOutstandingEvents() {
            return wbEvent.scheduled() || fetchEvent.scheduled();
        }

    };


    class RxDescCache : public DescCache<iGbReg::RxDesc>
    {
      protected:
        virtual Addr descBase() const { return igbe->regs.rdba(); }
        virtual long descHead() const { return igbe->regs.rdh(); }
        virtual long descLen() const { return igbe->regs.rdlen() >> 4; }
        virtual long descTail() const { return igbe->regs.rdt(); }
        virtual void updateHead(long h) { igbe->regs.rdh(h); }
        virtual void enableSm();

        bool pktDone;

      public:
        RxDescCache(IGbE *i, std::string n, int s);

        /** Write the given packet into the buffer(s) pointed to by the
         * descriptor and update the bookkeeping. Should only be called when
         * there are no DMAs pending.
         * @param packet ethernet packet to write
         * @return true if the packet could be written (there was a free descriptor)
         */
        bool writePacket(EthPacketPtr packet);

        /** Called by event when dma to write packet is completed
         */
        void pktComplete();

        /** Check if the dma on the packet has completed.
         */
        bool packetDone();

        EventWrapper<RxDescCache, &RxDescCache::pktComplete> pktEvent;

        virtual bool hasOutstandingEvents();

        virtual void serialize(std::ostream &os);
        virtual void unserialize(Checkpoint *cp, const std::string &section);
    };
    friend class RxDescCache;

    RxDescCache rxDescCache;

    class TxDescCache : public DescCache<iGbReg::TxDesc>
    {
      protected:
        virtual Addr descBase() const { return igbe->regs.tdba(); }
        virtual long descHead() const { return igbe->regs.tdh(); }
        virtual long descTail() const { return igbe->regs.tdt(); }
        virtual long descLen() const { return igbe->regs.tdlen() >> 4; }
        virtual void updateHead(long h) { igbe->regs.tdh(h); }
        virtual void enableSm();
        virtual void intAfterWb() const { igbe->postInterrupt(iGbReg::IT_TXDW); }

        bool pktDone;
        bool isTcp;
        bool pktWaiting;

      public:
        TxDescCache(IGbE *i, std::string n, int s);

        /** Tell the cache to DMA a packet from main memory into its buffer and
         * return the size of the packet so space can be reserved in the TX FIFO.
         * @return size of the packet
         */
        int getPacketSize();
        void getPacketData(EthPacketPtr p);

        /** Ask if the packet has been transferred so the state machine can give
         * it to the FIFO.
         * @return packet available in descriptor cache
         */
        bool packetAvailable();

        /** Ask if we are still waiting for the packet to be transferred.
         * @return packet still in transit.
         */
        bool packetWaiting() { return pktWaiting; }

        /** Called by event when dma to write packet is completed
         */
        void pktComplete();
        EventWrapper<TxDescCache, &TxDescCache::pktComplete> pktEvent;

        virtual bool hasOutstandingEvents();

        virtual void serialize(std::ostream &os);
        virtual void unserialize(Checkpoint *cp, const std::string &section);

    };
    friend class TxDescCache;

    TxDescCache txDescCache;

  public:
    struct Params : public PciDev::Params
    {
        Net::EthAddr hardware_address;
        bool use_flow_control;
        int rx_fifo_size;
        int tx_fifo_size;
        int rx_desc_cache_size;
        int tx_desc_cache_size;
        Tick clock;
    };

    IGbE(Params *params);
    ~IGbE() {}

    Tick clock;
    inline Tick cycles(int numCycles) const { return numCycles * clock; }

    virtual Tick read(PacketPtr pkt);
    virtual Tick write(PacketPtr pkt);

    virtual Tick writeConfig(PacketPtr pkt);

    bool ethRxPkt(EthPacketPtr packet);
    void ethTxDone();

    void setEthInt(IGbEInt *i) { assert(!etherInt); etherInt = i; }


    const Params *params() const { return (const Params *)_params; }

    virtual void serialize(std::ostream &os);
    virtual void unserialize(Checkpoint *cp, const std::string &section);
    virtual unsigned int drain(Event *de);
    virtual void resume();

};

class IGbEInt : public EtherInt
{
  private:
    IGbE *dev;

  public:
    IGbEInt(const std::string &name, IGbE *d)
        : EtherInt(name), dev(d)
    { dev->setEthInt(this); }

    virtual bool recvPacket(EthPacketPtr pkt) { return dev->ethRxPkt(pkt); }
    virtual void sendDone() { dev->ethTxDone(); }
};

#endif //__DEV_I8254XGBE_HH__