/*
 * Copyright (c) 2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 */

/* @file
 * Device model for Intel's 8254x line of gigabit ethernet controllers.
 */

#ifndef __DEV_I8254XGBE_HH__
#define __DEV_I8254XGBE_HH__

#include <deque>
#include <string>

#include "base/inet.hh"
#include "base/statistics.hh"
#include "dev/etherint.hh"
#include "dev/etherpkt.hh"
#include "dev/i8254xGBe_defs.hh"
#include "dev/pcidev.hh"
#include "dev/pktfifo.hh"
#include "sim/eventq.hh"

class IGbEInt;

class IGbE : public PciDev
{
  private:
    IGbEInt *etherInt;

    // device registers
    iGbReg::Regs regs;

    // eeprom data, status and control bits
    int eeOpBits, eeAddrBits, eeDataBits;
    uint8_t eeOpcode, eeAddr;
    uint16_t flash[iGbReg::EEPROM_SIZE];

    // cached parameters from params struct
    Tick tickRate;
    bool useFlowControl;

    // packet fifos
    PacketFifo rxFifo;
    PacketFifo txFifo;

    // Packet that we are currently putting into the txFifo
    EthPacketPtr txPacket;

    // Should the Rx/Tx state machines tick?
    bool rxTick;
    bool txTick;

    // Event and function to deal with RDTR timer expiring
    void rdtrProcess() { rxDescCache.writeback(0); postInterrupt(iGbReg::IT_RXT, true); }
    //friend class EventWrapper<IGbE, &IGbE::rdtrProcess>;
    EventWrapper<IGbE, &IGbE::rdtrProcess> rdtrEvent;

    // Event and function to deal with RADV timer expiring
    void radvProcess() { rxDescCache.writeback(0); postInterrupt(iGbReg::IT_RXT, true); }
    //friend class EventWrapper<IGbE, &IGbE::radvProcess>;
    EventWrapper<IGbE, &IGbE::radvProcess> radvEvent;

    // Event and function to deal with TADV timer expiring
    void tadvProcess() { postInterrupt(iGbReg::IT_TXDW, true); }
    //friend class EventWrapper<IGbE, &IGbE::tadvProcess>;
    EventWrapper<IGbE, &IGbE::tadvProcess> tadvEvent;

    // Event and function to deal with TIDV timer expiring
    void tidvProcess() { postInterrupt(iGbReg::IT_TXDW, true); }
    //friend class EventWrapper<IGbE, &IGbE::tidvProcess>;
    EventWrapper<IGbE, &IGbE::tidvProcess> tidvEvent;

    // Main event to tick the device
    void tick();
    //friend class EventWrapper<IGbE, &IGbE::tick>;
    EventWrapper<IGbE, &IGbE::tick> tickEvent;

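    // Handlers for the receive and transmit state machines and for moving
    // transmit data onto the wire; driven from the main tick event.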
    void rxStateMachine();
    void txStateMachine();
    void txWire();

    /** Write an interrupt into the interrupt pending register and check mask
     * and interrupt limit timer before sending interrupt to CPU
     * @param t the type of interrupt we are posting
     * @param now should we ignore the interrupt limiting timer
     */
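    // For example, the RDTR/RADV handlers above call
    // postInterrupt(iGbReg::IT_RXT, true) to post a receive-timer interrupt
    // immediately, bypassing the interrupt limiting timer.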
    void postInterrupt(iGbReg::IntTypes t, bool now = false);

    /** Check whether changes to the interrupt mask register require an
     * interrupt to be posted, or whether they removed an existing
     * interrupt cause.
     */
    void chkInterrupt();

    /** Send an interrupt to the CPU
     */
    void cpuPostInt();
    // Event to moderate interrupts
    EventWrapper<IGbE, &IGbE::cpuPostInt> interEvent;

    /** Clear the interrupt line to the CPU
     */
    void cpuClearInt();

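    /** Clock period used when moderating interrupts (1024 ns). */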
    Tick intClock() { return Clock::Int::ns * 1024; }

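    /** Restart the main tick event if it is not already scheduled and the
     * device has work to do.
     */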
    void restartClock();

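    /** Cache of receive/transmit descriptors fetched from host memory.
     * Descriptors are DMA'd in before they are needed (unusedCache) and
     * written back, advancing the head pointer, once the device has
     * consumed them (usedCache).
     */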
    template<class T>
    class DescCache
    {
      protected:
        virtual Addr descBase() const = 0;
        virtual long descHead() const = 0;
        virtual long descTail() const = 0;
        virtual long descLen() const = 0;
        virtual void updateHead(long h) = 0;
        virtual void enableSm() = 0;
        virtual void intAfterWb() const {}

        std::deque<T*> usedCache;
        std::deque<T*> unusedCache;

        T *fetchBuf;
        T *wbBuf;

        // Pointer to the device we cache for
        IGbE *igbe;

        // Name of this descriptor cache
        std::string _name;

        // How far we've cached
        int cachePnt;

        // The size of the descriptor cache
        int size;

        // How many descriptors we are currently fetching
        int curFetching;

        // How many descriptors we are currently writing back
        int wbOut;

        // If we wrote back to the end of the descriptor ring and have to
        // wrap around and write more
        bool moreToWb;

        // What the alignment is of the next descriptor writeback
        Addr wbAlignment;

        /** The packet, if any, that is currently being DMA'd to memory
         */
        EthPacketPtr pktPtr;

      public:
        DescCache(IGbE *i, const std::string n, int s)
            : igbe(i), _name(n), cachePnt(0), size(s), curFetching(0),
              wbOut(0), pktPtr(NULL), fetchEvent(this), wbEvent(this)
        {
            fetchBuf = new T[size];
            wbBuf = new T[size];
        }

        virtual ~DescCache()
        {
            reset();
        }

        std::string name() { return _name; }

        /** The descriptor base, length, or head must not change while we
         * hold dirty (cached) descriptors. This function checks for that
         * and panics if it happens.
         */
        void areaChanged()
        {
            if (usedCache.size() > 0 || unusedCache.size() > 0)
                panic("Descriptor Address, Length or Head changed. Bad\n");
        }

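        /** Write back (DMA to host memory) as many used descriptors as the
         * requested alignment allows and advance the head pointer once the
         * DMA completes (see wbComplete()).
         * @param aMask alignment mask applied to the number of descriptors
         *              written back
         */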
        void writeback(Addr aMask)
        {
            int curHead = descHead();
            int max_to_wb = usedCache.size();

            DPRINTF(EthernetDesc, "Writing back descriptors head: %d tail: "
                    "%d len: %d cachePnt: %d max_to_wb: %d descleft: %d\n",
                    curHead, descTail(), descLen(), cachePnt, max_to_wb,
                    descLeft());

            // Check if this writeback is less restrictive than the previous
            // one and if so set up another one immediately following it
            if (wbOut && (aMask < wbAlignment)) {
                moreToWb = true;
                wbAlignment = aMask;
                DPRINTF(EthernetDesc, "Writeback already in progress, returning\n");
                return;
            }

            moreToWb = false;
            wbAlignment = aMask;

            if (max_to_wb + curHead > descLen()) {
                max_to_wb = descLen() - curHead;
                moreToWb = true;
                // this is by definition aligned correctly
            } else if (aMask != 0) {
                // align the wb point to the mask
                max_to_wb = max_to_wb & ~aMask;
            }

            DPRINTF(EthernetDesc, "Writing back %d descriptors\n", max_to_wb);

            if (max_to_wb <= 0 || wbOut)
                return;

            wbOut = max_to_wb;

            for (int x = 0; x < wbOut; x++)
                memcpy(&wbBuf[x], usedCache[x], sizeof(T));

            for (int x = 0; x < wbOut; x++) {
                assert(usedCache.size());
                delete usedCache[0];
                usedCache.pop_front();
            }

            assert(wbOut);
            igbe->dmaWrite(igbe->platform->pciToDma(descBase() + curHead * sizeof(T)),
                    wbOut * sizeof(T), &wbEvent, (uint8_t*)wbBuf);
        }

        /** Fetch a chunk of descriptors into the descriptor cache.
         * Calls fetchComplete when the memory system returns the data
         */
        void fetchDescriptors()
        {
            // Note: this difference can be negative when the tail is behind
            // the cache pointer, so keep it signed (a size_t would make the
            // check below a no-op).
            int max_to_fetch = descTail() - cachePnt;
            if (max_to_fetch < 0)
                max_to_fetch = descLen() - cachePnt;

            max_to_fetch = std::min(max_to_fetch,
                    size - (int)(usedCache.size() + unusedCache.size()));

            DPRINTF(EthernetDesc, "Fetching descriptors head: %d tail: "
                    "%d len: %d cachePnt: %d max_to_fetch: %d descleft: %d\n",
                    descHead(), descTail(), descLen(), cachePnt,
                    max_to_fetch, descLeft());

            // Nothing to do
            if (max_to_fetch == 0 || curFetching)
                return;

            // So we don't have two descriptor fetches going on at once
            curFetching = max_to_fetch;

            DPRINTF(EthernetDesc, "Fetching descriptors at %#x (%#x), size: %#x\n",
                    descBase() + cachePnt * sizeof(T),
                    igbe->platform->pciToDma(descBase() + cachePnt * sizeof(T)),
                    curFetching * sizeof(T));

            assert(curFetching);
            igbe->dmaRead(igbe->platform->pciToDma(descBase() + cachePnt * sizeof(T)),
                    curFetching * sizeof(T), &fetchEvent, (uint8_t*)fetchBuf);
        }

        /** Called by event when dma to read descriptors is completed
         */
        void fetchComplete()
        {
            T *newDesc;
            for (int x = 0; x < curFetching; x++) {
                newDesc = new T;
                memcpy(newDesc, &fetchBuf[x], sizeof(T));
                unusedCache.push_back(newDesc);
            }

#ifndef NDEBUG
            int oldCp = cachePnt;
#endif

            cachePnt += curFetching;
            if (cachePnt > descLen())
                cachePnt -= descLen();

            curFetching = 0;

            DPRINTF(EthernetDesc, "Fetching complete cachePnt %d -> %d\n",
                    oldCp, cachePnt);

            enableSm();
        }

        EventWrapper<DescCache, &DescCache::fetchComplete> fetchEvent;

        /** Called by event when dma to writeback descriptors is completed
         */
        void wbComplete()
        {
            long curHead = descHead();
#ifndef NDEBUG
            long oldHead = curHead;
#endif

            curHead += wbOut;
            wbOut = 0;

            if (curHead > descLen())
                curHead = 0;

            // Update the head
            updateHead(curHead);

            DPRINTF(EthernetDesc, "Writeback complete curHead %d -> %d\n",
                    oldHead, curHead);

            // If we still have more to wb, call wb now
            if (moreToWb) {
                DPRINTF(EthernetDesc, "Writeback has more todo\n");
                writeback(wbAlignment);
            }
            intAfterWb();
        }

        EventWrapper<DescCache, &DescCache::wbComplete> wbEvent;

        /* Return the number of descriptors left in the ring, so the device has
         * a way to figure out if it needs to interrupt.
         */
        int descLeft() const
        {
            int left = unusedCache.size();
            if (cachePnt - descTail() >= 0)
                left += (cachePnt - descTail());
            else
                left += (descTail() - cachePnt);

            return left;
        }

        /* Return the number of descriptors used and not written back.
         */
        int descUsed() const { return usedCache.size(); }

        /* Return the number of unused descriptors we have cached. */
        int descUnused() const { return unusedCache.size(); }

        /* Get into a state where the descriptor address/head/etc could be
         * changed */
        void reset()
        {
            DPRINTF(EthernetDesc, "Resetting descriptor cache\n");
            for (int x = 0; x < usedCache.size(); x++)
                delete usedCache[x];
            for (int x = 0; x < unusedCache.size(); x++)
                delete unusedCache[x];

            usedCache.clear();
            unusedCache.clear();
        }
    };

    class RxDescCache : public DescCache<iGbReg::RxDesc>
    {
      protected:
        virtual Addr descBase() const { return igbe->regs.rdba(); }
        virtual long descHead() const { return igbe->regs.rdh(); }
        virtual long descLen() const { return igbe->regs.rdlen() >> 4; }
        virtual long descTail() const { return igbe->regs.rdt(); }
        virtual void updateHead(long h) { igbe->regs.rdh(h); }
        virtual void enableSm();

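        // Set once the DMA of the current packet into memory has finished;
        // queried through packetDone().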
        bool pktDone;

      public:
        RxDescCache(IGbE *i, std::string n, int s);

        /** Write the given packet into the buffer(s) pointed to by the
         * descriptor and update the bookkeeping. Should only be called when
         * there are no DMAs pending.
         * @param packet ethernet packet to write
         * @return if the packet could be written (there was a free descriptor)
         */
        bool writePacket(EthPacketPtr packet);

        /** Called by event when dma to write packet is completed
         */
        void pktComplete();

        /** Check if the dma on the packet has completed.
         */
        bool packetDone();

        EventWrapper<RxDescCache, &RxDescCache::pktComplete> pktEvent;

    };
    friend class RxDescCache;

    RxDescCache rxDescCache;

    class TxDescCache : public DescCache<iGbReg::TxDesc>
    {
      protected:
        virtual Addr descBase() const { return igbe->regs.tdba(); }
        virtual long descHead() const { return igbe->regs.tdh(); }
        virtual long descTail() const { return igbe->regs.tdt(); }
        virtual long descLen() const { return igbe->regs.tdlen() >> 4; }
        virtual void updateHead(long h) { igbe->regs.tdh(h); }
        virtual void enableSm();
        virtual void intAfterWb() const { igbe->postInterrupt(iGbReg::IT_TXDW); }

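        // State of the packet currently being pulled from the transmit
        // descriptors: data DMA finished, whether it is TCP, whether a DMA
        // is still outstanding, and the header length.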
        bool pktDone;
        bool isTcp;
        bool pktWaiting;
        int hLen;

      public:
        TxDescCache(IGbE *i, std::string n, int s);

        /** Tell the cache to DMA a packet from main memory into its buffer
         * and return the size of the packet so space can be reserved in the
         * tx fifo.
         * @return size of the packet
         */
        int getPacketSize();
        void getPacketData(EthPacketPtr p);

        /** Ask if the packet has been transferred so the state machine can
         * give it to the fifo.
         * @return packet available in descriptor cache
         */
        bool packetAvailable();

        /** Ask if we are still waiting for the packet to be transferred.
         * @return packet still in transit.
         */
        bool packetWaiting() { return pktWaiting; }

        /** Called by event when dma to write packet is completed
         */
        void pktComplete();
        EventWrapper<TxDescCache, &TxDescCache::pktComplete> pktEvent;

    };
    friend class TxDescCache;

    TxDescCache txDescCache;

  public:
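    /** Construction parameters for the device (extends PciDev::Params). */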
    struct Params : public PciDev::Params
    {
        Net::EthAddr hardware_address;
        bool use_flow_control;
        int rx_fifo_size;
        int tx_fifo_size;
        int rx_desc_cache_size;
        int tx_desc_cache_size;
        Tick clock;
    };

    IGbE(Params *params);
    ~IGbE() {}

    Tick clock;
    inline Tick cycles(int numCycles) const { return numCycles * clock; }

    virtual Tick read(PacketPtr pkt);
    virtual Tick write(PacketPtr pkt);

    virtual Tick writeConfig(PacketPtr pkt);

    bool ethRxPkt(EthPacketPtr packet);
    void ethTxDone();

    void setEthInt(IGbEInt *i) { assert(!etherInt); etherInt = i; }

    const Params *params() const { return (const Params *)_params; }

    virtual void serialize(std::ostream &os);
    virtual void unserialize(Checkpoint *cp, const std::string &section);

};

class IGbEInt : public EtherInt
{
  private:
    IGbE *dev;

  public:
    IGbEInt(const std::string &name, IGbE *d)
        : EtherInt(name), dev(d)
    { dev->setEthInt(this); }

    virtual bool recvPacket(EthPacketPtr pkt) { return dev->ethRxPkt(pkt); }
    virtual void sendDone() { dev->ethTxDone(); }
};

#endif //__DEV_I8254XGBE_HH__