/*
 * Copyright (c) 2000-2005 The Regents of The University of Michigan
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * Copyright (c) 2013 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @file
 * EventQueue interfaces
 */

#ifndef __SIM_EVENTQ_HH__
#define __SIM_EVENTQ_HH__

#include <algorithm>
#include <cassert>
#include <climits>
#include <functional>
#include <iosfwd>
#include <list>
#include <memory>
#include <mutex>
#include <string>
#include <vector>

#include "base/flags.hh"
#include "base/types.hh"
#include "debug/Event.hh"
#include "sim/serialize.hh"

class EventQueue; // forward declaration
class BaseGlobalEvent;

//! Simulation Quantum for multiple eventq simulation.
//! The quantum value is the period length after which the queues
//! synchronize themselves with each other. This means that any
//! event scheduled on Queue A which is generated by an event on
//! Queue B should be at least simQuantum ticks away in the future.
extern Tick simQuantum;

//! Current number of allocated main event queues.
extern uint32_t numMainEventQueues;

//! Array for main event queues.
extern std::vector<EventQueue *> mainEventQueue;

//! The current event queue for the running thread. Access to this queue
//! does not require any locking from the thread.
extern __thread EventQueue *_curEventQueue;

//! Current mode of execution: parallel / serial
extern bool inParallelMode;

//! Function for returning the event queue for the provided
//! index. The function allocates a new queue if one
//! does not exist for the index, provided that the index
//! is within bounds.
EventQueue *getEventQueue(uint32_t index);

inline EventQueue *curEventQueue() { return _curEventQueue; }
inline void curEventQueue(EventQueue *q) { _curEventQueue = q; }

/**
 * Common base class for Event and GlobalEvent, so they can share flag
 * and priority definitions and accessor functions. This class should
 * not be used directly.
 */
class EventBase
{
  protected:
    typedef unsigned short FlagsType;
    typedef ::Flags<FlagsType> Flags;

    static const FlagsType PublicRead = 0x003f; // public readable flags
    static const FlagsType PublicWrite = 0x001d; // public writable flags
    static const FlagsType Squashed = 0x0001; // has been squashed
    static const FlagsType Scheduled = 0x0002; // has been scheduled
    static const FlagsType Managed = 0x0004; // Use life cycle manager
    static const FlagsType AutoDelete = Managed; // delete after dispatch
    /**
     * This used to be AutoSerialize. This value can't be reused
     * without changing the checkpoint version since the flag field
     * gets serialized.
     */
    static const FlagsType Reserved0 = 0x0008;
    static const FlagsType IsExitEvent = 0x0010; // special exit event
    static const FlagsType IsMainQueue = 0x0020; // on main event queue
    static const FlagsType Initialized = 0x7a40; // somewhat random bits
    static const FlagsType InitMask = 0xffc0; // mask for init bits

  public:
    /**
     * @ingroup api_eventq
     */
    typedef int8_t Priority;

    /// Event priorities, to provide tie-breakers for events scheduled
    /// at the same cycle. Most events are scheduled at the default
    /// priority; these values are used to control events that need to
    /// be ordered within a cycle.

    /**
     * Minimum priority
     *
     * @ingroup api_eventq
     */
    static const Priority Minimum_Pri = SCHAR_MIN;

    /**
     * If we enable tracing on a particular cycle, do that as the
     * very first thing so we don't miss any of the events on
     * that cycle (even if we enter the debugger).
     *
     * @ingroup api_eventq
     */
    static const Priority Debug_Enable_Pri = -101;

    /**
     * Breakpoints should happen before anything else (except
     * enabling trace output), so we don't miss any action when
     * debugging.
     *
     * @ingroup api_eventq
     */
    static const Priority Debug_Break_Pri = -100;

    /**
     * CPU switches schedule the new CPU's tick event for the
     * same cycle (after unscheduling the old CPU's tick event).
     * The switch needs to come before any tick events to make
     * sure we don't tick both CPUs in the same cycle.
     *
     * @ingroup api_eventq
     */
    static const Priority CPU_Switch_Pri = -31;

    /**
     * For some reason "delayed" inter-cluster writebacks are
     * scheduled before regular writebacks (which have default
     * priority). Steve?
     *
     * @ingroup api_eventq
     */
    static const Priority Delayed_Writeback_Pri = -1;

    /**
     * Default is zero for historical reasons.
     *
     * @ingroup api_eventq
     */
    static const Priority Default_Pri = 0;

    /**
     * A DVFS update event leads to a stats dump, so it is given a
     * lower priority to ensure all relevant state has been updated
     * first.
     *
     * @ingroup api_eventq
     */
    static const Priority DVFS_Update_Pri = 31;

    /**
     * Serialization needs to occur before tick events also, so
     * that a serialize/unserialize is identical to an on-line
     * CPU switch.
     *
     * @ingroup api_eventq
     */
    static const Priority Serialize_Pri = 32;

    /**
     * CPU ticks must come after other associated CPU events
     * (such as writebacks).
     *
     * @ingroup api_eventq
     */
    static const Priority CPU_Tick_Pri = 50;

    /**
     * If we want to exit a thread in a CPU, it comes after CPU_Tick_Pri.
     *
     * @ingroup api_eventq
     */
    static const Priority CPU_Exit_Pri = 64;

    /**
     * Statistics events (dump, reset, etc.) come after
     * everything else, but before exit.
     *
     * @ingroup api_eventq
     */
    static const Priority Stat_Event_Pri = 90;

    /**
     * Progress events come at the end.
     *
     * @ingroup api_eventq
     */
    static const Priority Progress_Event_Pri = 95;

    /**
     * If we want to exit on this cycle, it's the very last thing
     * we do.
     *
     * @ingroup api_eventq
     */
    static const Priority Sim_Exit_Pri = 100;

    /**
     * Maximum priority
     *
     * @ingroup api_eventq
     */
    static const Priority Maximum_Pri = SCHAR_MAX;
};
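
/*
 * Illustrative sketch (not part of the API above): events scheduled at
 * the same tick are ordered by priority, with numerically lower values
 * serviced first. MyEvent below is a hypothetical Event subclass that
 * forwards its priority argument to the Event constructor.
 *
 * @code
 * MyEvent switch_event(EventBase::CPU_Switch_Pri); // priority -31
 * MyEvent tick_event(EventBase::CPU_Tick_Pri);     // priority 50
 * eventq->schedule(&tick_event, curTick());
 * eventq->schedule(&switch_event, curTick());
 * // switch_event is serviced before tick_event on this tick.
 * @endcode
 */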

/**
 * An item on an event queue. The action caused by a given
 * event is specified by deriving a subclass and overriding the
 * process() member function.
 *
 * Caution, the order of members is chosen to maximize data packing.
 */
class Event : public EventBase, public Serializable
{
    friend class EventQueue;

  private:
    // The event queue is now a linked list of linked lists. The
    // 'nextBin' pointer is to find the bin, where a bin is defined as
    // when+priority. All events in the same bin will be stored in a
    // second linked list (a stack) maintained by the 'nextInBin'
    // pointer. The list will be accessed in LIFO order. The end
    // result is that the insert/removal in 'nextBin' is
    // linear/constant, and the lookup/removal in 'nextInBin' is
    // constant/constant. Hopefully this is a significant improvement
    // over the current fully linear insertion.
    Event *nextBin;
    Event *nextInBin;

    static Event *insertBefore(Event *event, Event *curr);
    static Event *removeItem(Event *event, Event *last);

    Tick _when;         //!< timestamp when event should be processed
    Priority _priority; //!< event priority
    Flags flags;

#ifndef NDEBUG
    /// Global counter to generate unique IDs for Event instances
    static Counter instanceCounter;

    /// This event's unique ID. We can also use pointer values for
    /// this but they're not consistent across runs making debugging
    /// more difficult. Thus we use a global counter value when
    /// debugging.
    Counter instance;

    /// queue to which this event belongs (though it may or may not be
    /// scheduled on this queue yet)
    EventQueue *queue;
#endif

#ifdef EVENTQ_DEBUG
    Tick whenCreated;   //!< time created
    Tick whenScheduled; //!< time scheduled
#endif

    void
    setWhen(Tick when, EventQueue *q)
    {
        _when = when;
#ifndef NDEBUG
        queue = q;
#endif
#ifdef EVENTQ_DEBUG
        whenScheduled = curTick();
#endif
    }

    bool
    initialized() const
    {
        return (flags & InitMask) == Initialized;
    }

  protected:
    Flags
    getFlags() const
    {
        return flags & PublicRead;
    }

    bool
    isFlagSet(Flags _flags) const
    {
        assert(_flags.noneSet(~PublicRead));
        return flags.isSet(_flags);
    }

    void
    setFlags(Flags _flags)
    {
        assert(_flags.noneSet(~PublicWrite));
        flags.set(_flags);
    }

    void
    clearFlags(Flags _flags)
    {
        assert(_flags.noneSet(~PublicWrite));
        flags.clear(_flags);
    }

    void
    clearFlags()
    {
        flags.clear(PublicWrite);
    }

    /**
     * This function isn't really useful if TRACING_ON is not defined
     *
     * @ingroup api_eventq
     */
    virtual void trace(const char *action); //!< trace event activity

    /// Return the instance number as a string.
    const std::string instanceString() const;

  protected: /* Memory management */
    /**
     * @{
     * Memory management hooks for events that have the Managed flag set
     *
     * Events can use automatic memory management by setting the
     * Managed flag. The default implementation automatically deletes
     * events once they have been removed from the event queue. This
     * typically happens when events are descheduled or have been
     * triggered and not rescheduled.
     *
     * The methods below may be overridden by events that need custom
     * memory management. For example, events exported to Python need
     * to implement reference counting to ensure that the Python
     * implementation of the event is kept alive while it lives in the
     * event queue.
     *
     * @note Memory managers are responsible for implementing
     * reference counting (by overriding both acquireImpl() and
     * releaseImpl()) or checking if an event is no longer scheduled
     * in releaseImpl() before deallocating it.
     */

    /**
     * Managed event scheduled and being held in the event queue.
     */
    void acquire()
    {
        if (flags.isSet(Event::Managed))
            acquireImpl();
    }

    /**
     * Managed event removed from the event queue.
     */
    void release()
    {
        if (flags.isSet(Event::Managed))
            releaseImpl();
    }

    virtual void acquireImpl() {}

    virtual void releaseImpl()
    {
        if (!scheduled())
            delete this;
    }

    /** @} */

  public:

    /**
     * Event constructor
     * @param p priority of the event
     * @param f flags for the event
     *
     * @ingroup api_eventq
     */
    Event(Priority p = Default_Pri, Flags f = 0)
        : nextBin(nullptr), nextInBin(nullptr), _when(0), _priority(p),
          flags(Initialized | f)
    {
        assert(f.noneSet(~PublicWrite));
#ifndef NDEBUG
        instance = ++instanceCounter;
        queue = NULL;
#endif
#ifdef EVENTQ_DEBUG
        whenCreated = curTick();
        whenScheduled = 0;
#endif
    }

    /**
     * @ingroup api_eventq
     * @{
     */
    virtual ~Event();
    virtual const std::string name() const;

    /// Return a C string describing the event. This string should
    /// *not* be dynamically allocated; just a const char array
    /// describing the event class.
    virtual const char *description() const;

    /// Dump the current event data
    void dump() const;
    /** @}*/ //end of api group

  public:
    /**
     * This member function is invoked when the event is processed
     * (occurs). There is no default implementation; each subclass
     * must provide its own implementation. The event is not
     * automatically deleted after it is processed (to allow for
     * statically allocated event objects).
     *
     * If the AutoDelete flag is set, the object is deleted once it
     * is processed.
     *
     * @ingroup api_eventq
     */
    virtual void process() = 0;

    /**
     * Determine if the current event is scheduled
     *
     * @ingroup api_eventq
     */
    bool scheduled() const { return flags.isSet(Scheduled); }

    /**
     * Squash the current event
     *
     * @ingroup api_eventq
     */
    void squash() { flags.set(Squashed); }

    /**
     * Check whether the event is squashed
     *
     * @ingroup api_eventq
     */
    bool squashed() const { return flags.isSet(Squashed); }

    /**
     * See if this is a SimExitEvent (without resorting to RTTI)
     *
     * @ingroup api_eventq
     */
    bool isExitEvent() const { return flags.isSet(IsExitEvent); }

    /**
     * Check whether this event will auto-delete
     *
     * @ingroup api_eventq
     */
    bool isManaged() const { return flags.isSet(Managed); }

    /**
     * @ingroup api_eventq
     */
    bool isAutoDelete() const { return isManaged(); }

    /**
     * Get the time that the event is scheduled
     *
     * @ingroup api_eventq
     */
    Tick when() const { return _when; }

    /**
     * Get the event priority
     *
     * @ingroup api_eventq
     */
    Priority priority() const { return _priority; }

    //! If this is part of a GlobalEvent, return the pointer to the
    //! Global Event. By default, there is no GlobalEvent, so return
    //! NULL. (Overridden in GlobalEvent::BarrierEvent.)
    virtual BaseGlobalEvent *globalEvent() { return NULL; }

    void serialize(CheckpointOut &cp) const override;
    void unserialize(CheckpointIn &cp) override;
};
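
/*
 * Minimal sketch of a concrete event type; MyObject and its doWork()
 * member are assumptions for illustration. A subclass only has to
 * override process(); description() is optional but aids debugging.
 *
 * @code
 * class WorkEvent : public Event
 * {
 *   private:
 *     MyObject *obj; // hypothetical target object
 *
 *   public:
 *     WorkEvent(MyObject *o) : Event(Default_Pri), obj(o) {}
 *     void process() override { obj->doWork(); }
 *     const char *description() const override { return "work event"; }
 * };
 * @endcode
 */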

/**
 * @ingroup api_eventq
 */
inline bool
operator<(const Event &l, const Event &r)
{
    return l.when() < r.when() ||
        (l.when() == r.when() && l.priority() < r.priority());
}

/**
 * @ingroup api_eventq
 */
inline bool
operator>(const Event &l, const Event &r)
{
    return l.when() > r.when() ||
        (l.when() == r.when() && l.priority() > r.priority());
}

/**
 * @ingroup api_eventq
 */
inline bool
operator<=(const Event &l, const Event &r)
{
    return l.when() < r.when() ||
        (l.when() == r.when() && l.priority() <= r.priority());
}

/**
 * @ingroup api_eventq
 */
inline bool
operator>=(const Event &l, const Event &r)
{
    return l.when() > r.when() ||
        (l.when() == r.when() && l.priority() >= r.priority());
}

/**
 * @ingroup api_eventq
 */
inline bool
operator==(const Event &l, const Event &r)
{
    return l.when() == r.when() && l.priority() == r.priority();
}

/**
 * @ingroup api_eventq
 */
inline bool
operator!=(const Event &l, const Event &r)
{
    return l.when() != r.when() || l.priority() != r.priority();
}

/**
 * Queue of events sorted in time order
 *
 * Events are scheduled (inserted into the event queue) using the
 * schedule() method. This method either inserts a <i>synchronous</i>
 * or <i>asynchronous</i> event.
 *
 * Synchronous events are scheduled using the schedule() method with the
 * argument 'global' set to false (the default). This should only be done
 * from a thread holding the event queue lock
 * (EventQueue::service_mutex). The lock is always held when an event
 * handler is called; it can therefore always insert events into its
 * own event queue unless it voluntarily releases the lock.
 *
 * Events can be scheduled across thread (and event queue) borders by
 * either scheduling asynchronous events or taking the target event
 * queue's lock. However, the lock should <i>never</i> be taken
 * directly since this is likely to cause deadlocks. Instead, code
 * that needs to schedule events in other event queues should
 * temporarily release its own queue and lock the new queue. This
 * prevents deadlocks since a single thread never owns more than one
 * event queue lock. This functionality is provided by the
 * ScopedMigration helper class. Note that temporarily migrating
 * between event queues can make the simulation non-deterministic; it
 * should therefore be limited to cases where that can be tolerated
 * (e.g., handling asynchronous IO or fast-forwarding in KVM).
 *
 * Asynchronous events can also be scheduled using the normal
 * schedule() method with the 'global' parameter set to true. Unlike
 * the previous queue migration strategy, this strategy is fully
 * deterministic. This causes the event to be inserted in a separate
 * queue of asynchronous events (async_queue), which is merged into the
 * main event queue at the end of each simulation quantum (by calling
 * the handleAsyncInsertions() method). Note that this implies that
 * such events must happen at least one simulation quantum into the
 * future, otherwise they risk being scheduled in the past by
 * handleAsyncInsertions().
 */
class EventQueue
{
  private:
    std::string objName;
    Event *head;
    Tick _curTick;

    //! Mutex to protect async queue.
    std::mutex async_queue_mutex;

    //! List of events added by other threads to this event queue.
    std::list<Event*> async_queue;

    /**
     * Lock protecting event handling.
     *
     * This lock is always taken when servicing events. It is assumed
     * that the thread scheduling new events (not asynchronous events
     * though) has taken this lock. This is normally done by
     * serviceOne() since new events are typically scheduled as a
     * response to an earlier event.
     *
     * This lock is intended to be used to temporarily steal an event
     * queue to support inter-thread communication when some
     * deterministic timing can be sacrificed for speed. For example,
     * the KVM CPU can use this support to access devices running in a
     * different thread.
     *
     * @see EventQueue::ScopedMigration.
     * @see EventQueue::ScopedRelease
     * @see EventQueue::lock()
     * @see EventQueue::unlock()
     */
    std::mutex service_mutex;

    //! Insert / remove event from the queue. Should only be called
    //! by the thread operating this queue.
    void insert(Event *event);
    void remove(Event *event);

    //! Function for adding events to the async queue. The added events
    //! are added to the main event queue later. Threads other than the
    //! owning thread should call this function instead of insert().
    void asyncInsert(Event *event);

    EventQueue(const EventQueue &);

  public:
    /**
     * Temporarily migrate execution to a different event queue.
     *
     * An instance of this class temporarily migrates execution to a
     * different event queue by releasing the current queue, locking
     * the new queue, and updating curEventQueue(). This can, for
     * example, be useful when performing IO across thread event
     * queues when timing is not crucial (e.g., during fast
     * forwarding).
     *
     * ScopedMigration does nothing if both event queues are the same.
     */
    class ScopedMigration
    {
      public:
        /**
         * @ingroup api_eventq
         */
        ScopedMigration(EventQueue *_new_eq, bool _doMigrate = true)
            : new_eq(*_new_eq), old_eq(*curEventQueue()),
              doMigrate((&new_eq != &old_eq) && _doMigrate)
        {
            if (doMigrate) {
                old_eq.unlock();
                new_eq.lock();
                curEventQueue(&new_eq);
            }
        }

        ~ScopedMigration()
        {
            if (doMigrate) {
                new_eq.unlock();
                old_eq.lock();
                curEventQueue(&old_eq);
            }
        }

      private:
        EventQueue &new_eq;
        EventQueue &old_eq;
        bool doMigrate;
    };
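
    /*
     * Sketch of typical use (other_eq, event, and delay are assumptions
     * for illustration): temporarily migrate to another queue to
     * schedule an event there instead of taking its lock directly.
     *
     * @code
     * {
     *     EventQueue::ScopedMigration migrate(other_eq);
     *     other_eq->schedule(event, other_eq->getCurTick() + delay);
     * } // dtor re-locks the old queue and restores curEventQueue()
     * @endcode
     */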

    /**
     * Temporarily release the event queue service lock.
     *
     * There are cases where it is desirable to temporarily release
     * the event queue lock to prevent deadlocks. For example, when
     * waiting on the global barrier, we need to release the lock to
     * prevent deadlocks from happening when another thread tries to
     * temporarily take over the event queue waiting on the barrier.
     */
    class ScopedRelease
    {
      public:
        /**
         * @ingroup api_eventq
         */
        ScopedRelease(EventQueue *_eq)
            : eq(*_eq)
        {
            eq.unlock();
        }

        ~ScopedRelease()
        {
            eq.lock();
        }

      private:
        EventQueue &eq;
    };

    /**
     * @ingroup api_eventq
     */
    EventQueue(const std::string &n);

    /**
     * @ingroup api_eventq
     * @{
     */
    virtual const std::string name() const { return objName; }
    void name(const std::string &st) { objName = st; }
    /** @}*/ //end of api_eventq group

    /**
     * Schedule the given event on this queue. Safe to call from any thread.
     *
     * @ingroup api_eventq
     */
    void schedule(Event *event, Tick when, bool global = false);

    /**
     * Deschedule the specified event. Should be called only from the owning
     * thread.
     *
     * @ingroup api_eventq
     */
    void deschedule(Event *event);

    /**
     * Reschedule the specified event. Should be called only from the owning
     * thread.
     *
     * @ingroup api_eventq
     */
    void reschedule(Event *event, Tick when, bool always = false);

    Tick nextTick() const { return head->when(); }
    void setCurTick(Tick newVal) { _curTick = newVal; }

    /**
     * While curTick() is useful for any object assigned to this event
     * queue, if an object that is assigned to another event queue (or a
     * non-event object) needs to access the current tick of this event
     * queue, this function can be used.
     *
     * @return Tick The current tick of this event queue.
     * @ingroup api_eventq
     */
    Tick getCurTick() const { return _curTick; }
    Event *getHead() const { return head; }

    Event *serviceOne();

    /**
     * Process all events up to the given timestamp. We inline a quick test
     * to see if there are any events to process; if so, call the internal
     * out-of-line version to process them all.
     *
     * Notes:
     * - This is only used for "instruction" event queues. Instead of
     *   counting ticks, this is actually counting instructions.
     * - This updates the current tick value to the value of the entry at
     *   the head of the queue.
     *
     * @ingroup api_eventq
     */
    void
    serviceEvents(Tick when)
    {
        while (!empty()) {
            if (nextTick() > when)
                break;

            /**
             * @todo this assert is a good bug catcher. I need to
             * make it true again.
             */
            //assert(head->when() >= when && "event scheduled in the past");
            serviceOne();
        }

        setCurTick(when);
    }

    /**
     * Returns true if no events are queued
     *
     * @ingroup api_eventq
     */
    bool empty() const { return head == NULL; }

    /**
     * This is a debugging function which will print everything on the event
     * queue.
     *
     * @ingroup api_eventq
     */
    void dump() const;

    bool debugVerify() const;

    /**
     * Function for moving events from the async_queue to the main queue.
     */
    void handleAsyncInsertions();

    /**
     * Function to signal that the event loop should be woken up because
     * an event has been scheduled by an agent outside the gem5 event
     * loop(s) whose event insertion may not have been noticed by gem5.
     * This function isn't needed by the usual gem5 event loop but may
     * be necessary in derived EventQueues which host gem5 onto other
     * schedulers.
     *
     * @param when Time of a delayed wakeup (if known). This parameter
     * can be used by an implementation to schedule a wakeup in the
     * future if it is sure it will remain active until then.
     * Or it can be ignored and the event queue can be woken up now.
     *
     * @ingroup api_eventq
     */
    virtual void wakeup(Tick when = (Tick)-1) { }

    /**
     * Function for replacing the head of the event queue, so that a
     * different set of events can run without disturbing events that have
     * already been scheduled. Already scheduled events can be processed
     * by replacing the original head back.
     * USING THIS FUNCTION CAN BE DANGEROUS TO THE HEALTH OF THE SIMULATOR.
     * NOT RECOMMENDED FOR USE.
     */
    Event* replaceHead(Event* s);

    /**@{*/
    /**
     * Provide an interface for locking/unlocking the event queue.
     *
     * @warn Do NOT use these methods directly unless you really know
     * what you are doing. Incorrect use can easily lead to simulator
     * deadlocks.
     *
     * @see EventQueue::ScopedMigration.
     * @see EventQueue::ScopedRelease
     * @see EventQueue
     */
    void lock() { service_mutex.lock(); }
    void unlock() { service_mutex.unlock(); }
    /**@}*/

    /**
     * Reschedule an event after a checkpoint.
     *
     * Since events don't know which event queue they belong to,
     * parent objects need to reschedule events themselves. This
     * method conditionally schedules an event that has the Scheduled
     * flag set. It should be called by parent objects after
     * unserializing an object.
     *
     * @warn Only use this method after unserializing an Event.
     */
    void checkpointReschedule(Event *event);

    virtual ~EventQueue()
    {
        while (!empty())
            deschedule(getHead());
    }
};
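
/*
 * Minimal service loop over an EventQueue (an illustrative sketch only;
 * the real gem5 event loop also handles async insertions, wakeups, and
 * simulation quanta):
 *
 * @code
 * EventQueue *eq = getEventQueue(0);
 * while (!eq->empty()) {
 *     // serviceOne() hands exit events back to the caller.
 *     Event *exit_event = eq->serviceOne();
 *     if (exit_event)
 *         break;
 * }
 * @endcode
 */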

void dumpMainQueue();

class EventManager
{
  protected:
    /** A pointer to this object's event queue */
    EventQueue *eventq;

  public:
    /**
     * @ingroup api_eventq
     * @{
     */
    EventManager(EventManager &em) : eventq(em.eventq) {}
    EventManager(EventManager *em) : eventq(em->eventq) {}
    EventManager(EventQueue *eq) : eventq(eq) {}
    /** @}*/ //end of api_eventq group

    /**
     * @ingroup api_eventq
     */
    EventQueue *
    eventQueue() const
    {
        return eventq;
    }

    /**
     * @ingroup api_eventq
     */
    void
    schedule(Event &event, Tick when)
    {
        eventq->schedule(&event, when);
    }

    /**
     * @ingroup api_eventq
     */
    void
    deschedule(Event &event)
    {
        eventq->deschedule(&event);
    }

    /**
     * @ingroup api_eventq
     */
    void
    reschedule(Event &event, Tick when, bool always = false)
    {
        eventq->reschedule(&event, when, always);
    }

    /**
     * @ingroup api_eventq
     */
    void
    schedule(Event *event, Tick when)
    {
        eventq->schedule(event, when);
    }

    /**
     * @ingroup api_eventq
     */
    void
    deschedule(Event *event)
    {
        eventq->deschedule(event);
    }

    /**
     * @ingroup api_eventq
     */
    void
    reschedule(Event *event, Tick when, bool always = false)
    {
        eventq->reschedule(event, when, always);
    }

    /**
     * @ingroup api_eventq
     */
    void wakeupEventQueue(Tick when = (Tick)-1)
    {
        eventq->wakeup(when);
    }

    void setCurTick(Tick newVal) { eventq->setCurTick(newVal); }
};
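
/*
 * Sketch of typical use: simulation objects inherit from EventManager
 * (in gem5, SimObject does) so member code can schedule events on the
 * object's queue directly. doneEvent and latency are assumptions for
 * illustration.
 *
 * @code
 * void
 * MyObject::startOperation()
 * {
 *     if (!doneEvent.scheduled())
 *         schedule(doneEvent, curTick() + latency);
 * }
 * @endcode
 */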

template <class T, void (T::* F)()>
class EventWrapper : public Event
{
  private:
    T *object;

  public:
    EventWrapper(T *obj, bool del = false, Priority p = Default_Pri)
        : Event(p), object(obj)
    {
        if (del)
            setFlags(AutoDelete);
    }

    EventWrapper(T &obj, bool del = false, Priority p = Default_Pri)
        : Event(p), object(&obj)
    {
        if (del)
            setFlags(AutoDelete);
    }

    void process() { (object->*F)(); }

    const std::string
    name() const
    {
        return object->name() + ".wrapped_event";
    }

    const char *description() const { return "EventWrapped"; }
};
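
/*
 * Sketch of typical EventWrapper use; MyObject and its tick() member
 * are assumptions for illustration. The wrapper invokes the bound
 * member function when the event is processed.
 *
 * @code
 * class MyObject : public EventManager
 * {
 *     void tick();
 *     EventWrapper<MyObject, &MyObject::tick> tickEvent;
 *
 *   public:
 *     MyObject(EventQueue *eq) : EventManager(eq), tickEvent(this)
 *     {
 *         schedule(tickEvent, curTick() + 1000);
 *     }
 * };
 * @endcode
 */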

class EventFunctionWrapper : public Event
{
  private:
    std::function<void(void)> callback;
    std::string _name;

  public:
    /**
     * @ingroup api_eventq
     */
    EventFunctionWrapper(const std::function<void(void)> &callback,
                         const std::string &name,
                         bool del = false,
                         Priority p = Default_Pri)
        : Event(p), callback(callback), _name(name)
    {
        if (del)
            setFlags(AutoDelete);
    }

    /**
     * @ingroup api_eventq
     */
    void process() { callback(); }

    /**
     * @ingroup api_eventq
     */
    const std::string
    name() const
    {
        return _name + ".wrapped_function_event";
    }

    /**
     * @ingroup api_eventq
     */
    const char *description() const { return "EventFunctionWrapped"; }
};
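
/*
 * Sketch of typical use with a lambda; processEvent() and latency are
 * assumed members of the enclosing object, and name() is the usual
 * gem5 object name.
 *
 * @code
 * EventFunctionWrapper event([this]{ processEvent(); }, name());
 * schedule(event, curTick() + latency);
 * @endcode
 */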

#endif // __SIM_EVENTQ_HH__