base,sim: implement a faster mutex for single thread case
[gem5.git] / src / sim / eventq.hh
/*
 * Copyright (c) 2000-2005 The Regents of The University of Michigan
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * Copyright (c) 2013 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* @file
 * EventQueue interfaces
 */

#ifndef __SIM_EVENTQ_HH__
#define __SIM_EVENTQ_HH__

#include <algorithm>
#include <cassert>
#include <climits>
#include <functional>
#include <iosfwd>
#include <list>
#include <memory>
#include <string>
#include <vector>

#include "base/debug.hh"
#include "base/flags.hh"
#include "base/types.hh"
#include "base/uncontended_mutex.hh"
#include "debug/Event.hh"
#include "sim/serialize.hh"

class EventQueue; // forward declaration
class BaseGlobalEvent;

//! Simulation Quantum for multiple eventq simulation.
//! The quantum value is the period length after which the queues
//! synchronize themselves with each other. This means that any
//! event scheduled on Queue A which is generated by an event on
//! Queue B should be at least simQuantum ticks away in the future.
extern Tick simQuantum;

//! Current number of allocated main event queues.
extern uint32_t numMainEventQueues;

//! Array for main event queues.
extern std::vector<EventQueue *> mainEventQueue;

//! The current event queue for the running thread. Access to this queue
//! does not require any locking from the thread.

extern __thread EventQueue *_curEventQueue;

//! Current mode of execution: parallel / serial
extern bool inParallelMode;

//! Function for returning the event queue for the provided
//! index. The function allocates a new queue in case one
//! does not exist for the index, provided that the index
//! is within bounds.
EventQueue *getEventQueue(uint32_t index);

inline EventQueue *curEventQueue() { return _curEventQueue; }
inline void curEventQueue(EventQueue *q) { _curEventQueue = q; }

/**
 * Common base class for Event and GlobalEvent, so they can share flag
 * and priority definitions and accessor functions. This class should
 * not be used directly.
 */
class EventBase
{
  protected:
    typedef unsigned short FlagsType;
    typedef ::Flags<FlagsType> Flags;

    static const FlagsType PublicRead    = 0x003f; // public readable flags
    static const FlagsType PublicWrite   = 0x001d; // public writable flags
    static const FlagsType Squashed      = 0x0001; // has been squashed
    static const FlagsType Scheduled     = 0x0002; // has been scheduled
    static const FlagsType Managed       = 0x0004; // Use life cycle manager
    static const FlagsType AutoDelete    = Managed; // delete after dispatch
    /**
     * This used to be AutoSerialize. This value can't be reused
     * without changing the checkpoint version since the flag field
     * gets serialized.
     */
    static const FlagsType Reserved0     = 0x0008;
    static const FlagsType IsExitEvent   = 0x0010; // special exit event
    static const FlagsType IsMainQueue   = 0x0020; // on main event queue
    static const FlagsType Initialized   = 0x7a40; // somewhat random bits
    static const FlagsType InitMask      = 0xffc0; // mask for init bits

  public:
    /**
     * @ingroup api_eventq
     */
    typedef int8_t Priority;

    /// Event priorities, to provide tie-breakers for events scheduled
    /// at the same cycle. Most events are scheduled at the default
    /// priority; these values are used to control events that need to
    /// be ordered within a cycle.

    /**
     * Minimum priority
     *
     * @ingroup api_eventq
     */
    static const Priority Minimum_Pri = SCHAR_MIN;

    /**
     * If we enable tracing on a particular cycle, do that as the
     * very first thing so we don't miss any of the events on
     * that cycle (even if we enter the debugger).
     *
     * @ingroup api_eventq
     */
    static const Priority Debug_Enable_Pri = -101;

    /**
     * Breakpoints should happen before anything else (except
     * enabling trace output), so we don't miss any action when
     * debugging.
     *
     * @ingroup api_eventq
     */
    static const Priority Debug_Break_Pri = -100;

    /**
     * CPU switches schedule the new CPU's tick event for the
     * same cycle (after unscheduling the old CPU's tick event).
     * The switch needs to come before any tick events to make
     * sure we don't tick both CPUs in the same cycle.
     *
     * @ingroup api_eventq
     */
    static const Priority CPU_Switch_Pri = -31;

    /**
     * For some reason "delayed" inter-cluster writebacks are
     * scheduled before regular writebacks (which have default
     * priority). Steve?
     *
     * @ingroup api_eventq
     */
    static const Priority Delayed_Writeback_Pri = -1;

    /**
     * Default is zero for historical reasons.
     *
     * @ingroup api_eventq
     */
    static const Priority Default_Pri = 0;

    /**
     * The DVFS update event leads to a stats dump, and is therefore
     * given a lower priority to ensure all relevant state has been
     * updated.
     *
     * @ingroup api_eventq
     */
    static const Priority DVFS_Update_Pri = 31;

    /**
     * Serialization needs to occur before tick events also, so
     * that a serialize/unserialize is identical to an on-line
     * CPU switch.
     *
     * @ingroup api_eventq
     */
    static const Priority Serialize_Pri = 32;

    /**
     * CPU ticks must come after other associated CPU events
     * (such as writebacks).
     *
     * @ingroup api_eventq
     */
    static const Priority CPU_Tick_Pri = 50;

    /**
     * If we want to exit a thread in a CPU, it comes after CPU_Tick_Pri.
     *
     * @ingroup api_eventq
     */
    static const Priority CPU_Exit_Pri = 64;

    /**
     * Statistics events (dump, reset, etc.) come after
     * everything else, but before exit.
     *
     * @ingroup api_eventq
     */
    static const Priority Stat_Event_Pri = 90;

    /**
     * Progress events come at the end.
     *
     * @ingroup api_eventq
     */
    static const Priority Progress_Event_Pri = 95;

    /**
     * If we want to exit on this cycle, it's the very last thing
     * we do.
     *
     * @ingroup api_eventq
     */
    static const Priority Sim_Exit_Pri = 100;

    /**
     * Maximum priority
     *
     * @ingroup api_eventq
     */
    static const Priority Maximum_Pri = SCHAR_MAX;
};

/*
 * An item on an event queue. The action caused by a given
 * event is specified by deriving a subclass and overriding the
 * process() member function.
 *
 * Caution, the order of members is chosen to maximize data packing.
 */
class Event : public EventBase, public Serializable
{
    friend class EventQueue;

  private:
    // The event queue is now a linked list of linked lists. The
    // 'nextBin' pointer is to find the bin, where a bin is defined as
    // when+priority. All events in the same bin will be stored in a
    // second linked list (a stack) maintained by the 'nextInBin'
    // pointer. The list will be accessed in LIFO order. The end
    // result is that the insert/removal in 'nextBin' is
    // linear/constant, and the lookup/removal in 'nextInBin' is
    // constant/constant. Hopefully this is a significant improvement
    // over the current fully linear insertion.
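    //
    // An illustrative sketch of the resulting shape (values made up):
    //
    //   head -> [when=10, pri=0] -> [when=10, pri=5] -> [when=20, pri=0]
    //               |                    |                    |
    //           nextInBin            nextInBin            nextInBin
    //               v                    v                    v
    //          (LIFO stack of      (LIFO stack of       (LIFO stack of
    //           events in bin)      events in bin)       events in bin)
    //
    // Horizontal arrows follow 'nextBin' (sorted by when, then
    // priority); vertical arrows follow 'nextInBin' within a bin.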
    Event *nextBin;
    Event *nextInBin;

    static Event *insertBefore(Event *event, Event *curr);
    static Event *removeItem(Event *event, Event *last);

    Tick _when;          //!< timestamp when event should be processed
    Priority _priority;  //!< event priority
    Flags flags;

#ifndef NDEBUG
    /// Global counter to generate unique IDs for Event instances
    static Counter instanceCounter;

    /// This event's unique ID. We can also use pointer values for
    /// this, but they're not consistent across runs, making debugging
    /// more difficult. Thus we use a global counter value when
    /// debugging.
    Counter instance;

    /// queue to which this event belongs (though it may or may not be
    /// scheduled on this queue yet)
    EventQueue *queue;
#endif

#ifdef EVENTQ_DEBUG
    Tick whenCreated;    //!< time created
    Tick whenScheduled;  //!< time scheduled
#endif

    void
    setWhen(Tick when, EventQueue *q)
    {
        _when = when;
#ifndef NDEBUG
        queue = q;
#endif
#ifdef EVENTQ_DEBUG
        whenScheduled = curTick();
#endif
    }

    bool
    initialized() const
    {
        return (flags & InitMask) == Initialized;
    }

  protected:
    Flags
    getFlags() const
    {
        return flags & PublicRead;
    }

    bool
    isFlagSet(Flags _flags) const
    {
        assert(_flags.noneSet(~PublicRead));
        return flags.isSet(_flags);
    }

    void
    setFlags(Flags _flags)
    {
        assert(_flags.noneSet(~PublicWrite));
        flags.set(_flags);
    }

    void
    clearFlags(Flags _flags)
    {
        assert(_flags.noneSet(~PublicWrite));
        flags.clear(_flags);
    }

    void
    clearFlags()
    {
        flags.clear(PublicWrite);
    }

    /**
     * This function isn't really useful if TRACING_ON is not defined.
     *
     * @ingroup api_eventq
     */
    virtual void trace(const char *action); //!< trace event activity

    /// Return the instance number as a string.
    const std::string instanceString() const;

  protected: /* Memory management */
    /**
     * @{
     * Memory management hooks for events that have the Managed flag set
     *
     * Events can use automatic memory management by setting the
     * Managed flag. The default implementation automatically deletes
     * events once they have been removed from the event queue. This
     * typically happens when events are descheduled or have been
     * triggered and not rescheduled.
     *
     * The methods below may be overridden by events that need custom
     * memory management. For example, events exported to Python need
     * to implement reference counting to ensure that the Python
     * implementation of the event is kept alive while it lives in the
     * event queue.
     *
     * @note Memory managers are responsible for implementing
     * reference counting (by overriding both acquireImpl() and
     * releaseImpl()) or checking if an event is no longer scheduled
     * in releaseImpl() before deallocating it.
     */

    /**
     * Managed event scheduled and being held in the event queue.
     */
    void
    acquire()
    {
        if (flags.isSet(Event::Managed))
            acquireImpl();
    }

    /**
     * Managed event removed from the event queue.
     */
    void
    release()
    {
        if (flags.isSet(Event::Managed))
            releaseImpl();
    }

    virtual void acquireImpl() {}

    virtual void
    releaseImpl()
    {
        if (!scheduled())
            delete this;
    }

    /** @} */

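    // As an illustrative (non-normative) sketch, a subclass that needs
    // reference counting could override both hooks along these lines;
    // 'RefCountedEvent' is a hypothetical name, not a gem5 class:
    //
    //     class RefCountedEvent : public Event
    //     {
    //       private:
    //         int refCount = 0;
    //
    //       protected:
    //         void acquireImpl() override { ++refCount; }
    //
    //         void releaseImpl() override
    //         {
    //             // Deallocate only when no holder remains and the
    //             // event is no longer in a queue.
    //             if (--refCount == 0 && !scheduled())
    //                 delete this;
    //         }
    //     };
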
  public:

    /*
     * Event constructor
     * @param p priority of the event
     * @param f initial flags (only publicly writable flags may be set)
     *
     * @ingroup api_eventq
     */
    Event(Priority p = Default_Pri, Flags f = 0)
        : nextBin(nullptr), nextInBin(nullptr), _when(0), _priority(p),
          flags(Initialized | f)
    {
        assert(f.noneSet(~PublicWrite));
#ifndef NDEBUG
        instance = ++instanceCounter;
        queue = NULL;
#endif
#ifdef EVENTQ_DEBUG
        whenCreated = curTick();
        whenScheduled = 0;
#endif
    }

    /**
     * @ingroup api_eventq
     * @{
     */
    virtual ~Event();
    virtual const std::string name() const;

    /// Return a C string describing the event. This string should
    /// *not* be dynamically allocated; just a const char array
    /// describing the event class.
    virtual const char *description() const;

    /// Dump the current event data
    void dump() const;
    /** @} */ //end of api group

  public:
    /*
     * This member function is invoked when the event is processed
     * (occurs). There is no default implementation; each subclass
     * must provide its own implementation. The event is not
     * automatically deleted after it is processed (to allow for
     * statically allocated event objects).
     *
     * If the AutoDelete flag is set, the object is deleted once it
     * is processed.
     *
     * @ingroup api_eventq
     */
    virtual void process() = 0;

    /**
     * Determine if the current event is scheduled
     *
     * @ingroup api_eventq
     */
    bool scheduled() const { return flags.isSet(Scheduled); }

    /**
     * Squash the current event
     *
     * @ingroup api_eventq
     */
    void squash() { flags.set(Squashed); }

    /**
     * Check whether the event is squashed
     *
     * @ingroup api_eventq
     */
    bool squashed() const { return flags.isSet(Squashed); }

    /**
     * See if this is a SimExitEvent (without resorting to RTTI)
     *
     * @ingroup api_eventq
     */
    bool isExitEvent() const { return flags.isSet(IsExitEvent); }

    /**
     * Check whether this event will auto-delete
     *
     * @ingroup api_eventq
     */
    bool isManaged() const { return flags.isSet(Managed); }

    /**
     * Returns true if the object is automatically deleted after the
     * event is processed.
     *
     * @ingroup api_eventq
     */
    bool isAutoDelete() const { return isManaged(); }

    /**
     * Get the time that the event is scheduled
     *
     * @ingroup api_eventq
     */
    Tick when() const { return _when; }

    /**
     * Get the event priority
     *
     * @ingroup api_eventq
     */
    Priority priority() const { return _priority; }

    //! If this is part of a GlobalEvent, return the pointer to the
    //! Global Event. By default, there is no GlobalEvent, so return
    //! NULL. (Overridden in GlobalEvent::BarrierEvent.)
    virtual BaseGlobalEvent *globalEvent() { return NULL; }

    void serialize(CheckpointOut &cp) const override;
    void unserialize(CheckpointIn &cp) override;
};

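// A minimal sketch of a concrete event; 'ExampleEvent' is hypothetical
// and shown for illustration only:
//
//     class ExampleEvent : public Event
//     {
//       public:
//         ExampleEvent() : Event(Default_Pri) {}
//
//         // Called when the event's scheduled tick is reached.
//         void process() override { /* do the work here */ }
//
//         const char *description() const override
//         { return "example event"; }
//     };
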
/**
 * @ingroup api_eventq
 */
inline bool
operator<(const Event &l, const Event &r)
{
    return l.when() < r.when() ||
        (l.when() == r.when() && l.priority() < r.priority());
}

/**
 * @ingroup api_eventq
 */
inline bool
operator>(const Event &l, const Event &r)
{
    return l.when() > r.when() ||
        (l.when() == r.when() && l.priority() > r.priority());
}

/**
 * @ingroup api_eventq
 */
inline bool
operator<=(const Event &l, const Event &r)
{
    return l.when() < r.when() ||
        (l.when() == r.when() && l.priority() <= r.priority());
}

/**
 * @ingroup api_eventq
 */
inline bool
operator>=(const Event &l, const Event &r)
{
    return l.when() > r.when() ||
        (l.when() == r.when() && l.priority() >= r.priority());
}

/**
 * @ingroup api_eventq
 */
inline bool
operator==(const Event &l, const Event &r)
{
    return l.when() == r.when() && l.priority() == r.priority();
}

/**
 * @ingroup api_eventq
 */
inline bool
operator!=(const Event &l, const Event &r)
{
    return l.when() != r.when() || l.priority() != r.priority();
}

/**
 * Queue of events sorted in time order
 *
 * Events are scheduled (inserted into the event queue) using the
 * schedule() method. This method either inserts a <i>synchronous</i>
 * or <i>asynchronous</i> event.
 *
 * Synchronous events are scheduled using the schedule() method with
 * the argument 'global' set to false (the default). This should only
 * be done from a thread holding the event queue lock
 * (EventQueue::service_mutex). The lock is always held when an event
 * handler is called; it can therefore always insert events into its
 * own event queue unless it voluntarily releases the lock.
 *
 * Events can be scheduled across thread (and event queue) borders by
 * either scheduling asynchronous events or taking the target event
 * queue's lock. However, the lock should <i>never</i> be taken
 * directly since this is likely to cause deadlocks. Instead, code
 * that needs to schedule events in other event queues should
 * temporarily release its own queue and lock the new queue. This
 * prevents deadlocks since a single thread never owns more than one
 * event queue lock. This functionality is provided by the
 * ScopedMigration helper class. Note that temporarily migrating
 * between event queues can make the simulation non-deterministic; it
 * should therefore be limited to cases where that can be tolerated
 * (e.g., handling asynchronous IO or fast-forwarding in KVM).
 *
 * Asynchronous events can also be scheduled using the normal
 * schedule() method with the 'global' parameter set to true. Unlike
 * the previous queue migration strategy, this strategy is fully
 * deterministic. This causes the event to be inserted in a separate
 * queue of asynchronous events (async_queue), which is merged into
 * the main event queue at the end of each simulation quantum (by
 * calling the handleAsyncInsertions() method). Note that this implies
 * that such events must happen at least one simulation quantum into
 * the future, otherwise they risk being scheduled in the past by
 * handleAsyncInsertions().
 */
class EventQueue
{
  private:
    std::string objName;
    Event *head;
    Tick _curTick;

    //! Mutex to protect the async queue.
    UncontendedMutex async_queue_mutex;

    //! List of events added by other threads to this event queue.
    std::list<Event*> async_queue;

    /**
     * Lock protecting event handling.
     *
     * This lock is always taken when servicing events. It is assumed
     * that the thread scheduling new events (not asynchronous events
     * though) has taken this lock. This is normally done by
     * serviceOne() since new events are typically scheduled as a
     * response to an earlier event.
     *
     * This lock is intended to be used to temporarily steal an event
     * queue to support inter-thread communication when some
     * deterministic timing can be sacrificed for speed. For example,
     * the KVM CPU can use this support to access devices running in a
     * different thread.
     *
     * @see EventQueue::ScopedMigration.
     * @see EventQueue::ScopedRelease
     * @see EventQueue::lock()
     * @see EventQueue::unlock()
     */
    UncontendedMutex service_mutex;

    //! Insert / remove an event from the queue. Should only be called
    //! by the thread operating this queue.
    void insert(Event *event);
    void remove(Event *event);

    //! Function for adding events to the async queue. The added events
    //! are added to the main event queue later. Threads other than the
    //! owning thread should call this function instead of insert().
    void asyncInsert(Event *event);

    EventQueue(const EventQueue &);

  public:
    class ScopedMigration
    {
      public:
        /**
         * Temporarily migrate execution to a different event queue.
         *
         * An instance of this class temporarily migrates execution to
         * a different event queue by releasing the current queue,
         * locking the new queue, and updating curEventQueue(). This
         * can, for example, be useful when performing IO across thread
         * event queues when timing is not crucial (e.g., during fast
         * forwarding).
         *
         * ScopedMigration does nothing if both event queues are the
         * same.
         *
         * @ingroup api_eventq
         */
        ScopedMigration(EventQueue *_new_eq, bool _doMigrate = true)
            : new_eq(*_new_eq), old_eq(*curEventQueue()),
              doMigrate((&new_eq != &old_eq) && _doMigrate)
        {
            if (doMigrate) {
                old_eq.unlock();
                new_eq.lock();
                curEventQueue(&new_eq);
            }
        }

        ~ScopedMigration()
        {
            if (doMigrate) {
                new_eq.unlock();
                old_eq.lock();
                curEventQueue(&old_eq);
            }
        }

      private:
        EventQueue &new_eq;
        EventQueue &old_eq;
        bool doMigrate;
    };
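
    // An illustrative sketch only: scheduling onto another queue by
    // temporarily migrating to it ('other_eq', 'ev', and 'delay' are
    // hypothetical names):
    //
    //     {
    //         EventQueue::ScopedMigration migrate(other_eq);
    //         // We now hold other_eq's lock and curEventQueue() is
    //         // other_eq, so a plain schedule() is safe here.
    //         other_eq->schedule(ev, other_eq->getCurTick() + delay);
    //     }   // Destructor re-locks the original queue.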

    class ScopedRelease
    {
      public:
        /**
         * Temporarily release the event queue service lock.
         *
         * There are cases where it is desirable to temporarily release
         * the event queue lock to prevent deadlocks. For example, when
         * waiting on the global barrier, we need to release the lock to
         * prevent deadlocks from happening when another thread tries to
         * temporarily take over the event queue waiting on the barrier.
         *
         * @ingroup api_eventq
         */
        ScopedRelease(EventQueue *_eq)
            : eq(*_eq)
        {
            eq.unlock();
        }

        ~ScopedRelease()
        {
            eq.lock();
        }

      private:
        EventQueue &eq;
    };
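
    // An illustrative sketch only: releasing our queue's lock around a
    // blocking wait ('barrier' is a hypothetical synchronization
    // object):
    //
    //     {
    //         EventQueue::ScopedRelease release(curEventQueue());
    //         barrier->wait();  // Block without holding the queue lock.
    //     }   // Destructor re-acquires the lock.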

    /**
     * @ingroup api_eventq
     */
    EventQueue(const std::string &n);

    /**
     * @ingroup api_eventq
     * @{
     */
    virtual const std::string name() const { return objName; }
    void name(const std::string &st) { objName = st; }
    /** @} */ //end of api_eventq group

    /**
     * Schedule the given event on this queue. Safe to call from any
     * thread.
     *
     * @ingroup api_eventq
     */
    void
    schedule(Event *event, Tick when, bool global=false)
    {
        assert(when >= getCurTick());
        assert(!event->scheduled());
        assert(event->initialized());

        event->setWhen(when, this);

        // The check below is to make sure of two things:
        // a. A thread schedules local events on other queues through
        //    the asyncq.
        // b. A thread schedules global events on the asyncq, whether or
        //    not this event belongs to this eventq. This is required to
        //    maintain a total order amongst the global events. See
        //    global_event.{cc,hh} for more explanation.
        if (inParallelMode && (this != curEventQueue() || global)) {
            asyncInsert(event);
        } else {
            insert(event);
        }
        event->flags.set(Event::Scheduled);
        event->acquire();

        if (DTRACE(Event))
            event->trace("scheduled");
    }

    /**
     * Deschedule the specified event. Should be called only from the
     * owning thread.
     *
     * @ingroup api_eventq
     */
    void
    deschedule(Event *event)
    {
        assert(event->scheduled());
        assert(event->initialized());
        assert(!inParallelMode || this == curEventQueue());

        remove(event);

        event->flags.clear(Event::Squashed);
        event->flags.clear(Event::Scheduled);

        if (DTRACE(Event))
            event->trace("descheduled");

        event->release();
    }

    /**
     * Reschedule the specified event. Should be called only from the
     * owning thread.
     *
     * @ingroup api_eventq
     */
    void
    reschedule(Event *event, Tick when, bool always=false)
    {
        assert(when >= getCurTick());
        assert(always || event->scheduled());
        assert(event->initialized());
        assert(!inParallelMode || this == curEventQueue());

        if (event->scheduled()) {
            remove(event);
        } else {
            event->acquire();
        }

        event->setWhen(when, this);
        insert(event);
        event->flags.clear(Event::Squashed);
        event->flags.set(Event::Scheduled);

        if (DTRACE(Event))
            event->trace("rescheduled");
    }

    Tick nextTick() const { return head->when(); }
    void setCurTick(Tick newVal) { _curTick = newVal; }

    /**
     * While curTick() is useful for any object assigned to this event
     * queue, this function should be used when an object assigned to
     * another event queue (or a non-event object) needs to access the
     * current tick of this event queue.
     *
     * Tick is the unit of time used in gem5.
     *
     * @return Tick The current tick of this event queue.
     * @ingroup api_eventq
     */
    Tick getCurTick() const { return _curTick; }
    Event *getHead() const { return head; }

    Event *serviceOne();

    /**
     * Process all events up to the given timestamp. We inline a quick
     * test to see if there are any events to process; if so, call the
     * internal out-of-line version to process them all.
     *
     * Notes:
     * - This is only used for "instruction" event queues. Instead of
     *   counting ticks, this is actually counting instructions.
     * - This updates the current tick value to the value of the entry
     *   at the head of the queue.
     *
     * @ingroup api_eventq
     */
    void
    serviceEvents(Tick when)
    {
        while (!empty()) {
            if (nextTick() > when)
                break;

            /**
             * @todo this assert is a good bug catcher. I need to
             * make it true again.
             */
            //assert(head->when() >= when && "event scheduled in the past");
            serviceOne();
        }

        setCurTick(when);
    }

    /**
     * Returns true if no events are queued
     *
     * @ingroup api_eventq
     */
    bool empty() const { return head == NULL; }

    /**
     * This is a debugging function which will print everything on the
     * event queue.
     *
     * @ingroup api_eventq
     */
    void dump() const;

    bool debugVerify() const;

    /**
     * Function for moving events from the async_queue to the main queue.
     */
    void handleAsyncInsertions();

    /**
     * Function to signal that the event loop should be woken up because
     * an event has been scheduled by an agent outside the gem5 event
     * loop(s) whose event insertion may not have been noticed by gem5.
     * This function isn't needed by the usual gem5 event loop but may
     * be necessary in derived EventQueues which host gem5 on other
     * schedulers.
     *
     * @param when Time of a delayed wakeup (if known). This parameter
     * can be used by an implementation to schedule a wakeup in the
     * future if it is sure it will remain active until then.
     * Or it can be ignored and the event queue can be woken up now.
     *
     * @ingroup api_eventq
     */
    virtual void wakeup(Tick when = (Tick)-1) { }

    /**
     * Function for replacing the head of the event queue, so that a
     * different set of events can run without disturbing events that
     * have already been scheduled. Already scheduled events can be
     * processed by replacing the original head back.
     * USING THIS FUNCTION CAN BE DANGEROUS TO THE HEALTH OF THE
     * SIMULATOR. NOT RECOMMENDED FOR USE.
     */
    Event* replaceHead(Event* s);

    /**@{*/
    /**
     * Provide an interface for locking/unlocking the event queue.
     *
     * @warn Do NOT use these methods directly unless you really know
     * what you are doing. Incorrect use can easily lead to simulator
     * deadlocks.
     *
     * @see EventQueue::ScopedMigration.
     * @see EventQueue::ScopedRelease
     * @see EventQueue
     */
    void lock() { service_mutex.lock(); }
    void unlock() { service_mutex.unlock(); }
    /**@}*/

    /**
     * Reschedule an event after a checkpoint.
     *
     * Since events don't know which event queue they belong to,
     * parent objects need to reschedule events themselves. This
     * method conditionally schedules an event that has the Scheduled
     * flag set. It should be called by parent objects after
     * unserializing an object.
     *
     * @warn Only use this method after unserializing an Event.
     */
    void checkpointReschedule(Event *event);

    virtual ~EventQueue()
    {
        while (!empty())
            deschedule(getHead());
    }
};
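
// A minimal usage sketch (illustrative only; in practice the main event
// queues are created and serviced by gem5's event loop rather than by
// hand, and 'ExampleEvent' is the hypothetical subclass sketched above):
//
//     EventQueue q("example_queue");
//     ExampleEvent ev;
//     q.schedule(&ev, q.getCurTick() + 1000);
//     q.serviceEvents(q.getCurTick() + 10000);  // runs ev.process()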

void dumpMainQueue();

class EventManager
{
  protected:
    /** A pointer to this object's event queue */
    EventQueue *eventq;

  public:
    /**
     * The event manager manages events in the event queue, providing
     * an interface for scheduling and descheduling different events.
     *
     * @ingroup api_eventq
     * @{
     */
    EventManager(EventManager &em) : eventq(em.eventq) {}
    EventManager(EventManager *em) : eventq(em->eventq) {}
    EventManager(EventQueue *eq) : eventq(eq) {}
    /** @} */ //end of api_eventq group

    /**
     * @ingroup api_eventq
     */
    EventQueue *
    eventQueue() const
    {
        return eventq;
    }

    /**
     * @ingroup api_eventq
     */
    void
    schedule(Event &event, Tick when)
    {
        eventq->schedule(&event, when);
    }

    /**
     * @ingroup api_eventq
     */
    void
    deschedule(Event &event)
    {
        eventq->deschedule(&event);
    }

    /**
     * @ingroup api_eventq
     */
    void
    reschedule(Event &event, Tick when, bool always = false)
    {
        eventq->reschedule(&event, when, always);
    }

    /**
     * @ingroup api_eventq
     */
    void
    schedule(Event *event, Tick when)
    {
        eventq->schedule(event, when);
    }

    /**
     * @ingroup api_eventq
     */
    void
    deschedule(Event *event)
    {
        eventq->deschedule(event);
    }

    /**
     * @ingroup api_eventq
     */
    void
    reschedule(Event *event, Tick when, bool always = false)
    {
        eventq->reschedule(event, when, always);
    }

    /**
     * This function is not needed by the usual gem5 event loop
     * but may be necessary in derived EventQueues which host gem5
     * on other schedulers.
     *
     * @ingroup api_eventq
     */
    void
    wakeupEventQueue(Tick when = (Tick)-1)
    {
        eventq->wakeup(when);
    }

    void setCurTick(Tick newVal) { eventq->setCurTick(newVal); }
};
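
// An illustrative sketch only: classes that inherit from EventManager
// (as gem5 SimObjects do) can schedule events on their own queue
// without naming it ('MyDevice' and 'timerEvent' are hypothetical):
//
//     void
//     MyDevice::startTimer(Tick delay)
//     {
//         // 'timerEvent' is an Event member of MyDevice.
//         schedule(timerEvent, curTick() + delay);
//     }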

template <class T, void (T::* F)()>
class EventWrapper : public Event
{
  private:
    T *object;

  public:
    EventWrapper(T *obj, bool del = false, Priority p = Default_Pri)
        : Event(p), object(obj)
    {
        if (del)
            setFlags(AutoDelete);
    }

    EventWrapper(T &obj, bool del = false, Priority p = Default_Pri)
        : Event(p), object(&obj)
    {
        if (del)
            setFlags(AutoDelete);
    }

    void process() { (object->*F)(); }

    const std::string
    name() const
    {
        return object->name() + ".wrapped_event";
    }

    const char *description() const { return "EventWrapped"; }
};
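
// A hedged usage sketch of EventWrapper: binding a member function so
// it can be scheduled as an event ('MyDevice' and 'tick' are
// hypothetical):
//
//     class MyDevice
//     {
//       public:
//         void tick();
//         std::string name() const { return "my_device"; }
//
//         // Fires MyDevice::tick() when the event is processed.
//         EventWrapper<MyDevice, &MyDevice::tick> tickEvent{this};
//     };
//
//     // Elsewhere: eventq->schedule(&dev.tickEvent, when);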

class EventFunctionWrapper : public Event
{
  private:
    std::function<void(void)> callback;
    std::string _name;

  public:
    /**
     * Wrap a callback function in an event, to be executed when the
     * event is processed.
     *
     * @ingroup api_eventq
     */
    EventFunctionWrapper(const std::function<void(void)> &callback,
                         const std::string &name,
                         bool del = false,
                         Priority p = Default_Pri)
        : Event(p), callback(callback), _name(name)
    {
        if (del)
            setFlags(AutoDelete);
    }

    /**
     * @ingroup api_eventq
     */
    void process() { callback(); }

    /**
     * @ingroup api_eventq
     */
    const std::string
    name() const
    {
        return _name + ".wrapped_function_event";
    }

    /**
     * @ingroup api_eventq
     */
    const char *description() const { return "EventFunctionWrapped"; }
};
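
// A hedged usage sketch of EventFunctionWrapper: wrapping a lambda as
// an event ('MyDevice' and 'processTimeout' are hypothetical):
//
//     class MyDevice
//     {
//       public:
//         void processTimeout();
//         std::string name() const { return "my_device"; }
//
//         // Runs the lambda when the event is processed.
//         EventFunctionWrapper timeoutEvent{
//             [this]{ processTimeout(); }, name()};
//     };
//
//     // Elsewhere: eventq->schedule(&dev.timeoutEvent, when);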

#endif // __SIM_EVENTQ_HH__