2 * Copyright (c) 2000-2005 The Regents of The University of Michigan
3 * Copyright (c) 2013 Advanced Micro Devices, Inc.
4 * Copyright (c) 2013 Mark D. Hill and David A. Wood
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are
9 * met: redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer;
11 * redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution;
14 * neither the name of the copyright holders nor the names of its
15 * contributors may be used to endorse or promote products derived from
16 * this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * EventQueue interfaces
35 #ifndef __SIM_EVENTQ_HH__
36 #define __SIM_EVENTQ_HH__
47 #include "base/flags.hh"
48 #include "base/types.hh"
49 #include "debug/Event.hh"
50 #include "sim/serialize.hh"
// NOTE(review): this extract omits short source lines (blank lines, braces,
// comment delimiters); the declarations below are kept byte-for-byte.
class EventQueue; // forward declaration
class BaseGlobalEvent;

//! Simulation Quantum for multiple eventq simulation.
//! The quantum value is the period length after which the queues
//! synchronize themselves with each other. This means that any
//! event scheduled on Queue A which is generated by an event on
//! Queue B should be at least simQuantum ticks away in future.
extern Tick simQuantum;

//! Current number of allocated main event queues.
extern uint32_t numMainEventQueues;

//! Array for main event queues.
extern std::vector<EventQueue *> mainEventQueue;

//! The current event queue for the running thread. Access to this queue
//! does not require any locking from the thread.
//! NOTE(review): __thread is a GCC extension; C++11 thread_local is the
//! standard spelling -- confirm against the defining .cc before changing.
extern __thread EventQueue *_curEventQueue;

//! Current mode of execution: parallel / serial
extern bool inParallelMode;

//! Function for returning eventq queue for the provided
//! index. The function allocates a new queue in case one
//! does not exist for the index, provided that the index
//! is within bounds.
EventQueue *getEventQueue(uint32_t index);
82 inline EventQueue *curEventQueue() { return _curEventQueue; }
83 inline void curEventQueue(EventQueue *q) { _curEventQueue = q; }
// NOTE(review): this extract omits short source lines (the class header,
// braces, access specifiers, comment delimiters). Code fragments below are
// kept byte-for-byte; only comments are restored/improved.
/*
 * Common base class for Event and GlobalEvent, so they can share flag
 * and priority definitions and accessor functions. This class should
 * not be used directly.
 */
    typedef unsigned short FlagsType;
    typedef ::Flags<FlagsType> Flags;

    // Flag bit layout: the low six bits are publicly readable
    // (PublicRead); of those, only the PublicWrite bits may be set or
    // cleared by users of an event.
    static const FlagsType PublicRead = 0x003f; // public readable flags
    static const FlagsType PublicWrite = 0x001d; // public writable flags
    static const FlagsType Squashed = 0x0001; // has been squashed
    static const FlagsType Scheduled = 0x0002; // has been scheduled
    static const FlagsType Managed = 0x0004; // Use life cycle manager
    static const FlagsType AutoDelete = Managed; // delete after dispatch
    /*
     * This used to be AutoSerialize. This value can't be reused
     * without changing the checkpoint version since the flag field
     * is serialized.
     */
    static const FlagsType Reserved0 = 0x0008;
    static const FlagsType IsExitEvent = 0x0010; // special exit event
    static const FlagsType IsMainQueue = 0x0020; // on main event queue
    static const FlagsType Initialized = 0x7a40; // somewhat random bits
    static const FlagsType InitMask = 0xffc0; // mask for init bits

    /**
     * @ingroup api_eventq
     */
    typedef int8_t Priority;

    /// Event priorities, to provide tie-breakers for events scheduled
    /// at the same cycle. Most events are scheduled at the default
    /// priority; these values are used to control events that need to
    /// be ordered within a cycle.

    /**
     * @ingroup api_eventq
     */
    static const Priority Minimum_Pri = SCHAR_MIN;

    /**
     * If we enable tracing on a particular cycle, do that as the
     * very first thing so we don't miss any of the events on
     * that cycle (even if we enter the debugger).
     *
     * @ingroup api_eventq
     */
    static const Priority Debug_Enable_Pri = -101;

    /**
     * Breakpoints should happen before anything else (except
     * enabling trace output), so we don't miss any action when
     * the debugger is entered.
     *
     * @ingroup api_eventq
     */
    static const Priority Debug_Break_Pri = -100;

    /**
     * CPU switches schedule the new CPU's tick event for the
     * same cycle (after unscheduling the old CPU's tick event).
     * The switch needs to come before any tick events to make
     * sure we don't tick both CPUs in the same cycle.
     *
     * @ingroup api_eventq
     */
    static const Priority CPU_Switch_Pri = -31;

    /**
     * For some reason "delayed" inter-cluster writebacks are
     * scheduled before regular writebacks (which have default
     * priority).
     *
     * @ingroup api_eventq
     */
    static const Priority Delayed_Writeback_Pri = -1;

    /**
     * Default is zero for historical reasons.
     *
     * @ingroup api_eventq
     */
    static const Priority Default_Pri = 0;

    /**
     * DVFS update event leads to stats dump therefore given a lower priority
     * to ensure all relevant states have been updated
     *
     * @ingroup api_eventq
     */
    static const Priority DVFS_Update_Pri = 31;

    /**
     * Serialization needs to occur before tick events also, so
     * that a serialize/unserialize is identical to an on-line
     * CPU switch.
     *
     * @ingroup api_eventq
     */
    static const Priority Serialize_Pri = 32;

    /**
     * CPU ticks must come after other associated CPU events
     * (such as writebacks).
     *
     * @ingroup api_eventq
     */
    static const Priority CPU_Tick_Pri = 50;

    /**
     * If we want to exit a thread in a CPU, it comes after CPU_Tick_Pri
     *
     * @ingroup api_eventq
     */
    static const Priority CPU_Exit_Pri = 64;

    /**
     * Statistics events (dump, reset, etc.) come after
     * everything else, but before exit.
     *
     * @ingroup api_eventq
     */
    static const Priority Stat_Event_Pri = 90;

    /**
     * Progress events come at the end.
     *
     * @ingroup api_eventq
     */
    static const Priority Progress_Event_Pri = 95;

    /**
     * If we want to exit on this cycle, it's the very last thing
     * we do.
     *
     * @ingroup api_eventq
     */
    static const Priority Sim_Exit_Pri = 100;

    /**
     * @ingroup api_eventq
     */
    static const Priority Maximum_Pri = SCHAR_MAX;
// NOTE(review): this extract omits short source lines (braces, access
// specifiers, #ifdef guards, return-type lines). Code fragments below are
// kept byte-for-byte; only comments are restored/improved.
/*
 * An item on an event queue. The action caused by a given
 * event is specified by deriving a subclass and overriding the
 * process() member function.
 *
 * Caution, the order of members is chosen to maximize data packing.
 */
class Event : public EventBase, public Serializable
    friend class EventQueue;

    // The event queue is now a linked list of linked lists. The
    // 'nextBin' pointer is to find the bin, where a bin is defined as
    // when+priority. All events in the same bin will be stored in a
    // second linked list (a stack) maintained by the 'nextInBin'
    // pointer. The list will be accessed in LIFO order. The end
    // result is that the insert/removal in 'nextBin' is
    // linear/constant, and the lookup/removal in 'nextInBin' is
    // constant/constant. Hopefully this is a significant improvement
    // over the current fully linear insertion.
    static Event *insertBefore(Event *event, Event *curr);
    static Event *removeItem(Event *event, Event *last);

    Tick _when; //!< timestamp when event should be processed
    Priority _priority; //!< event priority

    /// Global counter to generate unique IDs for Event instances
    static Counter instanceCounter;

    /// This event's unique ID. We can also use pointer values for
    /// this but they're not consistent across runs making debugging
    /// more difficult. Thus we use a global counter value when
    /// debugging is enabled (member declaration elided in this extract).

    /// queue to which this event belongs (though it may or may not be
    /// scheduled on this queue yet)

    Tick whenCreated; //!< time created
    Tick whenScheduled; //!< time scheduled

    // Debug bookkeeping fragment: return type and body braces elided.
    // Records the target tick when the queue schedules this event.
    setWhen(Tick when, EventQueue *q)
        whenScheduled = curTick();

    // Fragment of initialized(): true iff the flag field carries the
    // Initialized bit pattern.
        return (flags & InitMask) == Initialized;

    // Fragment of getFlags(): expose only the publicly readable bits.
        return flags & PublicRead;

    // Test whether the given publicly-readable flag bits are set.
    isFlagSet(Flags _flags) const
        assert(_flags.noneSet(~PublicRead));
        return flags.isSet(_flags);

    // Set publicly-writable flag bits (asserts on non-writable bits).
    setFlags(Flags _flags)
        assert(_flags.noneSet(~PublicWrite));

    // Clear publicly-writable flag bits (asserts on non-writable bits).
    clearFlags(Flags _flags)
        assert(_flags.noneSet(~PublicWrite));

    // Fragment of the no-argument clearFlags(): clears every
    // publicly-writable bit.
        flags.clear(PublicWrite);

    /**
     * This function isn't really useful if TRACING_ON is not defined
     *
     * @ingroup api_eventq
     */
    virtual void trace(const char *action); //!< trace event activity

    /// Return the instance number as a string.
    const std::string instanceString() const;

  protected: /* Memory management */
    /**
     * Memory management hooks for events that have the Managed flag set
     *
     * Events can use automatic memory management by setting the
     * Managed flag. The default implementation automatically deletes
     * events once they have been removed from the event queue. This
     * typically happens when events are descheduled or have been
     * triggered and not rescheduled.
     *
     * The methods below may be overridden by events that need custom
     * memory management. For example, events exported to Python need
     * to implement reference counting to ensure that the Python
     * implementation of the event is kept alive while it lives in the
     * event queue.
     *
     * @note Memory managers are responsible for implementing
     * reference counting (by overriding both acquireImpl() and
     * releaseImpl()) or checking if an event is no longer scheduled
     * in releaseImpl() before deallocating it.
     */

    /**
     * Managed event scheduled and being held in the event queue.
     */
        if (flags.isSet(Event::Managed))

    /**
     * Managed event removed from the event queue.
     */
        if (flags.isSet(Event::Managed))

    virtual void acquireImpl() {}

    // Default releaseImpl() body elided in this extract; per the note
    // above it deletes the event when it is no longer scheduled.
    virtual void releaseImpl() {

    /**
     * Event constructor.
     *
     * @param p priority of the event (tie-breaker among events at the
     *          same tick)
     * @param f initial publicly-writable flags
     * @param queue that the event gets scheduled on
     *        (NOTE(review): stale doc -- the visible constructor takes
     *        no queue parameter)
     *
     * @ingroup api_eventq
     */
    Event(Priority p = Default_Pri, Flags f = 0)
        : nextBin(nullptr), nextInBin(nullptr), _when(0), _priority(p),
          flags(Initialized | f)
        assert(f.noneSet(~PublicWrite));
        // Debug-build bookkeeping (guard lines elided in this extract).
        instance = ++instanceCounter;
        whenCreated = curTick();

    /**
     * Return a textual name for this event instance.
     *
     * @ingroup api_eventq
     */
    virtual const std::string name() const;

    /// Return a C string describing the event. This string should
    /// *not* be dynamically allocated; just a const char array
    /// describing the event class.
    virtual const char *description() const;

    /// Dump the current event data
    /** @}*/ //end of api group

    /*
     * This member function is invoked when the event is processed
     * (occurs). There is no default implementation; each subclass
     * must provide its own implementation. The event is not
     * automatically deleted after it is processed (to allow for
     * statically allocated event objects).
     *
     * If the AutoDelete flag is set, the object is deleted once it
     * is processed.
     *
     * @ingroup api_eventq
     */
    virtual void process() = 0;

    /**
     * Determine if the current event is scheduled
     *
     * @ingroup api_eventq
     */
    bool scheduled() const { return flags.isSet(Scheduled); }

    /**
     * Squash the current event
     *
     * @ingroup api_eventq
     */
    void squash() { flags.set(Squashed); }

    /**
     * Check whether the event is squashed
     *
     * @ingroup api_eventq
     */
    bool squashed() const { return flags.isSet(Squashed); }

    /**
     * See if this is a SimExitEvent (without resorting to RTTI)
     *
     * @ingroup api_eventq
     */
    bool isExitEvent() const { return flags.isSet(IsExitEvent); }

    /**
     * Check whether this event will auto-delete
     *
     * @ingroup api_eventq
     */
    bool isManaged() const { return flags.isSet(Managed); }

    /**
     * @ingroup api_eventq
     */
    bool isAutoDelete() const { return isManaged(); }

    /**
     * Get the time that the event is scheduled
     *
     * @ingroup api_eventq
     */
    Tick when() const { return _when; }

    /**
     * Get the event priority
     *
     * @ingroup api_eventq
     */
    Priority priority() const { return _priority; }

    //! If this is part of a GlobalEvent, return the pointer to the
    //! Global Event. By default, there is no GlobalEvent, so return
    //! NULL. (Overridden in GlobalEvent::BarrierEvent.)
    virtual BaseGlobalEvent *globalEvent() { return NULL; }

    void serialize(CheckpointOut &cp) const override;
    void unserialize(CheckpointIn &cp) override;

// Comparison operators order events by (when, priority); the
// 'inline bool' return-type lines and braces are elided in this extract.
/**
 * @ingroup api_eventq
 */
operator<(const Event &l, const Event &r)
    return l.when() < r.when() ||
        (l.when() == r.when() && l.priority() < r.priority());

/**
 * @ingroup api_eventq
 */
operator>(const Event &l, const Event &r)
    return l.when() > r.when() ||
        (l.when() == r.when() && l.priority() > r.priority());

/**
 * @ingroup api_eventq
 */
operator<=(const Event &l, const Event &r)
    return l.when() < r.when() ||
        (l.when() == r.when() && l.priority() <= r.priority());

/**
 * @ingroup api_eventq
 */
operator>=(const Event &l, const Event &r)
    return l.when() > r.when() ||
        (l.when() == r.when() && l.priority() >= r.priority());

/**
 * @ingroup api_eventq
 */
operator==(const Event &l, const Event &r)
    return l.when() == r.when() && l.priority() == r.priority();

/**
 * @ingroup api_eventq
 */
operator!=(const Event &l, const Event &r)
    return l.when() != r.when() || l.priority() != r.priority();
// NOTE(review): this extract omits short source lines (class header, braces,
// access specifiers, short member declarations). Code fragments below are
// kept byte-for-byte; only comments are restored/improved.
/*
 * Queue of events sorted in time order
 *
 * Events are scheduled (inserted into the event queue) using the
 * schedule() method. This method either inserts a <i>synchronous</i>
 * or <i>asynchronous</i> event.
 *
 * Synchronous events are scheduled using schedule() method with the
 * argument 'global' set to false (default). This should only be done
 * from a thread holding the event queue lock
 * (EventQueue::service_mutex). The lock is always held when an event
 * handler is called, it can therefore always insert events into its
 * own event queue unless it voluntarily releases the lock.
 *
 * Events can be scheduled across thread (and event queue borders) by
 * either scheduling asynchronous events or taking the target event
 * queue's lock. However, the lock should <i>never</i> be taken
 * directly since this is likely to cause deadlocks. Instead, code
 * that needs to schedule events in other event queues should
 * temporarily release its own queue and lock the new queue. This
 * prevents deadlocks since a single thread never owns more than one
 * event queue lock. This functionality is provided by the
 * ScopedMigration helper class. Note that temporarily migrating
 * between event queues can make the simulation non-deterministic, it
 * should therefore be limited to cases where that can be tolerated
 * (e.g., handling asynchronous IO or fast-forwarding in KVM).
 *
 * Asynchronous events can also be scheduled using the normal
 * schedule() method with the 'global' parameter set to true. Unlike
 * the previous queue migration strategy, this strategy is fully
 * deterministic. This causes the event to be inserted in a separate
 * queue of asynchronous events (async_queue), which is merged with the
 * main event queue at the end of each simulation quantum (by calling the
 * handleAsyncInsertions() method). Note that this implies that such
 * events must happen at least one simulation quantum into the future,
 * otherwise they risk being scheduled in the past by
 * handleAsyncInsertions().
 */

    //! Mutex to protect async queue.
    std::mutex async_queue_mutex;

    //! List of events added by other threads to this event queue.
    std::list<Event*> async_queue;

    /**
     * Lock protecting event handling.
     *
     * This lock is always taken when servicing events. It is assumed
     * that the thread scheduling new events (not asynchronous events
     * though) have taken this lock. This is normally done by
     * serviceOne() since new events are typically scheduled as a
     * response to an earlier event.
     *
     * This lock is intended to be used to temporarily steal an event
     * queue to support inter-thread communication when some
     * deterministic timing can be sacrificed for speed. For example,
     * the KVM CPU can use this support to access devices running in a
     * different thread.
     *
     * @see EventQueue::ScopedMigration.
     * @see EventQueue::ScopedRelease
     * @see EventQueue::lock()
     * @see EventQueue::unlock()
     */
    std::mutex service_mutex;

    //! Insert / remove event from the queue. Should only be called
    //! by thread operating this queue.
    void insert(Event *event);
    void remove(Event *event);

    //! Function for adding events to the async queue. The added events
    //! are added to main event queue later. Threads, other than the
    //! owning thread, should call this function instead of insert().
    void asyncInsert(Event *event);

    // Copy constructor declared (presumably private and undefined, to
    // forbid copying -- access specifier elided in this extract).
    EventQueue(const EventQueue &);

    /**
     * Temporarily migrate execution to a different event queue.
     *
     * An instance of this class temporarily migrates execution to a
     * different event queue by releasing the current queue, locking
     * the new queue, and updating curEventQueue(). This can, for
     * example, be useful when performing IO across thread event
     * queues when timing is not crucial (e.g., during fast
     * forwarding).
     *
     * ScopedMigration does nothing if both eqs are the same
     */
    class ScopedMigration
        /**
         * @ingroup api_eventq
         */
        ScopedMigration(EventQueue *_new_eq, bool _doMigrate = true)
            :new_eq(*_new_eq), old_eq(*curEventQueue()),
             doMigrate((&new_eq != &old_eq)&&_doMigrate)
            // Constructor body fragment: make the new queue current.
                curEventQueue(&new_eq);

            // Destructor body fragment: restore the old queue.
                curEventQueue(&old_eq);

    /**
     * Temporarily release the event queue service lock.
     *
     * There are cases where it is desirable to temporarily release
     * the event queue lock to prevent deadlocks. For example, when
     * waiting on the global barrier, we need to release the lock to
     * prevent deadlocks from happening when another thread tries to
     * temporarily take over the event queue waiting on the barrier.
     */
        ScopedRelease(EventQueue *_eq)

    /**
     * @ingroup api_eventq
     */
    EventQueue(const std::string &n);

    /**
     * @ingroup api_eventq
     */
    virtual const std::string name() const { return objName; }
    void name(const std::string &st) { objName = st; }
    /** @}*/ //end of api_eventq group

    /**
     * Schedule the given event on this queue. Safe to call from any thread.
     *
     * @ingroup api_eventq
     */
    void schedule(Event *event, Tick when, bool global = false);

    /**
     * Deschedule the specified event. Should be called only from the owning
     * thread.
     *
     * @ingroup api_eventq
     */
    void deschedule(Event *event);

    /**
     * Reschedule the specified event. Should be called only from the owning
     * thread.
     *
     * @ingroup api_eventq
     */
    void reschedule(Event *event, Tick when, bool always = false);

    Tick nextTick() const { return head->when(); }
    void setCurTick(Tick newVal) { _curTick = newVal; }

    /**
     * While curTick() is useful for any object assigned to this event queue,
     * if an object that is assigned to another event queue (or a non-event
     * object) need to access the current tick of this event queue, this
     * accessor is used.
     *
     * @return Tick The current tick of this event queue.
     * @ingroup api_eventq
     */
    Tick getCurTick() const { return _curTick; }
    Event *getHead() const { return head; }

    /**
     * process all events up to the given timestamp. we inline a quick test
     * to see if there are any events to process; if so, call the internal
     * out-of-line version to process them all.
     *
     * Notes:
     * - This is only used for "instruction" event queues. Instead of counting
     * ticks, this is actually counting instructions.
     * - This updates the current tick value to the value of the entry at the
     * head of the queue.
     *
     * @ingroup api_eventq
     */
    serviceEvents(Tick when)
            if (nextTick() > when)

            /**
             * @todo this assert is a good bug catcher. I need to
             * make it true again.
             */
            //assert(head->when() >= when && "event scheduled in the past");

    /**
     * Returns true if no events are queued
     *
     * @ingroup api_eventq
     */
    bool empty() const { return head == NULL; }

    /**
     * This is a debugging function which will print everything on the event
     * queue (the dump() declaration itself is elided in this extract).
     *
     * @ingroup api_eventq
     */
    bool debugVerify() const;

    /**
     * Function for moving events from the async_queue to the main queue.
     */
    void handleAsyncInsertions();

    /**
     * Function to signal that the event loop should be woken up because
     * an event has been scheduled by an agent outside the gem5 event
     * loop(s) whose event insertion may not have been noticed by gem5.
     * This function isn't needed by the usual gem5 event loop but may
     * be necessary in derived EventQueues which host gem5 onto other
     * schedulers.
     *
     * @param when Time of a delayed wakeup (if known). This parameter
     * can be used by an implementation to schedule a wakeup in the
     * future if it is sure it will remain active until then.
     * Or it can be ignored and the event queue can be woken up now.
     *
     * @ingroup api_eventq
     */
    virtual void wakeup(Tick when = (Tick)-1) { }

    /**
     * function for replacing the head of the event queue, so that a
     * different set of events can run without disturbing events that have
     * already been scheduled. Already scheduled events can be processed
     * by replacing the original head back.
     * USING THIS FUNCTION CAN BE DANGEROUS TO THE HEALTH OF THE SIMULATOR.
     * NOT RECOMMENDED FOR USE.
     */
    Event* replaceHead(Event* s);

    /**
     * Provide an interface for locking/unlocking the event queue.
     *
     * @warn Do NOT use these methods directly unless you really know
     * what you are doing. Incorrect use can easily lead to simulator
     * deadlocks.
     *
     * @see EventQueue::ScopedMigration.
     * @see EventQueue::ScopedRelease
     */
    void lock() { service_mutex.lock(); }
    void unlock() { service_mutex.unlock(); }

    /**
     * Reschedule an event after a checkpoint.
     *
     * Since events don't know which event queue they belong to,
     * parent objects need to reschedule events themselves. This
     * method conditionally schedules an event that has the Scheduled
     * flag set. It should be called by parent objects after
     * unserializing an object.
     *
     * @warn Only use this method after unserializing an Event.
     */
    void checkpointReschedule(Event *event);

    // Destructor fragment: the loop header draining the queue is elided
    // in this extract.
    virtual ~EventQueue()
            deschedule(getHead());

    void dumpMainQueue();
// NOTE(review): EventManager is a convenience mix-in that forwards
// schedule/deschedule/reschedule calls to its bound event queue. The class
// header, braces and the 'eventq' member declaration are elided in this
// extract; fragments below are kept byte-for-byte.
/** A pointer to this object's event queue */

    /**
     * Constructors: bind to the same queue as another manager, or
     * directly to a queue.
     *
     * @ingroup api_eventq
     */
    EventManager(EventManager &em) : eventq(em.eventq) {}
    EventManager(EventManager *em) : eventq(em->eventq) {}
    EventManager(EventQueue *eq) : eventq(eq) {}
    /** @}*/ //end of api_eventq group

    /**
     * @ingroup api_eventq
     */

    /**
     * Schedule the event (by reference) on the bound queue.
     *
     * @ingroup api_eventq
     */
    schedule(Event &event, Tick when)
        eventq->schedule(&event, when);

    /**
     * Deschedule the event (by reference) from the bound queue.
     *
     * @ingroup api_eventq
     */
    deschedule(Event &event)
        eventq->deschedule(&event);

    /**
     * Reschedule the event (by reference) on the bound queue.
     *
     * @ingroup api_eventq
     */
    reschedule(Event &event, Tick when, bool always = false)
        eventq->reschedule(&event, when, always);

    /**
     * Schedule the event (by pointer) on the bound queue.
     *
     * @ingroup api_eventq
     */
    schedule(Event *event, Tick when)
        eventq->schedule(event, when);

    /**
     * Deschedule the event (by pointer) from the bound queue.
     *
     * @ingroup api_eventq
     */
    deschedule(Event *event)
        eventq->deschedule(event);

    /**
     * Reschedule the event (by pointer) on the bound queue.
     *
     * @ingroup api_eventq
     */
    reschedule(Event *event, Tick when, bool always = false)
        eventq->reschedule(event, when, always);

    /**
     * Forward a wakeup request to the bound queue.
     *
     * @ingroup api_eventq
     */
    void wakeupEventQueue(Tick when = (Tick)-1)
        eventq->wakeup(when);

    void setCurTick(Tick newVal) { eventq->setCurTick(newVal); }
// NOTE(review): wraps a member-function pointer F on an object of type T as
// an Event: process() simply invokes (object->*F)(). Braces, access
// specifiers and the 'if (del)' guard line before setFlags(AutoDelete) are
// elided in this extract -- presumably 'del' gates auto-deletion; confirm
// against the full source.
template <class T, void (T::* F)()>
class EventWrapper : public Event
    EventWrapper(T *obj, bool del = false, Priority p = Default_Pri)
        : Event(p), object(obj)
        setFlags(AutoDelete);

    EventWrapper(T &obj, bool del = false, Priority p = Default_Pri)
        : Event(p), object(&obj)
        setFlags(AutoDelete);

    void process() { (object->*F)(); }

    // Fragment of name(): derive the event name from the wrapped object.
        return object->name() + ".wrapped_event";

    const char *description() const { return "EventWrapped"; }
// NOTE(review): wraps an arbitrary std::function callback as an Event:
// process() simply invokes callback(). Braces, access specifiers, the
// '_name' member declaration and the 'bool del' parameter line (with its
// 'if (del)' guard) are elided in this extract.
class EventFunctionWrapper : public Event
    std::function<void(void)> callback;

    /**
     * @param callback function to invoke when the event is processed
     * @param name     base name used by name()
     * @param p        event priority
     *
     * @ingroup api_eventq
     */
    EventFunctionWrapper(const std::function<void(void)> &callback,
                         const std::string &name,
                         Priority p = Default_Pri)
        : Event(p), callback(callback), _name(name)
        setFlags(AutoDelete);

    /**
     * @ingroup api_eventq
     */
    void process() { callback(); }

    /**
     * Fragment of name(): derive the event name from the stored _name.
     *
     * @ingroup api_eventq
     */
        return _name + ".wrapped_function_event";

    /**
     * @ingroup api_eventq
     */
    const char *description() const { return "EventFunctionWrapped"; }
1067 #endif // __SIM_EVENTQ_HH__