/*  This file is part of the program psim.

    Copyright (C) 1994-1998, Andrew Cagney <cagney@highland.com.au>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.

    */


#ifndef _EVENTS_C_
#define _EVENTS_C_

#include "basics.h"
#include "events.h"

#include <signal.h>

#if !defined (SIM_EVENTS_POLL_RATE)
#define SIM_EVENTS_POLL_RATE 0x1000
#endif

/* The event queue maintains a single absolute time using two
   variables.

   TIME_OF_EVENT: this holds the time at which the next event is meant
   to occur.  If there is no next event, it holds the time of the last
   event.

   TIME_FROM_EVENT: the current distance from TIME_OF_EVENT.  If an
   event is pending, this will be positive.  If no future event is
   pending, this will be negative.  This variable is decremented once
   for each iteration of a clock cycle.

   Initially, the clock is started at time one (1) with TIME_OF_EVENT
   == 0 and TIME_FROM_EVENT == -1.

   Clearly there is a bug in that this code assumes that the absolute
   time counter will never become greater than 2^62. */
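
/* Worked example (illustrative): the current time always reads as
   TIME_OF_EVENT - TIME_FROM_EVENT.  With the initial values above the
   clock reads 0 - (-1) == 1.  If the next pending event is at absolute
   time 100 and TIME_FROM_EVENT is 3, the clock reads 97; once
   event_queue_tick() has counted TIME_FROM_EVENT down to zero, the
   following tick reports that event_queue_process() should be
   called. */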

typedef struct _event_entry event_entry;
struct _event_entry {
  void *data;
  event_handler *handler;
  signed64 time_of_event;
  event_entry *next;
};

struct _event_queue {
  int processing;
  event_entry *queue;
  event_entry *volatile held;
  event_entry *volatile *volatile held_end;
  signed64 time_of_event;
  signed64 time_from_event;
};


STATIC_INLINE_EVENTS\
(void)
sim_events_poll (void *data)
{
  event_queue *queue = data;
  /* just re-schedule the poll SIM_EVENTS_POLL_RATE ticks from now */
  event_queue_schedule (queue, SIM_EVENTS_POLL_RATE, sim_events_poll, queue);
  sim_io_poll_quit ();
}


INLINE_EVENTS\
(event_queue *)
event_queue_create(void)
{
  event_queue *new_event_queue = ZALLOC(event_queue);

  new_event_queue->processing = 0;
  new_event_queue->queue = NULL;
  new_event_queue->held = NULL;
  new_event_queue->held_end = &new_event_queue->held;

  /* both times are already zero */
  return new_event_queue;
}


INLINE_EVENTS\
(void)
event_queue_init(event_queue *queue)
{
  event_entry *event;

  /* drain the interrupt queue */
  {
#if defined(HAVE_SIGPROCMASK) && defined(SIG_SETMASK)
    sigset_t old_mask;
    sigset_t new_mask;
    sigfillset(&new_mask);
    /*-LOCK-*/ sigprocmask(SIG_SETMASK, &new_mask, &old_mask);
#endif
    event = queue->held;
    while (event != NULL) {
      event_entry *dead = event;
      event = event->next;
      zfree(dead);
    }
    queue->held = NULL;
    queue->held_end = &queue->held;
#if defined(HAVE_SIGPROCMASK) && defined(SIG_SETMASK)
    /*-UNLOCK-*/ sigprocmask(SIG_SETMASK, &old_mask, NULL);
#endif
  }

  /* drain the normal queue */
  event = queue->queue;
  while (event != NULL) {
    event_entry *dead = event;
    event = event->next;
    zfree(dead);
  }
  queue->queue = NULL;

  /* wind time back to one */
  queue->processing = 0;
  queue->time_of_event = 0;
  queue->time_from_event = -1;

  /* schedule our initial counter event */
  event_queue_schedule (queue, 0, sim_events_poll, queue);
}

INLINE_EVENTS\
(signed64)
event_queue_time(event_queue *queue)
{
  return queue->time_of_event - queue->time_from_event;
}

STATIC_INLINE_EVENTS\
(void)
update_time_from_event(event_queue *events)
{
  signed64 current_time = event_queue_time(events);
  if (events->queue != NULL) {
    events->time_from_event = (events->queue->time_of_event - current_time);
    events->time_of_event = events->queue->time_of_event;
  }
  else {
    events->time_of_event = current_time - 1;
    events->time_from_event = -1;
  }
  if (WITH_TRACE && ppc_trace[trace_events])
    {
      event_entry *event;
      int i;
      for (event = events->queue, i = 0;
           event != NULL;
           event = event->next, i++)
        {
          TRACE(trace_events, ("event time-from-event - time %ld, delta %ld - event %d, tag 0x%lx, time %ld, handler 0x%lx, data 0x%lx\n",
                               (long)current_time,
                               (long)events->time_from_event,
                               i,
                               (long)event,
                               (long)event->time_of_event,
                               (long)event->handler,
                               (long)event->data));
        }
    }
  ASSERT(current_time == event_queue_time(events));
  ASSERT((events->time_from_event >= 0) == (events->queue != NULL));
}

STATIC_INLINE_EVENTS\
(void)
insert_event_entry(event_queue *events,
                   event_entry *new_event,
                   signed64 delta)
{
  event_entry *curr;
  event_entry **prev;
  signed64 time_of_event;

  if (delta < 0)
    error("what is past is past!\n");

  /* compute when the event should occur */
  time_of_event = event_queue_time(events) + delta;

  /* find the queue insertion point - things are time ordered */
  prev = &events->queue;
  curr = events->queue;
  while (curr != NULL && time_of_event >= curr->time_of_event) {
    ASSERT(curr->next == NULL
           || curr->time_of_event <= curr->next->time_of_event);
    prev = &curr->next;
    curr = curr->next;
  }
  ASSERT(curr == NULL || time_of_event < curr->time_of_event);

  /* insert it */
  new_event->next = curr;
  *prev = new_event;
  new_event->time_of_event = time_of_event;

  /* adjust the time until the first event */
  update_time_from_event(events);
}

INLINE_EVENTS\
(event_entry_tag)
event_queue_schedule(event_queue *events,
                     signed64 delta_time,
                     event_handler *handler,
                     void *data)
{
  event_entry *new_event = ZALLOC(event_entry);
  new_event->data = data;
  new_event->handler = handler;
  insert_event_entry(events, new_event, delta_time);
  TRACE(trace_events, ("event scheduled at %ld - tag 0x%lx - time %ld, handler 0x%lx, data 0x%lx\n",
                       (long)event_queue_time(events),
                       (long)new_event,
                       (long)new_event->time_of_event,
                       (long)new_event->handler,
                       (long)new_event->data));
  return (event_entry_tag)new_event;
}
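
/* Usage sketch (illustrative): a model wanting a callback roughly 100
   ticks from now might do

     static void my_timeout(void *data)
     {
       ...
     }

     event_entry_tag tag =
       event_queue_schedule(events, 100, my_timeout, my_data);

   where my_timeout and my_data stand in for the caller's own handler
   and argument.  The event can later be cancelled with
   event_queue_deschedule(events, tag), provided it has not already
   fired. */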


INLINE_EVENTS\
(event_entry_tag)
event_queue_schedule_after_signal(event_queue *events,
                                  signed64 delta_time,
                                  event_handler *handler,
                                  void *data)
{
  event_entry *new_event = ZALLOC(event_entry);

  new_event->data = data;
  new_event->handler = handler;
  new_event->time_of_event = delta_time; /* work it out later */
  new_event->next = NULL;

  {
#if defined(HAVE_SIGPROCMASK) && defined(SIG_SETMASK)
    sigset_t old_mask;
    sigset_t new_mask;
    sigfillset(&new_mask);
    /*-LOCK-*/ sigprocmask(SIG_SETMASK, &new_mask, &old_mask);
#endif
    if (events->held == NULL) {
      events->held = new_event;
    }
    else {
      *events->held_end = new_event;
    }
    events->held_end = &new_event->next;
#if defined(HAVE_SIGPROCMASK) && defined(SIG_SETMASK)
    /*-UNLOCK-*/ sigprocmask(SIG_SETMASK, &old_mask, NULL);
#endif
  }

  TRACE(trace_events, ("event scheduled at %ld - tag 0x%lx - time %ld, handler 0x%lx, data 0x%lx\n",
                       (long)event_queue_time(events),
                       (long)new_event,
                       (long)new_event->time_of_event,
                       (long)new_event->handler,
                       (long)new_event->data));

  return (event_entry_tag)new_event;
}
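
/* Note (sketch): event_queue_schedule_after_signal() only links the
   new event onto the HELD list, so it is the variant intended for use
   from an asynchronous signal handler.  The event is folded into the
   time-ordered queue by the next event_queue_tick(), at which point
   DELTA_TIME is finally converted into an absolute time by
   insert_event_entry(). */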


INLINE_EVENTS\
(void)
event_queue_deschedule(event_queue *events,
                       event_entry_tag event_to_remove)
{
  event_entry *to_remove = (event_entry*)event_to_remove;
  ASSERT((events->time_from_event >= 0) == (events->queue != NULL));
  if (event_to_remove != NULL) {
    event_entry *current;
    event_entry **ptr_to_current;
    for (ptr_to_current = &events->queue, current = *ptr_to_current;
         current != NULL && current != to_remove;
         ptr_to_current = &current->next, current = *ptr_to_current);
    if (current == to_remove) {
      *ptr_to_current = current->next;
      TRACE(trace_events, ("event descheduled at %ld - tag 0x%lx - time %ld, handler 0x%lx, data 0x%lx\n",
                           (long)event_queue_time(events),
                           (long)event_to_remove,
                           (long)current->time_of_event,
                           (long)current->handler,
                           (long)current->data));
      zfree(current);
      update_time_from_event(events);
    }
    else {
      TRACE(trace_events, ("event descheduled at %ld - tag 0x%lx - not found\n",
                           (long)event_queue_time(events),
                           (long)event_to_remove));
    }
  }
  ASSERT((events->time_from_event >= 0) == (events->queue != NULL));
}



INLINE_EVENTS\
(int)
event_queue_tick(event_queue *events)
{
  signed64 time_from_event;

  /* we should only be here when the previous tick has been fully processed */
  ASSERT(!events->processing);

  /* move any events that were queued by any signal handlers onto the
     real event queue.  BTW: When inlining, having this code here,
     instead of in event_queue_process(), causes GCC to put greater
     weight on keeping the pointer EVENTS in a register.  This, in
     turn, results in better code being output. */
  if (events->held != NULL) {
    event_entry *held_events;
    event_entry *curr_event;

    {
#if defined(HAVE_SIGPROCMASK) && defined(SIG_SETMASK)
      sigset_t old_mask;
      sigset_t new_mask;
      sigfillset(&new_mask);
      /*-LOCK-*/ sigprocmask(SIG_SETMASK, &new_mask, &old_mask);
#endif
      held_events = events->held;
      events->held = NULL;
      events->held_end = &events->held;
#if defined(HAVE_SIGPROCMASK) && defined(SIG_SETMASK)
      /*-UNLOCK-*/ sigprocmask(SIG_SETMASK, &old_mask, NULL);
#endif
    }

    do {
      curr_event = held_events;
      held_events = curr_event->next;
      insert_event_entry(events, curr_event, curr_event->time_of_event);
    } while (held_events != NULL);
  }

  /* advance time, checking to see if we've reached time zero, which
     would indicate that the time for the next event has arrived */
  time_from_event = events->time_from_event;
  events->time_from_event = time_from_event - 1;
  return time_from_event == 0;
}
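
/* Usage sketch (illustrative): the simulator's main loop is expected
   to call event_queue_tick() once per cycle and run the handlers when
   it reports that an event is due, e.g.

     while (keep_running)
       {
         execute_one_cycle();
         if (event_queue_tick(events))
           event_queue_process(events);
       }

   where keep_running and execute_one_cycle() stand in for the caller's
   own loop control and per-cycle work. */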


INLINE_EVENTS\
(void)
event_queue_process(event_queue *events)
{
  signed64 event_time = event_queue_time(events);

  ASSERT((events->time_from_event == -1 && events->queue != NULL)
         || events->processing); /* something to do */

  /* consume all events for this or earlier times.  Be careful to
     allow a new event to appear under our feet */
  events->processing = 1;
  while (events->queue != NULL
         && events->queue->time_of_event <= event_time) {
    event_entry *to_do = events->queue;
    event_handler *handler = to_do->handler;
    void *data = to_do->data;
    events->queue = to_do->next;
    TRACE(trace_events, ("event issued at %ld - tag 0x%lx - time %ld, handler 0x%lx, data 0x%lx\n",
                         (long)event_time,
                         (long)to_do,
                         (long)to_do->time_of_event,
                         (long)handler,
                         (long)data));
    zfree(to_do);
    handler(data);
  }
  events->processing = 0;

  /* re-calculate time for new events */
  update_time_from_event(events);
}


#endif /* _EVENTS_C_ */