/* This file is part of the program psim.

   Copyright (C) 1994-1998, Andrew Cagney <cagney@highland.com.au>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.

   */


#ifndef _EVENTS_C_
#define _EVENTS_C_

#include "basics.h"
#include "events.h"

#include <signal.h>
#include <stdlib.h>

#if !defined (SIM_EVENTS_POLL_RATE)
#define SIM_EVENTS_POLL_RATE 0x1000
#endif
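
/* Note (added for clarity): SIM_EVENTS_POLL_RATE is the interval, in
   simulator ticks, at which the self-rescheduling sim_events_poll()
   event below calls sim_io_poll_quit() to check for a pending quit
   request.  The #if guard lets a build override the default of 0x1000
   ticks. */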



/* The event queue maintains a single absolute time using two
   variables.

   TIME_OF_EVENT: this holds the time at which the next event is meant
   to occur.  If there is no next event, it holds the time of the last
   event.

   TIME_FROM_EVENT: The current distance from TIME_OF_EVENT.  If an
   event is pending, this will be positive.  If no future event is
   pending this will be negative.  This variable is decremented once
   for each iteration of a clock cycle.

   Initially, the clock is started at time one (1) with TIME_OF_EVENT
   == 0 and TIME_FROM_EVENT == -1.

   Clearly there is a bug in that this code assumes that the absolute
   time counter will never become greater than 2^62. */
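
/* Example (added for clarity): the current time is always recovered as
   TIME_OF_EVENT - TIME_FROM_EVENT, so the initial state above gives
   0 - (-1) == 1, i.e. the clock starts at time one.  Scheduling an
   event DELTA ticks ahead of the current time T records its absolute
   time as T + DELTA; TIME_FROM_EVENT then counts back down towards
   zero as event_queue_tick() is called once per cycle. */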

typedef struct _event_entry event_entry;
struct _event_entry {
  void *data;
  event_handler *handler;
  signed64 time_of_event;
  event_entry *next;
};

struct _event_queue {
  int processing;
  event_entry *queue;
  event_entry *volatile held;
  event_entry *volatile *volatile held_end;
  signed64 time_of_event;
  signed64 time_from_event;
};
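
/* Note (added for clarity): QUEUE is the time-ordered list of pending
   events and is only manipulated from normal (non-signal) context.
   HELD and HELD_END form a staging list appended to by
   event_queue_schedule_after_signal(), possibly from within a signal
   handler; event_queue_tick() later moves those entries onto QUEUE
   while signals are blocked. */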


STATIC_INLINE_EVENTS\
(void)
sim_events_poll (void *data)
{
  event_queue *queue = data;
  /* just re-schedule in SIM_EVENTS_POLL_RATE ticks time */
  event_queue_schedule (queue, SIM_EVENTS_POLL_RATE, sim_events_poll, queue);
  sim_io_poll_quit ();
}


INLINE_EVENTS\
(event_queue *)
event_queue_create(void)
{
  event_queue *new_event_queue = ZALLOC(event_queue);

  new_event_queue->processing = 0;
  new_event_queue->queue = NULL;
  new_event_queue->held = NULL;
  new_event_queue->held_end = &new_event_queue->held;

  /* both times are already zero */
  return new_event_queue;
}


INLINE_EVENTS\
(void)
event_queue_init(event_queue *queue)
{
  event_entry *event;

  /* drain the interrupt queue */
  {
#if defined(HAVE_SIGPROCMASK) && defined(SIG_SETMASK)
    sigset_t old_mask;
    sigset_t new_mask;
    sigfillset(&new_mask);
    /*-LOCK-*/ sigprocmask(SIG_SETMASK, &new_mask, &old_mask);
#endif
    event = queue->held;
    while (event != NULL) {
      event_entry *dead = event;
      event = event->next;
      free(dead);
    }
    queue->held = NULL;
    queue->held_end = &queue->held;
#if defined(HAVE_SIGPROCMASK) && defined(SIG_SETMASK)
    /*-UNLOCK-*/ sigprocmask(SIG_SETMASK, &old_mask, NULL);
#endif
  }

  /* drain the normal queue */
  event = queue->queue;
  while (event != NULL) {
    event_entry *dead = event;
    event = event->next;
    free(dead);
  }
  queue->queue = NULL;

  /* wind time back to one */
  queue->processing = 0;
  queue->time_of_event = 0;
  queue->time_from_event = -1;

  /* schedule our initial counter event */
  event_queue_schedule (queue, 0, sim_events_poll, queue);
}

INLINE_EVENTS\
(signed64)
event_queue_time(event_queue *queue)
{
  return queue->time_of_event - queue->time_from_event;
}

STATIC_INLINE_EVENTS\
(void)
update_time_from_event(event_queue *events)
{
  signed64 current_time = event_queue_time(events);
  if (events->queue != NULL) {
    events->time_from_event = (events->queue->time_of_event - current_time);
    events->time_of_event = events->queue->time_of_event;
  }
  else {
    events->time_of_event = current_time - 1;
    events->time_from_event = -1;
  }
  if (WITH_TRACE && ppc_trace[trace_events])
    {
      event_entry *event;
      int i;
      for (event = events->queue, i = 0;
           event != NULL;
           event = event->next, i++)
        {
          TRACE(trace_events, ("event time-from-event - time %ld, delta %ld - event %d, tag 0x%lx, time %ld, handler 0x%lx, data 0x%lx\n",
                               (long)current_time,
                               (long)events->time_from_event,
                               i,
                               (long)event,
                               (long)event->time_of_event,
                               (long)event->handler,
                               (long)event->data));
        }
    }
  ASSERT(current_time == event_queue_time(events));
}

STATIC_INLINE_EVENTS\
(void)
insert_event_entry(event_queue *events,
                   event_entry *new_event,
                   signed64 delta)
{
  event_entry *curr;
  event_entry **prev;
  signed64 time_of_event;

  if (delta < 0)
    error("what is past is past!\n");

  /* compute when the event should occur */
  time_of_event = event_queue_time(events) + delta;

  /* find the queue insertion point - things are time ordered */
  prev = &events->queue;
  curr = events->queue;
  while (curr != NULL && time_of_event >= curr->time_of_event) {
    ASSERT(curr->next == NULL
           || curr->time_of_event <= curr->next->time_of_event);
    prev = &curr->next;
    curr = curr->next;
  }
  ASSERT(curr == NULL || time_of_event < curr->time_of_event);

  /* insert it */
  new_event->next = curr;
  *prev = new_event;
  new_event->time_of_event = time_of_event;

  /* adjust the time until the first event */
  update_time_from_event(events);
}
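
/* Note (added for clarity): because the scan above advances while
   TIME_OF_EVENT >= CURR->TIME_OF_EVENT, an event scheduled for a time
   that is already present in the queue is placed after the existing
   entries for that time, so same-time events are handled in the order
   they were scheduled. */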

INLINE_EVENTS\
(event_entry_tag)
event_queue_schedule(event_queue *events,
                     signed64 delta_time,
                     event_handler *handler,
                     void *data)
{
  event_entry *new_event = ZALLOC(event_entry);
  new_event->data = data;
  new_event->handler = handler;
  insert_event_entry(events, new_event, delta_time);
  TRACE(trace_events, ("event scheduled at %ld - tag 0x%lx - time %ld, handler 0x%lx, data 0x%lx\n",
                       (long)event_queue_time(events),
                       (long)new_event,
                       (long)new_event->time_of_event,
                       (long)new_event->handler,
                       (long)new_event->data));
  return (event_entry_tag)new_event;
}


INLINE_EVENTS\
(event_entry_tag)
event_queue_schedule_after_signal(event_queue *events,
                                  signed64 delta_time,
                                  event_handler *handler,
                                  void *data)
{
  event_entry *new_event = ZALLOC(event_entry);

  new_event->data = data;
  new_event->handler = handler;
  new_event->time_of_event = delta_time; /* work it out later */
  new_event->next = NULL;

  {
#if defined(HAVE_SIGPROCMASK) && defined(SIG_SETMASK)
    sigset_t old_mask;
    sigset_t new_mask;
    sigfillset(&new_mask);
    /*-LOCK-*/ sigprocmask(SIG_SETMASK, &new_mask, &old_mask);
#endif
    if (events->held == NULL) {
      events->held = new_event;
    }
    else {
      *events->held_end = new_event;
    }
    events->held_end = &new_event->next;
#if defined(HAVE_SIGPROCMASK) && defined(SIG_SETMASK)
    /*-UNLOCK-*/ sigprocmask(SIG_SETMASK, &old_mask, NULL);
#endif
  }

  TRACE(trace_events, ("event scheduled at %ld - tag 0x%lx - time %ld, handler 0x%lx, data 0x%lx\n",
                       (long)event_queue_time(events),
                       (long)new_event,
                       (long)new_event->time_of_event,
                       (long)new_event->handler,
                       (long)new_event->data));

  return (event_entry_tag)new_event;
}


INLINE_EVENTS\
(void)
event_queue_deschedule(event_queue *events,
                       event_entry_tag event_to_remove)
{
  event_entry *to_remove = (event_entry*)event_to_remove;
  ASSERT((events->time_from_event >= 0) == (events->queue != NULL));
  if (event_to_remove != NULL) {
    event_entry *current;
    event_entry **ptr_to_current;
    for (ptr_to_current = &events->queue, current = *ptr_to_current;
         current != NULL && current != to_remove;
         ptr_to_current = &current->next, current = *ptr_to_current);
    if (current == to_remove) {
      *ptr_to_current = current->next;
      TRACE(trace_events, ("event descheduled at %ld - tag 0x%lx - time %ld, handler 0x%lx, data 0x%lx\n",
                           (long)event_queue_time(events),
                           (long)event_to_remove,
                           (long)current->time_of_event,
                           (long)current->handler,
                           (long)current->data));
      free(current);
      update_time_from_event(events);
    }
    else {
      TRACE(trace_events, ("event descheduled at %ld - tag 0x%lx - not found\n",
                           (long)event_queue_time(events),
                           (long)event_to_remove));
    }
  }
  ASSERT((events->time_from_event >= 0) == (events->queue != NULL));
}




INLINE_EVENTS\
(int)
event_queue_tick(event_queue *events)
{
  signed64 time_from_event;

  /* we should only be here when the previous tick has been fully processed */
  ASSERT(!events->processing);

  /* move any events that were queued by any signal handlers onto the
     real event queue.  BTW: When inlining, having this code here,
     instead of in event_queue_process(), causes GCC to put greater
     weight on keeping the pointer EVENTS in a register.  This, in
     turn, results in better code being output. */
  if (events->held != NULL) {
    event_entry *held_events;
    event_entry *curr_event;

    {
#if defined(HAVE_SIGPROCMASK) && defined(SIG_SETMASK)
      sigset_t old_mask;
      sigset_t new_mask;
      sigfillset(&new_mask);
      /*-LOCK-*/ sigprocmask(SIG_SETMASK, &new_mask, &old_mask);
#endif
      held_events = events->held;
      events->held = NULL;
      events->held_end = &events->held;
#if defined(HAVE_SIGPROCMASK) && defined(SIG_SETMASK)
      /*-UNLOCK-*/ sigprocmask(SIG_SETMASK, &old_mask, NULL);
#endif
    }

    do {
      curr_event = held_events;
      held_events = curr_event->next;
      insert_event_entry(events, curr_event, curr_event->time_of_event);
    } while (held_events != NULL);
  }

  /* advance time, checking to see if we've reached time zero which
     would indicate the time for the next event has arrived */
  time_from_event = events->time_from_event;
  events->time_from_event = time_from_event - 1;
  return time_from_event == 0;
}



INLINE_EVENTS\
(void)
event_queue_process(event_queue *events)
{
  signed64 event_time = event_queue_time(events);

  ASSERT((events->time_from_event == -1 && events->queue != NULL)
         || events->processing); /* something to do */

  /* consume all events for this or earlier times.  Be careful to
     allow a new event to appear under our feet */
  events->processing = 1;
  while (events->queue != NULL
         && events->queue->time_of_event <= event_time) {
    event_entry *to_do = events->queue;
    event_handler *handler = to_do->handler;
    void *data = to_do->data;
    events->queue = to_do->next;
    TRACE(trace_events, ("event issued at %ld - tag 0x%lx - time %ld, handler 0x%lx, data 0x%lx\n",
                         (long)event_time,
                         (long)to_do,
                         (long)to_do->time_of_event,
                         (long)handler,
                         (long)data));
    free(to_do);
    /* Always re-compute the time to the next event so that HANDLER()
       can safely insert new events into the queue. */
    update_time_from_event(events);
    handler(data);
  }
  events->processing = 0;

  ASSERT(events->time_from_event > 0);
  ASSERT(events->queue != NULL); /* always poll event */
}
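
/* Usage sketch (added; not part of the original source): a simulator
   main loop is expected to drive the queue roughly as follows, calling
   event_queue_tick() once per simulated cycle and event_queue_process()
   whenever the tick reports that an event has become due:

     event_queue *events = event_queue_create();
     event_queue_init(events);
     for (;;) {
       // ... execute one instruction / advance one cycle ...
       if (event_queue_tick(events))
         event_queue_process(events);
     }

   The exact driver in psim may differ; this is only illustrative. */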


#endif /* _EVENTS_C_ */