]>
Commit | Line | Data |
---|---|---|
cbbb4b6c | 1 | /* Helper code for POSIX timer implementation on NPTL. |
d4697bc9 | 2 | Copyright (C) 2000-2014 Free Software Foundation, Inc. |
76a50749 UD |
3 | This file is part of the GNU C Library. |
4 | Contributed by Kaz Kylheku <kaz@ashi.footprints.net>. | |
5 | ||
6 | The GNU C Library is free software; you can redistribute it and/or | |
7 | modify it under the terms of the GNU Lesser General Public License as | |
8 | published by the Free Software Foundation; either version 2.1 of the | |
9 | License, or (at your option) any later version. | |
10 | ||
11 | The GNU C Library is distributed in the hope that it will be useful, | |
12 | but WITHOUT ANY WARRANTY; without even the implied warranty of | |
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
14 | Lesser General Public License for more details. | |
15 | ||
16 | You should have received a copy of the GNU Lesser General Public | |
59ba27a6 PE |
17 | License along with the GNU C Library; see the file COPYING.LIB. If |
18 | not, see <http://www.gnu.org/licenses/>. */ | |
76a50749 UD |
19 | |
20 | #include <assert.h> | |
21 | #include <errno.h> | |
22 | #include <pthread.h> | |
23 | #include <stddef.h> | |
24 | #include <stdlib.h> | |
25 | #include <string.h> | |
26 | #include <sysdep.h> | |
27 | #include <time.h> | |
28 | #include <unistd.h> | |
29 | #include <sys/syscall.h> | |
30 | ||
31 | #include "posix-timer.h" | |
32 | #include <pthreadP.h> | |
33 | ||
34 | ||
/* Number of threads used.  */
#define THREAD_MAXNODES 16

/* Array containing the descriptors for the used threads.  */
static struct thread_node thread_array[THREAD_MAXNODES];

/* Static array with the structures for all the timers.  */
struct timer_node __timer_array[TIMER_MAX];

/* Global lock to protect operation on the lists.  */
pthread_mutex_t __timer_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Variable to protect initialization.  */
pthread_once_t __timer_init_once_control = PTHREAD_ONCE_INIT;

/* Nonzero if initialization of timer implementation failed.  */
int __timer_init_failed;

/* Node for the thread used to deliver signals.  */
struct thread_node __timer_signal_thread_rclk;

/* Lists to keep free and used timers and threads.  All three are
   protected by __timer_mutex.  */
struct list_links timer_free_list;
struct list_links thread_free_list;
struct list_links thread_active_list;


#ifdef __NR_rt_sigqueueinfo
/* Raw syscall used to queue a siginfo_t carrying SI_TIMER data.  */
extern int __syscall_rt_sigqueueinfo (int, int, siginfo_t *);
#endif
65 | ||
66 | ||
67 | /* List handling functions. */ | |
68 | static inline void | |
69 | list_init (struct list_links *list) | |
70 | { | |
71 | list->next = list->prev = list; | |
72 | } | |
73 | ||
74 | static inline void | |
75 | list_append (struct list_links *list, struct list_links *newp) | |
76 | { | |
77 | newp->prev = list->prev; | |
78 | newp->next = list; | |
79 | list->prev->next = newp; | |
80 | list->prev = newp; | |
81 | } | |
82 | ||
/* Insert NEWP directly before LIST.  Appending to the sublist that
   ends at LIST is exactly that operation, so delegate.  */
static inline void
list_insbefore (struct list_links *list, struct list_links *newp)
{
  list_append (list, newp);
}
88 | ||
89 | /* | |
90 | * Like list_unlink_ip, except that calling it on a node that | |
91 | * is already unlinked is disastrous rather than a noop. | |
92 | */ | |
93 | ||
94 | static inline void | |
95 | list_unlink (struct list_links *list) | |
96 | { | |
97 | struct list_links *lnext = list->next, *lprev = list->prev; | |
98 | ||
99 | lnext->prev = lprev; | |
100 | lprev->next = lnext; | |
101 | } | |
102 | ||
103 | static inline struct list_links * | |
104 | list_first (struct list_links *list) | |
105 | { | |
106 | return list->next; | |
107 | } | |
108 | ||
/* Sentinel value iteration compares against to detect the end of
   LIST (the list head itself).  */
static inline struct list_links *
list_null (struct list_links *list)
{
  return list;
}
114 | ||
115 | static inline struct list_links * | |
116 | list_next (struct list_links *list) | |
117 | { | |
118 | return list->next; | |
119 | } | |
120 | ||
121 | static inline int | |
122 | list_isempty (struct list_links *list) | |
123 | { | |
124 | return list->next == list; | |
125 | } | |
126 | ||
127 | ||
128 | /* Functions build on top of the list functions. */ | |
129 | static inline struct thread_node * | |
130 | thread_links2ptr (struct list_links *list) | |
131 | { | |
132 | return (struct thread_node *) ((char *) list | |
133 | - offsetof (struct thread_node, links)); | |
134 | } | |
135 | ||
136 | static inline struct timer_node * | |
137 | timer_links2ptr (struct list_links *list) | |
138 | { | |
139 | return (struct timer_node *) ((char *) list | |
140 | - offsetof (struct timer_node, links)); | |
141 | } | |
142 | ||
143 | ||
144 | /* Initialize a newly allocated thread structure. */ | |
145 | static void | |
146 | thread_init (struct thread_node *thread, const pthread_attr_t *attr, clockid_t clock_id) | |
147 | { | |
148 | if (attr != NULL) | |
149 | thread->attr = *attr; | |
150 | else | |
151 | { | |
152 | pthread_attr_init (&thread->attr); | |
153 | pthread_attr_setdetachstate (&thread->attr, PTHREAD_CREATE_DETACHED); | |
154 | } | |
155 | ||
156 | thread->exists = 0; | |
157 | list_init (&thread->timer_queue); | |
158 | pthread_cond_init (&thread->cond, 0); | |
159 | thread->current_timer = 0; | |
160 | thread->captured = pthread_self (); | |
161 | thread->clock_id = clock_id; | |
162 | } | |
163 | ||
164 | ||
165 | /* Initialize the global lists, and acquire global resources. Error | |
166 | reporting is done by storing a non-zero value to the global variable | |
167 | timer_init_failed. */ | |
168 | static void | |
169 | init_module (void) | |
170 | { | |
171 | int i; | |
172 | ||
173 | list_init (&timer_free_list); | |
174 | list_init (&thread_free_list); | |
175 | list_init (&thread_active_list); | |
176 | ||
177 | for (i = 0; i < TIMER_MAX; ++i) | |
178 | { | |
179 | list_append (&timer_free_list, &__timer_array[i].links); | |
180 | __timer_array[i].inuse = TIMER_FREE; | |
181 | } | |
182 | ||
183 | for (i = 0; i < THREAD_MAXNODES; ++i) | |
184 | list_append (&thread_free_list, &thread_array[i].links); | |
185 | ||
186 | thread_init (&__timer_signal_thread_rclk, 0, CLOCK_REALTIME); | |
76a50749 UD |
187 | } |
188 | ||
189 | ||
/* This is a handler executed in a child process after a fork()
   occurs.  It reinitializes the module, resetting all of the data
   structures to their initial state.  The mutex is initialized in
   case it was locked in the parent process.  Timer service threads
   do not survive fork, so nothing else needs tearing down.  */
static void
reinit_after_fork (void)
{
  init_module ();
  pthread_mutex_init (&__timer_mutex, 0);
}
200 | ||
201 | ||
/* Called once from pthread_once in timer_init.  This initializes the
   module and ensures that reinit_after_fork will be executed in any
   child process.  */
void
__timer_init_once (void)
{
  init_module ();
  pthread_atfork (0, 0, reinit_after_fork);
}
211 | ||
212 | ||
/* Deinitialize a thread that is about to be deallocated.  The caller
   must guarantee the timer queue is already empty; the condition
   variable is destroyed here and must not be used afterwards.  */
static void
thread_deinit (struct thread_node *thread)
{
  assert (list_isempty (&thread->timer_queue));
  pthread_cond_destroy (&thread->cond);
}
220 | ||
221 | ||
222 | /* Allocate a thread structure from the global free list. Global | |
223 | mutex lock must be held by caller. The thread is moved to | |
224 | the active list. */ | |
225 | struct thread_node * | |
226 | __timer_thread_alloc (const pthread_attr_t *desired_attr, clockid_t clock_id) | |
227 | { | |
228 | struct list_links *node = list_first (&thread_free_list); | |
229 | ||
230 | if (node != list_null (&thread_free_list)) | |
231 | { | |
232 | struct thread_node *thread = thread_links2ptr (node); | |
233 | list_unlink (node); | |
234 | thread_init (thread, desired_attr, clock_id); | |
235 | list_append (&thread_active_list, node); | |
236 | return thread; | |
237 | } | |
238 | ||
239 | return 0; | |
240 | } | |
241 | ||
242 | ||
/* Return a thread structure to the global free list.  Global lock
   must be held by caller.  */
void
__timer_thread_dealloc (struct thread_node *thread)
{
  /* Tear down per-thread state first; asserts the timer queue is
     empty and destroys the condition variable.  */
  thread_deinit (thread);
  /* Move the node from the active list to the free list.  */
  list_unlink (&thread->links);
  list_append (&thread_free_list, &thread->links);
}
252 | ||
253 | ||
/* Each of our threads which terminates executes this cleanup
   handler.  We never terminate threads ourselves; if a thread gets here
   it means that the evil application has killed it.  If the thread has
   timers, these require servicing and so we must hire a replacement
   thread right away.  We must also unblock another thread that may
   have been waiting for this thread to finish servicing a timer (see
   timer_delete()).  */

static void
thread_cleanup (void *val)
{
  if (val != NULL)
    {
      struct thread_node *thread = val;

      /* How did the signal thread get killed?  */
      assert (thread != &__timer_signal_thread_rclk);

      pthread_mutex_lock (&__timer_mutex);

      thread->exists = 0;

      /* We are no longer processing a timer event.  */
      thread->current_timer = 0;

      /* Recycle the descriptor when idle; otherwise spawn a
	 replacement thread to keep servicing the queued timers.  */
      if (list_isempty (&thread->timer_queue))
	__timer_thread_dealloc (thread);
      else
	(void) __timer_thread_start (thread);

      pthread_mutex_unlock (&__timer_mutex);

      /* Unblock potentially blocked timer_delete().  */
      /* NOTE(review): on the dealloc path thread_deinit has already
	 destroyed THREAD->cond, so this broadcast hits a destroyed
	 condition variable — confirm this is benign on NPTL.  */
      pthread_cond_broadcast (&thread->cond);
    }
}
290 | ||
291 | ||
/* Handle a timer which is supposed to go off now.  Called with the
   global mutex held; the mutex is released for the duration of the
   notification (signal or callback) and reacquired afterwards.  */
static void
thread_expire_timer (struct thread_node *self, struct timer_node *timer)
{
  self->current_timer = timer; /* Lets timer_delete know timer is running.  */

  pthread_mutex_unlock (&__timer_mutex);

  switch (__builtin_expect (timer->event.sigev_notify, SIGEV_SIGNAL))
    {
    case SIGEV_NONE:
      /* Nothing to deliver.  */
      break;

    case SIGEV_SIGNAL:
#ifdef __NR_rt_sigqueueinfo
      {
	siginfo_t info;

	/* First, clear the siginfo_t structure, so that we don't pass our
	   stack content to other tasks.  */
	memset (&info, 0, sizeof (siginfo_t));
	/* We must pass the information about the data in a siginfo_t
	   value.  */
	info.si_signo = timer->event.sigev_signo;
	info.si_code = SI_TIMER;
	info.si_pid = timer->creator_pid;
	info.si_uid = getuid ();
	info.si_value = timer->event.sigev_value;

	INLINE_SYSCALL (rt_sigqueueinfo, 3, info.si_pid, info.si_signo, &info);
      }
#else
      /* No rt_sigqueueinfo syscall available: fall back to
	 pthread_kill, first to the capturing thread, then to this
	 service thread itself.  */
      if (pthread_kill (self->captured, timer->event.sigev_signo) != 0)
	{
	  if (pthread_kill (self->id, timer->event.sigev_signo) != 0)
	    abort ();
	}
#endif
      break;

    case SIGEV_THREAD:
      /* Run the user's notification callback on this service thread.  */
      timer->event.sigev_notify_function (timer->event.sigev_value);
      break;

    default:
      assert (! "unknown event");
      break;
    }

  pthread_mutex_lock (&__timer_mutex);

  self->current_timer = 0;

  /* Wake any timer_delete() that is waiting for this expiry to
     complete.  */
  pthread_cond_broadcast (&self->cond);
}
347 | ||
348 | ||
/* Thread function; executed by each timer thread.  The job of this
   function is to wait on the thread's timer queue and expire the
   timers in chronological order as close to their scheduled time as
   possible.  */
static void
__attribute__ ((noreturn))
thread_func (void *arg)
{
  struct thread_node *self = arg;

  /* Register cleanup handler, in case rogue application terminates
     this thread.  (This cannot happen to __timer_signal_thread, which
     doesn't invoke application callbacks).  */

  pthread_cleanup_push (thread_cleanup, self);

  pthread_mutex_lock (&__timer_mutex);

  while (1)
    {
      struct list_links *first;
      struct timer_node *timer = NULL;

      /* While the timer queue is not empty, inspect the first node.  */
      first = list_first (&self->timer_queue);
      if (first != list_null (&self->timer_queue))
	{
	  struct timespec now;

	  timer = timer_links2ptr (first);

	  /* This assumes that the elements of the list of one thread
	     are all for the same clock.  */
	  clock_gettime (timer->clock, &now);

	  while (1)
	    {
	      /* If the timer is due or overdue, remove it from the queue.
		 If it's a periodic timer, re-compute its new time and
		 requeue it.  Either way, perform the timer expiry.  */
	      if (timespec_compare (&now, &timer->expirytime) < 0)
		break;

	      list_unlink_ip (first);

	      /* A non-zero interval marks a periodic timer.  */
	      if (__builtin_expect (timer->value.it_interval.tv_sec, 0) != 0
		  || timer->value.it_interval.tv_nsec != 0)
		{
		  /* Advance the expiry time past NOW, counting each
		     skipped period as an overrun, capped at
		     DELAYTIMER_MAX.  */
		  timer->overrun_count = 0;
		  timespec_add (&timer->expirytime, &timer->expirytime,
				&timer->value.it_interval);
		  while (timespec_compare (&timer->expirytime, &now) < 0)
		    {
		      timespec_add (&timer->expirytime, &timer->expirytime,
				    &timer->value.it_interval);
		      if (timer->overrun_count < DELAYTIMER_MAX)
			++timer->overrun_count;
		    }
		  __timer_thread_queue_timer (self, timer);
		}

	      /* Deliver the notification.  Note: this drops and
		 retakes __timer_mutex, so the queue may change.  */
	      thread_expire_timer (self, timer);

	      first = list_first (&self->timer_queue);
	      if (first == list_null (&self->timer_queue))
		break;

	      timer = timer_links2ptr (first);
	    }
	}

      /* If the queue is not empty, wait until the expiry time of the
	 first node.  Otherwise wait indefinitely.  Insertions at the
	 head of the queue must wake up the thread by broadcasting
	 this condition variable.  */
      if (timer != NULL)
	pthread_cond_timedwait (&self->cond, &__timer_mutex,
				&timer->expirytime);
      else
	pthread_cond_wait (&self->cond, &__timer_mutex);
    }
  /* This macro will never be executed since the while loop loops
     forever - but we have to add it for proper nesting.  */
  pthread_cleanup_pop (1);
}
434 | ||
435 | ||
436 | /* Enqueue a timer in wakeup order in the thread's timer queue. | |
437 | Returns 1 if the timer was inserted at the head of the queue, | |
438 | causing the queue's next wakeup time to change. */ | |
439 | ||
440 | int | |
441 | __timer_thread_queue_timer (struct thread_node *thread, | |
442 | struct timer_node *insert) | |
443 | { | |
444 | struct list_links *iter; | |
445 | int athead = 1; | |
446 | ||
447 | for (iter = list_first (&thread->timer_queue); | |
448 | iter != list_null (&thread->timer_queue); | |
449 | iter = list_next (iter)) | |
450 | { | |
451 | struct timer_node *timer = timer_links2ptr (iter); | |
452 | ||
453 | if (timespec_compare (&insert->expirytime, &timer->expirytime) < 0) | |
454 | break; | |
455 | athead = 0; | |
456 | } | |
457 | ||
458 | list_insbefore (iter, &insert->links); | |
459 | return athead; | |
460 | } | |
461 | ||
462 | ||
/* Start a thread and associate it with the given thread node.  Global
   lock must be held by caller.  Returns 1 on success, -1 on failure
   (in which case THREAD->exists is reset to 0).  */
int
__timer_thread_start (struct thread_node *thread)
{
  int retval = 1;

  assert (!thread->exists);
  thread->exists = 1;

  /* NOTE(review): thread_func is declared to return void, so calling
     it through a void *(*)(void *) pointer is formally undefined
     behavior — confirm whether thread_func should be retyped.  */
  if (pthread_create (&thread->id, &thread->attr,
		      (void *(*) (void *)) thread_func, thread) != 0)
    {
      thread->exists = 0;
      retval = -1;
    }

  return retval;
}
482 | ||
483 | ||
/* Wake the service thread so it re-examines its timer queue; used
   after an insertion changes the next wakeup time.  */
void
__timer_thread_wakeup (struct thread_node *thread)
{
  pthread_cond_broadcast (&thread->cond);
}
489 | ||
490 | ||
491 | /* Compare two pthread_attr_t thread attributes for exact equality. | |
9f292c24 UD |
492 | Returns 1 if they are equal, otherwise zero if they are not equal |
493 | or contain illegal values. This version is NPTL-specific for | |
76a50749 UD |
494 | performance reason. One could use the access functions to get the |
495 | values of all the fields of the attribute structure. */ | |
496 | static int | |
497 | thread_attr_compare (const pthread_attr_t *left, const pthread_attr_t *right) | |
498 | { | |
499 | struct pthread_attr *ileft = (struct pthread_attr *) left; | |
500 | struct pthread_attr *iright = (struct pthread_attr *) right; | |
501 | ||
502 | return (ileft->flags == iright->flags | |
503 | && ileft->schedpolicy == iright->schedpolicy | |
504 | && (ileft->schedparam.sched_priority | |
1683daeb UD |
505 | == iright->schedparam.sched_priority) |
506 | && ileft->guardsize == iright->guardsize | |
507 | && ileft->stackaddr == iright->stackaddr | |
508 | && ileft->stacksize == iright->stacksize | |
509 | && ((ileft->cpuset == NULL && iright->cpuset == NULL) | |
510 | || (ileft->cpuset != NULL && iright->cpuset != NULL | |
511 | && ileft->cpusetsize == iright->cpusetsize | |
512 | && memcmp (ileft->cpuset, iright->cpuset, | |
513 | ileft->cpusetsize) == 0))); | |
76a50749 UD |
514 | } |
515 | ||
516 | ||
517 | /* Search the list of active threads and find one which has matching | |
518 | attributes. Global mutex lock must be held by caller. */ | |
519 | struct thread_node * | |
520 | __timer_thread_find_matching (const pthread_attr_t *desired_attr, | |
521 | clockid_t desired_clock_id) | |
522 | { | |
523 | struct list_links *iter = list_first (&thread_active_list); | |
524 | ||
525 | while (iter != list_null (&thread_active_list)) | |
526 | { | |
527 | struct thread_node *candidate = thread_links2ptr (iter); | |
528 | ||
529 | if (thread_attr_compare (desired_attr, &candidate->attr) | |
530 | && desired_clock_id == candidate->clock_id) | |
a7f6c66e | 531 | return candidate; |
76a50749 UD |
532 | |
533 | iter = list_next (iter); | |
534 | } | |
535 | ||
536 | return NULL; | |
537 | } | |
538 | ||
539 | ||
540 | /* Grab a free timer structure from the global free list. The global | |
541 | lock must be held by the caller. */ | |
542 | struct timer_node * | |
543 | __timer_alloc (void) | |
544 | { | |
545 | struct list_links *node = list_first (&timer_free_list); | |
546 | ||
547 | if (node != list_null (&timer_free_list)) | |
548 | { | |
549 | struct timer_node *timer = timer_links2ptr (node); | |
550 | list_unlink_ip (node); | |
551 | timer->inuse = TIMER_INUSE; | |
552 | timer->refcount = 1; | |
553 | return timer; | |
554 | } | |
555 | ||
556 | return NULL; | |
557 | } | |
558 | ||
559 | ||
/* Return a timer structure to the global free list.  The global lock
   must be held by the caller, and the timer's reference count must
   already have dropped to zero.  */
void
__timer_dealloc (struct timer_node *timer)
{
  assert (timer->refcount == 0);
  timer->thread = NULL;	/* Break association between timer and thread.  */
  timer->inuse = TIMER_FREE;
  list_append (&timer_free_list, &timer->links);
}
570 | ||
571 | ||
/* Thread cancellation handler which unlocks a mutex.  ARG is the
   pthread_mutex_t * to unlock.  */
void
__timer_mutex_cancel_handler (void *arg)
{
  pthread_mutex_unlock (arg);
}