/*
  chronyd/chronyc - Programs for keeping computer clocks accurate.

 **********************************************************************
 * Copyright (C) Richard P. Curnow  1997-2003
 * Copyright (C) Miroslav Lichvar  2011, 2013-2016
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 **********************************************************************

  =======================================================================

  This file contains the scheduling loop and the timeout queue.

  */

#include "config.h"

#include "sysincl.h"

#include "array.h"
#include "sched.h"
#include "memory.h"
#include "util.h"
#include "local.h"
#include "logging.h"

/* ================================================== */

/* Flag indicating that we are initialised */
static int initialised = 0;

/* ================================================== */

/* One more than the highest file descriptor that is registered */
static unsigned int one_highest_fd;

#ifndef FD_SETSIZE
/* If FD_SETSIZE is not defined, assume that fd_set is implemented
   as a fixed size array of bits, possibly embedded inside a record */
#define FD_SETSIZE (sizeof(fd_set) * 8)
#endif

typedef struct {
  SCH_FileHandler handler;
  SCH_ArbitraryArgument arg;
  int events;
} FileHandlerEntry;

static ARR_Instance file_handlers;

/* Timestamp when the last select() returned */
static struct timespec last_select_ts, last_select_ts_raw;
static double last_select_ts_err;

#define TS_MONO_PRECISION_NS 10000000U

/* Monotonic low-precision timestamp measuring interval since the start */
static double last_select_ts_mono;
static uint32_t last_select_ts_mono_ns;

/* ================================================== */

/* Variables to handle the timer queue */

typedef struct _TimerQueueEntry
{
  struct _TimerQueueEntry *next; /* Forward and back links in the list */
  struct _TimerQueueEntry *prev;
  struct timespec ts;           /* Local system time at which the
                                   timeout is to expire.  Clearly this
                                   must be in terms of what the
                                   operating system thinks of as
                                   system time, because it will be an
                                   argument to select().  Therefore,
                                   any fudges etc that our local time
                                   driver module would apply to time
                                   that we pass to clients etc don't
                                   apply to this. */
  SCH_TimeoutID id;             /* ID to allow client to delete
                                   timeout */
  SCH_TimeoutClass class;       /* The class that the epoch is in */
  SCH_TimeoutHandler handler;   /* The handler routine to use */
  SCH_ArbitraryArgument arg;    /* The argument to pass to the handler */

} TimerQueueEntry;

/* The timer queue.  We only use the next and prev entries of this
   record; these chain to the real entries. */
static TimerQueueEntry timer_queue;
static unsigned long n_timer_queue_entries;
static SCH_TimeoutID next_tqe_id;

/* Pointer to head of free list */
static TimerQueueEntry *tqe_free_list = NULL;
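
/* The queue is kept as a circular doubly-linked list ordered by expiry
   time, with 'timer_queue' itself acting as a sentinel node (its next and
   prev pointers refer back to it when the queue is empty).  Due timeouts
   are dispatched from the head; released entries go onto the singly-linked
   free list above rather than back to the allocator. */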

/* Timestamp when the last timeout was dispatched for each class */
static struct timespec last_class_dispatch[SCH_NumberOfClasses];

/* ================================================== */

static int need_to_exit;

/* ================================================== */

static void
handle_slew(struct timespec *raw,
            struct timespec *cooked,
            double dfreq,
            double doffset,
            LCL_ChangeType change_type,
            void *anything);

/* ================================================== */

void
SCH_Initialise(void)
{
  file_handlers = ARR_CreateInstance(sizeof (FileHandlerEntry));

  n_timer_queue_entries = 0;
  next_tqe_id = 0;

  timer_queue.next = &timer_queue;
  timer_queue.prev = &timer_queue;

  need_to_exit = 0;

  LCL_AddParameterChangeHandler(handle_slew, NULL);

  LCL_ReadRawTime(&last_select_ts_raw);
  last_select_ts = last_select_ts_raw;
  last_select_ts_mono = 0.0;
  last_select_ts_mono_ns = 0;

  initialised = 1;
}


/* ================================================== */

void
SCH_Finalise(void) {
  ARR_DestroyInstance(file_handlers);

  LCL_RemoveParameterChangeHandler(handle_slew, NULL);

  initialised = 0;
}

/* ================================================== */

void
SCH_AddFileHandler
(int fd, int events, SCH_FileHandler handler, SCH_ArbitraryArgument arg)
{
  FileHandlerEntry *ptr;

  assert(initialised);
  assert(events);
  assert(fd >= 0);

  if (fd >= FD_SETSIZE)
    LOG_FATAL("Too many file descriptors");

  /* Resize the array if the descriptor is the highest so far */
  while (ARR_GetSize(file_handlers) <= fd) {
    ptr = ARR_GetNewElement(file_handlers);
    ptr->handler = NULL;
    ptr->arg = NULL;
    ptr->events = 0;
  }

  ptr = ARR_GetElement(file_handlers, fd);

  /* Don't allow the same fd to register a handler more than once
     without deleting the previous association - that would suggest
     a bug somewhere else in the program. */
  assert(!ptr->handler);

  ptr->handler = handler;
  ptr->arg = arg;
  ptr->events = events;

  if (one_highest_fd < fd + 1)
    one_highest_fd = fd + 1;
}
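
/* Usage sketch (illustrative only; the callback name and socket below are
   hypothetical, not part of this file):

     static void read_request(int fd, int event, SCH_ArbitraryArgument arg)
     {
       if (event == SCH_FILE_INPUT)
         ;  (read from fd and process the data)
     }

     SCH_AddFileHandler(sock_fd, SCH_FILE_INPUT, read_request, NULL);

   The handler stays registered, and is dispatched from the main loop
   whenever the descriptor is ready, until SCH_RemoveFileHandler(sock_fd)
   is called. */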


/* ================================================== */

void
SCH_RemoveFileHandler(int fd)
{
  FileHandlerEntry *ptr;

  assert(initialised);

  ptr = ARR_GetElement(file_handlers, fd);

  /* Check that a handler was registered for the fd in question */
  assert(ptr->handler);

  ptr->handler = NULL;
  ptr->arg = NULL;
  ptr->events = 0;

  /* Find new highest file descriptor */
  while (one_highest_fd > 0) {
    ptr = ARR_GetElement(file_handlers, one_highest_fd - 1);
    if (ptr->handler)
      break;
    one_highest_fd--;
  }
}

/* ================================================== */

void
SCH_SetFileHandlerEvent(int fd, int event, int enable)
{
  FileHandlerEntry *ptr;

  ptr = ARR_GetElement(file_handlers, fd);

  if (enable)
    ptr->events |= event;
  else
    ptr->events &= ~event;
}

/* ================================================== */

void
SCH_GetLastEventTime(struct timespec *cooked, double *err, struct timespec *raw)
{
  if (cooked) {
    *cooked = last_select_ts;
    if (err)
      *err = last_select_ts_err;
  }
  if (raw)
    *raw = last_select_ts_raw;
}

/* ================================================== */

double
SCH_GetLastEventMonoTime(void)
{
  return last_select_ts_mono;
}

/* ================================================== */

#define TQE_ALLOC_QUANTUM 32

static TimerQueueEntry *
allocate_tqe(void)
{
  TimerQueueEntry *new_block;
  TimerQueueEntry *result;
  int i;
  if (tqe_free_list == NULL) {
    new_block = MallocArray(TimerQueueEntry, TQE_ALLOC_QUANTUM);
    for (i = 1; i < TQE_ALLOC_QUANTUM; i++) {
      new_block[i].next = &(new_block[i - 1]);
    }
    new_block[0].next = NULL;
    tqe_free_list = &(new_block[TQE_ALLOC_QUANTUM - 1]);
  }

  result = tqe_free_list;
  tqe_free_list = tqe_free_list->next;
  return result;
}

/* ================================================== */

static void
release_tqe(TimerQueueEntry *node)
{
  node->next = tqe_free_list;
  tqe_free_list = node;
}
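
/* Entries are carved out of malloc()ed blocks of TQE_ALLOC_QUANTUM at a
   time and recycled through the free list above; the blocks are never
   handed back to the allocator for the lifetime of the process. */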

/* ================================================== */

static SCH_TimeoutID
get_new_tqe_id(void)
{
  TimerQueueEntry *ptr;

try_again:
  next_tqe_id++;
  if (!next_tqe_id)
    goto try_again;

  /* Make sure the ID isn't already used */
  for (ptr = timer_queue.next; ptr != &timer_queue; ptr = ptr->next)
    if (ptr->id == next_tqe_id)
      goto try_again;

  return next_tqe_id;
}

/* ================================================== */

SCH_TimeoutID
SCH_AddTimeout(struct timespec *ts, SCH_TimeoutHandler handler, SCH_ArbitraryArgument arg)
{
  TimerQueueEntry *new_tqe;
  TimerQueueEntry *ptr;

  assert(initialised);

  new_tqe = allocate_tqe();

  new_tqe->id = get_new_tqe_id();
  new_tqe->handler = handler;
  new_tqe->arg = arg;
  new_tqe->ts = *ts;
  new_tqe->class = SCH_ReservedTimeoutValue;

  /* Now work out where to insert the new entry in the list */
  for (ptr = timer_queue.next; ptr != &timer_queue; ptr = ptr->next) {
    if (UTI_CompareTimespecs(&new_tqe->ts, &ptr->ts) == -1) {
      /* If the new entry comes before the current pointer location in
         the list, we want to insert the new entry just before ptr. */
      break;
    }
  }

  /* At this stage, we want to insert the new entry immediately before
     the entry identified by 'ptr' */

  new_tqe->next = ptr;
  new_tqe->prev = ptr->prev;
  ptr->prev->next = new_tqe;
  ptr->prev = new_tqe;

  n_timer_queue_entries++;

  return new_tqe->id;
}
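
/* Because the queue is kept sorted by expiry time, the dispatcher only
   ever needs to inspect timer_queue.next to find the next timeout due. */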

/* ================================================== */
/* This queues a timeout to elapse at a given delta time relative to
   the current (raw) time */

SCH_TimeoutID
SCH_AddTimeoutByDelay(double delay, SCH_TimeoutHandler handler, SCH_ArbitraryArgument arg)
{
  struct timespec now, then;

  assert(initialised);
  assert(delay >= 0.0);

  LCL_ReadRawTime(&now);
  UTI_AddDoubleToTimespec(&now, delay, &then);
  if (UTI_CompareTimespecs(&now, &then) > 0) {
    LOG_FATAL("Timeout overflow");
  }

  return SCH_AddTimeout(&then, handler, arg);
}
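
/* Usage sketch (illustrative only; the handler name is hypothetical):
   a module can run periodic work by rescheduling itself from its own
   timeout handler, e.g.

     static void one_second_tick(SCH_ArbitraryArgument arg)
     {
       (do the periodic work here)
       SCH_AddTimeoutByDelay(1.0, one_second_tick, arg);
     }

   started once with SCH_AddTimeoutByDelay(1.0, one_second_tick, NULL). */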

/* ================================================== */

SCH_TimeoutID
SCH_AddTimeoutInClass(double min_delay, double separation, double randomness,
                      SCH_TimeoutClass class,
                      SCH_TimeoutHandler handler, SCH_ArbitraryArgument arg)
{
  TimerQueueEntry *new_tqe;
  TimerQueueEntry *ptr;
  struct timespec now;
  double diff, r;
  double new_min_delay;

  assert(initialised);
  assert(min_delay >= 0.0);
  assert(class < SCH_NumberOfClasses);

  if (randomness > 0.0) {
    uint32_t rnd;

    UTI_GetRandomBytes(&rnd, sizeof (rnd));
    r = rnd * (randomness / (uint32_t)-1) + 1.0;
    min_delay *= r;
    separation *= r;
  }

  LCL_ReadRawTime(&now);
  new_min_delay = min_delay;

  /* Check the separation from the last dispatched timeout */
  diff = UTI_DiffTimespecsToDouble(&now, &last_class_dispatch[class]);
  if (diff < separation && diff >= 0.0 && diff + new_min_delay < separation) {
    new_min_delay = separation - diff;
  }

  /* Scan through the list for entries in the same class and increase
     min_delay if necessary to keep at least the separation away */
  for (ptr = timer_queue.next; ptr != &timer_queue; ptr = ptr->next) {
    if (ptr->class == class) {
      diff = UTI_DiffTimespecsToDouble(&ptr->ts, &now);
      if (new_min_delay > diff) {
        if (new_min_delay - diff < separation) {
          new_min_delay = diff + separation;
        }
      } else {
        if (diff - new_min_delay < separation) {
          new_min_delay = diff + separation;
        }
      }
    }
  }

  for (ptr = timer_queue.next; ptr != &timer_queue; ptr = ptr->next) {
    diff = UTI_DiffTimespecsToDouble(&ptr->ts, &now);
    if (diff > new_min_delay) {
      break;
    }
  }

  /* We have located the insertion point */
  new_tqe = allocate_tqe();

  new_tqe->id = get_new_tqe_id();
  new_tqe->handler = handler;
  new_tqe->arg = arg;
  UTI_AddDoubleToTimespec(&now, new_min_delay, &new_tqe->ts);
  new_tqe->class = class;

  new_tqe->next = ptr;
  new_tqe->prev = ptr->prev;
  ptr->prev->next = new_tqe;
  ptr->prev = new_tqe;
  n_timer_queue_entries++;

  return new_tqe->id;
}
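
/* For illustration (values hypothetical): with min_delay 1.0, separation
   2.0 and the last timeout of the class dispatched 0.5 seconds ago, the
   new timeout is pushed back to 1.5 seconds so that the 2-second spacing
   from the previous dispatch is preserved.  The randomness parameter
   scales both min_delay and separation by a factor drawn uniformly from
   [1.0, 1.0 + randomness]. */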

/* ================================================== */

void
SCH_RemoveTimeout(SCH_TimeoutID id)
{
  TimerQueueEntry *ptr;

  assert(initialised);

  if (!id)
    return;

  for (ptr = timer_queue.next; ptr != &timer_queue; ptr = ptr->next) {

    if (ptr->id == id) {
      /* Found the required entry */

      /* Unlink from the queue */
      ptr->next->prev = ptr->prev;
      ptr->prev->next = ptr->next;

      /* Decrement entry count */
      --n_timer_queue_entries;

      /* Return the entry to the free list */
      release_tqe(ptr);

      return;
    }
  }

  /* Catch calls with invalid non-zero ID */
  assert(0);
}

/* ================================================== */

void
SCH_Reset(void)
{
  while (n_timer_queue_entries > 0)
    SCH_RemoveTimeout(timer_queue.next->id);

  while (one_highest_fd > 0) {
    close(one_highest_fd - 1);
    SCH_RemoveFileHandler(one_highest_fd - 1);
  }
}

/* ================================================== */
/* Try to dispatch any timeouts that have already gone by, and
   keep going until all are done.  (The earlier ones may take so
   long to do that the later ones come around by the time they are
   completed.) */

static void
dispatch_timeouts(struct timespec *now) {
  TimerQueueEntry *ptr;
  SCH_TimeoutHandler handler;
  SCH_ArbitraryArgument arg;
  int n_done = 0, n_entries_on_start = n_timer_queue_entries;

  while (1) {
    LCL_ReadRawTime(now);

    if (!(n_timer_queue_entries > 0 &&
          UTI_CompareTimespecs(now, &timer_queue.next->ts) >= 0)) {
      break;
    }

    ptr = timer_queue.next;

    last_class_dispatch[ptr->class] = *now;

    handler = ptr->handler;
    arg = ptr->arg;

    SCH_RemoveTimeout(ptr->id);

    /* Dispatch the handler */
    (handler)(arg);

    /* Increment count of timeouts handled */
    ++n_done;

    /* If more timeouts were handled than there were in the timer queue at
       the start, and more than there are now, assume some code is scheduling
       timeouts with negative delays and abort.  Make the actual limit higher
       in case the machine is temporarily overloaded and dispatching the
       handlers takes more time than the delay of a scheduled timeout. */
    if (n_done > n_timer_queue_entries * 4 &&
        n_done > n_entries_on_start * 4) {
      LOG_FATAL("Possible infinite loop in scheduling");
    }
  }
}

/* ================================================== */

/* nfd is the number of bits set in all fd_sets */

static void
dispatch_filehandlers(int nfd, fd_set *read_fds, fd_set *write_fds, fd_set *except_fds)
{
  FileHandlerEntry *ptr;
  int fd;

  for (fd = 0; nfd && fd < one_highest_fd; fd++) {
    if (except_fds && FD_ISSET(fd, except_fds)) {
      /* This descriptor has an exception, dispatch its handler */
      ptr = (FileHandlerEntry *)ARR_GetElement(file_handlers, fd);
      if (ptr->handler)
        (ptr->handler)(fd, SCH_FILE_EXCEPTION, ptr->arg);
      nfd--;

      /* Don't try to read from it now */
      if (read_fds && FD_ISSET(fd, read_fds)) {
        FD_CLR(fd, read_fds);
        nfd--;
      }
    }

    if (read_fds && FD_ISSET(fd, read_fds)) {
      /* This descriptor can be read from, dispatch its handler */
      ptr = (FileHandlerEntry *)ARR_GetElement(file_handlers, fd);
      if (ptr->handler)
        (ptr->handler)(fd, SCH_FILE_INPUT, ptr->arg);
      nfd--;
    }

    if (write_fds && FD_ISSET(fd, write_fds)) {
      /* This descriptor can be written to, dispatch its handler */
      ptr = (FileHandlerEntry *)ARR_GetElement(file_handlers, fd);
      if (ptr->handler)
        (ptr->handler)(fd, SCH_FILE_OUTPUT, ptr->arg);
      nfd--;
    }
  }
}

/* ================================================== */

static void
handle_slew(struct timespec *raw,
            struct timespec *cooked,
            double dfreq,
            double doffset,
            LCL_ChangeType change_type,
            void *anything)
{
  TimerQueueEntry *ptr;
  double delta;
  int i;

  if (change_type != LCL_ChangeAdjust) {
    /* Make sure this handler is invoked first in order to not shift new timers
       added from other handlers */
    assert(LCL_IsFirstParameterChangeHandler(handle_slew));

    /* If a step change occurs, just shift all raw time stamps by the offset */

    for (ptr = timer_queue.next; ptr != &timer_queue; ptr = ptr->next) {
      UTI_AddDoubleToTimespec(&ptr->ts, -doffset, &ptr->ts);
    }

    for (i = 0; i < SCH_NumberOfClasses; i++) {
      UTI_AddDoubleToTimespec(&last_class_dispatch[i], -doffset, &last_class_dispatch[i]);
    }

    UTI_AddDoubleToTimespec(&last_select_ts_raw, -doffset, &last_select_ts_raw);
  }

  UTI_AdjustTimespec(&last_select_ts, cooked, &last_select_ts, &delta, dfreq, doffset);
}

/* ================================================== */

static void
fill_fd_sets(fd_set **read_fds, fd_set **write_fds, fd_set **except_fds)
{
  FileHandlerEntry *handlers;
  fd_set *rd, *wr, *ex;
  int i, n, events;

  n = ARR_GetSize(file_handlers);
  handlers = ARR_GetElements(file_handlers);
  rd = wr = ex = NULL;

  for (i = 0; i < n; i++) {
    events = handlers[i].events;

    if (!events)
      continue;

    if (events & SCH_FILE_INPUT) {
      if (!rd) {
        rd = *read_fds;
        FD_ZERO(rd);
      }
      FD_SET(i, rd);
    }

    if (events & SCH_FILE_OUTPUT) {
      if (!wr) {
        wr = *write_fds;
        FD_ZERO(wr);
      }
      FD_SET(i, wr);
    }

    if (events & SCH_FILE_EXCEPTION) {
      if (!ex) {
        ex = *except_fds;
        FD_ZERO(ex);
      }
      FD_SET(i, ex);
    }
  }

  if (!rd)
    *read_fds = NULL;
  if (!wr)
    *write_fds = NULL;
  if (!ex)
    *except_fds = NULL;
}

/* ================================================== */

#define JUMP_DETECT_THRESHOLD 10

static int
check_current_time(struct timespec *prev_raw, struct timespec *raw, int timeout,
                   struct timeval *orig_select_tv,
                   struct timeval *rem_select_tv)
{
  struct timespec elapsed_min, elapsed_max, orig_select_ts, rem_select_ts;
  double step, elapsed;

  UTI_TimevalToTimespec(orig_select_tv, &orig_select_ts);

  /* Get an estimate of the time spent waiting in the select() call.  On some
     systems (e.g. Linux) the timeout timeval is modified to return the
     remaining time; use that information when it is available. */
  if (timeout) {
    elapsed_max = elapsed_min = orig_select_ts;
  } else if (rem_select_tv && rem_select_tv->tv_sec >= 0 &&
             rem_select_tv->tv_sec <= orig_select_tv->tv_sec &&
             (rem_select_tv->tv_sec != orig_select_tv->tv_sec ||
              rem_select_tv->tv_usec != orig_select_tv->tv_usec)) {
    UTI_TimevalToTimespec(rem_select_tv, &rem_select_ts);
    UTI_DiffTimespecs(&elapsed_min, &orig_select_ts, &rem_select_ts);
    elapsed_max = elapsed_min;
  } else {
    if (rem_select_tv)
      elapsed_max = orig_select_ts;
    else
      UTI_DiffTimespecs(&elapsed_max, raw, prev_raw);
    UTI_ZeroTimespec(&elapsed_min);
  }

  if (last_select_ts_raw.tv_sec + elapsed_min.tv_sec >
      raw->tv_sec + JUMP_DETECT_THRESHOLD) {
    LOG(LOGS_WARN, "Backward time jump detected!");
  } else if (prev_raw->tv_sec + elapsed_max.tv_sec + JUMP_DETECT_THRESHOLD <
             raw->tv_sec) {
    LOG(LOGS_WARN, "Forward time jump detected!");
  } else {
    return 1;
  }

  step = UTI_DiffTimespecsToDouble(&last_select_ts_raw, raw);
  elapsed = UTI_TimespecToDouble(&elapsed_min);
  step += elapsed;

  /* Cooked time may no longer be valid after dispatching the handlers */
  LCL_NotifyExternalTimeStep(raw, raw, step, fabs(step));

  return 0;
}

/* ================================================== */

static void
update_monotonic_time(struct timespec *now, struct timespec *before)
{
  struct timespec diff;

  /* Avoid frequent floating-point operations and handle small
     increments to a large value */

  UTI_DiffTimespecs(&diff, now, before);
  if (diff.tv_sec == 0) {
    last_select_ts_mono_ns += diff.tv_nsec;
  } else {
    last_select_ts_mono += fabs(UTI_TimespecToDouble(&diff) +
                                last_select_ts_mono_ns / 1.0e9);
    last_select_ts_mono_ns = 0;
  }

  if (last_select_ts_mono_ns > TS_MONO_PRECISION_NS) {
    last_select_ts_mono += last_select_ts_mono_ns / 1.0e9;
    last_select_ts_mono_ns = 0;
  }
}
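
/* Sub-second deltas are accumulated in the integer nanosecond counter and
   only folded into the double once they exceed TS_MONO_PRECISION_NS
   (10 ms), so the common case of a short select() interval costs no
   floating-point arithmetic and small increments are not lost to rounding
   against an already large value. */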

/* ================================================== */

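/* The main loop: dispatch expired timeouts, build the fd_sets from the
   registered handlers, block in select() until the next timeout or file
   event, check for unexpected clock jumps, then record the event time
   stamps and dispatch the ready file handlers.  This repeats until
   SCH_QuitProgram() is called. */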
void
SCH_MainLoop(void)
{
  fd_set read_fds, write_fds, except_fds;
  fd_set *p_read_fds, *p_write_fds, *p_except_fds;
  int status, errsv;
  struct timeval tv, saved_tv, *ptv;
  struct timespec ts, now, saved_now, cooked;
  double err;

  assert(initialised);

  while (!need_to_exit) {
    /* Dispatch timeouts and fill now with current raw time */
    dispatch_timeouts(&now);
    saved_now = now;

    /* The timeout handlers may request quit */
    if (need_to_exit)
      break;

    /* Check whether there is a timeout and set it up */
    if (n_timer_queue_entries > 0) {
      UTI_DiffTimespecs(&ts, &timer_queue.next->ts, &now);
      assert(ts.tv_sec > 0 || ts.tv_nsec > 0);

      UTI_TimespecToTimeval(&ts, &tv);
      ptv = &tv;
      saved_tv = tv;
    } else {
      ptv = NULL;
      saved_tv.tv_sec = saved_tv.tv_usec = 0;
    }

    p_read_fds = &read_fds;
    p_write_fds = &write_fds;
    p_except_fds = &except_fds;
    fill_fd_sets(&p_read_fds, &p_write_fds, &p_except_fds);

    /* If there are no file descriptors being waited on and no
       timeout set, this is clearly ridiculous, so stop the run */
    if (!ptv && !p_read_fds && !p_write_fds)
      LOG_FATAL("Nothing to do");

    status = select(one_highest_fd, p_read_fds, p_write_fds, p_except_fds, ptv);
    errsv = errno;

    LCL_ReadRawTime(&now);
    LCL_CookTime(&now, &cooked, &err);

    /* Check that the time didn't jump unexpectedly */
    if (!check_current_time(&saved_now, &now, status == 0, &saved_tv, ptv)) {
      /* Cook the time again after handling the step */
      LCL_CookTime(&now, &cooked, &err);
    }

    update_monotonic_time(&cooked, &last_select_ts);

    last_select_ts_raw = now;
    last_select_ts = cooked;
    last_select_ts_err = err;

    if (status < 0) {
      if (!need_to_exit && errsv != EINTR) {
        LOG_FATAL("select() failed : %s", strerror(errsv));
      }
    } else if (status > 0) {
      /* A file descriptor is ready for input or output */
      dispatch_filehandlers(status, p_read_fds, p_write_fds, p_except_fds);
    } else {
      /* No descriptors are readable, so the timeout must have elapsed.
         Therefore, ptv must be non-null */
      assert(ptv);

      /* There's nothing to do here, since the timeouts
         will be dispatched at the top of the next loop
         cycle */
    }
  }
}

/* ================================================== */

void
SCH_QuitProgram(void)
{
  need_to_exit = 1;
}

/* ================================================== */