src/libsystemd/sd-event/sd-event.c
1 /* SPDX-License-Identifier: LGPL-2.1-or-later */
2
3 #include <sys/epoll.h>
4 #include <sys/timerfd.h>
5 #include <sys/wait.h>
6
7 #include "sd-daemon.h"
8 #include "sd-event.h"
9 #include "sd-id128.h"
10
11 #include "alloc-util.h"
12 #include "env-util.h"
13 #include "event-source.h"
14 #include "fd-util.h"
15 #include "fs-util.h"
16 #include "hashmap.h"
17 #include "list.h"
18 #include "macro.h"
19 #include "memory-util.h"
20 #include "missing_syscall.h"
21 #include "prioq.h"
22 #include "process-util.h"
23 #include "set.h"
24 #include "signal-util.h"
25 #include "string-table.h"
26 #include "string-util.h"
27 #include "strxcpyx.h"
28 #include "time-util.h"
29
30 #define DEFAULT_ACCURACY_USEC (250 * USEC_PER_MSEC)
31
32 static bool EVENT_SOURCE_WATCH_PIDFD(sd_event_source *s) {
33 /* Returns true if this is a PID event source that can be implemented by watching EPOLLIN */
34 return s &&
35 s->type == SOURCE_CHILD &&
36 s->child.pidfd >= 0 &&
37 s->child.options == WEXITED;
38 }
39
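/* An event source counts as "online" only if it is enabled (SD_EVENT_ON or SD_EVENT_ONESHOT) and not
 * currently suspended by a ratelimit; "offline" is the exact complement. */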
40 static bool event_source_is_online(sd_event_source *s) {
41 assert(s);
42 return s->enabled != SD_EVENT_OFF && !s->ratelimited;
43 }
44
45 static bool event_source_is_offline(sd_event_source *s) {
46 assert(s);
47 return s->enabled == SD_EVENT_OFF || s->ratelimited;
48 }
49
50 static const char* const event_source_type_table[_SOURCE_EVENT_SOURCE_TYPE_MAX] = {
51 [SOURCE_IO] = "io",
52 [SOURCE_TIME_REALTIME] = "realtime",
53 [SOURCE_TIME_BOOTTIME] = "boottime",
54 [SOURCE_TIME_MONOTONIC] = "monotonic",
55 [SOURCE_TIME_REALTIME_ALARM] = "realtime-alarm",
56 [SOURCE_TIME_BOOTTIME_ALARM] = "boottime-alarm",
57 [SOURCE_SIGNAL] = "signal",
58 [SOURCE_CHILD] = "child",
59 [SOURCE_DEFER] = "defer",
60 [SOURCE_POST] = "post",
61 [SOURCE_EXIT] = "exit",
62 [SOURCE_WATCHDOG] = "watchdog",
63 [SOURCE_INOTIFY] = "inotify",
64 };
65
66 DEFINE_PRIVATE_STRING_TABLE_LOOKUP_TO_STRING(event_source_type, int);
67
68 #define EVENT_SOURCE_IS_TIME(t) \
69 IN_SET((t), \
70 SOURCE_TIME_REALTIME, \
71 SOURCE_TIME_BOOTTIME, \
72 SOURCE_TIME_MONOTONIC, \
73 SOURCE_TIME_REALTIME_ALARM, \
74 SOURCE_TIME_BOOTTIME_ALARM)
75
76 #define EVENT_SOURCE_CAN_RATE_LIMIT(t) \
77 IN_SET((t), \
78 SOURCE_IO, \
79 SOURCE_TIME_REALTIME, \
80 SOURCE_TIME_BOOTTIME, \
81 SOURCE_TIME_MONOTONIC, \
82 SOURCE_TIME_REALTIME_ALARM, \
83 SOURCE_TIME_BOOTTIME_ALARM, \
84 SOURCE_SIGNAL, \
85 SOURCE_DEFER, \
86 SOURCE_INOTIFY)
87
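/* The event loop object proper: wraps one epoll fd plus all per-loop bookkeeping, i.e. the pending and
 * prepare priority queues, per-clock timer data, and the signal, child, post, exit and inotify source
 * registries. */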
88 struct sd_event {
89 unsigned n_ref;
90
91 int epoll_fd;
92 int watchdog_fd;
93
94 Prioq *pending;
95 Prioq *prepare;
96
97 /* timerfd_create() only supports these five clocks so far. We
98 * can add support for more clocks when the kernel learns to
99 * deal with them, too. */
100 struct clock_data realtime;
101 struct clock_data boottime;
102 struct clock_data monotonic;
103 struct clock_data realtime_alarm;
104 struct clock_data boottime_alarm;
105
106 usec_t perturb;
107
108 sd_event_source **signal_sources; /* indexed by signal number */
109 Hashmap *signal_data; /* indexed by priority */
110
111 Hashmap *child_sources;
112 unsigned n_online_child_sources;
113
114 Set *post_sources;
115
116 Prioq *exit;
117
118 Hashmap *inotify_data; /* indexed by priority */
119
120 /* A list of inode structures that still have an fd open, that we need to close before the next loop iteration */
121 LIST_HEAD(struct inode_data, inode_data_to_close);
122
123 /* A list of inotify objects that already have events buffered which aren't processed yet */
124 LIST_HEAD(struct inotify_data, inotify_data_buffered);
125
126 pid_t original_pid;
127
128 uint64_t iteration;
129 triple_timestamp timestamp;
130 int state;
131
132 bool exit_requested:1;
133 bool need_process_child:1;
134 bool watchdog:1;
135 bool profile_delays:1;
136
137 int exit_code;
138
139 pid_t tid;
140 sd_event **default_event_ptr;
141
142 usec_t watchdog_last, watchdog_period;
143
144 unsigned n_sources;
145
146 struct epoll_event *event_queue;
147 size_t event_queue_allocated;
148
149 LIST_HEAD(sd_event_source, sources);
150
151 usec_t last_run_usec, last_log_usec;
152 unsigned delays[sizeof(usec_t) * 8];
153 };
154
155 static thread_local sd_event *default_event = NULL;
156
157 static void source_disconnect(sd_event_source *s);
158 static void event_gc_inode_data(sd_event *e, struct inode_data *d);
159
160 static sd_event *event_resolve(sd_event *e) {
161 return e == SD_EVENT_DEFAULT ? default_event : e;
162 }
163
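/* Comparator for the "pending" priority queue: dispatch enabled sources before disabled ones, sources
 * that aren't ratelimited before ratelimited ones, then lower priority values first, and finally older
 * pending iterations first. */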
164 static int pending_prioq_compare(const void *a, const void *b) {
165 const sd_event_source *x = a, *y = b;
166 int r;
167
168 assert(x->pending);
169 assert(y->pending);
170
171 /* Enabled ones first */
172 if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
173 return -1;
174 if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
175 return 1;
176
177 /* Non rate-limited ones first. */
178 r = CMP(!!x->ratelimited, !!y->ratelimited);
179 if (r != 0)
180 return r;
181
182 /* Lower priority values first */
183 r = CMP(x->priority, y->priority);
184 if (r != 0)
185 return r;
186
187 /* Older entries first */
188 return CMP(x->pending_iteration, y->pending_iteration);
189 }
190
191 static int prepare_prioq_compare(const void *a, const void *b) {
192 const sd_event_source *x = a, *y = b;
193 int r;
194
195 assert(x->prepare);
196 assert(y->prepare);
197
198 /* Enabled ones first */
199 if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
200 return -1;
201 if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
202 return 1;
203
204 /* Non rate-limited ones first. */
205 r = CMP(!!x->ratelimited, !!y->ratelimited);
206 if (r != 0)
207 return r;
208
209 /* Move most recently prepared ones last, so that we can stop
210 * preparing as soon as we hit one that has already been
211 * prepared in the current iteration */
212 r = CMP(x->prepare_iteration, y->prepare_iteration);
213 if (r != 0)
214 return r;
215
216 /* Lower priority values first */
217 return CMP(x->priority, y->priority);
218 }
219
220 static usec_t time_event_source_next(const sd_event_source *s) {
221 assert(s);
222
223 /* We have two kinds of event sources with an elapse time associated with them: the actual
224 * time-based ones and the ones for which a ratelimit can be in effect (where we want to be notified
225 * once the ratelimit time window ends). Let's return the next elapse time depending on what we are
226 * looking at here. */
227
228 if (s->ratelimited) { /* If rate-limited, the next elapse time is when the ratelimit time window ends */
229 assert(s->rate_limit.begin != 0);
230 assert(s->rate_limit.interval != 0);
231 return usec_add(s->rate_limit.begin, s->rate_limit.interval);
232 }
233
234 /* Otherwise this must be a time event source, if not ratelimited */
235 if (EVENT_SOURCE_IS_TIME(s->type))
236 return s->time.next;
237
238 return USEC_INFINITY;
239 }
240
241 static int earliest_time_prioq_compare(const void *a, const void *b) {
242 const sd_event_source *x = a, *y = b;
243
244 /* Enabled ones first */
245 if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
246 return -1;
247 if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
248 return 1;
249
250 /* Move the pending ones to the end */
251 if (!x->pending && y->pending)
252 return -1;
253 if (x->pending && !y->pending)
254 return 1;
255
256 /* Order by time */
257 return CMP(time_event_source_next(x), time_event_source_next(y));
258 }
259
260 static usec_t time_event_source_latest(const sd_event_source *s) {
261 assert(s);
262
263 if (s->ratelimited) { /* For ratelimited event sources the earliest and the latest time shall actually be the
264 * same, as we should avoid adding additional inaccuracy on top of the ratelimit
265 * window */
266 assert(s->rate_limit.begin != 0);
267 assert(s->rate_limit.interval != 0);
268 return usec_add(s->rate_limit.begin, s->rate_limit.interval);
269 }
270
271 /* Must be a time event source, if not ratelimited */
272 if (EVENT_SOURCE_IS_TIME(s->type))
273 return usec_add(s->time.next, s->time.accuracy);
274
275 return USEC_INFINITY;
276 }
277
278 static int latest_time_prioq_compare(const void *a, const void *b) {
279 const sd_event_source *x = a, *y = b;
280
281 /* Enabled ones first */
282 if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
283 return -1;
284 if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
285 return 1;
286
287 /* Move the pending ones to the end */
288 if (!x->pending && y->pending)
289 return -1;
290 if (x->pending && !y->pending)
291 return 1;
292
293 /* Order by time */
294 return CMP(time_event_source_latest(x), time_event_source_latest(y));
295 }
296
297 static int exit_prioq_compare(const void *a, const void *b) {
298 const sd_event_source *x = a, *y = b;
299
300 assert(x->type == SOURCE_EXIT);
301 assert(y->type == SOURCE_EXIT);
302
303 /* Enabled ones first */
304 if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
305 return -1;
306 if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
307 return 1;
308
309 /* Lower priority values first */
310 return CMP(x->priority, y->priority);
311 }
312
313 static void free_clock_data(struct clock_data *d) {
314 assert(d);
315 assert(d->wakeup == WAKEUP_CLOCK_DATA);
316
317 safe_close(d->fd);
318 prioq_free(d->earliest);
319 prioq_free(d->latest);
320 }
321
322 static sd_event *event_free(sd_event *e) {
323 sd_event_source *s;
324
325 assert(e);
326
327 while ((s = e->sources)) {
328 assert(s->floating);
329 source_disconnect(s);
330 sd_event_source_unref(s);
331 }
332
333 assert(e->n_sources == 0);
334
335 if (e->default_event_ptr)
336 *(e->default_event_ptr) = NULL;
337
338 safe_close(e->epoll_fd);
339 safe_close(e->watchdog_fd);
340
341 free_clock_data(&e->realtime);
342 free_clock_data(&e->boottime);
343 free_clock_data(&e->monotonic);
344 free_clock_data(&e->realtime_alarm);
345 free_clock_data(&e->boottime_alarm);
346
347 prioq_free(e->pending);
348 prioq_free(e->prepare);
349 prioq_free(e->exit);
350
351 free(e->signal_sources);
352 hashmap_free(e->signal_data);
353
354 hashmap_free(e->inotify_data);
355
356 hashmap_free(e->child_sources);
357 set_free(e->post_sources);
358
359 free(e->event_queue);
360
361 return mfree(e);
362 }
363
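/* Allocates a new event loop object. A minimal usage sketch (assuming the public sd-event API as
 * declared in sd-event.h):
 *
 *     sd_event *e = NULL;
 *     int r = sd_event_new(&e);
 *     if (r < 0)
 *             return r;
 *     // ... attach sources with sd_event_add_io(), sd_event_add_time(), ... then:
 *     r = sd_event_loop(e);
 *     sd_event_unref(e);
 */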
364 _public_ int sd_event_new(sd_event** ret) {
365 sd_event *e;
366 int r;
367
368 assert_return(ret, -EINVAL);
369
370 e = new(sd_event, 1);
371 if (!e)
372 return -ENOMEM;
373
374 *e = (sd_event) {
375 .n_ref = 1,
376 .epoll_fd = -1,
377 .watchdog_fd = -1,
378 .realtime.wakeup = WAKEUP_CLOCK_DATA,
379 .realtime.fd = -1,
380 .realtime.next = USEC_INFINITY,
381 .boottime.wakeup = WAKEUP_CLOCK_DATA,
382 .boottime.fd = -1,
383 .boottime.next = USEC_INFINITY,
384 .monotonic.wakeup = WAKEUP_CLOCK_DATA,
385 .monotonic.fd = -1,
386 .monotonic.next = USEC_INFINITY,
387 .realtime_alarm.wakeup = WAKEUP_CLOCK_DATA,
388 .realtime_alarm.fd = -1,
389 .realtime_alarm.next = USEC_INFINITY,
390 .boottime_alarm.wakeup = WAKEUP_CLOCK_DATA,
391 .boottime_alarm.fd = -1,
392 .boottime_alarm.next = USEC_INFINITY,
393 .perturb = USEC_INFINITY,
394 .original_pid = getpid_cached(),
395 };
396
397 r = prioq_ensure_allocated(&e->pending, pending_prioq_compare);
398 if (r < 0)
399 goto fail;
400
401 e->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
402 if (e->epoll_fd < 0) {
403 r = -errno;
404 goto fail;
405 }
406
407 e->epoll_fd = fd_move_above_stdio(e->epoll_fd);
408
409 if (secure_getenv("SD_EVENT_PROFILE_DELAYS")) {
410 log_debug("Event loop profiling enabled. Logarithmic histogram of event loop iterations in the range 2^0 … 2^63 us will be logged every 5s.");
411 e->profile_delays = true;
412 }
413
414 *ret = e;
415 return 0;
416
417 fail:
418 event_free(e);
419 return r;
420 }
421
422 DEFINE_PUBLIC_TRIVIAL_REF_UNREF_FUNC(sd_event, sd_event, event_free);
423
424 _public_ sd_event_source* sd_event_source_disable_unref(sd_event_source *s) {
425 if (s)
426 (void) sd_event_source_set_enabled(s, SD_EVENT_OFF);
427 return sd_event_source_unref(s);
428 }
429
430 static bool event_pid_changed(sd_event *e) {
431 assert(e);
432
433 /* We don't support people creating an event loop and keeping
434 * it around over a fork(). Let's complain. */
435
436 return e->original_pid != getpid_cached();
437 }
438
439 static void source_io_unregister(sd_event_source *s) {
440 assert(s);
441 assert(s->type == SOURCE_IO);
442
443 if (event_pid_changed(s->event))
444 return;
445
446 if (!s->io.registered)
447 return;
448
449 if (epoll_ctl(s->event->epoll_fd, EPOLL_CTL_DEL, s->io.fd, NULL) < 0)
450 log_debug_errno(errno, "Failed to remove source %s (type %s) from epoll, ignoring: %m",
451 strna(s->description), event_source_type_to_string(s->type));
452
453 s->io.registered = false;
454 }
455
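/* Adds the I/O source's fd to the epoll instance (or updates the registration if one already exists),
 * mapping SD_EVENT_ONESHOT onto EPOLLONESHOT. */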
456 static int source_io_register(
457 sd_event_source *s,
458 int enabled,
459 uint32_t events) {
460
461 assert(s);
462 assert(s->type == SOURCE_IO);
463 assert(enabled != SD_EVENT_OFF);
464
465 struct epoll_event ev = {
466 .events = events | (enabled == SD_EVENT_ONESHOT ? EPOLLONESHOT : 0),
467 .data.ptr = s,
468 };
469
470 if (epoll_ctl(s->event->epoll_fd,
471 s->io.registered ? EPOLL_CTL_MOD : EPOLL_CTL_ADD,
472 s->io.fd, &ev) < 0)
473 return -errno;
474
475 s->io.registered = true;
476
477 return 0;
478 }
479
480 static void source_child_pidfd_unregister(sd_event_source *s) {
481 assert(s);
482 assert(s->type == SOURCE_CHILD);
483
484 if (event_pid_changed(s->event))
485 return;
486
487 if (!s->child.registered)
488 return;
489
490 if (EVENT_SOURCE_WATCH_PIDFD(s))
491 if (epoll_ctl(s->event->epoll_fd, EPOLL_CTL_DEL, s->child.pidfd, NULL) < 0)
492 log_debug_errno(errno, "Failed to remove source %s (type %s) from epoll, ignoring: %m",
493 strna(s->description), event_source_type_to_string(s->type));
494
495 s->child.registered = false;
496 }
497
498 static int source_child_pidfd_register(sd_event_source *s, int enabled) {
499 assert(s);
500 assert(s->type == SOURCE_CHILD);
501 assert(enabled != SD_EVENT_OFF);
502
503 if (EVENT_SOURCE_WATCH_PIDFD(s)) {
504 struct epoll_event ev = {
505 .events = EPOLLIN | (enabled == SD_EVENT_ONESHOT ? EPOLLONESHOT : 0),
506 .data.ptr = s,
507 };
508
509 if (epoll_ctl(s->event->epoll_fd,
510 s->child.registered ? EPOLL_CTL_MOD : EPOLL_CTL_ADD,
511 s->child.pidfd, &ev) < 0)
512 return -errno;
513 }
514
515 s->child.registered = true;
516 return 0;
517 }
518
519 static clockid_t event_source_type_to_clock(EventSourceType t) {
520
521 switch (t) {
522
523 case SOURCE_TIME_REALTIME:
524 return CLOCK_REALTIME;
525
526 case SOURCE_TIME_BOOTTIME:
527 return CLOCK_BOOTTIME;
528
529 case SOURCE_TIME_MONOTONIC:
530 return CLOCK_MONOTONIC;
531
532 case SOURCE_TIME_REALTIME_ALARM:
533 return CLOCK_REALTIME_ALARM;
534
535 case SOURCE_TIME_BOOTTIME_ALARM:
536 return CLOCK_BOOTTIME_ALARM;
537
538 default:
539 return (clockid_t) -1;
540 }
541 }
542
543 static EventSourceType clock_to_event_source_type(clockid_t clock) {
544
545 switch (clock) {
546
547 case CLOCK_REALTIME:
548 return SOURCE_TIME_REALTIME;
549
550 case CLOCK_BOOTTIME:
551 return SOURCE_TIME_BOOTTIME;
552
553 case CLOCK_MONOTONIC:
554 return SOURCE_TIME_MONOTONIC;
555
556 case CLOCK_REALTIME_ALARM:
557 return SOURCE_TIME_REALTIME_ALARM;
558
559 case CLOCK_BOOTTIME_ALARM:
560 return SOURCE_TIME_BOOTTIME_ALARM;
561
562 default:
563 return _SOURCE_EVENT_SOURCE_TYPE_INVALID;
564 }
565 }
566
567 static struct clock_data* event_get_clock_data(sd_event *e, EventSourceType t) {
568 assert(e);
569
570 switch (t) {
571
572 case SOURCE_TIME_REALTIME:
573 return &e->realtime;
574
575 case SOURCE_TIME_BOOTTIME:
576 return &e->boottime;
577
578 case SOURCE_TIME_MONOTONIC:
579 return &e->monotonic;
580
581 case SOURCE_TIME_REALTIME_ALARM:
582 return &e->realtime_alarm;
583
584 case SOURCE_TIME_BOOTTIME_ALARM:
585 return &e->boottime_alarm;
586
587 default:
588 return NULL;
589 }
590 }
591
592 static void event_free_signal_data(sd_event *e, struct signal_data *d) {
593 assert(e);
594
595 if (!d)
596 return;
597
598 hashmap_remove(e->signal_data, &d->priority);
599 safe_close(d->fd);
600 free(d);
601 }
602
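/* Ensures a signalfd-based signal_data object exists for the priority the given signal is dispatched
 * at, allocating the object and registering its fd with epoll on first use, and adds the signal to the
 * object's signal mask. */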
603 static int event_make_signal_data(
604 sd_event *e,
605 int sig,
606 struct signal_data **ret) {
607
608 struct signal_data *d;
609 bool added = false;
610 sigset_t ss_copy;
611 int64_t priority;
612 int r;
613
614 assert(e);
615
616 if (event_pid_changed(e))
617 return -ECHILD;
618
619 if (e->signal_sources && e->signal_sources[sig])
620 priority = e->signal_sources[sig]->priority;
621 else
622 priority = SD_EVENT_PRIORITY_NORMAL;
623
624 d = hashmap_get(e->signal_data, &priority);
625 if (d) {
626 if (sigismember(&d->sigset, sig) > 0) {
627 if (ret)
628 *ret = d;
629 return 0;
630 }
631 } else {
632 d = new(struct signal_data, 1);
633 if (!d)
634 return -ENOMEM;
635
636 *d = (struct signal_data) {
637 .wakeup = WAKEUP_SIGNAL_DATA,
638 .fd = -1,
639 .priority = priority,
640 };
641
642 r = hashmap_ensure_put(&e->signal_data, &uint64_hash_ops, &d->priority, d);
643 if (r < 0) {
644 free(d);
645 return r;
646 }
647
648 added = true;
649 }
650
651 ss_copy = d->sigset;
652 assert_se(sigaddset(&ss_copy, sig) >= 0);
653
654 r = signalfd(d->fd, &ss_copy, SFD_NONBLOCK|SFD_CLOEXEC);
655 if (r < 0) {
656 r = -errno;
657 goto fail;
658 }
659
660 d->sigset = ss_copy;
661
662 if (d->fd >= 0) {
663 if (ret)
664 *ret = d;
665 return 0;
666 }
667
668 d->fd = fd_move_above_stdio(r);
669
670 struct epoll_event ev = {
671 .events = EPOLLIN,
672 .data.ptr = d,
673 };
674
675 if (epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, d->fd, &ev) < 0) {
676 r = -errno;
677 goto fail;
678 }
679
680 if (ret)
681 *ret = d;
682
683 return 0;
684
685 fail:
686 if (added)
687 event_free_signal_data(e, d);
688
689 return r;
690 }
691
692 static void event_unmask_signal_data(sd_event *e, struct signal_data *d, int sig) {
693 assert(e);
694 assert(d);
695
696 /* Turns off the specified signal in the signal data
697 * object. If the signal mask of the object becomes empty
698 * that way, the object is removed altogether. */
699
700 if (sigismember(&d->sigset, sig) == 0)
701 return;
702
703 assert_se(sigdelset(&d->sigset, sig) >= 0);
704
705 if (sigisemptyset(&d->sigset)) {
706 /* If the mask is now empty we can get rid of the structure */
707 event_free_signal_data(e, d);
708 return;
709 }
710
711 assert(d->fd >= 0);
712
713 if (signalfd(d->fd, &d->sigset, SFD_NONBLOCK|SFD_CLOEXEC) < 0)
714 log_debug_errno(errno, "Failed to unset signal bit, ignoring: %m");
715 }
716
717 static void event_gc_signal_data(sd_event *e, const int64_t *priority, int sig) {
718 struct signal_data *d;
719 static const int64_t zero_priority = 0;
720
721 assert(e);
722
723 /* Rechecks if the specified signal is still something we are interested in. If not, we'll unmask it,
724 * and possibly drop the signalfd for it. */
725
726 if (sig == SIGCHLD &&
727 e->n_online_child_sources > 0)
728 return;
729
730 if (e->signal_sources &&
731 e->signal_sources[sig] &&
732 event_source_is_online(e->signal_sources[sig]))
733 return;
734
735 /*
736 * The specified signal might be enabled in three different queues:
737 *
738 * 1) the one that belongs to the priority passed (if it is non-NULL)
739 * 2) the one that belongs to the priority of the event source of the signal (if there is one)
740 * 3) the 0 priority (to cover the SIGCHLD case)
741 *
742 * Hence, let's remove it from all three here.
743 */
744
745 if (priority) {
746 d = hashmap_get(e->signal_data, priority);
747 if (d)
748 event_unmask_signal_data(e, d, sig);
749 }
750
751 if (e->signal_sources && e->signal_sources[sig]) {
752 d = hashmap_get(e->signal_data, &e->signal_sources[sig]->priority);
753 if (d)
754 event_unmask_signal_data(e, d, sig);
755 }
756
757 d = hashmap_get(e->signal_data, &zero_priority);
758 if (d)
759 event_unmask_signal_data(e, d, sig);
760 }
761
762 static void event_source_pp_prioq_reshuffle(sd_event_source *s) {
763 assert(s);
764
765 /* Reshuffles the pending + prepare prioqs. Called whenever the dispatch order changes, i.e. when
766 * they are enabled/disabled or marked pending and such. */
767
768 if (s->pending)
769 prioq_reshuffle(s->event->pending, s, &s->pending_index);
770
771 if (s->prepare)
772 prioq_reshuffle(s->event->prepare, s, &s->prepare_index);
773 }
774
775 static void event_source_time_prioq_reshuffle(sd_event_source *s) {
776 struct clock_data *d;
777
778 assert(s);
779
780 /* Called whenever the event source's timer ordering properties change, i.e. time, accuracy,
781 * pending, enable state. Makes sure the two prioqs are ordered properly again. */
782
783 if (s->ratelimited)
784 d = &s->event->monotonic;
785 else {
786 assert(EVENT_SOURCE_IS_TIME(s->type));
787 assert_se(d = event_get_clock_data(s->event, s->type));
788 }
789
790 prioq_reshuffle(d->earliest, s, &s->earliest_index);
791 prioq_reshuffle(d->latest, s, &s->latest_index);
792 d->needs_rearm = true;
793 }
794
795 static void event_source_time_prioq_remove(
796 sd_event_source *s,
797 struct clock_data *d) {
798
799 assert(s);
800 assert(d);
801
802 prioq_remove(d->earliest, s, &s->earliest_index);
803 prioq_remove(d->latest, s, &s->latest_index);
804 s->earliest_index = s->latest_index = PRIOQ_IDX_NULL;
805 d->needs_rearm = true;
806 }
807
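/* Detaches an event source from its event loop: unregisters it from epoll/signalfd as appropriate,
 * removes it from all priority queues and lookup tables, and drops the loop reference if the source is
 * not floating. The source object itself stays valid and is freed separately in source_free(). */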
808 static void source_disconnect(sd_event_source *s) {
809 sd_event *event;
810
811 assert(s);
812
813 if (!s->event)
814 return;
815
816 assert(s->event->n_sources > 0);
817
818 switch (s->type) {
819
820 case SOURCE_IO:
821 if (s->io.fd >= 0)
822 source_io_unregister(s);
823
824 break;
825
826 case SOURCE_TIME_REALTIME:
827 case SOURCE_TIME_BOOTTIME:
828 case SOURCE_TIME_MONOTONIC:
829 case SOURCE_TIME_REALTIME_ALARM:
830 case SOURCE_TIME_BOOTTIME_ALARM:
831 /* Only remove this event source from the time prioqs here if it is not ratelimited. If
832 * it is ratelimited, we'll remove it below, separately. Why? Because the clock used might
833 * differ: ratelimiting always uses CLOCK_MONOTONIC, but timer events might use any clock */
834
835 if (!s->ratelimited) {
836 struct clock_data *d;
837 assert_se(d = event_get_clock_data(s->event, s->type));
838 event_source_time_prioq_remove(s, d);
839 }
840
841 break;
842
843 case SOURCE_SIGNAL:
844 if (s->signal.sig > 0) {
845
846 if (s->event->signal_sources)
847 s->event->signal_sources[s->signal.sig] = NULL;
848
849 event_gc_signal_data(s->event, &s->priority, s->signal.sig);
850 }
851
852 break;
853
854 case SOURCE_CHILD:
855 if (s->child.pid > 0) {
856 if (event_source_is_online(s)) {
857 assert(s->event->n_online_child_sources > 0);
858 s->event->n_online_child_sources--;
859 }
860
861 (void) hashmap_remove(s->event->child_sources, PID_TO_PTR(s->child.pid));
862 }
863
864 if (EVENT_SOURCE_WATCH_PIDFD(s))
865 source_child_pidfd_unregister(s);
866 else
867 event_gc_signal_data(s->event, &s->priority, SIGCHLD);
868
869 break;
870
871 case SOURCE_DEFER:
872 /* nothing */
873 break;
874
875 case SOURCE_POST:
876 set_remove(s->event->post_sources, s);
877 break;
878
879 case SOURCE_EXIT:
880 prioq_remove(s->event->exit, s, &s->exit.prioq_index);
881 break;
882
883 case SOURCE_INOTIFY: {
884 struct inode_data *inode_data;
885
886 inode_data = s->inotify.inode_data;
887 if (inode_data) {
888 struct inotify_data *inotify_data;
889 assert_se(inotify_data = inode_data->inotify_data);
890
891 /* Detach this event source from the inode object */
892 LIST_REMOVE(inotify.by_inode_data, inode_data->event_sources, s);
893 s->inotify.inode_data = NULL;
894
895 if (s->pending) {
896 assert(inotify_data->n_pending > 0);
897 inotify_data->n_pending--;
898 }
899
900 /* Note that we don't reduce the inotify mask for the watch descriptor here if the inode
901 * continues to be watched. That's because inotify doesn't really have an API for that: we
902 * can only change watch masks with access to the original inode either by fd or by path. But
903 * paths aren't stable, and keeping an O_PATH fd open all the time would mean wasting an fd
904 * continuously and keeping the mount busy which we can't really do. We could reconstruct the
905 * original inode from /proc/self/fdinfo/$INOTIFY_FD (as all watch descriptors are listed
906 * there), but given the need for open_by_handle_at() which is privileged and not universally
907 * available this would be quite an incomplete solution. Hence we go the other way, leave the
908 * mask set, even if it is not minimized now, and ignore all events we aren't interested in
909 * anymore after reception. Yes, this sucks, but … Linux … */
910
911 /* Maybe release the inode data (and its inotify) */
912 event_gc_inode_data(s->event, inode_data);
913 }
914
915 break;
916 }
917
918 default:
919 assert_not_reached("Wut? I shouldn't exist.");
920 }
921
922 if (s->pending)
923 prioq_remove(s->event->pending, s, &s->pending_index);
924
925 if (s->prepare)
926 prioq_remove(s->event->prepare, s, &s->prepare_index);
927
928 if (s->ratelimited)
929 event_source_time_prioq_remove(s, &s->event->monotonic);
930
931 event = TAKE_PTR(s->event);
932 LIST_REMOVE(sources, event->sources, s);
933 event->n_sources--;
934
935 /* Note that we don't invalidate the type here, since we still need it in order to close the fd or
936 * pidfd associated with this event source, which we'll do only on source_free(). */
937
938 if (!s->floating)
939 sd_event_unref(event);
940 }
941
942 static sd_event_source* source_free(sd_event_source *s) {
943 assert(s);
944
945 source_disconnect(s);
946
947 if (s->type == SOURCE_IO && s->io.owned)
948 s->io.fd = safe_close(s->io.fd);
949
950 if (s->type == SOURCE_CHILD) {
951 /* Eventually the kernel will do this automatically for us, but for now let's emulate this (unreliably) in userspace. */
952
953 if (s->child.process_owned) {
954
955 if (!s->child.exited) {
956 bool sent = false;
957
958 if (s->child.pidfd >= 0) {
959 if (pidfd_send_signal(s->child.pidfd, SIGKILL, NULL, 0) < 0) {
960 if (errno == ESRCH) /* Already dead */
961 sent = true;
962 else if (!ERRNO_IS_NOT_SUPPORTED(errno))
963 log_debug_errno(errno, "Failed to kill process " PID_FMT " via pidfd_send_signal(), re-trying via kill(): %m",
964 s->child.pid);
965 } else
966 sent = true;
967 }
968
969 if (!sent)
970 if (kill(s->child.pid, SIGKILL) < 0)
971 if (errno != ESRCH) /* Already dead */
972 log_debug_errno(errno, "Failed to kill process " PID_FMT " via kill(), ignoring: %m",
973 s->child.pid);
974 }
975
976 if (!s->child.waited) {
977 siginfo_t si = {};
978
979 /* Reap the child if we can */
980 (void) waitid(P_PID, s->child.pid, &si, WEXITED);
981 }
982 }
983
984 if (s->child.pidfd_owned)
985 s->child.pidfd = safe_close(s->child.pidfd);
986 }
987
988 if (s->destroy_callback)
989 s->destroy_callback(s->userdata);
990
991 free(s->description);
992 return mfree(s);
993 }
994 DEFINE_TRIVIAL_CLEANUP_FUNC(sd_event_source*, source_free);
995
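/* Marks an event source as pending (or clears the mark): inserts it into or removes it from the pending
 * priority queue and updates the per-type bookkeeping, e.g. the time prioqs and the inotify pending
 * counters. */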
996 static int source_set_pending(sd_event_source *s, bool b) {
997 int r;
998
999 assert(s);
1000 assert(s->type != SOURCE_EXIT);
1001
1002 if (s->pending == b)
1003 return 0;
1004
1005 s->pending = b;
1006
1007 if (b) {
1008 s->pending_iteration = s->event->iteration;
1009
1010 r = prioq_put(s->event->pending, s, &s->pending_index);
1011 if (r < 0) {
1012 s->pending = false;
1013 return r;
1014 }
1015 } else
1016 assert_se(prioq_remove(s->event->pending, s, &s->pending_index));
1017
1018 if (EVENT_SOURCE_IS_TIME(s->type))
1019 event_source_time_prioq_reshuffle(s);
1020
1021 if (s->type == SOURCE_SIGNAL && !b) {
1022 struct signal_data *d;
1023
1024 d = hashmap_get(s->event->signal_data, &s->priority);
1025 if (d && d->current == s)
1026 d->current = NULL;
1027 }
1028
1029 if (s->type == SOURCE_INOTIFY) {
1030
1031 assert(s->inotify.inode_data);
1032 assert(s->inotify.inode_data->inotify_data);
1033
1034 if (b)
1035 s->inotify.inode_data->inotify_data->n_pending++;
1036 else {
1037 assert(s->inotify.inode_data->inotify_data->n_pending > 0);
1038 s->inotify.inode_data->inotify_data->n_pending--;
1039 }
1040 }
1041
1042 return 0;
1043 }
1044
1045 static sd_event_source *source_new(sd_event *e, bool floating, EventSourceType type) {
1046 sd_event_source *s;
1047
1048 assert(e);
1049
1050 s = new(sd_event_source, 1);
1051 if (!s)
1052 return NULL;
1053
1054 *s = (struct sd_event_source) {
1055 .n_ref = 1,
1056 .event = e,
1057 .floating = floating,
1058 .type = type,
1059 .pending_index = PRIOQ_IDX_NULL,
1060 .prepare_index = PRIOQ_IDX_NULL,
1061 };
1062
1063 if (!floating)
1064 sd_event_ref(e);
1065
1066 LIST_PREPEND(sources, e->sources, s);
1067 e->n_sources++;
1068
1069 return s;
1070 }
1071
1072 static int io_exit_callback(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
1073 assert(s);
1074
1075 return sd_event_exit(sd_event_source_get_event(s), PTR_TO_INT(userdata));
1076 }
1077
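/* A minimal usage sketch (assuming an existing loop "e" and a readable fd such as a socket):
 *
 *     static int on_io(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
 *             // read from fd, handle EPOLLHUP/EPOLLERR as needed
 *             return 0;
 *     }
 *
 *     r = sd_event_add_io(e, &source, fd, EPOLLIN, on_io, NULL);
 */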
1078 _public_ int sd_event_add_io(
1079 sd_event *e,
1080 sd_event_source **ret,
1081 int fd,
1082 uint32_t events,
1083 sd_event_io_handler_t callback,
1084 void *userdata) {
1085
1086 _cleanup_(source_freep) sd_event_source *s = NULL;
1087 int r;
1088
1089 assert_return(e, -EINVAL);
1090 assert_return(e = event_resolve(e), -ENOPKG);
1091 assert_return(fd >= 0, -EBADF);
1092 assert_return(!(events & ~(EPOLLIN|EPOLLOUT|EPOLLRDHUP|EPOLLPRI|EPOLLERR|EPOLLHUP|EPOLLET)), -EINVAL);
1093 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
1094 assert_return(!event_pid_changed(e), -ECHILD);
1095
1096 if (!callback)
1097 callback = io_exit_callback;
1098
1099 s = source_new(e, !ret, SOURCE_IO);
1100 if (!s)
1101 return -ENOMEM;
1102
1103 s->wakeup = WAKEUP_EVENT_SOURCE;
1104 s->io.fd = fd;
1105 s->io.events = events;
1106 s->io.callback = callback;
1107 s->userdata = userdata;
1108 s->enabled = SD_EVENT_ON;
1109
1110 r = source_io_register(s, s->enabled, events);
1111 if (r < 0)
1112 return r;
1113
1114 if (ret)
1115 *ret = s;
1116 TAKE_PTR(s);
1117
1118 return 0;
1119 }
1120
1121 static void initialize_perturb(sd_event *e) {
1122 sd_id128_t bootid = {};
1123
1124 /* When we sleep for longer, we try to realign the wakeup to
1125 the same time within each minute/second/250ms, so that
1126 events all across the system can be coalesced into a single
1127 CPU wakeup. However, let's take some system-specific
1128 randomness for this value, so that in a network of systems
1129 with synced clocks timer events are distributed a
1130 bit. Here, we calculate a perturbation usec offset from the
1131 boot ID. */
1132
1133 if (_likely_(e->perturb != USEC_INFINITY))
1134 return;
1135
1136 if (sd_id128_get_boot(&bootid) >= 0)
1137 e->perturb = (bootid.qwords[0] ^ bootid.qwords[1]) % USEC_PER_MINUTE;
1138 }
1139
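/* Lazily creates the timerfd backing the given clock_data and registers it with the epoll instance; a
 * no-op if the fd was already set up. */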
1140 static int event_setup_timer_fd(
1141 sd_event *e,
1142 struct clock_data *d,
1143 clockid_t clock) {
1144
1145 assert(e);
1146 assert(d);
1147
1148 if (_likely_(d->fd >= 0))
1149 return 0;
1150
1151 _cleanup_close_ int fd = -1;
1152
1153 fd = timerfd_create(clock, TFD_NONBLOCK|TFD_CLOEXEC);
1154 if (fd < 0)
1155 return -errno;
1156
1157 fd = fd_move_above_stdio(fd);
1158
1159 struct epoll_event ev = {
1160 .events = EPOLLIN,
1161 .data.ptr = d,
1162 };
1163
1164 if (epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, fd, &ev) < 0)
1165 return -errno;
1166
1167 d->fd = TAKE_FD(fd);
1168 return 0;
1169 }
1170
1171 static int time_exit_callback(sd_event_source *s, uint64_t usec, void *userdata) {
1172 assert(s);
1173
1174 return sd_event_exit(sd_event_source_get_event(s), PTR_TO_INT(userdata));
1175 }
1176
1177 static int setup_clock_data(sd_event *e, struct clock_data *d, clockid_t clock) {
1178 int r;
1179
1180 assert(d);
1181
1182 if (d->fd < 0) {
1183 r = event_setup_timer_fd(e, d, clock);
1184 if (r < 0)
1185 return r;
1186 }
1187
1188 r = prioq_ensure_allocated(&d->earliest, earliest_time_prioq_compare);
1189 if (r < 0)
1190 return r;
1191
1192 r = prioq_ensure_allocated(&d->latest, latest_time_prioq_compare);
1193 if (r < 0)
1194 return r;
1195
1196 return 0;
1197 }
1198
1199 static int event_source_time_prioq_put(
1200 sd_event_source *s,
1201 struct clock_data *d) {
1202
1203 int r;
1204
1205 assert(s);
1206 assert(d);
1207
1208 r = prioq_put(d->earliest, s, &s->earliest_index);
1209 if (r < 0)
1210 return r;
1211
1212 r = prioq_put(d->latest, s, &s->latest_index);
1213 if (r < 0) {
1214 assert_se(prioq_remove(d->earliest, s, &s->earliest_index) > 0);
1215 s->earliest_index = PRIOQ_IDX_NULL;
1216 return r;
1217 }
1218
1219 d->needs_rearm = true;
1220 return 0;
1221 }
1222
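/* A minimal usage sketch (assuming an existing loop "e"), arming a one-shot timer 5s from now on
 * CLOCK_MONOTONIC with the default accuracy:
 *
 *     static int on_time(sd_event_source *s, uint64_t usec, void *userdata) {
 *             return 0;
 *     }
 *
 *     r = sd_event_add_time_relative(e, &source, CLOCK_MONOTONIC, 5 * USEC_PER_SEC, 0, on_time, NULL);
 */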
1223 _public_ int sd_event_add_time(
1224 sd_event *e,
1225 sd_event_source **ret,
1226 clockid_t clock,
1227 uint64_t usec,
1228 uint64_t accuracy,
1229 sd_event_time_handler_t callback,
1230 void *userdata) {
1231
1232 EventSourceType type;
1233 _cleanup_(source_freep) sd_event_source *s = NULL;
1234 struct clock_data *d;
1235 int r;
1236
1237 assert_return(e, -EINVAL);
1238 assert_return(e = event_resolve(e), -ENOPKG);
1239 assert_return(accuracy != (uint64_t) -1, -EINVAL);
1240 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
1241 assert_return(!event_pid_changed(e), -ECHILD);
1242
1243 if (!clock_supported(clock)) /* Checks whether the kernel supports the clock */
1244 return -EOPNOTSUPP;
1245
1246 type = clock_to_event_source_type(clock); /* checks whether sd-event supports this clock */
1247 if (type < 0)
1248 return -EOPNOTSUPP;
1249
1250 if (!callback)
1251 callback = time_exit_callback;
1252
1253 assert_se(d = event_get_clock_data(e, type));
1254
1255 r = setup_clock_data(e, d, clock);
1256 if (r < 0)
1257 return r;
1258
1259 s = source_new(e, !ret, type);
1260 if (!s)
1261 return -ENOMEM;
1262
1263 s->time.next = usec;
1264 s->time.accuracy = accuracy == 0 ? DEFAULT_ACCURACY_USEC : accuracy;
1265 s->time.callback = callback;
1266 s->earliest_index = s->latest_index = PRIOQ_IDX_NULL;
1267 s->userdata = userdata;
1268 s->enabled = SD_EVENT_ONESHOT;
1269
1270 r = event_source_time_prioq_put(s, d);
1271 if (r < 0)
1272 return r;
1273
1274 if (ret)
1275 *ret = s;
1276 TAKE_PTR(s);
1277
1278 return 0;
1279 }
1280
1281 _public_ int sd_event_add_time_relative(
1282 sd_event *e,
1283 sd_event_source **ret,
1284 clockid_t clock,
1285 uint64_t usec,
1286 uint64_t accuracy,
1287 sd_event_time_handler_t callback,
1288 void *userdata) {
1289
1290 usec_t t;
1291 int r;
1292
1293 /* Same as sd_event_add_time() but operates relative to the event loop's current point in time, and
1294 * checks for overflow. */
1295
1296 r = sd_event_now(e, clock, &t);
1297 if (r < 0)
1298 return r;
1299
1300 if (usec >= USEC_INFINITY - t)
1301 return -EOVERFLOW;
1302
1303 return sd_event_add_time(e, ret, clock, t + usec, accuracy, callback, userdata);
1304 }
1305
1306 static int signal_exit_callback(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
1307 assert(s);
1308
1309 return sd_event_exit(sd_event_source_get_event(s), PTR_TO_INT(userdata));
1310 }
1311
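/* Note that the caller must have blocked the signal (e.g. with sigprocmask() or pthread_sigmask())
 * before adding a signal source; otherwise this returns -EBUSY, as checked via signal_is_blocked()
 * below. */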
1312 _public_ int sd_event_add_signal(
1313 sd_event *e,
1314 sd_event_source **ret,
1315 int sig,
1316 sd_event_signal_handler_t callback,
1317 void *userdata) {
1318
1319 _cleanup_(source_freep) sd_event_source *s = NULL;
1320 struct signal_data *d;
1321 int r;
1322
1323 assert_return(e, -EINVAL);
1324 assert_return(e = event_resolve(e), -ENOPKG);
1325 assert_return(SIGNAL_VALID(sig), -EINVAL);
1326 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
1327 assert_return(!event_pid_changed(e), -ECHILD);
1328
1329 if (!callback)
1330 callback = signal_exit_callback;
1331
1332 r = signal_is_blocked(sig);
1333 if (r < 0)
1334 return r;
1335 if (r == 0)
1336 return -EBUSY;
1337
1338 if (!e->signal_sources) {
1339 e->signal_sources = new0(sd_event_source*, _NSIG);
1340 if (!e->signal_sources)
1341 return -ENOMEM;
1342 } else if (e->signal_sources[sig])
1343 return -EBUSY;
1344
1345 s = source_new(e, !ret, SOURCE_SIGNAL);
1346 if (!s)
1347 return -ENOMEM;
1348
1349 s->signal.sig = sig;
1350 s->signal.callback = callback;
1351 s->userdata = userdata;
1352 s->enabled = SD_EVENT_ON;
1353
1354 e->signal_sources[sig] = s;
1355
1356 r = event_make_signal_data(e, sig, &d);
1357 if (r < 0)
1358 return r;
1359
1360 /* Use the signal name as description for the event source by default */
1361 (void) sd_event_source_set_description(s, signal_to_string(sig));
1362
1363 if (ret)
1364 *ret = s;
1365 TAKE_PTR(s);
1366
1367 return 0;
1368 }
1369
1370 static int child_exit_callback(sd_event_source *s, const siginfo_t *si, void *userdata) {
1371 assert(s);
1372
1373 return sd_event_exit(sd_event_source_get_event(s), PTR_TO_INT(userdata));
1374 }
1375
1376 static bool shall_use_pidfd(void) {
1377 /* Mostly relevant for debugging, i.e. this is used in test-event.c to test the event loop once with and once without pidfd */
1378 return getenv_bool_secure("SYSTEMD_PIDFD") != 0;
1379 }
1380
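/* Adds a source watching a child process. SIGCHLD must be blocked by the caller, and a pidfd is used
 * to pin the PID where the kernel supports it; with a pidfd and WEXITED only, the child is watched
 * purely via EPOLLIN on the pidfd, otherwise via SIGCHLD plus waitid(). */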
1381 _public_ int sd_event_add_child(
1382 sd_event *e,
1383 sd_event_source **ret,
1384 pid_t pid,
1385 int options,
1386 sd_event_child_handler_t callback,
1387 void *userdata) {
1388
1389 _cleanup_(source_freep) sd_event_source *s = NULL;
1390 int r;
1391
1392 assert_return(e, -EINVAL);
1393 assert_return(e = event_resolve(e), -ENOPKG);
1394 assert_return(pid > 1, -EINVAL);
1395 assert_return(!(options & ~(WEXITED|WSTOPPED|WCONTINUED)), -EINVAL);
1396 assert_return(options != 0, -EINVAL);
1397 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
1398 assert_return(!event_pid_changed(e), -ECHILD);
1399
1400 if (!callback)
1401 callback = child_exit_callback;
1402
1403 if (e->n_online_child_sources == 0) {
1404 /* Caller must block SIGCHLD before using us to watch children, even if pidfd is available,
1405 * for compatibility with pre-pidfd and because we don't want to reap the child processes
1406 * ourselves, i.e. call waitid(), and don't want Linux' default internal logic for that to
1407 * take effect.
1408 *
1409 * (As an optimization we only do this check on the first child event source created.) */
1410 r = signal_is_blocked(SIGCHLD);
1411 if (r < 0)
1412 return r;
1413 if (r == 0)
1414 return -EBUSY;
1415 }
1416
1417 r = hashmap_ensure_allocated(&e->child_sources, NULL);
1418 if (r < 0)
1419 return r;
1420
1421 if (hashmap_contains(e->child_sources, PID_TO_PTR(pid)))
1422 return -EBUSY;
1423
1424 s = source_new(e, !ret, SOURCE_CHILD);
1425 if (!s)
1426 return -ENOMEM;
1427
1428 s->wakeup = WAKEUP_EVENT_SOURCE;
1429 s->child.pid = pid;
1430 s->child.options = options;
1431 s->child.callback = callback;
1432 s->userdata = userdata;
1433 s->enabled = SD_EVENT_ONESHOT;
1434
1435 /* We always take a pidfd here if we can, even if we wait for anything other than WEXITED, so that we
1436 * pin the PID, and make regular waitid() handling race-free. */
1437
1438 if (shall_use_pidfd()) {
1439 s->child.pidfd = pidfd_open(s->child.pid, 0);
1440 if (s->child.pidfd < 0) {
1441 /* Propagate errors unless the syscall is not supported or blocked */
1442 if (!ERRNO_IS_NOT_SUPPORTED(errno) && !ERRNO_IS_PRIVILEGE(errno))
1443 return -errno;
1444 } else
1445 s->child.pidfd_owned = true; /* If we allocate the pidfd we own it by default */
1446 } else
1447 s->child.pidfd = -1;
1448
1449 r = hashmap_put(e->child_sources, PID_TO_PTR(pid), s);
1450 if (r < 0)
1451 return r;
1452
1453 if (EVENT_SOURCE_WATCH_PIDFD(s)) {
1454 /* We have a pidfd and we only want to watch for exit */
1455 r = source_child_pidfd_register(s, s->enabled);
1456 if (r < 0)
1457 return r;
1458
1459 } else {
1460 /* We have no pidfd or we shall wait for some event other than WEXITED */
1461 r = event_make_signal_data(e, SIGCHLD, NULL);
1462 if (r < 0)
1463 return r;
1464
1465 e->need_process_child = true;
1466 }
1467
1468 e->n_online_child_sources++;
1469
1470 if (ret)
1471 *ret = s;
1472 TAKE_PTR(s);
1473 return 0;
1474 }
1475
1476 _public_ int sd_event_add_child_pidfd(
1477 sd_event *e,
1478 sd_event_source **ret,
1479 int pidfd,
1480 int options,
1481 sd_event_child_handler_t callback,
1482 void *userdata) {
1483
1484
1485 _cleanup_(source_freep) sd_event_source *s = NULL;
1486 pid_t pid;
1487 int r;
1488
1489 assert_return(e, -EINVAL);
1490 assert_return(e = event_resolve(e), -ENOPKG);
1491 assert_return(pidfd >= 0, -EBADF);
1492 assert_return(!(options & ~(WEXITED|WSTOPPED|WCONTINUED)), -EINVAL);
1493 assert_return(options != 0, -EINVAL);
1494 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
1495 assert_return(!event_pid_changed(e), -ECHILD);
1496
1497 if (!callback)
1498 callback = child_exit_callback;
1499
1500 if (e->n_online_child_sources == 0) {
1501 r = signal_is_blocked(SIGCHLD);
1502 if (r < 0)
1503 return r;
1504 if (r == 0)
1505 return -EBUSY;
1506 }
1507
1508 r = hashmap_ensure_allocated(&e->child_sources, NULL);
1509 if (r < 0)
1510 return r;
1511
1512 r = pidfd_get_pid(pidfd, &pid);
1513 if (r < 0)
1514 return r;
1515
1516 if (hashmap_contains(e->child_sources, PID_TO_PTR(pid)))
1517 return -EBUSY;
1518
1519 s = source_new(e, !ret, SOURCE_CHILD);
1520 if (!s)
1521 return -ENOMEM;
1522
1523 s->wakeup = WAKEUP_EVENT_SOURCE;
1524 s->child.pidfd = pidfd;
1525 s->child.pid = pid;
1526 s->child.options = options;
1527 s->child.callback = callback;
1528 s->child.pidfd_owned = false; /* If we got the pidfd passed in we don't own it by default (similar to the IO fd case) */
1529 s->userdata = userdata;
1530 s->enabled = SD_EVENT_ONESHOT;
1531
1532 r = hashmap_put(e->child_sources, PID_TO_PTR(pid), s);
1533 if (r < 0)
1534 return r;
1535
1536 if (EVENT_SOURCE_WATCH_PIDFD(s)) {
1537 /* We only want to watch for WEXITED */
1538 r = source_child_pidfd_register(s, s->enabled);
1539 if (r < 0)
1540 return r;
1541 } else {
1542 /* We shall wait for some other event than WEXITED */
1543 r = event_make_signal_data(e, SIGCHLD, NULL);
1544 if (r < 0)
1545 return r;
1546
1547 e->need_process_child = true;
1548 }
1549
1550 e->n_online_child_sources++;
1551
1552 if (ret)
1553 *ret = s;
1554 TAKE_PTR(s);
1555 return 0;
1556 }
1557
1558 static int generic_exit_callback(sd_event_source *s, void *userdata) {
1559 assert(s);
1560
1561 return sd_event_exit(sd_event_source_get_event(s), PTR_TO_INT(userdata));
1562 }
1563
1564 _public_ int sd_event_add_defer(
1565 sd_event *e,
1566 sd_event_source **ret,
1567 sd_event_handler_t callback,
1568 void *userdata) {
1569
1570 _cleanup_(source_freep) sd_event_source *s = NULL;
1571 int r;
1572
1573 assert_return(e, -EINVAL);
1574 assert_return(e = event_resolve(e), -ENOPKG);
1575 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
1576 assert_return(!event_pid_changed(e), -ECHILD);
1577
1578 if (!callback)
1579 callback = generic_exit_callback;
1580
1581 s = source_new(e, !ret, SOURCE_DEFER);
1582 if (!s)
1583 return -ENOMEM;
1584
1585 s->defer.callback = callback;
1586 s->userdata = userdata;
1587 s->enabled = SD_EVENT_ONESHOT;
1588
1589 r = source_set_pending(s, true);
1590 if (r < 0)
1591 return r;
1592
1593 if (ret)
1594 *ret = s;
1595 TAKE_PTR(s);
1596
1597 return 0;
1598 }
1599
1600 _public_ int sd_event_add_post(
1601 sd_event *e,
1602 sd_event_source **ret,
1603 sd_event_handler_t callback,
1604 void *userdata) {
1605
1606 _cleanup_(source_freep) sd_event_source *s = NULL;
1607 int r;
1608
1609 assert_return(e, -EINVAL);
1610 assert_return(e = event_resolve(e), -ENOPKG);
1611 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
1612 assert_return(!event_pid_changed(e), -ECHILD);
1613
1614 if (!callback)
1615 callback = generic_exit_callback;
1616
1617 s = source_new(e, !ret, SOURCE_POST);
1618 if (!s)
1619 return -ENOMEM;
1620
1621 s->post.callback = callback;
1622 s->userdata = userdata;
1623 s->enabled = SD_EVENT_ON;
1624
1625 r = set_ensure_put(&e->post_sources, NULL, s);
1626 if (r < 0)
1627 return r;
1628 assert(r > 0);
1629
1630 if (ret)
1631 *ret = s;
1632 TAKE_PTR(s);
1633
1634 return 0;
1635 }
1636
1637 _public_ int sd_event_add_exit(
1638 sd_event *e,
1639 sd_event_source **ret,
1640 sd_event_handler_t callback,
1641 void *userdata) {
1642
1643 _cleanup_(source_freep) sd_event_source *s = NULL;
1644 int r;
1645
1646 assert_return(e, -EINVAL);
1647 assert_return(e = event_resolve(e), -ENOPKG);
1648 assert_return(callback, -EINVAL);
1649 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
1650 assert_return(!event_pid_changed(e), -ECHILD);
1651
1652 r = prioq_ensure_allocated(&e->exit, exit_prioq_compare);
1653 if (r < 0)
1654 return r;
1655
1656 s = source_new(e, !ret, SOURCE_EXIT);
1657 if (!s)
1658 return -ENOMEM;
1659
1660 s->exit.callback = callback;
1661 s->userdata = userdata;
1662 s->exit.prioq_index = PRIOQ_IDX_NULL;
1663 s->enabled = SD_EVENT_ONESHOT;
1664
1665 r = prioq_put(s->event->exit, s, &s->exit.prioq_index);
1666 if (r < 0)
1667 return r;
1668
1669 if (ret)
1670 *ret = s;
1671 TAKE_PTR(s);
1672
1673 return 0;
1674 }
1675
1676 static void event_free_inotify_data(sd_event *e, struct inotify_data *d) {
1677 assert(e);
1678
1679 if (!d)
1680 return;
1681
1682 assert(hashmap_isempty(d->inodes));
1683 assert(hashmap_isempty(d->wd));
1684
1685 if (d->buffer_filled > 0)
1686 LIST_REMOVE(buffered, e->inotify_data_buffered, d);
1687
1688 hashmap_free(d->inodes);
1689 hashmap_free(d->wd);
1690
1691 assert_se(hashmap_remove(e->inotify_data, &d->priority) == d);
1692
1693 if (d->fd >= 0) {
1694 if (epoll_ctl(e->epoll_fd, EPOLL_CTL_DEL, d->fd, NULL) < 0)
1695 log_debug_errno(errno, "Failed to remove inotify fd from epoll, ignoring: %m");
1696
1697 safe_close(d->fd);
1698 }
1699 free(d);
1700 }
1701
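/* Ensures an inotify_data object (i.e. one inotify fd) exists for the given priority, allocating it and
 * registering its fd with epoll on first use. Returns 1 if a new object was created, 0 if one already
 * existed. */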
1702 static int event_make_inotify_data(
1703 sd_event *e,
1704 int64_t priority,
1705 struct inotify_data **ret) {
1706
1707 _cleanup_close_ int fd = -1;
1708 struct inotify_data *d;
1709 int r;
1710
1711 assert(e);
1712
1713 d = hashmap_get(e->inotify_data, &priority);
1714 if (d) {
1715 if (ret)
1716 *ret = d;
1717 return 0;
1718 }
1719
1720 fd = inotify_init1(IN_NONBLOCK|O_CLOEXEC);
1721 if (fd < 0)
1722 return -errno;
1723
1724 fd = fd_move_above_stdio(fd);
1725
1726 d = new(struct inotify_data, 1);
1727 if (!d)
1728 return -ENOMEM;
1729
1730 *d = (struct inotify_data) {
1731 .wakeup = WAKEUP_INOTIFY_DATA,
1732 .fd = TAKE_FD(fd),
1733 .priority = priority,
1734 };
1735
1736 r = hashmap_ensure_put(&e->inotify_data, &uint64_hash_ops, &d->priority, d);
1737 if (r < 0) {
1738 d->fd = safe_close(d->fd);
1739 free(d);
1740 return r;
1741 }
1742
1743 struct epoll_event ev = {
1744 .events = EPOLLIN,
1745 .data.ptr = d,
1746 };
1747
1748 if (epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, d->fd, &ev) < 0) {
1749 r = -errno;
1750 d->fd = safe_close(d->fd); /* let's close this ourselves, as event_free_inotify_data() would otherwise
1751 * remove the fd from the epoll first, which we don't want as we couldn't
1752 * add it in the first place. */
1753 event_free_inotify_data(e, d);
1754 return r;
1755 }
1756
1757 if (ret)
1758 *ret = d;
1759
1760 return 1;
1761 }
1762
1763 static int inode_data_compare(const struct inode_data *x, const struct inode_data *y) {
1764 int r;
1765
1766 assert(x);
1767 assert(y);
1768
1769 r = CMP(x->dev, y->dev);
1770 if (r != 0)
1771 return r;
1772
1773 return CMP(x->ino, y->ino);
1774 }
1775
1776 static void inode_data_hash_func(const struct inode_data *d, struct siphash *state) {
1777 assert(d);
1778
1779 siphash24_compress(&d->dev, sizeof(d->dev), state);
1780 siphash24_compress(&d->ino, sizeof(d->ino), state);
1781 }
1782
1783 DEFINE_PRIVATE_HASH_OPS(inode_data_hash_ops, struct inode_data, inode_data_hash_func, inode_data_compare);
1784
1785 static void event_free_inode_data(
1786 sd_event *e,
1787 struct inode_data *d) {
1788
1789 assert(e);
1790
1791 if (!d)
1792 return;
1793
1794 assert(!d->event_sources);
1795
1796 if (d->fd >= 0) {
1797 LIST_REMOVE(to_close, e->inode_data_to_close, d);
1798 safe_close(d->fd);
1799 }
1800
1801 if (d->inotify_data) {
1802
1803 if (d->wd >= 0) {
1804 if (d->inotify_data->fd >= 0) {
1805 /* So here's a problem. At the time this runs the watch descriptor might already be
1806 * invalidated, because an IN_IGNORED event might be queued right at the moment we enter
1807 * the syscall. Hence, whenever we get EINVAL, ignore it entirely, since it's a very
1808 * likely case to happen. */
1809
1810 if (inotify_rm_watch(d->inotify_data->fd, d->wd) < 0 && errno != EINVAL)
1811 log_debug_errno(errno, "Failed to remove watch descriptor %i from inotify, ignoring: %m", d->wd);
1812 }
1813
1814 assert_se(hashmap_remove(d->inotify_data->wd, INT_TO_PTR(d->wd)) == d);
1815 }
1816
1817 assert_se(hashmap_remove(d->inotify_data->inodes, d) == d);
1818 }
1819
1820 free(d);
1821 }
1822
1823 static void event_gc_inode_data(
1824 sd_event *e,
1825 struct inode_data *d) {
1826
1827 struct inotify_data *inotify_data;
1828
1829 assert(e);
1830
1831 if (!d)
1832 return;
1833
1834 if (d->event_sources)
1835 return;
1836
1837 inotify_data = d->inotify_data;
1838 event_free_inode_data(e, d);
1839
1840 if (inotify_data && hashmap_isempty(inotify_data->inodes))
1841 event_free_inotify_data(e, inotify_data);
1842 }
1843
1844 static int event_make_inode_data(
1845 sd_event *e,
1846 struct inotify_data *inotify_data,
1847 dev_t dev,
1848 ino_t ino,
1849 struct inode_data **ret) {
1850
1851 struct inode_data *d, key;
1852 int r;
1853
1854 assert(e);
1855 assert(inotify_data);
1856
1857 key = (struct inode_data) {
1858 .ino = ino,
1859 .dev = dev,
1860 };
1861
1862 d = hashmap_get(inotify_data->inodes, &key);
1863 if (d) {
1864 if (ret)
1865 *ret = d;
1866
1867 return 0;
1868 }
1869
1870 r = hashmap_ensure_allocated(&inotify_data->inodes, &inode_data_hash_ops);
1871 if (r < 0)
1872 return r;
1873
1874 d = new(struct inode_data, 1);
1875 if (!d)
1876 return -ENOMEM;
1877
1878 *d = (struct inode_data) {
1879 .dev = dev,
1880 .ino = ino,
1881 .wd = -1,
1882 .fd = -1,
1883 .inotify_data = inotify_data,
1884 };
1885
1886 r = hashmap_put(inotify_data->inodes, d, d);
1887 if (r < 0) {
1888 free(d);
1889 return r;
1890 }
1891
1892 if (ret)
1893 *ret = d;
1894
1895 return 1;
1896 }
1897
1898 static uint32_t inode_data_determine_mask(struct inode_data *d) {
1899 bool excl_unlink = true;
1900 uint32_t combined = 0;
1901 sd_event_source *s;
1902
1903 assert(d);
1904
1905 /* Combines the watch masks of all event sources watching this inode. We generally just OR them together, but
1906 * the IN_EXCL_UNLINK flag is ANDed instead.
1907 *
1908 * Note that we add all sources to the mask here, regardless of whether they are enabled, disabled or oneshot. That's
1909 * because we cannot change the mask anymore after the event source was created once, since the kernel has no
1910 * API for that. Hence we need to subscribe to the maximum mask we ever might be interested in, and suppress
1911 * events we don't care for client-side. */
1912
1913 LIST_FOREACH(inotify.by_inode_data, s, d->event_sources) {
1914
1915 if ((s->inotify.mask & IN_EXCL_UNLINK) == 0)
1916 excl_unlink = false;
1917
1918 combined |= s->inotify.mask;
1919 }
1920
1921 return (combined & ~(IN_ONESHOT|IN_DONT_FOLLOW|IN_ONLYDIR|IN_EXCL_UNLINK)) | (excl_unlink ? IN_EXCL_UNLINK : 0);
1922 }
1923
1924 static int inode_data_realize_watch(sd_event *e, struct inode_data *d) {
1925 uint32_t combined_mask;
1926 int wd, r;
1927
1928 assert(d);
1929 assert(d->fd >= 0);
1930
1931 combined_mask = inode_data_determine_mask(d);
1932
1933 if (d->wd >= 0 && combined_mask == d->combined_mask)
1934 return 0;
1935
1936 r = hashmap_ensure_allocated(&d->inotify_data->wd, NULL);
1937 if (r < 0)
1938 return r;
1939
1940 wd = inotify_add_watch_fd(d->inotify_data->fd, d->fd, combined_mask);
1941 if (wd < 0)
1942 return -errno;
1943
1944 if (d->wd < 0) {
1945 r = hashmap_put(d->inotify_data->wd, INT_TO_PTR(wd), d);
1946 if (r < 0) {
1947 (void) inotify_rm_watch(d->inotify_data->fd, wd);
1948 return r;
1949 }
1950
1951 d->wd = wd;
1952
1953 } else if (d->wd != wd) {
1954
1955 log_debug("Weird, the watch descriptor we already knew for this inode changed?");
1956 (void) inotify_rm_watch(d->fd, wd);
1957 return -EINVAL;
1958 }
1959
1960 d->combined_mask = combined_mask;
1961 return 1;
1962 }
1963
1964 static int inotify_exit_callback(sd_event_source *s, const struct inotify_event *event, void *userdata) {
1965 assert(s);
1966
1967 return sd_event_exit(sd_event_source_get_event(s), PTR_TO_INT(userdata));
1968 }
1969
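/* A minimal usage sketch (assuming an existing loop "e" and a hypothetical path), watching a directory
 * for newly created entries:
 *
 *     static int on_inotify(sd_event_source *s, const struct inotify_event *event, void *userdata) {
 *             return 0;
 *     }
 *
 *     r = sd_event_add_inotify(e, &source, "/run/example", IN_CREATE, on_inotify, NULL);
 */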
1970 _public_ int sd_event_add_inotify(
1971 sd_event *e,
1972 sd_event_source **ret,
1973 const char *path,
1974 uint32_t mask,
1975 sd_event_inotify_handler_t callback,
1976 void *userdata) {
1977
1978 struct inotify_data *inotify_data = NULL;
1979 struct inode_data *inode_data = NULL;
1980 _cleanup_close_ int fd = -1;
1981 _cleanup_(source_freep) sd_event_source *s = NULL;
1982 struct stat st;
1983 int r;
1984
1985 assert_return(e, -EINVAL);
1986 assert_return(e = event_resolve(e), -ENOPKG);
1987 assert_return(path, -EINVAL);
1988 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
1989 assert_return(!event_pid_changed(e), -ECHILD);
1990
1991 if (!callback)
1992 callback = inotify_exit_callback;
1993
1994 /* Refuse IN_MASK_ADD since we coalesce watches on the same inode, and hence really don't want to merge
1995 * masks. Or in other words, this whole code exists only to manage IN_MASK_ADD type operations for you, hence
1996 * the caller may not use IN_MASK_ADD themselves. */
1997 if (mask & IN_MASK_ADD)
1998 return -EINVAL;
1999
2000 fd = open(path, O_PATH|O_CLOEXEC|
2001 (mask & IN_ONLYDIR ? O_DIRECTORY : 0)|
2002 (mask & IN_DONT_FOLLOW ? O_NOFOLLOW : 0));
2003 if (fd < 0)
2004 return -errno;
2005
2006 if (fstat(fd, &st) < 0)
2007 return -errno;
2008
2009 s = source_new(e, !ret, SOURCE_INOTIFY);
2010 if (!s)
2011 return -ENOMEM;
2012
2013 s->enabled = mask & IN_ONESHOT ? SD_EVENT_ONESHOT : SD_EVENT_ON;
2014 s->inotify.mask = mask;
2015 s->inotify.callback = callback;
2016 s->userdata = userdata;
2017
2018 /* Allocate an inotify object for this priority, and an inode object within it */
2019 r = event_make_inotify_data(e, SD_EVENT_PRIORITY_NORMAL, &inotify_data);
2020 if (r < 0)
2021 return r;
2022
2023 r = event_make_inode_data(e, inotify_data, st.st_dev, st.st_ino, &inode_data);
2024 if (r < 0) {
2025 event_free_inotify_data(e, inotify_data);
2026 return r;
2027 }
2028
2029 /* Keep the O_PATH fd around until the first iteration of the loop, so that the priority of the event
2030 * source can still be changed until then, for which we need the original inode. */
2031 if (inode_data->fd < 0) {
2032 inode_data->fd = TAKE_FD(fd);
2033 LIST_PREPEND(to_close, e->inode_data_to_close, inode_data);
2034 }
2035
2036 /* Link our event source to the inode data object */
2037 LIST_PREPEND(inotify.by_inode_data, inode_data->event_sources, s);
2038 s->inotify.inode_data = inode_data;
2039
2040 /* Actually realize the watch now */
2041 r = inode_data_realize_watch(e, inode_data);
2042 if (r < 0)
2043 return r;
2044
2045 (void) sd_event_source_set_description(s, path);
2046
2047 if (ret)
2048 *ret = s;
2049 TAKE_PTR(s);
2050
2051 return 0;
2052 }
2053
2054 static sd_event_source* event_source_free(sd_event_source *s) {
2055 if (!s)
2056 return NULL;
2057
2058 /* Here's a special hack: when we are called from a
2059 * dispatch handler we won't free the event source
2060 * immediately, but we will detach the fd from the
2061 * epoll. This way it is safe for the caller to unref
2062 * the event source and immediately close the fd, but
2063 * we still retain a valid event source object after
2064 * the callback. */
2065
2066 if (s->dispatching) {
2067 if (s->type == SOURCE_IO)
2068 source_io_unregister(s);
2069
2070 source_disconnect(s);
2071 } else
2072 source_free(s);
2073
2074 return NULL;
2075 }
2076
2077 DEFINE_PUBLIC_TRIVIAL_REF_UNREF_FUNC(sd_event_source, sd_event_source, event_source_free);
2078
2079 _public_ int sd_event_source_set_description(sd_event_source *s, const char *description) {
2080 assert_return(s, -EINVAL);
2081 assert_return(!event_pid_changed(s->event), -ECHILD);
2082
2083 return free_and_strdup(&s->description, description);
2084 }
2085
2086 _public_ int sd_event_source_get_description(sd_event_source *s, const char **description) {
2087 assert_return(s, -EINVAL);
2088 assert_return(description, -EINVAL);
2089 assert_return(!event_pid_changed(s->event), -ECHILD);
2090
2091 if (!s->description)
2092 return -ENXIO;
2093
2094 *description = s->description;
2095 return 0;
2096 }
2097
2098 _public_ sd_event *sd_event_source_get_event(sd_event_source *s) {
2099 assert_return(s, NULL);
2100
2101 return s->event;
2102 }
2103
2104 _public_ int sd_event_source_get_pending(sd_event_source *s) {
2105 assert_return(s, -EINVAL);
2106 assert_return(s->type != SOURCE_EXIT, -EDOM);
2107 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
2108 assert_return(!event_pid_changed(s->event), -ECHILD);
2109
2110 return s->pending;
2111 }
2112
2113 _public_ int sd_event_source_get_io_fd(sd_event_source *s) {
2114 assert_return(s, -EINVAL);
2115 assert_return(s->type == SOURCE_IO, -EDOM);
2116 assert_return(!event_pid_changed(s->event), -ECHILD);
2117
2118 return s->io.fd;
2119 }
2120
2121 _public_ int sd_event_source_set_io_fd(sd_event_source *s, int fd) {
2122 int r;
2123
2124 assert_return(s, -EINVAL);
2125 assert_return(fd >= 0, -EBADF);
2126 assert_return(s->type == SOURCE_IO, -EDOM);
2127 assert_return(!event_pid_changed(s->event), -ECHILD);
2128
2129 if (s->io.fd == fd)
2130 return 0;
2131
2132 if (event_source_is_offline(s)) {
2133 s->io.fd = fd;
2134 s->io.registered = false;
2135 } else {
2136 int saved_fd;
2137
2138 saved_fd = s->io.fd;
2139 assert(s->io.registered);
2140
2141 s->io.fd = fd;
2142 s->io.registered = false;
2143
2144 r = source_io_register(s, s->enabled, s->io.events);
2145 if (r < 0) {
2146 s->io.fd = saved_fd;
2147 s->io.registered = true;
2148 return r;
2149 }
2150
2151 (void) epoll_ctl(s->event->epoll_fd, EPOLL_CTL_DEL, saved_fd, NULL);
2152 }
2153
2154 return 0;
2155 }
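
/*
 * A minimal usage sketch of sd_event_source_set_io_fd(): swapping the descriptor behind a live
 * IO source, e.g. after re-opening a connection, while keeping the source and its callback.
 * The caller is assumed to still own the old fd here (see sd_event_source_set_io_fd_own() below).
 */
static int replace_io_fd(sd_event_source *s, int old_fd, int new_fd) {
        int r;

        r = sd_event_source_set_io_fd(s, new_fd); /* re-registers new_fd in the epoll if the source is online */
        if (r < 0)
                return r; /* on failure the old registration stays intact */

        safe_close(old_fd); /* we still own the old fd in this sketch */
        return 0;
}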
2156
2157 _public_ int sd_event_source_get_io_fd_own(sd_event_source *s) {
2158 assert_return(s, -EINVAL);
2159 assert_return(s->type == SOURCE_IO, -EDOM);
2160
2161 return s->io.owned;
2162 }
2163
2164 _public_ int sd_event_source_set_io_fd_own(sd_event_source *s, int own) {
2165 assert_return(s, -EINVAL);
2166 assert_return(s->type == SOURCE_IO, -EDOM);
2167
2168 s->io.owned = own;
2169 return 0;
2170 }
2171
2172 _public_ int sd_event_source_get_io_events(sd_event_source *s, uint32_t* events) {
2173 assert_return(s, -EINVAL);
2174 assert_return(events, -EINVAL);
2175 assert_return(s->type == SOURCE_IO, -EDOM);
2176 assert_return(!event_pid_changed(s->event), -ECHILD);
2177
2178 *events = s->io.events;
2179 return 0;
2180 }
2181
2182 _public_ int sd_event_source_set_io_events(sd_event_source *s, uint32_t events) {
2183 int r;
2184
2185 assert_return(s, -EINVAL);
2186 assert_return(s->type == SOURCE_IO, -EDOM);
2187 assert_return(!(events & ~(EPOLLIN|EPOLLOUT|EPOLLRDHUP|EPOLLPRI|EPOLLERR|EPOLLHUP|EPOLLET)), -EINVAL);
2188 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
2189 assert_return(!event_pid_changed(s->event), -ECHILD);
2190
2191 /* edge-triggered updates are never skipped, so we can reset edges */
2192 if (s->io.events == events && !(events & EPOLLET))
2193 return 0;
2194
2195 r = source_set_pending(s, false);
2196 if (r < 0)
2197 return r;
2198
2199 if (event_source_is_online(s)) {
2200 r = source_io_register(s, s->enabled, events);
2201 if (r < 0)
2202 return r;
2203 }
2204
2205 s->io.events = events;
2206
2207 return 0;
2208 }
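
/*
 * A minimal usage sketch of sd_event_source_set_io_events(): toggling EPOLLOUT depending on whether
 * the caller has buffered output, a common pattern with level-triggered epoll. The have_pending_output
 * flag stands in for hypothetical caller state.
 */
static int update_io_interest(sd_event_source *s, bool have_pending_output) {
        uint32_t events = EPOLLIN;

        if (have_pending_output)
                events |= EPOLLOUT;

        /* If the mask is unchanged (and not edge-triggered) this is a cheap no-op, see above. */
        return sd_event_source_set_io_events(s, events);
}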
2209
2210 _public_ int sd_event_source_get_io_revents(sd_event_source *s, uint32_t* revents) {
2211 assert_return(s, -EINVAL);
2212 assert_return(revents, -EINVAL);
2213 assert_return(s->type == SOURCE_IO, -EDOM);
2214 assert_return(s->pending, -ENODATA);
2215 assert_return(!event_pid_changed(s->event), -ECHILD);
2216
2217 *revents = s->io.revents;
2218 return 0;
2219 }
2220
2221 _public_ int sd_event_source_get_signal(sd_event_source *s) {
2222 assert_return(s, -EINVAL);
2223 assert_return(s->type == SOURCE_SIGNAL, -EDOM);
2224 assert_return(!event_pid_changed(s->event), -ECHILD);
2225
2226 return s->signal.sig;
2227 }
2228
2229 _public_ int sd_event_source_get_priority(sd_event_source *s, int64_t *priority) {
2230 assert_return(s, -EINVAL);
2231 assert_return(!event_pid_changed(s->event), -ECHILD);
2232
2233 *priority = s->priority;
2234 return 0;
2235 }
2236
2237 _public_ int sd_event_source_set_priority(sd_event_source *s, int64_t priority) {
2238 bool rm_inotify = false, rm_inode = false;
2239 struct inotify_data *new_inotify_data = NULL;
2240 struct inode_data *new_inode_data = NULL;
2241 int r;
2242
2243 assert_return(s, -EINVAL);
2244 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
2245 assert_return(!event_pid_changed(s->event), -ECHILD);
2246
2247 if (s->priority == priority)
2248 return 0;
2249
2250 if (s->type == SOURCE_INOTIFY) {
2251 struct inode_data *old_inode_data;
2252
2253 assert(s->inotify.inode_data);
2254 old_inode_data = s->inotify.inode_data;
2255
2256 /* We need the original fd to change the priority. If we don't have it, we can't change the priority
2257 * anymore. Note that we close any fds when entering the next event loop iteration, i.e. for inotify
2258 * events we allow priority changes only until the first following iteration. */
2259 if (old_inode_data->fd < 0)
2260 return -EOPNOTSUPP;
2261
2262 r = event_make_inotify_data(s->event, priority, &new_inotify_data);
2263 if (r < 0)
2264 return r;
2265 rm_inotify = r > 0;
2266
2267 r = event_make_inode_data(s->event, new_inotify_data, old_inode_data->dev, old_inode_data->ino, &new_inode_data);
2268 if (r < 0)
2269 goto fail;
2270 rm_inode = r > 0;
2271
2272 if (new_inode_data->fd < 0) {
2273 /* Duplicate the fd for the new inode object if we don't have any yet */
2274 new_inode_data->fd = fcntl(old_inode_data->fd, F_DUPFD_CLOEXEC, 3);
2275 if (new_inode_data->fd < 0) {
2276 r = -errno;
2277 goto fail;
2278 }
2279
2280 LIST_PREPEND(to_close, s->event->inode_data_to_close, new_inode_data);
2281 }
2282
2283 /* Move the event source to the new inode data structure */
2284 LIST_REMOVE(inotify.by_inode_data, old_inode_data->event_sources, s);
2285 LIST_PREPEND(inotify.by_inode_data, new_inode_data->event_sources, s);
2286 s->inotify.inode_data = new_inode_data;
2287
2288 /* Now create the new watch */
2289 r = inode_data_realize_watch(s->event, new_inode_data);
2290 if (r < 0) {
2291 /* Move it back */
2292 LIST_REMOVE(inotify.by_inode_data, new_inode_data->event_sources, s);
2293 LIST_PREPEND(inotify.by_inode_data, old_inode_data->event_sources, s);
2294 s->inotify.inode_data = old_inode_data;
2295 goto fail;
2296 }
2297
2298 s->priority = priority;
2299
2300 event_gc_inode_data(s->event, old_inode_data);
2301
2302 } else if (s->type == SOURCE_SIGNAL && event_source_is_online(s)) {
2303 struct signal_data *old, *d;
2304
2305 /* Move us from the signalfd belonging to the old
2306 * priority to the signalfd of the new priority */
2307
2308 assert_se(old = hashmap_get(s->event->signal_data, &s->priority));
2309
2310 s->priority = priority;
2311
2312 r = event_make_signal_data(s->event, s->signal.sig, &d);
2313 if (r < 0) {
2314 s->priority = old->priority;
2315 return r;
2316 }
2317
2318 event_unmask_signal_data(s->event, old, s->signal.sig);
2319 } else
2320 s->priority = priority;
2321
2322 event_source_pp_prioq_reshuffle(s);
2323
2324 if (s->type == SOURCE_EXIT)
2325 prioq_reshuffle(s->event->exit, s, &s->exit.prioq_index);
2326
2327 return 0;
2328
2329 fail:
2330 if (rm_inode)
2331 event_free_inode_data(s->event, new_inode_data);
2332
2333 if (rm_inotify)
2334 event_free_inotify_data(s->event, new_inotify_data);
2335
2336 return r;
2337 }
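
/*
 * A minimal usage sketch: bumping the priority of an inotify source right after creating it. As
 * explained above, for inotify sources this only works until the first event loop iteration closes
 * the O_PATH fd, so the priority is best set immediately after sd_event_add_inotify().
 */
static int add_important_watch(sd_event *e, sd_event_source **ret, const char *path,
                               sd_event_inotify_handler_t callback, void *userdata) {
        _cleanup_(sd_event_source_unrefp) sd_event_source *s = NULL;
        int r;

        r = sd_event_add_inotify(e, &s, path, IN_MODIFY|IN_DELETE_SELF, callback, userdata);
        if (r < 0)
                return r;

        /* Must happen before the first iteration, while the original inode fd is still around. */
        r = sd_event_source_set_priority(s, SD_EVENT_PRIORITY_IMPORTANT);
        if (r < 0)
                return r;

        *ret = TAKE_PTR(s);
        return 0;
}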
2338
2339 _public_ int sd_event_source_get_enabled(sd_event_source *s, int *ret) {
2340 assert_return(s, -EINVAL);
2341 assert_return(!event_pid_changed(s->event), -ECHILD);
2342
2343 if (ret)
2344 *ret = s->enabled;
2345
2346 return s->enabled != SD_EVENT_OFF;
2347 }
2348
2349 static int event_source_offline(
2350 sd_event_source *s,
2351 int enabled,
2352 bool ratelimited) {
2353
2354 bool was_offline;
2355 int r;
2356
2357 assert(s);
2358 assert(enabled == SD_EVENT_OFF || ratelimited);
2359
2360 /* Unset the pending flag when this event source is disabled */
2361 if (s->enabled != SD_EVENT_OFF &&
2362 enabled == SD_EVENT_OFF &&
2363 !IN_SET(s->type, SOURCE_DEFER, SOURCE_EXIT)) {
2364 r = source_set_pending(s, false);
2365 if (r < 0)
2366 return r;
2367 }
2368
2369 was_offline = event_source_is_offline(s);
2370 s->enabled = enabled;
2371 s->ratelimited = ratelimited;
2372
2373 switch (s->type) {
2374
2375 case SOURCE_IO:
2376 source_io_unregister(s);
2377 break;
2378
2379 case SOURCE_TIME_REALTIME:
2380 case SOURCE_TIME_BOOTTIME:
2381 case SOURCE_TIME_MONOTONIC:
2382 case SOURCE_TIME_REALTIME_ALARM:
2383 case SOURCE_TIME_BOOTTIME_ALARM:
2384 event_source_time_prioq_reshuffle(s);
2385 break;
2386
2387 case SOURCE_SIGNAL:
2388 event_gc_signal_data(s->event, &s->priority, s->signal.sig);
2389 break;
2390
2391 case SOURCE_CHILD:
2392 if (!was_offline) {
2393 assert(s->event->n_online_child_sources > 0);
2394 s->event->n_online_child_sources--;
2395 }
2396
2397 if (EVENT_SOURCE_WATCH_PIDFD(s))
2398 source_child_pidfd_unregister(s);
2399 else
2400 event_gc_signal_data(s->event, &s->priority, SIGCHLD);
2401 break;
2402
2403 case SOURCE_EXIT:
2404 prioq_reshuffle(s->event->exit, s, &s->exit.prioq_index);
2405 break;
2406
2407 case SOURCE_DEFER:
2408 case SOURCE_POST:
2409 case SOURCE_INOTIFY:
2410 break;
2411
2412 default:
2413 assert_not_reached("Wut? I shouldn't exist.");
2414 }
2415
2416 return 1;
2417 }
2418
2419 static int event_source_online(
2420 sd_event_source *s,
2421 int enabled,
2422 bool ratelimited) {
2423
2424 bool was_online;
2425 int r;
2426
2427 assert(s);
2428 assert(enabled != SD_EVENT_OFF || !ratelimited);
2429
2430 /* Unset the pending flag when this event source is enabled */
2431 if (s->enabled == SD_EVENT_OFF &&
2432 enabled != SD_EVENT_OFF &&
2433 !IN_SET(s->type, SOURCE_DEFER, SOURCE_EXIT)) {
2434 r = source_set_pending(s, false);
2435 if (r < 0)
2436 return r;
2437 }
2438
2439 /* Are we really ready for onlining? */
2440 if (enabled == SD_EVENT_OFF || ratelimited) {
2441 /* Nope, we are not ready for onlining, so just update the precise state and exit */
2442 s->enabled = enabled;
2443 s->ratelimited = ratelimited;
2444 return 0;
2445 }
2446
2447 was_online = event_source_is_online(s);
2448
2449 switch (s->type) {
2450 case SOURCE_IO:
2451 r = source_io_register(s, enabled, s->io.events);
2452 if (r < 0)
2453 return r;
2454 break;
2455
2456 case SOURCE_SIGNAL:
2457 r = event_make_signal_data(s->event, s->signal.sig, NULL);
2458 if (r < 0) {
2459 event_gc_signal_data(s->event, &s->priority, s->signal.sig);
2460 return r;
2461 }
2462
2463 break;
2464
2465 case SOURCE_CHILD:
2466 if (EVENT_SOURCE_WATCH_PIDFD(s)) {
2467 /* yes, we have pidfd */
2468
2469 r = source_child_pidfd_register(s, enabled);
2470 if (r < 0)
2471 return r;
2472 } else {
2473 /* no pidfd, or something other than WEXITED to watch for */
2474
2475 r = event_make_signal_data(s->event, SIGCHLD, NULL);
2476 if (r < 0) {
2477 event_gc_signal_data(s->event, &s->priority, SIGCHLD);
2478 return r;
2479 }
2480 }
2481
2482 if (!was_online)
2483 s->event->n_online_child_sources++;
2484 break;
2485
2486 case SOURCE_TIME_REALTIME:
2487 case SOURCE_TIME_BOOTTIME:
2488 case SOURCE_TIME_MONOTONIC:
2489 case SOURCE_TIME_REALTIME_ALARM:
2490 case SOURCE_TIME_BOOTTIME_ALARM:
2491 case SOURCE_EXIT:
2492 case SOURCE_DEFER:
2493 case SOURCE_POST:
2494 case SOURCE_INOTIFY:
2495 break;
2496
2497 default:
2498 assert_not_reached("Wut? I shouldn't exist.");
2499 }
2500
2501 s->enabled = enabled;
2502 s->ratelimited = ratelimited;
2503
2504 /* Non-failing operations below */
2505 switch (s->type) {
2506 case SOURCE_TIME_REALTIME:
2507 case SOURCE_TIME_BOOTTIME:
2508 case SOURCE_TIME_MONOTONIC:
2509 case SOURCE_TIME_REALTIME_ALARM:
2510 case SOURCE_TIME_BOOTTIME_ALARM:
2511 event_source_time_prioq_reshuffle(s);
2512 break;
2513
2514 case SOURCE_EXIT:
2515 prioq_reshuffle(s->event->exit, s, &s->exit.prioq_index);
2516 break;
2517
2518 default:
2519 break;
2520 }
2521
2522 return 1;
2523 }
2524
2525 _public_ int sd_event_source_set_enabled(sd_event_source *s, int m) {
2526 int r;
2527
2528 assert_return(s, -EINVAL);
2529 assert_return(IN_SET(m, SD_EVENT_OFF, SD_EVENT_ON, SD_EVENT_ONESHOT), -EINVAL);
2530 assert_return(!event_pid_changed(s->event), -ECHILD);
2531
2532 /* If we are dead anyway, we are fine with turning off sources, but everything else needs to fail. */
2533 if (s->event->state == SD_EVENT_FINISHED)
2534 return m == SD_EVENT_OFF ? 0 : -ESTALE;
2535
2536 if (s->enabled == m) /* No change? */
2537 return 0;
2538
2539 if (m == SD_EVENT_OFF)
2540 r = event_source_offline(s, m, s->ratelimited);
2541 else {
2542 if (s->enabled != SD_EVENT_OFF) {
2543 /* Switching from "on" to "oneshot" or back? If that's the case, we can take a shortcut: the
2544 * event source is already enabled after all. */
2545 s->enabled = m;
2546 return 0;
2547 }
2548
2549 r = event_source_online(s, m, s->ratelimited);
2550 }
2551 if (r < 0)
2552 return r;
2553
2554 event_source_pp_prioq_reshuffle(s);
2555 return 0;
2556 }
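
/*
 * A minimal usage sketch: re-arming a ONESHOT source from its own callback. The source is switched to
 * SD_EVENT_OFF before dispatching (see source_dispatch() below), so the handler may simply turn it
 * back on when it wants to be called again.
 */
static int oneshot_handler(sd_event_source *s, void *userdata) {
        /* ... do one unit of work ... */

        return sd_event_source_set_enabled(s, SD_EVENT_ONESHOT);
}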
2557
2558 _public_ int sd_event_source_get_time(sd_event_source *s, uint64_t *usec) {
2559 assert_return(s, -EINVAL);
2560 assert_return(usec, -EINVAL);
2561 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
2562 assert_return(!event_pid_changed(s->event), -ECHILD);
2563
2564 *usec = s->time.next;
2565 return 0;
2566 }
2567
2568 _public_ int sd_event_source_set_time(sd_event_source *s, uint64_t usec) {
2569 int r;
2570
2571 assert_return(s, -EINVAL);
2572 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
2573 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
2574 assert_return(!event_pid_changed(s->event), -ECHILD);
2575
2576 r = source_set_pending(s, false);
2577 if (r < 0)
2578 return r;
2579
2580 s->time.next = usec;
2581
2582 event_source_time_prioq_reshuffle(s);
2583 return 0;
2584 }
2585
2586 _public_ int sd_event_source_set_time_relative(sd_event_source *s, uint64_t usec) {
2587 usec_t t;
2588 int r;
2589
2590 assert_return(s, -EINVAL);
2591 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
2592
2593 r = sd_event_now(s->event, event_source_type_to_clock(s->type), &t);
2594 if (r < 0)
2595 return r;
2596
2597 if (usec >= USEC_INFINITY - t)
2598 return -EOVERFLOW;
2599
2600 return sd_event_source_set_time(s, t + usec);
2601 }
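
/*
 * A minimal usage sketch: a periodic timer built by re-arming the source from its own callback with
 * sd_event_source_set_time_relative() and flipping it back to ONESHOT.
 */
static int on_tick(sd_event_source *s, uint64_t usec, void *userdata) {
        int r;

        /* ... periodic work ... */

        r = sd_event_source_set_time_relative(s, USEC_PER_SEC); /* next tick one second from now */
        if (r < 0)
                return r;

        return sd_event_source_set_enabled(s, SD_EVENT_ONESHOT);
}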
2602
2603 _public_ int sd_event_source_get_time_accuracy(sd_event_source *s, uint64_t *usec) {
2604 assert_return(s, -EINVAL);
2605 assert_return(usec, -EINVAL);
2606 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
2607 assert_return(!event_pid_changed(s->event), -ECHILD);
2608
2609 *usec = s->time.accuracy;
2610 return 0;
2611 }
2612
2613 _public_ int sd_event_source_set_time_accuracy(sd_event_source *s, uint64_t usec) {
2614 int r;
2615
2616 assert_return(s, -EINVAL);
2617 assert_return(usec != (uint64_t) -1, -EINVAL);
2618 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
2619 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
2620 assert_return(!event_pid_changed(s->event), -ECHILD);
2621
2622 r = source_set_pending(s, false);
2623 if (r < 0)
2624 return r;
2625
2626 if (usec == 0)
2627 usec = DEFAULT_ACCURACY_USEC;
2628
2629 s->time.accuracy = usec;
2630
2631 event_source_time_prioq_reshuffle(s);
2632 return 0;
2633 }
2634
2635 _public_ int sd_event_source_get_time_clock(sd_event_source *s, clockid_t *clock) {
2636 assert_return(s, -EINVAL);
2637 assert_return(clock, -EINVAL);
2638 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
2639 assert_return(!event_pid_changed(s->event), -ECHILD);
2640
2641 *clock = event_source_type_to_clock(s->type);
2642 return 0;
2643 }
2644
2645 _public_ int sd_event_source_get_child_pid(sd_event_source *s, pid_t *pid) {
2646 assert_return(s, -EINVAL);
2647 assert_return(pid, -EINVAL);
2648 assert_return(s->type == SOURCE_CHILD, -EDOM);
2649 assert_return(!event_pid_changed(s->event), -ECHILD);
2650
2651 *pid = s->child.pid;
2652 return 0;
2653 }
2654
2655 _public_ int sd_event_source_get_child_pidfd(sd_event_source *s) {
2656 assert_return(s, -EINVAL);
2657 assert_return(s->type == SOURCE_CHILD, -EDOM);
2658 assert_return(!event_pid_changed(s->event), -ECHILD);
2659
2660 if (s->child.pidfd < 0)
2661 return -EOPNOTSUPP;
2662
2663 return s->child.pidfd;
2664 }
2665
2666 _public_ int sd_event_source_send_child_signal(sd_event_source *s, int sig, const siginfo_t *si, unsigned flags) {
2667 assert_return(s, -EINVAL);
2668 assert_return(s->type == SOURCE_CHILD, -EDOM);
2669 assert_return(!event_pid_changed(s->event), -ECHILD);
2670 assert_return(SIGNAL_VALID(sig), -EINVAL);
2671
2672 /* If we have already seen an indication that the process exited, refuse to send a signal early. This way we
2673 * can be sure we don't accidentally kill the wrong process on PID reuse when pidfds are not
2674 * available. */
2675 if (s->child.exited)
2676 return -ESRCH;
2677
2678 if (s->child.pidfd >= 0) {
2679 siginfo_t copy;
2680
2681 /* pidfd_send_signal() changes the siginfo_t argument. This is weird, let's hence copy the
2682 * structure here */
2683 if (si)
2684 copy = *si;
2685
2686 if (pidfd_send_signal(s->child.pidfd, sig, si ? &copy : NULL, 0) < 0) {
2687 /* Let's propagate the error only if the system call is not implemented or prohibited */
2688 if (!ERRNO_IS_NOT_SUPPORTED(errno) && !ERRNO_IS_PRIVILEGE(errno))
2689 return -errno;
2690 } else
2691 return 0;
2692 }
2693
2694 /* Flags are only supported for pidfd_send_signal(), not for rt_sigqueueinfo(), hence let's refuse
2695 * this here. */
2696 if (flags != 0)
2697 return -EOPNOTSUPP;
2698
2699 if (si) {
2700 /* We use rt_sigqueueinfo() only if siginfo_t is specified. */
2701 siginfo_t copy = *si;
2702
2703 if (rt_sigqueueinfo(s->child.pid, sig, &copy) < 0)
2704 return -errno;
2705 } else if (kill(s->child.pid, sig) < 0)
2706 return -errno;
2707
2708 return 0;
2709 }
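
/*
 * A minimal usage sketch: asking a tracked child to terminate. The call above refuses to send anything
 * once the child is known to have exited, and prefers the pidfd when one is available, so this is safe
 * against PID reuse where the kernel supports it.
 */
static void request_child_exit(sd_event_source *child_source) {
        int r;

        r = sd_event_source_send_child_signal(child_source, SIGTERM, NULL, 0);
        if (r == -ESRCH)
                log_debug("Child already exited, nothing to signal.");
        else if (r < 0)
                log_debug_errno(r, "Failed to signal child: %m");
}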
2710
2711 _public_ int sd_event_source_get_child_pidfd_own(sd_event_source *s) {
2712 assert_return(s, -EINVAL);
2713 assert_return(s->type == SOURCE_CHILD, -EDOM);
2714
2715 if (s->child.pidfd < 0)
2716 return -EOPNOTSUPP;
2717
2718 return s->child.pidfd_owned;
2719 }
2720
2721 _public_ int sd_event_source_set_child_pidfd_own(sd_event_source *s, int own) {
2722 assert_return(s, -EINVAL);
2723 assert_return(s->type == SOURCE_CHILD, -EDOM);
2724
2725 if (s->child.pidfd < 0)
2726 return -EOPNOTSUPP;
2727
2728 s->child.pidfd_owned = own;
2729 return 0;
2730 }
2731
2732 _public_ int sd_event_source_get_child_process_own(sd_event_source *s) {
2733 assert_return(s, -EINVAL);
2734 assert_return(s->type == SOURCE_CHILD, -EDOM);
2735
2736 return s->child.process_owned;
2737 }
2738
2739 _public_ int sd_event_source_set_child_process_own(sd_event_source *s, int own) {
2740 assert_return(s, -EINVAL);
2741 assert_return(s->type == SOURCE_CHILD, -EDOM);
2742
2743 s->child.process_owned = own;
2744 return 0;
2745 }
2746
2747 _public_ int sd_event_source_get_inotify_mask(sd_event_source *s, uint32_t *mask) {
2748 assert_return(s, -EINVAL);
2749 assert_return(mask, -EINVAL);
2750 assert_return(s->type == SOURCE_INOTIFY, -EDOM);
2751 assert_return(!event_pid_changed(s->event), -ECHILD);
2752
2753 *mask = s->inotify.mask;
2754 return 0;
2755 }
2756
2757 _public_ int sd_event_source_set_prepare(sd_event_source *s, sd_event_handler_t callback) {
2758 int r;
2759
2760 assert_return(s, -EINVAL);
2761 assert_return(s->type != SOURCE_EXIT, -EDOM);
2762 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
2763 assert_return(!event_pid_changed(s->event), -ECHILD);
2764
2765 if (s->prepare == callback)
2766 return 0;
2767
2768 if (callback && s->prepare) {
2769 s->prepare = callback;
2770 return 0;
2771 }
2772
2773 r = prioq_ensure_allocated(&s->event->prepare, prepare_prioq_compare);
2774 if (r < 0)
2775 return r;
2776
2777 s->prepare = callback;
2778
2779 if (callback) {
2780 r = prioq_put(s->event->prepare, s, &s->prepare_index);
2781 if (r < 0)
2782 return r;
2783 } else
2784 prioq_remove(s->event->prepare, s, &s->prepare_index);
2785
2786 return 0;
2787 }
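
/*
 * A minimal usage sketch: a prepare callback that runs just before each poll and enables an IO source
 * only while there is queued output to flush. The connection type and its output_pending flag are
 * hypothetical caller state.
 */
static int prepare_output(sd_event_source *s, void *userdata) {
        struct connection *c = userdata; /* hypothetical per-connection state */

        return sd_event_source_set_enabled(s, c->output_pending ? SD_EVENT_ON : SD_EVENT_OFF);
}

/* Registered once on the IO source with: sd_event_source_set_prepare(io_source, prepare_output); */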
2788
2789 _public_ void* sd_event_source_get_userdata(sd_event_source *s) {
2790 assert_return(s, NULL);
2791
2792 return s->userdata;
2793 }
2794
2795 _public_ void *sd_event_source_set_userdata(sd_event_source *s, void *userdata) {
2796 void *ret;
2797
2798 assert_return(s, NULL);
2799
2800 ret = s->userdata;
2801 s->userdata = userdata;
2802
2803 return ret;
2804 }
2805
2806 static int event_source_enter_ratelimited(sd_event_source *s) {
2807 int r;
2808
2809 assert(s);
2810
2811 /* When an event source becomes ratelimited, we place it in the CLOCK_MONOTONIC priority queue, with
2812 * the end of the rate limit time window as its expiry, much as if it were a timer event source. */
2813
2814 if (s->ratelimited)
2815 return 0; /* Already ratelimited, this is a NOP hence */
2816
2817 /* Make sure we can install a CLOCK_MONOTONIC event further down. */
2818 r = setup_clock_data(s->event, &s->event->monotonic, CLOCK_MONOTONIC);
2819 if (r < 0)
2820 return r;
2821
2822 /* Timer event sources are already using the earliest/latest queues for the timer scheduling. Let's
2823 * first remove them from the prioq appropriate for their own clock, so that we can use the prioq
2824 * fields of the event source then for adding it to the CLOCK_MONOTONIC prioq instead. */
2825 if (EVENT_SOURCE_IS_TIME(s->type))
2826 event_source_time_prioq_remove(s, event_get_clock_data(s->event, s->type));
2827
2828 /* Now, let's add the event source to the monotonic clock instead */
2829 r = event_source_time_prioq_put(s, &s->event->monotonic);
2830 if (r < 0)
2831 goto fail;
2832
2833 /* And let's take the event source officially offline */
2834 r = event_source_offline(s, s->enabled, /* ratelimited= */ true);
2835 if (r < 0) {
2836 event_source_time_prioq_remove(s, &s->event->monotonic);
2837 goto fail;
2838 }
2839
2840 event_source_pp_prioq_reshuffle(s);
2841
2842 log_debug("Event source %p (%s) entered rate limit state.", s, strna(s->description));
2843 return 0;
2844
2845 fail:
2846 /* Reinstall time event sources in the priority queue as before. This shouldn't fail, since the queue
2847 * space for it should already be allocated. */
2848 if (EVENT_SOURCE_IS_TIME(s->type))
2849 assert_se(event_source_time_prioq_put(s, event_get_clock_data(s->event, s->type)) >= 0);
2850
2851 return r;
2852 }
2853
2854 static int event_source_leave_ratelimit(sd_event_source *s) {
2855 int r;
2856
2857 assert(s);
2858
2859 if (!s->ratelimited)
2860 return 0;
2861
2862 /* Let's take the event source out of the monotonic prioq first. */
2863 event_source_time_prioq_remove(s, &s->event->monotonic);
2864
2865 /* Let's then add the event source to its native clock prioq again — if this is a timer event source */
2866 if (EVENT_SOURCE_IS_TIME(s->type)) {
2867 r = event_source_time_prioq_put(s, event_get_clock_data(s->event, s->type));
2868 if (r < 0)
2869 goto fail;
2870 }
2871
2872 /* Let's try to take it online again. */
2873 r = event_source_online(s, s->enabled, /* ratelimited= */ false);
2874 if (r < 0) {
2875 /* Do something roughly sensible when this failed: undo the two prioq ops above */
2876 if (EVENT_SOURCE_IS_TIME(s->type))
2877 event_source_time_prioq_remove(s, event_get_clock_data(s->event, s->type));
2878
2879 goto fail;
2880 }
2881
2882 event_source_pp_prioq_reshuffle(s);
2883 ratelimit_reset(&s->rate_limit);
2884
2885 log_debug("Event source %p (%s) left rate limit state.", s, strna(s->description));
2886 return 0;
2887
2888 fail:
2889 /* Do something somewhat reasonable when we cannot move an event source out of ratelimited mode:
2890 * simply put it back in it, maybe we can then process it more successfully next iteration. */
2891 assert_se(event_source_time_prioq_put(s, &s->event->monotonic) >= 0);
2892
2893 return r;
2894 }
2895
2896 static usec_t sleep_between(sd_event *e, usec_t a, usec_t b) {
2897 usec_t c;
2898 assert(e);
2899 assert(a <= b);
2900
2901 if (a <= 0)
2902 return 0;
2903 if (a >= USEC_INFINITY)
2904 return USEC_INFINITY;
2905
2906 if (b <= a + 1)
2907 return a;
2908
2909 initialize_perturb(e);
2910
2911 /*
2912 Find a good time to wake up again between times a and b. We
2913 have two goals here:
2914
2915 a) We want to wake up as seldom as possible, hence prefer
2916 later times over earlier times.
2917
2918 b) But if we have to wake up, then let's make sure to
2919 dispatch as much as possible on the entire system.
2920
2921 We implement this by waking up everywhere at the same time
2922 within any given minute if we can, synchronised via the
2923 perturbation value determined from the boot ID. If we can't,
2924 then we try to find the same spot within every 10s, then every 1s and
2925 then every 250ms step. Otherwise, we pick the last possible time
2926 to wake up.
2927 */
2928
2929 c = (b / USEC_PER_MINUTE) * USEC_PER_MINUTE + e->perturb;
2930 if (c >= b) {
2931 if (_unlikely_(c < USEC_PER_MINUTE))
2932 return b;
2933
2934 c -= USEC_PER_MINUTE;
2935 }
2936
2937 if (c >= a)
2938 return c;
2939
2940 c = (b / (USEC_PER_SEC*10)) * (USEC_PER_SEC*10) + (e->perturb % (USEC_PER_SEC*10));
2941 if (c >= b) {
2942 if (_unlikely_(c < USEC_PER_SEC*10))
2943 return b;
2944
2945 c -= USEC_PER_SEC*10;
2946 }
2947
2948 if (c >= a)
2949 return c;
2950
2951 c = (b / USEC_PER_SEC) * USEC_PER_SEC + (e->perturb % USEC_PER_SEC);
2952 if (c >= b) {
2953 if (_unlikely_(c < USEC_PER_SEC))
2954 return b;
2955
2956 c -= USEC_PER_SEC;
2957 }
2958
2959 if (c >= a)
2960 return c;
2961
2962 c = (b / (USEC_PER_MSEC*250)) * (USEC_PER_MSEC*250) + (e->perturb % (USEC_PER_MSEC*250));
2963 if (c >= b) {
2964 if (_unlikely_(c < USEC_PER_MSEC*250))
2965 return b;
2966
2967 c -= USEC_PER_MSEC*250;
2968 }
2969
2970 if (c >= a)
2971 return c;
2972
2973 return b;
2974 }
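
/*
 * Worked example for the coalescing scheme above: with a per-boot perturbation of 23s and a window of
 * a=80s .. b=110s (in CLOCK_MONOTONIC microseconds), b is rounded down to the start of its minute (60s)
 * and the perturbation added, yielding c=83s. Since 80s <= 83s < 110s, 83s is picked: every loop on
 * this machine whose window covers the 23rd second of a minute wakes up at that very second. Had 83s
 * fallen outside the window, the same procedure would be retried with 10s, 1s and 250ms steps, and
 * finally b itself would be used.
 */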
2975
2976 static int event_arm_timer(
2977 sd_event *e,
2978 struct clock_data *d) {
2979
2980 struct itimerspec its = {};
2981 sd_event_source *a, *b;
2982 usec_t t;
2983
2984 assert(e);
2985 assert(d);
2986
2987 if (!d->needs_rearm)
2988 return 0;
2989 else
2990 d->needs_rearm = false;
2991
2992 a = prioq_peek(d->earliest);
2993 if (!a || a->enabled == SD_EVENT_OFF || time_event_source_next(a) == USEC_INFINITY) {
2994
2995 if (d->fd < 0)
2996 return 0;
2997
2998 if (d->next == USEC_INFINITY)
2999 return 0;
3000
3001 /* disarm */
3002 if (timerfd_settime(d->fd, TFD_TIMER_ABSTIME, &its, NULL) < 0)
3003 return -errno;
3004
3005 d->next = USEC_INFINITY;
3006 return 0;
3007 }
3008
3009 b = prioq_peek(d->latest);
3010 assert_se(b && b->enabled != SD_EVENT_OFF);
3011
3012 t = sleep_between(e, time_event_source_next(a), time_event_source_latest(b));
3013 if (d->next == t)
3014 return 0;
3015
3016 assert_se(d->fd >= 0);
3017
3018 if (t == 0) {
3019 /* We don't want to disarm here, so just arm the timer with some time long ago. */
3020 its.it_value.tv_sec = 0;
3021 its.it_value.tv_nsec = 1;
3022 } else
3023 timespec_store(&its.it_value, t);
3024
3025 if (timerfd_settime(d->fd, TFD_TIMER_ABSTIME, &its, NULL) < 0)
3026 return -errno;
3027
3028 d->next = t;
3029 return 0;
3030 }
3031
3032 static int process_io(sd_event *e, sd_event_source *s, uint32_t revents) {
3033 assert(e);
3034 assert(s);
3035 assert(s->type == SOURCE_IO);
3036
3037 /* If the event source was already pending, we just OR in the
3038 * new revents, otherwise we reset the value. The ORing is
3039 * necessary to handle EPOLLONESHOT events properly where
3040 * readability might happen independently of writability, and
3041 * we need to keep track of both */
3042
3043 if (s->pending)
3044 s->io.revents |= revents;
3045 else
3046 s->io.revents = revents;
3047
3048 return source_set_pending(s, true);
3049 }
3050
3051 static int flush_timer(sd_event *e, int fd, uint32_t events, usec_t *next) {
3052 uint64_t x;
3053 ssize_t ss;
3054
3055 assert(e);
3056 assert(fd >= 0);
3057
3058 assert_return(events == EPOLLIN, -EIO);
3059
3060 ss = read(fd, &x, sizeof(x));
3061 if (ss < 0) {
3062 if (IN_SET(errno, EAGAIN, EINTR))
3063 return 0;
3064
3065 return -errno;
3066 }
3067
3068 if (_unlikely_(ss != sizeof(x)))
3069 return -EIO;
3070
3071 if (next)
3072 *next = USEC_INFINITY;
3073
3074 return 0;
3075 }
3076
3077 static int process_timer(
3078 sd_event *e,
3079 usec_t n,
3080 struct clock_data *d) {
3081
3082 sd_event_source *s;
3083 int r;
3084
3085 assert(e);
3086 assert(d);
3087
3088 for (;;) {
3089 s = prioq_peek(d->earliest);
3090 if (!s || time_event_source_next(s) > n)
3091 break;
3092
3093 if (s->ratelimited) {
3094 /* This is an event source whose ratelimit window has ended. Let's turn it on
3095 * again. */
3096 assert(s->ratelimited);
3097
3098 r = event_source_leave_ratelimit(s);
3099 if (r < 0)
3100 return r;
3101
3102 continue;
3103 }
3104
3105 if (s->enabled == SD_EVENT_OFF || s->pending)
3106 break;
3107
3108 r = source_set_pending(s, true);
3109 if (r < 0)
3110 return r;
3111
3112 event_source_time_prioq_reshuffle(s);
3113 }
3114
3115 return 0;
3116 }
3117
3118 static int process_child(sd_event *e) {
3119 sd_event_source *s;
3120 int r;
3121
3122 assert(e);
3123
3124 e->need_process_child = false;
3125
3126 /*
3127 So, this is ugly. We iteratively invoke waitid() with P_PID
3128 + WNOHANG for each PID we wait for, instead of using
3129 P_ALL. This is because we only want to get child
3130 information of very specific child processes, and not all
3131 of them. We might not have processed the SIGCHLD event of a
3132 previous invocation and we don't want to maintain an
3133 unbounded *per-child* event queue, hence we really don't
3134 want anything flushed out of the kernel's queue that we
3135 don't care about. Since this is O(n) this means that if you
3136 have a lot of processes you probably want to handle SIGCHLD
3137 yourself.
3138
3139 We do not reap the children here (by using WNOWAIT), this
3140 is only done after the event source is dispatched so that
3141 the callback still sees the process as a zombie.
3142 */
3143
3144 HASHMAP_FOREACH(s, e->child_sources) {
3145 assert(s->type == SOURCE_CHILD);
3146
3147 if (s->pending)
3148 continue;
3149
3150 if (event_source_is_offline(s))
3151 continue;
3152
3153 if (s->child.exited)
3154 continue;
3155
3156 if (EVENT_SOURCE_WATCH_PIDFD(s)) /* There's a usable pidfd known for this event source? Then don't waitid() for it here. */
3157 continue;
3158
3159 zero(s->child.siginfo);
3160 if (waitid(P_PID, s->child.pid, &s->child.siginfo,
3161 WNOHANG | (s->child.options & WEXITED ? WNOWAIT : 0) | s->child.options) < 0)
3162 return -errno;
3163
3164 if (s->child.siginfo.si_pid != 0) {
3165 bool zombie = IN_SET(s->child.siginfo.si_code, CLD_EXITED, CLD_KILLED, CLD_DUMPED);
3166
3167 if (zombie)
3168 s->child.exited = true;
3169
3170 if (!zombie && (s->child.options & WEXITED)) {
3171 /* If the child isn't dead then let's
3172 * immediately remove the state change
3173 * from the queue, since there's no
3174 * benefit in leaving it queued */
3175
3176 assert(s->child.options & (WSTOPPED|WCONTINUED));
3177 (void) waitid(P_PID, s->child.pid, &s->child.siginfo, WNOHANG|(s->child.options & (WSTOPPED|WCONTINUED)));
3178 }
3179
3180 r = source_set_pending(s, true);
3181 if (r < 0)
3182 return r;
3183 }
3184 }
3185
3186 return 0;
3187 }
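
/*
 * A minimal usage sketch of the child machinery described above: watching a forked child with
 * sd_event_add_child(). SIGCHLD must be blocked in all threads before the source is added; the handler
 * sees the child while it is still a zombie, and it is reaped right after the callback returns.
 */
static int on_child_exit(sd_event_source *s, const siginfo_t *si, void *userdata) {
        log_debug("Child exited with status %i.", si->si_status);
        return sd_event_exit(sd_event_source_get_event(s), 0);
}

static int watch_child(sd_event *e, pid_t pid, sd_event_source **ret) {
        assert_se(sigprocmask_many(SIG_BLOCK, NULL, SIGCHLD, -1) >= 0);

        return sd_event_add_child(e, ret, pid, WEXITED, on_child_exit, NULL);
}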
3188
3189 static int process_pidfd(sd_event *e, sd_event_source *s, uint32_t revents) {
3190 assert(e);
3191 assert(s);
3192 assert(s->type == SOURCE_CHILD);
3193
3194 if (s->pending)
3195 return 0;
3196
3197 if (event_source_is_offline(s))
3198 return 0;
3199
3200 if (!EVENT_SOURCE_WATCH_PIDFD(s))
3201 return 0;
3202
3203 zero(s->child.siginfo);
3204 if (waitid(P_PID, s->child.pid, &s->child.siginfo, WNOHANG | WNOWAIT | s->child.options) < 0)
3205 return -errno;
3206
3207 if (s->child.siginfo.si_pid == 0)
3208 return 0;
3209
3210 if (IN_SET(s->child.siginfo.si_code, CLD_EXITED, CLD_KILLED, CLD_DUMPED))
3211 s->child.exited = true;
3212
3213 return source_set_pending(s, true);
3214 }
3215
3216 static int process_signal(sd_event *e, struct signal_data *d, uint32_t events) {
3217 bool read_one = false;
3218 int r;
3219
3220 assert(e);
3221 assert(d);
3222 assert_return(events == EPOLLIN, -EIO);
3223
3224 /* If there's a signal queued on this priority and SIGCHLD is
3225 on this priority too, then make sure to recheck the
3226 children we watch. This is because we only ever dequeue
3227 the first signal per priority, and if we dequeue one,
3228 SIGCHLD might be enqueued later without us knowing, but we
3229 might have higher priority children we care about, hence we
3230 need to check that explicitly. */
3231
3232 if (sigismember(&d->sigset, SIGCHLD))
3233 e->need_process_child = true;
3234
3235 /* If there's already an event source pending for this
3236 * priority we don't read another */
3237 if (d->current)
3238 return 0;
3239
3240 for (;;) {
3241 struct signalfd_siginfo si;
3242 ssize_t n;
3243 sd_event_source *s = NULL;
3244
3245 n = read(d->fd, &si, sizeof(si));
3246 if (n < 0) {
3247 if (IN_SET(errno, EAGAIN, EINTR))
3248 return read_one;
3249
3250 return -errno;
3251 }
3252
3253 if (_unlikely_(n != sizeof(si)))
3254 return -EIO;
3255
3256 assert(SIGNAL_VALID(si.ssi_signo));
3257
3258 read_one = true;
3259
3260 if (e->signal_sources)
3261 s = e->signal_sources[si.ssi_signo];
3262 if (!s)
3263 continue;
3264 if (s->pending)
3265 continue;
3266
3267 s->signal.siginfo = si;
3268 d->current = s;
3269
3270 r = source_set_pending(s, true);
3271 if (r < 0)
3272 return r;
3273
3274 return 1;
3275 }
3276 }
3277
3278 static int event_inotify_data_read(sd_event *e, struct inotify_data *d, uint32_t revents) {
3279 ssize_t n;
3280
3281 assert(e);
3282 assert(d);
3283
3284 assert_return(revents == EPOLLIN, -EIO);
3285
3286 /* If there's already an event source pending for this priority, don't read another */
3287 if (d->n_pending > 0)
3288 return 0;
3289
3290 /* Is the read buffer non-empty? If so, let's not read more */
3291 if (d->buffer_filled > 0)
3292 return 0;
3293
3294 n = read(d->fd, &d->buffer, sizeof(d->buffer));
3295 if (n < 0) {
3296 if (IN_SET(errno, EAGAIN, EINTR))
3297 return 0;
3298
3299 return -errno;
3300 }
3301
3302 assert(n > 0);
3303 d->buffer_filled = (size_t) n;
3304 LIST_PREPEND(buffered, e->inotify_data_buffered, d);
3305
3306 return 1;
3307 }
3308
3309 static void event_inotify_data_drop(sd_event *e, struct inotify_data *d, size_t sz) {
3310 assert(e);
3311 assert(d);
3312 assert(sz <= d->buffer_filled);
3313
3314 if (sz == 0)
3315 return;
3316
3317 /* Move the rest of the buffer to the front, in order to get things properly aligned again */
3318 memmove(d->buffer.raw, d->buffer.raw + sz, d->buffer_filled - sz);
3319 d->buffer_filled -= sz;
3320
3321 if (d->buffer_filled == 0)
3322 LIST_REMOVE(buffered, e->inotify_data_buffered, d);
3323 }
3324
3325 static int event_inotify_data_process(sd_event *e, struct inotify_data *d) {
3326 int r;
3327
3328 assert(e);
3329 assert(d);
3330
3331 /* If there's already an event source pending for this priority, don't read another */
3332 if (d->n_pending > 0)
3333 return 0;
3334
3335 while (d->buffer_filled > 0) {
3336 size_t sz;
3337
3338 /* Let's validate that the event structures are complete */
3339 if (d->buffer_filled < offsetof(struct inotify_event, name))
3340 return -EIO;
3341
3342 sz = offsetof(struct inotify_event, name) + d->buffer.ev.len;
3343 if (d->buffer_filled < sz)
3344 return -EIO;
3345
3346 if (d->buffer.ev.mask & IN_Q_OVERFLOW) {
3347 struct inode_data *inode_data;
3348
3349 /* The queue overran, let's pass this event to all event sources connected to this inotify
3350 * object */
3351
3352 HASHMAP_FOREACH(inode_data, d->inodes) {
3353 sd_event_source *s;
3354
3355 LIST_FOREACH(inotify.by_inode_data, s, inode_data->event_sources) {
3356
3357 if (event_source_is_offline(s))
3358 continue;
3359
3360 r = source_set_pending(s, true);
3361 if (r < 0)
3362 return r;
3363 }
3364 }
3365 } else {
3366 struct inode_data *inode_data;
3367 sd_event_source *s;
3368
3369 /* Find the inode object for this watch descriptor. If IN_IGNORED is set we also remove it from
3370 * our watch descriptor table. */
3371 if (d->buffer.ev.mask & IN_IGNORED) {
3372
3373 inode_data = hashmap_remove(d->wd, INT_TO_PTR(d->buffer.ev.wd));
3374 if (!inode_data) {
3375 event_inotify_data_drop(e, d, sz);
3376 continue;
3377 }
3378
3379 /* The watch descriptor was removed by the kernel, let's drop it here too */
3380 inode_data->wd = -1;
3381 } else {
3382 inode_data = hashmap_get(d->wd, INT_TO_PTR(d->buffer.ev.wd));
3383 if (!inode_data) {
3384 event_inotify_data_drop(e, d, sz);
3385 continue;
3386 }
3387 }
3388
3389 /* Trigger all event sources that are interested in these events. Also trigger all event
3390 * sources if IN_IGNORED or IN_UNMOUNT is set. */
3391 LIST_FOREACH(inotify.by_inode_data, s, inode_data->event_sources) {
3392
3393 if (event_source_is_offline(s))
3394 continue;
3395
3396 if ((d->buffer.ev.mask & (IN_IGNORED|IN_UNMOUNT)) == 0 &&
3397 (s->inotify.mask & d->buffer.ev.mask & IN_ALL_EVENTS) == 0)
3398 continue;
3399
3400 r = source_set_pending(s, true);
3401 if (r < 0)
3402 return r;
3403 }
3404 }
3405
3406 /* Something pending now? If so, let's finish, otherwise let's read more. */
3407 if (d->n_pending > 0)
3408 return 1;
3409 }
3410
3411 return 0;
3412 }
3413
3414 static int process_inotify(sd_event *e) {
3415 struct inotify_data *d;
3416 int r, done = 0;
3417
3418 assert(e);
3419
3420 LIST_FOREACH(buffered, d, e->inotify_data_buffered) {
3421 r = event_inotify_data_process(e, d);
3422 if (r < 0)
3423 return r;
3424 if (r > 0)
3425 done++;
3426 }
3427
3428 return done;
3429 }
3430
3431 static int source_dispatch(sd_event_source *s) {
3432 _cleanup_(sd_event_unrefp) sd_event *saved_event = NULL;
3433 EventSourceType saved_type;
3434 int r = 0;
3435
3436 assert(s);
3437 assert(s->pending || s->type == SOURCE_EXIT);
3438
3439 /* Save the event source type here, so that we still know it after the event callback, which might
3440 * invalidate the event. */
3441 saved_type = s->type;
3442
3443 /* Similarly, store a reference to the event loop object, so that we can still access it after the
3444 * callback might have invalidated/disconnected the event source. */
3445 saved_event = sd_event_ref(s->event);
3446
3447 /* Check if we hit the ratelimit for this event source; if so, take it offline until the rate limit window ends. */
3448 assert(!s->ratelimited);
3449 if (!ratelimit_below(&s->rate_limit)) {
3450 r = event_source_enter_ratelimited(s);
3451 if (r < 0)
3452 return r;
3453
3454 return 1;
3455 }
3456
3457 if (!IN_SET(s->type, SOURCE_DEFER, SOURCE_EXIT)) {
3458 r = source_set_pending(s, false);
3459 if (r < 0)
3460 return r;
3461 }
3462
3463 if (s->type != SOURCE_POST) {
3464 sd_event_source *z;
3465
3466 /* If we execute a non-post source, let's mark all
3467 * post sources as pending */
3468
3469 SET_FOREACH(z, s->event->post_sources) {
3470 if (event_source_is_offline(z))
3471 continue;
3472
3473 r = source_set_pending(z, true);
3474 if (r < 0)
3475 return r;
3476 }
3477 }
3478
3479 if (s->enabled == SD_EVENT_ONESHOT) {
3480 r = sd_event_source_set_enabled(s, SD_EVENT_OFF);
3481 if (r < 0)
3482 return r;
3483 }
3484
3485 s->dispatching = true;
3486
3487 switch (s->type) {
3488
3489 case SOURCE_IO:
3490 r = s->io.callback(s, s->io.fd, s->io.revents, s->userdata);
3491 break;
3492
3493 case SOURCE_TIME_REALTIME:
3494 case SOURCE_TIME_BOOTTIME:
3495 case SOURCE_TIME_MONOTONIC:
3496 case SOURCE_TIME_REALTIME_ALARM:
3497 case SOURCE_TIME_BOOTTIME_ALARM:
3498 r = s->time.callback(s, s->time.next, s->userdata);
3499 break;
3500
3501 case SOURCE_SIGNAL:
3502 r = s->signal.callback(s, &s->signal.siginfo, s->userdata);
3503 break;
3504
3505 case SOURCE_CHILD: {
3506 bool zombie;
3507
3508 zombie = IN_SET(s->child.siginfo.si_code, CLD_EXITED, CLD_KILLED, CLD_DUMPED);
3509
3510 r = s->child.callback(s, &s->child.siginfo, s->userdata);
3511
3512 /* Now, reap the PID for good. */
3513 if (zombie) {
3514 (void) waitid(P_PID, s->child.pid, &s->child.siginfo, WNOHANG|WEXITED);
3515 s->child.waited = true;
3516 }
3517
3518 break;
3519 }
3520
3521 case SOURCE_DEFER:
3522 r = s->defer.callback(s, s->userdata);
3523 break;
3524
3525 case SOURCE_POST:
3526 r = s->post.callback(s, s->userdata);
3527 break;
3528
3529 case SOURCE_EXIT:
3530 r = s->exit.callback(s, s->userdata);
3531 break;
3532
3533 case SOURCE_INOTIFY: {
3534 struct sd_event *e = s->event;
3535 struct inotify_data *d;
3536 size_t sz;
3537
3538 assert(s->inotify.inode_data);
3539 assert_se(d = s->inotify.inode_data->inotify_data);
3540
3541 assert(d->buffer_filled >= offsetof(struct inotify_event, name));
3542 sz = offsetof(struct inotify_event, name) + d->buffer.ev.len;
3543 assert(d->buffer_filled >= sz);
3544
3545 r = s->inotify.callback(s, &d->buffer.ev, s->userdata);
3546
3547 /* When no event is pending anymore on this inotify object, let's drop the event from the
3548 * buffer. */
3549 if (d->n_pending == 0)
3550 event_inotify_data_drop(e, d, sz);
3551
3552 break;
3553 }
3554
3555 case SOURCE_WATCHDOG:
3556 case _SOURCE_EVENT_SOURCE_TYPE_MAX:
3557 case _SOURCE_EVENT_SOURCE_TYPE_INVALID:
3558 assert_not_reached("Wut? I shouldn't exist.");
3559 }
3560
3561 s->dispatching = false;
3562
3563 if (r < 0) {
3564 log_debug_errno(r, "Event source %s (type %s) returned error, %s: %m",
3565 strna(s->description),
3566 event_source_type_to_string(saved_type),
3567 s->exit_on_failure ? "exiting" : "disabling");
3568
3569 if (s->exit_on_failure)
3570 (void) sd_event_exit(saved_event, r);
3571 }
3572
3573 if (s->n_ref == 0)
3574 source_free(s);
3575 else if (r < 0)
3576 sd_event_source_set_enabled(s, SD_EVENT_OFF);
3577
3578 return 1;
3579 }
3580
3581 static int event_prepare(sd_event *e) {
3582 int r;
3583
3584 assert(e);
3585
3586 for (;;) {
3587 sd_event_source *s;
3588
3589 s = prioq_peek(e->prepare);
3590 if (!s || s->prepare_iteration == e->iteration || event_source_is_offline(s))
3591 break;
3592
3593 s->prepare_iteration = e->iteration;
3594 r = prioq_reshuffle(e->prepare, s, &s->prepare_index);
3595 if (r < 0)
3596 return r;
3597
3598 assert(s->prepare);
3599
3600 s->dispatching = true;
3601 r = s->prepare(s, s->userdata);
3602 s->dispatching = false;
3603
3604 if (r < 0) {
3605 log_debug_errno(r, "Prepare callback of event source %s (type %s) returned error, %s: %m",
3606 strna(s->description),
3607 event_source_type_to_string(s->type),
3608 s->exit_on_failure ? "exiting" : "disabling");
3609
3610 if (s->exit_on_failure)
3611 (void) sd_event_exit(e, r);
3612 }
3613
3614 if (s->n_ref == 0)
3615 source_free(s);
3616 else if (r < 0)
3617 sd_event_source_set_enabled(s, SD_EVENT_OFF);
3618 }
3619
3620 return 0;
3621 }
3622
3623 static int dispatch_exit(sd_event *e) {
3624 sd_event_source *p;
3625 int r;
3626
3627 assert(e);
3628
3629 p = prioq_peek(e->exit);
3630 if (!p || event_source_is_offline(p)) {
3631 e->state = SD_EVENT_FINISHED;
3632 return 0;
3633 }
3634
3635 _unused_ _cleanup_(sd_event_unrefp) sd_event *ref = sd_event_ref(e);
3636 e->iteration++;
3637 e->state = SD_EVENT_EXITING;
3638 r = source_dispatch(p);
3639 e->state = SD_EVENT_INITIAL;
3640 return r;
3641 }
3642
3643 static sd_event_source* event_next_pending(sd_event *e) {
3644 sd_event_source *p;
3645
3646 assert(e);
3647
3648 p = prioq_peek(e->pending);
3649 if (!p)
3650 return NULL;
3651
3652 if (event_source_is_offline(p))
3653 return NULL;
3654
3655 return p;
3656 }
3657
3658 static int arm_watchdog(sd_event *e) {
3659 struct itimerspec its = {};
3660 usec_t t;
3661
3662 assert(e);
3663 assert(e->watchdog_fd >= 0);
3664
3665 t = sleep_between(e,
3666 e->watchdog_last + (e->watchdog_period / 2),
3667 e->watchdog_last + (e->watchdog_period * 3 / 4));
3668
3669 timespec_store(&its.it_value, t);
3670
3671 /* Make sure we never set the watchdog to 0, which tells the
3672 * kernel to disable it. */
3673 if (its.it_value.tv_sec == 0 && its.it_value.tv_nsec == 0)
3674 its.it_value.tv_nsec = 1;
3675
3676 if (timerfd_settime(e->watchdog_fd, TFD_TIMER_ABSTIME, &its, NULL) < 0)
3677 return -errno;
3678
3679 return 0;
3680 }
3681
3682 static int process_watchdog(sd_event *e) {
3683 assert(e);
3684
3685 if (!e->watchdog)
3686 return 0;
3687
3688 /* Don't notify the watchdog too often */
3689 if (e->watchdog_last + e->watchdog_period / 4 > e->timestamp.monotonic)
3690 return 0;
3691
3692 sd_notify(false, "WATCHDOG=1");
3693 e->watchdog_last = e->timestamp.monotonic;
3694
3695 return arm_watchdog(e);
3696 }
3697
3698 static void event_close_inode_data_fds(sd_event *e) {
3699 struct inode_data *d;
3700
3701 assert(e);
3702
3703 /* Close the fds pointing to the inodes to watch now. We need to close them as they might otherwise pin
3704 * filesystems. But we can't close them right away as we need them as long as the user still wants to make
3705 * adjustments to the event source, such as changing the priority (which requires us to remove and re-add a watch
3706 * for the inode). Hence, let's close them when entering the first iteration after they were added, as a
3707 * compromise. */
3708
3709 while ((d = e->inode_data_to_close)) {
3710 assert(d->fd >= 0);
3711 d->fd = safe_close(d->fd);
3712
3713 LIST_REMOVE(to_close, e->inode_data_to_close, d);
3714 }
3715 }
3716
3717 _public_ int sd_event_prepare(sd_event *e) {
3718 int r;
3719
3720 assert_return(e, -EINVAL);
3721 assert_return(e = event_resolve(e), -ENOPKG);
3722 assert_return(!event_pid_changed(e), -ECHILD);
3723 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
3724 assert_return(e->state == SD_EVENT_INITIAL, -EBUSY);
3725
3726 /* Let's check that if we are a default event loop we are executed in the correct thread. We only do
3727 * this check here once, since gettid() is typically not cached, and we thus want to minimize
3728 * syscalls */
3729 assert_return(!e->default_event_ptr || e->tid == gettid(), -EREMOTEIO);
3730
3731 /* Make sure that none of the preparation callbacks ends up freeing the event source under our feet */
3732 _unused_ _cleanup_(sd_event_unrefp) sd_event *ref = sd_event_ref(e);
3733
3734 if (e->exit_requested)
3735 goto pending;
3736
3737 e->iteration++;
3738
3739 e->state = SD_EVENT_PREPARING;
3740 r = event_prepare(e);
3741 e->state = SD_EVENT_INITIAL;
3742 if (r < 0)
3743 return r;
3744
3745 r = event_arm_timer(e, &e->realtime);
3746 if (r < 0)
3747 return r;
3748
3749 r = event_arm_timer(e, &e->boottime);
3750 if (r < 0)
3751 return r;
3752
3753 r = event_arm_timer(e, &e->monotonic);
3754 if (r < 0)
3755 return r;
3756
3757 r = event_arm_timer(e, &e->realtime_alarm);
3758 if (r < 0)
3759 return r;
3760
3761 r = event_arm_timer(e, &e->boottime_alarm);
3762 if (r < 0)
3763 return r;
3764
3765 event_close_inode_data_fds(e);
3766
3767 if (event_next_pending(e) || e->need_process_child)
3768 goto pending;
3769
3770 e->state = SD_EVENT_ARMED;
3771
3772 return 0;
3773
3774 pending:
3775 e->state = SD_EVENT_ARMED;
3776 r = sd_event_wait(e, 0);
3777 if (r == 0)
3778 e->state = SD_EVENT_ARMED;
3779
3780 return r;
3781 }
3782
3783 static int epoll_wait_usec(
3784 int fd,
3785 struct epoll_event *events,
3786 int maxevents,
3787 usec_t timeout) {
3788
3789 static bool epoll_pwait2_absent = false;
3790 int r, msec;
3791
3792 /* A wrapper that uses epoll_pwait2() if available, and falls back to epoll_wait() if not */
3793
3794 if (!epoll_pwait2_absent && timeout != USEC_INFINITY) {
3795 struct timespec ts;
3796
3797 r = epoll_pwait2(fd,
3798 events,
3799 maxevents,
3800 timespec_store(&ts, timeout),
3801 NULL);
3802 if (r >= 0)
3803 return r;
3804 if (!ERRNO_IS_NOT_SUPPORTED(r) && !ERRNO_IS_PRIVILEGE(r))
3805 return -errno; /* Only fall back to the old epoll_wait() if the syscall is masked or not
3806 * supported. */
3807
3808 epoll_pwait2_absent = true;
3809 }
3810
3811 if (timeout == USEC_INFINITY)
3812 msec = -1;
3813 else {
3814 usec_t k;
3815
3816 k = DIV_ROUND_UP(timeout, USEC_PER_MSEC);
3817 if (k >= INT_MAX)
3818 msec = INT_MAX; /* Saturate */
3819 else
3820 msec = (int) k;
3821 }
3822
3823 r = epoll_wait(fd,
3824 events,
3825 maxevents,
3826 msec);
3827 if (r < 0)
3828 return -errno;
3829
3830 return r;
3831 }
3832
3833 _public_ int sd_event_wait(sd_event *e, uint64_t timeout) {
3834 size_t n_event_queue, m;
3835 int r;
3836
3837 assert_return(e, -EINVAL);
3838 assert_return(e = event_resolve(e), -ENOPKG);
3839 assert_return(!event_pid_changed(e), -ECHILD);
3840 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
3841 assert_return(e->state == SD_EVENT_ARMED, -EBUSY);
3842
3843 if (e->exit_requested) {
3844 e->state = SD_EVENT_PENDING;
3845 return 1;
3846 }
3847
3848 n_event_queue = MAX(e->n_sources, 1u);
3849 if (!GREEDY_REALLOC(e->event_queue, e->event_queue_allocated, n_event_queue))
3850 return -ENOMEM;
3851
3852 /* If we still have inotify data buffered, then query the other fds, but don't wait on it */
3853 if (e->inotify_data_buffered)
3854 timeout = 0;
3855
3856 for (;;) {
3857 r = epoll_wait_usec(e->epoll_fd, e->event_queue, e->event_queue_allocated, timeout);
3858 if (r == -EINTR) {
3859 e->state = SD_EVENT_PENDING;
3860 return 1;
3861 }
3862 if (r < 0)
3863 goto finish;
3864
3865 m = (size_t) r;
3866
3867 if (m < e->event_queue_allocated)
3868 break;
3869
3870 if (e->event_queue_allocated >= n_event_queue * 10)
3871 break;
3872
3873 if (!GREEDY_REALLOC(e->event_queue, e->event_queue_allocated, e->event_queue_allocated + n_event_queue))
3874 return -ENOMEM;
3875
3876 timeout = 0;
3877 }
3878
3879 triple_timestamp_get(&e->timestamp);
3880
3881 for (size_t i = 0; i < m; i++) {
3882
3883 if (e->event_queue[i].data.ptr == INT_TO_PTR(SOURCE_WATCHDOG))
3884 r = flush_timer(e, e->watchdog_fd, e->event_queue[i].events, NULL);
3885 else {
3886 WakeupType *t = e->event_queue[i].data.ptr;
3887
3888 switch (*t) {
3889
3890 case WAKEUP_EVENT_SOURCE: {
3891 sd_event_source *s = e->event_queue[i].data.ptr;
3892
3893 assert(s);
3894
3895 switch (s->type) {
3896
3897 case SOURCE_IO:
3898 r = process_io(e, s, e->event_queue[i].events);
3899 break;
3900
3901 case SOURCE_CHILD:
3902 r = process_pidfd(e, s, e->event_queue[i].events);
3903 break;
3904
3905 default:
3906 assert_not_reached("Unexpected event source type");
3907 }
3908
3909 break;
3910 }
3911
3912 case WAKEUP_CLOCK_DATA: {
3913 struct clock_data *d = e->event_queue[i].data.ptr;
3914
3915 assert(d);
3916
3917 r = flush_timer(e, d->fd, e->event_queue[i].events, &d->next);
3918 break;
3919 }
3920
3921 case WAKEUP_SIGNAL_DATA:
3922 r = process_signal(e, e->event_queue[i].data.ptr, e->event_queue[i].events);
3923 break;
3924
3925 case WAKEUP_INOTIFY_DATA:
3926 r = event_inotify_data_read(e, e->event_queue[i].data.ptr, e->event_queue[i].events);
3927 break;
3928
3929 default:
3930 assert_not_reached("Invalid wake-up pointer");
3931 }
3932 }
3933 if (r < 0)
3934 goto finish;
3935 }
3936
3937 r = process_watchdog(e);
3938 if (r < 0)
3939 goto finish;
3940
3941 r = process_timer(e, e->timestamp.realtime, &e->realtime);
3942 if (r < 0)
3943 goto finish;
3944
3945 r = process_timer(e, e->timestamp.boottime, &e->boottime);
3946 if (r < 0)
3947 goto finish;
3948
3949 r = process_timer(e, e->timestamp.monotonic, &e->monotonic);
3950 if (r < 0)
3951 goto finish;
3952
3953 r = process_timer(e, e->timestamp.realtime, &e->realtime_alarm);
3954 if (r < 0)
3955 goto finish;
3956
3957 r = process_timer(e, e->timestamp.boottime, &e->boottime_alarm);
3958 if (r < 0)
3959 goto finish;
3960
3961 if (e->need_process_child) {
3962 r = process_child(e);
3963 if (r < 0)
3964 goto finish;
3965 }
3966
3967 r = process_inotify(e);
3968 if (r < 0)
3969 goto finish;
3970
3971 if (event_next_pending(e)) {
3972 e->state = SD_EVENT_PENDING;
3973
3974 return 1;
3975 }
3976
3977 r = 0;
3978
3979 finish:
3980 e->state = SD_EVENT_INITIAL;
3981
3982 return r;
3983 }
3984
3985 _public_ int sd_event_dispatch(sd_event *e) {
3986 sd_event_source *p;
3987 int r;
3988
3989 assert_return(e, -EINVAL);
3990 assert_return(e = event_resolve(e), -ENOPKG);
3991 assert_return(!event_pid_changed(e), -ECHILD);
3992 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
3993 assert_return(e->state == SD_EVENT_PENDING, -EBUSY);
3994
3995 if (e->exit_requested)
3996 return dispatch_exit(e);
3997
3998 p = event_next_pending(e);
3999 if (p) {
4000 _unused_ _cleanup_(sd_event_unrefp) sd_event *ref = sd_event_ref(e);
4001
4002 e->state = SD_EVENT_RUNNING;
4003 r = source_dispatch(p);
4004 e->state = SD_EVENT_INITIAL;
4005 return r;
4006 }
4007
4008 e->state = SD_EVENT_INITIAL;
4009
4010 return 1;
4011 }
4012
4013 static void event_log_delays(sd_event *e) {
4014 char b[ELEMENTSOF(e->delays) * DECIMAL_STR_MAX(unsigned) + 1], *p;
4015 size_t l, i;
4016
4017 p = b;
4018 l = sizeof(b);
4019 for (i = 0; i < ELEMENTSOF(e->delays); i++) {
4020 l = strpcpyf(&p, l, "%u ", e->delays[i]);
4021 e->delays[i] = 0;
4022 }
4023 log_debug("Event loop iterations: %s", b);
4024 }
4025
4026 _public_ int sd_event_run(sd_event *e, uint64_t timeout) {
4027 int r;
4028
4029 assert_return(e, -EINVAL);
4030 assert_return(e = event_resolve(e), -ENOPKG);
4031 assert_return(!event_pid_changed(e), -ECHILD);
4032 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
4033 assert_return(e->state == SD_EVENT_INITIAL, -EBUSY);
4034
4035 if (e->profile_delays && e->last_run_usec != 0) {
4036 usec_t this_run;
4037 unsigned l;
4038
4039 this_run = now(CLOCK_MONOTONIC);
4040
4041 l = u64log2(this_run - e->last_run_usec);
4042 assert(l < ELEMENTSOF(e->delays));
4043 e->delays[l]++;
4044
4045 if (this_run - e->last_log_usec >= 5*USEC_PER_SEC) {
4046 event_log_delays(e);
4047 e->last_log_usec = this_run;
4048 }
4049 }
4050
4051 /* Make sure that none of the preparation callbacks ends up freeing the event source under our feet */
4052 _unused_ _cleanup_(sd_event_unrefp) sd_event *ref = sd_event_ref(e);
4053
4054 r = sd_event_prepare(e);
4055 if (r == 0)
4056 /* There was nothing? Then wait... */
4057 r = sd_event_wait(e, timeout);
4058
4059 if (e->profile_delays)
4060 e->last_run_usec = now(CLOCK_MONOTONIC);
4061
4062 if (r > 0) {
4063 /* There's something now, so let's dispatch it */
4064 r = sd_event_dispatch(e);
4065 if (r < 0)
4066 return r;
4067
4068 return 1;
4069 }
4070
4071 return r;
4072 }
4073
4074 _public_ int sd_event_loop(sd_event *e) {
4075 int r;
4076
4077 assert_return(e, -EINVAL);
4078 assert_return(e = event_resolve(e), -ENOPKG);
4079 assert_return(!event_pid_changed(e), -ECHILD);
4080 assert_return(e->state == SD_EVENT_INITIAL, -EBUSY);
4081
4082 _unused_ _cleanup_(sd_event_unrefp) sd_event *ref = NULL;
4083
4084 while (e->state != SD_EVENT_FINISHED) {
4085 r = sd_event_run(e, (uint64_t) -1);
4086 if (r < 0)
4087 return r;
4088 }
4089
4090 return e->exit_code;
4091 }
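
/*
 * A minimal usage sketch of the whole loop: the prepare/wait/dispatch machinery above is normally
 * consumed through sd_event_loop(), with SIGTERM/SIGINT wired up so that the loop exits cleanly.
 * Recent sd-event versions accept a NULL handler for signal sources, which makes the source call
 * sd_event_exit() when the signal arrives.
 */
static int run(void) {
        _cleanup_(sd_event_unrefp) sd_event *e = NULL;
        int r;

        r = sd_event_default(&e);
        if (r < 0)
                return r;

        assert_se(sigprocmask_many(SIG_BLOCK, NULL, SIGTERM, SIGINT, -1) >= 0);
        (void) sd_event_add_signal(e, NULL, SIGTERM, NULL, NULL);
        (void) sd_event_add_signal(e, NULL, SIGINT, NULL, NULL);

        return sd_event_loop(e);
}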
4092
4093 _public_ int sd_event_get_fd(sd_event *e) {
4094 assert_return(e, -EINVAL);
4095 assert_return(e = event_resolve(e), -ENOPKG);
4096 assert_return(!event_pid_changed(e), -ECHILD);
4097
4098 return e->epoll_fd;
4099 }
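
/*
 * A minimal sketch of driving the loop from a foreign poll loop: the fd returned above is watched for
 * POLLIN there, and whenever it fires one prepare/wait/dispatch cycle is run with a zero timeout,
 * mirroring what sd_event_run() does internally.
 */
static int run_one_iteration(sd_event *e) {
        int r;

        r = sd_event_prepare(e); /* > 0 means something is already pending */
        if (r < 0)
                return r;
        if (r == 0) {
                r = sd_event_wait(e, 0); /* non-blocking, the foreign loop said the fd is ready */
                if (r <= 0)
                        return r;
        }

        return sd_event_dispatch(e);
}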
4100
4101 _public_ int sd_event_get_state(sd_event *e) {
4102 assert_return(e, -EINVAL);
4103 assert_return(e = event_resolve(e), -ENOPKG);
4104 assert_return(!event_pid_changed(e), -ECHILD);
4105
4106 return e->state;
4107 }
4108
4109 _public_ int sd_event_get_exit_code(sd_event *e, int *code) {
4110 assert_return(e, -EINVAL);
4111 assert_return(e = event_resolve(e), -ENOPKG);
4112 assert_return(code, -EINVAL);
4113 assert_return(!event_pid_changed(e), -ECHILD);
4114
4115 if (!e->exit_requested)
4116 return -ENODATA;
4117
4118 *code = e->exit_code;
4119 return 0;
4120 }
4121
4122 _public_ int sd_event_exit(sd_event *e, int code) {
4123 assert_return(e, -EINVAL);
4124 assert_return(e = event_resolve(e), -ENOPKG);
4125 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
4126 assert_return(!event_pid_changed(e), -ECHILD);
4127
4128 e->exit_requested = true;
4129 e->exit_code = code;
4130
4131 return 0;
4132 }
4133
4134 _public_ int sd_event_now(sd_event *e, clockid_t clock, uint64_t *usec) {
4135 assert_return(e, -EINVAL);
4136 assert_return(e = event_resolve(e), -ENOPKG);
4137 assert_return(usec, -EINVAL);
4138 assert_return(!event_pid_changed(e), -ECHILD);
4139
4140 if (!TRIPLE_TIMESTAMP_HAS_CLOCK(clock))
4141 return -EOPNOTSUPP;
4142
4143 /* Generate a clean error in case CLOCK_BOOTTIME is not available. Note that we don't use clock_supported() here,
4144 * for a reason: there are systems where CLOCK_BOOTTIME is supported, but CLOCK_BOOTTIME_ALARM is not, but for
4145 * the purpose of getting the time this doesn't matter. */
4146 if (IN_SET(clock, CLOCK_BOOTTIME, CLOCK_BOOTTIME_ALARM) && !clock_boottime_supported())
4147 return -EOPNOTSUPP;
4148
4149 if (!triple_timestamp_is_set(&e->timestamp)) {
4150 /* Implicitly fall back to now() if we never ran before and thus have no cached time. */
4151 *usec = now(clock);
4152 return 1;
4153 }
4154
4155 *usec = triple_timestamp_by_clock(&e->timestamp, clock);
4156 return 0;
4157 }
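
/*
 * A minimal usage sketch: inside a handler, sd_event_now() returns the timestamp taken right after
 * epoll returned for the current iteration, which is both cheaper and more consistent across handlers
 * of the same iteration than calling now() directly.
 */
static int on_io_event(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        uint64_t t = 0;

        (void) sd_event_now(sd_event_source_get_event(s), CLOCK_MONOTONIC, &t);
        log_debug("Dispatching at " USEC_FMT " us on CLOCK_MONOTONIC.", t);

        return 0;
}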
4158
4159 _public_ int sd_event_default(sd_event **ret) {
4160 sd_event *e = NULL;
4161 int r;
4162
4163 if (!ret)
4164 return !!default_event;
4165
4166 if (default_event) {
4167 *ret = sd_event_ref(default_event);
4168 return 0;
4169 }
4170
4171 r = sd_event_new(&e);
4172 if (r < 0)
4173 return r;
4174
4175 e->default_event_ptr = &default_event;
4176 e->tid = gettid();
4177 default_event = e;
4178
4179 *ret = e;
4180 return 1;
4181 }
4182
4183 _public_ int sd_event_get_tid(sd_event *e, pid_t *tid) {
4184 assert_return(e, -EINVAL);
4185 assert_return(e = event_resolve(e), -ENOPKG);
4186 assert_return(tid, -EINVAL);
4187 assert_return(!event_pid_changed(e), -ECHILD);
4188
4189 if (e->tid != 0) {
4190 *tid = e->tid;
4191 return 0;
4192 }
4193
4194 return -ENXIO;
4195 }
4196
4197 _public_ int sd_event_set_watchdog(sd_event *e, int b) {
4198 int r;
4199
4200 assert_return(e, -EINVAL);
4201 assert_return(e = event_resolve(e), -ENOPKG);
4202 assert_return(!event_pid_changed(e), -ECHILD);
4203
4204 if (e->watchdog == !!b)
4205 return e->watchdog;
4206
4207 if (b) {
4208 r = sd_watchdog_enabled(false, &e->watchdog_period);
4209 if (r <= 0)
4210 return r;
4211
4212 /* Issue first ping immediately */
4213 sd_notify(false, "WATCHDOG=1");
4214 e->watchdog_last = now(CLOCK_MONOTONIC);
4215
4216 e->watchdog_fd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK|TFD_CLOEXEC);
4217 if (e->watchdog_fd < 0)
4218 return -errno;
4219
4220 r = arm_watchdog(e);
4221 if (r < 0)
4222 goto fail;
4223
4224 struct epoll_event ev = {
4225 .events = EPOLLIN,
4226 .data.ptr = INT_TO_PTR(SOURCE_WATCHDOG),
4227 };
4228
4229 if (epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, e->watchdog_fd, &ev) < 0) {
4230 r = -errno;
4231 goto fail;
4232 }
4233
4234 } else {
4235 if (e->watchdog_fd >= 0) {
4236 (void) epoll_ctl(e->epoll_fd, EPOLL_CTL_DEL, e->watchdog_fd, NULL);
4237 e->watchdog_fd = safe_close(e->watchdog_fd);
4238 }
4239 }
4240
4241 e->watchdog = !!b;
4242 return e->watchdog;
4243
4244 fail:
4245 e->watchdog_fd = safe_close(e->watchdog_fd);
4246 return r;
4247 }
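#if 0
/* Illustrative sketch, not part of this file: watchdog support only does something when the
 * service manager passed $WATCHDOG_USEC (i.e. WatchdogSec= is set for the service). The loop
 * then sends "WATCHDOG=1" pings from its regular wakeups, at an interval derived from that
 * value. The example_* name is made up. */
static int example_enable_watchdog(sd_event *e) {
        int r;

        r = sd_event_set_watchdog(e, true);
        if (r < 0)
                return r;

        /* r > 0: watchdog pinging is now active; r == 0: no watchdog configured, nothing to do. */
        return r;
}
#endif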
4248
4249 _public_ int sd_event_get_watchdog(sd_event *e) {
4250 assert_return(e, -EINVAL);
4251 assert_return(e = event_resolve(e), -ENOPKG);
4252 assert_return(!event_pid_changed(e), -ECHILD);
4253
4254 return e->watchdog;
4255 }
4256
4257 _public_ int sd_event_get_iteration(sd_event *e, uint64_t *ret) {
4258 assert_return(e, -EINVAL);
4259 assert_return(e = event_resolve(e), -ENOPKG);
4260 assert_return(!event_pid_changed(e), -ECHILD);
4261
4262 *ret = e->iteration;
4263 return 0;
4264 }
4265
4266 _public_ int sd_event_source_set_destroy_callback(sd_event_source *s, sd_event_destroy_t callback) {
4267 assert_return(s, -EINVAL);
4268
4269 s->destroy_callback = callback;
4270 return 0;
4271 }
4272
4273 _public_ int sd_event_source_get_destroy_callback(sd_event_source *s, sd_event_destroy_t *ret) {
4274 assert_return(s, -EINVAL);
4275
4276 if (ret)
4277 *ret = s->destroy_callback;
4278
4279 return !!s->destroy_callback;
4280 }
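#if 0
/* Illustrative sketch, not part of this file: a destroy callback ties the lifetime of a
 * heap-allocated userdata object to the source, so it is released however the source eventually
 * goes away. Assumes <stdlib.h> and <string.h>; example_* names are made up. */
#include <stdlib.h>
#include <string.h>

static void example_free_userdata(void *userdata) {
        free(userdata);
}

static int example_add_owned_defer(sd_event *e, sd_event_source **ret, sd_event_handler_t handler) {
        sd_event_source *s = NULL;
        char *ud;
        int r;

        ud = strdup("example state");
        if (!ud)
                return -ENOMEM;

        r = sd_event_add_defer(e, &s, handler, ud);
        if (r < 0) {
                free(ud);
                return r;
        }

        r = sd_event_source_set_destroy_callback(s, example_free_userdata);
        if (r < 0) {
                sd_event_source_unref(s);
                free(ud); /* the callback was not installed, still our job to free */
                return r;
        }

        *ret = s; /* from now on ud is freed by the destroy callback when s is destroyed */
        return 0;
}
#endif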
4281
4282 _public_ int sd_event_source_get_floating(sd_event_source *s) {
4283 assert_return(s, -EINVAL);
4284
4285 return s->floating;
4286 }
4287
4288 _public_ int sd_event_source_set_floating(sd_event_source *s, int b) {
4289 assert_return(s, -EINVAL);
4290
4291 if (s->floating == !!b)
4292 return 0;
4293
4294 if (!s->event) /* Already disconnected */
4295 return -ESTALE;
4296
4297 s->floating = b;
4298
4299 if (b) {
4300 sd_event_source_ref(s);
4301 sd_event_unref(s->event);
4302 } else {
4303 sd_event_ref(s->event);
4304 sd_event_source_unref(s);
4305 }
4306
4307 return 1;
4308 }
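#if 0
/* Illustrative sketch, not part of this file: a floating source is pinned by the event loop
 * instead of by the caller, which is also what the sd_event_add_xyz() calls arrange implicitly
 * when their source return parameter is NULL. Floating an explicitly returned source lets the
 * caller drop its own reference without destroying the source. The example_* name is made up. */
static int example_fire_and_forget(sd_event *e, sd_event_handler_t handler) {
        _cleanup_(sd_event_source_unrefp) sd_event_source *s = NULL;
        int r;

        r = sd_event_add_defer(e, &s, handler, NULL);
        if (r < 0)
                return r;

        r = sd_event_source_set_enabled(s, SD_EVENT_ONESHOT); /* dispatch once, then stay off */
        if (r < 0)
                return r;

        r = sd_event_source_set_floating(s, true); /* the loop now owns the source */
        if (r < 0)
                return r;

        return 0; /* our own reference is dropped by _cleanup_; the source stays alive */
}
#endif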
4309
4310 _public_ int sd_event_source_get_exit_on_failure(sd_event_source *s) {
4311 assert_return(s, -EINVAL);
4312 assert_return(s->type != SOURCE_EXIT, -EDOM);
4313
4314 return s->exit_on_failure;
4315 }
4316
4317 _public_ int sd_event_source_set_exit_on_failure(sd_event_source *s, int b) {
4318 assert_return(s, -EINVAL);
4319 assert_return(s->type != SOURCE_EXIT, -EDOM);
4320
4321 if (s->exit_on_failure == !!b)
4322 return 0;
4323
4324 s->exit_on_failure = b;
4325 return 1;
4326 }
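#if 0
/* Illustrative sketch, not part of this file: by default a callback returning an error only gets
 * that one source disabled (with a log message); with exit-on-failure set, the error instead
 * terminates the whole loop via sd_event_exit(), which is convenient for sources a daemon cannot
 * run without. The example_* name is made up. */
static int example_mark_critical(sd_event_source *s) {
        return sd_event_source_set_exit_on_failure(s, true);
}
#endif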
4327
4328 _public_ int sd_event_source_set_ratelimit(sd_event_source *s, uint64_t interval, unsigned burst) {
4329 int r;
4330
4331 assert_return(s, -EINVAL);
4332
4333         /* Turning on ratelimiting for event source types that don't support it is a programming error, and
4334          * hence a loggable offense. */
4335 assert_return(EVENT_SOURCE_CAN_RATE_LIMIT(s->type), -EDOM);
4336
4337 /* When ratelimiting is configured we'll always reset the rate limit state first and start fresh,
4338 * non-ratelimited. */
4339 r = event_source_leave_ratelimit(s);
4340 if (r < 0)
4341 return r;
4342
4343 s->rate_limit = (RateLimit) { interval, burst };
4344 return 0;
4345 }
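#if 0
/* Illustrative sketch, not part of this file: cap a chatty source at 10 dispatches per second.
 * Once the burst is used up within the interval, the source is taken offline and automatically
 * brought back online when the interval is over. The values and the example_* name are made up. */
static int example_throttle(sd_event_source *s) {
        return sd_event_source_set_ratelimit(s, 1 * USEC_PER_SEC, 10);
}
#endif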
4346
4347 _public_ int sd_event_source_get_ratelimit(sd_event_source *s, uint64_t *ret_interval, unsigned *ret_burst) {
4348 assert_return(s, -EINVAL);
4349
4350         /* Querying whether an event source has ratelimiting configured is not a loggable offense, hence
4351          * don't use assert_return(). Unlike turning on ratelimiting, it's not really a programming error. */
4352 if (!EVENT_SOURCE_CAN_RATE_LIMIT(s->type))
4353 return -EDOM;
4354
4355 if (!ratelimit_configured(&s->rate_limit))
4356 return -ENOEXEC;
4357
4358 if (ret_interval)
4359 *ret_interval = s->rate_limit.interval;
4360 if (ret_burst)
4361 *ret_burst = s->rate_limit.burst;
4362
4363 return 0;
4364 }
4365
4366 _public_ int sd_event_source_is_ratelimited(sd_event_source *s) {
4367 assert_return(s, -EINVAL);
4368
4369 if (!EVENT_SOURCE_CAN_RATE_LIMIT(s->type))
4370 return false;
4371
4372 if (!ratelimit_configured(&s->rate_limit))
4373 return false;
4374
4375 return s->ratelimited;
4376 }