/* SPDX-License-Identifier: LGPL-2.1-or-later */

#include <sys/epoll.h>
#include <sys/timerfd.h>
#include <sys/wait.h>

#include "sd-daemon.h"
#include "sd-event.h"
#include "sd-id128.h"

#include "alloc-util.h"
#include "env-util.h"
#include "event-source.h"
#include "fd-util.h"
#include "fs-util.h"
#include "hashmap.h"
#include "list.h"
#include "macro.h"
#include "memory-util.h"
#include "missing_syscall.h"
#include "prioq.h"
#include "process-util.h"
#include "set.h"
#include "signal-util.h"
#include "string-table.h"
#include "string-util.h"
#include "strxcpyx.h"
#include "time-util.h"

#define DEFAULT_ACCURACY_USEC (250 * USEC_PER_MSEC)

static bool EVENT_SOURCE_WATCH_PIDFD(sd_event_source *s) {
        /* Returns true if this is a PID event source that can be implemented by watching EPOLLIN */
        return s &&
                s->type == SOURCE_CHILD &&
                s->child.pidfd >= 0 &&
                s->child.options == WEXITED;
}

static bool event_source_is_online(sd_event_source *s) {
        assert(s);
        return s->enabled != SD_EVENT_OFF && !s->ratelimited;
}

static bool event_source_is_offline(sd_event_source *s) {
        assert(s);
        return s->enabled == SD_EVENT_OFF || s->ratelimited;
}

static const char* const event_source_type_table[_SOURCE_EVENT_SOURCE_TYPE_MAX] = {
        [SOURCE_IO] = "io",
        [SOURCE_TIME_REALTIME] = "realtime",
        [SOURCE_TIME_BOOTTIME] = "boottime",
        [SOURCE_TIME_MONOTONIC] = "monotonic",
        [SOURCE_TIME_REALTIME_ALARM] = "realtime-alarm",
        [SOURCE_TIME_BOOTTIME_ALARM] = "boottime-alarm",
        [SOURCE_SIGNAL] = "signal",
        [SOURCE_CHILD] = "child",
        [SOURCE_DEFER] = "defer",
        [SOURCE_POST] = "post",
        [SOURCE_EXIT] = "exit",
        [SOURCE_WATCHDOG] = "watchdog",
        [SOURCE_INOTIFY] = "inotify",
};

DEFINE_PRIVATE_STRING_TABLE_LOOKUP_TO_STRING(event_source_type, int);
67
b6d5481b
LP
68#define EVENT_SOURCE_IS_TIME(t) \
69 IN_SET((t), \
70 SOURCE_TIME_REALTIME, \
71 SOURCE_TIME_BOOTTIME, \
72 SOURCE_TIME_MONOTONIC, \
73 SOURCE_TIME_REALTIME_ALARM, \
74 SOURCE_TIME_BOOTTIME_ALARM)
75
76#define EVENT_SOURCE_CAN_RATE_LIMIT(t) \
77 IN_SET((t), \
78 SOURCE_IO, \
79 SOURCE_TIME_REALTIME, \
80 SOURCE_TIME_BOOTTIME, \
81 SOURCE_TIME_MONOTONIC, \
82 SOURCE_TIME_REALTIME_ALARM, \
83 SOURCE_TIME_BOOTTIME_ALARM, \
84 SOURCE_SIGNAL, \
85 SOURCE_DEFER, \
86 SOURCE_INOTIFY)

struct sd_event {
        unsigned n_ref;

        int epoll_fd;
        int watchdog_fd;

        Prioq *pending;
        Prioq *prepare;

        /* timerfd_create() only supports these five clocks so far. We
         * can add support for more clocks when the kernel learns to
         * deal with them, too. */
        struct clock_data realtime;
        struct clock_data boottime;
        struct clock_data monotonic;
        struct clock_data realtime_alarm;
        struct clock_data boottime_alarm;

        usec_t perturb;

        sd_event_source **signal_sources; /* indexed by signal number */
        Hashmap *signal_data; /* indexed by priority */

        Hashmap *child_sources;
        unsigned n_online_child_sources;

        Set *post_sources;

        Prioq *exit;

        Hashmap *inotify_data; /* indexed by priority */

        /* A list of inode structures that still have an fd open, that we need to close before the next loop iteration */
        LIST_HEAD(struct inode_data, inode_data_to_close);

        /* A list of inotify objects that already have events buffered which aren't processed yet */
        LIST_HEAD(struct inotify_data, inotify_data_buffered);

        pid_t original_pid;

        uint64_t iteration;
        triple_timestamp timestamp;
        int state;

        bool exit_requested:1;
        bool need_process_child:1;
        bool watchdog:1;
        bool profile_delays:1;

        int exit_code;

        pid_t tid;
        sd_event **default_event_ptr;

        usec_t watchdog_last, watchdog_period;

        unsigned n_sources;

        struct epoll_event *event_queue;
        size_t event_queue_allocated;

        LIST_HEAD(sd_event_source, sources);

        usec_t last_run_usec, last_log_usec;
        unsigned delays[sizeof(usec_t) * 8];
};

static thread_local sd_event *default_event = NULL;

static void source_disconnect(sd_event_source *s);
static void event_gc_inode_data(sd_event *e, struct inode_data *d);

static sd_event *event_resolve(sd_event *e) {
        return e == SD_EVENT_DEFAULT ? default_event : e;
}

static int pending_prioq_compare(const void *a, const void *b) {
        const sd_event_source *x = a, *y = b;
        int r;

        assert(x->pending);
        assert(y->pending);

        /* Enabled ones first */
        if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
                return -1;
        if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
                return 1;

        /* Non rate-limited ones first. */
        r = CMP(!!x->ratelimited, !!y->ratelimited);
        if (r != 0)
                return r;

        /* Lower priority values first */
        r = CMP(x->priority, y->priority);
        if (r != 0)
                return r;

        /* Older entries first */
        return CMP(x->pending_iteration, y->pending_iteration);
}

static int prepare_prioq_compare(const void *a, const void *b) {
        const sd_event_source *x = a, *y = b;
        int r;

        assert(x->prepare);
        assert(y->prepare);

        /* Enabled ones first */
        if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
                return -1;
        if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
                return 1;

        /* Non rate-limited ones first. */
        r = CMP(!!x->ratelimited, !!y->ratelimited);
        if (r != 0)
                return r;

        /* Move most recently prepared ones last, so that we can stop
         * preparing as soon as we hit one that has already been
         * prepared in the current iteration */
        r = CMP(x->prepare_iteration, y->prepare_iteration);
        if (r != 0)
                return r;

        /* Lower priority values first */
        return CMP(x->priority, y->priority);
}
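
/* Editor's note: a minimal sketch (not part of the original source) of how these
 * comparators plug into the Prioq API; the sources s1/s2 are hypothetical. The
 * prioq keeps the "most urgent" element at its head per the ordering above. */
#if 0
static void example_pending_queue(sd_event_source *s1, sd_event_source *s2) {
        Prioq *q = NULL;

        assert_se(prioq_ensure_allocated(&q, pending_prioq_compare) >= 0);
        assert_se(prioq_put(q, s1, &s1->pending_index) >= 0);
        assert_se(prioq_put(q, s2, &s2->pending_index) >= 0);

        /* The head is the enabled, non-rate-limited source with the lowest
         * priority value, oldest pending iteration first. */
        sd_event_source *head = prioq_peek(q);
        (void) head;

        prioq_free(q); /* illustration only; real sources keep live indices */
}
#endif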

static usec_t time_event_source_next(const sd_event_source *s) {
        assert(s);

        /* We have two kinds of event sources that have elapse times associated with them: the actual
         * time based ones and the ones for which a rate limit can be in effect (where we want to be
         * notified once the rate-limit time window ends). Let's return the next elapsing time depending
         * on what we are looking at here. */

        if (s->ratelimited) { /* If rate-limited, the next elapse time is when the rate-limit time window ends */
                assert(s->rate_limit.begin != 0);
                assert(s->rate_limit.interval != 0);
                return usec_add(s->rate_limit.begin, s->rate_limit.interval);
        }

        /* Otherwise this must be a time event source, if not ratelimited */
        if (EVENT_SOURCE_IS_TIME(s->type))
                return s->time.next;

        return USEC_INFINITY;
}

static int earliest_time_prioq_compare(const void *a, const void *b) {
        const sd_event_source *x = a, *y = b;

        /* Enabled ones first */
        if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
                return -1;
        if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
                return 1;

        /* Move the pending ones to the end */
        if (!x->pending && y->pending)
                return -1;
        if (x->pending && !y->pending)
                return 1;

        /* Order by time */
        return CMP(time_event_source_next(x), time_event_source_next(y));
}

static usec_t time_event_source_latest(const sd_event_source *s) {
        assert(s);

        if (s->ratelimited) { /* For rate-limited sources the earliest and the latest time shall actually be
                               * the same, as we should avoid adding additional inaccuracy on top of an
                               * already inaccurate time window */
                assert(s->rate_limit.begin != 0);
                assert(s->rate_limit.interval != 0);
                return usec_add(s->rate_limit.begin, s->rate_limit.interval);
        }

        /* Must be a time event source, if not ratelimited */
        if (EVENT_SOURCE_IS_TIME(s->type))
                return usec_add(s->time.next, s->time.accuracy);

        return USEC_INFINITY;
}
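
/* Editor's worked example (not in the original): a timer source with
 * s->time.next = T and s->time.accuracy = A may be dispatched anywhere in the
 * window [T, T+A]; time_event_source_next() returns the start of that window
 * and time_event_source_latest() its end. For a rate-limited source both
 * return rate_limit.begin + rate_limit.interval, i.e. the window collapses
 * to a single point. */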

static int latest_time_prioq_compare(const void *a, const void *b) {
        const sd_event_source *x = a, *y = b;

        /* Enabled ones first */
        if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
                return -1;
        if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
                return 1;

        /* Move the pending ones to the end */
        if (!x->pending && y->pending)
                return -1;
        if (x->pending && !y->pending)
                return 1;

        /* Order by time */
        return CMP(time_event_source_latest(x), time_event_source_latest(y));
}

static int exit_prioq_compare(const void *a, const void *b) {
        const sd_event_source *x = a, *y = b;

        assert(x->type == SOURCE_EXIT);
        assert(y->type == SOURCE_EXIT);

        /* Enabled ones first */
        if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
                return -1;
        if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
                return 1;

        /* Lower priority values first */
        return CMP(x->priority, y->priority);
}

static void free_clock_data(struct clock_data *d) {
        assert(d);
        assert(d->wakeup == WAKEUP_CLOCK_DATA);

        safe_close(d->fd);
        prioq_free(d->earliest);
        prioq_free(d->latest);
}

static sd_event *event_free(sd_event *e) {
        sd_event_source *s;

        assert(e);

        while ((s = e->sources)) {
                assert(s->floating);
                source_disconnect(s);
                sd_event_source_unref(s);
        }

        assert(e->n_sources == 0);

        if (e->default_event_ptr)
                *(e->default_event_ptr) = NULL;

        safe_close(e->epoll_fd);
        safe_close(e->watchdog_fd);

        free_clock_data(&e->realtime);
        free_clock_data(&e->boottime);
        free_clock_data(&e->monotonic);
        free_clock_data(&e->realtime_alarm);
        free_clock_data(&e->boottime_alarm);

        prioq_free(e->pending);
        prioq_free(e->prepare);
        prioq_free(e->exit);

        free(e->signal_sources);
        hashmap_free(e->signal_data);

        hashmap_free(e->inotify_data);

        hashmap_free(e->child_sources);
        set_free(e->post_sources);

        free(e->event_queue);

        return mfree(e);
}

_public_ int sd_event_new(sd_event** ret) {
        sd_event *e;
        int r;

        assert_return(ret, -EINVAL);

        e = new(sd_event, 1);
        if (!e)
                return -ENOMEM;

        *e = (sd_event) {
                .n_ref = 1,
                .epoll_fd = -1,
                .watchdog_fd = -1,
                .realtime.wakeup = WAKEUP_CLOCK_DATA,
                .realtime.fd = -1,
                .realtime.next = USEC_INFINITY,
                .boottime.wakeup = WAKEUP_CLOCK_DATA,
                .boottime.fd = -1,
                .boottime.next = USEC_INFINITY,
                .monotonic.wakeup = WAKEUP_CLOCK_DATA,
                .monotonic.fd = -1,
                .monotonic.next = USEC_INFINITY,
                .realtime_alarm.wakeup = WAKEUP_CLOCK_DATA,
                .realtime_alarm.fd = -1,
                .realtime_alarm.next = USEC_INFINITY,
                .boottime_alarm.wakeup = WAKEUP_CLOCK_DATA,
                .boottime_alarm.fd = -1,
                .boottime_alarm.next = USEC_INFINITY,
                .perturb = USEC_INFINITY,
                .original_pid = getpid_cached(),
        };

        r = prioq_ensure_allocated(&e->pending, pending_prioq_compare);
        if (r < 0)
                goto fail;

        e->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
        if (e->epoll_fd < 0) {
                r = -errno;
                goto fail;
        }

        e->epoll_fd = fd_move_above_stdio(e->epoll_fd);

        if (secure_getenv("SD_EVENT_PROFILE_DELAYS")) {
                log_debug("Event loop profiling enabled. Logarithmic histogram of event loop iterations in the range 2^0 ... 2^63 us will be logged every 5s.");
                e->profile_delays = true;
        }

        *ret = e;
        return 0;

fail:
        event_free(e);
        return r;
}
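
/* Editor's note: a minimal usage sketch (not part of the original source). It
 * allocates a loop, arms a one-shot relative timer with a NULL callback (which,
 * per time_exit_callback() below, exits the loop with the given code) and runs
 * until then. Error handling via assert_se() for brevity. */
#if 0
static int example_run_loop(void) {
        _cleanup_(sd_event_unrefp) sd_event *e = NULL;

        assert_se(sd_event_new(&e) >= 0);
        assert_se(sd_event_add_time_relative(e, NULL, CLOCK_MONOTONIC,
                                             1 * USEC_PER_SEC, 0,
                                             NULL, INT_TO_PTR(0)) >= 0);

        return sd_event_loop(e); /* dispatches until sd_event_exit() */
}
#endif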

DEFINE_PUBLIC_TRIVIAL_REF_UNREF_FUNC(sd_event, sd_event, event_free);

_public_ sd_event_source* sd_event_source_disable_unref(sd_event_source *s) {
        if (s)
                (void) sd_event_source_set_enabled(s, SD_EVENT_OFF);
        return sd_event_source_unref(s);
}
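
/* Editor's note: a short sketch (not in the original) of why disable+unref is
 * useful: other references to the source may still exist (e.g. while it is
 * being dispatched), so disabling first guarantees the callback will not fire
 * again even before the last reference is gone. The helper is hypothetical. */
#if 0
static void drop_source(sd_event_source **s) {
        *s = sd_event_source_disable_unref(*s); /* always returns NULL */
}
#endif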

static bool event_pid_changed(sd_event *e) {
        assert(e);

        /* We don't support people creating an event loop and keeping
         * it around over a fork(). Let's complain. */

        return e->original_pid != getpid_cached();
}

static void source_io_unregister(sd_event_source *s) {
        assert(s);
        assert(s->type == SOURCE_IO);

        if (event_pid_changed(s->event))
                return;

        if (!s->io.registered)
                return;

        if (epoll_ctl(s->event->epoll_fd, EPOLL_CTL_DEL, s->io.fd, NULL) < 0)
                log_debug_errno(errno, "Failed to remove source %s (type %s) from epoll, ignoring: %m",
                                strna(s->description), event_source_type_to_string(s->type));

        s->io.registered = false;
}

static int source_io_register(
                sd_event_source *s,
                int enabled,
                uint32_t events) {

        assert(s);
        assert(s->type == SOURCE_IO);
        assert(enabled != SD_EVENT_OFF);

        struct epoll_event ev = {
                .events = events | (enabled == SD_EVENT_ONESHOT ? EPOLLONESHOT : 0),
                .data.ptr = s,
        };

        if (epoll_ctl(s->event->epoll_fd,
                      s->io.registered ? EPOLL_CTL_MOD : EPOLL_CTL_ADD,
                      s->io.fd, &ev) < 0)
                return -errno;

        s->io.registered = true;

        return 0;
}

static void source_child_pidfd_unregister(sd_event_source *s) {
        assert(s);
        assert(s->type == SOURCE_CHILD);

        if (event_pid_changed(s->event))
                return;

        if (!s->child.registered)
                return;

        if (EVENT_SOURCE_WATCH_PIDFD(s))
                if (epoll_ctl(s->event->epoll_fd, EPOLL_CTL_DEL, s->child.pidfd, NULL) < 0)
                        log_debug_errno(errno, "Failed to remove source %s (type %s) from epoll, ignoring: %m",
                                        strna(s->description), event_source_type_to_string(s->type));

        s->child.registered = false;
}

static int source_child_pidfd_register(sd_event_source *s, int enabled) {
        assert(s);
        assert(s->type == SOURCE_CHILD);
        assert(enabled != SD_EVENT_OFF);

        if (EVENT_SOURCE_WATCH_PIDFD(s)) {
                struct epoll_event ev = {
                        .events = EPOLLIN | (enabled == SD_EVENT_ONESHOT ? EPOLLONESHOT : 0),
                        .data.ptr = s,
                };

                if (epoll_ctl(s->event->epoll_fd,
                              s->child.registered ? EPOLL_CTL_MOD : EPOLL_CTL_ADD,
                              s->child.pidfd, &ev) < 0)
                        return -errno;
        }

        s->child.registered = true;
        return 0;
}

static clockid_t event_source_type_to_clock(EventSourceType t) {

        switch (t) {

        case SOURCE_TIME_REALTIME:
                return CLOCK_REALTIME;

        case SOURCE_TIME_BOOTTIME:
                return CLOCK_BOOTTIME;

        case SOURCE_TIME_MONOTONIC:
                return CLOCK_MONOTONIC;

        case SOURCE_TIME_REALTIME_ALARM:
                return CLOCK_REALTIME_ALARM;

        case SOURCE_TIME_BOOTTIME_ALARM:
                return CLOCK_BOOTTIME_ALARM;

        default:
                return (clockid_t) -1;
        }
}

static EventSourceType clock_to_event_source_type(clockid_t clock) {

        switch (clock) {

        case CLOCK_REALTIME:
                return SOURCE_TIME_REALTIME;

        case CLOCK_BOOTTIME:
                return SOURCE_TIME_BOOTTIME;

        case CLOCK_MONOTONIC:
                return SOURCE_TIME_MONOTONIC;

        case CLOCK_REALTIME_ALARM:
                return SOURCE_TIME_REALTIME_ALARM;

        case CLOCK_BOOTTIME_ALARM:
                return SOURCE_TIME_BOOTTIME_ALARM;

        default:
                return _SOURCE_EVENT_SOURCE_TYPE_INVALID;
        }
}

static struct clock_data* event_get_clock_data(sd_event *e, EventSourceType t) {
        assert(e);

        switch (t) {

        case SOURCE_TIME_REALTIME:
                return &e->realtime;

        case SOURCE_TIME_BOOTTIME:
                return &e->boottime;

        case SOURCE_TIME_MONOTONIC:
                return &e->monotonic;

        case SOURCE_TIME_REALTIME_ALARM:
                return &e->realtime_alarm;

        case SOURCE_TIME_BOOTTIME_ALARM:
                return &e->boottime_alarm;

        default:
                return NULL;
        }
}

static void event_free_signal_data(sd_event *e, struct signal_data *d) {
        assert(e);

        if (!d)
                return;

        hashmap_remove(e->signal_data, &d->priority);
        safe_close(d->fd);
        free(d);
}

static int event_make_signal_data(
                sd_event *e,
                int sig,
                struct signal_data **ret) {

        struct signal_data *d;
        bool added = false;
        sigset_t ss_copy;
        int64_t priority;
        int r;

        assert(e);

        if (event_pid_changed(e))
                return -ECHILD;

        if (e->signal_sources && e->signal_sources[sig])
                priority = e->signal_sources[sig]->priority;
        else
                priority = SD_EVENT_PRIORITY_NORMAL;

        d = hashmap_get(e->signal_data, &priority);
        if (d) {
                if (sigismember(&d->sigset, sig) > 0) {
                        if (ret)
                                *ret = d;
                        return 0;
                }
        } else {
                d = new(struct signal_data, 1);
                if (!d)
                        return -ENOMEM;

                *d = (struct signal_data) {
                        .wakeup = WAKEUP_SIGNAL_DATA,
                        .fd = -1,
                        .priority = priority,
                };

                r = hashmap_ensure_put(&e->signal_data, &uint64_hash_ops, &d->priority, d);
                if (r < 0) {
                        free(d);
                        return r;
                }

                added = true;
        }

        ss_copy = d->sigset;
        assert_se(sigaddset(&ss_copy, sig) >= 0);

        r = signalfd(d->fd, &ss_copy, SFD_NONBLOCK|SFD_CLOEXEC);
        if (r < 0) {
                r = -errno;
                goto fail;
        }

        d->sigset = ss_copy;

        if (d->fd >= 0) {
                if (ret)
                        *ret = d;
                return 0;
        }

        d->fd = fd_move_above_stdio(r);

        struct epoll_event ev = {
                .events = EPOLLIN,
                .data.ptr = d,
        };

        if (epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, d->fd, &ev) < 0) {
                r = -errno;
                goto fail;
        }

        if (ret)
                *ret = d;

        return 0;

fail:
        if (added)
                event_free_signal_data(e, d);

        return r;
}

static void event_unmask_signal_data(sd_event *e, struct signal_data *d, int sig) {
        assert(e);
        assert(d);

        /* Turns off the specified signal in the signal data
         * object. If the signal mask of the object becomes empty
         * that way, the object is removed entirely. */

        if (sigismember(&d->sigset, sig) == 0)
                return;

        assert_se(sigdelset(&d->sigset, sig) >= 0);

        if (sigisemptyset(&d->sigset)) {
                /* If the mask is now all-zero we can get rid of the structure */
                event_free_signal_data(e, d);
                return;
        }

        assert(d->fd >= 0);

        if (signalfd(d->fd, &d->sigset, SFD_NONBLOCK|SFD_CLOEXEC) < 0)
                log_debug_errno(errno, "Failed to unset signal bit, ignoring: %m");
}

static void event_gc_signal_data(sd_event *e, const int64_t *priority, int sig) {
        struct signal_data *d;
        static const int64_t zero_priority = 0;

        assert(e);

        /* Rechecks if the specified signal is still something we are interested in. If not, we'll unmask it,
         * and possibly drop the signalfd for it. */

        if (sig == SIGCHLD &&
            e->n_online_child_sources > 0)
                return;

        if (e->signal_sources &&
            e->signal_sources[sig] &&
            event_source_is_online(e->signal_sources[sig]))
                return;

        /*
         * The specified signal might be enabled in three different queues:
         *
         * 1) the one that belongs to the priority passed (if it is non-NULL)
         * 2) the one that belongs to the priority of the event source of the signal (if there is one)
         * 3) the 0 priority (to cover the SIGCHLD case)
         *
         * Hence, let's remove it from all three here.
         */

        if (priority) {
                d = hashmap_get(e->signal_data, priority);
                if (d)
                        event_unmask_signal_data(e, d, sig);
        }

        if (e->signal_sources && e->signal_sources[sig]) {
                d = hashmap_get(e->signal_data, &e->signal_sources[sig]->priority);
                if (d)
                        event_unmask_signal_data(e, d, sig);
        }

        d = hashmap_get(e->signal_data, &zero_priority);
        if (d)
                event_unmask_signal_data(e, d, sig);
}

static void event_source_pp_prioq_reshuffle(sd_event_source *s) {
        assert(s);

        /* Reshuffles the pending + prepare prioqs. Called whenever the dispatch order changes, i.e. when
         * they are enabled/disabled or marked pending and such. */

        if (s->pending)
                prioq_reshuffle(s->event->pending, s, &s->pending_index);

        if (s->prepare)
                prioq_reshuffle(s->event->prepare, s, &s->prepare_index);
}

static void event_source_time_prioq_reshuffle(sd_event_source *s) {
        struct clock_data *d;

        assert(s);

        /* Called whenever the event source's timer ordering properties changed, i.e. time, accuracy,
         * pending, enable state. Makes sure the two prioq's are ordered properly again. */

        if (s->ratelimited)
                d = &s->event->monotonic;
        else {
                assert(EVENT_SOURCE_IS_TIME(s->type));
                assert_se(d = event_get_clock_data(s->event, s->type));
        }

        prioq_reshuffle(d->earliest, s, &s->earliest_index);
        prioq_reshuffle(d->latest, s, &s->latest_index);
        d->needs_rearm = true;
}

static void event_source_time_prioq_remove(
                sd_event_source *s,
                struct clock_data *d) {

        assert(s);
        assert(d);

        prioq_remove(d->earliest, s, &s->earliest_index);
        prioq_remove(d->latest, s, &s->latest_index);
        s->earliest_index = s->latest_index = PRIOQ_IDX_NULL;
        d->needs_rearm = true;
}

static void source_disconnect(sd_event_source *s) {
        sd_event *event;

        assert(s);

        if (!s->event)
                return;

        assert(s->event->n_sources > 0);

        switch (s->type) {

        case SOURCE_IO:
                if (s->io.fd >= 0)
                        source_io_unregister(s);

                break;

        case SOURCE_TIME_REALTIME:
        case SOURCE_TIME_BOOTTIME:
        case SOURCE_TIME_MONOTONIC:
        case SOURCE_TIME_REALTIME_ALARM:
        case SOURCE_TIME_BOOTTIME_ALARM:
                /* Only remove this event source from the time event source here if it is not ratelimited. If
                 * it is ratelimited, we'll remove it below, separately. Why? Because the clock used might
                 * differ: ratelimiting always uses CLOCK_MONOTONIC, but timer events might use any clock */

                if (!s->ratelimited) {
                        struct clock_data *d;
                        assert_se(d = event_get_clock_data(s->event, s->type));
                        event_source_time_prioq_remove(s, d);
                }

                break;

        case SOURCE_SIGNAL:
                if (s->signal.sig > 0) {

                        if (s->event->signal_sources)
                                s->event->signal_sources[s->signal.sig] = NULL;

                        event_gc_signal_data(s->event, &s->priority, s->signal.sig);
                }

                break;

        case SOURCE_CHILD:
                if (s->child.pid > 0) {
                        if (event_source_is_online(s)) {
                                assert(s->event->n_online_child_sources > 0);
                                s->event->n_online_child_sources--;
                        }

                        (void) hashmap_remove(s->event->child_sources, PID_TO_PTR(s->child.pid));
                }

                if (EVENT_SOURCE_WATCH_PIDFD(s))
                        source_child_pidfd_unregister(s);
                else
                        event_gc_signal_data(s->event, &s->priority, SIGCHLD);

                break;

        case SOURCE_DEFER:
                /* nothing */
                break;

        case SOURCE_POST:
                set_remove(s->event->post_sources, s);
                break;

        case SOURCE_EXIT:
                prioq_remove(s->event->exit, s, &s->exit.prioq_index);
                break;

        case SOURCE_INOTIFY: {
                struct inode_data *inode_data;

                inode_data = s->inotify.inode_data;
                if (inode_data) {
                        struct inotify_data *inotify_data;
                        assert_se(inotify_data = inode_data->inotify_data);

                        /* Detach this event source from the inode object */
                        LIST_REMOVE(inotify.by_inode_data, inode_data->event_sources, s);
                        s->inotify.inode_data = NULL;

                        if (s->pending) {
                                assert(inotify_data->n_pending > 0);
                                inotify_data->n_pending--;
                        }

                        /* Note that we don't reduce the inotify mask for the watch descriptor here if the inode
                         * continues to be watched. That's because inotify doesn't really have an API for that: we
                         * can only change watch masks with access to the original inode either by fd or by path. But
                         * paths aren't stable, and keeping an O_PATH fd open all the time would mean wasting an fd
                         * continuously and keeping the mount busy which we can't really do. We could reconstruct the
                         * original inode from /proc/self/fdinfo/$INOTIFY_FD (as all watch descriptors are listed
                         * there), but given the need for open_by_handle_at() which is privileged and not universally
                         * available this would be quite an incomplete solution. Hence we go the other way, leave the
                         * mask set, even if it is not minimized now, and ignore all events we aren't interested in
                         * anymore after reception. Yes, this sucks, but … Linux … */

                        /* Maybe release the inode data (and its inotify) */
                        event_gc_inode_data(s->event, inode_data);
                }

                break;
        }

        default:
                assert_not_reached("Wut? I shouldn't exist.");
        }

        if (s->pending)
                prioq_remove(s->event->pending, s, &s->pending_index);

        if (s->prepare)
                prioq_remove(s->event->prepare, s, &s->prepare_index);

        if (s->ratelimited)
                event_source_time_prioq_remove(s, &s->event->monotonic);

        event = TAKE_PTR(s->event);
        LIST_REMOVE(sources, event->sources, s);
        event->n_sources--;

        /* Note that we don't invalidate the type here, since we still need it in order to close the fd or
         * pidfd associated with this event source, which we'll do only on source_free(). */

        if (!s->floating)
                sd_event_unref(event);
}

static void source_free(sd_event_source *s) {
        assert(s);

        source_disconnect(s);

        if (s->type == SOURCE_IO && s->io.owned)
                s->io.fd = safe_close(s->io.fd);

        if (s->type == SOURCE_CHILD) {
                /* Eventually the kernel will do this automatically for us, but for now let's emulate this (unreliably) in userspace. */

                if (s->child.process_owned) {

                        if (!s->child.exited) {
                                bool sent = false;

                                if (s->child.pidfd >= 0) {
                                        if (pidfd_send_signal(s->child.pidfd, SIGKILL, NULL, 0) < 0) {
                                                if (errno == ESRCH) /* Already dead */
                                                        sent = true;
                                                else if (!ERRNO_IS_NOT_SUPPORTED(errno))
                                                        log_debug_errno(errno, "Failed to kill process " PID_FMT " via pidfd_send_signal(), re-trying via kill(): %m",
                                                                        s->child.pid);
                                        } else
                                                sent = true;
                                }

                                if (!sent)
                                        if (kill(s->child.pid, SIGKILL) < 0)
                                                if (errno != ESRCH) /* Already dead */
                                                        log_debug_errno(errno, "Failed to kill process " PID_FMT " via kill(), ignoring: %m",
                                                                        s->child.pid);
                        }

                        if (!s->child.waited) {
                                siginfo_t si = {};

                                /* Reap the child if we can */
                                (void) waitid(P_PID, s->child.pid, &si, WEXITED);
                        }
                }

                if (s->child.pidfd_owned)
                        s->child.pidfd = safe_close(s->child.pidfd);
        }

        if (s->destroy_callback)
                s->destroy_callback(s->userdata);

        free(s->description);
        free(s);
}
DEFINE_TRIVIAL_CLEANUP_FUNC(sd_event_source*, source_free);

static int source_set_pending(sd_event_source *s, bool b) {
        int r;

        assert(s);
        assert(s->type != SOURCE_EXIT);

        if (s->pending == b)
                return 0;

        s->pending = b;

        if (b) {
                s->pending_iteration = s->event->iteration;

                r = prioq_put(s->event->pending, s, &s->pending_index);
                if (r < 0) {
                        s->pending = false;
                        return r;
                }
        } else
                assert_se(prioq_remove(s->event->pending, s, &s->pending_index));

        if (EVENT_SOURCE_IS_TIME(s->type))
                event_source_time_prioq_reshuffle(s);

        if (s->type == SOURCE_SIGNAL && !b) {
                struct signal_data *d;

                d = hashmap_get(s->event->signal_data, &s->priority);
                if (d && d->current == s)
                        d->current = NULL;
        }

        if (s->type == SOURCE_INOTIFY) {

                assert(s->inotify.inode_data);
                assert(s->inotify.inode_data->inotify_data);

                if (b)
                        s->inotify.inode_data->inotify_data->n_pending++;
                else {
                        assert(s->inotify.inode_data->inotify_data->n_pending > 0);
                        s->inotify.inode_data->inotify_data->n_pending--;
                }
        }

        return 0;
}

static sd_event_source *source_new(sd_event *e, bool floating, EventSourceType type) {
        sd_event_source *s;

        assert(e);

        s = new(sd_event_source, 1);
        if (!s)
                return NULL;

        *s = (struct sd_event_source) {
                .n_ref = 1,
                .event = e,
                .floating = floating,
                .type = type,
                .pending_index = PRIOQ_IDX_NULL,
                .prepare_index = PRIOQ_IDX_NULL,
        };

        if (!floating)
                sd_event_ref(e);

        LIST_PREPEND(sources, e->sources, s);
        e->n_sources++;

        return s;
}

static int io_exit_callback(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        assert(s);

        return sd_event_exit(sd_event_source_get_event(s), PTR_TO_INT(userdata));
}

_public_ int sd_event_add_io(
                sd_event *e,
                sd_event_source **ret,
                int fd,
                uint32_t events,
                sd_event_io_handler_t callback,
                void *userdata) {

        _cleanup_(source_freep) sd_event_source *s = NULL;
        int r;

        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(fd >= 0, -EBADF);
        assert_return(!(events & ~(EPOLLIN|EPOLLOUT|EPOLLRDHUP|EPOLLPRI|EPOLLERR|EPOLLHUP|EPOLLET)), -EINVAL);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(e), -ECHILD);

        if (!callback)
                callback = io_exit_callback;

        s = source_new(e, !ret, SOURCE_IO);
        if (!s)
                return -ENOMEM;

        s->wakeup = WAKEUP_EVENT_SOURCE;
        s->io.fd = fd;
        s->io.events = events;
        s->io.callback = callback;
        s->userdata = userdata;
        s->enabled = SD_EVENT_ON;

        r = source_io_register(s, s->enabled, events);
        if (r < 0)
                return r;

        if (ret)
                *ret = s;
        TAKE_PTR(s);

        return 0;
}
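
/* Editor's note: a minimal usage sketch (not part of the original source); the
 * handler and fd are hypothetical. Passing ret=NULL makes the source "floating",
 * i.e. owned by (and cleaned up with) the event loop itself. */
#if 0
static int on_readable(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        char buf[256];

        ssize_t n = read(fd, buf, sizeof(buf));
        if (n < 0)
                return -errno;

        return 0;
}

static int watch_fd(sd_event *e, int fd) {
        return sd_event_add_io(e, NULL, fd, EPOLLIN, on_readable, NULL);
}
#endif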

static void initialize_perturb(sd_event *e) {
        sd_id128_t bootid = {};

        /* When we sleep for longer, we try to realign the wakeup to
           the same time within each minute/second/250ms, so that
           events all across the system can be coalesced into a single
           CPU wakeup. However, let's take some system-specific
           randomness for this value, so that in a network of systems
           with synced clocks timer events are distributed a
           bit. Here, we calculate a perturbation usec offset from the
           boot ID. */

        if (_likely_(e->perturb != USEC_INFINITY))
                return;

        if (sd_id128_get_boot(&bootid) >= 0)
                e->perturb = (bootid.qwords[0] ^ bootid.qwords[1]) % USEC_PER_MINUTE;
}

static int event_setup_timer_fd(
                sd_event *e,
                struct clock_data *d,
                clockid_t clock) {

        assert(e);
        assert(d);

        if (_likely_(d->fd >= 0))
                return 0;

        _cleanup_close_ int fd = -1;

        fd = timerfd_create(clock, TFD_NONBLOCK|TFD_CLOEXEC);
        if (fd < 0)
                return -errno;

        fd = fd_move_above_stdio(fd);

        struct epoll_event ev = {
                .events = EPOLLIN,
                .data.ptr = d,
        };

        if (epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, fd, &ev) < 0)
                return -errno;

        d->fd = TAKE_FD(fd);
        return 0;
}

static int time_exit_callback(sd_event_source *s, uint64_t usec, void *userdata) {
        assert(s);

        return sd_event_exit(sd_event_source_get_event(s), PTR_TO_INT(userdata));
}

static int setup_clock_data(sd_event *e, struct clock_data *d, clockid_t clock) {
        int r;

        assert(d);

        if (d->fd < 0) {
                r = event_setup_timer_fd(e, d, clock);
                if (r < 0)
                        return r;
        }

        r = prioq_ensure_allocated(&d->earliest, earliest_time_prioq_compare);
        if (r < 0)
                return r;

        r = prioq_ensure_allocated(&d->latest, latest_time_prioq_compare);
        if (r < 0)
                return r;

        return 0;
}

static int event_source_time_prioq_put(
                sd_event_source *s,
                struct clock_data *d) {

        int r;

        assert(s);
        assert(d);

        r = prioq_put(d->earliest, s, &s->earliest_index);
        if (r < 0)
                return r;

        r = prioq_put(d->latest, s, &s->latest_index);
        if (r < 0) {
                assert_se(prioq_remove(d->earliest, s, &s->earliest_index) > 0);
                s->earliest_index = PRIOQ_IDX_NULL;
                return r;
        }

        d->needs_rearm = true;
        return 0;
}

_public_ int sd_event_add_time(
                sd_event *e,
                sd_event_source **ret,
                clockid_t clock,
                uint64_t usec,
                uint64_t accuracy,
                sd_event_time_handler_t callback,
                void *userdata) {

        EventSourceType type;
        _cleanup_(source_freep) sd_event_source *s = NULL;
        struct clock_data *d;
        int r;

        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(accuracy != (uint64_t) -1, -EINVAL);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(e), -ECHILD);

        if (!clock_supported(clock)) /* Checks whether the kernel supports the clock */
                return -EOPNOTSUPP;

        type = clock_to_event_source_type(clock); /* checks whether sd-event supports this clock */
        if (type < 0)
                return -EOPNOTSUPP;

        if (!callback)
                callback = time_exit_callback;

        assert_se(d = event_get_clock_data(e, type));

        r = setup_clock_data(e, d, clock);
        if (r < 0)
                return r;

        s = source_new(e, !ret, type);
        if (!s)
                return -ENOMEM;

        s->time.next = usec;
        s->time.accuracy = accuracy == 0 ? DEFAULT_ACCURACY_USEC : accuracy;
        s->time.callback = callback;
        s->earliest_index = s->latest_index = PRIOQ_IDX_NULL;
        s->userdata = userdata;
        s->enabled = SD_EVENT_ONESHOT;

        r = event_source_time_prioq_put(s, d);
        if (r < 0)
                return r;

        if (ret)
                *ret = s;
        TAKE_PTR(s);

        return 0;
}

_public_ int sd_event_add_time_relative(
                sd_event *e,
                sd_event_source **ret,
                clockid_t clock,
                uint64_t usec,
                uint64_t accuracy,
                sd_event_time_handler_t callback,
                void *userdata) {

        usec_t t;
        int r;

        /* Same as sd_event_add_time() but operates relative to the event loop's current point in time, and
         * checks for overflow. */

        r = sd_event_now(e, clock, &t);
        if (r < 0)
                return r;

        if (usec >= USEC_INFINITY - t)
                return -EOVERFLOW;

        return sd_event_add_time(e, ret, clock, t + usec, accuracy, callback, userdata);
}
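
/* Editor's note: a minimal usage sketch (not part of the original source); the
 * handler name is hypothetical. Timer sources default to SD_EVENT_ONESHOT, so
 * this fires exactly once, roughly five seconds from now (accuracy=0 selects
 * DEFAULT_ACCURACY_USEC, i.e. 250ms of coalescing slack). */
#if 0
static int on_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
        log_debug("timer elapsed at %" PRIu64, usec);
        return 0;
}

static int arm_timeout(sd_event *e) {
        return sd_event_add_time_relative(e, NULL, CLOCK_MONOTONIC,
                                          5 * USEC_PER_SEC, 0, on_timeout, NULL);
}
#endif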

static int signal_exit_callback(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        assert(s);

        return sd_event_exit(sd_event_source_get_event(s), PTR_TO_INT(userdata));
}

_public_ int sd_event_add_signal(
                sd_event *e,
                sd_event_source **ret,
                int sig,
                sd_event_signal_handler_t callback,
                void *userdata) {

        _cleanup_(source_freep) sd_event_source *s = NULL;
        struct signal_data *d;
        int r;

        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(SIGNAL_VALID(sig), -EINVAL);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(e), -ECHILD);

        if (!callback)
                callback = signal_exit_callback;

        r = signal_is_blocked(sig);
        if (r < 0)
                return r;
        if (r == 0)
                return -EBUSY;

        if (!e->signal_sources) {
                e->signal_sources = new0(sd_event_source*, _NSIG);
                if (!e->signal_sources)
                        return -ENOMEM;
        } else if (e->signal_sources[sig])
                return -EBUSY;

        s = source_new(e, !ret, SOURCE_SIGNAL);
        if (!s)
                return -ENOMEM;

        s->signal.sig = sig;
        s->signal.callback = callback;
        s->userdata = userdata;
        s->enabled = SD_EVENT_ON;

        e->signal_sources[sig] = s;

        r = event_make_signal_data(e, sig, &d);
        if (r < 0)
                return r;

        /* Use the signal name as description for the event source by default */
        (void) sd_event_source_set_description(s, signal_to_string(sig));

        if (ret)
                *ret = s;
        TAKE_PTR(s);

        return 0;
}
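
/* Editor's note: a minimal usage sketch (not part of the original source). The
 * signal must already be blocked in all threads, otherwise the
 * signal_is_blocked() check above makes this return -EBUSY. With a NULL
 * callback, signal_exit_callback() above terminates the loop on delivery. */
#if 0
static int watch_sigterm(sd_event *e) {
        assert_se(sigprocmask_many(SIG_BLOCK, NULL, SIGTERM, -1) >= 0);
        return sd_event_add_signal(e, NULL, SIGTERM, NULL, NULL);
}
#endif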

static int child_exit_callback(sd_event_source *s, const siginfo_t *si, void *userdata) {
        assert(s);

        return sd_event_exit(sd_event_source_get_event(s), PTR_TO_INT(userdata));
}

static bool shall_use_pidfd(void) {
        /* Mostly relevant for debugging, i.e. this is used in test-event.c to test the event loop once with and once without pidfd */
        return getenv_bool_secure("SYSTEMD_PIDFD") != 0;
}

_public_ int sd_event_add_child(
                sd_event *e,
                sd_event_source **ret,
                pid_t pid,
                int options,
                sd_event_child_handler_t callback,
                void *userdata) {

        _cleanup_(source_freep) sd_event_source *s = NULL;
        int r;

        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(pid > 1, -EINVAL);
        assert_return(!(options & ~(WEXITED|WSTOPPED|WCONTINUED)), -EINVAL);
        assert_return(options != 0, -EINVAL);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(e), -ECHILD);

        if (!callback)
                callback = child_exit_callback;

        if (e->n_online_child_sources == 0) {
                /* Caller must block SIGCHLD before using us to watch children, even if pidfd is available,
                 * for compatibility with pre-pidfd and because we don't want to reap the child processes
                 * ourselves, i.e. call waitid(), and don't want Linux' default internal logic for that to
                 * take effect.
                 *
                 * (As an optimization we only do this check on the first child event source created.) */
                r = signal_is_blocked(SIGCHLD);
                if (r < 0)
                        return r;
                if (r == 0)
                        return -EBUSY;
        }

        r = hashmap_ensure_allocated(&e->child_sources, NULL);
        if (r < 0)
                return r;

        if (hashmap_contains(e->child_sources, PID_TO_PTR(pid)))
                return -EBUSY;

        s = source_new(e, !ret, SOURCE_CHILD);
        if (!s)
                return -ENOMEM;

        s->wakeup = WAKEUP_EVENT_SOURCE;
        s->child.pid = pid;
        s->child.options = options;
        s->child.callback = callback;
        s->userdata = userdata;
        s->enabled = SD_EVENT_ONESHOT;

        /* We always take a pidfd here if we can, even if we wait for anything else than WEXITED, so that we
         * pin the PID, and make regular waitid() handling race-free. */

        if (shall_use_pidfd()) {
                s->child.pidfd = pidfd_open(s->child.pid, 0);
                if (s->child.pidfd < 0) {
                        /* Propagate errors unless the syscall is not supported or blocked */
                        if (!ERRNO_IS_NOT_SUPPORTED(errno) && !ERRNO_IS_PRIVILEGE(errno))
                                return -errno;
                } else
                        s->child.pidfd_owned = true; /* If we allocate the pidfd we own it by default */
        } else
                s->child.pidfd = -1;

        r = hashmap_put(e->child_sources, PID_TO_PTR(pid), s);
        if (r < 0)
                return r;

        if (EVENT_SOURCE_WATCH_PIDFD(s)) {
                /* We have a pidfd and we only want to watch for exit */
                r = source_child_pidfd_register(s, s->enabled);
                if (r < 0)
                        return r;

        } else {
                /* We have no pidfd or we shall wait for some other event than WEXITED */
                r = event_make_signal_data(e, SIGCHLD, NULL);
                if (r < 0)
                        return r;

                e->need_process_child = true;
        }

        e->n_online_child_sources++;

        if (ret)
                *ret = s;
        TAKE_PTR(s);
        return 0;
}
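
/* Editor's note: a minimal usage sketch (not part of the original source); the
 * handler and helper names are hypothetical. As with signal sources, SIGCHLD
 * must be blocked before the first child source is added. */
#if 0
static int on_child(sd_event_source *s, const siginfo_t *si, void *userdata) {
        log_debug("child " PID_FMT " terminated (code=%i, status=%i)",
                  si->si_pid, si->si_code, si->si_status);
        return 0;
}

static int watch_child(sd_event *e, pid_t pid) {
        assert_se(sigprocmask_many(SIG_BLOCK, NULL, SIGCHLD, -1) >= 0);
        return sd_event_add_child(e, NULL, pid, WEXITED, on_child, NULL);
}
#endif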

_public_ int sd_event_add_child_pidfd(
                sd_event *e,
                sd_event_source **ret,
                int pidfd,
                int options,
                sd_event_child_handler_t callback,
                void *userdata) {

        _cleanup_(source_freep) sd_event_source *s = NULL;
        pid_t pid;
        int r;

        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(pidfd >= 0, -EBADF);
        assert_return(!(options & ~(WEXITED|WSTOPPED|WCONTINUED)), -EINVAL);
        assert_return(options != 0, -EINVAL);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(e), -ECHILD);

        if (!callback)
                callback = child_exit_callback;

        if (e->n_online_child_sources == 0) {
                r = signal_is_blocked(SIGCHLD);
                if (r < 0)
                        return r;
                if (r == 0)
                        return -EBUSY;
        }

        r = hashmap_ensure_allocated(&e->child_sources, NULL);
        if (r < 0)
                return r;

        r = pidfd_get_pid(pidfd, &pid);
        if (r < 0)
                return r;

        if (hashmap_contains(e->child_sources, PID_TO_PTR(pid)))
                return -EBUSY;

        s = source_new(e, !ret, SOURCE_CHILD);
        if (!s)
                return -ENOMEM;

        s->wakeup = WAKEUP_EVENT_SOURCE;
        s->child.pidfd = pidfd;
        s->child.pid = pid;
        s->child.options = options;
        s->child.callback = callback;
        s->child.pidfd_owned = false; /* If we got the pidfd passed in we don't own it by default (similar to the IO fd case) */
        s->userdata = userdata;
        s->enabled = SD_EVENT_ONESHOT;

        r = hashmap_put(e->child_sources, PID_TO_PTR(pid), s);
        if (r < 0)
                return r;

        if (EVENT_SOURCE_WATCH_PIDFD(s)) {
                /* We only want to watch for WEXITED */
                r = source_child_pidfd_register(s, s->enabled);
                if (r < 0)
                        return r;
        } else {
                /* We shall wait for some other event than WEXITED */
                r = event_make_signal_data(e, SIGCHLD, NULL);
                if (r < 0)
                        return r;

                e->need_process_child = true;
        }

        e->n_online_child_sources++;

        if (ret)
                *ret = s;
        TAKE_PTR(s);
        return 0;
}
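
/* Editor's note: a short sketch (not in the original). Unlike sd_event_add_child(),
 * the caller keeps ownership of the passed-in pidfd by default; ownership can be
 * transferred with sd_event_source_set_child_pidfd_own(). */
#if 0
static int watch_pidfd(sd_event *e, int pidfd) {
        /* NULL callback: child_exit_callback() above exits the loop on termination. */
        return sd_event_add_child_pidfd(e, NULL, pidfd, WEXITED, NULL, NULL);
}
#endif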

static int generic_exit_callback(sd_event_source *s, void *userdata) {
        assert(s);

        return sd_event_exit(sd_event_source_get_event(s), PTR_TO_INT(userdata));
}

_public_ int sd_event_add_defer(
                sd_event *e,
                sd_event_source **ret,
                sd_event_handler_t callback,
                void *userdata) {

        _cleanup_(source_freep) sd_event_source *s = NULL;
        int r;

        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(e), -ECHILD);

        if (!callback)
                callback = generic_exit_callback;

        s = source_new(e, !ret, SOURCE_DEFER);
        if (!s)
                return -ENOMEM;

        s->defer.callback = callback;
        s->userdata = userdata;
        s->enabled = SD_EVENT_ONESHOT;

        r = source_set_pending(s, true);
        if (r < 0)
                return r;

        if (ret)
                *ret = s;
        TAKE_PTR(s);

        return 0;
}
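
/* Editor's note: a minimal usage sketch (not part of the original source). Defer
 * sources are created pending and SD_EVENT_ONESHOT, so the (hypothetical) handler
 * runs once on the next loop iteration: handy for scheduling work out of the
 * current dispatch without arming a timer. */
#if 0
static int do_later(sd_event_source *s, void *userdata) {
        log_debug("deferred work running");
        return 0;
}

static int schedule_deferred(sd_event *e) {
        return sd_event_add_defer(e, NULL, do_later, NULL);
}
#endif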

_public_ int sd_event_add_post(
                sd_event *e,
                sd_event_source **ret,
                sd_event_handler_t callback,
                void *userdata) {

        _cleanup_(source_freep) sd_event_source *s = NULL;
        int r;

        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(e), -ECHILD);

        if (!callback)
                callback = generic_exit_callback;

        s = source_new(e, !ret, SOURCE_POST);
        if (!s)
                return -ENOMEM;

        s->post.callback = callback;
        s->userdata = userdata;
        s->enabled = SD_EVENT_ON;

        r = set_ensure_put(&e->post_sources, NULL, s);
        if (r < 0)
                return r;
        assert(r > 0);

        if (ret)
                *ret = s;
        TAKE_PTR(s);

        return 0;
}

_public_ int sd_event_add_exit(
                sd_event *e,
                sd_event_source **ret,
                sd_event_handler_t callback,
                void *userdata) {

        _cleanup_(source_freep) sd_event_source *s = NULL;
        int r;

        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(callback, -EINVAL);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(e), -ECHILD);

        r = prioq_ensure_allocated(&e->exit, exit_prioq_compare);
        if (r < 0)
                return r;

        s = source_new(e, !ret, SOURCE_EXIT);
        if (!s)
                return -ENOMEM;

        s->exit.callback = callback;
        s->userdata = userdata;
        s->exit.prioq_index = PRIOQ_IDX_NULL;
        s->enabled = SD_EVENT_ONESHOT;

        r = prioq_put(s->event->exit, s, &s->exit.prioq_index);
        if (r < 0)
                return r;

        if (ret)
                *ret = s;
        TAKE_PTR(s);

        return 0;
}
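
/* Editor's note: a minimal usage sketch (not part of the original source). Exit
 * sources run during loop shutdown, after sd_event_exit() has been called,
 * ordered by ascending priority value (see exit_prioq_compare() above); the
 * handler name is hypothetical. */
#if 0
static int on_shutdown(sd_event_source *s, void *userdata) {
        log_debug("cleaning up before the loop returns");
        return 0;
}

static int register_cleanup(sd_event *e) {
        return sd_event_add_exit(e, NULL, on_shutdown, NULL);
}
#endif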

static void event_free_inotify_data(sd_event *e, struct inotify_data *d) {
        assert(e);

        if (!d)
                return;

        assert(hashmap_isempty(d->inodes));
        assert(hashmap_isempty(d->wd));

        if (d->buffer_filled > 0)
                LIST_REMOVE(buffered, e->inotify_data_buffered, d);

        hashmap_free(d->inodes);
        hashmap_free(d->wd);

        assert_se(hashmap_remove(e->inotify_data, &d->priority) == d);

        if (d->fd >= 0) {
                if (epoll_ctl(e->epoll_fd, EPOLL_CTL_DEL, d->fd, NULL) < 0)
                        log_debug_errno(errno, "Failed to remove inotify fd from epoll, ignoring: %m");

                safe_close(d->fd);
        }
        free(d);
}

static int event_make_inotify_data(
                sd_event *e,
                int64_t priority,
                struct inotify_data **ret) {

        _cleanup_close_ int fd = -1;
        struct inotify_data *d;
        int r;

        assert(e);

        d = hashmap_get(e->inotify_data, &priority);
        if (d) {
                if (ret)
                        *ret = d;
                return 0;
        }

        fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
        if (fd < 0)
                return -errno;

        fd = fd_move_above_stdio(fd);

        d = new(struct inotify_data, 1);
        if (!d)
                return -ENOMEM;

        *d = (struct inotify_data) {
                .wakeup = WAKEUP_INOTIFY_DATA,
                .fd = TAKE_FD(fd),
                .priority = priority,
        };

        r = hashmap_ensure_put(&e->inotify_data, &uint64_hash_ops, &d->priority, d);
        if (r < 0) {
                d->fd = safe_close(d->fd);
                free(d);
                return r;
        }

        struct epoll_event ev = {
                .events = EPOLLIN,
                .data.ptr = d,
        };

        if (epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, d->fd, &ev) < 0) {
                r = -errno;
                d->fd = safe_close(d->fd); /* let's close this ourselves, as event_free_inotify_data() would otherwise
                                            * remove the fd from the epoll first, which we don't want as we couldn't
                                            * add it in the first place. */
                event_free_inotify_data(e, d);
                return r;
        }

        if (ret)
                *ret = d;

        return 1;
}

static int inode_data_compare(const struct inode_data *x, const struct inode_data *y) {
        int r;

        assert(x);
        assert(y);

        r = CMP(x->dev, y->dev);
        if (r != 0)
                return r;

        return CMP(x->ino, y->ino);
}

static void inode_data_hash_func(const struct inode_data *d, struct siphash *state) {
        assert(d);

        siphash24_compress(&d->dev, sizeof(d->dev), state);
        siphash24_compress(&d->ino, sizeof(d->ino), state);
}

DEFINE_PRIVATE_HASH_OPS(inode_data_hash_ops, struct inode_data, inode_data_hash_func, inode_data_compare);

static void event_free_inode_data(
                sd_event *e,
                struct inode_data *d) {

        assert(e);

        if (!d)
                return;

        assert(!d->event_sources);

        if (d->fd >= 0) {
                LIST_REMOVE(to_close, e->inode_data_to_close, d);
                safe_close(d->fd);
        }

        if (d->inotify_data) {

                if (d->wd >= 0) {
                        if (d->inotify_data->fd >= 0) {
                                /* So here's a problem. At the time this runs the watch descriptor might already be
                                 * invalidated, because an IN_IGNORED event might be queued right at the moment we
                                 * enter the syscall. Hence, whenever we get EINVAL, ignore it entirely, since it's
                                 * a very likely case to happen. */

                                if (inotify_rm_watch(d->inotify_data->fd, d->wd) < 0 && errno != EINVAL)
                                        log_debug_errno(errno, "Failed to remove watch descriptor %i from inotify, ignoring: %m", d->wd);
                        }

                        assert_se(hashmap_remove(d->inotify_data->wd, INT_TO_PTR(d->wd)) == d);
                }

                assert_se(hashmap_remove(d->inotify_data->inodes, d) == d);
        }

        free(d);
}

static void event_gc_inode_data(
                sd_event *e,
                struct inode_data *d) {

        struct inotify_data *inotify_data;

        assert(e);

        if (!d)
                return;

        if (d->event_sources)
                return;

        inotify_data = d->inotify_data;
        event_free_inode_data(e, d);

        if (inotify_data && hashmap_isempty(inotify_data->inodes))
                event_free_inotify_data(e, inotify_data);
}

static int event_make_inode_data(
                sd_event *e,
                struct inotify_data *inotify_data,
                dev_t dev,
                ino_t ino,
                struct inode_data **ret) {

        struct inode_data *d, key;
        int r;

        assert(e);
        assert(inotify_data);

        key = (struct inode_data) {
                .ino = ino,
                .dev = dev,
        };

        d = hashmap_get(inotify_data->inodes, &key);
        if (d) {
                if (ret)
                        *ret = d;

                return 0;
        }

        r = hashmap_ensure_allocated(&inotify_data->inodes, &inode_data_hash_ops);
        if (r < 0)
                return r;

        d = new(struct inode_data, 1);
        if (!d)
                return -ENOMEM;

        *d = (struct inode_data) {
                .dev = dev,
                .ino = ino,
                .wd = -1,
                .fd = -1,
                .inotify_data = inotify_data,
        };

        r = hashmap_put(inotify_data->inodes, d, d);
        if (r < 0) {
                free(d);
                return r;
        }

        if (ret)
                *ret = d;

        return 1;
}
1897
1898static uint32_t inode_data_determine_mask(struct inode_data *d) {
1899 bool excl_unlink = true;
1900 uint32_t combined = 0;
1901 sd_event_source *s;
1902
1903 assert(d);
1904
1905 /* Combines the watch masks of all event sources watching this inode. We generally just OR them together, but
1906 * the IN_EXCL_UNLINK flag is ANDed instead.
1907 *
1908 * Note that we add all sources to the mask here, regardless of whether they are enabled, disabled or oneshot. That's
1909 * because we cannot change the mask anymore after the event source has been created, since the kernel has no
1910 * API for that. Hence we need to subscribe to the maximum mask we might ever be interested in, and suppress
1911 * the events we don't care about on the client side. */
1912
1913 LIST_FOREACH(inotify.by_inode_data, s, d->event_sources) {
1914
1915 if ((s->inotify.mask & IN_EXCL_UNLINK) == 0)
1916 excl_unlink = false;
1917
1918 combined |= s->inotify.mask;
1919 }
1920
1921 return (combined & ~(IN_ONESHOT|IN_DONT_FOLLOW|IN_ONLYDIR|IN_EXCL_UNLINK)) | (excl_unlink ? IN_EXCL_UNLINK : 0);
1922}
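
To make the combination rule concrete, a small worked example (the two sources are hypothetical, not from this file):

/* Source A requests IN_CREATE|IN_EXCL_UNLINK, source B requests IN_DELETE.
 * combined = IN_CREATE|IN_DELETE|IN_EXCL_UNLINK, but excl_unlink becomes false since B lacks the flag,
 * so the realized watch mask is IN_CREATE|IN_DELETE: the shared kernel watch must report the superset,
 * and IN_EXCL_UNLINK may only be set if every source asked for it. */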
1923
1924static int inode_data_realize_watch(sd_event *e, struct inode_data *d) {
1925 uint32_t combined_mask;
1926 int wd, r;
1927
1928 assert(d);
1929 assert(d->fd >= 0);
1930
1931 combined_mask = inode_data_determine_mask(d);
1932
1933 if (d->wd >= 0 && combined_mask == d->combined_mask)
1934 return 0;
1935
1936 r = hashmap_ensure_allocated(&d->inotify_data->wd, NULL);
1937 if (r < 0)
1938 return r;
1939
1940 wd = inotify_add_watch_fd(d->inotify_data->fd, d->fd, combined_mask);
1941 if (wd < 0)
1942 return -errno;
1943
1944 if (d->wd < 0) {
1945 r = hashmap_put(d->inotify_data->wd, INT_TO_PTR(wd), d);
1946 if (r < 0) {
1947 (void) inotify_rm_watch(d->inotify_data->fd, wd);
1948 return r;
1949 }
1950
1951 d->wd = wd;
1952
1953 } else if (d->wd != wd) {
1954
1955 log_debug("Weird, the watch descriptor we already knew for this inode changed?");
1956 (void) inotify_rm_watch(d->inotify_data->fd, wd); /* first argument must be the inotify fd, not the inode fd */
1957 return -EINVAL;
1958 }
1959
1960 d->combined_mask = combined_mask;
1961 return 1;
1962}
1963
1964static int inotify_exit_callback(sd_event_source *s, const struct inotify_event *event, void *userdata) {
1965 assert(s);
1966
1967 return sd_event_exit(sd_event_source_get_event(s), PTR_TO_INT(userdata));
1968}
1969
1970 _public_ int sd_event_add_inotify(
1971 sd_event *e,
1972 sd_event_source **ret,
1973 const char *path,
1974 uint32_t mask,
1975 sd_event_inotify_handler_t callback,
1976 void *userdata) {
1977
1978 struct inotify_data *inotify_data = NULL;
1979 struct inode_data *inode_data = NULL;
1980 _cleanup_close_ int fd = -1;
1981 _cleanup_(source_freep) sd_event_source *s = NULL;
1982 struct stat st;
1983 int r;
1984
1985 assert_return(e, -EINVAL);
1986 assert_return(e = event_resolve(e), -ENOPKG);
1987 assert_return(path, -EINVAL);
1988 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
1989 assert_return(!event_pid_changed(e), -ECHILD);
1990
1991 if (!callback)
1992 callback = inotify_exit_callback;
1993
1994 /* Refuse IN_MASK_ADD since we coalesce watches on the same inode, and hence really don't want to merge
1995 * masks. Or in other words, this whole code exists only to manage IN_MASK_ADD type operations for you, hence
1996 * the user can't use it on us. */
1997 if (mask & IN_MASK_ADD)
1998 return -EINVAL;
1999
2000 fd = open(path, O_PATH|O_CLOEXEC|
2001 (mask & IN_ONLYDIR ? O_DIRECTORY : 0)|
2002 (mask & IN_DONT_FOLLOW ? O_NOFOLLOW : 0));
2003 if (fd < 0)
2004 return -errno;
2005
2006 if (fstat(fd, &st) < 0)
2007 return -errno;
2008
2009 s = source_new(e, !ret, SOURCE_INOTIFY);
2010 if (!s)
2011 return -ENOMEM;
2012
2013 s->enabled = mask & IN_ONESHOT ? SD_EVENT_ONESHOT : SD_EVENT_ON;
2014 s->inotify.mask = mask;
2015 s->inotify.callback = callback;
2016 s->userdata = userdata;
2017
2018 /* Allocate an inotify object for this priority, and an inode object within it */
2019 r = event_make_inotify_data(e, SD_EVENT_PRIORITY_NORMAL, &inotify_data);
2020 if (r < 0)
2021 return r;
2022
2023 r = event_make_inode_data(e, inotify_data, st.st_dev, st.st_ino, &inode_data);
2024 if (r < 0) {
2025 event_free_inotify_data(e, inotify_data);
2026 return r;
2027 }
2028
2029 /* Keep the O_PATH fd around until the first iteration of the loop, so that we can still change the
2030 * priority of the event source until then; for that we need the original inode. */
2031 if (inode_data->fd < 0) {
2032 inode_data->fd = TAKE_FD(fd);
2033 LIST_PREPEND(to_close, e->inode_data_to_close, inode_data);
2034 }
2035
2036 /* Link our event source to the inode data object */
2037 LIST_PREPEND(inotify.by_inode_data, inode_data->event_sources, s);
2038 s->inotify.inode_data = inode_data;
2039
2040 /* Actually realize the watch now */
2041 r = inode_data_realize_watch(e, inode_data);
2042 if (r < 0)
2043 return r;
2044
2045 (void) sd_event_source_set_description(s, path);
2046
2047 if (ret)
2048 *ret = s;
2049 TAKE_PTR(s);
2050
2051 return 0;
2052 }
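
A minimal usage sketch of this call (not part of this file; the handler name and watched path are invented). Passing NULL for ret makes source_new() create the source floating, so it lives as long as the loop:

#include <inttypes.h>
#include <stdio.h>
#include <sys/inotify.h>
#include <systemd/sd-event.h>

/* Hypothetical handler: log each file closed for writing below /tmp. */
static int on_close_write(sd_event_source *s, const struct inotify_event *ev, void *userdata) {
        printf("mask=%" PRIx32 " name=%s\n", ev->mask, ev->len > 0 ? ev->name : "<watched inode>");
        return 0;
}

static int add_tmp_watch(sd_event *e) {
        return sd_event_add_inotify(e, NULL, "/tmp", IN_CLOSE_WRITE, on_close_write, NULL);
}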
2053
2054 static sd_event_source* event_source_free(sd_event_source *s) {
2055 if (!s)
2056 return NULL;
2057
2058 /* Here's a special hack: when we are called from a
2059 * dispatch handler we won't free the event source
2060 * immediately, but we will detach the fd from the
2061 * epoll. This way it is safe for the caller to unref
2062 * the event source and immediately close the fd, but
2063 * we still retain a valid event source object after
2064 * the callback. */
2065
2066 if (s->dispatching) {
2067 if (s->type == SOURCE_IO)
2068 source_io_unregister(s);
2069
2070 source_disconnect(s);
2071 } else
2072 source_free(s);
2073
2074 return NULL;
2075 }
2076
2077 DEFINE_PUBLIC_TRIVIAL_REF_UNREF_FUNC(sd_event_source, sd_event_source, event_source_free);
2078
2079 _public_ int sd_event_source_set_description(sd_event_source *s, const char *description) {
2080 assert_return(s, -EINVAL);
2081 assert_return(!event_pid_changed(s->event), -ECHILD);
2082
2083 return free_and_strdup(&s->description, description);
2084 }
2085
2086 _public_ int sd_event_source_get_description(sd_event_source *s, const char **description) {
2087 assert_return(s, -EINVAL);
2088 assert_return(description, -EINVAL);
2089 assert_return(!event_pid_changed(s->event), -ECHILD);
2090
2091 if (!s->description)
2092 return -ENXIO;
2093
2094 *description = s->description;
2095 return 0;
2096 }
2097
2098 _public_ sd_event *sd_event_source_get_event(sd_event_source *s) {
2099 assert_return(s, NULL);
2100
2101 return s->event;
2102 }
2103
2104 _public_ int sd_event_source_get_pending(sd_event_source *s) {
2105 assert_return(s, -EINVAL);
2106 assert_return(s->type != SOURCE_EXIT, -EDOM);
2107 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
2108 assert_return(!event_pid_changed(s->event), -ECHILD);
2109
2110 return s->pending;
2111 }
2112
2113 _public_ int sd_event_source_get_io_fd(sd_event_source *s) {
2114 assert_return(s, -EINVAL);
2115 assert_return(s->type == SOURCE_IO, -EDOM);
2116 assert_return(!event_pid_changed(s->event), -ECHILD);
2117
2118 return s->io.fd;
2119 }
2120
2121 _public_ int sd_event_source_set_io_fd(sd_event_source *s, int fd) {
2122 int r;
2123
2124 assert_return(s, -EINVAL);
2125 assert_return(fd >= 0, -EBADF);
2126 assert_return(s->type == SOURCE_IO, -EDOM);
2127 assert_return(!event_pid_changed(s->event), -ECHILD);
2128
2129 if (s->io.fd == fd)
2130 return 0;
2131
2132 if (event_source_is_offline(s)) {
2133 s->io.fd = fd;
2134 s->io.registered = false;
2135 } else {
2136 int saved_fd;
2137
2138 saved_fd = s->io.fd;
2139 assert(s->io.registered);
2140
2141 s->io.fd = fd;
2142 s->io.registered = false;
2143
2144 r = source_io_register(s, s->enabled, s->io.events);
2145 if (r < 0) {
2146 s->io.fd = saved_fd;
2147 s->io.registered = true;
2148 return r;
2149 }
2150
2151 (void) epoll_ctl(s->event->epoll_fd, EPOLL_CTL_DEL, saved_fd, NULL);
2152 }
2153
2154 return 0;
2155 }
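
The online/offline split above is what makes fd replacement safe for callers: an online source is re-registered with epoll before the old registration is dropped. A sketch, assuming a hypothetical reconnect() helper that returns a fresh connected fd (and a source that does not own its fd):

#include <unistd.h>
#include <systemd/sd-event.h>

extern int reconnect(void); /* hypothetical: returns a new connected fd or a negative errno */

static int swap_connection(sd_event_source *io_source, int old_fd) {
        int new_fd, r;

        new_fd = reconnect();
        if (new_fd < 0)
                return new_fd;

        r = sd_event_source_set_io_fd(io_source, new_fd); /* re-registers with epoll if online */
        if (r < 0) {
                close(new_fd);
                return r;
        }

        close(old_fd); /* the loop watches new_fd now */
        return 0;
}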
2156
2157_public_ int sd_event_source_get_io_fd_own(sd_event_source *s) {
2158 assert_return(s, -EINVAL);
2159 assert_return(s->type == SOURCE_IO, -EDOM);
2160
2161 return s->io.owned;
2162}
2163
2164_public_ int sd_event_source_set_io_fd_own(sd_event_source *s, int own) {
2165 assert_return(s, -EINVAL);
2166 assert_return(s->type == SOURCE_IO, -EDOM);
2167
2168 s->io.owned = own;
2169 return 0;
2170}
2171
f7262a9f 2172_public_ int sd_event_source_get_io_events(sd_event_source *s, uint32_t* events) {
2173 assert_return(s, -EINVAL);
2174 assert_return(events, -EINVAL);
2175 assert_return(s->type == SOURCE_IO, -EDOM);
2176 assert_return(!event_pid_changed(s->event), -ECHILD);
2177
2178 *events = s->io.events;
2179 return 0;
2180}
2181
f7262a9f 2182_public_ int sd_event_source_set_io_events(sd_event_source *s, uint32_t events) {
2183 int r;
2184
2185 assert_return(s, -EINVAL);
2186 assert_return(s->type == SOURCE_IO, -EDOM);
2a16a986 2187 assert_return(!(events & ~(EPOLLIN|EPOLLOUT|EPOLLRDHUP|EPOLLPRI|EPOLLERR|EPOLLHUP|EPOLLET)), -EINVAL);
da7e457c 2188 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
305f78bf 2189 assert_return(!event_pid_changed(s->event), -ECHILD);
fd38203a 2190
2191 /* edge-triggered updates are never skipped, so we can reset edges */
2192 if (s->io.events == events && !(events & EPOLLET))
2193 return 0;
2194
2195 r = source_set_pending(s, false);
2196 if (r < 0)
2197 return r;
2198
b6d5481b 2199 if (event_source_is_online(s)) {
e4715127 2200 r = source_io_register(s, s->enabled, events);
2201 if (r < 0)
2202 return r;
2203 }
2204
2205 s->io.events = events;
2206
2207 return 0;
2208}
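
Because the registration is level-triggered unless EPOLLET is set, a writable socket would wake the loop on every iteration; a common caller pattern is therefore to request EPOLLOUT only while output is queued. A minimal sketch (the flag is hypothetical):

#include <stdbool.h>
#include <sys/epoll.h>
#include <systemd/sd-event.h>

static int update_io_interest(sd_event_source *s, bool have_pending_output) {
        return sd_event_source_set_io_events(s, EPOLLIN | (have_pending_output ? EPOLLOUT : 0));
}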
2209
f7262a9f 2210_public_ int sd_event_source_get_io_revents(sd_event_source *s, uint32_t* revents) {
2211 assert_return(s, -EINVAL);
2212 assert_return(revents, -EINVAL);
2213 assert_return(s->type == SOURCE_IO, -EDOM);
2214 assert_return(s->pending, -ENODATA);
2215 assert_return(!event_pid_changed(s->event), -ECHILD);
2216
2217 *revents = s->io.revents;
2218 return 0;
2219}
2220
f7262a9f 2221_public_ int sd_event_source_get_signal(sd_event_source *s) {
2222 assert_return(s, -EINVAL);
2223 assert_return(s->type == SOURCE_SIGNAL, -EDOM);
2224 assert_return(!event_pid_changed(s->event), -ECHILD);
2225
2226 return s->signal.sig;
2227}
2228
31927c16 2229_public_ int sd_event_source_get_priority(sd_event_source *s, int64_t *priority) {
2230 assert_return(s, -EINVAL);
2231 assert_return(!event_pid_changed(s->event), -ECHILD);
fd38203a 2232
2233 *priority = s->priority;
2234 return 0;
2235}
2236
31927c16 2237_public_ int sd_event_source_set_priority(sd_event_source *s, int64_t priority) {
2238 bool rm_inotify = false, rm_inode = false;
2239 struct inotify_data *new_inotify_data = NULL;
2240 struct inode_data *new_inode_data = NULL;
2241 int r;
2242
305f78bf 2243 assert_return(s, -EINVAL);
da7e457c 2244 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
305f78bf 2245 assert_return(!event_pid_changed(s->event), -ECHILD);
2246
2247 if (s->priority == priority)
2248 return 0;
2249
2250 if (s->type == SOURCE_INOTIFY) {
2251 struct inode_data *old_inode_data;
2252
2253 assert(s->inotify.inode_data);
2254 old_inode_data = s->inotify.inode_data;
2255
2256 /* We need the original fd to change the priority. If we don't have it we can't change the
2257 * priority anymore. Note that we close any fds when entering the next event loop iteration, i.e. for inotify
2258 * events we allow priority changes only until the first following iteration. */
2259 if (old_inode_data->fd < 0)
2260 return -EOPNOTSUPP;
2261
2262 r = event_make_inotify_data(s->event, priority, &new_inotify_data);
2263 if (r < 0)
2264 return r;
2265 rm_inotify = r > 0;
2266
2267 r = event_make_inode_data(s->event, new_inotify_data, old_inode_data->dev, old_inode_data->ino, &new_inode_data);
2268 if (r < 0)
2269 goto fail;
2270 rm_inode = r > 0;
2271
2272 if (new_inode_data->fd < 0) {
2273 /* Duplicate the fd for the new inode object if we don't have any yet */
2274 new_inode_data->fd = fcntl(old_inode_data->fd, F_DUPFD_CLOEXEC, 3);
2275 if (new_inode_data->fd < 0) {
2276 r = -errno;
2277 goto fail;
2278 }
2279
2280 LIST_PREPEND(to_close, s->event->inode_data_to_close, new_inode_data);
2281 }
2282
2283 /* Move the event source to the new inode data structure */
2284 LIST_REMOVE(inotify.by_inode_data, old_inode_data->event_sources, s);
2285 LIST_PREPEND(inotify.by_inode_data, new_inode_data->event_sources, s);
2286 s->inotify.inode_data = new_inode_data;
2287
2288 /* Now create the new watch */
2289 r = inode_data_realize_watch(s->event, new_inode_data);
2290 if (r < 0) {
2291 /* Move it back */
2292 LIST_REMOVE(inotify.by_inode_data, new_inode_data->event_sources, s);
2293 LIST_PREPEND(inotify.by_inode_data, old_inode_data->event_sources, s);
2294 s->inotify.inode_data = old_inode_data;
2295 goto fail;
2296 }
2297
2298 s->priority = priority;
2299
2300 event_gc_inode_data(s->event, old_inode_data);
2301
b6d5481b 2302 } else if (s->type == SOURCE_SIGNAL && event_source_is_online(s)) {
2303 struct signal_data *old, *d;
2304
2305 /* Move us from the signalfd belonging to the old
2306 * priority to the signalfd of the new priority */
2307
2308 assert_se(old = hashmap_get(s->event->signal_data, &s->priority));
2309
2310 s->priority = priority;
2311
2312 r = event_make_signal_data(s->event, s->signal.sig, &d);
2313 if (r < 0) {
2314 s->priority = old->priority;
2315 return r;
2316 }
2317
2318 event_unmask_signal_data(s->event, old, s->signal.sig);
2319 } else
2320 s->priority = priority;
fd38203a 2321
e1951c16 2322 event_source_pp_prioq_reshuffle(s);
fd38203a 2323
6203e07a
LP
2324 if (s->type == SOURCE_EXIT)
2325 prioq_reshuffle(s->event->exit, s, &s->exit.prioq_index);
305f78bf 2326
fd38203a 2327 return 0;
2328
2329fail:
2330 if (rm_inode)
2331 event_free_inode_data(s->event, new_inode_data);
2332
2333 if (rm_inotify)
2334 event_free_inotify_data(s->event, new_inotify_data);
2335
2336 return r;
2337}
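
For callers, lower priority values dispatch first; a sketch bumping a source ahead of the default (the constants are from sd-event.h):

#include <systemd/sd-event.h>

static int make_important(sd_event_source *s) {
        /* SD_EVENT_PRIORITY_IMPORTANT sorts before SD_EVENT_PRIORITY_NORMAL */
        return sd_event_source_set_priority(s, SD_EVENT_PRIORITY_IMPORTANT);
}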
2338
cad143a8 2339_public_ int sd_event_source_get_enabled(sd_event_source *s, int *ret) {
305f78bf 2340 assert_return(s, -EINVAL);
305f78bf 2341 assert_return(!event_pid_changed(s->event), -ECHILD);
fd38203a 2342
2343 if (ret)
2344 *ret = s->enabled;
2345
08c1eb0e 2346 return s->enabled != SD_EVENT_OFF;
2347}
2348
2349static int event_source_offline(
2350 sd_event_source *s,
2351 int enabled,
2352 bool ratelimited) {
2353
2354 bool was_offline;
2355 int r;
2356
ddfde737 2357 assert(s);
b6d5481b 2358 assert(enabled == SD_EVENT_OFF || ratelimited);
fd38203a 2359
ddfde737 2360 /* Unset the pending flag when this event source is disabled */
2361 if (s->enabled != SD_EVENT_OFF &&
2362 enabled == SD_EVENT_OFF &&
2363 !IN_SET(s->type, SOURCE_DEFER, SOURCE_EXIT)) {
2364 r = source_set_pending(s, false);
2365 if (r < 0)
2366 return r;
2367 }
cc567911 2368
2369 was_offline = event_source_is_offline(s);
2370 s->enabled = enabled;
2371 s->ratelimited = ratelimited;
fd38203a 2372
ddfde737 2373 switch (s->type) {
fd38203a 2374
2375 case SOURCE_IO:
2376 source_io_unregister(s);
2377 break;
ac989a78 2378
2379 case SOURCE_TIME_REALTIME:
2380 case SOURCE_TIME_BOOTTIME:
2381 case SOURCE_TIME_MONOTONIC:
2382 case SOURCE_TIME_REALTIME_ALARM:
2383 case SOURCE_TIME_BOOTTIME_ALARM:
2384 event_source_time_prioq_reshuffle(s);
2385 break;
fd38203a 2386
2387 case SOURCE_SIGNAL:
2388 event_gc_signal_data(s->event, &s->priority, s->signal.sig);
2389 break;
fd38203a 2390
ddfde737 2391 case SOURCE_CHILD:
2392 if (!was_offline) {
2393 assert(s->event->n_online_child_sources > 0);
2394 s->event->n_online_child_sources--;
2395 }
fd38203a 2396
2397 if (EVENT_SOURCE_WATCH_PIDFD(s))
2398 source_child_pidfd_unregister(s);
2399 else
2400 event_gc_signal_data(s->event, &s->priority, SIGCHLD);
2401 break;
4807d2d0 2402
2403 case SOURCE_EXIT:
2404 prioq_reshuffle(s->event->exit, s, &s->exit.prioq_index);
2405 break;
fd38203a 2406
2407 case SOURCE_DEFER:
2408 case SOURCE_POST:
2409 case SOURCE_INOTIFY:
2410 break;
fd38203a 2411
2412 default:
2413 assert_not_reached("Wut? I shouldn't exist.");
2414 }
fd38203a 2415
b6d5481b 2416 return 1;
ddfde737 2417}
f8f3f926 2418
2419static int event_source_online(
2420 sd_event_source *s,
2421 int enabled,
2422 bool ratelimited) {
2423
2424 bool was_online;
ddfde737 2425 int r;
fd38203a 2426
ddfde737 2427 assert(s);
b6d5481b 2428 assert(enabled != SD_EVENT_OFF || !ratelimited);
305f78bf 2429
ddfde737 2430 /* Unset the pending flag when this event source is enabled */
2431 if (s->enabled == SD_EVENT_OFF &&
2432 enabled != SD_EVENT_OFF &&
2433 !IN_SET(s->type, SOURCE_DEFER, SOURCE_EXIT)) {
2434 r = source_set_pending(s, false);
2435 if (r < 0)
2436 return r;
2437 }
9d3e3aa5 2438
2439 /* Are we really ready for onlining? */
2440 if (enabled == SD_EVENT_OFF || ratelimited) {
2441 /* Nope, we are not ready for onlining, then just update the precise state and exit */
2442 s->enabled = enabled;
2443 s->ratelimited = ratelimited;
2444 return 0;
2445 }
2446
2447 was_online = event_source_is_online(s);
2448
ddfde737 2449 switch (s->type) {
ddfde737 2450 case SOURCE_IO:
b6d5481b 2451 r = source_io_register(s, enabled, s->io.events);
d2eafe61 2452 if (r < 0)
ddfde737 2453 return r;
ddfde737 2454 break;
fd38203a 2455
2456 case SOURCE_SIGNAL:
2457 r = event_make_signal_data(s->event, s->signal.sig, NULL);
2458 if (r < 0) {
2459 event_gc_signal_data(s->event, &s->priority, s->signal.sig);
2460 return r;
2461 }
fd38203a 2462
ddfde737 2463 break;
fd38203a 2464
ddfde737 2465 case SOURCE_CHILD:
2466 if (EVENT_SOURCE_WATCH_PIDFD(s)) {
2467 /* yes, we have pidfd */
9da4cb2b 2468
b6d5481b 2469 r = source_child_pidfd_register(s, enabled);
ac9f2640 2470 if (r < 0)
9da4cb2b 2471 return r;
2472 } else {
2473 /* no pidfd, or something other to watch for than WEXITED */
9da4cb2b 2474
2475 r = event_make_signal_data(s->event, SIGCHLD, NULL);
2476 if (r < 0) {
2477 event_gc_signal_data(s->event, &s->priority, SIGCHLD);
2478 return r;
2479 }
2480 }
fd38203a 2481
2482 if (!was_online)
2483 s->event->n_online_child_sources++;
ddfde737 2484 break;
4807d2d0 2485
2486 case SOURCE_TIME_REALTIME:
2487 case SOURCE_TIME_BOOTTIME:
2488 case SOURCE_TIME_MONOTONIC:
2489 case SOURCE_TIME_REALTIME_ALARM:
2490 case SOURCE_TIME_BOOTTIME_ALARM:
ddfde737 2491 case SOURCE_EXIT:
2492 case SOURCE_DEFER:
2493 case SOURCE_POST:
2494 case SOURCE_INOTIFY:
2495 break;
9da4cb2b 2496
2497 default:
2498 assert_not_reached("Wut? I shouldn't exist.");
2499 }
f8f3f926 2500
2501 s->enabled = enabled;
2502 s->ratelimited = ratelimited;
2503
2504 /* Non-failing operations below */
2505 switch (s->type) {
2506 case SOURCE_TIME_REALTIME:
2507 case SOURCE_TIME_BOOTTIME:
2508 case SOURCE_TIME_MONOTONIC:
2509 case SOURCE_TIME_REALTIME_ALARM:
2510 case SOURCE_TIME_BOOTTIME_ALARM:
2511 event_source_time_prioq_reshuffle(s);
2512 break;
2513
2514 case SOURCE_EXIT:
2515 prioq_reshuffle(s->event->exit, s, &s->exit.prioq_index);
2516 break;
2517
2518 default:
2519 break;
2520 }
2521
b6d5481b 2522 return 1;
2523}
2524
2525_public_ int sd_event_source_set_enabled(sd_event_source *s, int m) {
2526 int r;
9da4cb2b 2527
2528 assert_return(s, -EINVAL);
2529 assert_return(IN_SET(m, SD_EVENT_OFF, SD_EVENT_ON, SD_EVENT_ONESHOT), -EINVAL);
2530 assert_return(!event_pid_changed(s->event), -ECHILD);
fd38203a 2531
2532 /* If we are dead anyway, we are fine with turning off sources, but everything else needs to fail. */
2533 if (s->event->state == SD_EVENT_FINISHED)
2534 return m == SD_EVENT_OFF ? 0 : -ESTALE;
305f78bf 2535
2536 if (s->enabled == m) /* No change? */
2537 return 0;
9d3e3aa5 2538
ddfde737 2539 if (m == SD_EVENT_OFF)
b6d5481b 2540 r = event_source_offline(s, m, s->ratelimited);
2541 else {
2542 if (s->enabled != SD_EVENT_OFF) {
2543 /* Switching from "on" to "oneshot" or back? If that's the case, we can take a shortcut, the
2544 * event source is already enabled after all. */
2545 s->enabled = m;
2546 return 0;
fd38203a 2547 }
ddfde737 2548
b6d5481b 2549 r = event_source_online(s, m, s->ratelimited);
fd38203a 2550 }
2551 if (r < 0)
2552 return r;
fd38203a 2553
e1951c16 2554 event_source_pp_prioq_reshuffle(s);
2555 return 0;
2556}
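
A caller-side sketch of the three states handled above: OFF parks the source, ON keeps it firing, and ONESHOT disables it again after the next dispatch:

#include <systemd/sd-event.h>

static int pause_source(sd_event_source *s) {
        return sd_event_source_set_enabled(s, SD_EVENT_OFF);
}

static int fire_once(sd_event_source *s) {
        return sd_event_source_set_enabled(s, SD_EVENT_ONESHOT); /* auto-off after one dispatch */
}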
2557
f7262a9f 2558_public_ int sd_event_source_get_time(sd_event_source *s, uint64_t *usec) {
2559 assert_return(s, -EINVAL);
2560 assert_return(usec, -EINVAL);
6a0f1f6d 2561 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
305f78bf 2562 assert_return(!event_pid_changed(s->event), -ECHILD);
2563
2564 *usec = s->time.next;
2565 return 0;
2566}
2567
f7262a9f 2568_public_ int sd_event_source_set_time(sd_event_source *s, uint64_t usec) {
2a0dc6cd 2569 int r;
6a0f1f6d 2570
305f78bf 2571 assert_return(s, -EINVAL);
6a0f1f6d 2572 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
da7e457c 2573 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
305f78bf 2574 assert_return(!event_pid_changed(s->event), -ECHILD);
fd38203a 2575
2576 r = source_set_pending(s, false);
2577 if (r < 0)
2578 return r;
2576a19e 2579
2a0dc6cd 2580 s->time.next = usec;
fd38203a 2581
e1951c16 2582 event_source_time_prioq_reshuffle(s);
2583 return 0;
2584}
2585
2586_public_ int sd_event_source_set_time_relative(sd_event_source *s, uint64_t usec) {
2587 usec_t t;
2588 int r;
2589
2590 assert_return(s, -EINVAL);
2591 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
2592
2593 r = sd_event_now(s->event, event_source_type_to_clock(s->type), &t);
2594 if (r < 0)
2595 return r;
2596
2597 if (usec >= USEC_INFINITY - t)
2598 return -EOVERFLOW;
2599
2600 return sd_event_source_set_time(s, t + usec);
2601}
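
A sketch arming a timer source five seconds from now on its own clock; per the overflow check above, a sum beyond the clock range yields -EOVERFLOW instead of wrapping:

#include <stdint.h>
#include <systemd/sd-event.h>

static int arm_in_five_seconds(sd_event_source *timer_source) {
        return sd_event_source_set_time_relative(timer_source, (uint64_t) 5 * 1000000); /* 5 s in µs */
}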
2602
f7262a9f 2603_public_ int sd_event_source_get_time_accuracy(sd_event_source *s, uint64_t *usec) {
2604 assert_return(s, -EINVAL);
2605 assert_return(usec, -EINVAL);
6a0f1f6d 2606 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
2607 assert_return(!event_pid_changed(s->event), -ECHILD);
2608
2609 *usec = s->time.accuracy;
2610 return 0;
2611}
2612
f7262a9f 2613_public_ int sd_event_source_set_time_accuracy(sd_event_source *s, uint64_t usec) {
2a0dc6cd 2614 int r;
6a0f1f6d 2615
2616 assert_return(s, -EINVAL);
2617 assert_return(usec != (uint64_t) -1, -EINVAL);
6a0f1f6d 2618 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
da7e457c 2619 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
305f78bf 2620 assert_return(!event_pid_changed(s->event), -ECHILD);
eaa3cbef 2621
2622 r = source_set_pending(s, false);
2623 if (r < 0)
2624 return r;
2625
2626 if (usec == 0)
2627 usec = DEFAULT_ACCURACY_USEC;
2628
2629 s->time.accuracy = usec;
2630
e1951c16 2631 event_source_time_prioq_reshuffle(s);
2632 return 0;
2633}
2634
2635_public_ int sd_event_source_get_time_clock(sd_event_source *s, clockid_t *clock) {
2636 assert_return(s, -EINVAL);
2637 assert_return(clock, -EINVAL);
2638 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
2639 assert_return(!event_pid_changed(s->event), -ECHILD);
eaa3cbef 2640
6a0f1f6d 2641 *clock = event_source_type_to_clock(s->type);
2642 return 0;
2643}
2644
f7262a9f 2645_public_ int sd_event_source_get_child_pid(sd_event_source *s, pid_t *pid) {
2646 assert_return(s, -EINVAL);
2647 assert_return(pid, -EINVAL);
2648 assert_return(s->type == SOURCE_CHILD, -EDOM);
2649 assert_return(!event_pid_changed(s->event), -ECHILD);
2650
2651 *pid = s->child.pid;
2652 return 0;
2653}
2654
2655_public_ int sd_event_source_get_child_pidfd(sd_event_source *s) {
2656 assert_return(s, -EINVAL);
2657 assert_return(s->type == SOURCE_CHILD, -EDOM);
2658 assert_return(!event_pid_changed(s->event), -ECHILD);
2659
2660 if (s->child.pidfd < 0)
2661 return -EOPNOTSUPP;
2662
2663 return s->child.pidfd;
2664}
2665
2666_public_ int sd_event_source_send_child_signal(sd_event_source *s, int sig, const siginfo_t *si, unsigned flags) {
2667 assert_return(s, -EINVAL);
2668 assert_return(s->type == SOURCE_CHILD, -EDOM);
2669 assert_return(!event_pid_changed(s->event), -ECHILD);
2670 assert_return(SIGNAL_VALID(sig), -EINVAL);
2671
2672 /* If we already have seen indication the process exited refuse sending a signal early. This way we
2673 * can be sure we don't accidentally kill the wrong process on PID reuse when pidfds are not
2674 * available. */
2675 if (s->child.exited)
2676 return -ESRCH;
2677
2678 if (s->child.pidfd >= 0) {
2679 siginfo_t copy;
2680
2681 /* pidfd_send_signal() changes the siginfo_t argument. This is weird, let's hence copy the
2682 * structure here */
2683 if (si)
2684 copy = *si;
2685
2686 if (pidfd_send_signal(s->child.pidfd, sig, si ? &copy : NULL, 0) < 0) {
2687 /* Let's propagate the error only if the system call is not implemented or prohibited */
2688 if (!ERRNO_IS_NOT_SUPPORTED(errno) && !ERRNO_IS_PRIVILEGE(errno))
2689 return -errno;
2690 } else
2691 return 0;
2692 }
2693
2694 /* Flags are only supported for pidfd_send_signal(), not for rt_sigqueueinfo(), hence let's refuse
2695 * this here. */
2696 if (flags != 0)
2697 return -EOPNOTSUPP;
2698
2699 if (si) {
2700 /* We use rt_sigqueueinfo() only if siginfo_t is specified. */
2701 siginfo_t copy = *si;
2702
2703 if (rt_sigqueueinfo(s->child.pid, sig, &copy) < 0)
2704 return -errno;
2705 } else if (kill(s->child.pid, sig) < 0)
2706 return -errno;
2707
2708 return 0;
2709}
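
A sketch asking a watched child to terminate; thanks to the pidfd path above this cannot hit a recycled PID, and a child already observed as exited is refused with -ESRCH:

#include <signal.h>
#include <systemd/sd-event.h>

static int terminate_child(sd_event_source *child_source) {
        return sd_event_source_send_child_signal(child_source, SIGTERM, NULL, 0);
}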
2710
2711_public_ int sd_event_source_get_child_pidfd_own(sd_event_source *s) {
2712 assert_return(s, -EINVAL);
2713 assert_return(s->type == SOURCE_CHILD, -EDOM);
2714
2715 if (s->child.pidfd < 0)
2716 return -EOPNOTSUPP;
2717
2718 return s->child.pidfd_owned;
2719}
2720
2721_public_ int sd_event_source_set_child_pidfd_own(sd_event_source *s, int own) {
2722 assert_return(s, -EINVAL);
2723 assert_return(s->type == SOURCE_CHILD, -EDOM);
2724
2725 if (s->child.pidfd < 0)
2726 return -EOPNOTSUPP;
2727
2728 s->child.pidfd_owned = own;
2729 return 0;
2730}
2731
2732_public_ int sd_event_source_get_child_process_own(sd_event_source *s) {
2733 assert_return(s, -EINVAL);
2734 assert_return(s->type == SOURCE_CHILD, -EDOM);
2735
2736 return s->child.process_owned;
2737}
2738
2739_public_ int sd_event_source_set_child_process_own(sd_event_source *s, int own) {
2740 assert_return(s, -EINVAL);
2741 assert_return(s->type == SOURCE_CHILD, -EDOM);
2742
2743 s->child.process_owned = own;
2744 return 0;
2745}
2746
2747_public_ int sd_event_source_get_inotify_mask(sd_event_source *s, uint32_t *mask) {
2748 assert_return(s, -EINVAL);
2749 assert_return(mask, -EINVAL);
2750 assert_return(s->type == SOURCE_INOTIFY, -EDOM);
2751 assert_return(!event_pid_changed(s->event), -ECHILD);
2752
2753 *mask = s->inotify.mask;
2754 return 0;
2755}
2756
718db961 2757_public_ int sd_event_source_set_prepare(sd_event_source *s, sd_event_handler_t callback) {
2758 int r;
2759
da7e457c 2760 assert_return(s, -EINVAL);
6203e07a 2761 assert_return(s->type != SOURCE_EXIT, -EDOM);
2762 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
2763 assert_return(!event_pid_changed(s->event), -ECHILD);
2764
2765 if (s->prepare == callback)
2766 return 0;
2767
2768 if (callback && s->prepare) {
2769 s->prepare = callback;
2770 return 0;
2771 }
2772
2773 r = prioq_ensure_allocated(&s->event->prepare, prepare_prioq_compare);
2774 if (r < 0)
2775 return r;
2776
2777 s->prepare = callback;
2778
2779 if (callback) {
2780 r = prioq_put(s->event->prepare, s, &s->prepare_index);
2781 if (r < 0)
2782 return r;
2783 } else
2784 prioq_remove(s->event->prepare, s, &s->prepare_index);
2785
2786 return 0;
2787}
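
A sketch of a prepare callback (the queue type is hypothetical): it runs before each poll, which makes it a natural place to enable a source only when there is work, so the loop can otherwise sleep:

#include <stddef.h>
#include <systemd/sd-event.h>

struct work_queue { size_t n_items; }; /* hypothetical */

static int prepare_cb(sd_event_source *s, void *userdata) {
        struct work_queue *q = userdata;

        return sd_event_source_set_enabled(s, q->n_items > 0 ? SD_EVENT_ONESHOT : SD_EVENT_OFF);
}

/* registered with: sd_event_source_set_prepare(s, prepare_cb) */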
2788
f7262a9f 2789_public_ void* sd_event_source_get_userdata(sd_event_source *s) {
da7e457c 2790 assert_return(s, NULL);
2791
2792 return s->userdata;
2793}
2794
2795_public_ void *sd_event_source_set_userdata(sd_event_source *s, void *userdata) {
2796 void *ret;
2797
2798 assert_return(s, NULL);
2799
2800 ret = s->userdata;
2801 s->userdata = userdata;
2802
2803 return ret;
2804}
2805
2806static int event_source_enter_ratelimited(sd_event_source *s) {
2807 int r;
2808
2809 assert(s);
2810
2811 /* When an event source becomes ratelimited, we place it in the CLOCK_MONOTONIC priority queue, with
2812 * the end of the rate limit time window, much as if it was a timer event source. */
2813
2814 if (s->ratelimited)
2815 return 0; /* Already ratelimited, this is a NOP hence */
2816
2817 /* Make sure we can install a CLOCK_MONOTONIC event further down. */
2818 r = setup_clock_data(s->event, &s->event->monotonic, CLOCK_MONOTONIC);
2819 if (r < 0)
2820 return r;
2821
2822 /* Timer event sources are already using the earliest/latest queues for the timer scheduling. Let's
2823 * first remove them from the prioq appropriate for their own clock, so that we can use the prioq
2824 * fields of the event source then for adding it to the CLOCK_MONOTONIC prioq instead. */
2825 if (EVENT_SOURCE_IS_TIME(s->type))
2826 event_source_time_prioq_remove(s, event_get_clock_data(s->event, s->type));
2827
2828 /* Now, let's add the event source to the monotonic clock instead */
2829 r = event_source_time_prioq_put(s, &s->event->monotonic);
2830 if (r < 0)
2831 goto fail;
2832
2833 /* And let's take the event source officially offline */
2834 r = event_source_offline(s, s->enabled, /* ratelimited= */ true);
2835 if (r < 0) {
2836 event_source_time_prioq_remove(s, &s->event->monotonic);
2837 goto fail;
2838 }
2839
2840 event_source_pp_prioq_reshuffle(s);
2841
2842 log_debug("Event source %p (%s) entered rate limit state.", s, strna(s->description));
2843 return 0;
2844
2845fail:
2846 /* Reinstall time event sources in the priority queue as before. This shouldn't fail, since the queue
2847 * space for it should already be allocated. */
2848 if (EVENT_SOURCE_IS_TIME(s->type))
2849 assert_se(event_source_time_prioq_put(s, event_get_clock_data(s->event, s->type)) >= 0);
2850
2851 return r;
2852}
2853
2854static int event_source_leave_ratelimit(sd_event_source *s) {
2855 int r;
2856
2857 assert(s);
2858
2859 if (!s->ratelimited)
2860 return 0;
2861
2862 /* Let's take the event source out of the monotonic prioq first. */
2863 event_source_time_prioq_remove(s, &s->event->monotonic);
2864
2865 /* Let's then add the event source to its native clock prioq again — if this is a timer event source */
2866 if (EVENT_SOURCE_IS_TIME(s->type)) {
2867 r = event_source_time_prioq_put(s, event_get_clock_data(s->event, s->type));
2868 if (r < 0)
2869 goto fail;
2870 }
2871
2872 /* Let's try to take it online again. */
2873 r = event_source_online(s, s->enabled, /* ratelimited= */ false);
2874 if (r < 0) {
2875 /* Do something roughly sensible when this failed: undo the two prioq ops above */
2876 if (EVENT_SOURCE_IS_TIME(s->type))
2877 event_source_time_prioq_remove(s, event_get_clock_data(s->event, s->type));
2878
2879 goto fail;
2880 }
2881
2882 event_source_pp_prioq_reshuffle(s);
2883 ratelimit_reset(&s->rate_limit);
2884
2885 log_debug("Event source %p (%s) left rate limit state.", s, strna(s->description));
2886 return 0;
2887
2888fail:
2889 /* Do something somewhat reasonable when we cannot move an event sources out of ratelimited mode:
2890 * simply put it back in it, maybe we can then process it more successfully next iteration. */
2891 assert_se(event_source_time_prioq_put(s, &s->event->monotonic) >= 0);
2892
2893 return r;
2894}
2895
2896static usec_t sleep_between(sd_event *e, usec_t a, usec_t b) {
2897 usec_t c;
2898 assert(e);
2899 assert(a <= b);
2900
2901 if (a <= 0)
2902 return 0;
2903 if (a >= USEC_INFINITY)
2904 return USEC_INFINITY;
2905
2906 if (b <= a + 1)
2907 return a;
2908
2909 initialize_perturb(e);
2910
2911 /*
2912 Find a good time to wake up again between times a and b. We
2913 have two goals here:
2914
2915 a) We want to wake up as seldom as possible, hence prefer
2916 later times over earlier times.
2917
2918 b) But if we have to wake up, then let's make sure to
2919 dispatch as much as possible on the entire system.
2920
2921 We implement this by waking up everywhere at the same time
2922 within any given minute if we can, synchronised via the
2923 perturbation value determined from the boot ID. If we can't,
2924 then we try to find the same spot in every 10s, then 1s and
2925 then 250ms steps. Otherwise, we pick the last possible time
2926 to wake up.
2927 */
2928
2929 c = (b / USEC_PER_MINUTE) * USEC_PER_MINUTE + e->perturb;
2930 if (c >= b) {
2931 if (_unlikely_(c < USEC_PER_MINUTE))
2932 return b;
2933
2934 c -= USEC_PER_MINUTE;
2935 }
2936
2937 if (c >= a)
2938 return c;
2939
2940 c = (b / (USEC_PER_SEC*10)) * (USEC_PER_SEC*10) + (e->perturb % (USEC_PER_SEC*10));
2941 if (c >= b) {
2942 if (_unlikely_(c < USEC_PER_SEC*10))
2943 return b;
2944
2945 c -= USEC_PER_SEC*10;
2946 }
2947
2948 if (c >= a)
2949 return c;
2950
2951 c = (b / USEC_PER_SEC) * USEC_PER_SEC + (e->perturb % USEC_PER_SEC);
2952 if (c >= b) {
2953 if (_unlikely_(c < USEC_PER_SEC))
2954 return b;
2955
2956 c -= USEC_PER_SEC;
2957 }
2958
2959 if (c >= a)
2960 return c;
2961
2962 c = (b / (USEC_PER_MSEC*250)) * (USEC_PER_MSEC*250) + (e->perturb % (USEC_PER_MSEC*250));
2963 if (c >= b) {
2964 if (_unlikely_(c < USEC_PER_MSEC*250))
2965 return b;
2966
2967 c -= USEC_PER_MSEC*250;
2968 }
2969
2970 if (c >= a)
2971 return c;
2972
2973 return b;
2974}
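
To make the rounding ladder concrete, a worked example with invented numbers (all values in µs internally, shown in seconds):

/* Suppose perturb = 17 s, a = 95 s, b = 130 s.
 * Minute step: c = (130/60)*60 + 17 = 137; c >= b, so c -= 60 -> 77, which is below a: rejected.
 * 10 s step:   c = (130/10)*10 + (17 % 10) = 137; c >= b, so c -= 10 -> 127, which lies in [a, b]
 * and is returned, i.e. every loop on this boot wakes at the same 7-second offset of each 10 s slot. */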
2975
2976static int event_arm_timer(
2977 sd_event *e,
6a0f1f6d 2978 struct clock_data *d) {
2979
2980 struct itimerspec its = {};
2981 sd_event_source *a, *b;
2982 usec_t t;
fd38203a 2983
cde93897 2984 assert(e);
6a0f1f6d 2985 assert(d);
fd38203a 2986
d06441da 2987 if (!d->needs_rearm)
2988 return 0;
2989 else
2990 d->needs_rearm = false;
2991
6a0f1f6d 2992 a = prioq_peek(d->earliest);
b6d5481b 2993 if (!a || a->enabled == SD_EVENT_OFF || time_event_source_next(a) == USEC_INFINITY) {
72aedc1e 2994
6a0f1f6d 2995 if (d->fd < 0)
2996 return 0;
2997
3a43da28 2998 if (d->next == USEC_INFINITY)
2999 return 0;
3000
3001 /* disarm */
3002 if (timerfd_settime(d->fd, TFD_TIMER_ABSTIME, &its, NULL) < 0)
3003 return -errno;
72aedc1e 3004
3a43da28 3005 d->next = USEC_INFINITY;
fd38203a 3006 return 0;
72aedc1e 3007 }
fd38203a 3008
6a0f1f6d 3009 b = prioq_peek(d->latest);
baf76283 3010 assert_se(b && b->enabled != SD_EVENT_OFF);
c2ba3ad6 3011
b6d5481b 3012 t = sleep_between(e, time_event_source_next(a), time_event_source_latest(b));
6a0f1f6d 3013 if (d->next == t)
3014 return 0;
3015
6a0f1f6d 3016 assert_se(d->fd >= 0);
fd38203a 3017
c2ba3ad6 3018 if (t == 0) {
3019 /* We don't want to disarm here, just to mean some time looooong ago. */
3020 its.it_value.tv_sec = 0;
3021 its.it_value.tv_nsec = 1;
3022 } else
c2ba3ad6 3023 timespec_store(&its.it_value, t);
fd38203a 3024
15c689d7 3025 if (timerfd_settime(d->fd, TFD_TIMER_ABSTIME, &its, NULL) < 0)
cde93897 3026 return -errno;
fd38203a 3027
6a0f1f6d 3028 d->next = t;
3029 return 0;
3030}
3031
9a800b56 3032static int process_io(sd_event *e, sd_event_source *s, uint32_t revents) {
3033 assert(e);
3034 assert(s);
3035 assert(s->type == SOURCE_IO);
3036
3037 /* If the event source was already pending, we just OR in the
3038 * new revents, otherwise we reset the value. The ORing is
3039 * necessary to handle EPOLLONESHOT events properly where
3040 * readability might happen independently of writability, and
3041 * we need to keep track of both */
3042
3043 if (s->pending)
3044 s->io.revents |= revents;
3045 else
3046 s->io.revents = revents;
fd38203a 3047
3048 return source_set_pending(s, true);
3049}
3050
72aedc1e 3051static int flush_timer(sd_event *e, int fd, uint32_t events, usec_t *next) {
3052 uint64_t x;
3053 ssize_t ss;
3054
3055 assert(e);
da7e457c 3056 assert(fd >= 0);
72aedc1e 3057
305f78bf 3058 assert_return(events == EPOLLIN, -EIO);
3059
3060 ss = read(fd, &x, sizeof(x));
3061 if (ss < 0) {
945c2931 3062 if (IN_SET(errno, EAGAIN, EINTR))
3063 return 0;
3064
3065 return -errno;
3066 }
3067
8d35dae7 3068 if (_unlikely_(ss != sizeof(x)))
3069 return -EIO;
3070
cde93897 3071 if (next)
3a43da28 3072 *next = USEC_INFINITY;
72aedc1e 3073
3074 return 0;
3075}
3076
3077static int process_timer(
3078 sd_event *e,
3079 usec_t n,
6a0f1f6d 3080 struct clock_data *d) {
305f78bf 3081
3082 sd_event_source *s;
3083 int r;
3084
3085 assert(e);
6a0f1f6d 3086 assert(d);
3087
3088 for (;;) {
6a0f1f6d 3089 s = prioq_peek(d->earliest);
3090 if (!s || time_event_source_next(s) > n)
3091 break;
3092
3093 if (s->ratelimited) {
3094 /* This is an event source whose ratelimit window has ended. Let's turn it on
3095 * again. */
3096 assert(s->ratelimited);
3097
3098 r = event_source_leave_ratelimit(s);
3099 if (r < 0)
3100 return r;
3101
3102 continue;
3103 }
3104
3105 if (s->enabled == SD_EVENT_OFF || s->pending)
3106 break;
3107
3108 r = source_set_pending(s, true);
3109 if (r < 0)
3110 return r;
3111
e1951c16 3112 event_source_time_prioq_reshuffle(s);
3113 }
3114
3115 return 0;
3116}
3117
3118static int process_child(sd_event *e) {
3119 sd_event_source *s;
3120 int r;
3121
3122 assert(e);
3123
3124 e->need_process_child = false;
3125
3126 /*
3127 So, this is ugly. We iteratively invoke waitid() with P_PID
3128 + WNOHANG for each PID we wait for, instead of using
3129 P_ALL. This is because we only want to get child
3130 information of very specific child processes, and not all
3131 of them. We might not have processed the SIGCHLD event of a
3132 previous invocation and we don't want to maintain an
3133 unbounded *per-child* event queue, hence we really don't
3134 want anything flushed out of the kernel's queue that we
3135 don't care about. Since this is O(n) this means that if you
3136 have a lot of processes you probably want to handle SIGCHLD
3137 yourself.
3138
3139 We do not reap the children here (by using WNOWAIT); that
3140 is only done after the event source is dispatched, so that
3141 the callback still sees the process as a zombie.
3142 */
3143
90e74a66 3144 HASHMAP_FOREACH(s, e->child_sources) {
3145 assert(s->type == SOURCE_CHILD);
3146
3147 if (s->pending)
3148 continue;
3149
b6d5481b 3150 if (event_source_is_offline(s))
3151 continue;
3152
3153 if (s->child.exited)
3154 continue;
3155
3156 if (EVENT_SOURCE_WATCH_PIDFD(s)) /* There's a usable pidfd known for this event source? then don't waitid() for it here */
3157 continue;
3158
fd38203a 3159 zero(s->child.siginfo);
3160 if (waitid(P_PID, s->child.pid, &s->child.siginfo,
3161 WNOHANG | (s->child.options & WEXITED ? WNOWAIT : 0) | s->child.options) < 0)
3162 return -errno;
3163
3164 if (s->child.siginfo.si_pid != 0) {
945c2931 3165 bool zombie = IN_SET(s->child.siginfo.si_code, CLD_EXITED, CLD_KILLED, CLD_DUMPED);
08cd1552 3166
3167 if (zombie)
3168 s->child.exited = true;
3169
3170 if (!zombie && (s->child.options & WEXITED)) {
3171 /* If the child isn't dead then let's
3172 * immediately remove the state change
3173 * from the queue, since there's no
3174 * benefit in leaving it queued */
3175
3176 assert(s->child.options & (WSTOPPED|WCONTINUED));
a5d27871 3177 (void) waitid(P_PID, s->child.pid, &s->child.siginfo, WNOHANG|(s->child.options & (WSTOPPED|WCONTINUED)));
3178 }
3179
3180 r = source_set_pending(s, true);
3181 if (r < 0)
3182 return r;
3183 }
3184 }
3185
3186 return 0;
3187}
3188
3189static int process_pidfd(sd_event *e, sd_event_source *s, uint32_t revents) {
3190 assert(e);
3191 assert(s);
3192 assert(s->type == SOURCE_CHILD);
3193
3194 if (s->pending)
3195 return 0;
3196
b6d5481b 3197 if (event_source_is_offline(s))
3198 return 0;
3199
3200 if (!EVENT_SOURCE_WATCH_PIDFD(s))
3201 return 0;
3202
3203 zero(s->child.siginfo);
3204 if (waitid(P_PID, s->child.pid, &s->child.siginfo, WNOHANG | WNOWAIT | s->child.options) < 0)
3205 return -errno;
3206
3207 if (s->child.siginfo.si_pid == 0)
3208 return 0;
3209
3210 if (IN_SET(s->child.siginfo.si_code, CLD_EXITED, CLD_KILLED, CLD_DUMPED))
3211 s->child.exited = true;
3212
3213 return source_set_pending(s, true);
3214}
3215
9da4cb2b 3216static int process_signal(sd_event *e, struct signal_data *d, uint32_t events) {
fd38203a 3217 bool read_one = false;
3218 int r;
3219
da7e457c 3220 assert(e);
97ef5391 3221 assert(d);
305f78bf 3222 assert_return(events == EPOLLIN, -EIO);
fd38203a 3223
3224 /* If there's a signal queued on this priority and SIGCHLD is
3225 on this priority too, then make sure to recheck the
3226 children we watch. This is because we only ever dequeue
3227 the first signal per priority, and if we dequeue one,
3228 SIGCHLD might be enqueued later and we wouldn't know, but we
3229 might have higher-priority children we care about, hence we
3230 need to check that explicitly. */
3231
3232 if (sigismember(&d->sigset, SIGCHLD))
3233 e->need_process_child = true;
3234
3235 /* If there's already an event source pending for this
3236 * priority we don't read another */
3237 if (d->current)
3238 return 0;
3239
fd38203a 3240 for (;;) {
0eb2e0e3 3241 struct signalfd_siginfo si;
7057bd99 3242 ssize_t n;
92daebc0 3243 sd_event_source *s = NULL;
fd38203a 3244
9da4cb2b 3245 n = read(d->fd, &si, sizeof(si));
7057bd99 3246 if (n < 0) {
945c2931 3247 if (IN_SET(errno, EAGAIN, EINTR))
3248 return read_one;
3249
3250 return -errno;
3251 }
3252
7057bd99 3253 if (_unlikely_(n != sizeof(si)))
3254 return -EIO;
3255
6eb7c172 3256 assert(SIGNAL_VALID(si.ssi_signo));
7057bd99 3257
3258 read_one = true;
3259
3260 if (e->signal_sources)
3261 s = e->signal_sources[si.ssi_signo];
3262 if (!s)
3263 continue;
3264 if (s->pending)
3265 continue;
3266
3267 s->signal.siginfo = si;
3268 d->current = s;
3269
3270 r = source_set_pending(s, true);
3271 if (r < 0)
3272 return r;
3273
3274 return 1;
fd38203a 3275 }
3276}
3277
3278static int event_inotify_data_read(sd_event *e, struct inotify_data *d, uint32_t revents) {
3279 ssize_t n;
3280
3281 assert(e);
3282 assert(d);
3283
3284 assert_return(revents == EPOLLIN, -EIO);
3285
3286 /* If there's already an event source pending for this priority, don't read another */
3287 if (d->n_pending > 0)
3288 return 0;
3289
3290 /* Is the read buffer non-empty? If so, let's not read more */
3291 if (d->buffer_filled > 0)
3292 return 0;
3293
3294 n = read(d->fd, &d->buffer, sizeof(d->buffer));
3295 if (n < 0) {
3296 if (IN_SET(errno, EAGAIN, EINTR))
3297 return 0;
3298
3299 return -errno;
3300 }
3301
3302 assert(n > 0);
3303 d->buffer_filled = (size_t) n;
3304 LIST_PREPEND(buffered, e->inotify_data_buffered, d);
3305
3306 return 1;
3307}
3308
3309static void event_inotify_data_drop(sd_event *e, struct inotify_data *d, size_t sz) {
3310 assert(e);
3311 assert(d);
3312 assert(sz <= d->buffer_filled);
3313
3314 if (sz == 0)
3315 return;
3316
3317 /* Move the rest of the buffer to the front, in order to get things properly aligned again */
3318 memmove(d->buffer.raw, d->buffer.raw + sz, d->buffer_filled - sz);
3319 d->buffer_filled -= sz;
3320
3321 if (d->buffer_filled == 0)
3322 LIST_REMOVE(buffered, e->inotify_data_buffered, d);
3323}
3324
3325static int event_inotify_data_process(sd_event *e, struct inotify_data *d) {
3326 int r;
3327
3328 assert(e);
3329 assert(d);
3330
3331 /* If there's already an event source pending for this priority, don't read another */
3332 if (d->n_pending > 0)
3333 return 0;
3334
3335 while (d->buffer_filled > 0) {
3336 size_t sz;
3337
3338 /* Let's validate that the event structures are complete */
3339 if (d->buffer_filled < offsetof(struct inotify_event, name))
3340 return -EIO;
3341
3342 sz = offsetof(struct inotify_event, name) + d->buffer.ev.len;
3343 if (d->buffer_filled < sz)
3344 return -EIO;
3345
3346 if (d->buffer.ev.mask & IN_Q_OVERFLOW) {
3347 struct inode_data *inode_data;
3348
3349 /* The queue overran, let's pass this event to all event sources connected to this inotify
3350 * object */
3351
90e74a66 3352 HASHMAP_FOREACH(inode_data, d->inodes) {
3353 sd_event_source *s;
3354
3355 LIST_FOREACH(inotify.by_inode_data, s, inode_data->event_sources) {
3356
b6d5481b 3357 if (event_source_is_offline(s))
3358 continue;
3359
3360 r = source_set_pending(s, true);
3361 if (r < 0)
3362 return r;
3363 }
3364 }
3365 } else {
3366 struct inode_data *inode_data;
3367 sd_event_source *s;
3368
3369 /* Find the inode object for this watch descriptor. If IN_IGNORED is set we also remove it from
3370 * our watch descriptor table. */
3371 if (d->buffer.ev.mask & IN_IGNORED) {
3372
3373 inode_data = hashmap_remove(d->wd, INT_TO_PTR(d->buffer.ev.wd));
3374 if (!inode_data) {
3375 event_inotify_data_drop(e, d, sz);
3376 continue;
3377 }
3378
3379 /* The watch descriptor was removed by the kernel, let's drop it here too */
3380 inode_data->wd = -1;
3381 } else {
3382 inode_data = hashmap_get(d->wd, INT_TO_PTR(d->buffer.ev.wd));
3383 if (!inode_data) {
3384 event_inotify_data_drop(e, d, sz);
3385 continue;
3386 }
3387 }
3388
3389 /* Trigger all event sources that are interested in these events. Also trigger all event
3390 * sources if IN_IGNORED or IN_UNMOUNT is set. */
3391 LIST_FOREACH(inotify.by_inode_data, s, inode_data->event_sources) {
3392
b6d5481b 3393 if (event_source_is_offline(s))
3394 continue;
3395
3396 if ((d->buffer.ev.mask & (IN_IGNORED|IN_UNMOUNT)) == 0 &&
3397 (s->inotify.mask & d->buffer.ev.mask & IN_ALL_EVENTS) == 0)
3398 continue;
3399
3400 r = source_set_pending(s, true);
3401 if (r < 0)
3402 return r;
3403 }
3404 }
3405
3406 /* Something pending now? If so, let's finish, otherwise let's read more. */
3407 if (d->n_pending > 0)
3408 return 1;
3409 }
3410
3411 return 0;
3412}
3413
3414static int process_inotify(sd_event *e) {
3415 struct inotify_data *d;
3416 int r, done = 0;
3417
3418 assert(e);
3419
3420 LIST_FOREACH(buffered, d, e->inotify_data_buffered) {
3421 r = event_inotify_data_process(e, d);
3422 if (r < 0)
3423 return r;
3424 if (r > 0)
3425 done ++;
3426 }
3427
3428 return done;
3429}
3430
fd38203a 3431static int source_dispatch(sd_event_source *s) {
b778cba4 3432 _cleanup_(sd_event_unrefp) sd_event *saved_event = NULL;
8f5c235d 3433 EventSourceType saved_type;
fe8245eb 3434 int r = 0;
3435
3436 assert(s);
6203e07a 3437 assert(s->pending || s->type == SOURCE_EXIT);
fd38203a 3438
3439 /* Save the event source type here, so that we still know it after the event callback, which might
3440 * invalidate the event. */
3441 saved_type = s->type;
3442
3443 /* Similarly, store a reference to the event loop object, so that we can still access it after the
3444 * callback might have invalidated/disconnected the event source. */
3445 saved_event = sd_event_ref(s->event);
3446
3447 /* Check if we hit the ratelimit for this event source, if so, let's disable it. */
3448 assert(!s->ratelimited);
3449 if (!ratelimit_below(&s->rate_limit)) {
3450 r = event_source_enter_ratelimited(s);
3451 if (r < 0)
3452 return r;
3453
3454 return 1;
3455 }
3456
945c2931 3457 if (!IN_SET(s->type, SOURCE_DEFER, SOURCE_EXIT)) {
3458 r = source_set_pending(s, false);
3459 if (r < 0)
3460 return r;
3461 }
fd38203a 3462
3463 if (s->type != SOURCE_POST) {
3464 sd_event_source *z;
3465
3466 /* If we execute a non-post source, let's mark all
3467 * post sources as pending */
3468
90e74a66 3469 SET_FOREACH(z, s->event->post_sources) {
b6d5481b 3470 if (event_source_is_offline(z))
3471 continue;
3472
3473 r = source_set_pending(z, true);
3474 if (r < 0)
3475 return r;
3476 }
3477 }
3478
3479 if (s->enabled == SD_EVENT_ONESHOT) {
3480 r = sd_event_source_set_enabled(s, SD_EVENT_OFF);
3481 if (r < 0)
3482 return r;
3483 }
3484
12179984 3485 s->dispatching = true;
b7484e2a 3486
fd38203a
LP
3487 switch (s->type) {
3488
3489 case SOURCE_IO:
3490 r = s->io.callback(s, s->io.fd, s->io.revents, s->userdata);
3491 break;
3492
6a0f1f6d 3493 case SOURCE_TIME_REALTIME:
a8548816 3494 case SOURCE_TIME_BOOTTIME:
6a0f1f6d
LP
3495 case SOURCE_TIME_MONOTONIC:
3496 case SOURCE_TIME_REALTIME_ALARM:
3497 case SOURCE_TIME_BOOTTIME_ALARM:
fd38203a
LP
3498 r = s->time.callback(s, s->time.next, s->userdata);
3499 break;
3500
3501 case SOURCE_SIGNAL:
3502 r = s->signal.callback(s, &s->signal.siginfo, s->userdata);
3503 break;
3504
08cd1552
LP
3505 case SOURCE_CHILD: {
3506 bool zombie;
3507
945c2931 3508 zombie = IN_SET(s->child.siginfo.si_code, CLD_EXITED, CLD_KILLED, CLD_DUMPED);
08cd1552 3509
fd38203a 3510 r = s->child.callback(s, &s->child.siginfo, s->userdata);
08cd1552
LP
3511
3512 /* Now, reap the PID for good. */
f8f3f926 3513 if (zombie) {
cc59d290 3514 (void) waitid(P_PID, s->child.pid, &s->child.siginfo, WNOHANG|WEXITED);
f8f3f926
LP
3515 s->child.waited = true;
3516 }
08cd1552 3517
fd38203a 3518 break;
08cd1552 3519 }
fd38203a
LP
3520
3521 case SOURCE_DEFER:
3522 r = s->defer.callback(s, s->userdata);
3523 break;
da7e457c 3524
6e9feda3
LP
3525 case SOURCE_POST:
3526 r = s->post.callback(s, s->userdata);
3527 break;
3528
6203e07a
LP
3529 case SOURCE_EXIT:
3530 r = s->exit.callback(s, s->userdata);
da7e457c 3531 break;
9d3e3aa5 3532
97ef5391
LP
3533 case SOURCE_INOTIFY: {
3534 struct sd_event *e = s->event;
3535 struct inotify_data *d;
3536 size_t sz;
3537
3538 assert(s->inotify.inode_data);
3539 assert_se(d = s->inotify.inode_data->inotify_data);
3540
3541 assert(d->buffer_filled >= offsetof(struct inotify_event, name));
3542 sz = offsetof(struct inotify_event, name) + d->buffer.ev.len;
3543 assert(d->buffer_filled >= sz);
3544
3545 r = s->inotify.callback(s, &d->buffer.ev, s->userdata);
3546
3547 /* When no event is pending anymore on this inotify object, then let's drop the event from the
3548 * buffer. */
3549 if (d->n_pending == 0)
3550 event_inotify_data_drop(e, d, sz);
3551
3552 break;
3553 }
3554
9d3e3aa5 3555 case SOURCE_WATCHDOG:
a71fe8b8 3556 case _SOURCE_EVENT_SOURCE_TYPE_MAX:
9f2a50a3 3557 case _SOURCE_EVENT_SOURCE_TYPE_INVALID:
9d3e3aa5 3558 assert_not_reached("Wut? I shouldn't exist.");
3559 }
3560
3561 s->dispatching = false;
3562
3563 if (r < 0) {
3564 log_debug_errno(r, "Event source %s (type %s) returned error, %s: %m",
3565 strna(s->description),
3566 event_source_type_to_string(saved_type),
3567 s->exit_on_failure ? "exiting" : "disabling");
3568
3569 if (s->exit_on_failure)
3570 (void) sd_event_exit(saved_event, r);
3571 }
3572
3573 if (s->n_ref == 0)
3574 source_free(s);
3575 else if (r < 0)
6203e07a 3576 sd_event_source_set_enabled(s, SD_EVENT_OFF);
b7484e2a 3577
6203e07a 3578 return 1;
3579}
3580
3581static int event_prepare(sd_event *e) {
3582 int r;
3583
3584 assert(e);
3585
3586 for (;;) {
3587 sd_event_source *s;
3588
3589 s = prioq_peek(e->prepare);
b6d5481b 3590 if (!s || s->prepare_iteration == e->iteration || event_source_is_offline(s))
3591 break;
3592
3593 s->prepare_iteration = e->iteration;
3594 r = prioq_reshuffle(e->prepare, s, &s->prepare_index);
3595 if (r < 0)
3596 return r;
3597
3598 assert(s->prepare);
3599
3600 s->dispatching = true;
fd38203a 3601 r = s->prepare(s, s->userdata);
3602 s->dispatching = false;
3603
3604 if (r < 0) {
3605 log_debug_errno(r, "Prepare callback of event source %s (type %s) returned error, %s: %m",
3606 strna(s->description),
3607 event_source_type_to_string(s->type),
3608 s->exit_on_failure ? "exiting" : "disabling");
3609
3610 if (s->exit_on_failure)
3611 (void) sd_event_exit(e, r);
3612 }
fd38203a 3613
3614 if (s->n_ref == 0)
3615 source_free(s);
3616 else if (r < 0)
3617 sd_event_source_set_enabled(s, SD_EVENT_OFF);
3618 }
3619
3620 return 0;
3621}
3622
6203e07a 3623static int dispatch_exit(sd_event *e) {
3624 sd_event_source *p;
3625 int r;
3626
3627 assert(e);
3628
6203e07a 3629 p = prioq_peek(e->exit);
b6d5481b 3630 if (!p || event_source_is_offline(p)) {
3631 e->state = SD_EVENT_FINISHED;
3632 return 0;
3633 }
3634
f814c871 3635 _unused_ _cleanup_(sd_event_unrefp) sd_event *ref = sd_event_ref(e);
da7e457c 3636 e->iteration++;
6203e07a 3637 e->state = SD_EVENT_EXITING;
da7e457c 3638 r = source_dispatch(p);
2b0c9ef7 3639 e->state = SD_EVENT_INITIAL;
3640 return r;
3641}
3642
3643static sd_event_source* event_next_pending(sd_event *e) {
3644 sd_event_source *p;
3645
3646 assert(e);
3647
3648 p = prioq_peek(e->pending);
3649 if (!p)
3650 return NULL;
3651
b6d5481b 3652 if (event_source_is_offline(p))
3653 return NULL;
3654
3655 return p;
3656}
3657
3658static int arm_watchdog(sd_event *e) {
3659 struct itimerspec its = {};
3660 usec_t t;
3661
3662 assert(e);
3663 assert(e->watchdog_fd >= 0);
3664
3665 t = sleep_between(e,
3666 e->watchdog_last + (e->watchdog_period / 2),
3667 e->watchdog_last + (e->watchdog_period * 3 / 4));
3668
3669 timespec_store(&its.it_value, t);
3670
3671 /* Make sure we never set the watchdog to 0, which tells the
3672 * kernel to disable it. */
3673 if (its.it_value.tv_sec == 0 && its.it_value.tv_nsec == 0)
3674 its.it_value.tv_nsec = 1;
3675
15c689d7 3676 if (timerfd_settime(e->watchdog_fd, TFD_TIMER_ABSTIME, &its, NULL) < 0)
3677 return -errno;
3678
3679 return 0;
3680}
3681
3682static int process_watchdog(sd_event *e) {
3683 assert(e);
3684
3685 if (!e->watchdog)
3686 return 0;
3687
3688 /* Don't notify watchdog too often */
3689 if (e->watchdog_last + e->watchdog_period / 4 > e->timestamp.monotonic)
3690 return 0;
3691
3692 sd_notify(false, "WATCHDOG=1");
3693 e->watchdog_last = e->timestamp.monotonic;
3694
3695 return arm_watchdog(e);
3696}
3697
3698static void event_close_inode_data_fds(sd_event *e) {
3699 struct inode_data *d;
3700
3701 assert(e);
3702
3703 /* Close the fds pointing to the inodes to watch now. We need to close them as they might otherwise pin
3704 * filesystems. But we can't close them right away as we need them as long as the user still wants to make
3705 * adjustments to the event source, such as changing the priority (which requires us to remove and re-add a watch
3706 * for the inode). Hence, let's close them when entering the first iteration after they were added, as a
3707 * compromise. */
3708
3709 while ((d = e->inode_data_to_close)) {
3710 assert(d->fd >= 0);
3711 d->fd = safe_close(d->fd);
3712
3713 LIST_REMOVE(to_close, e->inode_data_to_close, d);
3714 }
3715}
3716
3717_public_ int sd_event_prepare(sd_event *e) {
3718 int r;
fd38203a 3719
da7e457c 3720 assert_return(e, -EINVAL);
b937d761 3721 assert_return(e = event_resolve(e), -ENOPKG);
3722 assert_return(!event_pid_changed(e), -ECHILD);
3723 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
2b0c9ef7 3724 assert_return(e->state == SD_EVENT_INITIAL, -EBUSY);
da7e457c 3725
3726 /* Let's check that if we are a default event loop we are executed in the correct thread. We only do
3727 * this check here once, since gettid() is typically not cached, and we thus want to minimize
3728 * syscalls */
3729 assert_return(!e->default_event_ptr || e->tid == gettid(), -EREMOTEIO);
3730
3731 /* Make sure that none of the preparation callbacks ends up freeing the event source under our feet */
3732 _unused_ _cleanup_(sd_event_unrefp) sd_event *ref = sd_event_ref(e);
3733
6203e07a 3734 if (e->exit_requested)
c45a5a74 3735 goto pending;
fd38203a
LP
3736
3737 e->iteration++;
3738
0be6c2f6 3739 e->state = SD_EVENT_PREPARING;
fd38203a 3740 r = event_prepare(e);
0be6c2f6 3741 e->state = SD_EVENT_INITIAL;
fd38203a 3742 if (r < 0)
c45a5a74 3743 return r;
fd38203a 3744
6a0f1f6d
LP
3745 r = event_arm_timer(e, &e->realtime);
3746 if (r < 0)
c45a5a74 3747 return r;
6a0f1f6d 3748
a8548816
TG
3749 r = event_arm_timer(e, &e->boottime);
3750 if (r < 0)
c45a5a74 3751 return r;
a8548816 3752
6a0f1f6d
LP
3753 r = event_arm_timer(e, &e->monotonic);
3754 if (r < 0)
c45a5a74 3755 return r;
6a0f1f6d
LP
3756
3757 r = event_arm_timer(e, &e->realtime_alarm);
1b5995b0 3758 if (r < 0)
c45a5a74 3759 return r;
fd38203a 3760
6a0f1f6d 3761 r = event_arm_timer(e, &e->boottime_alarm);
1b5995b0 3762 if (r < 0)
c45a5a74 3763 return r;
fd38203a 3764
97ef5391
LP
3765 event_close_inode_data_fds(e);
3766
1b5995b0 3767 if (event_next_pending(e) || e->need_process_child)
c45a5a74
TG
3768 goto pending;
3769
2b0c9ef7 3770 e->state = SD_EVENT_ARMED;
c45a5a74
TG
3771
3772 return 0;
3773
3774pending:
2b0c9ef7 3775 e->state = SD_EVENT_ARMED;
6d148a84
TG
3776 r = sd_event_wait(e, 0);
3777 if (r == 0)
2b0c9ef7 3778 e->state = SD_EVENT_ARMED;
6d148a84
TG
3779
3780 return r;
c45a5a74
TG
3781}
3782
3783_public_ int sd_event_wait(sd_event *e, uint64_t timeout) {
5cddd924 3784 size_t event_queue_max;
c45a5a74
TG
3785 int r, m, i;
3786
3787 assert_return(e, -EINVAL);
b937d761 3788 assert_return(e = event_resolve(e), -ENOPKG);
c45a5a74
TG
3789 assert_return(!event_pid_changed(e), -ECHILD);
3790 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
2b0c9ef7 3791 assert_return(e->state == SD_EVENT_ARMED, -EBUSY);
c45a5a74
TG
3792
3793 if (e->exit_requested) {
3794 e->state = SD_EVENT_PENDING;
3795 return 1;
3796 }
6a0f1f6d 3797
5cddd924
LP
3798 event_queue_max = MAX(e->n_sources, 1u);
3799 if (!GREEDY_REALLOC(e->event_queue, e->event_queue_allocated, event_queue_max))
3800 return -ENOMEM;
fd38203a 3801
97ef5391
LP
 3802 /* If we still have inotify data buffered, then query the other fds, but don't block waiting */
3803 if (e->inotify_data_buffered)
3804 timeout = 0;
3805
5cddd924 3806 m = epoll_wait(e->epoll_fd, e->event_queue, event_queue_max,
bab4820e 3807 timeout == (uint64_t) -1 ? -1 : (int) DIV_ROUND_UP(timeout, USEC_PER_MSEC));
da7e457c 3808 if (m < 0) {
c45a5a74
TG
3809 if (errno == EINTR) {
3810 e->state = SD_EVENT_PENDING;
3811 return 1;
3812 }
3813
3814 r = -errno;
da7e457c
LP
3815 goto finish;
3816 }
fd38203a 3817
e475d10c 3818 triple_timestamp_get(&e->timestamp);
fd38203a
LP
3819
3820 for (i = 0; i < m; i++) {
3821
5cddd924
LP
3822 if (e->event_queue[i].data.ptr == INT_TO_PTR(SOURCE_WATCHDOG))
3823 r = flush_timer(e, e->watchdog_fd, e->event_queue[i].events, NULL);
9da4cb2b 3824 else {
5cddd924 3825 WakeupType *t = e->event_queue[i].data.ptr;
9da4cb2b
LP
3826
3827 switch (*t) {
3828
f8f3f926 3829 case WAKEUP_EVENT_SOURCE: {
5cddd924 3830 sd_event_source *s = e->event_queue[i].data.ptr;
f8f3f926
LP
3831
3832 assert(s);
3833
3834 switch (s->type) {
3835
3836 case SOURCE_IO:
5cddd924 3837 r = process_io(e, s, e->event_queue[i].events);
f8f3f926
LP
3838 break;
3839
3840 case SOURCE_CHILD:
5cddd924 3841 r = process_pidfd(e, s, e->event_queue[i].events);
f8f3f926
LP
3842 break;
3843
3844 default:
3845 assert_not_reached("Unexpected event source type");
3846 }
3847
9da4cb2b 3848 break;
f8f3f926 3849 }
fd38203a 3850
9da4cb2b 3851 case WAKEUP_CLOCK_DATA: {
5cddd924 3852 struct clock_data *d = e->event_queue[i].data.ptr;
f8f3f926
LP
3853
3854 assert(d);
3855
5cddd924 3856 r = flush_timer(e, d->fd, e->event_queue[i].events, &d->next);
9da4cb2b
LP
3857 break;
3858 }
3859
3860 case WAKEUP_SIGNAL_DATA:
5cddd924 3861 r = process_signal(e, e->event_queue[i].data.ptr, e->event_queue[i].events);
9da4cb2b
LP
3862 break;
3863
97ef5391 3864 case WAKEUP_INOTIFY_DATA:
5cddd924 3865 r = event_inotify_data_read(e, e->event_queue[i].data.ptr, e->event_queue[i].events);
97ef5391
LP
3866 break;
3867
9da4cb2b
LP
3868 default:
3869 assert_not_reached("Invalid wake-up pointer");
3870 }
3871 }
fd38203a 3872 if (r < 0)
da7e457c 3873 goto finish;
fd38203a
LP
3874 }
3875
cde93897
LP
3876 r = process_watchdog(e);
3877 if (r < 0)
3878 goto finish;
3879
6a0f1f6d
LP
3880 r = process_timer(e, e->timestamp.realtime, &e->realtime);
3881 if (r < 0)
3882 goto finish;
3883
e475d10c 3884 r = process_timer(e, e->timestamp.boottime, &e->boottime);
a8548816
TG
3885 if (r < 0)
3886 goto finish;
3887
6a0f1f6d
LP
3888 r = process_timer(e, e->timestamp.monotonic, &e->monotonic);
3889 if (r < 0)
3890 goto finish;
3891
3892 r = process_timer(e, e->timestamp.realtime, &e->realtime_alarm);
fd38203a 3893 if (r < 0)
da7e457c 3894 goto finish;
fd38203a 3895
e475d10c 3896 r = process_timer(e, e->timestamp.boottime, &e->boottime_alarm);
fd38203a 3897 if (r < 0)
da7e457c 3898 goto finish;
fd38203a 3899
c2ba3ad6 3900 if (e->need_process_child) {
fd38203a
LP
3901 r = process_child(e);
3902 if (r < 0)
da7e457c 3903 goto finish;
fd38203a
LP
3904 }
3905
97ef5391
LP
3906 r = process_inotify(e);
3907 if (r < 0)
3908 goto finish;
3909
c45a5a74
TG
3910 if (event_next_pending(e)) {
3911 e->state = SD_EVENT_PENDING;
3912
3913 return 1;
da7e457c
LP
3914 }
3915
c45a5a74 3916 r = 0;
fd38203a 3917
da7e457c 3918finish:
2b0c9ef7 3919 e->state = SD_EVENT_INITIAL;
da7e457c
LP
3920
3921 return r;
fd38203a
LP
3922}
3923
c45a5a74
TG
3924_public_ int sd_event_dispatch(sd_event *e) {
3925 sd_event_source *p;
3926 int r;
3927
3928 assert_return(e, -EINVAL);
b937d761 3929 assert_return(e = event_resolve(e), -ENOPKG);
c45a5a74
TG
3930 assert_return(!event_pid_changed(e), -ECHILD);
3931 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
3932 assert_return(e->state == SD_EVENT_PENDING, -EBUSY);
3933
3934 if (e->exit_requested)
3935 return dispatch_exit(e);
3936
3937 p = event_next_pending(e);
3938 if (p) {
f814c871 3939 _unused_ _cleanup_(sd_event_unrefp) sd_event *ref = sd_event_ref(e);
c45a5a74
TG
3940
3941 e->state = SD_EVENT_RUNNING;
3942 r = source_dispatch(p);
2b0c9ef7 3943 e->state = SD_EVENT_INITIAL;
c45a5a74
TG
3944 return r;
3945 }
3946
2b0c9ef7 3947 e->state = SD_EVENT_INITIAL;
c45a5a74
TG
3948
3949 return 1;
3950}
3951
34b87517 3952static void event_log_delays(sd_event *e) {
442ac269
YW
3953 char b[ELEMENTSOF(e->delays) * DECIMAL_STR_MAX(unsigned) + 1], *p;
3954 size_t l, i;
34b87517 3955
442ac269
YW
3956 p = b;
3957 l = sizeof(b);
3958 for (i = 0; i < ELEMENTSOF(e->delays); i++) {
3959 l = strpcpyf(&p, l, "%u ", e->delays[i]);
34b87517
VC
3960 e->delays[i] = 0;
3961 }
442ac269 3962 log_debug("Event loop iterations: %s", b);
34b87517
VC
3963}
3964
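/* Illustrative note (not part of the original file): the histogram above is indexed by
 * floor(log2(delay)), where the delay is the time between two sd_event_run() invocations,
 * measured in µs. For example, a gap of ~1.5ms gives u64log2(1500) = 10, so e->delays[10]
 * is incremented. Profiling is typically switched on via the SD_EVENT_PROFILE_DELAYS
 * environment variable (an assumption to verify against the event allocation code), and
 * event_log_delays() dumps and resets the buckets every 5s. */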
c45a5a74
TG
3965_public_ int sd_event_run(sd_event *e, uint64_t timeout) {
3966 int r;
3967
3968 assert_return(e, -EINVAL);
b937d761 3969 assert_return(e = event_resolve(e), -ENOPKG);
c45a5a74
TG
3970 assert_return(!event_pid_changed(e), -ECHILD);
3971 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
2b0c9ef7 3972 assert_return(e->state == SD_EVENT_INITIAL, -EBUSY);
c45a5a74 3973
e6a7bee5 3974 if (e->profile_delays && e->last_run_usec != 0) {
34b87517
VC
3975 usec_t this_run;
3976 unsigned l;
3977
3978 this_run = now(CLOCK_MONOTONIC);
3979
e6a7bee5 3980 l = u64log2(this_run - e->last_run_usec);
cb9d621e 3981 assert(l < ELEMENTSOF(e->delays));
34b87517
VC
3982 e->delays[l]++;
3983
e6a7bee5 3984 if (this_run - e->last_log_usec >= 5*USEC_PER_SEC) {
34b87517 3985 event_log_delays(e);
e6a7bee5 3986 e->last_log_usec = this_run;
34b87517
VC
3987 }
3988 }
3989
f814c871
LP
 3990 /* Make sure that none of the preparation callbacks ends up freeing the event loop under our feet */
3991 _unused_ _cleanup_(sd_event_unrefp) sd_event *ref = sd_event_ref(e);
3992
c45a5a74 3993 r = sd_event_prepare(e);
53bac4e0
LP
3994 if (r == 0)
3995 /* There was nothing? Then wait... */
3996 r = sd_event_wait(e, timeout);
c45a5a74 3997
34b87517 3998 if (e->profile_delays)
e6a7bee5 3999 e->last_run_usec = now(CLOCK_MONOTONIC);
34b87517 4000
02d30981 4001 if (r > 0) {
53bac4e0 4002 /* There's something now; let's dispatch it */
02d30981
TG
4003 r = sd_event_dispatch(e);
4004 if (r < 0)
4005 return r;
53bac4e0
LP
4006
4007 return 1;
4008 }
4009
4010 return r;
c45a5a74
TG
4011}
4012
f7262a9f 4013_public_ int sd_event_loop(sd_event *e) {
fd38203a
LP
4014 int r;
4015
da7e457c 4016 assert_return(e, -EINVAL);
b937d761 4017 assert_return(e = event_resolve(e), -ENOPKG);
da7e457c 4018 assert_return(!event_pid_changed(e), -ECHILD);
2b0c9ef7 4019 assert_return(e->state == SD_EVENT_INITIAL, -EBUSY);
da7e457c 4020
f814c871 4021 _unused_ _cleanup_(sd_event_unrefp) sd_event *ref = NULL;
fd38203a 4022
da7e457c 4023 while (e->state != SD_EVENT_FINISHED) {
fd38203a
LP
4024 r = sd_event_run(e, (uint64_t) -1);
4025 if (r < 0)
30dd293c 4026 return r;
fd38203a
LP
4027 }
4028
30dd293c 4029 return e->exit_code;
fd38203a
LP
4030}
4031
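/* Usage sketch (illustrative, not part of this file): a minimal program driving the loop
 * with sd_event_loop() until some event source requests an exit. Error handling is
 * collapsed for brevity; the handler name on_defer is made up for the example.
 *
 *     #include <systemd/sd-event.h>
 *
 *     static int on_defer(sd_event_source *s, void *userdata) {
 *             return sd_event_exit(sd_event_source_get_event(s), 0);
 *     }
 *
 *     int main(void) {
 *             sd_event *e = NULL;
 *             if (sd_event_default(&e) < 0)
 *                     return 1;
 *             (void) sd_event_add_defer(e, NULL, on_defer, NULL);
 *             int code = sd_event_loop(e);   // runs until SD_EVENT_FINISHED, returns the exit code
 *             sd_event_unref(e);
 *             return code < 0;
 *     }
 */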
9b364545 4032_public_ int sd_event_get_fd(sd_event *e) {
9b364545 4033 assert_return(e, -EINVAL);
b937d761 4034 assert_return(e = event_resolve(e), -ENOPKG);
9b364545
TG
4035 assert_return(!event_pid_changed(e), -ECHILD);
4036
4037 return e->epoll_fd;
4038}
4039
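/* Integration sketch (illustrative, not upstream code): the epoll fd returned above can be
 * embedded in a foreign poll loop; when it polls readable, run one non-blocking iteration.
 * A hand-rolled iteration decomposes sd_event_run() into its three phases:
 *
 *     static int run_one(sd_event *e) {
 *             int r = sd_event_prepare(e);           // r > 0: events already pending
 *             if (r == 0)
 *                     r = sd_event_wait(e, 0);       // poll, don't block; r > 0: pending
 *             if (r > 0)
 *                     return sd_event_dispatch(e);   // dispatch at most one source
 *             return r;
 *     }
 */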
f7262a9f 4040_public_ int sd_event_get_state(sd_event *e) {
da7e457c 4041 assert_return(e, -EINVAL);
b937d761 4042 assert_return(e = event_resolve(e), -ENOPKG);
da7e457c
LP
4043 assert_return(!event_pid_changed(e), -ECHILD);
4044
4045 return e->state;
4046}
4047
6203e07a 4048_public_ int sd_event_get_exit_code(sd_event *e, int *code) {
da7e457c 4049 assert_return(e, -EINVAL);
b937d761 4050 assert_return(e = event_resolve(e), -ENOPKG);
6203e07a 4051 assert_return(code, -EINVAL);
da7e457c 4052 assert_return(!event_pid_changed(e), -ECHILD);
fd38203a 4053
6203e07a
LP
4054 if (!e->exit_requested)
4055 return -ENODATA;
4056
4057 *code = e->exit_code;
4058 return 0;
fd38203a
LP
4059}
4060
6203e07a 4061_public_ int sd_event_exit(sd_event *e, int code) {
da7e457c 4062 assert_return(e, -EINVAL);
b937d761 4063 assert_return(e = event_resolve(e), -ENOPKG);
da7e457c
LP
4064 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
4065 assert_return(!event_pid_changed(e), -ECHILD);
fd38203a 4066
6203e07a
LP
4067 e->exit_requested = true;
4068 e->exit_code = code;
4069
fd38203a
LP
4070 return 0;
4071}
46e8c825 4072
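/* Usage sketch (illustrative): an exit is typically requested from within a handler; the
 * loop then dispatches the SOURCE_EXIT sources (see dispatch_exit() earlier) before
 * sd_event_loop() returns the code passed here. Handler name and signal choice are just
 * examples:
 *
 *     static int on_sigterm(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
 *             return sd_event_exit(sd_event_source_get_event(s), 0);
 *     }
 */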
6a0f1f6d 4073_public_ int sd_event_now(sd_event *e, clockid_t clock, uint64_t *usec) {
46e8c825 4074 assert_return(e, -EINVAL);
b937d761 4075 assert_return(e = event_resolve(e), -ENOPKG);
46e8c825 4076 assert_return(usec, -EINVAL);
46e8c825
LP
4077 assert_return(!event_pid_changed(e), -ECHILD);
4078
e475d10c
LP
4079 if (!TRIPLE_TIMESTAMP_HAS_CLOCK(clock))
4080 return -EOPNOTSUPP;
4081
 4082 /* Generate a clean error in case CLOCK_BOOTTIME is not available. Note that we don't use clock_supported() here,
 4083 * for a reason: there are systems where CLOCK_BOOTTIME is supported but CLOCK_BOOTTIME_ALARM is not; for
 4084 * the purpose of getting the time this doesn't matter. */
3411372e
LP
4085 if (IN_SET(clock, CLOCK_BOOTTIME, CLOCK_BOOTTIME_ALARM) && !clock_boottime_supported())
4086 return -EOPNOTSUPP;
4087
e475d10c 4088 if (!triple_timestamp_is_set(&e->timestamp)) {
15c689d7 4089 /* Implicitly fall back to now() if we never ran before and thus have no cached time. */
38a03f06
LP
4090 *usec = now(clock);
4091 return 1;
4092 }
46e8c825 4093
e475d10c 4094 *usec = triple_timestamp_by_clock(&e->timestamp, clock);
46e8c825
LP
4095 return 0;
4096}
afc6adb5
LP
4097
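/* Usage sketch (illustrative): because this returns the timestamp cached at the start of
 * the current iteration (falling back to now() before the first one), it is the right base
 * for relative deadlines, keeping all sources of one iteration on the same view of time.
 * on_time is a hypothetical handler:
 *
 *     uint64_t usec;
 *     if (sd_event_now(e, CLOCK_MONOTONIC, &usec) >= 0)
 *             (void) sd_event_add_time(e, NULL, CLOCK_MONOTONIC,
 *                                      usec + 5 * USEC_PER_SEC, 0,   // fire in ~5s
 *                                      on_time, NULL);
 */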
4098_public_ int sd_event_default(sd_event **ret) {
39883f62 4099 sd_event *e = NULL;
afc6adb5
LP
4100 int r;
4101
4102 if (!ret)
4103 return !!default_event;
4104
4105 if (default_event) {
4106 *ret = sd_event_ref(default_event);
4107 return 0;
4108 }
4109
4110 r = sd_event_new(&e);
4111 if (r < 0)
4112 return r;
4113
4114 e->default_event_ptr = &default_event;
4115 e->tid = gettid();
4116 default_event = e;
4117
4118 *ret = e;
4119 return 1;
4120}
4121
4122_public_ int sd_event_get_tid(sd_event *e, pid_t *tid) {
4123 assert_return(e, -EINVAL);
b937d761 4124 assert_return(e = event_resolve(e), -ENOPKG);
afc6adb5 4125 assert_return(tid, -EINVAL);
76b54375 4126 assert_return(!event_pid_changed(e), -ECHILD);
afc6adb5 4127
76b54375
LP
4128 if (e->tid != 0) {
4129 *tid = e->tid;
4130 return 0;
4131 }
4132
4133 return -ENXIO;
afc6adb5 4134}
cde93897
LP
4135
4136_public_ int sd_event_set_watchdog(sd_event *e, int b) {
4137 int r;
4138
4139 assert_return(e, -EINVAL);
b937d761 4140 assert_return(e = event_resolve(e), -ENOPKG);
8f726607 4141 assert_return(!event_pid_changed(e), -ECHILD);
cde93897
LP
4142
4143 if (e->watchdog == !!b)
4144 return e->watchdog;
4145
4146 if (b) {
09812eb7
LP
4147 r = sd_watchdog_enabled(false, &e->watchdog_period);
4148 if (r <= 0)
cde93897 4149 return r;
cde93897
LP
4150
4151 /* Issue first ping immediately */
4152 sd_notify(false, "WATCHDOG=1");
4153 e->watchdog_last = now(CLOCK_MONOTONIC);
4154
4155 e->watchdog_fd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK|TFD_CLOEXEC);
4156 if (e->watchdog_fd < 0)
4157 return -errno;
4158
4159 r = arm_watchdog(e);
4160 if (r < 0)
4161 goto fail;
4162
1eac7948 4163 struct epoll_event ev = {
a82f89aa
LP
4164 .events = EPOLLIN,
4165 .data.ptr = INT_TO_PTR(SOURCE_WATCHDOG),
4166 };
cde93897 4167
15c689d7 4168 if (epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, e->watchdog_fd, &ev) < 0) {
cde93897
LP
4169 r = -errno;
4170 goto fail;
4171 }
4172
4173 } else {
4174 if (e->watchdog_fd >= 0) {
5a795bff 4175 (void) epoll_ctl(e->epoll_fd, EPOLL_CTL_DEL, e->watchdog_fd, NULL);
03e334a1 4176 e->watchdog_fd = safe_close(e->watchdog_fd);
cde93897
LP
4177 }
4178 }
4179
4180 e->watchdog = !!b;
4181 return e->watchdog;
4182
4183fail:
03e334a1 4184 e->watchdog_fd = safe_close(e->watchdog_fd);
cde93897
LP
4185 return r;
4186}
8f726607
LP
4187
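/* Usage sketch (illustrative): in a service started with WatchdogSec= set, enabling this
 * makes the loop ping the service manager automatically, so no manual sd_notify() calls
 * are needed:
 *
 *     r = sd_event_set_watchdog(e, true);
 *     // r > 0: watchdog enabled; r == 0: WATCHDOG_USEC= not set, nothing to do; r < 0: error
 */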
4188_public_ int sd_event_get_watchdog(sd_event *e) {
4189 assert_return(e, -EINVAL);
b937d761 4190 assert_return(e = event_resolve(e), -ENOPKG);
8f726607
LP
4191 assert_return(!event_pid_changed(e), -ECHILD);
4192
4193 return e->watchdog;
4194}
60a3b1e1
LP
4195
4196_public_ int sd_event_get_iteration(sd_event *e, uint64_t *ret) {
4197 assert_return(e, -EINVAL);
b937d761 4198 assert_return(e = event_resolve(e), -ENOPKG);
60a3b1e1
LP
4199 assert_return(!event_pid_changed(e), -ECHILD);
4200
4201 *ret = e->iteration;
4202 return 0;
4203}
15723a1d
LP
4204
4205_public_ int sd_event_source_set_destroy_callback(sd_event_source *s, sd_event_destroy_t callback) {
4206 assert_return(s, -EINVAL);
4207
4208 s->destroy_callback = callback;
4209 return 0;
4210}
4211
4212_public_ int sd_event_source_get_destroy_callback(sd_event_source *s, sd_event_destroy_t *ret) {
4213 assert_return(s, -EINVAL);
4214
4215 if (ret)
4216 *ret = s->destroy_callback;
4217
4218 return !!s->destroy_callback;
4219}
2382c936
YW
4220
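/* Usage sketch (illustrative): the destroy callback is the idiomatic way to tie userdata
 * lifetime to the source, e.g. to free a heap-allocated context exactly once, however the
 * source ends up being destroyed. The callback name is made up; it receives the userdata
 * pointer registered with the source:
 *
 *     static void context_destroy(void *userdata) {
 *             free(userdata);
 *     }
 *     ...
 *     (void) sd_event_source_set_destroy_callback(s, context_destroy);
 */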
4221_public_ int sd_event_source_get_floating(sd_event_source *s) {
4222 assert_return(s, -EINVAL);
4223
4224 return s->floating;
4225}
4226
4227_public_ int sd_event_source_set_floating(sd_event_source *s, int b) {
4228 assert_return(s, -EINVAL);
4229
4230 if (s->floating == !!b)
4231 return 0;
4232
4233 if (!s->event) /* Already disconnected */
4234 return -ESTALE;
4235
4236 s->floating = b;
4237
4238 if (b) {
4239 sd_event_source_ref(s);
4240 sd_event_unref(s->event);
4241 } else {
4242 sd_event_ref(s->event);
4243 sd_event_source_unref(s);
4244 }
4245
4246 return 1;
4247}
b778cba4
LP
4248
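/* Usage sketch (illustrative): a floating source is owned by the event loop rather than by
 * the caller, handy for fire-and-forget sources where keeping a reference around is just
 * clutter. (Passing NULL as the ret parameter to sd_event_add_io() and friends creates a
 * floating source directly.)
 *
 *     sd_event_source *s = NULL;
 *     if (sd_event_add_defer(e, &s, on_defer, NULL) >= 0) {
 *             (void) sd_event_source_set_floating(s, true);   // loop now holds a reference
 *             s = sd_event_source_unref(s);                   // drop ours; the source stays alive
 *     }
 */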
4249_public_ int sd_event_source_get_exit_on_failure(sd_event_source *s) {
4250 assert_return(s, -EINVAL);
4251 assert_return(s->type != SOURCE_EXIT, -EDOM);
4252
4253 return s->exit_on_failure;
4254}
4255
4256_public_ int sd_event_source_set_exit_on_failure(sd_event_source *s, int b) {
4257 assert_return(s, -EINVAL);
4258 assert_return(s->type != SOURCE_EXIT, -EDOM);
4259
4260 if (s->exit_on_failure == !!b)
4261 return 0;
4262
4263 s->exit_on_failure = b;
4264 return 1;
4265}
b6d5481b
LP
4266
4267_public_ int sd_event_source_set_ratelimit(sd_event_source *s, uint64_t interval, unsigned burst) {
4268 int r;
4269
4270 assert_return(s, -EINVAL);
4271
 4272 /* Turning on ratelimiting on event source types that don't support it is a loggable offense. Doing
 4273 * so is a programming error. */
4274 assert_return(EVENT_SOURCE_CAN_RATE_LIMIT(s->type), -EDOM);
4275
 4276 /* When ratelimiting is configured, we'll always reset the rate limit state first and start fresh,
 4277 * non-ratelimited. */
4278 r = event_source_leave_ratelimit(s);
4279 if (r < 0)
4280 return r;
4281
4282 s->rate_limit = (RateLimit) { interval, burst };
4283 return 0;
4284}
4285
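/* Usage sketch (illustrative): cap a busy I/O source at 100 dispatches per 5s window; once
 * the burst is exhausted the source is taken offline until the interval elapses, then
 * resumed automatically:
 *
 *     (void) sd_event_source_set_ratelimit(s, 5 * USEC_PER_SEC, 100);
 */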
4286_public_ int sd_event_source_get_ratelimit(sd_event_source *s, uint64_t *ret_interval, unsigned *ret_burst) {
4287 assert_return(s, -EINVAL);
4288
 4289 /* Querying whether an event source has ratelimiting configured is not a loggable offense, hence
 4290 * don't use assert_return(). Unlike turning on ratelimiting, it's not really a programming error. */
4291 if (!EVENT_SOURCE_CAN_RATE_LIMIT(s->type))
4292 return -EDOM;
4293
4294 if (!ratelimit_configured(&s->rate_limit))
4295 return -ENOEXEC;
4296
4297 if (ret_interval)
4298 *ret_interval = s->rate_limit.interval;
4299 if (ret_burst)
4300 *ret_burst = s->rate_limit.burst;
4301
4302 return 0;
4303}
4304
4305_public_ int sd_event_source_is_ratelimited(sd_event_source *s) {
4306 assert_return(s, -EINVAL);
4307
4308 if (!EVENT_SOURCE_CAN_RATE_LIMIT(s->type))
4309 return false;
4310
4311 if (!ratelimit_configured(&s->rate_limit))
4312 return false;
4313
4314 return s->ratelimited;
4315}