/* SPDX-License-Identifier: LGPL-2.1+ */

#include <sys/epoll.h>
#include <sys/inotify.h>
#include <sys/timerfd.h>
#include <sys/wait.h>

#include "sd-event.h"
#include "sd-id128.h"

#include "alloc-util.h"
#include "event-source.h"
#include "fd-util.h"
#include "hashmap.h"
#include "list.h"
#include "macro.h"
#include "prioq.h"
#include "process-util.h"
#include "set.h"
#include "signal-util.h"
#include "string-table.h"
#include "string-util.h"
#include "time-util.h"

#define DEFAULT_ACCURACY_USEC (250 * USEC_PER_MSEC)

static const char* const event_source_type_table[_SOURCE_EVENT_SOURCE_TYPE_MAX] = {
        [SOURCE_IO] = "io",
        [SOURCE_TIME_REALTIME] = "realtime",
        [SOURCE_TIME_BOOTTIME] = "boottime",
        [SOURCE_TIME_MONOTONIC] = "monotonic",
        [SOURCE_TIME_REALTIME_ALARM] = "realtime-alarm",
        [SOURCE_TIME_BOOTTIME_ALARM] = "boottime-alarm",
        [SOURCE_SIGNAL] = "signal",
        [SOURCE_CHILD] = "child",
        [SOURCE_DEFER] = "defer",
        [SOURCE_POST] = "post",
        [SOURCE_EXIT] = "exit",
        [SOURCE_WATCHDOG] = "watchdog",
        [SOURCE_INOTIFY] = "inotify",
};

DEFINE_PRIVATE_STRING_TABLE_LOOKUP_TO_STRING(event_source_type, int);

#define EVENT_SOURCE_IS_TIME(t) IN_SET((t), SOURCE_TIME_REALTIME, SOURCE_TIME_BOOTTIME, SOURCE_TIME_MONOTONIC, SOURCE_TIME_REALTIME_ALARM, SOURCE_TIME_BOOTTIME_ALARM)

struct sd_event {
        unsigned n_ref;

        int epoll_fd;
        int watchdog_fd;

        Prioq *pending;
        Prioq *prepare;

        /* timerfd_create() only supports these five clocks so far. We
         * can add support for more clocks when the kernel learns to
         * deal with them, too. */
        struct clock_data realtime;
        struct clock_data boottime;
        struct clock_data monotonic;
        struct clock_data realtime_alarm;
        struct clock_data boottime_alarm;

        usec_t perturb;

        sd_event_source **signal_sources; /* indexed by signal number */
        Hashmap *signal_data; /* indexed by priority */

        Hashmap *child_sources;
        unsigned n_enabled_child_sources;

        Set *post_sources;

        Prioq *exit;

        Hashmap *inotify_data; /* indexed by priority */

        /* A list of inode structures that still have an fd open, that we need to close before the next loop iteration */
        LIST_HEAD(struct inode_data, inode_data_to_close);

        /* A list of inotify objects that already have events buffered which aren't processed yet */
        LIST_HEAD(struct inotify_data, inotify_data_buffered);

        pid_t original_pid;

        uint64_t iteration;
        triple_timestamp timestamp;
        int state;

        bool exit_requested:1;
        bool need_process_child:1;
        bool profile_delays:1;

        sd_event **default_event_ptr;

        usec_t watchdog_last, watchdog_period;

        unsigned n_sources;

        LIST_HEAD(sd_event_source, sources);

        usec_t last_run, last_log;
        unsigned delays[sizeof(usec_t) * 8];
};

static thread_local sd_event *default_event = NULL;

static void source_disconnect(sd_event_source *s);
static void event_gc_inode_data(sd_event *e, struct inode_data *d);

static sd_event *event_resolve(sd_event *e) {
        return e == SD_EVENT_DEFAULT ? default_event : e;
}

static int pending_prioq_compare(const void *a, const void *b) {
        const sd_event_source *x = a, *y = b;
        int r;

        /* Enabled ones first */
        if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
                return -1;
        if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
                return 1;

        /* Lower priority values first */
        r = CMP(x->priority, y->priority);
        if (r != 0)
                return r;

        /* Older entries first */
        return CMP(x->pending_iteration, y->pending_iteration);
}

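/* Worked example of the comparator contract (illustrative, not part of the
 * original file): for two enabled pending sources p1 (priority -10, queued in
 * iteration 7) and p2 (priority 0, queued in iteration 3),
 * pending_prioq_compare(p1, p2) < 0, i.e. p1 is dispatched first even though
 * it was queued later; pending_iteration only breaks ties between sources of
 * equal priority. */
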
static int prepare_prioq_compare(const void *a, const void *b) {
        const sd_event_source *x = a, *y = b;
        int r;

        /* Enabled ones first */
        if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
                return -1;
        if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
                return 1;

        /* Move most recently prepared ones last, so that we can stop
         * preparing as soon as we hit one that has already been
         * prepared in the current iteration */
        r = CMP(x->prepare_iteration, y->prepare_iteration);
        if (r != 0)
                return r;

        /* Lower priority values first */
        return CMP(x->priority, y->priority);
}

static int earliest_time_prioq_compare(const void *a, const void *b) {
        const sd_event_source *x = a, *y = b;

        assert(EVENT_SOURCE_IS_TIME(x->type));
        assert(x->type == y->type);

        /* Enabled ones first */
        if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
                return -1;
        if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
                return 1;

        /* Move the pending ones to the end */
        if (!x->pending && y->pending)
                return -1;
        if (x->pending && !y->pending)
                return 1;

        /* Order by time */
        return CMP(x->time.next, y->time.next);
}

static usec_t time_event_source_latest(const sd_event_source *s) {
        return usec_add(s->time.next, s->time.accuracy);
}

static int latest_time_prioq_compare(const void *a, const void *b) {
        const sd_event_source *x = a, *y = b;

        assert(EVENT_SOURCE_IS_TIME(x->type));
        assert(x->type == y->type);

        /* Enabled ones first */
        if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
                return -1;
        if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
                return 1;

        /* Move the pending ones to the end */
        if (!x->pending && y->pending)
                return -1;
        if (x->pending && !y->pending)
                return 1;

        /* Order by time */
        return CMP(time_event_source_latest(x), time_event_source_latest(y));
}

static int exit_prioq_compare(const void *a, const void *b) {
        const sd_event_source *x = a, *y = b;

        assert(x->type == SOURCE_EXIT);
        assert(y->type == SOURCE_EXIT);

        /* Enabled ones first */
        if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
                return -1;
        if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
                return 1;

        /* Lower priority values first */
        return CMP(x->priority, y->priority);
}

static void free_clock_data(struct clock_data *d) {
        assert(d);
        assert(d->wakeup == WAKEUP_CLOCK_DATA);

        safe_close(d->fd);
        prioq_free(d->earliest);
        prioq_free(d->latest);
}

static sd_event *event_free(sd_event *e) {
        sd_event_source *s;

        assert(e);

        while ((s = e->sources)) {
                assert(s->floating);
                source_disconnect(s);
                sd_event_source_unref(s);
        }

        assert(e->n_sources == 0);

        if (e->default_event_ptr)
                *(e->default_event_ptr) = NULL;

        safe_close(e->epoll_fd);
        safe_close(e->watchdog_fd);

        free_clock_data(&e->realtime);
        free_clock_data(&e->boottime);
        free_clock_data(&e->monotonic);
        free_clock_data(&e->realtime_alarm);
        free_clock_data(&e->boottime_alarm);

        prioq_free(e->pending);
        prioq_free(e->prepare);
        prioq_free(e->exit);

        free(e->signal_sources);
        hashmap_free(e->signal_data);

        hashmap_free(e->inotify_data);

        hashmap_free(e->child_sources);
        set_free(e->post_sources);

        return mfree(e);
}

_public_ int sd_event_new(sd_event** ret) {
        sd_event *e;
        int r;

        assert_return(ret, -EINVAL);

        e = new(sd_event, 1);
        if (!e)
                return -ENOMEM;

        *e = (sd_event) {
                .n_ref = 1,
                .epoll_fd = -1,
                .watchdog_fd = -1,
                .realtime.wakeup = WAKEUP_CLOCK_DATA,
                .realtime.fd = -1,
                .realtime.next = USEC_INFINITY,
                .boottime.wakeup = WAKEUP_CLOCK_DATA,
                .boottime.fd = -1,
                .boottime.next = USEC_INFINITY,
                .monotonic.wakeup = WAKEUP_CLOCK_DATA,
                .monotonic.fd = -1,
                .monotonic.next = USEC_INFINITY,
                .realtime_alarm.wakeup = WAKEUP_CLOCK_DATA,
                .realtime_alarm.fd = -1,
                .realtime_alarm.next = USEC_INFINITY,
                .boottime_alarm.wakeup = WAKEUP_CLOCK_DATA,
                .boottime_alarm.fd = -1,
                .boottime_alarm.next = USEC_INFINITY,
                .perturb = USEC_INFINITY,
                .original_pid = getpid_cached(),
        };

        r = prioq_ensure_allocated(&e->pending, pending_prioq_compare);
        if (r < 0)
                goto fail;

        e->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
        if (e->epoll_fd < 0) {
                r = -errno;
                goto fail;
        }

        e->epoll_fd = fd_move_above_stdio(e->epoll_fd);

        if (secure_getenv("SD_EVENT_PROFILE_DELAYS")) {
                log_debug("Event loop profiling enabled. Logarithmic histogram of event loop iterations in the range 2^0 ... 2^63 us will be logged every 5s.");
                e->profile_delays = true;
        }

        *ret = e;
        return 0;

fail:
        event_free(e);
        return r;
}

DEFINE_PUBLIC_TRIVIAL_REF_UNREF_FUNC(sd_event, sd_event, event_free);

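/* Minimal usage sketch for the constructor above (illustrative only, not
 * part of the original file):
 *
 *     sd_event *e = NULL;
 *     int r;
 *
 *     r = sd_event_new(&e);
 *     if (r < 0)
 *             return r;
 *
 *     ...add sources, run the loop...
 *
 *     e = sd_event_unref(e);
 */
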
static bool event_pid_changed(sd_event *e) {
        assert(e);

        /* We don't support people creating an event loop and keeping
         * it around over a fork(). Let's complain. */

        return e->original_pid != getpid_cached();
}

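/* Illustrative consequence of this check (not part of the original file): an
 * event loop object created before fork() is unusable in the child, and all
 * entry points fail there:
 *
 *     sd_event *e;
 *     assert_se(sd_event_new(&e) >= 0);
 *     if (fork() == 0) {
 *             assert_se(sd_event_add_defer(e, NULL, callback, NULL) == -ECHILD);
 *             _exit(EXIT_SUCCESS);
 *     }
 */
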
static void source_io_unregister(sd_event_source *s) {
        int r;

        assert(s);
        assert(s->type == SOURCE_IO);

        if (event_pid_changed(s->event))
                return;

        if (!s->io.registered)
                return;

        r = epoll_ctl(s->event->epoll_fd, EPOLL_CTL_DEL, s->io.fd, NULL);
        if (r < 0)
                log_debug_errno(errno, "Failed to remove source %s (type %s) from epoll: %m",
                                strna(s->description), event_source_type_to_string(s->type));

        s->io.registered = false;
}

static int source_io_register(
                sd_event_source *s,
                int enabled,
                uint32_t events) {

        struct epoll_event ev;
        int r;

        assert(s);
        assert(s->type == SOURCE_IO);
        assert(enabled != SD_EVENT_OFF);

        ev = (struct epoll_event) {
                .events = events | (enabled == SD_EVENT_ONESHOT ? EPOLLONESHOT : 0),
                .data.ptr = s,
        };

        if (s->io.registered)
                r = epoll_ctl(s->event->epoll_fd, EPOLL_CTL_MOD, s->io.fd, &ev);
        else
                r = epoll_ctl(s->event->epoll_fd, EPOLL_CTL_ADD, s->io.fd, &ev);
        if (r < 0)
                return -errno;

        s->io.registered = true;

        return 0;
}

static clockid_t event_source_type_to_clock(EventSourceType t) {

        switch (t) {

        case SOURCE_TIME_REALTIME:
                return CLOCK_REALTIME;

        case SOURCE_TIME_BOOTTIME:
                return CLOCK_BOOTTIME;

        case SOURCE_TIME_MONOTONIC:
                return CLOCK_MONOTONIC;

        case SOURCE_TIME_REALTIME_ALARM:
                return CLOCK_REALTIME_ALARM;

        case SOURCE_TIME_BOOTTIME_ALARM:
                return CLOCK_BOOTTIME_ALARM;

        default:
                return (clockid_t) -1;
        }
}

static EventSourceType clock_to_event_source_type(clockid_t clock) {

        switch (clock) {

        case CLOCK_REALTIME:
                return SOURCE_TIME_REALTIME;

        case CLOCK_BOOTTIME:
                return SOURCE_TIME_BOOTTIME;

        case CLOCK_MONOTONIC:
                return SOURCE_TIME_MONOTONIC;

        case CLOCK_REALTIME_ALARM:
                return SOURCE_TIME_REALTIME_ALARM;

        case CLOCK_BOOTTIME_ALARM:
                return SOURCE_TIME_BOOTTIME_ALARM;

        default:
                return _SOURCE_EVENT_SOURCE_TYPE_INVALID;
        }
}

static struct clock_data* event_get_clock_data(sd_event *e, EventSourceType t) {
        assert(e);

        switch (t) {

        case SOURCE_TIME_REALTIME:
                return &e->realtime;

        case SOURCE_TIME_BOOTTIME:
                return &e->boottime;

        case SOURCE_TIME_MONOTONIC:
                return &e->monotonic;

        case SOURCE_TIME_REALTIME_ALARM:
                return &e->realtime_alarm;

        case SOURCE_TIME_BOOTTIME_ALARM:
                return &e->boottime_alarm;

        default:
                return NULL;
        }
}

static int event_make_signal_data(
                sd_event *e,
                int sig,
                struct signal_data **ret) {

        struct epoll_event ev;
        struct signal_data *d;
        bool added = false;
        sigset_t ss_copy;
        int64_t priority;
        int r;

        assert(e);

        if (event_pid_changed(e))
                return -ECHILD;

        if (e->signal_sources && e->signal_sources[sig])
                priority = e->signal_sources[sig]->priority;
        else
                priority = SD_EVENT_PRIORITY_NORMAL;

        d = hashmap_get(e->signal_data, &priority);
        if (d) {
                if (sigismember(&d->sigset, sig) > 0) {
                        if (ret)
                                *ret = d;
                        return 0;
                }
        } else {
                r = hashmap_ensure_allocated(&e->signal_data, &uint64_hash_ops);
                if (r < 0)
                        return r;

                d = new(struct signal_data, 1);
                if (!d)
                        return -ENOMEM;

                *d = (struct signal_data) {
                        .wakeup = WAKEUP_SIGNAL_DATA,
                        .fd = -1,
                        .priority = priority,
                };

                r = hashmap_put(e->signal_data, &d->priority, d);
                if (r < 0) {
                        free(d);
                        return r;
                }

                added = true;
        }

        ss_copy = d->sigset;
        assert_se(sigaddset(&ss_copy, sig) >= 0);

        r = signalfd(d->fd, &ss_copy, SFD_NONBLOCK|SFD_CLOEXEC);
        if (r < 0) {
                r = -errno;
                goto fail;
        }

        d->sigset = ss_copy;

        if (d->fd >= 0) {
                if (ret)
                        *ret = d;
                return 0;
        }

        d->fd = fd_move_above_stdio(r);

        ev = (struct epoll_event) {
                .events = EPOLLIN,
                .data.ptr = d,
        };

        r = epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, d->fd, &ev);
        if (r < 0) {
                r = -errno;
                goto fail;
        }

        if (ret)
                *ret = d;

        return 0;

fail:
        if (added) {
                d->fd = safe_close(d->fd);
                hashmap_remove(e->signal_data, &d->priority);
                free(d);
        }

        return r;
}

static void event_unmask_signal_data(sd_event *e, struct signal_data *d, int sig) {
        assert(e);
        assert(d);

        /* Turns off the specified signal in the signal data
         * object. If the signal mask of the object becomes empty
         * that way, the object is removed. */

        if (sigismember(&d->sigset, sig) == 0)
                return;

        assert_se(sigdelset(&d->sigset, sig) >= 0);

        if (sigisemptyset(&d->sigset)) {

                /* If the mask is now all-zero we can get rid of the structure */
                hashmap_remove(e->signal_data, &d->priority);
                safe_close(d->fd);
                free(d);
                return;
        }

        assert(d->fd >= 0);

        if (signalfd(d->fd, &d->sigset, SFD_NONBLOCK|SFD_CLOEXEC) < 0)
                log_debug_errno(errno, "Failed to unset signal bit, ignoring: %m");
}

static void event_gc_signal_data(sd_event *e, const int64_t *priority, int sig) {
        struct signal_data *d;
        static const int64_t zero_priority = 0;

        assert(e);

        /* Rechecks if the specified signal is still something we are
         * interested in. If not, we'll unmask it, and possibly drop
         * the signalfd for it. */

        if (sig == SIGCHLD &&
            e->n_enabled_child_sources > 0)
                return;

        if (e->signal_sources &&
            e->signal_sources[sig] &&
            e->signal_sources[sig]->enabled != SD_EVENT_OFF)
                return;

        /*
         * The specified signal might be enabled in three different queues:
         *
         * 1) the one that belongs to the priority passed (if it is non-NULL)
         * 2) the one that belongs to the priority of the event source of the signal (if there is one)
         * 3) the 0 priority (to cover the SIGCHLD case)
         *
         * Hence, let's remove it from all three here.
         */

        if (priority) {
                d = hashmap_get(e->signal_data, priority);
                if (d)
                        event_unmask_signal_data(e, d, sig);
        }

        if (e->signal_sources && e->signal_sources[sig]) {
                d = hashmap_get(e->signal_data, &e->signal_sources[sig]->priority);
                if (d)
                        event_unmask_signal_data(e, d, sig);
        }

        d = hashmap_get(e->signal_data, &zero_priority);
        if (d)
                event_unmask_signal_data(e, d, sig);
}

static void source_disconnect(sd_event_source *s) {
        sd_event *event;

        assert(s);

        if (!s->event)
                return;

        assert(s->event->n_sources > 0);

        switch (s->type) {

        case SOURCE_IO:
                if (s->io.fd >= 0)
                        source_io_unregister(s);

                break;

        case SOURCE_TIME_REALTIME:
        case SOURCE_TIME_BOOTTIME:
        case SOURCE_TIME_MONOTONIC:
        case SOURCE_TIME_REALTIME_ALARM:
        case SOURCE_TIME_BOOTTIME_ALARM: {
                struct clock_data *d;

                d = event_get_clock_data(s->event, s->type);
                assert(d);

                prioq_remove(d->earliest, s, &s->time.earliest_index);
                prioq_remove(d->latest, s, &s->time.latest_index);
                d->needs_rearm = true;
                break;
        }

        case SOURCE_SIGNAL:
                if (s->signal.sig > 0) {

                        if (s->event->signal_sources)
                                s->event->signal_sources[s->signal.sig] = NULL;

                        event_gc_signal_data(s->event, &s->priority, s->signal.sig);
                }

                break;

        case SOURCE_CHILD:
                if (s->child.pid > 0) {
                        if (s->enabled != SD_EVENT_OFF) {
                                assert(s->event->n_enabled_child_sources > 0);
                                s->event->n_enabled_child_sources--;
                        }

                        (void) hashmap_remove(s->event->child_sources, PID_TO_PTR(s->child.pid));
                        event_gc_signal_data(s->event, &s->priority, SIGCHLD);
                }

                break;

        case SOURCE_DEFER:
                /* nothing */
                break;

        case SOURCE_POST:
                set_remove(s->event->post_sources, s);
                break;

        case SOURCE_EXIT:
                prioq_remove(s->event->exit, s, &s->exit.prioq_index);
                break;

        case SOURCE_INOTIFY: {
                struct inode_data *inode_data;

                inode_data = s->inotify.inode_data;
                if (inode_data) {
                        struct inotify_data *inotify_data;
                        assert_se(inotify_data = inode_data->inotify_data);

                        /* Detach this event source from the inode object */
                        LIST_REMOVE(inotify.by_inode_data, inode_data->event_sources, s);
                        s->inotify.inode_data = NULL;

                        if (s->pending) {
                                assert(inotify_data->n_pending > 0);
                                inotify_data->n_pending--;
                        }

                        /* Note that we don't reduce the inotify mask for the watch descriptor here if the inode is
                         * continued to being watched. That's because inotify doesn't really have an API for that: we
                         * can only change watch masks with access to the original inode either by fd or by path. But
                         * paths aren't stable, and keeping an O_PATH fd open all the time would mean wasting an fd
                         * continuously and keeping the mount busy which we can't really do. We could reconstruct the
                         * original inode from /proc/self/fdinfo/$INOTIFY_FD (as all watch descriptors are listed
                         * there), but given the need for open_by_handle_at() which is privileged and not universally
                         * available this would be quite an incomplete solution. Hence we go the other way, leave the
                         * mask set, even if it is not minimized now, and ignore all events we aren't interested in
                         * anymore after reception. Yes, this sucks, but … Linux … */

                        /* Maybe release the inode data (and its inotify) */
                        event_gc_inode_data(s->event, inode_data);
                }

                break;
        }

        default:
                assert_not_reached("Wut? I shouldn't exist.");
        }

        if (s->pending)
                prioq_remove(s->event->pending, s, &s->pending_index);

        if (s->prepare)
                prioq_remove(s->event->prepare, s, &s->prepare_index);

        event = s->event;

        s->type = _SOURCE_EVENT_SOURCE_TYPE_INVALID;
        s->event = NULL;
        LIST_REMOVE(sources, event->sources, s);
        event->n_sources--;

        if (!s->floating)
                sd_event_unref(event);
}

static void source_free(sd_event_source *s) {
        assert(s);

        source_disconnect(s);

        if (s->type == SOURCE_IO && s->io.owned)
                s->io.fd = safe_close(s->io.fd);

        if (s->destroy_callback)
                s->destroy_callback(s->userdata);

        free(s->description);
        free(s);
}

DEFINE_TRIVIAL_CLEANUP_FUNC(sd_event_source*, source_free);

static int source_set_pending(sd_event_source *s, bool b) {
        int r;

        assert(s);
        assert(s->type != SOURCE_EXIT);

        if (s->pending == b)
                return 0;

        s->pending = b;

        if (b) {
                s->pending_iteration = s->event->iteration;

                r = prioq_put(s->event->pending, s, &s->pending_index);
                if (r < 0) {
                        s->pending = false;
                        return r;
                }
        } else
                assert_se(prioq_remove(s->event->pending, s, &s->pending_index));

        if (EVENT_SOURCE_IS_TIME(s->type)) {
                struct clock_data *d;

                d = event_get_clock_data(s->event, s->type);
                assert(d);

                prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
                prioq_reshuffle(d->latest, s, &s->time.latest_index);
                d->needs_rearm = true;
        }

        if (s->type == SOURCE_SIGNAL && !b) {
                struct signal_data *d;

                d = hashmap_get(s->event->signal_data, &s->priority);
                if (d && d->current == s)
                        d->current = NULL;
        }

        if (s->type == SOURCE_INOTIFY) {

                assert(s->inotify.inode_data);
                assert(s->inotify.inode_data->inotify_data);

                if (b)
                        s->inotify.inode_data->inotify_data->n_pending++;
                else {
                        assert(s->inotify.inode_data->inotify_data->n_pending > 0);
                        s->inotify.inode_data->inotify_data->n_pending--;
                }
        }

        return 0;
}

static sd_event_source *source_new(sd_event *e, bool floating, EventSourceType type) {
        sd_event_source *s;

        assert(e);

        s = new(sd_event_source, 1);
        if (!s)
                return NULL;

        *s = (struct sd_event_source) {
                .n_ref = 1,
                .event = e,
                .floating = floating,
                .type = type,
                .pending_index = PRIOQ_IDX_NULL,
                .prepare_index = PRIOQ_IDX_NULL,
        };

        if (!floating)
                sd_event_ref(e);

        LIST_PREPEND(sources, e->sources, s);
        e->n_sources++;

        return s;
}

_public_ int sd_event_add_io(
                sd_event *e,
                sd_event_source **ret,
                int fd,
                uint32_t events,
                sd_event_io_handler_t callback,
                void *userdata) {

        _cleanup_(source_freep) sd_event_source *s = NULL;
        int r;

        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(fd >= 0, -EBADF);
        assert_return(!(events & ~(EPOLLIN|EPOLLOUT|EPOLLRDHUP|EPOLLPRI|EPOLLERR|EPOLLHUP|EPOLLET)), -EINVAL);
        assert_return(callback, -EINVAL);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(e), -ECHILD);

        s = source_new(e, !ret, SOURCE_IO);
        if (!s)
                return -ENOMEM;

        s->wakeup = WAKEUP_EVENT_SOURCE;
        s->io.fd = fd;
        s->io.events = events;
        s->io.callback = callback;
        s->userdata = userdata;
        s->enabled = SD_EVENT_ON;

        r = source_io_register(s, s->enabled, events);
        if (r < 0)
                return r;

        if (ret)
                *ret = s;
        TAKE_PTR(s);

        return 0;
}

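/* Example caller (illustrative sketch; "fd" is assumed to be some readable
 * non-blocking file descriptor owned by the caller):
 *
 *     static int on_io(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
 *             char buf[256];
 *             ssize_t n;
 *
 *             n = read(fd, buf, sizeof(buf));
 *             return n < 0 ? -errno : 0;
 *     }
 *
 *     r = sd_event_add_io(e, &source, fd, EPOLLIN, on_io, NULL);
 *
 * The new source starts out enabled (SD_EVENT_ON); include EPOLLET in the
 * events mask for edge-triggered operation. */
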
static void initialize_perturb(sd_event *e) {
        sd_id128_t bootid = {};

        /* When we sleep for longer, we try to realign the wakeup to
           the same time within each minute/second/250ms, so that
           events all across the system can be coalesced into a single
           CPU wakeup. However, let's take some system-specific
           randomness for this value, so that in a network of systems
           with synced clocks timer events are distributed a
           bit. Here, we calculate a perturbation usec offset from the
           boot ID. */

        if (_likely_(e->perturb != USEC_INFINITY))
                return;

        if (sd_id128_get_boot(&bootid) >= 0)
                e->perturb = (bootid.qwords[0] ^ bootid.qwords[1]) % USEC_PER_MINUTE;
}

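/* Worked example (not part of the original file): if bootid.qwords[0] ^
 * bootid.qwords[1] == 123456789, then e->perturb = 123456789 %
 * USEC_PER_MINUTE = 3456789 us, i.e. roughly 3.5s past the minute. Every
 * minute-granularity wakeup on this machine is then rounded to that same
 * offset, while machines with other boot IDs land at other offsets,
 * spreading the load across a fleet with synced clocks. */
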
static int event_setup_timer_fd(
                sd_event *e,
                struct clock_data *d,
                clockid_t clock) {

        struct epoll_event ev;
        int r, fd;

        assert(e);
        assert(d);

        if (_likely_(d->fd >= 0))
                return 0;

        fd = timerfd_create(clock, TFD_NONBLOCK|TFD_CLOEXEC);
        if (fd < 0)
                return -errno;

        fd = fd_move_above_stdio(fd);

        ev = (struct epoll_event) {
                .events = EPOLLIN,
                .data.ptr = d,
        };

        r = epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, fd, &ev);
        if (r < 0) {
                safe_close(fd);
                return -errno;
        }

        d->fd = fd;
        return 0;
}

static int time_exit_callback(sd_event_source *s, uint64_t usec, void *userdata) {
        assert(s);

        return sd_event_exit(sd_event_source_get_event(s), PTR_TO_INT(userdata));
}

_public_ int sd_event_add_time(
                sd_event *e,
                sd_event_source **ret,
                clockid_t clock,
                uint64_t usec,
                uint64_t accuracy,
                sd_event_time_handler_t callback,
                void *userdata) {

        EventSourceType type;
        _cleanup_(source_freep) sd_event_source *s = NULL;
        struct clock_data *d;
        int r;

        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(accuracy != (uint64_t) -1, -EINVAL);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(e), -ECHILD);

        if (!clock_supported(clock)) /* Checks whether the kernel supports the clock */
                return -EOPNOTSUPP;

        type = clock_to_event_source_type(clock); /* checks whether sd-event supports this clock */
        if (type < 0)
                return -EOPNOTSUPP;

        if (!callback)
                callback = time_exit_callback;

        d = event_get_clock_data(e, type);
        assert(d);

        r = prioq_ensure_allocated(&d->earliest, earliest_time_prioq_compare);
        if (r < 0)
                return r;

        r = prioq_ensure_allocated(&d->latest, latest_time_prioq_compare);
        if (r < 0)
                return r;

        if (d->fd < 0) {
                r = event_setup_timer_fd(e, d, clock);
                if (r < 0)
                        return r;
        }

        s = source_new(e, !ret, type);
        if (!s)
                return -ENOMEM;

        s->time.next = usec;
        s->time.accuracy = accuracy == 0 ? DEFAULT_ACCURACY_USEC : accuracy;
        s->time.callback = callback;
        s->time.earliest_index = s->time.latest_index = PRIOQ_IDX_NULL;
        s->userdata = userdata;
        s->enabled = SD_EVENT_ONESHOT;

        d->needs_rearm = true;

        r = prioq_put(d->earliest, s, &s->time.earliest_index);
        if (r < 0)
                return r;

        r = prioq_put(d->latest, s, &s->time.latest_index);
        if (r < 0)
                return r;

        if (ret)
                *ret = s;
        TAKE_PTR(s);

        return 0;
}

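/* Example (illustrative): arm a one-shot timer roughly five seconds from
 * now, allowing 100ms of coalescing latitude:
 *
 *     uint64_t now_usec;
 *
 *     r = sd_event_now(e, CLOCK_MONOTONIC, &now_usec);
 *     if (r < 0)
 *             return r;
 *
 *     r = sd_event_add_time(e, &timer_source, CLOCK_MONOTONIC,
 *                           now_usec + 5 * USEC_PER_SEC,
 *                           100 * USEC_PER_MSEC, on_time, NULL);
 *
 * An accuracy of 0 selects DEFAULT_ACCURACY_USEC (250ms); a NULL callback
 * turns the source into an "exit the loop at this time" request, via
 * time_exit_callback() above. */
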
static int signal_exit_callback(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        assert(s);

        return sd_event_exit(sd_event_source_get_event(s), PTR_TO_INT(userdata));
}

_public_ int sd_event_add_signal(
                sd_event *e,
                sd_event_source **ret,
                int sig,
                sd_event_signal_handler_t callback,
                void *userdata) {

        _cleanup_(source_freep) sd_event_source *s = NULL;
        struct signal_data *d;
        sigset_t ss;
        int r;

        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(SIGNAL_VALID(sig), -EINVAL);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(e), -ECHILD);

        if (!callback)
                callback = signal_exit_callback;

        r = pthread_sigmask(SIG_SETMASK, NULL, &ss);
        if (r != 0)
                return -r;

        if (!sigismember(&ss, sig))
                return -EBUSY;

        if (!e->signal_sources) {
                e->signal_sources = new0(sd_event_source*, _NSIG);
                if (!e->signal_sources)
                        return -ENOMEM;
        } else if (e->signal_sources[sig])
                return -EBUSY;

        s = source_new(e, !ret, SOURCE_SIGNAL);
        if (!s)
                return -ENOMEM;

        s->signal.sig = sig;
        s->signal.callback = callback;
        s->userdata = userdata;
        s->enabled = SD_EVENT_ON;

        e->signal_sources[sig] = s;

        r = event_make_signal_data(e, sig, &d);
        if (r < 0)
                return r;

        /* Use the signal name as description for the event source by default */
        (void) sd_event_source_set_description(s, signal_to_string(sig));

        if (ret)
                *ret = s;
        TAKE_PTR(s);

        return 0;
}

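/* Example (illustrative): the signal must already be blocked in all threads,
 * otherwise the pthread_sigmask() check above makes this fail with -EBUSY:
 *
 *     sigset_t ss;
 *
 *     assert_se(sigemptyset(&ss) >= 0);
 *     assert_se(sigaddset(&ss, SIGTERM) >= 0);
 *     assert_se(pthread_sigmask(SIG_BLOCK, &ss, NULL) == 0);
 *
 *     r = sd_event_add_signal(e, NULL, SIGTERM, NULL, NULL);
 *
 * With a NULL callback, delivery of SIGTERM simply exits the event loop. */
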
_public_ int sd_event_add_child(
                sd_event *e,
                sd_event_source **ret,
                pid_t pid,
                int options,
                sd_event_child_handler_t callback,
                void *userdata) {

        _cleanup_(source_freep) sd_event_source *s = NULL;
        int r;

        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(pid > 1, -EINVAL);
        assert_return(!(options & ~(WEXITED|WSTOPPED|WCONTINUED)), -EINVAL);
        assert_return(options != 0, -EINVAL);
        assert_return(callback, -EINVAL);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(e), -ECHILD);

        r = hashmap_ensure_allocated(&e->child_sources, NULL);
        if (r < 0)
                return r;

        if (hashmap_contains(e->child_sources, PID_TO_PTR(pid)))
                return -EBUSY;

        s = source_new(e, !ret, SOURCE_CHILD);
        if (!s)
                return -ENOMEM;

        s->child.pid = pid;
        s->child.options = options;
        s->child.callback = callback;
        s->userdata = userdata;
        s->enabled = SD_EVENT_ONESHOT;

        r = hashmap_put(e->child_sources, PID_TO_PTR(pid), s);
        if (r < 0)
                return r;

        e->n_enabled_child_sources++;

        r = event_make_signal_data(e, SIGCHLD, NULL);
        if (r < 0) {
                e->n_enabled_child_sources--;
                return r;
        }

        e->need_process_child = true;

        if (ret)
                *ret = s;
        TAKE_PTR(s);

        return 0;
}

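/* Example (illustrative): watch a forked-off child. SIGCHLD must be blocked
 * beforehand, since child sources are driven by the SIGCHLD signalfd
 * machinery set up via event_make_signal_data() above:
 *
 *     sigset_t ss;
 *     pid_t pid;
 *
 *     assert_se(sigemptyset(&ss) >= 0);
 *     assert_se(sigaddset(&ss, SIGCHLD) >= 0);
 *     assert_se(sigprocmask(SIG_BLOCK, &ss, NULL) >= 0);
 *
 *     pid = fork();
 *     if (pid == 0)
 *             _exit(EXIT_SUCCESS);
 *
 *     r = sd_event_add_child(e, &child_source, pid, WEXITED, on_child, NULL);
 */
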
_public_ int sd_event_add_defer(
                sd_event *e,
                sd_event_source **ret,
                sd_event_handler_t callback,
                void *userdata) {

        _cleanup_(source_freep) sd_event_source *s = NULL;
        int r;

        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(callback, -EINVAL);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(e), -ECHILD);

        s = source_new(e, !ret, SOURCE_DEFER);
        if (!s)
                return -ENOMEM;

        s->defer.callback = callback;
        s->userdata = userdata;
        s->enabled = SD_EVENT_ONESHOT;

        r = source_set_pending(s, true);
        if (r < 0)
                return r;

        if (ret)
                *ret = s;
        TAKE_PTR(s);

        return 0;
}

_public_ int sd_event_add_post(
                sd_event *e,
                sd_event_source **ret,
                sd_event_handler_t callback,
                void *userdata) {

        _cleanup_(source_freep) sd_event_source *s = NULL;
        int r;

        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(callback, -EINVAL);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(e), -ECHILD);

        r = set_ensure_allocated(&e->post_sources, NULL);
        if (r < 0)
                return r;

        s = source_new(e, !ret, SOURCE_POST);
        if (!s)
                return -ENOMEM;

        s->post.callback = callback;
        s->userdata = userdata;
        s->enabled = SD_EVENT_ON;

        r = set_put(e->post_sources, s);
        if (r < 0)
                return r;

        if (ret)
                *ret = s;
        TAKE_PTR(s);

        return 0;
}

_public_ int sd_event_add_exit(
                sd_event *e,
                sd_event_source **ret,
                sd_event_handler_t callback,
                void *userdata) {

        _cleanup_(source_freep) sd_event_source *s = NULL;
        int r;

        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(callback, -EINVAL);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(e), -ECHILD);

        r = prioq_ensure_allocated(&e->exit, exit_prioq_compare);
        if (r < 0)
                return r;

        s = source_new(e, !ret, SOURCE_EXIT);
        if (!s)
                return -ENOMEM;

        s->exit.callback = callback;
        s->userdata = userdata;
        s->exit.prioq_index = PRIOQ_IDX_NULL;
        s->enabled = SD_EVENT_ONESHOT;

        r = prioq_put(s->event->exit, s, &s->exit.prioq_index);
        if (r < 0)
                return r;

        if (ret)
                *ret = s;
        TAKE_PTR(s);

        return 0;
}

static void event_free_inotify_data(sd_event *e, struct inotify_data *d) {
        assert(e);

        if (!d)
                return;

        assert(hashmap_isempty(d->inodes));
        assert(hashmap_isempty(d->wd));

        if (d->buffer_filled > 0)
                LIST_REMOVE(buffered, e->inotify_data_buffered, d);

        hashmap_free(d->inodes);
        hashmap_free(d->wd);

        assert_se(hashmap_remove(e->inotify_data, &d->priority) == d);

        if (d->fd >= 0) {
                if (epoll_ctl(e->epoll_fd, EPOLL_CTL_DEL, d->fd, NULL) < 0)
                        log_debug_errno(errno, "Failed to remove inotify fd from epoll, ignoring: %m");

                safe_close(d->fd);
        }
        free(d);
}

static int event_make_inotify_data(
                sd_event *e,
                int64_t priority,
                struct inotify_data **ret) {

        _cleanup_close_ int fd = -1;
        struct inotify_data *d;
        struct epoll_event ev;
        int r;

        assert(e);

        d = hashmap_get(e->inotify_data, &priority);
        if (d) {
                if (ret)
                        *ret = d;
                return 0;
        }

        fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
        if (fd < 0)
                return -errno;

        fd = fd_move_above_stdio(fd);

        r = hashmap_ensure_allocated(&e->inotify_data, &uint64_hash_ops);
        if (r < 0)
                return r;

        d = new(struct inotify_data, 1);
        if (!d)
                return -ENOMEM;

        *d = (struct inotify_data) {
                .wakeup = WAKEUP_INOTIFY_DATA,
                .fd = TAKE_FD(fd),
                .priority = priority,
        };

        r = hashmap_put(e->inotify_data, &d->priority, d);
        if (r < 0) {
                d->fd = safe_close(d->fd);
                free(d);
                return r;
        }

        ev = (struct epoll_event) {
                .events = EPOLLIN,
                .data.ptr = d,
        };

        if (epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, d->fd, &ev) < 0) {
                r = -errno;
                d->fd = safe_close(d->fd); /* let's close this ourselves, as event_free_inotify_data() would otherwise
                                            * remove the fd from the epoll first, which we don't want as we couldn't
                                            * add it in the first place. */
                event_free_inotify_data(e, d);
                return r;
        }

        if (ret)
                *ret = d;

        return 1;
}

static int inode_data_compare(const struct inode_data *x, const struct inode_data *y) {
        int r;

        assert(x);
        assert(y);

        r = CMP(x->dev, y->dev);
        if (r != 0)
                return r;

        return CMP(x->ino, y->ino);
}

static void inode_data_hash_func(const struct inode_data *d, struct siphash *state) {
        assert(d);

        siphash24_compress(&d->dev, sizeof(d->dev), state);
        siphash24_compress(&d->ino, sizeof(d->ino), state);
}

DEFINE_PRIVATE_HASH_OPS(inode_data_hash_ops, struct inode_data, inode_data_hash_func, inode_data_compare);

static void event_free_inode_data(
                sd_event *e,
                struct inode_data *d) {

        assert(e);

        if (!d)
                return;

        assert(!d->event_sources);

        if (d->fd >= 0) {
                LIST_REMOVE(to_close, e->inode_data_to_close, d);
                safe_close(d->fd);
        }

        if (d->inotify_data) {

                if (d->wd >= 0) {
                        if (d->inotify_data->fd >= 0) {
                                /* So here's a problem. At the time this runs the watch descriptor might already be
                                 * invalidated, because an IN_IGNORED event might be queued right the moment we enter
                                 * the syscall. Hence, whenever we get EINVAL, ignore it entirely, since it's a very
                                 * likely case to happen. */

                                if (inotify_rm_watch(d->inotify_data->fd, d->wd) < 0 && errno != EINVAL)
                                        log_debug_errno(errno, "Failed to remove watch descriptor %i from inotify, ignoring: %m", d->wd);
                        }

                        assert_se(hashmap_remove(d->inotify_data->wd, INT_TO_PTR(d->wd)) == d);
                }

                assert_se(hashmap_remove(d->inotify_data->inodes, d) == d);
        }

        free(d);
}

static void event_gc_inode_data(
                sd_event *e,
                struct inode_data *d) {

        struct inotify_data *inotify_data;

        assert(e);

        if (!d)
                return;

        if (d->event_sources)
                return;

        inotify_data = d->inotify_data;
        event_free_inode_data(e, d);

        if (inotify_data && hashmap_isempty(inotify_data->inodes))
                event_free_inotify_data(e, inotify_data);
}

static int event_make_inode_data(
                sd_event *e,
                struct inotify_data *inotify_data,
                dev_t dev,
                ino_t ino,
                struct inode_data **ret) {

        struct inode_data *d, key;
        int r;

        assert(e);
        assert(inotify_data);

        key = (struct inode_data) {
                .ino = ino,
                .dev = dev,
        };

        d = hashmap_get(inotify_data->inodes, &key);
        if (d) {
                if (ret)
                        *ret = d;
                return 0;
        }

        r = hashmap_ensure_allocated(&inotify_data->inodes, &inode_data_hash_ops);
        if (r < 0)
                return r;

        d = new(struct inode_data, 1);
        if (!d)
                return -ENOMEM;

        *d = (struct inode_data) {
                .dev = dev,
                .ino = ino,
                .wd = -1,
                .fd = -1,
                .inotify_data = inotify_data,
        };

        r = hashmap_put(inotify_data->inodes, d, d);
        if (r < 0) {
                free(d);
                return r;
        }

        if (ret)
                *ret = d;

        return 1;
}

static uint32_t inode_data_determine_mask(struct inode_data *d) {
        bool excl_unlink = true;
        uint32_t combined = 0;
        sd_event_source *s;

        assert(d);

        /* Combines the watch masks of all event sources watching this inode. We generally just OR them together, but
         * the IN_EXCL_UNLINK flag is ANDed instead.
         *
         * Note that we add all sources to the mask here, regardless of whether enabled, disabled or oneshot. That's
         * because we cannot change the mask anymore after the event source was created once, since the kernel has no
         * API for that. Hence we need to subscribe to the maximum mask we ever might be interested in, and suppress
         * events we don't care for client-side. */

        LIST_FOREACH(inotify.by_inode_data, s, d->event_sources) {

                if ((s->inotify.mask & IN_EXCL_UNLINK) == 0)
                        excl_unlink = false;

                combined |= s->inotify.mask;
        }

        return (combined & ~(IN_ONESHOT|IN_DONT_FOLLOW|IN_ONLYDIR|IN_EXCL_UNLINK)) | (excl_unlink ? IN_EXCL_UNLINK : 0);
}

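/* Worked example (not part of the original file): two sources on the same
 * inode, one asking for IN_CLOSE_WRITE|IN_EXCL_UNLINK and one for
 * IN_MOVED_TO, combine to IN_CLOSE_WRITE|IN_MOVED_TO without IN_EXCL_UNLINK:
 * only one of the two requested the unlink exclusion, and keeping it set
 * would suppress events the other source explicitly asked for. */
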
static int inode_data_realize_watch(sd_event *e, struct inode_data *d) {
        uint32_t combined_mask;
        int wd, r;

        assert(d);
        assert(d->fd >= 0);

        combined_mask = inode_data_determine_mask(d);

        if (d->wd >= 0 && combined_mask == d->combined_mask)
                return 0;

        r = hashmap_ensure_allocated(&d->inotify_data->wd, NULL);
        if (r < 0)
                return r;

        wd = inotify_add_watch_fd(d->inotify_data->fd, d->fd, combined_mask);
        if (wd < 0)
                return wd;

        if (d->wd < 0) {
                r = hashmap_put(d->inotify_data->wd, INT_TO_PTR(wd), d);
                if (r < 0) {
                        (void) inotify_rm_watch(d->inotify_data->fd, wd);
                        return r;
                }

                d->wd = wd;

        } else if (d->wd != wd) {

                log_debug("Weird, the watch descriptor we already knew for this inode changed?");
                (void) inotify_rm_watch(d->inotify_data->fd, wd);
                return -EINVAL;
        }

        d->combined_mask = combined_mask;
        return 0;
}

_public_ int sd_event_add_inotify(
                sd_event *e,
                sd_event_source **ret,
                const char *path,
                uint32_t mask,
                sd_event_inotify_handler_t callback,
                void *userdata) {

        struct inotify_data *inotify_data = NULL;
        struct inode_data *inode_data = NULL;
        _cleanup_close_ int fd = -1;
        _cleanup_(source_freep) sd_event_source *s = NULL;
        struct stat st;
        int r;

        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(path, -EINVAL);
        assert_return(callback, -EINVAL);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(e), -ECHILD);

        /* Refuse IN_MASK_ADD since we coalesce watches on the same inode, and hence really don't want to merge
         * masks. Or in other words, this whole code exists only to manage IN_MASK_ADD type operations for you, hence
         * the user can't use them for us. */
        if (mask & IN_MASK_ADD)
                return -EINVAL;

        fd = open(path, O_PATH|O_CLOEXEC|
                  (mask & IN_ONLYDIR ? O_DIRECTORY : 0)|
                  (mask & IN_DONT_FOLLOW ? O_NOFOLLOW : 0));
        if (fd < 0)
                return -errno;

        if (fstat(fd, &st) < 0)
                return -errno;

        s = source_new(e, !ret, SOURCE_INOTIFY);
        if (!s)
                return -ENOMEM;

        s->enabled = mask & IN_ONESHOT ? SD_EVENT_ONESHOT : SD_EVENT_ON;
        s->inotify.mask = mask;
        s->inotify.callback = callback;
        s->userdata = userdata;

        /* Allocate an inotify object for this priority, and an inode object within it */
        r = event_make_inotify_data(e, SD_EVENT_PRIORITY_NORMAL, &inotify_data);
        if (r < 0)
                return r;

        r = event_make_inode_data(e, inotify_data, st.st_dev, st.st_ino, &inode_data);
        if (r < 0) {
                event_free_inotify_data(e, inotify_data);
                return r;
        }

        /* Keep the O_PATH fd around until the first iteration of the loop, so that we can still change the priority of
         * the event source, until then, for which we need the original inode. */
        if (inode_data->fd < 0) {
                inode_data->fd = TAKE_FD(fd);
                LIST_PREPEND(to_close, e->inode_data_to_close, inode_data);
        }

        /* Link our event source to the inode data object */
        LIST_PREPEND(inotify.by_inode_data, inode_data->event_sources, s);
        s->inotify.inode_data = inode_data;

        /* Actually realize the watch now */
        r = inode_data_realize_watch(e, inode_data);
        if (r < 0)
                return r;

        (void) sd_event_source_set_description(s, path);

        if (ret)
                *ret = s;
        TAKE_PTR(s);

        return 0;
}

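/* Example (illustrative; the path is made up): watch a directory for newly
 * appearing files:
 *
 *     static int on_inotify(sd_event_source *s, const struct inotify_event *ev, void *userdata) {
 *             if (ev->len > 0)
 *                     log_debug("Saw inotify event for %s", ev->name);
 *             return 0;
 *     }
 *
 *     r = sd_event_add_inotify(e, NULL, "/run/example", IN_CREATE|IN_MOVED_TO, on_inotify, NULL);
 *
 * Watches on the same inode share one kernel watch descriptor; events a
 * source didn't subscribe to are filtered out client-side, as described in
 * inode_data_determine_mask() above. */
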
static sd_event_source* event_source_free(sd_event_source *s) {
        if (!s)
                return NULL;

        /* Here's a special hack: when we are called from a
         * dispatch handler we won't free the event source
         * immediately, but we will detach the fd from the
         * epoll. This way it is safe for the caller to unref
         * the event source and immediately close the fd, but
         * we still retain a valid event source object after
         * the callback. */

        if (s->dispatching) {
                if (s->type == SOURCE_IO)
                        source_io_unregister(s);

                source_disconnect(s);
        } else
                source_free(s);

        return NULL;
}

DEFINE_PUBLIC_TRIVIAL_REF_UNREF_FUNC(sd_event_source, sd_event_source, event_source_free);

_public_ int sd_event_source_set_description(sd_event_source *s, const char *description) {
        assert_return(s, -EINVAL);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        return free_and_strdup(&s->description, description);
}

_public_ int sd_event_source_get_description(sd_event_source *s, const char **description) {
        assert_return(s, -EINVAL);
        assert_return(description, -EINVAL);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        if (!s->description)
                return -ENXIO;

        *description = s->description;
        return 0;
}

_public_ sd_event *sd_event_source_get_event(sd_event_source *s) {
        assert_return(s, NULL);

        return s->event;
}

_public_ int sd_event_source_get_pending(sd_event_source *s) {
        assert_return(s, -EINVAL);
        assert_return(s->type != SOURCE_EXIT, -EDOM);
        assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        return s->pending;
}

_public_ int sd_event_source_get_io_fd(sd_event_source *s) {
        assert_return(s, -EINVAL);
        assert_return(s->type == SOURCE_IO, -EDOM);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        return s->io.fd;
}

_public_ int sd_event_source_set_io_fd(sd_event_source *s, int fd) {
        int r;

        assert_return(s, -EINVAL);
        assert_return(fd >= 0, -EBADF);
        assert_return(s->type == SOURCE_IO, -EDOM);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        if (s->io.fd == fd)
                return 0;

        if (s->enabled == SD_EVENT_OFF) {
                s->io.fd = fd;
                s->io.registered = false;
        } else {
                int saved_fd;

                saved_fd = s->io.fd;
                assert(s->io.registered);

                s->io.fd = fd;
                s->io.registered = false;

                r = source_io_register(s, s->enabled, s->io.events);
                if (r < 0) {
                        s->io.fd = saved_fd;
                        s->io.registered = true;
                        return r;
                }

                epoll_ctl(s->event->epoll_fd, EPOLL_CTL_DEL, saved_fd, NULL);
        }

        return 0;
}

_public_ int sd_event_source_get_io_fd_own(sd_event_source *s) {
        assert_return(s, -EINVAL);
        assert_return(s->type == SOURCE_IO, -EDOM);

        return s->io.owned;
}

_public_ int sd_event_source_set_io_fd_own(sd_event_source *s, int own) {
        assert_return(s, -EINVAL);
        assert_return(s->type == SOURCE_IO, -EDOM);

        s->io.owned = own;
        return 0;
}

_public_ int sd_event_source_get_io_events(sd_event_source *s, uint32_t* events) {
        assert_return(s, -EINVAL);
        assert_return(events, -EINVAL);
        assert_return(s->type == SOURCE_IO, -EDOM);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        *events = s->io.events;
        return 0;
}

_public_ int sd_event_source_set_io_events(sd_event_source *s, uint32_t events) {
        int r;

        assert_return(s, -EINVAL);
        assert_return(s->type == SOURCE_IO, -EDOM);
        assert_return(!(events & ~(EPOLLIN|EPOLLOUT|EPOLLRDHUP|EPOLLPRI|EPOLLERR|EPOLLHUP|EPOLLET)), -EINVAL);
        assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        /* edge-triggered updates are never skipped, so we can reset edges */
        if (s->io.events == events && !(events & EPOLLET))
                return 0;

        r = source_set_pending(s, false);
        if (r < 0)
                return r;

        if (s->enabled != SD_EVENT_OFF) {
                r = source_io_register(s, s->enabled, events);
                if (r < 0)
                        return r;
        }

        s->io.events = events;

        return 0;
}

_public_ int sd_event_source_get_io_revents(sd_event_source *s, uint32_t* revents) {
        assert_return(s, -EINVAL);
        assert_return(revents, -EINVAL);
        assert_return(s->type == SOURCE_IO, -EDOM);
        assert_return(s->pending, -ENODATA);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        *revents = s->io.revents;
        return 0;
}

_public_ int sd_event_source_get_signal(sd_event_source *s) {
        assert_return(s, -EINVAL);
        assert_return(s->type == SOURCE_SIGNAL, -EDOM);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        return s->signal.sig;
}

_public_ int sd_event_source_get_priority(sd_event_source *s, int64_t *priority) {
        assert_return(s, -EINVAL);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        *priority = s->priority;
        return 0;
}

_public_ int sd_event_source_set_priority(sd_event_source *s, int64_t priority) {
        bool rm_inotify = false, rm_inode = false;
        struct inotify_data *new_inotify_data = NULL;
        struct inode_data *new_inode_data = NULL;
        int r;

        assert_return(s, -EINVAL);
        assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        if (s->priority == priority)
                return 0;

        if (s->type == SOURCE_INOTIFY) {
                struct inode_data *old_inode_data;

                assert(s->inotify.inode_data);
                old_inode_data = s->inotify.inode_data;

                /* We need the original fd to change the priority. If we don't have it we can't change the priority,
                 * anymore. Note that we close any fds when entering the next event loop iteration, i.e. for inotify
                 * events we allow priority changes only until the first following iteration. */
                if (old_inode_data->fd < 0)
                        return -EOPNOTSUPP;

                r = event_make_inotify_data(s->event, priority, &new_inotify_data);
                if (r < 0)
                        return r;
                rm_inotify = r > 0;

                r = event_make_inode_data(s->event, new_inotify_data, old_inode_data->dev, old_inode_data->ino, &new_inode_data);
                if (r < 0)
                        goto fail;
                rm_inode = r > 0;

                if (new_inode_data->fd < 0) {
                        /* Duplicate the fd for the new inode object if we don't have any yet */
                        new_inode_data->fd = fcntl(old_inode_data->fd, F_DUPFD_CLOEXEC, 3);
                        if (new_inode_data->fd < 0) {
                                r = -errno;
                                goto fail;
                        }

                        LIST_PREPEND(to_close, s->event->inode_data_to_close, new_inode_data);
                }

                /* Move the event source to the new inode data structure */
                LIST_REMOVE(inotify.by_inode_data, old_inode_data->event_sources, s);
                LIST_PREPEND(inotify.by_inode_data, new_inode_data->event_sources, s);
                s->inotify.inode_data = new_inode_data;

                /* Now create the new watch */
                r = inode_data_realize_watch(s->event, new_inode_data);
                if (r < 0) {
                        /* Move it back */
                        LIST_REMOVE(inotify.by_inode_data, new_inode_data->event_sources, s);
                        LIST_PREPEND(inotify.by_inode_data, old_inode_data->event_sources, s);
                        s->inotify.inode_data = old_inode_data;
                        goto fail;
                }

                s->priority = priority;

                event_gc_inode_data(s->event, old_inode_data);

        } else if (s->type == SOURCE_SIGNAL && s->enabled != SD_EVENT_OFF) {
                struct signal_data *old, *d;

                /* Move us from the signalfd belonging to the old
                 * priority to the signalfd of the new priority */

                assert_se(old = hashmap_get(s->event->signal_data, &s->priority));

                s->priority = priority;

                r = event_make_signal_data(s->event, s->signal.sig, &d);
                if (r < 0) {
                        s->priority = old->priority;
                        return r;
                }

                event_unmask_signal_data(s->event, old, s->signal.sig);
        } else
                s->priority = priority;

        if (s->pending)
                prioq_reshuffle(s->event->pending, s, &s->pending_index);

        if (s->prepare)
                prioq_reshuffle(s->event->prepare, s, &s->prepare_index);

        if (s->type == SOURCE_EXIT)
                prioq_reshuffle(s->event->exit, s, &s->exit.prioq_index);

        return 0;

fail:
        if (rm_inode)
                event_free_inode_data(s->event, new_inode_data);

        if (rm_inotify)
                event_free_inotify_data(s->event, new_inotify_data);

        return r;
}

_public_ int sd_event_source_get_enabled(sd_event_source *s, int *m) {
        assert_return(s, -EINVAL);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        if (m)
                *m = s->enabled;
        return s->enabled != SD_EVENT_OFF;
}

_public_ int sd_event_source_set_enabled(sd_event_source *s, int m) {
        int r;

        assert_return(s, -EINVAL);
        assert_return(IN_SET(m, SD_EVENT_OFF, SD_EVENT_ON, SD_EVENT_ONESHOT), -EINVAL);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        /* If we are dead anyway, we are fine with turning off
         * sources, but everything else needs to fail. */
        if (s->event->state == SD_EVENT_FINISHED)
                return m == SD_EVENT_OFF ? 0 : -ESTALE;

        if (s->enabled == m)
                return 0;

        if (m == SD_EVENT_OFF) {

                /* Unset the pending flag when this event source is disabled */
                if (!IN_SET(s->type, SOURCE_DEFER, SOURCE_EXIT)) {
                        r = source_set_pending(s, false);
                        if (r < 0)
                                return r;
                }

                switch (s->type) {

                case SOURCE_IO:
                        source_io_unregister(s);
                        s->enabled = m;
                        break;

                case SOURCE_TIME_REALTIME:
                case SOURCE_TIME_BOOTTIME:
                case SOURCE_TIME_MONOTONIC:
                case SOURCE_TIME_REALTIME_ALARM:
                case SOURCE_TIME_BOOTTIME_ALARM: {
                        struct clock_data *d;

                        s->enabled = m;
                        d = event_get_clock_data(s->event, s->type);
                        assert(d);

                        prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
                        prioq_reshuffle(d->latest, s, &s->time.latest_index);
                        d->needs_rearm = true;
                        break;
                }

                case SOURCE_SIGNAL:
                        s->enabled = m;

                        event_gc_signal_data(s->event, &s->priority, s->signal.sig);
                        break;

                case SOURCE_CHILD:
                        s->enabled = m;

                        assert(s->event->n_enabled_child_sources > 0);
                        s->event->n_enabled_child_sources--;

                        event_gc_signal_data(s->event, &s->priority, SIGCHLD);
                        break;

                case SOURCE_EXIT:
                        s->enabled = m;
                        prioq_reshuffle(s->event->exit, s, &s->exit.prioq_index);
                        break;

                case SOURCE_DEFER:
                case SOURCE_POST:
                case SOURCE_INOTIFY:
                        s->enabled = m;
                        break;

                default:
                        assert_not_reached("Wut? I shouldn't exist.");
                }

        } else {

                /* Unset the pending flag when this event source is enabled */
                if (s->enabled == SD_EVENT_OFF && !IN_SET(s->type, SOURCE_DEFER, SOURCE_EXIT)) {
                        r = source_set_pending(s, false);
                        if (r < 0)
                                return r;
                }

                switch (s->type) {

                case SOURCE_IO:
                        r = source_io_register(s, m, s->io.events);
                        if (r < 0)
                                return r;

                        s->enabled = m;
                        break;

                case SOURCE_TIME_REALTIME:
                case SOURCE_TIME_BOOTTIME:
                case SOURCE_TIME_MONOTONIC:
                case SOURCE_TIME_REALTIME_ALARM:
                case SOURCE_TIME_BOOTTIME_ALARM: {
                        struct clock_data *d;

                        s->enabled = m;
                        d = event_get_clock_data(s->event, s->type);
                        assert(d);

                        prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
                        prioq_reshuffle(d->latest, s, &s->time.latest_index);
                        d->needs_rearm = true;
                        break;
                }

                case SOURCE_SIGNAL:
                        s->enabled = m;

                        r = event_make_signal_data(s->event, s->signal.sig, NULL);
                        if (r < 0) {
                                s->enabled = SD_EVENT_OFF;
                                event_gc_signal_data(s->event, &s->priority, s->signal.sig);
                                return r;
                        }

                        break;

                case SOURCE_CHILD:
                        if (s->enabled == SD_EVENT_OFF)
                                s->event->n_enabled_child_sources++;

                        s->enabled = m;

                        r = event_make_signal_data(s->event, SIGCHLD, NULL);
                        if (r < 0) {
                                s->enabled = SD_EVENT_OFF;
                                s->event->n_enabled_child_sources--;
                                event_gc_signal_data(s->event, &s->priority, SIGCHLD);
                                return r;
                        }

                        break;

                case SOURCE_EXIT:
                        s->enabled = m;
                        prioq_reshuffle(s->event->exit, s, &s->exit.prioq_index);
                        break;

                case SOURCE_DEFER:
                case SOURCE_POST:
                case SOURCE_INOTIFY:
                        s->enabled = m;
                        break;

                default:
                        assert_not_reached("Wut? I shouldn't exist.");
                }
        }

        if (s->pending)
                prioq_reshuffle(s->event->pending, s, &s->pending_index);

        if (s->prepare)
                prioq_reshuffle(s->event->prepare, s, &s->prepare_index);

        return 0;
}

_public_ int sd_event_source_get_time(sd_event_source *s, uint64_t *usec) {
        assert_return(s, -EINVAL);
        assert_return(usec, -EINVAL);
        assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        *usec = s->time.next;
        return 0;
}

_public_ int sd_event_source_set_time(sd_event_source *s, uint64_t usec) {
        struct clock_data *d;
        int r;

        assert_return(s, -EINVAL);
        assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
        assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        r = source_set_pending(s, false);
        if (r < 0)
                return r;

        s->time.next = usec;

        d = event_get_clock_data(s->event, s->type);
        assert(d);

        prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
        prioq_reshuffle(d->latest, s, &s->time.latest_index);
        d->needs_rearm = true;

        return 0;
}

_public_ int sd_event_source_get_time_accuracy(sd_event_source *s, uint64_t *usec) {
        assert_return(s, -EINVAL);
        assert_return(usec, -EINVAL);
        assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        *usec = s->time.accuracy;
        return 0;
}

_public_ int sd_event_source_set_time_accuracy(sd_event_source *s, uint64_t usec) {
        struct clock_data *d;
        int r;

        assert_return(s, -EINVAL);
        assert_return(usec != (uint64_t) -1, -EINVAL);
        assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
        assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        r = source_set_pending(s, false);
        if (r < 0)
                return r;

        if (usec == 0)
                usec = DEFAULT_ACCURACY_USEC;

        s->time.accuracy = usec;

        d = event_get_clock_data(s->event, s->type);
        assert(d);

        prioq_reshuffle(d->latest, s, &s->time.latest_index);
        d->needs_rearm = true;

        return 0;
}

_public_ int sd_event_source_get_time_clock(sd_event_source *s, clockid_t *clock) {
        assert_return(s, -EINVAL);
        assert_return(clock, -EINVAL);
        assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        *clock = event_source_type_to_clock(s->type);
        return 0;
}

_public_ int sd_event_source_get_child_pid(sd_event_source *s, pid_t *pid) {
        assert_return(s, -EINVAL);
        assert_return(pid, -EINVAL);
        assert_return(s->type == SOURCE_CHILD, -EDOM);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        *pid = s->child.pid;
        return 0;
}

_public_ int sd_event_source_get_inotify_mask(sd_event_source *s, uint32_t *mask) {
        assert_return(s, -EINVAL);
        assert_return(mask, -EINVAL);
        assert_return(s->type == SOURCE_INOTIFY, -EDOM);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        *mask = s->inotify.mask;
        return 0;
}

_public_ int sd_event_source_set_prepare(sd_event_source *s, sd_event_handler_t callback) {
        int r;

        assert_return(s, -EINVAL);
        assert_return(s->type != SOURCE_EXIT, -EDOM);
        assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        if (s->prepare == callback)
                return 0;

        if (callback && s->prepare) {
                s->prepare = callback;
                return 0;
        }

        r = prioq_ensure_allocated(&s->event->prepare, prepare_prioq_compare);
        if (r < 0)
                return r;

        s->prepare = callback;

        if (callback) {
                r = prioq_put(s->event->prepare, s, &s->prepare_index);
                if (r < 0)
                        return r;
        } else
                prioq_remove(s->event->prepare, s, &s->prepare_index);

        return 0;
}

_public_ void* sd_event_source_get_userdata(sd_event_source *s) {
        assert_return(s, NULL);

        return s->userdata;
}

_public_ void *sd_event_source_set_userdata(sd_event_source *s, void *userdata) {
        void *ret;

        assert_return(s, NULL);

        ret = s->userdata;
        s->userdata = userdata;

        return ret;
}

static usec_t sleep_between(sd_event *e, usec_t a, usec_t b) {
        usec_t c;

        assert(e);
        assert(a <= b);

        if (a <= 0)
                return 0;
        if (a >= USEC_INFINITY)
                return USEC_INFINITY;

        if (b <= a + 1)
                return a;

        initialize_perturb(e);

        /*
          Find a good time to wake up again between times a and b. We
          have two goals here:

          a) We want to wake up as seldom as possible, hence prefer
             later times over earlier times.

          b) But if we have to wake up, then let's make sure to
             dispatch as much as possible on the entire system.

          We implement this by waking up everywhere at the same time
          within any given minute if we can, synchronised via the
          perturbation value determined from the boot ID. If we can't,
          then we try to find the same spot in every 10s, then 1s and
          then 250ms step. Otherwise, we pick the last possible time
          to wake up.
        */

        c = (b / USEC_PER_MINUTE) * USEC_PER_MINUTE + e->perturb;
        if (c >= b) {
                if (_unlikely_(c < USEC_PER_MINUTE))
                        return b;

                c -= USEC_PER_MINUTE;
        }

        if (c >= a)
                return c;

        c = (b / (USEC_PER_SEC*10)) * (USEC_PER_SEC*10) + (e->perturb % (USEC_PER_SEC*10));
        if (c >= b) {
                if (_unlikely_(c < USEC_PER_SEC*10))
                        return b;

                c -= USEC_PER_SEC*10;
        }

        if (c >= a)
                return c;

        c = (b / USEC_PER_SEC) * USEC_PER_SEC + (e->perturb % USEC_PER_SEC);
        if (c >= b) {
                if (_unlikely_(c < USEC_PER_SEC))
                        return b;

                c -= USEC_PER_SEC;
        }

        if (c >= a)
                return c;

        c = (b / (USEC_PER_MSEC*250)) * (USEC_PER_MSEC*250) + (e->perturb % (USEC_PER_MSEC*250));
        if (c >= b) {
                if (_unlikely_(c < USEC_PER_MSEC*250))
                        return b;

                c -= USEC_PER_MSEC*250;
        }

        if (c >= a)
                return c;

        return b;
}

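/* Worked example (not part of the original file): with a = 63s, b = 125s and
 * e->perturb = 10s, the minute pass computes c = (125s / 60s) * 60s + 10s =
 * 130s; that is >= b, so one minute is subtracted, giving 70s, which lies
 * within [a, b] and is returned. Every timer on this machine with at least a
 * minute of slack thus wakes up 10s past the minute. */
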
static int event_arm_timer(
                sd_event *e,
                struct clock_data *d) {

        struct itimerspec its = {};
        sd_event_source *a, *b;
        usec_t t;
        int r;

        assert(e);
        assert(d);

        if (!d->needs_rearm)
                return 0;

        d->needs_rearm = false;

        a = prioq_peek(d->earliest);
        if (!a || a->enabled == SD_EVENT_OFF || a->time.next == USEC_INFINITY) {

                if (d->fd < 0)
                        return 0;

                if (d->next == USEC_INFINITY)
                        return 0;

                /* disarm */
                r = timerfd_settime(d->fd, TFD_TIMER_ABSTIME, &its, NULL);
                if (r < 0)
                        return r;

                d->next = USEC_INFINITY;
                return 0;
        }

        b = prioq_peek(d->latest);
        assert_se(b && b->enabled != SD_EVENT_OFF);

        t = sleep_between(e, a->time.next, time_event_source_latest(b));
        if (d->next == t)
                return 0;

        assert_se(d->fd >= 0);

        if (t == 0) {
                /* We don't want to disarm here, just mean some time looooong ago. */
                its.it_value.tv_sec = 0;
                its.it_value.tv_nsec = 1;
        } else
                timespec_store(&its.it_value, t);

        r = timerfd_settime(d->fd, TFD_TIMER_ABSTIME, &its, NULL);
        if (r < 0)
                return -errno;

        d->next = t;
        return 0;
}

static int process_io(sd_event *e, sd_event_source *s, uint32_t revents) {
        assert(e);
        assert(s);
        assert(s->type == SOURCE_IO);

        /* If the event source was already pending, we just OR in the
         * new revents, otherwise we reset the value. The ORing is
         * necessary to handle EPOLLONESHOT events properly where
         * readability might happen independently of writability, and
         * we need to keep track of both */

        if (s->pending)
                s->io.revents |= revents;
        else
                s->io.revents = revents;

        return source_set_pending(s, true);
}

static int flush_timer(sd_event *e, int fd, uint32_t events, usec_t *next) {
        uint64_t x;
        ssize_t ss;

        assert(e);
        assert(fd >= 0);

        assert_return(events == EPOLLIN, -EIO);

        ss = read(fd, &x, sizeof(x));
        if (ss < 0) {
                if (IN_SET(errno, EAGAIN, EINTR))
                        return 0;

                return -errno;
        }

        if (_unlikely_(ss != sizeof(x)))
                return -EIO;

        if (next)
                *next = USEC_INFINITY;

        return 0;
}

static int process_timer(
                sd_event *e,
                usec_t n,
                struct clock_data *d) {

        sd_event_source *s;
        int r;

        assert(e);
        assert(d);

        for (;;) {
                s = prioq_peek(d->earliest);
                if (!s ||
                    s->time.next > n ||
                    s->enabled == SD_EVENT_OFF ||
                    s->pending)
                        break;

                r = source_set_pending(s, true);
                if (r < 0)
                        return r;

                prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
                prioq_reshuffle(d->latest, s, &s->time.latest_index);
                d->needs_rearm = true;
        }

        return 0;
}

static int process_child(sd_event *e) {
        sd_event_source *s;
        Iterator i;
        int r;

        assert(e);

        e->need_process_child = false;

        /*
          So, this is ugly. We iteratively invoke waitid() with P_PID
          + WNOHANG for each PID we wait for, instead of using
          P_ALL. This is because we only want to get child
          information of very specific child processes, and not all
          of them. We might not have processed the SIGCHLD event of a
          previous invocation and we don't want to maintain an
          unbounded *per-child* event queue, hence we really don't
          want anything flushed out of the kernel's queue that we
          don't care about. Since this is O(n) this means that if you
          have a lot of processes you probably want to handle SIGCHLD
          yourself.

          We do not reap the children here (by using WNOWAIT), this
          is only done after the event source is dispatched so that
          the callback still sees the process as a zombie.
        */

        HASHMAP_FOREACH(s, e->child_sources, i) {
                assert(s->type == SOURCE_CHILD);

                if (s->pending)
                        continue;

                if (s->enabled == SD_EVENT_OFF)
                        continue;

                zero(s->child.siginfo);
                r = waitid(P_PID, s->child.pid, &s->child.siginfo,
                           WNOHANG | (s->child.options & WEXITED ? WNOWAIT : 0) | s->child.options);
                if (r < 0)
                        return -errno;

                if (s->child.siginfo.si_pid != 0) {
                        bool zombie = IN_SET(s->child.siginfo.si_code, CLD_EXITED, CLD_KILLED, CLD_DUMPED);

                        if (!zombie && (s->child.options & WEXITED)) {
                                /* If the child isn't dead then let's
                                 * immediately remove the state change
                                 * from the queue, since there's no
                                 * benefit in leaving it queued */

                                assert(s->child.options & (WSTOPPED|WCONTINUED));
                                waitid(P_PID, s->child.pid, &s->child.siginfo, WNOHANG|(s->child.options & (WSTOPPED|WCONTINUED)));
                        }

                        r = source_set_pending(s, true);
                        if (r < 0)
                                return r;
                }
        }

        return 0;
}

static int process_signal(sd_event *e, struct signal_data *d, uint32_t events) {
        bool read_one = false;
        int r;

        assert(e);

        assert_return(events == EPOLLIN, -EIO);

        /* If there's a signal queued on this priority and SIGCHLD is
           on this priority too, then make sure to recheck the
           children we watch. This is because we only ever dequeue
           the first signal per priority, and if we dequeue one and
           SIGCHLD is enqueued later we wouldn't know, but we might
           have higher priority children we care about, hence we need
           to check that explicitly. */

        if (sigismember(&d->sigset, SIGCHLD))
                e->need_process_child = true;

        /* If there's already an event source pending for this
         * priority we don't read another */
        if (d->current)
                return 0;

        for (;;) {
                struct signalfd_siginfo si;
                ssize_t n;
                sd_event_source *s = NULL;

                n = read(d->fd, &si, sizeof(si));
                if (n < 0) {
                        if (IN_SET(errno, EAGAIN, EINTR))
                                return read_one;

                        return -errno;
                }

                if (_unlikely_(n != sizeof(si)))
                        return -EIO;

                assert(SIGNAL_VALID(si.ssi_signo));

                read_one = true;

                if (e->signal_sources)
                        s = e->signal_sources[si.ssi_signo];
                if (!s)
                        continue;
                if (s->pending)
                        continue;

                s->signal.siginfo = si;
                d->current = s;

                r = source_set_pending(s, true);
                if (r < 0)
                        return r;

                return 1;
        }
}

static int event_inotify_data_read(sd_event *e, struct inotify_data *d, uint32_t revents) {
        ssize_t n;

        assert(e);
        assert(d);

        assert_return(revents == EPOLLIN, -EIO);

        /* If there's already an event source pending for this priority, don't read another */
        if (d->n_pending > 0)
                return 0;

        /* Is the read buffer non-empty? If so, let's not read more */
        if (d->buffer_filled > 0)
                return 0;

        n = read(d->fd, &d->buffer, sizeof(d->buffer));
        if (n < 0) {
                if (IN_SET(errno, EAGAIN, EINTR))
                        return 0;

                return -errno;
        }

        assert(n > 0);
        d->buffer_filled = (size_t) n;
        LIST_PREPEND(buffered, e->inotify_data_buffered, d);

        return 1;
}

static void event_inotify_data_drop(sd_event *e, struct inotify_data *d, size_t sz) {
        assert(e);
        assert(d);
        assert(sz <= d->buffer_filled);

        if (sz == 0)
                return;

        /* Move the rest of the buffer to the front, in order to get things properly aligned again */
        memmove(d->buffer.raw, d->buffer.raw + sz, d->buffer_filled - sz);
        d->buffer_filled -= sz;

        if (d->buffer_filled == 0)
                LIST_REMOVE(buffered, e->inotify_data_buffered, d);
}

static int event_inotify_data_process(sd_event *e, struct inotify_data *d) {
        int r;

        assert(e);
        assert(d);

        /* If there's already an event source pending for this priority, don't read another */
        if (d->n_pending > 0)
                return 0;

        while (d->buffer_filled > 0) {
                size_t sz;

                /* Let's validate that the event structures are complete */
                if (d->buffer_filled < offsetof(struct inotify_event, name))
                        return -EIO;

                sz = offsetof(struct inotify_event, name) + d->buffer.ev.len;
                if (d->buffer_filled < sz)
                        return -EIO;

                if (d->buffer.ev.mask & IN_Q_OVERFLOW) {
                        struct inode_data *inode_data;
                        Iterator i;

                        /* The queue overran, let's pass this event to all event sources connected to this inotify
                         * object */

                        HASHMAP_FOREACH(inode_data, d->inodes, i) {
                                sd_event_source *s;

                                LIST_FOREACH(inotify.by_inode_data, s, inode_data->event_sources) {

                                        if (s->enabled == SD_EVENT_OFF)
                                                continue;

                                        r = source_set_pending(s, true);
                                        if (r < 0)
                                                return r;
                                }
                        }
                } else {
                        struct inode_data *inode_data;
                        sd_event_source *s;

                        /* Find the inode object for this watch descriptor. If IN_IGNORED is set we also remove it from
                         * our watch descriptor table. */
                        if (d->buffer.ev.mask & IN_IGNORED) {

                                inode_data = hashmap_remove(d->wd, INT_TO_PTR(d->buffer.ev.wd));
                                if (!inode_data) {
                                        event_inotify_data_drop(e, d, sz);
                                        continue;
                                }

                                /* The watch descriptor was removed by the kernel, let's drop it here too */
                                inode_data->wd = -1;
                        } else {
                                inode_data = hashmap_get(d->wd, INT_TO_PTR(d->buffer.ev.wd));
                                if (!inode_data) {
                                        event_inotify_data_drop(e, d, sz);
                                        continue;
                                }
                        }

                        /* Trigger all event sources that are interested in these events. Also trigger all event
                         * sources if IN_IGNORED or IN_UNMOUNT is set. */
                        LIST_FOREACH(inotify.by_inode_data, s, inode_data->event_sources) {

                                if (s->enabled == SD_EVENT_OFF)
                                        continue;

                                if ((d->buffer.ev.mask & (IN_IGNORED|IN_UNMOUNT)) == 0 &&
                                    (s->inotify.mask & d->buffer.ev.mask & IN_ALL_EVENTS) == 0)
                                        continue;

                                r = source_set_pending(s, true);
                                if (r < 0)
                                        return r;
                        }
                }

                /* Something pending now? If so, let's finish, otherwise let's read more. */
                if (d->n_pending > 0)
                        return 1;

                /* Nothing pending for this event, let's drop it from the read buffer */
                event_inotify_data_drop(e, d, sz);
        }

        return 0;
}

static int process_inotify(sd_event *e) {
        struct inotify_data *d;
        int r, done = 0;

        assert(e);

        LIST_FOREACH(buffered, d, e->inotify_data_buffered) {
                r = event_inotify_data_process(e, d);
                if (r < 0)
                        return r;
                if (r > 0)
                        done++;
        }

        return done;
}

static int source_dispatch(sd_event_source *s) {
        EventSourceType saved_type;
        int r = 0;

        assert(s);
        assert(s->pending || s->type == SOURCE_EXIT);

        /* Save the event source type here, so that we still know it after the event callback, which might
         * invalidate the event. */
        saved_type = s->type;

        if (!IN_SET(s->type, SOURCE_DEFER, SOURCE_EXIT)) {
                r = source_set_pending(s, false);
                if (r < 0)
                        return r;
        }

        if (s->type != SOURCE_POST) {
                sd_event_source *z;
                Iterator i;

                /* If we execute a non-post source, let's mark all
                 * post sources as pending */

                SET_FOREACH(z, s->event->post_sources, i) {
                        if (z->enabled == SD_EVENT_OFF)
                                continue;

                        r = source_set_pending(z, true);
                        if (r < 0)
                                return r;
                }
        }

        if (s->enabled == SD_EVENT_ONESHOT) {
                r = sd_event_source_set_enabled(s, SD_EVENT_OFF);
                if (r < 0)
                        return r;
        }

        s->dispatching = true;

        switch (s->type) {

        case SOURCE_IO:
                r = s->io.callback(s, s->io.fd, s->io.revents, s->userdata);
                break;

        case SOURCE_TIME_REALTIME:
        case SOURCE_TIME_BOOTTIME:
        case SOURCE_TIME_MONOTONIC:
        case SOURCE_TIME_REALTIME_ALARM:
        case SOURCE_TIME_BOOTTIME_ALARM:
                r = s->time.callback(s, s->time.next, s->userdata);
                break;

        case SOURCE_SIGNAL:
                r = s->signal.callback(s, &s->signal.siginfo, s->userdata);
                break;

        case SOURCE_CHILD: {
                bool zombie;

                zombie = IN_SET(s->child.siginfo.si_code, CLD_EXITED, CLD_KILLED, CLD_DUMPED);

                r = s->child.callback(s, &s->child.siginfo, s->userdata);

                /* Now, reap the PID for good. */
                if (zombie)
                        (void) waitid(P_PID, s->child.pid, &s->child.siginfo, WNOHANG|WEXITED);

                break;
        }

        case SOURCE_DEFER:
                r = s->defer.callback(s, s->userdata);
                break;

        case SOURCE_POST:
                r = s->post.callback(s, s->userdata);
                break;

        case SOURCE_EXIT:
                r = s->exit.callback(s, s->userdata);
                break;

        case SOURCE_INOTIFY: {
                struct sd_event *e = s->event;
                struct inotify_data *d;
                size_t sz;

                assert(s->inotify.inode_data);
                assert_se(d = s->inotify.inode_data->inotify_data);

                assert(d->buffer_filled >= offsetof(struct inotify_event, name));
                sz = offsetof(struct inotify_event, name) + d->buffer.ev.len;
                assert(d->buffer_filled >= sz);

                r = s->inotify.callback(s, &d->buffer.ev, s->userdata);

                /* When no event is pending anymore on this inotify object, let's drop the event from the
                 * read buffer. */
                if (d->n_pending == 0)
                        event_inotify_data_drop(e, d, sz);

                break;
        }

        case SOURCE_WATCHDOG:
        case _SOURCE_EVENT_SOURCE_TYPE_MAX:
        case _SOURCE_EVENT_SOURCE_TYPE_INVALID:
                assert_not_reached("Wut? I shouldn't exist.");
        }

        s->dispatching = false;

        if (r < 0)
                log_debug_errno(r, "Event source %s (type %s) returned error, disabling: %m",
                                strna(s->description), event_source_type_to_string(saved_type));

        if (s->n_ref == 0)
                source_free(s);
        else if (r < 0)
                sd_event_source_set_enabled(s, SD_EVENT_OFF);

        return 1;
}

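/* Illustrative sketch (hypothetical caller, not part of this file): what a dispatched
 * callback looks like from the user's side. Note that with SD_EVENT_ONESHOT the source is
 * disabled before the callback runs, and a negative return value disables the source, as
 * implemented above:
 *
 *     static int on_io(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
 *             char buf[4096];
 *             ssize_t n = read(fd, buf, sizeof(buf));
 *             return n < 0 ? -errno : 0;
 *     }
 *
 *     sd_event_add_io(e, NULL, fd, EPOLLIN, on_io, NULL);
 */
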
static int event_prepare(sd_event *e) {
        int r;

        assert(e);

        for (;;) {
                sd_event_source *s;

                s = prioq_peek(e->prepare);
                if (!s || s->prepare_iteration == e->iteration || s->enabled == SD_EVENT_OFF)
                        break;

                s->prepare_iteration = e->iteration;
                r = prioq_reshuffle(e->prepare, s, &s->prepare_index);
                if (r < 0)
                        return r;

                assert(s->prepare);

                s->dispatching = true;
                r = s->prepare(s, s->userdata);
                s->dispatching = false;

                if (r < 0)
                        log_debug_errno(r, "Prepare callback of event source %s (type %s) returned error, disabling: %m",
                                        strna(s->description), event_source_type_to_string(s->type));

                if (s->n_ref == 0)
                        source_free(s);
                else if (r < 0)
                        sd_event_source_set_enabled(s, SD_EVENT_OFF);
        }

        return 0;
}

static int dispatch_exit(sd_event *e) {
        sd_event_source *p;
        _cleanup_(sd_event_unrefp) sd_event *ref = NULL;
        int r;

        assert(e);

        p = prioq_peek(e->exit);
        if (!p || p->enabled == SD_EVENT_OFF) {
                e->state = SD_EVENT_FINISHED;
                return 0;
        }

        ref = sd_event_ref(e);
        e->iteration++;
        e->state = SD_EVENT_EXITING;
        r = source_dispatch(p);
        e->state = SD_EVENT_INITIAL;
        return r;
}

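/* Illustrative sketch (hypothetical caller): exit sources registered with
 * sd_event_add_exit() are dispatched one per loop iteration, in priority order, once
 * sd_event_exit() has been called; the loop reaches SD_EVENT_FINISHED when none are left
 * enabled. cleanup() is a hypothetical teardown helper:
 *
 *     static int on_exit_cb(sd_event_source *s, void *userdata) {
 *             cleanup(userdata);
 *             return 0;
 *     }
 *
 *     sd_event_add_exit(e, NULL, on_exit_cb, my_state);
 */
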
static sd_event_source* event_next_pending(sd_event *e) {
        sd_event_source *p;

        assert(e);

        p = prioq_peek(e->pending);
        if (!p)
                return NULL;

        if (p->enabled == SD_EVENT_OFF)
                return NULL;

        return p;
}

static int arm_watchdog(sd_event *e) {
        struct itimerspec its = {};
        usec_t t;
        int r;

        assert(e);
        assert(e->watchdog_fd >= 0);

        t = sleep_between(e,
                          e->watchdog_last + (e->watchdog_period / 2),
                          e->watchdog_last + (e->watchdog_period * 3 / 4));

        timespec_store(&its.it_value, t);

        /* Make sure we never set the watchdog to 0, which tells the
         * kernel to disable it. */
        if (its.it_value.tv_sec == 0 && its.it_value.tv_nsec == 0)
                its.it_value.tv_nsec = 1;

        r = timerfd_settime(e->watchdog_fd, TFD_TIMER_ABSTIME, &its, NULL);
        if (r < 0)
                return -errno;

        return 0;
}

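/* Worked example of the window above: with watchdog_period = 30s and watchdog_last = T,
 * sleep_between() picks a wake-up somewhere in [T+15s, T+22.5s], i.e. between one half and
 * three quarters of the period, which leaves ample slack before the service manager's
 * deadline at T+30s. */
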
static int process_watchdog(sd_event *e) {
        assert(e);

        if (!e->watchdog)
                return 0;

        /* Don't notify watchdog too often */
        if (e->watchdog_last + e->watchdog_period / 4 > e->timestamp.monotonic)
                return 0;

        sd_notify(false, "WATCHDOG=1");
        e->watchdog_last = e->timestamp.monotonic;

        return arm_watchdog(e);
}

static void event_close_inode_data_fds(sd_event *e) {
        struct inode_data *d;

        assert(e);

        /* Close the fds pointing to the inodes to watch now. We need to close them as they might otherwise pin
         * filesystems. But we can't close them right away as we need them as long as the user still wants to make
         * adjustments to the event source, such as changing the priority (which requires us to remove and re-add a
         * watch for the inode). Hence, let's close them when entering the first iteration after they were added, as
         * a compromise. */

        while ((d = e->inode_data_to_close)) {
                assert(d->fd >= 0);
                d->fd = safe_close(d->fd);

                LIST_REMOVE(to_close, e->inode_data_to_close, d);
        }
}

_public_ int sd_event_prepare(sd_event *e) {
        int r;

        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(!event_pid_changed(e), -ECHILD);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(e->state == SD_EVENT_INITIAL, -EBUSY);

        if (e->exit_requested)
                goto pending;

        e->iteration++;

        e->state = SD_EVENT_PREPARING;
        r = event_prepare(e);
        e->state = SD_EVENT_INITIAL;
        if (r < 0)
                return r;

        r = event_arm_timer(e, &e->realtime);
        if (r < 0)
                return r;

        r = event_arm_timer(e, &e->boottime);
        if (r < 0)
                return r;

        r = event_arm_timer(e, &e->monotonic);
        if (r < 0)
                return r;

        r = event_arm_timer(e, &e->realtime_alarm);
        if (r < 0)
                return r;

        r = event_arm_timer(e, &e->boottime_alarm);
        if (r < 0)
                return r;

        event_close_inode_data_fds(e);

        if (event_next_pending(e) || e->need_process_child)
                goto pending;

        e->state = SD_EVENT_ARMED;

        return 0;

pending:
        e->state = SD_EVENT_ARMED;
        r = sd_event_wait(e, 0);
        if (r == 0)
                e->state = SD_EVENT_ARMED;

        return r;
}

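/* Illustrative sketch (hypothetical caller): sd_event_prepare(), sd_event_wait() and
 * sd_event_dispatch() decompose a single sd_event_run() iteration, which is useful when
 * integrating with a foreign main loop:
 *
 *     r = sd_event_prepare(e);
 *     if (r == 0)                              nothing pending yet, so wait
 *             r = sd_event_wait(e, (uint64_t) -1);
 *     if (r > 0)                               something is pending now
 *             r = sd_event_dispatch(e);
 */
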
_public_ int sd_event_wait(sd_event *e, uint64_t timeout) {
        struct epoll_event *ev_queue;
        unsigned ev_queue_max;
        int r, m, i;

        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(!event_pid_changed(e), -ECHILD);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(e->state == SD_EVENT_ARMED, -EBUSY);

        if (e->exit_requested) {
                e->state = SD_EVENT_PENDING;
                return 1;
        }

        ev_queue_max = MAX(e->n_sources, 1u);
        ev_queue = newa(struct epoll_event, ev_queue_max);

        /* If we still have inotify data buffered, then query the other fds, but don't wait on it */
        if (e->inotify_data_buffered)
                timeout = 0;

        /* Convert the timeout from µs to ms, rounding up */
        m = epoll_wait(e->epoll_fd, ev_queue, ev_queue_max,
                       timeout == (uint64_t) -1 ? -1 : (int) ((timeout + USEC_PER_MSEC - 1) / USEC_PER_MSEC));
        if (m < 0) {
                if (errno == EINTR) {
                        e->state = SD_EVENT_PENDING;
                        return 1;
                }

                r = -errno;
                goto finish;
        }

        triple_timestamp_get(&e->timestamp);

        for (i = 0; i < m; i++) {

                if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_WATCHDOG))
                        r = flush_timer(e, e->watchdog_fd, ev_queue[i].events, NULL);
                else {
                        WakeupType *t = ev_queue[i].data.ptr;

                        switch (*t) {

                        case WAKEUP_EVENT_SOURCE:
                                r = process_io(e, ev_queue[i].data.ptr, ev_queue[i].events);
                                break;

                        case WAKEUP_CLOCK_DATA: {
                                struct clock_data *d = ev_queue[i].data.ptr;
                                r = flush_timer(e, d->fd, ev_queue[i].events, &d->next);
                                break;
                        }

                        case WAKEUP_SIGNAL_DATA:
                                r = process_signal(e, ev_queue[i].data.ptr, ev_queue[i].events);
                                break;

                        case WAKEUP_INOTIFY_DATA:
                                r = event_inotify_data_read(e, ev_queue[i].data.ptr, ev_queue[i].events);
                                break;

                        default:
                                assert_not_reached("Invalid wake-up pointer");
                        }
                }
                if (r < 0)
                        goto finish;
        }

        r = process_watchdog(e);
        if (r < 0)
                goto finish;

        r = process_timer(e, e->timestamp.realtime, &e->realtime);
        if (r < 0)
                goto finish;

        r = process_timer(e, e->timestamp.boottime, &e->boottime);
        if (r < 0)
                goto finish;

        r = process_timer(e, e->timestamp.monotonic, &e->monotonic);
        if (r < 0)
                goto finish;

        r = process_timer(e, e->timestamp.realtime, &e->realtime_alarm);
        if (r < 0)
                goto finish;

        r = process_timer(e, e->timestamp.boottime, &e->boottime_alarm);
        if (r < 0)
                goto finish;

        if (e->need_process_child) {
                r = process_child(e);
                if (r < 0)
                        goto finish;
        }

        r = process_inotify(e);
        if (r < 0)
                goto finish;

        if (event_next_pending(e)) {
                e->state = SD_EVENT_PENDING;

                return 1;
        }

        r = 0;

finish:
        e->state = SD_EVENT_INITIAL;

        return r;
}

_public_ int sd_event_dispatch(sd_event *e) {
        sd_event_source *p;
        int r;

        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(!event_pid_changed(e), -ECHILD);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(e->state == SD_EVENT_PENDING, -EBUSY);

        if (e->exit_requested)
                return dispatch_exit(e);

        p = event_next_pending(e);
        if (p) {
                _cleanup_(sd_event_unrefp) sd_event *ref = NULL;

                ref = sd_event_ref(e);
                e->state = SD_EVENT_RUNNING;
                r = source_dispatch(p);
                e->state = SD_EVENT_INITIAL;
                return r;
        }

        e->state = SD_EVENT_INITIAL;

        return 1;
}

static void event_log_delays(sd_event *e) {
        char b[ELEMENTSOF(e->delays) * DECIMAL_STR_MAX(unsigned) + 1];
        unsigned i;
        int o;

        for (i = o = 0; i < ELEMENTSOF(e->delays); i++) {
                o += snprintf(&b[o], sizeof(b) - o, "%u ", e->delays[i]);
                e->delays[i] = 0;
        }
        log_debug("Event loop iterations: %.*s", o, b);
}

_public_ int sd_event_run(sd_event *e, uint64_t timeout) {
        int r;

        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(!event_pid_changed(e), -ECHILD);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(e->state == SD_EVENT_INITIAL, -EBUSY);

        if (e->profile_delays && e->last_run) {
                usec_t this_run;
                unsigned l;

                this_run = now(CLOCK_MONOTONIC);

                l = u64log2(this_run - e->last_run);
                assert(l < ELEMENTSOF(e->delays));
                e->delays[l]++;

                if (this_run - e->last_log >= 5*USEC_PER_SEC) {
                        event_log_delays(e);
                        e->last_log = this_run;
                }
        }

        r = sd_event_prepare(e);
        if (r == 0)
                /* There was nothing? Then wait... */
                r = sd_event_wait(e, timeout);

        if (e->profile_delays)
                e->last_run = now(CLOCK_MONOTONIC);

        if (r > 0) {
                /* There's something now, so let's dispatch it */
                r = sd_event_dispatch(e);
                if (r < 0)
                        return r;

                return 1;
        }

        return r;
}

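/* Illustrative sketch (hypothetical caller): running one iteration at a time with a 5s
 * cap, e.g. to interleave event processing with other periodic work. do_periodic_work() is
 * a hypothetical helper:
 *
 *     for (;;) {
 *             r = sd_event_run(e, 5 * USEC_PER_SEC);
 *             if (r < 0)
 *                     break;
 *             do_periodic_work();
 *     }
 */
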
_public_ int sd_event_loop(sd_event *e) {
        _cleanup_(sd_event_unrefp) sd_event *ref = NULL;
        int r;

        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(!event_pid_changed(e), -ECHILD);
        assert_return(e->state == SD_EVENT_INITIAL, -EBUSY);

        ref = sd_event_ref(e);

        while (e->state != SD_EVENT_FINISHED) {
                r = sd_event_run(e, (uint64_t) -1);
                if (r < 0)
                        return r;
        }

        return e->exit_code;
}

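/* Illustrative sketch (hypothetical caller): the common pattern of running the default
 * event loop until something calls sd_event_exit(); the exit code passed there becomes the
 * return value of sd_event_loop():
 *
 *     sd_event *e = NULL;
 *     int r = sd_event_default(&e);
 *     if (r < 0)
 *             return r;
 *     ... add event sources ...
 *     r = sd_event_loop(e);
 *     sd_event_unref(e);
 */
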
_public_ int sd_event_get_fd(sd_event *e) {
        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(!event_pid_changed(e), -ECHILD);

        return e->epoll_fd;
}

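/* Illustrative sketch (hypothetical caller): the fd returned above can be watched from a
 * foreign poll loop; when it becomes readable, run one prepare/wait/dispatch cycle with a
 * zero timeout so that nothing blocks:
 *
 *     struct pollfd pfd = { .fd = sd_event_get_fd(e), .events = POLLIN };
 *     if (poll(&pfd, 1, -1) > 0 && sd_event_prepare(e) == 0)
 *             (void) sd_event_wait(e, 0);
 *     if (sd_event_get_state(e) == SD_EVENT_PENDING)
 *             (void) sd_event_dispatch(e);
 */
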
_public_ int sd_event_get_state(sd_event *e) {
        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(!event_pid_changed(e), -ECHILD);

        return e->state;
}

_public_ int sd_event_get_exit_code(sd_event *e, int *code) {
        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(code, -EINVAL);
        assert_return(!event_pid_changed(e), -ECHILD);

        if (!e->exit_requested)
                return -ENODATA;

        *code = e->exit_code;
        return 0;
}

_public_ int sd_event_exit(sd_event *e, int code) {
        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(e), -ECHILD);

        e->exit_requested = true;
        e->exit_code = code;

        return 0;
}

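/* Illustrative sketch (hypothetical caller): requesting a clean shutdown from within a
 * handler, e.g. on SIGTERM:
 *
 *     static int on_sigterm(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
 *             return sd_event_exit(sd_event_source_get_event(s), 0);
 *     }
 */
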
_public_ int sd_event_now(sd_event *e, clockid_t clock, uint64_t *usec) {
        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(usec, -EINVAL);
        assert_return(!event_pid_changed(e), -ECHILD);

        if (!TRIPLE_TIMESTAMP_HAS_CLOCK(clock))
                return -EOPNOTSUPP;

        /* Generate a clean error in case CLOCK_BOOTTIME is not available. Note that we don't use
         * clock_supported() here, for a reason: there are systems where CLOCK_BOOTTIME is supported, but
         * CLOCK_BOOTTIME_ALARM is not, but for the purpose of getting the time this doesn't matter. */
        if (IN_SET(clock, CLOCK_BOOTTIME, CLOCK_BOOTTIME_ALARM) && !clock_boottime_supported())
                return -EOPNOTSUPP;

        if (!triple_timestamp_is_set(&e->timestamp)) {
                /* Implicitly fall back to now() if we never ran
                 * before and thus have no cached time. */
                *usec = now(clock);
                return 1;
        }

        *usec = triple_timestamp_by_clock(&e->timestamp, clock);
        return 0;
}

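/* Illustrative sketch (hypothetical caller): using the cached timestamp as the base when
 * scheduling relative timers, so that all sources of one iteration agree on "now".
 * on_time is a hypothetical sd_event_time_handler_t:
 *
 *     uint64_t usec;
 *     if (sd_event_now(e, CLOCK_MONOTONIC, &usec) >= 0)
 *             sd_event_add_time(e, NULL, CLOCK_MONOTONIC,
 *                               usec + 10 * USEC_PER_SEC, 0, on_time, NULL);
 */
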
_public_ int sd_event_default(sd_event **ret) {
        sd_event *e = NULL;
        int r;

        if (!ret)
                return !!default_event;

        if (default_event) {
                *ret = sd_event_ref(default_event);
                return 0;
        }

        r = sd_event_new(&e);
        if (r < 0)
                return r;

        e->default_event_ptr = &default_event;
        e->tid = gettid();
        default_event = e;

        *ret = e;
        return 1;
}

_public_ int sd_event_get_tid(sd_event *e, pid_t *tid) {
        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(tid, -EINVAL);
        assert_return(!event_pid_changed(e), -ECHILD);

        if (e->tid != 0) {
                *tid = e->tid;
                return 0;
        }

        return -ENXIO;
}

_public_ int sd_event_set_watchdog(sd_event *e, int b) {
        int r;

        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(!event_pid_changed(e), -ECHILD);

        if (e->watchdog == !!b)
                return e->watchdog;

        if (b) {
                struct epoll_event ev;

                r = sd_watchdog_enabled(false, &e->watchdog_period);
                if (r <= 0)
                        return r;

                /* Issue first ping immediately */
                sd_notify(false, "WATCHDOG=1");
                e->watchdog_last = now(CLOCK_MONOTONIC);

                e->watchdog_fd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK|TFD_CLOEXEC);
                if (e->watchdog_fd < 0)
                        return -errno;

                r = arm_watchdog(e);
                if (r < 0)
                        goto fail;

                ev = (struct epoll_event) {
                        .events = EPOLLIN,
                        .data.ptr = INT_TO_PTR(SOURCE_WATCHDOG),
                };

                r = epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, e->watchdog_fd, &ev);
                if (r < 0) {
                        r = -errno;
                        goto fail;
                }

        } else {
                if (e->watchdog_fd >= 0) {
                        epoll_ctl(e->epoll_fd, EPOLL_CTL_DEL, e->watchdog_fd, NULL);
                        e->watchdog_fd = safe_close(e->watchdog_fd);
                }
        }

        e->watchdog = !!b;
        return e->watchdog;

fail:
        e->watchdog_fd = safe_close(e->watchdog_fd);
        return r;
}

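/* Illustrative sketch (hypothetical caller): opting into watchdog support. This is a
 * no-op unless the service manager set WATCHDOG_USEC= in the environment (e.g. via
 * WatchdogSec= in the unit file); r > 0 means the watchdog is now armed:
 *
 *     r = sd_event_set_watchdog(e, true);
 *     if (r < 0)
 *             return r;
 */
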
_public_ int sd_event_get_watchdog(sd_event *e) {
        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(!event_pid_changed(e), -ECHILD);

        return e->watchdog;
}

_public_ int sd_event_get_iteration(sd_event *e, uint64_t *ret) {
        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(!event_pid_changed(e), -ECHILD);

        *ret = e->iteration;
        return 0;
}

_public_ int sd_event_source_set_destroy_callback(sd_event_source *s, sd_event_destroy_t callback) {
        assert_return(s, -EINVAL);

        s->destroy_callback = callback;
        return 0;
}

_public_ int sd_event_source_get_destroy_callback(sd_event_source *s, sd_event_destroy_t *ret) {
        assert_return(s, -EINVAL);

        if (ret)
                *ret = s->destroy_callback;

        return !!s->destroy_callback;
}

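/* Illustrative sketch (hypothetical caller): a destroy callback tied to the userdata's
 * lifetime, invoked when the source is freed. context_free() is a hypothetical
 * destructor:
 *
 *     static void free_context(void *userdata) {
 *             context_free(userdata);
 *     }
 *
 *     sd_event_source_set_destroy_callback(s, free_context);
 */
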
_public_ int sd_event_source_get_floating(sd_event_source *s) {
        assert_return(s, -EINVAL);

        return s->floating;
}

_public_ int sd_event_source_set_floating(sd_event_source *s, int b) {
        assert_return(s, -EINVAL);

        if (s->floating == !!b)
                return 0;

        if (!s->event) /* Already disconnected */
                return -ESTALE;

        s->floating = b;

        if (b) {
                /* When a source becomes floating, the source holds a reference on the event loop instead of
                 * the other way round */
                sd_event_source_ref(s);
                sd_event_unref(s->event);
        } else {
                sd_event_ref(s->event);
                sd_event_source_unref(s