2 /* SPDX-License-Identifier: LGPL-2.1+ */
5 #include <sys/timerfd.h>
/* NOTE(review): incomplete excerpt — the original-file line numbers fused into
 * the text jump (19→20→21→29), so enumerators between the visible ones are
 * missing here and the numeric values of the visible members cannot be
 * inferred from this fragment. Confirm against the full header before use. */
15 typedef enum EventSourceType
{
/* Clock-based event source types (monotonic clock, plus the two *_ALARM
 * clocks that can wake the system from suspend) */
19 SOURCE_TIME_MONOTONIC
,
20 SOURCE_TIME_REALTIME_ALARM
,
21 SOURCE_TIME_BOOTTIME_ALARM
,
/* Sentinel: one past the last valid event source type */
29 _SOURCE_EVENT_SOURCE_TYPE_MAX
,
/* Sentinel: "no valid type" marker */
30 _SOURCE_EVENT_SOURCE_TYPE_INVALID
= -1
33 /* All objects we use in epoll events start with this value, so that
34 * we know how to dispatch it */
/* NOTE(review): incomplete excerpt — the fused original-line numbers jump
 * (37→42), so additional WAKEUP_* enumerators are missing from this view
 * and no closing "} WakeupType;" is visible. */
35 typedef enum WakeupType
{
37 WAKEUP_EVENT_SOURCE
, /* either I/O or pidfd wakeup */
/* Sentinel: "no valid wakeup type" marker */
42 _WAKEUP_TYPE_INVALID
= -1,
/* A single event source registered with an sd_event loop.
 *
 * NOTE(review): heavily elided excerpt — the fused original-line numbers jump
 * repeatedly (47→54→58→63…), so many members are missing, and the repeated
 * `callback` fields of different function-pointer types strongly suggest they
 * belong to distinct members of an anonymous per-type union in the complete
 * definition. Confirm against the full header. */
47 struct sd_event_source
{
/* Callback invoked during the event loop's "prepare" stage, before polling */
54 sd_event_handler_t prepare
;
/* Which kind of event source this is; packed into a 5-bit bitfield */
58 EventSourceType type
:5;
63 bool exit_on_failure
:1;
/* Positions of this source in the pending/prepare priority queues, and the
 * loop iteration counters recorded when it was last queued there */
66 unsigned pending_index
;
67 unsigned prepare_index
;
68 uint64_t pending_iteration
;
69 uint64_t prepare_iteration
;
/* Invoked when this event source object is destroyed */
71 sd_event_destroy_t destroy_callback
;
/* Linkage in the event loop's list of all sources */
73 LIST_FIELDS(sd_event_source
, sources
);
/* I/O event source: dispatch callback */
77 sd_event_io_handler_t callback
;
/* Timer event source: dispatch callback, next elapse time and permitted
 * accuracy (slack), plus positions in the earliest/latest prioqs */
85 sd_event_time_handler_t callback
;
86 usec_t next
, accuracy
;
87 unsigned earliest_index
;
88 unsigned latest_index
;
/* Signal event source: dispatch callback plus the siginfo dequeued from the
 * per-priority signalfd */
91 sd_event_signal_handler_t callback
;
92 struct signalfd_siginfo siginfo
;
/* Child (process) event source: dispatch callback and process bookkeeping */
96 sd_event_child_handler_t callback
;
101 bool registered
:1; /* whether the pidfd is registered in the epoll */
102 bool pidfd_owned
:1; /* close pidfd when event source is freed */
103 bool process_owned
:1; /* kill+reap process when event source is freed */
104 bool exited
:1; /* true if process exited (i.e. if there's value in SIGKILLing it if we want to get rid of it) */
105 bool waited
:1; /* true if process was waited for (i.e. if there's value in waitid(P_PID)'ing it if we want to get rid of it) */
/* Three more plain-handler callbacks — presumably the defer/post/exit union
 * members, the last of which also tracks its prioq position; the grouping is
 * not visible in this excerpt, so confirm against the full header */
108 sd_event_handler_t callback
;
111 sd_event_handler_t callback
;
114 sd_event_handler_t callback
;
115 unsigned prioq_index
;
/* Inotify event source: dispatch callback, the inode being watched, and
 * linkage in that inode's list of subscribed sources */
118 sd_event_inotify_handler_t callback
;
120 struct inode_data
*inode_data
;
121 LIST_FIELDS(sd_event_source
, by_inode_data
);
/* NOTE(review): the declaration of the enclosing struct is not visible in
 * this excerpt; these look like members of the main event-loop object, and
 * both comment blocks below are cut off mid-sentence by the elision. */
130 /* For all clocks we maintain two priority queues each, one
131 * ordered for the earliest times the events may be
132 * dispatched, and one ordered by the latest times they must
133 * have been dispatched. The range between the top entries in
134 * the two prioqs is the time window we can freely schedule
147 /* For each priority we maintain one signal fd, so that we
148 * only have to dequeue a single event per priority at a
154 sd_event_source
*current
;
157 /* A structure listing all event sources currently watching a specific inode */
/* NOTE(review): the "struct inode_data {" line itself is missing from this
 * excerpt, as are the members described by several of the comments below
 * (the dev+ino identifier, the kept-open fd, the watch descriptor); only
 * their documentation survived the elision. */
159 /* The identifier for the inode, the combination of the .st_dev + .st_ino fields of the file */
163 /* An fd of the inode to watch. The fd is kept open until the next iteration of the loop, so that we can
164 * rearrange the priority still until then, as we need the original inode to change the priority as we need to
165 * add a watch descriptor to the right inotify for the priority which we can only do if we have a handle to the
166 * original inode. We keep a list of all inode_data objects with an open fd in the to_close list (see below) of
167 * the sd-event object, so that it is efficient to close everything, before entering the next event loop
171 /* The inotify "watch descriptor" */
174 /* The combination of the mask of all inotify watches on this inode we manage. This is also the mask that has
175 * most recently been set on the watch descriptor. */
176 uint32_t combined_mask
;
178 /* All event sources subscribed to this inode */
179 LIST_HEAD(sd_event_source
, event_sources
);
181 /* The inotify object we watch this inode with */
182 struct inotify_data
*inotify_data
;
184 /* A linked list of all inode data objects with fds to close (see above) */
185 LIST_FIELDS(struct inode_data
, to_close
);
188 /* A structure encapsulating an inotify fd */
/* NOTE(review): incomplete excerpt — members such as the inotify fd, the
 * priority, and the pending-event counter appear to be elided (the fused
 * original-line numbers jump 192→198 and 207→210), and no closing brace is
 * visible. Confirm against the full header. */
189 struct inotify_data
{
192 /* For each priority we maintain one inotify fd, so that we only have to dequeue a single event per priority at
/* Lookup tables for the inode_data objects managed through this inotify fd */
198 Hashmap
*inodes
; /* The inode_data structures keyed by dev+ino */
199 Hashmap
*wd
; /* The inode_data structures keyed by the watch descriptor for each */
201 /* The buffer we read inotify events into */
202 union inotify_event_buffer buffer
;
203 size_t buffer_filled
; /* fill level of the buffer */
205 /* How many event sources are currently marked pending for this inotify. We won't read new events off the
206 * inotify fd as long as there are still pending events on the inotify (because we have no strategy of queuing
207 * the events locally if they can't be coalesced). */
210 /* A linked list of all inotify objects with data already read, that still need processing. We keep this list
211 * to make it efficient to figure out what inotify objects to process data on next. */
212 LIST_FIELDS(struct inotify_data
, buffered
);