#pragma once
/* SPDX-License-Identifier: LGPL-2.1-or-later */

#include <sys/epoll.h>
#include <sys/signalfd.h>
#include <sys/timerfd.h>
#include <sys/wait.h>

#include "sd-event.h"

#include "fs-util.h"
#include "hashmap.h"
#include "list.h"
#include "prioq.h"

typedef enum EventSourceType {
        SOURCE_IO,
        SOURCE_TIME_REALTIME,
        SOURCE_TIME_BOOTTIME,
        SOURCE_TIME_MONOTONIC,
        SOURCE_TIME_REALTIME_ALARM,
        SOURCE_TIME_BOOTTIME_ALARM,
        SOURCE_SIGNAL,
        SOURCE_CHILD,
        SOURCE_DEFER,
        SOURCE_POST,
        SOURCE_EXIT,
        SOURCE_WATCHDOG,
        SOURCE_INOTIFY,
        _SOURCE_EVENT_SOURCE_TYPE_MAX,
        _SOURCE_EVENT_SOURCE_TYPE_INVALID = -1
} EventSourceType;

/* All objects we use in epoll events start with a field of this type, so that
 * we know how to dispatch them (see the sketch after this enum) */
typedef enum WakeupType {
        WAKEUP_NONE,
        WAKEUP_EVENT_SOURCE, /* either I/O or pidfd wakeup */
        WAKEUP_CLOCK_DATA,
        WAKEUP_SIGNAL_DATA,
        WAKEUP_INOTIFY_DATA,
        _WAKEUP_TYPE_MAX,
        _WAKEUP_TYPE_INVALID = -1,
} WakeupType;
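
/* A minimal sketch (not part of the original header) of how the common leading
 * WakeupType field is meant to be used: whatever pointer was stored in
 * epoll_event.data.ptr is first interpreted as a WakeupType*, and only then
 * cast to the concrete object type. The helper name is hypothetical; the real
 * dispatch logic lives in sd-event.c. */
static inline WakeupType wakeup_type_of(const struct epoll_event *ev) {
        /* Valid for every object we register, since each starts with a WakeupType */
        return *(const WakeupType*) ev->data.ptr;
}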

struct inode_data;

struct sd_event_source {
        WakeupType wakeup;

        unsigned n_ref;

        sd_event *event;
        void *userdata;
        sd_event_handler_t prepare;

        char *description;

        EventSourceType type:5;
        signed int enabled:3;
        bool pending:1;
        bool dispatching:1;
        bool floating:1;
        bool exit_on_failure:1;

        int64_t priority;
        unsigned pending_index;
        unsigned prepare_index;
        uint64_t pending_iteration;
        uint64_t prepare_iteration;

        sd_event_destroy_t destroy_callback;

        LIST_FIELDS(sd_event_source, sources);

        union {
                struct {
                        sd_event_io_handler_t callback;
                        int fd;
                        uint32_t events;
                        uint32_t revents;
                        bool registered:1;
                        bool owned:1;
                } io;
                struct {
                        sd_event_time_handler_t callback;
                        usec_t next, accuracy;
                        unsigned earliest_index;
                        unsigned latest_index;
                } time;
                struct {
                        sd_event_signal_handler_t callback;
                        struct signalfd_siginfo siginfo;
                        int sig;
                } signal;
                struct {
                        sd_event_child_handler_t callback;
                        siginfo_t siginfo;
                        pid_t pid;
                        int options;
                        int pidfd;
                        bool registered:1; /* whether the pidfd is registered in the epoll */
                        bool pidfd_owned:1; /* close pidfd when event source is freed */
                        bool process_owned:1; /* kill+reap process when event source is freed */
                        bool exited:1; /* true if process exited (i.e. there's no value in SIGKILLing it if we want to get rid of it) */
                        bool waited:1; /* true if process was waited for (i.e. there's no value in waitid(P_PID)'ing it if we want to get rid of it) */
                } child; /* (how these flags combine is sketched after this struct) */
                struct {
                        sd_event_handler_t callback;
                } defer;
                struct {
                        sd_event_handler_t callback;
                } post;
                struct {
                        sd_event_handler_t callback;
                        unsigned prioq_index;
                } exit;
                struct {
                        sd_event_inotify_handler_t callback;
                        uint32_t mask;
                        struct inode_data *inode_data;
                        LIST_FIELDS(sd_event_source, by_inode_data);
                } inotify;
        };
};
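
/* A hedged sketch (not part of the original header; assumes <signal.h> and
 * <unistd.h> in addition to the includes above) of how the child flags are
 * meant to combine when a child source is dropped. The helper name is
 * hypothetical; the real logic lives in sd-event.c. */
static inline void child_source_cleanup_sketch(sd_event_source *s) {
        if (s->type != SOURCE_CHILD)
                return;

        if (s->child.process_owned) {
                if (!s->child.exited)
                        /* Only useful while the process is still running */
                        (void) kill(s->child.pid, SIGKILL);

                if (!s->child.waited) {
                        siginfo_t si = {};

                        /* Reap it, so that no zombie is left behind */
                        (void) waitid(P_PID, s->child.pid, &si, WEXITED);
                }
        }

        if (s->child.pidfd_owned && s->child.pidfd >= 0) {
                (void) close(s->child.pidfd);
                s->child.pidfd = -1;
        }
}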

struct clock_data {
        WakeupType wakeup;
        int fd;

        /* For all clocks we maintain two priority queues each, one ordered by
         * the earliest time the events may be dispatched, and one ordered by
         * the latest time they must have been dispatched. The range between
         * the top entries in the two prioqs is the time window within which we
         * can freely schedule wakeups (see the sketch after this struct). */

        Prioq *earliest;
        Prioq *latest;
        usec_t next;

        bool needs_rearm:1;
};
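
/* A minimal sketch (not part of the original header) of the scheduling window
 * described above: the next timer wakeup may fire no earlier than the top of
 * the "earliest" prioq, and must fire no later than the top of the "latest"
 * prioq plus its accuracy. The helper name is hypothetical (the real arming
 * logic lives in sd-event.c); usec_add() and USEC_INFINITY are assumed to be
 * reachable from time-util.h via the includes above. */
static inline void clock_data_window_sketch(struct clock_data *d, usec_t *ret_from, usec_t *ret_until) {
        sd_event_source *a = prioq_peek(d->earliest),
                        *b = prioq_peek(d->latest);

        *ret_from = a ? a->time.next : USEC_INFINITY;
        *ret_until = b ? usec_add(b->time.next, b->time.accuracy) : USEC_INFINITY;
}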

struct signal_data {
        WakeupType wakeup;

        /* For each priority we maintain one signal fd, so that we only have to
         * dequeue a single event per priority at a time (see the sketch after
         * this struct). */

        int fd;
        int64_t priority;
        sigset_t sigset;
        sd_event_source *current;
};
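
/* Sketch (not part of the original header; assumes <signal.h> and <errno.h> in
 * addition to the includes above): each signal_data owns one signalfd whose
 * mask is the union of all signals watched at that priority, so subscribing to
 * a new signal means extending the sigset and re-applying it to the existing
 * fd. The helper name is hypothetical; the real logic lives in sd-event.c. */
static inline int signal_data_add_signal_sketch(struct signal_data *d, int sig) {
        if (sigismember(&d->sigset, sig) > 0)
                return 0; /* already covered by this priority's signalfd */

        if (sigaddset(&d->sigset, sig) < 0)
                return -errno;

        /* Re-applying the enlarged mask updates the existing signalfd in place */
        if (signalfd(d->fd, &d->sigset, SFD_NONBLOCK|SFD_CLOEXEC) < 0)
                return -errno;

        return 1;
}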

/* A structure listing all event sources currently watching a specific inode */
struct inode_data {
        /* The identifier for the inode, i.e. the combination of the .st_dev and .st_ino fields of the file */
        ino_t ino;
        dev_t dev;

        /* An fd of the inode to watch. The fd is kept open until the next iteration of the event loop, so that
         * we can still change the priority until then: changing the priority means adding the watch descriptor
         * to the inotify object matching the new priority, which is only possible while we have a handle to the
         * original inode. We keep a list of all inode_data objects with an open fd in the to_close list (see
         * below) of the sd-event object, so that it is efficient to close them all before entering the next
         * event loop iteration. */
        int fd;

        /* The inotify "watch descriptor" */
        int wd;

        /* The union of the masks of all inotify watches on this inode we manage. This is also the mask that has
         * most recently been set on the watch descriptor (see the sketch after this struct). */
        uint32_t combined_mask;

        /* All event sources subscribed to this inode */
        LIST_HEAD(sd_event_source, event_sources);

        /* The inotify object we watch this inode with */
        struct inotify_data *inotify_data;

        /* A linked list of all inode data objects with fds to close (see above) */
        LIST_FIELDS(struct inode_data, to_close);
};
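
/* Sketch (not part of the original header): combined_mask is simply the union
 * of the masks of all event sources on the list above; a hypothetical helper
 * like this would recompute it before the watch descriptor is (re)registered
 * via inotify_add_watch(). The real version lives in sd-event.c. */
static inline uint32_t inode_data_combined_mask_sketch(struct inode_data *d) {
        uint32_t combined = 0;
        sd_event_source *s;

        LIST_FOREACH(inotify.by_inode_data, s, d->event_sources)
                combined |= s->inotify.mask;

        return combined;
}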

/* A structure encapsulating an inotify fd */
struct inotify_data {
        WakeupType wakeup;

        /* For each priority we maintain one inotify fd, so that we only have to dequeue a single event per
         * priority at a time */

        int fd;
        int64_t priority;

        Hashmap *inodes; /* The inode_data structures keyed by dev+ino */
        Hashmap *wd;     /* The inode_data structures keyed by the watch descriptor for each */

        /* The buffer we read inotify events into */
        union inotify_event_buffer buffer;
        size_t buffer_filled; /* fill level of the buffer */

        /* How many event sources are currently marked pending for this inotify. We won't read new events off
         * the inotify fd as long as there are still pending events on the inotify (because we have no strategy
         * for queuing the events locally if they can't be coalesced). See the sketch after this struct. */
        unsigned n_pending;

        /* A linked list of all inotify objects with data already read, that still need processing. We keep this
         * list to make it efficient to figure out which inotify objects to process data on next. */
        LIST_FIELDS(struct inotify_data, buffered);
};
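
/* Sketch (not part of the original header; assumes <unistd.h> and <errno.h> in
 * addition to the includes above) of the gating described above: new data is
 * only read off the inotify fd once all previously delivered events have been
 * dispatched and the buffer has been drained, since events that cannot be
 * coalesced have nowhere else to be queued. The helper name is hypothetical;
 * the real reader lives in sd-event.c. */
static inline int inotify_data_read_sketch(struct inotify_data *d) {
        ssize_t l;

        if (d->n_pending > 0)
                return 0; /* undispatched events remain, don't read more */
        if (d->buffer_filled > 0)
                return 0; /* buffer not yet drained */

        l = read(d->fd, &d->buffer, sizeof(d->buffer));
        if (l < 0)
                return (errno == EAGAIN || errno == EINTR) ? 0 : -errno;

        d->buffer_filled = (size_t) l;
        return 1; /* new events buffered, caller should process them */
}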