1 /* SPDX-License-Identifier: LGPL-2.1-or-later */
8 #include "alloc-util.h"
14 #include "missing_syscall.h"
15 #include "parse-util.h"
16 #include "path-util.h"
17 #include "process-util.h"
18 #include "random-util.h"
20 #include "signal-util.h"
21 #include "stdio-util.h"
22 #include "string-util.h"
24 #include "tmpfile-util.h"
26 static int prepare_handler(sd_event_source
*s
, void *userdata
) {
27 log_info("preparing %c", PTR_TO_INT(userdata
));
/* Flags recording which handlers fired; reset by test_basic_one() before each run. */
static bool got_a, got_b, got_c, got_unref;
/* Counts how often the oneshot 'd' io handler ran (the test expects it to reach 2). */
static unsigned got_d;
34 static int unref_handler(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
35 sd_event_source_unref(s
);
/* IO callback shared by the 'a', 'b' and 'd' pipe sources in test_basic_one().
 * 'a' disables itself, 'd' toggles between ONESHOT and OFF across invocations.
 * NOTE(review): this extraction is garbled (stray line-number tokens, split
 * lines) and several statements are missing — e.g. the got_a/got_b/got_d
 * updates that test_basic_one() asserts on, and the trailing return. Kept
 * byte-identical; comments only. */
40 static int io_handler(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
42 log_info("got IO on %c", PTR_TO_INT(userdata
));
/* Source 'a' turns itself off on first dispatch. */
44 if (userdata
== INT_TO_PTR('a')) {
45 assert_se(sd_event_source_set_enabled(s
, SD_EVENT_OFF
) >= 0);
48 } else if (userdata
== INT_TO_PTR('b')) {
/* Source 'd' is re-armed as ONESHOT once, then turned off (got_d reaches 2). */
51 } else if (userdata
== INT_TO_PTR('d')) {
54 assert_se(sd_event_source_set_enabled(s
, SD_EVENT_ONESHOT
) >= 0);
56 assert_se(sd_event_source_set_enabled(s
, SD_EVENT_OFF
) >= 0);
63 static int child_handler(sd_event_source
*s
, const siginfo_t
*si
, void *userdata
) {
68 assert_se(si
->si_uid
== getuid());
69 assert_se(si
->si_signo
== SIGCHLD
);
70 assert_se(si
->si_code
== CLD_EXITED
);
71 assert_se(si
->si_status
== 78);
73 log_info("got child on %c", PTR_TO_INT(userdata
));
75 assert_se(userdata
== INT_TO_PTR('f'));
77 assert_se(sd_event_exit(sd_event_source_get_event(s
), 0) >= 0);
78 sd_event_source_unref(s
);
/* SIGUSR1 callback registered by defer_handler(). It forks a child, watches it
 * with a child event source owned by the loop, and queues SIGUSR2 with a sigval
 * payload at the child via sd_event_source_send_child_signal().
 * NOTE(review): this extraction is garbled and incomplete — the declarations of
 * ss/plain_si/pid, the fork() creating the child (which sigwaits SIGUSR2 and
 * exits with 78), and the trailing return are missing. Kept byte-identical;
 * comments only. */
83 static int signal_handler(sd_event_source
*s
, const struct signalfd_siginfo
*si
, void *userdata
) {
84 sd_event_source
*p
= NULL
;
91 log_info("got signal on %c", PTR_TO_INT(userdata
));
93 assert_se(userdata
== INT_TO_PTR('e'));
95 assert_se(sigprocmask_many(SIG_BLOCK
, NULL
, SIGCHLD
, SIGUSR2
, -1) >= 0);
/* NOTE(review): the following sigwaitinfo() block presumably runs in the forked
 * child (the fork is missing from this extraction) — confirm against upstream. */
103 assert_se(sigemptyset(&ss
) >= 0);
104 assert_se(sigaddset(&ss
, SIGUSR2
) >= 0);
107 assert_se(sigwaitinfo(&ss
, &plain_si
) >= 0);
109 assert_se(plain_si
.si_signo
== SIGUSR2
);
110 assert_se(plain_si
.si_value
.sival_int
== 4711);
/* Watch the child; ownership transferred to the event source so it is reaped
 * automatically. child_handler() asserts userdata 'f'. */
115 assert_se(sd_event_add_child(sd_event_source_get_event(s
), &p
, pid
, WEXITED
, child_handler
, INT_TO_PTR('f')) >= 0);
116 assert_se(sd_event_source_set_enabled(p
, SD_EVENT_ONESHOT
) >= 0);
117 assert_se(sd_event_source_set_child_process_own(p
, true) >= 0);
119 /* We can't use structured initialization here, since the structure contains various unions and these
120 * fields lie in overlapping (carefully aligned) unions that LLVM is allergic to allow assignments
123 plain_si
.si_signo
= SIGUSR2
;
124 plain_si
.si_code
= SI_QUEUE
;
125 plain_si
.si_pid
= getpid_cached();
126 plain_si
.si_uid
= getuid();
127 plain_si
.si_value
.sival_int
= 4711;
129 assert_se(sd_event_source_send_child_signal(p
, SIGUSR2
, &plain_si
, 0) >= 0);
131 sd_event_source_unref(s
);
136 static int defer_handler(sd_event_source
*s
, void *userdata
) {
137 sd_event_source
*p
= NULL
;
141 log_info("got defer on %c", PTR_TO_INT(userdata
));
143 assert_se(userdata
== INT_TO_PTR('d'));
145 assert_se(sigprocmask_many(SIG_BLOCK
, NULL
, SIGUSR1
, -1) >= 0);
147 assert_se(sd_event_add_signal(sd_event_source_get_event(s
), &p
, SIGUSR1
, signal_handler
, INT_TO_PTR('e')) >= 0);
148 assert_se(sd_event_source_set_enabled(p
, SD_EVENT_ONESHOT
) >= 0);
151 sd_event_source_unref(s
);
/* Timer callback for source 'c' in test_basic_one(); on (one of) its runs it
 * schedules a oneshot defer source (defer_handler, userdata 'd').
 * NOTE(review): this extraction is garbled and incomplete — the branch that
 * declares 'p', sets got_c, and the else-paths between the visible lines are
 * missing, as is the trailing return. Kept byte-identical; comments only. */
158 static int time_handler(sd_event_source
*s
, uint64_t usec
, void *userdata
) {
159 log_info("got timer on %c", PTR_TO_INT(userdata
));
161 if (userdata
== INT_TO_PTR('c')) {
/* Chain the next stage of the test: a oneshot defer source. */
166 assert_se(sd_event_add_defer(sd_event_source_get_event(s
), &p
, defer_handler
, INT_TO_PTR('d')) >= 0);
167 assert_se(sd_event_source_set_enabled(p
, SD_EVENT_ONESHOT
) >= 0);
/* Any other userdata value is a test bug. */
173 assert_not_reached();
/* Set by exit_handler() so the test can verify the exit handler actually ran. */
static bool got_exit = false;
180 static int exit_handler(sd_event_source
*s
, void *userdata
) {
181 log_info("got quit handler on %c", PTR_TO_INT(userdata
));
/* Set by post_handler() so the test can verify the post handler actually ran. */
static bool got_post = false;
190 static int post_handler(sd_event_source
*s
, void *userdata
) {
191 log_info("got post handler");
/* Core smoke test of sd-event: io, timer, defer, signal, child, post and exit
 * sources, priorities, prepare callbacks, self-unref from a handler, and
 * floating sources — run once with and once without pidfd support (selected
 * via the SYSTEMD_PIDFD environment variable).
 * NOTE(review): this extraction is garbled and incomplete — the declarations of
 * e/event_now/priority, several statements (e.g. the final cleanup of the event
 * loop and pipe fds) and the closing brace are missing. Kept byte-identical;
 * comments only. */
198 static void test_basic_one(bool with_pidfd
) {
200 sd_event_source
*w
= NULL
, *x
= NULL
, *y
= NULL
, *z
= NULL
, *q
= NULL
, *t
= NULL
;
201 static const char ch
= 'x';
202 int a
[2] = EBADF_PAIR
, b
[2] = EBADF_PAIR
,
203 d
[2] = EBADF_PAIR
, k
[2] = EBADF_PAIR
;
207 log_info("/* %s(pidfd=%s) */", __func__
, yes_no(with_pidfd
));
209 assert_se(setenv("SYSTEMD_PIDFD", yes_no(with_pidfd
), 1) >= 0);
211 assert_se(pipe(a
) >= 0);
212 assert_se(pipe(b
) >= 0);
213 assert_se(pipe(d
) >= 0);
214 assert_se(pipe(k
) >= 0);
216 assert_se(sd_event_default(&e
) >= 0);
217 assert_se(sd_event_now(e
, CLOCK_MONOTONIC
, &event_now
) > 0);
219 assert_se(sd_event_set_watchdog(e
, true) >= 0);
221 /* Test whether we cleanly can destroy an io event source from its own handler */
223 assert_se(sd_event_add_io(e
, &t
, k
[0], EPOLLIN
, unref_handler
, NULL
) >= 0);
224 assert_se(write(k
[1], &ch
, 1) == 1);
225 assert_se(sd_event_run(e
, UINT64_MAX
) >= 1);
226 assert_se(got_unref
);
228 got_a
= false, got_b
= false, got_c
= false, got_d
= 0;
230 /* Add a oneshot handler, trigger it, reenable it, and trigger it again. */
231 assert_se(sd_event_add_io(e
, &w
, d
[0], EPOLLIN
, io_handler
, INT_TO_PTR('d')) >= 0);
232 assert_se(sd_event_source_set_enabled(w
, SD_EVENT_ONESHOT
) >= 0);
233 assert_se(write(d
[1], &ch
, 1) >= 0);
234 assert_se(sd_event_run(e
, UINT64_MAX
) >= 1);
235 assert_se(got_d
== 1);
236 assert_se(write(d
[1], &ch
, 1) >= 0);
237 assert_se(sd_event_run(e
, UINT64_MAX
) >= 1);
238 assert_se(got_d
== 2);
240 assert_se(sd_event_add_io(e
, &x
, a
[0], EPOLLIN
, io_handler
, INT_TO_PTR('a')) >= 0);
241 assert_se(sd_event_add_io(e
, &y
, b
[0], EPOLLIN
, io_handler
, INT_TO_PTR('b')) >= 0);
244 assert_se(sd_event_add_time(e
, &z
, CLOCK_MONOTONIC
, 0, 0, time_handler
, INT_TO_PTR('c')) >= 0);
245 assert_se(sd_event_add_exit(e
, &q
, exit_handler
, INT_TO_PTR('g')) >= 0);
/* 'x' runs at lower priority (99) than the timer 'z' (50), so dispatch order
 * below is b (oneshot, default prio), then c, then a. */
247 assert_se(sd_event_source_set_priority(x
, 99) >= 0);
248 assert_se(sd_event_source_get_priority(x
, &priority
) >= 0);
249 assert_se(priority
== 99);
250 assert_se(sd_event_source_set_enabled(y
, SD_EVENT_ONESHOT
) >= 0);
251 assert_se(sd_event_source_set_prepare(x
, prepare_handler
) >= 0);
252 assert_se(sd_event_source_set_priority(z
, 50) >= 0);
253 assert_se(sd_event_source_set_enabled(z
, SD_EVENT_ONESHOT
) >= 0);
254 assert_se(sd_event_source_set_prepare(z
, prepare_handler
) >= 0);
256 /* Test for floating event sources */
257 assert_se(sigprocmask_many(SIG_BLOCK
, NULL
, SIGRTMIN
+1, -1) >= 0);
258 assert_se(sd_event_add_signal(e
, NULL
, SIGRTMIN
+1, NULL
, NULL
) >= 0);
260 assert_se(write(a
[1], &ch
, 1) >= 0);
261 assert_se(write(b
[1], &ch
, 1) >= 0);
263 assert_se(!got_a
&& !got_b
&& !got_c
);
265 assert_se(sd_event_run(e
, UINT64_MAX
) >= 1);
267 assert_se(!got_a
&& got_b
&& !got_c
);
269 assert_se(sd_event_run(e
, UINT64_MAX
) >= 1);
271 assert_se(!got_a
&& got_b
&& got_c
);
273 assert_se(sd_event_run(e
, UINT64_MAX
) >= 1);
275 assert_se(got_a
&& got_b
&& got_c
);
277 sd_event_source_unref(x
);
278 sd_event_source_unref(y
);
/* Re-arm the timer and run the loop to completion; the chain
 * time_handler -> defer_handler -> signal_handler -> child_handler eventually
 * calls sd_event_exit(). */
281 assert_se(sd_event_add_post(e
, NULL
, post_handler
, NULL
) >= 0);
282 assert_se(sd_event_now(e
, CLOCK_MONOTONIC
, &event_now
) == 0);
283 assert_se(sd_event_source_set_time(z
, event_now
+ 200 * USEC_PER_MSEC
) >= 0);
284 assert_se(sd_event_source_set_enabled(z
, SD_EVENT_ONESHOT
) >= 0);
286 assert_se(sd_event_loop(e
) >= 0);
290 sd_event_source_unref(z
);
291 sd_event_source_unref(q
);
293 sd_event_source_unref(w
);
302 assert_se(unsetenv("SYSTEMD_PIDFD") >= 0);
/* NOTE(review): the enclosing TEST(basic) { ... } wrapper was truncated in the
 * extraction; these two calls run the basic scenario with and without pidfd. */
306 test_basic_one(true); /* test with pidfd */
307 test_basic_one(false); /* test without pidfd */
/* NOTE(review): interior of TEST(sd_event_now) — the TEST(...) { header, the
 * event_now declaration and the closing brace were truncated in the extraction.
 * The test checks that sd_event_now() returns > 0 (fresh timestamp) before the
 * loop ever ran, and == 0 (cached timestamp) afterwards, for all supported
 * clocks, and -EOPNOTSUPP for invalid clock ids. Kept byte-identical. */
311 _cleanup_(sd_event_unrefp
) sd_event
*e
= NULL
;
314 assert_se(sd_event_new(&e
) >= 0);
315 assert_se(sd_event_now(e
, CLOCK_MONOTONIC
, &event_now
) > 0);
316 assert_se(sd_event_now(e
, CLOCK_REALTIME
, &event_now
) > 0);
317 assert_se(sd_event_now(e
, CLOCK_REALTIME_ALARM
, &event_now
) > 0);
318 assert_se(sd_event_now(e
, CLOCK_BOOTTIME
, &event_now
) > 0);
319 assert_se(sd_event_now(e
, CLOCK_BOOTTIME_ALARM
, &event_now
) > 0);
320 assert_se(sd_event_now(e
, -1, &event_now
) == -EOPNOTSUPP
);
321 assert_se(sd_event_now(e
, 900 /* arbitrary big number */, &event_now
) == -EOPNOTSUPP
);
/* After one (empty) loop iteration the timestamps are the cached ones. */
323 assert_se(sd_event_run(e
, 0) == 0);
325 assert_se(sd_event_now(e
, CLOCK_MONOTONIC
, &event_now
) == 0);
326 assert_se(sd_event_now(e
, CLOCK_REALTIME
, &event_now
) == 0);
327 assert_se(sd_event_now(e
, CLOCK_REALTIME_ALARM
, &event_now
) == 0);
328 assert_se(sd_event_now(e
, CLOCK_BOOTTIME
, &event_now
) == 0);
329 assert_se(sd_event_now(e
, CLOCK_BOOTTIME_ALARM
, &event_now
) == 0);
330 assert_se(sd_event_now(e
, -1, &event_now
) == -EOPNOTSUPP
);
331 assert_se(sd_event_now(e
, 900 /* arbitrary big number */, &event_now
) == -EOPNOTSUPP
);
/* State shared with rtqueue_handler(): the most recently dispatched queued
 * sigval, and how many queued signals have been dispatched so far. */
static int last_rtqueue_sigval = 0;
static int n_rtqueue = 0;
337 static int rtqueue_handler(sd_event_source
*s
, const struct signalfd_siginfo
*si
, void *userdata
) {
338 last_rtqueue_sigval
= si
->ssi_int
;
/* NOTE(review): interior of TEST(rtqueue) — the TEST(...) { header, the
 * declaration of 'e' and the closing brace were truncated in the extraction.
 * The test queues five signals with sigqueue() and verifies sd-event dispatches
 * them honoring source priority (SIGRTMIN+3 first, at priority -10) and queue
 * order, one per loop iteration. Kept byte-identical; comments only. */
344 sd_event_source
*u
= NULL
, *v
= NULL
, *s
= NULL
;
347 assert_se(sd_event_default(&e
) >= 0);
349 assert_se(sigprocmask_many(SIG_BLOCK
, NULL
, SIGRTMIN
+2, SIGRTMIN
+3, SIGUSR2
, -1) >= 0);
350 assert_se(sd_event_add_signal(e
, &u
, SIGRTMIN
+2, rtqueue_handler
, NULL
) >= 0);
351 assert_se(sd_event_add_signal(e
, &v
, SIGRTMIN
+3, rtqueue_handler
, NULL
) >= 0);
352 assert_se(sd_event_add_signal(e
, &s
, SIGUSR2
, rtqueue_handler
, NULL
) >= 0);
/* SIGRTMIN+3 gets the highest priority, hence is dispatched first below. */
354 assert_se(sd_event_source_set_priority(v
, -10) >= 0);
356 assert_se(sigqueue(getpid_cached(), SIGRTMIN
+2, (union sigval
) { .sival_int
= 1 }) >= 0);
357 assert_se(sigqueue(getpid_cached(), SIGRTMIN
+3, (union sigval
) { .sival_int
= 2 }) >= 0);
358 assert_se(sigqueue(getpid_cached(), SIGUSR2
, (union sigval
) { .sival_int
= 3 }) >= 0);
359 assert_se(sigqueue(getpid_cached(), SIGRTMIN
+3, (union sigval
) { .sival_int
= 4 }) >= 0);
360 assert_se(sigqueue(getpid_cached(), SIGUSR2
, (union sigval
) { .sival_int
= 5 }) >= 0);
362 assert_se(n_rtqueue
== 0);
363 assert_se(last_rtqueue_sigval
== 0);
365 assert_se(sd_event_run(e
, UINT64_MAX
) >= 1);
366 assert_se(n_rtqueue
== 1);
367 assert_se(last_rtqueue_sigval
== 2); /* first SIGRTMIN+3 */
369 assert_se(sd_event_run(e
, UINT64_MAX
) >= 1);
370 assert_se(n_rtqueue
== 2);
371 assert_se(last_rtqueue_sigval
== 4); /* second SIGRTMIN+3 */
373 assert_se(sd_event_run(e
, UINT64_MAX
) >= 1);
374 assert_se(n_rtqueue
== 3);
375 assert_se(last_rtqueue_sigval
== 3); /* first SIGUSR2 */
377 assert_se(sd_event_run(e
, UINT64_MAX
) >= 1);
378 assert_se(n_rtqueue
== 4);
379 assert_se(last_rtqueue_sigval
== 1); /* SIGRTMIN+2 */
381 assert_se(sd_event_run(e
, 0) == 0); /* the other SIGUSR2 is dropped, because the first one was still queued */
382 assert_se(n_rtqueue
== 4);
383 assert_se(last_rtqueue_sigval
== 1);
385 sd_event_source_unref(u
);
386 sd_event_source_unref(v
);
387 sd_event_source_unref(s
);
/* Per-test bookkeeping for the inotify tests below: which create events were
 * seen by which watch (bitmask per event index), whether each watch hit a
 * queue overflow, and whether the delete-self handler ran.
 * NOTE(review): the closing "};" of the struct was truncated in the extraction.
 * Kept byte-identical; comments only. */
392 #define CREATE_EVENTS_MAX (70000U)
394 struct inotify_context
{
395 bool delete_self_handler_called
;
396 unsigned create_called
[CREATE_EVENTS_MAX
];
397 unsigned create_overflow
;
398 unsigned n_create_events
;
/* Exits the event loop once all three inotify watches (indices 0..2) have seen
 * every create event — or hit a queue overflow — and the delete-self handler
 * ran.
 * NOTE(review): this extraction is garbled and incomplete — the declarations of
 * n/i, the early returns inside the loops, and the closing braces are missing.
 * Kept byte-identical; comments only. */
401 static void maybe_exit(sd_event_source
*s
, struct inotify_context
*c
) {
407 if (!c
->delete_self_handler_called
)
/* One bit per watch (descriptions "0".."2"). */
410 for (n
= 0; n
< 3; n
++) {
413 if (c
->create_overflow
& (1U << n
))
416 for (i
= 0; i
< c
->n_create_events
; i
++)
417 if (!(c
->create_called
[i
] & (1U << n
)))
421 sd_event_exit(sd_event_source_get_event(s
), 0);
/* Handler shared by the three IN_CREATE/IN_DELETE watches in
 * test_inotify_one(). The source description ("0".."2") identifies which watch
 * fired; 'bit' marks that watch in the context bitmasks.
 * NOTE(review): this extraction is garbled and incomplete — the declarations of
 * n/i/bit, the maybe_exit() calls and the trailing return are missing. Kept
 * byte-identical; comments only. */
424 static int inotify_handler(sd_event_source
*s
, const struct inotify_event
*ev
, void *userdata
) {
425 struct inotify_context
*c
= userdata
;
426 const char *description
;
429 assert_se(sd_event_source_get_description(s
, &description
) >= 0);
430 assert_se(safe_atou(description
, &n
) >= 0);
435 if (ev
->mask
& IN_Q_OVERFLOW
) {
436 log_info("inotify-handler <%s>: overflow", description
);
437 c
->create_overflow
|= bit
;
438 } else if (ev
->mask
& IN_CREATE
) {
439 if (streq(ev
->name
, "sub"))
440 log_debug("inotify-handler <%s>: create on %s", description
, ev
->name
);
/* Regular create events are named by their decimal index. */
444 assert_se(safe_atou(ev
->name
, &i
) >= 0);
445 assert_se(i
< c
->n_create_events
);
446 c
->create_called
[i
] |= bit
;
448 } else if (ev
->mask
& IN_DELETE
) {
449 log_info("inotify-handler <%s>: delete of %s", description
, ev
->name
);
450 assert_se(streq(ev
->name
, "sub"));
452 assert_not_reached();
458 static int delete_self_handler(sd_event_source
*s
, const struct inotify_event
*ev
, void *userdata
) {
459 struct inotify_context
*c
= userdata
;
461 if (ev
->mask
& IN_Q_OVERFLOW
) {
462 log_info("delete-self-handler: overflow");
463 c
->delete_self_handler_called
= true;
464 } else if (ev
->mask
& IN_DELETE_SELF
) {
465 log_info("delete-self-handler: delete-self");
466 c
->delete_self_handler_called
= true;
467 } else if (ev
->mask
& IN_IGNORED
) {
468 log_info("delete-self-handler: ignore");
470 assert_not_reached();
/* Creates a temporary directory watched by three overlapping inotify sources
 * plus a delete-self watch on a "sub" file, then creates n_create_events files
 * to exercise event delivery (and, with large counts, queue overflow).
 * NOTE(review): this extraction is garbled and incomplete — the declarations of
 * e/q/i, the context initializer tail, the loop's closing brace, the final
 * sd_event_unref and the function's closing brace are missing. Kept
 * byte-identical; comments only. */
476 static void test_inotify_one(unsigned n_create_events
) {
477 _cleanup_(rm_rf_physical_and_freep
) char *p
= NULL
;
478 sd_event_source
*a
= NULL
, *b
= NULL
, *c
= NULL
, *d
= NULL
;
479 struct inotify_context context
= {
480 .n_create_events
= n_create_events
,
486 log_info("/* %s(%u) */", __func__
, n_create_events
);
488 assert_se(sd_event_default(&e
) >= 0);
490 assert_se(mkdtemp_malloc("/tmp/test-inotify-XXXXXX", &p
) >= 0);
/* Three watches on the same directory with different masks/priorities. */
492 assert_se(sd_event_add_inotify(e
, &a
, p
, IN_CREATE
|IN_ONLYDIR
, inotify_handler
, &context
) >= 0);
493 assert_se(sd_event_add_inotify(e
, &b
, p
, IN_CREATE
|IN_DELETE
|IN_DONT_FOLLOW
, inotify_handler
, &context
) >= 0);
494 assert_se(sd_event_source_set_priority(b
, SD_EVENT_PRIORITY_IDLE
) >= 0);
495 assert_se(sd_event_source_set_priority(b
, SD_EVENT_PRIORITY_NORMAL
) >= 0);
496 assert_se(sd_event_add_inotify(e
, &c
, p
, IN_CREATE
|IN_DELETE
|IN_EXCL_UNLINK
, inotify_handler
, &context
) >= 0);
497 assert_se(sd_event_source_set_priority(c
, SD_EVENT_PRIORITY_IDLE
) >= 0);
/* Descriptions double as watch indices for inotify_handler(). */
499 assert_se(sd_event_source_set_description(a
, "0") >= 0);
500 assert_se(sd_event_source_set_description(b
, "1") >= 0);
501 assert_se(sd_event_source_set_description(c
, "2") >= 0);
503 q
= strjoina(p
, "/sub");
504 assert_se(touch(q
) >= 0);
505 assert_se(sd_event_add_inotify(e
, &d
, q
, IN_DELETE_SELF
, delete_self_handler
, &context
) >= 0);
507 for (i
= 0; i
< n_create_events
; i
++) {
508 char buf
[DECIMAL_STR_MAX(unsigned)+1];
509 _cleanup_free_
char *z
= NULL
;
511 xsprintf(buf
, "%u", i
);
512 assert_se(z
= path_join(p
, buf
));
514 assert_se(touch(z
) >= 0);
517 assert_se(unlink(q
) >= 0);
519 assert_se(sd_event_loop(e
) >= 0);
521 sd_event_source_unref(a
);
522 sd_event_source_unref(b
);
523 sd_event_source_unref(c
);
524 sd_event_source_unref(d
);
/* NOTE(review): the enclosing TEST(inotify) { ... } wrapper was truncated in
 * the extraction; these calls exercise the normal and overflow paths. */
530 test_inotify_one(100); /* should work without overflow */
531 test_inotify_one(33000); /* should trigger a q overflow */
534 static int pidfd_handler(sd_event_source
*s
, const siginfo_t
*si
, void *userdata
) {
538 assert_se(si
->si_uid
== getuid());
539 assert_se(si
->si_signo
== SIGCHLD
);
540 assert_se(si
->si_code
== CLD_EXITED
);
541 assert_se(si
->si_status
== 66);
543 log_info("got pidfd on %c", PTR_TO_INT(userdata
));
545 assert_se(userdata
== INT_TO_PTR('p'));
547 assert_se(sd_event_exit(sd_event_source_get_event(s
), 0) >= 0);
548 sd_event_source_unref(s
);
/* NOTE(review): interior of TEST(pidfd) — the TEST(...) { header, the forks
 * creating pid (exits 66) and pid2 (lives forever), the pidfd declaration,
 * the "return" on the unsupported path and the closing brace were truncated in
 * the extraction. Kept byte-identical; comments only. */
554 sd_event_source
*s
= NULL
, *t
= NULL
;
559 assert_se(sigprocmask_many(SIG_BLOCK
, NULL
, SIGCHLD
, -1) >= 0);
568 pidfd
= pidfd_open(pid
, 0);
570 /* No pidfd_open() supported or blocked? */
571 assert_se(ERRNO_IS_NOT_SUPPORTED(errno
) || ERRNO_IS_PRIVILEGE(errno
));
572 (void) wait_for_terminate(pid
, NULL
);
582 assert_se(sd_event_default(&e
) >= 0);
/* The pidfd source owns the fd; the loop exits once the child is reaped. */
583 assert_se(sd_event_add_child_pidfd(e
, &s
, pidfd
, WEXITED
, pidfd_handler
, INT_TO_PTR('p')) >= 0);
584 assert_se(sd_event_source_set_child_pidfd_own(s
, true) >= 0);
586 /* This one should never trigger, since our second child lives forever */
587 assert_se(sd_event_add_child(e
, &t
, pid2
, WEXITED
, pidfd_handler
, INT_TO_PTR('q')) >= 0);
588 assert_se(sd_event_source_set_child_process_own(t
, true) >= 0);
590 assert_se(sd_event_loop(e
) >= 0);
592 /* Child should still be alive */
593 assert_se(kill(pid2
, 0) >= 0);
595 t
= sd_event_source_unref(t
);
597 /* Child should now be dead, since we dropped the ref */
598 assert_se(kill(pid2
, 0) < 0 && errno
== ESRCH
);
603 static int ratelimit_io_handler(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
604 unsigned *c
= (unsigned*) userdata
;
609 static int ratelimit_time_handler(sd_event_source
*s
, uint64_t usec
, void *userdata
) {
612 r
= sd_event_source_set_enabled(s
, SD_EVENT_ON
);
614 log_warning_errno(r
, "Failed to turn on notify event source: %m");
616 r
= sd_event_source_set_time(s
, usec
+ 1000);
618 log_error_errno(r
, "Failed to restart watchdog event source: %m");
620 unsigned *c
= (unsigned*) userdata
;
626 static int expired
= -1;
627 static int ratelimit_expired(sd_event_source
*s
, void *userdata
) {
/* NOTE(review): interior of TEST(ratelimit) — the TEST(...) { header, the
 * declaration of 'interval', the loops' closing braces, the do { openers for
 * the two do/while loops, and the closing brace were truncated in the
 * extraction. The test drives an io source slowly (no ratelimit) and fast
 * (ratelimit hit), then a self-rearming timer, and finally checks the
 * ratelimit-expiry callback. Kept byte-identical; comments only. */
632 _cleanup_close_pair_
int p
[2] = EBADF_PAIR
;
633 _cleanup_(sd_event_unrefp
) sd_event
*e
= NULL
;
634 _cleanup_(sd_event_source_unrefp
) sd_event_source
*s
= NULL
;
636 unsigned count
, burst
;
638 assert_se(sd_event_default(&e
) >= 0);
639 assert_se(pipe2(p
, O_CLOEXEC
|O_NONBLOCK
) >= 0);
641 assert_se(sd_event_add_io(e
, &s
, p
[0], EPOLLIN
, ratelimit_io_handler
, &count
) >= 0);
642 assert_se(sd_event_source_set_description(s
, "test-ratelimit-io") >= 0);
643 assert_se(sd_event_source_set_ratelimit(s
, 1 * USEC_PER_SEC
, 5) >= 0);
644 assert_se(sd_event_source_get_ratelimit(s
, &interval
, &burst
) >= 0);
645 assert_se(interval
== 1 * USEC_PER_SEC
&& burst
== 5);
647 assert_se(write(p
[1], "1", 1) == 1);
/* Slow iterations: never more than 5 dispatches per second, so no ratelimit. */
650 for (unsigned i
= 0; i
< 10; i
++) {
651 log_debug("slow loop iteration %u", i
);
652 assert_se(sd_event_run(e
, UINT64_MAX
) >= 0);
653 assert_se(usleep_safe(250 * USEC_PER_MSEC
) >= 0);
656 assert_se(sd_event_source_is_ratelimited(s
) == 0);
657 assert_se(count
== 10);
658 log_info("ratelimit_io_handler: called %u times, event source not ratelimited", count
);
660 assert_se(sd_event_source_set_ratelimit(s
, 0, 0) >= 0);
661 assert_se(sd_event_source_set_ratelimit(s
, 1 * USEC_PER_SEC
, 5) >= 0);
/* Fast iterations: exceeds 5/sec, so the source must get ratelimited. */
664 for (unsigned i
= 0; i
< 10; i
++) {
665 log_debug("fast event loop iteration %u", i
);
666 assert_se(sd_event_run(e
, UINT64_MAX
) >= 0);
667 assert_se(usleep_safe(10) >= 0);
669 log_info("ratelimit_io_handler: called %u times, event source got ratelimited", count
);
670 assert_se(count
< 10);
672 s
= sd_event_source_unref(s
);
676 assert_se(sd_event_add_time_relative(e
, &s
, CLOCK_MONOTONIC
, 1000, 1, ratelimit_time_handler
, &count
) >= 0);
677 assert_se(sd_event_source_set_ratelimit(s
, 1 * USEC_PER_SEC
, 10) == 0);
680 assert_se(sd_event_run(e
, UINT64_MAX
) >= 0);
681 } while (!sd_event_source_is_ratelimited(s
));
683 log_info("ratelimit_time_handler: called %u times, event source got ratelimited", count
);
684 assert_se(count
== 10);
686 /* In order to get rid of active rate limit client needs to disable it explicitly */
687 assert_se(sd_event_source_set_ratelimit(s
, 0, 0) >= 0);
688 assert_se(!sd_event_source_is_ratelimited(s
));
690 assert_se(sd_event_source_set_ratelimit(s
, 1 * USEC_PER_SEC
, 10) >= 0);
692 /* Set callback that will be invoked when we leave rate limited state. */
693 assert_se(sd_event_source_set_ratelimit_expire_callback(s
, ratelimit_expired
) >= 0);
696 assert_se(sd_event_run(e
, UINT64_MAX
) >= 0);
697 } while (!sd_event_source_is_ratelimited(s
));
699 log_info("ratelimit_time_handler: called 10 more times, event source got ratelimited");
700 assert_se(count
== 20);
702 /* Dispatch the event loop once more and check that ratelimit expiration callback got called */
703 assert_se(sd_event_run(e
, UINT64_MAX
) >= 0);
704 assert_se(expired
== 0);
707 TEST(simple_timeout
) {
708 _cleanup_(sd_event_unrefp
) sd_event
*e
= NULL
;
709 usec_t f
, t
, some_time
;
711 some_time
= random_u64_range(2 * USEC_PER_SEC
);
713 assert_se(sd_event_default(&e
) >= 0);
715 assert_se(sd_event_prepare(e
) == 0);
717 f
= now(CLOCK_MONOTONIC
);
718 assert_se(sd_event_wait(e
, some_time
) >= 0);
719 t
= now(CLOCK_MONOTONIC
);
721 /* The event loop may sleep longer than the specified time (timer accuracy, scheduling latencies, …),
722 * but never shorter. Let's check that. */
723 assert_se(t
>= usec_add(f
, some_time
));
726 static int inotify_self_destroy_handler(sd_event_source
*s
, const struct inotify_event
*ev
, void *userdata
) {
727 sd_event_source
**p
= userdata
;
733 assert_se(FLAGS_SET(ev
->mask
, IN_ATTRIB
));
735 assert_se(sd_event_exit(sd_event_source_get_event(s
), 0) >= 0);
737 *p
= sd_event_source_unref(*p
); /* here's what we actually intend to test: we destroy the event
738 * source from inside the event source handler */
742 TEST(inotify_self_destroy
) {
743 _cleanup_(sd_event_source_unrefp
) sd_event_source
*s
= NULL
;
744 _cleanup_(sd_event_unrefp
) sd_event
*e
= NULL
;
745 char path
[] = "/tmp/inotifyXXXXXX";
746 _cleanup_close_
int fd
= -EBADF
;
748 /* Tests that destroying an inotify event source from its own handler is safe */
750 assert_se(sd_event_default(&e
) >= 0);
752 fd
= mkostemp_safe(path
);
754 assert_se(sd_event_add_inotify_fd(e
, &s
, fd
, IN_ATTRIB
, inotify_self_destroy_handler
, &s
) >= 0);
756 assert_se(unlink(path
) >= 0); /* This will trigger IN_ATTRIB because link count goes to zero */
757 assert_se(sd_event_loop(e
) >= 0);
/* Context for TEST(inotify_process_buffered_data).
 * NOTE(review): the member declarations and closing "};" were truncated in the
 * extraction — the handler below accesses ->path[] (two directory paths) and
 * ->i (index of the next expected event). Kept byte-identical. */
760 struct inotify_process_buffered_data_context
{
765 static int inotify_process_buffered_data_handler(sd_event_source
*s
, const struct inotify_event
*ev
, void *userdata
) {
766 struct inotify_process_buffered_data_context
*c
= ASSERT_PTR(userdata
);
767 const char *description
;
769 assert_se(sd_event_source_get_description(s
, &description
) >= 0);
772 assert_se(streq(c
->path
[c
->i
], description
));
/* Regression test for issue #23826: events buffered for a second inotify
 * source must still be reported by sd_event_prepare() after the first source
 * was dispatched.
 * NOTE(review): this extraction is incomplete — the context initializer body
 * (presumably .path = { p, q }), the descriptions set on a/b, the freeing of z
 * between the two path_join() calls, and the closing brace are missing. Kept
 * byte-identical; comments only. */
778 TEST(inotify_process_buffered_data
) {
779 _cleanup_(rm_rf_physical_and_freep
) char *p
= NULL
, *q
= NULL
;
780 _cleanup_(sd_event_source_unrefp
) sd_event_source
*a
= NULL
, *b
= NULL
;
781 _cleanup_(sd_event_unrefp
) sd_event
*e
= NULL
;
782 _cleanup_free_
char *z
= NULL
;
784 /* For issue #23826 */
786 assert_se(sd_event_default(&e
) >= 0);
788 assert_se(mkdtemp_malloc("/tmp/test-inotify-XXXXXX", &p
) >= 0);
789 assert_se(mkdtemp_malloc("/tmp/test-inotify-XXXXXX", &q
) >= 0);
791 struct inotify_process_buffered_data_context context
= {
795 assert_se(sd_event_add_inotify(e
, &a
, p
, IN_CREATE
, inotify_process_buffered_data_handler
, &context
) >= 0);
796 assert_se(sd_event_add_inotify(e
, &b
, q
, IN_CREATE
, inotify_process_buffered_data_handler
, &context
) >= 0);
798 assert_se(z
= path_join(p
, "aaa"));
799 assert_se(touch(z
) >= 0);
801 assert_se(z
= path_join(q
, "bbb"));
802 assert_se(touch(z
) >= 0);
/* One run dispatches the first event; the second must still be pending. */
805 assert_se(sd_event_run(e
, 10 * USEC_PER_SEC
) > 0);
806 assert_se(sd_event_prepare(e
) > 0); /* issue #23826: this was 0. */
807 assert_se(sd_event_dispatch(e
) > 0);
808 assert_se(sd_event_prepare(e
) == 0);
809 assert_se(sd_event_wait(e
, 0) == 0);
/* NOTE(review): interior of a fork test — the TEST(...) { header, the
 * declaration of r, the result check after safe_fork() and the closing braces
 * were truncated in the extraction. It verifies that in a forked child the
 * sd_event ref/unref cleanup functions return NULL. Kept byte-identical. */
813 _cleanup_(sd_event_unrefp
) sd_event
*e
= NULL
;
816 assert_se(sd_event_default(&e
) >= 0);
817 assert_se(sd_event_prepare(e
) == 0);
819 /* Check that after a fork the cleanup functions return NULL */
820 r
= safe_fork("(bus-fork-test)", FORK_WAIT
|FORK_LOG
, NULL
);
823 assert_se(sd_event_ref(e
) == NULL
);
824 assert_se(sd_event_unref(e
) == NULL
);
831 static int hup_callback(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
832 unsigned *c
= userdata
;
834 assert_se(revents
== EPOLLHUP
);
/* Exercises sd_event_source_leave_ratelimit(): drives a continuously-firing
 * EPOLLHUP source into its ratelimit, then leaves the ratelimit manually and
 * verifies the return-value contract (0 when not limited, > 0 when it was).
 * NOTE(review): this extraction is incomplete — the declarations of c/r, the
 * for-loop header around the prepare/wait/dispatch sequence, the result checks
 * after each call, the iteration-count branches and the closing braces are
 * missing. Kept byte-identical; comments only. */
840 TEST(leave_ratelimit
) {
841 bool expect_ratelimit
= false, manually_left_ratelimit
= false;
842 _cleanup_(sd_event_source_unrefp
) sd_event_source
*s
= NULL
;
843 _cleanup_(sd_event_unrefp
) sd_event
*e
= NULL
;
844 _cleanup_close_pair_
int pfd
[2] = EBADF_PAIR
;
848 assert_se(sd_event_default(&e
) >= 0);
850 /* Create an event source that will continuously fire by creating a pipe whose write side is closed,
851 * and which hence will only see EOF and constant EPOLLHUP */
852 assert_se(pipe2(pfd
, O_CLOEXEC
) >= 0);
853 assert_se(sd_event_add_io(e
, &s
, pfd
[0], EPOLLIN
, hup_callback
, &c
) >= 0);
854 assert_se(sd_event_source_set_io_fd_own(s
, true) >= 0);
855 assert_se(sd_event_source_set_ratelimit(s
, 5*USEC_PER_MINUTE
, 5) >= 0);
858 pfd
[1] = safe_close(pfd
[1]); /* Trigger continuous EOF */
/* Manually step the loop so we can observe the ratelimit state transitions. */
861 r
= sd_event_prepare(e
);
865 r
= sd_event_wait(e
, UINT64_MAX
);
869 r
= sd_event_dispatch(e
);
872 r
= sd_event_source_is_ratelimited(s
);
876 /* First four dispatches should just work */
879 /* The fifth dispatch should still work, but we now expect the ratelimit to be hit subsequently */
880 if (!expect_ratelimit
) {
882 assert_se(sd_event_source_leave_ratelimit(s
) == 0); /* this should be a NOP, and return 0 hence */
883 expect_ratelimit
= true;
885 /* We expected the ratelimit, let's leave it manually, and verify it */
887 assert_se(sd_event_source_leave_ratelimit(s
) > 0); /* we are ratelimited, hence should return > 0 */
888 assert_se(sd_event_source_is_ratelimited(s
) == 0);
890 manually_left_ratelimit
= true;
894 /* On the sixth iteration let's just exit */
898 /* Verify we definitely hit the ratelimit and left it manually again */
899 assert_se(manually_left_ratelimit
);
/* Test harness entry point (macro from tests.h): runs all TEST() cases above
 * with debug-level logging. */
DEFINE_TEST_MAIN(LOG_DEBUG);