int enabled,
uint32_t events) {
- struct epoll_event ev;
- int r;
-
assert(s);
assert(s->type == SOURCE_IO);
assert(enabled != SD_EVENT_OFF);
- ev = (struct epoll_event) {
+ struct epoll_event ev = {
.events = events | (enabled == SD_EVENT_ONESHOT ? EPOLLONESHOT : 0),
.data.ptr = s,
};
+ int r;
- if (s->io.registered)
- r = epoll_ctl(s->event->epoll_fd, EPOLL_CTL_MOD, s->io.fd, &ev);
- else
- r = epoll_ctl(s->event->epoll_fd, EPOLL_CTL_ADD, s->io.fd, &ev);
+ r = epoll_ctl(s->event->epoll_fd,
+ s->io.registered ? EPOLL_CTL_MOD : EPOLL_CTL_ADD,
+ s->io.fd,
+ &ev);
if (r < 0)
return -errno;
assert(enabled != SD_EVENT_OFF);
if (EVENT_SOURCE_WATCH_PIDFD(s)) {
- struct epoll_event ev;
-
- ev = (struct epoll_event) {
+ struct epoll_event ev = {
.events = EPOLLIN | (enabled == SD_EVENT_ONESHOT ? EPOLLONESHOT : 0),
.data.ptr = s,
};
int sig,
struct signal_data **ret) {
- struct epoll_event ev;
struct signal_data *d;
bool added = false;
sigset_t ss_copy;
d->fd = fd_move_above_stdio(r);
- ev = (struct epoll_event) {
+ struct epoll_event ev = {
.events = EPOLLIN,
.data.ptr = d,
};
struct clock_data *d,
clockid_t clock) {
- struct epoll_event ev;
- int r, fd;
-
assert(e);
assert(d);
if (_likely_(d->fd >= 0))
return 0;
+ _cleanup_close_ int fd = -1;
+ int r;
+
fd = timerfd_create(clock, TFD_NONBLOCK|TFD_CLOEXEC);
if (fd < 0)
return -errno;
fd = fd_move_above_stdio(fd);
- ev = (struct epoll_event) {
+ struct epoll_event ev = {
.events = EPOLLIN,
.data.ptr = d,
};
r = epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, fd, &ev);
- if (r < 0) {
- safe_close(fd);
+ if (r < 0)
return -errno;
- }
- d->fd = fd;
+ d->fd = TAKE_FD(fd);
return 0;
}
return 0;
}
+_public_ int sd_event_add_time_relative(
+                sd_event *e,
+                sd_event_source **ret,
+                clockid_t clock,
+                uint64_t usec,
+                uint64_t accuracy,
+                sd_event_time_handler_t callback,
+                void *userdata) {
+
+        usec_t t;
+        int r;
+
+        /* Same as sd_event_add_time() but operates relative to the event loop's current point in time, and
+         * checks for overflow. */
+
+        /* Resolve "now" on the requested clock; propagates -EINVAL/-ECHILD etc. from sd_event_now(). */
+        r = sd_event_now(e, clock, &t);
+        if (r < 0)
+                return r;
+
+        /* Reject if t + usec would reach or exceed USEC_INFINITY, so the addition below cannot wrap
+         * (and presumably so the result never collides with USEC_INFINITY's sentinel meaning — matches
+         * the same check in sd_event_source_set_time_relative()). */
+        if (usec >= USEC_INFINITY - t)
+                return -EOVERFLOW;
+
+        return sd_event_add_time(e, ret, clock, t + usec, accuracy, callback, userdata);
+}
+
static int signal_exit_callback(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
assert(s);
assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
assert_return(!event_pid_changed(e), -ECHILD);
- r = set_ensure_allocated(&e->post_sources, NULL);
- if (r < 0)
- return r;
-
s = source_new(e, !ret, SOURCE_POST);
if (!s)
return -ENOMEM;
s->userdata = userdata;
s->enabled = SD_EVENT_ON;
- r = set_put(e->post_sources, s);
+ r = set_ensure_put(&e->post_sources, NULL, s);
if (r < 0)
return r;
+ assert(r > 0);
if (ret)
*ret = s;
_cleanup_close_ int fd = -1;
struct inotify_data *d;
- struct epoll_event ev;
int r;
assert(e);
return r;
}
- ev = (struct epoll_event) {
+ struct epoll_event ev = {
.events = EPOLLIN,
.data.ptr = d,
};
return 0;
}
+/* Same as sd_event_source_set_time(), but the expiry is given relative to the event loop's current
+ * point in time on the source's own clock, with an explicit overflow check. Returns -EDOM if the
+ * source is not a time event source, -EOVERFLOW if now + usec would overflow, or whatever error
+ * sd_event_now() reports. */
+_public_ int sd_event_source_set_time_relative(sd_event_source *s, uint64_t usec) {
+        usec_t t;
+        int r;
+
+        assert_return(s, -EINVAL);
+        assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
+
+        /* Use the clock that corresponds to this source's type, not a caller-supplied one. */
+        r = sd_event_now(s->event, event_source_type_to_clock(s->type), &t);
+        if (r < 0)
+                return r;
+
+        /* Same overflow guard as sd_event_add_time_relative(): ensure t + usec stays below USEC_INFINITY. */
+        if (usec >= USEC_INFINITY - t)
+                return -EOVERFLOW;
+
+        return sd_event_source_set_time(s, t + usec);
+}
+
_public_ int sd_event_source_get_time_accuracy(sd_event_source *s, uint64_t *usec) {
assert_return(s, -EINVAL);
assert_return(usec, -EINVAL);
return e->watchdog;
if (b) {
- struct epoll_event ev;
-
r = sd_watchdog_enabled(false, &e->watchdog_period);
if (r <= 0)
return r;
if (r < 0)
goto fail;
- ev = (struct epoll_event) {
+ struct epoll_event ev = {
.events = EPOLLIN,
.data.ptr = INT_TO_PTR(SOURCE_WATCHDOG),
};