#include "sd-id128.h"
#include "alloc-util.h"
+#include "event-source.h"
#include "fd-util.h"
#include "fs-util.h"
#include "hashmap.h"
#include "list.h"
#include "macro.h"
+#include "memory-util.h"
#include "missing.h"
#include "prioq.h"
#include "process-util.h"
#include "string-table.h"
#include "string-util.h"
#include "time-util.h"
-#include "util.h"
#define DEFAULT_ACCURACY_USEC (250 * USEC_PER_MSEC)
-typedef enum EventSourceType {
- SOURCE_IO,
- SOURCE_TIME_REALTIME,
- SOURCE_TIME_BOOTTIME,
- SOURCE_TIME_MONOTONIC,
- SOURCE_TIME_REALTIME_ALARM,
- SOURCE_TIME_BOOTTIME_ALARM,
- SOURCE_SIGNAL,
- SOURCE_CHILD,
- SOURCE_DEFER,
- SOURCE_POST,
- SOURCE_EXIT,
- SOURCE_WATCHDOG,
- SOURCE_INOTIFY,
- _SOURCE_EVENT_SOURCE_TYPE_MAX,
- _SOURCE_EVENT_SOURCE_TYPE_INVALID = -1
-} EventSourceType;
-
static const char* const event_source_type_table[_SOURCE_EVENT_SOURCE_TYPE_MAX] = {
[SOURCE_IO] = "io",
[SOURCE_TIME_REALTIME] = "realtime",
DEFINE_PRIVATE_STRING_TABLE_LOOKUP_TO_STRING(event_source_type, int);
-/* All objects we use in epoll events start with this value, so that
- * we know how to dispatch it */
-typedef enum WakeupType {
- WAKEUP_NONE,
- WAKEUP_EVENT_SOURCE,
- WAKEUP_CLOCK_DATA,
- WAKEUP_SIGNAL_DATA,
- WAKEUP_INOTIFY_DATA,
- _WAKEUP_TYPE_MAX,
- _WAKEUP_TYPE_INVALID = -1,
-} WakeupType;
-
#define EVENT_SOURCE_IS_TIME(t) IN_SET((t), SOURCE_TIME_REALTIME, SOURCE_TIME_BOOTTIME, SOURCE_TIME_MONOTONIC, SOURCE_TIME_REALTIME_ALARM, SOURCE_TIME_BOOTTIME_ALARM)
-struct inode_data;
-
-struct sd_event_source {
- WakeupType wakeup;
-
- unsigned n_ref;
-
- sd_event *event;
- void *userdata;
- sd_event_handler_t prepare;
-
- char *description;
-
- EventSourceType type:5;
- signed int enabled:3;
- bool pending:1;
- bool dispatching:1;
- bool floating:1;
-
- int64_t priority;
- unsigned pending_index;
- unsigned prepare_index;
- uint64_t pending_iteration;
- uint64_t prepare_iteration;
-
- sd_event_destroy_t destroy_callback;
-
- LIST_FIELDS(sd_event_source, sources);
-
- union {
- struct {
- sd_event_io_handler_t callback;
- int fd;
- uint32_t events;
- uint32_t revents;
- bool registered:1;
- bool owned:1;
- } io;
- struct {
- sd_event_time_handler_t callback;
- usec_t next, accuracy;
- unsigned earliest_index;
- unsigned latest_index;
- } time;
- struct {
- sd_event_signal_handler_t callback;
- struct signalfd_siginfo siginfo;
- int sig;
- } signal;
- struct {
- sd_event_child_handler_t callback;
- siginfo_t siginfo;
- pid_t pid;
- int options;
- } child;
- struct {
- sd_event_handler_t callback;
- } defer;
- struct {
- sd_event_handler_t callback;
- } post;
- struct {
- sd_event_handler_t callback;
- unsigned prioq_index;
- } exit;
- struct {
- sd_event_inotify_handler_t callback;
- uint32_t mask;
- struct inode_data *inode_data;
- LIST_FIELDS(sd_event_source, by_inode_data);
- } inotify;
- };
-};
-
-struct clock_data {
- WakeupType wakeup;
- int fd;
-
- /* For all clocks we maintain two priority queues each, one
- * ordered for the earliest times the events may be
- * dispatched, and one ordered by the latest times they must
- * have been dispatched. The range between the top entries in
- * the two prioqs is the time window we can freely schedule
- * wakeups in */
-
- Prioq *earliest;
- Prioq *latest;
- usec_t next;
-
- bool needs_rearm:1;
-};
-
-struct signal_data {
- WakeupType wakeup;
-
- /* For each priority we maintain one signal fd, so that we
- * only have to dequeue a single event per priority at a
- * time. */
-
- int fd;
- int64_t priority;
- sigset_t sigset;
- sd_event_source *current;
-};
-
-/* A structure listing all event sources currently watching a specific inode */
-struct inode_data {
- /* The identifier for the inode, the combination of the .st_dev + .st_ino fields of the file */
- ino_t ino;
- dev_t dev;
-
- /* An fd of the inode to watch. The fd is kept open until the next iteration of the loop, so that we can
- * rearrange the priority still until then, as we need the original inode to change the priority as we need to
- * add a watch descriptor to the right inotify for the priority which we can only do if we have a handle to the
- * original inode. We keep a list of all inode_data objects with an open fd in the to_close list (see below) of
- * the sd-event object, so that it is efficient to close everything, before entering the next event loop
- * iteration. */
- int fd;
-
- /* The inotify "watch descriptor" */
- int wd;
-
- /* The combination of the mask of all inotify watches on this inode we manage. This is also the mask that has
- * most recently been set on the watch descriptor. */
- uint32_t combined_mask;
-
- /* All event sources subscribed to this inode */
- LIST_HEAD(sd_event_source, event_sources);
-
- /* The inotify object we watch this inode with */
- struct inotify_data *inotify_data;
-
- /* A linked list of all inode data objects with fds to close (see above) */
- LIST_FIELDS(struct inode_data, to_close);
-};
-
-/* A structure encapsulating an inotify fd */
-struct inotify_data {
- WakeupType wakeup;
-
- /* For each priority we maintain one inotify fd, so that we only have to dequeue a single event per priority at
- * a time */
-
- int fd;
- int64_t priority;
-
- Hashmap *inodes; /* The inode_data structures keyed by dev+ino */
- Hashmap *wd; /* The inode_data structures keyed by the watch descriptor for each */
-
- /* The buffer we read inotify events into */
- union inotify_event_buffer buffer;
- size_t buffer_filled; /* fill level of the buffer */
-
- /* How many event sources are currently marked pending for this inotify. We won't read new events off the
- * inotify fd as long as there are still pending events on the inotify (because we have no strategy of queuing
- * the events locally if they can't be coalesced). */
- unsigned n_pending;
-
- /* A linked list of all inotify objects with data already read, that still need processing. We keep this list
- * to make it efficient to figure out what inotify objects to process data on next. */
- LIST_FIELDS(struct inotify_data, buffered);
-};
-
struct sd_event {
unsigned n_ref;
}
}
+/* Release a per-priority signal_data object: detach it from the event
+ * loop's signal_data hashmap (keyed by priority), close its signalfd
+ * and free it. d == NULL is accepted as a no-op so error/cleanup paths
+ * can call this unconditionally. */
+static void event_free_signal_data(sd_event *e, struct signal_data *d) {
+ assert(e);
+
+ if (!d)
+ return;
+
+ hashmap_remove(e->signal_data, &d->priority);
+ safe_close(d->fd);
+ free(d);
+}
+
static int event_make_signal_data(
sd_event *e,
int sig,
return 0;
fail:
- if (added) {
- d->fd = safe_close(d->fd);
- hashmap_remove(e->signal_data, &d->priority);
- free(d);
- }
+ if (added)
+ event_free_signal_data(e, d);
return r;
}
assert_se(sigdelset(&d->sigset, sig) >= 0);
if (sigisemptyset(&d->sigset)) {
-
/* If all the mask is all-zero we can get rid of the structure */
- hashmap_remove(e->signal_data, &d->priority);
- safe_close(d->fd);
- free(d);
+ event_free_signal_data(e, d);
return;
}
return 1;
}
-static int inode_data_compare(const void *a, const void *b) {
- const struct inode_data *x = a, *y = b;
+static int inode_data_compare(const struct inode_data *x, const struct inode_data *y) {
int r;
assert(x);
return CMP(x->ino, y->ino);
}
-static void inode_data_hash_func(const void *p, struct siphash *state) {
- const struct inode_data *d = p;
-
- assert(p);
+/* Hash an inode_data by its identity pair (.dev, .ino); this must stay
+ * consistent with inode_data_compare(), which orders by the same fields. */
+static void inode_data_hash_func(const struct inode_data *d, struct siphash *state) {
+ assert(d);
siphash24_compress(&d->dev, sizeof(d->dev), state);
siphash24_compress(&d->ino, sizeof(d->ino), state);
}
-const struct hash_ops inode_data_hash_ops = {
- .hash = inode_data_hash_func,
- .compare = inode_data_compare
-};
+/* Generates a static hash_ops table from the typed callbacks above,
+ * replacing the hand-written struct with void* prototypes. */
+DEFINE_PRIVATE_HASH_OPS(inode_data_hash_ops, struct inode_data, inode_data_hash_func, inode_data_compare);
static void event_free_inode_data(
sd_event *e,
_public_ int sd_event_source_get_description(sd_event_source *s, const char **description) {
assert_return(s, -EINVAL);
assert_return(description, -EINVAL);
- assert_return(s->description, -ENXIO);
assert_return(!event_pid_changed(s->event), -ECHILD);
+ /* An unset description is a normal runtime state, not caller misuse:
+ * report it as -ENXIO with a plain check rather than assert_return(). */
+ if (!s->description)
+ return -ENXIO;
+
*description = s->description;
return 0;
}
_public_ int sd_event_source_get_enabled(sd_event_source *s, int *m) {
assert_return(s, -EINVAL);
- assert_return(m, -EINVAL);
assert_return(!event_pid_changed(s->event), -ECHILD);
+ /* The output parameter is now optional: callers may pass NULL and
+ * rely solely on the return value, which is non-zero iff the source
+ * is enabled in some mode (i.e. not SD_EVENT_OFF). */
+ if (m)
+ *m = s->enabled;
+ return s->enabled != SD_EVENT_OFF;
}
_public_ int sd_event_source_set_enabled(sd_event_source *s, int m) {
timeout = 0;
m = epoll_wait(e->epoll_fd, ev_queue, ev_queue_max,
- timeout == (uint64_t) -1 ? -1 : (int) ((timeout + USEC_PER_MSEC - 1) / USEC_PER_MSEC));
+ timeout == (uint64_t) -1 ? -1 : (int) DIV_ROUND_UP(timeout, USEC_PER_MSEC));
if (m < 0) {
if (errno == EINTR) {
e->state = SD_EVENT_PENDING;
return !!s->destroy_callback;
}
+
+/* Returns whether the source is "floating", i.e. pinned by the event
+ * loop itself rather than by a reference the caller holds. */
+_public_ int sd_event_source_get_floating(sd_event_source *s) {
+ assert_return(s, -EINVAL);
+
+ return s->floating;
+}
+
+/* Switch a source between floating and non-floating ownership. When
+ * floating, the source takes a reference on itself and drops its
+ * reference on the event loop; when non-floating, the reverse holds.
+ * Returns 0 if the state was already as requested, 1 on a successful
+ * switch, -ESTALE if the source is no longer attached to a loop. */
+_public_ int sd_event_source_set_floating(sd_event_source *s, int b) {
+ assert_return(s, -EINVAL);
+
+ if (s->floating == !!b)
+ return 0;
+
+ if (!s->event) /* Already disconnected */
+ return -ESTALE;
+
+ s->floating = b;
+
+ /* Acquire the new reference before releasing the old one, so
+ * neither the source nor the loop can be destroyed mid-switch. */
+ if (b) {
+ sd_event_source_ref(s);
+ sd_event_unref(s->event);
+ } else {
+ sd_event_ref(s->event);
+ sd_event_source_unref(s);
+ }
+
+ return 1;
+}