_WAKEUP_TYPE_INVALID = -EINVAL,
} WakeupType;
-struct inode_data;
+/* Alias so the many call sites below can write InodeData instead of "struct inode_data". */
+typedef struct inode_data InodeData;
struct sd_event_source {
WakeupType wakeup;
struct {
sd_event_inotify_handler_t callback;
uint32_t mask;
- struct inode_data *inode_data;
+ InodeData *inode_data;
LIST_FIELDS(sd_event_source, by_inode_data);
} inotify;
struct {
/* An fd of the inode to watch. The fd is kept open until the next iteration of the loop, so that we can
* rearrange the priority still until then, as we need the original inode to change the priority as we need to
* add a watch descriptor to the right inotify for the priority which we can only do if we have a handle to the
- * original inode. We keep a list of all inode_data objects with an open fd in the to_close list (see below) of
+ * original inode. We keep a list of all InodeData objects with an open fd in the to_close list (see below) of
* the sd-event object, so that it is efficient to close everything, before entering the next event loop
* iteration. */
int fd;
struct inotify_data *inotify_data;
/* A linked list of all inode data objects with fds to close (see above) */
- LIST_FIELDS(struct inode_data, to_close);
+ LIST_FIELDS(InodeData, to_close);
};
/* A structure encapsulating an inotify fd */
int fd;
int64_t priority;
- Hashmap *inodes; /* The inode_data structures keyed by dev+ino */
- Hashmap *wd; /* The inode_data structures keyed by the watch descriptor for each */
+ Hashmap *inodes; /* The InodeData structures keyed by dev+ino */
+ Hashmap *wd; /* The InodeData structures keyed by the watch descriptor for each */
/* How many event sources are currently marked pending for this inotify. We won't read new events off the
* inotify fd as long as there are still pending events on the inotify (because we have no strategy of queuing
Hashmap *inotify_data; /* indexed by priority */
/* A list of inode structures that still have an fd open, that we need to close before the next loop iteration */
- LIST_HEAD(struct inode_data, inode_data_to_close_list);
+ LIST_HEAD(InodeData, inode_data_to_close_list);
/* A list of inotify objects that already have events buffered which aren't processed yet */
LIST_HEAD(struct inotify_data, buffered_inotify_data_list);
static thread_local sd_event *default_event = NULL;
static void source_disconnect(sd_event_source *s);
-static void event_gc_inode_data(sd_event *e, struct inode_data *d);
+static void event_gc_inode_data(sd_event *e, InodeData *d);
static sd_event* event_resolve(sd_event *e) {
return e == SD_EVENT_DEFAULT ? default_event : e;
break;
case SOURCE_INOTIFY: {
- struct inode_data *inode_data;
+ InodeData *inode_data;
inode_data = s->inotify.inode_data;
if (inode_data) {
return 1;
}
-static int inode_data_compare(const struct inode_data *x, const struct inode_data *y) {
+/* Compare function for inode_data_hash_ops (see DEFINE_PRIVATE_HASH_OPS below); the
+ * final tie-break is the inode number. Earlier comparison steps are elided from this hunk. */
+static int inode_data_compare(const InodeData *x, const InodeData *y) {
int r;
assert(x);
return CMP(x->ino, y->ino);
}
-static void inode_data_hash_func(const struct inode_data *d, struct siphash *state) {
+/* Hash function paired with inode_data_compare(): folds the record's dev and ino
+ * fields into the siphash state, matching the dev+ino keying of the inodes hashmap. */
+static void inode_data_hash_func(const InodeData *d, struct siphash *state) {
assert(d);
siphash24_compress_typesafe(d->dev, state);
siphash24_compress_typesafe(d->ino, state);
}
-DEFINE_PRIVATE_HASH_OPS(inode_data_hash_ops, struct inode_data, inode_data_hash_func, inode_data_compare);
-
-static void event_free_inode_data(
-        sd_event *e,
-        struct inode_data *d) {
+DEFINE_PRIVATE_HASH_OPS(inode_data_hash_ops, InodeData, inode_data_hash_func, inode_data_compare);
+/* Destructor for an InodeData record; the !d guard below suggests NULL is tolerated
+ * as a no-op (body partly elided from this hunk — confirm against the full source). */
+static void event_free_inode_data(sd_event *e, InodeData *d) {
assert(e);
if (!d)
event_free_inotify_data(e, d);
}
-static void event_gc_inode_data(
-        sd_event *e,
-        struct inode_data *d) {
-
+/* Garbage-collect an InodeData record; forward-declared near the top of the file so
+ * earlier code can call it before this definition. */
+static void event_gc_inode_data(sd_event *e, InodeData *d) {
struct inotify_data *inotify_data;
assert(e);
struct inotify_data *inotify_data,
dev_t dev,
ino_t ino,
- struct inode_data **ret) {
+ InodeData **ret) {
- struct inode_data *d, key;
+ InodeData *d, key;
int r;
assert(e);
assert(inotify_data);
- key = (struct inode_data) {
+ key = (InodeData) {
.ino = ino,
.dev = dev,
};
if (r < 0)
return r;
- d = new(struct inode_data, 1);
+ d = new(InodeData, 1);
if (!d)
return -ENOMEM;
- *d = (struct inode_data) {
+ *d = (InodeData) {
.dev = dev,
.ino = ino,
.wd = -1,
return 1;
}
-static uint32_t inode_data_determine_mask(struct inode_data *d) {
+/* Compute the effective inotify mask to watch this inode with: control bits
+ * (IN_ONESHOT, IN_DONT_FOLLOW, IN_ONLYDIR) are stripped from the combined mask, and
+ * IN_EXCL_UNLINK is applied only while 'excl_unlink' stays true (body elided here). */
+static uint32_t inode_data_determine_mask(InodeData *d) {
bool excl_unlink = true;
uint32_t combined = 0;
return (combined & ~(IN_ONESHOT|IN_DONT_FOLLOW|IN_ONLYDIR|IN_EXCL_UNLINK)) | (excl_unlink ? IN_EXCL_UNLINK : 0);
}
-static int inode_data_realize_watch(sd_event *e, struct inode_data *d) {
+/* Ensure a kernel watch descriptor exists for this inode; the 'combined_mask' local
+ * suggests the mask comes from inode_data_determine_mask() — confirm in full source. */
+static int inode_data_realize_watch(sd_event *e, InodeData *d) {
uint32_t combined_mask;
int wd, r;
_cleanup_close_ int donated_fd = donate ? fd : -EBADF;
_cleanup_(source_freep) sd_event_source *s = NULL;
struct inotify_data *inotify_data = NULL;
- struct inode_data *inode_data = NULL;
+ InodeData *inode_data = NULL;
struct stat st;
int r;
_public_ int sd_event_source_set_priority(sd_event_source *s, int64_t priority) {
bool rm_inotify = false, rm_inode = false;
struct inotify_data *new_inotify_data = NULL;
- struct inode_data *new_inode_data = NULL;
+ InodeData *new_inode_data = NULL;
int r;
assert_return(s, -EINVAL);
return 0;
if (s->type == SOURCE_INOTIFY) {
- struct inode_data *old_inode_data;
+ InodeData *old_inode_data;
assert(s->inotify.inode_data);
old_inode_data = s->inotify.inode_data;
return -EIO;
if (d->buffer.ev.mask & IN_Q_OVERFLOW) {
- struct inode_data *inode_data;
+ InodeData *inode_data;
/* The queue overran, let's pass this event to all event sources connected to this inotify
* object */
return r;
}
} else {
- struct inode_data *inode_data;
+ InodeData *inode_data;
/* Find the inode object for this watch descriptor. If IN_IGNORED is set we also remove it from
* our watch descriptor table. */
}
static void event_close_inode_data_fds(sd_event *e) {
- struct inode_data *d;
+ InodeData *d;
assert(e);