s->child.options == WEXITED;
}
+static bool event_source_is_online(sd_event_source *s) {
+ assert(s);
+ return s->enabled != SD_EVENT_OFF && !s->ratelimited;
+}
+
+static bool event_source_is_offline(sd_event_source *s) {
+ assert(s);
+ return s->enabled == SD_EVENT_OFF || s->ratelimited;
+}
+
static const char* const event_source_type_table[_SOURCE_EVENT_SOURCE_TYPE_MAX] = {
[SOURCE_IO] = "io",
[SOURCE_TIME_REALTIME] = "realtime",
DEFINE_PRIVATE_STRING_TABLE_LOOKUP_TO_STRING(event_source_type, int);
-#define EVENT_SOURCE_IS_TIME(t) IN_SET((t), SOURCE_TIME_REALTIME, SOURCE_TIME_BOOTTIME, SOURCE_TIME_MONOTONIC, SOURCE_TIME_REALTIME_ALARM, SOURCE_TIME_BOOTTIME_ALARM)
+#define EVENT_SOURCE_IS_TIME(t) \
+ IN_SET((t), \
+ SOURCE_TIME_REALTIME, \
+ SOURCE_TIME_BOOTTIME, \
+ SOURCE_TIME_MONOTONIC, \
+ SOURCE_TIME_REALTIME_ALARM, \
+ SOURCE_TIME_BOOTTIME_ALARM)
+
+#define EVENT_SOURCE_CAN_RATE_LIMIT(t) \
+ IN_SET((t), \
+ SOURCE_IO, \
+ SOURCE_TIME_REALTIME, \
+ SOURCE_TIME_BOOTTIME, \
+ SOURCE_TIME_MONOTONIC, \
+ SOURCE_TIME_REALTIME_ALARM, \
+ SOURCE_TIME_BOOTTIME_ALARM, \
+ SOURCE_SIGNAL, \
+ SOURCE_DEFER, \
+ SOURCE_INOTIFY)
struct sd_event {
unsigned n_ref;
Hashmap *signal_data; /* indexed by priority */
Hashmap *child_sources;
- unsigned n_enabled_child_sources;
+ unsigned n_online_child_sources;
Set *post_sources;
if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
return 1;
+ /* Non rate-limited ones first. */
+ r = CMP(!!x->ratelimited, !!y->ratelimited);
+ if (r != 0)
+ return r;
+
/* Lower priority values first */
r = CMP(x->priority, y->priority);
if (r != 0)
if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
return 1;
+ /* Non rate-limited ones first. */
+ r = CMP(!!x->ratelimited, !!y->ratelimited);
+ if (r != 0)
+ return r;
+
/* Move most recently prepared ones last, so that we can stop
* preparing as soon as we hit one that has already been
* prepared in the current iteration */
return CMP(x->priority, y->priority);
}
+static usec_t time_event_source_next(const sd_event_source *s) {
+ assert(s);
+
+ /* We have two kinds of event sources that have an elapse time associated with them: the actual
+ * time-based ones, and the ones for which a rate limit can be in effect (where we want to be
+ * notified once the rate limit time window ends). Let's return the next elapse time depending on
+ * what we are looking at here. */
+
+ if (s->ratelimited) { /* If rate-limited, the next elapse time is when the rate limit window ends */
+ assert(s->rate_limit.begin != 0);
+ assert(s->rate_limit.interval != 0);
+ return usec_add(s->rate_limit.begin, s->rate_limit.interval);
+ }
+
+ /* Otherwise this must be a time event source, if not ratelimited */
+ if (EVENT_SOURCE_IS_TIME(s->type))
+ return s->time.next;
+
+ return USEC_INFINITY;
+}
+
static int earliest_time_prioq_compare(const void *a, const void *b) {
const sd_event_source *x = a, *y = b;
- assert(EVENT_SOURCE_IS_TIME(x->type));
- assert(x->type == y->type);
-
/* Enabled ones first */
if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
return -1;
return 1;
/* Order by time */
- return CMP(x->time.next, y->time.next);
+ return CMP(time_event_source_next(x), time_event_source_next(y));
}
static usec_t time_event_source_latest(const sd_event_source *s) {
- return usec_add(s->time.next, s->time.accuracy);
+ assert(s);
+
+ if (s->ratelimited) { /* For ratelimited stuff the earliest and the latest time shall actually be the
+ * same, as we should avoid stacking additional accuracy slack on top of the
+ * rate limit time window */
+ assert(s->rate_limit.begin != 0);
+ assert(s->rate_limit.interval != 0);
+ return usec_add(s->rate_limit.begin, s->rate_limit.interval);
+ }
+
+ /* Must be a time event source, if not ratelimited */
+ if (EVENT_SOURCE_IS_TIME(s->type))
+ return usec_add(s->time.next, s->time.accuracy);
+
+ return USEC_INFINITY;
}
static int latest_time_prioq_compare(const void *a, const void *b) {
const sd_event_source *x = a, *y = b;
- assert(EVENT_SOURCE_IS_TIME(x->type));
- assert(x->type == y->type);
-
/* Enabled ones first */
if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
return -1;
* and possibly drop the signalfd for it. */
if (sig == SIGCHLD &&
- e->n_enabled_child_sources > 0)
+ e->n_online_child_sources > 0)
return;
if (e->signal_sources &&
e->signal_sources[sig] &&
- e->signal_sources[sig]->enabled != SD_EVENT_OFF)
+ event_source_is_online(e->signal_sources[sig]))
return;
/*
struct clock_data *d;
assert(s);
- assert(EVENT_SOURCE_IS_TIME(s->type));
/* Called whenever the event source's timer ordering properties changed, i.e. time, accuracy,
* pending, enable state. Makes sure the two prioq's are ordered properly again. */
- assert_se(d = event_get_clock_data(s->event, s->type));
+
+ if (s->ratelimited)
+ d = &s->event->monotonic;
+ else {
+ assert(EVENT_SOURCE_IS_TIME(s->type));
+ assert_se(d = event_get_clock_data(s->event, s->type));
+ }
+
prioq_reshuffle(d->earliest, s, &s->earliest_index);
prioq_reshuffle(d->latest, s, &s->latest_index);
d->needs_rearm = true;
case SOURCE_TIME_BOOTTIME:
case SOURCE_TIME_MONOTONIC:
case SOURCE_TIME_REALTIME_ALARM:
- case SOURCE_TIME_BOOTTIME_ALARM: {
- struct clock_data *d;
- assert_se(d = event_get_clock_data(s->event, s->type));
- event_source_time_prioq_remove(s, d);
+ case SOURCE_TIME_BOOTTIME_ALARM:
+ /* Only remove this event source from the time prioqs here if it is not ratelimited. If
+ * it is ratelimited, we'll remove it below, separately. Why? Because the clock used might
+ * differ: ratelimiting always uses CLOCK_MONOTONIC, but timer events might use any clock */
+
+ if (!s->ratelimited) {
+ struct clock_data *d;
+ assert_se(d = event_get_clock_data(s->event, s->type));
+ event_source_time_prioq_remove(s, d);
+ }
+
break;
- }
case SOURCE_SIGNAL:
if (s->signal.sig > 0) {
case SOURCE_CHILD:
if (s->child.pid > 0) {
- if (s->enabled != SD_EVENT_OFF) {
- assert(s->event->n_enabled_child_sources > 0);
- s->event->n_enabled_child_sources--;
+ if (event_source_is_online(s)) {
+ assert(s->event->n_online_child_sources > 0);
+ s->event->n_online_child_sources--;
}
(void) hashmap_remove(s->event->child_sources, PID_TO_PTR(s->child.pid));
if (s->prepare)
prioq_remove(s->event->prepare, s, &s->prepare_index);
+ if (s->ratelimited)
+ event_source_time_prioq_remove(s, &s->event->monotonic);
+
event = TAKE_PTR(s->event);
LIST_REMOVE(sources, event->sources, s);
event->n_sources--;
if (!callback)
callback = child_exit_callback;
- if (e->n_enabled_child_sources == 0) {
+ if (e->n_online_child_sources == 0) {
/* Caller must block SIGCHLD before using us to watch children, even if pidfd is available,
* for compatibility with pre-pidfd and because we don't want to reap the child processes
* ourselves, i.e. call waitid(), and don't want Linux' default internal logic for that to
e->need_process_child = true;
}
- e->n_enabled_child_sources++;
+ e->n_online_child_sources++;
if (ret)
*ret = s;
if (!callback)
callback = child_exit_callback;
- if (e->n_enabled_child_sources == 0) {
+ if (e->n_online_child_sources == 0) {
r = signal_is_blocked(SIGCHLD);
if (r < 0)
return r;
e->need_process_child = true;
}
- e->n_enabled_child_sources++;
+ e->n_online_child_sources++;
if (ret)
*ret = s;
if (s->io.fd == fd)
return 0;
- if (s->enabled == SD_EVENT_OFF) {
+ if (event_source_is_offline(s)) {
s->io.fd = fd;
s->io.registered = false;
} else {
if (r < 0)
return r;
- if (s->enabled != SD_EVENT_OFF) {
+ if (event_source_is_online(s)) {
r = source_io_register(s, s->enabled, events);
if (r < 0)
return r;
event_gc_inode_data(s->event, old_inode_data);
- } else if (s->type == SOURCE_SIGNAL && s->enabled != SD_EVENT_OFF) {
+ } else if (s->type == SOURCE_SIGNAL && event_source_is_online(s)) {
struct signal_data *old, *d;
/* Move us from the signalfd belonging to the old
return s->enabled != SD_EVENT_OFF;
}
-static int event_source_disable(sd_event_source *s) {
+static int event_source_offline(
+ sd_event_source *s,
+ int enabled,
+ bool ratelimited) {
+
+ bool was_offline;
int r;
assert(s);
- assert(s->enabled != SD_EVENT_OFF);
+ assert(enabled == SD_EVENT_OFF || ratelimited);
/* Unset the pending flag when this event source is disabled */
- if (!IN_SET(s->type, SOURCE_DEFER, SOURCE_EXIT)) {
+ if (s->enabled != SD_EVENT_OFF &&
+ enabled == SD_EVENT_OFF &&
+ !IN_SET(s->type, SOURCE_DEFER, SOURCE_EXIT)) {
r = source_set_pending(s, false);
if (r < 0)
return r;
}
- s->enabled = SD_EVENT_OFF;
+ was_offline = event_source_is_offline(s);
+ s->enabled = enabled;
+ s->ratelimited = ratelimited;
switch (s->type) {
break;
case SOURCE_CHILD:
- assert(s->event->n_enabled_child_sources > 0);
- s->event->n_enabled_child_sources--;
+ if (!was_offline) {
+ assert(s->event->n_online_child_sources > 0);
+ s->event->n_online_child_sources--;
+ }
if (EVENT_SOURCE_WATCH_PIDFD(s))
source_child_pidfd_unregister(s);
assert_not_reached("Wut? I shouldn't exist.");
}
- return 0;
+ return 1;
}
-static int event_source_enable(sd_event_source *s, int enable) {
+static int event_source_online(
+ sd_event_source *s,
+ int enabled,
+ bool ratelimited) {
+
+ bool was_online;
int r;
assert(s);
- assert(IN_SET(enable, SD_EVENT_ON, SD_EVENT_ONESHOT));
- assert(s->enabled == SD_EVENT_OFF);
+ assert(enabled != SD_EVENT_OFF || !ratelimited);
/* Unset the pending flag when this event source is enabled */
- if (!IN_SET(s->type, SOURCE_DEFER, SOURCE_EXIT)) {
+ if (s->enabled == SD_EVENT_OFF &&
+ enabled != SD_EVENT_OFF &&
+ !IN_SET(s->type, SOURCE_DEFER, SOURCE_EXIT)) {
r = source_set_pending(s, false);
if (r < 0)
return r;
}
+ /* Are we really ready for onlining? */
+ if (enabled == SD_EVENT_OFF || ratelimited) {
+ /* Nope, we are not ready for onlining, so just update the precise state and exit */
+ s->enabled = enabled;
+ s->ratelimited = ratelimited;
+ return 0;
+ }
+
+ was_online = event_source_is_online(s);
+
switch (s->type) {
case SOURCE_IO:
- r = source_io_register(s, enable, s->io.events);
+ r = source_io_register(s, enabled, s->io.events);
if (r < 0)
return r;
break;
if (EVENT_SOURCE_WATCH_PIDFD(s)) {
/* yes, we have pidfd */
- r = source_child_pidfd_register(s, enable);
+ r = source_child_pidfd_register(s, enabled);
if (r < 0)
return r;
} else {
}
}
- s->event->n_enabled_child_sources++;
-
+ if (!was_online)
+ s->event->n_online_child_sources++;
break;
case SOURCE_TIME_REALTIME:
assert_not_reached("Wut? I shouldn't exist.");
}
- s->enabled = enable;
+ s->enabled = enabled;
+ s->ratelimited = ratelimited;
/* Non-failing operations below */
switch (s->type) {
break;
}
- return 0;
+ return 1;
}
_public_ int sd_event_source_set_enabled(sd_event_source *s, int m) {
return 0;
if (m == SD_EVENT_OFF)
- r = event_source_disable(s);
+ r = event_source_offline(s, m, s->ratelimited);
else {
if (s->enabled != SD_EVENT_OFF) {
/* Switching from "on" to "oneshot" or back? If that's the case, we can take a shortcut, the
return 0;
}
- r = event_source_enable(s, m);
+ r = event_source_online(s, m, s->ratelimited);
}
if (r < 0)
return r;
return ret;
}
+static int event_source_enter_ratelimited(sd_event_source *s) {
+ int r;
+
+ assert(s);
+
+ /* When an event source becomes ratelimited, we place it in the CLOCK_MONOTONIC priority queue, keyed
+ * by the end of the rate limit time window, much as if it were a timer event source. */
+
+ if (s->ratelimited)
+ return 0; /* Already ratelimited, hence this is a NOP */
+
+ /* Make sure we can install a CLOCK_MONOTONIC event further down. */
+ r = setup_clock_data(s->event, &s->event->monotonic, CLOCK_MONOTONIC);
+ if (r < 0)
+ return r;
+
+ /* Timer event sources are already using the earliest/latest queues for the timer scheduling. Let's
+ * first remove them from the prioq appropriate for their own clock, so that we can use the prioq
+ * fields of the event source then for adding it to the CLOCK_MONOTONIC prioq instead. */
+ if (EVENT_SOURCE_IS_TIME(s->type))
+ event_source_time_prioq_remove(s, event_get_clock_data(s->event, s->type));
+
+ /* Now, let's add the event source to the monotonic clock instead */
+ r = event_source_time_prioq_put(s, &s->event->monotonic);
+ if (r < 0)
+ goto fail;
+
+ /* And let's take the event source officially offline */
+ r = event_source_offline(s, s->enabled, /* ratelimited= */ true);
+ if (r < 0) {
+ event_source_time_prioq_remove(s, &s->event->monotonic);
+ goto fail;
+ }
+
+ event_source_pp_prioq_reshuffle(s);
+
+ log_debug("Event source %p (%s) entered rate limit state.", s, strna(s->description));
+ return 0;
+
+fail:
+ /* Reinstall the time event source in its priority queue, as before. This shouldn't fail, since the
+ * queue space for it should already be allocated. */
+ if (EVENT_SOURCE_IS_TIME(s->type))
+ assert_se(event_source_time_prioq_put(s, event_get_clock_data(s->event, s->type)) >= 0);
+
+ return r;
+}
+
+static int event_source_leave_ratelimit(sd_event_source *s) {
+ int r;
+
+ assert(s);
+
+ if (!s->ratelimited)
+ return 0;
+
+ /* Let's take the event source out of the monotonic prioq first. */
+ event_source_time_prioq_remove(s, &s->event->monotonic);
+
+ /* Let's then add the event source to its native clock prioq again — if this is a timer event source */
+ if (EVENT_SOURCE_IS_TIME(s->type)) {
+ r = event_source_time_prioq_put(s, event_get_clock_data(s->event, s->type));
+ if (r < 0)
+ goto fail;
+ }
+
+ /* Let's try to take it online again. */
+ r = event_source_online(s, s->enabled, /* ratelimited= */ false);
+ if (r < 0) {
+ /* Do something roughly sensible when this failed: undo the two prioq ops above */
+ if (EVENT_SOURCE_IS_TIME(s->type))
+ event_source_time_prioq_remove(s, event_get_clock_data(s->event, s->type));
+
+ goto fail;
+ }
+
+ event_source_pp_prioq_reshuffle(s);
+ ratelimit_reset(&s->rate_limit);
+
+ log_debug("Event source %p (%s) left rate limit state.", s, strna(s->description));
+ return 0;
+
+fail:
+ /* Do something somewhat reasonable when we cannot move an event source out of ratelimited mode:
+ * simply put it back in it; maybe we can then process it more successfully next iteration. */
+ assert_se(event_source_time_prioq_put(s, &s->event->monotonic) >= 0);
+
+ return r;
+}
+
static usec_t sleep_between(sd_event *e, usec_t a, usec_t b) {
usec_t c;
assert(e);
d->needs_rearm = false;
a = prioq_peek(d->earliest);
- if (!a || a->enabled == SD_EVENT_OFF || a->time.next == USEC_INFINITY) {
+ if (!a || a->enabled == SD_EVENT_OFF || time_event_source_next(a) == USEC_INFINITY) {
if (d->fd < 0)
return 0;
b = prioq_peek(d->latest);
assert_se(b && b->enabled != SD_EVENT_OFF);
- t = sleep_between(e, a->time.next, time_event_source_latest(b));
+ t = sleep_between(e, time_event_source_next(a), time_event_source_latest(b));
if (d->next == t)
return 0;
for (;;) {
s = prioq_peek(d->earliest);
- if (!s ||
- s->time.next > n ||
- s->enabled == SD_EVENT_OFF ||
- s->pending)
+ if (!s || time_event_source_next(s) > n)
+ break;
+
+ if (s->ratelimited) {
+ /* This is an event source whose ratelimit window has ended. Let's turn it on
+ * again. */
+ assert(s->ratelimited);
+
+ r = event_source_leave_ratelimit(s);
+ if (r < 0)
+ return r;
+
+ continue;
+ }
+
+ if (s->enabled == SD_EVENT_OFF || s->pending)
break;
r = source_set_pending(s, true);
if (s->pending)
continue;
- if (s->enabled == SD_EVENT_OFF)
+ if (event_source_is_offline(s))
continue;
if (s->child.exited)
if (s->pending)
return 0;
- if (s->enabled == SD_EVENT_OFF)
+ if (event_source_is_offline(s))
return 0;
if (!EVENT_SOURCE_WATCH_PIDFD(s))
LIST_FOREACH(inotify.by_inode_data, s, inode_data->event_sources) {
- if (s->enabled == SD_EVENT_OFF)
+ if (event_source_is_offline(s))
continue;
r = source_set_pending(s, true);
* sources if IN_IGNORED or IN_UNMOUNT is set. */
LIST_FOREACH(inotify.by_inode_data, s, inode_data->event_sources) {
- if (s->enabled == SD_EVENT_OFF)
+ if (event_source_is_offline(s))
continue;
if ((d->buffer.ev.mask & (IN_IGNORED|IN_UNMOUNT)) == 0 &&
* callback might have invalidated/disconnected the event source. */
saved_event = sd_event_ref(s->event);
+ /* Check if we hit the ratelimit for this event source; if so, take it offline for the rest of the rate limit window. */
+ assert(!s->ratelimited);
+ if (!ratelimit_below(&s->rate_limit)) {
+ r = event_source_enter_ratelimited(s);
+ if (r < 0)
+ return r;
+
+ return 1;
+ }
+
if (!IN_SET(s->type, SOURCE_DEFER, SOURCE_EXIT)) {
r = source_set_pending(s, false);
if (r < 0)
* post sources as pending */
SET_FOREACH(z, s->event->post_sources) {
- if (z->enabled == SD_EVENT_OFF)
+ if (event_source_is_offline(z))
continue;
r = source_set_pending(z, true);
sd_event_source *s;
s = prioq_peek(e->prepare);
- if (!s || s->prepare_iteration == e->iteration || s->enabled == SD_EVENT_OFF)
+ if (!s || s->prepare_iteration == e->iteration || event_source_is_offline(s))
break;
s->prepare_iteration = e->iteration;
assert(e);
p = prioq_peek(e->exit);
- if (!p || p->enabled == SD_EVENT_OFF) {
+ if (!p || event_source_is_offline(p)) {
e->state = SD_EVENT_FINISHED;
return 0;
}
if (!p)
return NULL;
- if (p->enabled == SD_EVENT_OFF)
+ if (event_source_is_offline(p))
return NULL;
return p;
s->exit_on_failure = b;
return 1;
}
+
+_public_ int sd_event_source_set_ratelimit(sd_event_source *s, uint64_t interval, unsigned burst) {
+ int r;
+
+ assert_return(s, -EINVAL);
+
+ /* Turning on ratelimiting on event source types that don't support it is a loggable offense. Doing
+ * so is a programming error. */
+ assert_return(EVENT_SOURCE_CAN_RATE_LIMIT(s->type), -EDOM);
+
+ /* When ratelimiting is configured we'll always reset the rate limit state first and start fresh,
+ * non-ratelimited. */
+ r = event_source_leave_ratelimit(s);
+ if (r < 0)
+ return r;
+
+ s->rate_limit = (RateLimit) { interval, burst };
+ return 0;
+}
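/* Illustrative usage sketch, not part of this patch: how a client might apply the new rate limit call
 * to an I/O event source. setup_ratelimited_io(), my_io_handler, fd and userdata are hypothetical
 * names; the rest is the public sd-event API introduced or already exposed in sd-event.h. */
#include <sys/epoll.h>
#include <systemd/sd-event.h>

static int setup_ratelimited_io(sd_event *event, int fd, sd_event_io_handler_t my_io_handler, void *userdata) {
        sd_event_source *src = NULL;
        int r;

        r = sd_event_add_io(event, &src, fd, EPOLLIN, my_io_handler, userdata);
        if (r < 0)
                return r;

        /* Allow at most 10 dispatches per 1000000 µs (1 s). Once the burst is exceeded the source is
         * taken offline until the window ends, then brought back online automatically. */
        r = sd_event_source_set_ratelimit(src, 1000000, 10);
        if (r < 0)
                return r;

        /* src stays referenced here; a real caller would keep the reference around, or unref it to
         * leave a "floating" source owned by the event loop. */
        return 0;
}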
+
+_public_ int sd_event_source_get_ratelimit(sd_event_source *s, uint64_t *ret_interval, unsigned *ret_burst) {
+ assert_return(s, -EINVAL);
+
+ /* Querying whether an event source has ratelimiting configured is not a loggable offense, hence
+ * don't use assert_return(). Unlike turning on ratelimiting, it's not really a programming error. */
+ if (!EVENT_SOURCE_CAN_RATE_LIMIT(s->type))
+ return -EDOM;
+
+ if (!ratelimit_configured(&s->rate_limit))
+ return -ENOEXEC;
+
+ if (ret_interval)
+ *ret_interval = s->rate_limit.interval;
+ if (ret_burst)
+ *ret_burst = s->rate_limit.burst;
+
+ return 0;
+}
+
+_public_ int sd_event_source_is_ratelimited(sd_event_source *s) {
+ assert_return(s, -EINVAL);
+
+ if (!EVENT_SOURCE_CAN_RATE_LIMIT(s->type))
+ return false;
+
+ if (!ratelimit_configured(&s->rate_limit))
+ return false;
+
+ return s->ratelimited;
+}
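
/* A second sketch, likewise illustrative and not part of this patch: reading back the configuration
 * and the current throttle state via the new query calls. log_ratelimit_state() is an assumed helper
 * name. */
#include <inttypes.h>
#include <stdio.h>
#include <systemd/sd-event.h>

static void log_ratelimit_state(sd_event_source *src) {
        uint64_t interval;
        unsigned burst;

        /* Returns -EDOM if the type can't be rate limited, -ENOEXEC if no rate limit is configured. */
        if (sd_event_source_get_ratelimit(src, &interval, &burst) >= 0)
                fprintf(stderr, "ratelimit: %u dispatches per %" PRIu64 " usec\n", burst, interval);

        /* Returns > 0 while the source is inside an active rate limit window. */
        if (sd_event_source_is_ratelimited(src) > 0)
                fprintf(stderr, "source is currently throttled\n");
}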