#include "clean-ipc.h"
#include "clock-util.h"
#include "core-varlink.h"
+#include "creds-util.h"
#include "dbus-job.h"
#include "dbus-manager.h"
#include "dbus-unit.h"
#include "install.h"
#include "io-util.h"
#include "label.h"
-#include "locale-setup.h"
#include "load-fragment.h"
+#include "locale-setup.h"
#include "log.h"
#include "macro.h"
#include "manager.h"
+#include "manager-dump.h"
#include "memory-util.h"
#include "mkdir.h"
#include "parse-util.h"
}
static int have_ask_password(void) {
- _cleanup_closedir_ DIR *dir;
+ _cleanup_closedir_ DIR *dir = NULL;
struct dirent *de;
dir = opendir("/run/systemd/ask-password");
static void manager_close_ask_password(Manager *m) {
assert(m);
- m->ask_password_event_source = sd_event_source_unref(m->ask_password_event_source);
+ m->ask_password_event_source = sd_event_source_disable_unref(m->ask_password_event_source);
m->ask_password_inotify_fd = safe_close(m->ask_password_inotify_fd);
m->have_ask_password = -EINVAL;
}
static void manager_close_idle_pipe(Manager *m) {
assert(m);
- m->idle_pipe_event_source = sd_event_source_unref(m->idle_pipe_event_source);
+ m->idle_pipe_event_source = sd_event_source_disable_unref(m->idle_pipe_event_source);
safe_close_pair(m->idle_pipe);
safe_close_pair(m->idle_pipe + 2);
if (MANAGER_IS_TEST_RUN(m))
return 0;
- m->time_change_event_source = sd_event_source_unref(m->time_change_event_source);
+ m->time_change_event_source = sd_event_source_disable_unref(m->time_change_event_source);
m->time_change_fd = safe_close(m->time_change_fd);
m->time_change_fd = time_change_fd();
/* Import locale variables LC_*= from configuration */
(void) locale_setup(&m->transient_environment);
} else {
- _cleanup_free_ char *k = NULL;
-
- /* The user manager passes its own environment
- * along to its children, except for $PATH. */
+ /* The user manager passes its own environment along to its children, except for $PATH. */
m->transient_environment = strv_copy(environ);
if (!m->transient_environment)
return log_oom();
- k = strdup("PATH=" DEFAULT_USER_PATH);
- if (!k)
- return log_oom();
-
- r = strv_env_replace(&m->transient_environment, k);
+ r = strv_env_replace_strdup(&m->transient_environment, "PATH=" DEFAULT_USER_PATH);
if (r < 0)
return log_oom();
- TAKE_PTR(k);
}
sanitize_environment(m->transient_environment);
for (ExecDirectoryType i = 0; i < _EXEC_DIRECTORY_TYPE_MAX; i++) {
r = sd_path_lookup(p[i].type, p[i].suffix, &m->prefix[i]);
if (r < 0)
- return r;
+ return log_warning_errno(r, "Failed to lookup %s path: %m",
+ exec_directory_type_to_string(i));
}
return 0;
if (r < 0)
return r;
- e = secure_getenv("CREDENTIALS_DIRECTORY");
- if (e) {
+ r = get_credentials_dir(&e);
+ if (r >= 0) {
m->received_credentials = strdup(e);
if (!m->received_credentials)
return -ENOMEM;
/* First free all secondary fields */
m->notify_socket = mfree(m->notify_socket);
- m->notify_event_source = sd_event_source_unref(m->notify_event_source);
+ m->notify_event_source = sd_event_source_disable_unref(m->notify_event_source);
fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0);
if (fd < 0)
_cleanup_close_ int fd = -1;
/* First free all secondary fields */
- m->cgroups_agent_event_source = sd_event_source_unref(m->cgroups_agent_event_source);
+ m->cgroups_agent_event_source = sd_event_source_disable_unref(m->cgroups_agent_event_source);
fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0);
if (fd < 0)
/* Free all secondary fields */
safe_close_pair(m->user_lookup_fds);
- m->user_lookup_event_source = sd_event_source_unref(m->user_lookup_event_source);
+ m->user_lookup_event_source = sd_event_source_disable_unref(m->user_lookup_event_source);
if (socketpair(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC, 0, m->user_lookup_fds) < 0)
return log_error_errno(errno, "Failed to allocate user lookup socket: %m");
static void unit_gc_mark_good(Unit *u, unsigned gc_marker) {
Unit *other;
- void *v;
u->gc_marker = gc_marker + GC_OFFSET_GOOD;
/* Recursively mark referenced units as GOOD as well */
- HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REFERENCES])
+ UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_REFERENCES)
if (other->gc_marker == gc_marker + GC_OFFSET_UNSURE)
unit_gc_mark_good(other, gc_marker);
}
static void unit_gc_sweep(Unit *u, unsigned gc_marker) {
Unit *other;
bool is_bad;
- void *v;
assert(u);
is_bad = true;
- HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REFERENCED_BY]) {
+ UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_REFERENCED_BY) {
unit_gc_sweep(other, gc_marker);
if (other->gc_marker == gc_marker + GC_OFFSET_GOOD)
is_bad = false;
}
- if (u->refs_by_target) {
- const UnitRef *ref;
+ const UnitRef *ref;
+ LIST_FOREACH(refs_by_target, ref, u->refs_by_target) {
+ unit_gc_sweep(ref->source, gc_marker);
- LIST_FOREACH(refs_by_target, ref, u->refs_by_target) {
- unit_gc_sweep(ref->source, gc_marker);
-
- if (ref->source->gc_marker == gc_marker + GC_OFFSET_GOOD)
- goto good;
+ if (ref->source->gc_marker == gc_marker + GC_OFFSET_GOOD)
+ goto good;
- if (ref->source->gc_marker != gc_marker + GC_OFFSET_BAD)
- is_bad = false;
- }
+ if (ref->source->gc_marker != gc_marker + GC_OFFSET_BAD)
+ is_bad = false;
}
if (is_bad)
while ((u = m->stop_when_unneeded_queue)) {
_cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
- assert(m->stop_when_unneeded_queue);
assert(u->in_stop_when_unneeded_queue);
LIST_REMOVE(stop_when_unneeded_queue, m->stop_when_unneeded_queue, u);
/* If stopping a unit fails continuously we might enter a stop loop here, hence stop acting on the
* service being unnecessary after a while. */
- if (!ratelimit_below(&u->auto_stop_ratelimit)) {
+ if (!ratelimit_below(&u->auto_start_stop_ratelimit)) {
log_unit_warning(u, "Unit not needed anymore, but not stopping since we tried this too often recently.");
continue;
}
return n;
}
+/* Drains the start-when-upheld queue: for each queued unit that is still upheld
+ * (Upholds=) by an active unit, enqueue a start job. Returns the number of
+ * units processed. */
+static unsigned manager_dispatch_start_when_upheld_queue(Manager *m) {
+        unsigned n = 0;
+        Unit *u;
+        int r;
+
+        assert(m);
+
+        while ((u = m->start_when_upheld_queue)) {
+                _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
+                Unit *culprit = NULL;
+
+                assert(u->in_start_when_upheld_queue);
+                LIST_REMOVE(start_when_upheld_queue, m->start_when_upheld_queue, u);
+                u->in_start_when_upheld_queue = false;
+
+                n++;
+
+                if (!unit_is_upheld_by_active(u, &culprit))
+                        continue;
+
+                log_unit_debug(u, "Unit is started because upheld by active unit %s.", culprit->id);
+
+                /* If starting a unit fails continuously we might enter a busy loop here, hence rate-limit
+                 * how often we act on the unit being upheld. */
+
+                if (!ratelimit_below(&u->auto_start_stop_ratelimit)) {
+                        log_unit_warning(u, "Unit needs to be started because active unit %s upholds it, but not starting since we tried this too often recently.", culprit->id);
+                        continue;
+                }
+
+                r = manager_add_job(u->manager, JOB_START, u, JOB_FAIL, NULL, &error, NULL);
+                if (r < 0)
+                        log_unit_warning_errno(u, r, "Failed to enqueue start job, ignoring: %s", bus_error_message(&error, r));
+        }
+
+        return n;
+}
+
+/* Drains the stop-when-bound queue: for each queued unit that is bound
+ * (BindsTo= with StopWhenBound semantics) to a unit that is no longer active,
+ * enqueue a stop job. Returns the number of units processed. */
+static unsigned manager_dispatch_stop_when_bound_queue(Manager *m) {
+        unsigned n = 0;
+        Unit *u;
+        int r;
+
+        assert(m);
+
+        while ((u = m->stop_when_bound_queue)) {
+                _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
+                Unit *culprit = NULL;
+
+                assert(u->in_stop_when_bound_queue);
+                LIST_REMOVE(stop_when_bound_queue, m->stop_when_bound_queue, u);
+                u->in_stop_when_bound_queue = false;
+
+                n++;
+
+                if (!unit_is_bound_by_inactive(u, &culprit))
+                        continue;
+
+                log_unit_debug(u, "Unit is stopped because bound to inactive unit %s.", culprit->id);
+
+                /* If stopping a unit fails continuously we might enter a stop loop here, hence rate-limit
+                 * how often we act on the unit being bound to an inactive unit. */
+
+                if (!ratelimit_below(&u->auto_start_stop_ratelimit)) {
+                        log_unit_warning(u, "Unit needs to be stopped because it is bound to inactive unit %s, but not stopping since we tried this too often recently.", culprit->id);
+                        continue;
+                }
+
+                r = manager_add_job(u->manager, JOB_STOP, u, JOB_REPLACE, NULL, &error, NULL);
+                if (r < 0)
+                        log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
+        }
+
+        return n;
+}
+
static void manager_clear_jobs_and_units(Manager *m) {
Unit *u;
assert(!m->gc_unit_queue);
assert(!m->gc_job_queue);
assert(!m->stop_when_unneeded_queue);
+ assert(!m->start_when_upheld_queue);
+ assert(!m->stop_when_bound_queue);
assert(hashmap_isempty(m->jobs));
assert(hashmap_isempty(m->units));
assert(mode < _JOB_MODE_MAX);
if (mode == JOB_ISOLATE && type != JOB_START)
- return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Isolate is only valid for start.");
+ return sd_bus_error_set(error, SD_BUS_ERROR_INVALID_ARGS, "Isolate is only valid for start.");
if (mode == JOB_ISOLATE && !unit->allow_isolate)
- return sd_bus_error_setf(error, BUS_ERROR_NO_ISOLATION, "Operation refused, unit may not be isolated.");
+ return sd_bus_error_set(error, BUS_ERROR_NO_ISOLATION, "Operation refused, unit may not be isolated.");
if (mode == JOB_TRIGGERING && type != JOB_STOP)
- return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "--job-mode=triggering is only valid for stop.");
+ return sd_bus_error_set(error, SD_BUS_ERROR_INVALID_ARGS, "--job-mode=triggering is only valid for stop.");
log_unit_debug(unit, "Trying to enqueue job %s/%s/%s", unit->id, job_type_to_string(type), job_mode_to_string(mode));
Unit *u;
int r = 0;
- static const UnitDependency deps[] = {
- UNIT_REQUIRED_BY,
- UNIT_REQUISITE_OF,
- UNIT_WANTED_BY,
- UNIT_BOUND_BY
- };
-
assert(m);
while ((u = m->target_deps_queue)) {
+ _cleanup_free_ Unit **targets = NULL;
+ int n_targets;
+
assert(u->in_target_deps_queue);
LIST_REMOVE(target_deps_queue, u->manager->target_deps_queue, u);
u->in_target_deps_queue = false;
- for (size_t k = 0; k < ELEMENTSOF(deps); k++) {
- Unit *target;
- void *v;
+ /* Take an "atomic" snapshot of dependencies here, as the call below will likely modify the
+ * dependencies, and we can't have it that hash tables we iterate through are modified while
+ * we are iterating through them. */
+ n_targets = unit_get_dependency_array(u, UNIT_ATOM_DEFAULT_TARGET_DEPENDENCIES, &targets);
+ if (n_targets < 0)
+ return n_targets;
- HASHMAP_FOREACH_KEY(v, target, u->dependencies[deps[k]]) {
- r = unit_add_default_target_dependency(u, target);
- if (r < 0)
- return r;
- }
+ for (int i = 0; i < n_targets; i++) {
+ r = unit_add_default_target_dependency(u, targets[i]);
+ if (r < 0)
+ return r;
}
}
return 0;
}
-void manager_dump_jobs(Manager *s, FILE *f, const char *prefix) {
- Job *j;
-
- assert(s);
- assert(f);
-
- HASHMAP_FOREACH(j, s->jobs)
- job_dump(j, f, prefix);
-}
-
-void manager_dump_units(Manager *s, FILE *f, const char *prefix) {
- Unit *u;
- const char *t;
-
- assert(s);
- assert(f);
-
- HASHMAP_FOREACH_KEY(u, t, s->units)
- if (u->id == t)
- unit_dump(u, f, prefix);
-}
-
-void manager_dump(Manager *m, FILE *f, const char *prefix) {
- assert(m);
- assert(f);
-
- for (ManagerTimestamp q = 0; q < _MANAGER_TIMESTAMP_MAX; q++) {
- const dual_timestamp *t = m->timestamps + q;
- char buf[CONST_MAX(FORMAT_TIMESPAN_MAX, FORMAT_TIMESTAMP_MAX)];
-
- if (dual_timestamp_is_set(t))
- fprintf(f, "%sTimestamp %s: %s\n",
- strempty(prefix),
- manager_timestamp_to_string(q),
- timestamp_is_set(t->realtime) ? format_timestamp(buf, sizeof buf, t->realtime) :
- format_timespan(buf, sizeof buf, t->monotonic, 1));
- }
-
- manager_dump_units(m, f, prefix);
- manager_dump_jobs(m, f, prefix);
-}
-
-int manager_get_dump_string(Manager *m, char **ret) {
- _cleanup_free_ char *dump = NULL;
- _cleanup_fclose_ FILE *f = NULL;
- size_t size;
- int r;
-
- assert(m);
- assert(ret);
-
- f = open_memstream_unlocked(&dump, &size);
- if (!f)
- return -errno;
-
- manager_dump(m, f, NULL);
-
- r = fflush_and_check(f);
- if (r < 0)
- return r;
-
- f = safe_fclose(f);
-
- *ret = TAKE_PTR(dump);
-
- return 0;
-}
-
void manager_clear_jobs(Manager *m) {
Job *j;
/* When we are reloading, let's not wait with generating signals, since we need to exit the manager as quickly
* as we can. There's no point in throttling generation of signals in that case. */
if (MANAGER_IS_RELOADING(m) || m->send_reloading_done || m->pending_reload_message)
- budget = (unsigned) -1; /* infinite budget in this case */
+ budget = UINT_MAX; /* infinite budget in this case */
else {
/* Anything to do at all? */
if (!m->dbus_unit_queue && !m->dbus_job_queue)
bus_unit_send_change_signal(u);
n++;
- if (budget != (unsigned) -1)
+ if (budget != UINT_MAX)
budget--;
}
bus_job_send_change_signal(j);
n++;
- if (budget != (unsigned) -1)
+ if (budget != UINT_MAX)
budget--;
}
if (manager_dispatch_cgroup_realize_queue(m) > 0)
continue;
+ if (manager_dispatch_start_when_upheld_queue(m) > 0)
+ continue;
+
+ if (manager_dispatch_stop_when_bound_queue(m) > 0)
+ continue;
+
if (manager_dispatch_stop_when_unneeded_queue(m) > 0)
continue;
#define DESTROY_IPC_FLAG (UINT32_C(1) << 31)
static void manager_serialize_uid_refs_internal(
- Manager *m,
FILE *f,
- Hashmap **uid_refs,
+ Hashmap *uid_refs,
const char *field_name) {
void *p, *k;
- assert(m);
assert(f);
- assert(uid_refs);
assert(field_name);
/* Serialize the UID reference table. Or actually, just the IPC destruction flag of it, as
* the actual counter of it is better rebuild after a reload/reexec. */
- HASHMAP_FOREACH_KEY(p, k, *uid_refs) {
+ HASHMAP_FOREACH_KEY(p, k, uid_refs) {
uint32_t c;
uid_t uid;
}
static void manager_serialize_uid_refs(Manager *m, FILE *f) {
- manager_serialize_uid_refs_internal(m, f, &m->uid_refs, "destroy-ipc-uid");
+ manager_serialize_uid_refs_internal(f, m->uid_refs, "destroy-ipc-uid");
}
static void manager_serialize_gid_refs(Manager *m, FILE *f) {
- manager_serialize_uid_refs_internal(m, f, &m->gid_refs, "destroy-ipc-gid");
+ manager_serialize_uid_refs_internal(f, m->gid_refs, "destroy-ipc-gid");
}
int manager_serialize(
if (u->id != t)
continue;
- /* Start marker */
- fputs(u->id, f);
- fputc('\n', f);
-
- r = unit_serialize(u, f, fds, !switching_root);
+ r = unit_serialize(u, f, fds, switching_root);
if (r < 0)
return r;
}
}
static void manager_deserialize_uid_refs_one_internal(
- Manager *m,
Hashmap** uid_refs,
const char *value) {
uint32_t c;
int r;
- assert(m);
assert(uid_refs);
assert(value);
r = parse_uid(value, &uid);
if (r < 0 || uid == 0) {
- log_debug("Unable to parse UID reference serialization: " UID_FMT, uid);
+ log_debug("Unable to parse UID/GID reference serialization: " UID_FMT, uid);
return;
}
- r = hashmap_ensure_allocated(uid_refs, &trivial_hash_ops);
- if (r < 0) {
+ if (hashmap_ensure_allocated(uid_refs, &trivial_hash_ops) < 0) {
log_oom();
return;
}
r = hashmap_replace(*uid_refs, UID_TO_PTR(uid), UINT32_TO_PTR(c));
if (r < 0) {
- log_debug_errno(r, "Failed to add UID reference entry: %m");
+ log_debug_errno(r, "Failed to add UID/GID reference entry: %m");
return;
}
}
static void manager_deserialize_uid_refs_one(Manager *m, const char *value) {
- manager_deserialize_uid_refs_one_internal(m, &m->uid_refs, value);
+ manager_deserialize_uid_refs_one_internal(&m->uid_refs, value);
}
static void manager_deserialize_gid_refs_one(Manager *m, const char *value) {
- manager_deserialize_uid_refs_one_internal(m, &m->gid_refs, value);
+ manager_deserialize_uid_refs_one_internal(&m->gid_refs, value);
}
int manager_deserialize(Manager *m, FILE *f, FDSet *fds) {
if (safe_atoi(val, &fd) < 0 || fd < 0 || !fdset_contains(fds, fd))
log_notice("Failed to parse notify fd, ignoring: \"%s\"", val);
else {
- m->notify_event_source = sd_event_source_unref(m->notify_event_source);
+ m->notify_event_source = sd_event_source_disable_unref(m->notify_event_source);
safe_close(m->notify_fd);
m->notify_fd = fdset_remove(fds, fd);
}
if (safe_atoi(val, &fd) < 0 || fd < 0 || !fdset_contains(fds, fd))
log_notice("Failed to parse cgroups agent fd, ignoring.: %s", val);
else {
- m->cgroups_agent_event_source = sd_event_source_unref(m->cgroups_agent_event_source);
+ m->cgroups_agent_event_source = sd_event_source_disable_unref(m->cgroups_agent_event_source);
safe_close(m->cgroups_agent_fd);
m->cgroups_agent_fd = fdset_remove(fds, fd);
}
if (sscanf(val, "%i %i", &fd0, &fd1) != 2 || fd0 < 0 || fd1 < 0 || fd0 == fd1 || !fdset_contains(fds, fd0) || !fdset_contains(fds, fd1))
log_notice("Failed to parse user lookup fd, ignoring: %s", val);
else {
- m->user_lookup_event_source = sd_event_source_unref(m->user_lookup_event_source);
+ m->user_lookup_event_source = sd_event_source_disable_unref(m->user_lookup_event_source);
safe_close_pair(m->user_lookup_fds);
m->user_lookup_fds[0] = fdset_remove(fds, fd0);
m->user_lookup_fds[1] = fdset_remove(fds, fd1);
/* Start by flushing out all jobs and units, all generated units, all runtime environments, all dynamic users
* and everything else that is worth flushing out. We'll get it all back from the serialization — if we need
- * it.*/
+ * it. */
manager_clear_jobs_and_units(m);
lookup_paths_flush_generator(&m->lookup_paths);
if (dual_timestamp_is_set(&m->timestamps[MANAGER_TIMESTAMP_INITRD])) {
- /* The initrd case on bare-metal*/
+ /* The initrd case on bare-metal */
kernel_usec = m->timestamps[MANAGER_TIMESTAMP_INITRD].monotonic - m->timestamps[MANAGER_TIMESTAMP_KERNEL].monotonic;
initrd_usec = m->timestamps[MANAGER_TIMESTAMP_USERSPACE].monotonic - m->timestamps[MANAGER_TIMESTAMP_INITRD].monotonic;
format_timespan(userspace, sizeof(userspace), userspace_usec, USEC_PER_MSEC),
format_timespan(sum, sizeof(sum), total_usec, USEC_PER_MSEC)));
} else {
- /* The initrd-less case on bare-metal*/
+ /* The initrd-less case on bare-metal */
kernel_usec = m->timestamps[MANAGER_TIMESTAMP_USERSPACE].monotonic - m->timestamps[MANAGER_TIMESTAMP_KERNEL].monotonic;
initrd_usec = 0;
va_end(ap);
}
-Set *manager_get_units_requiring_mounts_for(Manager *m, const char *path) {
- char p[strlen(path)+1];
-
+Set* manager_get_units_requiring_mounts_for(Manager *m, const char *path) {
assert(m);
assert(path);
- strcpy(p, path);
- path_simplify(p, false);
+ if (path_equal(path, "/"))
+ path = "";
- return hashmap_get(m->units_requiring_mounts_for, streq(p, "/") ? "" : p);
+ return hashmap_get(m->units_requiring_mounts_for, path);
}
int manager_update_failed_units(Manager *m, Unit *u, bool failed) {
}
static void manager_unref_uid_internal(
- Manager *m,
- Hashmap **uid_refs,
+ Hashmap *uid_refs,
uid_t uid,
bool destroy_now,
int (*_clean_ipc)(uid_t uid)) {
uint32_t c, n;
- assert(m);
- assert(uid_refs);
assert(uid_is_valid(uid));
assert(_clean_ipc);
if (uid == 0) /* We don't keep track of root, and will never destroy it */
return;
- c = PTR_TO_UINT32(hashmap_get(*uid_refs, UID_TO_PTR(uid)));
+ c = PTR_TO_UINT32(hashmap_get(uid_refs, UID_TO_PTR(uid)));
n = c & ~DESTROY_IPC_FLAG;
assert(n > 0);
n--;
if (destroy_now && n == 0) {
- hashmap_remove(*uid_refs, UID_TO_PTR(uid));
+ hashmap_remove(uid_refs, UID_TO_PTR(uid));
if (c & DESTROY_IPC_FLAG) {
log_debug("%s " UID_FMT " is no longer referenced, cleaning up its IPC.",
}
} else {
c = n | (c & DESTROY_IPC_FLAG);
- assert_se(hashmap_update(*uid_refs, UID_TO_PTR(uid), UINT32_TO_PTR(c)) >= 0);
+ assert_se(hashmap_update(uid_refs, UID_TO_PTR(uid), UINT32_TO_PTR(c)) >= 0);
}
}
void manager_unref_uid(Manager *m, uid_t uid, bool destroy_now) {
- manager_unref_uid_internal(m, &m->uid_refs, uid, destroy_now, clean_ipc_by_uid);
+ manager_unref_uid_internal(m->uid_refs, uid, destroy_now, clean_ipc_by_uid);
}
void manager_unref_gid(Manager *m, gid_t gid, bool destroy_now) {
- manager_unref_uid_internal(m, &m->gid_refs, (uid_t) gid, destroy_now, clean_ipc_by_gid);
+ manager_unref_uid_internal(m->gid_refs, (uid_t) gid, destroy_now, clean_ipc_by_gid);
}
static int manager_ref_uid_internal(
- Manager *m,
Hashmap **uid_refs,
uid_t uid,
bool clean_ipc) {
uint32_t c, n;
int r;
- assert(m);
assert(uid_refs);
assert(uid_is_valid(uid));
}
int manager_ref_uid(Manager *m, uid_t uid, bool clean_ipc) {
- return manager_ref_uid_internal(m, &m->uid_refs, uid, clean_ipc);
+ return manager_ref_uid_internal(&m->uid_refs, uid, clean_ipc);
}
int manager_ref_gid(Manager *m, gid_t gid, bool clean_ipc) {
- return manager_ref_uid_internal(m, &m->gid_refs, (uid_t) gid, clean_ipc);
+ return manager_ref_uid_internal(&m->gid_refs, (uid_t) gid, clean_ipc);
}
static void manager_vacuum_uid_refs_internal(
- Manager *m,
- Hashmap **uid_refs,
+ Hashmap *uid_refs,
int (*_clean_ipc)(uid_t uid)) {
void *p, *k;
- assert(m);
- assert(uid_refs);
assert(_clean_ipc);
- HASHMAP_FOREACH_KEY(p, k, *uid_refs) {
+ HASHMAP_FOREACH_KEY(p, k, uid_refs) {
uint32_t c, n;
uid_t uid;
(void) _clean_ipc(uid);
}
- assert_se(hashmap_remove(*uid_refs, k) == p);
+ assert_se(hashmap_remove(uid_refs, k) == p);
}
}
static void manager_vacuum_uid_refs(Manager *m) {
- manager_vacuum_uid_refs_internal(m, &m->uid_refs, clean_ipc_by_uid);
+ manager_vacuum_uid_refs_internal(m->uid_refs, clean_ipc_by_uid);
}
static void manager_vacuum_gid_refs(Manager *m) {
- manager_vacuum_uid_refs_internal(m, &m->gid_refs, clean_ipc_by_gid);
+ manager_vacuum_uid_refs_internal(m->gid_refs, clean_ipc_by_gid);
}
static void manager_vacuum(Manager *m) {
buf = new(char, sizeof("split-usr:"
"cgroups-missing:"
+                   "cgroupsv1:"
"local-hwclock:"
"var-run-bad:"
"overflowuid-not-65534:"
if (access("/proc/cgroups", F_OK) < 0)
e = stpcpy(e, "cgroups-missing:");
+ if (cg_all_unified() == 0)
+ e = stpcpy(e, "cgroupsv1:");
+
if (clock_is_localtime(NULL) > 0)
e = stpcpy(e, "local-hwclock:");