const char *path,
struct file_handle **ret_handle,
int *ret_mnt_id,
+ uint64_t *ret_unique_mnt_id,
int flags) {
size_t n = ORIGINAL_MAX_HANDLE_SZ;
assert(fd >= 0 || fd == AT_FDCWD);
- assert((flags & ~(AT_SYMLINK_FOLLOW|AT_EMPTY_PATH|AT_HANDLE_FID)) == 0);
+ assert((flags & ~(AT_SYMLINK_FOLLOW|AT_EMPTY_PATH|AT_HANDLE_FID|AT_HANDLE_MNT_ID_UNIQUE)) == 0);
/* We need to invoke name_to_handle_at() in a loop, given that it might return EOVERFLOW when the specified
* buffer is too small. Note that in contrast to what the docs might suggest, MAX_HANDLE_SZ is only good as a
for (;;) {
_cleanup_free_ struct file_handle *h = NULL;
- int mnt_id = -1;
+ int mnt_id = -1, r;
+ uint64_t unique_mnt_id = 0;
h = malloc0(offsetof(struct file_handle, f_handle) + n);
if (!h)
h->handle_bytes = n;
- if (name_to_handle_at(fd, path, h, &mnt_id, flags) >= 0) {
+ if (FLAGS_SET(flags, AT_HANDLE_MNT_ID_UNIQUE))
+ /* With AT_HANDLE_MNT_ID_UNIQUE the kernel interprets this pointer as pointing to a uint64_t */
+ r = name_to_handle_at(fd, path, h, (int *) &unique_mnt_id, flags);
+ else
+ r = name_to_handle_at(fd, path, h, &mnt_id, flags);
+ if (r >= 0) {
if (ret_handle)
*ret_handle = TAKE_PTR(h);
+ if (ret_unique_mnt_id)
+ *ret_unique_mnt_id = unique_mnt_id;
if (ret_mnt_id)
*ret_mnt_id = mnt_id;
if (errno != EOVERFLOW)
return -errno;
- if (!ret_handle && ret_mnt_id && mnt_id >= 0) {
+ if (!ret_handle && ((ret_mnt_id && mnt_id >= 0) || (ret_unique_mnt_id && unique_mnt_id > 0))) {
/* As it appears, name_to_handle_at() fills in mnt_id even when it returns EOVERFLOW when the
* buffer is too small, but that's undocumented. Hence, let's make use of this if it appears to
* be filled in, and the caller was interested in only the mount ID and nothing else. */
- *ret_mnt_id = mnt_id;
+ if (ret_unique_mnt_id)
+ *ret_unique_mnt_id = unique_mnt_id;
+ if (ret_mnt_id)
+ *ret_mnt_id = mnt_id;
return 0;
}
* we'll try without the flag, in order to support older kernels that didn't have AT_HANDLE_FID
* (i.e. older than Linux 6.5). */
- r = name_to_handle_at_loop(fd, path, ret_handle, ret_mnt_id, flags | AT_HANDLE_FID);
+ r = name_to_handle_at_loop(fd, path, ret_handle, ret_mnt_id, /* ret_unique_mnt_id= */ NULL, flags | AT_HANDLE_FID);
if (r >= 0 || is_name_to_handle_at_fatal_error(r))
return r;
- return name_to_handle_at_loop(fd, path, ret_handle, ret_mnt_id, flags & ~AT_HANDLE_FID);
+ return name_to_handle_at_loop(fd, path, ret_handle, ret_mnt_id, /* ret_unique_mnt_id= */ NULL, flags & ~AT_HANDLE_FID);
+}
+
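+/* Retrieves a file handle for the specified path and stores a mount ID in *ret_mnt_id, preferring the
+ * 64-bit unique mount ID (AT_HANDLE_MNT_ID_UNIQUE, Linux 6.12+), then statx()'s STATX_MNT_ID_UNIQUE
+ * (Linux 6.8+), and finally the classic mount ID as a last resort. */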
+int name_to_handle_at_try_unique_mntid_fid(
+ int fd,
+ const char *path,
+ struct file_handle **ret_handle,
+ uint64_t *ret_mnt_id,
+ int flags) {
+
+ int mnt_id = -1, r;
+
+ assert(fd >= 0 || fd == AT_FDCWD);
+
+        /* First, issue name_to_handle_at() with AT_HANDLE_MNT_ID_UNIQUE and AT_HANDLE_FID. If this
+         * fails with a non-fatal error, retry without the AT_HANDLE_MNT_ID_UNIQUE flag, since that
+         * flag is only available from Linux 6.12 onwards. */
+ r = name_to_handle_at_loop(fd, path, ret_handle, /* ret_mnt_id= */ NULL, ret_mnt_id, flags | AT_HANDLE_MNT_ID_UNIQUE | AT_HANDLE_FID);
+ if (r >= 0 || is_name_to_handle_at_fatal_error(r))
+ return r;
+
+ flags &= ~AT_HANDLE_MNT_ID_UNIQUE;
+
+        /* Then, issue name_to_handle_at() with AT_HANDLE_FID. If this fails with a non-fatal error,
+         * retry without the flag, in order to support older kernels that don't have AT_HANDLE_FID
+         * (i.e. older than Linux 6.5). */
+
+ r = name_to_handle_at_loop(fd, path, ret_handle, &mnt_id, /* ret_unique_mnt_id= */ NULL, flags | AT_HANDLE_FID);
+ if (r < 0 && is_name_to_handle_at_fatal_error(r))
+ return r;
+ if (r >= 0) {
+ if (ret_mnt_id && mnt_id >= 0) {
+                        /* See if we can do better: statx() can report unique mount IDs since Linux 6.8.
+                         * Only if that doesn't work do we fall back to the non-unique mnt_id returned above. */
+ if (path_get_unique_mnt_id_at(fd, path, ret_mnt_id) < 0)
+ *ret_mnt_id = mnt_id;
+ }
+
+ return r;
+ }
+
+ r = name_to_handle_at_loop(fd, path, ret_handle, &mnt_id, /* ret_unique_mnt_id= */ NULL, flags & ~AT_HANDLE_FID);
+ if (ret_mnt_id && mnt_id >= 0)
+ *ret_mnt_id = mnt_id;
+ return r;
}
int name_to_handle_at_u64(int fd, const char *path, uint64_t *ret) {
/* This provides the first 64bit of the file handle. */
- r = name_to_handle_at_loop(fd, path, &h, /* ret_mnt_id= */ NULL, /* flags= */ 0);
+ r = name_to_handle_at_loop(fd, path, &h, /* ret_mnt_id= */ NULL, /* ret_unique_mnt_id= */ NULL, /* flags= */ 0);
if (r < 0)
return r;
if (h->handle_bytes < sizeof(uint64_t))
return memcmp_nn(a->f_handle, a->handle_bytes, b->f_handle, b->handle_bytes) == 0;
}
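+/* Returns a newly allocated copy of the specified file handle, or NULL if allocation fails. */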
+struct file_handle* file_handle_dup(const struct file_handle *fh) {
+ _cleanup_free_ struct file_handle *fh_copy = NULL;
+
+ assert(fh);
+
+ fh_copy = malloc0(offsetof(struct file_handle, f_handle) + fh->handle_bytes);
+ if (!fh_copy)
+ return NULL;
+
+ fh_copy->handle_bytes = fh->handle_bytes;
+ fh_copy->handle_type = fh->handle_type;
+ memcpy(fh_copy->f_handle, fh->f_handle, fh->handle_bytes);
+
+ return TAKE_PTR(fh_copy);
+}
+
int is_mount_point_at(int dir_fd, const char *path, int flags) {
int r;
return 0;
}
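+/* Queries the unique mount ID (STATX_MNT_ID_UNIQUE, Linux 6.8+) of the mount the specified path resides
+ * on, without triggering automounts or network round-trips. Returns -EOPNOTSUPP if the kernel cannot
+ * provide it. */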
+int path_get_unique_mnt_id_at(int dir_fd, const char *path, uint64_t *ret) {
+ struct statx sx;
+
+ assert(dir_fd >= 0 || dir_fd == AT_FDCWD);
+ assert(ret);
+
+ if (statx(dir_fd,
+ strempty(path),
+ (isempty(path) ? AT_EMPTY_PATH : AT_SYMLINK_NOFOLLOW) |
+ AT_NO_AUTOMOUNT | /* don't trigger automounts, mnt_id is a local concept */
+ AT_STATX_DONT_SYNC, /* don't go to the network, mnt_id is a local concept */
+ STATX_MNT_ID_UNIQUE,
+ &sx) < 0)
+ return -errno;
+
+ if (!FLAGS_SET(sx.stx_mask, STATX_MNT_ID_UNIQUE))
+ return -EOPNOTSUPP;
+
+ *ret = sx.stx_mnt_id;
+ return 0;
+}
+
bool fstype_is_network(const char *fstype) {
const char *x;
#include <sys/mount.h>
#include <unistd.h>
+#include "sd-json.h"
#include "sd-varlink.h"
#include "argv-util.h"
DEFINE_PRIVATE_STRING_TABLE_LOOKUP_WITH_BOOLEAN(mutable_mode, MutableMode, MUTABLE_YES);
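+/* Result codes of merge_subprocess(), and the exit codes the forked child in merge() uses to report
+ * them back to the parent. */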
+enum {
+ MERGE_NOTHING_FOUND,
+ MERGE_MOUNTED,
+ MERGE_SKIP_REFRESH,
+};
+
+enum {
+ MERGE_EXIT_NOTHING_FOUND = 123,
+ MERGE_EXIT_SKIP_REFRESH = 124,
+};
+
static char **arg_hierarchies = NULL; /* "/usr" + "/opt" by default for sysext and /etc by default for confext */
static char *arg_root = NULL;
static sd_json_format_flags_t arg_json_format_flags = SD_JSON_FORMAT_OFF;
static bool arg_legend = true;
static bool arg_force = false;
static bool arg_no_reload = false;
+static bool arg_always_refresh = false;
static int arg_noexec = -1;
static ImagePolicy *arg_image_policy = NULL;
static bool arg_image_policy_set = false; /* Tracks initialization */
return 0;
}
+static int write_origin_file(ImageClass image_class, const char *origin_content, const char *meta_path, const char *hierarchy) {
+ _cleanup_free_ char *f = NULL;
+ int r;
+
+ assert(meta_path);
+
+        /* The origin file is compared against later to determine whether a refresh can be skipped (opt-in, used at service startup). */
+ f = path_join(meta_path, image_class_info[image_class].dot_directory_name, "origin");
+ if (!f)
+ return log_oom();
+
+ _cleanup_free_ char *hierarchy_path = path_join(hierarchy, image_class_info[image_class].dot_directory_name, image_class_info[image_class].short_identifier_plural);
+ if (!hierarchy_path)
+ return log_oom();
+
+ r = write_string_file_full(AT_FDCWD, f, strempty(origin_content),
+ WRITE_STRING_FILE_CREATE|WRITE_STRING_FILE_MKDIR_0755|WRITE_STRING_FILE_LABEL|WRITE_STRING_FILE_AVOID_NEWLINE,
+ /* ts= */ NULL, hierarchy_path);
+ if (r < 0)
+ return log_error_errno(r, "Failed to write origin meta file '%s': %m", f);
+
+ return 0;
+}
+
static int write_dev_file(ImageClass image_class, const char *meta_path, const char *overlay_path, const char *hierarchy) {
_cleanup_free_ char *f = NULL;
struct stat st;
static int store_info_in_meta(
ImageClass image_class,
char **extensions,
+ const char *origin_content,
const char *meta_path,
const char *overlay_path,
const char *work_dir,
if (r < 0)
return r;
+ r = write_origin_file(image_class, origin_content, meta_path, hierarchy);
+ if (r < 0)
+ return r;
+
r = write_dev_file(image_class, meta_path, overlay_path, hierarchy);
if (r < 0)
return r;
int noexec,
char **extensions,
char **paths,
+ const char *origin_content,
const char *meta_path,
const char *overlay_path,
const char *workspace_path) {
if (r < 0)
return r;
- r = store_info_in_meta(image_class, extensions, meta_path, overlay_path, op->work_dir, op->hierarchy, backing);
+ r = store_info_in_meta(image_class, extensions, origin_content, meta_path, overlay_path, op->work_dir, op->hierarchy, backing);
if (r < 0)
return r;
ImageClass image_class,
char **hierarchies,
bool force,
+ bool always_refresh,
int noexec,
Hashmap *images,
const char *workspace) {
_cleanup_free_ char *host_os_release_id = NULL, *host_os_release_id_like = NULL,
*host_os_release_version_id = NULL, *host_os_release_api_level = NULL,
- *filename = NULL;
+ *filename = NULL, *old_origin_content = NULL,
+ *extensions_origin_content = NULL, *root_resolved = NULL;
_cleanup_strv_free_ char **extensions = NULL, **extensions_v = NULL, **paths = NULL;
+ _cleanup_(sd_json_variant_unrefp) sd_json_variant *extensions_origin_entries = NULL,
+ *extensions_origin_json = NULL, *mutable_dir_entries = NULL;
size_t n_extensions = 0;
unsigned n_ignored = 0;
Image *img;
int r;
+ if (!isempty(arg_root)) {
+ r = chase(arg_root, /* root= */ NULL, CHASE_MUST_BE_DIRECTORY, &root_resolved, /* ret_fd= */ NULL);
+ if (r < 0)
+ return log_error_errno(r, "Failed to resolve --root='%s': %m", strempty(arg_root));
+ }
+
assert(path_startswith(workspace, "/run/"));
/* Mark the whole of /run as MS_SLAVE, so that we can mount stuff below it that doesn't show up on
/* Let's now mount all images */
HASHMAP_FOREACH(img, images) {
- _cleanup_free_ char *p = NULL;
+ _cleanup_free_ char *p = NULL, *path_without_root = NULL;
+ _cleanup_(sd_json_variant_unrefp) sd_json_variant *verity_hash = NULL;
p = path_join(workspace, image_class_info[image_class].short_identifier_plural, img->name);
if (!p)
if (r < 0)
return r;
+ if (iovec_is_set(&verity_settings.root_hash)) {
+ r = sd_json_variant_new_hex(&verity_hash, verity_settings.root_hash.iov_base, verity_settings.root_hash.iov_len);
+ if (r < 0)
+ return log_error_errno(r, "Failed to create origin verity entry for '%s': %m", img->name);
+ }
+
r = dissected_image_decrypt(m, arg_root, /* passphrase= */ NULL, &verity_settings, pick_image_policy(img), flags);
if (r < 0)
return r;
if (r < 0)
return log_oom();
+                /* Encode the extension image origin so we can check whether a refresh can be skipped.
+                 * It can also be used to provide more detail in "systemd-sysext status". */
+
+ if (!isempty(arg_root)) {
+                        const char *without_root = path_startswith(img->path, root_resolved);
+ if (!isempty(without_root)) {
+ path_without_root = strjoin("/", without_root);
+ if (!path_without_root)
+ return log_oom();
+ }
+ }
+ if (!path_without_root) {
+ path_without_root = strdup(img->path);
+ if (!path_without_root)
+ return log_oom();
+ }
+
+                /* The verity hash is not available for all extension types. Hence, as a fallback only,
+                 * also include data to detect file/directory replacements through a file handle and a
+                 * unique mount ID (or inode and mount ID as a further fallback).
+                 * A unique mount ID is best because st_dev gets reused too easily, e.g. by a loop device
+                 * mount. For the mount ID to be valid it has to be resolved before we enter the new mount
+                 * namespace, so it cannot be determined here; instead it is provided by the image dissection
+                 * logic and handed over to the subprocess we are in.
+                 * Online modification is not well supported with overlay mounts, so we neither compute a file
+                 * checksum nor recurse into directories to look for touched files. If users want such
+                 * modifications to be picked up, they need to set the --always-refresh=yes flag (as will be
+                 * printed out). */
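+                /* Illustrative (hypothetical) shape of a single origin entry, field values made up:
+                 *   "foo.raw": { "path": "/var/lib/extensions/foo.raw", "verityHash": "abcd…" }
+                 * or, without verity:
+                 *   "bar.raw": { "path": "/var/lib/extensions/bar.raw", "onMountId": 1234,
+                 *                "fileHandle": { "type": 1, "handle": "00ff…" }, "crtime": …, "mtime": … } */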
+
+ _cleanup_(sd_json_variant_unrefp) sd_json_variant *origin_entry = NULL;
+
+                /* We suppress inclusion of weak identifiers when a strong one is available so that, e.g.,
+                 * a confext image stored on /usr gets identified only by the verity hash and not also by
+                 * the mount ID: the mount ID changes when a sysext overlay mount appears, but since the
+                 * verity hash stays the same the confext origin can actually be reused. */
+ r = sd_json_buildo(&origin_entry,
+ SD_JSON_BUILD_PAIR_STRING("path", path_without_root),
+ SD_JSON_BUILD_PAIR_CONDITION(!!verity_hash, "verityHash", SD_JSON_BUILD_VARIANT(verity_hash)),
+ SD_JSON_BUILD_PAIR_CONDITION(!verity_hash, "onMountId", SD_JSON_BUILD_UNSIGNED(img->on_mount_id)),
+ SD_JSON_BUILD_PAIR_CONDITION(!verity_hash && !!img->fh, "fileHandle",
+ SD_JSON_BUILD_OBJECT(SD_JSON_BUILD_PAIR_INTEGER("type", img->fh->handle_type),
+ SD_JSON_BUILD_PAIR_HEX("handle", img->fh->f_handle,
+ img->fh->handle_bytes))),
+ SD_JSON_BUILD_PAIR_CONDITION(!verity_hash && !img->fh, "inode", SD_JSON_BUILD_UNSIGNED(img->inode)),
+ SD_JSON_BUILD_PAIR_CONDITION(!verity_hash, "crtime", SD_JSON_BUILD_UNSIGNED(img->crtime)),
+ SD_JSON_BUILD_PAIR_CONDITION(!verity_hash, "mtime", SD_JSON_BUILD_UNSIGNED(img->mtime)));
+ if (r < 0)
+ return log_error_errno(r, "Failed to create origin entry for '%s': %m", img->name);
+
+ r = sd_json_variant_set_field(&extensions_origin_entries, img->name, origin_entry);
+ if (r < 0)
+ return log_error_errno(r, "Failed to add origin entry for '%s': %m", img->name);
+
n_extensions++;
}
log_info("No suitable extensions found (%u ignored due to incompatible image(s)).", n_ignored);
else
log_info("No extensions found.");
- return 0;
+ return MERGE_NOTHING_FOUND;
}
/* Order by version sort with strverscmp_improved() */
typesafe_qsort(extensions, n_extensions, strverscmp_improvedp);
typesafe_qsort(extensions_v, n_extensions, strverscmp_improvedp);
+ STRV_FOREACH(h, hierarchies) {
+ _cleanup_(overlayfs_paths_freep) OverlayFSPaths *op = NULL;
+ _cleanup_free_ char *f = NULL, *buf = NULL, *resolved = NULL, *mutable_directory_without_root = NULL;
+
+ /* The origin file includes the backing directories for mutable overlays. */
+ r = overlayfs_paths_new(*h, workspace, &op);
+ if (r < 0)
+ return r;
+
+ if (op->resolved_mutable_directory && !isempty(arg_root)) {
+                        const char *without_root = path_startswith(op->resolved_mutable_directory, root_resolved);
+ if (!isempty(without_root)) {
+ mutable_directory_without_root = strjoin("/", without_root);
+ if (!mutable_directory_without_root)
+ return log_oom();
+ }
+ }
+ if (!mutable_directory_without_root && op->resolved_mutable_directory) {
+ mutable_directory_without_root = strdup(op->resolved_mutable_directory);
+ if (!mutable_directory_without_root)
+ return log_oom();
+ }
+
+ if (mutable_directory_without_root) {
+ r = sd_json_variant_set_field_string(&mutable_dir_entries, *h, mutable_directory_without_root);
+ if (r < 0)
+ return log_error_errno(r, "Failed to add mutable directory to origin JSON entry: %m");
+ }
+
+ /* Find existing origin file for comparison. */
+ r = chase(*h, arg_root, CHASE_PREFIX_ROOT|CHASE_NONEXISTENT, &resolved, /* ret_fd= */ NULL);
+ if (r < 0)
+ return log_error_errno(r, "Failed to resolve hierarchy '%s%s': %m", strempty(arg_root), *h);
+
+ f = path_join(resolved, image_class_info[image_class].dot_directory_name, "origin");
+ if (!f)
+ return log_oom();
+
+ r = is_our_mount_point(image_class, resolved);
+ if (r < 0)
+ return r;
+ if (r == 0)
+ continue;
+
+ if (old_origin_content)
+ continue;
+
+                r = read_full_file(f, &buf, /* ret_size= */ NULL);
+ if (r < 0) {
+                        log_debug_errno(r, "Failed to read '%s', continuing search: %m", f);
+ continue;
+ }
+
+ old_origin_content = TAKE_PTR(buf);
+ }
+
+ r = sd_json_buildo(&extensions_origin_json,
+ SD_JSON_BUILD_PAIR_OBJECT("mutable",
+ SD_JSON_BUILD_PAIR_STRING("mode", mutable_mode_to_string(arg_mutable)),
+ SD_JSON_BUILD_PAIR_CONDITION(!!mutable_dir_entries,
+ "mutableDirs",
+ SD_JSON_BUILD_VARIANT(mutable_dir_entries))),
+ SD_JSON_BUILD_PAIR_CONDITION(!isempty(arg_overlayfs_mount_options),
+ "mountOptions",
+ SD_JSON_BUILD_STRING(arg_overlayfs_mount_options)),
+ SD_JSON_BUILD_PAIR_CONDITION(!!extensions_origin_entries,
+ "extensions",
+ SD_JSON_BUILD_VARIANT(extensions_origin_entries)));
+ if (r < 0)
+ return log_error_errno(r, "Failed to create extensions origin JSON object: %m");
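+        /* Illustrative top-level shape of the origin document (values are made up):
+         *   { "mutable": { "mode": "auto", "mutableDirs": { "/usr": "..." } },
+         *     "extensions": { "foo.raw": { ... }, ... } } */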
+
+ r = sd_json_variant_format(extensions_origin_json, SD_JSON_FORMAT_PRETTY|SD_JSON_FORMAT_NEWLINE, &extensions_origin_content);
+ if (r < 0)
+ return log_error_errno(r, "Failed to format extension origin as JSON: %m");
+
+ log_debug("New extension origin entry (unordered):\n%s\n", extensions_origin_content);
+
+ if (old_origin_content) {
+ _cleanup_(sd_json_variant_unrefp) sd_json_variant *old_origin_json = NULL;
+
+ log_debug("Old extension origin entry (unordered):\n%s\n", old_origin_content);
+ r = sd_json_parse(old_origin_content, /* flags= */ 0, &old_origin_json, /* reterr_line= */ NULL, /* reterr_column= */ NULL);
+ if (r < 0)
+ return log_error_errno(r, "Failed to parse existing extension origin content: %m");
+
+ /* This works well with unordered entries. */
+ if (sd_json_variant_equal(extensions_origin_json, old_origin_json)) {
+ if (!always_refresh) {
+                        if (!always_refresh) {
+                                /* This can only happen during refresh, not merge, hence talk about refresh here. */
+                                log_info("Skipping extension refresh because no changes were found, use --always-refresh=yes to force a refresh.");
+ return MERGE_SKIP_REFRESH;
+ }
+
+                        log_debug("No changes found based on origin entry, but continuing as requested by --always-refresh=yes.");
+ } else
+ log_debug("Found changes based on origin entry, continuing with the refresh.");
+ }
+
if (n_extensions == 0) {
assert(arg_mutable != MUTABLE_NO);
log_info("No extensions found, proceeding in mutable mode.");
noexec,
extensions,
paths,
+ extensions_origin_content,
meta_path,
overlay_path,
merge_hierarchy_workspace);
log_info("Merged extensions into '%s'.", resolved);
}
- return 1;
+ return MERGE_MOUNTED;
}
static int merge(ImageClass image_class,
char **hierarchies,
bool force,
bool no_reload,
+ bool always_refresh,
int noexec,
Hashmap *images) {
if (r == 0) {
/* Child with its own mount namespace */
- r = merge_subprocess(image_class, hierarchies, force, noexec, images, "/run/systemd/sysext");
- if (r < 0)
- _exit(EXIT_FAILURE);
+ r = merge_subprocess(image_class, hierarchies, force, always_refresh, noexec, images, "/run/systemd/sysext");
/* Our namespace ceases to exist here, also implicitly detaching all temporary mounts we
* created below /run. Nice! */
- _exit(r > 0 ? EXIT_SUCCESS : 123); /* 123 means: didn't find any extensions */
+ if (r < 0)
+ _exit(EXIT_FAILURE);
+ if (r == MERGE_NOTHING_FOUND)
+ _exit(MERGE_EXIT_NOTHING_FOUND);
+ if (r == MERGE_SKIP_REFRESH)
+ _exit(MERGE_EXIT_SKIP_REFRESH);
+
+ _exit(EXIT_SUCCESS);
}
r = pidref_wait_for_terminate_and_check("(sd-merge)", &pidref, WAIT_LOG_ABNORMAL);
if (r < 0)
return r;
- if (r == 123) /* exit code 123 means: didn't do anything */
- return 0;
+ if (r == MERGE_EXIT_NOTHING_FOUND)
+ return 0; /* Tell refresh to unmount */
+ if (r == MERGE_EXIT_SKIP_REFRESH)
+                return 1; /* Same return value as a successful merge below */
if (r > 0)
return log_error_errno(SYNTHETIC_ERRNO(EPROTO), "Failed to merge hierarchies");
arg_hierarchies,
arg_force,
arg_no_reload,
+ arg_always_refresh,
arg_noexec,
images);
}
const char *class;
int force;
int no_reload;
+ int always_refresh;
int noexec;
} MethodMergeParameters;
static int parse_merge_parameters(sd_varlink *link, sd_json_variant *parameters, MethodMergeParameters *p) {
static const sd_json_dispatch_field dispatch_table[] = {
- { "class", SD_JSON_VARIANT_STRING, sd_json_dispatch_const_string, offsetof(MethodMergeParameters, class), 0 },
- { "force", SD_JSON_VARIANT_BOOLEAN, sd_json_dispatch_tristate, offsetof(MethodMergeParameters, force), 0 },
- { "noReload", SD_JSON_VARIANT_BOOLEAN, sd_json_dispatch_tristate, offsetof(MethodMergeParameters, no_reload), 0 },
- { "noexec", SD_JSON_VARIANT_BOOLEAN, sd_json_dispatch_tristate, offsetof(MethodMergeParameters, noexec), 0 },
+ { "class", SD_JSON_VARIANT_STRING, sd_json_dispatch_const_string, offsetof(MethodMergeParameters, class), 0 },
+ { "force", SD_JSON_VARIANT_BOOLEAN, sd_json_dispatch_tristate, offsetof(MethodMergeParameters, force), 0 },
+ { "noReload", SD_JSON_VARIANT_BOOLEAN, sd_json_dispatch_tristate, offsetof(MethodMergeParameters, no_reload), 0 },
+ { "alwaysRefresh", SD_JSON_VARIANT_BOOLEAN, sd_json_dispatch_tristate, offsetof(MethodMergeParameters, always_refresh), 0 },
+ { "noexec", SD_JSON_VARIANT_BOOLEAN, sd_json_dispatch_tristate, offsetof(MethodMergeParameters, noexec), 0 },
VARLINK_DISPATCH_POLKIT_FIELD,
{}
};
MethodMergeParameters p = {
.force = -1,
.no_reload = -1,
+ .always_refresh = -1,
.noexec = -1,
};
_cleanup_strv_free_ char **hierarchies = NULL;
ImageClass image_class = arg_image_class;
- bool force, no_reload;
+ bool force, no_reload, always_refresh;
int r, noexec;
assert(link);
force = p.force >= 0 ? p.force : arg_force;
no_reload = p.no_reload >= 0 ? p.no_reload : arg_no_reload;
+ always_refresh = p.always_refresh >= 0 ? p.always_refresh : arg_always_refresh;
noexec = p.noexec >= 0 ? p.noexec : arg_noexec;
r = varlink_verify_polkit_async(
if (r > 0)
return sd_varlink_errorbo(link, "io.systemd.sysext.AlreadyMerged", SD_JSON_BUILD_PAIR_STRING("hierarchy", which));
- r = merge(image_class, hierarchies ?: arg_hierarchies, force, no_reload, noexec, images);
+ r = merge(image_class, hierarchies ?: arg_hierarchies, force, no_reload, always_refresh, noexec, images);
if (r < 0)
return r;
char **hierarchies,
bool force,
bool no_reload,
+ bool always_refresh,
int noexec) {
_cleanup_hashmap_free_ Hashmap *images = NULL;
return r;
/* Returns > 0 if it did something, i.e. a new overlayfs is mounted now. When it does so it
- * implicitly unmounts any overlayfs placed there before. Returns == 0 if it did nothing, i.e. no
+         * implicitly unmounts any overlayfs placed there before. It also returns == 1 if no changes
+         * were found to apply, in which case the existing mount stays intact. Returns == 0 if it did nothing, i.e. no
* extension images found. In this case the old overlayfs remains in place if there was one. */
- r = merge(image_class, hierarchies, force, no_reload, noexec, images);
+ r = merge(image_class, hierarchies, force, no_reload, always_refresh, noexec, images);
if (r < 0)
return r;
if (r == 0) /* No images found? Then unmerge. The goal of --refresh is after all that after having
* 1. If an overlayfs was mounted before and no extensions exist anymore, we'll have unmerged things.
*
* 2. If an overlayfs was mounted before, and there are still extensions installed, we'll have
- * unmerged and then merged things again.
+         *    unmerged and then merged things again, or we'll have skipped the refresh because no
+         *    changes were found.
*
* 3. If an overlayfs so far wasn't mounted, and there are extensions installed, we'll have it
* mounted now.
arg_hierarchies,
arg_force,
arg_no_reload,
+ arg_always_refresh,
arg_noexec);
}
MethodMergeParameters p = {
.force = -1,
.no_reload = -1,
+ .always_refresh = -1,
.noexec = -1,
};
Hashmap **polkit_registry = ASSERT_PTR(userdata);
_cleanup_strv_free_ char **hierarchies = NULL;
ImageClass image_class = arg_image_class;
- bool force, no_reload;
+ bool force, no_reload, always_refresh;
int r, noexec;
assert(link);
force = p.force >= 0 ? p.force : arg_force;
no_reload = p.no_reload >= 0 ? p.no_reload : arg_no_reload;
+ always_refresh = p.always_refresh >= 0 ? p.always_refresh : arg_always_refresh;
noexec = p.noexec >= 0 ? p.noexec : arg_noexec;
r = varlink_verify_polkit_async(
if (r <= 0)
return r;
- r = refresh(image_class, hierarchies ?: arg_hierarchies, force, no_reload, noexec);
+ r = refresh(image_class, hierarchies ?: arg_hierarchies, force, no_reload, always_refresh, noexec);
if (r < 0)
return r;
" Generate JSON output\n"
" --force Ignore version incompatibilities\n"
" --no-reload Do not reload the service manager\n"
+ " --always-refresh=yes|no\n"
+         "                          Refresh even if no changes were found\n"
" --image-policy=POLICY\n"
" Specify disk image dissection policy\n"
" --noexec=BOOL Whether to mount extension overlay with noexec\n"
ARG_IMAGE_POLICY,
ARG_NOEXEC,
ARG_NO_RELOAD,
+ ARG_ALWAYS_REFRESH,
ARG_MUTABLE,
};
static const struct option options[] = {
- { "help", no_argument, NULL, 'h' },
- { "version", no_argument, NULL, ARG_VERSION },
- { "no-pager", no_argument, NULL, ARG_NO_PAGER },
- { "no-legend", no_argument, NULL, ARG_NO_LEGEND },
- { "root", required_argument, NULL, ARG_ROOT },
- { "json", required_argument, NULL, ARG_JSON },
- { "force", no_argument, NULL, ARG_FORCE },
- { "image-policy", required_argument, NULL, ARG_IMAGE_POLICY },
- { "noexec", required_argument, NULL, ARG_NOEXEC },
- { "no-reload", no_argument, NULL, ARG_NO_RELOAD },
- { "mutable", required_argument, NULL, ARG_MUTABLE },
+ { "help", no_argument, NULL, 'h' },
+ { "version", no_argument, NULL, ARG_VERSION },
+ { "no-pager", no_argument, NULL, ARG_NO_PAGER },
+ { "no-legend", no_argument, NULL, ARG_NO_LEGEND },
+ { "root", required_argument, NULL, ARG_ROOT },
+ { "json", required_argument, NULL, ARG_JSON },
+ { "force", no_argument, NULL, ARG_FORCE },
+ { "image-policy", required_argument, NULL, ARG_IMAGE_POLICY },
+ { "noexec", required_argument, NULL, ARG_NOEXEC },
+ { "no-reload", no_argument, NULL, ARG_NO_RELOAD },
+ { "always-refresh", required_argument, NULL, ARG_ALWAYS_REFRESH },
+ { "mutable", required_argument, NULL, ARG_MUTABLE },
{}
};
arg_no_reload = true;
break;
+ case ARG_ALWAYS_REFRESH:
+ r = parse_boolean_argument("--always-refresh", optarg, &arg_always_refresh);
+ if (r < 0)
+ return r;
+ break;
+
case ARG_MUTABLE:
if (streq(optarg, "help")) {
if (arg_legend)