/*
 * Source: git.ipfire.org mirror of thirdparty/systemd.git
 * File:   src/nspawn/nspawn-patch-uid.c
 * Blob:   edb87ecb1d97e1503ab1fb15ecb8d86918b453f3
 */
/* SPDX-License-Identifier: LGPL-2.1+ */
/***
  Copyright 2016 Lennart Poettering
***/

#include <errno.h>
#include <fcntl.h>
#include <linux/magic.h>
#include <sys/stat.h>
#include <sys/statvfs.h>
#include <sys/vfs.h>
#include <unistd.h>

#if HAVE_ACL
#include <acl/libacl.h>
#include <stdbool.h>
#include <sys/acl.h>
#endif

#include "acl-util.h"
#include "dirent-util.h"
#include "fd-util.h"
#include "missing.h"
#include "nspawn-def.h"
#include "nspawn-patch-uid.h"
#include "stat-util.h"
#include "stdio-util.h"
#include "string-util.h"
#include "user-util.h"
#if HAVE_ACL
/* Reads the ACL of type 'type' from 'fd' (or, if 'name' is non-NULL, from the
 * entry 'name' below the directory 'fd') and returns it in *ret. The caller
 * owns the returned acl_t and must acl_free() it. Returns 0 on success,
 * negative errno on failure. Symlinks are never followed. */
static int get_acl(int fd, const char *name, acl_type_t type, acl_t *ret) {
        char procfs_path[STRLEN("/proc/self/fd/") + DECIMAL_STR_MAX(int) + 1];
        acl_t acl;

        assert(fd >= 0);
        assert(ret);

        if (name) {
                _cleanup_close_ int child_fd = -1;

                /* Open with O_PATH|O_NOFOLLOW so we can address the inode via
                 * /proc/self/fd/ without following symlinks. */
                child_fd = openat(fd, name, O_PATH|O_CLOEXEC|O_NOFOLLOW);
                if (child_fd < 0)
                        return -errno;

                xsprintf(procfs_path, "/proc/self/fd/%i", child_fd);
                acl = acl_get_file(procfs_path, type);
        } else if (type == ACL_TYPE_ACCESS)
                /* The access ACL can be read directly from the fd. */
                acl = acl_get_fd(fd);
        else {
                /* Default ACLs have no fd-based API, go via /proc/self/fd/. */
                xsprintf(procfs_path, "/proc/self/fd/%i", fd);
                acl = acl_get_file(procfs_path, type);
        }
        if (!acl)
                return -errno;

        *ret = acl;
        return 0;
}
#endif
#if HAVE_ACL
/* Writes the ACL 'acl' of type 'type' to 'fd' (or, if 'name' is non-NULL, to
 * the entry 'name' below the directory 'fd'). Counterpart of get_acl().
 * Returns 0 on success, negative errno on failure. Symlinks are never
 * followed. */
static int set_acl(int fd, const char *name, acl_type_t type, acl_t acl) {
        char procfs_path[STRLEN("/proc/self/fd/") + DECIMAL_STR_MAX(int) + 1];
        int r;

        assert(fd >= 0);
        assert(acl);

        if (name) {
                _cleanup_close_ int child_fd = -1;

                /* Same O_PATH trick as in get_acl(): pin the inode, then
                 * address it via /proc/self/fd/ to avoid following symlinks. */
                child_fd = openat(fd, name, O_PATH|O_CLOEXEC|O_NOFOLLOW);
                if (child_fd < 0)
                        return -errno;

                xsprintf(procfs_path, "/proc/self/fd/%i", child_fd);
                r = acl_set_file(procfs_path, type, acl);
        } else if (type == ACL_TYPE_ACCESS)
                r = acl_set_fd(fd, acl);
        else {
                /* Default ACLs have no fd-based API, go via /proc/self/fd/. */
                xsprintf(procfs_path, "/proc/self/fd/%i", fd);
                r = acl_set_file(procfs_path, type, acl);
        }
        if (r < 0)
                return -errno;

        return 0;
}
#endif
#if HAVE_ACL
/* Rewrites every ACL_USER/ACL_GROUP qualifier in 'acl' so that its upper
 * 16 bits are replaced by 'shift', keeping the lower 16 bits. If no entry
 * needed changing, *ret is left NULL; otherwise *ret receives a newly
 * allocated, shifted copy of the ACL (caller must acl_free() it).
 * Returns 0 on success, negative errno/-EINVAL on failure. */
static int shift_acl(acl_t acl, uid_t shift, acl_t *ret) {
        _cleanup_(acl_freep) acl_t copy = NULL;
        acl_entry_t i;
        int r;

        assert(acl);
        assert(ret);

        r = acl_get_entry(acl, ACL_FIRST_ENTRY, &i);
        if (r < 0)
                return -errno;
        while (r > 0) {
                uid_t *old_uid, new_uid;
                bool modify = false;
                acl_tag_t tag;

                if (acl_get_tag_type(i, &tag) < 0)
                        return -errno;

                if (IN_SET(tag, ACL_USER, ACL_GROUP)) {

                        /* We don't distinguish here between uid_t and gid_t, let's make sure the compiler checks that
                         * this is actually OK */
                        assert_cc(sizeof(uid_t) == sizeof(gid_t));

                        old_uid = acl_get_qualifier(i);
                        if (!old_uid)
                                return -errno;

                        /* Keep the lower 16 bit (the in-container ID), substitute the container shift above. */
                        new_uid = shift | (*old_uid & UINT32_C(0xFFFF));
                        if (!uid_is_valid(new_uid))
                                return -EINVAL;

                        modify = new_uid != *old_uid;
                        if (modify && !copy) {
                                int n;

                                /* There's no copy of the ACL yet? if so, let's create one, and start the loop from the
                                 * beginning, so that we copy all entries, starting from the first, this time. */

                                n = acl_entries(acl);
                                if (n < 0)
                                        return -errno;

                                copy = acl_init(n);
                                if (!copy)
                                        return -errno;

                                /* Seek back to the beginning */
                                r = acl_get_entry(acl, ACL_FIRST_ENTRY, &i);
                                if (r < 0)
                                        return -errno;
                                continue;
                        }
                }

                if (copy) {
                        acl_entry_t new_entry;

                        if (acl_create_entry(&copy, &new_entry) < 0)
                                return -errno;

                        if (acl_copy_entry(new_entry, i) < 0)
                                return -errno;

                        if (modify)
                                if (acl_set_qualifier(new_entry, &new_uid) < 0)
                                        return -errno;
                }

                r = acl_get_entry(acl, ACL_NEXT_ENTRY, &i);
                if (r < 0)
                        return -errno;
        }

        *ret = TAKE_PTR(copy);

        return 0;
}
#endif
#if HAVE_ACL
/* Shifts the access ACL (and, for directories, also the default ACL) of the
 * object addressed by fd/name into the UID range indicated by 'shift'.
 * Returns > 0 if anything was changed, 0 if nothing had to change (or ACLs
 * are unsupported here), negative errno on failure. */
static int patch_acls(int fd, const char *name, const struct stat *st, uid_t shift) {
        _cleanup_(acl_freep) acl_t acl = NULL, shifted = NULL;
        bool changed = false;
        int r;

        assert(fd >= 0);
        assert(st);

        /* ACLs are not supported on symlinks, there's no point in trying */
        if (S_ISLNK(st->st_mode))
                return 0;

        r = get_acl(fd, name, ACL_TYPE_ACCESS, &acl);
        if (r == -EOPNOTSUPP)
                return 0;
        if (r < 0)
                return r;

        r = shift_acl(acl, shift, &shifted);
        if (r < 0)
                return r;
        if (shifted) {
                r = set_acl(fd, name, ACL_TYPE_ACCESS, shifted);
                if (r < 0)
                        return r;

                changed = true;
        }

        if (S_ISDIR(st->st_mode)) {
                /* Directories also carry a default ACL; process it the same way.
                 * Free the access-ACL objects first so the cleanup handlers don't
                 * double-free, then reuse the variables. */
                acl_free(acl);
                acl_free(shifted);

                acl = shifted = NULL;

                r = get_acl(fd, name, ACL_TYPE_DEFAULT, &acl);
                if (r < 0)
                        return r;

                r = shift_acl(acl, shift, &shifted);
                if (r < 0)
                        return r;
                if (shifted) {
                        r = set_acl(fd, name, ACL_TYPE_DEFAULT, shifted);
                        if (r < 0)
                                return r;

                        changed = true;
                }
        }

        return changed;
}
#endif
224 static int patch_acls(int fd
, const char *name
, const struct stat
*st
, uid_t shift
) {
230 static int patch_fd(int fd
, const char *name
, const struct stat
*st
, uid_t shift
) {
233 bool changed
= false;
239 new_uid
= shift
| (st
->st_uid
& UINT32_C(0xFFFF));
240 new_gid
= (gid_t
) shift
| (st
->st_gid
& UINT32_C(0xFFFF));
242 if (!uid_is_valid(new_uid
) || !gid_is_valid(new_gid
))
245 if (st
->st_uid
!= new_uid
|| st
->st_gid
!= new_gid
) {
247 r
= fchownat(fd
, name
, new_uid
, new_gid
, AT_SYMLINK_NOFOLLOW
);
249 r
= fchown(fd
, new_uid
, new_gid
);
253 /* The Linux kernel alters the mode in some cases of chown(). Let's undo this. */
255 if (!S_ISLNK(st
->st_mode
))
256 r
= fchmodat(fd
, name
, st
->st_mode
, 0);
257 else /* AT_SYMLINK_NOFOLLOW is not available for fchmodat() */
260 r
= fchmod(fd
, st
->st_mode
);
267 r
= patch_acls(fd
, name
, st
, shift
);
271 return r
> 0 || changed
;
275 * Check if the filesystem is fully compatible with user namespaces or
276 * UID/GID patching. Some filesystems in this list can be fully mounted inside
277 * user namespaces, however their inodes may relate to host resources or only
278 * valid in the global user namespace, therefore no patching should be applied.
280 static int is_fs_fully_userns_compatible(const struct statfs
*sfs
) {
284 return F_TYPE_EQUAL(sfs
->f_type
, BINFMTFS_MAGIC
) ||
285 F_TYPE_EQUAL(sfs
->f_type
, CGROUP_SUPER_MAGIC
) ||
286 F_TYPE_EQUAL(sfs
->f_type
, CGROUP2_SUPER_MAGIC
) ||
287 F_TYPE_EQUAL(sfs
->f_type
, DEBUGFS_MAGIC
) ||
288 F_TYPE_EQUAL(sfs
->f_type
, DEVPTS_SUPER_MAGIC
) ||
289 F_TYPE_EQUAL(sfs
->f_type
, EFIVARFS_MAGIC
) ||
290 F_TYPE_EQUAL(sfs
->f_type
, HUGETLBFS_MAGIC
) ||
291 F_TYPE_EQUAL(sfs
->f_type
, MQUEUE_MAGIC
) ||
292 F_TYPE_EQUAL(sfs
->f_type
, PROC_SUPER_MAGIC
) ||
293 F_TYPE_EQUAL(sfs
->f_type
, PSTOREFS_MAGIC
) ||
294 F_TYPE_EQUAL(sfs
->f_type
, SELINUX_MAGIC
) ||
295 F_TYPE_EQUAL(sfs
->f_type
, SMACK_MAGIC
) ||
296 F_TYPE_EQUAL(sfs
->f_type
, SECURITYFS_MAGIC
) ||
297 F_TYPE_EQUAL(sfs
->f_type
, BPF_FS_MAGIC
) ||
298 F_TYPE_EQUAL(sfs
->f_type
, TRACEFS_MAGIC
) ||
299 F_TYPE_EQUAL(sfs
->f_type
, SYSFS_MAGIC
);
302 static int recurse_fd(int fd
, bool donate_fd
, const struct stat
*st
, uid_t shift
, bool is_toplevel
) {
303 _cleanup_closedir_
DIR *d
= NULL
;
304 bool changed
= false;
310 if (fstatfs(fd
, &sfs
) < 0)
313 /* We generally want to permit crossing of mount boundaries when patching the UIDs/GIDs. However, we probably
314 * shouldn't do this for /proc and /sys if that is already mounted into place. Hence, let's stop the recursion
315 * when we hit procfs, sysfs or some other special file systems. */
317 r
= is_fs_fully_userns_compatible(&sfs
);
321 r
= 0; /* don't recurse */
325 /* Also, if we hit a read-only file system, then don't bother, skip the whole subtree */
326 if ((sfs
.f_flags
& ST_RDONLY
) ||
327 access_fd(fd
, W_OK
) == -EROFS
)
330 if (S_ISDIR(st
->st_mode
)) {
336 copy
= fcntl(fd
, F_DUPFD_CLOEXEC
, 3);
353 FOREACH_DIRENT_ALL(de
, d
, r
= -errno
; goto finish
) {
356 if (dot_or_dot_dot(de
->d_name
))
359 if (fstatat(dirfd(d
), de
->d_name
, &fst
, AT_SYMLINK_NOFOLLOW
) < 0) {
364 if (S_ISDIR(fst
.st_mode
)) {
367 subdir_fd
= openat(dirfd(d
), de
->d_name
, O_RDONLY
|O_NONBLOCK
|O_DIRECTORY
|O_CLOEXEC
|O_NOFOLLOW
|O_NOATIME
);
374 r
= recurse_fd(subdir_fd
, true, &fst
, shift
, false);
381 r
= patch_fd(dirfd(d
), de
->d_name
, &fst
, shift
);
390 /* After we descended, also patch the directory itself. It's key to do this in this order so that the top-level
391 * directory is patched as very last object in the tree, so that we can use it as quick indicator whether the
392 * tree is properly chown()ed already. */
393 r
= patch_fd(d
? dirfd(d
) : fd
, NULL
, st
, shift
);
404 _cleanup_free_
char *name
= NULL
;
406 /* When we hit a ready-only subtree we simply skip it, but log about it. */
407 (void) fd_get_path(fd
, &name
);
408 log_debug("Skippping read-only file or directory %s.", strna(name
));
419 static int fd_patch_uid_internal(int fd
, bool donate_fd
, uid_t shift
, uid_t range
) {
425 /* Recursively adjusts the UID/GIDs of all files of a directory tree. This is used to automatically fix up an
426 * OS tree to the used user namespace UID range. Note that this automatic adjustment only works for UID ranges
427 * following the concept that the upper 16bit of a UID identify the container, and the lower 16bit are the actual
428 * UID within the container. */
430 if ((shift
& 0xFFFF) != 0) {
431 /* We only support containers where the shift starts at a 2^16 boundary */
436 if (shift
== UID_BUSY_BASE
) {
441 if (range
!= 0x10000) {
442 /* We only support containers with 16bit UID ranges for the patching logic */
447 if (fstat(fd
, &st
) < 0) {
452 if ((uint32_t) st
.st_uid
>> 16 != (uint32_t) st
.st_gid
>> 16) {
453 /* We only support containers where the uid/gid container ID match */
458 /* Try to detect if the range is already right. Of course, this a pretty drastic optimization, as we assume
459 * that if the top-level dir has the right upper 16bit assigned, then everything below will have too... */
460 if (((uint32_t) (st
.st_uid
^ shift
) >> 16) == 0)
463 /* Before we start recursively chowning, mark the top-level dir as "busy" by chowning it to the "busy"
464 * range. Should we be interrupted in the middle of our work, we'll see it owned by this user and will start
465 * chown()ing it again, unconditionally, as the busy UID is not a valid UID we'd everpick for ourselves. */
467 if ((st
.st_uid
& UID_BUSY_MASK
) != UID_BUSY_BASE
) {
469 UID_BUSY_BASE
| (st
.st_uid
& ~UID_BUSY_MASK
),
470 (gid_t
) UID_BUSY_BASE
| (st
.st_gid
& ~(gid_t
) UID_BUSY_MASK
)) < 0) {
476 return recurse_fd(fd
, donate_fd
, &st
, shift
, true);
485 int fd_patch_uid(int fd
, uid_t shift
, uid_t range
) {
486 return fd_patch_uid_internal(fd
, false, shift
, range
);
489 int path_patch_uid(const char *path
, uid_t shift
, uid_t range
) {
492 fd
= open(path
, O_RDONLY
|O_NONBLOCK
|O_DIRECTORY
|O_CLOEXEC
|O_NOFOLLOW
|O_NOATIME
);
496 return fd_patch_uid_internal(fd
, true, shift
, range
);