/* src/nspawn/nspawn-patch-uid.c — systemd-nspawn UID/GID range patching */
1 /* SPDX-License-Identifier: LGPL-2.1+ */
2
3 #include <fcntl.h>
4 #include <linux/magic.h>
5 #include <sys/stat.h>
6 #include <sys/statvfs.h>
7 #include <sys/vfs.h>
8 #include <unistd.h>
9
10 #include "acl-util.h"
11 #include "dirent-util.h"
12 #include "fd-util.h"
13 #include "fs-util.h"
14 #include "missing_magic.h"
15 #include "nspawn-def.h"
16 #include "nspawn-patch-uid.h"
17 #include "stat-util.h"
18 #include "stdio-util.h"
19 #include "string-util.h"
20 #include "strv.h"
21 #include "user-util.h"
22
23 #if HAVE_ACL
24
25 static int get_acl(int fd, const char *name, acl_type_t type, acl_t *ret) {
26 char procfs_path[STRLEN("/proc/self/fd/") + DECIMAL_STR_MAX(int) + 1];
27 acl_t acl;
28
29 assert(fd >= 0);
30 assert(ret);
31
32 if (name) {
33 _cleanup_close_ int child_fd = -1;
34
35 child_fd = openat(fd, name, O_PATH|O_CLOEXEC|O_NOFOLLOW);
36 if (child_fd < 0)
37 return -errno;
38
39 xsprintf(procfs_path, "/proc/self/fd/%i", child_fd);
40 acl = acl_get_file(procfs_path, type);
41 } else if (type == ACL_TYPE_ACCESS)
42 acl = acl_get_fd(fd);
43 else {
44 xsprintf(procfs_path, "/proc/self/fd/%i", fd);
45 acl = acl_get_file(procfs_path, type);
46 }
47 if (!acl)
48 return -errno;
49
50 *ret = acl;
51 return 0;
52 }
53
54 static int set_acl(int fd, const char *name, acl_type_t type, acl_t acl) {
55 char procfs_path[STRLEN("/proc/self/fd/") + DECIMAL_STR_MAX(int) + 1];
56 int r;
57
58 assert(fd >= 0);
59 assert(acl);
60
61 if (name) {
62 _cleanup_close_ int child_fd = -1;
63
64 child_fd = openat(fd, name, O_PATH|O_CLOEXEC|O_NOFOLLOW);
65 if (child_fd < 0)
66 return -errno;
67
68 xsprintf(procfs_path, "/proc/self/fd/%i", child_fd);
69 r = acl_set_file(procfs_path, type, acl);
70 } else if (type == ACL_TYPE_ACCESS)
71 r = acl_set_fd(fd, acl);
72 else {
73 xsprintf(procfs_path, "/proc/self/fd/%i", fd);
74 r = acl_set_file(procfs_path, type, acl);
75 }
76 if (r < 0)
77 return -errno;
78
79 return 0;
80 }
81
82 static int shift_acl(acl_t acl, uid_t shift, acl_t *ret) {
83 _cleanup_(acl_freep) acl_t copy = NULL;
84 acl_entry_t i;
85 int r;
86
87 assert(acl);
88 assert(ret);
89
90 r = acl_get_entry(acl, ACL_FIRST_ENTRY, &i);
91 if (r < 0)
92 return -errno;
93 while (r > 0) {
94 uid_t *old_uid, new_uid;
95 bool modify = false;
96 acl_tag_t tag;
97
98 if (acl_get_tag_type(i, &tag) < 0)
99 return -errno;
100
101 if (IN_SET(tag, ACL_USER, ACL_GROUP)) {
102
103 /* We don't distinguish here between uid_t and gid_t, let's make sure the compiler checks that
104 * this is actually OK */
105 assert_cc(sizeof(uid_t) == sizeof(gid_t));
106
107 old_uid = acl_get_qualifier(i);
108 if (!old_uid)
109 return -errno;
110
111 new_uid = shift | (*old_uid & UINT32_C(0xFFFF));
112 if (!uid_is_valid(new_uid))
113 return -EINVAL;
114
115 modify = new_uid != *old_uid;
116 if (modify && !copy) {
117 int n;
118
119 /* There's no copy of the ACL yet? if so, let's create one, and start the loop from the
120 * beginning, so that we copy all entries, starting from the first, this time. */
121
122 n = acl_entries(acl);
123 if (n < 0)
124 return -errno;
125
126 copy = acl_init(n);
127 if (!copy)
128 return -errno;
129
130 /* Seek back to the beginning */
131 r = acl_get_entry(acl, ACL_FIRST_ENTRY, &i);
132 if (r < 0)
133 return -errno;
134 continue;
135 }
136 }
137
138 if (copy) {
139 acl_entry_t new_entry;
140
141 if (acl_create_entry(&copy, &new_entry) < 0)
142 return -errno;
143
144 if (acl_copy_entry(new_entry, i) < 0)
145 return -errno;
146
147 if (modify)
148 if (acl_set_qualifier(new_entry, &new_uid) < 0)
149 return -errno;
150 }
151
152 r = acl_get_entry(acl, ACL_NEXT_ENTRY, &i);
153 if (r < 0)
154 return -errno;
155 }
156
157 *ret = TAKE_PTR(copy);
158
159 return !!*ret;
160 }
161
162 static int patch_acls(int fd, const char *name, const struct stat *st, uid_t shift) {
163 _cleanup_(acl_freep) acl_t acl = NULL, shifted = NULL;
164 bool changed = false;
165 int r;
166
167 assert(fd >= 0);
168 assert(st);
169
170 /* ACLs are not supported on symlinks, there's no point in trying */
171 if (S_ISLNK(st->st_mode))
172 return 0;
173
174 r = get_acl(fd, name, ACL_TYPE_ACCESS, &acl);
175 if (r == -EOPNOTSUPP)
176 return 0;
177 if (r < 0)
178 return r;
179
180 r = shift_acl(acl, shift, &shifted);
181 if (r < 0)
182 return r;
183 if (r > 0) {
184 r = set_acl(fd, name, ACL_TYPE_ACCESS, shifted);
185 if (r < 0)
186 return r;
187
188 changed = true;
189 }
190
191 if (S_ISDIR(st->st_mode)) {
192 acl_free(acl);
193 acl_free(shifted);
194
195 acl = shifted = NULL;
196
197 r = get_acl(fd, name, ACL_TYPE_DEFAULT, &acl);
198 if (r < 0)
199 return r;
200
201 r = shift_acl(acl, shift, &shifted);
202 if (r < 0)
203 return r;
204 if (r > 0) {
205 r = set_acl(fd, name, ACL_TYPE_DEFAULT, shifted);
206 if (r < 0)
207 return r;
208
209 changed = true;
210 }
211 }
212
213 return changed;
214 }
215
216 #else
217
/* Fallback when compiled without ACL support: there are no ACLs to shift, report "no change". */
static int patch_acls(int fd, const char *name, const struct stat *st, uid_t shift) {
        return 0;
}
221
222 #endif
223
/* Chowns 'name' relative to the directory fd (or fd itself, if name is NULL) into the
 * 16bit-aligned UID/GID range starting at 'shift', keeping the lower 16 bits of the original
 * owner/group. Restores the original file mode afterwards and shifts POSIX ACLs along.
 * Returns > 0 if anything was changed, 0 if not, negative errno on error. */
static int patch_fd(int fd, const char *name, const struct stat *st, uid_t shift) {
        uid_t new_uid;
        gid_t new_gid;
        bool changed = false;
        int r;

        assert(fd >= 0);
        assert(st);

        /* Keep the low 16 bit (the ID within the container), replace the high 16 bit with the shift */
        new_uid = shift | (st->st_uid & UINT32_C(0xFFFF));
        new_gid = (gid_t) shift | (st->st_gid & UINT32_C(0xFFFF));

        if (!uid_is_valid(new_uid) || !gid_is_valid(new_gid))
                return -EINVAL;

        if (st->st_uid != new_uid || st->st_gid != new_gid) {
                if (name)
                        r = fchownat(fd, name, new_uid, new_gid, AT_SYMLINK_NOFOLLOW);
                else
                        r = fchown(fd, new_uid, new_gid);
                if (r < 0)
                        return -errno;

                /* The Linux kernel alters the mode in some cases of chown(). Let's undo this. */
                if (name) {
                        if (!S_ISLNK(st->st_mode))
                                r = fchmodat(fd, name, st->st_mode, 0);
                        else /* AT_SYMLINK_NOFOLLOW is not available for fchmodat() */
                                r = 0;
                } else
                        r = fchmod(fd, st->st_mode);
                if (r < 0)
                        return -errno;

                changed = true;
        }

        r = patch_acls(fd, name, st, shift);
        if (r < 0)
                return r;

        return r > 0 || changed;
}
267
268 /*
269 * Check if the filesystem is fully compatible with user namespaces or
270 * UID/GID patching. Some filesystems in this list can be fully mounted inside
271 * user namespaces, however their inodes may relate to host resources or only
272 * valid in the global user namespace, therefore no patching should be applied.
273 */
274 static int is_fs_fully_userns_compatible(const struct statfs *sfs) {
275
276 assert(sfs);
277
278 return F_TYPE_EQUAL(sfs->f_type, BINFMTFS_MAGIC) ||
279 F_TYPE_EQUAL(sfs->f_type, CGROUP_SUPER_MAGIC) ||
280 F_TYPE_EQUAL(sfs->f_type, CGROUP2_SUPER_MAGIC) ||
281 F_TYPE_EQUAL(sfs->f_type, DEBUGFS_MAGIC) ||
282 F_TYPE_EQUAL(sfs->f_type, DEVPTS_SUPER_MAGIC) ||
283 F_TYPE_EQUAL(sfs->f_type, EFIVARFS_MAGIC) ||
284 F_TYPE_EQUAL(sfs->f_type, HUGETLBFS_MAGIC) ||
285 F_TYPE_EQUAL(sfs->f_type, MQUEUE_MAGIC) ||
286 F_TYPE_EQUAL(sfs->f_type, PROC_SUPER_MAGIC) ||
287 F_TYPE_EQUAL(sfs->f_type, PSTOREFS_MAGIC) ||
288 F_TYPE_EQUAL(sfs->f_type, SELINUX_MAGIC) ||
289 F_TYPE_EQUAL(sfs->f_type, SMACK_MAGIC) ||
290 F_TYPE_EQUAL(sfs->f_type, SECURITYFS_MAGIC) ||
291 F_TYPE_EQUAL(sfs->f_type, BPF_FS_MAGIC) ||
292 F_TYPE_EQUAL(sfs->f_type, TRACEFS_MAGIC) ||
293 F_TYPE_EQUAL(sfs->f_type, SYSFS_MAGIC);
294 }
295
/* Recursively shifts the UIDs/GIDs (and ACLs) of the object referenced by 'fd' and, if it is a
 * directory, of everything below it. If 'donate_fd' is true, ownership of 'fd' passes to this
 * function, which closes it on all paths. Recursion stops at host-owned pseudo file systems and
 * skips read-only subtrees (logged at debug level, unless this is the top-level invocation).
 * Returns > 0 if anything was changed, 0 if not, negative errno on error. */
static int recurse_fd(int fd, bool donate_fd, const struct stat *st, uid_t shift, bool is_toplevel) {
        _cleanup_closedir_ DIR *d = NULL;
        bool changed = false;
        struct statfs sfs;
        int r;

        assert(fd >= 0);

        if (fstatfs(fd, &sfs) < 0)
                return -errno;

        /* We generally want to permit crossing of mount boundaries when patching the UIDs/GIDs. However, we probably
         * shouldn't do this for /proc and /sys if that is already mounted into place. Hence, let's stop the recursion
         * when we hit procfs, sysfs or some other special file systems. */

        r = is_fs_fully_userns_compatible(&sfs);
        if (r < 0)
                goto finish;
        if (r > 0) {
                r = 0; /* don't recurse */
                goto finish;
        }

        /* Also, if we hit a read-only file system, then don't bother, skip the whole subtree */
        if ((sfs.f_flags & ST_RDONLY) ||
            access_fd(fd, W_OK) == -EROFS)
                goto read_only;

        if (S_ISDIR(st->st_mode)) {
                struct dirent *de;

                if (!donate_fd) {
                        int copy;

                        /* fdopendir() below takes possession of the fd; since the caller keeps
                         * ownership of the original, operate on a duplicate instead. */
                        copy = fcntl(fd, F_DUPFD_CLOEXEC, 3);
                        if (copy < 0) {
                                r = -errno;
                                goto finish;
                        }

                        fd = copy;
                        donate_fd = true;
                }

                d = fdopendir(fd);
                if (!d) {
                        r = -errno;
                        goto finish;
                }
                /* 'd' now owns the fd and closedir() will close it */
                fd = -1;

                FOREACH_DIRENT_ALL(de, d, r = -errno; goto finish) {
                        struct stat fst;

                        if (dot_or_dot_dot(de->d_name))
                                continue;

                        if (fstatat(dirfd(d), de->d_name, &fst, AT_SYMLINK_NOFOLLOW) < 0) {
                                r = -errno;
                                goto finish;
                        }

                        if (S_ISDIR(fst.st_mode)) {
                                int subdir_fd;

                                subdir_fd = openat(dirfd(d), de->d_name, O_RDONLY|O_NONBLOCK|O_DIRECTORY|O_CLOEXEC|O_NOFOLLOW|O_NOATIME);
                                if (subdir_fd < 0) {
                                        r = -errno;
                                        goto finish;

                                }

                                /* Descend; the recursive call takes ownership of subdir_fd */
                                r = recurse_fd(subdir_fd, true, &fst, shift, false);
                                if (r < 0)
                                        goto finish;
                                if (r > 0)
                                        changed = true;

                        } else {
                                /* Non-directories are patched in place, by name relative to the directory */
                                r = patch_fd(dirfd(d), de->d_name, &fst, shift);
                                if (r < 0)
                                        goto finish;
                                if (r > 0)
                                        changed = true;
                        }
                }
        }

        /* After we descended, also patch the directory itself. It's key to do this in this order so that the top-level
         * directory is patched as very last object in the tree, so that we can use it as quick indicator whether the
         * tree is properly chown()ed already. */
        r = patch_fd(d ? dirfd(d) : fd, NULL, st, shift);
        if (r == -EROFS)
                goto read_only;
        if (r > 0)
                changed = true;

        r = changed;
        goto finish;

read_only:
        if (!is_toplevel) {
                _cleanup_free_ char *name = NULL;

                /* When we hit a read-only subtree we simply skip it, but log about it. */
                (void) fd_get_path(fd, &name);
                log_debug("Skipping read-only file or directory %s.", strna(name));
                r = changed;
        }
        /* NOTE(review): when the top-level tree itself is read-only we fall through here with 'r'
         * still 0 from the earlier file-system check — presumably intentional ("nothing changed"),
         * but worth confirming. */

finish:
        if (donate_fd)
                safe_close(fd);

        return r;
}
412
413 static int fd_patch_uid_internal(int fd, bool donate_fd, uid_t shift, uid_t range) {
414 struct stat st;
415 int r;
416
417 assert(fd >= 0);
418
419 /* Recursively adjusts the UID/GIDs of all files of a directory tree. This is used to automatically fix up an
420 * OS tree to the used user namespace UID range. Note that this automatic adjustment only works for UID ranges
421 * following the concept that the upper 16bit of a UID identify the container, and the lower 16bit are the actual
422 * UID within the container. */
423
424 if ((shift & 0xFFFF) != 0) {
425 /* We only support containers where the shift starts at a 2^16 boundary */
426 r = -EOPNOTSUPP;
427 goto finish;
428 }
429
430 if (shift == UID_BUSY_BASE) {
431 r = -EINVAL;
432 goto finish;
433 }
434
435 if (range != 0x10000) {
436 /* We only support containers with 16bit UID ranges for the patching logic */
437 r = -EOPNOTSUPP;
438 goto finish;
439 }
440
441 if (fstat(fd, &st) < 0) {
442 r = -errno;
443 goto finish;
444 }
445
446 if ((uint32_t) st.st_uid >> 16 != (uint32_t) st.st_gid >> 16) {
447 /* We only support containers where the uid/gid container ID match */
448 r = -EBADE;
449 goto finish;
450 }
451
452 /* Try to detect if the range is already right. Of course, this a pretty drastic optimization, as we assume
453 * that if the top-level dir has the right upper 16bit assigned, then everything below will have too... */
454 if (((uint32_t) (st.st_uid ^ shift) >> 16) == 0)
455 return 0;
456
457 /* Before we start recursively chowning, mark the top-level dir as "busy" by chowning it to the "busy"
458 * range. Should we be interrupted in the middle of our work, we'll see it owned by this user and will start
459 * chown()ing it again, unconditionally, as the busy UID is not a valid UID we'd everpick for ourselves. */
460
461 if ((st.st_uid & UID_BUSY_MASK) != UID_BUSY_BASE) {
462 if (fchown(fd,
463 UID_BUSY_BASE | (st.st_uid & ~UID_BUSY_MASK),
464 (gid_t) UID_BUSY_BASE | (st.st_gid & ~(gid_t) UID_BUSY_MASK)) < 0) {
465 r = -errno;
466 goto finish;
467 }
468 }
469
470 return recurse_fd(fd, donate_fd, &st, shift, true);
471
472 finish:
473 if (donate_fd)
474 safe_close(fd);
475
476 return r;
477 }
478
479 int path_patch_uid(const char *path, uid_t shift, uid_t range) {
480 int fd;
481
482 fd = open(path, O_RDONLY|O_NONBLOCK|O_DIRECTORY|O_CLOEXEC|O_NOFOLLOW|O_NOATIME);
483 if (fd < 0)
484 return -errno;
485
486 return fd_patch_uid_internal(fd, true, shift, range);
487 }