src/nspawn/nspawn-patch-uid.c (thirdparty/systemd.git)
/* SPDX-License-Identifier: LGPL-2.1+ */

#include <fcntl.h>
#include <linux/magic.h>
#include <sys/statvfs.h>
#include <sys/vfs.h>
#include <unistd.h>

#include "acl-util.h"
#include "dirent-util.h"
#include "fd-util.h"
#include "fs-util.h"
#include "missing_magic.h"
#include "nspawn-def.h"
#include "nspawn-patch-uid.h"
#include "stat-util.h"
#include "stdio-util.h"
#include "string-util.h"
#include "strv.h"
#include "user-util.h"

#if HAVE_ACL

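/* Added commentary (not part of the upstream file): the helpers below pin inodes with O_PATH file
 * descriptors. Where libacl only offers path-based calls (acl_get_file()/acl_set_file(), needed for
 * named children and for ACL_TYPE_DEFAULT), those calls are pointed at the /proc/self/fd/<fd> magic
 * symlink, so the operation applies to exactly the inode we opened, without re-traversing the tree
 * that is being patched. */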
static int get_acl(int fd, const char *name, acl_type_t type, acl_t *ret) {
        char procfs_path[STRLEN("/proc/self/fd/") + DECIMAL_STR_MAX(int) + 1];
        acl_t acl;

        assert(fd >= 0);
        assert(ret);

        if (name) {
                _cleanup_close_ int child_fd = -1;

                child_fd = openat(fd, name, O_PATH|O_CLOEXEC|O_NOFOLLOW);
                if (child_fd < 0)
                        return -errno;

                xsprintf(procfs_path, "/proc/self/fd/%i", child_fd);
                acl = acl_get_file(procfs_path, type);
        } else if (type == ACL_TYPE_ACCESS)
                acl = acl_get_fd(fd);
        else {
                xsprintf(procfs_path, "/proc/self/fd/%i", fd);
                acl = acl_get_file(procfs_path, type);
        }
        if (!acl)
                return -errno;

        *ret = acl;
        return 0;
}

static int set_acl(int fd, const char *name, acl_type_t type, acl_t acl) {
        char procfs_path[STRLEN("/proc/self/fd/") + DECIMAL_STR_MAX(int) + 1];
        int r;

        assert(fd >= 0);
        assert(acl);

        if (name) {
                _cleanup_close_ int child_fd = -1;

                child_fd = openat(fd, name, O_PATH|O_CLOEXEC|O_NOFOLLOW);
                if (child_fd < 0)
                        return -errno;

                xsprintf(procfs_path, "/proc/self/fd/%i", child_fd);
                r = acl_set_file(procfs_path, type, acl);
        } else if (type == ACL_TYPE_ACCESS)
                r = acl_set_fd(fd, acl);
        else {
                xsprintf(procfs_path, "/proc/self/fd/%i", fd);
                r = acl_set_file(procfs_path, type, acl);
        }
        if (r < 0)
                return -errno;

        return 0;
}

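/* Added commentary: shift_acl() walks all entries of the given ACL and, for ACL_USER and ACL_GROUP
 * entries, maps the qualifier into the target range by keeping its lower 16 bits and OR-ing in the
 * shift. A modified copy is allocated lazily, only once the first entry that actually needs changing
 * is found. It returns > 0 with the new ACL in *ret if something was changed, and 0 with *ret == NULL
 * if the ACL was already entirely in the right range. */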
static int shift_acl(acl_t acl, uid_t shift, acl_t *ret) {
        _cleanup_(acl_freep) acl_t copy = NULL;
        acl_entry_t i;
        int r;

        assert(acl);
        assert(ret);

        r = acl_get_entry(acl, ACL_FIRST_ENTRY, &i);
        if (r < 0)
                return -errno;
        while (r > 0) {
                uid_t *old_uid, new_uid;
                bool modify = false;
                acl_tag_t tag;

                if (acl_get_tag_type(i, &tag) < 0)
                        return -errno;

                if (IN_SET(tag, ACL_USER, ACL_GROUP)) {

                        /* We don't distinguish here between uid_t and gid_t, let's make sure the compiler checks that
                         * this is actually OK */
                        assert_cc(sizeof(uid_t) == sizeof(gid_t));

                        old_uid = acl_get_qualifier(i);
                        if (!old_uid)
                                return -errno;

                        new_uid = shift | (*old_uid & UINT32_C(0xFFFF));
                        if (!uid_is_valid(new_uid))
                                return -EINVAL;

                        modify = new_uid != *old_uid;
                        if (modify && !copy) {
                                int n;

                                /* There's no copy of the ACL yet? If so, let's create one, and start the loop from the
                                 * beginning, so that we copy all entries, starting from the first, this time. */

                                n = acl_entries(acl);
                                if (n < 0)
                                        return -errno;

                                copy = acl_init(n);
                                if (!copy)
                                        return -errno;

                                /* Seek back to the beginning */
                                r = acl_get_entry(acl, ACL_FIRST_ENTRY, &i);
                                if (r < 0)
                                        return -errno;
                                continue;
                        }
                }

                if (copy) {
                        acl_entry_t new_entry;

                        if (acl_create_entry(&copy, &new_entry) < 0)
                                return -errno;

                        if (acl_copy_entry(new_entry, i) < 0)
                                return -errno;

                        if (modify)
                                if (acl_set_qualifier(new_entry, &new_uid) < 0)
                                        return -errno;
                }

                r = acl_get_entry(acl, ACL_NEXT_ENTRY, &i);
                if (r < 0)
                        return -errno;
        }

        *ret = TAKE_PTR(copy);

        return !!*ret;
}

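/* Added commentary: patch_acls() shifts the access ACL of the given inode and, for directories, the
 * default ACL as well. It returns > 0 if either ACL was rewritten, and 0 if nothing needed to change
 * (or the file system does not support ACLs at all). */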
static int patch_acls(int fd, const char *name, const struct stat *st, uid_t shift) {
        _cleanup_(acl_freep) acl_t acl = NULL, shifted = NULL;
        bool changed = false;
        int r;

        assert(fd >= 0);
        assert(st);

        /* ACLs are not supported on symlinks, there's no point in trying */
        if (S_ISLNK(st->st_mode))
                return 0;

        r = get_acl(fd, name, ACL_TYPE_ACCESS, &acl);
        if (r == -EOPNOTSUPP)
                return 0;
        if (r < 0)
                return r;

        r = shift_acl(acl, shift, &shifted);
        if (r < 0)
                return r;
        if (r > 0) {
                r = set_acl(fd, name, ACL_TYPE_ACCESS, shifted);
                if (r < 0)
                        return r;

                changed = true;
        }

        if (S_ISDIR(st->st_mode)) {
                acl_free(acl);
                acl_free(shifted);

                acl = shifted = NULL;

                r = get_acl(fd, name, ACL_TYPE_DEFAULT, &acl);
                if (r < 0)
                        return r;

                r = shift_acl(acl, shift, &shifted);
                if (r < 0)
                        return r;
                if (r > 0) {
                        r = set_acl(fd, name, ACL_TYPE_DEFAULT, shifted);
                        if (r < 0)
                                return r;

                        changed = true;
                }
        }

        return changed;
}

#else

static int patch_acls(int fd, const char *name, const struct stat *st, uid_t shift) {
        return 0;
}

#endif

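/* Added commentary: patch_fd() adjusts a single inode, either the one behind fd itself or the
 * directory entry "name" below it. The new owner keeps the lower 16 bits of the old UID/GID and takes
 * the upper 16 bits from the shift. Since chown() may drop the set-user-ID/set-group-ID bits as a
 * side effect, the original file mode is restored afterwards, and ACLs are shifted the same way via
 * patch_acls(). Returns > 0 if anything was changed. */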
static int patch_fd(int fd, const char *name, const struct stat *st, uid_t shift) {
        uid_t new_uid;
        gid_t new_gid;
        bool changed = false;
        int r;

        assert(fd >= 0);
        assert(st);

        new_uid =         shift | (st->st_uid & UINT32_C(0xFFFF));
        new_gid = (gid_t) shift | (st->st_gid & UINT32_C(0xFFFF));

        if (!uid_is_valid(new_uid) || !gid_is_valid(new_gid))
                return -EINVAL;

        if (st->st_uid != new_uid || st->st_gid != new_gid) {
                if (name)
                        r = fchownat(fd, name, new_uid, new_gid, AT_SYMLINK_NOFOLLOW);
                else
                        r = fchown(fd, new_uid, new_gid);
                if (r < 0)
                        return -errno;

                /* The Linux kernel alters the mode in some cases of chown(). Let's undo this. */
                if (name) {
                        if (!S_ISLNK(st->st_mode))
                                r = fchmodat(fd, name, st->st_mode, 0);
                        else /* AT_SYMLINK_NOFOLLOW is not available for fchmodat() */
                                r = 0;
                } else
                        r = fchmod(fd, st->st_mode);
                if (r < 0)
                        return -errno;

                changed = true;
        }

        r = patch_acls(fd, name, st, shift);
        if (r < 0)
                return r;

        return r > 0 || changed;
}

/*
 * Check if the filesystem is fully compatible with user namespaces or
 * UID/GID patching. Some filesystems in this list can be fully mounted inside
 * user namespaces; however, their inodes may relate to host resources or be
 * valid only in the global user namespace, therefore no patching should be applied.
 */
static int is_fs_fully_userns_compatible(const struct statfs *sfs) {

        assert(sfs);

        return F_TYPE_EQUAL(sfs->f_type, BINFMTFS_MAGIC) ||
               F_TYPE_EQUAL(sfs->f_type, CGROUP_SUPER_MAGIC) ||
               F_TYPE_EQUAL(sfs->f_type, CGROUP2_SUPER_MAGIC) ||
               F_TYPE_EQUAL(sfs->f_type, DEBUGFS_MAGIC) ||
               F_TYPE_EQUAL(sfs->f_type, DEVPTS_SUPER_MAGIC) ||
               F_TYPE_EQUAL(sfs->f_type, EFIVARFS_MAGIC) ||
               F_TYPE_EQUAL(sfs->f_type, HUGETLBFS_MAGIC) ||
               F_TYPE_EQUAL(sfs->f_type, MQUEUE_MAGIC) ||
               F_TYPE_EQUAL(sfs->f_type, PROC_SUPER_MAGIC) ||
               F_TYPE_EQUAL(sfs->f_type, PSTOREFS_MAGIC) ||
               F_TYPE_EQUAL(sfs->f_type, SELINUX_MAGIC) ||
               F_TYPE_EQUAL(sfs->f_type, SMACK_MAGIC) ||
               F_TYPE_EQUAL(sfs->f_type, SECURITYFS_MAGIC) ||
               F_TYPE_EQUAL(sfs->f_type, BPF_FS_MAGIC) ||
               F_TYPE_EQUAL(sfs->f_type, TRACEFS_MAGIC) ||
               F_TYPE_EQUAL(sfs->f_type, SYSFS_MAGIC);
}

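/* Added commentary: recurse_fd() does a depth-first walk of the tree below fd and patches every inode
 * it encounters. If donate_fd is true the function takes ownership of fd and closes it before
 * returning (the recursion into subdirectories always donates the fd it just opened). Kernel API file
 * systems and read-only subtrees are skipped. Returns > 0 if any ownership was changed, 0 otherwise,
 * and a negative errno-style value on failure. */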
static int recurse_fd(int fd, bool donate_fd, const struct stat *st, uid_t shift, bool is_toplevel) {
        _cleanup_closedir_ DIR *d = NULL;
        bool changed = false;
        struct statfs sfs;
        int r;

        assert(fd >= 0);

        if (fstatfs(fd, &sfs) < 0)
                return -errno;

        /* We generally want to permit crossing of mount boundaries when patching the UIDs/GIDs. However, we probably
         * shouldn't do this for /proc and /sys if that is already mounted into place. Hence, let's stop the recursion
         * when we hit procfs, sysfs or some other special file systems. */

        r = is_fs_fully_userns_compatible(&sfs);
        if (r < 0)
                goto finish;
        if (r > 0) {
                r = 0; /* don't recurse */
                goto finish;
        }

        /* Also, if we hit a read-only file system, then don't bother, skip the whole subtree */
        if ((sfs.f_flags & ST_RDONLY) ||
            access_fd(fd, W_OK) == -EROFS)
                goto read_only;

        if (S_ISDIR(st->st_mode)) {
                struct dirent *de;

                if (!donate_fd) {
                        int copy;

                        copy = fcntl(fd, F_DUPFD_CLOEXEC, 3);
                        if (copy < 0) {
                                r = -errno;
                                goto finish;
                        }

                        fd = copy;
                        donate_fd = true;
                }

                d = fdopendir(fd);
                if (!d) {
                        r = -errno;
                        goto finish;
                }
                fd = -1;

                FOREACH_DIRENT_ALL(de, d, r = -errno; goto finish) {
                        struct stat fst;

                        if (dot_or_dot_dot(de->d_name))
                                continue;

                        if (fstatat(dirfd(d), de->d_name, &fst, AT_SYMLINK_NOFOLLOW) < 0) {
                                r = -errno;
                                goto finish;
                        }

                        if (S_ISDIR(fst.st_mode)) {
                                int subdir_fd;

                                subdir_fd = openat(dirfd(d), de->d_name, O_RDONLY|O_NONBLOCK|O_DIRECTORY|O_CLOEXEC|O_NOFOLLOW|O_NOATIME);
                                if (subdir_fd < 0) {
                                        r = -errno;
                                        goto finish;
                                }

                                r = recurse_fd(subdir_fd, true, &fst, shift, false);
                                if (r < 0)
                                        goto finish;
                                if (r > 0)
                                        changed = true;

                        } else {
                                r = patch_fd(dirfd(d), de->d_name, &fst, shift);
                                if (r < 0)
                                        goto finish;
                                if (r > 0)
                                        changed = true;
                        }
                }
        }

        /* After we descended, also patch the directory itself. It's key to do this in this order, so that the
         * top-level directory is patched as the very last object in the tree, which lets us use it as a quick
         * indicator of whether the tree is properly chown()ed already. */
        r = patch_fd(d ? dirfd(d) : fd, NULL, st, shift);
        if (r == -EROFS)
                goto read_only;
        if (r > 0)
                changed = true;

        r = changed;
        goto finish;

read_only:
        if (!is_toplevel) {
                _cleanup_free_ char *name = NULL;

                /* When we hit a read-only subtree we simply skip it, but log about it. */
                (void) fd_get_path(fd, &name);
                log_debug("Skipping read-only file or directory %s.", strna(name));
                r = changed;
        }

finish:
        if (donate_fd)
                safe_close(fd);

        return r;
}

static int fd_patch_uid_internal(int fd, bool donate_fd, uid_t shift, uid_t range) {
        struct stat st;
        int r;

        assert(fd >= 0);

        /* Recursively adjusts the UIDs/GIDs of all files of a directory tree. This is used to automatically fix up an
         * OS tree for the user namespace UID range in use. Note that this automatic adjustment only works for UID
         * ranges following the concept that the upper 16 bits of a UID identify the container, and the lower 16 bits
         * are the actual UID within the container. */

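        /* Illustration (added commentary, hypothetical values): with shift == 0x90000 the container ID is 9;
         * host UID 0x90000 (589824) is root inside the container and host UID 0x90021 (589857) is UID 33
         * inside the container. The checks below enforce exactly this layout: a 64K-aligned base and a
         * 64K-wide range. */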
        if ((shift & 0xFFFF) != 0) {
                /* We only support containers where the shift starts at a 2^16 boundary */
                r = -EOPNOTSUPP;
                goto finish;
        }

        if (shift == UID_BUSY_BASE) {
                r = -EINVAL;
                goto finish;
        }

        if (range != 0x10000) {
                /* We only support containers with 16-bit UID ranges for the patching logic */
                r = -EOPNOTSUPP;
                goto finish;
        }

        if (fstat(fd, &st) < 0) {
                r = -errno;
                goto finish;
        }

        if ((uint32_t) st.st_uid >> 16 != (uint32_t) st.st_gid >> 16) {
                /* We only support containers where the UID and GID container IDs match */
                r = -EBADE;
                goto finish;
        }

        /* Try to detect if the range is already right. Of course, this is a pretty drastic optimization, as we assume
         * that if the top-level dir has the right upper 16 bits assigned, then everything below will have them,
         * too... */
        if (((uint32_t) (st.st_uid ^ shift) >> 16) == 0) {
                r = 0;
                goto finish; /* make sure a donated fd is closed even when there's nothing to do */
        }

        /* Before we start recursively chowning, mark the top-level dir as "busy" by chowning it to the "busy"
         * range. Should we be interrupted in the middle of our work, we'll see it owned by this user and will start
         * chown()ing it again, unconditionally, as the busy UID is not a valid UID we'd ever pick for ourselves. */

        if ((st.st_uid & UID_BUSY_MASK) != UID_BUSY_BASE) {
                if (fchown(fd,
                           UID_BUSY_BASE | (st.st_uid & ~UID_BUSY_MASK),
                           (gid_t) UID_BUSY_BASE | (st.st_gid & ~(gid_t) UID_BUSY_MASK)) < 0) {
                        r = -errno;
                        goto finish;
                }
        }

        return recurse_fd(fd, donate_fd, &st, shift, true);

finish:
        if (donate_fd)
                safe_close(fd);

        return r;
}

int path_patch_uid(const char *path, uid_t shift, uid_t range) {
        int fd;

        fd = open(path, O_RDONLY|O_NONBLOCK|O_DIRECTORY|O_CLOEXEC|O_NOFOLLOW|O_NOATIME);
        if (fd < 0)
                return -errno;

        return fd_patch_uid_internal(fd, true, shift, range);
}
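
/* Usage sketch (added commentary, hypothetical values, not part of the upstream file): a caller that
 * wants to shift an OS tree at /var/lib/machines/foo into the 64K UID range starting at 0x90000 could
 * do something like:
 *
 *         r = path_patch_uid("/var/lib/machines/foo", 0x90000, 0x10000);
 *         if (r < 0)
 *                 log_error_errno(r, "Failed to patch UIDs/GIDs: %m");
 *         else if (r > 0)
 *                 log_info("Ownership of the tree was adjusted.");
 *
 * A return value of 0 means the tree already carried the requested upper 16 bits and was left
 * untouched. */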