]>
Commit | Line | Data |
---|---|---|
53e1b683 | 1 | /* SPDX-License-Identifier: LGPL-2.1+ */ |
7336138e | 2 | /*** |
7336138e | 3 | Copyright 2016 Lennart Poettering |
7336138e LP |
4 | ***/ |
5 | ||
6 | #include <fcntl.h> | |
88cd066e | 7 | #include <linux/magic.h> |
349cc4a5 | 8 | #if HAVE_ACL |
7336138e LP |
9 | #include <sys/acl.h> |
10 | #endif | |
11 | #include <sys/stat.h> | |
3603efde | 12 | #include <sys/statvfs.h> |
88cd066e | 13 | #include <sys/vfs.h> |
7336138e LP |
14 | #include <unistd.h> |
15 | ||
16 | #include "acl-util.h" | |
17 | #include "dirent-util.h" | |
18 | #include "fd-util.h" | |
3603efde | 19 | #include "fs-util.h" |
88cd066e | 20 | #include "missing.h" |
3603efde | 21 | #include "nspawn-def.h" |
7336138e | 22 | #include "nspawn-patch-uid.h" |
88cd066e | 23 | #include "stat-util.h" |
7336138e LP |
24 | #include "stdio-util.h" |
25 | #include "string-util.h" | |
26 | #include "strv.h" | |
27 | #include "user-util.h" | |
28 | ||
349cc4a5 | 29 | #if HAVE_ACL |
7336138e LP |
30 | |
/* Retrieves the ACL of type 'type' for an object: either the child 'name' of the directory 'fd' refers to,
 * or 'fd' itself when 'name' is NULL. On success stores the ACL in *ret (to be freed with acl_free()) and
 * returns 0; returns a negative errno-style error otherwise. */
static int get_acl(int fd, const char *name, acl_type_t type, acl_t *ret) {
        char procfs_path[STRLEN("/proc/self/fd/") + DECIMAL_STR_MAX(int) + 1];
        _cleanup_close_ int child_fd = -1;
        acl_t acl;

        assert(fd >= 0);
        assert(ret);

        if (name) {
                /* There's no acl_get_fileat(), hence pin the child via O_PATH (no read access needed) and
                 * resolve it through /proc/self/fd/ */
                child_fd = openat(fd, name, O_PATH|O_CLOEXEC|O_NOFOLLOW);
                if (child_fd < 0)
                        return -errno;

                xsprintf(procfs_path, "/proc/self/fd/%i", child_fd);
                acl = acl_get_file(procfs_path, type);
        } else if (type != ACL_TYPE_ACCESS) {
                /* acl_get_fd() only covers the access ACL, use the /proc/ trick for any other type */
                xsprintf(procfs_path, "/proc/self/fd/%i", fd);
                acl = acl_get_file(procfs_path, type);
        } else
                acl = acl_get_fd(fd);

        if (!acl)
                return -errno;

        *ret = acl;
        return 0;
}
59 | ||
/* Applies the ACL 'acl' of type 'type' to an object: either the child 'name' of the directory 'fd' refers
 * to, or 'fd' itself when 'name' is NULL. Returns 0 on success, a negative errno-style error otherwise. */
static int set_acl(int fd, const char *name, acl_type_t type, acl_t acl) {
        char procfs_path[STRLEN("/proc/self/fd/") + DECIMAL_STR_MAX(int) + 1];
        _cleanup_close_ int child_fd = -1;
        int r;

        assert(fd >= 0);
        assert(acl);

        if (name) {
                /* There's no acl_set_fileat(), hence pin the child via O_PATH and go via /proc/self/fd/ */
                child_fd = openat(fd, name, O_PATH|O_CLOEXEC|O_NOFOLLOW);
                if (child_fd < 0)
                        return -errno;

                xsprintf(procfs_path, "/proc/self/fd/%i", child_fd);
                r = acl_set_file(procfs_path, type, acl);
        } else if (type != ACL_TYPE_ACCESS) {
                /* acl_set_fd() only covers the access ACL, use the /proc/ trick for any other type */
                xsprintf(procfs_path, "/proc/self/fd/%i", fd);
                r = acl_set_file(procfs_path, type, acl);
        } else
                r = acl_set_fd(fd, acl);

        if (r < 0)
                return -errno;

        return 0;
}
87 | ||
/* Rewrites all ACL_USER/ACL_GROUP entries of 'acl' so that their qualifier carries 'shift' in the upper
 * 16 bits while keeping the lower 16 bits. If any entry needed changing, stores a newly allocated, fully
 * shifted copy in *ret and returns > 0; stores NULL and returns 0 if no entry needed changing; returns a
 * negative errno-style error on failure. The caller owns *ret and must release it with acl_free(). */
static int shift_acl(acl_t acl, uid_t shift, acl_t *ret) {
        _cleanup_(acl_freep) acl_t copy = NULL;
        acl_entry_t i;
        int r;

        assert(acl);
        assert(ret);

        r = acl_get_entry(acl, ACL_FIRST_ENTRY, &i);
        if (r < 0)
                return -errno;
        while (r > 0) {
                uid_t *old_uid, new_uid;
                bool modify = false;
                acl_tag_t tag;

                if (acl_get_tag_type(i, &tag) < 0)
                        return -errno;

                if (IN_SET(tag, ACL_USER, ACL_GROUP)) {

                        /* We don't distinguish here between uid_t and gid_t, let's make sure the compiler
                         * checks that this is actually OK */
                        assert_cc(sizeof(uid_t) == sizeof(gid_t));

                        old_uid = acl_get_qualifier(i);
                        if (!old_uid)
                                return -errno;

                        /* Keep the lower 16 bits (the ID within the container), substitute the upper 16
                         * bits with the shift */
                        new_uid = shift | (*old_uid & UINT32_C(0xFFFF));
                        modify = new_uid != *old_uid;

                        /* acl_get_qualifier() returns allocated storage that must be released via
                         * acl_free(), otherwise we leak it for each user/group entry */
                        (void) acl_free(old_uid);

                        if (!uid_is_valid(new_uid))
                                return -EINVAL;

                        if (modify && !copy) {
                                int n;

                                /* There's no copy of the ACL yet? if so, let's create one, and start the loop from the
                                 * beginning, so that we copy all entries, starting from the first, this time. */

                                n = acl_entries(acl);
                                if (n < 0)
                                        return -errno;

                                copy = acl_init(n);
                                if (!copy)
                                        return -errno;

                                /* Seek back to the beginning */
                                r = acl_get_entry(acl, ACL_FIRST_ENTRY, &i);
                                if (r < 0)
                                        return -errno;
                                continue;
                        }
                }

                if (copy) {
                        acl_entry_t new_entry;

                        if (acl_create_entry(&copy, &new_entry) < 0)
                                return -errno;

                        if (acl_copy_entry(new_entry, i) < 0)
                                return -errno;

                        /* Patch the qualifier on user/group entries whose ID changed */
                        if (modify)
                                if (acl_set_qualifier(new_entry, &new_uid) < 0)
                                        return -errno;
                }

                r = acl_get_entry(acl, ACL_NEXT_ENTRY, &i);
                if (r < 0)
                        return -errno;
        }

        *ret = TAKE_PTR(copy);

        return !!*ret;
}
167 | ||
/* Shifts the access ACL (and, for directories, also the default ACL) of an object into the UID range
 * starting at 'shift'. The object is the child 'name' of directory 'fd', or 'fd' itself if 'name' is NULL;
 * 'st' is the object's stat data. Returns > 0 if anything was changed, 0 if not (or if ACLs are
 * unsupported), a negative errno-style error on failure. */
static int patch_acls(int fd, const char *name, const struct stat *st, uid_t shift) {
        _cleanup_(acl_freep) acl_t acl = NULL, shifted = NULL;
        bool changed = false;
        int r;

        assert(fd >= 0);
        assert(st);

        /* ACLs are not supported on symlinks, there's no point in trying */
        if (S_ISLNK(st->st_mode))
                return 0;

        r = get_acl(fd, name, ACL_TYPE_ACCESS, &acl);
        if (r == -EOPNOTSUPP)
                return 0;
        if (r < 0)
                return r;

        r = shift_acl(acl, shift, &shifted);
        if (r < 0)
                return r;
        if (r > 0) {
                r = set_acl(fd, name, ACL_TYPE_ACCESS, shifted);
                if (r < 0)
                        return r;

                changed = true;
        }

        /* Only directories may carry a default ACL in addition */
        if (!S_ISDIR(st->st_mode))
                return changed;

        acl_free(acl);
        acl_free(shifted);
        acl = shifted = NULL;

        r = get_acl(fd, name, ACL_TYPE_DEFAULT, &acl);
        if (r < 0)
                return r;

        r = shift_acl(acl, shift, &shifted);
        if (r < 0)
                return r;
        if (r > 0) {
                r = set_acl(fd, name, ACL_TYPE_DEFAULT, shifted);
                if (r < 0)
                        return r;

                changed = true;
        }

        return changed;
}
221 | ||
222 | #else | |
223 | ||
/* Fallback when compiled without ACL support: there are no ACLs to patch, report "no change". */
static int patch_acls(int fd, const char *name, const struct stat *st, uid_t shift) {
        return 0;
}
227 | ||
228 | #endif | |
229 | ||
230 | static int patch_fd(int fd, const char *name, const struct stat *st, uid_t shift) { | |
231 | uid_t new_uid; | |
232 | gid_t new_gid; | |
233 | bool changed = false; | |
234 | int r; | |
235 | ||
236 | assert(fd >= 0); | |
237 | assert(st); | |
238 | ||
239 | new_uid = shift | (st->st_uid & UINT32_C(0xFFFF)); | |
240 | new_gid = (gid_t) shift | (st->st_gid & UINT32_C(0xFFFF)); | |
241 | ||
242 | if (!uid_is_valid(new_uid) || !gid_is_valid(new_gid)) | |
243 | return -EINVAL; | |
244 | ||
245 | if (st->st_uid != new_uid || st->st_gid != new_gid) { | |
246 | if (name) | |
247 | r = fchownat(fd, name, new_uid, new_gid, AT_SYMLINK_NOFOLLOW); | |
248 | else | |
249 | r = fchown(fd, new_uid, new_gid); | |
250 | if (r < 0) | |
251 | return -errno; | |
252 | ||
253 | /* The Linux kernel alters the mode in some cases of chown(). Let's undo this. */ | |
0c6aeb46 LP |
254 | if (name) { |
255 | if (!S_ISLNK(st->st_mode)) | |
256 | r = fchmodat(fd, name, st->st_mode, 0); | |
257 | else /* AT_SYMLINK_NOFOLLOW is not available for fchmodat() */ | |
258 | r = 0; | |
259 | } else | |
7336138e LP |
260 | r = fchmod(fd, st->st_mode); |
261 | if (r < 0) | |
262 | return -errno; | |
263 | ||
264 | changed = true; | |
265 | } | |
266 | ||
267 | r = patch_acls(fd, name, st, shift); | |
268 | if (r < 0) | |
269 | return r; | |
270 | ||
271 | return r > 0 || changed; | |
272 | } | |
273 | ||
231bfb1b DH |
274 | /* |
275 | * Check if the filesystem is fully compatible with user namespaces or | |
276 | * UID/GID patching. Some filesystems in this list can be fully mounted inside | |
277 | * user namespaces, however their inodes may relate to host resources or only | |
278 | * valid in the global user namespace, therefore no patching should be applied. | |
279 | */ | |
3603efde LP |
280 | static int is_fs_fully_userns_compatible(const struct statfs *sfs) { |
281 | ||
282 | assert(sfs); | |
283 | ||
284 | return F_TYPE_EQUAL(sfs->f_type, BINFMTFS_MAGIC) || | |
285 | F_TYPE_EQUAL(sfs->f_type, CGROUP_SUPER_MAGIC) || | |
286 | F_TYPE_EQUAL(sfs->f_type, CGROUP2_SUPER_MAGIC) || | |
287 | F_TYPE_EQUAL(sfs->f_type, DEBUGFS_MAGIC) || | |
288 | F_TYPE_EQUAL(sfs->f_type, DEVPTS_SUPER_MAGIC) || | |
289 | F_TYPE_EQUAL(sfs->f_type, EFIVARFS_MAGIC) || | |
290 | F_TYPE_EQUAL(sfs->f_type, HUGETLBFS_MAGIC) || | |
291 | F_TYPE_EQUAL(sfs->f_type, MQUEUE_MAGIC) || | |
292 | F_TYPE_EQUAL(sfs->f_type, PROC_SUPER_MAGIC) || | |
293 | F_TYPE_EQUAL(sfs->f_type, PSTOREFS_MAGIC) || | |
294 | F_TYPE_EQUAL(sfs->f_type, SELINUX_MAGIC) || | |
295 | F_TYPE_EQUAL(sfs->f_type, SMACK_MAGIC) || | |
296 | F_TYPE_EQUAL(sfs->f_type, SECURITYFS_MAGIC) || | |
297 | F_TYPE_EQUAL(sfs->f_type, BPF_FS_MAGIC) || | |
298 | F_TYPE_EQUAL(sfs->f_type, TRACEFS_MAGIC) || | |
299 | F_TYPE_EQUAL(sfs->f_type, SYSFS_MAGIC); | |
88cd066e LP |
300 | } |
301 | ||
4aeb20f5 | 302 | static int recurse_fd(int fd, bool donate_fd, const struct stat *st, uid_t shift, bool is_toplevel) { |
3603efde | 303 | _cleanup_closedir_ DIR *d = NULL; |
7336138e | 304 | bool changed = false; |
3603efde | 305 | struct statfs sfs; |
7336138e LP |
306 | int r; |
307 | ||
308 | assert(fd >= 0); | |
309 | ||
3603efde LP |
310 | if (fstatfs(fd, &sfs) < 0) |
311 | return -errno; | |
312 | ||
313 | /* We generally want to permit crossing of mount boundaries when patching the UIDs/GIDs. However, we probably | |
314 | * shouldn't do this for /proc and /sys if that is already mounted into place. Hence, let's stop the recursion | |
315 | * when we hit procfs, sysfs or some other special file systems. */ | |
316 | ||
317 | r = is_fs_fully_userns_compatible(&sfs); | |
88cd066e LP |
318 | if (r < 0) |
319 | goto finish; | |
320 | if (r > 0) { | |
321 | r = 0; /* don't recurse */ | |
322 | goto finish; | |
323 | } | |
324 | ||
3603efde LP |
325 | /* Also, if we hit a read-only file system, then don't bother, skip the whole subtree */ |
326 | if ((sfs.f_flags & ST_RDONLY) || | |
327 | access_fd(fd, W_OK) == -EROFS) | |
328 | goto read_only; | |
7336138e LP |
329 | |
330 | if (S_ISDIR(st->st_mode)) { | |
7336138e LP |
331 | struct dirent *de; |
332 | ||
333 | if (!donate_fd) { | |
334 | int copy; | |
335 | ||
336 | copy = fcntl(fd, F_DUPFD_CLOEXEC, 3); | |
88cd066e LP |
337 | if (copy < 0) { |
338 | r = -errno; | |
339 | goto finish; | |
340 | } | |
7336138e LP |
341 | |
342 | fd = copy; | |
343 | donate_fd = true; | |
344 | } | |
345 | ||
346 | d = fdopendir(fd); | |
347 | if (!d) { | |
348 | r = -errno; | |
349 | goto finish; | |
350 | } | |
351 | fd = -1; | |
352 | ||
353 | FOREACH_DIRENT_ALL(de, d, r = -errno; goto finish) { | |
354 | struct stat fst; | |
355 | ||
49bfc877 | 356 | if (dot_or_dot_dot(de->d_name)) |
7336138e LP |
357 | continue; |
358 | ||
359 | if (fstatat(dirfd(d), de->d_name, &fst, AT_SYMLINK_NOFOLLOW) < 0) { | |
360 | r = -errno; | |
361 | goto finish; | |
362 | } | |
363 | ||
364 | if (S_ISDIR(fst.st_mode)) { | |
365 | int subdir_fd; | |
366 | ||
367 | subdir_fd = openat(dirfd(d), de->d_name, O_RDONLY|O_NONBLOCK|O_DIRECTORY|O_CLOEXEC|O_NOFOLLOW|O_NOATIME); | |
368 | if (subdir_fd < 0) { | |
369 | r = -errno; | |
370 | goto finish; | |
371 | ||
372 | } | |
373 | ||
4aeb20f5 | 374 | r = recurse_fd(subdir_fd, true, &fst, shift, false); |
7336138e LP |
375 | if (r < 0) |
376 | goto finish; | |
377 | if (r > 0) | |
378 | changed = true; | |
379 | ||
380 | } else { | |
381 | r = patch_fd(dirfd(d), de->d_name, &fst, shift); | |
382 | if (r < 0) | |
383 | goto finish; | |
384 | if (r > 0) | |
385 | changed = true; | |
386 | } | |
387 | } | |
388 | } | |
389 | ||
3603efde LP |
390 | /* After we descended, also patch the directory itself. It's key to do this in this order so that the top-level |
391 | * directory is patched as very last object in the tree, so that we can use it as quick indicator whether the | |
392 | * tree is properly chown()ed already. */ | |
393 | r = patch_fd(d ? dirfd(d) : fd, NULL, st, shift); | |
394 | if (r == -EROFS) | |
395 | goto read_only; | |
396 | if (r > 0) | |
397 | changed = true; | |
398 | ||
7336138e | 399 | r = changed; |
3603efde LP |
400 | goto finish; |
401 | ||
402 | read_only: | |
403 | if (!is_toplevel) { | |
404 | _cleanup_free_ char *name = NULL; | |
405 | ||
406 | /* When we hit a ready-only subtree we simply skip it, but log about it. */ | |
407 | (void) fd_get_path(fd, &name); | |
408 | log_debug("Skippping read-only file or directory %s.", strna(name)); | |
409 | r = changed; | |
410 | } | |
7336138e LP |
411 | |
412 | finish: | |
413 | if (donate_fd) | |
414 | safe_close(fd); | |
415 | ||
416 | return r; | |
417 | } | |
418 | ||
419 | static int fd_patch_uid_internal(int fd, bool donate_fd, uid_t shift, uid_t range) { | |
420 | struct stat st; | |
421 | int r; | |
422 | ||
423 | assert(fd >= 0); | |
424 | ||
425 | /* Recursively adjusts the UID/GIDs of all files of a directory tree. This is used to automatically fix up an | |
426 | * OS tree to the used user namespace UID range. Note that this automatic adjustment only works for UID ranges | |
427 | * following the concept that the upper 16bit of a UID identify the container, and the lower 16bit are the actual | |
428 | * UID within the container. */ | |
429 | ||
430 | if ((shift & 0xFFFF) != 0) { | |
431 | /* We only support containers where the shift starts at a 2^16 boundary */ | |
432 | r = -EOPNOTSUPP; | |
433 | goto finish; | |
434 | } | |
435 | ||
3603efde LP |
436 | if (shift == UID_BUSY_BASE) { |
437 | r = -EINVAL; | |
438 | goto finish; | |
439 | } | |
440 | ||
7336138e LP |
441 | if (range != 0x10000) { |
442 | /* We only support containers with 16bit UID ranges for the patching logic */ | |
443 | r = -EOPNOTSUPP; | |
444 | goto finish; | |
445 | } | |
446 | ||
447 | if (fstat(fd, &st) < 0) { | |
448 | r = -errno; | |
449 | goto finish; | |
450 | } | |
451 | ||
452 | if ((uint32_t) st.st_uid >> 16 != (uint32_t) st.st_gid >> 16) { | |
453 | /* We only support containers where the uid/gid container ID match */ | |
454 | r = -EBADE; | |
455 | goto finish; | |
456 | } | |
457 | ||
458 | /* Try to detect if the range is already right. Of course, this a pretty drastic optimization, as we assume | |
459 | * that if the top-level dir has the right upper 16bit assigned, then everything below will have too... */ | |
460 | if (((uint32_t) (st.st_uid ^ shift) >> 16) == 0) | |
461 | return 0; | |
462 | ||
3603efde LP |
463 | /* Before we start recursively chowning, mark the top-level dir as "busy" by chowning it to the "busy" |
464 | * range. Should we be interrupted in the middle of our work, we'll see it owned by this user and will start | |
465 | * chown()ing it again, unconditionally, as the busy UID is not a valid UID we'd everpick for ourselves. */ | |
466 | ||
467 | if ((st.st_uid & UID_BUSY_MASK) != UID_BUSY_BASE) { | |
468 | if (fchown(fd, | |
469 | UID_BUSY_BASE | (st.st_uid & ~UID_BUSY_MASK), | |
470 | (gid_t) UID_BUSY_BASE | (st.st_gid & ~(gid_t) UID_BUSY_MASK)) < 0) { | |
471 | r = -errno; | |
472 | goto finish; | |
473 | } | |
474 | } | |
475 | ||
4aeb20f5 | 476 | return recurse_fd(fd, donate_fd, &st, shift, true); |
7336138e LP |
477 | |
478 | finish: | |
479 | if (donate_fd) | |
480 | safe_close(fd); | |
481 | ||
482 | return r; | |
483 | } | |
484 | ||
485 | int fd_patch_uid(int fd, uid_t shift, uid_t range) { | |
486 | return fd_patch_uid_internal(fd, false, shift, range); | |
487 | } | |
488 | ||
489 | int path_patch_uid(const char *path, uid_t shift, uid_t range) { | |
490 | int fd; | |
491 | ||
492 | fd = open(path, O_RDONLY|O_NONBLOCK|O_DIRECTORY|O_CLOEXEC|O_NOFOLLOW|O_NOATIME); | |
493 | if (fd < 0) | |
494 | return -errno; | |
495 | ||
496 | return fd_patch_uid_internal(fd, true, shift, range); | |
497 | } |