src/shutdown/umount.c
1 /* SPDX-License-Identifier: LGPL-2.1-or-later */
2 /***
3 Copyright © 2010 ProFUSION embedded systems
4 ***/
5
6 #include <errno.h>
7 #include <fcntl.h>
8 #include <linux/dm-ioctl.h>
9 #include <linux/major.h>
10 #include <linux/raid/md_u.h>
11 #include <linux/loop.h>
12 #include <sys/mount.h>
13 #include <sys/swap.h>
14 #include <sys/stat.h>
15 #include <sys/types.h>
16 #include <unistd.h>
17
18 #if HAVE_VALGRIND_MEMCHECK_H
19 #include <valgrind/memcheck.h>
20 #endif
21
22 #include "sd-device.h"
23
24 #include "alloc-util.h"
25 #include "blockdev-util.h"
26 #include "def.h"
27 #include "device-util.h"
28 #include "dirent-util.h"
29 #include "escape.h"
30 #include "fd-util.h"
31 #include "fileio.h"
32 #include "fs-util.h"
33 #include "fstab-util.h"
34 #include "libmount-util.h"
35 #include "mount-setup.h"
36 #include "mount-util.h"
37 #include "mountpoint-util.h"
38 #include "parse-util.h"
39 #include "path-util.h"
40 #include "process-util.h"
41 #include "signal-util.h"
42 #include "string-util.h"
43 #include "strv.h"
44 #include "sync-util.h"
45 #include "umount.h"
46 #include "virt.h"
47
48 static void mount_point_free(MountPoint **head, MountPoint *m) {
49 assert(head);
50 assert(m);
51
52 LIST_REMOVE(mount_point, *head, m);
53
54 free(m->path);
55 free(m->remount_options);
56 free(m);
57 }
58
59 void mount_points_list_free(MountPoint **head) {
60 assert(head);
61
62 while (*head)
63 mount_point_free(head, *head);
64 }
65
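/* Build the list of mount points to unmount from the given mountinfo file (or /proc/self/mountinfo if
 * NULL), using libmount. Entries are prepended, so the head of the list ends up being the most recently
 * established mount. For each entry we also record whether a read-only remount should be attempted first
 * and whether the unmount should be lazy (for API VFS mounts). */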
66 int mount_points_list_get(const char *mountinfo, MountPoint **head) {
67 _cleanup_(mnt_free_tablep) struct libmnt_table *table = NULL;
68 _cleanup_(mnt_free_iterp) struct libmnt_iter *iter = NULL;
69 int r;
70
71 assert(head);
72
73 r = libmount_parse(mountinfo, NULL, &table, &iter);
74 if (r < 0)
75 return log_error_errno(r, "Failed to parse %s: %m", mountinfo ?: "/proc/self/mountinfo");
76
77 for (;;) {
78 _cleanup_free_ char *options = NULL, *remount_options = NULL;
79 struct libmnt_fs *fs;
80 const char *path, *fstype;
81 unsigned long remount_flags = 0u;
82 bool try_remount_ro, is_api_vfs;
83 _cleanup_free_ MountPoint *m = NULL;
84
85 r = mnt_table_next_fs(table, iter, &fs);
86 if (r == 1) /* EOF */
87 break;
88 if (r < 0)
89 return log_error_errno(r, "Failed to get next entry from %s: %m", mountinfo ?: "/proc/self/mountinfo");
90
91 path = mnt_fs_get_target(fs);
92 if (!path)
93 continue;
94
95 fstype = mnt_fs_get_fstype(fs);
96
97 /* Combine the generic VFS options with the FS-specific options. Duplicates are not a problem
98 * here, because the only options that should come up twice are typically ro/rw, which are
99 * turned into MS_RDONLY or the inversion of it.
100 *
101 * Even if there are duplicates later in mount_option_mangle(), they shouldn't hurt anyway, as
102 * they simply override each other. */
103 if (!strextend_with_separator(&options, ",", mnt_fs_get_vfs_options(fs)))
104 return log_oom();
105 if (!strextend_with_separator(&options, ",", mnt_fs_get_fs_options(fs)))
106 return log_oom();
107
108 /* Ignore mount points we can't unmount because they are API or because we are keeping them
109 * open (like /dev/console). Also, ignore all mounts below API file systems, since they are
110 * likely virtual too, and hence not worth spending time on. Also, in unprivileged containers
111 * we might lack the rights to unmount these things, hence don't bother. */
112 if (mount_point_is_api(path) ||
113 mount_point_ignore(path) ||
114 PATH_STARTSWITH_SET(path, "/dev", "/sys", "/proc"))
115 continue;
116
117 is_api_vfs = fstype_is_api_vfs(fstype);
118
119 /* If we are in a container, don't attempt to remount anything read-only, as that brings no real
120 * benefit but might confuse the host, since we remount the superblock here, not the bind
121 * mount.
122 *
123 * If the filesystem is a network fs, also skip the remount. It brings no value (we cannot
124 * leave a "dirty fs") and could hang if the network is down. Note that umount2() is more
125 * careful and will not hang because of the network being down. */
126 try_remount_ro = detect_container() <= 0 &&
127 !fstype_is_network(fstype) &&
128 !is_api_vfs &&
129 !fstype_is_ro(fstype) &&
130 !fstab_test_yes_no_option(options, "ro\0rw\0");
131
132 if (try_remount_ro) {
133 /* mount(2) states that mount flags and options need to be exactly the same as they
134 * were when the filesystem was mounted, except for the desired changes. So we
135 * reconstruct both here and adjust them for the later remount call too. */
136
137 r = mnt_fs_get_propagation(fs, &remount_flags);
138 if (r < 0) {
139 log_warning_errno(r, "mnt_fs_get_propagation() failed for %s, ignoring: %m", path);
140 continue;
141 }
142
143 r = mount_option_mangle(options, remount_flags, &remount_flags, &remount_options);
144 if (r < 0) {
145 log_warning_errno(r, "mount_option_mangle failed for %s, ignoring: %m", path);
146 continue;
147 }
148
149 /* MS_BIND is special. If it is provided it will only make the mount-point
150 * read-only. If left out, the super block itself is remounted, which we want. */
151 remount_flags = (remount_flags|MS_REMOUNT|MS_RDONLY) & ~MS_BIND;
152 }
153
154 m = new(MountPoint, 1);
155 if (!m)
156 return log_oom();
157
158 *m = (MountPoint) {
159 .remount_options = remount_options,
160 .remount_flags = remount_flags,
161 .try_remount_ro = try_remount_ro,
162
163 /* Unmount sysfs/procfs/… lazily, since syncing doesn't matter there, and it's OK if
164 * something keeps an fd open to it. */
165 .umount_lazily = is_api_vfs,
166 };
167
168 m->path = strdup(path);
169 if (!m->path)
170 return log_oom();
171
172 TAKE_PTR(remount_options);
173
174 LIST_PREPEND(mount_point, *head, TAKE_PTR(m));
175 }
176
177 return 0;
178 }
179
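/* Build the list of active swap devices and files from the given file (or /proc/swaps if NULL). A
 * missing /proc/swaps is not an error. */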
180 int swap_list_get(const char *swaps, MountPoint **head) {
181 _cleanup_(mnt_free_tablep) struct libmnt_table *t = NULL;
182 _cleanup_(mnt_free_iterp) struct libmnt_iter *i = NULL;
183 int r;
184
185 assert(head);
186
187 t = mnt_new_table();
188 i = mnt_new_iter(MNT_ITER_FORWARD);
189 if (!t || !i)
190 return log_oom();
191
192 r = mnt_table_parse_swaps(t, swaps);
193 if (r == -ENOENT) /* no /proc/swaps is fine */
194 return 0;
195 if (r < 0)
196 return log_error_errno(r, "Failed to parse %s: %m", swaps ?: "/proc/swaps");
197
198 for (;;) {
199 struct libmnt_fs *fs;
200 _cleanup_free_ MountPoint *swap = NULL;
201 const char *source;
202
203 r = mnt_table_next_fs(t, i, &fs);
204 if (r == 1) /* EOF */
205 break;
206 if (r < 0)
207 return log_error_errno(r, "Failed to get next entry from %s: %m", swaps ?: "/proc/swaps");
208
209 source = mnt_fs_get_source(fs);
210 if (!source)
211 continue;
212
213 swap = new0(MountPoint, 1);
214 if (!swap)
215 return log_oom();
216
217 swap->path = strdup(source);
218 if (!swap->path)
219 return log_oom();
220
221 LIST_PREPEND(mount_point, *head, TAKE_PTR(swap));
222 }
223
224 return 0;
225 }
226
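/* Enumerate all loop block devices that currently have a backing file attached (i.e. that expose the
 * "loop/backing_file" sysattr), recording their device node and device number. */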
227 static int loopback_list_get(MountPoint **head) {
228 _cleanup_(sd_device_enumerator_unrefp) sd_device_enumerator *e = NULL;
229 sd_device *d;
230 int r;
231
232 assert(head);
233
234 r = sd_device_enumerator_new(&e);
235 if (r < 0)
236 return r;
237
238 r = sd_device_enumerator_allow_uninitialized(e);
239 if (r < 0)
240 return r;
241
242 r = sd_device_enumerator_add_match_subsystem(e, "block", true);
243 if (r < 0)
244 return r;
245
246 r = sd_device_enumerator_add_match_sysname(e, "loop*");
247 if (r < 0)
248 return r;
249
250 r = sd_device_enumerator_add_match_sysattr(e, "loop/backing_file", NULL, true);
251 if (r < 0)
252 return r;
253
254 FOREACH_DEVICE(e, d) {
255 _cleanup_free_ char *p = NULL;
256 const char *dn;
257 MountPoint *lb;
258 dev_t devnum;
259
260 if (sd_device_get_devnum(d, &devnum) < 0 ||
261 sd_device_get_devname(d, &dn) < 0)
262 continue;
263
264 p = strdup(dn);
265 if (!p)
266 return -ENOMEM;
267
268 lb = new(MountPoint, 1);
269 if (!lb)
270 return -ENOMEM;
271
272 *lb = (MountPoint) {
273 .path = TAKE_PTR(p),
274 .devnum = devnum,
275 };
276
277 LIST_PREPEND(mount_point, *head, lb);
278 }
279
280 return 0;
281 }
282
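/* Enumerate all device-mapper block devices (dm-*), recording their device node and device number. */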
283 static int dm_list_get(MountPoint **head) {
284 _cleanup_(sd_device_enumerator_unrefp) sd_device_enumerator *e = NULL;
285 sd_device *d;
286 int r;
287
288 assert(head);
289
290 r = sd_device_enumerator_new(&e);
291 if (r < 0)
292 return r;
293
294 r = sd_device_enumerator_allow_uninitialized(e);
295 if (r < 0)
296 return r;
297
298 r = sd_device_enumerator_add_match_subsystem(e, "block", true);
299 if (r < 0)
300 return r;
301
302 r = sd_device_enumerator_add_match_sysname(e, "dm-*");
303 if (r < 0)
304 return r;
305
306 FOREACH_DEVICE(e, d) {
307 _cleanup_free_ char *p = NULL;
308 const char *dn;
309 MountPoint *m;
310 dev_t devnum;
311
312 if (sd_device_get_devnum(d, &devnum) < 0 ||
313 sd_device_get_devname(d, &dn) < 0)
314 continue;
315
316 p = strdup(dn);
317 if (!p)
318 return -ENOMEM;
319
320 m = new(MountPoint, 1);
321 if (!m)
322 return -ENOMEM;
323
324 *m = (MountPoint) {
325 .path = TAKE_PTR(p),
326 .devnum = devnum,
327 };
328
329 LIST_PREPEND(mount_point, *head, m);
330 }
331
332 return 0;
333 }
334
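/* Enumerate MD block devices to stop, skipping partitions (DEVTYPE=disk only) and MD containers. */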
335 static int md_list_get(MountPoint **head) {
336 _cleanup_(sd_device_enumerator_unrefp) sd_device_enumerator *e = NULL;
337 sd_device *d;
338 int r;
339
340 assert(head);
341
342 r = sd_device_enumerator_new(&e);
343 if (r < 0)
344 return r;
345
346 r = sd_device_enumerator_allow_uninitialized(e);
347 if (r < 0)
348 return r;
349
350 r = sd_device_enumerator_add_match_subsystem(e, "block", true);
351 if (r < 0)
352 return r;
353
354 r = sd_device_enumerator_add_match_sysname(e, "md*");
355 if (r < 0)
356 return r;
357
358 /* Filter out partitions. */
359 r = sd_device_enumerator_add_match_property(e, "DEVTYPE", "disk");
360 if (r < 0)
361 return r;
362
363 FOREACH_DEVICE(e, d) {
364 _cleanup_free_ char *p = NULL;
365 const char *dn, *md_level;
366 MountPoint *m;
367 dev_t devnum;
368
369 if (sd_device_get_devnum(d, &devnum) < 0 ||
370 sd_device_get_devname(d, &dn) < 0)
371 continue;
372
373 r = sd_device_get_property_value(d, "MD_LEVEL", &md_level);
374 if (r < 0) {
375 log_warning_errno(r, "Failed to get MD_LEVEL property for %s, ignoring: %m", dn);
376 continue;
377 }
378
379 /* MD "containers" are a special type of MD devices, used for external metadata. Since it
380 * doesn't provide RAID functionality in itself we don't need to stop it. */
381 if (streq(md_level, "container"))
382 continue;
383
384 p = strdup(dn);
385 if (!p)
386 return -ENOMEM;
387
388 m = new(MountPoint, 1);
389 if (!m)
390 return -ENOMEM;
391
392 *m = (MountPoint) {
393 .path = TAKE_PTR(p),
394 .devnum = devnum,
395 };
396
397 LIST_PREPEND(mount_point, *head, m);
398 }
399
400 return 0;
401 }
402
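/* Detach the backing file from the given loop device: sync it first, then issue LOOP_CLR_FD. If the
 * device is busy, set LO_FLAGS_AUTOCLEAR instead, so the kernel detaches it once the last user is gone.
 * Returns > 0 if the device was detached, 0 if nothing was attached, -EBUSY if it is still busy, and
 * another negative errno on other failures. */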
403 static int delete_loopback(const char *device) {
404 _cleanup_close_ int fd = -1;
405 struct loop_info64 info;
406
407 assert(device);
408
409 fd = open(device, O_RDONLY|O_CLOEXEC);
410 if (fd < 0) {
411 log_debug_errno(errno, "Failed to open loopback device %s: %m", device);
412 return errno == ENOENT ? 0 : -errno;
413 }
414
415 /* Loopback block devices don't sync in-flight blocks when we clear the fd, hence sync explicitly
416 * first */
417 if (fsync(fd) < 0)
418 log_debug_errno(errno, "Failed to sync loop block device %s, ignoring: %m", device);
419
420 if (ioctl(fd, LOOP_CLR_FD, 0) < 0) {
421 if (errno == ENXIO) /* Nothing bound, didn't do anything */
422 return 0;
423
424 if (errno != EBUSY)
425 return log_debug_errno(errno, "Failed to clear loopback device %s: %m", device);
426
427 if (ioctl(fd, LOOP_GET_STATUS64, &info) < 0) {
428 if (errno == ENXIO) /* What? Suddenly detached after all? That's fine by us then. */
429 return 1;
430
431 log_debug_errno(errno, "Failed to invoke LOOP_GET_STATUS64 on loopback device %s, ignoring: %m", device);
432 return -EBUSY; /* propagate original error */
433 }
434
435 #if HAVE_VALGRIND_MEMCHECK_H
436 VALGRIND_MAKE_MEM_DEFINED(&info, sizeof(info));
437 #endif
438
439 if (FLAGS_SET(info.lo_flags, LO_FLAGS_AUTOCLEAR)) /* someone else already set LO_FLAGS_AUTOCLEAR for us? fine by us */
440 return -EBUSY; /* propagate original error */
441
442 info.lo_flags |= LO_FLAGS_AUTOCLEAR;
443 if (ioctl(fd, LOOP_SET_STATUS64, &info) < 0) {
444 if (errno == ENXIO) /* Suddenly detached after all? Fine by us */
445 return 1;
446
447 log_debug_errno(errno, "Failed to set LO_FLAGS_AUTOCLEAR flag for loop device %s, ignoring: %m", device);
448 } else
449 log_debug("Successfully set LO_FLAGS_AUTOCLEAR flag for loop device %s.", device);
450
451 return -EBUSY;
452 }
453
454 if (ioctl(fd, LOOP_GET_STATUS64, &info) < 0) {
455 /* If the LOOP_CLR_FD above succeeded we'll see ENXIO here. */
456 if (errno == ENXIO)
457 log_debug("Successfully detached loopback device %s.", device);
458 else
459 log_debug_errno(errno, "Failed to invoke LOOP_GET_STATUS64 on loopback device %s, ignoring: %m", device); /* the LOOP_CLR_FD at least worked, let's hope for the best */
460
461 return 1;
462 }
463
464 #if HAVE_VALGRIND_MEMCHECK_H
465 VALGRIND_MAKE_MEM_DEFINED(&info, sizeof(info));
466 #endif
467
468 /* Linux makes LOOP_CLR_FD succeed whenever LO_FLAGS_AUTOCLEAR is set without actually doing
469 * anything. Very confusing. Let's hence not claim we did anything in this case. */
470 if (FLAGS_SET(info.lo_flags, LO_FLAGS_AUTOCLEAR))
471 log_debug("Successfully called LOOP_CLR_FD on a loopback device %s with autoclear set, which is a NOP.", device);
472 else
473 log_debug("Weird, LOOP_CLR_FD succeeded but the device is still attached on %s.", device);
474
475 return -EBUSY; /* Nothing changed, the device is still attached, hence it apparently is still busy */
476 }
477
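/* Remove a device-mapper device: sync it via its path first, then issue DM_DEV_REMOVE for its device
 * number on /dev/mapper/control. */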
478 static int delete_dm(MountPoint *m) {
479 _cleanup_close_ int fd = -1;
480 int r;
481
482 assert(m);
483 assert(major(m->devnum) != 0);
484 assert(m->path);
485
486 fd = open("/dev/mapper/control", O_RDWR|O_CLOEXEC);
487 if (fd < 0)
488 return -errno;
489
490 r = fsync_path_at(AT_FDCWD, m->path);
491 if (r < 0)
492 log_debug_errno(r, "Failed to sync DM block device %s, ignoring: %m", m->path);
493
494 return RET_NERRNO(ioctl(fd, DM_DEV_REMOVE, &(struct dm_ioctl) {
495 .version = {
496 DM_VERSION_MAJOR,
497 DM_VERSION_MINOR,
498 DM_VERSION_PATCHLEVEL
499 },
500 .data_size = sizeof(struct dm_ioctl),
501 .dev = m->devnum,
502 }));
503 }
504
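/* Stop an MD array: open the device exclusively, sync it, then issue the STOP_ARRAY ioctl. */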
505 static int delete_md(MountPoint *m) {
506 _cleanup_close_ int fd = -1;
507
508 assert(m);
509 assert(major(m->devnum) != 0);
510 assert(m->path);
511
512 fd = open(m->path, O_RDONLY|O_CLOEXEC|O_EXCL);
513 if (fd < 0)
514 return -errno;
515
516 if (fsync(fd) < 0)
517 log_debug_errno(errno, "Failed to sync MD block device %s, ignoring: %m", m->path);
518
519 return RET_NERRNO(ioctl(fd, STOP_ARRAY, NULL));
520 }
521
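/* Paths we never unmount, since we may still be running from them: / (and /usr on non-split-usr
 * builds), plus anything below /run/initramfs. */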
522 static bool nonunmountable_path(const char *path) {
523 return path_equal(path, "/")
524 #if ! HAVE_SPLIT_USR
525 || path_equal(path, "/usr")
526 #endif
527 || path_startswith(path, "/run/initramfs");
528 }
529
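/* Scan /proc/<pid>/fd/ of all processes and log a warning listing those that still hold files open
 * below the given mount point. Used to explain why an unmount failed with EBUSY on the last try. */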
530 static void log_umount_blockers(const char *mnt) {
531 _cleanup_free_ char *blockers = NULL;
532 int r;
533
534 _cleanup_closedir_ DIR *dir = opendir("/proc");
535 if (!dir)
536 return (void) log_warning_errno(errno, "Failed to open /proc/: %m");
537
538 FOREACH_DIRENT_ALL(de, dir, break) {
539 if (!IN_SET(de->d_type, DT_DIR, DT_UNKNOWN))
540 continue;
541
542 pid_t pid;
543 if (parse_pid(de->d_name, &pid) < 0)
544 continue;
545
546 _cleanup_free_ char *fdp = path_join(de->d_name, "fd");
547 if (!fdp)
548 return (void) log_oom();
549
550 _cleanup_closedir_ DIR *fd_dir = xopendirat(dirfd(dir), fdp, 0);
551 if (!fd_dir) {
552 if (errno != ENOENT) /* process gone by now? */
553 log_debug_errno(errno, "Failed to open /proc/%s/, ignoring: %m",fdp);
554 continue;
555 }
556
557 bool culprit = false;
558 FOREACH_DIRENT(fd_de, fd_dir, break) {
559 _cleanup_free_ char *open_file = NULL;
560
561 r = readlinkat_malloc(dirfd(fd_dir), fd_de->d_name, &open_file);
562 if (r < 0) {
563 if (r != -ENOENT) /* fd closed by now */
564 log_debug_errno(r, "Failed to read link /proc/%s/%s, ignoring: %m", fdp, fd_de->d_name);
565 continue;
566 }
567
568 if (path_startswith(open_file, mnt)) {
569 culprit = true;
570 break;
571 }
572 }
573
574 if (!culprit)
575 continue;
576
577 _cleanup_free_ char *comm = NULL;
578 r = get_process_comm(pid, &comm);
579 if (r < 0) {
580 if (r != -ESRCH) /* process gone by now */
581 log_debug_errno(r, "Failed to read process name of PID " PID_FMT ": %m", pid);
582 continue;
583 }
584
585 if (!strextend_with_separator(&blockers, ", ", comm))
586 return (void) log_oom();
587
588 if (!strextend(&blockers, "(", de->d_name, ")"))
589 return (void) log_oom();
590 }
591
592 if (blockers)
593 log_warning("Unmounting '%s' blocked by: %s", mnt, blockers);
594 }
595
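/* Remount the given mount point read-only in a forked child and wait for it with a timeout, so that a
 * hanging remount (e.g. on an unreachable network file system) cannot block shutdown indefinitely. */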
596 static int remount_with_timeout(MountPoint *m, bool last_try) {
597 pid_t pid;
598 int r;
599
600 BLOCK_SIGNALS(SIGCHLD);
601
602 assert(m);
603
604 /* Due to the possibility of a remount operation hanging, we fork a child process and set a
605 * timeout. If the timeout lapses, the assumption is that the particular remount failed. */
606 r = safe_fork("(sd-remount)", FORK_RESET_SIGNALS|FORK_CLOSE_ALL_FDS|FORK_LOG|FORK_REOPEN_LOG, &pid);
607 if (r < 0)
608 return r;
609 if (r == 0) {
610 log_info("Remounting '%s' read-only with options '%s'.", m->path, strempty(m->remount_options));
611
612 /* Start the remount operation here in the child */
613 r = mount(NULL, m->path, NULL, m->remount_flags, m->remount_options);
614 if (r < 0)
615 log_full_errno(last_try ? LOG_ERR : LOG_INFO,
616 errno,
617 "Failed to remount '%s' read-only: %m",
618 m->path);
619
620 _exit(r < 0 ? EXIT_FAILURE : EXIT_SUCCESS);
621 }
622
623 r = wait_for_terminate_with_timeout(pid, DEFAULT_TIMEOUT_USEC);
624 if (r == -ETIMEDOUT) {
625 log_error_errno(r, "Remounting '%s' timed out, issuing SIGKILL to PID " PID_FMT ".", m->path, pid);
626 (void) kill(pid, SIGKILL);
627 } else if (r == -EPROTO)
628 log_debug_errno(r, "Remounting '%s' failed abnormally, child process " PID_FMT " aborted or exited non-zero.", m->path, pid);
629 else if (r < 0)
630 log_error_errno(r, "Remounting '%s' failed unexpectedly, couldn't wait for child process " PID_FMT ": %m", m->path, pid);
631
632 return r;
633 }
634
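/* Like remount_with_timeout(), but unmounts: forced (MNT_FORCE) for regular file systems, lazy
 * (MNT_DETACH) for API VFS mounts. */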
635 static int umount_with_timeout(MountPoint *m, bool last_try) {
636 pid_t pid;
637 int r;
638
639 BLOCK_SIGNALS(SIGCHLD);
640
641 assert(m);
642
643 /* Due to the possibility of a umount operation hanging, we fork a child process and set a
644 * timeout. If the timeout lapses, the assumption is that the particular umount failed. */
645 r = safe_fork("(sd-umount)", FORK_RESET_SIGNALS|FORK_CLOSE_ALL_FDS|FORK_LOG|FORK_REOPEN_LOG, &pid);
646 if (r < 0)
647 return r;
648 if (r == 0) {
649 log_info("Unmounting '%s'.", m->path);
650
651 /* Start the umount operation here in the child. Using MNT_FORCE causes some filesystems
652 * (e.g. FUSE, NFS and other network filesystems) to abort any pending requests and return
653 * -EIO rather than blocking indefinitely. If the filesystem is "busy", this may allow
654 * processes to die, thus making the filesystem less busy so the unmount might succeed
655 * (rather than return EBUSY). */
656 r = RET_NERRNO(umount2(m->path,
657 UMOUNT_NOFOLLOW | /* Don't follow symlinks: this should never happen unless our mount list was wrong */
658 (m->umount_lazily ? MNT_DETACH : MNT_FORCE)));
659 if (r < 0) {
660 log_full_errno(last_try ? LOG_ERR : LOG_INFO, r, "Failed to unmount %s: %m", m->path);
661
662 if (r == -EBUSY && last_try)
663 log_umount_blockers(m->path);
664 }
665
666 _exit(r < 0 ? EXIT_FAILURE : EXIT_SUCCESS);
667 }
668
669 r = wait_for_terminate_with_timeout(pid, DEFAULT_TIMEOUT_USEC);
670 if (r == -ETIMEDOUT) {
671 log_error_errno(r, "Unmounting '%s' timed out, issuing SIGKILL to PID " PID_FMT ".", m->path, pid);
672 (void) kill(pid, SIGKILL);
673 } else if (r == -EPROTO)
674 log_debug_errno(r, "Unmounting '%s' failed abnormally, child process " PID_FMT " aborted or exited non-zero.", m->path, pid);
675 else if (r < 0)
676 log_error_errno(r, "Unmounting '%s' failed unexpectedly, couldn't wait for child process " PID_FMT ": %m", m->path, pid);
677
678 return r;
679 }
680
681 /* This includes remounting read-only, which changes the kernel mount options. Therefore the list passed to
682 * this function is invalidated, and should not be reused. */
683 static int mount_points_list_umount(MountPoint **head, bool *changed, bool last_try) {
684 int n_failed = 0;
685
686 assert(head);
687 assert(changed);
688
689 LIST_FOREACH(mount_point, m, *head) {
690 if (m->try_remount_ro) {
691 /* We always try to remount directories read-only first, before we go on and umount
692 * them.
693 *
694 * Mount points can be stacked. If a mount point is stacked below / or /usr, we
695 * cannot umount or remount it directly, since there is no way to refer to the
696 * underlying mount. There's nothing we can do about it for the general case, but we
697 * can do something about it if it is aliased somewhere else via a bind mount. If we
698 * explicitly remount the super block of that alias read-only we hence should be
699 * relatively safe regarding keeping a dirty fs we cannot otherwise see.
700 *
701 * Since the remount can hang in the case of remote filesystems, we do it in a forked child
702 * with a timeout; if it fails we still try the umount below, unless this is a path we must keep. */
703 if (remount_with_timeout(m, last_try) < 0) {
704 /* Remount failed, but try unmounting anyway,
705 * unless this is a mount point we want to skip. */
706 if (nonunmountable_path(m->path)) {
707 n_failed++;
708 continue;
709 }
710 }
711 }
712
713 /* Skip / and /usr since we cannot unmount them anyway, as we are running from them. They
714 * have already been remounted read-only above. */
715 if (nonunmountable_path(m->path))
716 continue;
717
718 /* Trying to umount */
719 if (umount_with_timeout(m, last_try) < 0)
720 n_failed++;
721 else
722 *changed = true;
723 }
724
725 return n_failed;
726 }
727
728 static int swap_points_list_off(MountPoint **head, bool *changed) {
729 int n_failed = 0;
730
731 assert(head);
732 assert(changed);
733
734 LIST_FOREACH(mount_point, m, *head) {
735 log_info("Deactivating swap %s.", m->path);
736 if (swapoff(m->path) < 0) {
737 log_warning_errno(errno, "Could not deactivate swap %s: %m", m->path);
738 n_failed++;
739 continue;
740 }
741
742 *changed = true;
743 mount_point_free(head, m);
744 }
745
746 return n_failed;
747 }
748
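/* Detach all listed loop devices, except one that backs the root file system (if any). Successfully
 * detached entries are removed from the list. */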
749 static int loopback_points_list_detach(MountPoint **head, bool *changed, bool last_try) {
750 int n_failed = 0, r;
751 dev_t rootdev = 0;
752
753 assert(head);
754 assert(changed);
755
756 (void) get_block_device("/", &rootdev);
757
758 LIST_FOREACH(mount_point, m, *head) {
759 if (major(rootdev) != 0 && rootdev == m->devnum) {
760 n_failed++;
761 continue;
762 }
763
764 log_info("Detaching loopback %s.", m->path);
765 r = delete_loopback(m->path);
766 if (r < 0) {
767 log_full_errno(last_try ? LOG_ERR : LOG_INFO, r, "Could not detach loopback %s: %m", m->path);
768 n_failed++;
769 continue;
770 }
771 if (r > 0)
772 *changed = true;
773
774 mount_point_free(head, m);
775 }
776
777 return n_failed;
778 }
779
780 static int dm_points_list_detach(MountPoint **head, bool *changed, bool last_try) {
781 int n_failed = 0, r;
782 dev_t rootdev = 0;
783
784 assert(head);
785 assert(changed);
786
787 (void) get_block_device("/", &rootdev);
788
789 LIST_FOREACH(mount_point, m, *head) {
790 if (major(rootdev) != 0 && rootdev == m->devnum) {
791 n_failed++;
792 continue;
793 }
794
795 log_info("Detaching DM %s (%u:%u).", m->path, major(m->devnum), minor(m->devnum));
796 r = delete_dm(m);
797 if (r < 0) {
798 log_full_errno(last_try ? LOG_ERR : LOG_INFO, r, "Could not detach DM %s: %m", m->path);
799 n_failed++;
800 continue;
801 }
802
803 *changed = true;
804 mount_point_free(head, m);
805 }
806
807 return n_failed;
808 }
809
810 static int md_points_list_detach(MountPoint **head, bool *changed, bool last_try) {
811 int n_failed = 0, r;
812 dev_t rootdev = 0;
813
814 assert(head);
815 assert(changed);
816
817 (void) get_block_device("/", &rootdev);
818
819 LIST_FOREACH(mount_point, m, *head) {
820 if (major(rootdev) != 0 && rootdev == m->devnum) {
821 n_failed++;
822 continue;
823 }
824
825 log_info("Stopping MD %s (%u:%u).", m->path, major(m->devnum), minor(m->devnum));
826 r = delete_md(m);
827 if (r < 0) {
828 log_full_errno(last_try ? LOG_ERR : LOG_INFO, r, "Could not stop MD %s: %m", m->path);
829 n_failed++;
830 continue;
831 }
832
833 *changed = true;
834 mount_point_free(head, m);
835 }
836
837 return n_failed;
838 }
839
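/* One pass: rebuild the mount list from /proc/self/mountinfo and try to remount read-only/unmount
 * everything on it. */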
840 static int umount_all_once(bool *changed, bool last_try) {
841 _cleanup_(mount_points_list_free) LIST_HEAD(MountPoint, mp_list_head);
842 int r;
843
844 assert(changed);
845
846 LIST_HEAD_INIT(mp_list_head);
847 r = mount_points_list_get(NULL, &mp_list_head);
848 if (r < 0)
849 return r;
850
851 return mount_points_list_umount(&mp_list_head, changed, last_try);
852 }
853
854 int umount_all(bool *changed, bool last_try) {
855 bool umount_changed;
856 int r;
857
858 assert(changed);
859
860 /* Retry umount, until nothing can be umounted anymore. Mounts are processed in order, newest
861 * first. The retries are needed when an old mount has been moved to a path inside a newer mount. */
862 do {
863 umount_changed = false;
864
865 r = umount_all_once(&umount_changed, last_try);
866 if (umount_changed)
867 *changed = true;
868 } while (umount_changed);
869
870 return r;
871 }
872
873 int swapoff_all(bool *changed) {
874 _cleanup_(mount_points_list_free) LIST_HEAD(MountPoint, swap_list_head);
875 int r;
876
877 assert(changed);
878
879 LIST_HEAD_INIT(swap_list_head);
880
881 r = swap_list_get(NULL, &swap_list_head);
882 if (r < 0)
883 return r;
884
885 return swap_points_list_off(&swap_list_head, changed);
886 }
887
888 int loopback_detach_all(bool *changed, bool last_try) {
889 _cleanup_(mount_points_list_free) LIST_HEAD(MountPoint, loopback_list_head);
890 int r;
891
892 assert(changed);
893
894 LIST_HEAD_INIT(loopback_list_head);
895
896 r = loopback_list_get(&loopback_list_head);
897 if (r < 0)
898 return r;
899
900 return loopback_points_list_detach(&loopback_list_head, changed, last_try);
901 }
902
903 int dm_detach_all(bool *changed, bool last_try) {
904 _cleanup_(mount_points_list_free) LIST_HEAD(MountPoint, dm_list_head);
905 int r;
906
907 assert(changed);
908
909 LIST_HEAD_INIT(dm_list_head);
910
911 r = dm_list_get(&dm_list_head);
912 if (r < 0)
913 return r;
914
915 return dm_points_list_detach(&dm_list_head, changed, last_try);
916 }
917
918 int md_detach_all(bool *changed, bool last_try) {
919 _cleanup_(mount_points_list_free) LIST_HEAD(MountPoint, md_list_head);
920 int r;
921
922 assert(changed);
923
924 LIST_HEAD_INIT(md_list_head);
925
926 r = md_list_get(&md_list_head);
927 if (r < 0)
928 return r;
929
930 return md_points_list_detach(&md_list_head, changed, last_try);
931 }