1 /* SPDX-License-Identifier: LGPL-2.1-or-later */
3 #if HAVE_VALGRIND_MEMCHECK_H
4 #include <valgrind/memcheck.h>
9 #include <linux/blkpg.h>
11 #include <linux/loop.h>
13 #include <sys/ioctl.h>
16 #include "sd-device.h"
18 #include "alloc-util.h"
19 #include "blockdev-util.h"
20 #include "device-util.h"
21 #include "devnum-util.h"
23 #include "errno-util.h"
26 #include "loop-util.h"
27 #include "missing_loop.h"
28 #include "parse-util.h"
29 #include "random-util.h"
30 #include "stat-util.h"
31 #include "stdio-util.h"
32 #include "string-util.h"
33 #include "tmpfile-util.h"
/* Cleanup helper (for use with _cleanup_): best-effort detach of the loop
 * device's backing file via LOOP_CLR_FD, then close of the fd. Both return
 * values are deliberately ignored via (void) casts — this runs on error paths.
 * NOTE(review): this extraction elides interior lines (orig. 36-38, 41-42),
 * presumably a negative-fd guard and closing brace — confirm in full file. */
35 static void cleanup_clear_loop_close(int *fd
) {
39 (void) ioctl(*fd
, LOOP_CLR_FD
);
40 (void) safe_close(*fd
);
/* Probes via LOOP_GET_STATUS64 whether the loopback device behind 'fd'
 * currently has a backing file attached. Returns true when the ioctl
 * succeeds (bound), false when it fails (treated as "not bound").
 * NOTE(review): lines between the ioctl failure and the returns are elided
 * here; the full file likely discriminates errno values — confirm. */
43 static int loop_is_bound(int fd
) {
44 struct loop_info64 info
;
48 if (ioctl(fd
, LOOP_GET_STATUS64
, &info
) < 0) {
50 return false; /* not bound! */
55 return true; /* bound! */
/* Reads the kernel's current uevent sequence number from
 * /sys/kernel/uevent_seqnum, strips whitespace, and parses it as an
 * unsigned 64-bit integer into *ret. On read or parse failure returns a
 * negative errno-style value, logged at debug level. */
58 static int get_current_uevent_seqnum(uint64_t *ret
) {
59 _cleanup_free_
char *p
= NULL
;
62 r
= read_full_virtual_file("/sys/kernel/uevent_seqnum", &p
, NULL
);
64 return log_debug_errno(r
, "Failed to read current uevent sequence number: %m");
66 r
= safe_atou64(strstrip(p
), ret
);
68 return log_debug_errno(r
, "Failed to parse current uevent sequence number: %s", p
);
/* Returns whether the whole-disk block device 'd' currently has partition
 * block device children. Validates that 'd' itself is subsystem "block" and
 * devtype "disk", then enumerates devices whose parent is 'd', skipping
 * children that are not block devices or not partitions; returns true on the
 * first partition child found. Children whose subsystem/devtype cannot be
 * read are logged at debug level and skipped (best-effort). NOTE(review):
 * several error-return lines are elided in this extraction. */
73 static int device_has_block_children(sd_device
*d
) {
74 _cleanup_(sd_device_enumerator_unrefp
) sd_device_enumerator
*e
= NULL
;
75 const char *main_ss
, *main_dt
;
81 /* Checks if the specified device currently has block device children (i.e. partition block
84 r
= sd_device_get_subsystem(d
, &main_ss
);
88 if (!streq(main_ss
, "block"))
91 r
= sd_device_get_devtype(d
, &main_dt
);
95 if (!streq(main_dt
, "disk")) /* Refuse invocation on partition block device, insist on "whole" device */
98 r
= sd_device_enumerator_new(&e
);
102 r
= sd_device_enumerator_allow_uninitialized(e
);
106 r
= sd_device_enumerator_add_match_parent(e
, d
);
110 FOREACH_DEVICE(e
, q
) {
113 r
= sd_device_get_subsystem(q
, &ss
);
115 log_device_debug_errno(q
, r
, "Failed to get subsystem of child, ignoring: %m");
119 if (!streq(ss
, "block")) {
120 log_device_debug(q
, "Skipping child that is not a block device (subsystem=%s).", ss
);
124 r
= sd_device_get_devtype(q
, &dt
);
126 log_device_debug_errno(q
, r
, "Failed to get devtype of child, ignoring: %m");
130 if (!streq(dt
, "partition")) {
131 log_device_debug(q
, "Skipping non-partition child (devtype=%s).", dt
);
135 return true; /* we have block device children */
/* Attaches/configures a loopback device according to 'c'. Strategy:
 *  - BSD-lock the device via a separately reopened fd (so udev is retriggered
 *    on our later close()), and refuse devices that look detached but still
 *    have stale partition children (returns -EUCLEAN so the caller reattaches).
 *  - Prefer the one-shot LOOP_CONFIGURE ioctl; verify it actually honoured
 *    .lo_sizelimit (via BLKGETSIZE64) and LO_FLAGS_PARTSCAN, since kernel 5.8
 *    vanilla silently ignored both. If LOOP_CONFIGURE is unsupported or
 *    broken, remember that in *try_loop_configure and fall back to
 *    LOOP_SET_FD + LOOP_SET_STATUS64, retrying the latter on EAGAIN with
 *    randomized backoff (up to 64 attempts).
 *  - Captures the uevent seqnum and a CLOCK_MONOTONIC timestamp immediately
 *    before attachment, returned via ret_seqnum_not_before /
 *    ret_timestamp_not_before so callers can filter stale uevents.
 * NOTE(review): this extraction elides many lines (error returns, variable
 * declarations for r/seqnum/timestamp/z/fd/nr, closing braces); do not infer
 * control flow beyond what is visible. */
141 static int loop_configure(
144 const struct loop_config
*c
,
145 bool *try_loop_configure
,
146 uint64_t *ret_seqnum_not_before
,
147 usec_t
*ret_timestamp_not_before
) {
149 _cleanup_(sd_device_unrefp
) sd_device
*d
= NULL
;
150 _cleanup_free_
char *sysname
= NULL
;
151 _cleanup_close_
int lock_fd
= -1;
152 struct loop_info64 info_copy
;
160 assert(try_loop_configure
);
162 if (asprintf(&sysname
, "loop%i", nr
) < 0)
165 r
= sd_device_new_from_subsystem_sysname(&d
, "block", sysname
);
169 /* Let's lock the device before we do anything. We take the BSD lock on a second, separately opened
170 * fd for the device. udev after all watches for close() events (specifically IN_CLOSE_WRITE) on
171 * block devices to reprobe them, hence by having a separate fd we will later close() we can ensure
172 * we trigger udev after everything is done. If we'd lock our own fd instead and keep it open for a
173 * long time udev would possibly never run on it again, even though the fd is unlocked, simply
174 * because we never close() it. It also has the nice benefit we can use the _cleanup_close_ logic to
175 * automatically release the lock, after we are done. */
176 lock_fd
= fd_reopen(fd
, O_RDWR
|O_CLOEXEC
|O_NONBLOCK
|O_NOCTTY
);
179 if (flock(lock_fd
, LOCK_EX
) < 0)
182 /* Let's see if the device is really detached, i.e. currently has no associated partition block
183 * devices. On various kernels (such as 5.8) it is possible to have a loopback block device that
184 * superficially is detached but still has partition block devices associated for it. They only go
185 * away when the device is reattached. (Yes, LOOP_CLR_FD doesn't work then, because officially
186 * nothing is attached and LOOP_CTL_REMOVE doesn't either, since it doesn't care about partition
188 r
= device_has_block_children(d
);
192 r
= loop_is_bound(fd
);
198 return -EUCLEAN
; /* Bound but children? Tell caller to reattach something so that the
199 * partition block devices are gone too. */
202 if (*try_loop_configure
) {
203 /* Acquire uevent seqnum immediately before attaching the loopback device. This allows
204 * callers to ignore all uevents with a seqnum before this one, if they need to associate
205 * uevent with this attachment. Doing so isn't race-free though, as uevents that happen in
206 * the window between this reading of the seqnum, and the LOOP_CONFIGURE call might still be
207 * mistaken as originating from our attachment, even though might be caused by an earlier
208 * use. But doing this at least shortens the race window a bit. */
209 r
= get_current_uevent_seqnum(&seqnum
);
212 timestamp
= now(CLOCK_MONOTONIC
);
214 if (ioctl(fd
, LOOP_CONFIGURE
, c
) < 0) {
215 /* Do fallback only if LOOP_CONFIGURE is not supported, propagate all other
216 * errors. Note that the kernel is weird: non-existing ioctls currently return EINVAL
217 * rather than ENOTTY on loopback block devices. They should fix that in the kernel,
218 * but in the meantime we accept both here. */
219 if (!ERRNO_IS_NOT_SUPPORTED(errno
) && errno
!= EINVAL
)
222 *try_loop_configure
= false;
226 if (c
->info
.lo_sizelimit
!= 0) {
227 /* Kernel 5.8 vanilla doesn't properly propagate the size limit into the
228 * block device. If it's used, let's immediately check if it had the desired
229 * effect hence. And if not use classic LOOP_SET_STATUS64. */
232 if (ioctl(fd
, BLKGETSIZE64
, &z
) < 0) {
237 if (z
!= c
->info
.lo_sizelimit
) {
238 log_debug("LOOP_CONFIGURE is broken, doesn't honour .lo_sizelimit. Falling back to LOOP_SET_STATUS64.");
243 if (FLAGS_SET(c
->info
.lo_flags
, LO_FLAGS_PARTSCAN
)) {
244 /* Kernel 5.8 vanilla doesn't properly propagate the partition scanning flag
245 * into the block device. Let's hence verify if things work correctly here
246 * before returning. */
248 r
= blockdev_partscan_enabled(fd
);
252 log_debug("LOOP_CONFIGURE is broken, doesn't honour LO_FLAGS_PARTSCAN. Falling back to LOOP_SET_STATUS64.");
258 /* LOOP_CONFIGURE doesn't work. Remember that. */
259 *try_loop_configure
= false;
261 /* We return EBUSY here instead of retrying immediately with LOOP_SET_FD,
262 * because LOOP_CLR_FD is async: if the operation cannot be executed right
263 * away it just sets the autoclear flag on the device. This means there's a
264 * good chance we cannot actually reuse the loopback device right-away. Hence
265 * let's assume it's busy, avoid the trouble and let the calling loop call us
266 * again with a new, likely unused device. */
271 if (ret_seqnum_not_before
)
272 *ret_seqnum_not_before
= seqnum
;
273 if (ret_timestamp_not_before
)
274 *ret_timestamp_not_before
= timestamp
;
280 /* Let's read the seqnum again, to shorten the window. */
281 r
= get_current_uevent_seqnum(&seqnum
);
284 timestamp
= now(CLOCK_MONOTONIC
);
286 /* Since kernel commit 5db470e229e22b7eda6e23b5566e532c96fb5bc3 (kernel v5.0) the LOOP_SET_STATUS64
287 * ioctl can return EAGAIN in case we change the lo_offset field, if someone else is accessing the
288 * block device while we try to reconfigure it. This is a pretty common case, since udev might
289 * instantly start probing the device as soon as we attach an fd to it. Hence handle it in two ways:
290 * first, let's take the BSD lock to ensure that udev will not step in between the point in
291 * time where we attach the fd and where we reconfigure the device. Secondly, let's wait 50ms on
292 * EAGAIN and retry. The former should be an efficient mechanism to avoid we have to wait 50ms
293 * needlessly if we are just racing against udev. The latter is protection against all other cases,
294 * i.e. peers that do not take the BSD lock. */
296 if (ioctl(fd
, LOOP_SET_FD
, c
->fd
) < 0)
299 /* Only some of the flags LOOP_CONFIGURE can set are also settable via LOOP_SET_STATUS64, hence mask
302 info_copy
.lo_flags
&= LOOP_SET_STATUS_SETTABLE_FLAGS
;
304 for (unsigned n_attempts
= 0;;) {
305 if (ioctl(fd
, LOOP_SET_STATUS64
, &info_copy
) >= 0)
307 if (errno
!= EAGAIN
|| ++n_attempts
>= 64) {
308 r
= log_debug_errno(errno
, "Failed to configure loopback device: %m");
312 /* Sleep some random time, but at least 10ms, at most 250ms. Increase the delay the more
313 * failed attempts we see */
314 (void) usleep(UINT64_C(10) * USEC_PER_MSEC
+
315 random_u64_range(UINT64_C(240) * USEC_PER_MSEC
* n_attempts
/64));
318 /* Work around a kernel bug, where changing offset/size of the loopback device doesn't correctly
319 * invalidate the buffer cache. For details see:
321 * https://android.googlesource.com/platform/system/apex/+/bef74542fbbb4cd629793f4efee8e0053b360570
323 * This was fixed in kernel 5.0, see:
325 * https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=5db470e229e22b7eda6e23b5566e532c96fb5bc3
327 * We'll run the work-around here in the legacy LOOP_SET_STATUS64 codepath. In the LOOP_CONFIGURE
328 * codepath above it should not be necessary. */
329 if (c
->info
.lo_offset
!= 0 || c
->info
.lo_sizelimit
!= 0)
330 if (ioctl(fd
, BLKFLSBUF
, 0) < 0)
331 log_debug_errno(errno
, "Failed to issue BLKFLSBUF ioctl, ignoring: %m");
333 /* LO_FLAGS_DIRECT_IO is a flags we need to configure via explicit ioctls. */
334 if (FLAGS_SET(c
->info
.lo_flags
, LO_FLAGS_DIRECT_IO
)) {
337 if (ioctl(fd
, LOOP_SET_DIRECT_IO
, b
) < 0)
338 log_debug_errno(errno
, "Failed to enable direct IO mode on loopback device /dev/loop%i, ignoring: %m", nr
);
341 if (ret_seqnum_not_before
)
342 *ret_seqnum_not_before
= seqnum
;
343 if (ret_timestamp_not_before
)
344 *ret_timestamp_not_before
= timestamp
;
349 (void) ioctl(fd
, LOOP_CLR_FD
);
/* Works around a kernel quirk (observed on 5.8): a detached loop device may
 * retain stale partition children. Attaching a fresh backing file and
 * immediately detaching it makes them go away. We attach an unlinked empty
 * tmpfile read-only with partscan enabled (so the — nonexistent — partitions
 * are rescanned away), then LOOP_CLR_FD. The BSD lock taken here is released
 * by the caller's subsequent close() of 'loop', which also lets udev see the
 * changes. NOTE(review): error-return lines are elided in this extraction. */
353 static int attach_empty_file(int loop
, int nr
) {
354 _cleanup_close_
int fd
= -1;
356 /* So here's the thing: on various kernels (5.8 at least) loop block devices might enter a state
357 * where they are detached but nonetheless have partitions, when used heavily. Accessing these
358 * partitions results in immediatey IO errors. There's no pretty way to get rid of them
359 * again. Neither LOOP_CLR_FD nor LOOP_CTL_REMOVE suffice (see above). What does work is to
360 * reassociate them with a new fd however. This is what we do here hence: we associate the devices
361 * with an empty file (i.e. an image that definitely has no partitions). We then immediately clear it
362 * again. This suffices to make the partitions go away. Ugly but appears to work. */
364 log_debug("Found unattached loopback block device /dev/loop%i with partitions. Attaching empty file to remove them.", nr
);
366 fd
= open_tmpfile_unlinkable(NULL
, O_RDONLY
);
370 if (flock(loop
, LOCK_EX
) < 0)
373 if (ioctl(loop
, LOOP_SET_FD
, fd
) < 0)
376 if (ioctl(loop
, LOOP_SET_STATUS64
, &(struct loop_info64
) {
377 .lo_flags
= LO_FLAGS_READ_ONLY
|
379 LO_FLAGS_PARTSCAN
, /* enable partscan, so that the partitions really go away */
383 if (ioctl(loop
, LOOP_CLR_FD
) < 0)
386 /* The caller is expected to immediately close the loopback device after this, so that the BSD lock
387 * is released, and udev sees the changes. */
/* Core allocator: wraps the given fd in a LoopDevice. If fd is already a
 * whole block device (loop or otherwise) and offset==0 with size in {0,
 * UINT64_MAX}, no loop device is created — the node is reopened and wrapped
 * directly (marked .relinquished so we never destroy something we didn't
 * allocate). Otherwise the backing fd's O_DIRECT state is reconciled with
 * LO_FLAGS_DIRECT_IO (falling back gracefully when the fs lacks O_DIRECT),
 * and a free loop device is allocated via /dev/loop-control +
 * LOOP_CTL_GET_FREE under LOCK_EX, retrying up to 64 times with randomized
 * backoff on races (device vanished, -EBUSY from loop_configure(), stale
 * partitions cleaned via attach_empty_file()).
 * FIX(review): the loop_configure() call's last argument was corrupted by an
 * HTML-entity mangling ("&times" -> "×"), turning "&timestamp" into
 * "×tamp"; restored to "&timestamp" (matches the usec_t
 * *ret_timestamp_not_before parameter of loop_configure()).
 * NOTE(review): this extraction elides many lines (parameter list, error
 * returns, closing braces); do not infer control flow beyond the visible. */
391 static int loop_device_make_internal(
399 _cleanup_close_
int direct_io_fd
= -1;
400 _cleanup_free_
char *loopdev
= NULL
;
401 bool try_loop_configure
= true;
402 struct loop_config config
;
403 LoopDevice
*d
= NULL
;
404 uint64_t seqnum
= UINT64_MAX
;
405 usec_t timestamp
= USEC_INFINITY
;
406 int nr
= -1, r
, f_flags
;
411 assert(IN_SET(open_flags
, O_RDWR
, O_RDONLY
));
413 if (fstat(fd
, &st
) < 0)
416 if (S_ISBLK(st
.st_mode
)) {
417 if (ioctl(fd
, LOOP_GET_STATUS64
, &config
.info
) >= 0) {
418 /* Oh! This is a loopback device? That's interesting! */
420 #if HAVE_VALGRIND_MEMCHECK_H
421 /* Valgrind currently doesn't know LOOP_GET_STATUS64. Remove this once it does */
422 VALGRIND_MAKE_MEM_DEFINED(&config
.info
, sizeof(config
.info
));
424 nr
= config
.info
.lo_number
;
426 if (asprintf(&loopdev
, "/dev/loop%i", nr
) < 0)
430 if (offset
== 0 && IN_SET(size
, 0, UINT64_MAX
)) {
431 _cleanup_close_
int copy
= -1;
432 uint64_t diskseq
= 0;
434 /* If this is already a block device and we are supposed to cover the whole of it
435 * then store an fd to the original open device node — and do not actually create an
436 * unnecessary loopback device for it. Note that we reopen the inode here, instead of
437 * keeping just a dup() clone of it around, since we want to ensure that the O_DIRECT
438 * flag of the handle we keep is off, we have our own file index, and have the right
439 * read/write mode in effect. */
441 copy
= fd_reopen(fd
, open_flags
|O_NONBLOCK
|O_CLOEXEC
|O_NOCTTY
);
445 r
= fd_get_diskseq(copy
, &diskseq
);
446 if (r
< 0 && r
!= -EOPNOTSUPP
)
449 d
= new(LoopDevice
, 1);
455 .node
= TAKE_PTR(loopdev
),
456 .relinquished
= true, /* It's not allocated by us, don't destroy it when this object is freed */
459 .uevent_seqnum_not_before
= UINT64_MAX
,
460 .timestamp_not_before
= USEC_INFINITY
,
467 r
= stat_verify_regular(&st
);
472 f_flags
= fcntl(fd
, F_GETFL
);
476 if (FLAGS_SET(loop_flags
, LO_FLAGS_DIRECT_IO
) != FLAGS_SET(f_flags
, O_DIRECT
)) {
477 /* If LO_FLAGS_DIRECT_IO is requested, then make sure we have the fd open with O_DIRECT, as
478 * that's required. Conversely, if it's off require that O_DIRECT is off too (that's because
479 * new kernels will implicitly enable LO_FLAGS_DIRECT_IO if O_DIRECT is set).
481 * Our intention here is that LO_FLAGS_DIRECT_IO is the primary knob, and O_DIRECT derived
482 * from that automatically. */
484 direct_io_fd
= fd_reopen(fd
, (FLAGS_SET(loop_flags
, LO_FLAGS_DIRECT_IO
) ? O_DIRECT
: 0)|O_CLOEXEC
|O_NONBLOCK
|open_flags
);
485 if (direct_io_fd
< 0) {
486 if (!FLAGS_SET(loop_flags
, LO_FLAGS_DIRECT_IO
))
487 return log_debug_errno(errno
, "Failed to reopen file descriptor without O_DIRECT: %m");
489 /* Some file systems might not support O_DIRECT, let's gracefully continue without it then. */
490 log_debug_errno(errno
, "Failed to enable O_DIRECT for backing file descriptor for loopback device. Continuing without.");
491 loop_flags
&= ~LO_FLAGS_DIRECT_IO
;
493 fd
= direct_io_fd
; /* From now on, operate on our new O_DIRECT fd */
496 _cleanup_close_
int control
= -1;
497 _cleanup_(cleanup_clear_loop_close
) int loop_with_fd
= -1;
499 control
= open("/dev/loop-control", O_RDWR
|O_CLOEXEC
|O_NOCTTY
|O_NONBLOCK
);
503 config
= (struct loop_config
) {
506 /* Use the specified flags, but configure the read-only flag from the open flags, and force autoclear */
507 .lo_flags
= (loop_flags
& ~LO_FLAGS_READ_ONLY
) | ((open_flags
& O_ACCMODE
) == O_RDONLY
? LO_FLAGS_READ_ONLY
: 0) | LO_FLAGS_AUTOCLEAR
,
509 .lo_sizelimit
= size
== UINT64_MAX
? 0 : size
,
513 /* Loop around LOOP_CTL_GET_FREE, since at the moment we attempt to open the returned device it might
514 * be gone already, taken by somebody else racing against us. */
515 for (unsigned n_attempts
= 0;;) {
516 _cleanup_close_
int loop
= -1;
518 /* Let's take a lock on the control device first. On a busy system, where many programs
519 * attempt to allocate a loopback device at the same time, we might otherwise keep looping
520 * around relatively heavy operations: asking for a free loopback device, then opening it,
521 * validating it, attaching something to it. Let's serialize this whole operation, to make
522 * unnecessary busywork less likely. Note that this is just something we do to optimize our
523 * own code (and whoever else decides to use LOCK_EX locks for this), taking this lock is not
524 * necessary, it just means it's less likely we have to iterate through this loop again and
525 * again if our own code races against our own code. */
526 if (flock(control
, LOCK_EX
) < 0)
529 nr
= ioctl(control
, LOOP_CTL_GET_FREE
);
533 if (asprintf(&loopdev
, "/dev/loop%i", nr
) < 0)
536 loop
= open(loopdev
, O_CLOEXEC
|O_NONBLOCK
|O_NOCTTY
|open_flags
);
538 /* Somebody might've gotten the same number from the kernel, used the device,
539 * and called LOOP_CTL_REMOVE on it. Let's retry with a new number. */
540 if (!ERRNO_IS_DEVICE_ABSENT(errno
))
543 r
= loop_configure(loop
, nr
, &config
, &try_loop_configure
, &seqnum
, &timestamp
);
545 loop_with_fd
= TAKE_FD(loop
);
549 /* Make left-over partition disappear hack (see above) */
550 r
= attach_empty_file(loop
, nr
);
551 if (r
< 0 && r
!= -EBUSY
)
553 } else if (r
!= -EBUSY
)
557 /* OK, this didn't work, let's try again a bit later, but first release the lock on the
559 if (flock(control
, LOCK_UN
) < 0)
562 if (++n_attempts
>= 64) /* Give up eventually */
565 /* Now close the loop device explicitly. This will release any lock acquired by
566 * attach_empty_file() or similar, while we sleep below. */
567 loop
= safe_close(loop
);
568 loopdev
= mfree(loopdev
);
570 /* Wait some random time, to make collision less likely. Let's pick a random time in the
571 * range 0ms…250ms, linearly scaled by the number of failed attempts. */
572 (void) usleep(random_u64_range(UINT64_C(10) * USEC_PER_MSEC
+
573 UINT64_C(240) * USEC_PER_MSEC
* n_attempts
/64));
576 if (FLAGS_SET(loop_flags
, LO_FLAGS_DIRECT_IO
)) {
577 struct loop_info64 info
;
579 if (ioctl(loop_with_fd
, LOOP_GET_STATUS64
, &info
) < 0)
582 #if HAVE_VALGRIND_MEMCHECK_H
583 VALGRIND_MAKE_MEM_DEFINED(&info
, sizeof(info
));
586 /* On older kernels (<= 5.3) it was necessary to set the block size of the loopback block
587 * device to the logical block size of the underlying file system. Since there was no nice
588 * way to query the value, we are not bothering to do this however. On newer kernels the
589 * block size is propagated automatically and does not require intervention from us. We'll
590 * check here if enabling direct IO worked, to make this easily debuggable however.
592 * (Should anyone really care and actually wants direct IO on old kernels: it might be worth
593 * enabling direct IO with iteratively larger block sizes until it eventually works.) */
594 if (!FLAGS_SET(info
.lo_flags
, LO_FLAGS_DIRECT_IO
))
595 log_debug("Could not enable direct IO mode, proceeding in buffered IO mode.");
598 if (fstat(loop_with_fd
, &st
) < 0)
600 assert(S_ISBLK(st
.st_mode
));
602 uint64_t diskseq
= 0;
603 r
= fd_get_diskseq(loop_with_fd
, &diskseq
);
604 if (r
< 0 && r
!= -EOPNOTSUPP
)
607 d
= new(LoopDevice
, 1);
611 .fd
= TAKE_FD(loop_with_fd
),
612 .node
= TAKE_PTR(loopdev
),
616 .uevent_seqnum_not_before
= seqnum
,
617 .timestamp_not_before
= timestamp
,
620 log_debug("Successfully acquired %s, devno=%u:%u, nr=%i, diskseq=%" PRIu64
,
622 major(d
->devno
), minor(d
->devno
),
/* Adjusts the caller-supplied loop flags based on the $SYSTEMD_LOOP_DIRECT_IO
 * environment variable: LO_FLAGS_DIRECT_IO is set unless the variable is
 * explicitly "false"-ish (r == 0). Unset ($SYSTEMD_LOOP_DIRECT_IO missing,
 * r == -ENXIO) and parse failures (logged at debug level) both leave the
 * default of direct IO enabled. Returns the mangled flag set. */
630 static uint32_t loop_flags_mangle(uint32_t loop_flags
) {
633 r
= getenv_bool("SYSTEMD_LOOP_DIRECT_IO");
634 if (r
< 0 && r
!= -ENXIO
)
635 log_debug_errno(r
, "Failed to parse $SYSTEMD_LOOP_DIRECT_IO, ignoring: %m");
637 return UPDATE_FLAG(loop_flags
, LO_FLAGS_DIRECT_IO
, r
!= 0); /* Turn on LO_FLAGS_DIRECT_IO by default, unless explicitly configured to off. */
/* Public entry point: forwards to loop_device_make_internal() after mangling
 * the loop flags via loop_flags_mangle() (env-var controlled direct-IO knob).
 * NOTE(review): the parameter list and the other forwarded arguments are
 * elided in this extraction — confirm against the full file. */
640 int loop_device_make(
651 return loop_device_make_internal(
656 loop_flags_mangle(loop_flags
),
/* Opens 'path' and wraps it in a LoopDevice via loop_device_make_internal().
 * open_flags < 0 means "writable if possible": tries O_RDWR first, then
 * retries O_RDONLY on permission errors or -EROFS. Independently, O_DIRECT is
 * attempted when LO_FLAGS_DIRECT_IO is requested (after env mangling) and
 * dropped transparently if the filesystem refuses it. The chosen access mode
 * and direct-IO outcome are logged at debug level. NOTE(review): several
 * error-return lines are elided in this extraction. */
660 int loop_device_make_by_path(
666 int r
, basic_flags
, direct_flags
, rdwr_flags
;
667 _cleanup_close_
int fd
= -1;
672 assert(open_flags
< 0 || IN_SET(open_flags
, O_RDWR
, O_RDONLY
));
674 /* Passing < 0 as open_flags here means we'll try to open the device writable if we can, retrying
675 * read-only if we cannot. */
677 loop_flags
= loop_flags_mangle(loop_flags
);
679 /* Let's open with O_DIRECT if we can. But not all file systems support that, hence fall back to
680 * non-O_DIRECT mode automatically, if it fails. */
682 basic_flags
= O_CLOEXEC
|O_NONBLOCK
|O_NOCTTY
;
683 direct_flags
= FLAGS_SET(loop_flags
, LO_FLAGS_DIRECT_IO
) ? O_DIRECT
: 0;
684 rdwr_flags
= open_flags
>= 0 ? open_flags
: O_RDWR
;
686 fd
= open(path
, basic_flags
|direct_flags
|rdwr_flags
);
687 if (fd
< 0 && direct_flags
!= 0) /* If we had O_DIRECT on, and things failed with that, let's immediately try again without */
688 fd
= open(path
, basic_flags
|rdwr_flags
);
690 direct
= direct_flags
!= 0;
694 /* Retry read-only? */
695 if (open_flags
>= 0 || !(ERRNO_IS_PRIVILEGE(r
) || r
== -EROFS
))
698 fd
= open(path
, basic_flags
|direct_flags
|O_RDONLY
);
699 if (fd
< 0 && direct_flags
!= 0) /* as above */
700 fd
= open(path
, basic_flags
|O_RDONLY
);
702 direct
= direct_flags
!= 0;
704 return r
; /* Propagate original error */
706 open_flags
= O_RDONLY
;
707 } else if (open_flags
< 0)
710 log_debug("Opened '%s' in %s access mode%s, with O_DIRECT %s%s.",
712 open_flags
== O_RDWR
? "O_RDWR" : "O_RDONLY",
713 open_flags
!= rdwr_flags
? " (O_RDWR was requested but not allowed)" : "",
714 direct
? "enabled" : "disabled",
715 direct
!= (direct_flags
!= 0) ? " (O_DIRECT was requested but not supported)" : "");
717 return loop_device_make_internal(fd
, open_flags
, 0, 0, loop_flags
, ret
);
/* Destructor for a LoopDevice. fsync()s the device (best-effort, logged at
 * debug level on failure), then — only if we allocated it ourselves
 * (nr >= 0 and not .relinquished) — detaches the backing file via
 * LOOP_CLR_FD and removes the device through /dev/loop-control with
 * LOOP_CTL_REMOVE, retrying on EBUSY up to 64 times with a 50ms sleep.
 * NOTE(review): the free of 'd' and the NULL-return path are elided in this
 * extraction. */
720 LoopDevice
* loop_device_unref(LoopDevice
*d
) {
725 /* Implicitly sync the device, since otherwise in-flight blocks might not get written */
726 if (fsync(d
->fd
) < 0)
727 log_debug_errno(errno
, "Failed to sync loop block device, ignoring: %m");
729 if (d
->nr
>= 0 && !d
->relinquished
) {
730 if (ioctl(d
->fd
, LOOP_CLR_FD
) < 0)
731 log_debug_errno(errno
, "Failed to clear loop device: %m");
738 if (d
->nr
>= 0 && !d
->relinquished
) {
739 _cleanup_close_
int control
= -1;
741 control
= open("/dev/loop-control", O_RDWR
|O_CLOEXEC
|O_NOCTTY
|O_NONBLOCK
);
743 log_warning_errno(errno
,
744 "Failed to open loop control device, cannot remove loop device %s: %m",
747 for (unsigned n_attempts
= 0;;) {
748 if (ioctl(control
, LOOP_CTL_REMOVE
, d
->nr
) >= 0)
750 if (errno
!= EBUSY
|| ++n_attempts
>= 64) {
751 log_warning_errno(errno
, "Failed to remove device %s: %m", strna(d
->node
));
754 (void) usleep(50 * USEC_PER_MSEC
);
/* Marks the device so that loop_device_unref() will not detach/remove it;
 * cleanup is left to the kernel's auto-clear (LO_FLAGS_AUTOCLEAR) logic
 * enabled at creation time. */
762 void loop_device_relinquish(LoopDevice
*d
) {
765 /* Don't attempt to clean up the loop device anymore from this point on. Leave the clean-ing up to the kernel
766 * itself, using the loop device "auto-clear" logic we already turned on when creating the device. */
768 d
->relinquished
= true;
/* Wraps an already-existing loopback device node (by path) in a LoopDevice,
 * without creating or configuring anything. Opens the node, verifies it is a
 * block device, and queries LOOP_GET_STATUS64 (success presumably fills in
 * nr/diskseq fields — the lines using 'info' afterwards are elided here).
 * The returned object is marked .relinquished, so unref never destroys the
 * device. NOTE(review): error returns, OOM checks and *ret assignment are
 * elided in this extraction. */
771 int loop_device_open(const char *loop_path
, int open_flags
, LoopDevice
**ret
) {
772 _cleanup_close_
int loop_fd
= -1;
773 _cleanup_free_
char *p
= NULL
;
774 struct loop_info64 info
;
780 assert(IN_SET(open_flags
, O_RDWR
, O_RDONLY
));
783 loop_fd
= open(loop_path
, O_CLOEXEC
|O_NONBLOCK
|O_NOCTTY
|open_flags
);
787 if (fstat(loop_fd
, &st
) < 0)
789 if (!S_ISBLK(st
.st_mode
))
792 if (ioctl(loop_fd
, LOOP_GET_STATUS64
, &info
) >= 0) {
793 #if HAVE_VALGRIND_MEMCHECK_H
794 /* Valgrind currently doesn't know LOOP_GET_STATUS64. Remove this once it does */
795 VALGRIND_MAKE_MEM_DEFINED(&info
, sizeof(info
));
801 p
= strdup(loop_path
);
805 d
= new(LoopDevice
, 1);
810 .fd
= TAKE_FD(loop_fd
),
813 .relinquished
= true, /* It's not ours, don't try to destroy it when this object is freed */
815 .uevent_seqnum_not_before
= UINT64_MAX
,
816 .timestamp_not_before
= USEC_INFINITY
,
/* Resizes/moves the partition that 'partition_fd' refers to, via the BLKPG
 * BLKPG_RESIZE_PARTITION ioctl on the enclosing whole-block device. Reads
 * partition number, start sector (converted from 512-byte sectors to bytes,
 * with overflow check) and current size from sysfs, short-circuits when both
 * offset and size are UINT64_MAX or already match, resolves the parent
 * device via ".../../dev", and issues the resize. Returns -ENOENT-derived
 * result when the fd is not a partition at all.
 * FIX(review): two identifiers were corrupted by HTML-entity mangling
 * ("&curren" -> "¤"): "¤t_offset" and "¤t_size" restored to
 * "&current_offset" and "&current_size", matching the uint64_t
 * current_offset/current_size declared above and used below.
 * NOTE(review): this extraction elides many lines (error returns, the
 * declarations of r/st/devno, struct field initializers, closing braces). */
823 static int resize_partition(int partition_fd
, uint64_t offset
, uint64_t size
) {
824 char sysfs
[STRLEN("/sys/dev/block/:/partition") + 2*DECIMAL_STR_MAX(dev_t
) + 1];
825 _cleanup_free_
char *whole
= NULL
, *buffer
= NULL
;
826 uint64_t current_offset
, current_size
, partno
;
827 _cleanup_close_
int whole_fd
= -1;
832 assert(partition_fd
>= 0);
834 /* Resizes the partition the loopback device refer to (assuming it refers to one instead of an actual
835 * loopback device), and changes the offset, if needed. This is a fancy wrapper around
836 * BLKPG_RESIZE_PARTITION. */
838 if (fstat(partition_fd
, &st
) < 0)
841 assert(S_ISBLK(st
.st_mode
));
843 xsprintf(sysfs
, "/sys/dev/block/%u:%u/partition", major(st
.st_rdev
), minor(st
.st_rdev
));
844 r
= read_one_line_file(sysfs
, &buffer
);
845 if (r
== -ENOENT
) /* not a partition, cannot resize */
849 r
= safe_atou64(buffer
, &partno
);
853 xsprintf(sysfs
, "/sys/dev/block/%u:%u/start", major(st
.st_rdev
), minor(st
.st_rdev
));
855 buffer
= mfree(buffer
);
856 r
= read_one_line_file(sysfs
, &buffer
);
859 r
= safe_atou64(buffer
, &current_offset
);
862 if (current_offset
> UINT64_MAX
/512U)
864 current_offset
*= 512U;
866 if (ioctl(partition_fd
, BLKGETSIZE64
, &current_size
) < 0)
869 if (size
== UINT64_MAX
&& offset
== UINT64_MAX
)
871 if (current_size
== size
&& current_offset
== offset
)
874 xsprintf(sysfs
, "/sys/dev/block/%u:%u/../dev", major(st
.st_rdev
), minor(st
.st_rdev
));
876 buffer
= mfree(buffer
);
877 r
= read_one_line_file(sysfs
, &buffer
);
880 r
= parse_devnum(buffer
, &devno
);
884 r
= device_path_make_major_minor(S_IFBLK
, devno
, &whole
);
888 whole_fd
= open(whole
, O_RDWR
|O_CLOEXEC
|O_NONBLOCK
|O_NOCTTY
);
892 struct blkpg_partition bp
= {
894 .start
= offset
== UINT64_MAX
? current_offset
: offset
,
895 .length
= size
== UINT64_MAX
? current_size
: size
,
898 struct blkpg_ioctl_arg ba
= {
899 .op
= BLKPG_RESIZE_PARTITION
,
901 .datalen
= sizeof(bp
),
904 return RET_NERRNO(ioctl(whole_fd
, BLKPG
, &ba
));
/* Updates the loop device's offset and/or size limit relative to its backing
 * file. UINT64_MAX for either parameter means "leave unchanged". If 'd' is
 * not actually a loopback device (nr < 0), delegates to resize_partition()
 * instead. Short-circuits when nothing would change, otherwise applies the
 * new values via LOOP_SET_STATUS64. NOTE(review): error returns and early
 * "return 0" lines are elided in this extraction. */
907 int loop_device_refresh_size(LoopDevice
*d
, uint64_t offset
, uint64_t size
) {
908 struct loop_info64 info
;
911 /* Changes the offset/start of the loop device relative to the beginning of the underlying file or
912 * block device. If this loop device actually refers to a partition and not a loopback device, we'll
913 * try to adjust the partition offsets instead.
915 * If either offset or size is UINT64_MAX we won't change that parameter. */
920 if (d
->nr
< 0) /* not a loopback device */
921 return resize_partition(d
->fd
, offset
, size
);
923 if (ioctl(d
->fd
, LOOP_GET_STATUS64
, &info
) < 0)
926 #if HAVE_VALGRIND_MEMCHECK_H
927 /* Valgrind currently doesn't know LOOP_GET_STATUS64. Remove this once it does */
928 VALGRIND_MAKE_MEM_DEFINED(&info
, sizeof(info
));
931 if (size
== UINT64_MAX
&& offset
== UINT64_MAX
)
933 if (info
.lo_sizelimit
== size
&& info
.lo_offset
== offset
)
936 if (size
!= UINT64_MAX
)
937 info
.lo_sizelimit
= size
;
938 if (offset
!= UINT64_MAX
)
939 info
.lo_offset
= offset
;
941 return RET_NERRNO(ioctl(d
->fd
, LOOP_SET_STATUS64
, &info
));
/* Applies a BSD flock() operation (LOCK_SH/LOCK_EX/LOCK_UN, per flock(2)) to
 * the loop device fd; returns 0 or a negative errno via RET_NERRNO. */
944 int loop_device_flock(LoopDevice
*d
, int operation
) {
950 return RET_NERRNO(flock(d
->fd
, operation
));
/* Explicitly fsync()s the loop device so callers can observe the result;
 * loop_device_unref() does the same implicitly but discards the error. */
953 int loop_device_sync(LoopDevice
*d
) {
956 /* We also do this implicitly in loop_device_unref(). Doing this explicitly here has the benefit that
957 * we can check the return value though. */
962 return RET_NERRNO(fsync(d
->fd
));