/* SPDX-License-Identifier: LGPL-2.1-or-later */

#if HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include <errno.h>
#include <fcntl.h>
#include <linux/blkpg.h>
#include <linux/fs.h>
#include <linux/loop.h>
#include <sys/file.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "sd-device.h"

#include "alloc-util.h"
#include "blockdev-util.h"
#include "device-util.h"
#include "devnum-util.h"
#include "env-util.h"
#include "errno-util.h"
#include "fd-util.h"
#include "fileio.h"
#include "loop-util.h"
#include "missing_loop.h"
#include "parse-util.h"
#include "random-util.h"
#include "stat-util.h"
#include "stdio-util.h"
#include "string-util.h"
#include "tmpfile-util.h"

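/* Cleanup handler for _cleanup_ use: detaches whatever is attached to the loopback device (LOOP_CLR_FD) and
 * closes the fd, ignoring errors in both steps. */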
static void cleanup_clear_loop_close(int *fd) {
        if (*fd < 0)
                return;

        (void) ioctl(*fd, LOOP_CLR_FD);
        (void) safe_close(*fd);
}

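/* Returns > 0 if the loopback device currently has a backing file attached, 0 if it is unbound, negative
 * errno on error. */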
static int loop_is_bound(int fd) {
        struct loop_info64 info;

        assert(fd >= 0);

        if (ioctl(fd, LOOP_GET_STATUS64, &info) < 0) {
                if (errno == ENXIO)
                        return false; /* not bound! */

                return -errno;
        }

        return true; /* bound! */
}

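/* Reads the kernel's current global uevent sequence number from sysfs, so that callers can later ignore
 * uevents that were queued before a given point in time. */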
static int get_current_uevent_seqnum(uint64_t *ret) {
        _cleanup_free_ char *p = NULL;
        int r;

        r = read_full_virtual_file("/sys/kernel/uevent_seqnum", &p, NULL);
        if (r < 0)
                return log_debug_errno(r, "Failed to read current uevent sequence number: %m");

        r = safe_atou64(strstrip(p), ret);
        if (r < 0)
                return log_debug_errno(r, "Failed to parse current uevent sequence number: %s", p);

        return 0;
}

static int device_has_block_children(sd_device *d) {
        _cleanup_(sd_device_enumerator_unrefp) sd_device_enumerator *e = NULL;
        const char *main_ss, *main_dt;
        int r;

        assert(d);

        /* Checks if the specified device currently has block device children (i.e. partition block
         * devices). */

        r = sd_device_get_subsystem(d, &main_ss);
        if (r < 0)
                return r;

        if (!streq(main_ss, "block"))
                return -EINVAL;

        r = sd_device_get_devtype(d, &main_dt);
        if (r < 0)
                return r;

        if (!streq(main_dt, "disk")) /* Refuse invocation on partition block device, insist on "whole" device */
                return -EINVAL;

        r = sd_device_enumerator_new(&e);
        if (r < 0)
                return r;

        r = sd_device_enumerator_allow_uninitialized(e);
        if (r < 0)
                return r;

        r = sd_device_enumerator_add_match_parent(e, d);
        if (r < 0)
                return r;

        r = sd_device_enumerator_add_match_subsystem(e, "block", /* match = */ true);
        if (r < 0)
                return r;

        r = sd_device_enumerator_add_match_property(e, "DEVTYPE", "partition");
        if (r < 0)
                return r;

        return !!sd_device_enumerator_get_device_first(e);
}

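/* Attaches the backing fd in 'c' to the loopback device /dev/loop<nr> (already open as 'fd'). Prefers the
 * atomic LOOP_CONFIGURE ioctl where available, and falls back to LOOP_SET_FD + LOOP_SET_STATUS64 on kernels
 * where LOOP_CONFIGURE is unavailable or does not work correctly. On success also returns the uevent seqnum
 * and timestamp taken right before the attachment, so that callers can filter out earlier uevents. */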
static int loop_configure(
                int fd,
                int nr,
                const struct loop_config *c,
                bool *try_loop_configure,
                uint64_t *ret_seqnum_not_before,
                usec_t *ret_timestamp_not_before) {

        _cleanup_(sd_device_unrefp) sd_device *d = NULL;
        _cleanup_free_ char *sysname = NULL;
        _cleanup_close_ int lock_fd = -1;
        struct loop_info64 info_copy;
        uint64_t seqnum;
        usec_t timestamp;
        int r;

        assert(fd >= 0);
        assert(nr >= 0);
        assert(c);
        assert(try_loop_configure);

        if (asprintf(&sysname, "loop%i", nr) < 0)
                return -ENOMEM;

        r = sd_device_new_from_subsystem_sysname(&d, "block", sysname);
        if (r < 0)
                return r;

        /* Let's lock the device before we do anything. We take the BSD lock on a second, separately opened
         * fd for the device. udev after all watches for close() events (specifically IN_CLOSE_WRITE) on
         * block devices to reprobe them, hence by having a separate fd we will later close() we can ensure
         * we trigger udev after everything is done. If we'd lock our own fd instead and keep it open for a
         * long time udev would possibly never run on it again, even though the fd is unlocked, simply
         * because we never close() it. It also has the nice benefit we can use the _cleanup_close_ logic to
         * automatically release the lock, after we are done. */
        lock_fd = fd_reopen(fd, O_RDWR|O_CLOEXEC|O_NONBLOCK|O_NOCTTY);
        if (lock_fd < 0)
                return lock_fd;
        if (flock(lock_fd, LOCK_EX) < 0)
                return -errno;

        /* Let's see if the device is really detached, i.e. currently has no associated partition block
         * devices. On various kernels (such as 5.8) it is possible to have a loopback block device that
         * superficially is detached but still has partition block devices associated for it. They only go
         * away when the device is reattached. (Yes, LOOP_CLR_FD doesn't work then, because officially
         * nothing is attached and LOOP_CTL_REMOVE doesn't either, since it doesn't care about partition
         * block devices.) */
        r = device_has_block_children(d);
        if (r < 0)
                return r;
        if (r > 0) {
                r = loop_is_bound(fd);
                if (r < 0)
                        return r;
                if (r > 0)
                        return -EBUSY;

                return -EUCLEAN; /* Not bound, but has children? Tell caller to reattach something so that
                                  * the partition block devices are gone too. */
        }

        if (*try_loop_configure) {
                /* Acquire the uevent seqnum immediately before attaching the loopback device. This allows
                 * callers to ignore all uevents with a seqnum before this one, if they need to associate
                 * uevents with this attachment. Doing so isn't race-free though, as uevents that happen in
                 * the window between this reading of the seqnum, and the LOOP_CONFIGURE call might still be
                 * mistaken as originating from our attachment, even though they might be caused by an
                 * earlier use. But doing this at least shortens the race window a bit. */
                r = get_current_uevent_seqnum(&seqnum);
                if (r < 0)
                        return r;
                timestamp = now(CLOCK_MONOTONIC);

                if (ioctl(fd, LOOP_CONFIGURE, c) < 0) {
                        /* Do fallback only if LOOP_CONFIGURE is not supported, propagate all other
                         * errors. Note that the kernel is weird: non-existing ioctls currently return EINVAL
                         * rather than ENOTTY on loopback block devices. They should fix that in the kernel,
                         * but in the meantime we accept both here. */
                        if (!ERRNO_IS_NOT_SUPPORTED(errno) && errno != EINVAL)
                                return -errno;

                        *try_loop_configure = false;
                } else {
                        bool good = true;

                        if (c->info.lo_sizelimit != 0) {
                                /* Kernel 5.8 vanilla doesn't properly propagate the size limit into the
                                 * block device. If it's used, let's immediately check whether it had the
                                 * desired effect, and if not, fall back to the classic LOOP_SET_STATUS64. */
                                uint64_t z;

                                if (ioctl(fd, BLKGETSIZE64, &z) < 0) {
                                        r = -errno;
                                        goto fail;
                                }

                                if (z != c->info.lo_sizelimit) {
                                        log_debug("LOOP_CONFIGURE is broken, doesn't honour .lo_sizelimit. Falling back to LOOP_SET_STATUS64.");
                                        good = false;
                                }
                        }

                        if (FLAGS_SET(c->info.lo_flags, LO_FLAGS_PARTSCAN)) {
                                /* Kernel 5.8 vanilla doesn't properly propagate the partition scanning flag
                                 * into the block device. Let's hence verify if things work correctly here
                                 * before returning. */

                                r = blockdev_partscan_enabled(fd);
                                if (r < 0)
                                        goto fail;
                                if (r == 0) {
                                        log_debug("LOOP_CONFIGURE is broken, doesn't honour LO_FLAGS_PARTSCAN. Falling back to LOOP_SET_STATUS64.");
                                        good = false;
                                }
                        }

                        if (!good) {
                                /* LOOP_CONFIGURE doesn't work. Remember that. */
                                *try_loop_configure = false;

                                /* We return EBUSY here instead of retrying immediately with LOOP_SET_FD,
                                 * because LOOP_CLR_FD is async: if the operation cannot be executed right
                                 * away it just sets the autoclear flag on the device. This means there's a
                                 * good chance we cannot actually reuse the loopback device right-away. Hence
                                 * let's assume it's busy, avoid the trouble and let the calling loop call us
                                 * again with a new, likely unused device. */
                                r = -EBUSY;
                                goto fail;
                        }

                        if (ret_seqnum_not_before)
                                *ret_seqnum_not_before = seqnum;
                        if (ret_timestamp_not_before)
                                *ret_timestamp_not_before = timestamp;

                        return 0;
                }
        }

        /* Let's read the seqnum again, to shorten the window. */
        r = get_current_uevent_seqnum(&seqnum);
        if (r < 0)
                return r;
        timestamp = now(CLOCK_MONOTONIC);

        /* Since kernel commit 5db470e229e22b7eda6e23b5566e532c96fb5bc3 (kernel v5.0) the LOOP_SET_STATUS64
         * ioctl can return EAGAIN in case we change the lo_offset field, if someone else is accessing the
         * block device while we try to reconfigure it. This is a pretty common case, since udev might
         * instantly start probing the device as soon as we attach an fd to it. Hence handle it in two ways:
         * first, let's take the BSD lock to ensure that udev will not step in between the point in
         * time where we attach the fd and where we reconfigure the device. Secondly, let's wait a bit on
         * EAGAIN and retry. The former should be an efficient mechanism to avoid having to wait needlessly
         * if we are just racing against udev. The latter is protection against all other cases, i.e. peers
         * that do not take the BSD lock. */

        if (ioctl(fd, LOOP_SET_FD, c->fd) < 0)
                return -errno;

        /* Only some of the flags LOOP_CONFIGURE can set are also settable via LOOP_SET_STATUS64, hence mask
         * out the rest. */
        info_copy = c->info;
        info_copy.lo_flags &= LOOP_SET_STATUS_SETTABLE_FLAGS;

        for (unsigned n_attempts = 0;;) {
                if (ioctl(fd, LOOP_SET_STATUS64, &info_copy) >= 0)
                        break;
                if (errno != EAGAIN || ++n_attempts >= 64) {
                        r = log_debug_errno(errno, "Failed to configure loopback block device: %m");
                        goto fail;
                }

                /* Sleep some random time, but at least 10ms, at most 250ms. Increase the delay the more
                 * failed attempts we see. */
                (void) usleep(UINT64_C(10) * USEC_PER_MSEC +
                              random_u64_range(UINT64_C(240) * USEC_PER_MSEC * n_attempts/64));
        }

        /* Work around a kernel bug, where changing offset/size of the loopback device doesn't correctly
         * invalidate the buffer cache. For details see:
         *
         * https://android.googlesource.com/platform/system/apex/+/bef74542fbbb4cd629793f4efee8e0053b360570
         *
         * This was fixed in kernel 5.0, see:
         *
         * https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=5db470e229e22b7eda6e23b5566e532c96fb5bc3
         *
         * We'll run the work-around here in the legacy LOOP_SET_STATUS64 codepath. In the LOOP_CONFIGURE
         * codepath above it should not be necessary. */
        if (c->info.lo_offset != 0 || c->info.lo_sizelimit != 0)
                if (ioctl(fd, BLKFLSBUF, 0) < 0)
                        log_debug_errno(errno, "Failed to issue BLKFLSBUF ioctl, ignoring: %m");

        /* LO_FLAGS_DIRECT_IO is a flag we need to configure via an explicit ioctl. */
        if (FLAGS_SET(c->info.lo_flags, LO_FLAGS_DIRECT_IO)) {
                unsigned long b = 1;

                if (ioctl(fd, LOOP_SET_DIRECT_IO, b) < 0)
                        log_debug_errno(errno, "Failed to enable direct IO mode on loopback device /dev/loop%i, ignoring: %m", nr);
        }

        if (ret_seqnum_not_before)
                *ret_seqnum_not_before = seqnum;
        if (ret_timestamp_not_before)
                *ret_timestamp_not_before = timestamp;

        return 0;

fail:
        (void) ioctl(fd, LOOP_CLR_FD);
        return r;
}

static int attach_empty_file(int loop, int nr) {
        _cleanup_close_ int fd = -1;

        /* So here's the thing: on various kernels (5.8 at least) loop block devices might enter a state
         * where they are detached but nonetheless have partitions, when used heavily. Accessing these
         * partitions results in immediate IO errors. There's no pretty way to get rid of them
         * again. Neither LOOP_CLR_FD nor LOOP_CTL_REMOVE suffice (see above). What does work, however, is to
         * reassociate them with a new fd. Hence this is what we do here: we associate the devices
         * with an empty file (i.e. an image that definitely has no partitions). We then immediately clear it
         * again. This suffices to make the partitions go away. Ugly but appears to work. */

        log_debug("Found unattached loopback block device /dev/loop%i with partitions. Attaching empty file to remove them.", nr);

        fd = open_tmpfile_unlinkable(NULL, O_RDONLY);
        if (fd < 0)
                return fd;

        if (flock(loop, LOCK_EX) < 0)
                return -errno;

        if (ioctl(loop, LOOP_SET_FD, fd) < 0)
                return -errno;

        if (ioctl(loop, LOOP_SET_STATUS64, &(struct loop_info64) {
                        .lo_flags = LO_FLAGS_READ_ONLY|
                                    LO_FLAGS_AUTOCLEAR|
                                    LO_FLAGS_PARTSCAN, /* enable partscan, so that the partitions really go away */
                  }) < 0)
                return -errno;

        if (ioctl(loop, LOOP_CLR_FD) < 0)
                return -errno;

        /* The caller is expected to immediately close the loopback device after this, so that the BSD lock
         * is released, and udev sees the changes. */
        return 0;
}

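/* Allocates and configures a loopback device for the specified backing fd, and returns a LoopDevice object
 * plus its open fd. If the fd already refers to a whole block device and no offset/size constraint is given,
 * no loopback device is created; the existing device is simply reopened and wrapped instead. */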
static int loop_device_make_internal(
                int fd,
                int open_flags,
                uint64_t offset,
                uint64_t size,
                uint32_t loop_flags,
                LoopDevice **ret) {

        _cleanup_close_ int direct_io_fd = -1;
        _cleanup_free_ char *loopdev = NULL;
        bool try_loop_configure = true;
        struct loop_config config;
        LoopDevice *d = NULL;
        uint64_t seqnum = UINT64_MAX;
        usec_t timestamp = USEC_INFINITY;
        int nr = -1, r, f_flags;
        struct stat st;

        assert(fd >= 0);
        assert(ret);
        assert(IN_SET(open_flags, O_RDWR, O_RDONLY));

        if (fstat(fd, &st) < 0)
                return -errno;

        if (S_ISBLK(st.st_mode)) {
                if (ioctl(fd, LOOP_GET_STATUS64, &config.info) >= 0) {
                        /* Oh! This is a loopback device? That's interesting! */

#if HAVE_VALGRIND_MEMCHECK_H
                        /* Valgrind currently doesn't know LOOP_GET_STATUS64. Remove this once it does */
                        VALGRIND_MAKE_MEM_DEFINED(&config.info, sizeof(config.info));
#endif
                        nr = config.info.lo_number;

                        if (asprintf(&loopdev, "/dev/loop%i", nr) < 0)
                                return -ENOMEM;
                }

                if (offset == 0 && IN_SET(size, 0, UINT64_MAX)) {
                        _cleanup_close_ int copy = -1;
                        uint64_t diskseq = 0;

                        /* If this is already a block device and we are supposed to cover the whole of it
                         * then store an fd to the original open device node — and do not actually create an
                         * unnecessary loopback device for it. Note that we reopen the inode here, instead of
                         * keeping just a dup() clone of it around, since we want to ensure that the O_DIRECT
                         * flag of the handle we keep is off, we have our own file index, and have the right
                         * read/write mode in effect. */

                        copy = fd_reopen(fd, open_flags|O_NONBLOCK|O_CLOEXEC|O_NOCTTY);
                        if (copy < 0)
                                return copy;

                        r = fd_get_diskseq(copy, &diskseq);
                        if (r < 0 && r != -EOPNOTSUPP)
                                return r;

                        d = new(LoopDevice, 1);
                        if (!d)
                                return -ENOMEM;
                        *d = (LoopDevice) {
                                .fd = TAKE_FD(copy),
                                .nr = nr,
                                .node = TAKE_PTR(loopdev),
                                .relinquished = true, /* It's not allocated by us, don't destroy it when this object is freed */
                                .devno = st.st_rdev,
                                .diskseq = diskseq,
                                .uevent_seqnum_not_before = UINT64_MAX,
                                .timestamp_not_before = USEC_INFINITY,
                        };

                        *ret = d;
                        return d->fd;
                }
        } else {
                r = stat_verify_regular(&st);
                if (r < 0)
                        return r;
        }

        f_flags = fcntl(fd, F_GETFL);
        if (f_flags < 0)
                return -errno;

        if (FLAGS_SET(loop_flags, LO_FLAGS_DIRECT_IO) != FLAGS_SET(f_flags, O_DIRECT)) {
                /* If LO_FLAGS_DIRECT_IO is requested, then make sure we have the fd open with O_DIRECT, as
                 * that's required. Conversely, if it's off require that O_DIRECT is off too (that's because
                 * new kernels will implicitly enable LO_FLAGS_DIRECT_IO if O_DIRECT is set).
                 *
                 * Our intention here is that LO_FLAGS_DIRECT_IO is the primary knob, and O_DIRECT is derived
                 * from that automatically. */

                direct_io_fd = fd_reopen(fd, (FLAGS_SET(loop_flags, LO_FLAGS_DIRECT_IO) ? O_DIRECT : 0)|O_CLOEXEC|O_NONBLOCK|open_flags);
                if (direct_io_fd < 0) {
                        if (!FLAGS_SET(loop_flags, LO_FLAGS_DIRECT_IO))
                                return log_debug_errno(errno, "Failed to reopen file descriptor without O_DIRECT: %m");

                        /* Some file systems might not support O_DIRECT, let's gracefully continue without it then. */
                        log_debug_errno(errno, "Failed to enable O_DIRECT for backing file descriptor for loopback device. Continuing without.");
                        loop_flags &= ~LO_FLAGS_DIRECT_IO;
                } else
                        fd = direct_io_fd; /* From now on, operate on our new O_DIRECT fd */
        }

        _cleanup_close_ int control = -1;
        _cleanup_(cleanup_clear_loop_close) int loop_with_fd = -1;

        control = open("/dev/loop-control", O_RDWR|O_CLOEXEC|O_NOCTTY|O_NONBLOCK);
        if (control < 0)
                return -errno;

        config = (struct loop_config) {
                .fd = fd,
                .info = {
                        /* Use the specified flags, but configure the read-only flag from the open flags, and force autoclear */
                        .lo_flags = (loop_flags & ~LO_FLAGS_READ_ONLY) | ((open_flags & O_ACCMODE) == O_RDONLY ? LO_FLAGS_READ_ONLY : 0) | LO_FLAGS_AUTOCLEAR,
                        .lo_offset = offset,
                        .lo_sizelimit = size == UINT64_MAX ? 0 : size,
                },
        };

        /* Loop around LOOP_CTL_GET_FREE, since at the moment we attempt to open the returned device it might
         * be gone already, taken by somebody else racing against us. */
        for (unsigned n_attempts = 0;;) {
                _cleanup_close_ int loop = -1;

                /* Let's take a lock on the control device first. On a busy system, where many programs
                 * attempt to allocate a loopback device at the same time, we might otherwise keep looping
                 * around relatively heavy operations: asking for a free loopback device, then opening it,
                 * validating it, attaching something to it. Let's serialize this whole operation, to make
                 * unnecessary busywork less likely. Note that this is just something we do to optimize our
                 * own code (and whoever else decides to use LOCK_EX locks for this), taking this lock is not
                 * necessary, it just means it's less likely we have to iterate through this loop again and
                 * again if our own code races against our own code. */
                if (flock(control, LOCK_EX) < 0)
                        return -errno;

                nr = ioctl(control, LOOP_CTL_GET_FREE);
                if (nr < 0)
                        return -errno;

                if (asprintf(&loopdev, "/dev/loop%i", nr) < 0)
                        return -ENOMEM;

                loop = open(loopdev, O_CLOEXEC|O_NONBLOCK|O_NOCTTY|open_flags);
                if (loop < 0) {
                        /* Somebody might've gotten the same number from the kernel, used the device,
                         * and called LOOP_CTL_REMOVE on it. Let's retry with a new number. */
                        if (!ERRNO_IS_DEVICE_ABSENT(errno))
                                return -errno;
                } else {
                        r = loop_configure(loop, nr, &config, &try_loop_configure, &seqnum, &timestamp);
                        if (r >= 0) {
                                loop_with_fd = TAKE_FD(loop);
                                break;
                        }
                        if (r == -EUCLEAN) {
                                /* Make left-over partition disappear hack (see above) */
                                r = attach_empty_file(loop, nr);
                                if (r < 0 && r != -EBUSY)
                                        return r;
                        } else if (r != -EBUSY)
                                return r;
                }

                /* OK, this didn't work, let's try again a bit later, but first release the lock on the
                 * control device */
                if (flock(control, LOCK_UN) < 0)
                        return -errno;

                if (++n_attempts >= 64) /* Give up eventually */
                        return -EBUSY;

                /* Now close the loop device explicitly. This will release any lock acquired by
                 * attach_empty_file() or similar, while we sleep below. */
                loop = safe_close(loop);
                loopdev = mfree(loopdev);

                /* Wait some random time, to make collision less likely. Let's pick a random time in the
                 * range 0ms…250ms, linearly scaled by the number of failed attempts. */
                (void) usleep(random_u64_range(UINT64_C(10) * USEC_PER_MSEC +
                                               UINT64_C(240) * USEC_PER_MSEC * n_attempts/64));
        }

        if (FLAGS_SET(loop_flags, LO_FLAGS_DIRECT_IO)) {
                struct loop_info64 info;

                if (ioctl(loop_with_fd, LOOP_GET_STATUS64, &info) < 0)
                        return -errno;

#if HAVE_VALGRIND_MEMCHECK_H
                VALGRIND_MAKE_MEM_DEFINED(&info, sizeof(info));
#endif

                /* On older kernels (<= 5.3) it was necessary to set the block size of the loopback block
                 * device to the logical block size of the underlying file system. Since there was no nice
                 * way to query the value, we are not bothering to do this however. On newer kernels the
                 * block size is propagated automatically and does not require intervention from us. We'll
                 * check here if enabling direct IO worked, to make this easily debuggable however.
                 *
                 * (Should anyone really care and actually wants direct IO on old kernels: it might be worth
                 * enabling direct IO with iteratively larger block sizes until it eventually works.) */
                if (!FLAGS_SET(info.lo_flags, LO_FLAGS_DIRECT_IO))
                        log_debug("Could not enable direct IO mode, proceeding in buffered IO mode.");
        }

        if (fstat(loop_with_fd, &st) < 0)
                return -errno;
        assert(S_ISBLK(st.st_mode));

        uint64_t diskseq = 0;
        r = fd_get_diskseq(loop_with_fd, &diskseq);
        if (r < 0 && r != -EOPNOTSUPP)
                return r;

        d = new(LoopDevice, 1);
        if (!d)
                return -ENOMEM;
        *d = (LoopDevice) {
                .fd = TAKE_FD(loop_with_fd),
                .node = TAKE_PTR(loopdev),
                .nr = nr,
                .devno = st.st_rdev,
                .diskseq = diskseq,
                .uevent_seqnum_not_before = seqnum,
                .timestamp_not_before = timestamp,
        };

        log_debug("Successfully acquired %s, devno=%u:%u, nr=%i, diskseq=%" PRIu64,
                  d->node,
                  major(d->devno), minor(d->devno),
                  d->nr,
                  d->diskseq);

        *ret = d;
        return d->fd;
}

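/* Applies the $SYSTEMD_LOOP_DIRECT_IO environment variable to the requested loop flags: LO_FLAGS_DIRECT_IO
 * is enabled by default and only turned off if the variable is explicitly set to false. */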
static uint32_t loop_flags_mangle(uint32_t loop_flags) {
        int r;

        r = getenv_bool("SYSTEMD_LOOP_DIRECT_IO");
        if (r < 0 && r != -ENXIO)
                log_debug_errno(r, "Failed to parse $SYSTEMD_LOOP_DIRECT_IO, ignoring: %m");

        return UPDATE_FLAG(loop_flags, LO_FLAGS_DIRECT_IO, r != 0); /* Turn on LO_FLAGS_DIRECT_IO by default, unless explicitly configured to off. */
}

int loop_device_make(
                int fd,
                int open_flags,
                uint64_t offset,
                uint64_t size,
                uint32_t loop_flags,
                LoopDevice **ret) {

        assert(fd >= 0);
        assert(ret);

        return loop_device_make_internal(
                        fd,
                        open_flags,
                        offset,
                        size,
                        loop_flags_mangle(loop_flags),
                        ret);
}

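/* Like loop_device_make(), but takes a file system path instead of an fd. Opens the path with O_DIRECT if
 * possible, and, if open_flags is negative, tries writable access first and falls back to read-only. */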
int loop_device_make_by_path(
                const char *path,
                int open_flags,
                uint32_t loop_flags,
                LoopDevice **ret) {

        int r, basic_flags, direct_flags, rdwr_flags;
        _cleanup_close_ int fd = -1;
        bool direct = false;

        assert(path);
        assert(ret);
        assert(open_flags < 0 || IN_SET(open_flags, O_RDWR, O_RDONLY));

        /* Passing < 0 as open_flags here means we'll try to open the device writable if we can, retrying
         * read-only if we cannot. */

        loop_flags = loop_flags_mangle(loop_flags);

        /* Let's open with O_DIRECT if we can. But not all file systems support that, hence fall back to
         * non-O_DIRECT mode automatically, if it fails. */

        basic_flags = O_CLOEXEC|O_NONBLOCK|O_NOCTTY;
        direct_flags = FLAGS_SET(loop_flags, LO_FLAGS_DIRECT_IO) ? O_DIRECT : 0;
        rdwr_flags = open_flags >= 0 ? open_flags : O_RDWR;

        fd = open(path, basic_flags|direct_flags|rdwr_flags);
        if (fd < 0 && direct_flags != 0) /* If we had O_DIRECT on, and things failed with that, let's immediately try again without */
                fd = open(path, basic_flags|rdwr_flags);
        else
                direct = direct_flags != 0;
        if (fd < 0) {
                r = -errno;

                /* Retry read-only? */
                if (open_flags >= 0 || !(ERRNO_IS_PRIVILEGE(r) || r == -EROFS))
                        return r;

                fd = open(path, basic_flags|direct_flags|O_RDONLY);
                if (fd < 0 && direct_flags != 0) /* as above */
                        fd = open(path, basic_flags|O_RDONLY);
                else
                        direct = direct_flags != 0;
                if (fd < 0)
                        return r; /* Propagate original error */

                open_flags = O_RDONLY;
        } else if (open_flags < 0)
                open_flags = O_RDWR;

        log_debug("Opened '%s' in %s access mode%s, with O_DIRECT %s%s.",
                  path,
                  open_flags == O_RDWR ? "O_RDWR" : "O_RDONLY",
                  open_flags != rdwr_flags ? " (O_RDWR was requested but not allowed)" : "",
                  direct ? "enabled" : "disabled",
                  direct != (direct_flags != 0) ? " (O_DIRECT was requested but not supported)" : "");

        return loop_device_make_internal(fd, open_flags, 0, 0, loop_flags, ret);
}

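/* Releases the LoopDevice object: syncs the device, and — unless it was relinquished or not allocated by
 * us — detaches the backing file and asks the kernel to remove the device via LOOP_CTL_REMOVE. */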
LoopDevice* loop_device_unref(LoopDevice *d) {
        if (!d)
                return NULL;

        if (d->fd >= 0) {
                /* Implicitly sync the device, since otherwise in-flight blocks might not get written */
                if (fsync(d->fd) < 0)
                        log_debug_errno(errno, "Failed to sync loop block device, ignoring: %m");

                if (d->nr >= 0 && !d->relinquished) {
                        if (ioctl(d->fd, LOOP_CLR_FD) < 0)
                                log_debug_errno(errno, "Failed to clear loop device: %m");
                }

                safe_close(d->fd);
        }

        if (d->nr >= 0 && !d->relinquished) {
                _cleanup_close_ int control = -1;

                control = open("/dev/loop-control", O_RDWR|O_CLOEXEC|O_NOCTTY|O_NONBLOCK);
                if (control < 0)
                        log_warning_errno(errno,
                                          "Failed to open loop control device, cannot remove loop device %s: %m",
                                          strna(d->node));
                else
                        for (unsigned n_attempts = 0;;) {
                                if (ioctl(control, LOOP_CTL_REMOVE, d->nr) >= 0)
                                        break;
                                if (errno != EBUSY || ++n_attempts >= 64) {
                                        log_warning_errno(errno, "Failed to remove device %s: %m", strna(d->node));
                                        break;
                                }
                                (void) usleep(50 * USEC_PER_MSEC);
                        }
        }

        free(d->node);
        return mfree(d);
}

void loop_device_relinquish(LoopDevice *d) {
        assert(d);

        /* Don't attempt to clean up the loop device anymore from this point on. Leave the cleaning up to the
         * kernel itself, using the loop device "auto-clear" logic we already turned on when creating the
         * device. */

        d->relinquished = true;
}

void loop_device_unrelinquish(LoopDevice *d) {
        assert(d);
        d->relinquished = false;
}

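/* Wraps an already existing block device node (typically a loopback device) in a LoopDevice object, without
 * allocating anything. The device is marked relinquished, so it won't be detached or removed when the object
 * is freed. */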
int loop_device_open(const char *loop_path, int open_flags, LoopDevice **ret) {
        _cleanup_close_ int loop_fd = -1;
        _cleanup_free_ char *p = NULL;
        struct loop_info64 info;
        struct stat st;
        LoopDevice *d;
        int nr;

        assert(loop_path);
        assert(IN_SET(open_flags, O_RDWR, O_RDONLY));
        assert(ret);

        loop_fd = open(loop_path, O_CLOEXEC|O_NONBLOCK|O_NOCTTY|open_flags);
        if (loop_fd < 0)
                return -errno;

        if (fstat(loop_fd, &st) < 0)
                return -errno;
        if (!S_ISBLK(st.st_mode))
                return -ENOTBLK;

        if (ioctl(loop_fd, LOOP_GET_STATUS64, &info) >= 0) {
#if HAVE_VALGRIND_MEMCHECK_H
                /* Valgrind currently doesn't know LOOP_GET_STATUS64. Remove this once it does */
                VALGRIND_MAKE_MEM_DEFINED(&info, sizeof(info));
#endif
                nr = info.lo_number;
        } else
                nr = -1;

        p = strdup(loop_path);
        if (!p)
                return -ENOMEM;

        d = new(LoopDevice, 1);
        if (!d)
                return -ENOMEM;

        *d = (LoopDevice) {
                .fd = TAKE_FD(loop_fd),
                .nr = nr,
                .node = TAKE_PTR(p),
                .relinquished = true, /* It's not ours, don't try to destroy it when this object is freed */
                .devno = st.st_rdev,
                .uevent_seqnum_not_before = UINT64_MAX,
                .timestamp_not_before = USEC_INFINITY,
        };

        *ret = d;
        return d->fd;
}

static int resize_partition(int partition_fd, uint64_t offset, uint64_t size) {
        char sysfs[STRLEN("/sys/dev/block/:/partition") + 2*DECIMAL_STR_MAX(dev_t) + 1];
        _cleanup_free_ char *buffer = NULL;
        uint64_t current_offset, current_size, partno;
        _cleanup_close_ int whole_fd = -1;
        struct stat st;
        dev_t devno;
        int r;

        assert(partition_fd >= 0);

        /* Resizes the partition the loopback device refers to (assuming it refers to one instead of an actual
         * loopback device), and changes the offset, if needed. This is a fancy wrapper around
         * BLKPG_RESIZE_PARTITION. */

        if (fstat(partition_fd, &st) < 0)
                return -errno;

        assert(S_ISBLK(st.st_mode));

        xsprintf(sysfs, "/sys/dev/block/%u:%u/partition", major(st.st_rdev), minor(st.st_rdev));
        r = read_one_line_file(sysfs, &buffer);
        if (r == -ENOENT) /* not a partition, cannot resize */
                return -ENOTTY;
        if (r < 0)
                return r;
        r = safe_atou64(buffer, &partno);
        if (r < 0)
                return r;

        xsprintf(sysfs, "/sys/dev/block/%u:%u/start", major(st.st_rdev), minor(st.st_rdev));

        buffer = mfree(buffer);
        r = read_one_line_file(sysfs, &buffer);
        if (r < 0)
                return r;
        r = safe_atou64(buffer, &current_offset);
        if (r < 0)
                return r;
        if (current_offset > UINT64_MAX/512U)
                return -EINVAL;
        current_offset *= 512U;

        if (ioctl(partition_fd, BLKGETSIZE64, &current_size) < 0)
                return -EINVAL;

        if (size == UINT64_MAX && offset == UINT64_MAX)
                return 0;
        if (current_size == size && current_offset == offset)
                return 0;

        xsprintf(sysfs, "/sys/dev/block/%u:%u/../dev", major(st.st_rdev), minor(st.st_rdev));

        buffer = mfree(buffer);
        r = read_one_line_file(sysfs, &buffer);
        if (r < 0)
                return r;
        r = parse_devnum(buffer, &devno);
        if (r < 0)
                return r;

        whole_fd = r = device_open_from_devnum(S_IFBLK, devno, O_RDWR|O_CLOEXEC|O_NONBLOCK|O_NOCTTY, NULL);
        if (r < 0)
                return r;

        struct blkpg_partition bp = {
                .pno = partno,
                .start = offset == UINT64_MAX ? current_offset : offset,
                .length = size == UINT64_MAX ? current_size : size,
        };

        struct blkpg_ioctl_arg ba = {
                .op = BLKPG_RESIZE_PARTITION,
                .data = &bp,
                .datalen = sizeof(bp),
        };

        return RET_NERRNO(ioctl(whole_fd, BLKPG, &ba));
}

int loop_device_refresh_size(LoopDevice *d, uint64_t offset, uint64_t size) {
        struct loop_info64 info;

        assert(d);

        /* Changes the offset/start of the loop device relative to the beginning of the underlying file or
         * block device. If this loop device actually refers to a partition and not a loopback device, we'll
         * try to adjust the partition offsets instead.
         *
         * If either offset or size is UINT64_MAX we won't change that parameter. */

        if (d->fd < 0)
                return -EBADF;

        if (d->nr < 0) /* not a loopback device */
                return resize_partition(d->fd, offset, size);

        if (ioctl(d->fd, LOOP_GET_STATUS64, &info) < 0)
                return -errno;

#if HAVE_VALGRIND_MEMCHECK_H
        /* Valgrind currently doesn't know LOOP_GET_STATUS64. Remove this once it does */
        VALGRIND_MAKE_MEM_DEFINED(&info, sizeof(info));
#endif

        if (size == UINT64_MAX && offset == UINT64_MAX)
                return 0;
        if (info.lo_sizelimit == size && info.lo_offset == offset)
                return 0;

        if (size != UINT64_MAX)
                info.lo_sizelimit = size;
        if (offset != UINT64_MAX)
                info.lo_offset = offset;

        return RET_NERRNO(ioctl(d->fd, LOOP_SET_STATUS64, &info));
}

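/* Applies the specified flock() operation (e.g. LOCK_EX, LOCK_SH, LOCK_UN) to the loopback device fd. */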
int loop_device_flock(LoopDevice *d, int operation) {
        assert(d);

        if (d->fd < 0)
                return -EBADF;

        return RET_NERRNO(flock(d->fd, operation));
}

int loop_device_sync(LoopDevice *d) {
        assert(d);

        /* We also do this implicitly in loop_device_unref(). Doing this explicitly here has the benefit that
         * we can check the return value though. */

        if (d->fd < 0)
                return -EBADF;

        return RET_NERRNO(fsync(d->fd));
}