/* thirdparty/systemd.git — src/shared/loop-util.c
 * (snapshot around the merge of PR #26867, via the git.ipfire.org mirror) */
1 /* SPDX-License-Identifier: LGPL-2.1-or-later */
2
3 #if HAVE_VALGRIND_MEMCHECK_H
4 #include <valgrind/memcheck.h>
5 #endif
6
7 #include <errno.h>
8 #include <fcntl.h>
9 #include <linux/blkpg.h>
10 #include <linux/fs.h>
11 #include <linux/loop.h>
12 #include <sys/file.h>
13 #include <sys/ioctl.h>
14 #include <unistd.h>
15
16 #include "sd-device.h"
17
18 #include "alloc-util.h"
19 #include "blockdev-util.h"
20 #include "data-fd-util.h"
21 #include "device-util.h"
22 #include "devnum-util.h"
23 #include "dissect-image.h"
24 #include "env-util.h"
25 #include "errno-util.h"
26 #include "fd-util.h"
27 #include "fileio.h"
28 #include "loop-util.h"
29 #include "missing_loop.h"
30 #include "parse-util.h"
31 #include "path-util.h"
32 #include "random-util.h"
33 #include "stat-util.h"
34 #include "stdio-util.h"
35 #include "string-util.h"
36 #include "tmpfile-util.h"
37
/* Cleanup handler used via _cleanup_(): detaches the backing file from a loopback
 * device (LOOP_CLR_FD) and closes the fd. A negative fd means "nothing to do". */
static void cleanup_clear_loop_close(int *fd) {
        int f = *fd;

        if (f < 0)
                return;

        /* Best effort on both steps: there is nothing useful to do on failure here. */
        (void) ioctl(f, LOOP_CLR_FD);
        (void) safe_close(f);
}
45
46 static int loop_is_bound(int fd) {
47 struct loop_info64 info;
48
49 assert(fd >= 0);
50
51 if (ioctl(fd, LOOP_GET_STATUS64, &info) < 0) {
52 if (errno == ENXIO)
53 return false; /* not bound! */
54
55 return -errno;
56 }
57
58 return true; /* bound! */
59 }
60
61 static int get_current_uevent_seqnum(uint64_t *ret) {
62 _cleanup_free_ char *p = NULL;
63 int r;
64
65 r = read_full_virtual_file("/sys/kernel/uevent_seqnum", &p, NULL);
66 if (r < 0)
67 return log_debug_errno(r, "Failed to read current uevent sequence number: %m");
68
69 r = safe_atou64(strstrip(p), ret);
70 if (r < 0)
71 return log_debug_errno(r, "Failed to parse current uevent sequence number: %s", p);
72
73 return 0;
74 }
75
76 static int open_lock_fd(int primary_fd, int operation) {
77 _cleanup_close_ int lock_fd = -EBADF;
78
79 assert(primary_fd >= 0);
80 assert(IN_SET(operation & ~LOCK_NB, LOCK_SH, LOCK_EX));
81
82 lock_fd = fd_reopen(primary_fd, O_RDONLY|O_CLOEXEC|O_NONBLOCK|O_NOCTTY);
83 if (lock_fd < 0)
84 return lock_fd;
85
86 if (flock(lock_fd, operation) < 0)
87 return -errno;
88
89 return TAKE_FD(lock_fd);
90 }
91
/* If direct IO was requested in 'c', check whether the kernel actually enabled it on the
 * configured device, and log (at debug level) if it did not. Never fails because of a
 * direct IO mismatch — only the LOOP_GET_STATUS64 query itself can produce an error.
 * Returns 0 on success, a negative errno on ioctl failure. */
static int loop_configure_verify_direct_io(int fd, const struct loop_config *c) {
        /* Bugfix: this used to be assert(fd), which wrongly rejects fd == 0 (a valid
         * descriptor) and accepts any other negative/positive value. */
        assert(fd >= 0);
        assert(c);

        if (FLAGS_SET(c->info.lo_flags, LO_FLAGS_DIRECT_IO)) {
                struct loop_info64 info;

                if (ioctl(fd, LOOP_GET_STATUS64, &info) < 0)
                        return log_debug_errno(errno, "Failed to issue LOOP_GET_STATUS64: %m");

#if HAVE_VALGRIND_MEMCHECK_H
                /* Valgrind doesn't know this ioctl, tell it the output buffer is initialized. */
                VALGRIND_MAKE_MEM_DEFINED(&info, sizeof(info));
#endif

                /* On older kernels (<= 5.3) it was necessary to set the block size of the loopback block
                 * device to the logical block size of the underlying file system. Since there was no nice
                 * way to query the value, we are not bothering to do this however. On newer kernels the
                 * block size is propagated automatically and does not require intervention from us. We'll
                 * check here if enabling direct IO worked, to make this easily debuggable however.
                 *
                 * (Should anyone really care and actually wants direct IO on old kernels: it might be worth
                 * enabling direct IO with iteratively larger block sizes until it eventually works.) */
                if (!FLAGS_SET(info.lo_flags, LO_FLAGS_DIRECT_IO))
                        log_debug("Could not enable direct IO mode, proceeding in buffered IO mode.");
        }

        return 0;
}
120
/* Verifies that a LOOP_CONFIGURE call took full effect: several kernels (notably vanilla
 * 5.8) accept the ioctl but silently ignore parts of the configuration. Returns > 0 if
 * everything was honoured, 0 if the caller must fall back to LOOP_SET_STATUS64, and a
 * negative errno on error. */
static int loop_configure_verify(int fd, const struct loop_config *c) {
        bool broken = false;
        int r;

        assert(fd >= 0);
        assert(c);

        if (c->block_size != 0) {
                uint32_t ssz;

                /* Check whether the requested logical sector size was actually applied. */
                r = blockdev_get_sector_size(fd, &ssz);
                if (r < 0)
                        return r;

                if (ssz != c->block_size) {
                        log_debug("LOOP_CONFIGURE didn't honour requested block size %" PRIu32 ", got %" PRIu32 " instead. Ignoring.", c->block_size, ssz);
                        broken = true;
                }
        }

        if (c->info.lo_sizelimit != 0) {
                /* Kernel 5.8 vanilla doesn't properly propagate the size limit into the
                 * block device. If it's used, let's immediately check if it had the desired
                 * effect hence. And if not use classic LOOP_SET_STATUS64. */
                uint64_t z;

                if (ioctl(fd, BLKGETSIZE64, &z) < 0)
                        return -errno;

                if (z != c->info.lo_sizelimit) {
                        log_debug("LOOP_CONFIGURE is broken, doesn't honour .info.lo_sizelimit. Falling back to LOOP_SET_STATUS64.");
                        broken = true;
                }
        }

        if (FLAGS_SET(c->info.lo_flags, LO_FLAGS_PARTSCAN)) {
                /* Kernel 5.8 vanilla doesn't properly propagate the partition scanning flag
                 * into the block device. Let's hence verify if things work correctly here
                 * before returning. */

                r = blockdev_partscan_enabled(fd);
                if (r < 0)
                        return r;
                if (r == 0) {
                        log_debug("LOOP_CONFIGURE is broken, doesn't honour LO_FLAGS_PARTSCAN. Falling back to LOOP_SET_STATUS64.");
                        broken = true;
                }
        }

        /* A direct IO mismatch is only logged, never marks the configuration as broken. */
        r = loop_configure_verify_direct_io(fd, c);
        if (r < 0)
                return r;

        return !broken;
}
176
/* Legacy configuration path for kernels without a (working) LOOP_CONFIGURE: applies the
 * settings from 'c' via LOOP_SET_STATUS64 (with EAGAIN retry), then fixes up buffer-cache
 * invalidation, sector size and direct IO via separate ioctls. The backing fd must already
 * be attached (LOOP_SET_FD done by the caller). Returns 0 on success, negative errno on
 * failure. */
static int loop_configure_fallback(int fd, const struct loop_config *c) {
        struct loop_info64 info_copy;
        int r;

        assert(fd >= 0);
        assert(c);

        /* Only some of the flags LOOP_CONFIGURE can set are also settable via LOOP_SET_STATUS64, hence mask
         * them out. */
        info_copy = c->info;
        info_copy.lo_flags &= LOOP_SET_STATUS_SETTABLE_FLAGS;

        /* Since kernel commit 5db470e229e22b7eda6e23b5566e532c96fb5bc3 (kernel v5.0) the LOOP_SET_STATUS64
         * ioctl can return EAGAIN in case we change the info.lo_offset field, if someone else is accessing the
         * block device while we try to reconfigure it. This is a pretty common case, since udev might
         * instantly start probing the device as soon as we attach an fd to it. Hence handle it in two ways:
         * first, let's take the BSD lock to ensure that udev will not step in between the point in
         * time where we attach the fd and where we reconfigure the device. Secondly, let's wait 50ms on
         * EAGAIN and retry. The former should be an efficient mechanism to avoid we have to wait 50ms
         * needlessly if we are just racing against udev. The latter is protection against all other cases,
         * i.e. peers that do not take the BSD lock. */

        for (unsigned n_attempts = 0;;) {
                if (ioctl(fd, LOOP_SET_STATUS64, &info_copy) >= 0)
                        break;

                if (errno != EAGAIN || ++n_attempts >= 64)
                        return log_debug_errno(errno, "Failed to configure loopback block device: %m");

                /* Sleep some random time, but at least 10ms, at most 250ms. Increase the delay the more
                 * failed attempts we see */
                (void) usleep(UINT64_C(10) * USEC_PER_MSEC +
                              random_u64_range(UINT64_C(240) * USEC_PER_MSEC * n_attempts/64));
        }

        /* Work around a kernel bug, where changing offset/size of the loopback device doesn't correctly
         * invalidate the buffer cache. For details see:
         *
         * https://android.googlesource.com/platform/system/apex/+/bef74542fbbb4cd629793f4efee8e0053b360570
         *
         * This was fixed in kernel 5.0, see:
         *
         * https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=5db470e229e22b7eda6e23b5566e532c96fb5bc3
         *
         * We'll run the work-around here in the legacy LOOP_SET_STATUS64 codepath. In the LOOP_CONFIGURE
         * codepath above it should not be necessary. */
        if (c->info.lo_offset != 0 || c->info.lo_sizelimit != 0)
                if (ioctl(fd, BLKFLSBUF, 0) < 0)
                        log_debug_errno(errno, "Failed to issue BLKFLSBUF ioctl, ignoring: %m");

        /* If a block size is requested then try to configure it. If that doesn't work, ignore errors, but
         * afterwards, let's validate what is in effect, and if it doesn't match what we want, fail */
        if (c->block_size != 0) {
                uint32_t ssz;

                if (ioctl(fd, LOOP_SET_BLOCK_SIZE, (unsigned long) c->block_size) < 0)
                        log_debug_errno(errno, "Failed to set sector size, ignoring: %m");

                r = blockdev_get_sector_size(fd, &ssz);
                if (r < 0)
                        return log_debug_errno(r, "Failed to read sector size: %m");
                if (ssz != c->block_size)
                        return log_debug_errno(SYNTHETIC_ERRNO(EIO), "Sector size of loopback device doesn't match what we requested, refusing.");
        }

        /* LO_FLAGS_DIRECT_IO is a flag we need to configure via explicit ioctls. */
        if (FLAGS_SET(c->info.lo_flags, LO_FLAGS_DIRECT_IO))
                if (ioctl(fd, LOOP_SET_DIRECT_IO, 1UL) < 0)
                        log_debug_errno(errno, "Failed to enable direct IO mode, ignoring: %m");

        return loop_configure_verify_direct_io(fd, c);
}
249
/* Attaches the backing fd described by 'c' to /dev/loop<nr>, under an exclusive BSD lock.
 * Prefers the atomic LOOP_CONFIGURE ioctl, falling back to LOOP_SET_FD + LOOP_SET_STATUS64
 * on kernels where LOOP_CONFIGURE is unavailable or broken (the result is cached in a
 * static flag for later calls). On success returns 0 and stores a new LoopDevice in *ret,
 * locked as requested by lock_op. May return -EBUSY or -EUCLEAN to ask the caller to retry
 * with another device number. */
static int loop_configure(
                int nr,
                int open_flags,
                int lock_op,
                const struct loop_config *c,
                LoopDevice **ret) {

        /* Once we learn that LOOP_CONFIGURE doesn't work on this kernel, remember it. */
        static bool loop_configure_broken = false;

        _cleanup_(sd_device_unrefp) sd_device *dev = NULL;
        _cleanup_(cleanup_clear_loop_close) int loop_with_fd = -EBADF; /* This must be declared before lock_fd. */
        _cleanup_close_ int fd = -EBADF, lock_fd = -EBADF;
        _cleanup_free_ char *node = NULL;
        uint64_t diskseq = 0, seqnum = UINT64_MAX;
        usec_t timestamp = USEC_INFINITY;
        dev_t devno;
        int r;

        assert(nr >= 0);
        assert(c);
        assert(ret);

        if (asprintf(&node, "/dev/loop%i", nr) < 0)
                return log_oom_debug();

        r = sd_device_new_from_devname(&dev, node);
        if (r < 0)
                return log_debug_errno(r, "Failed to create sd_device object for \"%s\": %m", node);

        r = sd_device_get_devnum(dev, &devno);
        if (r < 0)
                return log_device_debug_errno(dev, r, "Failed to get devnum: %m");

        fd = sd_device_open(dev, O_CLOEXEC|O_NONBLOCK|O_NOCTTY|open_flags);
        if (fd < 0)
                return log_device_debug_errno(dev, fd, "Failed to open device: %m");

        /* Let's lock the device before we do anything. We take the BSD lock on a second, separately opened
         * fd for the device. udev after all watches for close() events (specifically IN_CLOSE_WRITE) on
         * block devices to reprobe them, hence by having a separate fd we will later close() we can ensure
         * we trigger udev after everything is done. If we'd lock our own fd instead and keep it open for a
         * long time udev would possibly never run on it again, even though the fd is unlocked, simply
         * because we never close() it. It also has the nice benefit we can use the _cleanup_close_ logic to
         * automatically release the lock, after we are done. */
        lock_fd = open_lock_fd(fd, LOCK_EX);
        if (lock_fd < 0)
                return log_device_debug_errno(dev, lock_fd, "Failed to acquire lock: %m");

        log_device_debug(dev, "Acquired exclusive lock.");

        /* Let's see if backing file is really unattached. Someone may already attach a backing file without
         * taking BSD lock. */
        r = loop_is_bound(fd);
        if (r < 0)
                return log_device_debug_errno(dev, r, "Failed to check if the loopback block device is bound: %m");
        if (r > 0)
                return log_device_debug_errno(dev, SYNTHETIC_ERRNO(EBUSY),
                                              "The loopback block device is already bound, ignoring.");

        /* Let's see if the device is really detached, i.e. currently has no associated partition block
         * devices. On various kernels (such as 5.8) it is possible to have a loopback block device that
         * superficially is detached but still has partition block devices associated for it. Let's then
         * manually remove the partitions via BLKPG, and tell the caller we did that via EUCLEAN, so they try
         * again. */
        r = block_device_remove_all_partitions(dev, fd);
        if (r < 0)
                return log_device_debug_errno(dev, r, "Failed to remove partitions on the loopback block device: %m");
        if (r > 0)
                /* Removed all partitions. Let's report this to the caller, to try again, and count this as
                 * an attempt. */
                return log_device_debug_errno(dev, SYNTHETIC_ERRNO(EUCLEAN),
                                              "Removed partitions on the loopback block device.");

        if (!loop_configure_broken) {
                /* Acquire uevent seqnum immediately before attaching the loopback device. This allows
                 * callers to ignore all uevents with a seqnum before this one, if they need to associate
                 * uevent with this attachment. Doing so isn't race-free though, as uevents that happen in
                 * the window between this reading of the seqnum, and the LOOP_CONFIGURE call might still be
                 * mistaken as originating from our attachment, even though might be caused by an earlier
                 * use. But doing this at least shortens the race window a bit. */
                r = get_current_uevent_seqnum(&seqnum);
                if (r < 0)
                        return log_device_debug_errno(dev, r, "Failed to get the current uevent seqnum: %m");

                timestamp = now(CLOCK_MONOTONIC);

                if (ioctl(fd, LOOP_CONFIGURE, c) < 0) {
                        /* Do fallback only if LOOP_CONFIGURE is not supported, propagate all other
                         * errors. Note that the kernel is weird: non-existing ioctls currently return EINVAL
                         * rather than ENOTTY on loopback block devices. They should fix that in the kernel,
                         * but in the meantime we accept both here. */
                        if (!ERRNO_IS_NOT_SUPPORTED(errno) && errno != EINVAL)
                                return log_device_debug_errno(dev, errno, "ioctl(LOOP_CONFIGURE) failed: %m");

                        loop_configure_broken = true;
                } else {
                        /* The device is now bound: hand the fd over to loop_with_fd, whose destructor
                         * issues LOOP_CLR_FD if we bail out before constructing the LoopDevice object. */
                        loop_with_fd = TAKE_FD(fd);

                        r = loop_configure_verify(loop_with_fd, c);
                        if (r < 0)
                                return log_device_debug_errno(dev, r, "Failed to verify if loopback block device is correctly configured: %m");
                        if (r == 0) {
                                /* LOOP_CONFIGURE doesn't work. Remember that. */
                                loop_configure_broken = true;

                                /* We return EBUSY here instead of retrying immediately with LOOP_SET_FD,
                                 * because LOOP_CLR_FD is async: if the operation cannot be executed right
                                 * away it just sets the autoclear flag on the device. This means there's a
                                 * good chance we cannot actually reuse the loopback device right-away. Hence
                                 * let's assume it's busy, avoid the trouble and let the calling loop call us
                                 * again with a new, likely unused device. */
                                return -EBUSY;
                        }
                }
        }

        if (loop_configure_broken) {
                /* Let's read the seqnum again, to shorten the window. */
                r = get_current_uevent_seqnum(&seqnum);
                if (r < 0)
                        return log_device_debug_errno(dev, r, "Failed to get the current uevent seqnum: %m");

                timestamp = now(CLOCK_MONOTONIC);

                if (ioctl(fd, LOOP_SET_FD, c->fd) < 0)
                        return log_device_debug_errno(dev, errno, "ioctl(LOOP_SET_FD) failed: %m");

                loop_with_fd = TAKE_FD(fd);

                r = loop_configure_fallback(loop_with_fd, c);
                if (r < 0)
                        return r;
        }

        r = fd_get_diskseq(loop_with_fd, &diskseq);
        if (r < 0 && r != -EOPNOTSUPP)
                return log_device_debug_errno(dev, r, "Failed to get diskseq: %m");

        /* We hold LOCK_EX at this point; convert to whatever lock state the caller asked for. */
        switch (lock_op & ~LOCK_NB) {
        case LOCK_EX: /* Already in effect */
                break;
        case LOCK_SH: /* Downgrade */
                if (flock(lock_fd, lock_op) < 0)
                        return log_device_debug_errno(dev, errno, "Failed to downgrade lock level: %m");
                break;
        case LOCK_UN: /* Release */
                lock_fd = safe_close(lock_fd);
                break;
        default:
                assert_not_reached();
        }

        LoopDevice *d = new(LoopDevice, 1);
        if (!d)
                return log_oom_debug();

        *d = (LoopDevice) {
                .n_ref = 1,
                .fd = TAKE_FD(loop_with_fd),
                .lock_fd = TAKE_FD(lock_fd),
                .node = TAKE_PTR(node),
                .nr = nr,
                .devno = devno,
                .dev = TAKE_PTR(dev),
                .diskseq = diskseq,
                .uevent_seqnum_not_before = seqnum,
                .timestamp_not_before = timestamp,
                .sector_size = c->block_size,
        };

        *ret = TAKE_PTR(d);
        return 0;
}
423
/* Allocates a loopback block device for 'fd' (with 'path' as optional path hint for the
 * backing file), covering the given offset/size window, and returns a LoopDevice in *ret.
 * If 'fd' already refers to a whole block device, no loopback device is created and the
 * original device is opened instead. sector_size may be 0 (use the classic 512 default),
 * UINT32_MAX (propagate from the backing block device, or probe a regular file's image),
 * or an explicit value. Loops over LOOP_CTL_GET_FREE to handle races with other loopback
 * allocators. */
static int loop_device_make_internal(
                const char *path,
                int fd,
                int open_flags,
                uint64_t offset,
                uint64_t size,
                uint32_t sector_size,
                uint32_t loop_flags,
                int lock_op,
                LoopDevice **ret) {

        _cleanup_(loop_device_unrefp) LoopDevice *d = NULL;
        _cleanup_close_ int direct_io_fd = -EBADF, control = -EBADF;
        _cleanup_free_ char *backing_file = NULL;
        struct loop_config config;
        int r, f_flags;
        struct stat st;

        assert(fd >= 0);
        assert(ret);
        assert(IN_SET(open_flags, O_RDWR, O_RDONLY));

        if (fstat(fd, &st) < 0)
                return -errno;

        if (S_ISBLK(st.st_mode)) {
                if (offset == 0 && IN_SET(size, 0, UINT64_MAX))
                        /* If this is already a block device and we are supposed to cover the whole of it
                         * then store an fd to the original open device node — and do not actually create an
                         * unnecessary loopback device for it. */
                        return loop_device_open_from_fd(fd, open_flags, lock_op, ret);
        } else {
                /* Only regular files may back a loopback device. */
                r = stat_verify_regular(&st);
                if (r < 0)
                        return r;
        }

        /* Determine the backing file path: normalize the caller-supplied path, or resolve it from the fd. */
        if (path) {
                r = path_make_absolute_cwd(path, &backing_file);
                if (r < 0)
                        return r;

                path_simplify(backing_file);
        } else {
                r = fd_get_path(fd, &backing_file);
                if (r < 0)
                        return r;
        }

        f_flags = fcntl(fd, F_GETFL);
        if (f_flags < 0)
                return -errno;

        if (FLAGS_SET(loop_flags, LO_FLAGS_DIRECT_IO) != FLAGS_SET(f_flags, O_DIRECT)) {
                /* If LO_FLAGS_DIRECT_IO is requested, then make sure we have the fd open with O_DIRECT, as
                 * that's required. Conversely, if it's off require that O_DIRECT is off too (that's because
                 * new kernels will implicitly enable LO_FLAGS_DIRECT_IO if O_DIRECT is set).
                 *
                 * Our intention here is that LO_FLAGS_DIRECT_IO is the primary knob, and O_DIRECT derived
                 * from that automatically. */

                direct_io_fd = fd_reopen(fd, (FLAGS_SET(loop_flags, LO_FLAGS_DIRECT_IO) ? O_DIRECT : 0)|O_CLOEXEC|O_NONBLOCK|open_flags);
                if (direct_io_fd < 0) {
                        if (!FLAGS_SET(loop_flags, LO_FLAGS_DIRECT_IO))
                                return log_debug_errno(errno, "Failed to reopen file descriptor without O_DIRECT: %m");

                        /* Some file systems might not support O_DIRECT, let's gracefully continue without it then. */
                        log_debug_errno(errno, "Failed to enable O_DIRECT for backing file descriptor for loopback device. Continuing without.");
                        loop_flags &= ~LO_FLAGS_DIRECT_IO;
                } else
                        fd = direct_io_fd; /* From now on, operate on our new O_DIRECT fd */
        }

        control = open("/dev/loop-control", O_RDWR|O_CLOEXEC|O_NOCTTY|O_NONBLOCK);
        if (control < 0)
                return -errno;

        if (sector_size == 0)
                /* If no sector size is specified, default to the classic default */
                sector_size = 512;
        else if (sector_size == UINT32_MAX) {

                if (S_ISBLK(st.st_mode))
                        /* If the sector size is specified as UINT32_MAX we'll propagate the sector size of
                         * the underlying block device. */
                        r = blockdev_get_sector_size(fd, &sector_size);
                else {
                        _cleanup_close_ int non_direct_io_fd = -EBADF;
                        int probe_fd;

                        assert(S_ISREG(st.st_mode));

                        /* If sector size is specified as UINT32_MAX, we'll try to probe the right sector
                         * size of the image in question by looking for the GPT partition header at various
                         * offsets. This of course only works if the image already has a disk label.
                         *
                         * So here we actually want to read the file contents ourselves. This is quite likely
                         * not going to work if we managed to enable O_DIRECT, because in such a case there
                         * are some pretty strict alignment requirements to offset, size and target, but
                         * there's no way to query what alignment specifically is actually required. Hence,
                         * let's avoid the mess, and temporarily open an fd without O_DIRECT for the probing
                         * logic. */

                        if (FLAGS_SET(loop_flags, LO_FLAGS_DIRECT_IO)) {
                                non_direct_io_fd = fd_reopen(fd, O_RDONLY|O_CLOEXEC|O_NONBLOCK);
                                if (non_direct_io_fd < 0)
                                        return non_direct_io_fd;

                                probe_fd = non_direct_io_fd;
                        } else
                                probe_fd = fd;

                        r = probe_sector_size(probe_fd, &sector_size);
                }
                if (r < 0)
                        return r;
        }

        config = (struct loop_config) {
                .fd = fd,
                .block_size = sector_size,
                .info = {
                        /* Use the specified flags, but configure the read-only flag from the open flags, and force autoclear */
                        .lo_flags = (loop_flags & ~LO_FLAGS_READ_ONLY) | ((open_flags & O_ACCMODE) == O_RDONLY ? LO_FLAGS_READ_ONLY : 0) | LO_FLAGS_AUTOCLEAR,
                        .lo_offset = offset,
                        .lo_sizelimit = size == UINT64_MAX ? 0 : size,
                },
        };

        /* Loop around LOOP_CTL_GET_FREE, since at the moment we attempt to open the returned device it might
         * be gone already, taken by somebody else racing against us. */
        for (unsigned n_attempts = 0;;) {
                usec_t usec;
                int nr;

                /* Let's take a lock on the control device first. On a busy system, where many programs
                 * attempt to allocate a loopback device at the same time, we might otherwise keep looping
                 * around relatively heavy operations: asking for a free loopback device, then opening it,
                 * validating it, attaching something to it. Let's serialize this whole operation, to make
                 * unnecessary busywork less likely. Note that this is just something we do to optimize our
                 * own code (and whoever else decides to use LOCK_EX locks for this), taking this lock is not
                 * necessary, it just means it's less likely we have to iterate through this loop again and
                 * again if our own code races against our own code.
                 *
                 * Note: our lock protocol is to take the /dev/loop-control lock first, and the block device
                 * lock second, if both are taken, and always in this order, to avoid ABBA locking issues. */
                if (flock(control, LOCK_EX) < 0)
                        return -errno;

                nr = ioctl(control, LOOP_CTL_GET_FREE);
                if (nr < 0)
                        return -errno;

                r = loop_configure(nr, open_flags, lock_op, &config, &d);
                if (r >= 0)
                        break;

                /* -ENODEV or friends: Somebody might've gotten the same number from the kernel, used the
                 * device, and called LOOP_CTL_REMOVE on it. Let's retry with a new number.
                 * -EBUSY: a file descriptor is already bound to the loopback block device.
                 * -EUCLEAN: some left-over partition devices that were cleaned up. */
                if (!ERRNO_IS_DEVICE_ABSENT(r) && !IN_SET(r, -EBUSY, -EUCLEAN))
                        return r;

                /* OK, this didn't work, let's try again a bit later, but first release the lock on the
                 * control device */
                if (flock(control, LOCK_UN) < 0)
                        return -errno;

                if (++n_attempts >= 64) /* Give up eventually */
                        return -EBUSY;

                /* Wait some random time, to make collision less likely. Let's pick a random time in the
                 * range 0ms…250ms, linearly scaled by the number of failed attempts. */
                usec = random_u64_range(UINT64_C(10) * USEC_PER_MSEC +
                                        UINT64_C(240) * USEC_PER_MSEC * n_attempts/64);
                log_debug("Trying again after %s.", FORMAT_TIMESPAN(usec, USEC_PER_MSEC));
                (void) usleep(usec);
        }

        d->backing_file = TAKE_PTR(backing_file);
        d->backing_inode = st.st_ino;
        d->backing_devno = st.st_dev;

        log_debug("Successfully acquired %s, devno=%u:%u, nr=%i, diskseq=%" PRIu64,
                  d->node,
                  major(d->devno), minor(d->devno),
                  d->nr,
                  d->diskseq);

        *ret = TAKE_PTR(d);
        return 0;
}
617
618 static uint32_t loop_flags_mangle(uint32_t loop_flags) {
619 int r;
620
621 r = getenv_bool("SYSTEMD_LOOP_DIRECT_IO");
622 if (r < 0 && r != -ENXIO)
623 log_debug_errno(r, "Failed to parse $SYSTEMD_LOOP_DIRECT_IO, ignoring: %m");
624
625 return UPDATE_FLAG(loop_flags, LO_FLAGS_DIRECT_IO, r != 0); /* Turn on LO_FLAGS_DIRECT_IO by default, unless explicitly configured to off. */
626 }
627
628 int loop_device_make(
629 int fd,
630 int open_flags,
631 uint64_t offset,
632 uint64_t size,
633 uint32_t sector_size,
634 uint32_t loop_flags,
635 int lock_op,
636 LoopDevice **ret) {
637
638 assert(fd >= 0);
639 assert(ret);
640
641 return loop_device_make_internal(
642 NULL,
643 fd,
644 open_flags,
645 offset,
646 size,
647 sector_size,
648 loop_flags_mangle(loop_flags),
649 lock_op,
650 ret);
651 }
652
/* Opens the file at 'path' and allocates a loopback device for it. open_flags may be
 * O_RDWR, O_RDONLY, or negative, which means "try read-write, fall back to read-only on
 * EPERM/EACCES/EROFS". O_DIRECT is attempted when direct IO is requested and silently
 * dropped if the file system does not support it. */
int loop_device_make_by_path(
                const char *path,
                int open_flags,
                uint32_t sector_size,
                uint32_t loop_flags,
                int lock_op,
                LoopDevice **ret) {

        int r, basic_flags, direct_flags, rdwr_flags;
        _cleanup_close_ int fd = -EBADF;
        bool direct = false;

        assert(path);
        assert(ret);
        assert(open_flags < 0 || IN_SET(open_flags, O_RDWR, O_RDONLY));

        /* Passing < 0 as open_flags here means we'll try to open the device writable if we can, retrying
         * read-only if we cannot. */

        loop_flags = loop_flags_mangle(loop_flags);

        /* Let's open with O_DIRECT if we can. But not all file systems support that, hence fall back to
         * non-O_DIRECT mode automatically, if it fails. */

        basic_flags = O_CLOEXEC|O_NONBLOCK|O_NOCTTY;
        direct_flags = FLAGS_SET(loop_flags, LO_FLAGS_DIRECT_IO) ? O_DIRECT : 0;
        rdwr_flags = open_flags >= 0 ? open_flags : O_RDWR;

        fd = open(path, basic_flags|direct_flags|rdwr_flags);
        if (fd < 0 && direct_flags != 0) /* If we had O_DIRECT on, and things failed with that, let's immediately try again without */
                fd = open(path, basic_flags|rdwr_flags);
        else
                /* NB: the 'else' binds to the O_DIRECT retry above: 'direct' is only set when the
                 * first open() either succeeded or was made without O_DIRECT in the first place. */
                direct = direct_flags != 0;
        if (fd < 0) {
                r = -errno;

                /* Retry read-only? */
                if (open_flags >= 0 || !(ERRNO_IS_PRIVILEGE(r) || r == -EROFS))
                        return r;

                fd = open(path, basic_flags|direct_flags|O_RDONLY);
                if (fd < 0 && direct_flags != 0) /* as above */
                        fd = open(path, basic_flags|O_RDONLY);
                else
                        direct = direct_flags != 0;
                if (fd < 0)
                        return r; /* Propagate original error */

                open_flags = O_RDONLY;
        } else if (open_flags < 0)
                open_flags = O_RDWR;

        log_debug("Opened '%s' in %s access mode%s, with O_DIRECT %s%s.",
                  path,
                  open_flags == O_RDWR ? "O_RDWR" : "O_RDONLY",
                  open_flags != rdwr_flags ? " (O_RDWR was requested but not allowed)" : "",
                  direct ? "enabled" : "disabled",
                  direct != (direct_flags != 0) ? " (O_DIRECT was requested but not supported)" : "");

        return loop_device_make_internal(path, fd, open_flags, 0, 0, sector_size, loop_flags, lock_op, ret);
}
714
715 int loop_device_make_by_path_memory(
716 const char *path,
717 int open_flags,
718 uint32_t sector_size,
719 uint32_t loop_flags,
720 int lock_op,
721 LoopDevice **ret) {
722
723 _cleanup_close_ int fd = -EBADF, mfd = -EBADF;
724 _cleanup_free_ char *fn = NULL;
725 struct stat st;
726 int r;
727
728 assert(path);
729 assert(IN_SET(open_flags, O_RDWR, O_RDONLY));
730 assert(ret);
731
732 loop_flags &= ~LO_FLAGS_DIRECT_IO; /* memfds don't support O_DIRECT, hence LO_FLAGS_DIRECT_IO can't be used either */
733
734 fd = open(path, O_CLOEXEC|O_NONBLOCK|O_NOCTTY|O_RDONLY);
735 if (fd < 0)
736 return -errno;
737
738 if (fstat(fd, &st) < 0)
739 return -errno;
740
741 if (!S_ISREG(st.st_mode) && !S_ISBLK(st.st_mode))
742 return -EBADF;
743
744 r = path_extract_filename(path, &fn);
745 if (r < 0)
746 return r;
747
748 mfd = memfd_clone_fd(fd, fn, open_flags|O_CLOEXEC);
749 if (mfd < 0)
750 return mfd;
751
752 fd = safe_close(fd); /* Let's close the original early */
753
754 return loop_device_make_internal(NULL, mfd, open_flags, 0, 0, sector_size, loop_flags, lock_op, ret);
755 }
756
/* Destructor for LoopDevice objects (invoked on last unref): for devices we own (neither
 * foreign nor relinquished), synchronously detaches the backing file, removes leftover
 * partitions, and frees the device number via LOOP_CTL_REMOVE — all under the documented
 * lock protocol (control device lock first, block device lock second). Always returns
 * NULL, for use in cleanup expressions. */
static LoopDevice* loop_device_free(LoopDevice *d) {
        _cleanup_close_ int control = -EBADF;
        int r;

        if (!d)
                return NULL;

        /* Release any lock we might have on the device first. We want to open+lock the /dev/loop-control
         * device below, but our lock protocol says that if both control and block device locks are taken,
         * the control lock needs to be taken first, the block device lock second — in order to avoid ABBA
         * locking issues. Moreover, we want to issue LOOP_CLR_FD on the block device further down, and that
         * would fail if we had another fd open to the device. */
        d->lock_fd = safe_close(d->lock_fd);

        /* Let's open the control device early, and lock it, so that we can release our block device and
         * delete it in a synchronized fashion, and allocators won't needlessly see the block device as free
         * while we are about to delete it. */
        if (!LOOP_DEVICE_IS_FOREIGN(d) && !d->relinquished) {
                control = open("/dev/loop-control", O_RDWR|O_CLOEXEC|O_NOCTTY|O_NONBLOCK);
                if (control < 0)
                        log_debug_errno(errno, "Failed to open loop control device, cannot remove loop device '%s', ignoring: %m", strna(d->node));
                else if (flock(control, LOCK_EX) < 0)
                        log_debug_errno(errno, "Failed to lock loop control device, ignoring: %m");
        }

        /* Then let's release the loopback block device */
        if (d->fd >= 0) {
                /* Implicitly sync the device, since otherwise in-flight blocks might not get written */
                if (fsync(d->fd) < 0)
                        log_debug_errno(errno, "Failed to sync loop block device, ignoring: %m");

                if (!LOOP_DEVICE_IS_FOREIGN(d) && !d->relinquished) {
                        /* We are supposed to clear the loopback device. Let's do this synchronously: lock
                         * the device, manually remove all partitions and then clear it. This should ensure
                         * udev doesn't concurrently access the devices, and we can be reasonably sure that
                         * once we are done here the device is cleared and all its partition children
                         * removed. Note that we lock our primary device fd here (and not a separate locking
                         * fd, as we do during allocation, since we want to keep the lock all the way through
                         * the LOOP_CLR_FD, but that call would fail if we had more than one fd open.) */

                        if (flock(d->fd, LOCK_EX) < 0)
                                log_debug_errno(errno, "Failed to lock loop block device, ignoring: %m");

                        r = block_device_remove_all_partitions(d->dev, d->fd);
                        if (r < 0)
                                log_debug_errno(r, "Failed to remove partitions of loopback block device, ignoring: %m");

                        if (ioctl(d->fd, LOOP_CLR_FD) < 0)
                                log_debug_errno(errno, "Failed to clear loop device, ignoring: %m");
                }

                safe_close(d->fd);
        }

        /* Now that the block device is released, let's also try to remove it. LOOP_CTL_REMOVE may
         * transiently fail with EBUSY (LOOP_CLR_FD is asynchronous), hence retry with a delay. */
        if (control >= 0)
                for (unsigned n_attempts = 0;;) {
                        if (ioctl(control, LOOP_CTL_REMOVE, d->nr) >= 0)
                                break;
                        if (errno != EBUSY || ++n_attempts >= 64) {
                                log_debug_errno(errno, "Failed to remove device %s: %m", strna(d->node));
                                break;
                        }
                        (void) usleep(50 * USEC_PER_MSEC);
                }

        free(d->node);
        sd_device_unref(d->dev);
        free(d->backing_file);
        return mfree(d);
}
828
/* Presumably generates loop_device_ref()/loop_device_unref(), calling loop_device_free()
 * on the last unref — NOTE(review): confirm against the macro definition in the headers. */
DEFINE_TRIVIAL_REF_UNREF_FUNC(LoopDevice, loop_device, loop_device_free);
830
831 void loop_device_relinquish(LoopDevice *d) {
832 assert(d);
833
834 /* Don't attempt to clean up the loop device anymore from this point on. Leave the clean-ing up to the kernel
835 * itself, using the loop device "auto-clear" logic we already turned on when creating the device. */
836
837 d->relinquished = true;
838 }
839
840 void loop_device_unrelinquish(LoopDevice *d) {
841 assert(d);
842 d->relinquished = false;
843 }
844
/* Wraps an existing (typically foreign) loopback or other block device in a LoopDevice
 * object, without creating anything: reopens the node with the requested access mode,
 * optionally takes a BSD lock, and collects metadata (loop status, diskseq, sector size,
 * devnum, node name). The resulting object is marked relinquished, i.e. freeing it will
 * not destroy the underlying device. */
int loop_device_open(
                sd_device *dev,
                int open_flags,
                int lock_op,
                LoopDevice **ret) {

        _cleanup_close_ int fd = -EBADF, lock_fd = -EBADF;
        _cleanup_free_ char *node = NULL, *backing_file = NULL;
        dev_t devnum, backing_devno = 0;
        struct loop_info64 info;
        ino_t backing_inode = 0;
        uint64_t diskseq = 0;
        LoopDevice *d;
        const char *s;
        int r, nr = -1;

        assert(dev);
        assert(IN_SET(open_flags, O_RDWR, O_RDONLY));
        assert(ret);

        /* Even if fd is provided through the argument in loop_device_open_from_fd(), we reopen the inode
         * here, instead of keeping just a dup() clone of it around, since we want to ensure that the
         * O_DIRECT flag of the handle we keep is off, we have our own file index, and have the right
         * read/write mode in effect. */
        fd = sd_device_open(dev, O_CLOEXEC|O_NONBLOCK|O_NOCTTY|open_flags);
        if (fd < 0)
                return fd;

        if ((lock_op & ~LOCK_NB) != LOCK_UN) {
                lock_fd = open_lock_fd(fd, lock_op);
                if (lock_fd < 0)
                        return lock_fd;
        }

        /* If this is an actual loopback device, collect its number and backing file metadata;
         * if the ioctl fails we keep the nr = -1 / empty defaults. */
        if (ioctl(fd, LOOP_GET_STATUS64, &info) >= 0) {
#if HAVE_VALGRIND_MEMCHECK_H
                /* Valgrind currently doesn't know LOOP_GET_STATUS64. Remove this once it does */
                VALGRIND_MAKE_MEM_DEFINED(&info, sizeof(info));
#endif
                nr = info.lo_number;

                if (sd_device_get_sysattr_value(dev, "loop/backing_file", &s) >= 0) {
                        backing_file = strdup(s);
                        if (!backing_file)
                                return -ENOMEM;
                }

                backing_devno = info.lo_device;
                backing_inode = info.lo_inode;
        }

        r = fd_get_diskseq(fd, &diskseq);
        if (r < 0 && r != -EOPNOTSUPP)
                return r;

        uint32_t sector_size;
        r = blockdev_get_sector_size(fd, &sector_size);
        if (r < 0)
                return r;

        r = sd_device_get_devnum(dev, &devnum);
        if (r < 0)
                return r;

        r = sd_device_get_devname(dev, &s);
        if (r < 0)
                return r;

        node = strdup(s);
        if (!node)
                return -ENOMEM;

        d = new(LoopDevice, 1);
        if (!d)
                return -ENOMEM;

        *d = (LoopDevice) {
                .n_ref = 1,
                .fd = TAKE_FD(fd),
                .lock_fd = TAKE_FD(lock_fd),
                .nr = nr,
                .node = TAKE_PTR(node),
                .dev = sd_device_ref(dev),
                .backing_file = TAKE_PTR(backing_file),
                .backing_inode = backing_inode,
                .backing_devno = backing_devno,
                .relinquished = true, /* It's not ours, don't try to destroy it when this object is freed */
                .devno = devnum,
                .diskseq = diskseq,
                .uevent_seqnum_not_before = UINT64_MAX,
                .timestamp_not_before = USEC_INFINITY,
                .sector_size = sector_size,
        };

        *ret = d;
        return 0;
}
942
943 int loop_device_open_from_fd(
944 int fd,
945 int open_flags,
946 int lock_op,
947 LoopDevice **ret) {
948
949 _cleanup_(sd_device_unrefp) sd_device *dev = NULL;
950 int r;
951
952 assert(fd >= 0);
953
954 r = block_device_new_from_fd(fd, 0, &dev);
955 if (r < 0)
956 return r;
957
958 return loop_device_open(dev, open_flags, lock_op, ret);
959 }
960
961 int loop_device_open_from_path(
962 const char *path,
963 int open_flags,
964 int lock_op,
965 LoopDevice **ret) {
966
967 _cleanup_(sd_device_unrefp) sd_device *dev = NULL;
968 int r;
969
970 assert(path);
971
972 r = block_device_new_from_path(path, 0, &dev);
973 if (r < 0)
974 return r;
975
976 return loop_device_open(dev, open_flags, lock_op, ret);
977 }
978
979 static int resize_partition(int partition_fd, uint64_t offset, uint64_t size) {
980 char sysfs[STRLEN("/sys/dev/block/:/partition") + 2*DECIMAL_STR_MAX(dev_t) + 1];
981 _cleanup_free_ char *buffer = NULL;
982 uint64_t current_offset, current_size, partno;
983 _cleanup_close_ int whole_fd = -EBADF;
984 struct stat st;
985 dev_t devno;
986 int r;
987
988 assert(partition_fd >= 0);
989
990 /* Resizes the partition the loopback device refer to (assuming it refers to one instead of an actual
991 * loopback device), and changes the offset, if needed. This is a fancy wrapper around
992 * BLKPG_RESIZE_PARTITION. */
993
994 if (fstat(partition_fd, &st) < 0)
995 return -errno;
996
997 assert(S_ISBLK(st.st_mode));
998
999 xsprintf(sysfs, "/sys/dev/block/" DEVNUM_FORMAT_STR "/partition", DEVNUM_FORMAT_VAL(st.st_rdev));
1000 r = read_one_line_file(sysfs, &buffer);
1001 if (r == -ENOENT) /* not a partition, cannot resize */
1002 return -ENOTTY;
1003 if (r < 0)
1004 return r;
1005 r = safe_atou64(buffer, &partno);
1006 if (r < 0)
1007 return r;
1008
1009 xsprintf(sysfs, "/sys/dev/block/" DEVNUM_FORMAT_STR "/start", DEVNUM_FORMAT_VAL(st.st_rdev));
1010
1011 buffer = mfree(buffer);
1012 r = read_one_line_file(sysfs, &buffer);
1013 if (r < 0)
1014 return r;
1015 r = safe_atou64(buffer, &current_offset);
1016 if (r < 0)
1017 return r;
1018 if (current_offset > UINT64_MAX/512U)
1019 return -EINVAL;
1020 current_offset *= 512U;
1021
1022 if (ioctl(partition_fd, BLKGETSIZE64, &current_size) < 0)
1023 return -EINVAL;
1024
1025 if (size == UINT64_MAX && offset == UINT64_MAX)
1026 return 0;
1027 if (current_size == size && current_offset == offset)
1028 return 0;
1029
1030 xsprintf(sysfs, "/sys/dev/block/" DEVNUM_FORMAT_STR "/../dev", DEVNUM_FORMAT_VAL(st.st_rdev));
1031
1032 buffer = mfree(buffer);
1033 r = read_one_line_file(sysfs, &buffer);
1034 if (r < 0)
1035 return r;
1036 r = parse_devnum(buffer, &devno);
1037 if (r < 0)
1038 return r;
1039
1040 whole_fd = r = device_open_from_devnum(S_IFBLK, devno, O_RDWR|O_CLOEXEC|O_NONBLOCK|O_NOCTTY, NULL);
1041 if (r < 0)
1042 return r;
1043
1044 return block_device_resize_partition(
1045 whole_fd,
1046 partno,
1047 offset == UINT64_MAX ? current_offset : offset,
1048 size == UINT64_MAX ? current_size : size);
1049 }
1050
int loop_device_refresh_size(LoopDevice *d, uint64_t offset, uint64_t size) {
        struct loop_info64 info;

        assert(d);
        assert(d->fd >= 0);

        /* Changes the offset/start of the loop device relative to the beginning of the underlying file or
         * block device. If this loop device actually refers to a partition and not a loopback device, we'll
         * try to adjust the partition offsets instead.
         *
         * If either offset or size is UINT64_MAX we won't change that parameter.
         *
         * Returns 0 on success (including when there was nothing to change), negative errno-style
         * error otherwise. */

        if (d->nr < 0) /* not a loopback device */
                return resize_partition(d->fd, offset, size);

        if (ioctl(d->fd, LOOP_GET_STATUS64, &info) < 0)
                return -errno;

#if HAVE_VALGRIND_MEMCHECK_H
        /* Valgrind currently doesn't know LOOP_GET_STATUS64. Remove this once it does */
        VALGRIND_MAKE_MEM_DEFINED(&info, sizeof(info));
#endif

        /* Shortcut out if nothing is requested or if the device is already configured as wanted. */
        if (size == UINT64_MAX && offset == UINT64_MAX)
                return 0;
        if (info.lo_sizelimit == size && info.lo_offset == offset)
                return 0;

        if (size != UINT64_MAX)
                info.lo_sizelimit = size;
        if (offset != UINT64_MAX)
                info.lo_offset = offset;

        return RET_NERRNO(ioctl(d->fd, LOOP_SET_STATUS64, &info));
}
1086
1087 int loop_device_flock(LoopDevice *d, int operation) {
1088 assert(IN_SET(operation & ~LOCK_NB, LOCK_UN, LOCK_SH, LOCK_EX));
1089 assert(d);
1090
1091 /* When unlocking just close the lock fd */
1092 if ((operation & ~LOCK_NB) == LOCK_UN) {
1093 d->lock_fd = safe_close(d->lock_fd);
1094 return 0;
1095 }
1096
1097 /* If we had no lock fd so far, create one and lock it right-away */
1098 if (d->lock_fd < 0) {
1099 assert(d->fd >= 0);
1100
1101 d->lock_fd = open_lock_fd(d->fd, operation);
1102 if (d->lock_fd < 0)
1103 return d->lock_fd;
1104
1105 return 0;
1106 }
1107
1108 /* Otherwise change the current lock mode on the existing fd */
1109 return RET_NERRNO(flock(d->lock_fd, operation));
1110 }
1111
1112 int loop_device_sync(LoopDevice *d) {
1113 assert(d);
1114 assert(d->fd >= 0);
1115
1116 /* We also do this implicitly in loop_device_unref(). Doing this explicitly here has the benefit that
1117 * we can check the return value though. */
1118
1119 return RET_NERRNO(fsync(d->fd));
1120 }
1121
1122 int loop_device_set_autoclear(LoopDevice *d, bool autoclear) {
1123 struct loop_info64 info;
1124
1125 assert(d);
1126
1127 if (ioctl(d->fd, LOOP_GET_STATUS64, &info) < 0)
1128 return -errno;
1129
1130 if (autoclear == FLAGS_SET(info.lo_flags, LO_FLAGS_AUTOCLEAR))
1131 return 0;
1132
1133 SET_FLAG(info.lo_flags, LO_FLAGS_AUTOCLEAR, autoclear);
1134
1135 if (ioctl(d->fd, LOOP_SET_STATUS64, &info) < 0)
1136 return -errno;
1137
1138 return 1;
1139 }
1140
1141 int loop_device_set_filename(LoopDevice *d, const char *name) {
1142 struct loop_info64 info;
1143
1144 assert(d);
1145
1146 /* Sets the .lo_file_name of the loopback device. This is supposed to contain the path to the file
1147 * backing the block device, but is actually just a free-form string you can pass to the kernel. Most
1148 * tools that actually care for the backing file path use the sysfs attribute file loop/backing_file
1149 * which is a kernel generated string, subject to file system namespaces and such.
1150 *
1151 * .lo_file_name is useful since userspace can select it freely when creating a loopback block
1152 * device, and we can use it for /dev/loop/by-ref/ symlinks, and similar, so that apps can recognize
1153 * their own loopback files. */
1154
1155 if (name && strlen(name) >= sizeof(info.lo_file_name))
1156 return -ENOBUFS;
1157
1158 if (ioctl(d->fd, LOOP_GET_STATUS64, &info) < 0)
1159 return -errno;
1160
1161 if (strneq((char*) info.lo_file_name, strempty(name), sizeof(info.lo_file_name)))
1162 return 0;
1163
1164 if (name) {
1165 strncpy((char*) info.lo_file_name, name, sizeof(info.lo_file_name)-1);
1166 info.lo_file_name[sizeof(info.lo_file_name)-1] = 0;
1167 } else
1168 memzero(info.lo_file_name, sizeof(info.lo_file_name));
1169
1170 if (ioctl(d->fd, LOOP_SET_STATUS64, &info) < 0)
1171 return -errno;
1172
1173 return 1;
1174 }