src/shared/loop-util.c
/* SPDX-License-Identifier: LGPL-2.1-or-later */

#if HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include <errno.h>
#include <fcntl.h>
#include <linux/blkpg.h>
#include <linux/fs.h>
#include <linux/loop.h>
#include <sys/file.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "sd-device.h"

#include "alloc-util.h"
#include "blockdev-util.h"
#include "device-util.h"
#include "devnum-util.h"
#include "env-util.h"
#include "errno-util.h"
#include "fd-util.h"
#include "fileio.h"
#include "loop-util.h"
#include "missing_loop.h"
#include "parse-util.h"
#include "random-util.h"
#include "stat-util.h"
#include "stdio-util.h"
#include "string-util.h"
#include "tmpfile-util.h"

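/* _cleanup_ helper: detaches whatever is attached to the loop device and closes the fd. Used below (via
 * _cleanup_(cleanup_clear_loop_close)) so that a half-configured loop device is not leaked on error paths. */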
static void cleanup_clear_loop_close(int *fd) {
        if (*fd < 0)
                return;

        (void) ioctl(*fd, LOOP_CLR_FD);
        (void) safe_close(*fd);
}

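/* Returns true if the loop device the fd refers to currently has a backing file attached, false if it is
 * unbound, negative errno on failure. */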
static int loop_is_bound(int fd) {
        struct loop_info64 info;

        assert(fd >= 0);

        if (ioctl(fd, LOOP_GET_STATUS64, &info) < 0) {
                if (errno == ENXIO)
                        return false; /* not bound! */

                return -errno;
        }

        return true; /* bound! */
}

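/* Reads the current uevent sequence number from /sys/kernel/uevent_seqnum, so that callers can ignore
 * uevents generated before a loop device attachment of ours. */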
static int get_current_uevent_seqnum(uint64_t *ret) {
        _cleanup_free_ char *p = NULL;
        int r;

        r = read_full_virtual_file("/sys/kernel/uevent_seqnum", &p, NULL);
        if (r < 0)
                return log_debug_errno(r, "Failed to read current uevent sequence number: %m");

        r = safe_atou64(strstrip(p), ret);
        if (r < 0)
                return log_debug_errno(r, "Failed to parse current uevent sequence number: %s", p);

        return 0;
}

static int device_has_block_children(sd_device *d) {
        _cleanup_(sd_device_enumerator_unrefp) sd_device_enumerator *e = NULL;
        const char *main_ss, *main_dt;
        sd_device *q;
        int r;

        assert(d);

        /* Checks if the specified device currently has block device children (i.e. partition block
         * devices). */

        r = sd_device_get_subsystem(d, &main_ss);
        if (r < 0)
                return r;

        if (!streq(main_ss, "block"))
                return -EINVAL;

        r = sd_device_get_devtype(d, &main_dt);
        if (r < 0)
                return r;

        if (!streq(main_dt, "disk")) /* Refuse invocation on partition block device, insist on "whole" device */
                return -EINVAL;

        r = sd_device_enumerator_new(&e);
        if (r < 0)
                return r;

        r = sd_device_enumerator_allow_uninitialized(e);
        if (r < 0)
                return r;

        r = sd_device_enumerator_add_match_parent(e, d);
        if (r < 0)
                return r;

        FOREACH_DEVICE(e, q) {
                const char *ss, *dt;

                r = sd_device_get_subsystem(q, &ss);
                if (r < 0) {
                        log_device_debug_errno(q, r, "Failed to get subsystem of child, ignoring: %m");
                        continue;
                }

                if (!streq(ss, "block")) {
                        log_device_debug(q, "Skipping child that is not a block device (subsystem=%s).", ss);
                        continue;
                }

                r = sd_device_get_devtype(q, &dt);
                if (r < 0) {
                        log_device_debug_errno(q, r, "Failed to get devtype of child, ignoring: %m");
                        continue;
                }

                if (!streq(dt, "partition")) {
                        log_device_debug(q, "Skipping non-partition child (devtype=%s).", dt);
                        continue;
                }

                return true; /* we have block device children */
        }

        return false;
}

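/* Attaches the backing fd from the loop_config to the loop device /dev/loop<nr> whose fd is passed in.
 * Prefers the LOOP_CONFIGURE ioctl (one atomic call), and falls back to the classic LOOP_SET_FD +
 * LOOP_SET_STATUS64 sequence if the kernel does not support it or does not honour all requested
 * parameters. On success, optionally returns a uevent seqnum and a monotonic timestamp taken just before
 * the attachment, so that callers can filter out unrelated uevents. */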
static int loop_configure(
                int fd,
                int nr,
                const struct loop_config *c,
                bool *try_loop_configure,
                uint64_t *ret_seqnum_not_before,
                usec_t *ret_timestamp_not_before) {

        _cleanup_(sd_device_unrefp) sd_device *d = NULL;
        _cleanup_free_ char *sysname = NULL;
        _cleanup_close_ int lock_fd = -1;
        struct loop_info64 info_copy;
        uint64_t seqnum;
        usec_t timestamp;
        int r;

        assert(fd >= 0);
        assert(nr >= 0);
        assert(c);
        assert(try_loop_configure);

        if (asprintf(&sysname, "loop%i", nr) < 0)
                return -ENOMEM;

        r = sd_device_new_from_subsystem_sysname(&d, "block", sysname);
        if (r < 0)
                return r;

        /* Let's lock the device before we do anything. We take the BSD lock on a second, separately opened
         * fd for the device. udev after all watches for close() events (specifically IN_CLOSE_WRITE) on
         * block devices to reprobe them, hence by having a separate fd we will later close() we can ensure
         * we trigger udev after everything is done. If we'd lock our own fd instead and keep it open for a
         * long time udev would possibly never run on it again, even though the fd is unlocked, simply
         * because we never close() it. It also has the nice benefit we can use the _cleanup_close_ logic to
         * automatically release the lock, after we are done. */
        lock_fd = fd_reopen(fd, O_RDWR|O_CLOEXEC|O_NONBLOCK|O_NOCTTY);
        if (lock_fd < 0)
                return lock_fd;
        if (flock(lock_fd, LOCK_EX) < 0)
                return -errno;

        /* Let's see if the device is really detached, i.e. currently has no associated partition block
         * devices. On various kernels (such as 5.8) it is possible to have a loopback block device that
         * superficially is detached but still has partition block devices associated with it. They only go
         * away when the device is reattached. (Yes, LOOP_CLR_FD doesn't work then, because officially
         * nothing is attached, and LOOP_CTL_REMOVE doesn't either, since it doesn't care about partition
         * block devices.) */
        r = device_has_block_children(d);
        if (r < 0)
                return r;
        if (r > 0) {
                r = loop_is_bound(fd);
                if (r < 0)
                        return r;
                if (r > 0)
                        return -EBUSY;

                return -EUCLEAN; /* Not bound, but still has partition children? Tell caller to attach
                                  * something so that the partition block devices go away too. */
        }

        if (*try_loop_configure) {
                /* Acquire uevent seqnum immediately before attaching the loopback device. This allows
                 * callers to ignore all uevents with a seqnum before this one, if they need to associate
                 * uevents with this attachment. Doing so isn't race-free though, as uevents that happen in
                 * the window between this reading of the seqnum, and the LOOP_CONFIGURE call might still be
                 * mistaken as originating from our attachment, even though they might be caused by an
                 * earlier use. But doing this at least shortens the race window a bit. */
                r = get_current_uevent_seqnum(&seqnum);
                if (r < 0)
                        return r;
                timestamp = now(CLOCK_MONOTONIC);

                if (ioctl(fd, LOOP_CONFIGURE, c) < 0) {
                        /* Do fallback only if LOOP_CONFIGURE is not supported, propagate all other
                         * errors. Note that the kernel is weird: non-existing ioctls currently return EINVAL
                         * rather than ENOTTY on loopback block devices. They should fix that in the kernel,
                         * but in the meantime we accept both here. */
                        if (!ERRNO_IS_NOT_SUPPORTED(errno) && errno != EINVAL)
                                return -errno;

                        *try_loop_configure = false;
                } else {
                        bool good = true;

                        if (c->info.lo_sizelimit != 0) {
                                /* Kernel 5.8 vanilla doesn't properly propagate the size limit into the
                                 * block device. If it's used, let's immediately check whether it had the
                                 * desired effect, and if not fall back to the classic LOOP_SET_STATUS64. */
                                uint64_t z;

                                if (ioctl(fd, BLKGETSIZE64, &z) < 0) {
                                        r = -errno;
                                        goto fail;
                                }

                                if (z != c->info.lo_sizelimit) {
                                        log_debug("LOOP_CONFIGURE is broken, doesn't honour .lo_sizelimit. Falling back to LOOP_SET_STATUS64.");
                                        good = false;
                                }
                        }

                        if (FLAGS_SET(c->info.lo_flags, LO_FLAGS_PARTSCAN)) {
                                /* Kernel 5.8 vanilla doesn't properly propagate the partition scanning flag
                                 * into the block device. Let's hence verify if things work correctly here
                                 * before returning. */

                                r = blockdev_partscan_enabled(fd);
                                if (r < 0)
                                        goto fail;
                                if (r == 0) {
                                        log_debug("LOOP_CONFIGURE is broken, doesn't honour LO_FLAGS_PARTSCAN. Falling back to LOOP_SET_STATUS64.");
                                        good = false;
                                }
                        }

                        if (!good) {
                                /* LOOP_CONFIGURE doesn't work. Remember that. */
                                *try_loop_configure = false;

                                /* We return EBUSY here instead of retrying immediately with LOOP_SET_FD,
                                 * because LOOP_CLR_FD is async: if the operation cannot be executed right
                                 * away it just sets the autoclear flag on the device. This means there's a
                                 * good chance we cannot actually reuse the loopback device right away. Hence
                                 * let's assume it's busy, avoid the trouble and let the calling loop call us
                                 * again with a new, likely unused device. */
                                r = -EBUSY;
                                goto fail;
                        }

                        if (ret_seqnum_not_before)
                                *ret_seqnum_not_before = seqnum;
                        if (ret_timestamp_not_before)
                                *ret_timestamp_not_before = timestamp;

                        return 0;
                }
        }

        /* Let's read the seqnum again, to shorten the window. */
        r = get_current_uevent_seqnum(&seqnum);
        if (r < 0)
                return r;
        timestamp = now(CLOCK_MONOTONIC);

        /* Since kernel commit 5db470e229e22b7eda6e23b5566e532c96fb5bc3 (kernel v5.0) the LOOP_SET_STATUS64
         * ioctl can return EAGAIN in case we change the lo_offset field, if someone else is accessing the
         * block device while we try to reconfigure it. This is a pretty common case, since udev might
         * instantly start probing the device as soon as we attach an fd to it. Hence handle it in two ways:
         * first, let's take the BSD lock to ensure that udev will not step in between the point in
         * time where we attach the fd and where we reconfigure the device. Secondly, let's wait a bit on
         * EAGAIN and retry. The former should be an efficient mechanism to avoid having to wait
         * needlessly if we are just racing against udev. The latter is protection against all other cases,
         * i.e. peers that do not take the BSD lock. */

        if (ioctl(fd, LOOP_SET_FD, c->fd) < 0)
                return -errno;

        /* Only some of the flags LOOP_CONFIGURE can set are also settable via LOOP_SET_STATUS64, hence mask
         * them out. */
        info_copy = c->info;
        info_copy.lo_flags &= LOOP_SET_STATUS_SETTABLE_FLAGS;

        for (unsigned n_attempts = 0;;) {
                if (ioctl(fd, LOOP_SET_STATUS64, &info_copy) >= 0)
                        break;
                if (errno != EAGAIN || ++n_attempts >= 64) {
                        r = log_debug_errno(errno, "Failed to configure loopback device: %m");
                        goto fail;
                }

                /* Sleep some random time, but at least 10ms, at most 250ms. Increase the delay the more
                 * failed attempts we see */
                (void) usleep(UINT64_C(10) * USEC_PER_MSEC +
                              random_u64_range(UINT64_C(240) * USEC_PER_MSEC * n_attempts/64));
        }

        /* Work around a kernel bug, where changing offset/size of the loopback device doesn't correctly
         * invalidate the buffer cache. For details see:
         *
         * https://android.googlesource.com/platform/system/apex/+/bef74542fbbb4cd629793f4efee8e0053b360570
         *
         * This was fixed in kernel 5.0, see:
         *
         * https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=5db470e229e22b7eda6e23b5566e532c96fb5bc3
         *
         * We'll run the work-around here in the legacy LOOP_SET_STATUS64 codepath. In the LOOP_CONFIGURE
         * codepath above it should not be necessary. */
        if (c->info.lo_offset != 0 || c->info.lo_sizelimit != 0)
                if (ioctl(fd, BLKFLSBUF, 0) < 0)
                        log_debug_errno(errno, "Failed to issue BLKFLSBUF ioctl, ignoring: %m");

        /* LO_FLAGS_DIRECT_IO is a flag we need to configure via an explicit ioctl. */
        if (FLAGS_SET(c->info.lo_flags, LO_FLAGS_DIRECT_IO)) {
                unsigned long b = 1;

                if (ioctl(fd, LOOP_SET_DIRECT_IO, b) < 0)
                        log_debug_errno(errno, "Failed to enable direct IO mode on loopback device /dev/loop%i, ignoring: %m", nr);
        }

        if (ret_seqnum_not_before)
                *ret_seqnum_not_before = seqnum;
        if (ret_timestamp_not_before)
                *ret_timestamp_not_before = timestamp;

        return 0;

fail:
        (void) ioctl(fd, LOOP_CLR_FD);
        return r;
}

static int attach_empty_file(int loop, int nr) {
        _cleanup_close_ int fd = -1;

        /* So here's the thing: on various kernels (5.8 at least) loop block devices might enter a state
         * where they are detached but nonetheless have partitions, when used heavily. Accessing these
         * partitions results in immediate IO errors. There's no pretty way to get rid of them
         * again. Neither LOOP_CLR_FD nor LOOP_CTL_REMOVE suffice (see above). What does work is to
         * reassociate them with a new fd however. Hence that's what we do here: we associate the device
         * with an empty file (i.e. an image that definitely has no partitions). We then immediately clear
         * it again. This suffices to make the partitions go away. Ugly but appears to work. */

        log_debug("Found unattached loopback block device /dev/loop%i with partitions. Attaching empty file to remove them.", nr);

        fd = open_tmpfile_unlinkable(NULL, O_RDONLY);
        if (fd < 0)
                return fd;

        if (flock(loop, LOCK_EX) < 0)
                return -errno;

        if (ioctl(loop, LOOP_SET_FD, fd) < 0)
                return -errno;

        if (ioctl(loop, LOOP_SET_STATUS64, &(struct loop_info64) {
                                .lo_flags = LO_FLAGS_READ_ONLY|
                                            LO_FLAGS_AUTOCLEAR|
                                            LO_FLAGS_PARTSCAN, /* enable partscan, so that the partitions really go away */
                  }) < 0)
                return -errno;

        if (ioctl(loop, LOOP_CLR_FD) < 0)
                return -errno;

        /* The caller is expected to immediately close the loopback device after this, so that the BSD lock
         * is released, and udev sees the changes. */
        return 0;
}

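/* Allocates a loopback device for the given backing fd (or, if the fd already refers to a whole block
 * device and no offset/size window was requested, simply reopens that device), configures it, and returns
 * a LoopDevice object wrapping it. Loops around LOOP_CTL_GET_FREE and retries if another allocator races
 * with us for the same device number. */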
static int loop_device_make_internal(
                int fd,
                int open_flags,
                uint64_t offset,
                uint64_t size,
                uint32_t loop_flags,
                LoopDevice **ret) {

        _cleanup_close_ int direct_io_fd = -1;
        _cleanup_free_ char *loopdev = NULL;
        bool try_loop_configure = true;
        struct loop_config config;
        LoopDevice *d = NULL;
        uint64_t seqnum = UINT64_MAX;
        usec_t timestamp = USEC_INFINITY;
        int nr = -1, r, f_flags;
        struct stat st;

        assert(fd >= 0);
        assert(ret);
        assert(IN_SET(open_flags, O_RDWR, O_RDONLY));

        if (fstat(fd, &st) < 0)
                return -errno;

        if (S_ISBLK(st.st_mode)) {
                if (ioctl(fd, LOOP_GET_STATUS64, &config.info) >= 0) {
                        /* Oh! This is a loopback device? That's interesting! */

#if HAVE_VALGRIND_MEMCHECK_H
                        /* Valgrind currently doesn't know LOOP_GET_STATUS64. Remove this once it does */
                        VALGRIND_MAKE_MEM_DEFINED(&config.info, sizeof(config.info));
#endif
                        nr = config.info.lo_number;

                        if (asprintf(&loopdev, "/dev/loop%i", nr) < 0)
                                return -ENOMEM;
                }

                if (offset == 0 && IN_SET(size, 0, UINT64_MAX)) {
                        _cleanup_close_ int copy = -1;
                        uint64_t diskseq = 0;

                        /* If this is already a block device and we are supposed to cover the whole of it
                         * then store an fd to the original open device node — and do not actually create an
                         * unnecessary loopback device for it. Note that we reopen the inode here, instead of
                         * keeping just a dup() clone of it around, since we want to ensure that the O_DIRECT
                         * flag of the handle we keep is off, we have our own file index, and have the right
                         * read/write mode in effect. */

                        copy = fd_reopen(fd, open_flags|O_NONBLOCK|O_CLOEXEC|O_NOCTTY);
                        if (copy < 0)
                                return copy;

                        r = fd_get_diskseq(copy, &diskseq);
                        if (r < 0 && r != -EOPNOTSUPP)
                                return r;

                        d = new(LoopDevice, 1);
                        if (!d)
                                return -ENOMEM;
                        *d = (LoopDevice) {
                                .fd = TAKE_FD(copy),
                                .nr = nr,
                                .node = TAKE_PTR(loopdev),
                                .relinquished = true, /* It's not allocated by us, don't destroy it when this object is freed */
                                .devno = st.st_rdev,
                                .diskseq = diskseq,
                                .uevent_seqnum_not_before = UINT64_MAX,
                                .timestamp_not_before = USEC_INFINITY,
                        };

                        *ret = d;
                        return d->fd;
                }
        } else {
                r = stat_verify_regular(&st);
                if (r < 0)
                        return r;
        }

        f_flags = fcntl(fd, F_GETFL);
        if (f_flags < 0)
                return -errno;

        if (FLAGS_SET(loop_flags, LO_FLAGS_DIRECT_IO) != FLAGS_SET(f_flags, O_DIRECT)) {
                /* If LO_FLAGS_DIRECT_IO is requested, then make sure we have the fd open with O_DIRECT, as
                 * that's required. Conversely, if it's off require that O_DIRECT is off too (that's because
                 * new kernels will implicitly enable LO_FLAGS_DIRECT_IO if O_DIRECT is set).
                 *
                 * Our intention here is that LO_FLAGS_DIRECT_IO is the primary knob, and O_DIRECT derived
                 * from that automatically. */

                direct_io_fd = fd_reopen(fd, (FLAGS_SET(loop_flags, LO_FLAGS_DIRECT_IO) ? O_DIRECT : 0)|O_CLOEXEC|O_NONBLOCK|open_flags);
                if (direct_io_fd < 0) {
                        if (!FLAGS_SET(loop_flags, LO_FLAGS_DIRECT_IO))
                                return log_debug_errno(errno, "Failed to reopen file descriptor without O_DIRECT: %m");

                        /* Some file systems might not support O_DIRECT, let's gracefully continue without it then. */
                        log_debug_errno(errno, "Failed to enable O_DIRECT for backing file descriptor for loopback device. Continuing without.");
                        loop_flags &= ~LO_FLAGS_DIRECT_IO;
                } else
                        fd = direct_io_fd; /* From now on, operate on our new O_DIRECT fd */
        }

        _cleanup_close_ int control = -1;
        _cleanup_(cleanup_clear_loop_close) int loop_with_fd = -1;

        control = open("/dev/loop-control", O_RDWR|O_CLOEXEC|O_NOCTTY|O_NONBLOCK);
        if (control < 0)
                return -errno;

        config = (struct loop_config) {
                .fd = fd,
                .info = {
                        /* Use the specified flags, but configure the read-only flag from the open flags, and force autoclear */
                        .lo_flags = (loop_flags & ~LO_FLAGS_READ_ONLY) | ((open_flags & O_ACCMODE) == O_RDONLY ? LO_FLAGS_READ_ONLY : 0) | LO_FLAGS_AUTOCLEAR,
                        .lo_offset = offset,
                        .lo_sizelimit = size == UINT64_MAX ? 0 : size,
                },
        };

        /* Loop around LOOP_CTL_GET_FREE, since at the moment we attempt to open the returned device it might
         * be gone already, taken by somebody else racing against us. */
        for (unsigned n_attempts = 0;;) {
                _cleanup_close_ int loop = -1;

                /* Let's take a lock on the control device first. On a busy system, where many programs
                 * attempt to allocate a loopback device at the same time, we might otherwise keep looping
                 * around relatively heavy operations: asking for a free loopback device, then opening it,
                 * validating it, attaching something to it. Let's serialize this whole operation, to make
                 * unnecessary busywork less likely. Note that this is just something we do to optimize our
                 * own code (and whoever else decides to use LOCK_EX locks for this), taking this lock is not
                 * necessary, it just means it's less likely we have to iterate through this loop again and
                 * again if our own code races against our own code. */
                if (flock(control, LOCK_EX) < 0)
                        return -errno;

                nr = ioctl(control, LOOP_CTL_GET_FREE);
                if (nr < 0)
                        return -errno;

                if (asprintf(&loopdev, "/dev/loop%i", nr) < 0)
                        return -ENOMEM;

                loop = open(loopdev, O_CLOEXEC|O_NONBLOCK|O_NOCTTY|open_flags);
                if (loop < 0) {
                        /* Somebody might've gotten the same number from the kernel, used the device,
                         * and called LOOP_CTL_REMOVE on it. Let's retry with a new number. */
                        if (!ERRNO_IS_DEVICE_ABSENT(errno))
                                return -errno;
                } else {
                        r = loop_configure(loop, nr, &config, &try_loop_configure, &seqnum, &timestamp);
                        if (r >= 0) {
                                loop_with_fd = TAKE_FD(loop);
                                break;
                        }
                        if (r == -EUCLEAN) {
                                /* Make left-over partition disappear hack (see above) */
                                r = attach_empty_file(loop, nr);
                                if (r < 0 && r != -EBUSY)
                                        return r;
                        } else if (r != -EBUSY)
                                return r;
                }

                /* OK, this didn't work, let's try again a bit later, but first release the lock on the
                 * control device */
                if (flock(control, LOCK_UN) < 0)
                        return -errno;

                if (++n_attempts >= 64) /* Give up eventually */
                        return -EBUSY;

                /* Now close the loop device explicitly. This will release any lock acquired by
                 * attach_empty_file() or similar, while we sleep below. */
                loop = safe_close(loop);
                loopdev = mfree(loopdev);

                /* Wait some random time, to make collision less likely. Let's pick a random time in the
                 * range 0ms…250ms, linearly scaled by the number of failed attempts. */
                (void) usleep(random_u64_range(UINT64_C(10) * USEC_PER_MSEC +
                                               UINT64_C(240) * USEC_PER_MSEC * n_attempts/64));
        }

        if (FLAGS_SET(loop_flags, LO_FLAGS_DIRECT_IO)) {
                struct loop_info64 info;

                if (ioctl(loop_with_fd, LOOP_GET_STATUS64, &info) < 0)
                        return -errno;

#if HAVE_VALGRIND_MEMCHECK_H
                VALGRIND_MAKE_MEM_DEFINED(&info, sizeof(info));
#endif

                /* On older kernels (<= 5.3) it was necessary to set the block size of the loopback block
                 * device to the logical block size of the underlying file system. Since there was no nice
                 * way to query the value, we are not bothering to do this however. On newer kernels the
                 * block size is propagated automatically and does not require intervention from us. We'll
                 * check here if enabling direct IO worked, to make this easily debuggable however.
                 *
                 * (Should anyone really care and actually wants direct IO on old kernels: it might be worth
                 * enabling direct IO with iteratively larger block sizes until it eventually works.) */
                if (!FLAGS_SET(info.lo_flags, LO_FLAGS_DIRECT_IO))
                        log_debug("Could not enable direct IO mode, proceeding in buffered IO mode.");
        }

        if (fstat(loop_with_fd, &st) < 0)
                return -errno;
        assert(S_ISBLK(st.st_mode));

        uint64_t diskseq = 0;
        r = fd_get_diskseq(loop_with_fd, &diskseq);
        if (r < 0 && r != -EOPNOTSUPP)
                return r;

        d = new(LoopDevice, 1);
        if (!d)
                return -ENOMEM;
        *d = (LoopDevice) {
                .fd = TAKE_FD(loop_with_fd),
                .node = TAKE_PTR(loopdev),
                .nr = nr,
                .devno = st.st_rdev,
                .diskseq = diskseq,
                .uevent_seqnum_not_before = seqnum,
                .timestamp_not_before = timestamp,
        };

        log_debug("Successfully acquired %s, devno=%u:%u, nr=%i, diskseq=%" PRIu64,
                  d->node,
                  major(d->devno), minor(d->devno),
                  d->nr,
                  d->diskseq);

        *ret = d;
        return d->fd;
}

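/* Applies the $SYSTEMD_LOOP_DIRECT_IO environment override: direct IO is requested by default, and is only
 * turned off if the variable is explicitly set to false. */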
static uint32_t loop_flags_mangle(uint32_t loop_flags) {
        int r;

        r = getenv_bool("SYSTEMD_LOOP_DIRECT_IO");
        if (r < 0 && r != -ENXIO)
                log_debug_errno(r, "Failed to parse $SYSTEMD_LOOP_DIRECT_IO, ignoring: %m");

        return UPDATE_FLAG(loop_flags, LO_FLAGS_DIRECT_IO, r != 0); /* Turn on LO_FLAGS_DIRECT_IO by default, unless explicitly configured to off. */
}

int loop_device_make(
                int fd,
                int open_flags,
                uint64_t offset,
                uint64_t size,
                uint32_t loop_flags,
                LoopDevice **ret) {

        assert(fd >= 0);
        assert(ret);

        return loop_device_make_internal(
                        fd,
                        open_flags,
                        offset,
                        size,
                        loop_flags_mangle(loop_flags),
                        ret);
}

int loop_device_make_by_path(
                const char *path,
                int open_flags,
                uint32_t loop_flags,
                LoopDevice **ret) {

        int r, basic_flags, direct_flags, rdwr_flags;
        _cleanup_close_ int fd = -1;
        bool direct = false;

        assert(path);
        assert(ret);
        assert(open_flags < 0 || IN_SET(open_flags, O_RDWR, O_RDONLY));

        /* Passing < 0 as open_flags here means we'll try to open the device writable if we can, retrying
         * read-only if we cannot. */

        loop_flags = loop_flags_mangle(loop_flags);

        /* Let's open with O_DIRECT if we can. But not all file systems support that, hence fall back to
         * non-O_DIRECT mode automatically, if it fails. */

        basic_flags = O_CLOEXEC|O_NONBLOCK|O_NOCTTY;
        direct_flags = FLAGS_SET(loop_flags, LO_FLAGS_DIRECT_IO) ? O_DIRECT : 0;
        rdwr_flags = open_flags >= 0 ? open_flags : O_RDWR;

        fd = open(path, basic_flags|direct_flags|rdwr_flags);
        if (fd < 0 && direct_flags != 0) /* If we had O_DIRECT on, and things failed with that, let's immediately try again without */
                fd = open(path, basic_flags|rdwr_flags);
        else
                direct = direct_flags != 0;
        if (fd < 0) {
                r = -errno;

                /* Retry read-only? */
                if (open_flags >= 0 || !(ERRNO_IS_PRIVILEGE(r) || r == -EROFS))
                        return r;

                fd = open(path, basic_flags|direct_flags|O_RDONLY);
                if (fd < 0 && direct_flags != 0) /* as above */
                        fd = open(path, basic_flags|O_RDONLY);
                else
                        direct = direct_flags != 0;
                if (fd < 0)
                        return r; /* Propagate original error */

                open_flags = O_RDONLY;
        } else if (open_flags < 0)
                open_flags = O_RDWR;

        log_debug("Opened '%s' in %s access mode%s, with O_DIRECT %s%s.",
                  path,
                  open_flags == O_RDWR ? "O_RDWR" : "O_RDONLY",
                  open_flags != rdwr_flags ? " (O_RDWR was requested but not allowed)" : "",
                  direct ? "enabled" : "disabled",
                  direct != (direct_flags != 0) ? " (O_DIRECT was requested but not supported)" : "");

        return loop_device_make_internal(fd, open_flags, 0, 0, loop_flags, ret);
}
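
/* Typical usage of the allocation helpers above (illustrative sketch only; the image path is a placeholder
 * and error handling is abbreviated):
 *
 *         LoopDevice *d = NULL;
 *         int r = loop_device_make_by_path("/path/to/image.raw", O_RDONLY, 0, &d);
 *         if (r < 0)
 *                 return r;
 *         ... use d->fd or d->node ...
 *         d = loop_device_unref(d);
 */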

LoopDevice* loop_device_unref(LoopDevice *d) {
        if (!d)
                return NULL;

        if (d->fd >= 0) {
                /* Implicitly sync the device, since otherwise in-flight blocks might not get written */
                if (fsync(d->fd) < 0)
                        log_debug_errno(errno, "Failed to sync loop block device, ignoring: %m");

                if (d->nr >= 0 && !d->relinquished) {
                        if (ioctl(d->fd, LOOP_CLR_FD) < 0)
                                log_debug_errno(errno, "Failed to clear loop device: %m");
                }

                safe_close(d->fd);
        }

        if (d->nr >= 0 && !d->relinquished) {
                _cleanup_close_ int control = -1;

                control = open("/dev/loop-control", O_RDWR|O_CLOEXEC|O_NOCTTY|O_NONBLOCK);
                if (control < 0)
                        log_warning_errno(errno,
                                          "Failed to open loop control device, cannot remove loop device %s: %m",
                                          strna(d->node));
                else
                        for (unsigned n_attempts = 0;;) {
                                if (ioctl(control, LOOP_CTL_REMOVE, d->nr) >= 0)
                                        break;
                                if (errno != EBUSY || ++n_attempts >= 64) {
                                        log_warning_errno(errno, "Failed to remove device %s: %m", strna(d->node));
                                        break;
                                }
                                (void) usleep(50 * USEC_PER_MSEC);
                        }
        }

        free(d->node);
        return mfree(d);
}

void loop_device_relinquish(LoopDevice *d) {
        assert(d);

        /* Don't attempt to clean up the loop device anymore from this point on. Leave the cleaning up to
         * the kernel itself, using the loop device "auto-clear" logic we already turned on when creating
         * the device. */

        d->relinquished = true;
}

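/* Wraps an already existing loop device node in a LoopDevice object, without configuring anything. The
 * device is treated as foreign, i.e. it is not detached or removed when the object is freed. */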
int loop_device_open(const char *loop_path, int open_flags, LoopDevice **ret) {
        _cleanup_close_ int loop_fd = -1;
        _cleanup_free_ char *p = NULL;
        struct loop_info64 info;
        struct stat st;
        LoopDevice *d;
        int nr;

        assert(loop_path);
        assert(IN_SET(open_flags, O_RDWR, O_RDONLY));
        assert(ret);

        loop_fd = open(loop_path, O_CLOEXEC|O_NONBLOCK|O_NOCTTY|open_flags);
        if (loop_fd < 0)
                return -errno;

        if (fstat(loop_fd, &st) < 0)
                return -errno;
        if (!S_ISBLK(st.st_mode))
                return -ENOTBLK;

        if (ioctl(loop_fd, LOOP_GET_STATUS64, &info) >= 0) {
#if HAVE_VALGRIND_MEMCHECK_H
                /* Valgrind currently doesn't know LOOP_GET_STATUS64. Remove this once it does */
                VALGRIND_MAKE_MEM_DEFINED(&info, sizeof(info));
#endif
                nr = info.lo_number;
        } else
                nr = -1;

        p = strdup(loop_path);
        if (!p)
                return -ENOMEM;

        d = new(LoopDevice, 1);
        if (!d)
                return -ENOMEM;

        *d = (LoopDevice) {
                .fd = TAKE_FD(loop_fd),
                .nr = nr,
                .node = TAKE_PTR(p),
                .relinquished = true, /* It's not ours, don't try to destroy it when this object is freed */
                .devno = st.st_rdev,
                .uevent_seqnum_not_before = UINT64_MAX,
                .timestamp_not_before = USEC_INFINITY,
        };

        *ret = d;
        return d->fd;
}

static int resize_partition(int partition_fd, uint64_t offset, uint64_t size) {
        char sysfs[STRLEN("/sys/dev/block/:/partition") + 2*DECIMAL_STR_MAX(dev_t) + 1];
        _cleanup_free_ char *whole = NULL, *buffer = NULL;
        uint64_t current_offset, current_size, partno;
        _cleanup_close_ int whole_fd = -1;
        struct stat st;
        dev_t devno;
        int r;

        assert(partition_fd >= 0);

        /* Resizes the partition the loopback device refers to (assuming it refers to one instead of an
         * actual loopback device), and changes the offset, if needed. This is a fancy wrapper around
         * BLKPG_RESIZE_PARTITION. */

        if (fstat(partition_fd, &st) < 0)
                return -errno;

        assert(S_ISBLK(st.st_mode));

        xsprintf(sysfs, "/sys/dev/block/%u:%u/partition", major(st.st_rdev), minor(st.st_rdev));
        r = read_one_line_file(sysfs, &buffer);
        if (r == -ENOENT) /* not a partition, cannot resize */
                return -ENOTTY;
        if (r < 0)
                return r;
        r = safe_atou64(buffer, &partno);
        if (r < 0)
                return r;

        xsprintf(sysfs, "/sys/dev/block/%u:%u/start", major(st.st_rdev), minor(st.st_rdev));

        buffer = mfree(buffer);
        r = read_one_line_file(sysfs, &buffer);
        if (r < 0)
                return r;
        r = safe_atou64(buffer, &current_offset);
        if (r < 0)
                return r;
        if (current_offset > UINT64_MAX/512U)
                return -EINVAL;
        current_offset *= 512U;

        if (ioctl(partition_fd, BLKGETSIZE64, &current_size) < 0)
                return -EINVAL;

        if (size == UINT64_MAX && offset == UINT64_MAX)
                return 0;
        if (current_size == size && current_offset == offset)
                return 0;

        xsprintf(sysfs, "/sys/dev/block/%u:%u/../dev", major(st.st_rdev), minor(st.st_rdev));

        buffer = mfree(buffer);
        r = read_one_line_file(sysfs, &buffer);
        if (r < 0)
                return r;
        r = parse_devnum(buffer, &devno);
        if (r < 0)
                return r;

        r = device_path_make_major_minor(S_IFBLK, devno, &whole);
        if (r < 0)
                return r;

        whole_fd = open(whole, O_RDWR|O_CLOEXEC|O_NONBLOCK|O_NOCTTY);
        if (whole_fd < 0)
                return -errno;

        struct blkpg_partition bp = {
                .pno = partno,
                .start = offset == UINT64_MAX ? current_offset : offset,
                .length = size == UINT64_MAX ? current_size : size,
        };

        struct blkpg_ioctl_arg ba = {
                .op = BLKPG_RESIZE_PARTITION,
                .data = &bp,
                .datalen = sizeof(bp),
        };

        return RET_NERRNO(ioctl(whole_fd, BLKPG, &ba));
}

int loop_device_refresh_size(LoopDevice *d, uint64_t offset, uint64_t size) {
        struct loop_info64 info;

        assert(d);

        /* Changes the offset/size of the loop device relative to the beginning of the underlying file or
         * block device. If this loop device actually refers to a partition and not a loopback device, we'll
         * try to adjust the partition offsets instead.
         *
         * If either offset or size is UINT64_MAX we won't change that parameter. */

        if (d->fd < 0)
                return -EBADF;

        if (d->nr < 0) /* not a loopback device */
                return resize_partition(d->fd, offset, size);

        if (ioctl(d->fd, LOOP_GET_STATUS64, &info) < 0)
                return -errno;

#if HAVE_VALGRIND_MEMCHECK_H
        /* Valgrind currently doesn't know LOOP_GET_STATUS64. Remove this once it does */
        VALGRIND_MAKE_MEM_DEFINED(&info, sizeof(info));
#endif

        if (size == UINT64_MAX && offset == UINT64_MAX)
                return 0;
        if (info.lo_sizelimit == size && info.lo_offset == offset)
                return 0;

        if (size != UINT64_MAX)
                info.lo_sizelimit = size;
        if (offset != UINT64_MAX)
                info.lo_offset = offset;

        return RET_NERRNO(ioctl(d->fd, LOOP_SET_STATUS64, &info));
}

int loop_device_flock(LoopDevice *d, int operation) {
        assert(d);

        if (d->fd < 0)
                return -EBADF;

        return RET_NERRNO(flock(d->fd, operation));
}

int loop_device_sync(LoopDevice *d) {
        assert(d);

        /* We also do this implicitly in loop_device_unref(). Doing this explicitly here has the benefit that
         * we can check the return value though. */

        if (d->fd < 0)
                return -EBADF;

        return RET_NERRNO(fsync(d->fd));
}