/* SPDX-License-Identifier: LGPL-2.1-or-later */

#if HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include <errno.h>
#include <fcntl.h>
#include <linux/blkpg.h>
#include <linux/fs.h>
#include <linux/loop.h>
#include <sys/file.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "sd-device.h"

#include "alloc-util.h"
#include "blockdev-util.h"
#include "data-fd-util.h"
#include "device-util.h"
#include "devnum-util.h"
#include "env-util.h"
#include "errno-util.h"
#include "fd-util.h"
#include "fileio.h"
#include "loop-util.h"
#include "missing_loop.h"
#include "parse-util.h"
#include "path-util.h"
#include "random-util.h"
#include "stat-util.h"
#include "stdio-util.h"
#include "string-util.h"
#include "tmpfile-util.h"

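/* Cleanup handler for use with _cleanup_(): detaches any backing file from the loop device and closes the
 * fd, so that a partially configured device is torn down again on error paths. */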
static void cleanup_clear_loop_close(int *fd) {
        if (*fd < 0)
                return;

        (void) ioctl(*fd, LOOP_CLR_FD);
        (void) safe_close(*fd);
}

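/* Checks whether a backing file is currently attached to the loop device. Returns > 0 if bound, 0 if not,
 * and a negative errno on failure. */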
static int loop_is_bound(int fd) {
        struct loop_info64 info;

        assert(fd >= 0);

        if (ioctl(fd, LOOP_GET_STATUS64, &info) < 0) {
                if (errno == ENXIO)
                        return false; /* not bound! */

                return -errno;
        }

        return true; /* bound! */
}

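/* Reads the kernel's global uevent sequence number from sysfs. Callers can use it to ignore uevents with a
 * lower seqnum, i.e. uevents generated before a device was attached. */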
static int get_current_uevent_seqnum(uint64_t *ret) {
        _cleanup_free_ char *p = NULL;
        int r;

        r = read_full_virtual_file("/sys/kernel/uevent_seqnum", &p, NULL);
        if (r < 0)
                return log_debug_errno(r, "Failed to read current uevent sequence number: %m");

        r = safe_atou64(strstrip(p), ret);
        if (r < 0)
                return log_debug_errno(r, "Failed to parse current uevent sequence number: %s", p);

        return 0;
}

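/* Takes a BSD file lock (LOCK_SH or LOCK_EX, optionally with LOCK_NB) on a separately reopened copy of the
 * given fd, so that the lock's lifetime is decoupled from the primary fd. Returns the O_RDONLY lock fd. */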
static int open_lock_fd(int primary_fd, int operation) {
        _cleanup_close_ int lock_fd = -EBADF;

        assert(primary_fd >= 0);
        assert(IN_SET(operation & ~LOCK_NB, LOCK_SH, LOCK_EX));

        lock_fd = fd_reopen(primary_fd, O_RDONLY|O_CLOEXEC|O_NONBLOCK|O_NOCTTY);
        if (lock_fd < 0)
                return lock_fd;

        if (flock(lock_fd, operation) < 0)
                return -errno;

        return TAKE_FD(lock_fd);
}

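/* If direct IO was requested, checks whether the kernel actually enabled it, and logs and continues in
 * buffered IO mode if not. */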
static int loop_configure_verify_direct_io(int fd, const struct loop_config *c) {
        assert(fd >= 0);
        assert(c);

        if (FLAGS_SET(c->info.lo_flags, LO_FLAGS_DIRECT_IO)) {
                struct loop_info64 info;

                if (ioctl(fd, LOOP_GET_STATUS64, &info) < 0)
                        return log_debug_errno(errno, "Failed to issue LOOP_GET_STATUS64: %m");

#if HAVE_VALGRIND_MEMCHECK_H
                VALGRIND_MAKE_MEM_DEFINED(&info, sizeof(info));
#endif

                /* On older kernels (<= 5.3) it was necessary to set the block size of the loopback block
                 * device to the logical block size of the underlying file system. Since there was no nice
                 * way to query the value, we are not bothering to do this however. On newer kernels the
                 * block size is propagated automatically and does not require intervention from us. We'll
                 * check here whether enabling direct IO worked, to make this easily debuggable however.
                 *
                 * (Should anyone really care and actually want direct IO on old kernels: it might be worth
                 * enabling direct IO with iteratively larger block sizes until it eventually works.) */
                if (!FLAGS_SET(info.lo_flags, LO_FLAGS_DIRECT_IO))
                        log_debug("Could not enable direct IO mode, proceeding in buffered IO mode.");
        }

        return 0;
}

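/* Verifies that LOOP_CONFIGURE honoured the requested parameters. Returns > 0 if everything took effect,
 * 0 if the kernel's LOOP_CONFIGURE is broken and the caller should fall back to LOOP_SET_STATUS64, and a
 * negative errno on failure. */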
static int loop_configure_verify(int fd, const struct loop_config *c) {
        bool broken = false;
        int r;

        assert(fd >= 0);
        assert(c);

        if (c->block_size != 0) {
                int z;

                if (ioctl(fd, BLKSSZGET, &z) < 0)
                        return -errno;

                assert(z >= 0);
                if ((uint32_t) z != c->block_size)
                        log_debug("LOOP_CONFIGURE didn't honour requested block size %u, got %i instead. Ignoring.", c->block_size, z);
        }

        if (c->info.lo_sizelimit != 0) {
                /* Kernel 5.8 vanilla doesn't properly propagate the size limit into the block device.
                 * Hence, if a size limit is used, let's immediately check whether it had the desired
                 * effect, and if not fall back to the classic LOOP_SET_STATUS64. */
                uint64_t z;

                if (ioctl(fd, BLKGETSIZE64, &z) < 0)
                        return -errno;

                if (z != c->info.lo_sizelimit) {
                        log_debug("LOOP_CONFIGURE is broken, doesn't honour .info.lo_sizelimit. Falling back to LOOP_SET_STATUS64.");
                        broken = true;
                }
        }

        if (FLAGS_SET(c->info.lo_flags, LO_FLAGS_PARTSCAN)) {
                /* Kernel 5.8 vanilla doesn't properly propagate the partition scanning flag into the block
                 * device. Let's hence verify that things work correctly here before returning. */

                r = blockdev_partscan_enabled(fd);
                if (r < 0)
                        return r;
                if (r == 0) {
                        log_debug("LOOP_CONFIGURE is broken, doesn't honour LO_FLAGS_PARTSCAN. Falling back to LOOP_SET_STATUS64.");
                        broken = true;
                }
        }

        r = loop_configure_verify_direct_io(fd, c);
        if (r < 0)
                return r;

        return !broken;
}

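/* Legacy configuration path via LOOP_SET_STATUS64, for kernels whose LOOP_CONFIGURE is missing or
 * broken. */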
static int loop_configure_fallback(int fd, const struct loop_config *c) {
        struct loop_info64 info_copy;

        assert(fd >= 0);
        assert(c);

        /* Only some of the flags LOOP_CONFIGURE can set are also settable via LOOP_SET_STATUS64, hence mask
         * the others out. */
        info_copy = c->info;
        info_copy.lo_flags &= LOOP_SET_STATUS_SETTABLE_FLAGS;

        /* Since kernel commit 5db470e229e22b7eda6e23b5566e532c96fb5bc3 (kernel v5.0) the LOOP_SET_STATUS64
         * ioctl can return EAGAIN in case we change the info.lo_offset field, if someone else is accessing the
         * block device while we try to reconfigure it. This is a pretty common case, since udev might
         * instantly start probing the device as soon as we attach an fd to it. Hence handle it in two ways:
         * first, let's take the BSD lock to ensure that udev will not step in between the point in
         * time where we attach the fd and where we reconfigure the device. Secondly, let's wait a bit and
         * retry on EAGAIN. The former should be an efficient mechanism to avoid having to wait needlessly
         * if we are just racing against udev. The latter is protection against all other cases, i.e. peers
         * that do not take the BSD lock. */

        for (unsigned n_attempts = 0;;) {
                if (ioctl(fd, LOOP_SET_STATUS64, &info_copy) >= 0)
                        break;

                if (errno != EAGAIN || ++n_attempts >= 64)
                        return log_debug_errno(errno, "Failed to configure loopback block device: %m");

                /* Sleep some random time, but at least 10ms, at most 250ms. Increase the delay the more
                 * failed attempts we see. */
                (void) usleep(UINT64_C(10) * USEC_PER_MSEC +
                              random_u64_range(UINT64_C(240) * USEC_PER_MSEC * n_attempts/64));
        }

        /* Work around a kernel bug, where changing the offset/size of the loopback device doesn't correctly
         * invalidate the buffer cache. For details see:
         *
         * https://android.googlesource.com/platform/system/apex/+/bef74542fbbb4cd629793f4efee8e0053b360570
         *
         * This was fixed in kernel 5.0, see:
         *
         * https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=5db470e229e22b7eda6e23b5566e532c96fb5bc3
         *
         * We'll run the work-around here in the legacy LOOP_SET_STATUS64 codepath. In the LOOP_CONFIGURE
         * codepath above it should not be necessary. */
        if (c->info.lo_offset != 0 || c->info.lo_sizelimit != 0)
                if (ioctl(fd, BLKFLSBUF, 0) < 0)
                        log_debug_errno(errno, "Failed to issue BLKFLSBUF ioctl, ignoring: %m");

        /* LO_FLAGS_DIRECT_IO is a flag we need to configure via an explicit ioctl. */
        if (FLAGS_SET(c->info.lo_flags, LO_FLAGS_DIRECT_IO))
                if (ioctl(fd, LOOP_SET_DIRECT_IO, 1UL) < 0)
                        log_debug_errno(errno, "Failed to enable direct IO mode, ignoring: %m");

        return loop_configure_verify_direct_io(fd, c);
}

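/* Opens /dev/loop<nr>, takes the BSD lock on it, and attempts to attach and configure the backing fd
 * described by *c, preferring LOOP_CONFIGURE and falling back to LOOP_SET_FD + LOOP_SET_STATUS64 on old or
 * broken kernels. Returns -EBUSY or -EUCLEAN if the device cannot be used as-is; the caller is expected to
 * retry with another device then. */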
static int loop_configure(
                int nr,
                int open_flags,
                int lock_op,
                const struct loop_config *c,
                LoopDevice **ret) {

        static bool loop_configure_broken = false;

        _cleanup_(sd_device_unrefp) sd_device *dev = NULL;
        _cleanup_(cleanup_clear_loop_close) int loop_with_fd = -EBADF; /* This must be declared before lock_fd. */
        _cleanup_close_ int fd = -EBADF, lock_fd = -EBADF;
        _cleanup_free_ char *node = NULL;
        uint64_t diskseq = 0, seqnum = UINT64_MAX;
        usec_t timestamp = USEC_INFINITY;
        dev_t devno;
        int r;

        assert(nr >= 0);
        assert(c);
        assert(ret);

        if (asprintf(&node, "/dev/loop%i", nr) < 0)
                return -ENOMEM;

        r = sd_device_new_from_devname(&dev, node);
        if (r < 0)
                return r;

        r = sd_device_get_devnum(dev, &devno);
        if (r < 0)
                return r;

        fd = sd_device_open(dev, O_CLOEXEC|O_NONBLOCK|O_NOCTTY|open_flags);
        if (fd < 0)
                return fd;

        /* Let's lock the device before we do anything. We take the BSD lock on a second, separately opened
         * fd for the device. udev after all watches for close() events (specifically IN_CLOSE_WRITE) on
         * block devices to reprobe them, hence by having a separate fd we will later close() we can ensure
         * we trigger udev after everything is done. If we'd lock our own fd instead and keep it open for a
         * long time udev would possibly never run on it again, even though the fd is unlocked, simply
         * because we never close() it. It also has the nice benefit that we can use the _cleanup_close_
         * logic to automatically release the lock, after we are done. */
        lock_fd = open_lock_fd(fd, LOCK_EX);
        if (lock_fd < 0)
                return lock_fd;

        /* Let's see if the backing file is really unattached. Someone may have already attached a backing
         * file without taking the BSD lock. */
        r = loop_is_bound(fd);
        if (r < 0)
                return r;
        if (r > 0)
                return -EBUSY;

        /* Let's see if the device is really detached, i.e. currently has no associated partition block
         * devices. On various kernels (such as 5.8) it is possible to have a loopback block device that
         * superficially is detached but still has partition block devices associated with it. Let's then
         * manually remove the partitions via BLKPG, and tell the caller we did that via EUCLEAN, so they try
         * again. */
        r = block_device_remove_all_partitions(dev, fd);
        if (r < 0)
                return r;
        if (r > 0)
                /* Removed all partitions. Let's report this to the caller, to try again, and count this as
                 * an attempt. */
                return -EUCLEAN;

        if (!loop_configure_broken) {
                /* Acquire the uevent seqnum immediately before attaching the loopback device. This allows
                 * callers to ignore all uevents with a seqnum before this one, if they need to associate
                 * uevents with this attachment. Doing so isn't race-free though, as uevents that happen in
                 * the window between this reading of the seqnum and the LOOP_CONFIGURE call might still be
                 * mistaken as originating from our attachment, even though they might be caused by an
                 * earlier use. But doing this at least shortens the race window a bit. */
                r = get_current_uevent_seqnum(&seqnum);
                if (r < 0)
                        return r;

                timestamp = now(CLOCK_MONOTONIC);

                if (ioctl(fd, LOOP_CONFIGURE, c) < 0) {
                        /* Do the fallback only if LOOP_CONFIGURE is not supported, propagate all other
                         * errors. Note that the kernel is weird: non-existing ioctls currently return EINVAL
                         * rather than ENOTTY on loopback block devices. They should fix that in the kernel,
                         * but in the meantime we accept both here. */
                        if (!ERRNO_IS_NOT_SUPPORTED(errno) && errno != EINVAL)
                                return -errno;

                        loop_configure_broken = true;
                } else {
                        loop_with_fd = TAKE_FD(fd);

                        r = loop_configure_verify(loop_with_fd, c);
                        if (r < 0)
                                return r;
                        if (r == 0) {
                                /* LOOP_CONFIGURE doesn't work. Remember that. */
                                loop_configure_broken = true;

                                /* We return EBUSY here instead of retrying immediately with LOOP_SET_FD,
                                 * because LOOP_CLR_FD is async: if the operation cannot be executed right
                                 * away it just sets the autoclear flag on the device. This means there's a
                                 * good chance we cannot actually reuse the loopback device right away. Hence
                                 * let's assume it's busy, avoid the trouble and let the calling loop call us
                                 * again with a new, likely unused device. */
                                return -EBUSY;
                        }
                }
        }

        if (loop_configure_broken) {
                /* Let's read the seqnum again, to shorten the window. */
                r = get_current_uevent_seqnum(&seqnum);
                if (r < 0)
                        return r;

                timestamp = now(CLOCK_MONOTONIC);

                if (ioctl(fd, LOOP_SET_FD, c->fd) < 0)
                        return -errno;

                loop_with_fd = TAKE_FD(fd);

                r = loop_configure_fallback(loop_with_fd, c);
                if (r < 0)
                        return r;
        }

        r = fd_get_diskseq(loop_with_fd, &diskseq);
        if (r < 0 && r != -EOPNOTSUPP)
                return r;

        switch (lock_op & ~LOCK_NB) {
        case LOCK_EX: /* Already in effect */
                break;
        case LOCK_SH: /* Downgrade */
                if (flock(lock_fd, lock_op) < 0)
                        return -errno;
                break;
        case LOCK_UN: /* Release */
                lock_fd = safe_close(lock_fd);
                break;
        default:
                assert_not_reached();
        }

        LoopDevice *d = new(LoopDevice, 1);
        if (!d)
                return -ENOMEM;

        *d = (LoopDevice) {
                .n_ref = 1,
                .fd = TAKE_FD(loop_with_fd),
                .lock_fd = TAKE_FD(lock_fd),
                .node = TAKE_PTR(node),
                .nr = nr,
                .devno = devno,
                .dev = TAKE_PTR(dev),
                .diskseq = diskseq,
                .uevent_seqnum_not_before = seqnum,
                .timestamp_not_before = timestamp,
        };

        *ret = TAKE_PTR(d);
        return 0;
}

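/* Common backend for the loop_device_make*() calls: validates the backing fd, reopens it with O_DIRECT if
 * direct IO was requested, and then loops around LOOP_CTL_GET_FREE + loop_configure() until a free loopback
 * device has been allocated and configured. */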
static int loop_device_make_internal(
                const char *path,
                int fd,
                int open_flags,
                uint64_t offset,
                uint64_t size,
                uint32_t block_size,
                uint32_t loop_flags,
                int lock_op,
                LoopDevice **ret) {

        _cleanup_(loop_device_unrefp) LoopDevice *d = NULL;
        _cleanup_close_ int direct_io_fd = -EBADF, control = -EBADF;
        _cleanup_free_ char *backing_file = NULL;
        struct loop_config config;
        int r, f_flags;
        struct stat st;

        assert(fd >= 0);
        assert(ret);
        assert(IN_SET(open_flags, O_RDWR, O_RDONLY));

        if (fstat(fd, &st) < 0)
                return -errno;

        if (S_ISBLK(st.st_mode)) {
                if (offset == 0 && IN_SET(size, 0, UINT64_MAX))
                        /* If this is already a block device and we are supposed to cover the whole of it
                         * then store an fd to the original open device node — and do not actually create an
                         * unnecessary loopback device for it. */
                        return loop_device_open_from_fd(fd, open_flags, lock_op, ret);
        } else {
                r = stat_verify_regular(&st);
                if (r < 0)
                        return r;
        }

        if (path) {
                r = path_make_absolute_cwd(path, &backing_file);
                if (r < 0)
                        return r;

                path_simplify(backing_file);
        } else {
                r = fd_get_path(fd, &backing_file);
                if (r < 0)
                        return r;
        }

        f_flags = fcntl(fd, F_GETFL);
        if (f_flags < 0)
                return -errno;

        if (FLAGS_SET(loop_flags, LO_FLAGS_DIRECT_IO) != FLAGS_SET(f_flags, O_DIRECT)) {
                /* If LO_FLAGS_DIRECT_IO is requested, then make sure we have the fd open with O_DIRECT, as
                 * that's required. Conversely, if it's off require that O_DIRECT is off too (that's because
                 * new kernels will implicitly enable LO_FLAGS_DIRECT_IO if O_DIRECT is set).
                 *
                 * Our intention here is that LO_FLAGS_DIRECT_IO is the primary knob, and O_DIRECT is derived
                 * from that automatically. */

                direct_io_fd = fd_reopen(fd, (FLAGS_SET(loop_flags, LO_FLAGS_DIRECT_IO) ? O_DIRECT : 0)|O_CLOEXEC|O_NONBLOCK|open_flags);
                if (direct_io_fd < 0) {
                        if (!FLAGS_SET(loop_flags, LO_FLAGS_DIRECT_IO))
                                return log_debug_errno(errno, "Failed to reopen file descriptor without O_DIRECT: %m");

                        /* Some file systems might not support O_DIRECT, let's gracefully continue without it then. */
                        log_debug_errno(errno, "Failed to enable O_DIRECT for backing file descriptor for loopback device. Continuing without.");
                        loop_flags &= ~LO_FLAGS_DIRECT_IO;
                } else
                        fd = direct_io_fd; /* From now on, operate on our new O_DIRECT fd */
        }

        control = open("/dev/loop-control", O_RDWR|O_CLOEXEC|O_NOCTTY|O_NONBLOCK);
        if (control < 0)
                return -errno;

        config = (struct loop_config) {
                .fd = fd,
                .block_size = block_size,
                .info = {
                        /* Use the specified flags, but configure the read-only flag from the open flags, and force autoclear */
                        .lo_flags = (loop_flags & ~LO_FLAGS_READ_ONLY) | ((open_flags & O_ACCMODE) == O_RDONLY ? LO_FLAGS_READ_ONLY : 0) | LO_FLAGS_AUTOCLEAR,
                        .lo_offset = offset,
                        .lo_sizelimit = size == UINT64_MAX ? 0 : size,
                },
        };

        /* Loop around LOOP_CTL_GET_FREE, since at the moment we attempt to open the returned device it might
         * be gone already, taken by somebody else racing against us. */
        for (unsigned n_attempts = 0;;) {
                int nr;

                /* Let's take a lock on the control device first. On a busy system, where many programs
                 * attempt to allocate a loopback device at the same time, we might otherwise keep looping
                 * around relatively heavy operations: asking for a free loopback device, then opening it,
                 * validating it, attaching something to it. Let's serialize this whole operation, to make
                 * unnecessary busywork less likely. Note that this is just something we do to optimize our
                 * own code (and whoever else decides to use LOCK_EX locks for this), taking this lock is not
                 * necessary, it just means it's less likely we have to iterate through this loop again and
                 * again if our own code races against our own code.
                 *
                 * Note: our lock protocol is to take the /dev/loop-control lock first, and the block device
                 * lock second, if both are taken, and always in this order, to avoid ABBA locking issues. */
                if (flock(control, LOCK_EX) < 0)
                        return -errno;

                nr = ioctl(control, LOOP_CTL_GET_FREE);
                if (nr < 0)
                        return -errno;

                r = loop_configure(nr, open_flags, lock_op, &config, &d);
                if (r >= 0)
                        break;

                /* -ENODEV or friends: Somebody might've gotten the same number from the kernel, used the
                 * device, and called LOOP_CTL_REMOVE on it. Let's retry with a new number.
                 * -EBUSY: a file descriptor is already bound to the loopback block device.
                 * -EUCLEAN: some left-over partition devices that were cleaned up. */
                if (!ERRNO_IS_DEVICE_ABSENT(r) && !IN_SET(r, -EBUSY, -EUCLEAN))
                        return r;

                /* OK, this didn't work, let's try again a bit later, but first release the lock on the
                 * control device */
                if (flock(control, LOCK_UN) < 0)
                        return -errno;

                if (++n_attempts >= 64) /* Give up eventually */
                        return -EBUSY;

                /* Wait some random time, to make collision less likely. Let's pick a random time in the
                 * range 0ms…250ms, linearly scaled by the number of failed attempts. */
                (void) usleep(random_u64_range(UINT64_C(10) * USEC_PER_MSEC +
                                               UINT64_C(240) * USEC_PER_MSEC * n_attempts/64));
        }

        d->backing_file = TAKE_PTR(backing_file);

        log_debug("Successfully acquired %s, devno=%u:%u, nr=%i, diskseq=%" PRIu64,
                  d->node,
                  major(d->devno), minor(d->devno),
                  d->nr,
                  d->diskseq);

        *ret = TAKE_PTR(d);
        return 0;
}

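/* Applies $SYSTEMD_LOOP_DIRECT_IO to the passed flags: LO_FLAGS_DIRECT_IO is on by default, and is masked
 * out only if the environment variable is explicitly set to false. */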
static uint32_t loop_flags_mangle(uint32_t loop_flags) {
        int r;

        r = getenv_bool("SYSTEMD_LOOP_DIRECT_IO");
        if (r < 0 && r != -ENXIO)
                log_debug_errno(r, "Failed to parse $SYSTEMD_LOOP_DIRECT_IO, ignoring: %m");

        return UPDATE_FLAG(loop_flags, LO_FLAGS_DIRECT_IO, r != 0); /* Turn on LO_FLAGS_DIRECT_IO by default, unless explicitly configured to off. */
}

int loop_device_make(
                int fd,
                int open_flags,
                uint64_t offset,
                uint64_t size,
                uint32_t block_size,
                uint32_t loop_flags,
                int lock_op,
                LoopDevice **ret) {

        assert(fd >= 0);
        assert(ret);

        return loop_device_make_internal(
                        NULL,
                        fd,
                        open_flags,
                        offset,
                        size,
                        block_size,
                        loop_flags_mangle(loop_flags),
                        lock_op,
                        ret);
}

int loop_device_make_by_path(
                const char *path,
                int open_flags,
                uint32_t loop_flags,
                int lock_op,
                LoopDevice **ret) {

        int r, basic_flags, direct_flags, rdwr_flags;
        _cleanup_close_ int fd = -EBADF;
        bool direct = false;

        assert(path);
        assert(ret);
        assert(open_flags < 0 || IN_SET(open_flags, O_RDWR, O_RDONLY));

        /* Passing < 0 as open_flags here means we'll try to open the device writable if we can, retrying
         * read-only if we cannot. */

        loop_flags = loop_flags_mangle(loop_flags);

        /* Let's open with O_DIRECT if we can. But not all file systems support that, hence fall back to
         * non-O_DIRECT mode automatically, if it fails. */

        basic_flags = O_CLOEXEC|O_NONBLOCK|O_NOCTTY;
        direct_flags = FLAGS_SET(loop_flags, LO_FLAGS_DIRECT_IO) ? O_DIRECT : 0;
        rdwr_flags = open_flags >= 0 ? open_flags : O_RDWR;

        fd = open(path, basic_flags|direct_flags|rdwr_flags);
        if (fd < 0 && direct_flags != 0) /* If we had O_DIRECT on, and things failed with that, let's immediately try again without */
                fd = open(path, basic_flags|rdwr_flags);
        else
                direct = direct_flags != 0;
        if (fd < 0) {
                r = -errno;

                /* Retry read-only? */
                if (open_flags >= 0 || !(ERRNO_IS_PRIVILEGE(r) || r == -EROFS))
                        return r;

                fd = open(path, basic_flags|direct_flags|O_RDONLY);
                if (fd < 0 && direct_flags != 0) /* as above */
                        fd = open(path, basic_flags|O_RDONLY);
                else
                        direct = direct_flags != 0;
                if (fd < 0)
                        return r; /* Propagate original error */

                open_flags = O_RDONLY;
        } else if (open_flags < 0)
                open_flags = O_RDWR;

        log_debug("Opened '%s' in %s access mode%s, with O_DIRECT %s%s.",
                  path,
                  open_flags == O_RDWR ? "O_RDWR" : "O_RDONLY",
                  open_flags != rdwr_flags ? " (O_RDWR was requested but not allowed)" : "",
                  direct ? "enabled" : "disabled",
                  direct != (direct_flags != 0) ? " (O_DIRECT was requested but not supported)" : "");

        return loop_device_make_internal(path, fd, open_flags, 0, 0, 0, loop_flags, lock_op, ret);
}

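/* Like loop_device_make_by_path(), but attaches the loop device to an in-memory copy (a memfd) of the
 * file's current contents rather than to the file itself. */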
int loop_device_make_by_path_memory(
                const char *path,
                int open_flags,
                uint32_t loop_flags,
                int lock_op,
                LoopDevice **ret) {

        _cleanup_close_ int fd = -EBADF, mfd = -EBADF;
        _cleanup_free_ char *fn = NULL;
        struct stat st;
        int r;

        assert(path);
        assert(IN_SET(open_flags, O_RDWR, O_RDONLY));
        assert(ret);

        loop_flags &= ~LO_FLAGS_DIRECT_IO; /* memfds don't support O_DIRECT, hence LO_FLAGS_DIRECT_IO can't be used either */

        fd = open(path, O_CLOEXEC|O_NONBLOCK|O_NOCTTY|O_RDONLY);
        if (fd < 0)
                return -errno;

        if (fstat(fd, &st) < 0)
                return -errno;

        if (!S_ISREG(st.st_mode) && !S_ISBLK(st.st_mode))
                return -EBADF;

        r = path_extract_filename(path, &fn);
        if (r < 0)
                return r;

        mfd = memfd_clone_fd(fd, fn, open_flags|O_CLOEXEC);
        if (mfd < 0)
                return mfd;

        fd = safe_close(fd); /* Let's close the original early */

        return loop_device_make_internal(NULL, mfd, open_flags, 0, 0, 0, loop_flags, lock_op, ret);
}

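/* Destructor: releases our lock, syncs and detaches the backing file (unless the device is foreign or was
 * relinquished), removes the device via LOOP_CTL_REMOVE, and frees the object. */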
static LoopDevice* loop_device_free(LoopDevice *d) {
        _cleanup_close_ int control = -EBADF;
        int r;

        if (!d)
                return NULL;

        /* Release any lock we might have on the device first. We want to open+lock the /dev/loop-control
         * device below, but our lock protocol says that if both control and block device locks are taken,
         * the control lock needs to be taken first, the block device lock second — in order to avoid ABBA
         * locking issues. Moreover, we want to issue LOOP_CLR_FD on the block device further down, and that
         * would fail if we had another fd open to the device. */
        d->lock_fd = safe_close(d->lock_fd);

        /* Let's open the control device early, and lock it, so that we can release our block device and
         * delete it in a synchronized fashion, and allocators won't needlessly see the block device as free
         * while we are about to delete it. */
        if (!LOOP_DEVICE_IS_FOREIGN(d) && !d->relinquished) {
                control = open("/dev/loop-control", O_RDWR|O_CLOEXEC|O_NOCTTY|O_NONBLOCK);
                if (control < 0)
                        log_debug_errno(errno, "Failed to open loop control device, cannot remove loop device '%s', ignoring: %m", strna(d->node));
                else if (flock(control, LOCK_EX) < 0)
                        log_debug_errno(errno, "Failed to lock loop control device, ignoring: %m");
        }

        /* Then let's release the loopback block device */
        if (d->fd >= 0) {
                /* Implicitly sync the device, since otherwise in-flight blocks might not get written */
                if (fsync(d->fd) < 0)
                        log_debug_errno(errno, "Failed to sync loop block device, ignoring: %m");

                if (!LOOP_DEVICE_IS_FOREIGN(d) && !d->relinquished) {
                        /* We are supposed to clear the loopback device. Let's do this synchronously: lock
                         * the device, manually remove all partitions and then clear it. This should ensure
                         * udev doesn't concurrently access the devices, and we can be reasonably sure that
                         * once we are done here the device is cleared and all its partition children
                         * removed. Note that we lock our primary device fd here (and not a separate locking
                         * fd, as we do during allocation), since we want to keep the lock all the way
                         * through the LOOP_CLR_FD, but that call would fail if we had more than one fd
                         * open. */

                        if (flock(d->fd, LOCK_EX) < 0)
                                log_debug_errno(errno, "Failed to lock loop block device, ignoring: %m");

                        r = block_device_remove_all_partitions(d->dev, d->fd);
                        if (r < 0)
                                log_debug_errno(r, "Failed to remove partitions of loopback block device, ignoring: %m");

                        if (ioctl(d->fd, LOOP_CLR_FD) < 0)
                                log_debug_errno(errno, "Failed to clear loop device, ignoring: %m");
                }

                safe_close(d->fd);
        }

        /* Now that the block device is released, let's also try to remove it */
        if (control >= 0)
                for (unsigned n_attempts = 0;;) {
                        if (ioctl(control, LOOP_CTL_REMOVE, d->nr) >= 0)
                                break;
                        if (errno != EBUSY || ++n_attempts >= 64) {
                                log_debug_errno(errno, "Failed to remove device %s: %m", strna(d->node));
                                break;
                        }
                        (void) usleep(50 * USEC_PER_MSEC);
                }

        free(d->node);
        sd_device_unref(d->dev);
        free(d->backing_file);
        return mfree(d);
}

DEFINE_TRIVIAL_REF_UNREF_FUNC(LoopDevice, loop_device, loop_device_free);

void loop_device_relinquish(LoopDevice *d) {
        assert(d);

        /* Don't attempt to clean up the loop device anymore from this point on. Leave the cleaning up to
         * the kernel itself, using the loop device "auto-clear" logic we already turned on when creating
         * the device. */

        d->relinquished = true;
}

void loop_device_unrelinquish(LoopDevice *d) {
        assert(d);
        d->relinquished = false;
}

int loop_device_open(
                sd_device *dev,
                int open_flags,
                int lock_op,
                LoopDevice **ret) {

        _cleanup_close_ int fd = -EBADF, lock_fd = -EBADF;
        _cleanup_free_ char *node = NULL, *backing_file = NULL;
        struct loop_info64 info;
        uint64_t diskseq = 0;
        LoopDevice *d;
        const char *s;
        dev_t devnum;
        int r, nr = -1;

        assert(dev);
        assert(IN_SET(open_flags, O_RDWR, O_RDONLY));
        assert(ret);

        /* Even if an fd is provided through the argument in loop_device_open_from_fd(), we reopen the inode
         * here, instead of keeping just a dup() clone of it around, since we want to ensure that the
         * O_DIRECT flag of the handle we keep is off, that we have our own file offset, and that the right
         * read/write mode is in effect. */
        fd = sd_device_open(dev, O_CLOEXEC|O_NONBLOCK|O_NOCTTY|open_flags);
        if (fd < 0)
                return fd;

        if ((lock_op & ~LOCK_NB) != LOCK_UN) {
                lock_fd = open_lock_fd(fd, lock_op);
                if (lock_fd < 0)
                        return lock_fd;
        }

        if (ioctl(fd, LOOP_GET_STATUS64, &info) >= 0) {
#if HAVE_VALGRIND_MEMCHECK_H
                /* Valgrind currently doesn't know LOOP_GET_STATUS64. Remove this once it does */
                VALGRIND_MAKE_MEM_DEFINED(&info, sizeof(info));
#endif
                nr = info.lo_number;

                if (sd_device_get_sysattr_value(dev, "loop/backing_file", &s) >= 0) {
                        backing_file = strdup(s);
                        if (!backing_file)
                                return -ENOMEM;
                }
        }

        r = fd_get_diskseq(fd, &diskseq);
        if (r < 0 && r != -EOPNOTSUPP)
                return r;

        r = sd_device_get_devnum(dev, &devnum);
        if (r < 0)
                return r;

        r = sd_device_get_devname(dev, &s);
        if (r < 0)
                return r;

        node = strdup(s);
        if (!node)
                return -ENOMEM;

        d = new(LoopDevice, 1);
        if (!d)
                return -ENOMEM;

        *d = (LoopDevice) {
                .n_ref = 1,
                .fd = TAKE_FD(fd),
                .lock_fd = TAKE_FD(lock_fd),
                .nr = nr,
                .node = TAKE_PTR(node),
                .dev = sd_device_ref(dev),
                .backing_file = TAKE_PTR(backing_file),
                .relinquished = true, /* It's not ours, don't try to destroy it when this object is freed */
                .devno = devnum,
                .diskseq = diskseq,
                .uevent_seqnum_not_before = UINT64_MAX,
                .timestamp_not_before = USEC_INFINITY,
        };

        *ret = d;
        return 0;
}

int loop_device_open_from_fd(
                int fd,
                int open_flags,
                int lock_op,
                LoopDevice **ret) {

        _cleanup_(sd_device_unrefp) sd_device *dev = NULL;
        int r;

        assert(fd >= 0);

        r = block_device_new_from_fd(fd, 0, &dev);
        if (r < 0)
                return r;

        return loop_device_open(dev, open_flags, lock_op, ret);
}

int loop_device_open_from_path(
                const char *path,
                int open_flags,
                int lock_op,
                LoopDevice **ret) {

        _cleanup_(sd_device_unrefp) sd_device *dev = NULL;
        int r;

        assert(path);

        r = block_device_new_from_path(path, 0, &dev);
        if (r < 0)
                return r;

        return loop_device_open(dev, open_flags, lock_op, ret);
}

static int resize_partition(int partition_fd, uint64_t offset, uint64_t size) {
        char sysfs[STRLEN("/sys/dev/block/:/partition") + 2*DECIMAL_STR_MAX(dev_t) + 1];
        _cleanup_free_ char *buffer = NULL;
        uint64_t current_offset, current_size, partno;
        _cleanup_close_ int whole_fd = -EBADF;
        struct stat st;
        dev_t devno;
        int r;

        assert(partition_fd >= 0);

        /* Resizes the partition the loopback device refers to (assuming it refers to one instead of an
         * actual loopback device), and changes the offset, if needed. This is a fancy wrapper around
         * BLKPG_RESIZE_PARTITION. */

        if (fstat(partition_fd, &st) < 0)
                return -errno;

        assert(S_ISBLK(st.st_mode));

        xsprintf(sysfs, "/sys/dev/block/" DEVNUM_FORMAT_STR "/partition", DEVNUM_FORMAT_VAL(st.st_rdev));
        r = read_one_line_file(sysfs, &buffer);
        if (r == -ENOENT) /* not a partition, cannot resize */
                return -ENOTTY;
        if (r < 0)
                return r;
        r = safe_atou64(buffer, &partno);
        if (r < 0)
                return r;

        xsprintf(sysfs, "/sys/dev/block/" DEVNUM_FORMAT_STR "/start", DEVNUM_FORMAT_VAL(st.st_rdev));

        buffer = mfree(buffer);
        r = read_one_line_file(sysfs, &buffer);
        if (r < 0)
                return r;
        r = safe_atou64(buffer, &current_offset);
        if (r < 0)
                return r;
        if (current_offset > UINT64_MAX/512U)
                return -EINVAL;
        current_offset *= 512U;

        if (ioctl(partition_fd, BLKGETSIZE64, &current_size) < 0)
                return -EINVAL;

        if (size == UINT64_MAX && offset == UINT64_MAX)
                return 0;
        if (current_size == size && current_offset == offset)
                return 0;

        xsprintf(sysfs, "/sys/dev/block/" DEVNUM_FORMAT_STR "/../dev", DEVNUM_FORMAT_VAL(st.st_rdev));

        buffer = mfree(buffer);
        r = read_one_line_file(sysfs, &buffer);
        if (r < 0)
                return r;
        r = parse_devnum(buffer, &devno);
        if (r < 0)
                return r;

        whole_fd = r = device_open_from_devnum(S_IFBLK, devno, O_RDWR|O_CLOEXEC|O_NONBLOCK|O_NOCTTY, NULL);
        if (r < 0)
                return r;

        return block_device_resize_partition(
                        whole_fd,
                        partno,
                        offset == UINT64_MAX ? current_offset : offset,
                        size == UINT64_MAX ? current_size : size);
}

int loop_device_refresh_size(LoopDevice *d, uint64_t offset, uint64_t size) {
        struct loop_info64 info;

        assert(d);
        assert(d->fd >= 0);

        /* Changes the offset/size of the loop device relative to the beginning of the underlying file or
         * block device. If this loop device actually refers to a partition and not a loopback device, we'll
         * try to adjust the partition offsets instead.
         *
         * If either offset or size is UINT64_MAX we won't change that parameter. */

        if (d->nr < 0) /* not a loopback device */
                return resize_partition(d->fd, offset, size);

        if (ioctl(d->fd, LOOP_GET_STATUS64, &info) < 0)
                return -errno;

#if HAVE_VALGRIND_MEMCHECK_H
        /* Valgrind currently doesn't know LOOP_GET_STATUS64. Remove this once it does */
        VALGRIND_MAKE_MEM_DEFINED(&info, sizeof(info));
#endif

        if (size == UINT64_MAX && offset == UINT64_MAX)
                return 0;
        if (info.lo_sizelimit == size && info.lo_offset == offset)
                return 0;

        if (size != UINT64_MAX)
                info.lo_sizelimit = size;
        if (offset != UINT64_MAX)
                info.lo_offset = offset;

        return RET_NERRNO(ioctl(d->fd, LOOP_SET_STATUS64, &info));
}

int loop_device_flock(LoopDevice *d, int operation) {
        assert(IN_SET(operation & ~LOCK_NB, LOCK_UN, LOCK_SH, LOCK_EX));
        assert(d);

        /* When unlocking just close the lock fd */
        if ((operation & ~LOCK_NB) == LOCK_UN) {
                d->lock_fd = safe_close(d->lock_fd);
                return 0;
        }

        /* If we had no lock fd so far, create one and lock it right away */
        if (d->lock_fd < 0) {
                assert(d->fd >= 0);

                d->lock_fd = open_lock_fd(d->fd, operation);
                if (d->lock_fd < 0)
                        return d->lock_fd;

                return 0;
        }

        /* Otherwise change the current lock mode on the existing fd */
        return RET_NERRNO(flock(d->lock_fd, operation));
}

int loop_device_sync(LoopDevice *d) {
        assert(d);
        assert(d->fd >= 0);

        /* We also do this implicitly in loop_device_unref(). Doing this explicitly here has the benefit
         * that we can check the return value though. */

        return RET_NERRNO(fsync(d->fd));
}