/* SPDX-License-Identifier: LGPL-2.1-or-later */

#if HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include <errno.h>
#include <fcntl.h>
#include <linux/blkpg.h>
#include <linux/fs.h>
#include <linux/loop.h>
#include <sys/file.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "sd-device.h"

#include "alloc-util.h"
#include "blockdev-util.h"
#include "data-fd-util.h"
#include "device-util.h"
#include "devnum-util.h"
#include "dissect-image.h"
#include "env-util.h"
#include "errno-util.h"
#include "fd-util.h"
#include "fs-util.h"
#include "fileio.h"
#include "loop-util.h"
#include "missing_loop.h"
#include "parse-util.h"
#include "path-util.h"
#include "random-util.h"
#include "stat-util.h"
#include "stdio-util.h"
#include "string-util.h"
#include "tmpfile-util.h"

static void cleanup_clear_loop_close(int *fd) {
        if (*fd < 0)
                return;

        (void) ioctl(*fd, LOOP_CLR_FD);
        (void) safe_close(*fd);
}

static int loop_is_bound(int fd) {
        struct loop_info64 info;

        if (ioctl(ASSERT_FD(fd), LOOP_GET_STATUS64, &info) < 0) {
                if (errno == ENXIO)
                        return false; /* not bound! */

                return -errno;
        }

        return true; /* bound! */
}

static int get_current_uevent_seqnum(uint64_t *ret) {
        _cleanup_free_ char *p = NULL;
        int r;

        r = read_full_virtual_file("/sys/kernel/uevent_seqnum", &p, NULL);
        if (r < 0)
                return log_debug_errno(r, "Failed to read current uevent sequence number: %m");

        r = safe_atou64(strstrip(p), ret);
        if (r < 0)
                return log_debug_errno(r, "Failed to parse current uevent sequence number: %s", p);

        return 0;
}

static int open_lock_fd(int primary_fd, int operation) {
        _cleanup_close_ int lock_fd = -EBADF;

        assert(IN_SET(operation & ~LOCK_NB, LOCK_SH, LOCK_EX));

        lock_fd = fd_reopen(ASSERT_FD(primary_fd), O_RDONLY|O_CLOEXEC|O_NONBLOCK|O_NOCTTY);
        if (lock_fd < 0)
                return lock_fd;

        if (flock(lock_fd, operation) < 0)
                return -errno;

        return TAKE_FD(lock_fd);
}

static int loop_configure_verify_direct_io(int fd, const struct loop_config *c) {
        assert(fd >= 0);
        assert(c);

        if (FLAGS_SET(c->info.lo_flags, LO_FLAGS_DIRECT_IO)) {
                struct loop_info64 info;

                if (ioctl(fd, LOOP_GET_STATUS64, &info) < 0)
                        return log_debug_errno(errno, "Failed to issue LOOP_GET_STATUS64: %m");

#if HAVE_VALGRIND_MEMCHECK_H
                VALGRIND_MAKE_MEM_DEFINED(&info, sizeof(info));
#endif

                /* On older kernels (<= 5.3) it was necessary to set the block size of the loopback block
                 * device to the logical block size of the underlying file system. Since there was no nice
                 * way to query the value, we are not bothering to do this. On newer kernels the block size
                 * is propagated automatically and does not require intervention from us. We'll check here
                 * if enabling direct IO worked, to make this easily debuggable.
                 *
                 * (Should anyone really care and actually want direct IO on old kernels: it might be worth
                 * enabling direct IO with iteratively larger block sizes until it eventually works.)
                 *
                 * On older kernels (e.g. 5.10), when this is attempted on a file stored on a dm-crypt
                 * backed partition, the kernel will start returning I/O errors when accessing the mounted
                 * loop device, so return a recognizable error that causes the operation to be started
                 * from scratch without the LO_FLAGS_DIRECT_IO flag. */
                if (!FLAGS_SET(info.lo_flags, LO_FLAGS_DIRECT_IO))
                        return log_debug_errno(
                                        SYNTHETIC_ERRNO(ENOANO),
                                        "Could not enable direct IO mode, retrying in buffered IO mode.");
        }

        return 0;
}

static int loop_configure_verify(int fd, const struct loop_config *c) {
        bool broken = false;
        int r;

        assert(fd >= 0);
        assert(c);

        if (c->block_size != 0) {
                uint32_t ssz;

                r = blockdev_get_sector_size(fd, &ssz);
                if (r < 0)
                        return r;

                if (ssz != c->block_size) {
                        log_debug("LOOP_CONFIGURE didn't honour requested block size %" PRIu32 ", got %" PRIu32 " instead. Ignoring.", c->block_size, ssz);
                        broken = true;
                }
        }

        if (c->info.lo_sizelimit != 0) {
                /* Kernel 5.8 vanilla doesn't properly propagate the size limit into the block device. If
                 * it's used, let's immediately check whether it had the desired effect, and if not, fall
                 * back to the classic LOOP_SET_STATUS64. */
                uint64_t z;

                if (ioctl(fd, BLKGETSIZE64, &z) < 0)
                        return -errno;

                if (z != c->info.lo_sizelimit) {
                        log_debug("LOOP_CONFIGURE is broken, doesn't honour .info.lo_sizelimit. Falling back to LOOP_SET_STATUS64.");
                        broken = true;
                }
        }

        if (FLAGS_SET(c->info.lo_flags, LO_FLAGS_PARTSCAN)) {
                /* Kernel 5.8 vanilla doesn't properly propagate the partition scanning flag into the block
                 * device. Let's hence verify that things work correctly here before returning. */

                r = blockdev_partscan_enabled(fd);
                if (r < 0)
                        return r;
                if (r == 0) {
                        log_debug("LOOP_CONFIGURE is broken, doesn't honour LO_FLAGS_PARTSCAN. Falling back to LOOP_SET_STATUS64.");
                        broken = true;
                }
        }

        r = loop_configure_verify_direct_io(fd, c);
        if (r < 0)
                return r;

        return !broken;
}

static int loop_configure_fallback(int fd, const struct loop_config *c) {
        struct loop_info64 info_copy;
        int r;

        assert(fd >= 0);
        assert(c);

        /* Only some of the flags LOOP_CONFIGURE can set are also settable via LOOP_SET_STATUS64, hence mask
         * them out. */
        info_copy = c->info;
        info_copy.lo_flags &= LOOP_SET_STATUS_SETTABLE_FLAGS;

        /* Since kernel commit 5db470e229e22b7eda6e23b5566e532c96fb5bc3 (kernel v5.0) the LOOP_SET_STATUS64
         * ioctl can return EAGAIN in case we change the info.lo_offset field, if someone else is accessing the
         * block device while we try to reconfigure it. This is a pretty common case, since udev might
         * instantly start probing the device as soon as we attach an fd to it. Hence handle it in two ways:
         * first, let's take the BSD lock to ensure that udev will not step in between the point in
         * time where we attach the fd and where we reconfigure the device. Secondly, let's wait 50ms on
         * EAGAIN and retry. The former should be an efficient mechanism to avoid having to wait 50ms
         * needlessly if we are just racing against udev. The latter is protection against all other cases,
         * i.e. peers that do not take the BSD lock. */

        for (unsigned n_attempts = 0;;) {
                if (ioctl(fd, LOOP_SET_STATUS64, &info_copy) >= 0)
                        break;

                if (errno != EAGAIN || ++n_attempts >= 64)
                        return log_debug_errno(errno, "Failed to configure loopback block device: %m");

                /* Sleep some random time, but at least 10ms, at most 250ms. Increase the delay the more
                 * failed attempts we see. */
                (void) usleep_safe(UINT64_C(10) * USEC_PER_MSEC +
                                   random_u64_range(UINT64_C(240) * USEC_PER_MSEC * n_attempts/64));
        }

        /* Work around a kernel bug, where changing offset/size of the loopback device doesn't correctly
         * invalidate the buffer cache. For details see:
         *
         * https://android.googlesource.com/platform/system/apex/+/bef74542fbbb4cd629793f4efee8e0053b360570
         *
         * This was fixed in kernel 5.0, see:
         *
         * https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=5db470e229e22b7eda6e23b5566e532c96fb5bc3
         *
         * We'll run the work-around here in the legacy LOOP_SET_STATUS64 codepath. In the LOOP_CONFIGURE
         * codepath above it should not be necessary. */
        if (c->info.lo_offset != 0 || c->info.lo_sizelimit != 0)
                if (ioctl(fd, BLKFLSBUF, 0) < 0)
                        log_debug_errno(errno, "Failed to issue BLKFLSBUF ioctl, ignoring: %m");

        /* If a block size is requested then try to configure it. If that doesn't work, ignore errors, but
         * afterwards, let's validate what is in effect, and if it doesn't match what we want, fail. */
        if (c->block_size != 0) {
                uint32_t ssz;

                if (ioctl(fd, LOOP_SET_BLOCK_SIZE, (unsigned long) c->block_size) < 0)
                        log_debug_errno(errno, "Failed to set sector size, ignoring: %m");

                r = blockdev_get_sector_size(fd, &ssz);
                if (r < 0)
                        return log_debug_errno(r, "Failed to read sector size: %m");
                if (ssz != c->block_size)
                        return log_debug_errno(SYNTHETIC_ERRNO(EIO), "Sector size of loopback device doesn't match what we requested, refusing.");
        }

        /* LO_FLAGS_DIRECT_IO is a flag we need to configure via an explicit ioctl. */
        if (FLAGS_SET(c->info.lo_flags, LO_FLAGS_DIRECT_IO))
                if (ioctl(fd, LOOP_SET_DIRECT_IO, 1UL) < 0)
                        log_debug_errno(errno, "Failed to enable direct IO mode, ignoring: %m");

        return loop_configure_verify_direct_io(fd, c);
}

static int loop_configure(
                int nr,
                int open_flags,
                int lock_op,
                const struct loop_config *c,
                LoopDevice **ret) {

        static bool loop_configure_broken = false;

        _cleanup_(sd_device_unrefp) sd_device *dev = NULL;
        _cleanup_(cleanup_clear_loop_close) int loop_with_fd = -EBADF; /* This must be declared before lock_fd. */
        _cleanup_close_ int fd = -EBADF, lock_fd = -EBADF;
        _cleanup_free_ char *node = NULL;
        uint64_t diskseq = 0, seqnum = UINT64_MAX;
        usec_t timestamp = USEC_INFINITY;
        dev_t devno;
        int r;

        assert(nr >= 0);
        assert(c);
        assert(ret);

        if (asprintf(&node, "/dev/loop%i", nr) < 0)
                return log_oom_debug();

        r = sd_device_new_from_devname(&dev, node);
        if (r < 0)
                return log_debug_errno(r, "Failed to create sd_device object for \"%s\": %m", node);

        r = sd_device_get_devnum(dev, &devno);
        if (r < 0)
                return log_device_debug_errno(dev, r, "Failed to get devnum: %m");

        fd = sd_device_open(dev, O_CLOEXEC|O_NONBLOCK|O_NOCTTY|open_flags);
        if (fd < 0)
                return log_device_debug_errno(dev, fd, "Failed to open device: %m");

        /* Let's lock the device before we do anything. We take the BSD lock on a second, separately opened
         * fd for the device. udev after all watches for close() events (specifically IN_CLOSE_WRITE) on
         * block devices to reprobe them, hence by having a separate fd we will later close() we can ensure
         * we trigger udev after everything is done. If we'd lock our own fd instead and keep it open for a
         * long time udev would possibly never run on it again, even though the fd is unlocked, simply
         * because we never close() it. It also has the nice benefit that we can use the _cleanup_close_
         * logic to automatically release the lock, after we are done. */
        lock_fd = open_lock_fd(fd, LOCK_EX);
        if (lock_fd < 0)
                return log_device_debug_errno(dev, lock_fd, "Failed to acquire lock: %m");

        log_device_debug(dev, "Acquired exclusive lock.");

        /* Let's see if the backing file is really unattached. Someone may already have attached a backing
         * file without taking the BSD lock. */
        r = loop_is_bound(fd);
        if (r < 0)
                return log_device_debug_errno(dev, r, "Failed to check if the loopback block device is bound: %m");
        if (r > 0)
                return log_device_debug_errno(dev, SYNTHETIC_ERRNO(EBUSY),
                                              "The loopback block device is already bound, ignoring.");

        /* Let's see if the device is really detached, i.e. currently has no associated partition block
         * devices. On various kernels (such as 5.8) it is possible to have a loopback block device that
         * superficially is detached but still has partition block devices associated with it. Let's then
         * manually remove the partitions via BLKPG, and tell the caller we did that via EUCLEAN, so they try
         * again. */
        r = block_device_remove_all_partitions(dev, fd);
        if (r < 0)
                return log_device_debug_errno(dev, r, "Failed to remove partitions on the loopback block device: %m");
        if (r > 0)
                /* Removed all partitions. Let's report this to the caller, to try again, and count this as
                 * an attempt. */
                return log_device_debug_errno(dev, SYNTHETIC_ERRNO(EUCLEAN),
                                              "Removed partitions on the loopback block device.");

        if (!loop_configure_broken) {
                /* Acquire the uevent seqnum immediately before attaching the loopback device. This allows
                 * callers to ignore all uevents with a seqnum before this one, if they need to associate
                 * uevents with this attachment. Doing so isn't race-free though, as uevents that happen in
                 * the window between this reading of the seqnum, and the LOOP_CONFIGURE call might still be
                 * mistaken as originating from our attachment, even though they might be caused by an
                 * earlier use. But doing this at least shortens the race window a bit. */
                r = get_current_uevent_seqnum(&seqnum);
                if (r < 0)
                        return log_device_debug_errno(dev, r, "Failed to get the current uevent seqnum: %m");

                timestamp = now(CLOCK_MONOTONIC);

                if (ioctl(fd, LOOP_CONFIGURE, c) < 0) {
                        /* Do the fallback only if LOOP_CONFIGURE is not supported, propagate all other
                         * errors. Note that the kernel is weird: non-existing ioctls currently return EINVAL
                         * rather than ENOTTY on loopback block devices. They should fix that in the kernel,
                         * but in the meantime we accept both here. */
                        if (!ERRNO_IS_NOT_SUPPORTED(errno) && errno != EINVAL)
                                return log_device_debug_errno(dev, errno, "ioctl(LOOP_CONFIGURE) failed: %m");

                        loop_configure_broken = true;
                } else {
                        loop_with_fd = TAKE_FD(fd);

                        r = loop_configure_verify(loop_with_fd, c);
                        if (r < 0)
                                return log_device_debug_errno(dev, r, "Failed to verify if loopback block device is correctly configured: %m");
                        if (r == 0) {
                                /* LOOP_CONFIGURE doesn't work. Remember that. */
                                loop_configure_broken = true;

                                /* We return EBUSY here instead of retrying immediately with LOOP_SET_FD,
                                 * because LOOP_CLR_FD is async: if the operation cannot be executed right
                                 * away it just sets the autoclear flag on the device. This means there's a
                                 * good chance we cannot actually reuse the loopback device right away. Hence
                                 * let's assume it's busy, avoid the trouble and let the calling loop call us
                                 * again with a new, likely unused device. */
                                return -EBUSY;
                        }
                }
        }

        if (loop_configure_broken) {
                /* Let's read the seqnum again, to shorten the window. */
                r = get_current_uevent_seqnum(&seqnum);
                if (r < 0)
                        return log_device_debug_errno(dev, r, "Failed to get the current uevent seqnum: %m");

                timestamp = now(CLOCK_MONOTONIC);

                if (ioctl(fd, LOOP_SET_FD, c->fd) < 0)
                        return log_device_debug_errno(dev, errno, "ioctl(LOOP_SET_FD) failed: %m");

                loop_with_fd = TAKE_FD(fd);

                r = loop_configure_fallback(loop_with_fd, c);
                if (r < 0)
                        return r;
        }

        r = fd_get_diskseq(loop_with_fd, &diskseq);
        if (r < 0 && r != -EOPNOTSUPP)
                return log_device_debug_errno(dev, r, "Failed to get diskseq: %m");

        switch (lock_op & ~LOCK_NB) {
        case LOCK_EX: /* Already in effect */
                break;
        case LOCK_SH: /* Downgrade */
                if (flock(lock_fd, lock_op) < 0)
                        return log_device_debug_errno(dev, errno, "Failed to downgrade lock level: %m");
                break;
        case LOCK_UN: /* Release */
                lock_fd = safe_close(lock_fd);
                break;
        default:
                assert_not_reached();
        }

        LoopDevice *d = new(LoopDevice, 1);
        if (!d)
                return log_oom_debug();

        *d = (LoopDevice) {
                .n_ref = 1,
                .fd = TAKE_FD(loop_with_fd),
                .lock_fd = TAKE_FD(lock_fd),
                .node = TAKE_PTR(node),
                .nr = nr,
                .devno = devno,
                .dev = TAKE_PTR(dev),
                .diskseq = diskseq,
                .uevent_seqnum_not_before = seqnum,
                .timestamp_not_before = timestamp,
                .sector_size = c->block_size,
        };

        *ret = TAKE_PTR(d);
        return 0;
}

static int loop_device_make_internal(
                const char *path,
                int fd,
                int open_flags,
                uint64_t offset,
                uint64_t size,
                uint32_t sector_size,
                uint32_t loop_flags,
                int lock_op,
                LoopDevice **ret) {

        _cleanup_(loop_device_unrefp) LoopDevice *d = NULL;
        _cleanup_close_ int reopened_fd = -EBADF, control = -EBADF;
        _cleanup_free_ char *backing_file = NULL;
        struct loop_config config;
        int r, f_flags;
        struct stat st;

        assert(ret);
        assert(IN_SET(open_flags, O_RDWR, O_RDONLY));

        if (fstat(ASSERT_FD(fd), &st) < 0)
                return -errno;

        if (S_ISBLK(st.st_mode)) {
                if (offset == 0 && IN_SET(size, 0, UINT64_MAX))
                        /* If this is already a block device and we are supposed to cover the whole of it
                         * then store an fd to the original open device node — and do not actually create an
                         * unnecessary loopback device for it. */
                        return loop_device_open_from_fd(fd, open_flags, lock_op, ret);
        } else {
                r = stat_verify_regular(&st);
                if (r < 0)
                        return r;
        }

        if (path) {
                r = path_make_absolute_cwd(path, &backing_file);
                if (r < 0)
                        return r;

                path_simplify(backing_file);
        } else {
                r = fd_get_path(fd, &backing_file);
                if (r < 0)
                        return r;
        }

        f_flags = fcntl(fd, F_GETFL);
        if (f_flags < 0)
                return -errno;

        if (FLAGS_SET(loop_flags, LO_FLAGS_DIRECT_IO) != FLAGS_SET(f_flags, O_DIRECT)) {
                /* If LO_FLAGS_DIRECT_IO is requested, then make sure we have the fd open with O_DIRECT, as
                 * that's required. Conversely, if it's off require that O_DIRECT is off too (that's because
                 * new kernels will implicitly enable LO_FLAGS_DIRECT_IO if O_DIRECT is set).
                 *
                 * Our intention here is that LO_FLAGS_DIRECT_IO is the primary knob, and O_DIRECT is derived
                 * from that automatically. */

                reopened_fd = fd_reopen(fd, (FLAGS_SET(loop_flags, LO_FLAGS_DIRECT_IO) ? O_DIRECT : 0)|O_CLOEXEC|O_NONBLOCK|open_flags);
                if (reopened_fd < 0) {
                        if (!FLAGS_SET(loop_flags, LO_FLAGS_DIRECT_IO))
                                return log_debug_errno(reopened_fd, "Failed to reopen file descriptor without O_DIRECT: %m");

                        /* Some file systems might not support O_DIRECT, let's gracefully continue without it then. */
                        log_debug_errno(reopened_fd, "Failed to enable O_DIRECT for backing file descriptor for loopback device. Continuing without.");
                        loop_flags &= ~LO_FLAGS_DIRECT_IO;
                } else
                        fd = reopened_fd; /* From now on, operate on our new O_DIRECT fd */
        }

        control = open("/dev/loop-control", O_RDWR|O_CLOEXEC|O_NOCTTY|O_NONBLOCK);
        if (control < 0)
                return -errno;

        if (sector_size == 0)
                /* If no sector size is specified, use the classic default of 512 bytes. */
                sector_size = 512;
        else if (sector_size == UINT32_MAX) {

                if (S_ISBLK(st.st_mode))
                        /* If the sector size is specified as UINT32_MAX we'll propagate the sector size of
                         * the underlying block device. */
                        r = blockdev_get_sector_size(fd, &sector_size);
                else {
                        _cleanup_close_ int non_direct_io_fd = -EBADF;
                        int probe_fd;

                        assert(S_ISREG(st.st_mode));

                        /* If the sector size is specified as UINT32_MAX, we'll try to probe the right sector
                         * size of the image in question by looking for the GPT partition header at various
                         * offsets. This of course only works if the image already has a disk label.
                         *
                         * So here we actually want to read the file contents ourselves. This is quite likely
                         * not going to work if we managed to enable O_DIRECT, because in such a case there
                         * are some pretty strict alignment requirements to offset, size and target, but
                         * there's no way to query what alignment specifically is actually required. Hence,
                         * let's avoid the mess, and temporarily open an fd without O_DIRECT for the probing
                         * logic. */

                        if (FLAGS_SET(loop_flags, LO_FLAGS_DIRECT_IO)) {
                                non_direct_io_fd = fd_reopen(fd, O_RDONLY|O_CLOEXEC|O_NONBLOCK);
                                if (non_direct_io_fd < 0)
                                        return non_direct_io_fd;

                                probe_fd = non_direct_io_fd;
                        } else
                                probe_fd = fd;

                        r = probe_sector_size(probe_fd, &sector_size);
                }
                if (r < 0)
                        return r;
        }

        config = (struct loop_config) {
                .fd = fd,
                .block_size = sector_size,
                .info = {
                        /* Use the specified flags, but configure the read-only flag from the open flags, and force autoclear */
                        .lo_flags = (loop_flags & ~LO_FLAGS_READ_ONLY) | ((open_flags & O_ACCMODE) == O_RDONLY ? LO_FLAGS_READ_ONLY : 0) | LO_FLAGS_AUTOCLEAR,
                        .lo_offset = offset,
                        .lo_sizelimit = size == UINT64_MAX ? 0 : size,
                },
        };

        /* Loop around LOOP_CTL_GET_FREE, since at the moment we attempt to open the returned device it might
         * be gone already, taken by somebody else racing against us. */
        for (unsigned n_attempts = 0;;) {
                usec_t usec;
                int nr;

                /* Let's take a lock on the control device first. On a busy system, where many programs
                 * attempt to allocate a loopback device at the same time, we might otherwise keep looping
                 * around relatively heavy operations: asking for a free loopback device, then opening it,
                 * validating it, attaching something to it. Let's serialize this whole operation, to make
                 * unnecessary busywork less likely. Note that this is just something we do to optimize our
                 * own code (and whoever else decides to use LOCK_EX locks for this), taking this lock is not
                 * necessary, it just means it's less likely we have to iterate through this loop again and
                 * again if our own code races against our own code.
                 *
                 * Note: our lock protocol is to take the /dev/loop-control lock first, and the block device
                 * lock second, if both are taken, and always in this order, to avoid ABBA locking issues. */
                if (flock(control, LOCK_EX) < 0)
                        return -errno;

                nr = ioctl(control, LOOP_CTL_GET_FREE);
                if (nr < 0)
                        return -errno;

                r = loop_configure(nr, open_flags, lock_op, &config, &d);
                if (r >= 0)
                        break;

                /* -ENODEV or friends: Somebody might've gotten the same number from the kernel, used the
                 * device, and called LOOP_CTL_REMOVE on it. Let's retry with a new number.
                 * -EBUSY: a file descriptor is already bound to the loopback block device.
                 * -EUCLEAN: some left-over partition devices that were cleaned up.
                 * -ENOANO: we tried to use LO_FLAGS_DIRECT_IO but the kernel rejected it. */
                if (!ERRNO_IS_DEVICE_ABSENT(r) && !IN_SET(r, -EBUSY, -EUCLEAN, -ENOANO))
                        return r;

                /* OK, this didn't work, let's try again a bit later, but first release the lock on the
                 * control device */
                if (flock(control, LOCK_UN) < 0)
                        return -errno;

                if (++n_attempts >= 64) /* Give up eventually */
                        return -EBUSY;

                /* If we failed to enable direct IO mode, let's retry without it. We restart the process, as
                 * on some combinations of kernel version and storage filesystem the kernel is very unhappy
                 * about a failed DIRECT_IO enablement and throws I/O errors. */
                if (r == -ENOANO && FLAGS_SET(config.info.lo_flags, LO_FLAGS_DIRECT_IO)) {
                        config.info.lo_flags &= ~LO_FLAGS_DIRECT_IO;
                        open_flags &= ~O_DIRECT;

                        int non_direct_io_fd = fd_reopen(config.fd, O_CLOEXEC|O_NONBLOCK|open_flags);
                        if (non_direct_io_fd < 0)
                                return log_debug_errno(
                                                non_direct_io_fd,
                                                "Failed to reopen file descriptor without O_DIRECT: %m");

                        safe_close(reopened_fd);
                        fd = config.fd = /* For cleanups */ reopened_fd = non_direct_io_fd;
                }

                /* Wait some random time, to make collision less likely. Let's pick a random time in the
                 * range 0ms…250ms, linearly scaled by the number of failed attempts. */
                usec = random_u64_range(UINT64_C(10) * USEC_PER_MSEC +
                                        UINT64_C(240) * USEC_PER_MSEC * n_attempts/64);
                log_debug("Trying again after %s.", FORMAT_TIMESPAN(usec, USEC_PER_MSEC));
                (void) usleep_safe(usec);
        }

        d->backing_file = TAKE_PTR(backing_file);
        d->backing_inode = st.st_ino;
        d->backing_devno = st.st_dev;

        log_debug("Successfully acquired %s, devno=%u:%u, nr=%i, diskseq=%" PRIu64,
                  d->node,
                  major(d->devno), minor(d->devno),
                  d->nr,
                  d->diskseq);

        *ret = TAKE_PTR(d);
        return 0;
}

static uint32_t loop_flags_mangle(uint32_t loop_flags) {
        int r;

        r = getenv_bool("SYSTEMD_LOOP_DIRECT_IO");
        if (r < 0 && r != -ENXIO)
                log_debug_errno(r, "Failed to parse $SYSTEMD_LOOP_DIRECT_IO, ignoring: %m");

        return UPDATE_FLAG(loop_flags, LO_FLAGS_DIRECT_IO, r != 0); /* Turn on LO_FLAGS_DIRECT_IO by default, unless explicitly configured to off. */
}

int loop_device_make(
                int fd,
                int open_flags,
                uint64_t offset,
                uint64_t size,
                uint32_t sector_size,
                uint32_t loop_flags,
                int lock_op,
                LoopDevice **ret) {

        assert(fd >= 0);
        assert(ret);

        return loop_device_make_internal(
                        NULL,
                        fd,
                        open_flags,
                        offset,
                        size,
                        sector_size,
                        loop_flags_mangle(loop_flags),
                        lock_op,
                        ret);
}

int loop_device_make_by_path_at(
                int dir_fd,
                const char *path,
                int open_flags,
                uint32_t sector_size,
                uint32_t loop_flags,
                int lock_op,
                LoopDevice **ret) {

        int r, basic_flags, direct_flags, rdwr_flags;
        _cleanup_close_ int fd = -EBADF;
        bool direct = false;

        assert(dir_fd >= 0 || dir_fd == AT_FDCWD);
        assert(path);
        assert(ret);
        assert(open_flags < 0 || IN_SET(open_flags, O_RDWR, O_RDONLY));

        /* Passing < 0 as open_flags here means we'll try to open the device writable if we can, retrying
         * read-only if we cannot. */

        loop_flags = loop_flags_mangle(loop_flags);

        /* Let's open with O_DIRECT if we can. But not all file systems support that, hence fall back to
         * non-O_DIRECT mode automatically, if it fails. */

        basic_flags = O_CLOEXEC|O_NONBLOCK|O_NOCTTY;
        direct_flags = FLAGS_SET(loop_flags, LO_FLAGS_DIRECT_IO) ? O_DIRECT : 0;
        rdwr_flags = open_flags >= 0 ? open_flags : O_RDWR;

        fd = xopenat(dir_fd, path, basic_flags|direct_flags|rdwr_flags, /* xopen_flags = */ 0, /* mode = */ 0);
        if (fd < 0 && direct_flags != 0) /* If we had O_DIRECT on, and things failed with that, let's immediately try again without */
                fd = xopenat(dir_fd, path, basic_flags|rdwr_flags, /* xopen_flags = */ 0, /* mode = */ 0);
        else
                direct = direct_flags != 0;
        if (fd < 0) {
                r = -errno;

                /* Retry read-only? */
                if (open_flags >= 0 || !(ERRNO_IS_PRIVILEGE(r) || r == -EROFS))
                        return r;

                fd = xopenat(dir_fd, path, basic_flags|direct_flags|O_RDONLY, /* xopen_flags = */ 0, /* mode = */ 0);
                if (fd < 0 && direct_flags != 0) /* as above */
                        fd = xopenat(dir_fd, path, basic_flags|O_RDONLY, /* xopen_flags = */ 0, /* mode = */ 0);
                else
                        direct = direct_flags != 0;
                if (fd < 0)
                        return r; /* Propagate original error */

                open_flags = O_RDONLY;
        } else if (open_flags < 0)
                open_flags = O_RDWR;

        log_debug("Opened '%s' in %s access mode%s, with O_DIRECT %s%s.",
                  path,
                  open_flags == O_RDWR ? "O_RDWR" : "O_RDONLY",
                  open_flags != rdwr_flags ? " (O_RDWR was requested but not allowed)" : "",
                  direct ? "enabled" : "disabled",
                  direct != (direct_flags != 0) ? " (O_DIRECT was requested but not supported)" : "");

        return loop_device_make_internal(
                        dir_fd == AT_FDCWD ? path : NULL,
                        fd,
                        open_flags,
                        /* offset = */ 0,
                        /* size = */ 0,
                        sector_size,
                        loop_flags,
                        lock_op,
                        ret);
}
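
/* A minimal usage sketch (illustrative only, not part of the upstream file; the image path, sector
 * size and flags below are made-up example values): allocate a loopback device for an image file and
 * let the _cleanup_ logic detach it again when the last reference is dropped.
 *
 *     _cleanup_(loop_device_unrefp) LoopDevice *loop = NULL;
 *     int r;
 *
 *     r = loop_device_make_by_path_at(
 *                     AT_FDCWD, "/var/tmp/disk.img", O_RDWR,
 *                     512,                (sector_size)
 *                     LO_FLAGS_PARTSCAN,  (loop_flags)
 *                     LOCK_EX,            (lock_op)
 *                     &loop);
 *     if (r < 0)
 *             return r;
 *
 *     ... use loop->node and loop->fd, dissect or mount the image, then drop the reference ... */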

int loop_device_make_by_path_memory(
                const char *path,
                int open_flags,
                uint32_t sector_size,
                uint32_t loop_flags,
                int lock_op,
                LoopDevice **ret) {

        _cleanup_close_ int fd = -EBADF, mfd = -EBADF;
        _cleanup_free_ char *fn = NULL;
        struct stat st;
        int r;

        assert(path);
        assert(IN_SET(open_flags, O_RDWR, O_RDONLY));
        assert(ret);

        loop_flags &= ~LO_FLAGS_DIRECT_IO; /* memfds don't support O_DIRECT, hence LO_FLAGS_DIRECT_IO can't be used either */

        fd = open(path, O_CLOEXEC|O_NONBLOCK|O_NOCTTY|O_RDONLY);
        if (fd < 0)
                return -errno;

        if (fstat(fd, &st) < 0)
                return -errno;

        if (!S_ISREG(st.st_mode) && !S_ISBLK(st.st_mode))
                return -EBADF;

        r = path_extract_filename(path, &fn);
        if (r < 0)
                return r;

        mfd = memfd_clone_fd(fd, fn, open_flags|O_CLOEXEC);
        if (mfd < 0)
                return mfd;

        fd = safe_close(fd); /* Let's close the original early */

        return loop_device_make_internal(NULL, mfd, open_flags, 0, 0, sector_size, loop_flags, lock_op, ret);
}

static LoopDevice* loop_device_free(LoopDevice *d) {
        _cleanup_close_ int control = -EBADF;
        int r;

        if (!d)
                return NULL;

        /* Release any lock we might have on the device first. We want to open+lock the /dev/loop-control
         * device below, but our lock protocol says that if both control and block device locks are taken,
         * the control lock needs to be taken first, the block device lock second — in order to avoid ABBA
         * locking issues. Moreover, we want to issue LOOP_CLR_FD on the block device further down, and that
         * would fail if we had another fd open to the device. */
        d->lock_fd = safe_close(d->lock_fd);

        /* Let's open the control device early, and lock it, so that we can release our block device and
         * delete it in a synchronized fashion, and allocators won't needlessly see the block device as free
         * while we are about to delete it. */
        if (!LOOP_DEVICE_IS_FOREIGN(d) && !d->relinquished) {
                control = open("/dev/loop-control", O_RDWR|O_CLOEXEC|O_NOCTTY|O_NONBLOCK);
                if (control < 0)
                        log_debug_errno(errno, "Failed to open loop control device, cannot remove loop device '%s', ignoring: %m", strna(d->node));
                else if (flock(control, LOCK_EX) < 0)
                        log_debug_errno(errno, "Failed to lock loop control device, ignoring: %m");
        }

        /* Then let's release the loopback block device */
        if (d->fd >= 0) {
                /* Implicitly sync the device, since otherwise in-flight blocks might not get written */
                if (fsync(d->fd) < 0)
                        log_debug_errno(errno, "Failed to sync loop block device, ignoring: %m");

                if (!LOOP_DEVICE_IS_FOREIGN(d) && !d->relinquished) {
                        /* We are supposed to clear the loopback device. Let's do this synchronously: lock
                         * the device, manually remove all partitions and then clear it. This should ensure
                         * udev doesn't concurrently access the devices, and we can be reasonably sure that
                         * once we are done here the device is cleared and all its partition children
                         * removed. Note that we lock our primary device fd here (and not a separate locking
                         * fd, as we do during allocation, since we want to keep the lock all the way through
                         * the LOOP_CLR_FD, but that call would fail if we had more than one fd open.) */

                        if (flock(d->fd, LOCK_EX) < 0)
                                log_debug_errno(errno, "Failed to lock loop block device, ignoring: %m");

                        r = block_device_remove_all_partitions(d->dev, d->fd);
                        if (r < 0)
                                log_debug_errno(r, "Failed to remove partitions of loopback block device, ignoring: %m");

                        if (ioctl(d->fd, LOOP_CLR_FD) < 0)
                                log_debug_errno(errno, "Failed to clear loop device, ignoring: %m");
                }

                safe_close(d->fd);
        }

        /* Now that the block device is released, let's also try to remove it */
        if (control >= 0) {
                useconds_t delay = 5 * USEC_PER_MSEC; /* A total delay of 5090 ms between 39 attempts,
                                                       * (4*5 + 5*10 + 5*20 + … + 3*640) = 5090. */

                for (unsigned attempt = 1;; attempt++) {
                        if (ioctl(control, LOOP_CTL_REMOVE, d->nr) >= 0)
                                break;
                        if (errno != EBUSY || attempt > 38) {
                                log_debug_errno(errno, "Failed to remove device %s: %m", strna(d->node));
                                break;
                        }
                        if (attempt % 5 == 0) {
                                log_debug("Device is still busy after %u attempts…", attempt);
                                delay *= 2;
                        }

                        (void) usleep_safe(delay);
                }
        }

        free(d->node);
        sd_device_unref(d->dev);
        free(d->backing_file);
        return mfree(d);
}

DEFINE_TRIVIAL_REF_UNREF_FUNC(LoopDevice, loop_device, loop_device_free);

void loop_device_relinquish(LoopDevice *d) {
        assert(d);

        /* Don't attempt to clean up the loop device anymore from this point on. Leave the cleaning up to
         * the kernel itself, using the loop device "auto-clear" logic we already turned on when creating
         * the device. */

        d->relinquished = true;
}
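
/* Illustrative sketch (not part of the upstream file; "backing_fd" and "loop" are placeholder names):
 * the typical hand-over pattern this enables. Once whatever should outlive us (e.g. a mount) is
 * established on the device, the object can be dropped without detaching the device; the kernel's
 * auto-clear logic detaches it when its last user goes away.
 *
 *     r = loop_device_make(backing_fd, O_RDWR, 0, 0, 0, 0, LOCK_EX, &loop);
 *     ... mount loop->node somewhere ...
 *     loop_device_relinquish(loop);      (leave detaching to the kernel)
 *     loop = loop_device_unref(loop);    (no LOOP_CLR_FD is issued anymore) */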

void loop_device_unrelinquish(LoopDevice *d) {
        assert(d);
        d->relinquished = false;
}

int loop_device_open(
                sd_device *dev,
                int open_flags,
                int lock_op,
                LoopDevice **ret) {

        _cleanup_close_ int fd = -EBADF, lock_fd = -EBADF;
        _cleanup_free_ char *node = NULL, *backing_file = NULL;
        dev_t devnum, backing_devno = 0;
        struct loop_info64 info;
        ino_t backing_inode = 0;
        uint64_t diskseq = 0;
        LoopDevice *d;
        const char *s;
        int r, nr = -1;

        assert(dev);
        assert(IN_SET(open_flags, O_RDWR, O_RDONLY));
        assert(ret);

        /* Even if fd is provided through the argument in loop_device_open_from_fd(), we reopen the inode
         * here, instead of keeping just a dup() clone of it around, since we want to ensure that the
         * O_DIRECT flag of the handle we keep is off, we have our own file index, and have the right
         * read/write mode in effect. */
        fd = sd_device_open(dev, O_CLOEXEC|O_NONBLOCK|O_NOCTTY|open_flags);
        if (fd < 0)
                return fd;

        if ((lock_op & ~LOCK_NB) != LOCK_UN) {
                lock_fd = open_lock_fd(fd, lock_op);
                if (lock_fd < 0)
                        return lock_fd;
        }

        if (ioctl(fd, LOOP_GET_STATUS64, &info) >= 0) {
#if HAVE_VALGRIND_MEMCHECK_H
                /* Valgrind currently doesn't know LOOP_GET_STATUS64. Remove this once it does */
                VALGRIND_MAKE_MEM_DEFINED(&info, sizeof(info));
#endif
                nr = info.lo_number;

                if (sd_device_get_sysattr_value(dev, "loop/backing_file", &s) >= 0) {
                        backing_file = strdup(s);
                        if (!backing_file)
                                return -ENOMEM;
                }

                backing_devno = info.lo_device;
                backing_inode = info.lo_inode;
        }

        r = fd_get_diskseq(fd, &diskseq);
        if (r < 0 && r != -EOPNOTSUPP)
                return r;

        uint32_t sector_size;
        r = blockdev_get_sector_size(fd, &sector_size);
        if (r < 0)
                return r;

        r = sd_device_get_devnum(dev, &devnum);
        if (r < 0)
                return r;

        r = sd_device_get_devname(dev, &s);
        if (r < 0)
                return r;

        node = strdup(s);
        if (!node)
                return -ENOMEM;

        d = new(LoopDevice, 1);
        if (!d)
                return -ENOMEM;

        *d = (LoopDevice) {
                .n_ref = 1,
                .fd = TAKE_FD(fd),
                .lock_fd = TAKE_FD(lock_fd),
                .nr = nr,
                .node = TAKE_PTR(node),
                .dev = sd_device_ref(dev),
                .backing_file = TAKE_PTR(backing_file),
                .backing_inode = backing_inode,
                .backing_devno = backing_devno,
                .relinquished = true, /* It's not ours, don't try to destroy it when this object is freed */
                .devno = devnum,
                .diskseq = diskseq,
                .uevent_seqnum_not_before = UINT64_MAX,
                .timestamp_not_before = USEC_INFINITY,
                .sector_size = sector_size,
        };

        *ret = d;
        return 0;
}

int loop_device_open_from_fd(
                int fd,
                int open_flags,
                int lock_op,
                LoopDevice **ret) {

        _cleanup_(sd_device_unrefp) sd_device *dev = NULL;
        int r;

        r = block_device_new_from_fd(ASSERT_FD(fd), 0, &dev);
        if (r < 0)
                return r;

        return loop_device_open(dev, open_flags, lock_op, ret);
}

int loop_device_open_from_path(
                const char *path,
                int open_flags,
                int lock_op,
                LoopDevice **ret) {

        _cleanup_(sd_device_unrefp) sd_device *dev = NULL;
        int r;

        assert(path);

        r = block_device_new_from_path(path, 0, &dev);
        if (r < 0)
                return r;

        return loop_device_open(dev, open_flags, lock_op, ret);
}
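
/* Illustrative sketch (not part of the upstream file; "/dev/loop7" is just an example node): attach
 * to an existing loopback device, taking a shared BSD lock while inspecting it. Devices opened this
 * way are marked as relinquished above, so dropping the reference never detaches them.
 *
 *     _cleanup_(loop_device_unrefp) LoopDevice *loop = NULL;
 *     int r;
 *
 *     r = loop_device_open_from_path("/dev/loop7", O_RDONLY, LOCK_SH, &loop);
 *     if (r < 0)
 *             return r;
 *
 *     log_debug("Backing file of %s: %s", loop->node, strna(loop->backing_file)); */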

static int resize_partition(int partition_fd, uint64_t offset, uint64_t size) {
        char sysfs[STRLEN("/sys/dev/block/:/partition") + 2*DECIMAL_STR_MAX(dev_t) + 1];
        _cleanup_free_ char *buffer = NULL;
        uint64_t current_offset, current_size, partno;
        _cleanup_close_ int whole_fd = -EBADF;
        struct stat st;
        dev_t devno;
        int r;

        /* Resizes the partition the loopback device refers to (assuming it refers to one instead of an
         * actual loopback device), and changes the offset, if needed. This is a fancy wrapper around
         * BLKPG_RESIZE_PARTITION. */

        if (fstat(ASSERT_FD(partition_fd), &st) < 0)
                return -errno;

        assert(S_ISBLK(st.st_mode));

        xsprintf(sysfs, "/sys/dev/block/" DEVNUM_FORMAT_STR "/partition", DEVNUM_FORMAT_VAL(st.st_rdev));
        r = read_one_line_file(sysfs, &buffer);
        if (r == -ENOENT) /* not a partition, cannot resize */
                return -ENOTTY;
        if (r < 0)
                return r;
        r = safe_atou64(buffer, &partno);
        if (r < 0)
                return r;

        xsprintf(sysfs, "/sys/dev/block/" DEVNUM_FORMAT_STR "/start", DEVNUM_FORMAT_VAL(st.st_rdev));

        buffer = mfree(buffer);
        r = read_one_line_file(sysfs, &buffer);
        if (r < 0)
                return r;
        r = safe_atou64(buffer, &current_offset);
        if (r < 0)
                return r;
        if (current_offset > UINT64_MAX/512U)
                return -EINVAL;
        current_offset *= 512U;

        if (ioctl(partition_fd, BLKGETSIZE64, &current_size) < 0)
                return -EINVAL;

        if (size == UINT64_MAX && offset == UINT64_MAX)
                return 0;
        if (current_size == size && current_offset == offset)
                return 0;

        xsprintf(sysfs, "/sys/dev/block/" DEVNUM_FORMAT_STR "/../dev", DEVNUM_FORMAT_VAL(st.st_rdev));

        buffer = mfree(buffer);
        r = read_one_line_file(sysfs, &buffer);
        if (r < 0)
                return r;
        r = parse_devnum(buffer, &devno);
        if (r < 0)
                return r;

        whole_fd = r = device_open_from_devnum(S_IFBLK, devno, O_RDWR|O_CLOEXEC|O_NONBLOCK|O_NOCTTY, NULL);
        if (r < 0)
                return r;

        return block_device_resize_partition(
                        whole_fd,
                        partno,
                        offset == UINT64_MAX ? current_offset : offset,
                        size == UINT64_MAX ? current_size : size);
}

int loop_device_refresh_size(LoopDevice *d, uint64_t offset, uint64_t size) {
        struct loop_info64 info;

        assert(d);
        assert(d->fd >= 0);

        /* Changes the offset/start of the loop device relative to the beginning of the underlying file or
         * block device. If this loop device actually refers to a partition and not a loopback device, we'll
         * try to adjust the partition offsets instead.
         *
         * If either offset or size is UINT64_MAX we won't change that parameter. */

        if (d->nr < 0) /* not a loopback device */
                return resize_partition(d->fd, offset, size);

        if (ioctl(d->fd, LOOP_GET_STATUS64, &info) < 0)
                return -errno;

#if HAVE_VALGRIND_MEMCHECK_H
        /* Valgrind currently doesn't know LOOP_GET_STATUS64. Remove this once it does */
        VALGRIND_MAKE_MEM_DEFINED(&info, sizeof(info));
#endif

        if (size == UINT64_MAX && offset == UINT64_MAX)
                return 0;
        if (info.lo_sizelimit == size && info.lo_offset == offset)
                return 0;

        if (size != UINT64_MAX)
                info.lo_sizelimit = size;
        if (offset != UINT64_MAX)
                info.lo_offset = offset;

        return RET_NERRNO(ioctl(d->fd, LOOP_SET_STATUS64, &info));
}

int loop_device_flock(LoopDevice *d, int operation) {
        assert(IN_SET(operation & ~LOCK_NB, LOCK_UN, LOCK_SH, LOCK_EX));
        assert(d);

        /* When unlocking just close the lock fd */
        if ((operation & ~LOCK_NB) == LOCK_UN) {
                d->lock_fd = safe_close(d->lock_fd);
                return 0;
        }

        /* If we had no lock fd so far, create one and lock it right away */
        if (d->lock_fd < 0) {
                d->lock_fd = open_lock_fd(ASSERT_FD(d->fd), operation);
                if (d->lock_fd < 0)
                        return d->lock_fd;

                return 0;
        }

        /* Otherwise change the current lock mode on the existing fd */
        return RET_NERRNO(flock(d->lock_fd, operation));
}

int loop_device_sync(LoopDevice *d) {
        assert(d);

        /* We also do this implicitly in loop_device_unref(). Doing this explicitly here has the benefit that
         * we can check the return value though. */

        return RET_NERRNO(fsync(ASSERT_FD(d->fd)));
}

int loop_device_set_autoclear(LoopDevice *d, bool autoclear) {
        struct loop_info64 info;

        assert(d);

        if (ioctl(ASSERT_FD(d->fd), LOOP_GET_STATUS64, &info) < 0)
                return -errno;

        if (autoclear == FLAGS_SET(info.lo_flags, LO_FLAGS_AUTOCLEAR))
                return 0;

        SET_FLAG(info.lo_flags, LO_FLAGS_AUTOCLEAR, autoclear);

        if (ioctl(d->fd, LOOP_SET_STATUS64, &info) < 0)
                return -errno;

        return 1;
}

int loop_device_set_filename(LoopDevice *d, const char *name) {
        struct loop_info64 info;

        assert(d);

        /* Sets the .lo_file_name of the loopback device. This is supposed to contain the path to the file
         * backing the block device, but is actually just a free-form string you can pass to the kernel. Most
         * tools that actually care for the backing file path use the sysfs attribute file loop/backing_file
         * which is a kernel generated string, subject to file system namespaces and such.
         *
         * .lo_file_name is useful since userspace can select it freely when creating a loopback block
         * device, and we can use it for /dev/disk/by-loop-ref/ symlinks, and similar, so that apps can
         * recognize their own loopback files. */

        if (name && strlen(name) >= sizeof(info.lo_file_name))
                return -ENOBUFS;

        if (ioctl(ASSERT_FD(d->fd), LOOP_GET_STATUS64, &info) < 0)
                return -errno;

        if (strneq((char*) info.lo_file_name, strempty(name), sizeof(info.lo_file_name)))
                return 0;

        if (name) {
                strncpy((char*) info.lo_file_name, name, sizeof(info.lo_file_name)-1);
                info.lo_file_name[sizeof(info.lo_file_name)-1] = 0;
        } else
                memzero(info.lo_file_name, sizeof(info.lo_file_name));

        if (ioctl(d->fd, LOOP_SET_STATUS64, &info) < 0)
                return -errno;

        return 1;
}
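
/* Illustrative sketch (not part of the upstream file; the reference string is a made-up example):
 * tag a freshly allocated device with a free-form reference, which can then back a
 * /dev/disk/by-loop-ref/ symlink (see the comment above) so an app can recognize its own loopback
 * devices again.
 *
 *     r = loop_device_set_filename(loop, "myapp-scratch.img");
 *     if (r < 0)
 *             log_debug_errno(r, "Failed to set loopback reference string, ignoring: %m"); */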