1 | /* SPDX-License-Identifier: LGPL-2.1-or-later */ | |
2 | ||
3 | #if HAVE_VALGRIND_MEMCHECK_H | |
4 | #include <valgrind/memcheck.h> | |
5 | #endif | |
6 | ||
7 | #include <fcntl.h> | |
8 | #include <linux/loop.h> | |
9 | #include <sys/file.h> | |
10 | #include <sys/ioctl.h> | |
11 | #include <unistd.h> | |
12 | ||
13 | #include "sd-device.h" | |
14 | ||
15 | #include "alloc-util.h" | |
16 | #include "blockdev-util.h" | |
17 | #include "data-fd-util.h" | |
18 | #include "device-util.h" | |
19 | #include "devnum-util.h" | |
20 | #include "dissect-image.h" | |
21 | #include "env-util.h" | |
22 | #include "errno-util.h" | |
23 | #include "fd-util.h" | |
24 | #include "fileio.h" | |
25 | #include "fs-util.h" | |
26 | #include "loop-util.h" | |
27 | #include "parse-util.h" | |
28 | #include "path-util.h" | |
29 | #include "random-util.h" | |
30 | #include "stat-util.h" | |
31 | #include "stdio-util.h" | |
32 | #include "string-util.h" | |
33 | #include "time-util.h" | |
34 | ||
/* Cleanup handler for a loop device fd: detaches the backing file (if the fd is valid) and closes it.
 * Used via _cleanup_() so that half-configured devices are torn down on error paths. */
static void cleanup_clear_loop_close(int *fd) {
        int loop_fd = *fd;

        if (loop_fd >= 0) {
                /* Detach whatever backing file is attached before giving up the fd. */
                (void) ioctl(loop_fd, LOOP_CLR_FD);
                (void) safe_close(loop_fd);
        }
}
42 | ||
43 | static int loop_is_bound(int fd) { | |
44 | struct loop_info64 info; | |
45 | ||
46 | if (ioctl(ASSERT_FD(fd), LOOP_GET_STATUS64, &info) < 0) { | |
47 | if (errno == ENXIO) | |
48 | return false; /* not bound! */ | |
49 | ||
50 | return -errno; | |
51 | } | |
52 | ||
53 | return true; /* bound! */ | |
54 | } | |
55 | ||
56 | static int open_lock_fd(int primary_fd, int operation) { | |
57 | _cleanup_close_ int lock_fd = -EBADF; | |
58 | ||
59 | assert(IN_SET(operation & ~LOCK_NB, LOCK_SH, LOCK_EX)); | |
60 | ||
61 | lock_fd = fd_reopen(ASSERT_FD(primary_fd), O_RDONLY|O_CLOEXEC|O_NONBLOCK|O_NOCTTY); | |
62 | if (lock_fd < 0) | |
63 | return lock_fd; | |
64 | ||
65 | if (flock(lock_fd, operation) < 0) | |
66 | return -errno; | |
67 | ||
68 | return TAKE_FD(lock_fd); | |
69 | } | |
70 | ||
/* If 'c' requested LO_FLAGS_DIRECT_IO, verifies that the kernel actually enabled it on the device.
 * Returns 0 on success (or when direct IO was not requested), -ENOANO when direct IO could not be
 * enabled (the caller is expected to retry the whole operation in buffered mode), or another
 * negative errno on failure. */
static int loop_configure_verify_direct_io(int fd, const struct loop_config *c) {
        struct loop_info64 info;

        assert(fd >= 0);
        assert(c);

        if (!FLAGS_SET(c->info.lo_flags, LO_FLAGS_DIRECT_IO))
                return 0; /* Direct IO not requested — nothing to verify. */

        if (ioctl(fd, LOOP_GET_STATUS64, &info) < 0)
                return log_debug_errno(errno, "Failed to issue LOOP_GET_STATUS64: %m");

#if HAVE_VALGRIND_MEMCHECK_H
        VALGRIND_MAKE_MEM_DEFINED(&info, sizeof(info));
#endif

        /* On older kernels (<= 5.3) it was necessary to set the block size of the loopback block
         * device to the logical block size of the underlying file system. Since there was no nice
         * way to query the value, we are not bothering to do this however. On newer kernels the
         * block size is propagated automatically and does not require intervention from us. We'll
         * check here if enabling direct IO worked, to make this easily debuggable however.
         *
         * (Should anyone really care and actually wants direct IO on old kernels: it might be worth
         * enabling direct IO with iteratively larger block sizes until it eventually works.)
         *
         * On older kernels (e.g.: 5.10) when this is attempted on a file stored on a dm-crypt
         * backed partition the kernel will start returning I/O errors when accessing the mounted
         * loop device, so return a recognizable error that causes the operation to be started
         * from scratch without the LO_FLAGS_DIRECT_IO flag. */
        if (!FLAGS_SET(info.lo_flags, LO_FLAGS_DIRECT_IO))
                return log_debug_errno(
                                SYNTHETIC_ERRNO(ENOANO),
                                "Could not enable direct IO mode, retrying in buffered IO mode.");

        return 0;
}
106 | ||
/* Verifies that a LOOP_CONFIGURE call actually applied all the settings in 'c'. Some kernels
 * (notably 5.8 vanilla) silently ignore parts of the request. Returns 1 if everything was honoured,
 * 0 if the caller should fall back to LOOP_SET_STATUS64, and a negative errno on failure. */
static int loop_configure_verify(int fd, const struct loop_config *c) {
        bool honoured = true;
        int r;

        assert(fd >= 0);
        assert(c);

        /* Was the requested sector size applied? */
        if (c->block_size != 0) {
                uint32_t effective_ssz;

                r = blockdev_get_sector_size(fd, &effective_ssz);
                if (r < 0)
                        return r;

                if (effective_ssz != c->block_size) {
                        log_debug("LOOP_CONFIGURE didn't honour requested block size %" PRIu32 ", got %" PRIu32 " instead. Ignoring.", c->block_size, effective_ssz);
                        honoured = false;
                }
        }

        /* Kernel 5.8 vanilla doesn't properly propagate the size limit into the block device. If
         * it's used, immediately check whether it had the desired effect, and if not fall back to
         * classic LOOP_SET_STATUS64. */
        if (c->info.lo_sizelimit != 0) {
                uint64_t effective_size;

                r = blockdev_get_device_size(fd, &effective_size);
                if (r < 0)
                        return r;

                if (effective_size != c->info.lo_sizelimit) {
                        log_debug("LOOP_CONFIGURE is broken, doesn't honour .info.lo_sizelimit. Falling back to LOOP_SET_STATUS64.");
                        honoured = false;
                }
        }

        /* Kernel 5.8 vanilla doesn't properly propagate the partition scanning flag into the block
         * device either, hence verify it took effect before returning. */
        if (FLAGS_SET(c->info.lo_flags, LO_FLAGS_PARTSCAN)) {
                r = blockdev_partscan_enabled_fd(fd);
                if (r < 0)
                        return r;
                if (r == 0) {
                        log_debug("LOOP_CONFIGURE is broken, doesn't honour LO_FLAGS_PARTSCAN. Falling back to LOOP_SET_STATUS64.");
                        honoured = false;
                }
        }

        r = loop_configure_verify_direct_io(fd, c);
        if (r < 0)
                return r;

        return honoured;
}
163 | ||
/* Fallback configuration path for kernels where LOOP_CONFIGURE is unavailable or broken: applies the
 * settings in 'c' to the already-attached loop device via LOOP_SET_STATUS64 (with EAGAIN retry),
 * LOOP_SET_BLOCK_SIZE and LOOP_SET_DIRECT_IO. Returns 0 on success, negative errno on failure
 * (including -ENOANO if direct IO was requested but could not be enabled). */
static int loop_configure_fallback(int fd, const struct loop_config *c) {
        struct loop_info64 info_copy;
        int r;

        assert(fd >= 0);
        assert(c);

        /* Only some of the flags LOOP_CONFIGURE can set are also settable via LOOP_SET_STATUS64, hence mask
         * them out. */
        info_copy = c->info;
        info_copy.lo_flags &= LOOP_SET_STATUS_SETTABLE_FLAGS;

        /* Since kernel commit 5db470e229e22b7eda6e23b5566e532c96fb5bc3 (kernel v5.0) the LOOP_SET_STATUS64
         * ioctl can return EAGAIN in case we change the info.lo_offset field, if someone else is accessing the
         * block device while we try to reconfigure it. This is a pretty common case, since udev might
         * instantly start probing the device as soon as we attach an fd to it. Hence handle it in two ways:
         * first, let's take the BSD lock to ensure that udev will not step in between the point in
         * time where we attach the fd and where we reconfigure the device. Secondly, let's wait 50ms on
         * EAGAIN and retry. The former should be an efficient mechanism to avoid we have to wait 50ms
         * needlessly if we are just racing against udev. The latter is protection against all other cases,
         * i.e. peers that do not take the BSD lock. */

        for (unsigned n_attempts = 0;;) {
                if (ioctl(fd, LOOP_SET_STATUS64, &info_copy) >= 0)
                        break;

                /* Give up on any error other than EAGAIN, or after 64 attempts. */
                if (errno != EAGAIN || ++n_attempts >= 64)
                        return log_debug_errno(errno, "Failed to configure loopback block device: %m");

                /* Sleep some random time, but at least 10ms, at most 250ms. Increase the delay the more
                 * failed attempts we see */
                (void) usleep_safe(UINT64_C(10) * USEC_PER_MSEC +
                                   random_u64_range(UINT64_C(240) * USEC_PER_MSEC * n_attempts/64));
        }

        /* If a block size is requested then try to configure it. If that doesn't work, ignore errors, but
         * afterwards, let's validate what is in effect, and if it doesn't match what we want, fail */
        if (c->block_size != 0) {
                uint32_t ssz;

                if (ioctl(fd, LOOP_SET_BLOCK_SIZE, (unsigned long) c->block_size) < 0)
                        log_debug_errno(errno, "Failed to set sector size, ignoring: %m");

                r = blockdev_get_sector_size(fd, &ssz);
                if (r < 0)
                        return log_debug_errno(r, "Failed to read sector size: %m");
                if (ssz != c->block_size)
                        return log_debug_errno(SYNTHETIC_ERRNO(EIO), "Sector size of loopback device doesn't match what we requested, refusing.");
        }

        /* LO_FLAGS_DIRECT_IO is a flag we need to configure via explicit ioctls. */
        if (FLAGS_SET(c->info.lo_flags, LO_FLAGS_DIRECT_IO))
                if (ioctl(fd, LOOP_SET_DIRECT_IO, 1UL) < 0)
                        log_debug_errno(errno, "Failed to enable direct IO mode, ignoring: %m");

        /* Double-check direct IO really took effect (best-effort above). */
        return loop_configure_verify_direct_io(fd, c);
}
221 | ||
/* Opens /dev/loop<nr>, takes an exclusive BSD lock on it, attaches the backing fd and applies the
 * configuration in 'c', returning a fresh LoopDevice in *ret on success. Prefers the one-shot
 * LOOP_CONFIGURE ioctl and falls back to LOOP_SET_FD + LOOP_SET_STATUS64 where that is unsupported
 * or misbehaves. Returns -EBUSY, -EUCLEAN or device-absent errors that the caller is expected to
 * handle by retrying with a different device number. */
static int loop_configure(
                int nr,
                int open_flags,
                int lock_op,
                const struct loop_config *c,
                LoopDevice **ret) {

        /* Sticky across calls: once LOOP_CONFIGURE is found to misbehave we never try it again. */
        static bool loop_configure_broken = false;

        _cleanup_(sd_device_unrefp) sd_device *dev = NULL;
        _cleanup_(cleanup_clear_loop_close) int loop_with_fd = -EBADF; /* This must be declared before lock_fd. */
        _cleanup_close_ int fd = -EBADF, lock_fd = -EBADF;
        _cleanup_free_ char *node = NULL;
        uint64_t diskseq = 0;
        dev_t devno;
        int r;

        assert(nr >= 0);
        assert(c);
        assert(ret);

        if (asprintf(&node, "/dev/loop%i", nr) < 0)
                return log_oom_debug();

        r = sd_device_new_from_devname(&dev, node);
        if (r < 0)
                return log_debug_errno(r, "Failed to create sd_device object for \"%s\": %m", node);

        r = sd_device_get_devnum(dev, &devno);
        if (r < 0)
                return log_device_debug_errno(dev, r, "Failed to get devnum: %m");

        fd = sd_device_open(dev, O_CLOEXEC|O_NONBLOCK|O_NOCTTY|open_flags);
        if (fd < 0)
                return log_device_debug_errno(dev, fd, "Failed to open device: %m");

        /* Let's lock the device before we do anything. We take the BSD lock on a second, separately opened
         * fd for the device. udev after all watches for close() events (specifically IN_CLOSE_WRITE) on
         * block devices to reprobe them, hence by having a separate fd we will later close() we can ensure
         * we trigger udev after everything is done. If we'd lock our own fd instead and keep it open for a
         * long time udev would possibly never run on it again, even though the fd is unlocked, simply
         * because we never close() it. It also has the nice benefit we can use the _cleanup_close_ logic to
         * automatically release the lock, after we are done. */
        lock_fd = open_lock_fd(fd, LOCK_EX);
        if (lock_fd < 0)
                return log_device_debug_errno(dev, lock_fd, "Failed to acquire lock: %m");

        log_device_debug(dev, "Acquired exclusive lock.");

        /* Let's see if backing file is really unattached. Someone may already attach a backing file without
         * taking BSD lock. */
        r = loop_is_bound(fd);
        if (r < 0)
                return log_device_debug_errno(dev, r, "Failed to check if the loopback block device is bound: %m");
        if (r > 0)
                return log_device_debug_errno(dev, SYNTHETIC_ERRNO(EBUSY),
                                              "The loopback block device is already bound, ignoring.");

        /* Let's see if the device is really detached, i.e. currently has no associated partition block
         * devices. On various kernels (such as 5.8) it is possible to have a loopback block device that
         * superficially is detached but still has partition block devices associated for it. Let's then
         * manually remove the partitions via BLKPG, and tell the caller we did that via EUCLEAN, so they try
         * again. */
        r = block_device_remove_all_partitions(dev, fd);
        if (r < 0)
                return log_device_debug_errno(dev, r, "Failed to remove partitions on the loopback block device: %m");
        if (r > 0)
                /* Removed all partitions. Let's report this to the caller, to try again, and count this as
                 * an attempt. */
                return log_device_debug_errno(dev, SYNTHETIC_ERRNO(EUCLEAN),
                                              "Removed partitions on the loopback block device.");

        if (!loop_configure_broken) {
                if (ioctl(fd, LOOP_CONFIGURE, c) < 0) {
                        /* Do fallback only if LOOP_CONFIGURE is not supported, propagate all other errors. */
                        if (!ERRNO_IS_IOCTL_NOT_SUPPORTED(errno))
                                return log_device_debug_errno(dev, errno, "ioctl(LOOP_CONFIGURE) failed: %m");

                        loop_configure_broken = true;
                } else {
                        /* Device is now attached — hand the fd to loop_with_fd so the
                         * cleanup_clear_loop_close handler detaches it again on the error paths below. */
                        loop_with_fd = TAKE_FD(fd);

                        r = loop_configure_verify(loop_with_fd, c);
                        if (r < 0)
                                return log_device_debug_errno(dev, r, "Failed to verify if loopback block device is correctly configured: %m");
                        if (r == 0) {
                                /* LOOP_CONFIGURE doesn't work. Remember that. */
                                loop_configure_broken = true;

                                /* We return EBUSY here instead of retrying immediately with LOOP_SET_FD,
                                 * because LOOP_CLR_FD is async: if the operation cannot be executed right
                                 * away it just sets the autoclear flag on the device. This means there's a
                                 * good chance we cannot actually reuse the loopback device right-away. Hence
                                 * let's assume it's busy, avoid the trouble and let the calling loop call us
                                 * again with a new, likely unused device. */
                                return -EBUSY;
                        }
                }
        }

        if (loop_configure_broken) {
                if (ioctl(fd, LOOP_SET_FD, c->fd) < 0)
                        return log_device_debug_errno(dev, errno, "ioctl(LOOP_SET_FD) failed: %m");

                loop_with_fd = TAKE_FD(fd);

                r = loop_configure_fallback(loop_with_fd, c);
                if (r < 0)
                        return r;
        }

        /* Diskseq may legitimately be unsupported by older kernels; treat that as "no diskseq". */
        r = fd_get_diskseq(loop_with_fd, &diskseq);
        if (r < 0 && r != -EOPNOTSUPP)
                return log_device_debug_errno(dev, r, "Failed to get diskseq: %m");

        /* We hold LOCK_EX at this point; adjust to what the caller actually asked for. */
        switch (lock_op & ~LOCK_NB) {
        case LOCK_EX: /* Already in effect */
                break;
        case LOCK_SH: /* Downgrade */
                if (flock(lock_fd, lock_op) < 0)
                        return log_device_debug_errno(dev, errno, "Failed to downgrade lock level: %m");
                break;
        case LOCK_UN: /* Release */
                lock_fd = safe_close(lock_fd);
                break;
        default:
                assert_not_reached();
        }

        uint64_t device_size;
        r = blockdev_get_device_size(loop_with_fd, &device_size);
        if (r < 0)
                return log_device_debug_errno(dev, r, "Failed to get loopback device size: %m");

        LoopDevice *d = new(LoopDevice, 1);
        if (!d)
                return log_oom_debug();

        /* Ownership of the fds, node string and sd_device object moves into the LoopDevice. */
        *d = (LoopDevice) {
                .n_ref = 1,
                .fd = TAKE_FD(loop_with_fd),
                .lock_fd = TAKE_FD(lock_fd),
                .node = TAKE_PTR(node),
                .nr = nr,
                .devno = devno,
                .dev = TAKE_PTR(dev),
                .diskseq = diskseq,
                .sector_size = c->block_size,
                .device_size = device_size,
                .created = true,
        };

        *ret = TAKE_PTR(d);
        return 0;
}
377 | ||
/* Creates a loopback block device backed by 'fd' (covering the given offset/size window), allocating
 * a free loop device via /dev/loop-control and retrying on races. 'path' (optional) is recorded as
 * the backing file path; sector_size may be 0 (use 512), UINT32_MAX (probe/propagate), or explicit.
 * On success stores a new LoopDevice in *ret. If 'fd' already refers to a whole block device, no loop
 * device is created and the device is opened directly instead. */
static int loop_device_make_internal(
                const char *path,
                int fd,
                int open_flags,
                uint64_t offset,
                uint64_t size,
                uint32_t sector_size,
                uint32_t loop_flags,
                int lock_op,
                LoopDevice **ret) {

        _cleanup_(loop_device_unrefp) LoopDevice *d = NULL;
        _cleanup_close_ int reopened_fd = -EBADF, control = -EBADF;
        _cleanup_free_ char *backing_file = NULL;
        struct loop_config config;
        int r, f_flags;
        struct stat st;

        assert(ret);
        assert(IN_SET(open_flags, O_RDWR, O_RDONLY));

        if (fstat(ASSERT_FD(fd), &st) < 0)
                return -errno;

        if (S_ISBLK(st.st_mode)) {
                if (offset == 0 && IN_SET(size, 0, UINT64_MAX))
                        /* If this is already a block device and we are supposed to cover the whole of it
                         * then store an fd to the original open device node — and do not actually create an
                         * unnecessary loopback device for it. */
                        return loop_device_open_from_fd(fd, open_flags, lock_op, ret);
        } else {
                r = stat_verify_regular(&st);
                if (r < 0)
                        return r;
        }

        /* Record the backing file path: prefer the caller-supplied path, else resolve it from the fd. */
        if (path) {
                r = path_make_absolute_cwd(path, &backing_file);
                if (r < 0)
                        return r;

                path_simplify(backing_file);
        } else {
                r = fd_get_path(fd, &backing_file);
                if (r < 0)
                        return r;
        }

        f_flags = fcntl(fd, F_GETFL);
        if (f_flags < 0)
                return -errno;

        if (FLAGS_SET(loop_flags, LO_FLAGS_DIRECT_IO) != FLAGS_SET(f_flags, O_DIRECT)) {
                /* If LO_FLAGS_DIRECT_IO is requested, then make sure we have the fd open with O_DIRECT, as
                 * that's required. Conversely, if it's off require that O_DIRECT is off too (that's because
                 * new kernels will implicitly enable LO_FLAGS_DIRECT_IO if O_DIRECT is set).
                 *
                 * Our intention here is that LO_FLAGS_DIRECT_IO is the primary knob, and O_DIRECT derived
                 * from that automatically. */

                reopened_fd = fd_reopen(fd, (FLAGS_SET(loop_flags, LO_FLAGS_DIRECT_IO) ? O_DIRECT : 0)|O_CLOEXEC|O_NONBLOCK|open_flags);
                if (reopened_fd < 0) {
                        if (!FLAGS_SET(loop_flags, LO_FLAGS_DIRECT_IO))
                                return log_debug_errno(reopened_fd, "Failed to reopen file descriptor without O_DIRECT: %m");

                        /* Some file systems might not support O_DIRECT, let's gracefully continue without it then. */
                        log_debug_errno(reopened_fd, "Failed to enable O_DIRECT for backing file descriptor for loopback device. Continuing without.");
                        loop_flags &= ~LO_FLAGS_DIRECT_IO;
                } else
                        fd = reopened_fd; /* From now on, operate on our new O_DIRECT fd */
        }

        control = open("/dev/loop-control", O_RDWR|O_CLOEXEC|O_NOCTTY|O_NONBLOCK);
        if (control < 0)
                return -errno;

        if (sector_size == 0)
                /* If no sector size is specified, default to the classic default */
                sector_size = 512;
        else if (sector_size == UINT32_MAX) {

                if (S_ISBLK(st.st_mode))
                        /* If the sector size is specified as UINT32_MAX we'll propagate the sector size of
                         * the underlying block device. */
                        r = blockdev_get_sector_size(fd, &sector_size);
                else {
                        _cleanup_close_ int non_direct_io_fd = -EBADF;
                        int probe_fd;

                        assert(S_ISREG(st.st_mode));

                        /* If sector size is specified as UINT32_MAX, we'll try to probe the right sector
                         * size of the image in question by looking for the GPT partition header at various
                         * offsets. This of course only works if the image already has a disk label.
                         *
                         * So here we actually want to read the file contents ourselves. This is quite likely
                         * not going to work if we managed to enable O_DIRECT, because in such a case there
                         * are some pretty strict alignment requirements to offset, size and target, but
                         * there's no way to query what alignment specifically is actually required. Hence,
                         * let's avoid the mess, and temporarily open an fd without O_DIRECT for the probing
                         * logic. */

                        if (FLAGS_SET(loop_flags, LO_FLAGS_DIRECT_IO)) {
                                non_direct_io_fd = fd_reopen(fd, O_RDONLY|O_CLOEXEC|O_NONBLOCK);
                                if (non_direct_io_fd < 0)
                                        return non_direct_io_fd;

                                probe_fd = non_direct_io_fd;
                        } else
                                probe_fd = fd;

                        r = probe_sector_size(probe_fd, &sector_size);
                }
                if (r < 0)
                        return r;
        }

        config = (struct loop_config) {
                .fd = fd,
                .block_size = sector_size,
                .info = {
                        /* Use the specified flags, but configure the read-only flag from the open flags, and force autoclear */
                        .lo_flags = (loop_flags & ~LO_FLAGS_READ_ONLY) | ((open_flags & O_ACCMODE_STRICT) == O_RDONLY ? LO_FLAGS_READ_ONLY : 0) | LO_FLAGS_AUTOCLEAR,
                        .lo_offset = offset,
                        .lo_sizelimit = size == UINT64_MAX ? 0 : size,
                },
        };

        /* Loop around LOOP_CTL_GET_FREE, since at the moment we attempt to open the returned device it might
         * be gone already, taken by somebody else racing against us. */
        for (unsigned n_attempts = 0;;) {
                usec_t usec;
                int nr;

                /* Let's take a lock on the control device first. On a busy system, where many programs
                 * attempt to allocate a loopback device at the same time, we might otherwise keep looping
                 * around relatively heavy operations: asking for a free loopback device, then opening it,
                 * validating it, attaching something to it. Let's serialize this whole operation, to make
                 * unnecessary busywork less likely. Note that this is just something we do to optimize our
                 * own code (and whoever else decides to use LOCK_EX locks for this), taking this lock is not
                 * necessary, it just means it's less likely we have to iterate through this loop again and
                 * again if our own code races against our own code.
                 *
                 * Note: our lock protocol is to take the /dev/loop-control lock first, and the block device
                 * lock second, if both are taken, and always in this order, to avoid ABBA locking issues. */
                if (flock(control, LOCK_EX) < 0)
                        return -errno;

                nr = ioctl(control, LOOP_CTL_GET_FREE);
                if (nr < 0)
                        return -errno;

                r = loop_configure(nr, open_flags, lock_op, &config, &d);
                if (r >= 0)
                        break;

                /* -ENODEV or friends: Somebody might've gotten the same number from the kernel, used the
                 * device, and called LOOP_CTL_REMOVE on it. Let's retry with a new number.
                 * -EBUSY: a file descriptor is already bound to the loopback block device.
                 * -EUCLEAN: some left-over partition devices that were cleaned up.
                 * -ENOANO: we tried to use LO_FLAGS_DIRECT_IO but the kernel rejected it. */
                if (!ERRNO_IS_DEVICE_ABSENT(r) && !IN_SET(r, -EBUSY, -EUCLEAN, -ENOANO))
                        return r;

                /* OK, this didn't work, let's try again a bit later, but first release the lock on the
                 * control device */
                if (flock(control, LOCK_UN) < 0)
                        return -errno;

                if (++n_attempts >= 64) /* Give up eventually */
                        return -EBUSY;

                /* If we failed to enable direct IO mode, let's retry without it. We restart the process as
                 * on some combination of kernel version and storage filesystem, the kernel is very unhappy
                 * about a failed DIRECT_IO enablement and throws I/O errors. */
                if (r == -ENOANO && FLAGS_SET(config.info.lo_flags, LO_FLAGS_DIRECT_IO)) {
                        config.info.lo_flags &= ~LO_FLAGS_DIRECT_IO;
                        open_flags &= ~O_DIRECT;

                        int non_direct_io_fd = fd_reopen(config.fd, O_CLOEXEC|O_NONBLOCK|open_flags);
                        if (non_direct_io_fd < 0)
                                return log_debug_errno(
                                                non_direct_io_fd,
                                                "Failed to reopen file descriptor without O_DIRECT: %m");

                        safe_close(reopened_fd);
                        fd = config.fd = /* For cleanups */ reopened_fd = non_direct_io_fd;
                }

                /* Wait some random time, to make collision less likely. Let's pick a random time in the
                 * range 0ms…250ms, linearly scaled by the number of failed attempts. */
                usec = random_u64_range(UINT64_C(10) * USEC_PER_MSEC +
                                        UINT64_C(240) * USEC_PER_MSEC * n_attempts/64);
                log_debug("Trying again after %s.", FORMAT_TIMESPAN(usec, USEC_PER_MSEC));
                (void) usleep_safe(usec);
        }

        d->backing_file = TAKE_PTR(backing_file);
        d->backing_inode = st.st_ino;
        d->backing_devno = st.st_dev;

        log_debug("Successfully acquired %s, devno=%u:%u, nr=%i, diskseq=%" PRIu64,
                  d->node,
                  major(d->devno), minor(d->devno),
                  d->nr,
                  d->diskseq);

        *ret = TAKE_PTR(d);
        return 0;
}
588 | ||
/* Applies the $SYSTEMD_LOOP_DIRECT_IO environment override to the given loop flags. Direct IO is on
 * by default; only an explicit false value in the environment turns it off. */
static uint32_t loop_flags_mangle(uint32_t loop_flags) {
        int r = getenv_bool("SYSTEMD_LOOP_DIRECT_IO");
        if (r < 0 && r != -ENXIO)
                log_debug_errno(r, "Failed to parse $SYSTEMD_LOOP_DIRECT_IO, ignoring: %m");

        /* r == 0 means explicitly disabled; unset (-ENXIO), true, or a parse error all enable it. */
        if (r == 0)
                loop_flags &= ~LO_FLAGS_DIRECT_IO;
        else
                loop_flags |= LO_FLAGS_DIRECT_IO;

        return loop_flags;
}
598 | ||
599 | int loop_device_make( | |
600 | int fd, | |
601 | int open_flags, | |
602 | uint64_t offset, | |
603 | uint64_t size, | |
604 | uint32_t sector_size, | |
605 | uint32_t loop_flags, | |
606 | int lock_op, | |
607 | LoopDevice **ret) { | |
608 | ||
609 | assert(fd >= 0); | |
610 | assert(ret); | |
611 | ||
612 | return loop_device_make_internal( | |
613 | NULL, | |
614 | fd, | |
615 | open_flags, | |
616 | offset, | |
617 | size, | |
618 | sector_size, | |
619 | loop_flags_mangle(loop_flags), | |
620 | lock_op, | |
621 | ret); | |
622 | } | |
623 | ||
/* Creates a loopback device for the file at dir_fd/path. open_flags may be O_RDWR, O_RDONLY, or
 * negative to mean "writable if possible, read-only otherwise". Tries O_DIRECT first when direct IO
 * is requested and transparently falls back when the file system does not support it. */
int loop_device_make_by_path_at(
                int dir_fd,
                const char *path,
                int open_flags,
                uint32_t sector_size,
                uint32_t loop_flags,
                int lock_op,
                LoopDevice **ret) {

        int r, basic_flags, direct_flags, rdwr_flags;
        _cleanup_close_ int fd = -EBADF;
        bool direct = false;

        assert(dir_fd >= 0 || dir_fd == AT_FDCWD);
        assert(path);
        assert(ret);
        assert(open_flags < 0 || IN_SET(open_flags, O_RDWR, O_RDONLY));

        /* Passing < 0 as open_flags here means we'll try to open the device writable if we can, retrying
         * read-only if we cannot. */

        loop_flags = loop_flags_mangle(loop_flags);

        /* Let's open with O_DIRECT if we can. But not all file systems support that, hence fall back to
         * non-O_DIRECT mode automatically, if it fails. */

        basic_flags = O_CLOEXEC|O_NONBLOCK|O_NOCTTY;
        direct_flags = FLAGS_SET(loop_flags, LO_FLAGS_DIRECT_IO) ? O_DIRECT : 0;
        rdwr_flags = open_flags >= 0 ? open_flags : O_RDWR;

        fd = xopenat(dir_fd, path, basic_flags|direct_flags|rdwr_flags);
        if (fd < 0 && direct_flags != 0) /* If we had O_DIRECT on, and things failed with that, let's immediately try again without */
                fd = xopenat(dir_fd, path, basic_flags|rdwr_flags);
        else
                /* Note: 'direct' stays false when the O_DIRECT retry above succeeded — the retry
                 * deliberately dropped O_DIRECT. */
                direct = direct_flags != 0;
        if (fd < 0) {
                r = fd;

                /* Retry read-only? */
                if (open_flags >= 0 || !(ERRNO_IS_PRIVILEGE(r) || r == -EROFS))
                        return r;

                fd = xopenat(dir_fd, path, basic_flags|direct_flags|O_RDONLY);
                if (fd < 0 && direct_flags != 0) /* as above */
                        fd = xopenat(dir_fd, path, basic_flags|O_RDONLY);
                else
                        direct = direct_flags != 0;
                if (fd < 0)
                        return r; /* Propagate original error */

                open_flags = O_RDONLY;
        } else if (open_flags < 0)
                open_flags = O_RDWR;

        log_debug("Opened '%s' in %s access mode%s, with O_DIRECT %s%s.",
                  path,
                  open_flags == O_RDWR ? "O_RDWR" : "O_RDONLY",
                  open_flags != rdwr_flags ? " (O_RDWR was requested but not allowed)" : "",
                  direct ? "enabled" : "disabled",
                  direct != (direct_flags != 0) ? " (O_DIRECT was requested but not supported)" : "");

        return loop_device_make_internal(
                        dir_fd == AT_FDCWD ? path : NULL,
                        fd,
                        open_flags,
                        /* offset = */ 0,
                        /* size = */ 0,
                        sector_size,
                        loop_flags,
                        lock_op,
                        ret);
}
696 | ||
697 | int loop_device_make_by_path_memory( | |
698 | const char *path, | |
699 | int open_flags, | |
700 | uint32_t sector_size, | |
701 | uint32_t loop_flags, | |
702 | int lock_op, | |
703 | LoopDevice **ret) { | |
704 | ||
705 | _cleanup_close_ int fd = -EBADF, mfd = -EBADF; | |
706 | _cleanup_free_ char *fn = NULL; | |
707 | struct stat st; | |
708 | int r; | |
709 | ||
710 | assert(path); | |
711 | assert(IN_SET(open_flags, O_RDWR, O_RDONLY)); | |
712 | assert(ret); | |
713 | ||
714 | loop_flags &= ~LO_FLAGS_DIRECT_IO; /* memfds don't support O_DIRECT, hence LO_FLAGS_DIRECT_IO can't be used either */ | |
715 | ||
716 | fd = open(path, O_CLOEXEC|O_NONBLOCK|O_NOCTTY|O_RDONLY); | |
717 | if (fd < 0) | |
718 | return -errno; | |
719 | ||
720 | if (fstat(fd, &st) < 0) | |
721 | return -errno; | |
722 | ||
723 | if (!S_ISREG(st.st_mode) && !S_ISBLK(st.st_mode)) | |
724 | return -EBADF; | |
725 | ||
726 | r = path_extract_filename(path, &fn); | |
727 | if (r < 0) | |
728 | return r; | |
729 | ||
730 | mfd = memfd_clone_fd(fd, fn, open_flags|O_CLOEXEC); | |
731 | if (mfd < 0) | |
732 | return mfd; | |
733 | ||
734 | fd = safe_close(fd); /* Let's close the original early */ | |
735 | ||
736 | return loop_device_make_internal(NULL, mfd, open_flags, 0, 0, sector_size, loop_flags, lock_op, ret); | |
737 | } | |
738 | ||
/* Destructor for LoopDevice objects: releases our locks, and — unless the device is foreign or was
 * relinquished — synchronously detaches the loopback device, removes its partitions, and asks the
 * kernel to delete it. Always returns NULL, so it can be used in "d = loop_device_free(d);" style. */
static LoopDevice* loop_device_free(LoopDevice *d) {
        _cleanup_close_ int control = -EBADF;
        int r;

        if (!d)
                return NULL;

        /* Release any lock we might have on the device first. We want to open+lock the /dev/loop-control
         * device below, but our lock protocol says that if both control and block device locks are taken,
         * the control lock needs to be taken first, the block device lock second — in order to avoid ABBA
         * locking issues. Moreover, we want to issue LOOP_CLR_FD on the block device further down, and that
         * would fail if we had another fd open to the device. */
        d->lock_fd = safe_close(d->lock_fd);

        /* Let's open the control device early, and lock it, so that we can release our block device and
         * delete it in a synchronized fashion, and allocators won't needlessly see the block device as free
         * while we are about to delete it. */
        if (!LOOP_DEVICE_IS_FOREIGN(d) && !d->relinquished) {
                control = open("/dev/loop-control", O_RDWR|O_CLOEXEC|O_NOCTTY|O_NONBLOCK);
                if (control < 0)
                        log_debug_errno(errno, "Failed to open loop control device, cannot remove loop device '%s', ignoring: %m", strna(d->node));
                else if (flock(control, LOCK_EX) < 0)
                        log_debug_errno(errno, "Failed to lock loop control device, ignoring: %m");
        }

        /* Then let's release the loopback block device */
        if (d->fd >= 0) {
                /* Implicitly sync the device, since otherwise in-flight blocks might not get written */
                if (fsync(d->fd) < 0)
                        log_debug_errno(errno, "Failed to sync loop block device, ignoring: %m");

                if (!LOOP_DEVICE_IS_FOREIGN(d) && !d->relinquished) {
                        /* We are supposed to clear the loopback device. Let's do this synchronously: lock
                         * the device, manually remove all partitions and then clear it. This should ensure
                         * udev doesn't concurrently access the devices, and we can be reasonably sure that
                         * once we are done here the device is cleared and all its partition children
                         * removed. Note that we lock our primary device fd here (and not a separate locking
                         * fd, as we do during allocation, since we want to keep the lock all the way through
                         * the LOOP_CLR_FD, but that call would fail if we had more than one fd open.) */

                        if (flock(d->fd, LOCK_EX) < 0)
                                log_debug_errno(errno, "Failed to lock loop block device, ignoring: %m");

                        r = block_device_remove_all_partitions(d->dev, d->fd);
                        if (r < 0)
                                log_debug_errno(r, "Failed to remove partitions of loopback block device, ignoring: %m");

                        if (ioctl(d->fd, LOOP_CLR_FD) < 0)
                                log_debug_errno(errno, "Failed to clear loop device, ignoring: %m");
                }

                safe_close(d->fd);
        }

        /* Now that the block device is released, let's also try to remove it */
        if (control >= 0) {
                useconds_t delay = 5 * USEC_PER_MSEC; /* A total delay of 5090 ms between 39 attempts,
                                                       * (4*5 + 5*10 + 5*20 + … + 3*640) = 5090. */

                for (unsigned attempt = 1;; attempt++) {
                        if (ioctl(control, LOOP_CTL_REMOVE, d->nr) >= 0)
                                break;
                        /* EBUSY means the device is still referenced by someone else — retry with backoff,
                         * giving up after 38 failed attempts. Any other error is final. */
                        if (errno != EBUSY || attempt > 38) {
                                log_debug_errno(errno, "Failed to remove device %s: %m", strna(d->node));
                                break;
                        }
                        if (attempt % 5 == 0) {
                                log_debug("Device is still busy after %u attempts…", attempt);
                                delay *= 2; /* double the delay every 5 attempts */
                        }

                        (void) usleep_safe(delay);
                }
        }

        free(d->node);
        sd_device_unref(d->dev);
        free(d->backing_file);
        return mfree(d);
}
819 | ||
/* Generates loop_device_ref()/loop_device_unref(), calling loop_device_free() on the last unref */
DEFINE_TRIVIAL_REF_UNREF_FUNC(LoopDevice, loop_device, loop_device_free);
821 | ||
822 | void loop_device_relinquish(LoopDevice *d) { | |
823 | assert(d); | |
824 | ||
825 | /* Don't attempt to clean up the loop device anymore from this point on. Leave the clean-ing up to the kernel | |
826 | * itself, using the loop device "auto-clear" logic we already turned on when creating the device. */ | |
827 | ||
828 | d->relinquished = true; | |
829 | } | |
830 | ||
831 | void loop_device_unrelinquish(LoopDevice *d) { | |
832 | assert(d); | |
833 | d->relinquished = false; | |
834 | } | |
835 | ||
/* Builds a LoopDevice object around an already existing block device. The device is treated as
 * foreign: .relinquished is set, hence releasing the object will never detach or delete the device.
 * open_flags must be O_RDWR or O_RDONLY; lock_op is a flock() operation applied via a separate lock
 * fd (pass LOCK_UN to take no lock). Returns 0 on success, negative errno on failure. */
int loop_device_open(
                sd_device *dev,
                int open_flags,
                int lock_op,
                LoopDevice **ret) {

        _cleanup_close_ int fd = -EBADF, lock_fd = -EBADF;
        _cleanup_free_ char *node = NULL, *backing_file = NULL;
        dev_t devnum, backing_devno = 0;
        struct loop_info64 info;
        ino_t backing_inode = 0;
        uint64_t diskseq = 0;
        LoopDevice *d;
        const char *s;
        int r, nr = -1;

        assert(dev);
        assert(IN_SET(open_flags, O_RDWR, O_RDONLY));
        assert(ret);

        /* Even if fd is provided through the argument in loop_device_open_from_fd(), we reopen the inode
         * here, instead of keeping just a dup() clone of it around, since we want to ensure that the
         * O_DIRECT flag of the handle we keep is off, we have our own file index, and have the right
         * read/write mode in effect. */
        fd = sd_device_open(dev, O_CLOEXEC|O_NONBLOCK|O_NOCTTY|open_flags);
        if (fd < 0)
                return fd;

        if ((lock_op & ~LOCK_NB) != LOCK_UN) {
                lock_fd = open_lock_fd(fd, lock_op);
                if (lock_fd < 0)
                        return lock_fd;
        }

        /* If this actually is a loopback device, collect its number and backing file information. If it
         * is some other kind of block device, the ioctl fails and nr stays at -1. */
        if (ioctl(fd, LOOP_GET_STATUS64, &info) >= 0) {
#if HAVE_VALGRIND_MEMCHECK_H
                /* Valgrind currently doesn't know LOOP_GET_STATUS64. Remove this once it does */
                VALGRIND_MAKE_MEM_DEFINED(&info, sizeof(info));
#endif
                nr = info.lo_number;

                if (sd_device_get_sysattr_value(dev, "loop/backing_file", &s) >= 0) {
                        backing_file = strdup(s);
                        if (!backing_file)
                                return -ENOMEM;
                }

                backing_devno = info.lo_device;
                backing_inode = info.lo_inode;
        }

        /* Missing diskseq support is not fatal; diskseq then stays 0 */
        r = fd_get_diskseq(fd, &diskseq);
        if (r < 0 && r != -EOPNOTSUPP)
                return r;

        uint32_t sector_size;
        r = blockdev_get_sector_size(fd, &sector_size);
        if (r < 0)
                return r;

        uint64_t device_size;
        r = blockdev_get_device_size(fd, &device_size);
        if (r < 0)
                return r;

        r = sd_device_get_devnum(dev, &devnum);
        if (r < 0)
                return r;

        r = sd_device_get_devname(dev, &s);
        if (r < 0)
                return r;

        node = strdup(s);
        if (!node)
                return -ENOMEM;

        d = new(LoopDevice, 1);
        if (!d)
                return -ENOMEM;

        /* Transfer ownership of the fds and strings into the object */
        *d = (LoopDevice) {
                .n_ref = 1,
                .fd = TAKE_FD(fd),
                .lock_fd = TAKE_FD(lock_fd),
                .nr = nr,
                .node = TAKE_PTR(node),
                .dev = sd_device_ref(dev),
                .backing_file = TAKE_PTR(backing_file),
                .backing_inode = backing_inode,
                .backing_devno = backing_devno,
                .relinquished = true, /* It's not ours, don't try to destroy it when this object is freed */
                .devno = devnum,
                .diskseq = diskseq,
                .sector_size = sector_size,
                .device_size = device_size,
                .created = false,
        };

        *ret = d;
        return 0;
}
938 | ||
939 | int loop_device_open_from_fd( | |
940 | int fd, | |
941 | int open_flags, | |
942 | int lock_op, | |
943 | LoopDevice **ret) { | |
944 | ||
945 | _cleanup_(sd_device_unrefp) sd_device *dev = NULL; | |
946 | int r; | |
947 | ||
948 | r = block_device_new_from_fd(ASSERT_FD(fd), 0, &dev); | |
949 | if (r < 0) | |
950 | return r; | |
951 | ||
952 | return loop_device_open(dev, open_flags, lock_op, ret); | |
953 | } | |
954 | ||
955 | int loop_device_open_from_path( | |
956 | const char *path, | |
957 | int open_flags, | |
958 | int lock_op, | |
959 | LoopDevice **ret) { | |
960 | ||
961 | _cleanup_(sd_device_unrefp) sd_device *dev = NULL; | |
962 | int r; | |
963 | ||
964 | assert(path); | |
965 | ||
966 | r = block_device_new_from_path(path, 0, &dev); | |
967 | if (r < 0) | |
968 | return r; | |
969 | ||
970 | return loop_device_open(dev, open_flags, lock_op, ret); | |
971 | } | |
972 | ||
/* Resizes the partition the loopback device refer to (assuming it refers to one instead of an actual
 * loopback device), and changes the offset, if needed. This is a fancy wrapper around
 * BLKPG_RESIZE_PARTITION. offset and size are in bytes; UINT64_MAX for either means "keep current
 * value". Returns -ENOTTY if the fd does not actually refer to a partition. */
static int resize_partition(int partition_fd, uint64_t offset, uint64_t size) {
        char sysfs[STRLEN("/sys/dev/block/:/partition") + 2*DECIMAL_STR_MAX(dev_t) + 1];
        _cleanup_free_ char *buffer = NULL;
        uint64_t current_offset, current_size, partno;
        _cleanup_close_ int whole_fd = -EBADF;
        struct stat st;
        dev_t devno;
        int r;

        if (fstat(ASSERT_FD(partition_fd), &st) < 0)
                return -errno;

        assert(S_ISBLK(st.st_mode));

        /* Determine the partition number via sysfs */
        xsprintf(sysfs, "/sys/dev/block/" DEVNUM_FORMAT_STR "/partition", DEVNUM_FORMAT_VAL(st.st_rdev));
        r = read_one_line_file(sysfs, &buffer);
        if (r == -ENOENT) /* not a partition, cannot resize */
                return -ENOTTY;
        if (r < 0)
                return r;
        r = safe_atou64(buffer, &partno);
        if (r < 0)
                return r;

        /* Determine the current start offset; sysfs reports it in 512-byte sectors */
        xsprintf(sysfs, "/sys/dev/block/" DEVNUM_FORMAT_STR "/start", DEVNUM_FORMAT_VAL(st.st_rdev));

        buffer = mfree(buffer);
        r = read_one_line_file(sysfs, &buffer);
        if (r < 0)
                return r;
        r = safe_atou64(buffer, &current_offset);
        if (r < 0)
                return r;
        if (current_offset > UINT64_MAX/512U) /* overflow check before converting sectors to bytes */
                return -EINVAL;
        current_offset *= 512U;

        r = blockdev_get_device_size(partition_fd, &current_size);
        if (r < 0)
                return r;

        /* Shortcut: nothing requested, or current values already match the request */
        if (size == UINT64_MAX && offset == UINT64_MAX)
                return 0;
        if (current_size == size && current_offset == offset)
                return 0;

        /* Determine the devnum of the whole block device the partition belongs to */
        xsprintf(sysfs, "/sys/dev/block/" DEVNUM_FORMAT_STR "/../dev", DEVNUM_FORMAT_VAL(st.st_rdev));

        buffer = mfree(buffer);
        r = read_one_line_file(sysfs, &buffer);
        if (r < 0)
                return r;
        r = parse_devnum(buffer, &devno);
        if (r < 0)
                return r;

        whole_fd = r = device_open_from_devnum(S_IFBLK, devno, O_RDWR|O_CLOEXEC|O_NONBLOCK|O_NOCTTY, NULL);
        if (r < 0)
                return r;

        /* The resize ioctl must be issued on the whole device, not the partition itself */
        return block_device_resize_partition(
                        whole_fd,
                        partno,
                        offset == UINT64_MAX ? current_offset : offset,
                        size == UINT64_MAX ? current_size : size);
}
1043 | ||
int loop_device_refresh_size(LoopDevice *d, uint64_t offset, uint64_t size) {
        struct loop_info64 info;

        assert(d);
        assert(d->fd >= 0);

        /* Changes the offset/start of the loop device relative to the beginning of the underlying file or
         * block device. If this loop device actually refers to a partition and not a loopback device, we'll
         * try to adjust the partition offsets instead.
         *
         * If either offset or size is UINT64_MAX we won't change that parameter. */

        if (d->nr < 0) /* not a loopback device */
                return resize_partition(d->fd, offset, size);

        if (ioctl(d->fd, LOOP_GET_STATUS64, &info) < 0)
                return -errno;

#if HAVE_VALGRIND_MEMCHECK_H
        /* Valgrind currently doesn't know LOOP_GET_STATUS64. Remove this once it does */
        VALGRIND_MAKE_MEM_DEFINED(&info, sizeof(info));
#endif

        /* Shortcut: nothing requested, or the device already has the requested values */
        if (size == UINT64_MAX && offset == UINT64_MAX)
                return 0;
        if (info.lo_sizelimit == size && info.lo_offset == offset)
                return 0;

        if (size != UINT64_MAX)
                info.lo_sizelimit = size;
        if (offset != UINT64_MAX)
                info.lo_offset = offset;

        return RET_NERRNO(ioctl(d->fd, LOOP_SET_STATUS64, &info));
}
1079 | ||
1080 | int loop_device_flock(LoopDevice *d, int operation) { | |
1081 | assert(IN_SET(operation & ~LOCK_NB, LOCK_UN, LOCK_SH, LOCK_EX)); | |
1082 | assert(d); | |
1083 | ||
1084 | /* When unlocking just close the lock fd */ | |
1085 | if ((operation & ~LOCK_NB) == LOCK_UN) { | |
1086 | d->lock_fd = safe_close(d->lock_fd); | |
1087 | return 0; | |
1088 | } | |
1089 | ||
1090 | /* If we had no lock fd so far, create one and lock it right-away */ | |
1091 | if (d->lock_fd < 0) { | |
1092 | d->lock_fd = open_lock_fd(ASSERT_FD(d->fd), operation); | |
1093 | if (d->lock_fd < 0) | |
1094 | return d->lock_fd; | |
1095 | ||
1096 | return 0; | |
1097 | } | |
1098 | ||
1099 | /* Otherwise change the current lock mode on the existing fd */ | |
1100 | return RET_NERRNO(flock(d->lock_fd, operation)); | |
1101 | } | |
1102 | ||
1103 | int loop_device_sync(LoopDevice *d) { | |
1104 | assert(d); | |
1105 | ||
1106 | /* We also do this implicitly in loop_device_unref(). Doing this explicitly here has the benefit that | |
1107 | * we can check the return value though. */ | |
1108 | ||
1109 | return RET_NERRNO(fsync(ASSERT_FD(d->fd))); | |
1110 | } | |
1111 | ||
1112 | int loop_device_set_autoclear(LoopDevice *d, bool autoclear) { | |
1113 | struct loop_info64 info; | |
1114 | ||
1115 | assert(d); | |
1116 | ||
1117 | if (ioctl(ASSERT_FD(d->fd), LOOP_GET_STATUS64, &info) < 0) | |
1118 | return -errno; | |
1119 | ||
1120 | if (autoclear == FLAGS_SET(info.lo_flags, LO_FLAGS_AUTOCLEAR)) | |
1121 | return 0; | |
1122 | ||
1123 | SET_FLAG(info.lo_flags, LO_FLAGS_AUTOCLEAR, autoclear); | |
1124 | ||
1125 | if (ioctl(d->fd, LOOP_SET_STATUS64, &info) < 0) | |
1126 | return -errno; | |
1127 | ||
1128 | return 1; | |
1129 | } | |
1130 | ||
/* Sets the free-form .lo_file_name field of the loop device; NULL clears it. Returns 0 if the field
 * already had the requested value, 1 if it was changed, -ENOBUFS if the name does not fit the
 * fixed-size kernel field, other negative errno on failure. */
int loop_device_set_filename(LoopDevice *d, const char *name) {
        struct loop_info64 info;

        assert(d);

        /* Sets the .lo_file_name of the loopback device. This is supposed to contain the path to the file
         * backing the block device, but is actually just a free-form string you can pass to the kernel. Most
         * tools that actually care for the backing file path use the sysfs attribute file loop/backing_file
         * which is a kernel generated string, subject to file system namespaces and such.
         *
         * .lo_file_name is useful since userspace can select it freely when creating a loopback block
         * device, and we can use it for /dev/disk/by-loop-ref/ symlinks, and similar, so that apps can
         * recognize their own loopback files. */

        /* Reject names that would not fit (including the terminating NUL) rather than truncating */
        if (name && strlen(name) >= sizeof(info.lo_file_name))
                return -ENOBUFS;

        if (ioctl(ASSERT_FD(d->fd), LOOP_GET_STATUS64, &info) < 0)
                return -errno;

        if (strneq((char*) info.lo_file_name, strempty(name), sizeof(info.lo_file_name)))
                return 0;

        if (name) {
                /* strncpy() alone does not guarantee NUL termination, hence terminate explicitly;
                 * the length check above already ensured the name fits without truncation */
                strncpy((char*) info.lo_file_name, name, sizeof(info.lo_file_name)-1);
                info.lo_file_name[sizeof(info.lo_file_name)-1] = 0;
        } else
                memzero(info.lo_file_name, sizeof(info.lo_file_name));

        if (ioctl(d->fd, LOOP_SET_STATUS64, &info) < 0)
                return -errno;

        return 1;
}