]>
Commit | Line | Data |
---|---|---|
53e1b683 | 1 | /* SPDX-License-Identifier: LGPL-2.1+ */ |
8c1be37e | 2 | |
10c1b188 LP |
3 | #if HAVE_VALGRIND_MEMCHECK_H |
4 | #include <valgrind/memcheck.h> | |
5 | #endif | |
6 | ||
dccca82b | 7 | #include <errno.h> |
8c1be37e | 8 | #include <fcntl.h> |
f1443709 LP |
9 | #include <linux/blkpg.h> |
10 | #include <linux/fs.h> | |
8c1be37e | 11 | #include <linux/loop.h> |
441ec804 | 12 | #include <sys/file.h> |
8c1be37e | 13 | #include <sys/ioctl.h> |
f2d9213f | 14 | #include <unistd.h> |
8c1be37e | 15 | |
021bf175 LP |
16 | #include "sd-device.h" |
17 | ||
8c1be37e | 18 | #include "alloc-util.h" |
86c1c1f3 | 19 | #include "blockdev-util.h" |
021bf175 | 20 | #include "device-util.h" |
b0a94268 | 21 | #include "errno-util.h" |
8c1be37e | 22 | #include "fd-util.h" |
f1443709 | 23 | #include "fileio.h" |
8c1be37e | 24 | #include "loop-util.h" |
86c1c1f3 | 25 | #include "missing_loop.h" |
f1443709 | 26 | #include "parse-util.h" |
b202ec20 | 27 | #include "random-util.h" |
3cc44114 | 28 | #include "stat-util.h" |
f1443709 | 29 | #include "stdio-util.h" |
f2d9213f | 30 | #include "string-util.h" |
021bf175 | 31 | #include "tmpfile-util.h" |
8c1be37e | 32 | |
/* Destructor helper for _cleanup_() use: detaches whatever backing file is currently
 * attached to the loopback block device and closes the fd. A negative fd is a no-op,
 * so this is safe to run on an fd that was never opened or already taken over. */
static void cleanup_clear_loop_close(int *fd) {
        if (*fd < 0)
                return;

        /* Best effort: the device might not be bound at all, or clearing might be deferred. */
        (void) ioctl(*fd, LOOP_CLR_FD);
        (void) safe_close(*fd);
}
40 | ||
021bf175 LP |
/* Returns > 0 if the loopback device has a backing file attached, 0 if it is
 * currently unbound, negative errno on any other failure. */
static int loop_is_bound(int fd) {
        struct loop_info64 info;

        assert(fd >= 0);

        if (ioctl(fd, LOOP_GET_STATUS64, &info) >= 0)
                return true; /* some backing file is attached */

        if (errno == ENXIO)
                return false; /* the kernel's way of saying "not bound" */

        return -errno;
}
55 | ||
56 | static int device_has_block_children(sd_device *d) { | |
57 | _cleanup_(sd_device_enumerator_unrefp) sd_device_enumerator *e = NULL; | |
58 | const char *main_sn, *main_ss; | |
59 | sd_device *q; | |
60 | int r; | |
61 | ||
62 | assert(d); | |
63 | ||
64 | /* Checks if the specified device currently has block device children (i.e. partition block | |
65 | * devices). */ | |
66 | ||
67 | r = sd_device_get_sysname(d, &main_sn); | |
68 | if (r < 0) | |
69 | return r; | |
70 | ||
71 | r = sd_device_get_subsystem(d, &main_ss); | |
72 | if (r < 0) | |
73 | return r; | |
74 | ||
75 | if (!streq(main_ss, "block")) | |
76 | return -EINVAL; | |
77 | ||
78 | r = sd_device_enumerator_new(&e); | |
79 | if (r < 0) | |
80 | return r; | |
81 | ||
82 | r = sd_device_enumerator_allow_uninitialized(e); | |
83 | if (r < 0) | |
84 | return r; | |
85 | ||
86 | r = sd_device_enumerator_add_match_parent(e, d); | |
87 | if (r < 0) | |
88 | return r; | |
89 | ||
90 | FOREACH_DEVICE(e, q) { | |
91 | const char *ss, *sn; | |
92 | ||
93 | r = sd_device_get_subsystem(q, &ss); | |
94 | if (r < 0) | |
95 | continue; | |
96 | ||
97 | if (!streq(ss, "block")) | |
98 | continue; | |
99 | ||
100 | r = sd_device_get_sysname(q, &sn); | |
101 | if (r < 0) | |
102 | continue; | |
103 | ||
104 | if (streq(sn, main_sn)) | |
105 | continue; | |
106 | ||
107 | return 1; /* we have block device children */ | |
108 | } | |
109 | ||
110 | return 0; | |
111 | } | |
112 | ||
/* Configures the loopback device (file descriptor 'fd', loop number 'nr') according to 'c':
 * attaches the backing fd, offset, size limit and flags. Tries the modern one-shot
 * LOOP_CONFIGURE ioctl first; *try_loop_configure tracks across calls whether that ioctl is
 * usable, and is set to false once we determine it is unsupported or broken, so subsequent
 * attempts go straight to the classic LOOP_SET_FD + LOOP_SET_STATUS64 path.
 *
 * Returns 0 on success; -EBUSY if the device should be abandoned and the caller should retry
 * with a different device; -EUCLEAN if the device has stale partition children and needs the
 * reattach hack (see attach_empty_file()); other negative errno on failure. */
static int loop_configure(
                int fd,
                int nr,
                const struct loop_config *c,
                bool *try_loop_configure) {

        _cleanup_(sd_device_unrefp) sd_device *d = NULL;
        _cleanup_free_ char *sysname = NULL;
        _cleanup_close_ int lock_fd = -1;
        int r;

        assert(fd >= 0);
        assert(nr >= 0);
        assert(c);
        assert(try_loop_configure);

        if (asprintf(&sysname, "loop%i", nr) < 0)
                return -ENOMEM;

        r = sd_device_new_from_subsystem_sysname(&d, "block", sysname);
        if (r < 0)
                return r;

        /* Let's lock the device before we do anything. We take the BSD lock on a second, separately opened
         * fd for the device. udev after all watches for close() events (specifically IN_CLOSE_WRITE) on
         * block devices to reprobe them, hence by having a separate fd we will later close() we can ensure
         * we trigger udev after everything is done. If we'd lock our own fd instead and keep it open for a
         * long time udev would possibly never run on it again, even though the fd is unlocked, simply
         * because we never close() it. It also has the nice benefit we can use the _cleanup_close_ logic to
         * automatically release the lock, after we are done. */
        lock_fd = fd_reopen(fd, O_RDWR|O_CLOEXEC|O_NONBLOCK|O_NOCTTY);
        if (lock_fd < 0)
                return lock_fd;
        if (flock(lock_fd, LOCK_EX) < 0)
                return -errno;

        /* Let's see if the device is really detached, i.e. currently has no associated partition block
         * devices. On various kernels (such as 5.8) it is possible to have a loopback block device that
         * superficially is detached but still has partition block devices associated for it. They only go
         * away when the device is reattached. (Yes, LOOP_CLR_FD doesn't work then, because officially
         * nothing is attached and LOOP_CTL_REMOVE doesn't either, since it doesn't care about partition
         * block devices. */
        r = device_has_block_children(d);
        if (r < 0)
                return r;
        if (r > 0) {
                r = loop_is_bound(fd);
                if (r < 0)
                        return r;
                if (r > 0)
                        return -EBUSY; /* bound by somebody else, pick a different device */

                return -EUCLEAN; /* Bound but children? Tell caller to reattach something so that the
                                  * partition block devices are gone too. */
        }

        if (*try_loop_configure) {
                if (ioctl(fd, LOOP_CONFIGURE, c) < 0) {
                        /* Do fallback only if LOOP_CONFIGURE is not supported, propagate all other
                         * errors. Note that the kernel is weird: non-existing ioctls currently return EINVAL
                         * rather than ENOTTY on loopback block devices. They should fix that in the kernel,
                         * but in the meantime we accept both here. */
                        if (!ERRNO_IS_NOT_SUPPORTED(errno) && errno != EINVAL)
                                return -errno;

                        *try_loop_configure = false;
                } else {
                        bool good = true;

                        if (c->info.lo_sizelimit != 0) {
                                /* Kernel 5.8 vanilla doesn't properly propagate the size limit into the
                                 * block device. If it's used, let's immediately check if it had the desired
                                 * effect hence. And if not use classic LOOP_SET_STATUS64. */
                                uint64_t z;

                                if (ioctl(fd, BLKGETSIZE64, &z) < 0) {
                                        r = -errno;
                                        goto fail;
                                }

                                if (z != c->info.lo_sizelimit) {
                                        log_debug("LOOP_CONFIGURE is broken, doesn't honour .lo_sizelimit. Falling back to LOOP_SET_STATUS64.");
                                        good = false;
                                }
                        }

                        if (FLAGS_SET(c->info.lo_flags, LO_FLAGS_PARTSCAN)) {
                                /* Kernel 5.8 vanilla doesn't properly propagate the partition scanning flag
                                 * into the block device. Let's hence verify if things work correctly here
                                 * before returning. */

                                r = blockdev_partscan_enabled(fd);
                                if (r < 0)
                                        goto fail;
                                if (r == 0) {
                                        log_debug("LOOP_CONFIGURE is broken, doesn't honour LO_FLAGS_PARTSCAN. Falling back to LOOP_SET_STATUS64.");
                                        good = false;
                                }
                        }

                        if (!good) {
                                /* LOOP_CONFIGURE doesn't work. Remember that. */
                                *try_loop_configure = false;

                                /* We return EBUSY here instead of retrying immediately with LOOP_SET_FD,
                                 * because LOOP_CLR_FD is async: if the operation cannot be executed right
                                 * away it just sets the autoclear flag on the device. This means there's a
                                 * good chance we cannot actually reuse the loopback device right-away. Hence
                                 * let's assume it's busy, avoid the trouble and let the calling loop call us
                                 * again with a new, likely unused device. */
                                r = -EBUSY;
                                goto fail;
                        }

                        return 0;
                }
        }

        /* Since kernel commit 5db470e229e22b7eda6e23b5566e532c96fb5bc3 (kernel v5.0) the LOOP_SET_STATUS64
         * ioctl can return EAGAIN in case we change the lo_offset field, if someone else is accessing the
         * block device while we try to reconfigure it. This is a pretty common case, since udev might
         * instantly start probing the device as soon as we attach an fd to it. Hence handle it in two ways:
         * first, let's take the BSD lock that that ensures that udev will not step in between the point in
         * time where we attach the fd and where we reconfigure the device. Secondly, let's wait 50ms on
         * EAGAIN and retry. The former should be an efficient mechanism to avoid we have to wait 50ms
         * needlessly if we are just racing against udev. The latter is protection against all other cases,
         * i.e. peers that do not take the BSD lock. */

        if (ioctl(fd, LOOP_SET_FD, c->fd) < 0)
                return -errno;

        for (unsigned n_attempts = 0;;) {
                if (ioctl(fd, LOOP_SET_STATUS64, &c->info) >= 0)
                        break;
                if (errno != EAGAIN || ++n_attempts >= 64) {
                        r = log_debug_errno(errno, "Failed to configure loopback device: %m");
                        goto fail;
                }

                /* Sleep some random time, but at least 10ms, at most 250ms. Increase the delay the more
                 * failed attempts we see */
                (void) usleep(UINT64_C(10) * USEC_PER_MSEC +
                              random_u64() % (UINT64_C(240) * USEC_PER_MSEC * n_attempts/64));
        }

        return 0;

fail:
        /* Undo the LOOP_SET_FD/LOOP_CONFIGURE attachment before reporting failure. */
        (void) ioctl(fd, LOOP_CLR_FD);
        return r;
}
264 | ||
021bf175 LP |
265 | static int attach_empty_file(int loop, int nr) { |
266 | _cleanup_close_ int fd = -1; | |
267 | ||
268 | /* So here's the thing: on various kernels (5.8 at least) loop block devices might enter a state | |
269 | * where they are detached but nonetheless have partitions, when used heavily. Accessing these | |
270 | * partitions results in immediatey IO errors. There's no pretty way to get rid of them | |
271 | * again. Neither LOOP_CLR_FD nor LOOP_CTL_REMOVE suffice (see above). What does work is to | |
272 | * reassociate them with a new fd however. This is what we do here hence: we associate the devices | |
377a9545 | 273 | * with an empty file (i.e. an image that definitely has no partitions). We then immediately clear it |
021bf175 LP |
274 | * again. This suffices to make the partitions go away. Ugly but appears to work. */ |
275 | ||
276 | log_debug("Found unattached loopback block device /dev/loop%i with partitions. Attaching empty file to remove them.", nr); | |
277 | ||
278 | fd = open_tmpfile_unlinkable(NULL, O_RDONLY); | |
279 | if (fd < 0) | |
280 | return fd; | |
281 | ||
282 | if (flock(loop, LOCK_EX) < 0) | |
283 | return -errno; | |
284 | ||
285 | if (ioctl(loop, LOOP_SET_FD, fd) < 0) | |
286 | return -errno; | |
287 | ||
288 | if (ioctl(loop, LOOP_SET_STATUS64, &(struct loop_info64) { | |
289 | .lo_flags = LO_FLAGS_READ_ONLY| | |
290 | LO_FLAGS_AUTOCLEAR| | |
291 | LO_FLAGS_PARTSCAN, /* enable partscan, so that the partitions really go away */ | |
292 | }) < 0) | |
293 | return -errno; | |
294 | ||
295 | if (ioctl(loop, LOOP_CLR_FD) < 0) | |
296 | return -errno; | |
297 | ||
298 | /* The caller is expected to immediately close the loopback device after this, so that the BSD lock | |
299 | * is released, and udev sees the changes. */ | |
300 | return 0; | |
301 | } | |
302 | ||
/* Allocates a loopback block device for the file (or block device) referred to by 'fd',
 * with the given backing offset/size and LO_FLAGS_* flags, and returns a LoopDevice
 * object for it in *ret.
 *
 * If 'fd' already refers to a block device and no offset/size narrowing was requested,
 * no new loop device is allocated: a dup of the fd is wrapped in a LoopDevice instead
 * (marked 'relinquished' so we never destroy a device we didn't create), and the fd is
 * returned. Otherwise returns 0 on success, negative errno on failure.
 *
 * open_flags must be O_RDWR or O_RDONLY; size == UINT64_MAX means "no size limit". */
int loop_device_make(
                int fd,
                int open_flags,
                uint64_t offset,
                uint64_t size,
                uint32_t loop_flags,
                LoopDevice **ret) {

        _cleanup_free_ char *loopdev = NULL;
        bool try_loop_configure = true;
        struct loop_config config;
        LoopDevice *d = NULL;
        struct stat st;
        int nr = -1, r;

        assert(fd >= 0);
        assert(ret);
        assert(IN_SET(open_flags, O_RDWR, O_RDONLY));

        if (fstat(fd, &st) < 0)
                return -errno;

        if (S_ISBLK(st.st_mode)) {
                if (ioctl(fd, LOOP_GET_STATUS64, &config.info) >= 0) {
                        /* Oh! This is a loopback device? That's interesting! */

#if HAVE_VALGRIND_MEMCHECK_H
                        /* Valgrind currently doesn't know LOOP_GET_STATUS64. Remove this once it does */
                        VALGRIND_MAKE_MEM_DEFINED(&config.info, sizeof(config.info));
#endif
                        nr = config.info.lo_number;

                        if (asprintf(&loopdev, "/dev/loop%i", nr) < 0)
                                return -ENOMEM;
                }

                if (offset == 0 && IN_SET(size, 0, UINT64_MAX)) {
                        _cleanup_close_ int copy = -1;

                        /* If this is already a block device, store a copy of the fd as it is */

                        copy = fcntl(fd, F_DUPFD_CLOEXEC, 3);
                        if (copy < 0)
                                return -errno;

                        d = new(LoopDevice, 1);
                        if (!d)
                                return -ENOMEM;
                        *d = (LoopDevice) {
                                .fd = TAKE_FD(copy),
                                .nr = nr,
                                .node = TAKE_PTR(loopdev),
                                .relinquished = true, /* It's not allocated by us, don't destroy it when this object is freed */
                        };

                        *ret = d;
                        return d->fd;
                }
        } else {
                /* Not a block device: insist on a regular file as the backing image. */
                r = stat_verify_regular(&st);
                if (r < 0)
                        return r;
        }

        _cleanup_close_ int control = -1;
        _cleanup_(cleanup_clear_loop_close) int loop_with_fd = -1;

        control = open("/dev/loop-control", O_RDWR|O_CLOEXEC|O_NOCTTY|O_NONBLOCK);
        if (control < 0)
                return -errno;

        config = (struct loop_config) {
                .fd = fd,
                .info = {
                        /* Use the specified flags, but configure the read-only flag from the open flags, and force autoclear */
                        .lo_flags = (loop_flags & ~LO_FLAGS_READ_ONLY) | ((open_flags & O_ACCMODE) == O_RDONLY ? LO_FLAGS_READ_ONLY : 0) | LO_FLAGS_AUTOCLEAR,
                        .lo_offset = offset,
                        .lo_sizelimit = size == UINT64_MAX ? 0 : size,
                },
        };

        /* Loop around LOOP_CTL_GET_FREE, since at the moment we attempt to open the returned device it might
         * be gone already, taken by somebody else racing against us. */
        for (unsigned n_attempts = 0;;) {
                _cleanup_close_ int loop = -1;

                nr = ioctl(control, LOOP_CTL_GET_FREE);
                if (nr < 0)
                        return -errno;

                if (asprintf(&loopdev, "/dev/loop%i", nr) < 0)
                        return -ENOMEM;

                loop = open(loopdev, O_CLOEXEC|O_NONBLOCK|O_NOCTTY|open_flags);
                if (loop < 0) {
                        /* Somebody might've gotten the same number from the kernel, used the device,
                         * and called LOOP_CTL_REMOVE on it. Let's retry with a new number. */
                        if (!IN_SET(errno, ENOENT, ENXIO))
                                return -errno;
                } else {
                        r = loop_configure(loop, nr, &config, &try_loop_configure);
                        if (r >= 0) {
                                loop_with_fd = TAKE_FD(loop);
                                break;
                        }
                        if (r == -EUCLEAN) {
                                /* Make left-over partition disappear hack (see above) */
                                r = attach_empty_file(loop, nr);
                                if (r < 0 && r != -EBUSY)
                                        return r;
                        } else if (r != -EBUSY)
                                return r;
                }

                if (++n_attempts >= 64) /* Give up eventually */
                        return -EBUSY;

                loopdev = mfree(loopdev);

                /* Wait some random time, to make collision less likely. Let's pick a random time in the
                 * range 0ms…250ms, linearly scaled by the number of failed attempts. */
                (void) usleep(random_u64() % (UINT64_C(10) * USEC_PER_MSEC +
                                              UINT64_C(240) * USEC_PER_MSEC * n_attempts/64));
        }

        d = new(LoopDevice, 1);
        if (!d)
                return -ENOMEM;
        *d = (LoopDevice) {
                .fd = TAKE_FD(loop_with_fd),
                .node = TAKE_PTR(loopdev),
                .nr = nr,
        };

        *ret = d;
        return 0;
}
440 | ||
e08f94ac | 441 | int loop_device_make_by_path(const char *path, int open_flags, uint32_t loop_flags, LoopDevice **ret) { |
8c1be37e | 442 | _cleanup_close_ int fd = -1; |
b0a94268 | 443 | int r; |
8c1be37e LP |
444 | |
445 | assert(path); | |
446 | assert(ret); | |
b0a94268 | 447 | assert(open_flags < 0 || IN_SET(open_flags, O_RDWR, O_RDONLY)); |
8c1be37e | 448 | |
b0a94268 LP |
449 | /* Passing < 0 as open_flags here means we'll try to open the device writable if we can, retrying |
450 | * read-only if we cannot. */ | |
451 | ||
452 | fd = open(path, O_CLOEXEC|O_NONBLOCK|O_NOCTTY|(open_flags >= 0 ? open_flags : O_RDWR)); | |
453 | if (fd < 0) { | |
454 | r = -errno; | |
455 | ||
456 | /* Retry read-only? */ | |
457 | if (open_flags >= 0 || !(ERRNO_IS_PRIVILEGE(r) || r == -EROFS)) | |
458 | return r; | |
459 | ||
460 | fd = open(path, O_CLOEXEC|O_NONBLOCK|O_NOCTTY|O_RDONLY); | |
461 | if (fd < 0) | |
462 | return r; /* Propagate original error */ | |
463 | ||
464 | open_flags = O_RDONLY; | |
465 | } else if (open_flags < 0) | |
466 | open_flags = O_RDWR; | |
8c1be37e | 467 | |
1b49e3e3 | 468 | return loop_device_make(fd, open_flags, 0, 0, loop_flags, ret); |
8c1be37e LP |
469 | } |
470 | ||
/* Releases a LoopDevice: syncs outstanding writes, detaches the loopback device and
 * returns its number to the kernel — unless the device was relinquished or was never
 * allocated by us (nr < 0) — then frees the object. Always returns NULL so callers can
 * write "d = loop_device_unref(d);". */
LoopDevice* loop_device_unref(LoopDevice *d) {
        if (!d)
                return NULL;

        if (d->fd >= 0) {
                /* Implicitly sync the device, since otherwise in-flight blocks might not get written */
                if (fsync(d->fd) < 0)
                        log_debug_errno(errno, "Failed to sync loop block device, ignoring: %m");

                if (d->nr >= 0 && !d->relinquished) {
                        /* Detach the backing file; the device node lingers until every fd is closed. */
                        if (ioctl(d->fd, LOOP_CLR_FD) < 0)
                                log_debug_errno(errno, "Failed to clear loop device: %m");

                }

                safe_close(d->fd);
        }

        if (d->nr >= 0 && !d->relinquished) {
                _cleanup_close_ int control = -1;

                control = open("/dev/loop-control", O_RDWR|O_CLOEXEC|O_NOCTTY|O_NONBLOCK);
                if (control < 0)
                        log_warning_errno(errno,
                                          "Failed to open loop control device, cannot remove loop device %s: %m",
                                          strna(d->node));
                else
                        /* LOOP_CLR_FD is asynchronous: the kernel may report the device busy for
                         * a short while afterwards, hence retry removal on EBUSY a few times. */
                        for (unsigned n_attempts = 0;;) {
                                if (ioctl(control, LOOP_CTL_REMOVE, d->nr) >= 0)
                                        break;
                                if (errno != EBUSY || ++n_attempts >= 64) {
                                        log_warning_errno(errno, "Failed to remove device %s: %m", strna(d->node));
                                        break;
                                }
                                (void) usleep(50 * USEC_PER_MSEC);
                        }
        }

        free(d->node);
        return mfree(d);
}
a2ea3b2f LP |
512 | |
513 | void loop_device_relinquish(LoopDevice *d) { | |
514 | assert(d); | |
515 | ||
516 | /* Don't attempt to clean up the loop device anymore from this point on. Leave the clean-ing up to the kernel | |
517 | * itself, using the loop device "auto-clear" logic we already turned on when creating the device. */ | |
518 | ||
519 | d->relinquished = true; | |
520 | } | |
9dabc4fd LP |
521 | |
522 | int loop_device_open(const char *loop_path, int open_flags, LoopDevice **ret) { | |
523 | _cleanup_close_ int loop_fd = -1; | |
524 | _cleanup_free_ char *p = NULL; | |
b26c39ad | 525 | struct loop_info64 info; |
9dabc4fd LP |
526 | struct stat st; |
527 | LoopDevice *d; | |
b26c39ad | 528 | int nr; |
9dabc4fd LP |
529 | |
530 | assert(loop_path); | |
531 | assert(ret); | |
532 | ||
533 | loop_fd = open(loop_path, O_CLOEXEC|O_NONBLOCK|O_NOCTTY|open_flags); | |
534 | if (loop_fd < 0) | |
535 | return -errno; | |
536 | ||
537 | if (fstat(loop_fd, &st) < 0) | |
538 | return -errno; | |
9dabc4fd LP |
539 | if (!S_ISBLK(st.st_mode)) |
540 | return -ENOTBLK; | |
541 | ||
10c1b188 LP |
542 | if (ioctl(loop_fd, LOOP_GET_STATUS64, &info) >= 0) { |
543 | #if HAVE_VALGRIND_MEMCHECK_H | |
544 | /* Valgrind currently doesn't know LOOP_GET_STATUS64. Remove this once it does */ | |
545 | VALGRIND_MAKE_MEM_DEFINED(&info, sizeof(info)); | |
546 | #endif | |
b26c39ad | 547 | nr = info.lo_number; |
10c1b188 | 548 | } else |
b26c39ad LP |
549 | nr = -1; |
550 | ||
9dabc4fd LP |
551 | p = strdup(loop_path); |
552 | if (!p) | |
553 | return -ENOMEM; | |
554 | ||
555 | d = new(LoopDevice, 1); | |
556 | if (!d) | |
557 | return -ENOMEM; | |
558 | ||
559 | *d = (LoopDevice) { | |
560 | .fd = TAKE_FD(loop_fd), | |
b26c39ad | 561 | .nr = nr, |
9dabc4fd LP |
562 | .node = TAKE_PTR(p), |
563 | .relinquished = true, /* It's not ours, don't try to destroy it when this object is freed */ | |
564 | }; | |
565 | ||
566 | *ret = d; | |
567 | return d->fd; | |
568 | } | |
569 | ||
f1443709 LP |
570 | static int resize_partition(int partition_fd, uint64_t offset, uint64_t size) { |
571 | char sysfs[STRLEN("/sys/dev/block/:/partition") + 2*DECIMAL_STR_MAX(dev_t) + 1]; | |
572 | _cleanup_free_ char *whole = NULL, *buffer = NULL; | |
573 | uint64_t current_offset, current_size, partno; | |
574 | _cleanup_close_ int whole_fd = -1; | |
575 | struct stat st; | |
576 | dev_t devno; | |
577 | int r; | |
578 | ||
579 | assert(partition_fd >= 0); | |
580 | ||
581 | /* Resizes the partition the loopback device refer to (assuming it refers to one instead of an actual | |
582 | * loopback device), and changes the offset, if needed. This is a fancy wrapper around | |
583 | * BLKPG_RESIZE_PARTITION. */ | |
584 | ||
585 | if (fstat(partition_fd, &st) < 0) | |
586 | return -errno; | |
587 | ||
588 | assert(S_ISBLK(st.st_mode)); | |
589 | ||
590 | xsprintf(sysfs, "/sys/dev/block/%u:%u/partition", major(st.st_rdev), minor(st.st_rdev)); | |
591 | r = read_one_line_file(sysfs, &buffer); | |
592 | if (r == -ENOENT) /* not a partition, cannot resize */ | |
593 | return -ENOTTY; | |
594 | if (r < 0) | |
595 | return r; | |
596 | r = safe_atou64(buffer, &partno); | |
597 | if (r < 0) | |
598 | return r; | |
599 | ||
600 | xsprintf(sysfs, "/sys/dev/block/%u:%u/start", major(st.st_rdev), minor(st.st_rdev)); | |
601 | ||
602 | buffer = mfree(buffer); | |
603 | r = read_one_line_file(sysfs, &buffer); | |
604 | if (r < 0) | |
605 | return r; | |
606 | r = safe_atou64(buffer, ¤t_offset); | |
607 | if (r < 0) | |
608 | return r; | |
609 | if (current_offset > UINT64_MAX/512U) | |
610 | return -EINVAL; | |
611 | current_offset *= 512U; | |
612 | ||
613 | if (ioctl(partition_fd, BLKGETSIZE64, ¤t_size) < 0) | |
614 | return -EINVAL; | |
615 | ||
616 | if (size == UINT64_MAX && offset == UINT64_MAX) | |
617 | return 0; | |
618 | if (current_size == size && current_offset == offset) | |
619 | return 0; | |
620 | ||
621 | xsprintf(sysfs, "/sys/dev/block/%u:%u/../dev", major(st.st_rdev), minor(st.st_rdev)); | |
622 | ||
623 | buffer = mfree(buffer); | |
624 | r = read_one_line_file(sysfs, &buffer); | |
625 | if (r < 0) | |
626 | return r; | |
627 | r = parse_dev(buffer, &devno); | |
628 | if (r < 0) | |
629 | return r; | |
630 | ||
631 | r = device_path_make_major_minor(S_IFBLK, devno, &whole); | |
632 | if (r < 0) | |
633 | return r; | |
634 | ||
635 | whole_fd = open(whole, O_RDWR|O_CLOEXEC|O_NONBLOCK|O_NOCTTY); | |
636 | if (whole_fd < 0) | |
637 | return -errno; | |
638 | ||
639 | struct blkpg_partition bp = { | |
640 | .pno = partno, | |
641 | .start = offset == UINT64_MAX ? current_offset : offset, | |
642 | .length = size == UINT64_MAX ? current_size : size, | |
643 | }; | |
644 | ||
645 | struct blkpg_ioctl_arg ba = { | |
646 | .op = BLKPG_RESIZE_PARTITION, | |
647 | .data = &bp, | |
648 | .datalen = sizeof(bp), | |
649 | }; | |
650 | ||
651 | if (ioctl(whole_fd, BLKPG, &ba) < 0) | |
652 | return -errno; | |
653 | ||
654 | return 0; | |
655 | } | |
656 | ||
c37878fc LP |
/* Changes the offset/size limit of the loop device relative to the beginning of the
 * underlying file or block device. If this LoopDevice actually refers to a partition
 * and not a loopback device (nr < 0), adjusts the partition geometry instead via
 * resize_partition().
 *
 * If either offset or size is UINT64_MAX that parameter is left unchanged.
 * Returns 0 on success (including no-op), negative errno on failure. */
int loop_device_refresh_size(LoopDevice *d, uint64_t offset, uint64_t size) {
        struct loop_info64 info;
        assert(d);

        if (d->fd < 0)
                return -EBADF;

        if (d->nr < 0) /* not a loopback device */
                return resize_partition(d->fd, offset, size);

        /* Read-modify-write the current loop status. */
        if (ioctl(d->fd, LOOP_GET_STATUS64, &info) < 0)
                return -errno;

#if HAVE_VALGRIND_MEMCHECK_H
        /* Valgrind currently doesn't know LOOP_GET_STATUS64. Remove this once it does */
        VALGRIND_MAKE_MEM_DEFINED(&info, sizeof(info));
#endif

        if (size == UINT64_MAX && offset == UINT64_MAX)
                return 0; /* nothing to change */
        if (info.lo_sizelimit == size && info.lo_offset == offset)
                return 0; /* already in the requested state */

        if (size != UINT64_MAX)
                info.lo_sizelimit = size;
        if (offset != UINT64_MAX)
                info.lo_offset = offset;

        if (ioctl(d->fd, LOOP_SET_STATUS64, &info) < 0)
                return -errno;

        return 0;
}
441ec804 LP |
696 | |
697 | int loop_device_flock(LoopDevice *d, int operation) { | |
698 | assert(d); | |
699 | ||
700 | if (d->fd < 0) | |
701 | return -EBADF; | |
702 | ||
703 | if (flock(d->fd, operation) < 0) | |
704 | return -errno; | |
705 | ||
706 | return 0; | |
707 | } | |
8dbc208c LP |
708 | |
709 | int loop_device_sync(LoopDevice *d) { | |
710 | assert(d); | |
711 | ||
712 | /* We also do this implicitly in loop_device_unref(). Doing this explicitly here has the benefit that | |
713 | * we can check the return value though. */ | |
714 | ||
715 | if (d->fd < 0) | |
716 | return -EBADF; | |
717 | ||
718 | if (fsync(d->fd) < 0) | |
719 | return -errno; | |
720 | ||
721 | return 0; | |
722 | } |