// SPDX-License-Identifier: GPL-2.0-only
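/*
 * fs/remap_range.c - generic VFS helpers for remapping file ranges:
 * cloning (reflink) and deduplication.  These back vfs_clone_file_range()
 * and vfs_dedupe_file_range(), which service the FICLONE/FICLONERANGE and
 * FIDEDUPERANGE ioctls, and provide generic_remap_file_range_prep() for
 * filesystems that implement ->remap_file_range().
 */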
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/sched/xacct.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/fsnotify.h>
#include <linux/security.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include "internal.h"

#include <linux/uaccess.h>
#include <asm/unistd.h>

/*
 * Performs the necessary checks before doing a clone.
 *
 * Can adjust the number of bytes to clone via the @req_count argument.
 * Returns an appropriate error code that the caller should return, or
 * zero in case the clone should be allowed.
 */
static int generic_remap_checks(struct file *file_in, loff_t pos_in,
                                struct file *file_out, loff_t pos_out,
                                loff_t *req_count, unsigned int remap_flags)
{
        struct inode *inode_in = file_in->f_mapping->host;
        struct inode *inode_out = file_out->f_mapping->host;
        uint64_t count = *req_count;
        uint64_t bcount;
        loff_t size_in, size_out;
        loff_t bs = inode_out->i_sb->s_blocksize;
        int ret;

        /* The start of both ranges must be aligned to an fs block. */
        if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_out, bs))
                return -EINVAL;

        /* Ensure offsets don't wrap. */
        if (pos_in + count < pos_in || pos_out + count < pos_out)
                return -EINVAL;

        size_in = i_size_read(inode_in);
        size_out = i_size_read(inode_out);

        /* Dedupe requires both ranges to be within EOF. */
        if ((remap_flags & REMAP_FILE_DEDUP) &&
            (pos_in >= size_in || pos_in + count > size_in ||
             pos_out >= size_out || pos_out + count > size_out))
                return -EINVAL;

        /* Ensure the infile range is within the infile. */
        if (pos_in >= size_in)
                return -EINVAL;
        count = min(count, size_in - (uint64_t)pos_in);

        ret = generic_write_check_limits(file_out, pos_out, &count);
        if (ret)
                return ret;

        /*
         * If the user wanted us to link to the infile's EOF, round up to the
         * next block boundary for this check.
         *
         * Otherwise, make sure the count is also block-aligned, having
         * already confirmed the starting offsets' block alignment.
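         *
         * For example (illustrative numbers), with a 4096-byte block size and
         * a 10000-byte source file, cloning bytes 0-9999 reaches the source
         * EOF, so count stays 10000 and bcount is rounded up to 12288 for the
         * overlap check below.  The same 10000-byte request in the middle of a
         * larger file is instead rounded down to 8192 bytes, and later
         * rejected unless the caller passed REMAP_FILE_CAN_SHORTEN.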
         */
        if (pos_in + count == size_in &&
            (!(remap_flags & REMAP_FILE_DEDUP) || pos_out + count == size_out)) {
                bcount = ALIGN(size_in, bs) - pos_in;
        } else {
                if (!IS_ALIGNED(count, bs))
                        count = ALIGN_DOWN(count, bs);
                bcount = count;
        }

        /* Don't allow overlapped cloning within the same file. */
        if (inode_in == inode_out &&
            pos_out + bcount > pos_in &&
            pos_out < pos_in + bcount)
                return -EINVAL;

        /*
         * We shortened the request but the caller can't deal with that, so
         * bounce the request back to userspace.
         */
        if (*req_count != count && !(remap_flags & REMAP_FILE_CAN_SHORTEN))
                return -EINVAL;

        *req_count = count;
        return 0;
}

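/*
 * Sanity-check a single range before remapping: reject negative offsets or
 * lengths and ranges that wrap past the end of loff_t, then ask the security
 * layer whether the read or write is permitted.
 */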
static int remap_verify_area(struct file *file, loff_t pos, loff_t len,
                             bool write)
{
        if (unlikely(pos < 0 || len < 0))
                return -EINVAL;

        if (unlikely((loff_t) (pos + len) < 0))
                return -EINVAL;

        return security_file_permission(file, write ? MAY_WRITE : MAY_READ);
}

/*
 * Ensure that we don't remap a partial EOF block in the middle of something
 * else. Assume that the offsets have already been checked for block
 * alignment.
 *
 * For clone we only link a partial EOF block above or at the destination file's
 * EOF. For deduplication we accept a partial EOF block only if it ends at the
 * destination file's EOF (can not link it into the middle of a file).
 *
 * Shorten the request if possible.
 */
static int generic_remap_check_len(struct inode *inode_in,
                                   struct inode *inode_out,
                                   loff_t pos_out,
                                   loff_t *len,
                                   unsigned int remap_flags)
{
        u64 blkmask = i_blocksize(inode_in) - 1;
        loff_t new_len = *len;

        if ((*len & blkmask) == 0)
                return 0;

        if (pos_out + *len < i_size_read(inode_out))
                new_len &= ~blkmask;

        if (new_len == *len)
                return 0;

        if (remap_flags & REMAP_FILE_CAN_SHORTEN) {
                *len = new_len;
                return 0;
        }

        return (remap_flags & REMAP_FILE_DEDUP) ? -EBADE : -EINVAL;
}

/* Read a page's worth of file data into the page cache. */
static struct folio *vfs_dedupe_get_folio(struct file *file, loff_t pos)
{
        struct folio *folio;

        folio = read_mapping_folio(file->f_mapping, pos >> PAGE_SHIFT, file);
        if (IS_ERR(folio))
                return folio;
        if (!folio_test_uptodate(folio)) {
                folio_put(folio);
                return ERR_PTR(-EIO);
        }
        return folio;
}

/*
 * Lock two folios, ensuring that we lock in offset order if the folios
 * are from the same file.
 */
static void vfs_lock_two_folios(struct folio *folio1, struct folio *folio2)
{
        /* Always lock in order of increasing index. */
        if (folio1->index > folio2->index)
                swap(folio1, folio2);

        folio_lock(folio1);
        if (folio1 != folio2)
                folio_lock(folio2);
}

/* Unlock two folios, being careful not to unlock the same folio twice. */
static void vfs_unlock_two_folios(struct folio *folio1, struct folio *folio2)
{
        folio_unlock(folio1);
        if (folio1 != folio2)
                folio_unlock(folio2);
}

/*
 * Compare extents of two files to see if they are the same.
 * Caller must have locked both inodes to prevent write races.
 */
static int vfs_dedupe_file_range_compare(struct file *src, loff_t srcoff,
                                         struct file *dest, loff_t dstoff,
                                         loff_t len, bool *is_same)
{
        bool same = true;
        int error = -EINVAL;

        while (len) {
                struct folio *src_folio, *dst_folio;
                void *src_addr, *dst_addr;
                loff_t cmp_len = min(PAGE_SIZE - offset_in_page(srcoff),
                                     PAGE_SIZE - offset_in_page(dstoff));

                cmp_len = min(cmp_len, len);
                if (cmp_len <= 0)
                        goto out_error;

                src_folio = vfs_dedupe_get_folio(src, srcoff);
                if (IS_ERR(src_folio)) {
                        error = PTR_ERR(src_folio);
                        goto out_error;
                }
                dst_folio = vfs_dedupe_get_folio(dest, dstoff);
                if (IS_ERR(dst_folio)) {
                        error = PTR_ERR(dst_folio);
                        folio_put(src_folio);
                        goto out_error;
                }

                vfs_lock_two_folios(src_folio, dst_folio);

                /*
                 * Now that we've locked both folios, make sure they're still
                 * mapped to the file data we're interested in. If not,
                 * someone is invalidating pages on us and we lose.
                 */
                if (!folio_test_uptodate(src_folio) || !folio_test_uptodate(dst_folio) ||
                    src_folio->mapping != src->f_mapping ||
                    dst_folio->mapping != dest->f_mapping) {
                        same = false;
                        goto unlock;
                }

                src_addr = kmap_local_folio(src_folio,
                                            offset_in_folio(src_folio, srcoff));
                dst_addr = kmap_local_folio(dst_folio,
                                            offset_in_folio(dst_folio, dstoff));

                flush_dcache_folio(src_folio);
                flush_dcache_folio(dst_folio);

                if (memcmp(src_addr, dst_addr, cmp_len))
                        same = false;

                kunmap_local(dst_addr);
                kunmap_local(src_addr);
unlock:
                vfs_unlock_two_folios(src_folio, dst_folio);
                folio_put(dst_folio);
                folio_put(src_folio);

                if (!same)
                        break;

                srcoff += cmp_len;
                dstoff += cmp_len;
                len -= cmp_len;
        }

        *is_same = same;
        return 0;

out_error:
        return error;
}

/*
 * Check that the two inodes are eligible for cloning, the ranges make
 * sense, and then flush all dirty data. Caller must ensure that the
 * inodes have been locked against any other modifications.
 *
 * If there's an error, then the usual negative error code is returned.
 * Otherwise returns 0 with *len set to the request length.
 */
int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
                                  struct file *file_out, loff_t pos_out,
                                  loff_t *len, unsigned int remap_flags)
{
        struct inode *inode_in = file_inode(file_in);
        struct inode *inode_out = file_inode(file_out);
        bool same_inode = (inode_in == inode_out);
        int ret;

        /* Don't touch certain kinds of inodes */
        if (IS_IMMUTABLE(inode_out))
                return -EPERM;

        if (IS_SWAPFILE(inode_in) || IS_SWAPFILE(inode_out))
                return -ETXTBSY;

        /* Don't reflink dirs, pipes, sockets... */
        if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
                return -EISDIR;
        if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
                return -EINVAL;

        /* Zero length dedupe exits immediately; reflink goes to EOF. */
        if (*len == 0) {
                loff_t isize = i_size_read(inode_in);

                if ((remap_flags & REMAP_FILE_DEDUP) || pos_in == isize)
                        return 0;
                if (pos_in > isize)
                        return -EINVAL;
                *len = isize - pos_in;
                if (*len == 0)
                        return 0;
        }

        /* Check that we don't violate system file offset limits. */
        ret = generic_remap_checks(file_in, pos_in, file_out, pos_out, len,
                        remap_flags);
        if (ret)
                return ret;

        /* Wait for the completion of any pending IOs on both files */
        inode_dio_wait(inode_in);
        if (!same_inode)
                inode_dio_wait(inode_out);

        ret = filemap_write_and_wait_range(inode_in->i_mapping,
                        pos_in, pos_in + *len - 1);
        if (ret)
                return ret;

        ret = filemap_write_and_wait_range(inode_out->i_mapping,
                        pos_out, pos_out + *len - 1);
        if (ret)
                return ret;

        /*
         * Check that the extents are the same.
         */
        if (remap_flags & REMAP_FILE_DEDUP) {
                bool is_same = false;

                ret = vfs_dedupe_file_range_compare(file_in, pos_in,
                                file_out, pos_out, *len, &is_same);
                if (ret)
                        return ret;
                if (!is_same)
                        return -EBADE;
        }

        ret = generic_remap_check_len(inode_in, inode_out, pos_out, len,
                        remap_flags);
        if (ret)
                return ret;

        /* If we can't alter the file contents, we're done. */
        if (!(remap_flags & REMAP_FILE_DEDUP))
                ret = file_modified(file_out);

        return ret;
}
EXPORT_SYMBOL(generic_remap_file_range_prep);

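/*
 * A minimal sketch (not from this file, names are hypothetical) of how a
 * filesystem's ->remap_file_range() implementation typically uses
 * generic_remap_file_range_prep(): lock both inodes, let the helper validate
 * and possibly shorten the request, then share the extents.
 *
 *	static loff_t myfs_remap_file_range(struct file *file_in, loff_t pos_in,
 *					    struct file *file_out, loff_t pos_out,
 *					    loff_t len, unsigned int remap_flags)
 *	{
 *		int ret;
 *
 *		if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
 *			return -EINVAL;
 *
 *		myfs_lock_two_inodes(file_inode(file_in), file_inode(file_out));
 *		ret = generic_remap_file_range_prep(file_in, pos_in, file_out,
 *						    pos_out, &len, remap_flags);
 *		if (ret < 0 || len == 0)
 *			goto out_unlock;
 *
 *		ret = myfs_share_extents(file_in, pos_in, file_out, pos_out, len);
 *	out_unlock:
 *		myfs_unlock_two_inodes(file_inode(file_in), file_inode(file_out));
 *		return ret < 0 ? ret : len;
 *	}
 */

/*
 * Clone (reflink) a range of file_in into file_out. Both files must be on
 * the same superblock and the filesystem must implement ->remap_file_range();
 * the generic read/write and range checks are performed here before handing
 * off to the filesystem. vfs_clone_file_range() below is the variant that
 * also takes write access to file_out.
 */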
loff_t do_clone_file_range(struct file *file_in, loff_t pos_in,
                           struct file *file_out, loff_t pos_out,
                           loff_t len, unsigned int remap_flags)
{
        loff_t ret;

        WARN_ON_ONCE(remap_flags & REMAP_FILE_DEDUP);

        if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb)
                return -EXDEV;

        ret = generic_file_rw_checks(file_in, file_out);
        if (ret < 0)
                return ret;

        if (!file_in->f_op->remap_file_range)
                return -EOPNOTSUPP;

        ret = remap_verify_area(file_in, pos_in, len, false);
        if (ret)
                return ret;

        ret = remap_verify_area(file_out, pos_out, len, true);
        if (ret)
                return ret;

        ret = file_in->f_op->remap_file_range(file_in, pos_in,
                        file_out, pos_out, len, remap_flags);
        if (ret < 0)
                return ret;

        fsnotify_access(file_in);
        fsnotify_modify(file_out);
        return ret;
}
EXPORT_SYMBOL(do_clone_file_range);

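/*
 * Same as do_clone_file_range(), but also takes and drops write access to
 * file_out around the operation.
 */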
loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in,
                            struct file *file_out, loff_t pos_out,
                            loff_t len, unsigned int remap_flags)
{
        loff_t ret;

        file_start_write(file_out);
        ret = do_clone_file_range(file_in, pos_in, file_out, pos_out, len,
                                  remap_flags);
        file_end_write(file_out);

        return ret;
}
EXPORT_SYMBOL(vfs_clone_file_range);

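/*
 * Userspace reaches vfs_clone_file_range() through the FICLONE and
 * FICLONERANGE ioctls.  An illustrative (untested) caller that clones the
 * first 1 MiB of src_fd to offset 0 of dst_fd might look like the snippet
 * below; a src_length of 0 would mean "clone to EOF".
 *
 *	struct file_clone_range arg = {
 *		.src_fd = src_fd,
 *		.src_offset = 0,
 *		.src_length = 1024 * 1024,
 *		.dest_offset = 0,
 *	};
 *
 *	if (ioctl(dst_fd, FICLONERANGE, &arg) < 0)
 *		perror("FICLONERANGE");
 */
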
/* Check whether we are allowed to dedupe the destination file */
static bool allow_file_dedupe(struct file *file)
{
        struct user_namespace *mnt_userns = file_mnt_user_ns(file);
        struct inode *inode = file_inode(file);

        if (capable(CAP_SYS_ADMIN))
                return true;
        if (file->f_mode & FMODE_WRITE)
                return true;
        if (uid_eq(current_fsuid(), i_uid_into_mnt(mnt_userns, inode)))
                return true;
        if (!inode_permission(mnt_userns, inode, MAY_WRITE))
                return true;
        return false;
}

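/*
 * Deduplicate a single source/destination range pair: take write access to
 * dst_file's mount, verify both ranges and the caller's permission to modify
 * dst_file, then ask the filesystem to dedupe via ->remap_file_range() with
 * REMAP_FILE_DEDUP set.  Returns the number of bytes deduplicated, 0 for an
 * empty request, or a negative error (typically -EBADE when the contents of
 * the two ranges turn out to differ).
 */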
loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
                                 struct file *dst_file, loff_t dst_pos,
                                 loff_t len, unsigned int remap_flags)
{
        loff_t ret;

        WARN_ON_ONCE(remap_flags & ~(REMAP_FILE_DEDUP |
                                     REMAP_FILE_CAN_SHORTEN));

        ret = mnt_want_write_file(dst_file);
        if (ret)
                return ret;

        /*
         * This is redundant if called from vfs_dedupe_file_range(), but other
         * callers need it and it's not performance sensitive...
         */
        ret = remap_verify_area(src_file, src_pos, len, false);
        if (ret)
                goto out_drop_write;

        ret = remap_verify_area(dst_file, dst_pos, len, true);
        if (ret)
                goto out_drop_write;

        ret = -EPERM;
        if (!allow_file_dedupe(dst_file))
                goto out_drop_write;

        ret = -EXDEV;
        if (file_inode(src_file)->i_sb != file_inode(dst_file)->i_sb)
                goto out_drop_write;

        ret = -EISDIR;
        if (S_ISDIR(file_inode(dst_file)->i_mode))
                goto out_drop_write;

        ret = -EINVAL;
        if (!dst_file->f_op->remap_file_range)
                goto out_drop_write;

        if (len == 0) {
                ret = 0;
                goto out_drop_write;
        }

        ret = dst_file->f_op->remap_file_range(src_file, src_pos, dst_file,
                        dst_pos, len, remap_flags | REMAP_FILE_DEDUP);
out_drop_write:
        mnt_drop_write_file(dst_file);

        return ret;
}
EXPORT_SYMBOL(vfs_dedupe_file_range_one);

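/*
 * Back end of the FIDEDUPERANGE ioctl: validate the source range once, clamp
 * it to 1G, then walk same->info[] and try to dedupe each destination against
 * the source with vfs_dedupe_file_range_one(), recording a per-destination
 * status and bytes_deduped.  A failure on one destination does not abort the
 * others; only a fatal signal ends the loop early.
 */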
int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same)
{
        struct file_dedupe_range_info *info;
        struct inode *src = file_inode(file);
        u64 off;
        u64 len;
        int i;
        int ret;
        u16 count = same->dest_count;
        loff_t deduped;

        if (!(file->f_mode & FMODE_READ))
                return -EINVAL;

        if (same->reserved1 || same->reserved2)
                return -EINVAL;

        off = same->src_offset;
        len = same->src_length;

        if (S_ISDIR(src->i_mode))
                return -EISDIR;

        if (!S_ISREG(src->i_mode))
                return -EINVAL;

        if (!file->f_op->remap_file_range)
                return -EOPNOTSUPP;

        ret = remap_verify_area(file, off, len, false);
        if (ret < 0)
                return ret;
        ret = 0;

        if (off + len > i_size_read(src))
                return -EINVAL;

        /* Arbitrary 1G limit on a single dedupe request, can be raised. */
        len = min_t(u64, len, 1 << 30);

        /* pre-format output fields to sane values */
        for (i = 0; i < count; i++) {
                same->info[i].bytes_deduped = 0ULL;
                same->info[i].status = FILE_DEDUPE_RANGE_SAME;
        }

        for (i = 0, info = same->info; i < count; i++, info++) {
                struct fd dst_fd = fdget(info->dest_fd);
                struct file *dst_file = dst_fd.file;

                if (!dst_file) {
                        info->status = -EBADF;
                        goto next_loop;
                }

                if (info->reserved) {
                        info->status = -EINVAL;
                        goto next_fdput;
                }

                deduped = vfs_dedupe_file_range_one(file, off, dst_file,
                                                    info->dest_offset, len,
                                                    REMAP_FILE_CAN_SHORTEN);
                if (deduped == -EBADE)
                        info->status = FILE_DEDUPE_RANGE_DIFFERS;
                else if (deduped < 0)
                        info->status = deduped;
                else
                        info->bytes_deduped = len;

next_fdput:
                fdput(dst_fd);
next_loop:
                if (fatal_signal_pending(current))
                        break;
        }
        return ret;
}
EXPORT_SYMBOL(vfs_dedupe_file_range);
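
/*
 * Illustrative (untested) userspace use of the FIDEDUPERANGE ioctl served by
 * vfs_dedupe_file_range() above: ask the kernel to share the first 64 KiB of
 * src_fd with the same-sized range at offset 0 of dst_fd, provided the bytes
 * match.
 *
 *	struct {
 *		struct file_dedupe_range r;
 *		struct file_dedupe_range_info info[1];
 *	} arg = {
 *		.r = {
 *			.src_offset = 0,
 *			.src_length = 64 * 1024,
 *			.dest_count = 1,
 *		},
 *		.info[0] = {
 *			.dest_fd = dst_fd,
 *			.dest_offset = 0,
 *		},
 *	};
 *
 *	if (ioctl(src_fd, FIDEDUPERANGE, &arg) < 0)
 *		perror("FIDEDUPERANGE");
 *	else if (arg.info[0].status == FILE_DEDUPE_RANGE_SAME)
 *		printf("deduped %llu bytes\n",
 *		       (unsigned long long)arg.info[0].bytes_deduped);
 *	else if (arg.info[0].status == FILE_DEDUPE_RANGE_DIFFERS)
 *		printf("contents differ\n");
 */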