1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <linux/slab.h>
3 #include <linux/stat.h>
4 #include <linux/sched/xacct.h>
5 #include <linux/fcntl.h>
6 #include <linux/file.h>
8 #include <linux/fsnotify.h>
9 #include <linux/security.h>
10 #include <linux/export.h>
11 #include <linux/syscalls.h>
12 #include <linux/pagemap.h>
13 #include <linux/splice.h>
14 #include <linux/compat.h>
15 #include <linux/mount.h>
19 #include <linux/uaccess.h>
20 #include <asm/unistd.h>
23 * Performs necessary checks before doing a clone.
25 * Can adjust amount of bytes to clone via @req_count argument.
26 * Returns appropriate error code that caller should return or
27 * zero in case the clone should be allowed.
29 static int generic_remap_checks(struct file
*file_in
, loff_t pos_in
,
30 struct file
*file_out
, loff_t pos_out
,
31 loff_t
*req_count
, unsigned int remap_flags
)
33 struct inode
*inode_in
= file_in
->f_mapping
->host
;
34 struct inode
*inode_out
= file_out
->f_mapping
->host
;
35 uint64_t count
= *req_count
;
37 loff_t size_in
, size_out
;
38 loff_t bs
= inode_out
->i_sb
->s_blocksize
;
41 /* The start of both ranges must be aligned to an fs block. */
42 if (!IS_ALIGNED(pos_in
, bs
) || !IS_ALIGNED(pos_out
, bs
))
45 /* Ensure offsets don't wrap. */
46 if (pos_in
+ count
< pos_in
|| pos_out
+ count
< pos_out
)
49 size_in
= i_size_read(inode_in
);
50 size_out
= i_size_read(inode_out
);
52 /* Dedupe requires both ranges to be within EOF. */
53 if ((remap_flags
& REMAP_FILE_DEDUP
) &&
54 (pos_in
>= size_in
|| pos_in
+ count
> size_in
||
55 pos_out
>= size_out
|| pos_out
+ count
> size_out
))
58 /* Ensure the infile range is within the infile. */
59 if (pos_in
>= size_in
)
61 count
= min(count
, size_in
- (uint64_t)pos_in
);
63 ret
= generic_write_check_limits(file_out
, pos_out
, &count
);
68 * If the user wanted us to link to the infile's EOF, round up to the
69 * next block boundary for this check.
71 * Otherwise, make sure the count is also block-aligned, having
72 * already confirmed the starting offsets' block alignment.
74 if (pos_in
+ count
== size_in
) {
75 bcount
= ALIGN(size_in
, bs
) - pos_in
;
77 if (!IS_ALIGNED(count
, bs
))
78 count
= ALIGN_DOWN(count
, bs
);
82 /* Don't allow overlapped cloning within the same file. */
83 if (inode_in
== inode_out
&&
84 pos_out
+ bcount
> pos_in
&&
85 pos_out
< pos_in
+ bcount
)
89 * We shortened the request but the caller can't deal with that, so
90 * bounce the request back to userspace.
92 if (*req_count
!= count
&& !(remap_flags
& REMAP_FILE_CAN_SHORTEN
))
99 static int remap_verify_area(struct file
*file
, loff_t pos
, loff_t len
,
102 if (unlikely(pos
< 0 || len
< 0))
105 if (unlikely((loff_t
) (pos
+ len
) < 0))
108 return security_file_permission(file
, write
? MAY_WRITE
: MAY_READ
);
112 * Ensure that we don't remap a partial EOF block in the middle of something
113 * else. Assume that the offsets have already been checked for block
116 * For clone we only link a partial EOF block above or at the destination file's
117 * EOF. For deduplication we accept a partial EOF block only if it ends at the
118 * destination file's EOF (can not link it into the middle of a file).
120 * Shorten the request if possible.
122 static int generic_remap_check_len(struct inode
*inode_in
,
123 struct inode
*inode_out
,
126 unsigned int remap_flags
)
128 u64 blkmask
= i_blocksize(inode_in
) - 1;
129 loff_t new_len
= *len
;
131 if ((*len
& blkmask
) == 0)
134 if (pos_out
+ *len
< i_size_read(inode_out
))
140 if (remap_flags
& REMAP_FILE_CAN_SHORTEN
) {
145 return (remap_flags
& REMAP_FILE_DEDUP
) ? -EBADE
: -EINVAL
;
148 /* Read a page's worth of file data into the page cache. */
149 static struct folio
*vfs_dedupe_get_folio(struct file
*file
, loff_t pos
)
153 folio
= read_mapping_folio(file
->f_mapping
, pos
>> PAGE_SHIFT
, file
);
156 if (!folio_test_uptodate(folio
)) {
158 return ERR_PTR(-EIO
);
164 * Lock two folios, ensuring that we lock in offset order if the folios
165 * are from the same file.
167 static void vfs_lock_two_folios(struct folio
*folio1
, struct folio
*folio2
)
169 /* Always lock in order of increasing index. */
170 if (folio1
->index
> folio2
->index
)
171 swap(folio1
, folio2
);
174 if (folio1
!= folio2
)
/* Unlock two folios, being careful not to unlock the same folio twice. */
static void vfs_unlock_two_folios(struct folio *folio1, struct folio *folio2)
{
	folio_unlock(folio1);
	if (folio1 != folio2)
		folio_unlock(folio2);
}
187 * Compare extents of two files to see if they are the same.
188 * Caller must have locked both inodes to prevent write races.
190 static int vfs_dedupe_file_range_compare(struct file
*src
, loff_t srcoff
,
191 struct file
*dest
, loff_t dstoff
,
192 loff_t len
, bool *is_same
)
198 struct folio
*src_folio
, *dst_folio
;
199 void *src_addr
, *dst_addr
;
200 loff_t cmp_len
= min(PAGE_SIZE
- offset_in_page(srcoff
),
201 PAGE_SIZE
- offset_in_page(dstoff
));
203 cmp_len
= min(cmp_len
, len
);
207 src_folio
= vfs_dedupe_get_folio(src
, srcoff
);
208 if (IS_ERR(src_folio
)) {
209 error
= PTR_ERR(src_folio
);
212 dst_folio
= vfs_dedupe_get_folio(dest
, dstoff
);
213 if (IS_ERR(dst_folio
)) {
214 error
= PTR_ERR(dst_folio
);
215 folio_put(src_folio
);
219 vfs_lock_two_folios(src_folio
, dst_folio
);
222 * Now that we've locked both folios, make sure they're still
223 * mapped to the file data we're interested in. If not,
224 * someone is invalidating pages on us and we lose.
226 if (!folio_test_uptodate(src_folio
) || !folio_test_uptodate(dst_folio
) ||
227 src_folio
->mapping
!= src
->f_mapping
||
228 dst_folio
->mapping
!= dest
->f_mapping
) {
233 src_addr
= kmap_local_folio(src_folio
,
234 offset_in_folio(src_folio
, srcoff
));
235 dst_addr
= kmap_local_folio(dst_folio
,
236 offset_in_folio(dst_folio
, dstoff
));
238 flush_dcache_folio(src_folio
);
239 flush_dcache_folio(dst_folio
);
241 if (memcmp(src_addr
, dst_addr
, cmp_len
))
244 kunmap_local(dst_addr
);
245 kunmap_local(src_addr
);
247 vfs_unlock_two_folios(src_folio
, dst_folio
);
248 folio_put(dst_folio
);
249 folio_put(src_folio
);
267 * Check that the two inodes are eligible for cloning, the ranges make
268 * sense, and then flush all dirty data. Caller must ensure that the
269 * inodes have been locked against any other modifications.
271 * If there's an error, then the usual negative error code is returned.
272 * Otherwise returns 0 with *len set to the request length.
274 int generic_remap_file_range_prep(struct file
*file_in
, loff_t pos_in
,
275 struct file
*file_out
, loff_t pos_out
,
276 loff_t
*len
, unsigned int remap_flags
)
278 struct inode
*inode_in
= file_inode(file_in
);
279 struct inode
*inode_out
= file_inode(file_out
);
280 bool same_inode
= (inode_in
== inode_out
);
283 /* Don't touch certain kinds of inodes */
284 if (IS_IMMUTABLE(inode_out
))
287 if (IS_SWAPFILE(inode_in
) || IS_SWAPFILE(inode_out
))
290 /* Don't reflink dirs, pipes, sockets... */
291 if (S_ISDIR(inode_in
->i_mode
) || S_ISDIR(inode_out
->i_mode
))
293 if (!S_ISREG(inode_in
->i_mode
) || !S_ISREG(inode_out
->i_mode
))
296 /* Zero length dedupe exits immediately; reflink goes to EOF. */
298 loff_t isize
= i_size_read(inode_in
);
300 if ((remap_flags
& REMAP_FILE_DEDUP
) || pos_in
== isize
)
304 *len
= isize
- pos_in
;
309 /* Check that we don't violate system file offset limits. */
310 ret
= generic_remap_checks(file_in
, pos_in
, file_out
, pos_out
, len
,
315 /* Wait for the completion of any pending IOs on both files */
316 inode_dio_wait(inode_in
);
318 inode_dio_wait(inode_out
);
320 ret
= filemap_write_and_wait_range(inode_in
->i_mapping
,
321 pos_in
, pos_in
+ *len
- 1);
325 ret
= filemap_write_and_wait_range(inode_out
->i_mapping
,
326 pos_out
, pos_out
+ *len
- 1);
331 * Check that the extents are the same.
333 if (remap_flags
& REMAP_FILE_DEDUP
) {
334 bool is_same
= false;
336 ret
= vfs_dedupe_file_range_compare(file_in
, pos_in
,
337 file_out
, pos_out
, *len
, &is_same
);
344 ret
= generic_remap_check_len(inode_in
, inode_out
, pos_out
, len
,
349 /* If can't alter the file contents, we're done. */
350 if (!(remap_flags
& REMAP_FILE_DEDUP
))
351 ret
= file_modified(file_out
);
355 EXPORT_SYMBOL(generic_remap_file_range_prep
);
357 loff_t
do_clone_file_range(struct file
*file_in
, loff_t pos_in
,
358 struct file
*file_out
, loff_t pos_out
,
359 loff_t len
, unsigned int remap_flags
)
363 WARN_ON_ONCE(remap_flags
& REMAP_FILE_DEDUP
);
365 if (file_inode(file_in
)->i_sb
!= file_inode(file_out
)->i_sb
)
368 ret
= generic_file_rw_checks(file_in
, file_out
);
372 if (!file_in
->f_op
->remap_file_range
)
375 ret
= remap_verify_area(file_in
, pos_in
, len
, false);
379 ret
= remap_verify_area(file_out
, pos_out
, len
, true);
383 ret
= file_in
->f_op
->remap_file_range(file_in
, pos_in
,
384 file_out
, pos_out
, len
, remap_flags
);
388 fsnotify_access(file_in
);
389 fsnotify_modify(file_out
);
392 EXPORT_SYMBOL(do_clone_file_range
);
394 loff_t
vfs_clone_file_range(struct file
*file_in
, loff_t pos_in
,
395 struct file
*file_out
, loff_t pos_out
,
396 loff_t len
, unsigned int remap_flags
)
400 file_start_write(file_out
);
401 ret
= do_clone_file_range(file_in
, pos_in
, file_out
, pos_out
, len
,
403 file_end_write(file_out
);
407 EXPORT_SYMBOL(vfs_clone_file_range
);
409 /* Check whether we are allowed to dedupe the destination file */
410 static bool allow_file_dedupe(struct file
*file
)
412 struct user_namespace
*mnt_userns
= file_mnt_user_ns(file
);
413 struct inode
*inode
= file_inode(file
);
415 if (capable(CAP_SYS_ADMIN
))
417 if (file
->f_mode
& FMODE_WRITE
)
419 if (uid_eq(current_fsuid(), i_uid_into_mnt(mnt_userns
, inode
)))
421 if (!inode_permission(mnt_userns
, inode
, MAY_WRITE
))
426 loff_t
vfs_dedupe_file_range_one(struct file
*src_file
, loff_t src_pos
,
427 struct file
*dst_file
, loff_t dst_pos
,
428 loff_t len
, unsigned int remap_flags
)
432 WARN_ON_ONCE(remap_flags
& ~(REMAP_FILE_DEDUP
|
433 REMAP_FILE_CAN_SHORTEN
));
435 ret
= mnt_want_write_file(dst_file
);
440 * This is redundant if called from vfs_dedupe_file_range(), but other
441 * callers need it and it's not performance sesitive...
443 ret
= remap_verify_area(src_file
, src_pos
, len
, false);
447 ret
= remap_verify_area(dst_file
, dst_pos
, len
, true);
452 if (!allow_file_dedupe(dst_file
))
456 if (file_inode(src_file
)->i_sb
!= file_inode(dst_file
)->i_sb
)
460 if (S_ISDIR(file_inode(dst_file
)->i_mode
))
464 if (!dst_file
->f_op
->remap_file_range
)
472 ret
= dst_file
->f_op
->remap_file_range(src_file
, src_pos
, dst_file
,
473 dst_pos
, len
, remap_flags
| REMAP_FILE_DEDUP
);
475 mnt_drop_write_file(dst_file
);
479 EXPORT_SYMBOL(vfs_dedupe_file_range_one
);
481 int vfs_dedupe_file_range(struct file
*file
, struct file_dedupe_range
*same
)
483 struct file_dedupe_range_info
*info
;
484 struct inode
*src
= file_inode(file
);
489 u16 count
= same
->dest_count
;
492 if (!(file
->f_mode
& FMODE_READ
))
495 if (same
->reserved1
|| same
->reserved2
)
498 off
= same
->src_offset
;
499 len
= same
->src_length
;
501 if (S_ISDIR(src
->i_mode
))
504 if (!S_ISREG(src
->i_mode
))
507 if (!file
->f_op
->remap_file_range
)
510 ret
= remap_verify_area(file
, off
, len
, false);
515 if (off
+ len
> i_size_read(src
))
518 /* Arbitrary 1G limit on a single dedupe request, can be raised. */
519 len
= min_t(u64
, len
, 1 << 30);
521 /* pre-format output fields to sane values */
522 for (i
= 0; i
< count
; i
++) {
523 same
->info
[i
].bytes_deduped
= 0ULL;
524 same
->info
[i
].status
= FILE_DEDUPE_RANGE_SAME
;
527 for (i
= 0, info
= same
->info
; i
< count
; i
++, info
++) {
528 struct fd dst_fd
= fdget(info
->dest_fd
);
529 struct file
*dst_file
= dst_fd
.file
;
532 info
->status
= -EBADF
;
536 if (info
->reserved
) {
537 info
->status
= -EINVAL
;
541 deduped
= vfs_dedupe_file_range_one(file
, off
, dst_file
,
542 info
->dest_offset
, len
,
543 REMAP_FILE_CAN_SHORTEN
);
544 if (deduped
== -EBADE
)
545 info
->status
= FILE_DEDUPE_RANGE_DIFFERS
;
546 else if (deduped
< 0)
547 info
->status
= deduped
;
549 info
->bytes_deduped
= len
;
554 if (fatal_signal_pending(current
))
559 EXPORT_SYMBOL(vfs_dedupe_file_range
);