// SPDX-License-Identifier: GPL-2.0

#include <linux/blkdev.h>
#include <linux/iversion.h>
#include "ctree.h"
#include "fs.h"
#include "messages.h"
#include "compression.h"
#include "delalloc-space.h"
#include "disk-io.h"
#include "reflink.h"
#include "transaction.h"
#include "subpage.h"
#include "accessors.h"
#include "file-item.h"
#include "file.h"
#include "super.h"

#define BTRFS_MAX_DEDUPE_LEN	SZ_16M
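
/*
 * Dedupe requests larger than this are split by btrfs_extent_same() below
 * into 16 MiB chunks, e.g. a 40 MiB request is serviced as two 16 MiB
 * chunks followed by an 8 MiB tail.
 */
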
static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
				     struct inode *inode,
				     u64 endoff,
				     const u64 destoff,
				     const u64 olen,
				     int no_time_update)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	inode_inc_iversion(inode);
	if (!no_time_update) {
		inode->i_mtime = inode_set_ctime_current(inode);
	}
	/*
	 * We round up to the block size at eof when determining which
	 * extents to clone above, but shouldn't round up the file size.
	 */
	if (endoff > destoff + olen)
		endoff = destoff + olen;
	if (endoff > inode->i_size) {
		i_size_write(inode, endoff);
		btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
	}

	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		goto out;
	}
	ret = btrfs_end_transaction(trans);
out:
	return ret;
}

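/*
 * Copy the data of an inline extent into a page of the destination inode,
 * decompressing it if needed, zero-filling the rest of the block and marking
 * the block dirty so that writeback later converts it into a regular extent.
 */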
static int copy_inline_to_page(struct btrfs_inode *inode,
			       const u64 file_offset,
			       char *inline_data,
			       const u64 size,
			       const u64 datal,
			       const u8 comp_type)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	const u32 block_size = fs_info->sectorsize;
	const u64 range_end = file_offset + block_size - 1;
	const size_t inline_size = size - btrfs_file_extent_calc_inline_size(0);
	char *data_start = inline_data + btrfs_file_extent_calc_inline_size(0);
	struct extent_changeset *data_reserved = NULL;
	struct page *page = NULL;
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	int ret;

	ASSERT(IS_ALIGNED(file_offset, block_size));

	/*
	 * We have flushed and locked the ranges of the source and destination
	 * inodes, we also have locked the inodes, so we are safe to do a
	 * reservation here. Also we must not do the reservation while holding
	 * a transaction open, otherwise we would deadlock.
	 */
	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, file_offset,
					   block_size);
	if (ret)
		goto out;

	page = find_or_create_page(mapping, file_offset >> PAGE_SHIFT,
				   btrfs_alloc_write_mask(mapping));
	if (!page) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	ret = set_page_extent_mapped(page);
	if (ret < 0)
		goto out_unlock;

	clear_extent_bit(&inode->io_tree, file_offset, range_end,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
			 NULL);
	ret = btrfs_set_extent_delalloc(inode, file_offset, range_end, 0, NULL);
	if (ret)
		goto out_unlock;

	/*
	 * After dirtying the page our caller will need to start a transaction,
	 * and if we are low on metadata free space, that can cause flushing of
	 * delalloc for all inodes in order to get metadata space released.
	 * However we are holding the range locked for the whole duration of
	 * the clone/dedupe operation, so we may deadlock if that happens and no
	 * other task releases enough space. So mark this inode as not being
	 * possible to flush to avoid such deadlock. We will clear that flag
	 * when we finish cloning all extents, since a transaction is started
	 * after finding each extent to clone.
	 */
	set_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &inode->runtime_flags);

	if (comp_type == BTRFS_COMPRESS_NONE) {
		memcpy_to_page(page, offset_in_page(file_offset), data_start,
			       datal);
	} else {
		ret = btrfs_decompress(comp_type, data_start, page,
				       offset_in_page(file_offset),
				       inline_size, datal);
		if (ret)
			goto out_unlock;
		flush_dcache_page(page);
	}

	/*
	 * If our inline data is smaller than the block/page size, then the
	 * remainder of the block/page is equivalent to zeroes. This happens
	 * after something like the following was done:
	 *
	 * $ xfs_io -f -c "pwrite -S 0xab 0 500" file
	 * $ sync  # (or fsync)
	 * $ xfs_io -c "falloc 0 4K" file
	 * $ xfs_io -c "pwrite -S 0xcd 4K 4K"
	 *
	 * So what's in the range [500, 4095] corresponds to zeroes.
	 */
	if (datal < block_size)
		memzero_page(page, datal, block_size - datal);

	btrfs_page_set_uptodate(fs_info, page, file_offset, block_size);
	btrfs_page_clear_checked(fs_info, page, file_offset, block_size);
	btrfs_page_set_dirty(fs_info, page, file_offset, block_size);
out_unlock:
	if (page) {
		unlock_page(page);
		put_page(page);
	}
	if (ret)
		btrfs_delalloc_release_space(inode, data_reserved, file_offset,
					     block_size, true);
	btrfs_delalloc_release_extents(inode, block_size);
out:
	extent_changeset_free(data_reserved);

	return ret;
}

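/*
 * Background: btrfs inlines file data in the btree only for small files
 * (bounded by the max_inline mount option, 2048 bytes by default), and the
 * inline data always covers the file range starting at offset 0.
 */
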
/*
 * Deal with cloning of inline extents. We try to copy the inline extent from
 * the source inode to the destination inode when possible. When not possible,
 * we copy the inline extent's data into the respective page of the
 * destination inode.
 */
static int clone_copy_inline_extent(struct inode *dst,
				    struct btrfs_path *path,
				    struct btrfs_key *new_key,
				    const u64 drop_start,
				    const u64 datal,
				    const u64 size,
				    const u8 comp_type,
				    char *inline_data,
				    struct btrfs_trans_handle **trans_out)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dst->i_sb);
	struct btrfs_root *root = BTRFS_I(dst)->root;
	const u64 aligned_end = ALIGN(new_key->offset + datal,
				      fs_info->sectorsize);
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_drop_extents_args drop_args = { 0 };
	int ret;
	struct btrfs_key key;

	if (new_key->offset > 0) {
		ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
					  inline_data, size, datal, comp_type);
		goto out;
	}

	key.objectid = btrfs_ino(BTRFS_I(dst));
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0) {
		return ret;
	} else if (ret > 0) {
		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				return ret;
			else if (ret > 0)
				goto copy_inline_extent;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid == btrfs_ino(BTRFS_I(dst)) &&
		    key.type == BTRFS_EXTENT_DATA_KEY) {
			/*
			 * There's an implicit hole at file offset 0, copy the
			 * inline extent's data to the page.
			 */
			ASSERT(key.offset > 0);
			goto copy_to_page;
		}
	} else if (i_size_read(dst) <= datal) {
		struct btrfs_file_extent_item *ei;

		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_file_extent_item);
		/*
		 * If it's an inline extent replace it with the source inline
		 * extent, otherwise copy the source inline extent data into
		 * the respective page at the destination inode.
		 */
		if (btrfs_file_extent_type(path->nodes[0], ei) ==
		    BTRFS_FILE_EXTENT_INLINE)
			goto copy_inline_extent;

		goto copy_to_page;
	}

copy_inline_extent:
	/*
	 * We have no extent items, or we have an extent at offset 0 which may
	 * or may not be inlined. All these cases are dealt the same way.
	 */
	if (i_size_read(dst) > datal) {
		/*
		 * At the destination offset 0 we have either a hole, a regular
		 * extent or an inline extent larger than the one we want to
		 * clone. Deal with all these cases by copying the inline extent
		 * data into the respective page at the destination inode.
		 */
		goto copy_to_page;
	}

	/*
	 * Release path before starting a new transaction so we don't hold locks
	 * that would confuse lockdep.
	 */
	btrfs_release_path(path);
	/*
	 * If we end up here it means we are copying the inline extent into a
	 * leaf of the destination inode. We know we will drop or adjust at
	 * most one extent item in the destination root.
	 *
	 * 1 unit - adjusting old extent (we may have to split it)
	 * 1 unit - add new extent
	 * 1 unit - inode update
	 */
	trans = btrfs_start_transaction(root, 3);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}
	drop_args.path = path;
	drop_args.start = drop_start;
	drop_args.end = aligned_end;
	drop_args.drop_cache = true;
	ret = btrfs_drop_extents(trans, root, BTRFS_I(dst), &drop_args);
	if (ret)
		goto out;
	ret = btrfs_insert_empty_item(trans, root, path, new_key, size);
	if (ret)
		goto out;

	write_extent_buffer(path->nodes[0], inline_data,
			    btrfs_item_ptr_offset(path->nodes[0],
						  path->slots[0]),
			    size);
	btrfs_update_inode_bytes(BTRFS_I(dst), datal, drop_args.bytes_found);
	btrfs_set_inode_full_sync(BTRFS_I(dst));
	ret = btrfs_inode_set_file_extent_range(BTRFS_I(dst), 0, aligned_end);
out:
	if (!ret && !trans) {
		/*
		 * No transaction here means we copied the inline extent into a
		 * page of the destination inode.
		 *
		 * 1 unit to update inode item
		 */
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
		}
	}
	if (ret && trans) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
	}
	if (!ret)
		*trans_out = trans;

	return ret;

copy_to_page:
	/*
	 * Release our path because we don't need it anymore and also because
	 * copy_inline_to_page() needs to reserve data and metadata, which may
	 * need to flush delalloc when we are low on available space and
	 * therefore cause a deadlock if writeback of an inline extent needs to
	 * write to the same leaf or an ordered extent completion needs to
	 * write to the same leaf.
	 */
	btrfs_release_path(path);

	ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
				  inline_data, size, datal, comp_type);
	goto out;
}

/*
 * Clone a range from a source inode to a destination inode.
 *
 * @src: Inode to clone from
 * @inode: Inode to clone to
 * @off: Offset within source to start clone from
 * @olen: Original length, passed by user, of range to clone
 * @olen_aligned: Block-aligned value of olen
 * @destoff: Offset within @inode to start clone
 * @no_time_update: Whether to update mtime/ctime on the target inode
 */
static int btrfs_clone(struct inode *src, struct inode *inode,
		       const u64 off, const u64 olen, const u64 olen_aligned,
		       const u64 destoff, int no_time_update)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_path *path = NULL;
	struct extent_buffer *leaf;
	struct btrfs_trans_handle *trans;
	char *buf = NULL;
	struct btrfs_key key;
	u32 nritems;
	int slot;
	int ret;
	const u64 len = olen_aligned;
	u64 last_dest_end = destoff;
	u64 prev_extent_end = off;

	ret = -ENOMEM;
	buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
	if (!buf)
		return ret;

	path = btrfs_alloc_path();
	if (!path) {
		kvfree(buf);
		return ret;
	}

	path->reada = READA_FORWARD;
	/* Clone data */
	key.objectid = btrfs_ino(BTRFS_I(src));
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = off;

	while (1) {
		struct btrfs_file_extent_item *extent;
		u64 extent_gen;
		int type;
		u32 size;
		struct btrfs_key new_key;
		u64 disko = 0, diskl = 0;
		u64 datao = 0, datal = 0;
		u8 comp;
		u64 drop_start;

		/* Note the key will change type as we walk through the tree */
		ret = btrfs_search_slot(NULL, BTRFS_I(src)->root, &key, path,
					0, 0);
		if (ret < 0)
			goto out;
		/*
		 * First search, if no extent item that starts at offset off was
		 * found but the previous item is an extent item, it's possible
		 * it might overlap our target range, therefore process it.
		 */
		if (key.offset == off && ret > 0 && path->slots[0] > 0) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0] - 1);
			if (key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}

		nritems = btrfs_header_nritems(path->nodes[0]);
process_slot:
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(BTRFS_I(src)->root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				break;
			nritems = btrfs_header_nritems(path->nodes[0]);
		}
		leaf = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type > BTRFS_EXTENT_DATA_KEY ||
		    key.objectid != btrfs_ino(BTRFS_I(src)))
			break;

		ASSERT(key.type == BTRFS_EXTENT_DATA_KEY);

		extent = btrfs_item_ptr(leaf, slot,
					struct btrfs_file_extent_item);
		extent_gen = btrfs_file_extent_generation(leaf, extent);
		comp = btrfs_file_extent_compression(leaf, extent);
		type = btrfs_file_extent_type(leaf, extent);
		if (type == BTRFS_FILE_EXTENT_REG ||
		    type == BTRFS_FILE_EXTENT_PREALLOC) {
			disko = btrfs_file_extent_disk_bytenr(leaf, extent);
			diskl = btrfs_file_extent_disk_num_bytes(leaf, extent);
			datao = btrfs_file_extent_offset(leaf, extent);
			datal = btrfs_file_extent_num_bytes(leaf, extent);
		} else if (type == BTRFS_FILE_EXTENT_INLINE) {
			/* Take upper bound, may be compressed */
			datal = btrfs_file_extent_ram_bytes(leaf, extent);
		}

		/*
		 * The first search might have left us at an extent item that
		 * ends before our target range's start; this can happen if we
		 * have holes and the NO_HOLES feature enabled.
		 *
		 * Subsequent searches may leave us on a file range we have
		 * processed before - this happens due to a race with ordered
		 * extent completion for a file range that is outside our source
		 * range, but that range was part of a file extent item that
		 * also covered a leading part of our source range.
		 */
		if (key.offset + datal <= prev_extent_end) {
			path->slots[0]++;
			goto process_slot;
		} else if (key.offset >= off + len) {
			break;
		}

		prev_extent_end = key.offset + datal;
		size = btrfs_item_size(leaf, slot);
		read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, slot),
				   size);

		btrfs_release_path(path);

		memcpy(&new_key, &key, sizeof(new_key));
		new_key.objectid = btrfs_ino(BTRFS_I(inode));
		if (off <= key.offset)
			new_key.offset = key.offset + destoff - off;
		else
			new_key.offset = destoff;

		/*
		 * Deal with a hole that doesn't have an extent item that
		 * represents it (NO_HOLES feature enabled).
		 * This hole is either in the middle of the cloning range or at
		 * the beginning (fully overlaps it or partially overlaps it).
		 */
		if (new_key.offset != last_dest_end)
			drop_start = last_dest_end;
		else
			drop_start = new_key.offset;

		if (type == BTRFS_FILE_EXTENT_REG ||
		    type == BTRFS_FILE_EXTENT_PREALLOC) {
			struct btrfs_replace_extent_info clone_info;

			/*
			 *    a  | --- range to clone ---|  b
			 * | ------------- extent ------------- |
			 */

			/* Subtract range b */
			if (key.offset + datal > off + len)
				datal = off + len - key.offset;

			/* Subtract range a */
			if (off > key.offset) {
				datao += off - key.offset;
				datal -= off - key.offset;
			}

			clone_info.disk_offset = disko;
			clone_info.disk_len = diskl;
			clone_info.data_offset = datao;
			clone_info.data_len = datal;
			clone_info.file_offset = new_key.offset;
			clone_info.extent_buf = buf;
			clone_info.is_new_extent = false;
			clone_info.update_times = !no_time_update;
			ret = btrfs_replace_file_extents(BTRFS_I(inode), path,
					drop_start, new_key.offset + datal - 1,
					&clone_info, &trans);
			if (ret)
				goto out;
		} else {
			ASSERT(type == BTRFS_FILE_EXTENT_INLINE);
			/*
			 * Inline extents always have to start at file offset 0
			 * and can never be bigger than the sector size. We can
			 * never clone only parts of an inline extent, since all
			 * reflink operations must start at a sector size aligned
			 * offset, and the length must be aligned too or end at
			 * the i_size (which implies the whole inlined data).
			 */
			ASSERT(key.offset == 0);
			ASSERT(datal <= fs_info->sectorsize);
			if (WARN_ON(type != BTRFS_FILE_EXTENT_INLINE) ||
			    WARN_ON(key.offset != 0) ||
			    WARN_ON(datal > fs_info->sectorsize)) {
				ret = -EUCLEAN;
				goto out;
			}

			ret = clone_copy_inline_extent(inode, path, &new_key,
						       drop_start, datal, size,
						       comp, buf, &trans);
			if (ret)
				goto out;
		}

		btrfs_release_path(path);

		/*
		 * Whenever we share an extent we update the last_reflink_trans
		 * of each inode to the current transaction. This is needed to
		 * make sure fsync does not log multiple checksum items with
		 * overlapping ranges (because some extent items might refer
		 * only to sections of the original extent). For the destination
		 * inode we do this regardless of the generation of the extents
		 * or even if they are inline extents or explicit holes, to make
		 * sure a full fsync does not skip them. For the source inode,
		 * we only need to update last_reflink_trans in case it's a new
		 * extent that is not a hole or an inline extent, to deal with
		 * the checksums problem on fsync.
		 */
		if (extent_gen == trans->transid && disko > 0)
			BTRFS_I(src)->last_reflink_trans = trans->transid;

		BTRFS_I(inode)->last_reflink_trans = trans->transid;

		last_dest_end = ALIGN(new_key.offset + datal,
				      fs_info->sectorsize);
		ret = clone_finish_inode_update(trans, inode, last_dest_end,
						destoff, olen, no_time_update);
		if (ret)
			goto out;
		if (new_key.offset + datal >= destoff + len)
			break;

		btrfs_release_path(path);
		key.offset = prev_extent_end;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}

		cond_resched();
	}
	ret = 0;

	if (last_dest_end < destoff + len) {
		/*
		 * We have an implicit hole that fully or partially overlaps our
		 * cloning range at its end. This means that we either have the
		 * NO_HOLES feature enabled or the implicit hole happened due to
		 * mixing buffered and direct IO writes against this file.
		 */
		btrfs_release_path(path);

		/*
		 * When using NO_HOLES and we are cloning a range that covers
		 * only a hole (no extents) into a range beyond the current
		 * i_size, punching a hole in the target range will not create
		 * an extent map defining a hole, because the range starts at or
		 * beyond current i_size. If the file previously had an i_size
		 * greater than the new i_size set by this clone operation, we
		 * need to make sure the next fsync is a full fsync, so that it
		 * detects and logs a hole covering a range from the current
		 * i_size to the new i_size. If the clone range covers extents,
		 * besides a hole, then we know the full sync flag was already
		 * set by previous calls to btrfs_replace_file_extents() that
		 * replaced file extent items.
		 */
		if (last_dest_end >= i_size_read(inode))
			btrfs_set_inode_full_sync(BTRFS_I(inode));

		ret = btrfs_replace_file_extents(BTRFS_I(inode), path,
				last_dest_end, destoff + len - 1, NULL, &trans);
		if (ret)
			goto out;

		ret = clone_finish_inode_update(trans, inode, destoff + len,
						destoff, olen, no_time_update);
	}

out:
	btrfs_free_path(path);
	kvfree(buf);

	clear_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &BTRFS_I(inode)->runtime_flags);

	return ret;
}

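/*
 * For reference, clone requests typically arrive here from userspace via
 * the FICLONE/FICLONERANGE ioctls or copy_file_range(), e.g.:
 *
 *   $ cp --reflink=always src dst
 *   $ xfs_io -c "reflink src 0 0 1M" dst
 */
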
static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
				       struct inode *inode2, u64 loff2, u64 len)
{
	unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1, NULL);
	unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1, NULL);
}

static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
				     struct inode *inode2, u64 loff2, u64 len)
{
	u64 range1_end = loff1 + len - 1;
	u64 range2_end = loff2 + len - 1;

	if (inode1 < inode2) {
		swap(inode1, inode2);
		swap(loff1, loff2);
		swap(range1_end, range2_end);
	} else if (inode1 == inode2 && loff2 < loff1) {
		swap(loff1, loff2);
		swap(range1_end, range2_end);
	}

	lock_extent(&BTRFS_I(inode1)->io_tree, loff1, range1_end, NULL);
	lock_extent(&BTRFS_I(inode2)->io_tree, loff2, range2_end, NULL);

	btrfs_assert_inode_range_clean(BTRFS_I(inode1), loff1, range1_end);
	btrfs_assert_inode_range_clean(BTRFS_I(inode2), loff2, range2_end);
}

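/*
 * As with the extent locks above, the two mmap locks are always taken in a
 * fixed (address) order, with SINGLE_DEPTH_NESTING for the second lock so
 * that lockdep accepts two locks of the same class, avoiding ABBA deadlocks
 * between concurrent reflink operations on the same pair of inodes.
 */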
static void btrfs_double_mmap_lock(struct inode *inode1, struct inode *inode2)
{
	if (inode1 < inode2)
		swap(inode1, inode2);
	down_write(&BTRFS_I(inode1)->i_mmap_lock);
	down_write_nested(&BTRFS_I(inode2)->i_mmap_lock, SINGLE_DEPTH_NESTING);
}

static void btrfs_double_mmap_unlock(struct inode *inode1, struct inode *inode2)
{
	up_write(&BTRFS_I(inode1)->i_mmap_lock);
	up_write(&BTRFS_I(inode2)->i_mmap_lock);
}

static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 len,
				   struct inode *dst, u64 dst_loff)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(src)->root->fs_info;
	const u64 bs = fs_info->sb->s_blocksize;
	int ret;

	/*
	 * Lock destination range to serialize with concurrent readahead() and
	 * source range to serialize with relocation.
	 */
	btrfs_double_extent_lock(src, loff, dst, dst_loff, len);
	ret = btrfs_clone(src, dst, loff, len, ALIGN(len, bs), dst_loff, 1);
	btrfs_double_extent_unlock(src, loff, dst, dst_loff, len);

	btrfs_btree_balance_dirty(fs_info);

	return ret;
}

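/*
 * Balancing dirty btree pages above may throttle the task, which matters
 * because btrfs_extent_same() below services a large dedupe request as a
 * loop of many such chunked clones.
 */
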
static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
			     struct inode *dst, u64 dst_loff)
{
	int ret = 0;
	u64 i, tail_len, chunk_count;
	struct btrfs_root *root_dst = BTRFS_I(dst)->root;

	spin_lock(&root_dst->root_item_lock);
	if (root_dst->send_in_progress) {
		btrfs_warn_rl(root_dst->fs_info,
"cannot deduplicate to root %llu while send operations are using it (%d in progress)",
			      root_dst->root_key.objectid,
			      root_dst->send_in_progress);
		spin_unlock(&root_dst->root_item_lock);
		return -EAGAIN;
	}
	root_dst->dedupe_in_progress++;
	spin_unlock(&root_dst->root_item_lock);

	tail_len = olen % BTRFS_MAX_DEDUPE_LEN;
	chunk_count = div_u64(olen, BTRFS_MAX_DEDUPE_LEN);

	for (i = 0; i < chunk_count; i++) {
		ret = btrfs_extent_same_range(src, loff, BTRFS_MAX_DEDUPE_LEN,
					      dst, dst_loff);
		if (ret)
			goto out;

		loff += BTRFS_MAX_DEDUPE_LEN;
		dst_loff += BTRFS_MAX_DEDUPE_LEN;
	}

	if (tail_len > 0)
		ret = btrfs_extent_same_range(src, loff, tail_len, dst, dst_loff);
out:
	spin_lock(&root_dst->root_item_lock);
	root_dst->dedupe_in_progress--;
	spin_unlock(&root_dst->root_item_lock);

	return ret;
}

static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
				      u64 off, u64 olen, u64 destoff)
{
	struct inode *inode = file_inode(file);
	struct inode *src = file_inode(file_src);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int ret;
	int wb_ret;
	u64 len = olen;
	u64 bs = fs_info->sb->s_blocksize;

	/*
	 * VFS's generic_remap_file_range_prep() protects us from cloning the
	 * eof block into the middle of a file, which would result in corruption
	 * if the file size is not blocksize aligned. So we don't need to check
	 * for that case here.
	 */
	if (off + len == src->i_size)
		len = ALIGN(src->i_size, bs) - off;

	if (destoff > inode->i_size) {
		const u64 wb_start = ALIGN_DOWN(inode->i_size, bs);

		ret = btrfs_cont_expand(BTRFS_I(inode), inode->i_size, destoff);
		if (ret)
			return ret;
		/*
		 * We may have truncated the last block if the inode's size is
		 * not sector size aligned, so we need to wait for writeback to
		 * complete before proceeding further, otherwise we can race
		 * with cloning and attempt to increment a reference to an
		 * extent that no longer exists (writeback completed right after
		 * we found the previous extent covering eof and before we
		 * attempted to increment its reference count).
		 */
		ret = btrfs_wait_ordered_range(inode, wb_start,
					       destoff - wb_start);
		if (ret)
			return ret;
	}

	/*
	 * Lock destination range to serialize with concurrent readahead() and
	 * source range to serialize with relocation.
	 */
	btrfs_double_extent_lock(src, off, inode, destoff, len);
	ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
	btrfs_double_extent_unlock(src, off, inode, destoff, len);

	/*
	 * We may have copied an inline extent into a page of the destination
	 * range, so wait for writeback to complete before truncating pages
	 * from the page cache. This is a rare case.
	 */
	wb_ret = btrfs_wait_ordered_range(inode, destoff, len);
	ret = ret ? ret : wb_ret;
	/*
	 * Truncate page cache pages so that future reads will see the cloned
	 * data immediately and not the previous data.
	 */
	truncate_inode_pages_range(&inode->i_data,
				   round_down(destoff, PAGE_SIZE),
				   round_up(destoff + len, PAGE_SIZE) - 1);

	btrfs_btree_balance_dirty(fs_info);

	return ret;
}

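/*
 * Runs with the inodes and their mmap locks already held by
 * btrfs_remap_file_range(); does the btrfs specific flushing and waiting
 * before deferring the generic checks to generic_remap_file_range_prep().
 */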
static int btrfs_remap_file_range_prep(struct file *file_in, loff_t pos_in,
				       struct file *file_out, loff_t pos_out,
				       loff_t *len, unsigned int remap_flags)
{
	struct inode *inode_in = file_inode(file_in);
	struct inode *inode_out = file_inode(file_out);
	u64 bs = BTRFS_I(inode_out)->root->fs_info->sb->s_blocksize;
	u64 wb_len;
	int ret;

	if (!(remap_flags & REMAP_FILE_DEDUP)) {
		struct btrfs_root *root_out = BTRFS_I(inode_out)->root;

		if (btrfs_root_readonly(root_out))
			return -EROFS;

		ASSERT(inode_in->i_sb == inode_out->i_sb);
	}

	/* Don't make the dst file partly checksummed */
	if ((BTRFS_I(inode_in)->flags & BTRFS_INODE_NODATASUM) !=
	    (BTRFS_I(inode_out)->flags & BTRFS_INODE_NODATASUM)) {
		return -EINVAL;
	}

	/*
	 * Now that the inodes are locked, we need to start writeback ourselves
	 * and can not rely on the writeback from the VFS's generic helper
	 * generic_remap_file_range_prep() because:
	 *
	 * 1) For compression we must call filemap_fdatawrite_range() twice
	 *    (btrfs_fdatawrite_range() does it for us), and the generic
	 *    helper only calls it once;
	 *
	 * 2) filemap_fdatawrite_range(), called by the generic helper, only
	 *    waits for the writeback to complete, i.e. for IO to be done, and
	 *    not for the ordered extents to complete. We need to wait for them
	 *    to complete so that new file extent items are in the fs tree.
	 */
	if (*len == 0 && !(remap_flags & REMAP_FILE_DEDUP))
		wb_len = ALIGN(inode_in->i_size, bs) - ALIGN_DOWN(pos_in, bs);
	else
		wb_len = ALIGN(*len, bs);

	/*
	 * Workaround to make sure NOCOW buffered writes reach disk as NOCOW.
	 *
	 * Btrfs' back references do not have a block level granularity, they
	 * work at the whole extent level.
	 * A NOCOW buffered write without data space reserved may not be able
	 * to fall back to CoW due to lack of data space, thus could cause
	 * data loss.
	 *
	 * Here we take a shortcut by flushing the whole inode, so that all
	 * nocow writes should reach disk as nocow before we increase the
	 * reference of the extent. We could do better by only flushing NOCOW
	 * data, but that needs extra accounting.
	 *
	 * Also we don't need to check ASYNC_EXTENT, as async extents will be
	 * CoWed anyway, not affecting the nocow part.
	 */
	ret = filemap_flush(inode_in->i_mapping);
	if (ret < 0)
		return ret;

	ret = btrfs_wait_ordered_range(inode_in, ALIGN_DOWN(pos_in, bs),
				       wb_len);
	if (ret < 0)
		return ret;
	ret = btrfs_wait_ordered_range(inode_out, ALIGN_DOWN(pos_out, bs),
				       wb_len);
	if (ret < 0)
		return ret;

	return generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
					     len, remap_flags);
}

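/* Return true if writes through @file must be persisted synchronously. */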
static bool file_sync_write(const struct file *file)
{
	if (file->f_flags & (__O_SYNC | O_DSYNC))
		return true;
	if (IS_SYNC(file_inode(file)))
		return true;

	return false;
}

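/*
 * Entry point for the VFS ->remap_file_range operation, servicing the
 * FICLONE/FICLONERANGE and FIDEDUPERANGE ioctls. Returns the number of bytes
 * remapped on success or a negative errno on failure.
 */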
loff_t btrfs_remap_file_range(struct file *src_file, loff_t off,
			      struct file *dst_file, loff_t destoff, loff_t len,
			      unsigned int remap_flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *dst_inode = file_inode(dst_file);
	bool same_inode = dst_inode == src_inode;
	int ret;

	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
		return -EINVAL;

	if (same_inode) {
		btrfs_inode_lock(BTRFS_I(src_inode), BTRFS_ILOCK_MMAP);
	} else {
		lock_two_nondirectories(src_inode, dst_inode);
		btrfs_double_mmap_lock(src_inode, dst_inode);
	}

	ret = btrfs_remap_file_range_prep(src_file, off, dst_file, destoff,
					  &len, remap_flags);
	if (ret < 0 || len == 0)
		goto out_unlock;

	if (remap_flags & REMAP_FILE_DEDUP)
		ret = btrfs_extent_same(src_inode, off, len, dst_inode, destoff);
	else
		ret = btrfs_clone_files(dst_file, src_file, off, len, destoff);

out_unlock:
	if (same_inode) {
		btrfs_inode_unlock(BTRFS_I(src_inode), BTRFS_ILOCK_MMAP);
	} else {
		btrfs_double_mmap_unlock(src_inode, dst_inode);
		unlock_two_nondirectories(src_inode, dst_inode);
	}

	/*
	 * If either the source or the destination file was opened with O_SYNC,
	 * O_DSYNC or has the S_SYNC attribute, fsync both the destination and
	 * source files/ranges, so that a successful return (0) followed by a
	 * power failure still results in the reflinked data being readable
	 * from both files.
	 */
	if (ret == 0 && len > 0 &&
	    (file_sync_write(src_file) || file_sync_write(dst_file))) {
		ret = btrfs_sync_file(src_file, off, off + len - 1, 0);
		if (ret == 0)
			ret = btrfs_sync_file(dst_file, destoff,
					      destoff + len - 1, 0);
	}

	return ret < 0 ? ret : len;
}