// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <crypto/hash.h>
#include <linux/kernel.h>
#include <linux/blk-cgroup.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include <linux/uio.h>
#include <linux/magic.h>
#include <linux/iversion.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/sched/mm.h>
#include <linux/iomap.h>
#include <asm/unaligned.h>
#include <linux/fsverity.h>
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "compression.h"
#include "free-space-cache.h"
#include "delalloc-space.h"
#include "block-group.h"
#include "space-info.h"
#include "inode-item.h"
#include "accessors.h"
#include "extent-tree.h"
#include "root-tree.h"
#include "file-item.h"
#include "uuid-tree.h"
#include "relocation.h"
struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};
struct btrfs_dio_data {
	ssize_t submitted;
	struct extent_changeset *data_reserved;
	struct btrfs_ordered_extent *ordered;
	bool data_space_reserved;
	bool nocow_done;
};
struct btrfs_dio_private {
	/* Range of logical addresses covered by the bio. */
	u64 file_offset;
	u32 bytes;

	/* This must be last */
	struct btrfs_bio bbio;
};

static struct bio_set btrfs_dio_bioset;
struct btrfs_rename_ctx {
	/* Output field. Stores the index number of the old directory entry. */
	u64 index;
};
/*
 * Used by data_reloc_print_warning_inode() to pass needed info for filename
 * resolution and output of error message.
 */
struct data_reloc_warn {
	struct btrfs_path path;
	struct btrfs_fs_info *fs_info;
	u64 extent_item_size;
	u64 logical;
	int mirror_num;
};
static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct file_operations btrfs_dir_file_operations;

static struct kmem_cache *btrfs_inode_cachep;
static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback);
static noinline int cow_file_range(struct btrfs_inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock,
				   u64 *done_offset);
static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
				       u64 len, u64 orig_start, u64 block_start,
				       u64 block_len, u64 orig_block_len,
				       u64 ram_bytes, int compress_type,
				       int type);
static int data_reloc_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
					  u64 root, void *warn_ctx)
{
	struct data_reloc_warn *warn = warn_ctx;
	struct btrfs_fs_info *fs_info = warn->fs_info;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key key;
	unsigned int nofs_flag;
	u32 nlink;
	int ret;

	local_root = btrfs_get_fs_root(fs_info, root, true);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	/* This makes the path point to (inum INODE_ITEM ioff). */
	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, local_root, &key, &warn->path, 0, 0);
	if (ret) {
		btrfs_put_root(local_root);
		btrfs_release_path(&warn->path);
		goto err;
	}

	eb = warn->path.nodes[0];
	inode_item = btrfs_item_ptr(eb, warn->path.slots[0], struct btrfs_inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(&warn->path);

	nofs_flag = memalloc_nofs_save();
	ipath = init_ipath(4096, local_root, &warn->path);
	memalloc_nofs_restore(nofs_flag);
	if (IS_ERR(ipath)) {
		btrfs_put_root(local_root);
		ret = PTR_ERR(ipath);
		ipath = NULL;
		/*
		 * -ENOMEM, not a critical error, just output a generic error
		 * and continue.
		 */
		btrfs_warn(fs_info,
"checksum error at logical %llu mirror %u root %llu, inode %llu offset %llu",
			   warn->logical, warn->mirror_num, root, inum, offset);
		return ret;
	}
	ret = paths_from_inode(inum, ipath);
	if (ret < 0)
		goto err;

	/*
	 * We deliberately ignore the bit ipath might have been too small to
	 * hold all of the paths here.
	 */
	for (int i = 0; i < ipath->fspath->elem_cnt; i++) {
		btrfs_warn_rl(fs_info,
"checksum error at logical %llu mirror %u root %llu inode %llu offset %llu length %u links %u (path: %s)",
			      warn->logical, warn->mirror_num, root, inum, offset,
			      fs_info->sectorsize, nlink,
			      (char *)(unsigned long)ipath->fspath->val[i]);
	}

	btrfs_put_root(local_root);
	free_ipath(ipath);
	return 0;

err:
	btrfs_warn_rl(fs_info,
"checksum error at logical %llu mirror %u root %llu inode %llu offset %llu, path resolving failed with ret=%d",
		      warn->logical, warn->mirror_num, root, inum, offset, ret);

	free_ipath(ipath);
	return ret;
}
/*
 * Do extra user-friendly error output (e.g. lookup all the affected files).
 *
 * Return true if we succeeded doing the backref lookup.
 * Return false if such lookup failed, and we have to fall back to the old
 * error message.
 */
static void print_data_reloc_error(const struct btrfs_inode *inode, u64 file_off,
				   const u8 *csum, const u8 *csum_expected,
				   int mirror_num)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_path path = { 0 };
	struct btrfs_key found_key = { 0 };
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	const u32 csum_size = fs_info->csum_size;
	u64 logical;
	u64 flags = 0;
	u32 item_size;
	int ret;

	mutex_lock(&fs_info->reloc_mutex);
	logical = btrfs_get_reloc_bg_bytenr(fs_info);
	mutex_unlock(&fs_info->reloc_mutex);

	if (logical == U64_MAX) {
		btrfs_warn_rl(fs_info, "has data reloc tree but no running relocation");
		btrfs_warn_rl(fs_info,
"csum failed root %lld ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			inode->root->root_key.objectid, btrfs_ino(inode), file_off,
			CSUM_FMT_VALUE(csum_size, csum),
			CSUM_FMT_VALUE(csum_size, csum_expected),
			mirror_num);
		return;
	}

	logical += file_off;
	btrfs_warn_rl(fs_info,
"csum failed root %lld ino %llu off %llu logical %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
		inode->root->root_key.objectid,
		btrfs_ino(inode), file_off, logical,
		CSUM_FMT_VALUE(csum_size, csum),
		CSUM_FMT_VALUE(csum_size, csum_expected),
		mirror_num);

	ret = extent_from_logical(fs_info, logical, &path, &found_key, &flags);
	if (ret < 0) {
		btrfs_err_rl(fs_info, "failed to lookup extent item for logical %llu: %d",
			     logical, ret);
		return;
	}
	eb = path.nodes[0];
	ei = btrfs_item_ptr(eb, path.slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size(eb, path.slots[0]);
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		unsigned long ptr = 0;
		u64 ref_root;
		u8 ref_level;

		while (true) {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			if (ret < 0) {
				btrfs_warn_rl(fs_info,
				"failed to resolve tree backref for logical %llu: %d",
					      logical, ret);
				break;
			}
			if (ret > 0)
				break;

			btrfs_warn_rl(fs_info,
"csum error at logical %llu mirror %u: metadata %s (level %d) in tree %llu",
				logical, mirror_num,
				(ref_level ? "node" : "leaf"),
				ref_level, ref_root);
		}
		btrfs_release_path(&path);
	} else {
		struct btrfs_backref_walk_ctx ctx = { 0 };
		struct data_reloc_warn reloc_warn = { 0 };

		btrfs_release_path(&path);

		ctx.bytenr = found_key.objectid;
		ctx.extent_item_pos = logical - found_key.objectid;
		ctx.fs_info = fs_info;

		reloc_warn.logical = logical;
		reloc_warn.extent_item_size = found_key.offset;
		reloc_warn.mirror_num = mirror_num;
		reloc_warn.fs_info = fs_info;

		iterate_extent_inodes(&ctx, true,
				      data_reloc_print_warning_inode, &reloc_warn);
	}
}
static void __cold btrfs_print_data_csum_error(struct btrfs_inode *inode,
		u64 logical_start, u8 *csum, u8 *csum_expected, int mirror_num)
{
	struct btrfs_root *root = inode->root;
	const u32 csum_size = root->fs_info->csum_size;

	/* For data reloc tree, it's better to do a backref lookup instead. */
	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
		return print_data_reloc_error(inode, logical_start, csum,
					      csum_expected, mirror_num);

	/* Output without objectid, which is more meaningful */
	if (root->root_key.objectid >= BTRFS_LAST_FREE_OBJECTID) {
		btrfs_warn_rl(root->fs_info,
"csum failed root %lld ino %lld off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			root->root_key.objectid, btrfs_ino(inode),
			logical_start,
			CSUM_FMT_VALUE(csum_size, csum),
			CSUM_FMT_VALUE(csum_size, csum_expected),
			mirror_num);
	} else {
		btrfs_warn_rl(root->fs_info,
"csum failed root %llu ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			root->root_key.objectid, btrfs_ino(inode),
			logical_start,
			CSUM_FMT_VALUE(csum_size, csum),
			CSUM_FMT_VALUE(csum_size, csum_expected),
			mirror_num);
	}
}
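/*
 * Illustrative note, not from the original file: with the default crc32c
 * checksums (csum_size == 4), the ratelimited warnings above render roughly
 * as
 *
 *   csum failed root 5 ino 257 off 4096 csum 0x8941f998 expected csum
 *   0x93c1f838 mirror 1
 *
 * where the hex values here are made up; CSUM_FMT/CSUM_FMT_VALUE expand to
 * the right number of bytes for the configured checksum algorithm.
 */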
/*
 * btrfs_inode_lock - lock inode i_rwsem based on arguments passed
 *
 * ilock_flags can have the following bit set:
 *
 * BTRFS_ILOCK_SHARED - acquire a shared lock on the inode
 * BTRFS_ILOCK_TRY - try to acquire the lock, if fails on first attempt
 *		     return -EAGAIN
 * BTRFS_ILOCK_MMAP - acquire a write lock on the i_mmap_lock
 */
int btrfs_inode_lock(struct btrfs_inode *inode, unsigned int ilock_flags)
{
	if (ilock_flags & BTRFS_ILOCK_SHARED) {
		if (ilock_flags & BTRFS_ILOCK_TRY) {
			if (!inode_trylock_shared(&inode->vfs_inode))
				return -EAGAIN;
			else
				return 0;
		}
		inode_lock_shared(&inode->vfs_inode);
	} else {
		if (ilock_flags & BTRFS_ILOCK_TRY) {
			if (!inode_trylock(&inode->vfs_inode))
				return -EAGAIN;
			else
				return 0;
		}
		inode_lock(&inode->vfs_inode);
	}
	if (ilock_flags & BTRFS_ILOCK_MMAP)
		down_write(&inode->i_mmap_lock);
	return 0;
}
/*
 * btrfs_inode_unlock - unlock inode i_rwsem
 *
 * ilock_flags should contain the same bits set as passed to btrfs_inode_lock()
 * to decide whether the lock acquired is shared or exclusive.
 */
void btrfs_inode_unlock(struct btrfs_inode *inode, unsigned int ilock_flags)
{
	if (ilock_flags & BTRFS_ILOCK_MMAP)
		up_write(&inode->i_mmap_lock);
	if (ilock_flags & BTRFS_ILOCK_SHARED)
		inode_unlock_shared(&inode->vfs_inode);
	else
		inode_unlock(&inode->vfs_inode);
}
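/*
 * Illustrative usage sketch, not from the original file, pairing the two
 * helpers above. The flags passed to btrfs_inode_unlock() must match the
 * ones used for btrfs_inode_lock():
 *
 *	ret = btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED | BTRFS_ILOCK_TRY);
 *	if (ret)
 *		return ret;	// -EAGAIN, the lock is contended
 *	// ... read-only work on the inode ...
 *	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
 */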
/*
 * Cleanup all submitted ordered extents in specified range to handle errors
 * from the btrfs_run_delalloc_range() callback.
 *
 * NOTE: caller must ensure that when an error happens, it can not call
 * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
 * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata
 * to be released, which we want to happen only when finishing the ordered
 * extent (btrfs_finish_ordered_io()).
 */
static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
						 struct page *locked_page,
						 u64 offset, u64 bytes)
{
	unsigned long index = offset >> PAGE_SHIFT;
	unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
	u64 page_start = 0, page_end = 0;
	struct page *page;

	if (locked_page) {
		page_start = page_offset(locked_page);
		page_end = page_start + PAGE_SIZE - 1;
	}

	while (index <= end_index) {
		/*
		 * For locked page, we will call end_extent_writepage() on it
		 * in run_delalloc_range() for the error handling.  That
		 * end_extent_writepage() function will call
		 * btrfs_mark_ordered_io_finished() to clear page Ordered and
		 * run the ordered extent accounting.
		 *
		 * Here we can't just clear the Ordered bit, or
		 * btrfs_mark_ordered_io_finished() would skip the accounting
		 * for the page range, and the ordered extent will never finish.
		 */
		if (locked_page && index == (page_start >> PAGE_SHIFT)) {
			index++;
			continue;
		}
		page = find_get_page(inode->vfs_inode.i_mapping, index);
		index++;
		if (!page)
			continue;

		/*
		 * Here we just clear all Ordered bits for every page in the
		 * range, then btrfs_mark_ordered_io_finished() will handle
		 * the ordered extent accounting for the range.
		 */
		btrfs_page_clamp_clear_ordered(inode->root->fs_info, page,
					       offset, bytes);
		put_page(page);
	}

	if (locked_page) {
		/* The locked page covers the full range, nothing needs to be done */
		if (bytes + offset <= page_start + PAGE_SIZE)
			return;
		/*
		 * In case this page belongs to the delalloc range being
		 * instantiated then skip it, since the first page of a range is
		 * going to be properly cleaned up by the caller of
		 * run_delalloc_range
		 */
		if (page_start >= offset && page_end <= (offset + bytes - 1)) {
			bytes = offset + bytes - page_offset(locked_page) - PAGE_SIZE;
			offset = page_offset(locked_page) + PAGE_SIZE;
		}
	}

	return btrfs_mark_ordered_io_finished(inode, NULL, offset, bytes, false);
}
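/*
 * Worked example for the locked_page trimming above (illustrative, assuming
 * 4K pages): cleaning up offset=0, bytes=16K with locked_page covering
 * [0, 4K) skips the locked page in the loop and shifts the final
 * btrfs_mark_ordered_io_finished() call to offset=4K, bytes=12K, leaving the
 * locked page itself to be finished by the caller via end_extent_writepage().
 */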
static int btrfs_dirty_inode(struct btrfs_inode *inode);

static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct btrfs_new_inode_args *args)
{
	int err;

	if (args->default_acl) {
		err = __btrfs_set_acl(trans, args->inode, args->default_acl,
				      ACL_TYPE_DEFAULT);
		if (err)
			return err;
	}
	if (args->acl) {
		err = __btrfs_set_acl(trans, args->inode, args->acl, ACL_TYPE_ACCESS);
		if (err)
			return err;
	}
	if (!args->default_acl && !args->acl)
		cache_no_acl(args->inode);
	return btrfs_xattr_security_init(trans, args->inode, args->dir,
					 &args->dentry->d_name);
}
/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_path *path,
				struct btrfs_inode *inode, bool extent_inserted,
				size_t size, size_t compressed_size,
				int compress_type,
				struct page **compressed_pages,
				bool update_i_size)
{
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int ret;
	size_t cur_size = size;
	u64 i_size;

	ASSERT((compressed_size > 0 && compressed_pages) ||
	       (compressed_size == 0 && !compressed_pages));

	if (compressed_size && compressed_pages)
		cur_size = compressed_size;

	if (!extent_inserted) {
		struct btrfs_key key;
		size_t datasize;

		key.objectid = btrfs_ino(inode);
		key.offset = 0;
		key.type = BTRFS_EXTENT_DATA_KEY;

		datasize = btrfs_file_extent_calc_inline_size(cur_size);
		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      datasize);
		if (ret)
			goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (compress_type != BTRFS_COMPRESS_NONE) {
		struct page *cpage;
		int i = 0;

		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min_t(unsigned long, compressed_size,
					 PAGE_SIZE);

			kaddr = kmap_local_page(cpage);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap_local(kaddr);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  compress_type);
	} else {
		page = find_get_page(inode->vfs_inode.i_mapping, 0);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_local_page(page);
		write_extent_buffer(leaf, kaddr, ptr, size);
		kunmap_local(kaddr);
		put_page(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	/*
	 * We align size to sectorsize for inline extents just for simplicity
	 * sake.
	 */
	ret = btrfs_inode_set_file_extent_range(inode, 0,
					ALIGN(size, root->fs_info->sectorsize));
	if (ret)
		goto fail;

	/*
	 * We're an inline extent, so nobody can extend the file past i_size
	 * without locking a page we already have locked.
	 *
	 * We must do any i_size and inode updates before we unlock the pages.
	 * Otherwise we could end up racing with unlink.
	 */
	i_size = i_size_read(&inode->vfs_inode);
	if (update_i_size && size > i_size) {
		i_size_write(&inode->vfs_inode, size);
		i_size = size;
	}
	inode->disk_i_size = i_size;

fail:
	return ret;
}
/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 size,
					  size_t compressed_size,
					  int compress_type,
					  struct page **compressed_pages,
					  bool update_i_size)
{
	struct btrfs_drop_extents_args drop_args = { 0 };
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	u64 data_len = (compressed_size ?: size);
	int ret;
	struct btrfs_path *path;

	/*
	 * We can create an inline extent if it ends at or beyond the current
	 * i_size, is no larger than a sector (decompressed), and the (possibly
	 * compressed) data fits in a leaf and the configured maximum inline
	 * size.
	 */
	if (size < i_size_read(&inode->vfs_inode) ||
	    size > fs_info->sectorsize ||
	    data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
	    data_len > fs_info->max_inline)
		return 1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	trans->block_rsv = &inode->block_rsv;

	drop_args.path = path;
	drop_args.start = 0;
	drop_args.end = fs_info->sectorsize;
	drop_args.drop_cache = true;
	drop_args.replace_extent = true;
	drop_args.extent_item_size = btrfs_file_extent_calc_inline_size(data_len);
	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = insert_inline_extent(trans, path, inode, drop_args.extent_inserted,
				   size, compressed_size, compress_type,
				   compressed_pages, update_i_size);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	btrfs_update_inode_bytes(inode, size, drop_args.bytes_found);
	ret = btrfs_update_inode(trans, root, inode);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	btrfs_set_inode_full_sync(inode);
out:
	/*
	 * Don't forget to free the reserved space, as for inlined extent
	 * it won't count as data extent, free them directly here.
	 * And at reserve time, it's always aligned to page size, so
	 * just free one page here.
	 */
	btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE);
	btrfs_free_path(path);
	btrfs_end_transaction(trans);
	return ret;
}
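/*
 * Worked example for the size gate above (illustrative, assuming common
 * defaults of a 4K sectorsize and max_inline == 2048): a 1500 byte file
 * written at offset 0 passes all four checks and is stored inline in the
 * btree leaf, while a 3000 byte file fails the data_len > fs_info->max_inline
 * check and gets a regular data extent instead.
 */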
struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
	int compress_type;
	struct list_head list;
};

struct async_chunk {
	struct btrfs_inode *inode;
	struct page *locked_page;
	u64 start;
	u64 end;
	blk_opf_t write_flags;
	struct list_head extents;
	struct cgroup_subsys_state *blkcg_css;
	struct btrfs_work work;
	struct async_cow *async_cow;
};

struct async_cow {
	atomic_t num_chunks;
	struct async_chunk chunks[];
};
static noinline int add_async_extent(struct async_chunk *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages,
				     int compress_type)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	BUG_ON(!async_extent); /* -ENOMEM */
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->pages = pages;
	async_extent->nr_pages = nr_pages;
	async_extent->compress_type = compress_type;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}
/*
 * Check if the inode needs to be submitted to compression, based on mount
 * options, defragmentation, properties or heuristics.
 */
static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
				      u64 end)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	if (!btrfs_inode_can_compress(inode)) {
		WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
			KERN_ERR "BTRFS: unexpected compression for ino %llu\n",
			btrfs_ino(inode));
		return 0;
	}
	/*
	 * Special check for subpage.
	 *
	 * We lock the full page then run each delalloc range in the page, thus
	 * for the following case, we will hit some subpage specific corner case:
	 *
	 * 0		32K		64K
	 * |	|///////|	|///////|
	 *		\- A		\- B
	 *
	 * In above case, both range A and range B will try to unlock the full
	 * page [0, 64K), causing the one finished later will have page
	 * unlocked already, triggering various page lock requirement BUG_ON()s.
	 *
	 * So here we add an artificial limit that subpage compression can only
	 * happen if the range is fully page aligned.
	 *
	 * In theory we only need to ensure the first page is fully covered, but
	 * the tailing partial page will be locked until the full compression
	 * finishes, delaying the write of other range.
	 *
	 * TODO: Make btrfs_run_delalloc_range() to lock all delalloc range
	 * first to prevent any submitted async extent to unlock the full page.
	 * By this, we can ensure for subpage case that only the last async_cow
	 * will unlock the full page.
	 */
	if (fs_info->sectorsize < PAGE_SIZE) {
		if (!PAGE_ALIGNED(start) ||
		    !PAGE_ALIGNED(end + 1))
			return 0;
	}

	/* force compress */
	if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
		return 1;
	/* defrag ioctl */
	if (inode->defrag_compress)
		return 1;
	/* bad compression ratios */
	if (inode->flags & BTRFS_INODE_NOCOMPRESS)
		return 0;
	if (btrfs_test_opt(fs_info, COMPRESS) ||
	    inode->flags & BTRFS_INODE_COMPRESS ||
	    inode->prop_compress)
		return btrfs_compress_heuristic(&inode->vfs_inode, start, end);
	return 0;
}
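/*
 * Worked example for the subpage gate above (illustrative): with a 4K
 * sectorsize on a 64K page machine, a delalloc range [16K, 48K) is rejected
 * for compression because neither boundary is page aligned, while [0, 64K)
 * (start == 0, end + 1 == 64K) passes.
 */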
static inline void inode_should_defrag(struct btrfs_inode *inode,
		u64 start, u64 end, u64 num_bytes, u32 small_write)
{
	/* If this is a small write inside eof, kick off a defrag */
	if (num_bytes < small_write &&
	    (start > 0 || end + 1 < inode->disk_i_size))
		btrfs_add_inode_defrag(NULL, inode, small_write);
}
/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that the flusher thread sent them
 * down.
 */
static noinline int compress_file_range(struct async_chunk *async_chunk)
{
	struct btrfs_inode *inode = async_chunk->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 blocksize = fs_info->sectorsize;
	u64 start = async_chunk->start;
	u64 end = async_chunk->end;
	u64 actual_end;
	u64 i_size;
	int ret = 0;
	struct page **pages = NULL;
	unsigned long nr_pages;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	int i;
	int will_compress;
	int compress_type = fs_info->compress_type;
	int compressed_extents = 0;
	int redirty = 0;

	inode_should_defrag(inode, start, end, end - start + 1, SZ_16K);

	/*
	 * We need to save i_size before now because it could change in between
	 * us evaluating the size and assigning it.  This is because we lock and
	 * unlock the page in truncate and fallocate, and then modify the i_size
	 * later on.
	 *
	 * The barriers are to emulate READ_ONCE, remove that once i_size_read
	 * is fixed.
	 */
	barrier();
	i_size = i_size_read(&inode->vfs_inode);
	barrier();
	actual_end = min_t(u64, i_size, end + 1);
again:
	will_compress = 0;
	nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
	nr_pages = min_t(unsigned long, nr_pages, BTRFS_MAX_COMPRESSED_PAGES);

	/*
	 * we don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time.  So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/*
	 * Skip compression for a small file range(<=blocksize) that
	 * isn't an inline extent, since it doesn't save disk space at all.
	 */
	if (total_compressed <= blocksize &&
	   (start > 0 || end + 1 < inode->disk_i_size))
		goto cleanup_and_bail_uncompressed;

	/*
	 * For subpage case, we require full page alignment for the sector
	 * aligned range.
	 * Thus we must also check against @actual_end, not just @end.
	 */
	if (blocksize < PAGE_SIZE) {
		if (!PAGE_ALIGNED(start) ||
		    !PAGE_ALIGNED(round_up(actual_end, blocksize)))
			goto cleanup_and_bail_uncompressed;
	}

	total_compressed = min_t(unsigned long, total_compressed,
			BTRFS_MAX_UNCOMPRESSED);
	total_in = 0;
	ret = 0;

	/*
	 * we do compression for mount -o compress and when the
	 * inode has not been flagged as nocompress.  This flag can
	 * change at any time if we discover bad compression ratios.
	 */
	if (inode_need_compress(inode, start, end)) {
		WARN_ON(pages);
		pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
		if (!pages) {
			/* just bail out to the uncompressed code */
			nr_pages = 0;
			goto cont;
		}

		if (inode->defrag_compress)
			compress_type = inode->defrag_compress;
		else if (inode->prop_compress)
			compress_type = inode->prop_compress;

		/*
		 * we need to call clear_page_dirty_for_io on each
		 * page in the range.  Otherwise applications with the file
		 * mmap'd can wander in and change the page contents while
		 * we are compressing them.
		 *
		 * If the compression fails for any reason, we set the pages
		 * dirty again later on.
		 *
		 * Note that the remaining part is redirtied, the start pointer
		 * has moved, the end is the original one.
		 */
		if (!redirty) {
			extent_range_clear_dirty_for_io(&inode->vfs_inode, start, end);
			redirty = 1;
		}

		/* Compression level is applied here and only here */
		ret = btrfs_compress_pages(
			compress_type | (fs_info->compress_level << 4),
					   inode->vfs_inode.i_mapping, start,
					   pages,
					   &nr_pages,
					   &total_in,
					   &total_compressed);

		if (!ret) {
			unsigned long offset = offset_in_page(total_compressed);
			struct page *page = pages[nr_pages - 1];

			/* zero the tail end of the last page, we might be
			 * sending it down to disk
			 */
			if (offset)
				memzero_page(page, offset, PAGE_SIZE - offset);
			will_compress = 1;
		}
	}
cont:
	/*
	 * Check cow_file_range() for why we don't even try to create inline
	 * extent for subpage case.
	 */
	if (start == 0 && fs_info->sectorsize == PAGE_SIZE) {
		/* lets try to make an inline extent */
		if (ret || total_in < actual_end) {
			/* we didn't compress the entire range, try
			 * to make an uncompressed inline extent.
			 */
			ret = cow_file_range_inline(inode, actual_end,
						    0, BTRFS_COMPRESS_NONE,
						    NULL, false);
		} else {
			/* try making a compressed inline extent */
			ret = cow_file_range_inline(inode, actual_end,
						    total_compressed,
						    compress_type, pages,
						    false);
		}
		if (ret <= 0) {
			unsigned long clear_flags = EXTENT_DELALLOC |
				EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
				EXTENT_DO_ACCOUNTING;
			unsigned long page_error_op;

			page_error_op = ret < 0 ? PAGE_SET_ERROR : 0;

			/*
			 * inline extent creation worked or returned error,
			 * we don't need to create any more async work items.
			 * Unlock and free up our temp pages.
			 *
			 * We use DO_ACCOUNTING here because we need the
			 * delalloc_release_metadata to be done _after_ we drop
			 * our outstanding extent for clearing delalloc for this
			 * range.
			 */
			extent_clear_unlock_delalloc(inode, start, end,
						     NULL,
						     clear_flags,
						     PAGE_UNLOCK |
						     PAGE_START_WRITEBACK |
						     page_error_op |
						     PAGE_END_WRITEBACK);

			/*
			 * Ensure we only free the compressed pages if we have
			 * them allocated, as we can still reach here with
			 * inode_need_compress() == false.
			 */
			if (pages) {
				for (i = 0; i < nr_pages; i++) {
					WARN_ON(pages[i]->mapping);
					put_page(pages[i]);
				}
				kfree(pages);
			}
			return 0;
		}
	}

	if (will_compress) {
		/*
		 * we aren't doing an inline extent round the compressed size
		 * up to a block size boundary so the allocator does sane
		 * things
		 */
		total_compressed = ALIGN(total_compressed, blocksize);

		/*
		 * one last check to make sure the compression is really a
		 * win, compare the page count read with the blocks on disk,
		 * compression must free at least one sector size
		 */
		total_in = round_up(total_in, fs_info->sectorsize);
		if (total_compressed + blocksize <= total_in) {
			compressed_extents++;

			/*
			 * The async work queues will take care of doing actual
			 * allocation on disk for these compressed pages, and
			 * will submit them to the elevator.
			 */
			add_async_extent(async_chunk, start, total_in,
					total_compressed, pages, nr_pages,
					compress_type);

			if (start + total_in < end) {
				start += total_in;
				pages = NULL;
				cond_resched();
				goto again;
			}
			return compressed_extents;
		}
	}
	if (pages) {
		/*
		 * the compression code ran but failed to make things smaller,
		 * free any pages it allocated and our page pointer array
		 */
		for (i = 0; i < nr_pages; i++) {
			WARN_ON(pages[i]->mapping);
			put_page(pages[i]);
		}
		kfree(pages);
		pages = NULL;
		total_compressed = 0;
		nr_pages = 0;

		/* flag the file so we don't compress in the future */
		if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) &&
		    !(inode->prop_compress)) {
			inode->flags |= BTRFS_INODE_NOCOMPRESS;
		}
	}
cleanup_and_bail_uncompressed:
	/*
	 * No compression, but we still need to write the pages in the file
	 * we've been given so far.  redirty the locked page if it corresponds
	 * to our extent and set things up for the async work queue to run
	 * cow_file_range to do the normal delalloc dance.
	 */
	if (async_chunk->locked_page &&
	    (page_offset(async_chunk->locked_page) >= start &&
	     page_offset(async_chunk->locked_page)) <= end) {
		__set_page_dirty_nobuffers(async_chunk->locked_page);
		/* unlocked later on in the async handlers */
	}

	if (redirty)
		extent_range_redirty_for_io(&inode->vfs_inode, start, end);
	add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0,
			 BTRFS_COMPRESS_NONE);
	compressed_extents++;

	return compressed_extents;
}
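/*
 * Worked example for the "must free at least one sector" check above
 * (illustrative, 4K sectorsize): for total_in == 64K, a compressed size of
 * 60K is kept (60K + 4K <= 64K), while 61K is first rounded up to 64K by the
 * ALIGN() and then rejected (64K + 4K > 64K), so the range falls back to the
 * uncompressed path and BTRFS_INODE_NOCOMPRESS is set unless compression was
 * forced.
 */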
static void free_async_extent_pages(struct async_extent *async_extent)
{
	int i;

	if (!async_extent->pages)
		return;

	for (i = 0; i < async_extent->nr_pages; i++) {
		WARN_ON(async_extent->pages[i]->mapping);
		put_page(async_extent->pages[i]);
	}
	kfree(async_extent->pages);
	async_extent->nr_pages = 0;
	async_extent->pages = NULL;
}
static int submit_uncompressed_range(struct btrfs_inode *inode,
				     struct async_extent *async_extent,
				     struct page *locked_page)
{
	u64 start = async_extent->start;
	u64 end = async_extent->start + async_extent->ram_size - 1;
	unsigned long nr_written = 0;
	int page_started = 0;
	int ret;

	/*
	 * Call cow_file_range() to run the delalloc range directly, since we
	 * won't go to NOCOW or async path again.
	 *
	 * Also we call cow_file_range() with @unlock_page == 0, so that we
	 * can directly submit them without interruption.
	 */
	ret = cow_file_range(inode, locked_page, start, end, &page_started,
			     &nr_written, 0, NULL);
	/* Inline extent inserted, page gets unlocked and everything is done */
	if (page_started)
		return 0;

	if (ret < 0) {
		btrfs_cleanup_ordered_extents(inode, locked_page, start, end - start + 1);
		if (locked_page) {
			const u64 page_start = page_offset(locked_page);
			const u64 page_end = page_start + PAGE_SIZE - 1;

			btrfs_page_set_error(inode->root->fs_info, locked_page,
					     page_start, PAGE_SIZE);
			set_page_writeback(locked_page);
			end_page_writeback(locked_page);
			end_extent_writepage(locked_page, ret, page_start, page_end);
			unlock_page(locked_page);
		}
		return ret;
	}

	/* All pages will be unlocked, including @locked_page */
	return extent_write_locked_range(&inode->vfs_inode, start, end);
}
static int submit_one_async_extent(struct btrfs_inode *inode,
				   struct async_chunk *async_chunk,
				   struct async_extent *async_extent,
				   u64 *alloc_hint)
{
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key ins;
	struct page *locked_page = NULL;
	struct extent_map *em;
	int ret = 0;
	u64 start = async_extent->start;
	u64 end = async_extent->start + async_extent->ram_size - 1;

	if (async_chunk->blkcg_css)
		kthread_associate_blkcg(async_chunk->blkcg_css);

	/*
	 * If async_chunk->locked_page is in the async_extent range, we need to
	 * handle it.
	 */
	if (async_chunk->locked_page) {
		u64 locked_page_start = page_offset(async_chunk->locked_page);
		u64 locked_page_end = locked_page_start + PAGE_SIZE - 1;

		if (!(start >= locked_page_end || end <= locked_page_start))
			locked_page = async_chunk->locked_page;
	}
	lock_extent(io_tree, start, end, NULL);

	/* We have fall back to uncompressed write */
	if (!async_extent->pages) {
		ret = submit_uncompressed_range(inode, async_extent, locked_page);
		goto done;
	}

	ret = btrfs_reserve_extent(root, async_extent->ram_size,
				   async_extent->compressed_size,
				   async_extent->compressed_size,
				   0, *alloc_hint, &ins, 1, 1);
	if (ret) {
		free_async_extent_pages(async_extent);
		/*
		 * Here we used to try again by going back to non-compressed
		 * path for ENOSPC.  But we can't reserve space even for
		 * compressed size, how could it work for uncompressed size
		 * which requires larger size?  So here we directly go error
		 * path.
		 */
		goto out_free;
	}

	/* Here we're doing allocation and writeback of the compressed pages */
	em = create_io_em(inode, start,
			  async_extent->ram_size,	/* len */
			  start,			/* orig_start */
			  ins.objectid,			/* block_start */
			  ins.offset,			/* block_len */
			  ins.offset,			/* orig_block_len */
			  async_extent->ram_size,	/* ram_bytes */
			  async_extent->compress_type,
			  BTRFS_ORDERED_COMPRESSED);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_free_reserve;
	}
	free_extent_map(em);

	ret = btrfs_add_ordered_extent(inode, start,		/* file_offset */
				       async_extent->ram_size,	/* num_bytes */
				       async_extent->ram_size,	/* ram_bytes */
				       ins.objectid,		/* disk_bytenr */
				       ins.offset,		/* disk_num_bytes */
				       0,			/* offset */
				       1 << BTRFS_ORDERED_COMPRESSED,
				       async_extent->compress_type);
	if (ret) {
		btrfs_drop_extent_map_range(inode, start, end, false);
		goto out_free_reserve;
	}
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);

	/* Clear dirty, set writeback and unlock the pages. */
	extent_clear_unlock_delalloc(inode, start, end,
			NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
			PAGE_UNLOCK | PAGE_START_WRITEBACK);

	btrfs_submit_compressed_write(inode, start,	/* file_offset */
			    async_extent->ram_size,	/* num_bytes */
			    ins.objectid,		/* disk_bytenr */
			    ins.offset,			/* compressed_len */
			    async_extent->pages,	/* compressed_pages */
			    async_extent->nr_pages,
			    async_chunk->write_flags, true);
	*alloc_hint = ins.objectid + ins.offset;
done:
	if (async_chunk->blkcg_css)
		kthread_associate_blkcg(NULL);
	kfree(async_extent);
	return ret;

out_free_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_free:
	extent_clear_unlock_delalloc(inode, start, end,
				     NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DELALLOC_NEW |
				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
				     PAGE_UNLOCK | PAGE_START_WRITEBACK |
				     PAGE_END_WRITEBACK | PAGE_SET_ERROR);
	free_async_extent_pages(async_extent);
	goto done;
}
/*
 * Phase two of compressed writeback.  This is the ordered portion of the code,
 * which only gets called in the order the work was queued.  We walk all the
 * async extents created by compress_file_range and send them down to the disk.
 */
static noinline void submit_compressed_extents(struct async_chunk *async_chunk)
{
	struct btrfs_inode *inode = async_chunk->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct async_extent *async_extent;
	u64 alloc_hint = 0;
	int ret = 0;

	while (!list_empty(&async_chunk->extents)) {
		u64 extent_start;
		u64 ram_size;

		async_extent = list_entry(async_chunk->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);
		extent_start = async_extent->start;
		ram_size = async_extent->ram_size;

		ret = submit_one_async_extent(inode, async_chunk, async_extent,
					      &alloc_hint);
		if (ret)
			btrfs_debug(fs_info,
"async extent submission failed root=%lld inode=%llu start=%llu len=%llu ret=%d",
				    inode->root->root_key.objectid,
				    btrfs_ino(inode), extent_start, ram_size, ret);
	}
}
static u64 get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
				      u64 num_bytes)
{
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	u64 alloc_hint = 0;

	read_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, start, num_bytes);
	if (em) {
		/*
		 * if block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint.  If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			em = search_extent_mapping(em_tree, 0, 0);
			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
				alloc_hint = em->block_start;
			if (em)
				free_extent_map(em);
		} else {
			alloc_hint = em->block_start;
			free_extent_map(em);
		}
	}
	read_unlock(&em_tree->lock);

	return alloc_hint;
}
/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the call backs end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 *
 * When unlock == 1, we unlock the pages in successfully allocated regions.
 * When unlock == 0, we leave them locked for writing them out.
 *
 * However, we unlock all the pages except @locked_page in case of failure.
 *
 * In summary, page locking state will be as follow:
 *
 * - page_started == 1 (return value)
 *     - All the pages are unlocked. IO is started.
 *     - Note that this can happen only on success
 * - unlock == 1
 *     - All the pages except @locked_page are unlocked in any case
 * - unlock == 0
 *     - On success, all the pages are locked for writing out them
 *     - On failure, all the pages except @locked_page are unlocked
 *
 * When a failure happens in the second or later iteration of the
 * while-loop, the ordered extents created in previous iterations are kept
 * intact. So, the caller must clean them up by calling
 * btrfs_cleanup_ordered_extents(). See btrfs_run_delalloc_range() for
 * example.
 */
static noinline int cow_file_range(struct btrfs_inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock,
				   u64 *done_offset)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 alloc_hint = 0;
	u64 orig_start = start;
	u64 num_bytes;
	unsigned long ram_size;
	u64 cur_alloc_size = 0;
	u64 min_alloc_size;
	u64 blocksize = fs_info->sectorsize;
	struct btrfs_key ins;
	struct extent_map *em;
	unsigned clear_bits;
	unsigned long page_ops;
	bool extent_reserved = false;
	int ret = 0;

	if (btrfs_is_free_space_inode(inode)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	num_bytes = ALIGN(end - start + 1, blocksize);
	num_bytes = max(blocksize, num_bytes);
	ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy));

	inode_should_defrag(inode, start, end, num_bytes, SZ_64K);

	/*
	 * Due to the page size limit, for subpage we can only trigger the
	 * writeback for the dirty sectors of page, that means data writeback
	 * is doing more writeback than what we want.
	 *
	 * This is especially unexpected for some call sites like fallocate,
	 * where we only increase i_size after everything is done.
	 * This means we can trigger inline extent even if we didn't want to.
	 * So here we skip inline extent creation completely.
	 */
	if (start == 0 && fs_info->sectorsize == PAGE_SIZE) {
		u64 actual_end = min_t(u64, i_size_read(&inode->vfs_inode),
				       end + 1);

		/* lets try to make an inline extent */
		ret = cow_file_range_inline(inode, actual_end, 0,
					    BTRFS_COMPRESS_NONE, NULL, false);
		if (ret == 0) {
			/*
			 * We use DO_ACCOUNTING here because we need the
			 * delalloc_release_metadata to be run _after_ we drop
			 * our outstanding extent for clearing delalloc for this
			 * range.
			 */
			extent_clear_unlock_delalloc(inode, start, end,
				     locked_page,
				     EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
				     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
				     PAGE_START_WRITEBACK | PAGE_END_WRITEBACK);
			*nr_written = *nr_written +
			     (end - start + PAGE_SIZE) / PAGE_SIZE;
			*page_started = 1;
			/*
			 * locked_page is locked by the caller of
			 * writepage_delalloc(), not locked by
			 * __process_pages_contig().
			 *
			 * We can't let __process_pages_contig() to unlock it,
			 * as it doesn't have any subpage::writers recorded.
			 *
			 * Here we manually unlock the page, since the caller
			 * can't use page_started to determine if it's an
			 * inline extent or a compressed extent.
			 */
			unlock_page(locked_page);
			goto out;
		} else if (ret < 0) {
			goto out_unlock;
		}
	}

	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);

	/*
	 * Relocation relies on the relocated extents to have exactly the same
	 * size as the original extents. Normally writeback for relocation data
	 * extents follows a NOCOW path because relocation preallocates the
	 * extents. However, due to an operation such as scrub turning a block
	 * group to RO mode, it may fallback to COW mode, so we must make sure
	 * an extent allocated during COW has exactly the requested size and can
	 * not be split into smaller extents, otherwise relocation breaks and
	 * fails during the stage where it updates the bytenr of file extent
	 * items.
	 */
	if (btrfs_is_data_reloc_root(root))
		min_alloc_size = num_bytes;
	else
		min_alloc_size = fs_info->sectorsize;

	while (num_bytes > 0) {
		cur_alloc_size = num_bytes;
		ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
					   min_alloc_size, 0, alloc_hint,
					   &ins, 1, 1);
		if (ret)
			goto out_unlock;
		cur_alloc_size = ins.offset;
		extent_reserved = true;

		ram_size = ins.offset;
		em = create_io_em(inode, start, ins.offset, /* len */
				  start, /* orig_start */
				  ins.objectid, /* block_start */
				  ins.offset, /* block_len */
				  ins.offset, /* orig_block_len */
				  ram_size, /* ram_bytes */
				  BTRFS_COMPRESS_NONE, /* compress_type */
				  BTRFS_ORDERED_REGULAR /* type */);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out_reserve;
		}
		free_extent_map(em);

		ret = btrfs_add_ordered_extent(inode, start, ram_size, ram_size,
					       ins.objectid, cur_alloc_size, 0,
					       1 << BTRFS_ORDERED_REGULAR,
					       BTRFS_COMPRESS_NONE);
		if (ret)
			goto out_drop_extent_cache;

		if (btrfs_is_data_reloc_root(root)) {
			ret = btrfs_reloc_clone_csums(inode, start,
						      cur_alloc_size);
			/*
			 * Only drop cache here, and process as normal.
			 *
			 * We must not allow extent_clear_unlock_delalloc()
			 * at out_unlock label to free meta of this ordered
			 * extent, as its meta should be freed by
			 * btrfs_finish_ordered_io().
			 *
			 * So we must continue until @start is increased to
			 * skip current ordered extent.
			 */
			if (ret)
				btrfs_drop_extent_map_range(inode, start,
							    start + ram_size - 1,
							    false);
		}

		btrfs_dec_block_group_reservations(fs_info, ins.objectid);

		/*
		 * We're not doing compressed IO, don't unlock the first page
		 * (which the caller expects to stay locked), don't clear any
		 * dirty bits and don't set any writeback bits
		 *
		 * Do set the Ordered (Private2) bit so we know this page was
		 * properly setup for writepage.
		 */
		page_ops = unlock ? PAGE_UNLOCK : 0;
		page_ops |= PAGE_SET_ORDERED;

		extent_clear_unlock_delalloc(inode, start, start + ram_size - 1,
					     locked_page,
					     EXTENT_LOCKED | EXTENT_DELALLOC,
					     page_ops);
		if (num_bytes < cur_alloc_size)
			num_bytes = 0;
		else
			num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
		extent_reserved = false;

		/*
		 * btrfs_reloc_clone_csums() error, since start is increased
		 * extent_clear_unlock_delalloc() at out_unlock label won't
		 * free metadata of current ordered extent, we're OK to exit.
		 */
		if (ret)
			goto out_unlock;
	}
out:
	return ret;

out_drop_extent_cache:
	btrfs_drop_extent_map_range(inode, start, start + ram_size - 1, false);
out_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_unlock:
	/*
	 * If done_offset is non-NULL and ret == -EAGAIN, we expect the
	 * caller to write out the successfully allocated region and retry.
	 */
	if (done_offset && ret == -EAGAIN) {
		if (orig_start < start)
			*done_offset = start - 1;
		else
			*done_offset = start;
		return ret;
	} else if (ret == -EAGAIN) {
		/* Convert to -ENOSPC since the caller cannot retry. */
		ret = -ENOSPC;
	}

	/*
	 * Now, we have three regions to clean up:
	 *
	 * |-------(1)----|---(2)---|-------------(3)----------|
	 * `- orig_start  `- start  `- start + cur_alloc_size  `- end
	 *
	 * We process each region below.
	 */

	clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
		EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
	page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;

	/*
	 * For the range (1). We have already instantiated the ordered extents
	 * for this region. They are cleaned up by
	 * btrfs_cleanup_ordered_extents() in e.g,
	 * btrfs_run_delalloc_range(). EXTENT_LOCKED | EXTENT_DELALLOC are
	 * already cleared in the above loop. And, EXTENT_DELALLOC_NEW |
	 * EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV are handled by the cleanup
	 * function.
	 *
	 * However, in case of unlock == 0, we still need to unlock the pages
	 * (except @locked_page) to ensure all the pages are unlocked.
	 */
	if (!unlock && orig_start < start) {
		if (!locked_page)
			mapping_set_error(inode->vfs_inode.i_mapping, ret);
		extent_clear_unlock_delalloc(inode, orig_start, start - 1,
					     locked_page, 0, page_ops);
	}

	/*
	 * For the range (2). If we reserved an extent for our delalloc range
	 * (or a subrange) and failed to create the respective ordered extent,
	 * then it means that when we reserved the extent we decremented the
	 * extent's size from the data space_info's bytes_may_use counter and
	 * incremented the space_info's bytes_reserved counter by the same
	 * amount. We must make sure extent_clear_unlock_delalloc() does not try
	 * to decrement again the data space_info's bytes_may_use counter,
	 * therefore we do not pass it the flag EXTENT_CLEAR_DATA_RESV.
	 */
	if (extent_reserved) {
		extent_clear_unlock_delalloc(inode, start,
					     start + cur_alloc_size - 1,
					     locked_page,
					     clear_bits,
					     page_ops);
		start += cur_alloc_size;
		if (start >= end)
			return ret;
	}

	/*
	 * For the range (3). We never touched the region. In addition to the
	 * clear_bits above, we add EXTENT_CLEAR_DATA_RESV to release the data
	 * space_info's bytes_may_use counter, reserved in
	 * btrfs_check_data_free_space().
	 */
	extent_clear_unlock_delalloc(inode, start, end, locked_page,
				     clear_bits | EXTENT_CLEAR_DATA_RESV,
				     page_ops);
	return ret;
}
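/*
 * Worked example for the error cleanup above (illustrative): if
 * cow_file_range() was asked to COW [0, 1M - 1], the first loop iteration
 * fully processed [0, 256K - 1] and btrfs_reserve_extent() then failed, we
 * get here with start == 256K and extent_reserved == false. Region (1) is
 * [0, 256K - 1], whose ordered extent is cleaned up by the caller; there is
 * no region (2); and region (3) is [256K, 1M - 1], whose delalloc state and
 * data reservation are released by the final call above.
 */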
/*
 * work queue callback to start compression on a file and pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
	struct async_chunk *async_chunk;
	int compressed_extents;

	async_chunk = container_of(work, struct async_chunk, work);

	compressed_extents = compress_file_range(async_chunk);
	if (compressed_extents == 0) {
		btrfs_add_delayed_iput(async_chunk->inode);
		async_chunk->inode = NULL;
	}
}
/*
 * work queue callback to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
	struct async_chunk *async_chunk = container_of(work, struct async_chunk,
						       work);
	struct btrfs_fs_info *fs_info = btrfs_work_owner(work);
	unsigned long nr_pages;

	nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
		PAGE_SHIFT;

	/*
	 * ->inode could be NULL if async_chunk_start has failed to compress,
	 * in which case we don't have anything to submit, yet we need to
	 * always adjust ->async_delalloc_pages as its paired with the init
	 * happening in run_delalloc_compressed
	 */
	if (async_chunk->inode)
		submit_compressed_extents(async_chunk);

	/* atomic_sub_return implies a barrier */
	if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
	    5 * SZ_1M)
		cond_wake_up_nomb(&fs_info->async_submit_wait);
}
static noinline void async_cow_free(struct btrfs_work *work)
{
	struct async_chunk *async_chunk;
	struct async_cow *async_cow;

	async_chunk = container_of(work, struct async_chunk, work);
	if (async_chunk->inode)
		btrfs_add_delayed_iput(async_chunk->inode);
	if (async_chunk->blkcg_css)
		css_put(async_chunk->blkcg_css);

	async_cow = async_chunk->async_cow;
	if (atomic_dec_and_test(&async_cow->num_chunks))
		kvfree(async_cow);
}
static bool run_delalloc_compressed(struct btrfs_inode *inode,
				    struct writeback_control *wbc,
				    struct page *locked_page,
				    u64 start, u64 end, int *page_started,
				    unsigned long *nr_written)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc);
	struct async_cow *ctx;
	struct async_chunk *async_chunk;
	unsigned long nr_pages;
	u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K);
	int i;
	unsigned nofs_flag;
	const blk_opf_t write_flags = wbc_to_write_flags(wbc);

	nofs_flag = memalloc_nofs_save();
	ctx = kvmalloc(struct_size(ctx, chunks, num_chunks), GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);
	if (!ctx)
		return false;

	unlock_extent(&inode->io_tree, start, end, NULL);
	set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags);

	async_chunk = ctx->chunks;
	atomic_set(&ctx->num_chunks, num_chunks);

	for (i = 0; i < num_chunks; i++) {
		u64 cur_end = min(end, start + SZ_512K - 1);

		/*
		 * igrab is called higher up in the call chain, take only the
		 * lightweight reference for the callback lifetime
		 */
		ihold(&inode->vfs_inode);
		async_chunk[i].async_cow = ctx;
		async_chunk[i].inode = inode;
		async_chunk[i].start = start;
		async_chunk[i].end = cur_end;
		async_chunk[i].write_flags = write_flags;
		INIT_LIST_HEAD(&async_chunk[i].extents);

		/*
		 * The locked_page comes all the way from writepage and its
		 * the original page we were actually given.  As we spread
		 * this large delalloc region across multiple async_chunk
		 * structs, only the first struct needs a pointer to locked_page
		 *
		 * This way we don't need racey decisions about who is supposed
		 * to unlock it.
		 */
		if (locked_page) {
			/*
			 * Depending on the compressibility, the pages might or
			 * might not go through async.  We want all of them to
			 * be accounted against wbc once.  Let's do it here
			 * before the paths diverge.  wbc accounting is used
			 * only for foreign writeback detection and doesn't
			 * need full accuracy.  Just account the whole thing
			 * against the first page.
			 */
			wbc_account_cgroup_owner(wbc, locked_page,
						 cur_end - start);
			async_chunk[i].locked_page = locked_page;
			locked_page = NULL;
		} else {
			async_chunk[i].locked_page = NULL;
		}

		if (blkcg_css != blkcg_root_css) {
			css_get(blkcg_css);
			async_chunk[i].blkcg_css = blkcg_css;
			async_chunk[i].write_flags |= REQ_BTRFS_CGROUP_PUNT;
		} else {
			async_chunk[i].blkcg_css = NULL;
		}

		btrfs_init_work(&async_chunk[i].work, async_cow_start,
				async_cow_submit, async_cow_free);

		nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
		atomic_add(nr_pages, &fs_info->async_delalloc_pages);

		btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work);

		*nr_written += nr_pages;
		start = cur_end + 1;
	}
	*page_started = 1;
	return true;
}
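/*
 * Illustrative note, not from the original file: the loop above splits the
 * delalloc range into 512K chunks, e.g. a 3M range becomes
 * DIV_ROUND_UP(3M, 512K) == 6 async_chunk work items, each compressed and
 * submitted independently on the delalloc_workers workqueue.
 */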
static noinline int run_delalloc_zoned(struct btrfs_inode *inode,
				       struct page *locked_page, u64 start,
				       u64 end, int *page_started,
				       unsigned long *nr_written)
{
	u64 done_offset = end;
	int ret;
	bool locked_page_done = false;

	while (start <= end) {
		ret = cow_file_range(inode, locked_page, start, end, page_started,
				     nr_written, 0, &done_offset);
		if (ret && ret != -EAGAIN)
			return ret;

		if (*page_started) {
			ASSERT(ret == 0);
			return 0;
		}

		if (ret == 0)
			done_offset = end;

		if (done_offset == start) {
			wait_on_bit_io(&inode->root->fs_info->flags,
				       BTRFS_FS_NEED_ZONE_FINISH,
				       TASK_UNINTERRUPTIBLE);
			continue;
		}

		if (!locked_page_done) {
			__set_page_dirty_nobuffers(locked_page);
			account_page_redirty(locked_page);
		}
		locked_page_done = true;
		extent_write_locked_range(&inode->vfs_inode, start, done_offset);

		start = done_offset + 1;
	}

	return 0;
}
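/*
 * Illustrative note, not from the original file: on a zoned filesystem
 * cow_file_range() may return -EAGAIN with done_offset set to the last byte
 * that was successfully allocated. The loop above then writes out
 * [start, done_offset] and retries from done_offset + 1; if nothing could be
 * allocated at all (done_offset == start) it waits for a zone finish before
 * retrying.
 */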
static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
					u64 bytenr, u64 num_bytes, bool nowait)
{
	struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bytenr);
	struct btrfs_ordered_sum *sums;
	int ret;
	LIST_HEAD(list);

	ret = btrfs_lookup_csums_list(csum_root, bytenr, bytenr + num_bytes - 1,
				      &list, 0, nowait);
	if (ret == 0 && list_empty(&list))
		return 0;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	if (ret < 0)
		return ret;
	return 1;
}
static int fallback_to_cow(struct btrfs_inode *inode, struct page *locked_page,
			   const u64 start, const u64 end,
			   int *page_started, unsigned long *nr_written)
{
	const bool is_space_ino = btrfs_is_free_space_inode(inode);
	const bool is_reloc_ino = btrfs_is_data_reloc_root(inode->root);
	const u64 range_bytes = end + 1 - start;
	struct extent_io_tree *io_tree = &inode->io_tree;
	u64 range_start = start;
	u64 count;

	/*
	 * If EXTENT_NORESERVE is set it means that when the buffered write was
	 * made we had not enough available data space and therefore we did not
	 * reserve data space for it, since we thought we could do NOCOW for the
	 * respective file range (either there is prealloc extent or the inode
	 * has the NOCOW bit set).
	 *
	 * However when we need to fallback to COW mode (because for example the
	 * block group for the corresponding extent was turned to RO mode by a
	 * scrub or relocation) we need to do the following:
	 *
	 * 1) We increment the bytes_may_use counter of the data space info.
	 *    If COW succeeds, it allocates a new data extent and after doing
	 *    that it decrements the space info's bytes_may_use counter and
	 *    increments its bytes_reserved counter by the same amount (we do
	 *    this at btrfs_add_reserved_bytes()). So we need to increment the
	 *    bytes_may_use counter to compensate (when space is reserved at
	 *    buffered write time, the bytes_may_use counter is incremented);
	 *
	 * 2) We clear the EXTENT_NORESERVE bit from the range. We do this so
	 *    that if the COW path fails for any reason, it decrements (through
	 *    extent_clear_unlock_delalloc()) the bytes_may_use counter of the
	 *    data space info, which we incremented in the step above.
	 *
	 * If we need to fallback to cow and the inode corresponds to a free
	 * space cache inode or an inode of the data relocation tree, we must
	 * also increment bytes_may_use of the data space_info for the same
	 * reason. Space caches and relocated data extents always get a prealloc
	 * extent for them, however scrub or balance may have set the block
	 * group that contains that extent to RO mode and therefore force COW
	 * when starting writeback.
	 */
	count = count_range_bits(io_tree, &range_start, end, range_bytes,
				 EXTENT_NORESERVE, 0, NULL);
	if (count > 0 || is_space_ino || is_reloc_ino) {
		u64 bytes = count;
		struct btrfs_fs_info *fs_info = inode->root->fs_info;
		struct btrfs_space_info *sinfo = fs_info->data_sinfo;

		if (is_space_ino || is_reloc_ino)
			bytes = range_bytes;

		spin_lock(&sinfo->lock);
		btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes);
		spin_unlock(&sinfo->lock);

		if (count > 0)
			clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE,
					 NULL);
	}

	return cow_file_range(inode, locked_page, start, end, page_started,
			      nr_written, 1, NULL);
}
struct can_nocow_file_extent_args {
	/* Input fields. */

	/* Start file offset of the range we want to NOCOW. */
	u64 start;
	/* End file offset (inclusive) of the range we want to NOCOW. */
	u64 end;
	bool writeback_path;
	bool strict;
	/*
	 * Free the path passed to can_nocow_file_extent() once it's not needed
	 * anymore.
	 */
	bool free_path;

	/* Output fields. Only set when can_nocow_file_extent() returns 1. */

	u64 disk_bytenr;
	u64 disk_num_bytes;
	u64 extent_offset;
	/* Number of bytes that can be written to in NOCOW mode. */
	u64 num_bytes;
};
/*
 * Check if we can NOCOW the file extent that the path points to.
 * This function may return with the path released, so the caller should check
 * if path->nodes[0] is NULL or not if it needs to use the path afterwards.
 *
 * Returns: < 0 on error
 *            0 if we can not NOCOW
 *            1 if we can NOCOW
 */
static int can_nocow_file_extent(struct btrfs_path *path,
				 struct btrfs_key *key,
				 struct btrfs_inode *inode,
				 struct can_nocow_file_extent_args *args)
{
	const bool is_freespace_inode = btrfs_is_free_space_inode(inode);
	struct extent_buffer *leaf = path->nodes[0];
	struct btrfs_root *root = inode->root;
	struct btrfs_file_extent_item *fi;
	u64 extent_end;
	u8 extent_type;
	int can_nocow = 0;
	int ret = 0;
	bool nowait = path->nowait;

	fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
	extent_type = btrfs_file_extent_type(leaf, fi);

	if (extent_type == BTRFS_FILE_EXTENT_INLINE)
		goto out;

	/* Can't access these fields unless we know it's not an inline extent. */
	args->disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	args->disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	args->extent_offset = btrfs_file_extent_offset(leaf, fi);

	if (!(inode->flags & BTRFS_INODE_NODATACOW) &&
	    extent_type == BTRFS_FILE_EXTENT_REG)
		goto out;

	/*
	 * If the extent was created before the generation where the last snapshot
	 * for its subvolume was created, then this implies the extent is shared,
	 * hence we must COW.
	 */
	if (!args->strict &&
	    btrfs_file_extent_generation(leaf, fi) <=
	    btrfs_root_last_snapshot(&root->root_item))
		goto out;

	/* An explicit hole, must COW. */
	if (args->disk_bytenr == 0)
		goto out;

	/* Compressed/encrypted/encoded extents must be COWed. */
	if (btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		goto out;

	extent_end = btrfs_file_extent_end(path);

	/*
	 * The following checks can be expensive, as they need to take other
	 * locks and do btree or rbtree searches, so release the path to avoid
	 * blocking other tasks for too long.
	 */
	btrfs_release_path(path);

	ret = btrfs_cross_ref_exist(root, btrfs_ino(inode),
				    key->offset - args->extent_offset,
				    args->disk_bytenr, args->strict, path);
	WARN_ON_ONCE(ret > 0 && is_freespace_inode);
	if (ret != 0)
		goto out;

	if (args->free_path) {
		/*
		 * We don't need the path anymore, plus through the
		 * csum_exist_in_range() call below we will end up allocating
		 * another path. So free the path to avoid unnecessary extra
		 * memory usage.
		 */
		btrfs_free_path(path);
		path = NULL;
	}

	/* If there are pending snapshots for this root, we must COW. */
	if (args->writeback_path && !is_freespace_inode &&
	    atomic_read(&root->snapshot_force_cow))
		goto out;

	args->disk_bytenr += args->extent_offset;
	args->disk_bytenr += args->start - key->offset;
	args->num_bytes = min(args->end + 1, extent_end) - args->start;

	/*
	 * Force COW if csums exist in the range. This ensures that csums for a
	 * given extent are either valid or do not exist.
	 */
	ret = csum_exist_in_range(root->fs_info, args->disk_bytenr, args->num_bytes,
				  nowait);
	WARN_ON_ONCE(ret > 0 && is_freespace_inode);
	if (ret != 0)
		goto out;

	can_nocow = 1;
 out:
	if (args->free_path && path)
		btrfs_free_path(path);

	return ret < 0 ? ret : can_nocow;
}
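/*
 * Sketch of the expected calling pattern (hypothetical caller, shown only to
 * illustrate the tri-state return value; run_delalloc_nocow() below is the
 * real user):
 *
 *	struct can_nocow_file_extent_args args = { 0 };
 *
 *	args.start = cur_offset;
 *	args.end = end;
 *	args.writeback_path = true;
 *	ret = can_nocow_file_extent(path, &found_key, inode, &args);
 *	if (ret < 0)
 *		goto error;          (btree or csum lookup failed)
 *	else if (ret == 0)
 *		fall back to COW     (extent is not safe to overwrite)
 *	else
 *		overwrite args.num_bytes bytes at args.disk_bytenr in place
 *
 * Remember that the path may have been released, or even freed when
 * args.free_path is set, by the time the call returns.
 */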
/*
 * When the nocow writeback callback runs, this checks for snapshots or COW
 * copies of the extents that exist in the file, and COWs the file as required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk.
 */
static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
				       struct page *locked_page,
				       const u64 start, const u64 end,
				       int *page_started,
				       unsigned long *nr_written)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_root *root = inode->root;
	struct btrfs_path *path;
	u64 cow_start = (u64)-1;
	u64 cur_offset = start;
	int ret;
	bool check_prev = true;
	u64 ino = btrfs_ino(inode);
	struct btrfs_block_group *bg;
	bool nocow = false;
	struct can_nocow_file_extent_args nocow_args = { 0 };

	path = btrfs_alloc_path();
	if (!path) {
		extent_clear_unlock_delalloc(inode, start, end, locked_page,
					     EXTENT_LOCKED | EXTENT_DELALLOC |
					     EXTENT_DO_ACCOUNTING |
					     EXTENT_DEFRAG, PAGE_UNLOCK |
					     PAGE_START_WRITEBACK |
					     PAGE_END_WRITEBACK);
		return -ENOMEM;
	}

	nocow_args.end = end;
	nocow_args.writeback_path = true;

	while (1) {
		struct btrfs_key found_key;
		struct btrfs_file_extent_item *fi;
		struct extent_buffer *leaf;
		u64 extent_end;
		u64 ram_bytes;
		u64 nocow_end;
		int extent_type;

		nocow = false;

		ret = btrfs_lookup_file_extent(NULL, root, path, ino,
					       cur_offset, 0);
		if (ret < 0)
			goto error;

		/*
		 * If there is no extent for our range when doing the initial
		 * search, then go back to the previous slot as it will be the
		 * one containing the search offset
		 */
		if (ret > 0 && path->slots[0] > 0 && check_prev) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);
			if (found_key.objectid == ino &&
			    found_key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		check_prev = false;
next_slot:
		/* Go to next leaf if we have exhausted the current one */
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				if (cow_start != (u64)-1)
					cur_offset = cow_start;
				goto error;
			}
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		/* Didn't find anything for our INO */
		if (found_key.objectid > ino)
			break;
		/*
		 * Keep searching until we find an EXTENT_ITEM or there are no
		 * more extents for this inode
		 */
		if (WARN_ON_ONCE(found_key.objectid < ino) ||
		    found_key.type < BTRFS_EXTENT_DATA_KEY) {
			path->slots[0]++;
			goto next_slot;
		}

		/* Found key is not EXTENT_DATA_KEY or starts after req range */
		if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
		    found_key.offset > end)
			break;

		/*
		 * If the found extent starts after requested offset, then
		 * adjust extent_end to be right before this extent begins
		 */
		if (found_key.offset > cur_offset) {
			extent_end = found_key.offset;
			extent_type = 0;
			goto out_check;
		}

		/*
		 * Found extent which begins before our range and potentially
		 * intersect it
		 */
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);
		/* If this is triggered then we have a memory corruption. */
		ASSERT(extent_type < BTRFS_NR_FILE_EXTENT_TYPES);
		if (WARN_ON(extent_type >= BTRFS_NR_FILE_EXTENT_TYPES)) {
			ret = -EUCLEAN;
			goto error;
		}
		ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
		extent_end = btrfs_file_extent_end(path);

		/*
		 * If the extent we got ends before our current offset, skip to
		 * the next extent.
		 */
		if (extent_end <= cur_offset) {
			path->slots[0]++;
			goto next_slot;
		}

		nocow_args.start = cur_offset;
		ret = can_nocow_file_extent(path, &found_key, inode, &nocow_args);
		if (ret < 0) {
			if (cow_start != (u64)-1)
				cur_offset = cow_start;
			goto error;
		} else if (ret == 0) {
			goto out_check;
		}

		ret = 0;
		bg = btrfs_inc_nocow_writers(fs_info, nocow_args.disk_bytenr);
		if (bg)
			nocow = true;
out_check:
		/*
		 * If nocow is false then record the beginning of the range
		 * that needs to be COWed
		 */
		if (!nocow) {
			if (cow_start == (u64)-1)
				cow_start = cur_offset;
			cur_offset = extent_end;
			if (cur_offset > end)
				break;
			if (!path->nodes[0])
				continue;
			path->slots[0]++;
			goto next_slot;
		}

		/*
		 * COW range from cow_start to found_key.offset - 1. As the key
		 * will contain the beginning of the first extent that can be
		 * NOCOW, following one which needs to be COW'ed
		 */
		if (cow_start != (u64)-1) {
			ret = fallback_to_cow(inode, locked_page,
					      cow_start, found_key.offset - 1,
					      page_started, nr_written);
			if (ret)
				goto error;
			cow_start = (u64)-1;
		}

		nocow_end = cur_offset + nocow_args.num_bytes - 1;

		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			u64 orig_start = found_key.offset - nocow_args.extent_offset;
			struct extent_map *em;

			em = create_io_em(inode, cur_offset, nocow_args.num_bytes,
					  orig_start,
					  nocow_args.disk_bytenr, /* block_start */
					  nocow_args.num_bytes, /* block_len */
					  nocow_args.disk_num_bytes, /* orig_block_len */
					  ram_bytes, BTRFS_COMPRESS_NONE,
					  BTRFS_ORDERED_PREALLOC);
			if (IS_ERR(em)) {
				ret = PTR_ERR(em);
				goto error;
			}
			free_extent_map(em);
			ret = btrfs_add_ordered_extent(inode,
					cur_offset, nocow_args.num_bytes,
					nocow_args.num_bytes,
					nocow_args.disk_bytenr,
					nocow_args.num_bytes, 0,
					1 << BTRFS_ORDERED_PREALLOC,
					BTRFS_COMPRESS_NONE);
			if (ret) {
				btrfs_drop_extent_map_range(inode, cur_offset,
							    nocow_end, false);
				goto error;
			}
		} else {
			ret = btrfs_add_ordered_extent(inode, cur_offset,
						       nocow_args.num_bytes,
						       nocow_args.num_bytes,
						       nocow_args.disk_bytenr,
						       nocow_args.num_bytes,
						       0,
						       1 << BTRFS_ORDERED_NOCOW,
						       BTRFS_COMPRESS_NONE);
			if (ret)
				goto error;
		}

		if (nocow) {
			btrfs_dec_nocow_writers(bg);
			nocow = false;
		}

		if (btrfs_is_data_reloc_root(root))
			/*
			 * Error handled later, as we must prevent
			 * extent_clear_unlock_delalloc() in error handler
			 * from freeing metadata of created ordered extent.
			 */
			ret = btrfs_reloc_clone_csums(inode, cur_offset,
						      nocow_args.num_bytes);

		extent_clear_unlock_delalloc(inode, cur_offset, nocow_end,
					     locked_page, EXTENT_LOCKED |
					     EXTENT_DELALLOC |
					     EXTENT_CLEAR_DATA_RESV,
					     PAGE_UNLOCK | PAGE_SET_ORDERED);

		cur_offset = extent_end;

		/*
		 * btrfs_reloc_clone_csums() error, now we're OK to call error
		 * handler, as metadata for created ordered extent will only
		 * be freed by btrfs_finish_ordered_io().
		 */
		if (ret)
			goto error;
		if (cur_offset > end)
			break;
	}
	btrfs_release_path(path);

	if (cur_offset <= end && cow_start == (u64)-1)
		cow_start = cur_offset;

	if (cow_start != (u64)-1) {
		cur_offset = end;
		ret = fallback_to_cow(inode, locked_page, cow_start, end,
				      page_started, nr_written);
		if (ret)
			goto error;
	}

error:
	if (nocow)
		btrfs_dec_nocow_writers(bg);

	if (ret && cur_offset < end)
		extent_clear_unlock_delalloc(inode, cur_offset, end,
					     locked_page, EXTENT_LOCKED |
					     EXTENT_DELALLOC | EXTENT_DEFRAG |
					     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
					     PAGE_START_WRITEBACK |
					     PAGE_END_WRITEBACK);
	btrfs_free_path(path);
	return ret;
}
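/*
 * Illustration of the walk above (hypothetical file layout): for a delalloc
 * range covering [0, 12K) on a NODATACOW inode where the extent backing
 * [0, 8K) sits in a block group that scrub flipped to read-only (so
 * btrfs_inc_nocow_writers() fails) and [8K, 12K) is backed by a regular
 * unshared extent, the loop accumulates [0, 8K) in cow_start, hands it to
 * fallback_to_cow(), and then creates a single NOCOW ordered extent for
 * [8K, 12K). Holes and extents rejected by can_nocow_file_extent() always
 * end up on the COW side in the same way.
 */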
static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end)
{
	if (inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) {
		if (inode->defrag_bytes &&
		    test_range_bit(&inode->io_tree, start, end, EXTENT_DEFRAG,
				   0, NULL))
			return false;
		return true;
	}
	return false;
}
/*
 * Function to process delayed allocation (create CoW) for ranges which are
 * being touched for the first time.
 */
int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page,
		u64 start, u64 end, int *page_started, unsigned long *nr_written,
		struct writeback_control *wbc)
{
	int ret = 0;
	const bool zoned = btrfs_is_zoned(inode->root->fs_info);

	/*
	 * The range must cover part of the @locked_page, or the returned
	 * @page_started can confuse the caller.
	 */
	ASSERT(!(end <= page_offset(locked_page) ||
		 start >= page_offset(locked_page) + PAGE_SIZE));

	if (should_nocow(inode, start, end)) {
		/*
		 * Normally on a zoned device we're only doing COW writes, but
		 * in case of relocation on a zoned filesystem we have taken
		 * precaution, that we're only writing sequentially. It's safe
		 * to use run_delalloc_nocow() here, like for regular
		 * preallocated inodes.
		 */
		ASSERT(!zoned || btrfs_is_data_reloc_root(inode->root));
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, nr_written);
		goto out;
	}

	if (btrfs_inode_can_compress(inode) &&
	    inode_need_compress(inode, start, end) &&
	    run_delalloc_compressed(inode, wbc, locked_page, start,
				    end, page_started, nr_written))
		goto out;

	if (zoned)
		ret = run_delalloc_zoned(inode, locked_page, start, end,
					 page_started, nr_written);
	else
		ret = cow_file_range(inode, locked_page, start, end,
				     page_started, nr_written, 1, NULL);

out:
	if (ret < 0)
		btrfs_cleanup_ordered_extents(inode, locked_page, start,
					      end - start + 1);
	return ret;
}
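/*
 * To summarize the dispatch above: NOCOW-capable ranges go through
 * run_delalloc_nocow(), compressible ranges through run_delalloc_compressed(),
 * zoned filesystems through run_delalloc_zoned() and everything else through
 * plain cow_file_range(). Exactly one of the four paths handles a given
 * range, and any failure funnels into btrfs_cleanup_ordered_extents().
 */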
void btrfs_split_delalloc_extent(struct btrfs_inode *inode,
				 struct extent_state *orig, u64 split)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 size;

	/* not delalloc, ignore it */
	if (!(orig->state & EXTENT_DELALLOC))
		return;

	size = orig->end - orig->start + 1;
	if (size > fs_info->max_extent_size) {
		u32 num_extents;
		u64 new_size;

		/*
		 * See the explanation in btrfs_merge_delalloc_extent, the same
		 * applies here, just in reverse.
		 */
		new_size = orig->end - split + 1;
		num_extents = count_max_extents(fs_info, new_size);
		new_size = split - orig->start;
		num_extents += count_max_extents(fs_info, new_size);
		if (count_max_extents(fs_info, size) >= num_extents)
			return;
	}

	spin_lock(&inode->lock);
	btrfs_mod_outstanding_extents(inode, 1);
	spin_unlock(&inode->lock);
}
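/*
 * Worked example for the split accounting above (illustrative numbers,
 * assuming max_extent_size is 128M): splitting a 128M delalloc extent at 64M
 * yields count_max_extents(64M) + count_max_extents(64M) = 2 while the
 * original only needed count_max_extents(128M) = 1, so one extra outstanding
 * extent is added. Splitting a 256M extent at 128M yields 1 + 1 = 2, equal
 * to count_max_extents(256M) = 2, so nothing needs to change.
 */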
/*
 * Handle merged delayed allocation extents so we can keep track of new extents
 * that are just merged onto old extents, such as when we are doing sequential
 * writes, so we can properly account for the metadata space we'll need.
 */
void btrfs_merge_delalloc_extent(struct btrfs_inode *inode, struct extent_state *new,
				 struct extent_state *other)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 new_size, old_size;
	u32 num_extents;

	/* not delalloc, ignore it */
	if (!(other->state & EXTENT_DELALLOC))
		return;

	if (new->start > other->start)
		new_size = new->end - other->start + 1;
	else
		new_size = other->end - new->start + 1;

	/* we're not bigger than the max, unreserve the space and go */
	if (new_size <= fs_info->max_extent_size) {
		spin_lock(&inode->lock);
		btrfs_mod_outstanding_extents(inode, -1);
		spin_unlock(&inode->lock);
		return;
	}

	/*
	 * We have to add up either side to figure out how many extents were
	 * accounted for before we merged into one big extent.  If the number of
	 * extents we accounted for is <= the amount we need for the new range
	 * then we can return, otherwise drop.  Think of it like this
	 *
	 * [ 4k][MAX_SIZE]
	 *
	 * So we've grown the extent by a MAX_SIZE extent, this would mean we
	 * need 2 outstanding extents, on one side we have 1 and the other side
	 * we have 1 so they are == and we can return.  But in this case
	 *
	 * [MAX_SIZE+4k][MAX_SIZE+4k]
	 *
	 * Each range on their own accounts for 2 extents, but merged together
	 * they are only 3 extents worth of accounting, so we need to drop in
	 * this case.
	 */
	old_size = other->end - other->start + 1;
	num_extents = count_max_extents(fs_info, old_size);
	old_size = new->end - new->start + 1;
	num_extents += count_max_extents(fs_info, old_size);
	if (count_max_extents(fs_info, new_size) >= num_extents)
		return;

	spin_lock(&inode->lock);
	btrfs_mod_outstanding_extents(inode, -1);
	spin_unlock(&inode->lock);
}
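/*
 * The same [MAX_SIZE+4k][MAX_SIZE+4k] example with concrete numbers
 * (assuming max_extent_size is 128M): each side accounts for
 * count_max_extents(128M+4K) = 2 outstanding extents, 4 in total, but the
 * merged range only needs count_max_extents(256M+8K) = 3, hence the single
 * -1 adjustment at the end of the function.
 */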
static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
				      struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	spin_lock(&root->delalloc_lock);
	if (list_empty(&inode->delalloc_inodes)) {
		list_add_tail(&inode->delalloc_inodes, &root->delalloc_inodes);
		set_bit(BTRFS_INODE_IN_DELALLOC_LIST, &inode->runtime_flags);
		root->nr_delalloc_inodes++;
		if (root->nr_delalloc_inodes == 1) {
			spin_lock(&fs_info->delalloc_root_lock);
			BUG_ON(!list_empty(&root->delalloc_root));
			list_add_tail(&root->delalloc_root,
				      &fs_info->delalloc_roots);
			spin_unlock(&fs_info->delalloc_root_lock);
		}
	}
	spin_unlock(&root->delalloc_lock);
}
void __btrfs_del_delalloc_inode(struct btrfs_root *root,
				struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!list_empty(&inode->delalloc_inodes)) {
		list_del_init(&inode->delalloc_inodes);
		clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
			  &inode->runtime_flags);
		root->nr_delalloc_inodes--;
		if (!root->nr_delalloc_inodes) {
			ASSERT(list_empty(&root->delalloc_inodes));
			spin_lock(&fs_info->delalloc_root_lock);
			BUG_ON(list_empty(&root->delalloc_root));
			list_del_init(&root->delalloc_root);
			spin_unlock(&fs_info->delalloc_root_lock);
		}
	}
}

static void btrfs_del_delalloc_inode(struct btrfs_root *root,
				     struct btrfs_inode *inode)
{
	spin_lock(&root->delalloc_lock);
	__btrfs_del_delalloc_inode(root, inode);
	spin_unlock(&root->delalloc_lock);
}
/*
 * Properly track delayed allocation bytes in the inode and to maintain the
 * list of inodes that have pending delalloc work to be done.
 */
void btrfs_set_delalloc_extent(struct btrfs_inode *inode, struct extent_state *state,
			       u32 bits)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	if ((bits & EXTENT_DEFRAG) && !(bits & EXTENT_DELALLOC))
		WARN_ON(1);
	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if (!(state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = inode->root;
		u64 len = state->end + 1 - state->start;
		u32 num_extents = count_max_extents(fs_info, len);
		bool do_list = !btrfs_is_free_space_inode(inode);

		spin_lock(&inode->lock);
		btrfs_mod_outstanding_extents(inode, num_extents);
		spin_unlock(&inode->lock);

		/* For sanity tests */
		if (btrfs_is_testing(fs_info))
			return;

		percpu_counter_add_batch(&fs_info->delalloc_bytes, len,
					 fs_info->delalloc_batch);
		spin_lock(&inode->lock);
		inode->delalloc_bytes += len;
		if (bits & EXTENT_DEFRAG)
			inode->defrag_bytes += len;
		if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
					 &inode->runtime_flags))
			btrfs_add_delalloc_inodes(root, inode);
		spin_unlock(&inode->lock);
	}

	if (!(state->state & EXTENT_DELALLOC_NEW) &&
	    (bits & EXTENT_DELALLOC_NEW)) {
		spin_lock(&inode->lock);
		inode->new_delalloc_bytes += state->end + 1 - state->start;
		spin_unlock(&inode->lock);
	}
}
/*
 * Once a range is no longer delalloc this function ensures that proper
 * accounting happens.
 */
void btrfs_clear_delalloc_extent(struct btrfs_inode *inode,
				 struct extent_state *state, u32 bits)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 len = state->end + 1 - state->start;
	u32 num_extents = count_max_extents(fs_info, len);

	if ((state->state & EXTENT_DEFRAG) && (bits & EXTENT_DEFRAG)) {
		spin_lock(&inode->lock);
		inode->defrag_bytes -= len;
		spin_unlock(&inode->lock);
	}

	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = inode->root;
		bool do_list = !btrfs_is_free_space_inode(inode);

		spin_lock(&inode->lock);
		btrfs_mod_outstanding_extents(inode, -num_extents);
		spin_unlock(&inode->lock);

		/*
		 * We don't reserve metadata space for space cache inodes so we
		 * don't need to call delalloc_release_metadata if there is an
		 * error.
		 */
		if (bits & EXTENT_CLEAR_META_RESV &&
		    root != fs_info->tree_root)
			btrfs_delalloc_release_metadata(inode, len, false);

		/* For sanity tests. */
		if (btrfs_is_testing(fs_info))
			return;

		if (!btrfs_is_data_reloc_root(root) &&
		    do_list && !(state->state & EXTENT_NORESERVE) &&
		    (bits & EXTENT_CLEAR_DATA_RESV))
			btrfs_free_reserved_data_space_noquota(fs_info, len);

		percpu_counter_add_batch(&fs_info->delalloc_bytes, -len,
					 fs_info->delalloc_batch);
		spin_lock(&inode->lock);
		inode->delalloc_bytes -= len;
		if (do_list && inode->delalloc_bytes == 0 &&
		    test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
			     &inode->runtime_flags))
			btrfs_del_delalloc_inode(root, inode);
		spin_unlock(&inode->lock);
	}

	if ((state->state & EXTENT_DELALLOC_NEW) &&
	    (bits & EXTENT_DELALLOC_NEW)) {
		spin_lock(&inode->lock);
		ASSERT(inode->new_delalloc_bytes >= len);
		inode->new_delalloc_bytes -= len;
		if (bits & EXTENT_ADD_INODE_BYTES)
			inode_add_bytes(&inode->vfs_inode, len);
		spin_unlock(&inode->lock);
	}
}
static int btrfs_extract_ordered_extent(struct btrfs_bio *bbio,
					struct btrfs_ordered_extent *ordered)
{
	u64 start = (u64)bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
	u64 len = bbio->bio.bi_iter.bi_size;
	struct btrfs_ordered_extent *new;
	int ret = 0;

	/* Must always be called for the beginning of an ordered extent. */
	if (WARN_ON_ONCE(start != ordered->disk_bytenr))
		return -EINVAL;

	/* No need to split if the ordered extent covers the entire bio. */
	if (ordered->disk_num_bytes == len)
		return 0;

	/*
	 * Don't split the extent_map for NOCOW extents, as we're writing into
	 * a pre-existing one.
	 */
	if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
		ret = split_extent_map(bbio->inode, bbio->file_offset,
				       ordered->num_bytes, len,
				       ordered->disk_bytenr);
		if (ret)
			return ret;
	}

	new = btrfs_split_ordered_extent(ordered, len);
	if (IS_ERR(new))
		return PTR_ERR(new);
	btrfs_put_ordered_extent(new);

	return 0;
}
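/*
 * Example of the split above (hypothetical sizes): an ordered extent with a
 * disk_num_bytes of 1M whose first submitted bio only spans 256K gets split
 * so that the leading 256K becomes its own ordered extent which can complete
 * independently, while the original keeps the remaining 768K. For COW writes
 * the cached extent_map is split at the same boundary first, keeping map and
 * ordered extent in sync.
 */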
/*
 * given a list of ordered sums record them in the inode.  This happens
 * at IO completion time based on sums calculated at bio submission time.
 */
static int add_pending_csums(struct btrfs_trans_handle *trans,
			     struct list_head *list)
{
	struct btrfs_ordered_sum *sum;
	struct btrfs_root *csum_root = NULL;
	int ret;

	list_for_each_entry(sum, list, list) {
		trans->adding_csums = true;
		if (!csum_root)
			csum_root = btrfs_csum_root(trans->fs_info,
						    sum->logical);
		ret = btrfs_csum_file_blocks(trans, csum_root, sum);
		trans->adding_csums = false;
		if (ret)
			return ret;
	}
	return 0;
}
static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
					 const u64 start,
					 const u64 len,
					 struct extent_state **cached_state)
{
	u64 search_start = start;
	const u64 end = start + len - 1;

	while (search_start < end) {
		const u64 search_len = end - search_start + 1;
		struct extent_map *em;
		u64 em_len;
		int ret = 0;

		em = btrfs_get_extent(inode, NULL, 0, search_start, search_len);
		if (IS_ERR(em))
			return PTR_ERR(em);

		if (em->block_start != EXTENT_MAP_HOLE)
			goto next;

		em_len = em->len;
		if (em->start < search_start)
			em_len -= search_start - em->start;
		if (em_len > search_len)
			em_len = search_len;

		ret = set_extent_bit(&inode->io_tree, search_start,
				     search_start + em_len - 1,
				     EXTENT_DELALLOC_NEW, cached_state);
next:
		search_start = extent_map_end(em);
		free_extent_map(em);
		if (ret)
			return ret;
	}
	return 0;
}
int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
			      unsigned int extra_bits,
			      struct extent_state **cached_state)
{
	WARN_ON(PAGE_ALIGNED(end));

	if (start >= i_size_read(&inode->vfs_inode) &&
	    !(inode->flags & BTRFS_INODE_PREALLOC)) {
		/*
		 * There can't be any extents following eof in this case so just
		 * set the delalloc new bit for the range directly.
		 */
		extra_bits |= EXTENT_DELALLOC_NEW;
	} else {
		int ret;

		ret = btrfs_find_new_delalloc_bytes(inode, start,
						    end + 1 - start,
						    cached_state);
		if (ret)
			return ret;
	}

	return set_extent_bit(&inode->io_tree, start, end,
			      EXTENT_DELALLOC | extra_bits, cached_state);
}
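/*
 * Example of the EOF shortcut above (hypothetical offsets): for a file with
 * an i_size of 100K and no prealloc extents, delalloc at offset 200K can set
 * EXTENT_DELALLOC_NEW directly since nothing can exist beyond EOF, whereas
 * delalloc at offset 50K goes through btrfs_find_new_delalloc_bytes() so
 * that only the holes inside the range get the bit.
 */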
/* see btrfs_writepage_start_hook for details on why this is required */
struct btrfs_writepage_fixup {
	struct page *page;
	struct btrfs_inode *inode;
	struct btrfs_work work;
};
static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct extent_changeset *data_reserved = NULL;
	struct page *page;
	struct btrfs_inode *inode;
	u64 page_start;
	u64 page_end;
	int ret = 0;
	bool free_delalloc_space = true;

	fixup = container_of(work, struct btrfs_writepage_fixup, work);
	page = fixup->page;
	inode = fixup->inode;
	page_start = page_offset(page);
	page_end = page_offset(page) + PAGE_SIZE - 1;

	/*
	 * This is similar to page_mkwrite, we need to reserve the space before
	 * we take the page lock.
	 */
	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
					   PAGE_SIZE);
again:
	lock_page(page);

	/*
	 * Before we queued this fixup, we took a reference on the page.
	 * page->mapping may go NULL, but it shouldn't be moved to a different
	 * address space.
	 */
	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
		/*
		 * Unfortunately this is a little tricky, either
		 *
		 * 1) We got here and our page had already been dealt with and
		 *    we reserved our space, thus ret == 0, so we need to just
		 *    drop our space reservation and bail.  This can happen the
		 *    first time we come into the fixup worker, or could happen
		 *    while waiting for the ordered extent.
		 * 2) Our page was already dealt with, but we happened to get an
		 *    ENOSPC above from the btrfs_delalloc_reserve_space.  In
		 *    this case we obviously don't have anything to release, but
		 *    because the page was already dealt with we don't want to
		 *    mark the page with an error, so make sure we're resetting
		 *    ret to 0.  This is why we have this check _before_ the ret
		 *    check, because we do not want to have a surprise ENOSPC
		 *    when the page was already properly dealt with.
		 */
		if (!ret) {
			btrfs_delalloc_release_extents(inode, PAGE_SIZE);
			btrfs_delalloc_release_space(inode, data_reserved,
						     page_start, PAGE_SIZE,
						     true);
		}
		ret = 0;
		goto out_page;
	}

	/*
	 * We can't mess with the page state unless it is locked, so now that
	 * it is locked bail if we failed to make our space reservation.
	 */
	if (ret)
		goto out_page;

	lock_extent(&inode->io_tree, page_start, page_end, &cached_state);

	/* already ordered? We're done */
	if (PageOrdered(page))
		goto out_reserved;

	ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
	if (ordered) {
		unlock_extent(&inode->io_tree, page_start, page_end,
			      &cached_state);
		unlock_page(page);
		btrfs_start_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
					&cached_state);
	if (ret)
		goto out_reserved;

	/*
	 * Everything went as planned, we're now the owner of a dirty page with
	 * delayed allocation bits set and space reserved for our COW
	 * destination.
	 *
	 * The page was dirty when we started, nothing should have cleaned it.
	 */
	BUG_ON(!PageDirty(page));
	free_delalloc_space = false;
out_reserved:
	btrfs_delalloc_release_extents(inode, PAGE_SIZE);
	if (free_delalloc_space)
		btrfs_delalloc_release_space(inode, data_reserved, page_start,
					     PAGE_SIZE, true);
	unlock_extent(&inode->io_tree, page_start, page_end, &cached_state);
out_page:
	if (ret) {
		/*
		 * We hit ENOSPC or other errors. Update the mapping and page
		 * to reflect the errors and clean the page.
		 */
		mapping_set_error(page->mapping, ret);
		end_extent_writepage(page, ret, page_start, page_end);
		clear_page_dirty_for_io(page);
	}
	btrfs_page_clear_checked(inode->root->fs_info, page, page_start, PAGE_SIZE);
	unlock_page(page);
	put_page(page);
	kfree(fixup);
	extent_changeset_free(data_reserved);
	/*
	 * As a precaution, do a delayed iput in case it would be the last iput
	 * that could need flushing space. Recursing back to fixup worker would
	 * deadlock.
	 */
	btrfs_add_delayed_iput(inode);
}
/*
 * There are a few paths in the higher layers of the kernel that directly
 * set the page dirty bit without asking the filesystem if it is a
 * good idea.  This causes problems because we want to make sure COW
 * properly happens and the data=ordered rules are followed.
 *
 * In our case any range that doesn't have the ORDERED bit set
 * hasn't been properly setup for IO.  We kick off an async process
 * to fix it up.  The async helper will wait for ordered extents, set
 * the delalloc bit and make it safe to write the page.
 */
int btrfs_writepage_cow_fixup(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_writepage_fixup *fixup;

	/* This page has ordered extent covering it already */
	if (PageOrdered(page))
		return 0;

	/*
	 * PageChecked is set below when we create a fixup worker for this page,
	 * don't try to create another one if we're already PageChecked()
	 *
	 * The extent_io writepage code will redirty the page if we send back
	 * EAGAIN.
	 */
	if (PageChecked(page))
		return -EAGAIN;

	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
	if (!fixup)
		return -EAGAIN;

	/*
	 * We are already holding a reference to this inode from
	 * write_cache_pages.  We need to hold it because the space reservation
	 * takes place outside of the page lock, and we can't trust
	 * page->mapping outside of the page lock.
	 */
	ihold(inode);
	btrfs_page_set_checked(fs_info, page, page_offset(page), PAGE_SIZE);
	get_page(page);
	btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL);
	fixup->page = page;
	fixup->inode = BTRFS_I(inode);
	btrfs_queue_work(fs_info->fixup_workers, &fixup->work);

	return -EAGAIN;
}
static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
				       struct btrfs_inode *inode, u64 file_pos,
				       struct btrfs_file_extent_item *stack_fi,
				       const bool update_inode_bytes,
				       u64 qgroup_reserved)
{
	struct btrfs_root *root = inode->root;
	const u64 sectorsize = root->fs_info->sectorsize;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key ins;
	u64 disk_num_bytes = btrfs_stack_file_extent_disk_num_bytes(stack_fi);
	u64 disk_bytenr = btrfs_stack_file_extent_disk_bytenr(stack_fi);
	u64 offset = btrfs_stack_file_extent_offset(stack_fi);
	u64 num_bytes = btrfs_stack_file_extent_num_bytes(stack_fi);
	u64 ram_bytes = btrfs_stack_file_extent_ram_bytes(stack_fi);
	struct btrfs_drop_extents_args drop_args = { 0 };
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * we may be replacing one extent in the tree with another.
	 * The new extent is pinned in the extent map, and we don't want
	 * to drop it from the cache until it is completely in the btree.
	 *
	 * So, tell btrfs_drop_extents to leave this extent in the cache.
	 * the caller is expected to unpin it and allow it to be merged
	 * with the others.
	 */
	drop_args.path = path;
	drop_args.start = file_pos;
	drop_args.end = file_pos + num_bytes;
	drop_args.replace_extent = true;
	drop_args.extent_item_size = sizeof(*stack_fi);
	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
	if (ret)
		goto out;

	if (!drop_args.extent_inserted) {
		ins.objectid = btrfs_ino(inode);
		ins.offset = file_pos;
		ins.type = BTRFS_EXTENT_DATA_KEY;

		ret = btrfs_insert_empty_item(trans, root, path, &ins,
					      sizeof(*stack_fi));
		if (ret)
			goto out;
	}
	leaf = path->nodes[0];
	btrfs_set_stack_file_extent_generation(stack_fi, trans->transid);
	write_extent_buffer(leaf, stack_fi,
			    btrfs_item_ptr_offset(leaf, path->slots[0]),
			    sizeof(struct btrfs_file_extent_item));

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	/*
	 * If we dropped an inline extent here, we know the range where it is
	 * was not marked with the EXTENT_DELALLOC_NEW bit, so we update the
	 * number of bytes only for that range containing the inline extent.
	 * The remainder of the range will be processed when clearing the
	 * EXTENT_DELALLOC_BIT bit through the ordered extent completion.
	 */
	if (file_pos == 0 && !IS_ALIGNED(drop_args.bytes_found, sectorsize)) {
		u64 inline_size = round_down(drop_args.bytes_found, sectorsize);

		inline_size = drop_args.bytes_found - inline_size;
		btrfs_update_inode_bytes(inode, sectorsize, inline_size);
		drop_args.bytes_found -= inline_size;
		num_bytes -= sectorsize;
	}

	if (update_inode_bytes)
		btrfs_update_inode_bytes(inode, num_bytes, drop_args.bytes_found);

	ins.objectid = disk_bytenr;
	ins.offset = disk_num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_inode_set_file_extent_range(inode, file_pos, ram_bytes);
	if (ret)
		goto out;

	ret = btrfs_alloc_reserved_file_extent(trans, root, btrfs_ino(inode),
					       file_pos - offset,
					       qgroup_reserved, &ins);
out:
	btrfs_free_path(path);

	return ret;
}
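/*
 * Example for the inline extent case above (illustrative, 4K sectorsize):
 * dropping a 1000 byte inline extent at file offset 0 leaves
 * drop_args.bytes_found at 1000, so btrfs_update_inode_bytes() adds one
 * sectorsize and releases those 1000 bytes immediately, and one sectorsize
 * is trimmed from num_bytes; the rest of the range is accounted when the
 * EXTENT_DELALLOC_NEW bit is cleared at ordered extent completion.
 */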
static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
					 u64 start, u64 len)
{
	struct btrfs_block_group *cache;

	cache = btrfs_lookup_block_group(fs_info, start);
	ASSERT(cache);

	spin_lock(&cache->lock);
	cache->delalloc_bytes -= len;
	spin_unlock(&cache->lock);

	btrfs_put_block_group(cache);
}
static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans,
					     struct btrfs_ordered_extent *oe)
{
	struct btrfs_file_extent_item stack_fi;
	bool update_inode_bytes;
	u64 num_bytes = oe->num_bytes;
	u64 ram_bytes = oe->ram_bytes;

	memset(&stack_fi, 0, sizeof(stack_fi));
	btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_REG);
	btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, oe->disk_bytenr);
	btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi,
						   oe->disk_num_bytes);
	btrfs_set_stack_file_extent_offset(&stack_fi, oe->offset);
	if (test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags)) {
		num_bytes = oe->truncated_len;
		ram_bytes = num_bytes;
	}
	btrfs_set_stack_file_extent_num_bytes(&stack_fi, num_bytes);
	btrfs_set_stack_file_extent_ram_bytes(&stack_fi, ram_bytes);
	btrfs_set_stack_file_extent_compression(&stack_fi, oe->compress_type);
	/* Encryption and other encoding is reserved and all 0 */

	/*
	 * For delalloc, when completing an ordered extent we update the inode's
	 * bytes when clearing the range in the inode's io tree, so pass false
	 * as the argument 'update_inode_bytes' to insert_reserved_file_extent(),
	 * except if the ordered extent was truncated.
	 */
	update_inode_bytes = test_bit(BTRFS_ORDERED_DIRECT, &oe->flags) ||
			     test_bit(BTRFS_ORDERED_ENCODED, &oe->flags) ||
			     test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags);

	return insert_reserved_file_extent(trans, BTRFS_I(oe->inode),
					   oe->file_offset, &stack_fi,
					   update_inode_bytes, oe->qgroup_rsv);
}
/*
 * As ordered data IO finishes, this gets called so we can finish
 * an ordered extent if the range of bytes in the file it covers are
 * fully written.
 */
int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
{
	struct btrfs_inode *inode = BTRFS_I(ordered_extent->inode);
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans = NULL;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct extent_state *cached_state = NULL;
	u64 start, end;
	int compress_type = 0;
	int ret = 0;
	u64 logical_len = ordered_extent->num_bytes;
	bool freespace_inode;
	bool truncated = false;
	bool clear_reserved_extent = true;
	unsigned int clear_bits = EXTENT_DEFRAG;

	start = ordered_extent->file_offset;
	end = start + ordered_extent->num_bytes - 1;

	if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
	    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) &&
	    !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags) &&
	    !test_bit(BTRFS_ORDERED_ENCODED, &ordered_extent->flags))
		clear_bits |= EXTENT_DELALLOC_NEW;

	freespace_inode = btrfs_is_free_space_inode(inode);
	if (!freespace_inode)
		btrfs_lockdep_acquire(fs_info, btrfs_ordered_extent);

	if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
		ret = -EIO;
		goto out;
	}

	if (btrfs_is_zoned(fs_info))
		btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr,
					ordered_extent->disk_num_bytes);

	if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
		truncated = true;
		logical_len = ordered_extent->truncated_len;
		/* Truncated the entire extent, don't bother adding */
		if (!logical_len)
			goto out;
	}

	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
		BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */

		btrfs_inode_safe_disk_i_size_write(inode, 0);
		if (freespace_inode)
			trans = btrfs_join_transaction_spacecache(root);
		else
			trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			goto out;
		}
		trans->block_rsv = &inode->block_rsv;
		ret = btrfs_update_inode_fallback(trans, root, inode);
		if (ret) /* -ENOMEM or corruption */
			btrfs_abort_transaction(trans, ret);
		goto out;
	}

	clear_bits |= EXTENT_LOCKED;
	lock_extent(io_tree, start, end, &cached_state);

	if (freespace_inode)
		trans = btrfs_join_transaction_spacecache(root);
	else
		trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}

	trans->block_rsv = &inode->block_rsv;

	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
		compress_type = ordered_extent->compress_type;
	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
		BUG_ON(compress_type);
		ret = btrfs_mark_extent_written(trans, inode,
						ordered_extent->file_offset,
						ordered_extent->file_offset +
						logical_len);
		btrfs_zoned_release_data_reloc_bg(fs_info, ordered_extent->disk_bytenr,
						  ordered_extent->disk_num_bytes);
	} else {
		BUG_ON(root == fs_info->tree_root);
		ret = insert_ordered_extent_file_extent(trans, ordered_extent);
		if (!ret) {
			clear_reserved_extent = false;
			btrfs_release_delalloc_bytes(fs_info,
						     ordered_extent->disk_bytenr,
						     ordered_extent->disk_num_bytes);
		}
	}
	unpin_extent_cache(&inode->extent_tree, ordered_extent->file_offset,
			   ordered_extent->num_bytes, trans->transid);
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = add_pending_csums(trans, &ordered_extent->list);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	/*
	 * If this is a new delalloc range, clear its new delalloc flag to
	 * update the inode's number of bytes. This needs to be done first
	 * before updating the inode item.
	 */
	if ((clear_bits & EXTENT_DELALLOC_NEW) &&
	    !test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags))
		clear_extent_bit(&inode->io_tree, start, end,
				 EXTENT_DELALLOC_NEW | EXTENT_ADD_INODE_BYTES,
				 &cached_state);

	btrfs_inode_safe_disk_i_size_write(inode, 0);
	ret = btrfs_update_inode_fallback(trans, root, inode);
	if (ret) { /* -ENOMEM or corruption */
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	ret = 0;
out:
	clear_extent_bit(&inode->io_tree, start, end, clear_bits,
			 &cached_state);

	if (trans)
		btrfs_end_transaction(trans);

	if (ret || truncated) {
		u64 unwritten_start = start;

		/*
		 * If we failed to finish this ordered extent for any reason we
		 * need to make sure BTRFS_ORDERED_IOERR is set on the ordered
		 * extent, and mark the inode with the error if it wasn't
		 * already set. Any error during writeback would have already
		 * set the mapping error, so we need to set it if we're the ones
		 * marking this ordered extent as failed.
		 */
		if (ret && !test_and_set_bit(BTRFS_ORDERED_IOERR,
					     &ordered_extent->flags))
			mapping_set_error(ordered_extent->inode->i_mapping, -EIO);

		if (truncated)
			unwritten_start += logical_len;
		clear_extent_uptodate(io_tree, unwritten_start, end, NULL);

		/* Drop extent maps for the part of the extent we didn't write. */
		btrfs_drop_extent_map_range(inode, unwritten_start, end, false);

		/*
		 * If the ordered extent had an IOERR or something else went
		 * wrong we need to return the space for this ordered extent
		 * back to the allocator.  We only free the extent in the
		 * truncated case if we didn't write out the extent at all.
		 *
		 * If we made it past insert_reserved_file_extent before we
		 * errored out then we don't need to do this as the accounting
		 * has already been done.
		 */
		if ((ret || !logical_len) &&
		    clear_reserved_extent &&
		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
		    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
			/*
			 * Discard the range before returning it back to the
			 * free space pool
			 */
			if (ret && btrfs_test_opt(fs_info, DISCARD_SYNC))
				btrfs_discard_extent(fs_info,
						ordered_extent->disk_bytenr,
						ordered_extent->disk_num_bytes,
						NULL);
			btrfs_free_reserved_extent(fs_info,
					ordered_extent->disk_bytenr,
					ordered_extent->disk_num_bytes, 1);
		}
	}

	/*
	 * This needs to be done to make sure anybody waiting knows we are done
	 * updating everything for this ordered extent.
	 */
	btrfs_remove_ordered_extent(inode, ordered_extent);

	/* once for us */
	btrfs_put_ordered_extent(ordered_extent);
	/* once for the tree */
	btrfs_put_ordered_extent(ordered_extent);

	return ret;
}
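/*
 * Rough flow of the function above for the common COW write: lock the file
 * range, join a transaction, insert the file extent item and the pending
 * csums, clear EXTENT_DELALLOC_NEW so the inode's byte count is updated,
 * update the inode item, then unlock and drop the two ordered extent
 * references. The NOCOW and truncated/error cases peel off early and only
 * update or clean up what is already on disk.
 */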
int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered)
{
	if (btrfs_is_zoned(btrfs_sb(ordered->inode->i_sb)) &&
	    !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
		btrfs_finish_ordered_zoned(ordered);
	return btrfs_finish_one_ordered(ordered);
}
void btrfs_writepage_endio_finish_ordered(struct btrfs_inode *inode,
					  struct page *page, u64 start,
					  u64 end, bool uptodate)
{
	trace_btrfs_writepage_end_io_hook(inode, start, end, uptodate);

	btrfs_mark_ordered_io_finished(inode, page, start, end + 1 - start, uptodate);
}
/*
 * Verify the checksum for a single sector without any extra actions that
 * depend on the type of I/O.
 */
int btrfs_check_sector_csum(struct btrfs_fs_info *fs_info, struct page *page,
			    u32 pgoff, u8 *csum, const u8 * const csum_expected)
{
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	char *kaddr;

	ASSERT(pgoff + fs_info->sectorsize <= PAGE_SIZE);

	shash->tfm = fs_info->csum_shash;

	kaddr = kmap_local_page(page) + pgoff;
	crypto_shash_digest(shash, kaddr, fs_info->sectorsize, csum);
	kunmap_local(kaddr);

	if (memcmp(csum, csum_expected, fs_info->csum_size))
		return -EIO;
	return 0;
}
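/*
 * Example (assuming the default crc32c checksums, so fs_info->csum_size is
 * 4): for a 4K sector the code above hashes the 4096 bytes at @pgoff into
 * @csum and compares its first 4 bytes against @csum_expected. With a
 * 32 byte algorithm such as sha256 or blake2b the very same code compares
 * 32 bytes, since csum_size follows the algorithm chosen at mkfs time.
 */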
/*
 * Verify the checksum of a single data sector.
 *
 * @bbio:	btrfs_io_bio which contains the csum
 * @dev:	device the sector is on
 * @bio_offset:	offset to the beginning of the bio (in bytes)
 * @bv:		bio_vec to check
 *
 * Check if the checksum on a data block is valid.  When a checksum mismatch is
 * detected, report the error and fill the corrupted range with zero.
 *
 * Return %true if the sector is ok or had no checksum to start with, else %false.
 */
bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev,
			u32 bio_offset, struct bio_vec *bv)
{
	struct btrfs_inode *inode = bbio->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 file_offset = bbio->file_offset + bio_offset;
	u64 end = file_offset + bv->bv_len - 1;
	u8 *csum_expected;
	u8 csum[BTRFS_CSUM_SIZE];

	ASSERT(bv->bv_len == fs_info->sectorsize);

	if (!bbio->csum)
		return true;

	if (btrfs_is_data_reloc_root(inode->root) &&
	    test_range_bit(&inode->io_tree, file_offset, end, EXTENT_NODATASUM,
			   1, NULL)) {
		/* Skip the range without csum for data reloc inode */
		clear_extent_bits(&inode->io_tree, file_offset, end,
				  EXTENT_NODATASUM);
		return true;
	}

	csum_expected = bbio->csum + (bio_offset >> fs_info->sectorsize_bits) *
				fs_info->csum_size;
	if (btrfs_check_sector_csum(fs_info, bv->bv_page, bv->bv_offset, csum,
				    csum_expected))
		goto zeroit;
	return true;

zeroit:
	btrfs_print_data_csum_error(inode, file_offset, csum, csum_expected,
				    bbio->mirror_num);
	if (dev)
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS);
	memzero_bvec(bv);
	return false;
}
/*
 * btrfs_add_delayed_iput - perform a delayed iput on @inode
 *
 * @inode: The inode we want to perform iput on
 *
 * This function uses the generic vfs_inode::i_count to track whether we should
 * just decrement it (in case it's > 1) or if this is the last iput then link
 * the inode to the delayed iput machinery. Delayed iputs are processed at
 * transaction commit time/superblock commit/cleaner kthread.
 */
void btrfs_add_delayed_iput(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	if (atomic_add_unless(&inode->vfs_inode.i_count, -1, 1))
		return;

	atomic_inc(&fs_info->nr_delayed_iputs);
	spin_lock(&fs_info->delayed_iput_lock);
	ASSERT(list_empty(&inode->delayed_iput));
	list_add_tail(&inode->delayed_iput, &fs_info->delayed_iputs);
	spin_unlock(&fs_info->delayed_iput_lock);
	if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags))
		wake_up_process(fs_info->cleaner_kthread);
}
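/*
 * Illustration of why the deferral above matters: if an ordered extent
 * completion drops the last reference to an inode, a plain iput() could
 * recurse into inode eviction and try to flush space or join a transaction
 * from a context that must not block on either. Parking the inode on
 * fs_info->delayed_iputs lets the cleaner kthread (or a transaction commit)
 * run the final iput from a safe context instead.
 */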
static void run_delayed_iput_locked(struct btrfs_fs_info *fs_info,
				    struct btrfs_inode *inode)
{
	list_del_init(&inode->delayed_iput);
	spin_unlock(&fs_info->delayed_iput_lock);
	iput(&inode->vfs_inode);
	if (atomic_dec_and_test(&fs_info->nr_delayed_iputs))
		wake_up(&fs_info->delayed_iputs_wait);
	spin_lock(&fs_info->delayed_iput_lock);
}
static void btrfs_run_delayed_iput(struct btrfs_fs_info *fs_info,
				   struct btrfs_inode *inode)
{
	if (!list_empty(&inode->delayed_iput)) {
		spin_lock(&fs_info->delayed_iput_lock);
		if (!list_empty(&inode->delayed_iput))
			run_delayed_iput_locked(fs_info, inode);
		spin_unlock(&fs_info->delayed_iput_lock);
	}
}
void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
{
	spin_lock(&fs_info->delayed_iput_lock);
	while (!list_empty(&fs_info->delayed_iputs)) {
		struct btrfs_inode *inode;

		inode = list_first_entry(&fs_info->delayed_iputs,
					 struct btrfs_inode, delayed_iput);
		run_delayed_iput_locked(fs_info, inode);
		cond_resched_lock(&fs_info->delayed_iput_lock);
	}
	spin_unlock(&fs_info->delayed_iput_lock);
}
/*
 * Wait for flushing all delayed iputs
 *
 * @fs_info:  the filesystem
 *
 * This will wait on any delayed iputs that are currently running with KILLABLE
 * set. Once they are all done running we will return, unless we are killed in
 * which case we return EINTR. This helps in user operations like fallocate etc
 * that might get blocked on the iputs.
 *
 * Return EINTR if we were killed, 0 if nothing's pending
 */
int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info)
{
	int ret = wait_event_killable(fs_info->delayed_iputs_wait,
			atomic_read(&fs_info->nr_delayed_iputs) == 0);
	if (ret)
		return -EINTR;
	return 0;
}
/*
 * This creates an orphan entry for the given inode in case something goes wrong
 * in the middle of an unlink.
 */
int btrfs_orphan_add(struct btrfs_trans_handle *trans,
		     struct btrfs_inode *inode)
{
	int ret;

	ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode));
	if (ret && ret != -EEXIST) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	return 0;
}

/*
 * We have done the delete so we can go ahead and remove the orphan item for
 * this particular inode.
 */
static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
			    struct btrfs_inode *inode)
{
	return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode));
}
/*
 * this cleans up any orphans that may be left on the list from the last use
 * of this root.
 */
int btrfs_orphan_cleanup(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key, found_key;
	struct btrfs_trans_handle *trans;
	struct inode *inode;
	u64 last_objectid = 0;
	int ret = 0, nr_unlink = 0;

	if (test_and_set_bit(BTRFS_ROOT_ORPHAN_CLEANUP, &root->state))
		return 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}
	path->reada = READA_BACK;

	key.objectid = BTRFS_ORPHAN_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		/*
		 * if ret == 0 means we found what we were searching for, which
		 * is weird, but possible, so only screw with path if we didn't
		 * find the key and see if we have stuff that matches
		 */
		if (ret > 0) {
			ret = 0;
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		/* pull out the item */
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		/* make sure the item matches what we want */
		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
			break;
		if (found_key.type != BTRFS_ORPHAN_ITEM_KEY)
			break;

		/* release the path since we're done with it */
		btrfs_release_path(path);

		/*
		 * this is where we are basically btrfs_lookup, without the
		 * crossing root thing.  we store the inode number in the
		 * offset of the orphan item.
		 */

		if (found_key.offset == last_objectid) {
			btrfs_err(fs_info,
		"Error removing orphan entry, stopping orphan cleanup");
			ret = -EINVAL;
			goto out;
		}

		last_objectid = found_key.offset;

		found_key.objectid = found_key.offset;
		found_key.type = BTRFS_INODE_ITEM_KEY;
		found_key.offset = 0;
		inode = btrfs_iget(fs_info->sb, last_objectid, root);
		ret = PTR_ERR_OR_ZERO(inode);
		if (ret && ret != -ENOENT)
			goto out;

		if (ret == -ENOENT && root == fs_info->tree_root) {
			struct btrfs_root *dead_root;
			int is_dead_root = 0;

			/*
			 * This is an orphan in the tree root. Currently these
			 * could come from 2 sources:
			 *  a) a root (snapshot/subvolume) deletion in progress
			 *  b) a free space cache inode
			 * We need to distinguish those two, as the orphan item
			 * for a root must not get deleted before the deletion
			 * of the snapshot/subvolume's tree completes.
			 *
			 * btrfs_find_orphan_roots() ran before us, which has
			 * found all deleted roots and loaded them into
			 * fs_info->fs_roots_radix. So here we can find if an
			 * orphan item corresponds to a deleted root by looking
			 * up the root from that radix tree.
			 */

			spin_lock(&fs_info->fs_roots_radix_lock);
			dead_root = radix_tree_lookup(&fs_info->fs_roots_radix,
					(unsigned long)found_key.objectid);
			if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0)
				is_dead_root = 1;
			spin_unlock(&fs_info->fs_roots_radix_lock);

			if (is_dead_root) {
				/* prevent this orphan from being found again */
				key.offset = found_key.objectid - 1;
				continue;
			}
		}

		/*
		 * If we have an inode with links, there are a couple of
		 * possibilities:
		 *
		 * 1. We were halfway through creating fsverity metadata for the
		 *    file. In that case, the orphan item represents incomplete
		 *    fsverity metadata which must be cleaned up with
		 *    btrfs_drop_verity_items and deleting the orphan item.
		 *
		 * 2. Old kernels (before v3.12) used to create an
		 *    orphan item for truncate indicating that there were possibly
		 *    extent items past i_size that needed to be deleted. In v3.12,
		 *    truncate was changed to update i_size in sync with the extent
		 *    items, but the (useless) orphan item was still created. Since
		 *    v4.18, we don't create the orphan item for truncate at all.
		 *
		 * So, this item could mean that we need to do a truncate, but
		 * only if this filesystem was last used on a pre-v3.12 kernel
		 * and was not cleanly unmounted. The odds of that are quite
		 * slim, and it's a pain to do the truncate now, so just delete
		 * the orphan item.
		 *
		 * It's also possible that this orphan item was supposed to be
		 * deleted but wasn't. The inode number may have been reused,
		 * but either way, we can delete the orphan item.
		 */
		if (ret == -ENOENT || inode->i_nlink) {
			if (!ret) {
				ret = btrfs_drop_verity_items(BTRFS_I(inode));
				iput(inode);
				if (ret)
					goto out;
			}
			trans = btrfs_start_transaction(root, 1);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				goto out;
			}
			btrfs_debug(fs_info, "auto deleting %Lu",
				    found_key.objectid);
			ret = btrfs_del_orphan_item(trans, root,
						    found_key.objectid);
			btrfs_end_transaction(trans);
			if (ret)
				goto out;
			continue;
		}

		nr_unlink++;

		/* this will do delete_inode and everything for us */
		iput(inode);
	}
	/* release the path since we're done with it */
	btrfs_release_path(path);

	if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
		trans = btrfs_join_transaction(root);
		if (!IS_ERR(trans))
			btrfs_end_transaction(trans);
	}

	if (nr_unlink)
		btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink);

out:
	if (ret)
		btrfs_err(fs_info, "could not do orphan cleanup %d", ret);
	btrfs_free_path(path);
	return ret;
}
/*
 * very simple check to peek ahead in the leaf looking for xattrs.  If we
 * don't find any xattrs, we know there can't be any acls.
 *
 * slot is the slot the inode is in, objectid is the objectid of the inode
 */
static noinline int acls_after_inode_item(struct extent_buffer *leaf,
					  int slot, u64 objectid,
					  int *first_xattr_slot)
{
	u32 nritems = btrfs_header_nritems(leaf);
	struct btrfs_key found_key;
	static u64 xattr_access = 0;
	static u64 xattr_default = 0;
	int scanned = 0;

	if (!xattr_access) {
		xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS,
					strlen(XATTR_NAME_POSIX_ACL_ACCESS));
		xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT,
					strlen(XATTR_NAME_POSIX_ACL_DEFAULT));
	}

	slot++;
	*first_xattr_slot = -1;
	while (slot < nritems) {
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/* we found a different objectid, there must not be acls */
		if (found_key.objectid != objectid)
			return 0;

		/* we found an xattr, assume we've got an acl */
		if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
			if (*first_xattr_slot == -1)
				*first_xattr_slot = slot;
			if (found_key.offset == xattr_access ||
			    found_key.offset == xattr_default)
				return 1;
		}

		/*
		 * we found a key greater than an xattr key, there can't
		 * be any acls later on
		 */
		if (found_key.type > BTRFS_XATTR_ITEM_KEY)
			return 0;

		slot++;
		scanned++;

		/*
		 * it goes inode, inode backrefs, xattrs, extents,
		 * so if there are a ton of hard links to an inode there can
		 * be a lot of backrefs.  Don't waste time searching too hard,
		 * this is just an optimization
		 */
		if (scanned >= 8)
			break;
	}
	/* we hit the end of the leaf before we found an xattr or
	 * something larger than an xattr.  We have to assume the inode
	 * has acls
	 */
	if (*first_xattr_slot == -1)
		*first_xattr_slot = slot;
	return 1;
}
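/*
 * Example leaf layout for the scan above (hypothetical items for inode 257):
 *
 *	(257 INODE_ITEM 0) (257 INODE_REF ...) (257 XATTR_ITEM <hash>) ...
 *
 * The scan starts right after the inode item, records the slot of the first
 * XATTR_ITEM, and returns 1 as soon as a key offset matches one of the two
 * cached ACL name hashes, or 0 once a key type beyond XATTR_ITEM proves no
 * ACL can follow.
 */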
/*
 * read an inode from the btree into the in-memory inode
 */
static int btrfs_read_locked_inode(struct inode *inode,
				   struct btrfs_path *in_path)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_path *path = in_path;
	struct extent_buffer *leaf;
	struct btrfs_inode_item *inode_item;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key location;
	unsigned long ptr;
	int maybe_acls;
	u32 rdev;
	int ret;
	bool filled = false;
	int first_xattr_slot;

	ret = btrfs_fill_inode(inode, &rdev);
	if (!ret)
		filled = true;

	if (!path) {
		path = btrfs_alloc_path();
		if (!path)
			return -ENOMEM;
	}

	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));

	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
	if (ret) {
		if (path != in_path)
			btrfs_free_path(path);
		return ret;
	}

	leaf = path->nodes[0];

	if (filled)
		goto cache_index;

	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
	set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
	i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
	i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
	btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item));
	btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
			round_up(i_size_read(inode), fs_info->sectorsize));

	inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime);
	inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime);

	inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime);
	inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime);

	inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime);
	inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime);

	BTRFS_I(inode)->i_otime.tv_sec =
		btrfs_timespec_sec(leaf, &inode_item->otime);
	BTRFS_I(inode)->i_otime.tv_nsec =
		btrfs_timespec_nsec(leaf, &inode_item->otime);

	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
	BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);

	inode_set_iversion_queried(inode,
				   btrfs_inode_sequence(leaf, inode_item));
	inode->i_generation = BTRFS_I(inode)->generation;
	inode->i_rdev = 0;
	rdev = btrfs_inode_rdev(leaf, inode_item);

	BTRFS_I(inode)->index_cnt = (u64)-1;
	btrfs_inode_split_flags(btrfs_inode_flags(leaf, inode_item),
				&BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags);

cache_index:
	/*
	 * If we were modified in the current generation and evicted from memory
	 * and then re-read we need to do a full sync since we don't have any
	 * idea about which extents were modified before we were evicted from
	 * cache.
	 *
	 * This is required for both inode re-read from disk and delayed inode
	 * in delayed_nodes_tree.
	 */
	if (BTRFS_I(inode)->last_trans == fs_info->generation)
		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			&BTRFS_I(inode)->runtime_flags);

	/*
	 * We don't persist the id of the transaction where an unlink operation
	 * against the inode was last made. So here we assume the inode might
	 * have been evicted, and therefore the exact value of last_unlink_trans
	 * lost, and set it to last_trans to avoid metadata inconsistencies
	 * between the inode and its parent if the inode is fsync'ed and the log
	 * replayed. For example, in the scenario:
	 *
	 * touch mydir/foo
	 * ln mydir/foo mydir/bar
	 * sync
	 * unlink mydir/bar
	 * echo 2 > /proc/sys/vm/drop_caches   # evicts inode
	 * xfs_io -c fsync mydir/foo
	 * <power failure>
	 * mount fs, triggers fsync log replay
	 *
	 * We must make sure that when we fsync our inode foo we also log its
	 * parent inode, otherwise after log replay the parent still has the
	 * dentry with the "bar" name but our inode foo has a link count of 1
	 * and doesn't have an inode ref with the name "bar" anymore.
	 *
	 * Setting last_unlink_trans to last_trans is a pessimistic approach,
	 * but it guarantees correctness at the expense of occasional full
	 * transaction commits on fsync if our inode is a directory, or if our
	 * inode is not a directory, logging its parent unnecessarily.
	 */
	BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;

	/*
	 * Same logic as for last_unlink_trans. We don't persist the generation
	 * of the last transaction where this inode was used for a reflink
	 * operation, so after eviction and reloading the inode we must be
	 * pessimistic and assume the last transaction that modified the inode.
	 */
	BTRFS_I(inode)->last_reflink_trans = BTRFS_I(inode)->last_trans;

	path->slots[0]++;
	if (inode->i_nlink != 1 ||
	    path->slots[0] >= btrfs_header_nritems(leaf))
		goto cache_acl;

	btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
	if (location.objectid != btrfs_ino(BTRFS_I(inode)))
		goto cache_acl;

	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	if (location.type == BTRFS_INODE_REF_KEY) {
		struct btrfs_inode_ref *ref;

		ref = (struct btrfs_inode_ref *)ptr;
		BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref);
	} else if (location.type == BTRFS_INODE_EXTREF_KEY) {
		struct btrfs_inode_extref *extref;

		extref = (struct btrfs_inode_extref *)ptr;
		BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf,
								     extref);
	}
cache_acl:
	/*
	 * try to precache a NULL acl entry for files that don't have
	 * any xattrs or acls
	 */
	maybe_acls = acls_after_inode_item(leaf, path->slots[0],
			btrfs_ino(BTRFS_I(inode)), &first_xattr_slot);
	if (first_xattr_slot != -1) {
		path->slots[0] = first_xattr_slot;
		ret = btrfs_load_inode_props(inode, path);
		if (ret)
			btrfs_err(fs_info,
				  "error loading props for ino %llu (root %llu): %d",
				  btrfs_ino(BTRFS_I(inode)),
				  root->root_key.objectid, ret);
	}
	if (path != in_path)
		btrfs_free_path(path);

	if (!maybe_acls)
		cache_no_acl(inode);

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_mapping->a_ops = &btrfs_aops;
		inode->i_fop = &btrfs_file_operations;
		inode->i_op = &btrfs_file_inode_operations;
		break;
	case S_IFDIR:
		inode->i_fop = &btrfs_dir_file_operations;
		inode->i_op = &btrfs_dir_inode_operations;
		break;
	case S_IFLNK:
		inode->i_op = &btrfs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &btrfs_aops;
		break;
	default:
		inode->i_op = &btrfs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, rdev);
		break;
	}

	btrfs_sync_inode_flags_to_i_flags(inode);
	return 0;
}
/*
 * given a leaf and an inode, copy the inode fields into the leaf
 */
static void fill_inode_item(struct btrfs_trans_handle *trans,
			    struct extent_buffer *leaf,
			    struct btrfs_inode_item *item,
			    struct inode *inode)
{
	struct btrfs_map_token token;
	u64 flags;

	btrfs_init_map_token(&token, leaf);

	btrfs_set_token_inode_uid(&token, item, i_uid_read(inode));
	btrfs_set_token_inode_gid(&token, item, i_gid_read(inode));
	btrfs_set_token_inode_size(&token, item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_token_inode_mode(&token, item, inode->i_mode);
	btrfs_set_token_inode_nlink(&token, item, inode->i_nlink);

	btrfs_set_token_timespec_sec(&token, &item->atime,
				     inode->i_atime.tv_sec);
	btrfs_set_token_timespec_nsec(&token, &item->atime,
				      inode->i_atime.tv_nsec);

	btrfs_set_token_timespec_sec(&token, &item->mtime,
				     inode->i_mtime.tv_sec);
	btrfs_set_token_timespec_nsec(&token, &item->mtime,
				      inode->i_mtime.tv_nsec);

	btrfs_set_token_timespec_sec(&token, &item->ctime,
				     inode->i_ctime.tv_sec);
	btrfs_set_token_timespec_nsec(&token, &item->ctime,
				      inode->i_ctime.tv_nsec);

	btrfs_set_token_timespec_sec(&token, &item->otime,
				     BTRFS_I(inode)->i_otime.tv_sec);
	btrfs_set_token_timespec_nsec(&token, &item->otime,
				      BTRFS_I(inode)->i_otime.tv_nsec);

	btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode));
	btrfs_set_token_inode_generation(&token, item,
					 BTRFS_I(inode)->generation);
	btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode));
	btrfs_set_token_inode_transid(&token, item, trans->transid);
	btrfs_set_token_inode_rdev(&token, item, inode->i_rdev);
	flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
					  BTRFS_I(inode)->ro_flags);
	btrfs_set_token_inode_flags(&token, item, flags);
	btrfs_set_token_inode_block_group(&token, item, 0);
}

/*
 * copy everything in the in-memory inode into the btree.
 */
static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct btrfs_inode *inode)
{
	struct btrfs_inode_item *inode_item;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_lookup_inode(trans, root, path, &inode->location, 1);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto failed;
	}

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);

	fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_set_inode_last_trans(trans, inode);
	ret = 0;
failed:
	btrfs_free_path(path);
	return ret;
}

/*
 * copy everything in the in-memory inode into the btree.
 */
noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	/*
	 * If the inode is a free space inode, we can deadlock during commit
	 * if we put it into the delayed code.
	 *
	 * The data relocation inode should also be directly updated
	 * without delay
	 */
	if (!btrfs_is_free_space_inode(inode)
	    && !btrfs_is_data_reloc_root(root)
	    && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
		btrfs_update_root_times(trans, root);

		ret = btrfs_delayed_update_inode(trans, root, inode);
		if (!ret)
			btrfs_set_inode_last_trans(trans, inode);
		return ret;
	}

	return btrfs_update_inode_item(trans, root, inode);
}

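/*
 * Illustrative usage sketch (not from the original source): callers hold an
 * open transaction and typically do something like
 *
 *	trans = btrfs_start_transaction(root, 1);
 *	inode->vfs_inode.i_ctime = current_time(&inode->vfs_inode);
 *	ret = btrfs_update_inode(trans, root, inode);
 *	btrfs_end_transaction(trans);
 *
 * The delayed path above only records the update in memory and batches the
 * leaf write; btrfs_update_inode_item() writes the item out immediately.
 */
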
int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct btrfs_inode *inode)
{
	int ret;

	ret = btrfs_update_inode(trans, root, inode);
	if (ret == -ENOSPC)
		return btrfs_update_inode_item(trans, root, inode);
	return ret;
}

/*
 * unlink helper that gets used here in inode.c and in the tree logging
 * recovery code.  It removes a link in a directory with a given name, and
 * also drops the back refs in the inode to the directory
 */
static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
				struct btrfs_inode *dir,
				struct btrfs_inode *inode,
				const struct fscrypt_str *name,
				struct btrfs_rename_ctx *rename_ctx)
{
	struct btrfs_root *root = dir->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	int ret = 0;
	struct btrfs_dir_item *di;
	u64 index;
	u64 ino = btrfs_ino(inode);
	u64 dir_ino = btrfs_ino(dir);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	di = btrfs_lookup_dir_item(trans, root, path, dir_ino, name, -1);
	if (IS_ERR_OR_NULL(di)) {
		ret = di ? PTR_ERR(di) : -ENOENT;
		goto err;
	}
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	if (ret)
		goto err;
	btrfs_release_path(path);

	/*
	 * If we don't have dir index, we have to get it by looking up
	 * the inode ref, since we get the inode ref, remove it directly,
	 * it is unnecessary to do delayed deletion.
	 *
	 * But if we have dir index, needn't search inode ref to get it.
	 * Since the inode ref is close to the inode item, it is better
	 * that we delay to delete it, and just do this deletion when
	 * we update the inode item.
	 */
	if (inode->dir_index) {
		ret = btrfs_delayed_delete_inode_ref(inode);
		if (!ret) {
			index = inode->dir_index;
			goto skip_backref;
		}
	}

	ret = btrfs_del_inode_ref(trans, root, name, ino, dir_ino, &index);
	if (ret) {
		btrfs_info(fs_info,
			"failed to delete reference to %.*s, inode %llu parent %llu",
			name->len, name->name, ino, dir_ino);
		btrfs_abort_transaction(trans, ret);
		goto err;
	}
skip_backref:
	if (rename_ctx)
		rename_ctx->index = index;

	ret = btrfs_delete_delayed_dir_index(trans, dir, index);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto err;
	}

	/*
	 * If we are in a rename context, we don't need to update anything in the
	 * log. That will be done later during the rename by btrfs_log_new_name().
	 * Besides that, doing it here would only cause extra unnecessary btree
	 * operations on the log tree, increasing latency for applications.
	 */
	if (!rename_ctx) {
		btrfs_del_inode_ref_in_log(trans, root, name, inode, dir_ino);
		btrfs_del_dir_entries_in_log(trans, root, name, dir, index);
	}

	/*
	 * If we have a pending delayed iput we could end up with the final iput
	 * being run in btrfs-cleaner context.  If we have enough of these built
	 * up we can end up burning a lot of time in btrfs-cleaner without any
	 * way to throttle the unlinks.  Since we're currently holding a ref on
	 * the inode we can run the delayed iput here without any issues as the
	 * final iput won't be done until after we drop the ref we're currently
	 * holding.
	 */
	btrfs_run_delayed_iput(fs_info, inode);
err:
	btrfs_free_path(path);
	if (ret)
		goto out;

	btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2);
	inode_inc_iversion(&inode->vfs_inode);
	inode_inc_iversion(&dir->vfs_inode);
	inode->vfs_inode.i_ctime = current_time(&inode->vfs_inode);
	dir->vfs_inode.i_mtime = inode->vfs_inode.i_ctime;
	dir->vfs_inode.i_ctime = inode->vfs_inode.i_ctime;
	ret = btrfs_update_inode(trans, root, dir);
out:
	return ret;
}

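/*
 * Note (illustrative summary): a single unlink removes three tree items - the
 * DIR_ITEM keyed by the name hash, the DIR_INDEX keyed by readdir order, and
 * the INODE_REF back reference - plus, outside of renames, their copies in
 * the log tree.
 */
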
int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
		       struct btrfs_inode *dir, struct btrfs_inode *inode,
		       const struct fscrypt_str *name)
{
	int ret;

	ret = __btrfs_unlink_inode(trans, dir, inode, name, NULL);
	if (!ret) {
		drop_nlink(&inode->vfs_inode);
		ret = btrfs_update_inode(trans, inode->root, inode);
	}
	return ret;
}

/*
 * helper to start transaction for unlink and rmdir.
 *
 * unlink and rmdir are special in btrfs, they do not always free space, so
 * if we cannot make our reservations the normal way try and see if there is
 * plenty of slack room in the global reserve to migrate, otherwise we cannot
 * allow the unlink to occur.
 */
static struct btrfs_trans_handle *__unlink_start_trans(struct btrfs_inode *dir)
{
	struct btrfs_root *root = dir->root;

	return btrfs_start_transaction_fallback_global_rsv(root,
						   BTRFS_UNLINK_METADATA_UNITS);
}

static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct btrfs_trans_handle *trans;
	struct inode *inode = d_inode(dentry);
	int ret;
	struct fscrypt_name fname;

	ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname);
	if (ret)
		return ret;

	/* This needs to handle no-key deletions later on */

	trans = __unlink_start_trans(BTRFS_I(dir));
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto fscrypt_free;
	}

	btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
				false);

	ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
				 &fname.disk_name);
	if (ret)
		goto end_trans;

	if (inode->i_nlink == 0) {
		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
		if (ret)
			goto end_trans;
	}

end_trans:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(BTRFS_I(dir)->root->fs_info);
fscrypt_free:
	fscrypt_free_filename(&fname);
	return ret;
}

static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
			       struct btrfs_inode *dir, struct dentry *dentry)
{
	struct btrfs_root *root = dir->root;
	struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	u64 index;
	int ret;
	u64 objectid;
	u64 dir_ino = btrfs_ino(dir);
	struct fscrypt_name fname;

	ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname);
	if (ret)
		return ret;

	/* This needs to handle no-key deletions later on */

	if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) {
		objectid = inode->root->root_key.objectid;
	} else if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
		objectid = inode->location.objectid;
	} else {
		WARN_ON(1);
		fscrypt_free_filename(&fname);
		return -EINVAL;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
				   &fname.disk_name, -1);
	if (IS_ERR_OR_NULL(di)) {
		ret = di ? PTR_ERR(di) : -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	btrfs_dir_item_key_to_cpu(leaf, di, &key);
	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	btrfs_release_path(path);

	/*
	 * This is a placeholder inode for a subvolume we didn't have a
	 * reference to at the time of the snapshot creation.  In the meantime
	 * we could have renamed the real subvol link into our snapshot, so
	 * depending on btrfs_del_root_ref to return -ENOENT here is incorrect.
	 * Instead simply lookup the dir_index_item for this entry so we can
	 * remove it.  Otherwise we know we have a ref to the root and we can
	 * call btrfs_del_root_ref, and it _shouldn't_ fail.
	 */
	if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
		di = btrfs_search_dir_index_item(root, path, dir_ino, &fname.disk_name);
		if (IS_ERR_OR_NULL(di)) {
			if (!di)
				ret = -ENOENT;
			else
				ret = PTR_ERR(di);
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		index = key.offset;
		btrfs_release_path(path);
	} else {
		ret = btrfs_del_root_ref(trans, objectid,
					 root->root_key.objectid, dir_ino,
					 &index, &fname.disk_name);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}

	ret = btrfs_delete_delayed_dir_index(trans, dir, index);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	btrfs_i_size_write(dir, dir->vfs_inode.i_size - fname.disk_name.len * 2);
	inode_inc_iversion(&dir->vfs_inode);
	dir->vfs_inode.i_mtime = current_time(&dir->vfs_inode);
	dir->vfs_inode.i_ctime = dir->vfs_inode.i_mtime;
	ret = btrfs_update_inode_fallback(trans, root, dir);
	if (ret)
		btrfs_abort_transaction(trans, ret);
out:
	btrfs_free_path(path);
	fscrypt_free_filename(&fname);
	return ret;
}

/*
 * Helper to check if the subvolume references other subvolumes or if it's
 * default.
 */
static noinline int may_destroy_subvol(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	struct fscrypt_str name = FSTR_INIT("default", 7);
	u64 dir_id;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* Make sure this root isn't set as the default subvol */
	dir_id = btrfs_super_root_dir(fs_info->super_copy);
	di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path,
				   dir_id, &name, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
		if (key.objectid == root->root_key.objectid) {
			ret = -EPERM;
			btrfs_err(fs_info,
				  "deleting default subvolume %llu is not allowed",
				  key.objectid);
			goto out;
		}
		btrfs_release_path(path);
	}

	key.objectid = root->root_key.objectid;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	ret = 0;
	if (path->slots[0] > 0) {
		path->slots[0]--;
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid == root->root_key.objectid &&
		    key.type == BTRFS_ROOT_REF_KEY)
			ret = -ENOTEMPTY;
	}
out:
	btrfs_free_path(path);
	return ret;
}

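/*
 * Note (illustrative): ROOT_REF items are keyed as
 * (parent_root_id, BTRFS_ROOT_REF_KEY, child_root_id).  Searching for
 * (root, ROOT_REF, (u64)-1) can never find an exact match, so the slot just
 * before the search position is the highest ref naming this root as parent;
 * if one exists the subvolume still has child subvolumes and is not empty.
 */
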
/* Delete all dentries for inodes belonging to the root */
static void btrfs_prune_dentries(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;
	struct rb_node *prev;
	struct btrfs_inode *entry;
	struct inode *inode;
	u64 objectid = 0;

	if (!BTRFS_FS_ERROR(fs_info))
		WARN_ON(btrfs_root_refs(&root->root_item) != 0);

	spin_lock(&root->inode_lock);
again:
	node = root->inode_tree.rb_node;
	prev = NULL;
	while (node) {
		prev = node;
		entry = rb_entry(node, struct btrfs_inode, rb_node);

		if (objectid < btrfs_ino(entry))
			node = node->rb_left;
		else if (objectid > btrfs_ino(entry))
			node = node->rb_right;
		else
			break;
	}
	if (!node) {
		while (prev) {
			entry = rb_entry(prev, struct btrfs_inode, rb_node);
			if (objectid <= btrfs_ino(entry)) {
				node = prev;
				break;
			}
			prev = rb_next(prev);
		}
	}
	while (node) {
		entry = rb_entry(node, struct btrfs_inode, rb_node);
		objectid = btrfs_ino(entry) + 1;
		inode = igrab(&entry->vfs_inode);
		if (inode) {
			spin_unlock(&root->inode_lock);
			if (atomic_read(&inode->i_count) > 1)
				d_prune_aliases(inode);
			/*
			 * btrfs_drop_inode will have it removed from the inode
			 * cache when its usage count hits zero.
			 */
			iput(inode);
			cond_resched();
			spin_lock(&root->inode_lock);
			goto again;
		}

		if (cond_resched_lock(&root->inode_lock))
			goto again;

		node = rb_next(node);
	}
	spin_unlock(&root->inode_lock);
}

int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb);
	struct btrfs_root *root = dir->root;
	struct inode *inode = d_inode(dentry);
	struct btrfs_root *dest = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct btrfs_block_rsv block_rsv;
	u64 root_flags;
	int ret;

	/*
	 * Don't allow to delete a subvolume with send in progress. This is
	 * inside the inode lock so the error handling that has to drop the bit
	 * again is not run concurrently.
	 */
	spin_lock(&dest->root_item_lock);
	if (dest->send_in_progress) {
		spin_unlock(&dest->root_item_lock);
		btrfs_warn(fs_info,
			   "attempt to delete subvolume %llu during send",
			   dest->root_key.objectid);
		return -EPERM;
	}
	if (atomic_read(&dest->nr_swapfiles)) {
		spin_unlock(&dest->root_item_lock);
		btrfs_warn(fs_info,
			   "attempt to delete subvolume %llu with active swapfile",
			   root->root_key.objectid);
		return -EPERM;
	}
	root_flags = btrfs_root_flags(&dest->root_item);
	btrfs_set_root_flags(&dest->root_item,
			     root_flags | BTRFS_ROOT_SUBVOL_DEAD);
	spin_unlock(&dest->root_item_lock);

	down_write(&fs_info->subvol_sem);

	ret = may_destroy_subvol(dest);
	if (ret)
		goto out_up_write;

	btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
	/*
	 * One for dir inode,
	 * two for dir entries,
	 * two for root ref/backref.
	 */
	ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
	if (ret)
		goto out_up_write;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_release;
	}
	trans->block_rsv = &block_rsv;
	trans->bytes_reserved = block_rsv.size;

	btrfs_record_snapshot_destroy(trans, dir);

	ret = btrfs_unlink_subvol(trans, dir, dentry);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_end_trans;
	}

	ret = btrfs_record_root_in_trans(trans, dest);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_end_trans;
	}

	memset(&dest->root_item.drop_progress, 0,
	       sizeof(dest->root_item.drop_progress));
	btrfs_set_root_drop_level(&dest->root_item, 0);
	btrfs_set_root_refs(&dest->root_item, 0);

	if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) {
		ret = btrfs_insert_orphan_item(trans,
					fs_info->tree_root,
					dest->root_key.objectid);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out_end_trans;
		}
	}

	ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid,
				     BTRFS_UUID_KEY_SUBVOL,
				     dest->root_key.objectid);
	if (ret && ret != -ENOENT) {
		btrfs_abort_transaction(trans, ret);
		goto out_end_trans;
	}
	if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) {
		ret = btrfs_uuid_tree_remove(trans,
					     dest->root_item.received_uuid,
					     BTRFS_UUID_KEY_RECEIVED_SUBVOL,
					     dest->root_key.objectid);
		if (ret && ret != -ENOENT) {
			btrfs_abort_transaction(trans, ret);
			goto out_end_trans;
		}
	}

	free_anon_bdev(dest->anon_dev);
	dest->anon_dev = 0;
out_end_trans:
	trans->block_rsv = NULL;
	trans->bytes_reserved = 0;
	ret = btrfs_end_transaction(trans);
	inode->i_flags |= S_DEAD;
out_release:
	btrfs_subvolume_release_metadata(root, &block_rsv);
out_up_write:
	up_write(&fs_info->subvol_sem);
	if (ret) {
		spin_lock(&dest->root_item_lock);
		root_flags = btrfs_root_flags(&dest->root_item);
		btrfs_set_root_flags(&dest->root_item,
				     root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
		spin_unlock(&dest->root_item_lock);
	} else {
		d_invalidate(dentry);
		btrfs_prune_dentries(dest);
		ASSERT(dest->send_in_progress == 0);
	}

	return ret;
}

static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	int err = 0;
	struct btrfs_trans_handle *trans;
	u64 last_unlink_trans;
	struct fscrypt_name fname;

	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
		return -ENOTEMPTY;
	if (btrfs_ino(BTRFS_I(inode)) == BTRFS_FIRST_FREE_OBJECTID) {
		if (unlikely(btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))) {
			btrfs_err(fs_info,
			"extent tree v2 doesn't support snapshot deletion yet");
			return -EOPNOTSUPP;
		}
		return btrfs_delete_subvolume(BTRFS_I(dir), dentry);
	}

	err = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname);
	if (err)
		return err;

	/* This needs to handle no-key deletions later on */

	trans = __unlink_start_trans(BTRFS_I(dir));
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_notrans;
	}

	if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
		err = btrfs_unlink_subvol(trans, BTRFS_I(dir), dentry);
		goto out;
	}

	err = btrfs_orphan_add(trans, BTRFS_I(inode));
	if (err)
		goto out;

	last_unlink_trans = BTRFS_I(inode)->last_unlink_trans;

	/* now the directory is empty */
	err = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
				 &fname.disk_name);
	if (!err) {
		btrfs_i_size_write(BTRFS_I(inode), 0);
		/*
		 * Propagate the last_unlink_trans value of the deleted dir to
		 * its parent directory. This is to prevent an unrecoverable
		 * log tree in the case we do something like this:
		 * 1) create dir foo
		 * 2) create snapshot under dir foo
		 * 3) delete the snapshot
		 * 4) rmdir foo
		 * 5) mkdir foo
		 * 6) fsync foo or some file inside foo
		 */
		if (last_unlink_trans >= trans->transid)
			BTRFS_I(dir)->last_unlink_trans = last_unlink_trans;
	}
out:
	btrfs_end_transaction(trans);
out_notrans:
	btrfs_btree_balance_dirty(fs_info);
	fscrypt_free_filename(&fname);

	return err;
}

/*
 * btrfs_truncate_block - read, zero a chunk and write a block
 * @inode - inode that we're zeroing
 * @from - the offset to start zeroing
 * @len - the length to zero, 0 to zero the entire range respective to the
 *	offset
 * @front - zero up to the offset instead of from the offset on
 *
 * This will find the block for the "from" offset and cow the block and zero the
 * part we want to zero.  This is used with truncate and hole punching.
 */
int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
			 int front)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct extent_changeset *data_reserved = NULL;
	bool only_release_metadata = false;
	u32 blocksize = fs_info->sectorsize;
	pgoff_t index = from >> PAGE_SHIFT;
	unsigned offset = from & (blocksize - 1);
	struct page *page;
	gfp_t mask = btrfs_alloc_write_mask(mapping);
	size_t write_bytes = blocksize;
	int ret = 0;
	u64 block_start;
	u64 block_end;

	if (IS_ALIGNED(offset, blocksize) &&
	    (!len || IS_ALIGNED(len, blocksize)))
		goto out;

	block_start = round_down(from, blocksize);
	block_end = block_start + blocksize - 1;

	ret = btrfs_check_data_free_space(inode, &data_reserved, block_start,
					  blocksize, false);
	if (ret < 0) {
		if (btrfs_check_nocow_lock(inode, block_start, &write_bytes, false) > 0) {
			/* For nocow case, no need to reserve data space */
			only_release_metadata = true;
		} else {
			goto out;
		}
	}
	ret = btrfs_delalloc_reserve_metadata(inode, blocksize, blocksize, false);
	if (ret < 0) {
		if (!only_release_metadata)
			btrfs_free_reserved_data_space(inode, data_reserved,
						       block_start, blocksize);
		goto out;
	}
again:
	page = find_or_create_page(mapping, index, mask);
	if (!page) {
		btrfs_delalloc_release_space(inode, data_reserved, block_start,
					     blocksize, true);
		btrfs_delalloc_release_extents(inode, blocksize);
		ret = -ENOMEM;
		goto out;
	}

	ret = set_page_extent_mapped(page);
	if (ret < 0)
		goto out_unlock;

	if (!PageUptodate(page)) {
		ret = btrfs_read_folio(NULL, page_folio(page));
		lock_page(page);
		if (page->mapping != mapping) {
			unlock_page(page);
			put_page(page);
			goto again;
		}
		if (!PageUptodate(page)) {
			ret = -EIO;
			goto out_unlock;
		}
	}
	wait_on_page_writeback(page);

	lock_extent(io_tree, block_start, block_end, &cached_state);

	ordered = btrfs_lookup_ordered_extent(inode, block_start);
	if (ordered) {
		unlock_extent(io_tree, block_start, block_end, &cached_state);
		unlock_page(page);
		put_page(page);
		btrfs_start_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	clear_extent_bit(&inode->io_tree, block_start, block_end,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
			 &cached_state);

	ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
					&cached_state);
	if (ret) {
		unlock_extent(io_tree, block_start, block_end, &cached_state);
		goto out_unlock;
	}

	if (offset != blocksize) {
		if (!len)
			len = blocksize - offset;
		if (front)
			memzero_page(page, (block_start - page_offset(page)),
				     offset);
		else
			memzero_page(page, (block_start - page_offset(page)) + offset,
				     len);
	}
	btrfs_page_clear_checked(fs_info, page, block_start,
				 block_end + 1 - block_start);
	btrfs_page_set_dirty(fs_info, page, block_start, block_end + 1 - block_start);
	unlock_extent(io_tree, block_start, block_end, &cached_state);

	if (only_release_metadata)
		set_extent_bit(&inode->io_tree, block_start, block_end,
			       EXTENT_NORESERVE, NULL);

out_unlock:
	if (ret) {
		if (only_release_metadata)
			btrfs_delalloc_release_metadata(inode, blocksize, true);
		else
			btrfs_delalloc_release_space(inode, data_reserved,
						     block_start, blocksize, true);
	}
	btrfs_delalloc_release_extents(inode, blocksize);
	unlock_page(page);
	put_page(page);
out:
	if (only_release_metadata)
		btrfs_check_nocow_unlock(inode);
	extent_changeset_free(data_reserved);
	return ret;
}

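/*
 * Illustrative usage (hole punching, roughly as done in fs/btrfs/file.c):
 *
 *	btrfs_truncate_block(inode, offset, 0, 0);	  zero the tail of the
 *							  first partial block
 *	btrfs_truncate_block(inode, offset + len, 0, 1);  zero the head of the
 *							  last partial block
 *
 * leaving only whole blocks for the extent dropping code to remove.
 */
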
static int maybe_insert_hole(struct btrfs_root *root, struct btrfs_inode *inode,
			     u64 offset, u64 len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_drop_extents_args drop_args = { 0 };
	int ret;

	/*
	 * If NO_HOLES is enabled, we don't need to do anything.
	 * Later, up in the call chain, either btrfs_set_inode_last_sub_trans()
	 * or btrfs_update_inode() will be called, which guarantee that the next
	 * fsync will know this inode was changed and needs to be logged.
	 */
	if (btrfs_fs_incompat(fs_info, NO_HOLES))
		return 0;

	/*
	 * 1 - for the one we're dropping
	 * 1 - for the one we're adding
	 * 1 - for updating the inode.
	 */
	trans = btrfs_start_transaction(root, 3);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	drop_args.start = offset;
	drop_args.end = offset + len;
	drop_args.drop_cache = true;

	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		return ret;
	}

	ret = btrfs_insert_hole_extent(trans, root, btrfs_ino(inode), offset, len);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
	} else {
		btrfs_update_inode_bytes(inode, 0, drop_args.bytes_found);
		btrfs_update_inode(trans, root, inode);
	}
	btrfs_end_transaction(trans);
	return ret;
}

/*
 * This function puts in dummy file extents for the area we're creating a hole
 * for.  So if we are truncating this file to a larger size we need to insert
 * these file extents so that btrfs_get_extent will return a EXTENT_MAP_HOLE for
 * the range between oldsize and size
 */
int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct extent_map *em = NULL;
	struct extent_state *cached_state = NULL;
	u64 hole_start = ALIGN(oldsize, fs_info->sectorsize);
	u64 block_end = ALIGN(size, fs_info->sectorsize);
	u64 last_byte;
	u64 cur_offset;
	u64 hole_size;
	int err = 0;

	/*
	 * If our size started in the middle of a block we need to zero out the
	 * rest of the block before we expand the i_size, otherwise we could
	 * expose stale data.
	 */
	err = btrfs_truncate_block(inode, oldsize, 0, 0);
	if (err)
		return err;

	if (size <= hole_start)
		return 0;

	btrfs_lock_and_flush_ordered_range(inode, hole_start, block_end - 1,
					   &cached_state);
	cur_offset = hole_start;
	while (1) {
		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				      block_end - cur_offset);
		if (IS_ERR(em)) {
			err = PTR_ERR(em);
			em = NULL;
			break;
		}
		last_byte = min(extent_map_end(em), block_end);
		last_byte = ALIGN(last_byte, fs_info->sectorsize);
		hole_size = last_byte - cur_offset;

		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
			struct extent_map *hole_em;

			err = maybe_insert_hole(root, inode, cur_offset,
						hole_size);
			if (err)
				break;

			err = btrfs_inode_set_file_extent_range(inode,
							cur_offset, hole_size);
			if (err)
				break;

			hole_em = alloc_extent_map();
			if (!hole_em) {
				btrfs_drop_extent_map_range(inode, cur_offset,
						    cur_offset + hole_size - 1,
						    false);
				btrfs_set_inode_full_sync(inode);
				goto next;
			}
			hole_em->start = cur_offset;
			hole_em->len = hole_size;
			hole_em->orig_start = cur_offset;

			hole_em->block_start = EXTENT_MAP_HOLE;
			hole_em->block_len = 0;
			hole_em->orig_block_len = 0;
			hole_em->ram_bytes = hole_size;
			hole_em->compress_type = BTRFS_COMPRESS_NONE;
			hole_em->generation = fs_info->generation;

			err = btrfs_replace_extent_map_range(inode, hole_em, true);
			free_extent_map(hole_em);
		} else {
			err = btrfs_inode_set_file_extent_range(inode,
							cur_offset, hole_size);
			if (err)
				break;
		}
next:
		free_extent_map(em);
		em = NULL;
		cur_offset = last_byte;
		if (cur_offset >= block_end)
			break;
	}
	free_extent_map(em);
	unlock_extent(io_tree, hole_start, block_end - 1, &cached_state);
	return err;
}

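/*
 * Example (illustrative): "truncate -s 1G file" on a 4K sector filesystem
 * with an old i_size of 6000 bytes first zeroes bytes 6000-8191 via
 * btrfs_truncate_block(), then covers 8192 up to 1 GiB with hole extents
 * (skipped entirely when the NO_HOLES incompat feature is enabled).
 */
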
static int btrfs_setsize(struct inode *inode, struct iattr *attr)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	loff_t oldsize = i_size_read(inode);
	loff_t newsize = attr->ia_size;
	int mask = attr->ia_valid;
	int ret;

	/*
	 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
	 * special case where we need to update the times despite not having
	 * these flags set.  For all other operations the VFS set these flags
	 * explicitly if it wants a timestamp update.
	 */
	if (newsize != oldsize) {
		inode_inc_iversion(inode);
		if (!(mask & (ATTR_CTIME | ATTR_MTIME))) {
			inode->i_mtime = current_time(inode);
			inode->i_ctime = inode->i_mtime;
		}
	}

	if (newsize > oldsize) {
		/*
		 * Don't do an expanding truncate while snapshotting is ongoing.
		 * This is to ensure the snapshot captures a fully consistent
		 * state of this file - if the snapshot captures this expanding
		 * truncation, it must capture all writes that happened before
		 * this truncation.
		 */
		btrfs_drew_write_lock(&root->snapshot_lock);
		ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, newsize);
		if (ret) {
			btrfs_drew_write_unlock(&root->snapshot_lock);
			return ret;
		}

		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			btrfs_drew_write_unlock(&root->snapshot_lock);
			return PTR_ERR(trans);
		}

		i_size_write(inode, newsize);
		btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
		pagecache_isize_extended(inode, oldsize, newsize);
		ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
		btrfs_drew_write_unlock(&root->snapshot_lock);
		btrfs_end_transaction(trans);
	} else {
		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);

		if (btrfs_is_zoned(fs_info)) {
			ret = btrfs_wait_ordered_range(inode,
					ALIGN(newsize, fs_info->sectorsize),
					(u64)-1);
			if (ret)
				return ret;
		}

		/*
		 * We're truncating a file that used to have good data down to
		 * zero. Make sure any new writes to the file get on disk
		 * on close.
		 */
		if (newsize == 0)
			set_bit(BTRFS_INODE_FLUSH_ON_CLOSE,
				&BTRFS_I(inode)->runtime_flags);

		truncate_setsize(inode, newsize);

		inode_dio_wait(inode);

		ret = btrfs_truncate(BTRFS_I(inode), newsize == oldsize);
		if (ret && inode->i_nlink) {
			int err;

			/*
			 * Truncate failed, so fix up the in-memory size. We
			 * adjusted disk_i_size down as we removed extents, so
			 * wait for disk_i_size to be stable and then update the
			 * in-memory size to match.
			 */
			err = btrfs_wait_ordered_range(inode, 0, (u64)-1);
			if (err)
				return err;
			i_size_write(inode, BTRFS_I(inode)->disk_i_size);
		}
	}

	return ret;
}

static int btrfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
			 struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int err;

	if (btrfs_root_readonly(root))
		return -EROFS;

	err = setattr_prepare(idmap, dentry, attr);
	if (err)
		return err;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		err = btrfs_setsize(inode, attr);
		if (err)
			return err;
	}

	if (attr->ia_valid) {
		setattr_copy(idmap, inode, attr);
		inode_inc_iversion(inode);
		err = btrfs_dirty_inode(BTRFS_I(inode));

		if (!err && attr->ia_valid & ATTR_MODE)
			err = posix_acl_chmod(idmap, dentry, inode->i_mode);
	}

	return err;
}

/*
 * While truncating the inode pages during eviction, we get the VFS
 * calling btrfs_invalidate_folio() against each folio of the inode. This
 * is slow because the calls to btrfs_invalidate_folio() result in a
 * huge amount of calls to lock_extent() and clear_extent_bit(),
 * which keep merging and splitting extent_state structures over and over,
 * wasting lots of time.
 *
 * Therefore if the inode is being evicted, let btrfs_invalidate_folio()
 * skip all those expensive operations on a per folio basis and do only
 * the ordered io finishing, while we release here the extent_map and
 * extent_state structures, without the excessive merging and splitting.
 */
static void evict_inode_truncate_pages(struct inode *inode)
{
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct rb_node *node;

	ASSERT(inode->i_state & I_FREEING);
	truncate_inode_pages_final(&inode->i_data);

	btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);

	/*
	 * Keep looping until we have no more ranges in the io tree.
	 * We can have ongoing bios started by readahead that have
	 * their endio callback (extent_io.c:end_bio_extent_readpage)
	 * still in progress (unlocked the pages in the bio but did not yet
	 * unlocked the ranges in the io tree). Therefore this means some
	 * ranges can still be locked and eviction started because before
	 * submitting those bios, which are executed by a separate task (work
	 * queue kthread), inode references (inode->i_count) were not taken
	 * (which would be dropped in the end io callback of each bio).
	 * Therefore here we effectively end up waiting for those bios and
	 * anyone else holding locked ranges without having bumped the inode's
	 * reference count - if we don't do it, when they access the inode's
	 * io_tree to unlock a range it may be too late, leading to an
	 * use-after-free issue.
	 */
	spin_lock(&io_tree->lock);
	while (!RB_EMPTY_ROOT(&io_tree->state)) {
		struct extent_state *state;
		struct extent_state *cached_state = NULL;
		u64 start;
		u64 end;
		unsigned state_flags;

		node = rb_first(&io_tree->state);
		state = rb_entry(node, struct extent_state, rb_node);
		start = state->start;
		end = state->end;
		state_flags = state->state;
		spin_unlock(&io_tree->lock);

		lock_extent(io_tree, start, end, &cached_state);

		/*
		 * If still has DELALLOC flag, the extent didn't reach disk,
		 * and its reserved space won't be freed by delayed_ref.
		 * So we need to free its reserved space here.
		 * (Refer to comment in btrfs_invalidate_folio, case 2)
		 *
		 * Note, end is the bytenr of last byte, so we need + 1 here.
		 */
		if (state_flags & EXTENT_DELALLOC)
			btrfs_qgroup_free_data(BTRFS_I(inode), NULL, start,
					       end - start + 1);

		clear_extent_bit(io_tree, start, end,
				 EXTENT_CLEAR_ALL_BITS | EXTENT_DO_ACCOUNTING,
				 &cached_state);

		cond_resched();
		spin_lock(&io_tree->lock);
	}
	spin_unlock(&io_tree->lock);
}

static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root,
							struct btrfs_block_rsv *rsv)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	u64 delayed_refs_extra = btrfs_calc_delayed_ref_bytes(fs_info, 1);
	int ret;

	/*
	 * Eviction should be taking place at some place safe because of our
	 * delayed iputs.  However the normal flushing code will run delayed
	 * iputs, so we cannot use FLUSH_ALL otherwise we'll deadlock.
	 *
	 * We reserve the delayed_refs_extra here again because we can't use
	 * btrfs_start_transaction(root, 0) for the same deadlocky reason as
	 * above.  We reserve our extra bit here because we generate a ton of
	 * delayed refs activity by truncating.
	 *
	 * BTRFS_RESERVE_FLUSH_EVICT will steal from the global_rsv if it can,
	 * if we fail to make this reservation we can re-try without the
	 * delayed_refs_extra so we can make some forward progress.
	 */
	ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size + delayed_refs_extra,
				     BTRFS_RESERVE_FLUSH_EVICT);
	if (ret) {
		ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size,
					     BTRFS_RESERVE_FLUSH_EVICT);
		if (ret) {
			btrfs_warn(fs_info,
				   "could not allocate space for delete; will truncate on mount");
			return ERR_PTR(-ENOSPC);
		}
		delayed_refs_extra = 0;
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return trans;

	if (delayed_refs_extra) {
		trans->block_rsv = &fs_info->trans_block_rsv;
		trans->bytes_reserved = delayed_refs_extra;
		btrfs_block_rsv_migrate(rsv, trans->block_rsv,
					delayed_refs_extra, true);
	}
	return trans;
}

void btrfs_evict_inode(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *rsv = NULL;
	int ret;

	trace_btrfs_inode_evict(inode);

	if (!root) {
		fsverity_cleanup_inode(inode);
		clear_inode(inode);
		return;
	}

	evict_inode_truncate_pages(inode);

	if (inode->i_nlink &&
	    ((btrfs_root_refs(&root->root_item) != 0 &&
	      root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) ||
	     btrfs_is_free_space_inode(BTRFS_I(inode))))
		goto no_delete;

	if (is_bad_inode(inode))
		goto no_delete;

	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		goto no_delete;

	if (inode->i_nlink > 0) {
		BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
		       root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID);
		goto no_delete;
	}

	/*
	 * This makes sure the inode item in tree is uptodate and the space for
	 * the inode update is released.
	 */
	ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode));
	if (ret)
		goto no_delete;

	/*
	 * This drops any pending insert or delete operations we have for this
	 * inode.  We could have a delayed dir index deletion queued up, but
	 * we're removing the inode completely so that'll be taken care of in
	 * the truncate.
	 */
	btrfs_kill_delayed_inode_items(BTRFS_I(inode));

	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
	if (!rsv)
		goto no_delete;
	rsv->size = btrfs_calc_metadata_size(fs_info, 1);
	rsv->failfast = true;

	btrfs_i_size_write(BTRFS_I(inode), 0);

	while (1) {
		struct btrfs_truncate_control control = {
			.inode = BTRFS_I(inode),
			.ino = btrfs_ino(BTRFS_I(inode)),
			.new_size = 0,
			.min_type = 0,
		};

		trans = evict_refill_and_join(root, rsv);
		if (IS_ERR(trans))
			goto free_rsv;

		trans->block_rsv = rsv;

		ret = btrfs_truncate_inode_items(trans, root, &control);
		trans->block_rsv = &fs_info->trans_block_rsv;
		btrfs_end_transaction(trans);
		/*
		 * We have not added new delayed items for our inode after we
		 * have flushed its delayed items, so no need to throttle on
		 * delayed items. However we have modified extent buffers.
		 */
		btrfs_btree_balance_dirty_nodelay(fs_info);
		if (ret && ret != -ENOSPC && ret != -EAGAIN)
			goto free_rsv;
		else if (!ret)
			break;
	}

	/*
	 * Errors here aren't a big deal, it just means we leave orphan items in
	 * the tree. They will be cleaned up on the next mount. If the inode
	 * number gets reused, cleanup deletes the orphan item without doing
	 * anything, and unlink reuses the existing orphan item.
	 *
	 * If it turns out that we are dropping too many of these, we might want
	 * to add a mechanism for retrying these after a commit.
	 */
	trans = evict_refill_and_join(root, rsv);
	if (!IS_ERR(trans)) {
		trans->block_rsv = rsv;
		btrfs_orphan_del(trans, BTRFS_I(inode));
		trans->block_rsv = &fs_info->trans_block_rsv;
		btrfs_end_transaction(trans);
	}

free_rsv:
	btrfs_free_block_rsv(fs_info, rsv);
no_delete:
	/*
	 * If we didn't successfully delete, the orphan item will still be in
	 * the tree and we'll retry on the next mount. Again, we might also want
	 * to retry these periodically in the future.
	 */
	btrfs_remove_delayed_node(BTRFS_I(inode));
	fsverity_cleanup_inode(inode);
	clear_inode(inode);
}

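/*
 * Note (illustrative): the orphan item added at unlink time is what makes the
 * eviction-time truncate above crash safe - if we die halfway through, the
 * next mount's orphan cleanup finishes deleting the remaining extents.
 */
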
/*
 * Return the key found in the dir entry in the location pointer, fill @type
 * with BTRFS_FT_*, and return 0.
 *
 * If no dir entries were found, returns -ENOENT.
 * If found a corrupted location in dir entry, returns -EUCLEAN.
 */
static int btrfs_inode_by_name(struct btrfs_inode *dir, struct dentry *dentry,
			       struct btrfs_key *location, u8 *type)
{
	struct btrfs_dir_item *di;
	struct btrfs_path *path;
	struct btrfs_root *root = dir->root;
	int ret = 0;
	struct fscrypt_name fname;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname);
	if (ret < 0)
		goto out;
	/*
	 * fscrypt_setup_filename() should never return a positive value, but
	 * gcc on sparc/parisc thinks it can, so assert that doesn't happen.
	 */
	ASSERT(ret == 0);

	/* This needs to handle no-key deletions later on */

	di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir),
				   &fname.disk_name, 0);
	if (IS_ERR_OR_NULL(di)) {
		ret = di ? PTR_ERR(di) : -ENOENT;
		goto out;
	}

	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
	if (location->type != BTRFS_INODE_ITEM_KEY &&
	    location->type != BTRFS_ROOT_ITEM_KEY) {
		ret = -EUCLEAN;
		btrfs_warn(root->fs_info,
"%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))",
			   __func__, fname.disk_name.name, btrfs_ino(dir),
			   location->objectid, location->type, location->offset);
	}
	if (!ret)
		*type = btrfs_dir_ftype(path->nodes[0], di);
out:
	fscrypt_free_filename(&fname);
	btrfs_free_path(path);
	return ret;
}

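/*
 * Note (illustrative): a DIR_ITEM normally points at an INODE_ITEM in the
 * same root; it points at a ROOT_ITEM when the entry is a subvolume or
 * snapshot, which btrfs_lookup_dentry() resolves by crossing into the sub
 * root, much like a mount point.
 */
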
/*
 * when we hit a tree root in a directory, the btrfs part of the inode
 * needs to be changed to reflect the root directory of the tree root.  This
 * is kind of like crossing a mount point.
 */
static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
				    struct btrfs_inode *dir,
				    struct dentry *dentry,
				    struct btrfs_key *location,
				    struct btrfs_root **sub_root)
{
	struct btrfs_path *path;
	struct btrfs_root *new_root;
	struct btrfs_root_ref *ref;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret;
	int err = 0;
	struct fscrypt_name fname;

	ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 0, &fname);
	if (ret)
		return ret;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	err = -ENOENT;
	key.objectid = dir->root->root_key.objectid;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = location->objectid;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	if (ret) {
		if (ret < 0)
			err = ret;
		goto out;
	}

	leaf = path->nodes[0];
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
	if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
	    btrfs_root_ref_name_len(leaf, ref) != fname.disk_name.len)
		goto out;

	ret = memcmp_extent_buffer(leaf, fname.disk_name.name,
				   (unsigned long)(ref + 1), fname.disk_name.len);
	if (ret)
		goto out;

	btrfs_release_path(path);

	new_root = btrfs_get_fs_root(fs_info, location->objectid, true);
	if (IS_ERR(new_root)) {
		err = PTR_ERR(new_root);
		goto out;
	}

	*sub_root = new_root;
	location->objectid = btrfs_root_dirid(&new_root->root_item);
	location->type = BTRFS_INODE_ITEM_KEY;
	location->offset = 0;
	err = 0;
out:
	btrfs_free_path(path);
	fscrypt_free_filename(&fname);
	return err;
}

static void inode_tree_add(struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_inode *entry;
	struct rb_node **p;
	struct rb_node *parent;
	struct rb_node *new = &inode->rb_node;
	u64 ino = btrfs_ino(inode);

	if (inode_unhashed(&inode->vfs_inode))
		return;
	parent = NULL;
	spin_lock(&root->inode_lock);
	p = &root->inode_tree.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_inode, rb_node);

		if (ino < btrfs_ino(entry))
			p = &parent->rb_left;
		else if (ino > btrfs_ino(entry))
			p = &parent->rb_right;
		else {
			WARN_ON(!(entry->vfs_inode.i_state &
				  (I_WILL_FREE | I_FREEING)));
			rb_replace_node(parent, new, &root->inode_tree);
			RB_CLEAR_NODE(parent);
			spin_unlock(&root->inode_lock);
			return;
		}
	}
	rb_link_node(new, parent, p);
	rb_insert_color(new, &root->inode_tree);
	spin_unlock(&root->inode_lock);
}

static void inode_tree_del(struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	int empty = 0;

	spin_lock(&root->inode_lock);
	if (!RB_EMPTY_NODE(&inode->rb_node)) {
		rb_erase(&inode->rb_node, &root->inode_tree);
		RB_CLEAR_NODE(&inode->rb_node);
		empty = RB_EMPTY_ROOT(&root->inode_tree);
	}
	spin_unlock(&root->inode_lock);

	if (empty && btrfs_root_refs(&root->root_item) == 0) {
		spin_lock(&root->inode_lock);
		empty = RB_EMPTY_ROOT(&root->inode_tree);
		spin_unlock(&root->inode_lock);
		if (empty)
			btrfs_add_dead_root(root);
	}
}

static int btrfs_init_locked_inode(struct inode *inode, void *p)
{
	struct btrfs_iget_args *args = p;

	inode->i_ino = args->ino;
	BTRFS_I(inode)->location.objectid = args->ino;
	BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
	BTRFS_I(inode)->location.offset = 0;
	BTRFS_I(inode)->root = btrfs_grab_root(args->root);
	BUG_ON(args->root && !BTRFS_I(inode)->root);

	if (args->root && args->root == args->root->fs_info->tree_root &&
	    args->ino != BTRFS_BTREE_INODE_OBJECTID)
		set_bit(BTRFS_INODE_FREE_SPACE_INODE,
			&BTRFS_I(inode)->runtime_flags);
	return 0;
}

static int btrfs_find_actor(struct inode *inode, void *opaque)
{
	struct btrfs_iget_args *args = opaque;

	return args->ino == BTRFS_I(inode)->location.objectid &&
		args->root == BTRFS_I(inode)->root;
}

static struct inode *btrfs_iget_locked(struct super_block *s, u64 ino,
				       struct btrfs_root *root)
{
	struct inode *inode;
	struct btrfs_iget_args args;
	unsigned long hashval = btrfs_inode_hash(ino, root);

	args.ino = ino;
	args.root = root;

	inode = iget5_locked(s, hashval, btrfs_find_actor,
			     btrfs_init_locked_inode,
			     (void *)&args);
	return inode;
}

/*
 * Get an inode object given its inode number and corresponding root.
 * Path can be preallocated to prevent recursing back to iget through
 * allocator. NULL is also valid but may require an additional allocation
 * later.
 */
struct inode *btrfs_iget_path(struct super_block *s, u64 ino,
			      struct btrfs_root *root, struct btrfs_path *path)
{
	struct inode *inode;

	inode = btrfs_iget_locked(s, ino, root);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (inode->i_state & I_NEW) {
		int ret;

		ret = btrfs_read_locked_inode(inode, path);
		if (!ret) {
			inode_tree_add(BTRFS_I(inode));
			unlock_new_inode(inode);
		} else {
			iget_failed(inode);
			/*
			 * ret > 0 can come from btrfs_search_slot called by
			 * btrfs_read_locked_inode, this means the inode item
			 * was not found.
			 */
			if (ret > 0)
				ret = -ENOENT;
			inode = ERR_PTR(ret);
		}
	}

	return inode;
}

struct inode *btrfs_iget(struct super_block *s, u64 ino, struct btrfs_root *root)
{
	return btrfs_iget_path(s, ino, root, NULL);
}

static struct inode *new_simple_dir(struct super_block *s,
				    struct btrfs_key *key,
				    struct btrfs_root *root)
{
	struct inode *inode = new_inode(s);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	BTRFS_I(inode)->root = btrfs_grab_root(root);
	memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);

	inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
	/*
	 * We only need lookup, the rest is read-only and there's no inode
	 * associated with the dentry
	 */
	inode->i_op = &simple_dir_inode_operations;
	inode->i_opflags &= ~IOP_XATTR;
	inode->i_fop = &simple_dir_operations;
	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
	inode->i_mtime = current_time(inode);
	inode->i_atime = inode->i_mtime;
	inode->i_ctime = inode->i_mtime;
	BTRFS_I(inode)->i_otime = inode->i_mtime;

	return inode;
}

static_assert(BTRFS_FT_UNKNOWN == FT_UNKNOWN);
static_assert(BTRFS_FT_REG_FILE == FT_REG_FILE);
static_assert(BTRFS_FT_DIR == FT_DIR);
static_assert(BTRFS_FT_CHRDEV == FT_CHRDEV);
static_assert(BTRFS_FT_BLKDEV == FT_BLKDEV);
static_assert(BTRFS_FT_FIFO == FT_FIFO);
static_assert(BTRFS_FT_SOCK == FT_SOCK);
static_assert(BTRFS_FT_SYMLINK == FT_SYMLINK);

static inline u8 btrfs_inode_type(struct inode *inode)
{
	return fs_umode_to_ftype(inode->i_mode);
}

struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct inode *inode;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_root *sub_root = root;
	struct btrfs_key location;
	u8 di_type = 0;
	int ret = 0;

	if (dentry->d_name.len > BTRFS_NAME_LEN)
		return ERR_PTR(-ENAMETOOLONG);

	ret = btrfs_inode_by_name(BTRFS_I(dir), dentry, &location, &di_type);
	if (ret < 0)
		return ERR_PTR(ret);

	if (location.type == BTRFS_INODE_ITEM_KEY) {
		inode = btrfs_iget(dir->i_sb, location.objectid, root);
		if (IS_ERR(inode))
			return inode;

		/* Do extra check against inode mode with di_type */
		if (btrfs_inode_type(inode) != di_type) {
			btrfs_crit(fs_info,
"inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir type=%u",
				  inode->i_mode, btrfs_inode_type(inode),
				  di_type);
			iput(inode);
			return ERR_PTR(-EUCLEAN);
		}
		return inode;
	}

	ret = fixup_tree_root_location(fs_info, BTRFS_I(dir), dentry,
				       &location, &sub_root);
	if (ret < 0) {
		if (ret != -ENOENT)
			inode = ERR_PTR(ret);
		else
			inode = new_simple_dir(dir->i_sb, &location, root);
	} else {
		inode = btrfs_iget(dir->i_sb, location.objectid, sub_root);
		btrfs_put_root(sub_root);

		if (IS_ERR(inode))
			return inode;

		down_read(&fs_info->cleanup_work_sem);
		if (!sb_rdonly(inode->i_sb))
			ret = btrfs_orphan_cleanup(sub_root);
		up_read(&fs_info->cleanup_work_sem);
		if (ret) {
			iput(inode);
			inode = ERR_PTR(ret);
		}
	}

	return inode;
}

static int btrfs_dentry_delete(const struct dentry *dentry)
{
	struct btrfs_root *root;
	struct inode *inode = d_inode(dentry);

	if (!inode && !IS_ROOT(dentry))
		inode = d_inode(dentry->d_parent);

	if (inode) {
		root = BTRFS_I(inode)->root;
		if (btrfs_root_refs(&root->root_item) == 0)
			return 1;

		if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
			return 1;
	}
	return 0;
}

static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
				   unsigned int flags)
{
	struct inode *inode = btrfs_lookup_dentry(dir, dentry);

	if (inode == ERR_PTR(-ENOENT))
		inode = NULL;
	return d_splice_alias(inode, dentry);
}

/*
 * All this infrastructure exists because dir_emit can fault, and we are holding
 * the tree lock when doing readdir.  For now just allocate a buffer and copy
 * our information into that, and then dir_emit from the buffer.  This is
 * similar to what NFS does, only we don't keep the buffer around in pagecache
 * because I'm afraid I'll mess that up.  Long term we need to make filldir do
 * copy_to_user_inatomic so we don't have to worry about page faulting under the
 * tree lock.
 */
static int btrfs_opendir(struct inode *inode, struct file *file)
{
	struct btrfs_file_private *private;

	private = kzalloc(sizeof(struct btrfs_file_private), GFP_KERNEL);
	if (!private)
		return -ENOMEM;
	private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!private->filldir_buf) {
		kfree(private);
		return -ENOMEM;
	}
	file->private_data = private;
	return 0;
}

struct dir_entry {
	u64 ino;
	u64 offset;
	unsigned type;
	int name_len;
};

static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx)
{
	while (entries--) {
		struct dir_entry *entry = addr;
		char *name = (char *)(entry + 1);

		ctx->pos = get_unaligned(&entry->offset);
		if (!dir_emit(ctx, name, get_unaligned(&entry->name_len),
			      get_unaligned(&entry->ino),
			      get_unaligned(&entry->type)))
			return 1;
		addr += sizeof(struct dir_entry) +
			get_unaligned(&entry->name_len);
		ctx->pos++;
	}
	return 0;
}

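/*
 * Buffer layout (illustrative): filldir_buf holds a packed sequence of
 *
 *	[struct dir_entry][name bytes][struct dir_entry][name bytes]...
 *
 * with no alignment padding, hence the {get,put}_unaligned() accessors and
 * the advance by sizeof(struct dir_entry) + name_len per entry.
 */
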
static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_private *private = file->private_data;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	void *addr;
	struct list_head ins_list;
	struct list_head del_list;
	int ret;
	char *name_ptr;
	int name_len;
	int entries = 0;
	int total_len = 0;
	bool put = false;
	struct btrfs_key location;

	if (!dir_emit_dots(file, ctx))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	addr = private->filldir_buf;
	path->reada = READA_FORWARD;

	INIT_LIST_HEAD(&ins_list);
	INIT_LIST_HEAD(&del_list);
	put = btrfs_readdir_get_delayed_items(inode, &ins_list, &del_list);

again:
	key.type = BTRFS_DIR_INDEX_KEY;
	key.offset = ctx->pos;
	key.objectid = btrfs_ino(BTRFS_I(inode));

	btrfs_for_each_slot(root, &key, &found_key, path, ret) {
		struct dir_entry *entry;
		struct extent_buffer *leaf = path->nodes[0];
		u8 ftype;

		if (found_key.objectid != key.objectid)
			break;
		if (found_key.type != BTRFS_DIR_INDEX_KEY)
			break;
		if (found_key.offset < ctx->pos)
			continue;
		if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
			continue;
		di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
		name_len = btrfs_dir_name_len(leaf, di);
		if ((total_len + sizeof(struct dir_entry) + name_len) >=
		    PAGE_SIZE) {
			btrfs_release_path(path);
			ret = btrfs_filldir(private->filldir_buf, entries, ctx);
			if (ret)
				goto nopos;
			addr = private->filldir_buf;
			entries = 0;
			total_len = 0;
			goto again;
		}

		ftype = btrfs_dir_flags_to_ftype(btrfs_dir_flags(leaf, di));
		entry = addr;
		name_ptr = (char *)(entry + 1);
		read_extent_buffer(leaf, name_ptr,
				   (unsigned long)(di + 1), name_len);
		put_unaligned(name_len, &entry->name_len);
		put_unaligned(fs_ftype_to_dtype(ftype), &entry->type);
		btrfs_dir_item_key_to_cpu(leaf, di, &location);
		put_unaligned(location.objectid, &entry->ino);
		put_unaligned(found_key.offset, &entry->offset);
		entries++;
		addr += sizeof(struct dir_entry) + name_len;
		total_len += sizeof(struct dir_entry) + name_len;
	}
	/* Catch error encountered during iteration */
	if (ret < 0)
		goto err;

	btrfs_release_path(path);

	ret = btrfs_filldir(private->filldir_buf, entries, ctx);
	if (ret)
		goto nopos;

	ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
	if (ret)
		goto nopos;

	/*
	 * Stop new entries from being returned after we return the last
	 * entry.
	 *
	 * New directory entries are assigned a strictly increasing
	 * offset.  This means that new entries created during readdir
	 * are *guaranteed* to be seen in the future by that readdir.
	 * This has broken buggy programs which operate on names as
	 * they're returned by readdir.  Until we re-use freed offsets
	 * we have this hack to stop new entries from being returned
	 * under the assumption that they'll never reach this huge
	 * offset.
	 *
	 * This is being careful not to overflow 32bit loff_t unless the
	 * last entry requires it because doing so has broken 32bit apps
	 * in the past.
	 */
	if (ctx->pos >= INT_MAX)
		ctx->pos = LLONG_MAX;
	else
		ctx->pos = INT_MAX;
nopos:
	ret = 0;
err:
	if (put)
		btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
	btrfs_free_path(path);

	return ret;
}

/*
 * This is somewhat expensive, updating the tree every time the
 * inode changes.  But, it is most likely to find the inode in cache.
 * FIXME, needs more benchmarking...there are no reasons other than performance
 * to keep or drop this code.
 */
static int btrfs_dirty_inode(struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret;

	if (test_bit(BTRFS_INODE_DUMMY, &inode->runtime_flags))
		return 0;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	ret = btrfs_update_inode(trans, root, inode);
	if (ret && (ret == -ENOSPC || ret == -EDQUOT)) {
		/* whoops, lets try again with the full transaction */
		btrfs_end_transaction(trans);
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_update_inode(trans, root, inode);
	}
	btrfs_end_transaction(trans);
	if (inode->delayed_node)
		btrfs_balance_delayed_items(fs_info);

	return ret;
}

/*
 * This is a copy of file_update_time.  We need this so we can return error on
 * ENOSPC for updating the inode in the case of file write and mmap writes.
 */
static int btrfs_update_time(struct inode *inode, struct timespec64 *now,
			     int flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	bool dirty = flags & ~S_VERSION;

	if (btrfs_root_readonly(root))
		return -EROFS;

	if (flags & S_VERSION)
		dirty |= inode_maybe_inc_iversion(inode, dirty);
	if (flags & S_CTIME)
		inode->i_ctime = *now;
	if (flags & S_MTIME)
		inode->i_mtime = *now;
	if (flags & S_ATIME)
		inode->i_atime = *now;
	return dirty ? btrfs_dirty_inode(BTRFS_I(inode)) : 0;
}

/*
 * find the highest existing sequence number in a directory
 * and then set the in-memory index_cnt variable to reflect
 * free sequence numbers
 */
static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_key key, found_key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = btrfs_ino(inode);
	key.type = BTRFS_DIR_INDEX_KEY;
	key.offset = (u64)-1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	/* FIXME: we should be able to handle this */
	if (ret == 0)
		goto out;
	ret = 0;

	if (path->slots[0] == 0) {
		inode->index_cnt = BTRFS_DIR_START_INDEX;
		goto out;
	}

	path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

	if (found_key.objectid != btrfs_ino(inode) ||
	    found_key.type != BTRFS_DIR_INDEX_KEY) {
		inode->index_cnt = BTRFS_DIR_START_INDEX;
		goto out;
	}

	inode->index_cnt = found_key.offset + 1;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * helper to find a free sequence number in a given directory.  This current
 * code is very simple, later versions will do smarter things in the btree
 */
int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index)
{
	int ret = 0;

	if (dir->index_cnt == (u64)-1) {
		ret = btrfs_inode_delayed_dir_index_count(dir);
		if (ret) {
			ret = btrfs_set_inode_index_count(dir);
			if (ret)
				return ret;
		}
	}

	*index = dir->index_cnt;
	dir->index_cnt++;

	return ret;
}
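
/*
 * Illustrative sketch, not part of the original file: a typical consumer
 * of btrfs_set_inode_index(). The helper hands out dir->index_cnt and
 * post-increments it, so consecutive directory entries get strictly
 * increasing readdir offsets. Kept under #if 0 since it is example-only.
 */
#if 0
static int example_alloc_dir_index(struct btrfs_inode *dir)
{
	u64 index;
	int ret;

	ret = btrfs_set_inode_index(dir, &index);
	if (ret)
		return ret;
	/* 'index' is now reserved for a new BTRFS_DIR_INDEX_KEY item. */
	return 0;
}
#endif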
static int btrfs_insert_inode_locked(struct inode *inode)
{
	struct btrfs_iget_args args;

	args.ino = BTRFS_I(inode)->location.objectid;
	args.root = BTRFS_I(inode)->root;

	return insert_inode_locked4(inode,
		   btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root),
		   btrfs_find_actor, &args);
}
int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args,
			    unsigned int *trans_num_items)
{
	struct inode *dir = args->dir;
	struct inode *inode = args->inode;
	int ret;

	if (!args->orphan) {
		ret = fscrypt_setup_filename(dir, &args->dentry->d_name, 0,
					     &args->fname);
		if (ret)
			return ret;
	}

	ret = posix_acl_create(dir, &inode->i_mode, &args->default_acl, &args->acl);
	if (ret) {
		fscrypt_free_filename(&args->fname);
		return ret;
	}

	/* 1 to add inode item */
	*trans_num_items = 1;
	/* 1 to add compression property */
	if (BTRFS_I(dir)->prop_compress)
		(*trans_num_items)++;
	/* 1 to add default ACL xattr */
	if (args->default_acl)
		(*trans_num_items)++;
	/* 1 to add access ACL xattr */
	if (args->acl)
		(*trans_num_items)++;
#ifdef CONFIG_SECURITY
	/* 1 to add LSM xattr */
	if (dir->i_security)
		(*trans_num_items)++;
#endif
	if (args->orphan) {
		/* 1 to add orphan item */
		(*trans_num_items)++;
	} else {
		/*
		 * 1 to add dir item
		 * 1 to add dir index
		 * 1 to update parent inode item
		 *
		 * No need for 1 unit for the inode ref item because it is
		 * inserted in a batch together with the inode item at
		 * btrfs_create_new_inode().
		 */
		*trans_num_items += 3;
	}
	return 0;
}
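
/*
 * Illustrative sketch, not part of the original file: for a plain, non-orphan
 * file create with no ACLs, no inherited compression property and no LSM
 * xattr, the reservation computed above is 1 (inode item) + 3 (dir item, dir
 * index, parent inode update) = 4 transaction units. Example-only, under #if 0.
 */
#if 0
static void example_count_trans_units(struct btrfs_new_inode_args *args)
{
	unsigned int units;

	if (btrfs_new_inode_prepare(args, &units))
		return;
	/* units == 4 in the minimal case described above. */
	btrfs_new_inode_args_destroy(args);
}
#endif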
void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args)
{
	posix_acl_release(args->acl);
	posix_acl_release(args->default_acl);
	fscrypt_free_filename(&args->fname);
}
/*
 * Inherit flags from the parent inode.
 *
 * Currently only the compression flags and the cow flags are inherited.
 */
static void btrfs_inherit_iflags(struct btrfs_inode *inode, struct btrfs_inode *dir)
{
	unsigned int flags;

	flags = dir->flags;

	if (flags & BTRFS_INODE_NOCOMPRESS) {
		inode->flags &= ~BTRFS_INODE_COMPRESS;
		inode->flags |= BTRFS_INODE_NOCOMPRESS;
	} else if (flags & BTRFS_INODE_COMPRESS) {
		inode->flags &= ~BTRFS_INODE_NOCOMPRESS;
		inode->flags |= BTRFS_INODE_COMPRESS;
	}

	if (flags & BTRFS_INODE_NODATACOW) {
		inode->flags |= BTRFS_INODE_NODATACOW;
		if (S_ISREG(inode->vfs_inode.i_mode))
			inode->flags |= BTRFS_INODE_NODATASUM;
	}

	btrfs_sync_inode_flags_to_i_flags(&inode->vfs_inode);
}
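
/*
 * Illustrative sketch, not part of the original file: a regular file created
 * in a NODATACOW directory inherits NODATACOW and, because nocow data cannot
 * carry checksums, NODATASUM as well. Example-only, under #if 0.
 */
#if 0
static void example_inherit_nocow(struct btrfs_inode *file, struct btrfs_inode *dir)
{
	dir->flags |= BTRFS_INODE_NODATACOW;
	btrfs_inherit_iflags(file, dir);
	ASSERT(file->flags & BTRFS_INODE_NODATACOW);
	ASSERT(!S_ISREG(file->vfs_inode.i_mode) ||
	       (file->flags & BTRFS_INODE_NODATASUM));
}
#endif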
int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_new_inode_args *args)
{
	struct inode *dir = args->dir;
	struct inode *inode = args->inode;
	const struct fscrypt_str *name = args->orphan ? NULL : &args->fname.disk_name;
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct btrfs_root *root;
	struct btrfs_inode_item *inode_item;
	struct btrfs_key *location;
	struct btrfs_path *path;
	u64 objectid;
	struct btrfs_inode_ref *ref;
	struct btrfs_key key[2];
	u32 sizes[2];
	struct btrfs_item_batch batch;
	unsigned long ptr;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (!args->subvol)
		BTRFS_I(inode)->root = btrfs_grab_root(BTRFS_I(dir)->root);
	root = BTRFS_I(inode)->root;

	ret = btrfs_get_free_objectid(root, &objectid);
	if (ret)
		goto out;
	inode->i_ino = objectid;

	if (args->orphan) {
		/*
		 * O_TMPFILE, set link count to 0, so that after this point, we
		 * fill in an inode item with the correct link count.
		 */
		set_nlink(inode, 0);
	} else {
		trace_btrfs_inode_request(dir);

		ret = btrfs_set_inode_index(BTRFS_I(dir), &BTRFS_I(inode)->dir_index);
		if (ret)
			goto out;
	}
	/* index_cnt is ignored for everything but a dir. */
	BTRFS_I(inode)->index_cnt = BTRFS_DIR_START_INDEX;
	BTRFS_I(inode)->generation = trans->transid;
	inode->i_generation = BTRFS_I(inode)->generation;

	/*
	 * Subvolumes don't inherit flags from their parent directory.
	 * Originally this was probably by accident, but we probably can't
	 * change it now without compatibility issues.
	 */
	if (!args->subvol)
		btrfs_inherit_iflags(BTRFS_I(inode), BTRFS_I(dir));

	if (S_ISREG(inode->i_mode)) {
		if (btrfs_test_opt(fs_info, NODATASUM))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
		if (btrfs_test_opt(fs_info, NODATACOW))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
				BTRFS_INODE_NODATASUM;
	}

	location = &BTRFS_I(inode)->location;
	location->objectid = objectid;
	location->offset = 0;
	location->type = BTRFS_INODE_ITEM_KEY;

	ret = btrfs_insert_inode_locked(inode);
	if (ret < 0) {
		if (!args->orphan)
			BTRFS_I(dir)->index_cnt--;
		goto out;
	}

	/*
	 * We could have gotten an inode number from somebody who was fsynced
	 * and then removed in this same transaction, so let's just set full
	 * sync since it will be a full sync anyway and this will blow away the
	 * old info in the log.
	 */
	btrfs_set_inode_full_sync(BTRFS_I(inode));

	key[0].objectid = objectid;
	key[0].type = BTRFS_INODE_ITEM_KEY;
	key[0].offset = 0;

	sizes[0] = sizeof(struct btrfs_inode_item);

	if (!args->orphan) {
		/*
		 * Start new inodes with an inode_ref. This is slightly more
		 * efficient for small numbers of hard links since they will
		 * be packed into one item. Extended refs will kick in if we
		 * add more hard links than can fit in the ref item.
		 */
		key[1].objectid = objectid;
		key[1].type = BTRFS_INODE_REF_KEY;
		if (args->subvol) {
			key[1].offset = objectid;
			sizes[1] = 2 + sizeof(*ref);
		} else {
			key[1].offset = btrfs_ino(BTRFS_I(dir));
			sizes[1] = name->len + sizeof(*ref);
		}
	}

	batch.keys = &key[0];
	batch.data_sizes = &sizes[0];
	batch.total_data_size = sizes[0] + (args->orphan ? 0 : sizes[1]);
	batch.nr = args->orphan ? 1 : 2;
	ret = btrfs_insert_empty_items(trans, root, path, &batch);
	if (ret != 0) {
		btrfs_abort_transaction(trans, ret);
		goto discard;
	}

	inode->i_mtime = current_time(inode);
	inode->i_atime = inode->i_mtime;
	inode->i_ctime = inode->i_mtime;
	BTRFS_I(inode)->i_otime = inode->i_mtime;

	/*
	 * We're going to fill the inode item now, so at this point the inode
	 * must be fully initialized.
	 */

	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_inode_item);
	memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item,
			      sizeof(*inode_item));
	fill_inode_item(trans, path->nodes[0], inode_item, inode);

	if (!args->orphan) {
		ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
				     struct btrfs_inode_ref);
		ptr = (unsigned long)(ref + 1);
		if (args->subvol) {
			btrfs_set_inode_ref_name_len(path->nodes[0], ref, 2);
			btrfs_set_inode_ref_index(path->nodes[0], ref, 0);
			write_extent_buffer(path->nodes[0], "..", ptr, 2);
		} else {
			btrfs_set_inode_ref_name_len(path->nodes[0], ref,
						     name->len);
			btrfs_set_inode_ref_index(path->nodes[0], ref,
						  BTRFS_I(inode)->dir_index);
			write_extent_buffer(path->nodes[0], name->name, ptr,
					    name->len);
		}
	}

	btrfs_mark_buffer_dirty(path->nodes[0]);
	/*
	 * We don't need the path anymore, plus inheriting properties, adding
	 * ACLs, security xattrs, orphan item or adding the link, will result in
	 * allocating yet another path. So just free our path.
	 */
	btrfs_free_path(path);
	path = NULL;

	if (args->subvol) {
		struct inode *parent;

		/*
		 * Subvolumes inherit properties from their parent subvolume,
		 * not the directory they were created in.
		 */
		parent = btrfs_iget(fs_info->sb, BTRFS_FIRST_FREE_OBJECTID,
				    BTRFS_I(dir)->root);
		if (IS_ERR(parent)) {
			ret = PTR_ERR(parent);
		} else {
			ret = btrfs_inode_inherit_props(trans, inode, parent);
			iput(parent);
		}
	} else {
		ret = btrfs_inode_inherit_props(trans, inode, dir);
	}
	if (ret) {
		btrfs_err(fs_info,
			  "error inheriting props for ino %llu (root %llu): %d",
			  btrfs_ino(BTRFS_I(inode)), root->root_key.objectid,
			  ret);
	}

	/*
	 * Subvolumes don't inherit ACLs or get passed to the LSM. This is
	 * probably a bug.
	 */
	if (!args->subvol) {
		ret = btrfs_init_inode_security(trans, args);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto discard;
		}
	}

	inode_tree_add(BTRFS_I(inode));

	trace_btrfs_inode_new(inode);
	btrfs_set_inode_last_trans(trans, BTRFS_I(inode));

	btrfs_update_root_times(trans, root);

	if (args->orphan) {
		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
	} else {
		ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
				     0, BTRFS_I(inode)->dir_index);
	}
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto discard;
	}

	return 0;

discard:
	/*
	 * discard_new_inode() calls iput(), but the caller owns the reference
	 * to the inode.
	 */
	ihold(inode);
	discard_new_inode(inode);
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * utility function to add 'inode' into 'parent_inode' with
 * a given name and a given sequence number.
 * if 'add_backref' is true, also insert a backref from the
 * inode to the parent directory.
 */
int btrfs_add_link(struct btrfs_trans_handle *trans,
		   struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
		   const struct fscrypt_str *name, int add_backref, u64 index)
{
	int ret = 0;
	struct btrfs_key key;
	struct btrfs_root *root = parent_inode->root;
	u64 ino = btrfs_ino(inode);
	u64 parent_ino = btrfs_ino(parent_inode);

	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		memcpy(&key, &inode->root->root_key, sizeof(key));
	} else {
		key.objectid = ino;
		key.type = BTRFS_INODE_ITEM_KEY;
		key.offset = 0;
	}

	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		ret = btrfs_add_root_ref(trans, key.objectid,
					 root->root_key.objectid, parent_ino,
					 index, name);
	} else if (add_backref) {
		ret = btrfs_insert_inode_ref(trans, root, name,
					     ino, parent_ino, index);
	}

	/* Nothing to clean up yet */
	if (ret)
		return ret;

	ret = btrfs_insert_dir_item(trans, name, parent_inode, &key,
				    btrfs_inode_type(&inode->vfs_inode), index);
	if (ret == -EEXIST || ret == -EOVERFLOW)
		goto fail_dir_item;
	else if (ret) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size +
			   name->len * 2);
	inode_inc_iversion(&parent_inode->vfs_inode);
	/*
	 * If we are replaying a log tree, we do not want to update the mtime
	 * and ctime of the parent directory with the current time, since the
	 * log replay procedure is responsible for setting them to their correct
	 * values (the ones it had when the fsync was done).
	 */
	if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) {
		struct timespec64 now = current_time(&parent_inode->vfs_inode);

		parent_inode->vfs_inode.i_mtime = now;
		parent_inode->vfs_inode.i_ctime = now;
	}
	ret = btrfs_update_inode(trans, root, parent_inode);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;

fail_dir_item:
	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		u64 local_index;
		int err;

		err = btrfs_del_root_ref(trans, key.objectid,
					 root->root_key.objectid, parent_ino,
					 &local_index, name);
		if (err)
			btrfs_abort_transaction(trans, err);
	} else if (add_backref) {
		u64 local_index;
		int err;

		err = btrfs_del_inode_ref(trans, root, name, ino, parent_ino,
					  &local_index);
		if (err)
			btrfs_abort_transaction(trans, err);
	}

	/* Return the original error code */
	return ret;
}
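
/*
 * Illustrative sketch, not part of the original file: the usual call
 * sequence for linking an inode into a parent directory, mirroring the
 * non-orphan case of btrfs_create_new_inode() above. Example-only, #if 0.
 */
#if 0
static int example_add_link(struct btrfs_trans_handle *trans,
			    struct btrfs_inode *dir, struct btrfs_inode *inode,
			    const struct fscrypt_str *name)
{
	u64 index;
	int ret;

	ret = btrfs_set_inode_index(dir, &index);
	if (ret)
		return ret;
	/* add_backref == 1: also insert the inode ref back to the parent. */
	return btrfs_add_link(trans, dir, inode, name, 1, index);
}
#endif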
static int btrfs_create_common(struct inode *dir, struct dentry *dentry,
			       struct inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_new_inode_args new_inode_args = {
		.dir = dir,
		.dentry = dentry,
		.inode = inode,
	};
	unsigned int trans_num_items;
	struct btrfs_trans_handle *trans;
	int err;

	err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
	if (err)
		goto out_inode;

	trans = btrfs_start_transaction(root, trans_num_items);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_new_inode_args;
	}

	err = btrfs_create_new_inode(trans, &new_inode_args);
	if (!err)
		d_instantiate_new(dentry, inode);

	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out_new_inode_args:
	btrfs_new_inode_args_destroy(&new_inode_args);
out_inode:
	if (err)
		iput(inode);
	return err;
}
static int btrfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
		       struct dentry *dentry, umode_t mode, dev_t rdev)
{
	struct inode *inode;

	inode = new_inode(dir->i_sb);
	if (!inode)
		return -ENOMEM;
	inode_init_owner(idmap, inode, dir, mode);
	inode->i_op = &btrfs_special_inode_operations;
	init_special_inode(inode, inode->i_mode, rdev);
	return btrfs_create_common(dir, dentry, inode);
}
static int btrfs_create(struct mnt_idmap *idmap, struct inode *dir,
			struct dentry *dentry, umode_t mode, bool excl)
{
	struct inode *inode;

	inode = new_inode(dir->i_sb);
	if (!inode)
		return -ENOMEM;
	inode_init_owner(idmap, inode, dir, mode);
	inode->i_fop = &btrfs_file_operations;
	inode->i_op = &btrfs_file_inode_operations;
	inode->i_mapping->a_ops = &btrfs_aops;
	return btrfs_create_common(dir, dentry, inode);
}
static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
		      struct dentry *dentry)
{
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = d_inode(old_dentry);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct fscrypt_name fname;
	u64 index;
	int err;
	int drop_inode = 0;

	/* do not allow sys_link's with other subvols of the same device */
	if (root->root_key.objectid != BTRFS_I(inode)->root->root_key.objectid)
		return -EXDEV;

	if (inode->i_nlink >= BTRFS_LINK_MAX)
		return -EMLINK;

	err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname);
	if (err)
		goto fail;

	err = btrfs_set_inode_index(BTRFS_I(dir), &index);
	if (err)
		goto fail;

	/*
	 * 2 items for inode and inode ref
	 * 2 items for dir items
	 * 1 item for parent inode
	 * 1 item for orphan item deletion if O_TMPFILE
	 */
	trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		trans = NULL;
		goto fail;
	}

	/* There are several dir indexes for this inode, clear the cache. */
	BTRFS_I(inode)->dir_index = 0ULL;
	inc_nlink(inode);
	inode_inc_iversion(inode);
	inode->i_ctime = current_time(inode);
	ihold(inode);
	set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);

	err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
			     &fname.disk_name, 1, index);

	if (err) {
		drop_inode = 1;
	} else {
		struct dentry *parent = dentry->d_parent;

		err = btrfs_update_inode(trans, root, BTRFS_I(inode));
		if (err)
			goto fail;
		if (inode->i_nlink == 1) {
			/*
			 * If new hard link count is 1, it's a file created
			 * with open(2) O_TMPFILE flag.
			 */
			err = btrfs_orphan_del(trans, BTRFS_I(inode));
			if (err)
				goto fail;
		}
		d_instantiate(dentry, inode);
		btrfs_log_new_name(trans, old_dentry, NULL, 0, parent);
	}

fail:
	fscrypt_free_filename(&fname);
	if (trans)
		btrfs_end_transaction(trans);
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	btrfs_btree_balance_dirty(fs_info);
	return err;
}
static int btrfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
		       struct dentry *dentry, umode_t mode)
{
	struct inode *inode;

	inode = new_inode(dir->i_sb);
	if (!inode)
		return -ENOMEM;
	inode_init_owner(idmap, inode, dir, S_IFDIR | mode);
	inode->i_op = &btrfs_dir_inode_operations;
	inode->i_fop = &btrfs_dir_file_operations;
	return btrfs_create_common(dir, dentry, inode);
}
static noinline int uncompress_inline(struct btrfs_path *path,
				      struct page *page,
				      struct btrfs_file_extent_item *item)
{
	int ret;
	struct extent_buffer *leaf = path->nodes[0];
	char *tmp;
	size_t max_size;
	unsigned long inline_size;
	unsigned long ptr;
	int compress_type;

	compress_type = btrfs_file_extent_compression(leaf, item);
	max_size = btrfs_file_extent_ram_bytes(leaf, item);
	inline_size = btrfs_file_extent_inline_item_len(leaf, path->slots[0]);
	tmp = kmalloc(inline_size, GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	ptr = btrfs_file_extent_inline_start(item);

	read_extent_buffer(leaf, tmp, ptr, inline_size);

	max_size = min_t(unsigned long, PAGE_SIZE, max_size);
	ret = btrfs_decompress(compress_type, tmp, page, 0, inline_size, max_size);

	/*
	 * decompression code contains a memset to fill in any space between the end
	 * of the uncompressed data and the end of max_size in case the decompressed
	 * data ends up shorter than ram_bytes.  That doesn't cover the hole between
	 * the end of an inline extent and the beginning of the next block, so we
	 * cover that region here.
	 */

	if (max_size < PAGE_SIZE)
		memzero_page(page, max_size, PAGE_SIZE - max_size);
	kfree(tmp);
	return ret;
}
static int read_inline_extent(struct btrfs_inode *inode, struct btrfs_path *path,
			      struct page *page)
{
	struct btrfs_file_extent_item *fi;
	void *kaddr;
	size_t copy_size;

	if (!page || PageUptodate(page))
		return 0;

	ASSERT(page_offset(page) == 0);

	fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_file_extent_item);
	if (btrfs_file_extent_compression(path->nodes[0], fi) != BTRFS_COMPRESS_NONE)
		return uncompress_inline(path, page, fi);

	copy_size = min_t(u64, PAGE_SIZE,
			  btrfs_file_extent_ram_bytes(path->nodes[0], fi));
	kaddr = kmap_local_page(page);
	read_extent_buffer(path->nodes[0], kaddr,
			   btrfs_file_extent_inline_start(fi), copy_size);
	kunmap_local(kaddr);
	if (copy_size < PAGE_SIZE)
		memzero_page(page, copy_size, PAGE_SIZE - copy_size);
	return 0;
}
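
/*
 * Illustrative numbers, not part of the original file: with a 4K page and an
 * uncompressed inline extent whose ram_bytes is 100, copy_size is 100 and the
 * remaining PAGE_SIZE - 100 = 3996 bytes of the page are zeroed, covering the
 * gap between the end of the inline data and the next block boundary.
 */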
/*
 * Lookup the first extent overlapping a range in a file.
 *
 * @inode:	file to search in
 * @page:	page to read extent data into if the extent is inline
 * @pg_offset:	offset into @page to copy to
 * @start:	file offset
 * @len:	length of range starting at @start
 *
 * Return the first &struct extent_map which overlaps the given range, reading
 * it from the B-tree and caching it if necessary. Note that there may be more
 * extents which overlap the given range after the returned extent_map.
 *
 * If @page is not NULL and the extent is inline, this also reads the extent
 * data directly into the page and marks the extent up to date in the io_tree.
 *
 * Return: ERR_PTR on error, non-NULL extent_map on success.
 */
struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
				    struct page *page, size_t pg_offset,
				    u64 start, u64 len)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	int ret = 0;
	u64 extent_start = 0;
	u64 extent_end = 0;
	u64 objectid = btrfs_ino(inode);
	int extent_type = -1;
	struct btrfs_path *path = NULL;
	struct btrfs_root *root = inode->root;
	struct btrfs_file_extent_item *item;
	struct extent_buffer *leaf;
	struct btrfs_key found_key;
	struct extent_map *em = NULL;
	struct extent_map_tree *em_tree = &inode->extent_tree;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	read_unlock(&em_tree->lock);

	if (em) {
		if (em->start > start || em->start + em->len <= start)
			free_extent_map(em);
		else if (em->block_start == EXTENT_MAP_INLINE && page)
			free_extent_map(em);
		else
			goto out;
	}
	em = alloc_extent_map();
	if (!em) {
		ret = -ENOMEM;
		goto out;
	}
	em->start = EXTENT_MAP_HOLE;
	em->orig_start = EXTENT_MAP_HOLE;
	em->len = (u64)-1;
	em->block_len = (u64)-1;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/* Chances are we'll be called again, so go ahead and do readahead */
	path->reada = READA_FORWARD;

	/*
	 * The same explanation in load_free_space_cache applies here as well,
	 * we only read when we're loading the free space cache, and at that
	 * point the commit_root has everything we need.
	 */
	if (btrfs_is_free_space_inode(inode)) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0);
	if (ret < 0) {
		goto out;
	} else if (ret > 0) {
		if (path->slots[0] == 0)
			goto not_found;
		path->slots[0]--;
		ret = 0;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	if (found_key.objectid != objectid ||
	    found_key.type != BTRFS_EXTENT_DATA_KEY) {
		/*
		 * If we backup past the first extent we want to move forward
		 * and see if there is an extent in front of us, otherwise we'll
		 * say there is a hole for our whole search range which can
		 * cause problems.
		 */
		extent_end = start;
		goto next;
	}

	extent_type = btrfs_file_extent_type(leaf, item);
	extent_start = found_key.offset;
	extent_end = btrfs_file_extent_end(path);
	if (extent_type == BTRFS_FILE_EXTENT_REG ||
	    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
		/* Only regular file could have regular/prealloc extent */
		if (!S_ISREG(inode->vfs_inode.i_mode)) {
			ret = -EUCLEAN;
			btrfs_crit(fs_info,
		"regular/prealloc extent found for non-regular inode %llu",
				   btrfs_ino(inode));
			goto out;
		}
		trace_btrfs_get_extent_show_fi_regular(inode, leaf, item,
						       extent_start);
	} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
		trace_btrfs_get_extent_show_fi_inline(inode, leaf, item,
						      path->slots[0],
						      extent_start);
	}
next:
	if (start >= extent_end) {
		path->slots[0]++;
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				goto not_found;

			leaf = path->nodes[0];
		}
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != objectid ||
		    found_key.type != BTRFS_EXTENT_DATA_KEY)
			goto not_found;
		if (start + len <= found_key.offset)
			goto not_found;
		if (start > found_key.offset)
			goto next;

		/* New extent overlaps with existing one */
		em->start = start;
		em->orig_start = start;
		em->len = found_key.offset - start;
		em->block_start = EXTENT_MAP_HOLE;
		goto insert;
	}

	btrfs_extent_item_to_extent_map(inode, path, item, em);

	if (extent_type == BTRFS_FILE_EXTENT_REG ||
	    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
		goto insert;
	} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
		/*
		 * Inline extent can only exist at file offset 0. This is
		 * ensured by tree-checker and inline extent creation path.
		 * Thus all members representing file offsets should be zero.
		 */
		ASSERT(pg_offset == 0);
		ASSERT(extent_start == 0);
		ASSERT(em->start == 0);

		/*
		 * btrfs_extent_item_to_extent_map() should have properly
		 * initialized em members already.
		 *
		 * Other members are not utilized for inline extents.
		 */
		ASSERT(em->block_start == EXTENT_MAP_INLINE);
		ASSERT(em->len == fs_info->sectorsize);

		ret = read_inline_extent(inode, path, page);
		if (ret < 0)
			goto out;
		goto insert;
	}
not_found:
	em->start = start;
	em->orig_start = start;
	em->len = len;
	em->block_start = EXTENT_MAP_HOLE;
insert:
	ret = 0;
	btrfs_release_path(path);
	if (em->start > start || extent_map_end(em) <= start) {
		btrfs_err(fs_info,
			  "bad extent! em: [%llu %llu] passed [%llu %llu]",
			  em->start, em->len, start, len);
		ret = -EIO;
		goto out;
	}

	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len);
	write_unlock(&em_tree->lock);
out:
	btrfs_free_path(path);

	trace_btrfs_get_extent(root, inode, em);

	if (ret) {
		free_extent_map(em);
		return ERR_PTR(ret);
	}
	return em;
}
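
/*
 * Illustrative sketch, not part of the original file: walking every extent
 * map covering a file range. btrfs_get_extent() only returns the first
 * overlapping mapping, so a caller advances by extent_map_end() itself.
 * Example-only, under #if 0.
 */
#if 0
static int example_walk_extents(struct btrfs_inode *inode, u64 start, u64 len)
{
	const u64 end = start + len;

	while (start < end) {
		struct extent_map *em;

		em = btrfs_get_extent(inode, NULL, 0, start, end - start);
		if (IS_ERR(em))
			return PTR_ERR(em);
		start = extent_map_end(em);
		free_extent_map(em);
	}
	return 0;
}
#endif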
static struct extent_map *btrfs_create_dio_extent(struct btrfs_inode *inode,
						  struct btrfs_dio_data *dio_data,
						  const u64 start,
						  const u64 len,
						  const u64 orig_start,
						  const u64 block_start,
						  const u64 block_len,
						  const u64 orig_block_len,
						  const u64 ram_bytes,
						  const int type)
{
	struct extent_map *em = NULL;
	struct btrfs_ordered_extent *ordered;

	if (type != BTRFS_ORDERED_NOCOW) {
		em = create_io_em(inode, start, len, orig_start, block_start,
				  block_len, orig_block_len, ram_bytes,
				  BTRFS_COMPRESS_NONE, /* compress_type */
				  type);
		if (IS_ERR(em))
			goto out;
	}
	ordered = btrfs_alloc_ordered_extent(inode, start, len, len,
					     block_start, block_len, 0,
					     (1 << type) |
					     (1 << BTRFS_ORDERED_DIRECT),
					     BTRFS_COMPRESS_NONE);
	if (IS_ERR(ordered)) {
		if (em) {
			free_extent_map(em);
			btrfs_drop_extent_map_range(inode, start,
						    start + len - 1, false);
		}
		em = ERR_CAST(ordered);
	} else {
		ASSERT(!dio_data->ordered);
		dio_data->ordered = ordered;
	}
out:

	return em;
}
static struct extent_map *btrfs_new_extent_direct(struct btrfs_inode *inode,
						  struct btrfs_dio_data *dio_data,
						  u64 start, u64 len)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_map *em;
	struct btrfs_key ins;
	u64 alloc_hint;
	int ret;

	alloc_hint = get_extent_allocation_hint(inode, start, len);
	ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize,
				   0, alloc_hint, &ins, 1, 1);
	if (ret)
		return ERR_PTR(ret);

	em = btrfs_create_dio_extent(inode, dio_data, start, ins.offset, start,
				     ins.objectid, ins.offset, ins.offset,
				     ins.offset, BTRFS_ORDERED_REGULAR);
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	if (IS_ERR(em))
		btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset,
					   1);

	return em;
}
static bool btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group *block_group;
	bool readonly = false;

	block_group = btrfs_lookup_block_group(fs_info, bytenr);
	if (!block_group || block_group->ro)
		readonly = true;
	if (block_group)
		btrfs_put_block_group(block_group);
	return readonly;
}
/*
 * Check if we can do nocow write into the range [@offset, @offset + @len)
 *
 * @offset:	File offset
 * @len:	The length to write, will be updated to the nocow writeable
 *		range
 * @orig_start:	(optional) Return the original file offset of the file extent
 * @orig_len:	(optional) Return the original on-disk length of the file extent
 * @ram_bytes:	(optional) Return the ram_bytes of the file extent
 * @strict:	if true, omit optimizations that might force us into unnecessary
 *		cow. e.g., don't trust generation number.
 *
 * Return:
 * >0	and update @len if we can do nocow write
 *  0	if we can't do nocow write
 * <0	if error happened
 *
 * NOTE: This only checks the file extents, caller is responsible to wait for
 *	 any ordered extents.
 */
noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
			      u64 *orig_start, u64 *orig_block_len,
			      u64 *ram_bytes, bool nowait, bool strict)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct can_nocow_file_extent_args nocow_args = { 0 };
	struct btrfs_path *path;
	int ret;
	struct extent_buffer *leaf;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	int found_type;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->nowait = nowait;

	ret = btrfs_lookup_file_extent(NULL, root, path,
				       btrfs_ino(BTRFS_I(inode)), offset, 0);
	if (ret < 0)
		goto out;

	if (ret == 1) {
		if (path->slots[0] == 0) {
			/* can't find the item, must cow */
			ret = 0;
			goto out;
		}
		path->slots[0]--;
	}
	ret = 0;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != btrfs_ino(BTRFS_I(inode)) ||
	    key.type != BTRFS_EXTENT_DATA_KEY) {
		/* not our file or wrong item type, must cow */
		goto out;
	}

	if (key.offset > offset) {
		/* Wrong offset, must cow */
		goto out;
	}

	if (btrfs_file_extent_end(path) <= offset)
		goto out;

	fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(leaf, fi);
	if (ram_bytes)
		*ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);

	nocow_args.start = offset;
	nocow_args.end = offset + *len - 1;
	nocow_args.strict = strict;
	nocow_args.free_path = true;

	ret = can_nocow_file_extent(path, &key, BTRFS_I(inode), &nocow_args);
	/* can_nocow_file_extent() has freed the path. */
	path = NULL;

	if (ret != 1) {
		/* Treat errors as not being able to NOCOW. */
		ret = 0;
		goto out;
	}

	ret = 0;
	if (btrfs_extent_readonly(fs_info, nocow_args.disk_bytenr))
		goto out;

	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		u64 range_end;

		range_end = round_up(offset + nocow_args.num_bytes,
				     root->fs_info->sectorsize) - 1;
		ret = test_range_bit(io_tree, offset, range_end,
				     EXTENT_DELALLOC, 0, NULL);
		if (ret) {
			ret = -EAGAIN;
			goto out;
		}
	}

	if (orig_start)
		*orig_start = key.offset - nocow_args.extent_offset;
	if (orig_block_len)
		*orig_block_len = nocow_args.disk_num_bytes;

	*len = nocow_args.num_bytes;
	ret = 1;
out:
	btrfs_free_path(path);
	return ret;
}
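
/*
 * Illustrative sketch, not part of the original file: how a write path would
 * probe for NOCOW, much like btrfs_get_blocks_direct_write() below. Remember
 * the caller must have waited for ordered extents first. Example-only, #if 0.
 */
#if 0
static bool example_can_nocow(struct inode *inode, u64 offset, u64 len)
{
	u64 orig_start, orig_block_len, ram_bytes;

	return can_nocow_extent(inode, offset, &len, &orig_start,
				&orig_block_len, &ram_bytes,
				false /* nowait */, false /* strict */) == 1;
}
#endif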
static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
			      struct extent_state **cached_state,
			      unsigned int iomap_flags)
{
	const bool writing = (iomap_flags & IOMAP_WRITE);
	const bool nowait = (iomap_flags & IOMAP_NOWAIT);
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_ordered_extent *ordered;
	int ret = 0;

	while (1) {
		if (nowait) {
			if (!try_lock_extent(io_tree, lockstart, lockend,
					     cached_state))
				return -EAGAIN;
		} else {
			lock_extent(io_tree, lockstart, lockend, cached_state);
		}
		/*
		 * We're concerned with the entire range that we're going to be
		 * doing DIO to, so we need to make sure there's no ordered
		 * extents in this range.
		 */
		ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart,
						     lockend - lockstart + 1);

		/*
		 * We need to make sure there are no buffered pages in this
		 * range either, we could have raced between the invalidate in
		 * generic_file_direct_write and locking the extent.  The
		 * invalidate needs to happen so that reads after a write do not
		 * get stale data.
		 */
		if (!ordered &&
		    (!writing || !filemap_range_has_page(inode->i_mapping,
							 lockstart, lockend)))
			break;

		unlock_extent(io_tree, lockstart, lockend, cached_state);

		if (ordered) {
			if (nowait) {
				btrfs_put_ordered_extent(ordered);
				ret = -EAGAIN;
				break;
			}
			/*
			 * If we are doing a DIO read and the ordered extent we
			 * found is for a buffered write, we can not wait for it
			 * to complete and retry, because if we do so we can
			 * deadlock with concurrent buffered writes on page
			 * locks. This happens only if our DIO read covers more
			 * than one extent map, if at this point has already
			 * created an ordered extent for a previous extent map
			 * and locked its range in the inode's io tree, and a
			 * concurrent write against that previous extent map's
			 * range and this range started (we unlock the ranges
			 * in the io tree only when the bios complete and
			 * buffered writes always lock pages before attempting
			 * to lock range in the io tree).
			 */
			if (writing ||
			    test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags))
				btrfs_start_ordered_extent(ordered);
			else
				ret = nowait ? -EAGAIN : -ENOTBLK;
			btrfs_put_ordered_extent(ordered);
		} else {
			/*
			 * We could trigger writeback for this range (and wait
			 * for it to complete) and then invalidate the pages for
			 * this range (through invalidate_inode_pages2_range()),
			 * but that can lead us to a deadlock with a concurrent
			 * call to readahead (a buffered read or a defrag call
			 * triggered a readahead) on a page lock due to an
			 * ordered dio extent we created before but did not have
			 * yet a corresponding bio submitted (whence it can not
			 * complete), which makes readahead wait for that
			 * ordered extent to complete while holding a lock on
			 * that page.
			 */
			ret = nowait ? -EAGAIN : -ENOTBLK;
		}

		if (ret)
			break;

		cond_resched();
	}

	return ret;
}
/* The callers of this must take lock_extent() */
static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
				       u64 len, u64 orig_start, u64 block_start,
				       u64 block_len, u64 orig_block_len,
				       u64 ram_bytes, int compress_type,
				       int type)
{
	struct extent_map *em;
	int ret;

	ASSERT(type == BTRFS_ORDERED_PREALLOC ||
	       type == BTRFS_ORDERED_COMPRESSED ||
	       type == BTRFS_ORDERED_NOCOW ||
	       type == BTRFS_ORDERED_REGULAR);

	em = alloc_extent_map();
	if (!em)
		return ERR_PTR(-ENOMEM);

	em->start = start;
	em->orig_start = orig_start;
	em->len = len;
	em->block_len = block_len;
	em->block_start = block_start;
	em->orig_block_len = orig_block_len;
	em->ram_bytes = ram_bytes;
	em->generation = -1;
	set_bit(EXTENT_FLAG_PINNED, &em->flags);
	if (type == BTRFS_ORDERED_PREALLOC) {
		set_bit(EXTENT_FLAG_FILLING, &em->flags);
	} else if (type == BTRFS_ORDERED_COMPRESSED) {
		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		em->compress_type = compress_type;
	}

	ret = btrfs_replace_extent_map_range(inode, em, true);
	if (ret) {
		free_extent_map(em);
		return ERR_PTR(ret);
	}

	/* em got 2 refs now, callers needs to do free_extent_map once. */
	return em;
}
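
/*
 * Illustrative sketch, not part of the original file: create_io_em() returns
 * a map holding two references (one owned by the extent map tree, one by the
 * caller), so a successful caller pairs it with one free_extent_map().
 * Example-only (the block_start of 0 is a placeholder), under #if 0.
 */
#if 0
static void example_create_io_em_refs(struct btrfs_inode *inode, u64 start, u64 len)
{
	struct extent_map *em;

	em = create_io_em(inode, start, len, start, 0, len, len, len,
			  BTRFS_COMPRESS_NONE, BTRFS_ORDERED_REGULAR);
	if (!IS_ERR(em))
		free_extent_map(em);	/* drop the caller's reference */
}
#endif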
static int btrfs_get_blocks_direct_write(struct extent_map **map,
					 struct inode *inode,
					 struct btrfs_dio_data *dio_data,
					 u64 start, u64 *lenp,
					 unsigned int iomap_flags)
{
	const bool nowait = (iomap_flags & IOMAP_NOWAIT);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map *em = *map;
	int type;
	u64 block_start, orig_start, orig_block_len, ram_bytes;
	struct btrfs_block_group *bg;
	bool can_nocow = false;
	bool space_reserved = false;
	u64 len = *lenp;
	u64 prev_len;
	int ret = 0;

	/*
	 * We don't allocate a new extent in the following cases
	 *
	 * 1) The inode is marked as NODATACOW. In this case we'll just use the
	 * existing extent.
	 * 2) The extent is marked as PREALLOC. We're good to go here and can
	 * just use the extent.
	 *
	 */
	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
	    ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
	     em->block_start != EXTENT_MAP_HOLE)) {
		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
			type = BTRFS_ORDERED_PREALLOC;
		else
			type = BTRFS_ORDERED_NOCOW;
		len = min(len, em->len - (start - em->start));
		block_start = em->block_start + (start - em->start);

		if (can_nocow_extent(inode, start, &len, &orig_start,
				     &orig_block_len, &ram_bytes, false, false) == 1) {
			bg = btrfs_inc_nocow_writers(fs_info, block_start);
			if (bg)
				can_nocow = true;
		}
	}

	prev_len = len;
	if (can_nocow) {
		struct extent_map *em2;

		/* We can NOCOW, so only need to reserve metadata space. */
		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len,
						      nowait);
		if (ret < 0) {
			/* Our caller expects us to free the input extent map. */
			free_extent_map(em);
			*map = NULL;
			btrfs_dec_nocow_writers(bg);
			if (nowait && (ret == -ENOSPC || ret == -EDQUOT))
				ret = -EAGAIN;
			goto out;
		}
		space_reserved = true;

		em2 = btrfs_create_dio_extent(BTRFS_I(inode), dio_data, start, len,
					      orig_start, block_start,
					      len, orig_block_len,
					      ram_bytes, type);
		btrfs_dec_nocow_writers(bg);
		if (type == BTRFS_ORDERED_PREALLOC) {
			free_extent_map(em);
			*map = em = em2;
		}

		if (IS_ERR(em2)) {
			ret = PTR_ERR(em2);
			goto out;
		}

		dio_data->nocow_done = true;
	} else {
		/* Our caller expects us to free the input extent map. */
		free_extent_map(em);
		*map = NULL;

		if (nowait) {
			ret = -EAGAIN;
			goto out;
		}

		/*
		 * If we could not allocate data space before locking the file
		 * range and we can't do a NOCOW write, then we have to fail.
		 */
		if (!dio_data->data_space_reserved) {
			ret = -ENOSPC;
			goto out;
		}

		/*
		 * We have to COW and we have already reserved data space before,
		 * so now we reserve only metadata.
		 */
		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len,
						      false);
		if (ret < 0)
			goto out;
		space_reserved = true;

		em = btrfs_new_extent_direct(BTRFS_I(inode), dio_data, start, len);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}
		*map = em;
		len = min(len, em->len - (start - em->start));
		if (len < prev_len)
			btrfs_delalloc_release_metadata(BTRFS_I(inode),
							prev_len - len, true);
	}

	/*
	 * We have created our ordered extent, so we can now release our reservation
	 * for an outstanding extent.
	 */
	btrfs_delalloc_release_extents(BTRFS_I(inode), prev_len);

	/*
	 * Need to update the i_size under the extent lock so buffered
	 * readers will get the updated i_size when we unlock.
	 */
	if (start + len > i_size_read(inode))
		i_size_write(inode, start + len);
out:
	if (ret && space_reserved) {
		btrfs_delalloc_release_extents(BTRFS_I(inode), len);
		btrfs_delalloc_release_metadata(BTRFS_I(inode), len, true);
	}
	*lenp = len;
	return ret;
}
static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
		loff_t length, unsigned int flags, struct iomap *iomap,
		struct iomap *srcmap)
{
	struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map *em;
	struct extent_state *cached_state = NULL;
	struct btrfs_dio_data *dio_data = iter->private;
	u64 lockstart, lockend;
	const bool write = !!(flags & IOMAP_WRITE);
	int ret = 0;
	u64 len = length;
	const u64 data_alloc_len = length;
	bool unlock_extents = false;

	/*
	 * We could potentially fault if we have a buffer > PAGE_SIZE, and if
	 * we're NOWAIT we may submit a bio for a partial range and return
	 * EIOCBQUEUED, which would result in an errant short read.
	 *
	 * The best way to handle this would be to allow for partial completions
	 * of iocb's, so we could submit the partial bio, return and fault in
	 * the rest of the pages, and then submit the io for the rest of the
	 * range.  However we don't have that currently, so simply return
	 * -EAGAIN at this point so that the normal path is used.
	 */
	if (!write && (flags & IOMAP_NOWAIT) && length > PAGE_SIZE)
		return -EAGAIN;

	/*
	 * Cap the size of reads to that usually seen in buffered I/O as we need
	 * to allocate a contiguous array for the checksums.
	 */
	if (!write)
		len = min_t(u64, len, fs_info->sectorsize * BTRFS_MAX_BIO_SECTORS);

	lockstart = start;
	lockend = start + len - 1;

	/*
	 * iomap_dio_rw() only does filemap_write_and_wait_range(), which isn't
	 * enough if we've written compressed pages to this area, so we need to
	 * flush the dirty pages again to make absolutely sure that any
	 * outstanding dirty pages are on disk - the first flush only starts
	 * compression on the data, while keeping the pages locked, so by the
	 * time the second flush returns we know bios for the compressed pages
	 * were submitted and finished, and the pages no longer under writeback.
	 *
	 * If we have a NOWAIT request and we have any pages in the range that
	 * are locked, likely due to compression still in progress, we don't want
	 * to block on page locks. We also don't want to block on pages marked as
	 * dirty or under writeback (same as for the non-compression case).
	 * iomap_dio_rw() did the same check, but after that and before we got
	 * here, mmap'ed writes may have happened or buffered reads started
	 * (readpage() and readahead(), which lock pages), as we haven't locked
	 * the file range yet.
	 */
	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
		     &BTRFS_I(inode)->runtime_flags)) {
		if (flags & IOMAP_NOWAIT) {
			if (filemap_range_needs_writeback(inode->i_mapping,
							  lockstart, lockend))
				return -EAGAIN;
		} else {
			ret = filemap_fdatawrite_range(inode->i_mapping, start,
						       start + length - 1);
			if (ret)
				return ret;
		}
	}

	memset(dio_data, 0, sizeof(*dio_data));

	/*
	 * We always try to allocate data space and must do it before locking
	 * the file range, to avoid deadlocks with concurrent writes to the same
	 * range if the range has several extents and the writes don't expand the
	 * current i_size (the inode lock is taken in shared mode). If we fail to
	 * allocate data space here we continue and later, after locking the
	 * file range, we fail with ENOSPC only if we figure out we can not do a
	 * NOCOW write.
	 */
	if (write && !(flags & IOMAP_NOWAIT)) {
		ret = btrfs_check_data_free_space(BTRFS_I(inode),
						  &dio_data->data_reserved,
						  start, data_alloc_len, false);
		if (!ret)
			dio_data->data_space_reserved = true;
		else if (ret && !(BTRFS_I(inode)->flags &
				  (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
			goto err;
	}

	/*
	 * If this errors out it's because we couldn't invalidate pagecache for
	 * this range and we need to fallback to buffered IO, or we are doing a
	 * NOWAIT read/write and we need to block.
	 */
	ret = lock_extent_direct(inode, lockstart, lockend, &cached_state, flags);
	if (ret < 0)
		goto err;

	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto unlock_err;
	}

	/*
	 * Ok for INLINE and COMPRESSED extents we need to fallback on buffered
	 * io.  INLINE is special, and we could probably kludge it in here, but
	 * it's still buffered so for safety lets just fall back to the generic
	 * buffered path.
	 *
	 * For COMPRESSED we _have_ to read the entire extent in so we can
	 * decompress it, so there will be buffering required no matter what we
	 * do, so go ahead and fallback to buffered.
	 *
	 * We return -ENOTBLK because that's what makes DIO go ahead and go back
	 * to buffered IO.  Don't blame me, this is the price we pay for using
	 * the generic code.
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
	    em->block_start == EXTENT_MAP_INLINE) {
		free_extent_map(em);
		/*
		 * If we are in a NOWAIT context, return -EAGAIN in order to
		 * fallback to buffered IO. This is not only because we can
		 * block with buffered IO (no support for NOWAIT semantics at
		 * the moment) but also to avoid returning short reads to user
		 * space - this happens if we were able to read some data from
		 * previous non-compressed extents and then when we fallback to
		 * buffered IO, at btrfs_file_read_iter() by calling
		 * filemap_read(), we fail to fault in pages for the read buffer,
		 * in which case filemap_read() returns a short read (the number
		 * of bytes previously read is > 0, so it does not return -EFAULT).
		 */
		ret = (flags & IOMAP_NOWAIT) ? -EAGAIN : -ENOTBLK;
		goto unlock_err;
	}

	len = min(len, em->len - (start - em->start));

	/*
	 * If we have a NOWAIT request and the range contains multiple extents
	 * (or a mix of extents and holes), then we return -EAGAIN to make the
	 * caller fallback to a context where it can do a blocking (without
	 * NOWAIT) request. This way we avoid doing partial IO and returning
	 * success to the caller, which is not optimal for writes and for reads
	 * it can result in unexpected behaviour for an application.
	 *
	 * When doing a read, because we use IOMAP_DIO_PARTIAL when calling
	 * iomap_dio_rw(), we can end up returning less data then what the caller
	 * asked for, resulting in an unexpected, and incorrect, short read.
	 * That is, the caller asked to read N bytes and we return less than that,
	 * which is wrong unless we are crossing EOF. This happens if we get a
	 * page fault error when trying to fault in pages for the buffer that is
	 * associated to the struct iov_iter passed to iomap_dio_rw(), and we
	 * have previously submitted bios for other extents in the range, in
	 * which case iomap_dio_rw() may return us EIOCBQUEUED if not all of
	 * those bios have completed by the time we get the page fault error,
	 * which we return back to our caller - we should only return EIOCBQUEUED
	 * after we have submitted bios for all the extents in the range.
	 */
	if ((flags & IOMAP_NOWAIT) && len < length) {
		free_extent_map(em);
		ret = -EAGAIN;
		goto unlock_err;
	}

	if (write) {
		ret = btrfs_get_blocks_direct_write(&em, inode, dio_data,
						    start, &len, flags);
		if (ret < 0)
			goto unlock_err;
		unlock_extents = true;
		/* Recalc len in case the new em is smaller than requested */
		len = min(len, em->len - (start - em->start));
		if (dio_data->data_space_reserved) {
			u64 release_offset;
			u64 release_len = 0;

			if (dio_data->nocow_done) {
				release_offset = start;
				release_len = data_alloc_len;
			} else if (len < data_alloc_len) {
				release_offset = start + len;
				release_len = data_alloc_len - len;
			}

			if (release_len > 0)
				btrfs_free_reserved_data_space(BTRFS_I(inode),
							       dio_data->data_reserved,
							       release_offset,
							       release_len);
		}
	} else {
		/*
		 * We need to unlock only the end area that we aren't using.
		 * The rest is going to be unlocked by the endio routine.
		 */
		lockstart = start + len;
		if (lockstart < lockend)
			unlock_extents = true;
	}

	if (unlock_extents)
		unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			      &cached_state);
	else
		free_extent_state(cached_state);

	/*
	 * Translate extent map information to iomap.
	 * We trim the extents (and move the addr) even though iomap code does
	 * that, since we have locked only the parts we are performing I/O in.
	 */
	if ((em->block_start == EXTENT_MAP_HOLE) ||
	    (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) && !write)) {
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_HOLE;
	} else {
		iomap->addr = em->block_start + (start - em->start);
		iomap->type = IOMAP_MAPPED;
	}
	iomap->offset = start;
	iomap->bdev = fs_info->fs_devices->latest_dev->bdev;
	iomap->length = len;
	free_extent_map(em);

	return 0;

unlock_err:
	unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
		      &cached_state);
err:
	if (dio_data->data_space_reserved) {
		btrfs_free_reserved_data_space(BTRFS_I(inode),
					       dio_data->data_reserved,
					       start, data_alloc_len);
		extent_changeset_free(dio_data->data_reserved);
	}

	return ret;
}
static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length,
		ssize_t written, unsigned int flags, struct iomap *iomap)
{
	struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
	struct btrfs_dio_data *dio_data = iter->private;
	size_t submitted = dio_data->submitted;
	const bool write = !!(flags & IOMAP_WRITE);
	int ret = 0;

	if (!write && (iomap->type == IOMAP_HOLE)) {
		/* If reading from a hole, unlock and return */
		unlock_extent(&BTRFS_I(inode)->io_tree, pos, pos + length - 1,
			      NULL);
		return 0;
	}

	if (submitted < length) {
		pos += submitted;
		length -= submitted;
		if (write)
			btrfs_mark_ordered_io_finished(BTRFS_I(inode), NULL,
						       pos, length, false);
		else
			unlock_extent(&BTRFS_I(inode)->io_tree, pos,
				      pos + length - 1, NULL);
		ret = -ENOTBLK;
	}
	if (write) {
		btrfs_put_ordered_extent(dio_data->ordered);
		dio_data->ordered = NULL;
	}

	if (write)
		extent_changeset_free(dio_data->data_reserved);
	return ret;
}
static void btrfs_dio_end_io(struct btrfs_bio *bbio)
{
	struct btrfs_dio_private *dip =
		container_of(bbio, struct btrfs_dio_private, bbio);
	struct btrfs_inode *inode = bbio->inode;
	struct bio *bio = &bbio->bio;

	if (bio->bi_status) {
		btrfs_warn(inode->root->fs_info,
		"direct IO failed ino %llu op 0x%0x offset %#llx len %u err no %d",
			   btrfs_ino(inode), bio->bi_opf,
			   dip->file_offset, dip->bytes, bio->bi_status);
	}

	if (btrfs_op(bio) == BTRFS_MAP_WRITE)
		btrfs_mark_ordered_io_finished(inode, NULL, dip->file_offset,
					       dip->bytes, !bio->bi_status);
	else
		unlock_extent(&inode->io_tree, dip->file_offset,
			      dip->file_offset + dip->bytes - 1, NULL);

	bbio->bio.bi_private = bbio->private;
	iomap_dio_bio_end_io(bio);
}
static void btrfs_dio_submit_io(const struct iomap_iter *iter, struct bio *bio,
				loff_t file_offset)
{
	struct btrfs_bio *bbio = btrfs_bio(bio);
	struct btrfs_dio_private *dip =
		container_of(bbio, struct btrfs_dio_private, bbio);
	struct btrfs_dio_data *dio_data = iter->private;

	btrfs_bio_init(bbio, BTRFS_I(iter->inode)->root->fs_info,
		       btrfs_dio_end_io, bio->bi_private);
	bbio->inode = BTRFS_I(iter->inode);
	bbio->file_offset = file_offset;

	dip->file_offset = file_offset;
	dip->bytes = bio->bi_iter.bi_size;

	dio_data->submitted += bio->bi_iter.bi_size;

	/*
	 * Check if we are doing a partial write.  If we are, we need to split
	 * the ordered extent to match the submitted bio.  Hang on to the
	 * remaining unfinishable ordered_extent in dio_data so that it can be
	 * cancelled in iomap_end to avoid a deadlock wherein faulting the
	 * remaining pages is blocked on the outstanding ordered extent.
	 */
	if (iter->flags & IOMAP_WRITE) {
		int ret;

		ret = btrfs_extract_ordered_extent(bbio, dio_data->ordered);
		if (ret) {
			btrfs_bio_end_io(bbio, errno_to_blk_status(ret));
			return;
		}
	}

	btrfs_submit_bio(bbio, 0);
}

static const struct iomap_ops btrfs_dio_iomap_ops = {
	.iomap_begin		= btrfs_dio_iomap_begin,
	.iomap_end		= btrfs_dio_iomap_end,
};

static const struct iomap_dio_ops btrfs_dio_ops = {
	.submit_io		= btrfs_dio_submit_io,
	.bio_set		= &btrfs_dio_bioset,
};

ssize_t btrfs_dio_read(struct kiocb *iocb, struct iov_iter *iter, size_t done_before)
{
	struct btrfs_dio_data data = { 0 };

	return iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
			    IOMAP_DIO_PARTIAL, &data, done_before);
}

struct iomap_dio *btrfs_dio_write(struct kiocb *iocb, struct iov_iter *iter,
				  size_t done_before)
{
	struct btrfs_dio_data data = { 0 };

	return __iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
			      IOMAP_DIO_PARTIAL, &data, done_before);
}
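
/*
 * Illustrative sketch, not part of the original file: this is roughly how
 * the O_DIRECT read path consumes btrfs_dio_read(); -ENOTBLK tells the
 * caller to fall back to buffered IO for the remainder. Example-only, #if 0.
 */
#if 0
static ssize_t example_direct_read(struct kiocb *iocb, struct iov_iter *to)
{
	ssize_t ret = btrfs_dio_read(iocb, to, 0 /* done_before */);

	if (ret == -ENOTBLK)
		ret = 0;	/* let the caller continue with buffered IO */
	return ret;
}
#endif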
static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
			u64 start, u64 len)
{
	int ret;

	ret = fiemap_prep(inode, fieinfo, start, &len, 0);
	if (ret)
		return ret;

	/*
	 * fiemap_prep() called filemap_write_and_wait() for the whole possible
	 * file range (0 to LLONG_MAX), but that is not enough if we have
	 * compression enabled. The first filemap_fdatawrite_range() only kicks
	 * in the compression of data (in an async thread) and will return
	 * before the compression is done and writeback is started. A second
	 * filemap_fdatawrite_range() is needed to wait for the compression to
	 * complete and writeback to start. We also need to wait for ordered
	 * extents to complete, because our fiemap implementation uses mainly
	 * file extent items to list the extents, searching for extent maps
	 * only for file ranges with holes or prealloc extents to figure out
	 * if we have delalloc in those ranges.
	 */
	if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC) {
		ret = btrfs_wait_ordered_range(inode, 0, LLONG_MAX);
		if (ret)
			return ret;
	}

	return extent_fiemap(BTRFS_I(inode), fieinfo, start, len);
}
static int btrfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	return extent_writepages(mapping, wbc);
}

static void btrfs_readahead(struct readahead_control *rac)
{
	extent_readahead(rac);
}
/*
 * For release_folio() and invalidate_folio() we have a race window where
 * folio_end_writeback() is called but the subpage spinlock is not yet released.
 * If we continue to release/invalidate the page, we could cause use-after-free
 * for subpage spinlock.  So this function is to spin and wait for subpage
 * spinlock.
 */
static void wait_subpage_spinlock(struct page *page)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
	struct btrfs_subpage *subpage;

	if (!btrfs_is_subpage(fs_info, page))
		return;

	ASSERT(PagePrivate(page) && page->private);
	subpage = (struct btrfs_subpage *)page->private;

	/*
	 * This may look insane as we just acquire the spinlock and release it,
	 * without doing anything.  But we just want to make sure no one is
	 * still holding the subpage spinlock.
	 * And since the page is not dirty nor writeback, and we have page
	 * locked, the only possible way to hold a spinlock is from the endio
	 * function to clear page writeback.
	 *
	 * Here we just acquire the spinlock so that all existing callers
	 * should exit and we're safe to release/invalidate the page.
	 */
	spin_lock_irq(&subpage->lock);
	spin_unlock_irq(&subpage->lock);
}
static bool __btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
{
	int ret = try_release_extent_mapping(&folio->page, gfp_flags);

	if (ret == 1) {
		wait_subpage_spinlock(&folio->page);
		clear_page_extent_mapped(&folio->page);
	}
	return ret;
}

static bool btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
{
	if (folio_test_writeback(folio) || folio_test_dirty(folio))
		return false;
	return __btrfs_release_folio(folio, gfp_flags);
}
#ifdef CONFIG_MIGRATION
static int btrfs_migrate_folio(struct address_space *mapping,
			       struct folio *dst, struct folio *src,
			       enum migrate_mode mode)
{
	int ret = filemap_migrate_folio(mapping, dst, src, mode);

	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (folio_test_ordered(src)) {
		folio_clear_ordered(src);
		folio_set_ordered(dst);
	}

	return MIGRATEPAGE_SUCCESS;
}
#else
#define btrfs_migrate_folio NULL
#endif
static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
				   size_t length)
{
	struct btrfs_inode *inode = BTRFS_I(folio->mapping->host);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_io_tree *tree = &inode->io_tree;
	struct extent_state *cached_state = NULL;
	u64 page_start = folio_pos(folio);
	u64 page_end = page_start + folio_size(folio) - 1;
	u64 cur;
	int inode_evicting = inode->vfs_inode.i_state & I_FREEING;

	/*
	 * We have folio locked so no new ordered extent can be created on this
	 * page, nor bio can be submitted for this folio.
	 *
	 * But already submitted bio can still be finished on this folio.
	 * Furthermore, endio function won't skip folio which has Ordered
	 * (Private2) already cleared, so it's possible for endio and
	 * invalidate_folio to do the same ordered extent accounting twice
	 * on one folio.
	 *
	 * So here we wait for any submitted bios to finish, so that we won't
	 * do double ordered extent accounting on the same folio.
	 */
	folio_wait_writeback(folio);
	wait_subpage_spinlock(&folio->page);

	/*
	 * For subpage case, we have call sites like
	 * btrfs_punch_hole_lock_range() which passes range not aligned to
	 * sectorsize.
	 * If the range doesn't cover the full folio, we don't need to and
	 * shouldn't clear page extent mapped, as folio->private can still
	 * record subpage dirty bits for other part of the range.
	 *
	 * For cases that invalidate the full folio even the range doesn't
	 * cover the full folio, like invalidating the last folio, we're
	 * still safe to wait for ordered extent to finish.
	 */
	if (!(offset == 0 && length == folio_size(folio))) {
		btrfs_release_folio(folio, GFP_NOFS);
		return;
	}

	if (!inode_evicting)
		lock_extent(tree, page_start, page_end, &cached_state);

	cur = page_start;
	while (cur < page_end) {
		struct btrfs_ordered_extent *ordered;
		u64 range_end;
		u32 range_len;
		u32 extra_flags = 0;

		ordered = btrfs_lookup_first_ordered_range(inode, cur,
							   page_end + 1 - cur);
		if (!ordered) {
			range_end = page_end;
			/*
			 * No ordered extent covering this range, we are safe
			 * to delete all extent states in the range.
			 */
			extra_flags = EXTENT_CLEAR_ALL_BITS;
			goto next;
		}
		if (ordered->file_offset > cur) {
			/*
			 * There is a range between [cur, oe->file_offset) not
			 * covered by any ordered extent.
			 * We are safe to delete all extent states, and handle
			 * the ordered extent in the next iteration.
			 */
			range_end = ordered->file_offset - 1;
			extra_flags = EXTENT_CLEAR_ALL_BITS;
			goto next;
		}

		range_end = min(ordered->file_offset + ordered->num_bytes - 1,
				page_end);
		ASSERT(range_end + 1 - cur < U32_MAX);
		range_len = range_end + 1 - cur;
		if (!btrfs_page_test_ordered(fs_info, &folio->page, cur, range_len)) {
			/*
			 * If Ordered (Private2) is cleared, it means endio has
			 * already been executed for the range.
			 * We can't delete the extent states as
			 * btrfs_finish_ordered_io() may still use some of them.
			 */
			goto next;
		}
		btrfs_page_clear_ordered(fs_info, &folio->page, cur, range_len);

		/*
		 * IO on this page will never be started, so we need to account
		 * for any ordered extents now. Don't clear EXTENT_DELALLOC_NEW
		 * here, must leave that up for the ordered extent completion.
		 *
		 * This will also unlock the range for incoming
		 * btrfs_finish_ordered_io().
		 */
		if (!inode_evicting)
			clear_extent_bit(tree, cur, range_end,
					 EXTENT_DELALLOC |
					 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
					 EXTENT_DEFRAG, &cached_state);

		spin_lock_irq(&inode->ordered_tree.lock);
		set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
		ordered->truncated_len = min(ordered->truncated_len,
					     cur - ordered->file_offset);
		spin_unlock_irq(&inode->ordered_tree.lock);

		/*
		 * If the ordered extent has finished, we're safe to delete all
		 * the extent states of the range, otherwise
		 * btrfs_finish_ordered_io() will get executed by endio for
		 * other pages, so we can't delete extent states.
		 */
		if (btrfs_dec_test_ordered_pending(inode, &ordered,
						   cur, range_end + 1 - cur)) {
			btrfs_finish_ordered_io(ordered);
			/*
			 * The ordered extent has finished, now we're again
			 * safe to delete all extent states of the range.
			 */
			extra_flags = EXTENT_CLEAR_ALL_BITS;
		}
next:
		if (ordered)
			btrfs_put_ordered_extent(ordered);
		/*
		 * Qgroup reserved space handler
		 * Sector(s) here will be either:
		 *
		 * 1) Already written to disk or bio already finished
		 *    Then its QGROUP_RESERVED bit in io_tree is already cleared.
		 *    Qgroup will be handled by its qgroup_record then.
		 *    btrfs_qgroup_free_data() call will do nothing here.
		 *
		 * 2) Not written to disk yet
		 *    Then btrfs_qgroup_free_data() call will clear the
		 *    QGROUP_RESERVED bit of its io_tree, and free the qgroup
		 *    reserved data space.
		 *    Since the IO will never happen for this page.
		 */
		btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur);
		if (!inode_evicting) {
			clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED |
					 EXTENT_DELALLOC | EXTENT_UPTODATE |
					 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG |
					 extra_flags, &cached_state);
		}
		cur = range_end + 1;
	}
	/*
	 * We have iterated through all ordered extents of the page, the page
	 * should not have Ordered (Private2) anymore, or the above iteration
	 * did something wrong.
	 */
	ASSERT(!folio_test_ordered(folio));
	btrfs_page_clear_checked(fs_info, &folio->page, folio_pos(folio), folio_size(folio));
	if (!inode_evicting)
		__btrfs_release_folio(folio, GFP_NOFS);
	clear_page_extent_mapped(&folio->page);
}
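
/*
 * Illustrative worked example, not part of the original file: invalidating a
 * fully dirty 4K folio covered by a single unfinished 4K ordered extent runs
 * one loop iteration above: cur == page_start, range_end == page_end, the
 * Ordered bit is cleared, truncated_len shrinks to 0, and if these were the
 * last pending sectors btrfs_finish_ordered_io() completes the extent and
 * EXTENT_CLEAR_ALL_BITS wipes the extent states for the whole range.
 */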
/*
 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
 * called from a page fault handler when a page is first dirtied. Hence we must
 * be careful to check for EOF conditions here. We set the page up correctly
 * for a written page which means we get ENOSPC checking when writing into
 * holes and correct delalloc and unwritten extent mapping on filesystems that
 * support these features.
 *
 * We are not allowed to take the i_mutex here so we have to play games to
 * protect against truncate races as the page could now be beyond EOF. Because
 * truncate_setsize() writes the inode size before removing pages, once we have
 * the page lock we can determine safely if the page is beyond EOF. If it is not
 * beyond EOF, then the page is guaranteed safe against truncation until we
 * unlock the page.
 */
vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct extent_changeset *data_reserved = NULL;
	unsigned long zero_start;
	loff_t size;
	vm_fault_t ret;
	int ret2;
	int reserved = 0;
	u64 reserved_space;
	u64 page_start;
	u64 page_end;
	u64 end;

	reserved_space = PAGE_SIZE;

	sb_start_pagefault(inode->i_sb);
	page_start = page_offset(page);
	page_end = page_start + PAGE_SIZE - 1;
	end = page_end;

	/*
	 * Reserving delalloc space after obtaining the page lock can lead to
	 * deadlock. For example, if a dirty page is locked by this function
	 * and the call to btrfs_delalloc_reserve_space() ends up triggering
	 * dirty page write out, then the btrfs_writepages() function could
	 * end up waiting indefinitely to get a lock on the page currently
	 * being processed by btrfs_page_mkwrite() function.
	 */
	ret2 = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved,
					    page_start, reserved_space);
	if (!ret2) {
		ret2 = file_update_time(vmf->vma->vm_file);
		reserved = 1;
	}
	if (ret2) {
		ret = vmf_error(ret2);
		if (reserved)
			goto out;
		goto out_noreserve;
	}

	ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
again:
	down_read(&BTRFS_I(inode)->i_mmap_lock);
	lock_page(page);
	size = i_size_read(inode);

	if ((page->mapping != inode->i_mapping) ||
	    (page_start >= size)) {
		/* page got truncated out from underneath us */
		goto out_unlock;
	}
	wait_on_page_writeback(page);

	lock_extent(io_tree, page_start, page_end, &cached_state);
	ret2 = set_page_extent_mapped(page);
	if (ret2 < 0) {
		ret = vmf_error(ret2);
		unlock_extent(io_tree, page_start, page_end, &cached_state);
		goto out_unlock;
	}

	/*
	 * we can't set the delalloc bits if there are pending ordered
	 * extents. Drop our locks and wait for them to finish
	 */
	ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start,
					     PAGE_SIZE);
	if (ordered) {
		unlock_extent(io_tree, page_start, page_end, &cached_state);
		unlock_page(page);
		up_read(&BTRFS_I(inode)->i_mmap_lock);
		btrfs_start_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	if (page->index == ((size - 1) >> PAGE_SHIFT)) {
		reserved_space = round_up(size - page_start,
					  fs_info->sectorsize);
		if (reserved_space < PAGE_SIZE) {
			end = page_start + reserved_space - 1;
			btrfs_delalloc_release_space(BTRFS_I(inode),
					data_reserved, page_start,
					PAGE_SIZE - reserved_space, true);
		}
	}

	/*
	 * page_mkwrite gets called when the page is firstly dirtied after it's
	 * faulted in, but write(2) could also dirty a page and set delalloc
	 * bits, thus in this case for space account reason, we still need to
	 * clear any delalloc bits within this page range since we have to
	 * reserve data&meta space before lock_page() (see above comments).
	 */
	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
			 EXTENT_DEFRAG, &cached_state);

	ret2 = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start, end, 0,
					 &cached_state);
	if (ret2) {
		unlock_extent(io_tree, page_start, page_end, &cached_state);
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (page_start + PAGE_SIZE > size)
		zero_start = offset_in_page(size);
	else
		zero_start = PAGE_SIZE;

	if (zero_start != PAGE_SIZE)
		memzero_page(page, zero_start, PAGE_SIZE - zero_start);

	btrfs_page_clear_checked(fs_info, page, page_start, PAGE_SIZE);
	btrfs_page_set_dirty(fs_info, page, page_start, end + 1 - page_start);
	btrfs_page_set_uptodate(fs_info, page, page_start, end + 1 - page_start);

	btrfs_set_inode_last_sub_trans(BTRFS_I(inode));

	unlock_extent(io_tree, page_start, page_end, &cached_state);
	up_read(&BTRFS_I(inode)->i_mmap_lock);

	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
	sb_end_pagefault(inode->i_sb);
	extent_changeset_free(data_reserved);
	return VM_FAULT_LOCKED;

out_unlock:
	unlock_page(page);
	up_read(&BTRFS_I(inode)->i_mmap_lock);
out:
	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
	btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, page_start,
				     reserved_space, (ret != 0));
out_noreserve:
	sb_end_pagefault(inode->i_sb);
	extent_changeset_free(data_reserved);
	return ret;
}
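
/*
 * Truncate the inode's items down to i_size. Runs as a series of
 * restartable transactions so the space used by the truncation never mixes
 * with the reservation needed to update the inode (see the long comment in
 * the function body).
 */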
static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
{
	struct btrfs_truncate_control control = {
		.inode = inode,
		.ino = btrfs_ino(inode),
		.min_type = BTRFS_EXTENT_DATA_KEY,
		.clear_extent_range = true,
	};
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *rsv;
	int ret;
	struct btrfs_trans_handle *trans;
	u64 mask = fs_info->sectorsize - 1;
	u64 min_size = btrfs_calc_metadata_size(fs_info, 1);

	if (!skip_writeback) {
		ret = btrfs_wait_ordered_range(&inode->vfs_inode,
					       inode->vfs_inode.i_size & (~mask),
					       (u64)-1);
		if (ret)
			return ret;
	}

	/*
	 * Yes ladies and gentlemen, this is indeed ugly. We have a couple of
	 * things going on here:
	 *
	 * 1) We need to reserve space to update our inode.
	 *
	 * 2) We need to have something to cache all the space that is going to
	 * be free'd up by the truncate operation, but also have some slack
	 * space reserved in case it uses space during the truncate (thank you
	 * very much snapshotting).
	 *
	 * And we need these to be separate. The fact is we can use a lot of
	 * space doing the truncate, and we have no earthly idea how much space
	 * we will use, so we need the truncate reservation to be separate so it
	 * doesn't end up using space reserved for updating the inode. We also
	 * need to be able to stop the transaction and start a new one, which
	 * means we need to be able to update the inode several times, and we
	 * have no idea of knowing how many times that will be, so we can't just
	 * reserve 1 item for the entirety of the operation, so that has to be
	 * done separately as well.
	 *
	 * So that leaves us with
	 *
	 * 1) rsv - for the truncate reservation, which we will steal from the
	 * transaction reservation.
	 * 2) fs_info->trans_block_rsv - this will have 1 items worth left for
	 * updating the inode.
	 */
	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
	if (!rsv)
		return -ENOMEM;
	rsv->size = min_size;
	rsv->failfast = true;

	/*
	 * 1 for the truncate slack space
	 * 1 for updating the inode.
	 */
	trans = btrfs_start_transaction(root, 2);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	/* Migrate the slack space for the truncate to our reserve */
	ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
				      min_size, false);
	BUG_ON(ret);

	trans->block_rsv = rsv;

	while (1) {
		struct extent_state *cached_state = NULL;
		const u64 new_size = inode->vfs_inode.i_size;
		const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize);

		control.new_size = new_size;
		lock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
		/*
		 * We want to drop from the next block forward in case this new
		 * size is not block aligned since we will be keeping the last
		 * block of the extent just the way it is.
		 */
		btrfs_drop_extent_map_range(inode,
					    ALIGN(new_size, fs_info->sectorsize),
					    (u64)-1, false);

		ret = btrfs_truncate_inode_items(trans, root, &control);

		inode_sub_bytes(&inode->vfs_inode, control.sub_bytes);
		btrfs_inode_safe_disk_i_size_write(inode, control.last_size);

		unlock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);

		trans->block_rsv = &fs_info->trans_block_rsv;
		if (ret != -ENOSPC && ret != -EAGAIN)
			break;

		ret = btrfs_update_inode(trans, root, inode);
		if (ret)
			break;

		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty(fs_info);

		trans = btrfs_start_transaction(root, 2);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			break;
		}

		btrfs_block_rsv_release(fs_info, rsv, -1, NULL);
		ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
					      rsv, min_size, false);
		BUG_ON(ret);	/* shouldn't happen */
		trans->block_rsv = rsv;
	}

	/*
	 * We can't call btrfs_truncate_block inside a trans handle as we could
	 * deadlock with freeze, if we got BTRFS_NEED_TRUNCATE_BLOCK then we
	 * know we've truncated everything except the last little bit, and can
	 * do btrfs_truncate_block and then update the disk_i_size.
	 */
	if (ret == BTRFS_NEED_TRUNCATE_BLOCK) {
		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty(fs_info);

		ret = btrfs_truncate_block(inode, inode->vfs_inode.i_size, 0, 0);
		if (ret)
			goto out;
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			goto out;
		}
		btrfs_inode_safe_disk_i_size_write(inode, 0);
	}

	if (trans) {
		int ret2;

		trans->block_rsv = &fs_info->trans_block_rsv;
		ret2 = btrfs_update_inode(trans, root, inode);
		if (ret2 && !ret)
			ret = ret2;

		ret2 = btrfs_end_transaction(trans);
		if (ret2 && !ret)
			ret = ret2;
		btrfs_btree_balance_dirty(fs_info);
	}
out:
	btrfs_free_block_rsv(fs_info, rsv);
	/*
	 * So if we truncate and then write and fsync we normally would just
	 * write the extents that changed, which is a problem if we need to
	 * first truncate that entire inode. So set this flag so we write out
	 * all of the extents in the inode to the sync log so we're completely
	 * safe.
	 *
	 * If no extents were dropped or trimmed we don't need to force the next
	 * fsync to truncate all the inode's items from the log and re-log them
	 * all. This means the truncate operation did not change the file size,
	 * or changed it to a smaller size but there was only an implicit hole
	 * between the old i_size and the new i_size, and there were no prealloc
	 * extents beyond i_size to drop.
	 */
	if (control.extents_found > 0)
		btrfs_set_inode_full_sync(inode);

	return ret;
}
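
/* Allocate and set up the directory inode that roots a new subvolume. */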
struct inode *btrfs_new_subvol_inode(struct mnt_idmap *idmap,
				     struct inode *dir)
{
	struct inode *inode;

	inode = new_inode(dir->i_sb);
	if (inode) {
		/*
		 * Subvolumes don't inherit the sgid bit or the parent's gid if
		 * the parent's sgid bit is set. This is probably a bug.
		 */
		inode_init_owner(idmap, inode, NULL,
				 S_IFDIR | (~current_umask() & S_IRWXUGO));
		inode->i_op = &btrfs_dir_inode_operations;
		inode->i_fop = &btrfs_dir_file_operations;
	}
	return inode;
}
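
/* Allocate a new in-memory btrfs inode and initialize its runtime state. */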
struct inode *btrfs_alloc_inode(struct super_block *sb)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
	struct btrfs_inode *ei;
	struct inode *inode;

	ei = alloc_inode_sb(sb, btrfs_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;

	ei->root = NULL;
	ei->generation = 0;
	ei->last_trans = 0;
	ei->last_sub_trans = 0;
	ei->logged_trans = 0;
	ei->delalloc_bytes = 0;
	ei->new_delalloc_bytes = 0;
	ei->defrag_bytes = 0;
	ei->disk_i_size = 0;
	ei->flags = 0;
	ei->ro_flags = 0;
	ei->csum_bytes = 0;
	ei->index_cnt = (u64)-1;
	ei->dir_index = 0;
	ei->last_unlink_trans = 0;
	ei->last_reflink_trans = 0;
	ei->last_log_commit = 0;

	spin_lock_init(&ei->lock);
	ei->outstanding_extents = 0;
	if (sb->s_magic != BTRFS_TEST_MAGIC)
		btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv,
					      BTRFS_BLOCK_RSV_DELALLOC);
	ei->runtime_flags = 0;
	ei->prop_compress = BTRFS_COMPRESS_NONE;
	ei->defrag_compress = BTRFS_COMPRESS_NONE;

	ei->delayed_node = NULL;

	ei->i_otime.tv_sec = 0;
	ei->i_otime.tv_nsec = 0;

	inode = &ei->vfs_inode;
	extent_map_tree_init(&ei->extent_tree);
	extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO);
	ei->io_tree.inode = ei;
	extent_io_tree_init(fs_info, &ei->file_extent_tree,
			    IO_TREE_INODE_FILE_EXTENT);
	mutex_init(&ei->log_mutex);
	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
	INIT_LIST_HEAD(&ei->delalloc_inodes);
	INIT_LIST_HEAD(&ei->delayed_iput);
	RB_CLEAR_NODE(&ei->rb_node);
	init_rwsem(&ei->i_mmap_lock);

	return inode;
}
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
void btrfs_test_destroy_inode(struct inode *inode)
{
	btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);
	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}
#endif

void btrfs_free_inode(struct inode *inode)
{
	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}
void btrfs_destroy_inode(struct inode *vfs_inode)
{
	struct btrfs_ordered_extent *ordered;
	struct btrfs_inode *inode = BTRFS_I(vfs_inode);
	struct btrfs_root *root = inode->root;
	bool freespace_inode;

	WARN_ON(!hlist_empty(&vfs_inode->i_dentry));
	WARN_ON(vfs_inode->i_data.nrpages);
	WARN_ON(inode->block_rsv.reserved);
	WARN_ON(inode->block_rsv.size);
	WARN_ON(inode->outstanding_extents);
	if (!S_ISDIR(vfs_inode->i_mode)) {
		WARN_ON(inode->delalloc_bytes);
		WARN_ON(inode->new_delalloc_bytes);
	}
	WARN_ON(inode->csum_bytes);
	WARN_ON(inode->defrag_bytes);

	/*
	 * This can happen where we create an inode, but somebody else also
	 * created the same inode and we need to destroy the one we already
	 * created.
	 */
	if (!root)
		return;

	/*
	 * If this is a free space inode do not take the ordered extents lockdep
	 * map.
	 */
	freespace_inode = btrfs_is_free_space_inode(inode);

	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
		if (!ordered)
			break;
		else {
			btrfs_err(root->fs_info,
				  "found ordered extent %llu %llu on inode cleanup",
				  ordered->file_offset, ordered->num_bytes);

			if (!freespace_inode)
				btrfs_lockdep_acquire(root->fs_info, btrfs_ordered_extent);

			btrfs_remove_ordered_extent(inode, ordered);
			btrfs_put_ordered_extent(ordered);
			btrfs_put_ordered_extent(ordered);
		}
	}
	btrfs_qgroup_check_reserved_leak(inode);
	inode_tree_del(inode);
	btrfs_drop_extent_map_range(inode, 0, (u64)-1, false);
	btrfs_inode_clear_file_extent_range(inode, 0, (u64)-1);
	btrfs_put_root(inode->root);
}
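
/*
 * Decide whether an unused inode should be evicted immediately: always do
 * so if its subvolume is being deleted, otherwise defer to the generic
 * behaviour.
 */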
int btrfs_drop_inode(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (root == NULL)
		return 1;

	/* the snap/subvol tree is on deleting */
	if (btrfs_root_refs(&root->root_item) == 0)
		return 1;
	else
		return generic_drop_inode(inode);
}
static void init_once(void *foo)
{
	struct btrfs_inode *ei = foo;

	inode_init_once(&ei->vfs_inode);
}
void __cold btrfs_destroy_cachep(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	bioset_exit(&btrfs_dio_bioset);
	kmem_cache_destroy(btrfs_inode_cachep);
}
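
/* Create the btrfs_inode slab cache and the bioset used for direct I/O. */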
int __init btrfs_init_cachep(void)
{
	btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
			sizeof(struct btrfs_inode), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT,
			init_once);
	if (!btrfs_inode_cachep)
		goto fail;

	if (bioset_init(&btrfs_dio_bioset, BIO_POOL_SIZE,
			offsetof(struct btrfs_dio_private, bbio.bio),
			BIOSET_NEED_BVECS))
		goto fail;

	return 0;
fail:
	btrfs_destroy_cachep();
	return -ENOMEM;
}
static int btrfs_getattr(struct mnt_idmap *idmap,
			 const struct path *path, struct kstat *stat,
			 u32 request_mask, unsigned int flags)
{
	u64 delalloc_bytes;
	u64 inode_bytes;
	struct inode *inode = d_inode(path->dentry);
	u32 blocksize = inode->i_sb->s_blocksize;
	u32 bi_flags = BTRFS_I(inode)->flags;
	u32 bi_ro_flags = BTRFS_I(inode)->ro_flags;

	stat->result_mask |= STATX_BTIME;
	stat->btime.tv_sec = BTRFS_I(inode)->i_otime.tv_sec;
	stat->btime.tv_nsec = BTRFS_I(inode)->i_otime.tv_nsec;
	if (bi_flags & BTRFS_INODE_APPEND)
		stat->attributes |= STATX_ATTR_APPEND;
	if (bi_flags & BTRFS_INODE_COMPRESS)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (bi_flags & BTRFS_INODE_IMMUTABLE)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (bi_flags & BTRFS_INODE_NODUMP)
		stat->attributes |= STATX_ATTR_NODUMP;
	if (bi_ro_flags & BTRFS_INODE_RO_VERITY)
		stat->attributes |= STATX_ATTR_VERITY;

	stat->attributes_mask |= (STATX_ATTR_APPEND |
				  STATX_ATTR_COMPRESSED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP);

	generic_fillattr(idmap, inode, stat);
	stat->dev = BTRFS_I(inode)->root->anon_dev;

	spin_lock(&BTRFS_I(inode)->lock);
	delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes;
	inode_bytes = inode_get_bytes(inode);
	spin_unlock(&BTRFS_I(inode)->lock);
	stat->blocks = (ALIGN(inode_bytes, blocksize) +
			ALIGN(delalloc_bytes, blocksize)) >> SECTOR_SHIFT;
	return 0;
}
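
/*
 * Implement the RENAME_EXCHANGE flag of renameat2(): atomically swap two
 * directory entries.
 */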
static int btrfs_rename_exchange(struct inode *old_dir,
				 struct dentry *old_dentry,
				 struct inode *new_dir,
				 struct dentry *new_dentry)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
	struct btrfs_trans_handle *trans;
	unsigned int trans_num_items;
	struct btrfs_root *root = BTRFS_I(old_dir)->root;
	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
	struct inode *new_inode = new_dentry->d_inode;
	struct inode *old_inode = old_dentry->d_inode;
	struct timespec64 ctime = current_time(old_inode);
	struct btrfs_rename_ctx old_rename_ctx;
	struct btrfs_rename_ctx new_rename_ctx;
	u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
	u64 new_ino = btrfs_ino(BTRFS_I(new_inode));
	u64 old_idx = 0;
	u64 new_idx = 0;
	int ret;
	int ret2;
	bool need_abort = false;
	struct fscrypt_name old_fname, new_fname;
	struct fscrypt_str *old_name, *new_name;

	/*
	 * For non-subvolumes allow exchange only within one subvolume, in the
	 * same inode namespace. Two subvolumes (represented as directory) can
	 * be exchanged as they're a logical link and have a fixed inode number.
	 */
	if (root != dest &&
	    (old_ino != BTRFS_FIRST_FREE_OBJECTID ||
	     new_ino != BTRFS_FIRST_FREE_OBJECTID))
		return -EXDEV;

	ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname);
	if (ret)
		return ret;

	ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname);
	if (ret) {
		fscrypt_free_filename(&old_fname);
		return ret;
	}

	old_name = &old_fname.disk_name;
	new_name = &new_fname.disk_name;

	/* close the race window with snapshot create/destroy ioctl */
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID ||
	    new_ino == BTRFS_FIRST_FREE_OBJECTID)
		down_read(&fs_info->subvol_sem);

	/*
	 * For each inode:
	 * 1 to remove old dir item
	 * 1 to remove old dir index
	 * 1 to add new dir item
	 * 1 to add new dir index
	 * 1 to update parent inode
	 *
	 * If the parents are the same, we only need to account for one
	 */
	trans_num_items = (old_dir == new_dir ? 9 : 10);
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
		/*
		 * 1 to remove old root ref
		 * 1 to remove old root backref
		 * 1 to add new root ref
		 * 1 to add new root backref
		 */
		trans_num_items += 4;
	} else {
		/*
		 * 1 to update inode item
		 * 1 to remove old inode ref
		 * 1 to add new inode ref
		 */
		trans_num_items += 3;
	}
	if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
		trans_num_items += 4;
	else
		trans_num_items += 3;
	trans = btrfs_start_transaction(root, trans_num_items);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_notrans;
	}

	if (dest != root) {
		ret = btrfs_record_root_in_trans(trans, dest);
		if (ret)
			goto out_fail;
	}

	/*
	 * We need to find a free sequence number both in the source and
	 * in the destination directory for the exchange.
	 */
	ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx);
	if (ret)
		goto out_fail;
	ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx);
	if (ret)
		goto out_fail;

	BTRFS_I(old_inode)->dir_index = 0ULL;
	BTRFS_I(new_inode)->dir_index = 0ULL;

	/* Reference for the source. */
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
		/* force full log commit if subvolume involved. */
		btrfs_set_log_full_commit(trans);
	} else {
		ret = btrfs_insert_inode_ref(trans, dest, new_name, old_ino,
					     btrfs_ino(BTRFS_I(new_dir)),
					     old_idx);
		if (ret)
			goto out_fail;
		need_abort = true;
	}

	/* And now for the dest. */
	if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
		/* force full log commit if subvolume involved. */
		btrfs_set_log_full_commit(trans);
	} else {
		ret = btrfs_insert_inode_ref(trans, root, old_name, new_ino,
					     btrfs_ino(BTRFS_I(old_dir)),
					     new_idx);
		if (ret) {
			if (need_abort)
				btrfs_abort_transaction(trans, ret);
			goto out_fail;
		}
	}

	/* Update inode version and ctime/mtime. */
	inode_inc_iversion(old_dir);
	inode_inc_iversion(new_dir);
	inode_inc_iversion(old_inode);
	inode_inc_iversion(new_inode);
	old_dir->i_mtime = ctime;
	old_dir->i_ctime = ctime;
	new_dir->i_mtime = ctime;
	new_dir->i_ctime = ctime;
	old_inode->i_ctime = ctime;
	new_inode->i_ctime = ctime;

	if (old_dentry->d_parent != new_dentry->d_parent) {
		btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
					BTRFS_I(old_inode), true);
		btrfs_record_unlink_dir(trans, BTRFS_I(new_dir),
					BTRFS_I(new_inode), true);
	}

	/* src is a subvolume */
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry);
	} else { /* src is an inode */
		ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
					   BTRFS_I(old_dentry->d_inode),
					   old_name, &old_rename_ctx);
		if (!ret)
			ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode));
	}
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	/* dest is a subvolume */
	if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry);
	} else { /* dest is an inode */
		ret = __btrfs_unlink_inode(trans, BTRFS_I(new_dir),
					   BTRFS_I(new_dentry->d_inode),
					   new_name, &new_rename_ctx);
		if (!ret)
			ret = btrfs_update_inode(trans, dest, BTRFS_I(new_inode));
	}
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
			     new_name, 0, old_idx);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode),
			     old_name, 0, new_idx);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	if (old_inode->i_nlink == 1)
		BTRFS_I(old_inode)->dir_index = old_idx;
	if (new_inode->i_nlink == 1)
		BTRFS_I(new_inode)->dir_index = new_idx;

	/*
	 * Now pin the logs of the roots. We do it to ensure that no other task
	 * can sync the logs while we are in progress with the rename, because
	 * that could result in an inconsistency in case any of the inodes that
	 * are part of this rename operation were logged before.
	 */
	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_pin_log_trans(root);
	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_pin_log_trans(dest);

	/* Do the log updates for all inodes. */
	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
				   old_rename_ctx.index, new_dentry->d_parent);
	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_log_new_name(trans, new_dentry, BTRFS_I(new_dir),
				   new_rename_ctx.index, old_dentry->d_parent);

	/* Now unpin the logs. */
	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_end_log_trans(root);
	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_end_log_trans(dest);
out_fail:
	ret2 = btrfs_end_transaction(trans);
	ret = ret ? ret : ret2;
out_notrans:
	if (new_ino == BTRFS_FIRST_FREE_OBJECTID ||
	    old_ino == BTRFS_FIRST_FREE_OBJECTID)
		up_read(&fs_info->subvol_sem);

	fscrypt_free_filename(&new_fname);
	fscrypt_free_filename(&old_fname);
	return ret;
}
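
/*
 * Allocate the whiteout inode (a char device node with WHITEOUT_DEV) that
 * RENAME_WHITEOUT leaves behind at the old location.
 */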
static struct inode *new_whiteout_inode(struct mnt_idmap *idmap,
					struct inode *dir)
{
	struct inode *inode;

	inode = new_inode(dir->i_sb);
	if (inode) {
		inode_init_owner(idmap, inode, dir,
				 S_IFCHR | WHITEOUT_MODE);
		inode->i_op = &btrfs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, WHITEOUT_DEV);
	}
	return inode;
}
static int btrfs_rename(struct mnt_idmap *idmap,
			struct inode *old_dir, struct dentry *old_dentry,
			struct inode *new_dir, struct dentry *new_dentry,
			unsigned int flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
	struct btrfs_new_inode_args whiteout_args = {
		.dir = old_dir,
		.dentry = old_dentry,
	};
	struct btrfs_trans_handle *trans;
	unsigned int trans_num_items;
	struct btrfs_root *root = BTRFS_I(old_dir)->root;
	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
	struct inode *new_inode = d_inode(new_dentry);
	struct inode *old_inode = d_inode(old_dentry);
	struct btrfs_rename_ctx rename_ctx;
	u64 index = 0;
	int ret;
	int ret2;
	u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
	struct fscrypt_name old_fname, new_fname;

	if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
		return -EPERM;

	/* we only allow rename subvolume link between subvolumes */
	if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
		return -EXDEV;

	if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
	    (new_inode && btrfs_ino(BTRFS_I(new_inode)) == BTRFS_FIRST_FREE_OBJECTID))
		return -ENOTEMPTY;

	if (S_ISDIR(old_inode->i_mode) && new_inode &&
	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
		return -ENOTEMPTY;

	ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname);
	if (ret)
		return ret;

	ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname);
	if (ret) {
		fscrypt_free_filename(&old_fname);
		return ret;
	}

	/* check for collisions, even if the name isn't there */
	ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino, &new_fname.disk_name);
	if (ret) {
		if (ret == -EEXIST) {
			/* we shouldn't get
			 * eexist without a new_inode */
			if (WARN_ON(!new_inode)) {
				goto out_fscrypt_names;
			}
		} else {
			/* maybe -EOVERFLOW */
			goto out_fscrypt_names;
		}
	}
	ret = 0;

	/*
	 * we're using rename to replace one file with another. Start IO on it
	 * now so we don't add too much work to the end of the transaction
	 */
	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size)
		filemap_flush(old_inode->i_mapping);

	if (flags & RENAME_WHITEOUT) {
		whiteout_args.inode = new_whiteout_inode(idmap, old_dir);
		if (!whiteout_args.inode) {
			ret = -ENOMEM;
			goto out_fscrypt_names;
		}
		ret = btrfs_new_inode_prepare(&whiteout_args, &trans_num_items);
		if (ret)
			goto out_whiteout_inode;
	} else {
		/* 1 to update the old parent inode. */
		trans_num_items = 1;
	}

	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
		/* Close the race window with snapshot create/destroy ioctl */
		down_read(&fs_info->subvol_sem);
		/*
		 * 1 to remove old root ref
		 * 1 to remove old root backref
		 * 1 to add new root ref
		 * 1 to add new root backref
		 */
		trans_num_items += 4;
	} else {
		/*
		 * 1 to update inode
		 * 1 to remove old inode ref
		 * 1 to add new inode ref
		 */
		trans_num_items += 3;
	}
	/*
	 * 1 to remove old dir item
	 * 1 to remove old dir index
	 * 1 to add new dir item
	 * 1 to add new dir index
	 */
	trans_num_items += 4;
	/* 1 to update new parent inode if it's not the same as the old parent */
	if (new_dir != old_dir)
		trans_num_items++;
	if (new_inode) {
		/*
		 * 1 to update inode
		 * 1 to remove inode ref
		 * 1 to remove dir item
		 * 1 to remove dir index
		 * 1 to possibly add orphan item
		 */
		trans_num_items += 5;
	}
	trans = btrfs_start_transaction(root, trans_num_items);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_notrans;
	}

	if (dest != root) {
		ret = btrfs_record_root_in_trans(trans, dest);
		if (ret)
			goto out_fail;
	}

	ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index);
	if (ret)
		goto out_fail;

	BTRFS_I(old_inode)->dir_index = 0ULL;
	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
		/* force full log commit if subvolume involved. */
		btrfs_set_log_full_commit(trans);
	} else {
		ret = btrfs_insert_inode_ref(trans, dest, &new_fname.disk_name,
					     old_ino, btrfs_ino(BTRFS_I(new_dir)),
					     index);
		if (ret)
			goto out_fail;
	}

	inode_inc_iversion(old_dir);
	inode_inc_iversion(new_dir);
	inode_inc_iversion(old_inode);
	old_dir->i_mtime = current_time(old_dir);
	old_dir->i_ctime = old_dir->i_mtime;
	new_dir->i_mtime = old_dir->i_mtime;
	new_dir->i_ctime = old_dir->i_mtime;
	old_inode->i_ctime = old_dir->i_mtime;

	if (old_dentry->d_parent != new_dentry->d_parent)
		btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
					BTRFS_I(old_inode), true);

	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
		ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry);
	} else {
		ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
					   BTRFS_I(d_inode(old_dentry)),
					   &old_fname.disk_name, &rename_ctx);
		if (!ret)
			ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode));
	}
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	if (new_inode) {
		inode_inc_iversion(new_inode);
		new_inode->i_ctime = current_time(new_inode);
		if (unlikely(btrfs_ino(BTRFS_I(new_inode)) ==
			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
			ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry);
			BUG_ON(new_inode->i_nlink == 0);
		} else {
			ret = btrfs_unlink_inode(trans, BTRFS_I(new_dir),
						 BTRFS_I(d_inode(new_dentry)),
						 &new_fname.disk_name);
		}
		if (!ret && new_inode->i_nlink == 0)
			ret = btrfs_orphan_add(trans,
					       BTRFS_I(d_inode(new_dentry)));
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out_fail;
		}
	}

	ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
			     &new_fname.disk_name, 0, index);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	if (old_inode->i_nlink == 1)
		BTRFS_I(old_inode)->dir_index = index;

	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
				   rename_ctx.index, new_dentry->d_parent);

	if (flags & RENAME_WHITEOUT) {
		ret = btrfs_create_new_inode(trans, &whiteout_args);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out_fail;
		} else {
			unlock_new_inode(whiteout_args.inode);
			iput(whiteout_args.inode);
			whiteout_args.inode = NULL;
		}
	}
out_fail:
	ret2 = btrfs_end_transaction(trans);
	ret = ret ? ret : ret2;
out_notrans:
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
		up_read(&fs_info->subvol_sem);
	if (flags & RENAME_WHITEOUT)
		btrfs_new_inode_args_destroy(&whiteout_args);
out_whiteout_inode:
	if (flags & RENAME_WHITEOUT)
		iput(whiteout_args.inode);
out_fscrypt_names:
	fscrypt_free_filename(&old_fname);
	fscrypt_free_filename(&new_fname);
	return ret;
}
static int btrfs_rename2(struct mnt_idmap *idmap, struct inode *old_dir,
			 struct dentry *old_dentry, struct inode *new_dir,
			 struct dentry *new_dentry, unsigned int flags)
{
	int ret;

	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
		return -EINVAL;

	if (flags & RENAME_EXCHANGE)
		ret = btrfs_rename_exchange(old_dir, old_dentry, new_dir,
					    new_dentry);
	else
		ret = btrfs_rename(idmap, old_dir, old_dentry, new_dir,
				   new_dentry, flags);

	btrfs_btree_balance_dirty(BTRFS_I(new_dir)->root->fs_info);

	return ret;
}
struct btrfs_delalloc_work {
	struct inode *inode;
	struct completion completion;
	struct list_head list;
	struct btrfs_work work;
};
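
/*
 * Worker callback to flush one inode's delalloc pages. When async
 * (compressed) extents are pending, the first flush may return before all
 * pages are submitted, so flush a second time in that case.
 */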
static void btrfs_run_delalloc_work(struct btrfs_work *work)
{
	struct btrfs_delalloc_work *delalloc_work;
	struct inode *inode;

	delalloc_work = container_of(work, struct btrfs_delalloc_work,
				     work);
	inode = delalloc_work->inode;
	filemap_flush(inode->i_mapping);
	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
		     &BTRFS_I(inode)->runtime_flags))
		filemap_flush(inode->i_mapping);

	iput(inode);
	complete(&delalloc_work->completion);
}
static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode)
{
	struct btrfs_delalloc_work *work;

	work = kmalloc(sizeof(*work), GFP_NOFS);
	if (!work)
		return NULL;

	init_completion(&work->completion);
	INIT_LIST_HEAD(&work->list);
	work->inode = inode;
	btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL);

	return work;
}
/*
 * some fairly slow code that needs optimization. This walks the list
 * of all the inodes with pending delalloc and forces them to disk.
 */
static int start_delalloc_inodes(struct btrfs_root *root,
				 struct writeback_control *wbc, bool snapshot,
				 bool in_reclaim_context)
{
	struct btrfs_inode *binode;
	struct inode *inode;
	struct btrfs_delalloc_work *work, *next;
	struct list_head works;
	struct list_head splice;
	int ret = 0;
	bool full_flush = wbc->nr_to_write == LONG_MAX;

	INIT_LIST_HEAD(&works);
	INIT_LIST_HEAD(&splice);

	mutex_lock(&root->delalloc_mutex);
	spin_lock(&root->delalloc_lock);
	list_splice_init(&root->delalloc_inodes, &splice);
	while (!list_empty(&splice)) {
		binode = list_entry(splice.next, struct btrfs_inode,
				    delalloc_inodes);

		list_move_tail(&binode->delalloc_inodes,
			       &root->delalloc_inodes);

		if (in_reclaim_context &&
		    test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &binode->runtime_flags))
			continue;

		inode = igrab(&binode->vfs_inode);
		if (!inode) {
			cond_resched_lock(&root->delalloc_lock);
			continue;
		}
		spin_unlock(&root->delalloc_lock);

		if (snapshot)
			set_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
				&binode->runtime_flags);
		if (full_flush) {
			work = btrfs_alloc_delalloc_work(inode);
			if (!work) {
				iput(inode);
				ret = -ENOMEM;
				goto out;
			}
			list_add_tail(&work->list, &works);
			btrfs_queue_work(root->fs_info->flush_workers,
					 &work->work);
		} else {
			ret = filemap_fdatawrite_wbc(inode->i_mapping, wbc);
			btrfs_add_delayed_iput(BTRFS_I(inode));
			if (ret || wbc->nr_to_write <= 0)
				goto out;
		}
		cond_resched();
		spin_lock(&root->delalloc_lock);
	}
	spin_unlock(&root->delalloc_lock);

out:
	list_for_each_entry_safe(work, next, &works, list) {
		list_del_init(&work->list);
		wait_for_completion(&work->completion);
		kfree(work);
	}

	if (!list_empty(&splice)) {
		spin_lock(&root->delalloc_lock);
		list_splice_tail(&splice, &root->delalloc_inodes);
		spin_unlock(&root->delalloc_lock);
	}
	mutex_unlock(&root->delalloc_mutex);
	return ret;
}
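
/*
 * Flush all delalloc of a root with a full (LONG_MAX) writeback pass,
 * used before creating a snapshot of that root.
 */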
int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context)
{
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (BTRFS_FS_ERROR(fs_info))
		return -EROFS;

	return start_delalloc_inodes(root, &wbc, true, in_reclaim_context);
}
int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
			       bool in_reclaim_context)
{
	struct writeback_control wbc = {
		.nr_to_write = nr,
		.sync_mode = WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};
	struct btrfs_root *root;
	struct list_head splice;
	int ret;

	if (BTRFS_FS_ERROR(fs_info))
		return -EROFS;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&fs_info->delalloc_root_mutex);
	spin_lock(&fs_info->delalloc_root_lock);
	list_splice_init(&fs_info->delalloc_roots, &splice);
	while (!list_empty(&splice)) {
		/*
		 * Reset nr_to_write here so we know that we're doing a full
		 * flush.
		 */
		if (nr == LONG_MAX)
			wbc.nr_to_write = LONG_MAX;

		root = list_first_entry(&splice, struct btrfs_root,
					delalloc_root);
		root = btrfs_grab_root(root);
		BUG_ON(!root);
		list_move_tail(&root->delalloc_root,
			       &fs_info->delalloc_roots);
		spin_unlock(&fs_info->delalloc_root_lock);

		ret = start_delalloc_inodes(root, &wbc, false, in_reclaim_context);
		btrfs_put_root(root);
		if (ret < 0 || wbc.nr_to_write <= 0)
			goto out;
		spin_lock(&fs_info->delalloc_root_lock);
	}
	spin_unlock(&fs_info->delalloc_root_lock);

	ret = 0;
out:
	if (!list_empty(&splice)) {
		spin_lock(&fs_info->delalloc_root_lock);
		list_splice_tail(&splice, &fs_info->delalloc_roots);
		spin_unlock(&fs_info->delalloc_root_lock);
	}
	mutex_unlock(&fs_info->delalloc_root_mutex);
	return ret;
}
static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
			 struct dentry *dentry, const char *symname)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct inode *inode;
	struct btrfs_new_inode_args new_inode_args = {
		.dir = dir,
		.dentry = dentry,
	};
	unsigned int trans_num_items;
	int err;
	int name_len;
	int datasize;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	struct extent_buffer *leaf;

	name_len = strlen(symname);
	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
		return -ENAMETOOLONG;

	inode = new_inode(dir->i_sb);
	if (!inode)
		return -ENOMEM;
	inode_init_owner(idmap, inode, dir, S_IFLNK | S_IRWXUGO);
	inode->i_op = &btrfs_symlink_inode_operations;
	inode_nohighmem(inode);
	inode->i_mapping->a_ops = &btrfs_aops;
	btrfs_i_size_write(BTRFS_I(inode), name_len);
	inode_set_bytes(inode, name_len);

	new_inode_args.inode = inode;
	err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
	if (err)
		goto out_inode;
	/* 1 additional item for the inline extent */
	trans_num_items++;

	trans = btrfs_start_transaction(root, trans_num_items);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_new_inode_args;
	}

	err = btrfs_create_new_inode(trans, &new_inode_args);
	if (err)
		goto out;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		btrfs_abort_transaction(trans, err);
		discard_new_inode(inode);
		inode = NULL;
		goto out;
	}
	key.objectid = btrfs_ino(BTRFS_I(inode));
	key.offset = 0;
	key.type = BTRFS_EXTENT_DATA_KEY;
	datasize = btrfs_file_extent_calc_inline_size(name_len);
	err = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	if (err) {
		btrfs_abort_transaction(trans, err);
		btrfs_free_path(path);
		discard_new_inode(inode);
		inode = NULL;
		goto out;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei,
				   BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_compression(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);

	ptr = btrfs_file_extent_inline_start(ei);
	write_extent_buffer(leaf, symname, ptr, name_len);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	d_instantiate_new(dentry, inode);
	err = 0;
out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out_new_inode_args:
	btrfs_new_inode_args_destroy(&new_inode_args);
out_inode:
	if (err)
		iput(inode);
	return err;
}
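
/*
 * Insert the file extent item for a just-reserved prealloc extent. If no
 * transaction handle is passed in, btrfs_replace_file_extents() starts one
 * and returns it. The qgroup data reservation is released up front and
 * refunded on error so it is not leaked.
 */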
static struct btrfs_trans_handle *insert_prealloc_file_extent(
				       struct btrfs_trans_handle *trans_in,
				       struct btrfs_inode *inode,
				       struct btrfs_key *ins,
				       u64 file_offset)
{
	struct btrfs_file_extent_item stack_fi;
	struct btrfs_replace_extent_info extent_info;
	struct btrfs_trans_handle *trans = trans_in;
	struct btrfs_path *path;
	u64 start = ins->objectid;
	u64 len = ins->offset;
	int qgroup_released;
	int ret;

	memset(&stack_fi, 0, sizeof(stack_fi));

	btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_PREALLOC);
	btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, start);
	btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, len);
	btrfs_set_stack_file_extent_num_bytes(&stack_fi, len);
	btrfs_set_stack_file_extent_ram_bytes(&stack_fi, len);
	btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE);
	/* Encryption and other encoding is reserved and all 0 */

	qgroup_released = btrfs_qgroup_release_data(inode, file_offset, len);
	if (qgroup_released < 0)
		return ERR_PTR(qgroup_released);

	if (trans) {
		ret = insert_reserved_file_extent(trans, inode,
						  file_offset, &stack_fi,
						  true, qgroup_released);
		if (ret)
			goto free_qgroup;
		return trans;
	}

	extent_info.disk_offset = start;
	extent_info.disk_len = len;
	extent_info.data_offset = 0;
	extent_info.data_len = len;
	extent_info.file_offset = file_offset;
	extent_info.extent_buf = (char *)&stack_fi;
	extent_info.is_new_extent = true;
	extent_info.update_times = true;
	extent_info.qgroup_reserved = qgroup_released;
	extent_info.insertions = 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto free_qgroup;
	}

	ret = btrfs_replace_file_extents(inode, path, file_offset,
					 file_offset + len - 1, &extent_info,
					 &trans);
	btrfs_free_path(path);
	if (ret)
		goto free_qgroup;
	return trans;

free_qgroup:
	/*
	 * We have released qgroup data range at the beginning of the function,
	 * and normally qgroup_released bytes will be freed when committing
	 * transaction.
	 * But if we error out early, we have to free what we have released
	 * or we leak qgroup data reservation.
	 */
	btrfs_qgroup_free_refroot(inode->root->fs_info,
			inode->root->root_key.objectid, qgroup_released,
			BTRFS_QGROUP_RSV_DATA);
	return ERR_PTR(ret);
}
static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
				       u64 start, u64 num_bytes, u64 min_size,
				       loff_t actual_len, u64 *alloc_hint,
				       struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key ins;
	u64 cur_offset = start;
	u64 clear_offset = start;
	u64 i_size;
	u64 cur_bytes;
	u64 last_alloc = (u64)-1;
	int ret = 0;
	bool own_trans = true;
	u64 end = start + num_bytes - 1;

	if (trans)
		own_trans = false;
	while (num_bytes > 0) {
		cur_bytes = min_t(u64, num_bytes, SZ_256M);
		cur_bytes = max(cur_bytes, min_size);
		/*
		 * If we are severely fragmented we could end up with really
		 * small allocations, so if the allocator is returning small
		 * chunks lets make its job easier by only searching for those
		 * sized chunks.
		 */
		cur_bytes = min(cur_bytes, last_alloc);
		ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes,
					   min_size, 0, *alloc_hint, &ins, 1, 0);
		if (ret)
			break;

		/*
		 * We've reserved this space, and thus converted it from
		 * ->bytes_may_use to ->bytes_reserved. Any error that happens
		 * from here on out we will only need to clear our reservation
		 * for the remaining unreserved area, so advance our
		 * clear_offset by our extent size.
		 */
		clear_offset += ins.offset;

		last_alloc = ins.offset;
		trans = insert_prealloc_file_extent(trans, BTRFS_I(inode),
						    &ins, cur_offset);
		/*
		 * Now that we inserted the prealloc extent we can finally
		 * decrement the number of reservations in the block group.
		 * If we did it before, we could race with relocation and have
		 * relocation miss the reserved extent, making it fail later.
		 */
		btrfs_dec_block_group_reservations(fs_info, ins.objectid);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			btrfs_free_reserved_extent(fs_info, ins.objectid,
						   ins.offset, 0);
			break;
		}

		em = alloc_extent_map();
		if (!em) {
			btrfs_drop_extent_map_range(BTRFS_I(inode), cur_offset,
					    cur_offset + ins.offset - 1, false);
			btrfs_set_inode_full_sync(BTRFS_I(inode));
			goto next;
		}

		em->start = cur_offset;
		em->orig_start = cur_offset;
		em->len = ins.offset;
		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->orig_block_len = ins.offset;
		em->ram_bytes = ins.offset;
		set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		em->generation = trans->transid;

		ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, true);
		free_extent_map(em);
next:
		num_bytes -= ins.offset;
		cur_offset += ins.offset;
		*alloc_hint = ins.objectid + ins.offset;

		inode_inc_iversion(inode);
		inode->i_ctime = current_time(inode);
		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    (actual_len > inode->i_size) &&
		    (cur_offset > inode->i_size)) {
			if (cur_offset > actual_len)
				i_size = actual_len;
			else
				i_size = cur_offset;
			i_size_write(inode, i_size);
			btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
		}

		ret = btrfs_update_inode(trans, root, BTRFS_I(inode));

		if (ret) {
			btrfs_abort_transaction(trans, ret);
			if (own_trans)
				btrfs_end_transaction(trans);
			break;
		}

		if (own_trans) {
			btrfs_end_transaction(trans);
			trans = NULL;
		}
	}
	if (clear_offset < end)
		btrfs_free_reserved_data_space(BTRFS_I(inode), NULL, clear_offset,
			end - clear_offset + 1);
	return ret;
}
int btrfs_prealloc_file_range(struct inode *inode, int mode,
			      u64 start, u64 num_bytes, u64 min_size,
			      loff_t actual_len, u64 *alloc_hint)
{
	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
					   min_size, actual_len, alloc_hint,
					   NULL);
}

int btrfs_prealloc_file_range_trans(struct inode *inode,
				    struct btrfs_trans_handle *trans, int mode,
				    u64 start, u64 num_bytes, u64 min_size,
				    loff_t actual_len, u64 *alloc_hint)
{
	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
					   min_size, actual_len, alloc_hint, trans);
}
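
/* Reject write access to read-only roots and READONLY-flagged inodes. */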
static int btrfs_permission(struct mnt_idmap *idmap,
			    struct inode *inode, int mask)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	umode_t mode = inode->i_mode;

	if (mask & MAY_WRITE &&
	    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
		if (btrfs_root_readonly(root))
			return -EROFS;
		if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
			return -EACCES;
	}
	return generic_permission(idmap, inode, mask);
}
static int btrfs_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
			 struct file *file, umode_t mode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode;
	struct btrfs_new_inode_args new_inode_args = {
		.dir = dir,
		.dentry = file->f_path.dentry,
		.orphan = true,
	};
	unsigned int trans_num_items;
	int ret;

	inode = new_inode(dir->i_sb);
	if (!inode)
		return -ENOMEM;
	inode_init_owner(idmap, inode, dir, mode);
	inode->i_fop = &btrfs_file_operations;
	inode->i_op = &btrfs_file_inode_operations;
	inode->i_mapping->a_ops = &btrfs_aops;

	new_inode_args.inode = inode;
	ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
	if (ret)
		goto out_inode;

	trans = btrfs_start_transaction(root, trans_num_items);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_new_inode_args;
	}

	ret = btrfs_create_new_inode(trans, &new_inode_args);

	/*
	 * We set number of links to 0 in btrfs_create_new_inode(), and here we
	 * set it to 1 because d_tmpfile() will issue a warning if the count is
	 * 0, through:
	 *
	 *    d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
	 */
	set_nlink(inode, 1);

	if (!ret) {
		d_tmpfile(file, inode);
		unlock_new_inode(inode);
		mark_inode_dirty(inode);
	}

	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out_new_inode_args:
	btrfs_new_inode_args_destroy(&new_inode_args);
out_inode:
	if (ret)
		iput(inode);
	return finish_open_simple(file, ret);
}
void btrfs_set_range_writeback(struct btrfs_inode *inode, u64 start, u64 end)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	unsigned long index = start >> PAGE_SHIFT;
	unsigned long end_index = end >> PAGE_SHIFT;
	struct page *page;
	u32 len;

	ASSERT(end + 1 - start <= U32_MAX);
	len = end + 1 - start;
	while (index <= end_index) {
		page = find_get_page(inode->vfs_inode.i_mapping, index);
		ASSERT(page); /* Pages should be in the extent_io_tree */

		btrfs_page_set_writeback(fs_info, page, start, len);
		put_page(page);
		index++;
	}
}
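
/*
 * Translate an on-disk compression type into the corresponding
 * BTRFS_ENCODED_IO_COMPRESSION_* value used by the encoded I/O ioctls.
 * LZO encoding depends on the sector size, hence one value per size.
 */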
int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info,
					     int compress_type)
{
	switch (compress_type) {
	case BTRFS_COMPRESS_NONE:
		return BTRFS_ENCODED_IO_COMPRESSION_NONE;
	case BTRFS_COMPRESS_ZLIB:
		return BTRFS_ENCODED_IO_COMPRESSION_ZLIB;
	case BTRFS_COMPRESS_LZO:
		/*
		 * The LZO format depends on the sector size. 64K is the maximum
		 * sector size that we support.
		 */
		if (fs_info->sectorsize < SZ_4K || fs_info->sectorsize > SZ_64K)
			return -EINVAL;
		return BTRFS_ENCODED_IO_COMPRESSION_LZO_4K +
		       (fs_info->sectorsize_bits - 12);
	case BTRFS_COMPRESS_ZSTD:
		return BTRFS_ENCODED_IO_COMPRESSION_ZSTD;
	default:
		return -EUCLEAN;
	}
}
static ssize_t btrfs_encoded_read_inline(
				struct kiocb *iocb,
				struct iov_iter *iter, u64 start,
				u64 lockend,
				struct extent_state **cached_state,
				u64 extent_start, size_t count,
				struct btrfs_ioctl_encoded_io_args *encoded,
				bool *unlocked)
{
	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *item;
	u64 ram_bytes;
	unsigned long ptr;
	void *tmp;
	ssize_t ret;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}
	ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode),
				       extent_start, 0);
	if (ret) {
		if (ret > 0) {
			/* The extent item disappeared? */
			ret = -EIO;
		}
		goto out;
	}
	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);

	ram_bytes = btrfs_file_extent_ram_bytes(leaf, item);
	ptr = btrfs_file_extent_inline_start(item);

	encoded->len = min_t(u64, extent_start + ram_bytes,
			     inode->vfs_inode.i_size) - iocb->ki_pos;
	ret = btrfs_encoded_io_compression_from_extent(fs_info,
				 btrfs_file_extent_compression(leaf, item));
	if (ret < 0)
		goto out;
	encoded->compression = ret;
	if (encoded->compression) {
		size_t inline_size;

		inline_size = btrfs_file_extent_inline_item_len(leaf,
								path->slots[0]);
		if (inline_size > count) {
			ret = -ENOBUFS;
			goto out;
		}
		count = inline_size;
		encoded->unencoded_len = ram_bytes;
		encoded->unencoded_offset = iocb->ki_pos - extent_start;
	} else {
		count = min_t(u64, count, encoded->len);
		encoded->len = count;
		encoded->unencoded_len = count;
		ptr += iocb->ki_pos - extent_start;
	}

	tmp = kmalloc(count, GFP_NOFS);
	if (!tmp) {
		ret = -ENOMEM;
		goto out;
	}
	read_extent_buffer(leaf, tmp, ptr, count);
	btrfs_release_path(path);
	unlock_extent(io_tree, start, lockend, cached_state);
	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
	*unlocked = true;

	ret = copy_to_iter(tmp, count, iter);
	if (ret != count)
		ret = -EFAULT;
	kfree(tmp);
out:
	btrfs_free_path(path);
	return ret;
}
struct btrfs_encoded_read_private {
	wait_queue_head_t wait;
	atomic_t pending;
	blk_status_t status;
};

static void btrfs_encoded_read_endio(struct btrfs_bio *bbio)
{
	struct btrfs_encoded_read_private *priv = bbio->private;

	if (bbio->bio.bi_status) {
		/*
		 * The memory barrier implied by the atomic_dec_return() here
		 * pairs with the memory barrier implied by the
		 * atomic_dec_return() or io_wait_event() in
		 * btrfs_encoded_read_regular_fill_pages() to ensure that this
		 * write is observed before the load of status in
		 * btrfs_encoded_read_regular_fill_pages().
		 */
		WRITE_ONCE(priv->status, bbio->bio.bi_status);
	}
	if (!atomic_dec_return(&priv->pending))
		wake_up(&priv->wait);
	bio_put(&bbio->bio);
}
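
/*
 * Read disk_io_size bytes starting at disk_bytenr into the given page
 * array, packing as many pages per bio as possible and waiting for all
 * bios to complete before returning the combined status.
 */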
int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
					  u64 file_offset, u64 disk_bytenr,
					  u64 disk_io_size, struct page **pages)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_encoded_read_private priv = {
		.pending = ATOMIC_INIT(1),
	};
	unsigned long i = 0;
	struct btrfs_bio *bbio;

	init_waitqueue_head(&priv.wait);

	bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info,
			       btrfs_encoded_read_endio, &priv);
	bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
	bbio->inode = inode;

	do {
		size_t bytes = min_t(u64, disk_io_size, PAGE_SIZE);

		if (bio_add_page(&bbio->bio, pages[i], bytes, 0) < bytes) {
			atomic_inc(&priv.pending);
			btrfs_submit_bio(bbio, 0);

			bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info,
					       btrfs_encoded_read_endio, &priv);
			bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
			bbio->inode = inode;
			continue;
		}

		i++;
		disk_bytenr += bytes;
		disk_io_size -= bytes;
	} while (disk_io_size);

	atomic_inc(&priv.pending);
	btrfs_submit_bio(bbio, 0);

	if (atomic_dec_return(&priv.pending))
		io_wait_event(priv.wait, !atomic_read(&priv.pending));
	/* See btrfs_encoded_read_endio() for ordering. */
	return blk_status_to_errno(READ_ONCE(priv.status));
}
static ssize_t btrfs_encoded_read_regular(struct kiocb *iocb,
					  struct iov_iter *iter,
					  u64 start, u64 lockend,
					  struct extent_state **cached_state,
					  u64 disk_bytenr, u64 disk_io_size,
					  size_t count, bool compressed,
					  bool *unlocked)
{
	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct page **pages;
	unsigned long nr_pages, i;
	u64 cur;
	size_t page_offset;
	ssize_t ret;

	nr_pages = DIV_ROUND_UP(disk_io_size, PAGE_SIZE);
	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
	if (!pages)
		return -ENOMEM;
	ret = btrfs_alloc_page_array(nr_pages, pages);
	if (ret) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_encoded_read_regular_fill_pages(inode, start, disk_bytenr,
						    disk_io_size, pages);
	if (ret)
		goto out;

	unlock_extent(io_tree, start, lockend, cached_state);
	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
	*unlocked = true;

	if (compressed) {
		i = 0;
		page_offset = 0;
	} else {
		i = (iocb->ki_pos - start) >> PAGE_SHIFT;
		page_offset = (iocb->ki_pos - start) & (PAGE_SIZE - 1);
	}
	cur = 0;
	while (cur < count) {
		size_t bytes = min_t(size_t, count - cur,
				     PAGE_SIZE - page_offset);

		if (copy_page_to_iter(pages[i], page_offset, bytes,
				      iter) != bytes) {
			ret = -EFAULT;
			goto out;
		}
		i++;
		cur += bytes;
		page_offset = 0;
	}
	ret = count;
out:
	for (i = 0; i < nr_pages; i++) {
		if (pages[i])
			__free_page(pages[i]);
	}
	kfree(pages);
	return ret;
}
btrfs_encoded_read(struct kiocb
*iocb
, struct iov_iter
*iter
,
10118 struct btrfs_ioctl_encoded_io_args
*encoded
)
10120 struct btrfs_inode
*inode
= BTRFS_I(file_inode(iocb
->ki_filp
));
10121 struct btrfs_fs_info
*fs_info
= inode
->root
->fs_info
;
10122 struct extent_io_tree
*io_tree
= &inode
->io_tree
;
10124 size_t count
= iov_iter_count(iter
);
10125 u64 start
, lockend
, disk_bytenr
, disk_io_size
;
10126 struct extent_state
*cached_state
= NULL
;
10127 struct extent_map
*em
;
10128 bool unlocked
= false;
10130 file_accessed(iocb
->ki_filp
);
10132 btrfs_inode_lock(inode
, BTRFS_ILOCK_SHARED
);
10134 if (iocb
->ki_pos
>= inode
->vfs_inode
.i_size
) {
10135 btrfs_inode_unlock(inode
, BTRFS_ILOCK_SHARED
);
10138 start
= ALIGN_DOWN(iocb
->ki_pos
, fs_info
->sectorsize
);
10140 * We don't know how long the extent containing iocb->ki_pos is, but if
10141 * it's compressed we know that it won't be longer than this.
10143 lockend
= start
+ BTRFS_MAX_UNCOMPRESSED
- 1;
10146 struct btrfs_ordered_extent
*ordered
;
10148 ret
= btrfs_wait_ordered_range(&inode
->vfs_inode
, start
,
10149 lockend
- start
+ 1);
10151 goto out_unlock_inode
;
10152 lock_extent(io_tree
, start
, lockend
, &cached_state
);
10153 ordered
= btrfs_lookup_ordered_range(inode
, start
,
10154 lockend
- start
+ 1);
10157 btrfs_put_ordered_extent(ordered
);
10158 unlock_extent(io_tree
, start
, lockend
, &cached_state
);
10162 em
= btrfs_get_extent(inode
, NULL
, 0, start
, lockend
- start
+ 1);
10165 goto out_unlock_extent
;
10168 if (em
->block_start
== EXTENT_MAP_INLINE
) {
10169 u64 extent_start
= em
->start
;
10172 * For inline extents we get everything we need out of the
10175 free_extent_map(em
);
10177 ret
= btrfs_encoded_read_inline(iocb
, iter
, start
, lockend
,
10178 &cached_state
, extent_start
,
10179 count
, encoded
, &unlocked
);
10184 * We only want to return up to EOF even if the extent extends beyond
10187 encoded
->len
= min_t(u64
, extent_map_end(em
),
10188 inode
->vfs_inode
.i_size
) - iocb
->ki_pos
;
10189 if (em
->block_start
== EXTENT_MAP_HOLE
||
10190 test_bit(EXTENT_FLAG_PREALLOC
, &em
->flags
)) {
10191 disk_bytenr
= EXTENT_MAP_HOLE
;
10192 count
= min_t(u64
, count
, encoded
->len
);
10193 encoded
->len
= count
;
10194 encoded
->unencoded_len
= count
;
10195 } else if (test_bit(EXTENT_FLAG_COMPRESSED
, &em
->flags
)) {
10196 disk_bytenr
= em
->block_start
;
10198 * Bail if the buffer isn't large enough to return the whole
10199 * compressed extent.
10201 if (em
->block_len
> count
) {
10205 disk_io_size
= em
->block_len
;
10206 count
= em
->block_len
;
10207 encoded
->unencoded_len
= em
->ram_bytes
;
10208 encoded
->unencoded_offset
= iocb
->ki_pos
- em
->orig_start
;
10209 ret
= btrfs_encoded_io_compression_from_extent(fs_info
,
10210 em
->compress_type
);
10213 encoded
->compression
= ret
;
10215 disk_bytenr
= em
->block_start
+ (start
- em
->start
);
10216 if (encoded
->len
> count
)
10217 encoded
->len
= count
;
10219 * Don't read beyond what we locked. This also limits the page
10220 * allocations that we'll do.
10222 disk_io_size
= min(lockend
+ 1, iocb
->ki_pos
+ encoded
->len
) - start
;
10223 count
= start
+ disk_io_size
- iocb
->ki_pos
;
10224 encoded
->len
= count
;
10225 encoded
->unencoded_len
= count
;
10226 disk_io_size
= ALIGN(disk_io_size
, fs_info
->sectorsize
);
10228 free_extent_map(em
);
10231 if (disk_bytenr
== EXTENT_MAP_HOLE
) {
10232 unlock_extent(io_tree
, start
, lockend
, &cached_state
);
10233 btrfs_inode_unlock(inode
, BTRFS_ILOCK_SHARED
);
10235 ret
= iov_iter_zero(count
, iter
);
10239 ret
= btrfs_encoded_read_regular(iocb
, iter
, start
, lockend
,
10240 &cached_state
, disk_bytenr
,
10241 disk_io_size
, count
,
10242 encoded
->compression
,
10248 iocb
->ki_pos
+= encoded
->len
;
10250 free_extent_map(em
);
10253 unlock_extent(io_tree
, start
, lockend
, &cached_state
);
10256 btrfs_inode_unlock(inode
, BTRFS_ILOCK_SHARED
);
10260 ssize_t
btrfs_do_encoded_write(struct kiocb
*iocb
, struct iov_iter
*from
,
10261 const struct btrfs_ioctl_encoded_io_args
*encoded
)
10263 struct btrfs_inode
*inode
= BTRFS_I(file_inode(iocb
->ki_filp
));
10264 struct btrfs_root
*root
= inode
->root
;
10265 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
10266 struct extent_io_tree
*io_tree
= &inode
->io_tree
;
10267 struct extent_changeset
*data_reserved
= NULL
;
10268 struct extent_state
*cached_state
= NULL
;
10272 u64 num_bytes
, ram_bytes
, disk_num_bytes
;
10273 unsigned long nr_pages
, i
;
10274 struct page
**pages
;
10275 struct btrfs_key ins
;
10276 bool extent_reserved
= false;
10277 struct extent_map
*em
;
	switch (encoded->compression) {
	case BTRFS_ENCODED_IO_COMPRESSION_ZLIB:
		compression = BTRFS_COMPRESS_ZLIB;
		break;
	case BTRFS_ENCODED_IO_COMPRESSION_ZSTD:
		compression = BTRFS_COMPRESS_ZSTD;
		break;
	case BTRFS_ENCODED_IO_COMPRESSION_LZO_4K:
	case BTRFS_ENCODED_IO_COMPRESSION_LZO_8K:
	case BTRFS_ENCODED_IO_COMPRESSION_LZO_16K:
	case BTRFS_ENCODED_IO_COMPRESSION_LZO_32K:
	case BTRFS_ENCODED_IO_COMPRESSION_LZO_64K:
		/*
		 * The sector size must match for LZO: LZO_4K corresponds to a
		 * 4K (2^12) sector size, so the offset from it plus 12 must
		 * equal sectorsize_bits.
		 */
		if (encoded->compression -
		    BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 12 !=
		    fs_info->sectorsize_bits)
			return -EINVAL;
		compression = BTRFS_COMPRESS_LZO;
		break;
	default:
		return -EINVAL;
	}
	if (encoded->encryption != BTRFS_ENCODED_IO_ENCRYPTION_NONE)
		return -EINVAL;

	orig_count = iov_iter_count(from);
	/* The extent size must be sane. */
	if (encoded->unencoded_len > BTRFS_MAX_UNCOMPRESSED ||
	    orig_count > BTRFS_MAX_COMPRESSED || orig_count == 0)
		return -EINVAL;

	/*
	 * The compressed data must be smaller than the decompressed data.
	 *
	 * It's of course possible for data to compress to larger or the same
	 * size, but the buffered I/O path falls back to no compression for such
	 * data, and we don't want to break any assumptions by creating these
	 * extents.
	 *
	 * Note that this is less strict than the current check we have that the
	 * compressed data must be at least one sector smaller than the
	 * decompressed data. We only want to enforce the weaker requirement
	 * from old kernels that it is at least one byte smaller.
	 */
	if (orig_count >= encoded->unencoded_len)
		return -EINVAL;

	/* The extent must start on a sector boundary. */
	start = iocb->ki_pos;
	if (!IS_ALIGNED(start, fs_info->sectorsize))
		return -EINVAL;

	/*
	 * The extent must end on a sector boundary. However, we allow a write
	 * which ends at or extends i_size to have an unaligned length; we round
	 * up the extent size and set i_size to the unaligned end.
	 */
	if (start + encoded->len < inode->vfs_inode.i_size &&
	    !IS_ALIGNED(start + encoded->len, fs_info->sectorsize))
		return -EINVAL;

	/* Finally, the offset in the unencoded data must be sector-aligned. */
	if (!IS_ALIGNED(encoded->unencoded_offset, fs_info->sectorsize))
		return -EINVAL;

	num_bytes = ALIGN(encoded->len, fs_info->sectorsize);
	ram_bytes = ALIGN(encoded->unencoded_len, fs_info->sectorsize);
	end = start + num_bytes - 1;
	/*
	 * If the extent cannot be inline, the compressed data on disk must be
	 * sector-aligned. For convenience, we extend it with zeroes if it
	 * isn't.
	 */
	disk_num_bytes = ALIGN(orig_count, fs_info->sectorsize);
	nr_pages = DIV_ROUND_UP(disk_num_bytes, PAGE_SIZE);
	pages = kvcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL_ACCOUNT);
	if (!pages)
		return -ENOMEM;
	for (i = 0; i < nr_pages; i++) {
		size_t bytes = min_t(size_t, PAGE_SIZE, iov_iter_count(from));
		char *kaddr;

		pages[i] = alloc_page(GFP_KERNEL_ACCOUNT);
		if (!pages[i]) {
			ret = -ENOMEM;
			goto out_pages;
		}
		kaddr = kmap_local_page(pages[i]);
		if (copy_from_iter(kaddr, bytes, from) != bytes) {
			kunmap_local(kaddr);
			ret = -EFAULT;
			goto out_pages;
		}
		if (bytes < PAGE_SIZE)
			memset(kaddr + bytes, 0, PAGE_SIZE - bytes);
		kunmap_local(kaddr);
	}
	for (;;) {
		struct btrfs_ordered_extent *ordered;

		ret = btrfs_wait_ordered_range(&inode->vfs_inode, start, num_bytes);
		if (ret)
			goto out_pages;
		ret = invalidate_inode_pages2_range(inode->vfs_inode.i_mapping,
						    start >> PAGE_SHIFT,
						    end >> PAGE_SHIFT);
		if (ret)
			goto out_pages;
		lock_extent(io_tree, start, end, &cached_state);
		ordered = btrfs_lookup_ordered_range(inode, start, num_bytes);
		if (!ordered &&
		    !filemap_range_has_page(inode->vfs_inode.i_mapping, start, end))
			break;
		if (ordered)
			btrfs_put_ordered_extent(ordered);
		unlock_extent(io_tree, start, end, &cached_state);
		cond_resched();
	}
	/*
	 * We don't use the higher-level delalloc space functions because our
	 * num_bytes and disk_num_bytes are different.
	 */
	ret = btrfs_alloc_data_chunk_ondemand(inode, disk_num_bytes);
	if (ret)
		goto out_unlock;
	ret = btrfs_qgroup_reserve_data(inode, &data_reserved, start, num_bytes);
	if (ret)
		goto out_free_data_space;
	ret = btrfs_delalloc_reserve_metadata(inode, num_bytes, disk_num_bytes,
					      false);
	if (ret)
		goto out_qgroup_free_data;
	/* Try an inline extent first. */
	if (start == 0 && encoded->unencoded_len == encoded->len &&
	    encoded->unencoded_offset == 0) {
		ret = cow_file_range_inline(inode, encoded->len, orig_count,
					    compression, pages, true);
		if (ret <= 0) {
			if (ret == 0)
				ret = orig_count;
			goto out_delalloc_release;
		}
	}

	ret = btrfs_reserve_extent(root, disk_num_bytes, disk_num_bytes,
				   disk_num_bytes, 0, 0, &ins, 1, 1);
	if (ret)
		goto out_delalloc_release;
	extent_reserved = true;
	em = create_io_em(inode, start, num_bytes,
			  start - encoded->unencoded_offset, ins.objectid,
			  ins.offset, ins.offset, ram_bytes, compression,
			  BTRFS_ORDERED_COMPRESSED);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_free_reserved;
	}
	free_extent_map(em);

	ret = btrfs_add_ordered_extent(inode, start, num_bytes, ram_bytes,
				       ins.objectid, ins.offset,
				       encoded->unencoded_offset,
				       (1 << BTRFS_ORDERED_ENCODED) |
				       (1 << BTRFS_ORDERED_COMPRESSED),
				       compression);
	if (ret) {
		btrfs_drop_extent_map_range(inode, start, end, false);
		goto out_free_reserved;
	}
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);

	if (start + encoded->len > inode->vfs_inode.i_size)
		i_size_write(&inode->vfs_inode, start + encoded->len);

	unlock_extent(io_tree, start, end, &cached_state);

	btrfs_delalloc_release_extents(inode, num_bytes);

	btrfs_submit_compressed_write(inode, start, num_bytes, ins.objectid,
				      ins.offset, pages, nr_pages, 0, false);
	ret = orig_count;
	goto out;
out_free_reserved:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_delalloc_release:
	btrfs_delalloc_release_extents(inode, num_bytes);
	btrfs_delalloc_release_metadata(inode, disk_num_bytes, ret < 0);
out_qgroup_free_data:
	if (ret < 0)
		btrfs_qgroup_free_data(inode, data_reserved, start, num_bytes);
out_free_data_space:
	/*
	 * If btrfs_reserve_extent() succeeded, then we already decremented
	 * bytes_may_use.
	 */
	if (!extent_reserved)
		btrfs_free_reserved_data_space_noquota(fs_info, disk_num_bytes);
out_unlock:
	unlock_extent(io_tree, start, end, &cached_state);
out_pages:
	for (i = 0; i < nr_pages; i++) {
		if (pages[i])
			__free_page(pages[i]);
	}
	kvfree(pages);
out:
	if (ret >= 0)
		iocb->ki_pos += encoded->len;
	return ret;
}
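
/*
 * Userspace reaches btrfs_do_encoded_write() via the BTRFS_IOC_ENCODED_WRITE
 * ioctl, handing the filesystem data that is already compressed. A minimal
 * sketch of a caller writing one whole zstd-compressed extent (buffer and
 * length names are illustrative):
 *
 *	struct iovec iov = {
 *		.iov_base = compressed_buf,
 *		.iov_len = compressed_len,
 *	};
 *	struct btrfs_ioctl_encoded_io_args args = {
 *		.iov = &iov,
 *		.iovcnt = 1,
 *		.offset = file_offset,
 *		.len = uncompressed_len,
 *		.unencoded_len = uncompressed_len,
 *		.unencoded_offset = 0,
 *		.compression = BTRFS_ENCODED_IO_COMPRESSION_ZSTD,
 *	};
 *	ssize_t ret = ioctl(fd, BTRFS_IOC_ENCODED_WRITE, &args);
 *
 * The validation at the top of the function is what such callers hit first:
 * a sector-aligned offset, sane extent sizes, and compressed data strictly
 * smaller than the decompressed data.
 */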
#ifdef CONFIG_SWAP
/*
 * Add an entry indicating a block group or device which is pinned by a
 * swapfile. Returns 0 on success, 1 if there is already an entry for it, or a
 * negative errno on failure.
 */
static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr,
				  bool is_block_group)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct btrfs_swapfile_pin *sp, *entry;
	struct rb_node **p;
	struct rb_node *parent = NULL;

	sp = kmalloc(sizeof(*sp), GFP_NOFS);
	if (!sp)
		return -ENOMEM;
	sp->ptr = ptr;
	sp->inode = inode;
	sp->is_block_group = is_block_group;
	sp->bg_extent_count = 1;

	spin_lock(&fs_info->swapfile_pins_lock);
	p = &fs_info->swapfile_pins.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_swapfile_pin, node);
		if (sp->ptr < entry->ptr ||
		    (sp->ptr == entry->ptr && sp->inode < entry->inode)) {
			p = &(*p)->rb_left;
		} else if (sp->ptr > entry->ptr ||
			   (sp->ptr == entry->ptr && sp->inode > entry->inode)) {
			p = &(*p)->rb_right;
		} else {
			if (is_block_group)
				entry->bg_extent_count++;
			spin_unlock(&fs_info->swapfile_pins_lock);
			kfree(sp);
			return 1;
		}
	}
	rb_link_node(&sp->node, parent, p);
	rb_insert_color(&sp->node, &fs_info->swapfile_pins);
	spin_unlock(&fs_info->swapfile_pins_lock);
	return 0;
}
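
/*
 * The pin tree is keyed by (ptr, inode): a given block group or device is
 * pinned at most once per swapfile, and re-pinning a block group only bumps
 * bg_extent_count. That count is what gets handed back to
 * btrfs_dec_block_group_swap_extents() when the pins are freed below.
 */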
/* Free all of the entries pinned by this swapfile. */
static void btrfs_free_swapfile_pins(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct btrfs_swapfile_pin *sp;
	struct rb_node *node, *next;

	spin_lock(&fs_info->swapfile_pins_lock);
	node = rb_first(&fs_info->swapfile_pins);
	while (node) {
		next = rb_next(node);
		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
		if (sp->inode == inode) {
			rb_erase(&sp->node, &fs_info->swapfile_pins);
			if (sp->is_block_group) {
				btrfs_dec_block_group_swap_extents(sp->ptr,
							   sp->bg_extent_count);
				btrfs_put_block_group(sp->ptr);
			}
			kfree(sp);
		}
		node = next;
	}
	spin_unlock(&fs_info->swapfile_pins_lock);
}
struct btrfs_swap_info {
	u64 start;
	u64 block_start;
	u64 block_len;
	u64 lowest_ppage;
	u64 highest_ppage;
	unsigned long nr_pages;
	int nr_extents;
};
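
/*
 * Hand the run of physically contiguous file extents accumulated in @bsi to
 * the swap code as one swap extent, trimmed to whole pages and clamped to the
 * maximum number of pages recorded in the swap header.
 */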
static int btrfs_add_swap_extent(struct swap_info_struct *sis,
				 struct btrfs_swap_info *bsi)
{
	unsigned long nr_pages;
	unsigned long max_pages;
	u64 first_ppage, first_ppage_reported, next_ppage;
	int ret;

	/*
	 * Our swapfile may have had its size extended after the swap header was
	 * written. In that case activating the swapfile should not go beyond
	 * the max size set in the swap header.
	 */
	if (bsi->nr_pages >= sis->max)
		return 0;

	max_pages = sis->max - bsi->nr_pages;
	first_ppage = PAGE_ALIGN(bsi->block_start) >> PAGE_SHIFT;
	next_ppage = PAGE_ALIGN_DOWN(bsi->block_start + bsi->block_len) >> PAGE_SHIFT;

	if (first_ppage >= next_ppage)
		return 0;
	nr_pages = next_ppage - first_ppage;
	nr_pages = min(nr_pages, max_pages);

	first_ppage_reported = first_ppage;
	if (bsi->start == 0)
		first_ppage_reported++;
	if (bsi->lowest_ppage > first_ppage_reported)
		bsi->lowest_ppage = first_ppage_reported;
	if (bsi->highest_ppage < (next_ppage - 1))
		bsi->highest_ppage = next_ppage - 1;

	ret = add_swap_extent(sis, bsi->nr_pages, nr_pages, first_ppage);
	if (ret < 0)
		return ret;
	bsi->nr_extents += ret;
	bsi->nr_pages += nr_pages;
	return 0;
}
static void btrfs_swap_deactivate(struct file *file)
{
	struct inode *inode = file_inode(file);

	btrfs_free_swapfile_pins(inode);
	atomic_dec(&BTRFS_I(inode)->root->nr_swapfiles);
}
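
/*
 * Walk the swapfile's extent maps and hand the backing physical ranges to the
 * swap code. Every extent must be NOCOW, uncompressed and hole-free, and all
 * of them must live on a single device in single-profile block groups. Each
 * block group touched, and the device, are pinned so that balance and device
 * operations stay away while the swapfile is active.
 */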
static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
			       sector_t *span)
{
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_state *cached_state = NULL;
	struct extent_map *em = NULL;
	struct btrfs_device *device = NULL;
	struct btrfs_swap_info bsi = {
		.lowest_ppage = (sector_t)-1ULL,
	};
	int ret = 0;
	u64 isize;
	u64 start;

	/*
	 * If the swap file was just created, make sure delalloc is done. If the
	 * file changes again after this, the user is doing something stupid and
	 * we don't really care.
	 */
	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
	if (ret)
		return ret;

	/*
	 * The inode is locked, so these flags won't change after we check them.
	 */
	if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) {
		btrfs_warn(fs_info, "swapfile must not be compressed");
		return -EINVAL;
	}
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) {
		btrfs_warn(fs_info, "swapfile must not be copy-on-write");
		return -EINVAL;
	}
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
		btrfs_warn(fs_info, "swapfile must not be checksummed");
		return -EINVAL;
	}

	/*
	 * Balance or device remove/replace/resize can move stuff around from
	 * under us. The exclop protection makes sure they aren't running/won't
	 * run concurrently while we are mapping the swap extents, and
	 * fs_info->swapfile_pins prevents them from running while the swap
	 * file is active and moving the extents. Note that this also prevents
	 * a concurrent device add which isn't actually necessary, but it's not
	 * really worth the trouble to allow it.
	 */
	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_SWAP_ACTIVATE)) {
		btrfs_warn(fs_info,
	   "cannot activate swapfile while exclusive operation is running");
		return -EBUSY;
	}

	/*
	 * Prevent snapshot creation while we are activating the swap file.
	 * We do not want to race with snapshot creation. If snapshot creation
	 * already started before we bumped nr_swapfiles from 0 to 1 and
	 * completes before the first write into the swap file after it is
	 * activated, then that write would fall back to COW.
	 */
	if (!btrfs_drew_try_write_lock(&root->snapshot_lock)) {
		btrfs_exclop_finish(fs_info);
		btrfs_warn(fs_info,
	   "cannot activate swapfile because snapshot creation is in progress");
		return -EINVAL;
	}
	/*
	 * Snapshots can create extents which require COW even if NODATACOW is
	 * set. We use this counter to prevent snapshots. We must increment it
	 * before walking the extents because we don't want a concurrent
	 * snapshot to run after we've already checked the extents.
	 *
	 * It is possible that a subvolume is marked for deletion but still not
	 * removed yet. To prevent this race, we check the root status before
	 * activating the swapfile.
	 */
	spin_lock(&root->root_item_lock);
	if (btrfs_root_dead(root)) {
		spin_unlock(&root->root_item_lock);

		btrfs_drew_write_unlock(&root->snapshot_lock);
		btrfs_exclop_finish(fs_info);
		btrfs_warn(fs_info,
		"cannot activate swapfile because subvolume %llu is being deleted",
			root->root_key.objectid);
		return -EPERM;
	}
	atomic_inc(&root->nr_swapfiles);
	spin_unlock(&root->root_item_lock);

	isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);

	lock_extent(io_tree, 0, isize - 1, &cached_state);
	start = 0;
	while (start < isize) {
		u64 logical_block_start, physical_block_start;
		struct btrfs_block_group *bg;
		u64 len = isize - start;

		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}

		if (em->block_start == EXTENT_MAP_HOLE) {
			btrfs_warn(fs_info, "swapfile must not have holes");
			ret = -EINVAL;
			goto out;
		}
		if (em->block_start == EXTENT_MAP_INLINE) {
			/*
			 * It's unlikely we'll ever actually find ourselves
			 * here, as a file small enough to fit inline won't be
			 * big enough to store more than the swap header, but in
			 * case something changes in the future, let's catch it
			 * here rather than later.
			 */
			btrfs_warn(fs_info, "swapfile must not be inline");
			ret = -EINVAL;
			goto out;
		}
		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
			btrfs_warn(fs_info, "swapfile must not be compressed");
			ret = -EINVAL;
			goto out;
		}

		logical_block_start = em->block_start + (start - em->start);
		len = min(len, em->len - (start - em->start));
		free_extent_map(em);
		em = NULL;

		ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL, false, true);
		if (ret < 0) {
			goto out;
		} else if (ret) {
			ret = 0;
		} else {
			btrfs_warn(fs_info,
				   "swapfile must not be copy-on-write");
			ret = -EINVAL;
			goto out;
		}

		em = btrfs_get_chunk_map(fs_info, logical_block_start, len);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}

		if (em->map_lookup->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
			btrfs_warn(fs_info,
				   "swapfile must have single data profile");
			ret = -EINVAL;
			goto out;
		}

		if (device == NULL) {
			device = em->map_lookup->stripes[0].dev;
			ret = btrfs_add_swapfile_pin(inode, device, false);
			if (ret == 1)
				ret = 0;
			else if (ret)
				goto out;
		} else if (device != em->map_lookup->stripes[0].dev) {
			btrfs_warn(fs_info, "swapfile must be on one device");
			ret = -EINVAL;
			goto out;
		}

		physical_block_start = (em->map_lookup->stripes[0].physical +
					(logical_block_start - em->start));
		len = min(len, em->len - (logical_block_start - em->start));
		free_extent_map(em);
		em = NULL;

		bg = btrfs_lookup_block_group(fs_info, logical_block_start);
		if (!bg) {
			btrfs_warn(fs_info,
			   "could not find block group containing swapfile");
			ret = -EINVAL;
			goto out;
		}

		if (!btrfs_inc_block_group_swap_extents(bg)) {
			btrfs_warn(fs_info,
			   "block group for swapfile at %llu is read-only%s",
			   bg->start,
			   atomic_read(&fs_info->scrubs_running) ?
				       " (scrub running)" : "");
			btrfs_put_block_group(bg);
			ret = -EINVAL;
			goto out;
		}

		ret = btrfs_add_swapfile_pin(inode, bg, true);
		if (ret) {
			btrfs_put_block_group(bg);
			if (ret == 1)
				ret = 0;
			else
				goto out;
		}

		if (bsi.block_len &&
		    bsi.block_start + bsi.block_len == physical_block_start) {
			bsi.block_len += len;
		} else {
			if (bsi.block_len) {
				ret = btrfs_add_swap_extent(sis, &bsi);
				if (ret)
					goto out;
			}
			bsi.start = start;
			bsi.block_start = physical_block_start;
			bsi.block_len = len;
		}

		start += len;
	}

	if (bsi.block_len)
		ret = btrfs_add_swap_extent(sis, &bsi);

out:
	if (!IS_ERR_OR_NULL(em))
		free_extent_map(em);

	unlock_extent(io_tree, 0, isize - 1, &cached_state);

	if (ret)
		btrfs_swap_deactivate(file);

	btrfs_drew_write_unlock(&root->snapshot_lock);

	btrfs_exclop_finish(fs_info);

	if (ret)
		return ret;

	if (device)
		sis->bdev = device->bdev;
	*span = bsi.highest_ppage - bsi.lowest_ppage + 1;
	sis->max = bsi.nr_pages;
	sis->pages = bsi.nr_pages - 1;
	sis->highest_bit = bsi.nr_pages - 1;
	return bsi.nr_extents;
}
#else
static void btrfs_swap_deactivate(struct file *file)
{
}

static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
			       sector_t *span)
{
	return -EOPNOTSUPP;
}
#endif
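
/*
 * The activation checks above reject compressed, checksummed, or
 * copy-on-write swapfiles, so userspace has to create the file NOCOW before
 * it gets any data. A sketch of the typical preparation (error handling
 * omitted; the NOCOW attribute can only be set on an empty file, and
 * mkswap(8) plus swapon(2) still follow):
 *
 *	int fd = open("/swapfile", O_RDWR | O_CREAT, 0600);
 *	int attr = 0;
 *
 *	ioctl(fd, FS_IOC_GETFLAGS, &attr);
 *	attr |= FS_NOCOW_FL;
 *	ioctl(fd, FS_IOC_SETFLAGS, &attr);
 *	fallocate(fd, 0, 0, size);
 */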
/*
 * Update the number of bytes used in the VFS' inode. When we replace extents in
 * a range (clone, dedupe, fallocate's zero range), we must update the number of
 * bytes used by the inode in an atomic manner, so that concurrent stat(2) calls
 * always get a correct value.
 */
void btrfs_update_inode_bytes(struct btrfs_inode *inode,
			      const u64 add_bytes,
			      const u64 del_bytes)
{
	if (add_bytes == del_bytes)
		return;

	spin_lock(&inode->lock);
	if (del_bytes > 0)
		inode_sub_bytes(&inode->vfs_inode, del_bytes);
	if (add_bytes > 0)
		inode_add_bytes(&inode->vfs_inode, add_bytes);
	spin_unlock(&inode->lock);
}
/*
 * Verify that there are no ordered extents for a given file range.
 *
 * @inode:   The target inode.
 * @start:   Start offset of the file range, should be sector size aligned.
 * @end:     End offset (inclusive) of the file range, its value plus 1 should
 *           be sector size aligned.
 *
 * This should typically be used for cases where we locked an inode's VFS lock in
 * exclusive mode, we have also locked the inode's i_mmap_lock in exclusive mode,
 * we have flushed all delalloc in the range, we have waited for all ordered
 * extents in the range to complete and finally we have locked the file range in
 * the inode's io_tree.
 */
void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_ordered_extent *ordered;

	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
		return;

	ordered = btrfs_lookup_first_ordered_range(inode, start, end + 1 - start);
	if (ordered) {
		btrfs_err(root->fs_info,
"found unexpected ordered extent in file range [%llu, %llu] for inode %llu root %llu (ordered range [%llu, %llu])",
			  start, end, btrfs_ino(inode), root->root_key.objectid,
			  ordered->file_offset,
			  ordered->file_offset + ordered->num_bytes - 1);
		btrfs_put_ordered_extent(ordered);
	}

	ASSERT(ordered == NULL);
}
static const struct inode_operations btrfs_dir_inode_operations = {
	.getattr	= btrfs_getattr,
	.lookup		= btrfs_lookup,
	.create		= btrfs_create,
	.unlink		= btrfs_unlink,
	.link		= btrfs_link,
	.mkdir		= btrfs_mkdir,
	.rmdir		= btrfs_rmdir,
	.rename		= btrfs_rename2,
	.symlink	= btrfs_symlink,
	.setattr	= btrfs_setattr,
	.mknod		= btrfs_mknod,
	.listxattr	= btrfs_listxattr,
	.permission	= btrfs_permission,
	.get_inode_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
	.tmpfile	= btrfs_tmpfile,
	.fileattr_get	= btrfs_fileattr_get,
	.fileattr_set	= btrfs_fileattr_set,
};

static const struct file_operations btrfs_dir_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate_shared	= btrfs_real_readdir,
	.open		= btrfs_opendir,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_compat_ioctl,
#endif
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
};

/*
 * btrfs doesn't support the bmap operation because swapfiles
 * use bmap to make a mapping of extents in the file. They assume
 * these extents won't change over the life of the file and they
 * use the bmap result to do IO directly to the drive.
 *
 * the btrfs bmap call would return logical addresses that aren't
 * suitable for IO and they also will change frequently as COW
 * operations happen. So, swapfile + btrfs == corruption.
 *
 * For now we're avoiding this by dropping bmap.
 */
static const struct address_space_operations btrfs_aops = {
	.read_folio	= btrfs_read_folio,
	.writepages	= btrfs_writepages,
	.readahead	= btrfs_readahead,
	.direct_IO	= noop_direct_IO,
	.invalidate_folio = btrfs_invalidate_folio,
	.release_folio	= btrfs_release_folio,
	.migrate_folio	= btrfs_migrate_folio,
	.dirty_folio	= filemap_dirty_folio,
	.error_remove_page = generic_error_remove_page,
	.swap_activate	= btrfs_swap_activate,
	.swap_deactivate = btrfs_swap_deactivate,
};

static const struct inode_operations btrfs_file_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.listxattr	= btrfs_listxattr,
	.permission	= btrfs_permission,
	.fiemap		= btrfs_fiemap,
	.get_inode_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
	.fileattr_get	= btrfs_fileattr_get,
	.fileattr_set	= btrfs_fileattr_set,
};

static const struct inode_operations btrfs_special_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.listxattr	= btrfs_listxattr,
	.get_inode_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
};

static const struct inode_operations btrfs_symlink_inode_operations = {
	.get_link	= page_get_link,
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.listxattr	= btrfs_listxattr,
	.update_time	= btrfs_update_time,
};

const struct dentry_operations btrfs_dentry_operations = {
	.d_delete	= btrfs_dentry_delete,
};