// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <crypto/hash.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blk-cgroup.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include <linux/uio.h>
#include <linux/magic.h>
#include <linux/iversion.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/sched/mm.h>
#include <linux/iomap.h>
#include <asm/unaligned.h>
#include <linux/fsverity.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "bio.h"
#include "compression.h"
#include "locking.h"
#include "free-space-cache.h"
#include "props.h"
#include "qgroup.h"
#include "delalloc-space.h"
#include "block-group.h"
#include "space-info.h"
#include "zoned.h"
#include "subpage.h"
#include "inode-item.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "root-tree.h"
#include "defrag.h"
#include "dir-item.h"
#include "file-item.h"
#include "uuid-tree.h"
#include "ioctl.h"
#include "file.h"
#include "acl.h"
#include "relocation.h"
#include "verity.h"
#include "super.h"
#include "orphan.h"
#include "backref.h"
#include "raid-stripe-tree.h"
struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};

struct btrfs_dio_data {
	ssize_t submitted;
	struct extent_changeset *data_reserved;
	struct btrfs_ordered_extent *ordered;
	bool data_space_reserved;
	bool nocow_done;
};

struct btrfs_dio_private {
	/* Range of I/O */
	u64 file_offset;
	u32 bytes;

	/* This must be last */
	struct btrfs_bio bbio;
};

static struct bio_set btrfs_dio_bioset;

struct btrfs_rename_ctx {
	/* Output field. Stores the index number of the old directory entry. */
	u64 index;
};

/*
 * Used by data_reloc_print_warning_inode() to pass needed info for filename
 * resolution and output of error message.
 */
struct data_reloc_warn {
	struct btrfs_path path;
	struct btrfs_fs_info *fs_info;
	u64 extent_item_size;
	u64 logical;
	int mirror_num;
};

/*
 * For the file_extent_tree, we want to hold the inode lock when we look up and
 * update the disk_i_size, but lockdep will complain because elsewhere we take
 * the io_tree lock and then the inode lock when setting delalloc. These two
 * things are unrelated, so make a class for the file_extent_tree so we don't
 * get the two locking patterns mixed up.
 */
static struct lock_class_key file_extent_tree_class;

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct file_operations btrfs_dir_file_operations;

static struct kmem_cache *btrfs_inode_cachep;
static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback);

static noinline int run_delalloc_cow(struct btrfs_inode *inode,
				     struct page *locked_page, u64 start,
				     u64 end, struct writeback_control *wbc,
				     bool pages_dirty);
static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
				       u64 len, u64 orig_start, u64 block_start,
				       u64 block_len, u64 orig_block_len,
				       u64 ram_bytes, int compress_type,
				       int type);
static int data_reloc_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
					  u64 root, void *warn_ctx)
{
	struct data_reloc_warn *warn = warn_ctx;
	struct btrfs_fs_info *fs_info = warn->fs_info;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key key;
	unsigned int nofs_flag;
	u32 nlink;
	int ret;

	local_root = btrfs_get_fs_root(fs_info, root, true);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	/* This makes the path point to (inum INODE_ITEM ioff). */
	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, local_root, &key, &warn->path, 0, 0);
	if (ret) {
		btrfs_put_root(local_root);
		btrfs_release_path(&warn->path);
		goto err;
	}

	eb = warn->path.nodes[0];
	inode_item = btrfs_item_ptr(eb, warn->path.slots[0], struct btrfs_inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(&warn->path);

	nofs_flag = memalloc_nofs_save();
	ipath = init_ipath(4096, local_root, &warn->path);
	memalloc_nofs_restore(nofs_flag);
	if (IS_ERR(ipath)) {
		btrfs_put_root(local_root);
		ret = PTR_ERR(ipath);
		ipath = NULL;
		/*
		 * -ENOMEM, not a critical error, just output a generic error
		 * and continue.
		 */
		btrfs_warn_rl(fs_info,
"checksum error at logical %llu mirror %u root %llu, inode %llu offset %llu",
			      warn->logical, warn->mirror_num, root, inum, offset);
		return ret;
	}
	ret = paths_from_inode(inum, ipath);
	if (ret < 0)
		goto err;

	/*
	 * We deliberately ignore the fact that ipath might have been too small
	 * to hold all of the paths here.
	 */
	for (int i = 0; i < ipath->fspath->elem_cnt; i++) {
		btrfs_warn_rl(fs_info,
"checksum error at logical %llu mirror %u root %llu inode %llu offset %llu length %u links %u (path: %s)",
			      warn->logical, warn->mirror_num, root, inum, offset,
			      fs_info->sectorsize, nlink,
			      (char *)(unsigned long)ipath->fspath->val[i]);
	}

	btrfs_put_root(local_root);
	free_ipath(ipath);
	return 0;

err:
	btrfs_warn_rl(fs_info,
"checksum error at logical %llu mirror %u root %llu inode %llu offset %llu, path resolving failed with ret=%d",
		      warn->logical, warn->mirror_num, root, inum, offset, ret);

	free_ipath(ipath);
	return ret;
}
/*
 * Do extra user-friendly error output (e.g. lookup all the affected files).
 *
 * Falls back to the plain csum error message when the backref lookup is not
 * possible.
 */
static void print_data_reloc_error(const struct btrfs_inode *inode, u64 file_off,
				   const u8 *csum, const u8 *csum_expected,
				   int mirror_num)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_path path = { 0 };
	struct btrfs_key found_key = { 0 };
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	const u32 csum_size = fs_info->csum_size;
	u64 logical;
	u64 flags;
	u32 item_size;
	int ret;

	mutex_lock(&fs_info->reloc_mutex);
	logical = btrfs_get_reloc_bg_bytenr(fs_info);
	mutex_unlock(&fs_info->reloc_mutex);

	if (logical == U64_MAX) {
		btrfs_warn_rl(fs_info, "has data reloc tree but no running relocation");
		btrfs_warn_rl(fs_info,
"csum failed root %lld ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			inode->root->root_key.objectid, btrfs_ino(inode), file_off,
			CSUM_FMT_VALUE(csum_size, csum),
			CSUM_FMT_VALUE(csum_size, csum_expected),
			mirror_num);
		return;
	}

	logical += file_off;
	btrfs_warn_rl(fs_info,
"csum failed root %lld ino %llu off %llu logical %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			inode->root->root_key.objectid,
			btrfs_ino(inode), file_off, logical,
			CSUM_FMT_VALUE(csum_size, csum),
			CSUM_FMT_VALUE(csum_size, csum_expected),
			mirror_num);

	ret = extent_from_logical(fs_info, logical, &path, &found_key, &flags);
	if (ret < 0) {
		btrfs_err_rl(fs_info, "failed to lookup extent item for logical %llu: %d",
			     logical, ret);
		return;
	}
	eb = path.nodes[0];
	ei = btrfs_item_ptr(eb, path.slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size(eb, path.slots[0]);
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		unsigned long ptr = 0;
		u64 ref_root;
		u8 ref_level;

		while (true) {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			if (ret < 0) {
				btrfs_warn_rl(fs_info,
				"failed to resolve tree backref for logical %llu: %d",
					      logical, ret);
				break;
			}
			if (ret > 0)
				break;

			btrfs_warn_rl(fs_info,
"csum error at logical %llu mirror %u: metadata %s (level %d) in tree %llu",
				logical, mirror_num,
				(ref_level ? "node" : "leaf"),
				ref_level, ref_root);
		}
		btrfs_release_path(&path);
	} else {
		struct btrfs_backref_walk_ctx ctx = { 0 };
		struct data_reloc_warn reloc_warn = { 0 };

		btrfs_release_path(&path);

		ctx.bytenr = found_key.objectid;
		ctx.extent_item_pos = logical - found_key.objectid;
		ctx.fs_info = fs_info;

		reloc_warn.logical = logical;
		reloc_warn.extent_item_size = found_key.offset;
		reloc_warn.mirror_num = mirror_num;
		reloc_warn.fs_info = fs_info;

		iterate_extent_inodes(&ctx, true,
				      data_reloc_print_warning_inode, &reloc_warn);
	}
}
static void __cold btrfs_print_data_csum_error(struct btrfs_inode *inode,
		u64 logical_start, u8 *csum, u8 *csum_expected, int mirror_num)
{
	struct btrfs_root *root = inode->root;
	const u32 csum_size = root->fs_info->csum_size;

	/* For data reloc tree, it's better to do a backref lookup instead. */
	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
		return print_data_reloc_error(inode, logical_start, csum,
					      csum_expected, mirror_num);

	/* Output without objectid, which is more meaningful */
	if (root->root_key.objectid >= BTRFS_LAST_FREE_OBJECTID) {
		btrfs_warn_rl(root->fs_info,
"csum failed root %lld ino %lld off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			root->root_key.objectid, btrfs_ino(inode),
			logical_start,
			CSUM_FMT_VALUE(csum_size, csum),
			CSUM_FMT_VALUE(csum_size, csum_expected),
			mirror_num);
	} else {
		btrfs_warn_rl(root->fs_info,
"csum failed root %llu ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			root->root_key.objectid, btrfs_ino(inode),
			logical_start,
			CSUM_FMT_VALUE(csum_size, csum),
			CSUM_FMT_VALUE(csum_size, csum_expected),
			mirror_num);
	}
}
/*
 * Lock inode i_rwsem based on arguments passed.
 *
 * ilock_flags can have the following bits set:
 *
 * BTRFS_ILOCK_SHARED - acquire a shared lock on the inode
 * BTRFS_ILOCK_TRY - try to acquire the lock, if it fails on first attempt
 *		     return -EAGAIN
 * BTRFS_ILOCK_MMAP - acquire a write lock on the i_mmap_lock
 */
int btrfs_inode_lock(struct btrfs_inode *inode, unsigned int ilock_flags)
{
	if (ilock_flags & BTRFS_ILOCK_SHARED) {
		if (ilock_flags & BTRFS_ILOCK_TRY) {
			if (!inode_trylock_shared(&inode->vfs_inode))
				return -EAGAIN;
			else
				return 0;
		}
		inode_lock_shared(&inode->vfs_inode);
	} else {
		if (ilock_flags & BTRFS_ILOCK_TRY) {
			if (!inode_trylock(&inode->vfs_inode))
				return -EAGAIN;
			else
				return 0;
		}
		inode_lock(&inode->vfs_inode);
	}
	if (ilock_flags & BTRFS_ILOCK_MMAP)
		down_write(&inode->i_mmap_lock);
	return 0;
}

/*
 * Unlock inode i_rwsem.
 *
 * ilock_flags should contain the same bits set as passed to btrfs_inode_lock()
 * to decide whether the lock acquired is shared or exclusive.
 */
void btrfs_inode_unlock(struct btrfs_inode *inode, unsigned int ilock_flags)
{
	if (ilock_flags & BTRFS_ILOCK_MMAP)
		up_write(&inode->i_mmap_lock);
	if (ilock_flags & BTRFS_ILOCK_SHARED)
		inode_unlock_shared(&inode->vfs_inode);
	else
		inode_unlock(&inode->vfs_inode);
}
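
/*
 * Example (illustrative only): a caller that must not block, such as a
 * nowait direct I/O path, can combine the flags and bail out when the lock
 * is contended:
 *
 *	if (btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED | BTRFS_ILOCK_TRY))
 *		return -EAGAIN;
 *	...
 *	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED | BTRFS_ILOCK_TRY);
 *
 * Passing the same flag bits to the unlock side is what makes it release
 * the shared rather than the exclusive rwsem, and the i_mmap_lock when
 * BTRFS_ILOCK_MMAP was used.
 */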
/*
 * Cleanup all submitted ordered extents in specified range to handle errors
 * from the btrfs_run_delalloc_range() callback.
 *
 * NOTE: caller must ensure that when an error happens, it can not call
 * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
 * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata
 * to be released, which we want to happen only when finishing the ordered
 * extent (btrfs_finish_ordered_io()).
 */
static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
						 struct page *locked_page,
						 u64 offset, u64 bytes)
{
	unsigned long index = offset >> PAGE_SHIFT;
	unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
	u64 page_start = 0, page_end = 0;
	struct page *page;

	if (locked_page) {
		page_start = page_offset(locked_page);
		page_end = page_start + PAGE_SIZE - 1;
	}

	while (index <= end_index) {
		/*
		 * For the locked page, btrfs_mark_ordered_io_finished() will
		 * be called on it in run_delalloc_range() for the error
		 * handling, which will clear page Ordered and run the ordered
		 * extent accounting.
		 *
		 * Here we can't just clear the Ordered bit, or
		 * btrfs_mark_ordered_io_finished() would skip the accounting
		 * for the page range, and the ordered extent will never finish.
		 */
		if (locked_page && index == (page_start >> PAGE_SHIFT)) {
			index++;
			continue;
		}
		page = find_get_page(inode->vfs_inode.i_mapping, index);
		index++;
		if (!page)
			continue;

		/*
		 * Here we just clear all Ordered bits for every page in the
		 * range, then btrfs_mark_ordered_io_finished() will handle
		 * the ordered extent accounting for the range.
		 */
		btrfs_folio_clamp_clear_ordered(inode->root->fs_info,
						page_folio(page), offset, bytes);
		put_page(page);
	}

	if (locked_page) {
		/* The locked page covers the full range, nothing needs to be done */
		if (bytes + offset <= page_start + PAGE_SIZE)
			return;
		/*
		 * In case this page belongs to the delalloc range being
		 * instantiated then skip it, since the first page of a range is
		 * going to be properly cleaned up by the caller of
		 * run_delalloc_range
		 */
		if (page_start >= offset && page_end <= (offset + bytes - 1)) {
			bytes = offset + bytes - page_offset(locked_page) - PAGE_SIZE;
			offset = page_offset(locked_page) + PAGE_SIZE;
		}
	}

	return btrfs_mark_ordered_io_finished(inode, NULL, offset, bytes, false);
}
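
/*
 * Example caller in this file: submit_uncompressed_range() invokes this
 * helper when run_delalloc_cow() fails, passing the locked page and the
 * whole delalloc range, so the ordered extents already created for that
 * range still get their accounting run and can finish.
 */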
static int btrfs_dirty_inode(struct btrfs_inode *inode);

static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct btrfs_new_inode_args *args)
{
	int err;

	if (args->default_acl) {
		err = __btrfs_set_acl(trans, args->inode, args->default_acl,
				      ACL_TYPE_DEFAULT);
		if (err)
			return err;
	}
	if (args->acl) {
		err = __btrfs_set_acl(trans, args->inode, args->acl, ACL_TYPE_ACCESS);
		if (err)
			return err;
	}
	if (!args->default_acl && !args->acl)
		cache_no_acl(args->inode);
	return btrfs_xattr_security_init(trans, args->inode, args->dir,
					 &args->dentry->d_name);
}
/*
 * This does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree.
 */
static int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_path *path,
				struct btrfs_inode *inode, bool extent_inserted,
				size_t size, size_t compressed_size,
				int compress_type,
				struct page **compressed_pages,
				bool update_i_size)
{
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int ret;
	size_t cur_size = size;
	u64 i_size;

	ASSERT((compressed_size > 0 && compressed_pages) ||
	       (compressed_size == 0 && !compressed_pages));

	if (compressed_size && compressed_pages)
		cur_size = compressed_size;

	if (!extent_inserted) {
		struct btrfs_key key;
		size_t datasize;

		key.objectid = btrfs_ino(inode);
		key.offset = 0;
		key.type = BTRFS_EXTENT_DATA_KEY;

		datasize = btrfs_file_extent_calc_inline_size(cur_size);
		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      datasize);
		if (ret)
			goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (compress_type != BTRFS_COMPRESS_NONE) {
		struct page *cpage;
		int i = 0;

		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min_t(unsigned long, compressed_size,
					 PAGE_SIZE);

			kaddr = kmap_local_page(cpage);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap_local(kaddr);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  compress_type);
	} else {
		page = find_get_page(inode->vfs_inode.i_mapping, 0);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_local_page(page);
		write_extent_buffer(leaf, kaddr, ptr, size);
		kunmap_local(kaddr);
		put_page(page);
	}
	btrfs_mark_buffer_dirty(trans, leaf);
	btrfs_release_path(path);

	/*
	 * We align size to sectorsize for inline extents just for simplicity
	 * sake.
	 */
	ret = btrfs_inode_set_file_extent_range(inode, 0,
					ALIGN(size, root->fs_info->sectorsize));
	if (ret)
		goto fail;

	/*
	 * We're an inline extent, so nobody can extend the file past i_size
	 * without locking a page we already have locked.
	 *
	 * We must do any i_size and inode updates before we unlock the pages.
	 * Otherwise we could end up racing with unlink.
	 */
	i_size = i_size_read(&inode->vfs_inode);
	if (update_i_size && size > i_size) {
		i_size_write(&inode->vfs_inode, size);
		i_size = size;
	}
	inode->disk_i_size = i_size;

fail:
	return ret;
}
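
/*
 * Rough sketch of the resulting leaf item (illustrative): for a 1000 byte
 * uncompressed file, the leaf holds, under key (ino, EXTENT_DATA, 0), a
 * btrfs_file_extent_item of type BTRFS_FILE_EXTENT_INLINE with
 * ram_bytes == 1000, followed directly by the 1000 data bytes, giving an
 * item of btrfs_file_extent_calc_inline_size(1000) bytes. With compression,
 * cur_size is the compressed length while ram_bytes still records the
 * uncompressed size.
 */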
/*
 * Conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 size,
					  size_t compressed_size,
					  int compress_type,
					  struct page **compressed_pages,
					  bool update_i_size)
{
	struct btrfs_drop_extents_args drop_args = { 0 };
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	u64 data_len = (compressed_size ?: size);
	int ret;
	struct btrfs_path *path;

	/*
	 * We can create an inline extent if it ends at or beyond the current
	 * i_size, is no larger than a sector (decompressed), and the (possibly
	 * compressed) data fits in a leaf and the configured maximum inline
	 * size.
	 */
	if (size < i_size_read(&inode->vfs_inode) ||
	    size > fs_info->sectorsize ||
	    data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
	    data_len > fs_info->max_inline)
		return 1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	trans->block_rsv = &inode->block_rsv;

	drop_args.path = path;
	drop_args.start = 0;
	drop_args.end = fs_info->sectorsize;
	drop_args.drop_cache = true;
	drop_args.replace_extent = true;
	drop_args.extent_item_size = btrfs_file_extent_calc_inline_size(data_len);
	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = insert_inline_extent(trans, path, inode, drop_args.extent_inserted,
				   size, compressed_size, compress_type,
				   compressed_pages, update_i_size);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	btrfs_update_inode_bytes(inode, size, drop_args.bytes_found);
	ret = btrfs_update_inode(trans, inode);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	btrfs_set_inode_full_sync(inode);
out:
	/*
	 * Don't forget to free the reserved space, as an inlined extent won't
	 * be counted as a data extent, so free it directly here.
	 * And at reserve time, it's always aligned to page size, so
	 * just free one page here.
	 */
	btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE, NULL);
	btrfs_free_path(path);
	btrfs_end_transaction(trans);
	return ret;
}
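
/*
 * Worked example for the eligibility checks above (illustrative, assuming
 * a 4K sectorsize and the default max_inline mount option of 2048 bytes):
 * a brand new 1500 byte file has size == data_len == 1500 and passes every
 * check, so it is inlined. A 3000 byte file fails the max_inline check
 * (3000 > 2048) and a 5000 byte file fails the sectorsize check, so both
 * get regular extents instead.
 */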
struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
	int compress_type;
	struct list_head list;
};

struct async_chunk {
	struct btrfs_inode *inode;
	struct page *locked_page;
	u64 start;
	u64 end;
	blk_opf_t write_flags;
	struct list_head extents;
	struct cgroup_subsys_state *blkcg_css;
	struct btrfs_work work;
	struct async_cow *async_cow;
};

struct async_cow {
	atomic_t num_chunks;
	struct async_chunk chunks[];
};

static noinline int add_async_extent(struct async_chunk *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages,
				     int compress_type)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	BUG_ON(!async_extent); /* -ENOMEM */
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->pages = pages;
	async_extent->nr_pages = nr_pages;
	async_extent->compress_type = compress_type;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}
/*
 * Check if the inode needs to be submitted to compression, based on mount
 * options, defragmentation, properties or heuristics.
 */
static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
				      u64 end)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	if (!btrfs_inode_can_compress(inode)) {
		WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
			KERN_ERR "BTRFS: unexpected compression for ino %llu\n",
			btrfs_ino(inode));
		return 0;
	}
	/*
	 * Special check for subpage.
	 *
	 * We lock the full page then run each delalloc range in the page, thus
	 * for the following case, we will hit some subpage specific corner case:
	 *
	 * 0		32K		64K
	 * |	|///////|	|///////|
	 *		\- A		\- B
	 *
	 * In the above case, both range A and range B will try to unlock the
	 * full page [0, 64K), so whichever finishes later will find the page
	 * already unlocked, triggering various page lock requirement BUG_ON()s.
	 *
	 * So here we add an artificial limit that subpage compression can only
	 * happen if the range is fully page aligned.
	 *
	 * In theory we only need to ensure the first page is fully covered, but
	 * the tailing partial page would be locked until the full compression
	 * finishes, delaying the write of the other ranges.
	 *
	 * TODO: Make btrfs_run_delalloc_range() lock all delalloc ranges first
	 * to prevent any submitted async extent from unlocking the full page.
	 * By this, we can ensure for the subpage case that only the last
	 * async_cow will unlock the full page.
	 */
	if (fs_info->sectorsize < PAGE_SIZE) {
		if (!PAGE_ALIGNED(start) ||
		    !PAGE_ALIGNED(end + 1))
			return 0;
	}

	/* force compress */
	if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
		return 1;
	/* defrag ioctl */
	if (inode->defrag_compress)
		return 1;
	/* bad compression ratios */
	if (inode->flags & BTRFS_INODE_NOCOMPRESS)
		return 0;
	if (btrfs_test_opt(fs_info, COMPRESS) ||
	    inode->flags & BTRFS_INODE_COMPRESS ||
	    inode->prop_compress)
		return btrfs_compress_heuristic(&inode->vfs_inode, start, end);
	return 0;
}

static inline void inode_should_defrag(struct btrfs_inode *inode,
		u64 start, u64 end, u64 num_bytes, u32 small_write)
{
	/* If this is a small write inside eof, kick off a defrag */
	if (num_bytes < small_write &&
	    (start > 0 || end + 1 < inode->disk_i_size))
		btrfs_add_inode_defrag(NULL, inode, small_write);
}
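
/*
 * For reference, the two call sites in this file use different thresholds:
 * compress_file_range() passes SZ_16K and cow_file_range() passes SZ_64K,
 * so e.g. an 8K overwrite in the middle of a larger file queues the inode
 * for autodefrag on either path.
 */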
/*
 * Work queue callback to start compression on a file and pages.
 *
 * This is done inside an ordered work queue, and the compression is spread
 * across many cpus.  The actual IO submission is step two, and the ordered work
 * queue takes care of making sure that happens in the same order things were
 * put onto the queue by writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an entry onto the
 * work queue to write the uncompressed bytes.  This makes sure that both
 * compressed inodes and uncompressed inodes are written in the same order that
 * the flusher thread sent them down.
 */
static void compress_file_range(struct btrfs_work *work)
{
	struct async_chunk *async_chunk =
		container_of(work, struct async_chunk, work);
	struct btrfs_inode *inode = async_chunk->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	u64 blocksize = fs_info->sectorsize;
	u64 start = async_chunk->start;
	u64 end = async_chunk->end;
	u64 actual_end;
	u64 i_size;
	int ret = 0;
	struct page **pages;
	unsigned long nr_pages;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	unsigned int poff;
	int i;
	int compress_type = fs_info->compress_type;

	inode_should_defrag(inode, start, end, end - start + 1, SZ_16K);

	/*
	 * We need to call clear_page_dirty_for_io on each page in the range.
	 * Otherwise applications with the file mmap'd can wander in and change
	 * the page contents while we are compressing them.
	 */
	extent_range_clear_dirty_for_io(&inode->vfs_inode, start, end);

	/*
	 * We need to save i_size before now because it could change in between
	 * us evaluating the size and assigning it.  This is because we lock and
	 * unlock the page in truncate and fallocate, and then modify the i_size
	 * later on.
	 *
	 * The barriers are to emulate READ_ONCE, remove that once i_size_read
	 * does that for us.
	 */
	barrier();
	i_size = i_size_read(&inode->vfs_inode);
	barrier();
	actual_end = min_t(u64, i_size, end + 1);

again:
	pages = NULL;
	nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
	nr_pages = min_t(unsigned long, nr_pages, BTRFS_MAX_COMPRESSED_PAGES);

	/*
	 * we don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time.  So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/*
	 * Skip compression for a small file range (<= blocksize) that
	 * isn't an inline extent, since it doesn't save disk space at all.
	 */
	if (total_compressed <= blocksize &&
	   (start > 0 || end + 1 < inode->disk_i_size))
		goto cleanup_and_bail_uncompressed;

	/*
	 * For subpage case, we require full page alignment for the sector
	 * aligned range.
	 * Thus we must also check against @actual_end, not just @end.
	 */
	if (blocksize < PAGE_SIZE) {
		if (!PAGE_ALIGNED(start) ||
		    !PAGE_ALIGNED(round_up(actual_end, blocksize)))
			goto cleanup_and_bail_uncompressed;
	}

	total_compressed = min_t(unsigned long, total_compressed,
			BTRFS_MAX_UNCOMPRESSED);
	total_in = 0;
	ret = 0;

	/*
	 * We do compression for mount -o compress and when the inode has not
	 * been flagged as NOCOMPRESS.  This flag can change at any time if we
	 * discover bad compression ratios.
	 */
	if (!inode_need_compress(inode, start, end))
		goto cleanup_and_bail_uncompressed;

	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
	if (!pages) {
		/*
		 * Memory allocation failure is not a fatal error, we can fall
		 * back to uncompressed code.
		 */
		goto cleanup_and_bail_uncompressed;
	}

	if (inode->defrag_compress)
		compress_type = inode->defrag_compress;
	else if (inode->prop_compress)
		compress_type = inode->prop_compress;

	/* Compression level is applied here. */
	ret = btrfs_compress_pages(compress_type | (fs_info->compress_level << 4),
				   mapping, start, pages, &nr_pages, &total_in,
				   &total_compressed);
	if (ret)
		goto mark_incompressible;

	/*
	 * Zero the tail end of the last page, as we might be sending it down
	 * to disk.
	 */
	poff = offset_in_page(total_compressed);
	if (poff)
		memzero_page(pages[nr_pages - 1], poff, PAGE_SIZE - poff);

	/*
	 * Try to create an inline extent.
	 *
	 * If we didn't compress the entire range, try to create an uncompressed
	 * inline extent, else a compressed one.
	 *
	 * Check cow_file_range() for why we don't even try to create inline
	 * extent for the subpage case.
	 */
	if (start == 0 && fs_info->sectorsize == PAGE_SIZE) {
		if (total_in < actual_end) {
			ret = cow_file_range_inline(inode, actual_end, 0,
						    BTRFS_COMPRESS_NONE, NULL,
						    false);
		} else {
			ret = cow_file_range_inline(inode, actual_end,
						    total_compressed,
						    compress_type, pages,
						    false);
		}
		if (ret <= 0) {
			unsigned long clear_flags = EXTENT_DELALLOC |
				EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
				EXTENT_DO_ACCOUNTING;

			if (ret < 0)
				mapping_set_error(mapping, -EIO);

			/*
			 * inline extent creation worked or returned error,
			 * we don't need to create any more async work items.
			 * Unlock and free up our temp pages.
			 *
			 * We use DO_ACCOUNTING here because we need the
			 * delalloc_release_metadata to be done _after_ we drop
			 * our outstanding extent for clearing delalloc for this
			 * range.
			 */
			extent_clear_unlock_delalloc(inode, start, end,
						     NULL,
						     clear_flags,
						     PAGE_UNLOCK |
						     PAGE_START_WRITEBACK |
						     PAGE_END_WRITEBACK);
			goto free_pages;
		}
	}

	/*
	 * We aren't doing an inline extent.  Round the compressed size up to a
	 * block size boundary so the allocator does sane things.
	 */
	total_compressed = ALIGN(total_compressed, blocksize);

	/*
	 * One last check to make sure the compression is really a win, compare
	 * the page count read with the blocks on disk, compression must free at
	 * least one sector.
	 */
	total_in = round_up(total_in, fs_info->sectorsize);
	if (total_compressed + blocksize > total_in)
		goto mark_incompressible;

	/*
	 * The async work queues will take care of doing actual allocation on
	 * disk for these compressed pages, and will submit the bios.
	 */
	add_async_extent(async_chunk, start, total_in, total_compressed, pages,
			 nr_pages, compress_type);
	if (start + total_in < end) {
		start += total_in;
		cond_resched();
		goto again;
	}
	return;

mark_incompressible:
	if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) && !inode->prop_compress)
		inode->flags |= BTRFS_INODE_NOCOMPRESS;
cleanup_and_bail_uncompressed:
	add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0,
			 BTRFS_COMPRESS_NONE);
free_pages:
	if (pages) {
		for (i = 0; i < nr_pages; i++) {
			WARN_ON(pages[i]->mapping);
			btrfs_free_compr_page(pages[i]);
		}
		kfree(pages);
	}
}

static void free_async_extent_pages(struct async_extent *async_extent)
{
	int i;

	if (!async_extent->pages)
		return;

	for (i = 0; i < async_extent->nr_pages; i++) {
		WARN_ON(async_extent->pages[i]->mapping);
		btrfs_free_compr_page(async_extent->pages[i]);
	}
	kfree(async_extent->pages);
	async_extent->nr_pages = 0;
	async_extent->pages = NULL;
}

static void submit_uncompressed_range(struct btrfs_inode *inode,
				      struct async_extent *async_extent,
				      struct page *locked_page)
{
	u64 start = async_extent->start;
	u64 end = async_extent->start + async_extent->ram_size - 1;
	int ret;
	struct writeback_control wbc = {
		.sync_mode		= WB_SYNC_ALL,
		.range_start		= start,
		.range_end		= end,
		.no_cgroup_owner	= 1,
	};

	wbc_attach_fdatawrite_inode(&wbc, &inode->vfs_inode);
	ret = run_delalloc_cow(inode, locked_page, start, end, &wbc, false);
	wbc_detach_inode(&wbc);
	if (ret < 0) {
		btrfs_cleanup_ordered_extents(inode, locked_page, start, end - start + 1);
		if (locked_page) {
			const u64 page_start = page_offset(locked_page);

			set_page_writeback(locked_page);
			end_page_writeback(locked_page);
			btrfs_mark_ordered_io_finished(inode, locked_page,
						       page_start, PAGE_SIZE,
						       !ret);
			mapping_set_error(locked_page->mapping, ret);
			unlock_page(locked_page);
		}
	}
}
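
/*
 * A note on the compression win check in compress_file_range() above
 * (illustrative numbers, 4K blocksize): a 16K range that compresses to 13K
 * is rounded up to 16K by the ALIGN there, and 16K + 4K > 16K fails the
 * test, so the range is marked incompressible. The same range compressed
 * to 12K passes, since 12K + 4K is not greater than 16K, saving exactly
 * one 4K block on disk.
 */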
static void submit_one_async_extent(struct async_chunk *async_chunk,
				    struct async_extent *async_extent,
				    u64 *alloc_hint)
{
	struct btrfs_inode *inode = async_chunk->inode;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_key ins;
	struct page *locked_page = NULL;
	struct extent_map *em;
	int ret = 0;
	u64 start = async_extent->start;
	u64 end = async_extent->start + async_extent->ram_size - 1;

	if (async_chunk->blkcg_css)
		kthread_associate_blkcg(async_chunk->blkcg_css);

	/*
	 * If async_chunk->locked_page is in the async_extent range, we need to
	 * handle it.
	 */
	if (async_chunk->locked_page) {
		u64 locked_page_start = page_offset(async_chunk->locked_page);
		u64 locked_page_end = locked_page_start + PAGE_SIZE - 1;

		if (!(start >= locked_page_end || end <= locked_page_start))
			locked_page = async_chunk->locked_page;
	}
	lock_extent(io_tree, start, end, NULL);

	if (async_extent->compress_type == BTRFS_COMPRESS_NONE) {
		submit_uncompressed_range(inode, async_extent, locked_page);
		goto done;
	}

	ret = btrfs_reserve_extent(root, async_extent->ram_size,
				   async_extent->compressed_size,
				   async_extent->compressed_size,
				   0, *alloc_hint, &ins, 1, 1);
	if (ret) {
		/*
		 * Here we used to try again by going back to non-compressed
		 * path for ENOSPC.  But we can't reserve space even for
		 * compressed size, how could it work for uncompressed size
		 * which requires larger size?  So here we directly go error
		 * path.
		 */
		goto out_free;
	}

	/* Here we're doing allocation and writeback of the compressed pages */
	em = create_io_em(inode, start,
			  async_extent->ram_size,	/* len */
			  start,			/* orig_start */
			  ins.objectid,			/* block_start */
			  ins.offset,			/* block_len */
			  ins.offset,			/* orig_block_len */
			  async_extent->ram_size,	/* ram_bytes */
			  async_extent->compress_type,
			  BTRFS_ORDERED_COMPRESSED);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_free_reserve;
	}
	free_extent_map(em);

	ordered = btrfs_alloc_ordered_extent(inode, start,	/* file_offset */
				       async_extent->ram_size,	/* num_bytes */
				       async_extent->ram_size,	/* ram_bytes */
				       ins.objectid,	/* disk_bytenr */
				       ins.offset,	/* disk_num_bytes */
				       0,		/* offset */
				       1 << BTRFS_ORDERED_COMPRESSED,
				       async_extent->compress_type);
	if (IS_ERR(ordered)) {
		btrfs_drop_extent_map_range(inode, start, end, false);
		ret = PTR_ERR(ordered);
		goto out_free_reserve;
	}
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);

	/* Clear dirty, set writeback and unlock the pages. */
	extent_clear_unlock_delalloc(inode, start, end,
			NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
			PAGE_UNLOCK | PAGE_START_WRITEBACK);
	btrfs_submit_compressed_write(ordered,
			    async_extent->pages,	/* compressed_pages */
			    async_extent->nr_pages,
			    async_chunk->write_flags, true);
	*alloc_hint = ins.objectid + ins.offset;
done:
	if (async_chunk->blkcg_css)
		kthread_associate_blkcg(NULL);
	kfree(async_extent);
	return;

out_free_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_free:
	mapping_set_error(inode->vfs_inode.i_mapping, -EIO);
	extent_clear_unlock_delalloc(inode, start, end,
				     NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DELALLOC_NEW |
				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
				     PAGE_UNLOCK | PAGE_START_WRITEBACK |
				     PAGE_END_WRITEBACK);
	free_async_extent_pages(async_extent);
	if (async_chunk->blkcg_css)
		kthread_associate_blkcg(NULL);
	btrfs_debug(fs_info,
"async extent submission failed root=%lld inode=%llu start=%llu len=%llu ret=%d",
		    root->root_key.objectid, btrfs_ino(inode), start,
		    async_extent->ram_size, ret);
	kfree(async_extent);
}

static u64 get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
				      u64 num_bytes)
{
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	u64 alloc_hint = 0;

	read_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, start, num_bytes);
	if (em) {
		/*
		 * if block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint.  If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			em = search_extent_mapping(em_tree, 0, 0);
			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
				alloc_hint = em->block_start;
			if (em)
				free_extent_map(em);
		} else {
			alloc_hint = em->block_start;
			free_extent_map(em);
		}
	}
	read_unlock(&em_tree->lock);

	return alloc_hint;
}
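
/*
 * Example of the hint in action (illustrative): if the inode's previous
 * extent sits at disk bytenr X, the allocator is seeded with X, and the
 * callers keep updating the hint with ins.objectid + ins.offset after each
 * allocation (see submit_one_async_extent() and cow_file_range()), so
 * consecutive delalloc extents of one file tend to end up next to each
 * other on disk.
 */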
/*
 * When extent_io.c finds a delayed allocation range in the file,
 * the call backs end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * When this function fails, it unlocks all pages except @locked_page.
 *
 * When this function successfully creates an inline extent, it returns 1 and
 * unlocks all pages including locked_page and starts I/O on them.
 * (In reality inline extents are limited to a single page, so locked_page is
 * the only page handled anyway).
 *
 * When this function succeeds and creates a normal extent, the page locking
 * status depends on the passed in flags:
 *
 * - If @keep_locked is set, all pages are kept locked.
 * - Else all pages except for @locked_page are unlocked.
 *
 * When a failure happens in the second or later iteration of the
 * while-loop, the ordered extents created in previous iterations are kept
 * intact. So, the caller must clean them up by calling
 * btrfs_cleanup_ordered_extents(). See btrfs_run_delalloc_range() for an
 * example.
 */
static noinline int cow_file_range(struct btrfs_inode *inode,
				   struct page *locked_page, u64 start, u64 end,
				   u64 *done_offset,
				   bool keep_locked, bool no_inline)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 alloc_hint = 0;
	u64 orig_start = start;
	u64 num_bytes;
	unsigned long ram_size;
	u64 cur_alloc_size = 0;
	u64 min_alloc_size;
	u64 blocksize = fs_info->sectorsize;
	struct btrfs_key ins;
	struct extent_map *em;
	unsigned clear_bits;
	unsigned long page_ops;
	bool extent_reserved = false;
	int ret = 0;

	if (btrfs_is_free_space_inode(inode)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	num_bytes = ALIGN(end - start + 1, blocksize);
	num_bytes = max(blocksize, num_bytes);
	ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy));

	inode_should_defrag(inode, start, end, num_bytes, SZ_64K);

	/*
	 * Due to the page size limit, for subpage we can only trigger the
	 * writeback for the dirty sectors of page, that means data writeback
	 * is doing more writeback than what we want.
	 *
	 * This is especially unexpected for some call sites like fallocate,
	 * where we only increase i_size after everything is done.
	 * This means we can trigger inline extent even if we didn't want to.
	 * So here we skip inline extent creation completely.
	 */
	if (start == 0 && fs_info->sectorsize == PAGE_SIZE && !no_inline) {
		u64 actual_end = min_t(u64, i_size_read(&inode->vfs_inode),
				       end + 1);

		/* lets try to make an inline extent */
		ret = cow_file_range_inline(inode, actual_end, 0,
					    BTRFS_COMPRESS_NONE, NULL, false);
		if (ret == 0) {
			/*
			 * We use DO_ACCOUNTING here because we need the
			 * delalloc_release_metadata to be run _after_ we drop
			 * our outstanding extent for clearing delalloc for this
			 * range.
			 */
			extent_clear_unlock_delalloc(inode, start, end,
				     locked_page,
				     EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
				     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
				     PAGE_START_WRITEBACK | PAGE_END_WRITEBACK);
			/*
			 * locked_page is locked by the caller of
			 * writepage_delalloc(), not locked by
			 * __process_pages_contig().
			 *
			 * We can't let __process_pages_contig() to unlock it,
			 * as it doesn't have any subpage::writers recorded.
			 *
			 * Here we manually unlock the page, since the caller
			 * can't determine if it's an inline extent or a
			 * compressed extent.
			 */
			unlock_page(locked_page);
			ret = 1;
			goto done;
		} else if (ret < 0) {
			goto out_unlock;
		}
	}

	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);

	/*
	 * Relocation relies on the relocated extents to have exactly the same
	 * size as the original extents. Normally writeback for relocation data
	 * extents follows a NOCOW path because relocation preallocates the
	 * extents. However, due to an operation such as scrub turning a block
	 * group to RO mode, it may fallback to COW mode, so we must make sure
	 * an extent allocated during COW has exactly the requested size and can
	 * not be split into smaller extents, otherwise relocation breaks and
	 * fails during the stage where it updates the bytenr of file extent
	 * items.
	 */
	if (btrfs_is_data_reloc_root(root))
		min_alloc_size = num_bytes;
	else
		min_alloc_size = fs_info->sectorsize;

	while (num_bytes > 0) {
		struct btrfs_ordered_extent *ordered;

		cur_alloc_size = num_bytes;
		ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
					   min_alloc_size, 0, alloc_hint,
					   &ins, 1, 1);
		if (ret == -EAGAIN) {
			/*
			 * btrfs_reserve_extent only returns -EAGAIN for zoned
			 * file systems, which is an indication that there are
			 * no active zones to allocate from at the moment.
			 *
			 * If this is the first loop iteration, wait for at
			 * least one zone to finish before retrying the
			 * allocation.  Otherwise ask the caller to write out
			 * the already allocated blocks before coming back to
			 * us, or return -ENOSPC if it can't handle retries.
			 */
			ASSERT(btrfs_is_zoned(fs_info));
			if (start == orig_start) {
				wait_on_bit_io(&inode->root->fs_info->flags,
					       BTRFS_FS_NEED_ZONE_FINISH,
					       TASK_UNINTERRUPTIBLE);
				continue;
			}
			if (done_offset) {
				*done_offset = start - 1;
				return 0;
			}
			ret = -ENOSPC;
		}
		if (ret < 0)
			goto out_unlock;
		cur_alloc_size = ins.offset;
		extent_reserved = true;

		ram_size = ins.offset;
		em = create_io_em(inode, start, ins.offset, /* len */
				  start, /* orig_start */
				  ins.objectid, /* block_start */
				  ins.offset, /* block_len */
				  ins.offset, /* orig_block_len */
				  ram_size, /* ram_bytes */
				  BTRFS_COMPRESS_NONE, /* compress_type */
				  BTRFS_ORDERED_REGULAR /* type */);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out_reserve;
		}
		free_extent_map(em);

		ordered = btrfs_alloc_ordered_extent(inode, start, ram_size,
					ram_size, ins.objectid, cur_alloc_size,
					0, 1 << BTRFS_ORDERED_REGULAR,
					BTRFS_COMPRESS_NONE);
		if (IS_ERR(ordered)) {
			ret = PTR_ERR(ordered);
			goto out_drop_extent_cache;
		}

		if (btrfs_is_data_reloc_root(root)) {
			ret = btrfs_reloc_clone_csums(ordered);

			/*
			 * Only drop cache here, and process as normal.
			 *
			 * We must not allow extent_clear_unlock_delalloc()
			 * at out_unlock label to free meta of this ordered
			 * extent, as its meta should be freed by
			 * btrfs_finish_ordered_io().
			 *
			 * So we must continue until @start is increased to
			 * skip current ordered extent.
			 */
			if (ret)
				btrfs_drop_extent_map_range(inode, start,
							    start + ram_size - 1,
							    false);
		}
		btrfs_put_ordered_extent(ordered);

		btrfs_dec_block_group_reservations(fs_info, ins.objectid);

		/*
		 * We're not doing compressed IO, don't unlock the first page
		 * (which the caller expects to stay locked), don't clear any
		 * dirty bits and don't set any writeback bits
		 *
		 * Do set the Ordered (Private2) bit so we know this page was
		 * properly setup for writepage.
		 */
		page_ops = (keep_locked ? 0 : PAGE_UNLOCK);
		page_ops |= PAGE_SET_ORDERED;

		extent_clear_unlock_delalloc(inode, start, start + ram_size - 1,
					     locked_page,
					     EXTENT_LOCKED | EXTENT_DELALLOC,
					     page_ops);
		if (num_bytes < cur_alloc_size)
			num_bytes = 0;
		else
			num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
		extent_reserved = false;

		/*
		 * btrfs_reloc_clone_csums() error, since start is increased
		 * extent_clear_unlock_delalloc() at out_unlock label won't
		 * free metadata of current ordered extent, we're OK to exit.
		 */
		if (ret)
			goto out_unlock;
	}
done:
	if (done_offset)
		*done_offset = end;
	return ret;

out_drop_extent_cache:
	btrfs_drop_extent_map_range(inode, start, start + ram_size - 1, false);
out_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_unlock:
	/*
	 * Now, we have three regions to clean up:
	 *
	 * |-------(1)----|---(2)---|-------------(3)----------|
	 * `- orig_start  `- start  `- start + cur_alloc_size  `- end
	 *
	 * We process each region below.
	 */

	clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
		EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
	page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;

	/*
	 * For the range (1). We have already instantiated the ordered extents
	 * for this region. They are cleaned up by
	 * btrfs_cleanup_ordered_extents() in e.g,
	 * btrfs_run_delalloc_range(). EXTENT_LOCKED | EXTENT_DELALLOC are
	 * already cleared in the above loop. And, EXTENT_DELALLOC_NEW |
	 * EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV are handled by the cleanup
	 * function.
	 *
	 * However, in case of @keep_locked, we still need to unlock the pages
	 * (except @locked_page) to ensure all the pages are unlocked.
	 */
	if (keep_locked && orig_start < start) {
		if (!locked_page)
			mapping_set_error(inode->vfs_inode.i_mapping, ret);
		extent_clear_unlock_delalloc(inode, orig_start, start - 1,
					     locked_page, 0, page_ops);
	}

	/*
	 * For the range (2). If we reserved an extent for our delalloc range
	 * (or a subrange) and failed to create the respective ordered extent,
	 * then it means that when we reserved the extent we decremented the
	 * extent's size from the data space_info's bytes_may_use counter and
	 * incremented the space_info's bytes_reserved counter by the same
	 * amount. We must make sure extent_clear_unlock_delalloc() does not try
	 * to decrement again the data space_info's bytes_may_use counter,
	 * therefore we do not pass it the flag EXTENT_CLEAR_DATA_RESV.
	 */
	if (extent_reserved) {
		extent_clear_unlock_delalloc(inode, start,
					     start + cur_alloc_size - 1,
					     locked_page,
					     clear_bits,
					     page_ops);
		start += cur_alloc_size;
	}

	/*
	 * For the range (3). We never touched the region. In addition to the
	 * clear_bits above, we add EXTENT_CLEAR_DATA_RESV to release the data
	 * space_info's bytes_may_use counter, reserved in
	 * btrfs_check_data_free_space().
	 */
	if (start < end) {
		clear_bits |= EXTENT_CLEAR_DATA_RESV;
		extent_clear_unlock_delalloc(inode, start, end, locked_page,
					     clear_bits, page_ops);
	}
	return ret;
}
/*
 * Phase two of compressed writeback.  This is the ordered portion of the code,
 * which only gets called in the order the work was queued.  We walk all the
 * async extents created by compress_file_range and send them down to the disk.
 *
 * If called with @do_free == true then it'll try to finish the work and free
 * the work struct eventually.
 */
static noinline void submit_compressed_extents(struct btrfs_work *work, bool do_free)
{
	struct async_chunk *async_chunk = container_of(work, struct async_chunk,
						       work);
	struct btrfs_fs_info *fs_info = btrfs_work_owner(work);
	struct async_extent *async_extent;
	unsigned long nr_pages;
	u64 alloc_hint = 0;

	if (do_free) {
		struct async_chunk *async_chunk;
		struct async_cow *async_cow;

		async_chunk = container_of(work, struct async_chunk, work);
		btrfs_add_delayed_iput(async_chunk->inode);
		if (async_chunk->blkcg_css)
			css_put(async_chunk->blkcg_css);

		async_cow = async_chunk->async_cow;
		if (atomic_dec_and_test(&async_cow->num_chunks))
			kvfree(async_cow);

		return;
	}

	nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
		PAGE_SHIFT;

	while (!list_empty(&async_chunk->extents)) {
		async_extent = list_entry(async_chunk->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);
		submit_one_async_extent(async_chunk, async_extent, &alloc_hint);
	}

	/* atomic_sub_return implies a barrier */
	if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
	    5 * SZ_1M)
		cond_wake_up_nomb(&fs_info->async_submit_wait);
}
static bool run_delalloc_compressed(struct btrfs_inode *inode,
				    struct page *locked_page, u64 start,
				    u64 end, struct writeback_control *wbc)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc);
	struct async_cow *ctx;
	struct async_chunk *async_chunk;
	unsigned long nr_pages;
	u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K);
	int i;
	unsigned nofs_flag;
	const blk_opf_t write_flags = wbc_to_write_flags(wbc);

	nofs_flag = memalloc_nofs_save();
	ctx = kvmalloc(struct_size(ctx, chunks, num_chunks), GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);
	if (!ctx)
		return false;

	unlock_extent(&inode->io_tree, start, end, NULL);
	set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags);

	async_chunk = ctx->chunks;
	atomic_set(&ctx->num_chunks, num_chunks);

	for (i = 0; i < num_chunks; i++) {
		u64 cur_end = min(end, start + SZ_512K - 1);

		/*
		 * igrab is called higher up in the call chain, take only the
		 * lightweight reference for the callback lifetime
		 */
		ihold(&inode->vfs_inode);
		async_chunk[i].async_cow = ctx;
		async_chunk[i].inode = inode;
		async_chunk[i].start = start;
		async_chunk[i].end = cur_end;
		async_chunk[i].write_flags = write_flags;
		INIT_LIST_HEAD(&async_chunk[i].extents);

		/*
		 * The locked_page comes all the way from writepage and it's
		 * the original page we were actually given.  As we spread
		 * this large delalloc region across multiple async_chunk
		 * structs, only the first struct needs a pointer to
		 * locked_page.
		 *
		 * This way we don't need racey decisions about who is supposed
		 * to unlock it.
		 */
		if (locked_page) {
			/*
			 * Depending on the compressibility, the pages might or
			 * might not go through async.  We want all of them to
			 * be accounted against wbc once.  Let's do it here
			 * before the paths diverge.  wbc accounting is used
			 * only for foreign writeback detection and doesn't
			 * need full accuracy.  Just account the whole thing
			 * against the first page.
			 */
			wbc_account_cgroup_owner(wbc, locked_page,
						 cur_end - start);
			async_chunk[i].locked_page = locked_page;
			locked_page = NULL;
		} else {
			async_chunk[i].locked_page = NULL;
		}

		if (blkcg_css != blkcg_root_css) {
			css_get(blkcg_css);
			async_chunk[i].blkcg_css = blkcg_css;
			async_chunk[i].write_flags |= REQ_BTRFS_CGROUP_PUNT;
		} else {
			async_chunk[i].blkcg_css = NULL;
		}

		btrfs_init_work(&async_chunk[i].work, compress_file_range,
				submit_compressed_extents);

		nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
		atomic_add(nr_pages, &fs_info->async_delalloc_pages);

		btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work);

		start = cur_end + 1;
	}
	return true;
}
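
/*
 * Example (illustrative): a 1 MiB delalloc range gives
 * num_chunks = DIV_ROUND_UP(end - start, SZ_512K) = 2, so two async_chunk
 * work items are queued, covering [start, start + 512K - 1] and
 * [start + 512K, end], and only the first one carries locked_page.
 */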
/*
 * Run the delalloc range from start to end, and write back any dirty pages
 * covered by the range.
 */
static noinline int run_delalloc_cow(struct btrfs_inode *inode,
				     struct page *locked_page, u64 start,
				     u64 end, struct writeback_control *wbc,
				     bool pages_dirty)
{
	u64 done_offset = end;
	int ret;

	while (start <= end) {
		ret = cow_file_range(inode, locked_page, start, end, &done_offset,
				     true, false);
		if (ret)
			return ret;
		extent_write_locked_range(&inode->vfs_inode, locked_page, start,
					  done_offset, wbc, pages_dirty);
		start = done_offset + 1;
	}

	return 1;
}
static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
					u64 bytenr, u64 num_bytes, bool nowait)
{
	struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bytenr);
	struct btrfs_ordered_sum *sums;
	int ret;
	LIST_HEAD(list);

	ret = btrfs_lookup_csums_list(csum_root, bytenr, bytenr + num_bytes - 1,
				      &list, 0, nowait);
	if (ret == 0 && list_empty(&list))
		return 0;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	if (ret < 0)
		return ret;
	return 1;
}
static int fallback_to_cow(struct btrfs_inode *inode, struct page *locked_page,
			   const u64 start, const u64 end)
{
	const bool is_space_ino = btrfs_is_free_space_inode(inode);
	const bool is_reloc_ino = btrfs_is_data_reloc_root(inode->root);
	const u64 range_bytes = end + 1 - start;
	struct extent_io_tree *io_tree = &inode->io_tree;
	u64 range_start = start;
	u64 count;
	int ret;

	/*
	 * If EXTENT_NORESERVE is set it means that when the buffered write was
	 * made we had not enough available data space and therefore we did not
	 * reserve data space for it, since we thought we could do NOCOW for the
	 * respective file range (either there is a prealloc extent or the inode
	 * has the NOCOW bit set).
	 *
	 * However when we need to fallback to COW mode (because for example the
	 * block group for the corresponding extent was turned to RO mode by a
	 * scrub or relocation) we need to do the following:
	 *
	 * 1) We increment the bytes_may_use counter of the data space info.
	 *    If COW succeeds, it allocates a new data extent and after doing
	 *    that it decrements the space info's bytes_may_use counter and
	 *    increments its bytes_reserved counter by the same amount (we do
	 *    this at btrfs_add_reserved_bytes()).  So we need to increment the
	 *    bytes_may_use counter to compensate (when space is reserved at
	 *    buffered write time, the bytes_may_use counter is incremented);
	 *
	 * 2) We clear the EXTENT_NORESERVE bit from the range. We do this so
	 *    that if the COW path fails for any reason, it decrements (through
	 *    extent_clear_unlock_delalloc()) the bytes_may_use counter of the
	 *    data space info, which we incremented in the step above.
	 *
	 * If we need to fallback to cow and the inode corresponds to a free
	 * space cache inode or an inode of the data relocation tree, we must
	 * also increment bytes_may_use of the data space_info for the same
	 * reason. Space caches and relocated data extents always get a prealloc
	 * extent for them, however scrub or balance may have set the block
	 * group that contains that extent to RO mode and therefore force COW
	 * when starting writeback.
	 */
	count = count_range_bits(io_tree, &range_start, end, range_bytes,
				 EXTENT_NORESERVE, 0, NULL);
	if (count > 0 || is_space_ino || is_reloc_ino) {
		u64 bytes = count;
		struct btrfs_fs_info *fs_info = inode->root->fs_info;
		struct btrfs_space_info *sinfo = fs_info->data_sinfo;

		if (is_space_ino || is_reloc_ino)
			bytes = range_bytes;

		spin_lock(&sinfo->lock);
		btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes);
		spin_unlock(&sinfo->lock);

		if (count > 0)
			clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE,
					 NULL);
	}

	/*
	 * Don't try to create inline extents, as a mix of inline extent that
	 * is written out and unlocked directly and a normal NOCOW extent
	 * doesn't work.
	 */
	ret = cow_file_range(inode, locked_page, start, end, NULL, false, true);
	ASSERT(ret != 1);
	return ret;
}
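
/*
 * Worked example for the accounting above (illustrative): if a 128K range
 * with EXTENT_NORESERVE set on all of it falls back here, count_range_bits()
 * returns 128K, so bytes_may_use grows by 128K to compensate for the
 * reservation that was skipped at buffered write time. The COW allocation
 * then moves those 128K from bytes_may_use to bytes_reserved in
 * btrfs_add_reserved_bytes(), exactly like a normally reserved write.
 */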
struct can_nocow_file_extent_args {
	/* Input fields. */

	/* Start file offset of the range we want to NOCOW. */
	u64 start;
	/* End file offset (inclusive) of the range we want to NOCOW. */
	u64 end;
	bool writeback_path;
	bool strict;
	/*
	 * Free the path passed to can_nocow_file_extent() once it's not needed
	 * anymore.
	 */
	bool free_path;

	/* Output fields. Only set when can_nocow_file_extent() returns 1. */

	u64 disk_bytenr;
	u64 disk_num_bytes;
	u64 extent_offset;
	/* Number of bytes that can be written to in NOCOW mode. */
	u64 num_bytes;
};

/*
 * Check if we can NOCOW the file extent that the path points to.
 * This function may return with the path released, so the caller should check
 * if path->nodes[0] is NULL or not if it needs to use the path afterwards.
 *
 * Returns: < 0 on error
 *            0 if we can not NOCOW
 *            1 if we can NOCOW
 */
static int can_nocow_file_extent(struct btrfs_path *path,
				 struct btrfs_key *key,
				 struct btrfs_inode *inode,
				 struct can_nocow_file_extent_args *args)
{
	const bool is_freespace_inode = btrfs_is_free_space_inode(inode);
	struct extent_buffer *leaf = path->nodes[0];
	struct btrfs_root *root = inode->root;
	struct btrfs_file_extent_item *fi;
	u64 extent_end;
	u8 extent_type;
	int can_nocow = 0;
	int ret = 0;
	bool nowait = path->nowait;

	fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
	extent_type = btrfs_file_extent_type(leaf, fi);

	if (extent_type == BTRFS_FILE_EXTENT_INLINE)
		goto out;

	/* Can't access these fields unless we know it's not an inline extent. */
	args->disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	args->disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	args->extent_offset = btrfs_file_extent_offset(leaf, fi);

	if (!(inode->flags & BTRFS_INODE_NODATACOW) &&
	    extent_type == BTRFS_FILE_EXTENT_REG)
		goto out;

	/*
	 * If the extent was created before the generation where the last snapshot
	 * for its subvolume was created, then this implies the extent is shared,
	 * hence we must COW.
	 */
	if (!args->strict &&
	    btrfs_file_extent_generation(leaf, fi) <=
	    btrfs_root_last_snapshot(&root->root_item))
		goto out;

	/* An explicit hole, must COW. */
	if (args->disk_bytenr == 0)
		goto out;

	/* Compressed/encrypted/encoded extents must be COWed. */
	if (btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		goto out;

	extent_end = btrfs_file_extent_end(path);

	/*
	 * The following checks can be expensive, as they need to take other
	 * locks and do btree or rbtree searches, so release the path to avoid
	 * blocking other tasks for too long.
	 */
	btrfs_release_path(path);

	ret = btrfs_cross_ref_exist(root, btrfs_ino(inode),
				    key->offset - args->extent_offset,
				    args->disk_bytenr, args->strict, path);
	WARN_ON_ONCE(ret > 0 && is_freespace_inode);
	if (ret != 0)
		goto out;

	if (args->free_path) {
		/*
		 * We don't need the path anymore, plus through the
		 * csum_exist_in_range() call below we will end up allocating
		 * another path. So free the path to avoid unnecessary extra
		 * memory usage.
		 */
		btrfs_free_path(path);
		path = NULL;
	}

	/* If there are pending snapshots for this root, we must COW. */
	if (args->writeback_path && !is_freespace_inode &&
	    atomic_read(&root->snapshot_force_cow))
		goto out;

	args->disk_bytenr += args->extent_offset;
	args->disk_bytenr += args->start - key->offset;
	args->num_bytes = min(args->end + 1, extent_end) - args->start;

	/*
	 * Force COW if csums exist in the range. This ensures that csums for a
	 * given extent are either valid or do not exist.
	 */
	ret = csum_exist_in_range(root->fs_info, args->disk_bytenr, args->num_bytes,
				  nowait);
	WARN_ON_ONCE(ret > 0 && is_freespace_inode);
	if (ret != 0)
		goto out;

	can_nocow = 1;
 out:
	if (args->free_path && path)
		btrfs_free_path(path);

	return ret < 0 ? ret : can_nocow;
}
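
/*
 * Typical use (sketch): run_delalloc_nocow() below fills in end and
 * writeback_path, sets args->start to the current offset, points the path
 * at a file extent item and calls this helper. On a return of 1 it uses
 * args->disk_bytenr and args->num_bytes to set up the NOCOW write, on 0 it
 * falls back to COW via fallback_to_cow(), and on < 0 it aborts the
 * delalloc run.
 */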
/*
 * Run the NOCOW writeback path. This checks for snapshots or COW copies
 * of the extents that exist in the file, and COWs the file as required.
 *
 * If no COW copies or snapshots exist, we write directly to the existing
 * blocks on disk.
 */
static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
				       struct page *locked_page,
				       const u64 start, const u64 end)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_root *root = inode->root;
	struct btrfs_path *path;
	u64 cow_start = (u64)-1;
	u64 cur_offset = start;
	int ret;
	bool check_prev = true;
	u64 ino = btrfs_ino(inode);
	struct can_nocow_file_extent_args nocow_args = { 0 };

	/*
	 * Normally on a zoned device we're only doing COW writes, but in case
	 * of relocation on a zoned filesystem serializes I/O so that we're only
	 * writing sequentially and can end up here as well.
	 */
	ASSERT(!btrfs_is_zoned(fs_info) || btrfs_is_data_reloc_root(root));

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}

	nocow_args.end = end;
	nocow_args.writeback_path = true;

	while (cur_offset <= end) {
		struct btrfs_block_group *nocow_bg = NULL;
		struct btrfs_ordered_extent *ordered;
		struct btrfs_key found_key;
		struct btrfs_file_extent_item *fi;
		struct extent_buffer *leaf;
		u64 extent_end;
		u64 ram_bytes;
		u64 nocow_end;
		int extent_type;
		bool is_prealloc;

		ret = btrfs_lookup_file_extent(NULL, root, path, ino,
					       cur_offset, 0);
		if (ret < 0)
			goto error;

		/*
		 * If there is no extent for our range when doing the initial
		 * search, then go back to the previous slot as it will be the
		 * one containing the search offset
		 */
		if (ret > 0 && path->slots[0] > 0 && check_prev) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);
			if (found_key.objectid == ino &&
			    found_key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		check_prev = false;
next_slot:
		/* Go to next leaf if we have exhausted the current one */
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto error;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		/* Didn't find anything for our INO */
		if (found_key.objectid > ino)
			break;
		/*
		 * Keep searching until we find an EXTENT_ITEM or there are no
		 * more extents for this inode
		 */
		if (WARN_ON_ONCE(found_key.objectid < ino) ||
		    found_key.type < BTRFS_EXTENT_DATA_KEY) {
			path->slots[0]++;
			goto next_slot;
		}

		/* Found key is not EXTENT_DATA_KEY or starts after req range */
		if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
		    found_key.offset > end)
			break;

		/*
		 * If the found extent starts after requested offset, then
		 * adjust extent_end to be right before this extent begins
		 */
		if (found_key.offset > cur_offset) {
			extent_end = found_key.offset;
			extent_type = 0;
			goto must_cow;
		}

		/*
		 * Found extent which begins before our range and potentially
		 * intersects it
		 */
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);
		/* If this is triggered then we have a memory corruption. */
		ASSERT(extent_type < BTRFS_NR_FILE_EXTENT_TYPES);
		if (WARN_ON(extent_type >= BTRFS_NR_FILE_EXTENT_TYPES)) {
			ret = -EUCLEAN;
			goto error;
		}
		ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
		extent_end = btrfs_file_extent_end(path);

		/*
		 * If the extent we got ends before our current offset, skip to
		 * the next extent.
		 */
		if (extent_end <= cur_offset) {
			path->slots[0]++;
			goto next_slot;
		}

		nocow_args.start = cur_offset;
		ret = can_nocow_file_extent(path, &found_key, inode, &nocow_args);
		if (ret < 0)
			goto error;
		if (ret == 0)
			goto must_cow;

		ret = 0;
		nocow_bg = btrfs_inc_nocow_writers(fs_info, nocow_args.disk_bytenr);
		if (!nocow_bg) {
must_cow:
			/*
			 * If we can't perform NOCOW writeback for the range,
			 * then record the beginning of the range that needs to
			 * be COWed.  It will be written out before the next
			 * NOCOW range if we find one, or when exiting this
			 * loop.
			 */
			if (cow_start == (u64)-1)
				cow_start = cur_offset;
			cur_offset = extent_end;
			if (cur_offset > end)
				break;
			if (!path->nodes[0])
				continue;
			path->slots[0]++;
			goto next_slot;
		}

		/*
		 * COW range from cow_start to found_key.offset - 1. As the key
		 * will contain the beginning of the first extent that can be
		 * NOCOW, following one which needs to be COW'ed
		 */
		if (cow_start != (u64)-1) {
			ret = fallback_to_cow(inode, locked_page,
					      cow_start, found_key.offset - 1);
			cow_start = (u64)-1;
			if (ret) {
				btrfs_dec_nocow_writers(nocow_bg);
				goto error;
			}
		}

		nocow_end = cur_offset + nocow_args.num_bytes - 1;
		is_prealloc = extent_type == BTRFS_FILE_EXTENT_PREALLOC;
		if (is_prealloc) {
			u64 orig_start = found_key.offset - nocow_args.extent_offset;
			struct extent_map *em;

			em = create_io_em(inode, cur_offset, nocow_args.num_bytes,
					  orig_start,
					  nocow_args.disk_bytenr, /* block_start */
					  nocow_args.num_bytes, /* block_len */
					  nocow_args.disk_num_bytes, /* orig_block_len */
					  ram_bytes, BTRFS_COMPRESS_NONE,
					  BTRFS_ORDERED_PREALLOC);
			if (IS_ERR(em)) {
				btrfs_dec_nocow_writers(nocow_bg);
				ret = PTR_ERR(em);
				goto error;
			}
			free_extent_map(em);
		}

		ordered = btrfs_alloc_ordered_extent(inode, cur_offset,
				nocow_args.num_bytes, nocow_args.num_bytes,
				nocow_args.disk_bytenr, nocow_args.num_bytes, 0,
				is_prealloc
				? (1 << BTRFS_ORDERED_PREALLOC)
				: (1 << BTRFS_ORDERED_NOCOW),
				BTRFS_COMPRESS_NONE);
		btrfs_dec_nocow_writers(nocow_bg);
		if (IS_ERR(ordered)) {
			if (is_prealloc)
				btrfs_drop_extent_map_range(inode, cur_offset,
							    nocow_end, false);
			ret = PTR_ERR(ordered);
			goto error;
		}

		if (btrfs_is_data_reloc_root(root))
			/*
			 * Error handled later, as we must prevent
			 * extent_clear_unlock_delalloc() in error handler
			 * from freeing metadata of created ordered extent.
			 */
			ret = btrfs_reloc_clone_csums(ordered);
		btrfs_put_ordered_extent(ordered);

		extent_clear_unlock_delalloc(inode, cur_offset, nocow_end,
					     locked_page, EXTENT_LOCKED |
					     EXTENT_DELALLOC |
					     EXTENT_CLEAR_DATA_RESV,
					     PAGE_UNLOCK | PAGE_SET_ORDERED);

		cur_offset = extent_end;

		/*
		 * btrfs_reloc_clone_csums() error, now we're OK to call error
		 * handler, as metadata for created ordered extent will only
		 * be freed by btrfs_finish_ordered_io().
		 */
		if (ret)
			goto error;
		if (cur_offset > end)
			break;
	}
	btrfs_release_path(path);

	if (cur_offset <= end && cow_start == (u64)-1)
		cow_start = cur_offset;

	if (cow_start != (u64)-1) {
		cur_offset = end;
		ret = fallback_to_cow(inode, locked_page, cow_start, end);
		cow_start = (u64)-1;
		if (ret)
			goto error;
	}

	btrfs_free_path(path);
	return 0;

error:
	/*
	 * If an error happened while a COW region is outstanding, cur_offset
	 * needs to be reset to cow_start to ensure the COW region is unlocked
	 * as well.
	 */
	if (cow_start != (u64)-1)
		cur_offset = cow_start;
	if (cur_offset < end)
		extent_clear_unlock_delalloc(inode, cur_offset, end,
					     locked_page, EXTENT_LOCKED |
					     EXTENT_DELALLOC | EXTENT_DEFRAG |
					     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
					     PAGE_START_WRITEBACK |
					     PAGE_END_WRITEBACK);
	btrfs_free_path(path);
	return ret;
}
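/*
 * Shape of the loop above (summary): walk the file extent items covering
 * [start, end]; ranges that pass can_nocow_file_extent() get an ordered
 * extent pointing at the existing disk bytes, while everything in between
 * is batched into [cow_start, ...) and flushed through fallback_to_cow().
 */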
static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end)
{
	if (inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) {
		if (inode->defrag_bytes &&
		    test_range_bit_exists(&inode->io_tree, start, end, EXTENT_DEFRAG))
			return false;
		return true;
	}
	return false;
}
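/*
 * In other words (summary): NOCOW is attempted only for inodes flagged
 * NODATACOW or PREALLOC, and even then a pending defrag on the range forces
 * the COW path so the defragmented data gets rewritten.
 */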
/*
 * Function to process delayed allocation (create CoW) for ranges which are
 * being touched for the first time.
 */
int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page,
			     u64 start, u64 end, struct writeback_control *wbc)
{
	const bool zoned = btrfs_is_zoned(inode->root->fs_info);
	int ret;

	/*
	 * The range must cover part of the @locked_page, or a return of 1
	 * can confuse the caller.
	 */
	ASSERT(!(end <= page_offset(locked_page) ||
		 start >= page_offset(locked_page) + PAGE_SIZE));

	if (should_nocow(inode, start, end)) {
		ret = run_delalloc_nocow(inode, locked_page, start, end);
		goto out;
	}

	if (btrfs_inode_can_compress(inode) &&
	    inode_need_compress(inode, start, end) &&
	    run_delalloc_compressed(inode, locked_page, start, end, wbc))
		return 1;

	if (zoned)
		ret = run_delalloc_cow(inode, locked_page, start, end, wbc,
				       true);
	else
		ret = cow_file_range(inode, locked_page, start, end, NULL,
				     false, false);

out:
	if (ret < 0)
		btrfs_cleanup_ordered_extents(inode, locked_page, start,
					      end - start + 1);
	return ret;
}
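/*
 * Dispatch order above (summary): NOCOW/prealloc inodes first, then
 * compression (which takes over the whole range if it succeeds), then plain
 * COW, where zoned filesystems go through run_delalloc_cow() to keep the
 * writes sequential.
 */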
void btrfs_split_delalloc_extent(struct btrfs_inode *inode,
				 struct extent_state *orig, u64 split)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 size;

	/* not delalloc, ignore it */
	if (!(orig->state & EXTENT_DELALLOC))
		return;

	size = orig->end - orig->start + 1;
	if (size > fs_info->max_extent_size) {
		u32 num_extents;
		u64 new_size;

		/*
		 * See the explanation in btrfs_merge_delalloc_extent, the same
		 * applies here, just in reverse.
		 */
		new_size = orig->end - split + 1;
		num_extents = count_max_extents(fs_info, new_size);
		new_size = split - orig->start;
		num_extents += count_max_extents(fs_info, new_size);
		if (count_max_extents(fs_info, size) >= num_extents)
			return;
	}

	spin_lock(&inode->lock);
	btrfs_mod_outstanding_extents(inode, 1);
	spin_unlock(&inode->lock);
}
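/*
 * Worked example (illustrative): with max_extent_size = 128M, splitting a
 * 128M + 4K delalloc extent at the 128M boundary yields two pieces needing
 * 1 outstanding extent each, while the original already needed 2, so the
 * counts balance and nothing is added. Splitting a 256M extent at 4K
 * instead yields pieces needing 1 + 2 = 3 extents versus the original 2,
 * so one extra outstanding extent is accounted above.
 */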
/*
 * Handle merged delayed allocation extents so we can keep track of new extents
 * that are just merged onto old extents, such as when we are doing sequential
 * writes, so we can properly account for the metadata space we'll need.
 */
void btrfs_merge_delalloc_extent(struct btrfs_inode *inode, struct extent_state *new,
				 struct extent_state *other)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 new_size, old_size;
	u32 num_extents;

	/* not delalloc, ignore it */
	if (!(other->state & EXTENT_DELALLOC))
		return;

	if (new->start > other->start)
		new_size = new->end - other->start + 1;
	else
		new_size = other->end - new->start + 1;

	/* we're not bigger than the max, unreserve the space and go */
	if (new_size <= fs_info->max_extent_size) {
		spin_lock(&inode->lock);
		btrfs_mod_outstanding_extents(inode, -1);
		spin_unlock(&inode->lock);
		return;
	}

	/*
	 * We have to add up either side to figure out how many extents were
	 * accounted for before we merged into one big extent.  If the number of
	 * extents we accounted for is <= the amount we need for the new range
	 * then we can return, otherwise drop.  Think of it like this
	 *
	 * [ 4k][MAX_SIZE]
	 *
	 * So we've grown the extent by a MAX_SIZE extent, this would mean we
	 * need 2 outstanding extents, on one side we have 1 and the other side
	 * we have 1 so they are == and we can return.  But in this case
	 *
	 * [MAX_SIZE+4k][MAX_SIZE+4k]
	 *
	 * Each range on their own accounts for 2 extents, but merged together
	 * they are only 3 extents worth of accounting, so we need to drop in
	 * this case.
	 */
	old_size = other->end - other->start + 1;
	num_extents = count_max_extents(fs_info, old_size);
	old_size = new->end - new->start + 1;
	num_extents += count_max_extents(fs_info, old_size);
	if (count_max_extents(fs_info, new_size) >= num_extents)
		return;

	spin_lock(&inode->lock);
	btrfs_mod_outstanding_extents(inode, -1);
	spin_unlock(&inode->lock);
}
static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
				      struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	spin_lock(&root->delalloc_lock);
	if (list_empty(&inode->delalloc_inodes)) {
		list_add_tail(&inode->delalloc_inodes, &root->delalloc_inodes);
		set_bit(BTRFS_INODE_IN_DELALLOC_LIST, &inode->runtime_flags);
		root->nr_delalloc_inodes++;
		if (root->nr_delalloc_inodes == 1) {
			spin_lock(&fs_info->delalloc_root_lock);
			BUG_ON(!list_empty(&root->delalloc_root));
			list_add_tail(&root->delalloc_root,
				      &fs_info->delalloc_roots);
			spin_unlock(&fs_info->delalloc_root_lock);
		}
	}
	spin_unlock(&root->delalloc_lock);
}
void __btrfs_del_delalloc_inode(struct btrfs_root *root,
				struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!list_empty(&inode->delalloc_inodes)) {
		list_del_init(&inode->delalloc_inodes);
		clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
			  &inode->runtime_flags);
		root->nr_delalloc_inodes--;
		if (!root->nr_delalloc_inodes) {
			ASSERT(list_empty(&root->delalloc_inodes));
			spin_lock(&fs_info->delalloc_root_lock);
			BUG_ON(list_empty(&root->delalloc_root));
			list_del_init(&root->delalloc_root);
			spin_unlock(&fs_info->delalloc_root_lock);
		}
	}
}
static void btrfs_del_delalloc_inode(struct btrfs_root *root,
				     struct btrfs_inode *inode)
{
	spin_lock(&root->delalloc_lock);
	__btrfs_del_delalloc_inode(root, inode);
	spin_unlock(&root->delalloc_lock);
}
/*
 * Properly track delayed allocation bytes in the inode and maintain the
 * list of inodes that have pending delalloc work to be done.
 */
void btrfs_set_delalloc_extent(struct btrfs_inode *inode, struct extent_state *state,
			       u32 bits)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	if ((bits & EXTENT_DEFRAG) && !(bits & EXTENT_DELALLOC))
		WARN_ON(1);
	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if (!(state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = inode->root;
		u64 len = state->end + 1 - state->start;
		u32 num_extents = count_max_extents(fs_info, len);
		bool do_list = !btrfs_is_free_space_inode(inode);

		spin_lock(&inode->lock);
		btrfs_mod_outstanding_extents(inode, num_extents);
		spin_unlock(&inode->lock);

		/* For sanity tests */
		if (btrfs_is_testing(fs_info))
			return;

		percpu_counter_add_batch(&fs_info->delalloc_bytes, len,
					 fs_info->delalloc_batch);
		spin_lock(&inode->lock);
		inode->delalloc_bytes += len;
		if (bits & EXTENT_DEFRAG)
			inode->defrag_bytes += len;
		if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
					 &inode->runtime_flags))
			btrfs_add_delalloc_inodes(root, inode);
		spin_unlock(&inode->lock);
	}

	if (!(state->state & EXTENT_DELALLOC_NEW) &&
	    (bits & EXTENT_DELALLOC_NEW)) {
		spin_lock(&inode->lock);
		inode->new_delalloc_bytes += state->end + 1 - state->start;
		spin_unlock(&inode->lock);
	}
}
/*
 * Once a range is no longer delalloc this function ensures that proper
 * accounting happens.
 */
void btrfs_clear_delalloc_extent(struct btrfs_inode *inode,
				 struct extent_state *state, u32 bits)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 len = state->end + 1 - state->start;
	u32 num_extents = count_max_extents(fs_info, len);

	if ((state->state & EXTENT_DEFRAG) && (bits & EXTENT_DEFRAG)) {
		spin_lock(&inode->lock);
		inode->defrag_bytes -= len;
		spin_unlock(&inode->lock);
	}

	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = inode->root;
		bool do_list = !btrfs_is_free_space_inode(inode);

		spin_lock(&inode->lock);
		btrfs_mod_outstanding_extents(inode, -num_extents);
		spin_unlock(&inode->lock);

		/*
		 * We don't reserve metadata space for space cache inodes so we
		 * don't need to call delalloc_release_metadata if there is an
		 * error.
		 */
		if (bits & EXTENT_CLEAR_META_RESV &&
		    root != fs_info->tree_root)
			btrfs_delalloc_release_metadata(inode, len, false);

		/* For sanity tests. */
		if (btrfs_is_testing(fs_info))
			return;

		if (!btrfs_is_data_reloc_root(root) &&
		    do_list && !(state->state & EXTENT_NORESERVE) &&
		    (bits & EXTENT_CLEAR_DATA_RESV))
			btrfs_free_reserved_data_space_noquota(fs_info, len);

		percpu_counter_add_batch(&fs_info->delalloc_bytes, -len,
					 fs_info->delalloc_batch);
		spin_lock(&inode->lock);
		inode->delalloc_bytes -= len;
		if (do_list && inode->delalloc_bytes == 0 &&
		    test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
			     &inode->runtime_flags))
			btrfs_del_delalloc_inode(root, inode);
		spin_unlock(&inode->lock);
	}

	if ((state->state & EXTENT_DELALLOC_NEW) &&
	    (bits & EXTENT_DELALLOC_NEW)) {
		spin_lock(&inode->lock);
		ASSERT(inode->new_delalloc_bytes >= len);
		inode->new_delalloc_bytes -= len;
		if (bits & EXTENT_ADD_INODE_BYTES)
			inode_add_bytes(&inode->vfs_inode, len);
		spin_unlock(&inode->lock);
	}
}
static int btrfs_extract_ordered_extent(struct btrfs_bio *bbio,
					struct btrfs_ordered_extent *ordered)
{
	u64 start = (u64)bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
	u64 len = bbio->bio.bi_iter.bi_size;
	struct btrfs_ordered_extent *new;
	int ret;

	/* Must always be called for the beginning of an ordered extent. */
	if (WARN_ON_ONCE(start != ordered->disk_bytenr))
		return -EINVAL;

	/* No need to split if the ordered extent covers the entire bio. */
	if (ordered->disk_num_bytes == len) {
		refcount_inc(&ordered->refs);
		bbio->ordered = ordered;
		return 0;
	}

	/*
	 * Don't split the extent_map for NOCOW extents, as we're writing into
	 * a pre-existing one.
	 */
	if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
		ret = split_extent_map(bbio->inode, bbio->file_offset,
				       ordered->num_bytes, len,
				       ordered->disk_bytenr);
		if (ret)
			return ret;
	}

	new = btrfs_split_ordered_extent(ordered, len);
	if (IS_ERR(new))
		return PTR_ERR(new);
	bbio->ordered = new;
	return 0;
}
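/*
 * Summary (illustrative): if a 1M ordered extent has its I/O submitted as
 * two 512K bios, the call for the first bio splits the ordered extent (and,
 * for COW writes, the extent map) at 512K, so each bio completes against an
 * ordered extent that exactly matches its range.
 */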
/*
 * Given a list of ordered sums, record them in the inode.  This happens
 * at IO completion time based on sums calculated at bio submission time.
 */
static int add_pending_csums(struct btrfs_trans_handle *trans,
			     struct list_head *list)
{
	struct btrfs_ordered_sum *sum;
	struct btrfs_root *csum_root = NULL;
	int ret;

	list_for_each_entry(sum, list, list) {
		trans->adding_csums = true;
		if (!csum_root)
			csum_root = btrfs_csum_root(trans->fs_info,
						    sum->logical);
		ret = btrfs_csum_file_blocks(trans, csum_root, sum);
		trans->adding_csums = false;
		if (ret)
			return ret;
	}
	return 0;
}
static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
					 const u64 start,
					 const u64 len,
					 struct extent_state **cached_state)
{
	u64 search_start = start;
	const u64 end = start + len - 1;

	while (search_start < end) {
		const u64 search_len = end - search_start + 1;
		struct extent_map *em;
		u64 em_len;
		int ret = 0;

		em = btrfs_get_extent(inode, NULL, 0, search_start, search_len);
		if (IS_ERR(em))
			return PTR_ERR(em);

		if (em->block_start != EXTENT_MAP_HOLE)
			goto next;

		em_len = em->len;
		if (em->start < search_start)
			em_len -= search_start - em->start;
		if (em_len > search_len)
			em_len = search_len;

		ret = set_extent_bit(&inode->io_tree, search_start,
				     search_start + em_len - 1,
				     EXTENT_DELALLOC_NEW, cached_state);
next:
		search_start = extent_map_end(em);
		free_extent_map(em);
		if (ret)
			return ret;
	}
	return 0;
}
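/*
 * In short: only the holes inside [start, start + len) get tagged
 * EXTENT_DELALLOC_NEW here, so that on ordered extent completion the
 * inode's byte count grows by exactly the amount of newly allocated data
 * and not by ranges that merely overwrite existing extents.
 */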
int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
			      unsigned int extra_bits,
			      struct extent_state **cached_state)
{
	WARN_ON(PAGE_ALIGNED(end));

	if (start >= i_size_read(&inode->vfs_inode) &&
	    !(inode->flags & BTRFS_INODE_PREALLOC)) {
		/*
		 * There can't be any extents following eof in this case so just
		 * set the delalloc new bit for the range directly.
		 */
		extra_bits |= EXTENT_DELALLOC_NEW;
	} else {
		int ret;

		ret = btrfs_find_new_delalloc_bytes(inode, start,
						    end + 1 - start,
						    cached_state);
		if (ret)
			return ret;
	}

	return set_extent_bit(&inode->io_tree, start, end,
			      EXTENT_DELALLOC | extra_bits, cached_state);
}
/* see btrfs_writepage_start_hook for details on why this is required */
struct btrfs_writepage_fixup {
	struct page *page;
	struct btrfs_inode *inode;
	struct btrfs_work work;
};
static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
	struct btrfs_writepage_fixup *fixup =
		container_of(work, struct btrfs_writepage_fixup, work);
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct extent_changeset *data_reserved = NULL;
	struct page *page = fixup->page;
	struct btrfs_inode *inode = fixup->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 page_start = page_offset(page);
	u64 page_end = page_offset(page) + PAGE_SIZE - 1;
	int ret = 0;
	bool free_delalloc_space = true;

	/*
	 * This is similar to page_mkwrite, we need to reserve the space before
	 * we take the page lock.
	 */
	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
					   PAGE_SIZE);
again:
	lock_page(page);

	/*
	 * Before we queued this fixup, we took a reference on the page.
	 * page->mapping may go NULL, but it shouldn't be moved to a different
	 * address space.
	 */
	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
		/*
		 * Unfortunately this is a little tricky, either
		 *
		 * 1) We got here and our page had already been dealt with and
		 *    we reserved our space, thus ret == 0, so we need to just
		 *    drop our space reservation and bail.  This can happen the
		 *    first time we come into the fixup worker, or could happen
		 *    while waiting for the ordered extent.
		 * 2) Our page was already dealt with, but we happened to get an
		 *    ENOSPC above from the btrfs_delalloc_reserve_space.  In
		 *    this case we obviously don't have anything to release, but
		 *    because the page was already dealt with we don't want to
		 *    mark the page with an error, so make sure we're resetting
		 *    ret to 0.  This is why we have this check _before_ the ret
		 *    check, because we do not want to have a surprise ENOSPC
		 *    when the page was already properly dealt with.
		 */
		if (!ret) {
			btrfs_delalloc_release_extents(inode, PAGE_SIZE);
			btrfs_delalloc_release_space(inode, data_reserved,
						     page_start, PAGE_SIZE,
						     true);
		}
		ret = 0;
		goto out_page;
	}

	/*
	 * We can't mess with the page state unless it is locked, so now that
	 * it is locked bail if we failed to make our space reservation.
	 */
	if (ret)
		goto out_page;

	lock_extent(&inode->io_tree, page_start, page_end, &cached_state);

	/* already ordered? We're done */
	if (PageOrdered(page))
		goto out_reserved;

	ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
	if (ordered) {
		unlock_extent(&inode->io_tree, page_start, page_end,
			      &cached_state);
		unlock_page(page);
		btrfs_start_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
					&cached_state);
	if (ret)
		goto out_reserved;

	/*
	 * Everything went as planned, we're now the owner of a dirty page with
	 * delayed allocation bits set and space reserved for our COW
	 * destination.
	 *
	 * The page was dirty when we started, nothing should have cleaned it.
	 */
	BUG_ON(!PageDirty(page));
	free_delalloc_space = false;
out_reserved:
	btrfs_delalloc_release_extents(inode, PAGE_SIZE);
	if (free_delalloc_space)
		btrfs_delalloc_release_space(inode, data_reserved, page_start,
					     PAGE_SIZE, true);
	unlock_extent(&inode->io_tree, page_start, page_end, &cached_state);
out_page:
	if (ret) {
		/*
		 * We hit ENOSPC or other errors.  Update the mapping and page
		 * to reflect the errors and clean the page.
		 */
		mapping_set_error(page->mapping, ret);
		btrfs_mark_ordered_io_finished(inode, page, page_start,
					       PAGE_SIZE, !ret);
		clear_page_dirty_for_io(page);
	}
	btrfs_folio_clear_checked(fs_info, page_folio(page), page_start, PAGE_SIZE);
	unlock_page(page);
	put_page(page);
	kfree(fixup);
	extent_changeset_free(data_reserved);
	/*
	 * As a precaution, do a delayed iput in case it would be the last iput
	 * that could need flushing space. Recursing back to fixup worker would
	 * deadlock.
	 */
	btrfs_add_delayed_iput(inode);
}
/*
 * There are a few paths in the higher layers of the kernel that directly
 * set the page dirty bit without asking the filesystem if it is a
 * good idea.  This causes problems because we want to make sure COW
 * properly happens and the data=ordered rules are followed.
 *
 * In our case any range that doesn't have the ORDERED bit set
 * hasn't been properly setup for IO.  We kick off an async process
 * to fix it up.  The async helper will wait for ordered extents, set
 * the delalloc bit and make it safe to write the page.
 */
int btrfs_writepage_cow_fixup(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_writepage_fixup *fixup;

	/* This page has ordered extent covering it already */
	if (PageOrdered(page))
		return 0;

	/*
	 * PageChecked is set below when we create a fixup worker for this page,
	 * don't try to create another one if we're already PageChecked()
	 *
	 * The extent_io writepage code will redirty the page if we send back
	 * EAGAIN.
	 */
	if (PageChecked(page))
		return -EAGAIN;

	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
	if (!fixup)
		return -EAGAIN;

	/*
	 * We are already holding a reference to this inode from
	 * write_cache_pages.  We need to hold it because the space reservation
	 * takes place outside of the page lock, and we can't trust
	 * page->mapping outside of the page lock.
	 */
	ihold(inode);
	btrfs_folio_set_checked(fs_info, page_folio(page), page_offset(page), PAGE_SIZE);
	get_page(page);
	btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL);
	fixup->page = page;
	fixup->inode = BTRFS_I(inode);
	btrfs_queue_work(fs_info->fixup_workers, &fixup->work);

	return -EAGAIN;
}
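/*
 * Life cycle sketch (illustrative): writepage finds a dirty page without
 * the ordered bit -> this function marks the page checked, takes page and
 * inode references and queues btrfs_writepage_fixup_worker() -> writepage
 * sees -EAGAIN and redirties the page -> the worker reserves space and sets
 * the delalloc bits, so the next writeback pass writes the page normally.
 */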
static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
				       struct btrfs_inode *inode, u64 file_pos,
				       struct btrfs_file_extent_item *stack_fi,
				       const bool update_inode_bytes,
				       u64 qgroup_reserved)
{
	struct btrfs_root *root = inode->root;
	const u64 sectorsize = root->fs_info->sectorsize;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key ins;
	u64 disk_num_bytes = btrfs_stack_file_extent_disk_num_bytes(stack_fi);
	u64 disk_bytenr = btrfs_stack_file_extent_disk_bytenr(stack_fi);
	u64 offset = btrfs_stack_file_extent_offset(stack_fi);
	u64 num_bytes = btrfs_stack_file_extent_num_bytes(stack_fi);
	u64 ram_bytes = btrfs_stack_file_extent_ram_bytes(stack_fi);
	struct btrfs_drop_extents_args drop_args = { 0 };
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * we may be replacing one extent in the tree with another.
	 * The new extent is pinned in the extent map, and we don't want
	 * to drop it from the cache until it is completely in the btree.
	 *
	 * So, tell btrfs_drop_extents to leave this extent in the cache.
	 * the caller is expected to unpin it and allow it to be merged
	 * with the others.
	 */
	drop_args.path = path;
	drop_args.start = file_pos;
	drop_args.end = file_pos + num_bytes;
	drop_args.replace_extent = true;
	drop_args.extent_item_size = sizeof(*stack_fi);
	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
	if (ret)
		goto out;

	if (!drop_args.extent_inserted) {
		ins.objectid = btrfs_ino(inode);
		ins.offset = file_pos;
		ins.type = BTRFS_EXTENT_DATA_KEY;

		ret = btrfs_insert_empty_item(trans, root, path, &ins,
					      sizeof(*stack_fi));
		if (ret)
			goto out;
	}
	leaf = path->nodes[0];
	btrfs_set_stack_file_extent_generation(stack_fi, trans->transid);
	write_extent_buffer(leaf, stack_fi,
			    btrfs_item_ptr_offset(leaf, path->slots[0]),
			    sizeof(struct btrfs_file_extent_item));

	btrfs_mark_buffer_dirty(trans, leaf);
	btrfs_release_path(path);

	/*
	 * If we dropped an inline extent here, we know the range where it is
	 * was not marked with the EXTENT_DELALLOC_NEW bit, so we update the
	 * number of bytes only for that range containing the inline extent.
	 * The remainder of the range will be processed when clearing the
	 * EXTENT_DELALLOC_BIT bit through the ordered extent completion.
	 */
	if (file_pos == 0 && !IS_ALIGNED(drop_args.bytes_found, sectorsize)) {
		u64 inline_size = round_down(drop_args.bytes_found, sectorsize);

		inline_size = drop_args.bytes_found - inline_size;
		btrfs_update_inode_bytes(inode, sectorsize, inline_size);
		drop_args.bytes_found -= inline_size;
		num_bytes -= sectorsize;
	}

	if (update_inode_bytes)
		btrfs_update_inode_bytes(inode, num_bytes, drop_args.bytes_found);

	ins.objectid = disk_bytenr;
	ins.offset = disk_num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_inode_set_file_extent_range(inode, file_pos, ram_bytes);
	if (ret)
		goto out;

	ret = btrfs_alloc_reserved_file_extent(trans, root, btrfs_ino(inode),
					       file_pos - offset,
					       qgroup_reserved, &ins);
out:
	btrfs_free_path(path);

	return ret;
}
static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
					 u64 start, u64 len)
{
	struct btrfs_block_group *cache;

	cache = btrfs_lookup_block_group(fs_info, start);
	ASSERT(cache);

	spin_lock(&cache->lock);
	cache->delalloc_bytes -= len;
	spin_unlock(&cache->lock);

	btrfs_put_block_group(cache);
}
static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans,
					     struct btrfs_ordered_extent *oe)
{
	struct btrfs_file_extent_item stack_fi;
	bool update_inode_bytes;
	u64 num_bytes = oe->num_bytes;
	u64 ram_bytes = oe->ram_bytes;

	memset(&stack_fi, 0, sizeof(stack_fi));
	btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_REG);
	btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, oe->disk_bytenr);
	btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi,
						   oe->disk_num_bytes);
	btrfs_set_stack_file_extent_offset(&stack_fi, oe->offset);
	if (test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags)) {
		num_bytes = oe->truncated_len;
		ram_bytes = num_bytes;
	}
	btrfs_set_stack_file_extent_num_bytes(&stack_fi, num_bytes);
	btrfs_set_stack_file_extent_ram_bytes(&stack_fi, ram_bytes);
	btrfs_set_stack_file_extent_compression(&stack_fi, oe->compress_type);
	/* Encryption and other encoding is reserved and all 0 */

	/*
	 * For delalloc, when completing an ordered extent we update the inode's
	 * bytes when clearing the range in the inode's io tree, so pass false
	 * as the argument 'update_inode_bytes' to insert_reserved_file_extent(),
	 * except if the ordered extent was truncated.
	 */
	update_inode_bytes = test_bit(BTRFS_ORDERED_DIRECT, &oe->flags) ||
			     test_bit(BTRFS_ORDERED_ENCODED, &oe->flags) ||
			     test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags);

	return insert_reserved_file_extent(trans, BTRFS_I(oe->inode),
					   oe->file_offset, &stack_fi,
					   update_inode_bytes, oe->qgroup_rsv);
}
/*
 * As ordered data IO finishes, this gets called so we can finish
 * an ordered extent if the range of bytes in the file it covers has been
 * fully written.
 */
int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
{
	struct btrfs_inode *inode = BTRFS_I(ordered_extent->inode);
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans = NULL;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct extent_state *cached_state = NULL;
	u64 start, end;
	int compress_type = 0;
	int ret = 0;
	u64 logical_len = ordered_extent->num_bytes;
	bool freespace_inode;
	bool truncated = false;
	bool clear_reserved_extent = true;
	unsigned int clear_bits = EXTENT_DEFRAG;

	start = ordered_extent->file_offset;
	end = start + ordered_extent->num_bytes - 1;

	if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
	    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) &&
	    !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags) &&
	    !test_bit(BTRFS_ORDERED_ENCODED, &ordered_extent->flags))
		clear_bits |= EXTENT_DELALLOC_NEW;

	freespace_inode = btrfs_is_free_space_inode(inode);
	if (!freespace_inode)
		btrfs_lockdep_acquire(fs_info, btrfs_ordered_extent);

	if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
		ret = -EIO;
		goto out;
	}

	if (btrfs_is_zoned(fs_info))
		btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr,
					ordered_extent->disk_num_bytes);

	if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
		truncated = true;
		logical_len = ordered_extent->truncated_len;
		/* Truncated the entire extent, don't bother adding */
		if (!logical_len)
			goto out;
	}

	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
		BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */

		btrfs_inode_safe_disk_i_size_write(inode, 0);
		if (freespace_inode)
			trans = btrfs_join_transaction_spacecache(root);
		else
			trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			goto out;
		}
		trans->block_rsv = &inode->block_rsv;
		ret = btrfs_update_inode_fallback(trans, inode);
		if (ret) /* -ENOMEM or corruption */
			btrfs_abort_transaction(trans, ret);
		goto out;
	}

	clear_bits |= EXTENT_LOCKED;
	lock_extent(io_tree, start, end, &cached_state);

	if (freespace_inode)
		trans = btrfs_join_transaction_spacecache(root);
	else
		trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}

	trans->block_rsv = &inode->block_rsv;

	ret = btrfs_insert_raid_extent(trans, ordered_extent);
	if (ret)
		goto out;

	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
		compress_type = ordered_extent->compress_type;
	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
		BUG_ON(compress_type);
		ret = btrfs_mark_extent_written(trans, inode,
						ordered_extent->file_offset,
						ordered_extent->file_offset +
						logical_len);
		btrfs_zoned_release_data_reloc_bg(fs_info, ordered_extent->disk_bytenr,
						  ordered_extent->disk_num_bytes);
	} else {
		BUG_ON(root == fs_info->tree_root);
		ret = insert_ordered_extent_file_extent(trans, ordered_extent);
		if (!ret) {
			clear_reserved_extent = false;
			btrfs_release_delalloc_bytes(fs_info,
						     ordered_extent->disk_bytenr,
						     ordered_extent->disk_num_bytes);
		}
	}
	unpin_extent_cache(inode, ordered_extent->file_offset,
			   ordered_extent->num_bytes, trans->transid);
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = add_pending_csums(trans, &ordered_extent->list);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	/*
	 * If this is a new delalloc range, clear its new delalloc flag to
	 * update the inode's number of bytes. This needs to be done first
	 * before updating the inode item.
	 */
	if ((clear_bits & EXTENT_DELALLOC_NEW) &&
	    !test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags))
		clear_extent_bit(&inode->io_tree, start, end,
				 EXTENT_DELALLOC_NEW | EXTENT_ADD_INODE_BYTES,
				 &cached_state);

	btrfs_inode_safe_disk_i_size_write(inode, 0);
	ret = btrfs_update_inode_fallback(trans, inode);
	if (ret) { /* -ENOMEM or corruption */
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	ret = 0;
out:
	clear_extent_bit(&inode->io_tree, start, end, clear_bits,
			 &cached_state);

	if (trans)
		btrfs_end_transaction(trans);

	if (ret || truncated) {
		u64 unwritten_start = start;

		/*
		 * If we failed to finish this ordered extent for any reason we
		 * need to make sure BTRFS_ORDERED_IOERR is set on the ordered
		 * extent, and mark the inode with the error if it wasn't
		 * already set.  Any error during writeback would have already
		 * set the mapping error, so we need to set it if we're the ones
		 * marking this ordered extent as failed.
		 */
		if (ret && !test_and_set_bit(BTRFS_ORDERED_IOERR,
					     &ordered_extent->flags))
			mapping_set_error(ordered_extent->inode->i_mapping, -EIO);

		if (truncated)
			unwritten_start += logical_len;
		clear_extent_uptodate(io_tree, unwritten_start, end, NULL);

		/* Drop extent maps for the part of the extent we didn't write. */
		btrfs_drop_extent_map_range(inode, unwritten_start, end, false);

		/*
		 * If the ordered extent had an IOERR or something else went
		 * wrong we need to return the space for this ordered extent
		 * back to the allocator.  We only free the extent in the
		 * truncated case if we didn't write out the extent at all.
		 *
		 * If we made it past insert_reserved_file_extent before we
		 * errored out then we don't need to do this as the accounting
		 * has already been done.
		 */
		if ((ret || !logical_len) &&
		    clear_reserved_extent &&
		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
		    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
			/*
			 * Discard the range before returning it back to the
			 * free space pool
			 */
			if (ret && btrfs_test_opt(fs_info, DISCARD_SYNC))
				btrfs_discard_extent(fs_info,
						ordered_extent->disk_bytenr,
						ordered_extent->disk_num_bytes,
						NULL);
			btrfs_free_reserved_extent(fs_info,
					ordered_extent->disk_bytenr,
					ordered_extent->disk_num_bytes, 1);
			/*
			 * Actually free the qgroup rsv which was released when
			 * the ordered extent was created.
			 */
			btrfs_qgroup_free_refroot(fs_info, inode->root->root_key.objectid,
						  ordered_extent->qgroup_rsv,
						  BTRFS_QGROUP_RSV_DATA);
		}
	}

	/*
	 * This needs to be done to make sure anybody waiting knows we are done
	 * updating everything for this ordered extent.
	 */
	btrfs_remove_ordered_extent(inode, ordered_extent);

	/* once for us */
	btrfs_put_ordered_extent(ordered_extent);
	/* once for the tree */
	btrfs_put_ordered_extent(ordered_extent);

	return ret;
}
int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered)
{
	if (btrfs_is_zoned(btrfs_sb(ordered->inode->i_sb)) &&
	    !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) &&
	    list_empty(&ordered->bioc_list))
		btrfs_finish_ordered_zoned(ordered);
	return btrfs_finish_one_ordered(ordered);
}
/*
 * Verify the checksum for a single sector without any extra action that
 * depends on the type of I/O.
 */
int btrfs_check_sector_csum(struct btrfs_fs_info *fs_info, struct page *page,
			    u32 pgoff, u8 *csum, const u8 * const csum_expected)
{
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	char *kaddr;

	ASSERT(pgoff + fs_info->sectorsize <= PAGE_SIZE);

	shash->tfm = fs_info->csum_shash;

	kaddr = kmap_local_page(page) + pgoff;
	crypto_shash_digest(shash, kaddr, fs_info->sectorsize, csum);
	kunmap_local(kaddr);

	if (memcmp(csum, csum_expected, fs_info->csum_size))
		return -EIO;
	return 0;
}
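/*
 * Usage sketch (illustrative, error handling elided): verifying one
 * sector-sized chunk of a page against an expected checksum buffer:
 *
 *	u8 csum[BTRFS_CSUM_SIZE];
 *
 *	if (btrfs_check_sector_csum(fs_info, page, pgoff, csum, expected))
 *		// mismatch: csum now holds the computed digest, which is
 *		// useful for error reporting (see btrfs_data_csum_ok() below)
 */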
/*
 * Verify the checksum of a single data sector.
 *
 * @bbio:	btrfs_io_bio which contains the csum
 * @dev:	device the sector is on
 * @bio_offset:	offset to the beginning of the bio (in bytes)
 * @bv:		bio_vec to check
 *
 * Check if the checksum on a data block is valid.  When a checksum mismatch is
 * detected, report the error and fill the corrupted range with zero.
 *
 * Return %true if the sector is ok or had no checksum to start with, else %false.
 */
bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev,
			u32 bio_offset, struct bio_vec *bv)
{
	struct btrfs_inode *inode = bbio->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 file_offset = bbio->file_offset + bio_offset;
	u64 end = file_offset + bv->bv_len - 1;
	u8 *csum_expected;
	u8 csum[BTRFS_CSUM_SIZE];

	ASSERT(bv->bv_len == fs_info->sectorsize);

	if (!bbio->csum)
		return true;

	if (btrfs_is_data_reloc_root(inode->root) &&
	    test_range_bit(&inode->io_tree, file_offset, end, EXTENT_NODATASUM,
			   NULL)) {
		/* Skip the range without csum for data reloc inode */
		clear_extent_bits(&inode->io_tree, file_offset, end,
				  EXTENT_NODATASUM);
		return true;
	}

	csum_expected = bbio->csum + (bio_offset >> fs_info->sectorsize_bits) *
				fs_info->csum_size;
	if (btrfs_check_sector_csum(fs_info, bv->bv_page, bv->bv_offset, csum,
				    csum_expected))
		goto zeroit;
	return true;

zeroit:
	btrfs_print_data_csum_error(inode, file_offset, csum, csum_expected,
				    bbio->mirror_num);
	if (dev)
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS);
	memzero_bvec(bv);
	return false;
}
/*
 * Perform a delayed iput on @inode.
 *
 * @inode: The inode we want to perform iput on
 *
 * This function uses the generic vfs_inode::i_count to track whether we should
 * just decrement it (in case it's > 1) or if this is the last iput then link
 * the inode to the delayed iput machinery. Delayed iputs are processed at
 * transaction commit time/superblock commit/cleaner kthread.
 */
void btrfs_add_delayed_iput(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	unsigned long flags;

	if (atomic_add_unless(&inode->vfs_inode.i_count, -1, 1))
		return;

	atomic_inc(&fs_info->nr_delayed_iputs);
	/*
	 * Need to be irq safe here because we can be called from either an irq
	 * context (see bio.c and btrfs_put_ordered_extent()) or a non-irq
	 * context.
	 */
	spin_lock_irqsave(&fs_info->delayed_iput_lock, flags);
	ASSERT(list_empty(&inode->delayed_iput));
	list_add_tail(&inode->delayed_iput, &fs_info->delayed_iputs);
	spin_unlock_irqrestore(&fs_info->delayed_iput_lock, flags);
	if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags))
		wake_up_process(fs_info->cleaner_kthread);
}
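/*
 * Note (summary): the fast path above is a plain atomic decrement; only the
 * final reference takes delayed_iput_lock and defers the iput() to
 * btrfs_run_delayed_iputs(), so callers in irq or writeback context never
 * end up evicting an inode (and possibly flushing space) themselves.
 */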
static void run_delayed_iput_locked(struct btrfs_fs_info *fs_info,
				    struct btrfs_inode *inode)
{
	list_del_init(&inode->delayed_iput);
	spin_unlock_irq(&fs_info->delayed_iput_lock);
	iput(&inode->vfs_inode);
	if (atomic_dec_and_test(&fs_info->nr_delayed_iputs))
		wake_up(&fs_info->delayed_iputs_wait);
	spin_lock_irq(&fs_info->delayed_iput_lock);
}
static void btrfs_run_delayed_iput(struct btrfs_fs_info *fs_info,
				   struct btrfs_inode *inode)
{
	if (!list_empty(&inode->delayed_iput)) {
		spin_lock_irq(&fs_info->delayed_iput_lock);
		if (!list_empty(&inode->delayed_iput))
			run_delayed_iput_locked(fs_info, inode);
		spin_unlock_irq(&fs_info->delayed_iput_lock);
	}
}
void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
{
	/*
	 * btrfs_put_ordered_extent() can run in irq context (see bio.c), which
	 * calls btrfs_add_delayed_iput() and that needs to lock
	 * fs_info->delayed_iput_lock. So we need to disable irqs here to
	 * prevent a deadlock.
	 */
	spin_lock_irq(&fs_info->delayed_iput_lock);
	while (!list_empty(&fs_info->delayed_iputs)) {
		struct btrfs_inode *inode;

		inode = list_first_entry(&fs_info->delayed_iputs,
					 struct btrfs_inode, delayed_iput);
		run_delayed_iput_locked(fs_info, inode);
		if (need_resched()) {
			spin_unlock_irq(&fs_info->delayed_iput_lock);
			cond_resched();
			spin_lock_irq(&fs_info->delayed_iput_lock);
		}
	}
	spin_unlock_irq(&fs_info->delayed_iput_lock);
}
/*
 * Wait for the flushing of all delayed iputs.
 *
 * @fs_info:  the filesystem
 *
 * This will wait on any delayed iputs that are currently running with KILLABLE
 * set.  Once they are all done running we will return, unless we are killed in
 * which case we return EINTR. This helps in user operations like fallocate etc
 * that might get blocked on the iputs.
 *
 * Return EINTR if we were killed, 0 if nothing's pending.
 */
int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info)
{
	int ret = wait_event_killable(fs_info->delayed_iputs_wait,
			atomic_read(&fs_info->nr_delayed_iputs) == 0);
	if (ret)
		return -EINTR;
	return 0;
}
/*
 * This creates an orphan entry for the given inode in case something goes wrong
 * in the middle of an unlink.
 */
int btrfs_orphan_add(struct btrfs_trans_handle *trans,
		     struct btrfs_inode *inode)
{
	int ret;

	ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode));
	if (ret && ret != -EEXIST) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	return 0;
}
/*
 * We have done the delete so we can go ahead and remove the orphan item for
 * this particular inode.
 */
static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
			    struct btrfs_inode *inode)
{
	return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode));
}
/*
 * This cleans up any orphans that may be left on the list from the last use
 * of this root.
 */
int btrfs_orphan_cleanup(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key, found_key;
	struct btrfs_trans_handle *trans;
	struct inode *inode;
	u64 last_objectid = 0;
	int ret = 0, nr_unlink = 0;

	if (test_and_set_bit(BTRFS_ROOT_ORPHAN_CLEANUP, &root->state))
		return 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}
	path->reada = READA_BACK;

	key.objectid = BTRFS_ORPHAN_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		/*
		 * if ret == 0 means we found what we were searching for, which
		 * is weird, but possible, so only screw with path if we didn't
		 * find the key and see if we have stuff that matches
		 */
		if (ret > 0) {
			ret = 0;
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		/* pull out the item */
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		/* make sure the item matches what we want */
		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
			break;
		if (found_key.type != BTRFS_ORPHAN_ITEM_KEY)
			break;

		/* release the path since we're done with it */
		btrfs_release_path(path);

		/*
		 * this is where we are basically btrfs_lookup, without the
		 * crossing root thing.  we store the inode number in the
		 * offset of the orphan item.
		 */

		if (found_key.offset == last_objectid) {
			/*
			 * We found the same inode as before. This means we were
			 * not able to remove its items via eviction triggered
			 * by an iput(). A transaction abort may have happened,
			 * due to -ENOSPC for example, so try to grab the error
			 * that led to a transaction abort, if any.
			 */
			btrfs_err(fs_info,
				  "Error removing orphan entry, stopping orphan cleanup");
			ret = BTRFS_FS_ERROR(fs_info) ?: -EINVAL;
			goto out;
		}

		last_objectid = found_key.offset;

		found_key.objectid = found_key.offset;
		found_key.type = BTRFS_INODE_ITEM_KEY;
		found_key.offset = 0;
		inode = btrfs_iget(fs_info->sb, last_objectid, root);
		if (IS_ERR(inode)) {
			ret = PTR_ERR(inode);
			inode = NULL;
			if (ret != -ENOENT)
				goto out;
		}

		if (!inode && root == fs_info->tree_root) {
			struct btrfs_root *dead_root;
			int is_dead_root = 0;

			/*
			 * This is an orphan in the tree root. Currently these
			 * could come from 2 sources:
			 *  a) a root (snapshot/subvolume) deletion in progress
			 *  b) a free space cache inode
			 * We need to distinguish those two, as the orphan item
			 * for a root must not get deleted before the deletion
			 * of the snapshot/subvolume's tree completes.
			 *
			 * btrfs_find_orphan_roots() ran before us, which has
			 * found all deleted roots and loaded them into
			 * fs_info->fs_roots_radix. So here we can find if an
			 * orphan item corresponds to a deleted root by looking
			 * up the root from that radix tree.
			 */

			spin_lock(&fs_info->fs_roots_radix_lock);
			dead_root = radix_tree_lookup(&fs_info->fs_roots_radix,
						      (unsigned long)found_key.objectid);
			if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0)
				is_dead_root = 1;
			spin_unlock(&fs_info->fs_roots_radix_lock);

			if (is_dead_root) {
				/* prevent this orphan from being found again */
				key.offset = found_key.objectid - 1;
				continue;
			}
		}

		/*
		 * If we have an inode with links, there are a couple of
		 * possibilities:
		 *
		 * 1. We were halfway through creating fsverity metadata for the
		 *    file. In that case, the orphan item represents incomplete
		 *    fsverity metadata which must be cleaned up with
		 *    btrfs_drop_verity_items and deleting the orphan item.
		 *
		 * 2. Old kernels (before v3.12) used to create an
		 *    orphan item for truncate indicating that there were possibly
		 *    extent items past i_size that needed to be deleted. In v3.12,
		 *    truncate was changed to update i_size in sync with the extent
		 *    items, but the (useless) orphan item was still created. Since
		 *    v4.18, we don't create the orphan item for truncate at all.
		 *
		 * So, this item could mean that we need to do a truncate, but
		 * only if this filesystem was last used on a pre-v3.12 kernel
		 * and was not cleanly unmounted. The odds of that are quite
		 * slim, and it's a pain to do the truncate now, so just delete
		 * the orphan item.
		 *
		 * It's also possible that this orphan item was supposed to be
		 * deleted but wasn't. The inode number may have been reused,
		 * but either way, we can delete the orphan item.
		 */
		if (!inode || inode->i_nlink) {
			if (inode) {
				ret = btrfs_drop_verity_items(BTRFS_I(inode));
				iput(inode);
				inode = NULL;
				if (ret)
					goto out;
			}
			trans = btrfs_start_transaction(root, 1);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				goto out;
			}
			btrfs_debug(fs_info, "auto deleting %Lu",
				    found_key.objectid);
			ret = btrfs_del_orphan_item(trans, root,
						    found_key.objectid);
			btrfs_end_transaction(trans);
			if (ret)
				goto out;
			continue;
		}

		nr_unlink++;

		/* this will do delete_inode and everything for us */
		iput(inode);
	}
	/* release the path since we're done with it */
	btrfs_release_path(path);

	if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
		trans = btrfs_join_transaction(root);
		if (!IS_ERR(trans))
			btrfs_end_transaction(trans);
	}

	if (nr_unlink)
		btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink);

out:
	if (ret)
		btrfs_err(fs_info, "could not do orphan cleanup %d", ret);
	btrfs_free_path(path);
	return ret;
}
/*
 * very simple check to peek ahead in the leaf looking for xattrs.  If we
 * don't find any xattrs, we know there can't be any acls.
 *
 * slot is the slot the inode is in, objectid is the objectid of the inode
 */
static noinline int acls_after_inode_item(struct extent_buffer *leaf,
					  int slot, u64 objectid,
					  int *first_xattr_slot)
{
	u32 nritems = btrfs_header_nritems(leaf);
	struct btrfs_key found_key;
	static u64 xattr_access = 0;
	static u64 xattr_default = 0;
	int scanned = 0;

	if (!xattr_access) {
		xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS,
					strlen(XATTR_NAME_POSIX_ACL_ACCESS));
		xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT,
					strlen(XATTR_NAME_POSIX_ACL_DEFAULT));
	}

	slot++;
	*first_xattr_slot = -1;
	while (slot < nritems) {
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/* we found a different objectid, there must not be acls */
		if (found_key.objectid != objectid)
			return 0;

		/* we found an xattr, assume we've got an acl */
		if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
			if (*first_xattr_slot == -1)
				*first_xattr_slot = slot;
			if (found_key.offset == xattr_access ||
			    found_key.offset == xattr_default)
				return 1;
		}

		/*
		 * we found a key greater than an xattr key, there can't
		 * be any acls later on
		 */
		if (found_key.type > BTRFS_XATTR_ITEM_KEY)
			return 0;

		slot++;
		scanned++;

		/*
		 * it goes inode, inode backrefs, xattrs, extents,
		 * so if there are a ton of hard links to an inode there can
		 * be a lot of backrefs.  Don't waste time searching too hard,
		 * this is just an optimization
		 */
		if (scanned >= 8)
			break;
	}
	/* we hit the end of the leaf before we found an xattr or
	 * something larger than an xattr.  We have to assume the inode
	 * has acls
	 */
	if (*first_xattr_slot == -1)
		*first_xattr_slot = slot;
	return 1;
}
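/*
 * Return convention (summary): 1 means "the inode may have ACLs" (an ACL
 * xattr was found, or the short scan above was inconclusive), while 0 means
 * "definitely no ACLs", which lets btrfs_read_locked_inode() below call
 * cache_no_acl().
 */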
/*
 * read an inode from the btree into the in-memory inode
 */
static int btrfs_read_locked_inode(struct inode *inode,
				   struct btrfs_path *in_path)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_path *path = in_path;
	struct extent_buffer *leaf;
	struct btrfs_inode_item *inode_item;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key location;
	unsigned long ptr;
	int maybe_acls;
	u32 rdev;
	int ret;
	bool filled = false;
	int first_xattr_slot;

	ret = btrfs_fill_inode(inode, &rdev);
	if (!ret)
		filled = true;

	if (!path) {
		path = btrfs_alloc_path();
		if (!path)
			return -ENOMEM;
	}

	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));

	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
	if (ret) {
		if (path != in_path)
			btrfs_free_path(path);
		return ret;
	}

	leaf = path->nodes[0];

	if (filled)
		goto cache_index;

	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
	set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
	i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
	i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
	btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item));
	btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
			round_up(i_size_read(inode), fs_info->sectorsize));

	inode_set_atime(inode, btrfs_timespec_sec(leaf, &inode_item->atime),
			btrfs_timespec_nsec(leaf, &inode_item->atime));

	inode_set_mtime(inode, btrfs_timespec_sec(leaf, &inode_item->mtime),
			btrfs_timespec_nsec(leaf, &inode_item->mtime));

	inode_set_ctime(inode, btrfs_timespec_sec(leaf, &inode_item->ctime),
			btrfs_timespec_nsec(leaf, &inode_item->ctime));

	BTRFS_I(inode)->i_otime_sec = btrfs_timespec_sec(leaf, &inode_item->otime);
	BTRFS_I(inode)->i_otime_nsec = btrfs_timespec_nsec(leaf, &inode_item->otime);

	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
	BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);

	inode_set_iversion_queried(inode,
				   btrfs_inode_sequence(leaf, inode_item));
	inode->i_generation = BTRFS_I(inode)->generation;
	rdev = btrfs_inode_rdev(leaf, inode_item);

	BTRFS_I(inode)->index_cnt = (u64)-1;
	btrfs_inode_split_flags(btrfs_inode_flags(leaf, inode_item),
				&BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags);

cache_index:
	/*
	 * If we were modified in the current generation and evicted from memory
	 * and then re-read we need to do a full sync since we don't have any
	 * idea about which extents were modified before we were evicted from
	 * memory.
	 *
	 * This is required for both inode re-read from disk and delayed inode
	 * in the delayed_nodes xarray.
	 */
	if (BTRFS_I(inode)->last_trans == btrfs_get_fs_generation(fs_info))
		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			&BTRFS_I(inode)->runtime_flags);

	/*
	 * We don't persist the id of the transaction where an unlink operation
	 * against the inode was last made. So here we assume the inode might
	 * have been evicted, and therefore the exact value of last_unlink_trans
	 * lost, and set it to last_trans to avoid metadata inconsistencies
	 * between the inode and its parent if the inode is fsync'ed and the log
	 * replayed. For example, in the scenario:
	 *
	 * touch mydir/foo
	 * ln mydir/foo mydir/bar
	 * sync
	 * unlink mydir/bar
	 * echo 2 > /proc/sys/vm/drop_caches   # evicts inode
	 * xfs_io -c fsync mydir/foo
	 * <power failure>
	 * mount fs, triggers fsync log replay
	 *
	 * We must make sure that when we fsync our inode foo we also log its
	 * parent inode, otherwise after log replay the parent still has the
	 * dentry with the "bar" name but our inode foo has a link count of 1
	 * and doesn't have an inode ref with the name "bar" anymore.
	 *
	 * Setting last_unlink_trans to last_trans is a pessimistic approach,
	 * but it guarantees correctness at the expense of occasional full
	 * transaction commits on fsync if our inode is a directory, or if our
	 * inode is not a directory, logging its parent unnecessarily.
	 */
	BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;

	/*
	 * Same logic as for last_unlink_trans. We don't persist the generation
	 * of the last transaction where this inode was used for a reflink
	 * operation, so after eviction and reloading the inode we must be
	 * pessimistic and assume the last transaction that modified the inode.
	 */
	BTRFS_I(inode)->last_reflink_trans = BTRFS_I(inode)->last_trans;

	path->slots[0]++;
	if (inode->i_nlink != 1 ||
	    path->slots[0] >= btrfs_header_nritems(leaf))
		goto cache_acl;

	btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
	if (location.objectid != btrfs_ino(BTRFS_I(inode)))
		goto cache_acl;

	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	if (location.type == BTRFS_INODE_REF_KEY) {
		struct btrfs_inode_ref *ref;

		ref = (struct btrfs_inode_ref *)ptr;
		BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref);
	} else if (location.type == BTRFS_INODE_EXTREF_KEY) {
		struct btrfs_inode_extref *extref;

		extref = (struct btrfs_inode_extref *)ptr;
		BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf,
								     extref);
	}
cache_acl:
	/*
	 * try to precache a NULL acl entry for files that don't have
	 * any xattrs or acls
	 */
	maybe_acls = acls_after_inode_item(leaf, path->slots[0],
					   btrfs_ino(BTRFS_I(inode)), &first_xattr_slot);
	if (first_xattr_slot != -1) {
		path->slots[0] = first_xattr_slot;
		ret = btrfs_load_inode_props(inode, path);
		if (ret)
			btrfs_err(fs_info,
				  "error loading props for ino %llu (root %llu): %d",
				  btrfs_ino(BTRFS_I(inode)),
				  root->root_key.objectid, ret);
	}
	if (path != in_path)
		btrfs_free_path(path);

	if (!maybe_acls)
		cache_no_acl(inode);

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_mapping->a_ops = &btrfs_aops;
		inode->i_fop = &btrfs_file_operations;
		inode->i_op = &btrfs_file_inode_operations;
		break;
	case S_IFDIR:
		inode->i_fop = &btrfs_dir_file_operations;
		inode->i_op = &btrfs_dir_inode_operations;
		break;
	case S_IFLNK:
		inode->i_op = &btrfs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &btrfs_aops;
		break;
	default:
		inode->i_op = &btrfs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, rdev);
		break;
	}

	btrfs_sync_inode_flags_to_i_flags(inode);
	return 0;
}

/*
 * given a leaf and an inode, copy the inode fields into the leaf
 */
static void fill_inode_item(struct btrfs_trans_handle *trans,
			    struct extent_buffer *leaf,
			    struct btrfs_inode_item *item,
			    struct inode *inode)
{
	struct btrfs_map_token token;
	u64 flags;

	btrfs_init_map_token(&token, leaf);

	btrfs_set_token_inode_uid(&token, item, i_uid_read(inode));
	btrfs_set_token_inode_gid(&token, item, i_gid_read(inode));
	btrfs_set_token_inode_size(&token, item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_token_inode_mode(&token, item, inode->i_mode);
	btrfs_set_token_inode_nlink(&token, item, inode->i_nlink);

	btrfs_set_token_timespec_sec(&token, &item->atime,
				     inode_get_atime_sec(inode));
	btrfs_set_token_timespec_nsec(&token, &item->atime,
				      inode_get_atime_nsec(inode));

	btrfs_set_token_timespec_sec(&token, &item->mtime,
				     inode_get_mtime_sec(inode));
	btrfs_set_token_timespec_nsec(&token, &item->mtime,
				      inode_get_mtime_nsec(inode));

	btrfs_set_token_timespec_sec(&token, &item->ctime,
				     inode_get_ctime_sec(inode));
	btrfs_set_token_timespec_nsec(&token, &item->ctime,
				      inode_get_ctime_nsec(inode));

	btrfs_set_token_timespec_sec(&token, &item->otime, BTRFS_I(inode)->i_otime_sec);
	btrfs_set_token_timespec_nsec(&token, &item->otime, BTRFS_I(inode)->i_otime_nsec);

	btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode));
	btrfs_set_token_inode_generation(&token, item,
					 BTRFS_I(inode)->generation);
	btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode));
	btrfs_set_token_inode_transid(&token, item, trans->transid);
	btrfs_set_token_inode_rdev(&token, item, inode->i_rdev);
	flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
					  BTRFS_I(inode)->ro_flags);
	btrfs_set_token_inode_flags(&token, item, flags);
	btrfs_set_token_inode_block_group(&token, item, 0);
}
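
/*
 * The map token used above caches the extent buffer page mapping between
 * consecutive btrfs_set_token_*() calls, so filling the 160-byte inode item
 * does not redo the leaf offset translation for every field the way the
 * plain btrfs_set_inode_*() accessors would.
 */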

/*
 * copy everything in the in-memory inode into the btree.
 */
static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
					    struct btrfs_inode *inode)
{
	struct btrfs_inode_item *inode_item;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_lookup_inode(trans, inode->root, path, &inode->location, 1);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto failed;
	}

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);

	fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode);
	btrfs_mark_buffer_dirty(trans, leaf);
	btrfs_set_inode_last_trans(trans, inode);
	ret = 0;
failed:
	btrfs_free_path(path);
	return ret;
}

/*
 * copy everything in the in-memory inode into the btree.
 */
int btrfs_update_inode(struct btrfs_trans_handle *trans,
		       struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	/*
	 * If the inode is a free space inode, we can deadlock during commit
	 * if we put it into the delayed code.
	 *
	 * The data relocation inode should also be directly updated
	 * without delay.
	 */
	if (!btrfs_is_free_space_inode(inode)
	    && !btrfs_is_data_reloc_root(root)
	    && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
		btrfs_update_root_times(trans, root);

		ret = btrfs_delayed_update_inode(trans, inode);
		if (!ret)
			btrfs_set_inode_last_trans(trans, inode);
		return ret;
	}

	return btrfs_update_inode_item(trans, inode);
}
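
/*
 * In the common case the update above goes through the delayed inode code:
 * the new inode fields are stashed in the in-memory delayed node and copied
 * into the btree inode item when the delayed items are flushed. E.g. a
 * chmod() followed by a chown() typically results in one btree update
 * instead of two.
 */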

int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
				struct btrfs_inode *inode)
{
	int ret;

	ret = btrfs_update_inode(trans, inode);
	if (ret == -ENOSPC)
		return btrfs_update_inode_item(trans, inode);
	return ret;
}

/*
 * unlink helper that gets used here in inode.c and in the tree logging
 * recovery code. It removes a link in a directory with a given name, and
 * also drops the back refs in the inode to the directory
 */
static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
				struct btrfs_inode *dir,
				struct btrfs_inode *inode,
				const struct fscrypt_str *name,
				struct btrfs_rename_ctx *rename_ctx)
{
	struct btrfs_root *root = dir->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	int ret = 0;
	struct btrfs_dir_item *di;
	u64 index;
	u64 ino = btrfs_ino(inode);
	u64 dir_ino = btrfs_ino(dir);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	di = btrfs_lookup_dir_item(trans, root, path, dir_ino, name, -1);
	if (IS_ERR_OR_NULL(di)) {
		ret = di ? PTR_ERR(di) : -ENOENT;
		goto err;
	}
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	if (ret)
		goto err;
	btrfs_release_path(path);

	/*
	 * If we don't have dir index, we have to get it by looking up
	 * the inode ref, since we get the inode ref, remove it directly,
	 * it is unnecessary to do delayed deletion.
	 *
	 * But if we have dir index, needn't search inode ref to get it.
	 * Since the inode ref is close to the inode item, it is better
	 * that we delay to delete it, and just do this deletion when
	 * we update the inode item.
	 */
	if (inode->dir_index) {
		ret = btrfs_delayed_delete_inode_ref(inode);
		if (!ret) {
			index = inode->dir_index;
			goto skip_backref;
		}
	}

	ret = btrfs_del_inode_ref(trans, root, name, ino, dir_ino, &index);
	if (ret) {
		btrfs_info(fs_info,
			"failed to delete reference to %.*s, inode %llu parent %llu",
			name->len, name->name, ino, dir_ino);
		btrfs_abort_transaction(trans, ret);
		goto err;
	}
skip_backref:
	if (rename_ctx)
		rename_ctx->index = index;

	ret = btrfs_delete_delayed_dir_index(trans, dir, index);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto err;
	}

	/*
	 * If we are in a rename context, we don't need to update anything in the
	 * log. That will be done later during the rename by btrfs_log_new_name().
	 * Besides that, doing it here would only cause extra unnecessary btree
	 * operations on the log tree, increasing latency for applications.
	 */
	if (!rename_ctx) {
		btrfs_del_inode_ref_in_log(trans, root, name, inode, dir_ino);
		btrfs_del_dir_entries_in_log(trans, root, name, dir, index);
	}

	/*
	 * If we have a pending delayed iput we could end up with the final iput
	 * being run in btrfs-cleaner context. If we have enough of these built
	 * up we can end up burning a lot of time in btrfs-cleaner without any
	 * way to throttle the unlinks. Since we're currently holding a ref on
	 * the inode we can run the delayed iput here without any issues as the
	 * final iput won't be done until after we drop the ref we're currently
	 * holding.
	 */
	btrfs_run_delayed_iput(fs_info, inode);
err:
	btrfs_free_path(path);
	if (ret)
		goto out;

	btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2);
	inode_inc_iversion(&inode->vfs_inode);
	inode_inc_iversion(&dir->vfs_inode);
	inode_set_mtime_to_ts(&dir->vfs_inode, inode_set_ctime_current(&dir->vfs_inode));
	ret = btrfs_update_inode(trans, dir);
out:
	return ret;
}
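
/*
 * A single unlink therefore touches four structures: the dir item (keyed by
 * name hash), the dir index item (keyed by allocation sequence), the inode
 * ref (or extref) on the inode, and the matching log tree entries. The dir
 * index removal is routed through the delayed items code so it can be
 * batched with the parent directory's inode item update.
 */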

int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
		       struct btrfs_inode *dir, struct btrfs_inode *inode,
		       const struct fscrypt_str *name)
{
	int ret;

	ret = __btrfs_unlink_inode(trans, dir, inode, name, NULL);
	if (!ret) {
		drop_nlink(&inode->vfs_inode);
		ret = btrfs_update_inode(trans, inode);
	}
	return ret;
}

/*
 * helper to start transaction for unlink and rmdir.
 *
 * unlink and rmdir are special in btrfs, they do not always free space, so
 * if we cannot make our reservations the normal way try and see if there is
 * plenty of slack room in the global reserve to migrate, otherwise we cannot
 * allow the unlink to occur.
 */
static struct btrfs_trans_handle *__unlink_start_trans(struct btrfs_inode *dir)
{
	struct btrfs_root *root = dir->root;

	return btrfs_start_transaction_fallback_global_rsv(root,
							   BTRFS_UNLINK_METADATA_UNITS);
}
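
/*
 * Even though unlink frees space eventually, deleting the entries may still
 * COW btree leaves and thus allocate metadata first, which is why a
 * reservation is needed at all; falling back to the global reserve keeps
 * unlink working on a full filesystem so that space can actually be freed.
 */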

static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct btrfs_trans_handle *trans;
	struct inode *inode = d_inode(dentry);
	int ret;
	struct fscrypt_name fname;

	ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname);
	if (ret)
		return ret;

	/* This needs to handle no-key deletions later on */

	trans = __unlink_start_trans(BTRFS_I(dir));
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto fscrypt_free;
	}

	btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
				false);

	ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
				 &fname.disk_name);
	if (ret)
		goto end_trans;

	if (inode->i_nlink == 0) {
		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
		if (ret)
			goto end_trans;
	}

end_trans:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(BTRFS_I(dir)->root->fs_info);
fscrypt_free:
	fscrypt_free_filename(&fname);
	return ret;
}

static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
			       struct btrfs_inode *dir, struct dentry *dentry)
{
	struct btrfs_root *root = dir->root;
	struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	u64 index;
	int ret;
	u64 objectid;
	u64 dir_ino = btrfs_ino(dir);
	struct fscrypt_name fname;

	ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname);
	if (ret)
		return ret;

	/* This needs to handle no-key deletions later on */

	if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) {
		objectid = inode->root->root_key.objectid;
	} else if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
		objectid = inode->location.objectid;
	} else {
		WARN_ON(1);
		fscrypt_free_filename(&fname);
		return -EINVAL;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
				   &fname.disk_name, -1);
	if (IS_ERR_OR_NULL(di)) {
		ret = di ? PTR_ERR(di) : -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	btrfs_dir_item_key_to_cpu(leaf, di, &key);
	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	btrfs_release_path(path);

	/*
	 * This is a placeholder inode for a subvolume we didn't have a
	 * reference to at the time of the snapshot creation. In the meantime
	 * we could have renamed the real subvol link into our snapshot, so
	 * depending on btrfs_del_root_ref to return -ENOENT here is incorrect.
	 * Instead simply lookup the dir_index_item for this entry so we can
	 * remove it. Otherwise we know we have a ref to the root and we can
	 * call btrfs_del_root_ref, and it _shouldn't_ fail.
	 */
	if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
		di = btrfs_search_dir_index_item(root, path, dir_ino, &fname.disk_name);
		if (IS_ERR_OR_NULL(di)) {
			if (!di)
				ret = -ENOENT;
			else
				ret = PTR_ERR(di);
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		index = key.offset;
		btrfs_release_path(path);
	} else {
		ret = btrfs_del_root_ref(trans, objectid,
					 root->root_key.objectid, dir_ino,
					 &index, &fname.disk_name);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}

	ret = btrfs_delete_delayed_dir_index(trans, dir, index);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	btrfs_i_size_write(dir, dir->vfs_inode.i_size - fname.disk_name.len * 2);
	inode_inc_iversion(&dir->vfs_inode);
	inode_set_mtime_to_ts(&dir->vfs_inode, inode_set_ctime_current(&dir->vfs_inode));
	ret = btrfs_update_inode_fallback(trans, dir);
	if (ret)
		btrfs_abort_transaction(trans, ret);
out:
	btrfs_free_path(path);
	fscrypt_free_filename(&fname);
	return ret;
}

/*
 * Helper to check if the subvolume references other subvolumes or if it's
 * default.
 */
static noinline int may_destroy_subvol(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	struct fscrypt_str name = FSTR_INIT("default", 7);
	u64 dir_id;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* Make sure this root isn't set as the default subvol */
	dir_id = btrfs_super_root_dir(fs_info->super_copy);
	di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path,
				   dir_id, &name, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
		if (key.objectid == root->root_key.objectid) {
			ret = -EPERM;
			btrfs_err(fs_info,
				  "deleting default subvolume %llu is not allowed",
				  key.objectid);
			goto out;
		}
		btrfs_release_path(path);
	}

	key.objectid = root->root_key.objectid;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	ret = 0;
	if (path->slots[0] > 0) {
		path->slots[0]--;
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid == root->root_key.objectid &&
		    key.type == BTRFS_ROOT_REF_KEY)
			ret = -ENOTEMPTY;
	}
out:
	btrfs_free_path(path);
	return ret;
}
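
/*
 * The lookup above relies on key ordering: (objectid, BTRFS_ROOT_REF_KEY,
 * (u64)-1) cannot exist, so after the search the path points just past the
 * last key <= that triple. Stepping back one slot and checking the
 * objectid/type is then enough to tell whether this root still has any root
 * refs, i.e. child subvolumes.
 */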

/* Delete all dentries for inodes belonging to the root */
static void btrfs_prune_dentries(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;
	struct rb_node *prev;
	struct btrfs_inode *entry;
	struct inode *inode;
	u64 objectid = 0;

	if (!BTRFS_FS_ERROR(fs_info))
		WARN_ON(btrfs_root_refs(&root->root_item) != 0);

	spin_lock(&root->inode_lock);
again:
	node = root->inode_tree.rb_node;
	prev = NULL;
	while (node) {
		prev = node;
		entry = rb_entry(node, struct btrfs_inode, rb_node);

		if (objectid < btrfs_ino(entry))
			node = node->rb_left;
		else if (objectid > btrfs_ino(entry))
			node = node->rb_right;
		else
			break;
	}
	if (!node) {
		while (prev) {
			entry = rb_entry(prev, struct btrfs_inode, rb_node);
			if (objectid <= btrfs_ino(entry)) {
				node = prev;
				break;
			}
			prev = rb_next(prev);
		}
	}
	while (node) {
		entry = rb_entry(node, struct btrfs_inode, rb_node);
		objectid = btrfs_ino(entry) + 1;
		inode = igrab(&entry->vfs_inode);
		if (inode) {
			spin_unlock(&root->inode_lock);
			if (atomic_read(&inode->i_count) > 1)
				d_prune_aliases(inode);
			/*
			 * btrfs_drop_inode will have it removed from the inode
			 * cache when its usage count hits zero.
			 */
			iput(inode);
			cond_resched();
			spin_lock(&root->inode_lock);
			goto again;
		}

		if (cond_resched_lock(&root->inode_lock))
			goto again;

		node = rb_next(node);
	}
	spin_unlock(&root->inode_lock);
}

int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb);
	struct btrfs_root *root = dir->root;
	struct inode *inode = d_inode(dentry);
	struct btrfs_root *dest = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct btrfs_block_rsv block_rsv;
	u64 root_flags;
	int ret;

	down_write(&fs_info->subvol_sem);

	/*
	 * Don't allow to delete a subvolume with send in progress. This is
	 * inside the inode lock so the error handling that has to drop the bit
	 * again is not run concurrently.
	 */
	spin_lock(&dest->root_item_lock);
	if (dest->send_in_progress) {
		spin_unlock(&dest->root_item_lock);
		btrfs_warn(fs_info,
			   "attempt to delete subvolume %llu during send",
			   dest->root_key.objectid);
		ret = -EPERM;
		goto out_up_write;
	}
	if (atomic_read(&dest->nr_swapfiles)) {
		spin_unlock(&dest->root_item_lock);
		btrfs_warn(fs_info,
			   "attempt to delete subvolume %llu with active swapfile",
			   root->root_key.objectid);
		ret = -EPERM;
		goto out_up_write;
	}
	root_flags = btrfs_root_flags(&dest->root_item);
	btrfs_set_root_flags(&dest->root_item,
			     root_flags | BTRFS_ROOT_SUBVOL_DEAD);
	spin_unlock(&dest->root_item_lock);

	ret = may_destroy_subvol(dest);
	if (ret)
		goto out_undead;

	btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
	/*
	 * One for dir inode,
	 * two for dir entries,
	 * two for root ref/backref.
	 */
	ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
	if (ret)
		goto out_undead;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_release;
	}
	trans->block_rsv = &block_rsv;
	trans->bytes_reserved = block_rsv.size;

	btrfs_record_snapshot_destroy(trans, dir);

	ret = btrfs_unlink_subvol(trans, dir, dentry);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_end_trans;
	}

	ret = btrfs_record_root_in_trans(trans, dest);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_end_trans;
	}

	memset(&dest->root_item.drop_progress, 0,
	       sizeof(dest->root_item.drop_progress));
	btrfs_set_root_drop_level(&dest->root_item, 0);
	btrfs_set_root_refs(&dest->root_item, 0);

	if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) {
		ret = btrfs_insert_orphan_item(trans,
					fs_info->tree_root,
					dest->root_key.objectid);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out_end_trans;
		}
	}

	ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid,
				     BTRFS_UUID_KEY_SUBVOL,
				     dest->root_key.objectid);
	if (ret && ret != -ENOENT) {
		btrfs_abort_transaction(trans, ret);
		goto out_end_trans;
	}
	if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) {
		ret = btrfs_uuid_tree_remove(trans,
					     dest->root_item.received_uuid,
					     BTRFS_UUID_KEY_RECEIVED_SUBVOL,
					     dest->root_key.objectid);
		if (ret && ret != -ENOENT) {
			btrfs_abort_transaction(trans, ret);
			goto out_end_trans;
		}
	}

	free_anon_bdev(dest->anon_dev);
	dest->anon_dev = 0;
out_end_trans:
	trans->block_rsv = NULL;
	trans->bytes_reserved = 0;
	ret = btrfs_end_transaction(trans);
	inode->i_flags |= S_DEAD;
out_release:
	btrfs_subvolume_release_metadata(root, &block_rsv);
out_undead:
	if (ret) {
		spin_lock(&dest->root_item_lock);
		root_flags = btrfs_root_flags(&dest->root_item);
		btrfs_set_root_flags(&dest->root_item,
				     root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
		spin_unlock(&dest->root_item_lock);
	}
out_up_write:
	up_write(&fs_info->subvol_sem);
	if (!ret) {
		d_invalidate(dentry);
		btrfs_prune_dentries(dest);
		ASSERT(dest->send_in_progress == 0);
	}

	return ret;
}

static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	int err = 0;
	struct btrfs_trans_handle *trans;
	u64 last_unlink_trans;
	struct fscrypt_name fname;

	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
		return -ENOTEMPTY;
	if (btrfs_ino(BTRFS_I(inode)) == BTRFS_FIRST_FREE_OBJECTID) {
		if (unlikely(btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))) {
			btrfs_err(fs_info,
			"extent tree v2 doesn't support snapshot deletion yet");
			return -EOPNOTSUPP;
		}
		return btrfs_delete_subvolume(BTRFS_I(dir), dentry);
	}

	err = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname);
	if (err)
		return err;

	/* This needs to handle no-key deletions later on */

	trans = __unlink_start_trans(BTRFS_I(dir));
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_notrans;
	}

	if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
		err = btrfs_unlink_subvol(trans, BTRFS_I(dir), dentry);
		goto out;
	}

	err = btrfs_orphan_add(trans, BTRFS_I(inode));
	if (err)
		goto out;

	last_unlink_trans = BTRFS_I(inode)->last_unlink_trans;

	/* now the directory is empty */
	err = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
				 &fname.disk_name);
	if (!err) {
		btrfs_i_size_write(BTRFS_I(inode), 0);
		/*
		 * Propagate the last_unlink_trans value of the deleted dir to
		 * its parent directory. This is to prevent an unrecoverable
		 * log tree in the case we do something like this:
		 * 1) create dir foo
		 * 2) create snapshot under dir foo
		 * 3) delete the snapshot
		 * 4) create a new file under dir foo
		 * 5) delete the file
		 * 6) fsync foo or some file inside foo
		 */
		if (last_unlink_trans >= trans->transid)
			BTRFS_I(dir)->last_unlink_trans = last_unlink_trans;
	}
out:
	btrfs_end_transaction(trans);
out_notrans:
	btrfs_btree_balance_dirty(fs_info);
	fscrypt_free_filename(&fname);

	return err;
}

/*
 * Read, zero a chunk and write a block.
 *
 * @inode - inode that we're zeroing
 * @from - the offset to start zeroing
 * @len - the length to zero, 0 to zero the entire range respective to the
 *	offset
 * @front - zero up to the offset instead of from the offset on
 *
 * This will find the block for the "from" offset and cow the block and zero the
 * part we want to zero. This is used with truncate and hole punching.
 */
int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
			 int front)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct extent_changeset *data_reserved = NULL;
	bool only_release_metadata = false;
	u32 blocksize = fs_info->sectorsize;
	pgoff_t index = from >> PAGE_SHIFT;
	unsigned offset = from & (blocksize - 1);
	struct page *page;
	gfp_t mask = btrfs_alloc_write_mask(mapping);
	size_t write_bytes = blocksize;
	int ret = 0;
	u64 block_start;
	u64 block_end;

	if (IS_ALIGNED(offset, blocksize) &&
	    (!len || IS_ALIGNED(len, blocksize)))
		goto out;

	block_start = round_down(from, blocksize);
	block_end = block_start + blocksize - 1;

	ret = btrfs_check_data_free_space(inode, &data_reserved, block_start,
					  blocksize, false);
	if (ret < 0) {
		if (btrfs_check_nocow_lock(inode, block_start, &write_bytes, false) > 0) {
			/* For nocow case, no need to reserve data space */
			only_release_metadata = true;
		} else {
			goto out;
		}
	}
	ret = btrfs_delalloc_reserve_metadata(inode, blocksize, blocksize, false);
	if (ret < 0) {
		if (!only_release_metadata)
			btrfs_free_reserved_data_space(inode, data_reserved,
						       block_start, blocksize);
		goto out;
	}
again:
	page = find_or_create_page(mapping, index, mask);
	if (!page) {
		btrfs_delalloc_release_space(inode, data_reserved, block_start,
					     blocksize, true);
		btrfs_delalloc_release_extents(inode, blocksize);
		ret = -ENOMEM;
		goto out;
	}

	if (!PageUptodate(page)) {
		ret = btrfs_read_folio(NULL, page_folio(page));
		lock_page(page);
		if (page->mapping != mapping) {
			unlock_page(page);
			put_page(page);
			goto again;
		}
		if (!PageUptodate(page)) {
			ret = -EIO;
			goto out_unlock;
		}
	}

	/*
	 * We unlock the page after the io is completed and then re-lock it
	 * above. release_folio() could have come in between that and cleared
	 * folio private, but left the page in the mapping. Set the page mapped
	 * here to make sure it's properly set for the subpage stuff.
	 */
	ret = set_page_extent_mapped(page);
	if (ret < 0)
		goto out_unlock;

	wait_on_page_writeback(page);

	lock_extent(io_tree, block_start, block_end, &cached_state);

	ordered = btrfs_lookup_ordered_extent(inode, block_start);
	if (ordered) {
		unlock_extent(io_tree, block_start, block_end, &cached_state);
		unlock_page(page);
		put_page(page);
		btrfs_start_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	clear_extent_bit(&inode->io_tree, block_start, block_end,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
			 &cached_state);

	ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
					&cached_state);
	if (ret) {
		unlock_extent(io_tree, block_start, block_end, &cached_state);
		goto out_unlock;
	}

	if (offset != blocksize) {
		if (!len)
			len = blocksize - offset;
		if (front)
			memzero_page(page, (block_start - page_offset(page)),
				     offset);
		else
			memzero_page(page, (block_start - page_offset(page)) + offset,
				     len);
	}
	btrfs_folio_clear_checked(fs_info, page_folio(page), block_start,
				  block_end + 1 - block_start);
	btrfs_folio_set_dirty(fs_info, page_folio(page), block_start,
			      block_end + 1 - block_start);
	unlock_extent(io_tree, block_start, block_end, &cached_state);

	if (only_release_metadata)
		set_extent_bit(&inode->io_tree, block_start, block_end,
			       EXTENT_NORESERVE, NULL);

out_unlock:
	if (ret) {
		if (only_release_metadata)
			btrfs_delalloc_release_metadata(inode, blocksize, true);
		else
			btrfs_delalloc_release_space(inode, data_reserved,
						     block_start, blocksize, true);
	}
	btrfs_delalloc_release_extents(inode, blocksize);
	unlock_page(page);
	put_page(page);
out:
	if (only_release_metadata)
		btrfs_check_nocow_unlock(inode);
	extent_changeset_free(data_reserved);
	return ret;
}
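
/*
 * This is roughly how hole punching (btrfs_punch_hole() in file.c) uses the
 * helper on an unaligned range with a 4K sectorsize:
 *
 *	btrfs_truncate_block(inode, start, 0, 0);  zeroes start..end of block
 *	btrfs_truncate_block(inode, end, 0, 1);    zeroes block start..end
 *
 * leaving only whole blocks for the extent-dropping code to remove.
 */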

static int maybe_insert_hole(struct btrfs_inode *inode, u64 offset, u64 len)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_drop_extents_args drop_args = { 0 };
	int ret;

	/*
	 * If NO_HOLES is enabled, we don't need to do anything.
	 * Later, up in the call chain, either btrfs_set_inode_last_sub_trans()
	 * or btrfs_update_inode() will be called, which guarantee that the next
	 * fsync will know this inode was changed and needs to be logged.
	 */
	if (btrfs_fs_incompat(fs_info, NO_HOLES))
		return 0;

	/*
	 * 1 - for the one we're dropping
	 * 1 - for the one we're adding
	 * 1 - for updating the inode.
	 */
	trans = btrfs_start_transaction(root, 3);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	drop_args.start = offset;
	drop_args.end = offset + len;
	drop_args.drop_cache = true;

	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		return ret;
	}

	ret = btrfs_insert_hole_extent(trans, root, btrfs_ino(inode), offset, len);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
	} else {
		btrfs_update_inode_bytes(inode, 0, drop_args.bytes_found);
		btrfs_update_inode(trans, inode);
	}
	btrfs_end_transaction(trans);
	return ret;
}
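
/*
 * Without the NO_HOLES feature every file range must be covered by a file
 * extent item, so holes are represented by explicit extent items with a zero
 * disk_bytenr and have to be inserted here. With NO_HOLES the gap in the key
 * space is itself the hole and the early return above is all that's needed.
 */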

/*
 * This function puts in dummy file extents for the area we're creating a hole
 * for. So if we are truncating this file to a larger size we need to insert
 * these file extents so that btrfs_get_extent will return a EXTENT_MAP_HOLE for
 * the range between oldsize and size
 */
int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct extent_map *em = NULL;
	struct extent_state *cached_state = NULL;
	u64 hole_start = ALIGN(oldsize, fs_info->sectorsize);
	u64 block_end = ALIGN(size, fs_info->sectorsize);
	u64 last_byte;
	u64 cur_offset;
	u64 hole_size;
	int err = 0;

	/*
	 * If our size started in the middle of a block we need to zero out the
	 * rest of the block before we expand the i_size, otherwise we could
	 * expose stale data.
	 */
	err = btrfs_truncate_block(inode, oldsize, 0, 0);
	if (err)
		return err;

	if (size <= hole_start)
		return 0;

	btrfs_lock_and_flush_ordered_range(inode, hole_start, block_end - 1,
					   &cached_state);
	cur_offset = hole_start;
	while (1) {
		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				      block_end - cur_offset);
		if (IS_ERR(em)) {
			err = PTR_ERR(em);
			em = NULL;
			break;
		}
		last_byte = min(extent_map_end(em), block_end);
		last_byte = ALIGN(last_byte, fs_info->sectorsize);
		hole_size = last_byte - cur_offset;

		if (!(em->flags & EXTENT_FLAG_PREALLOC)) {
			struct extent_map *hole_em;

			err = maybe_insert_hole(inode, cur_offset, hole_size);
			if (err)
				break;

			err = btrfs_inode_set_file_extent_range(inode,
							cur_offset, hole_size);
			if (err)
				break;

			hole_em = alloc_extent_map();
			if (!hole_em) {
				btrfs_drop_extent_map_range(inode, cur_offset,
						    cur_offset + hole_size - 1,
						    false);
				btrfs_set_inode_full_sync(inode);
				goto next;
			}
			hole_em->start = cur_offset;
			hole_em->len = hole_size;
			hole_em->orig_start = cur_offset;

			hole_em->block_start = EXTENT_MAP_HOLE;
			hole_em->block_len = 0;
			hole_em->orig_block_len = 0;
			hole_em->ram_bytes = hole_size;
			hole_em->generation = btrfs_get_fs_generation(fs_info);

			err = btrfs_replace_extent_map_range(inode, hole_em, true);
			free_extent_map(hole_em);
		} else {
			err = btrfs_inode_set_file_extent_range(inode,
							cur_offset, hole_size);
			if (err)
				break;
		}
next:
		free_extent_map(em);
		em = NULL;
		cur_offset = last_byte;
		if (cur_offset >= block_end)
			break;
	}
	free_extent_map(em);
	unlock_extent(io_tree, hole_start, block_end - 1, &cached_state);
	return err;
}

static int btrfs_setsize(struct inode *inode, struct iattr *attr)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	loff_t oldsize = i_size_read(inode);
	loff_t newsize = attr->ia_size;
	int mask = attr->ia_valid;
	int ret;

	/*
	 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
	 * special case where we need to update the times despite not having
	 * these flags set. For all other operations the VFS set these flags
	 * explicitly if it wants a timestamp update.
	 */
	if (newsize != oldsize) {
		inode_inc_iversion(inode);
		if (!(mask & (ATTR_CTIME | ATTR_MTIME))) {
			inode_set_mtime_to_ts(inode,
					      inode_set_ctime_current(inode));
		}
	}

	if (newsize > oldsize) {
		/*
		 * Don't do an expanding truncate while snapshotting is ongoing.
		 * This is to ensure the snapshot captures a fully consistent
		 * state of this file - if the snapshot captures this expanding
		 * truncation, it must capture all writes that happened before
		 * this truncation.
		 */
		btrfs_drew_write_lock(&root->snapshot_lock);
		ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, newsize);
		if (ret) {
			btrfs_drew_write_unlock(&root->snapshot_lock);
			return ret;
		}

		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			btrfs_drew_write_unlock(&root->snapshot_lock);
			return PTR_ERR(trans);
		}

		i_size_write(inode, newsize);
		btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
		pagecache_isize_extended(inode, oldsize, newsize);
		ret = btrfs_update_inode(trans, BTRFS_I(inode));
		btrfs_drew_write_unlock(&root->snapshot_lock);
		btrfs_end_transaction(trans);
	} else {
		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);

		if (btrfs_is_zoned(fs_info)) {
			ret = btrfs_wait_ordered_range(inode,
					ALIGN(newsize, fs_info->sectorsize),
					(u64)-1);
			if (ret)
				return ret;
		}

		/*
		 * We're truncating a file that used to have good data down to
		 * zero. Make sure any new writes to the file get on disk
		 * on close.
		 */
		if (newsize == 0)
			set_bit(BTRFS_INODE_FLUSH_ON_CLOSE,
				&BTRFS_I(inode)->runtime_flags);

		truncate_setsize(inode, newsize);

		inode_dio_wait(inode);

		ret = btrfs_truncate(BTRFS_I(inode), newsize == oldsize);
		if (ret && inode->i_nlink) {
			int err;

			/*
			 * Truncate failed, so fix up the in-memory size. We
			 * adjusted disk_i_size down as we removed extents, so
			 * wait for disk_i_size to be stable and then update the
			 * in-memory size to match.
			 */
			err = btrfs_wait_ordered_range(inode, 0, (u64)-1);
			if (err)
				return err;
			i_size_write(inode, BTRFS_I(inode)->disk_i_size);
		}
	}

	return ret;
}

static int btrfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
			 struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int err;

	if (btrfs_root_readonly(root))
		return -EROFS;

	err = setattr_prepare(idmap, dentry, attr);
	if (err)
		return err;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		err = btrfs_setsize(inode, attr);
		if (err)
			return err;
	}

	if (attr->ia_valid) {
		setattr_copy(idmap, inode, attr);
		inode_inc_iversion(inode);
		err = btrfs_dirty_inode(BTRFS_I(inode));

		if (!err && attr->ia_valid & ATTR_MODE)
			err = posix_acl_chmod(idmap, dentry, inode->i_mode);
	}

	return err;
}

/*
 * While truncating the inode pages during eviction, we get the VFS
 * calling btrfs_invalidate_folio() against each folio of the inode. This
 * is slow because the calls to btrfs_invalidate_folio() result in a
 * huge amount of calls to lock_extent() and clear_extent_bit(),
 * which keep merging and splitting extent_state structures over and over,
 * wasting lots of time.
 *
 * Therefore if the inode is being evicted, let btrfs_invalidate_folio()
 * skip all those expensive operations on a per folio basis and do only
 * the ordered io finishing, while we release here the extent_map and
 * extent_state structures, without the excessive merging and splitting.
 */
static void evict_inode_truncate_pages(struct inode *inode)
{
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct rb_node *node;

	ASSERT(inode->i_state & I_FREEING);
	truncate_inode_pages_final(&inode->i_data);

	btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);

	/*
	 * Keep looping until we have no more ranges in the io tree.
	 * We can have ongoing bios started by readahead that have
	 * their endio callback (extent_io.c:end_bio_extent_readpage)
	 * still in progress (unlocked the pages in the bio but did not yet
	 * unlocked the ranges in the io tree). Therefore this means some
	 * ranges can still be locked and eviction started because before
	 * submitting those bios, which are executed by a separate task (work
	 * queue kthread), inode references (inode->i_count) were not taken
	 * (which would be dropped in the end io callback of each bio).
	 * Therefore here we effectively end up waiting for those bios and
	 * anyone else holding locked ranges without having bumped the inode's
	 * reference count - if we don't do it, when they access the inode's
	 * io_tree to unlock a range it may be too late, leading to an
	 * use-after-free issue.
	 */
	spin_lock(&io_tree->lock);
	while (!RB_EMPTY_ROOT(&io_tree->state)) {
		struct extent_state *state;
		struct extent_state *cached_state = NULL;
		u64 start;
		u64 end;
		unsigned state_flags;

		node = rb_first(&io_tree->state);
		state = rb_entry(node, struct extent_state, rb_node);
		start = state->start;
		end = state->end;
		state_flags = state->state;
		spin_unlock(&io_tree->lock);

		lock_extent(io_tree, start, end, &cached_state);

		/*
		 * If still has DELALLOC flag, the extent didn't reach disk,
		 * and its reserved space won't be freed by delayed_ref.
		 * So we need to free its reserved space here.
		 * (Refer to comment in btrfs_invalidate_folio, case 2)
		 *
		 * Note, end is the bytenr of last byte, so we need + 1 here.
		 */
		if (state_flags & EXTENT_DELALLOC)
			btrfs_qgroup_free_data(BTRFS_I(inode), NULL, start,
					       end - start + 1, NULL);

		clear_extent_bit(io_tree, start, end,
				 EXTENT_CLEAR_ALL_BITS | EXTENT_DO_ACCOUNTING,
				 &cached_state);

		cond_resched();
		spin_lock(&io_tree->lock);
	}
	spin_unlock(&io_tree->lock);
}

static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root,
							struct btrfs_block_rsv *rsv)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	u64 delayed_refs_extra = btrfs_calc_delayed_ref_bytes(fs_info, 1);
	int ret;

	/*
	 * Eviction should be taking place at some place safe because of our
	 * delayed iputs. However the normal flushing code will run delayed
	 * iputs, so we cannot use FLUSH_ALL otherwise we'll deadlock.
	 *
	 * We reserve the delayed_refs_extra here again because we can't use
	 * btrfs_start_transaction(root, 0) for the same deadlocky reason as
	 * above. We reserve our extra bit here because we generate a ton of
	 * delayed refs activity by truncating.
	 *
	 * BTRFS_RESERVE_FLUSH_EVICT will steal from the global_rsv if it can,
	 * if we fail to make this reservation we can re-try without the
	 * delayed_refs_extra so we can make some forward progress.
	 */
	ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size + delayed_refs_extra,
				     BTRFS_RESERVE_FLUSH_EVICT);
	if (ret) {
		ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size,
					     BTRFS_RESERVE_FLUSH_EVICT);
		if (ret) {
			btrfs_warn(fs_info,
				   "could not allocate space for delete; will truncate on mount");
			return ERR_PTR(-ENOSPC);
		}
		delayed_refs_extra = 0;
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return trans;

	if (delayed_refs_extra) {
		trans->block_rsv = &fs_info->trans_block_rsv;
		trans->bytes_reserved = delayed_refs_extra;
		btrfs_block_rsv_migrate(rsv, trans->block_rsv,
					delayed_refs_extra, true);
	}
	return trans;
}
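
/*
 * The EVICT flush mode deliberately avoids flushing that could recurse into
 * delayed iputs, since we may be running from one. If even the reduced
 * reservation fails, eviction gives up with -ENOSPC and the orphan item left
 * behind makes the next mount finish the truncation, as the warning above
 * says.
 */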

void btrfs_evict_inode(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *rsv = NULL;
	int ret;

	trace_btrfs_inode_evict(inode);

	if (!root) {
		fsverity_cleanup_inode(inode);
		clear_inode(inode);
		return;
	}

	evict_inode_truncate_pages(inode);

	if (inode->i_nlink &&
	    ((btrfs_root_refs(&root->root_item) != 0 &&
	      root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) ||
	     btrfs_is_free_space_inode(BTRFS_I(inode))))
		goto out;

	if (is_bad_inode(inode))
		goto out;

	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		goto out;

	if (inode->i_nlink > 0) {
		BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
		       root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID);
		goto out;
	}

	/*
	 * This makes sure the inode item in tree is uptodate and the space for
	 * the inode update is released.
	 */
	ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode));
	if (ret)
		goto out;

	/*
	 * This drops any pending insert or delete operations we have for this
	 * inode. We could have a delayed dir index deletion queued up, but
	 * we're removing the inode completely so that'll be taken care of in
	 * the truncate.
	 */
	btrfs_kill_delayed_inode_items(BTRFS_I(inode));

	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
	if (!rsv)
		goto out;
	rsv->size = btrfs_calc_metadata_size(fs_info, 1);
	rsv->failfast = true;

	btrfs_i_size_write(BTRFS_I(inode), 0);

	while (1) {
		struct btrfs_truncate_control control = {
			.inode = BTRFS_I(inode),
			.ino = btrfs_ino(BTRFS_I(inode)),
			.new_size = 0,
			.min_type = 0,
		};

		trans = evict_refill_and_join(root, rsv);
		if (IS_ERR(trans))
			goto out;

		trans->block_rsv = rsv;

		ret = btrfs_truncate_inode_items(trans, root, &control);
		trans->block_rsv = &fs_info->trans_block_rsv;
		btrfs_end_transaction(trans);
		/*
		 * We have not added new delayed items for our inode after we
		 * have flushed its delayed items, so no need to throttle on
		 * delayed items. However we have modified extent buffers.
		 */
		btrfs_btree_balance_dirty_nodelay(fs_info);
		if (ret && ret != -ENOSPC && ret != -EAGAIN)
			goto out;
		else if (!ret)
			break;
	}

	/*
	 * Errors here aren't a big deal, it just means we leave orphan items in
	 * the tree. They will be cleaned up on the next mount. If the inode
	 * number gets reused, cleanup deletes the orphan item without doing
	 * anything, and unlink reuses the existing orphan item.
	 *
	 * If it turns out that we are dropping too many of these, we might want
	 * to add a mechanism for retrying these after a commit.
	 */
	trans = evict_refill_and_join(root, rsv);
	if (!IS_ERR(trans)) {
		trans->block_rsv = rsv;
		btrfs_orphan_del(trans, BTRFS_I(inode));
		trans->block_rsv = &fs_info->trans_block_rsv;
		btrfs_end_transaction(trans);
	}

out:
	btrfs_free_block_rsv(fs_info, rsv);
	/*
	 * If we didn't successfully delete, the orphan item will still be in
	 * the tree and we'll retry on the next mount. Again, we might also want
	 * to retry these periodically in the future.
	 */
	btrfs_remove_delayed_node(BTRFS_I(inode));
	fsverity_cleanup_inode(inode);
	clear_inode(inode);
}

/*
 * Return the key found in the dir entry in the location pointer, fill @type
 * with BTRFS_FT_*, and return 0.
 *
 * If no dir entries were found, returns -ENOENT.
 * If found a corrupted location in dir entry, returns -EUCLEAN.
 */
static int btrfs_inode_by_name(struct btrfs_inode *dir, struct dentry *dentry,
			       struct btrfs_key *location, u8 *type)
{
	struct btrfs_dir_item *di;
	struct btrfs_path *path;
	struct btrfs_root *root = dir->root;
	int ret = 0;
	struct fscrypt_name fname;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname);
	if (ret < 0)
		goto out;
	/*
	 * fscrypt_setup_filename() should never return a positive value, but
	 * gcc on sparc/parisc thinks it can, so assert that doesn't happen.
	 */
	ASSERT(ret == 0);

	/* This needs to handle no-key deletions later on */

	di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir),
				   &fname.disk_name, 0);
	if (IS_ERR_OR_NULL(di)) {
		ret = di ? PTR_ERR(di) : -ENOENT;
		goto out;
	}

	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
	if (location->type != BTRFS_INODE_ITEM_KEY &&
	    location->type != BTRFS_ROOT_ITEM_KEY) {
		ret = -EUCLEAN;
		btrfs_warn(root->fs_info,
"%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))",
			   __func__, fname.disk_name.name, btrfs_ino(dir),
			   location->objectid, location->type, location->offset);
	}
	if (!ret)
		*type = btrfs_dir_ftype(path->nodes[0], di);
out:
	fscrypt_free_filename(&fname);
	btrfs_free_path(path);
	return ret;
}

/*
 * when we hit a tree root in a directory, the btrfs part of the inode
 * needs to be changed to reflect the root directory of the tree root. This
 * is kind of like crossing a mount point.
 */
static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
				    struct btrfs_inode *dir,
				    struct dentry *dentry,
				    struct btrfs_key *location,
				    struct btrfs_root **sub_root)
{
	struct btrfs_path *path;
	struct btrfs_root *new_root;
	struct btrfs_root_ref *ref;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret;
	int err = 0;
	struct fscrypt_name fname;

	ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 0, &fname);
	if (ret)
		return ret;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	err = -ENOENT;
	key.objectid = dir->root->root_key.objectid;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = location->objectid;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	if (ret) {
		if (ret < 0)
			err = ret;
		goto out;
	}

	leaf = path->nodes[0];
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
	if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
	    btrfs_root_ref_name_len(leaf, ref) != fname.disk_name.len)
		goto out;

	ret = memcmp_extent_buffer(leaf, fname.disk_name.name,
				   (unsigned long)(ref + 1), fname.disk_name.len);
	if (ret)
		goto out;

	btrfs_release_path(path);

	new_root = btrfs_get_fs_root(fs_info, location->objectid, true);
	if (IS_ERR(new_root)) {
		err = PTR_ERR(new_root);
		goto out;
	}

	*sub_root = new_root;
	location->objectid = btrfs_root_dirid(&new_root->root_item);
	location->type = BTRFS_INODE_ITEM_KEY;
	location->offset = 0;
	err = 0;
out:
	btrfs_free_path(path);
	fscrypt_free_filename(&fname);
	return err;
}

static void inode_tree_add(struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_inode *entry;
	struct rb_node **p;
	struct rb_node *parent;
	struct rb_node *new = &inode->rb_node;
	u64 ino = btrfs_ino(inode);

	if (inode_unhashed(&inode->vfs_inode))
		return;
	parent = NULL;
	spin_lock(&root->inode_lock);
	p = &root->inode_tree.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_inode, rb_node);

		if (ino < btrfs_ino(entry))
			p = &parent->rb_left;
		else if (ino > btrfs_ino(entry))
			p = &parent->rb_right;
		else {
			WARN_ON(!(entry->vfs_inode.i_state &
				  (I_WILL_FREE | I_FREEING)));
			rb_replace_node(parent, new, &root->inode_tree);
			RB_CLEAR_NODE(parent);
			spin_unlock(&root->inode_lock);
			return;
		}
	}
	rb_link_node(new, parent, p);
	rb_insert_color(new, &root->inode_tree);
	spin_unlock(&root->inode_lock);
}
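
/*
 * root->inode_tree orders the in-memory inodes of a root by inode number,
 * which is what allows btrfs_prune_dentries() above to walk all cached
 * inodes in ino order and restart the walk at "last ino + 1" after dropping
 * the lock.
 */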

static void inode_tree_del(struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	int empty = 0;

	spin_lock(&root->inode_lock);
	if (!RB_EMPTY_NODE(&inode->rb_node)) {
		rb_erase(&inode->rb_node, &root->inode_tree);
		RB_CLEAR_NODE(&inode->rb_node);
		empty = RB_EMPTY_ROOT(&root->inode_tree);
	}
	spin_unlock(&root->inode_lock);

	if (empty && btrfs_root_refs(&root->root_item) == 0) {
		spin_lock(&root->inode_lock);
		empty = RB_EMPTY_ROOT(&root->inode_tree);
		spin_unlock(&root->inode_lock);
		if (empty)
			btrfs_add_dead_root(root);
	}
}

static int btrfs_init_locked_inode(struct inode *inode, void *p)
{
	struct btrfs_iget_args *args = p;

	inode->i_ino = args->ino;
	BTRFS_I(inode)->location.objectid = args->ino;
	BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
	BTRFS_I(inode)->location.offset = 0;
	BTRFS_I(inode)->root = btrfs_grab_root(args->root);
	BUG_ON(args->root && !BTRFS_I(inode)->root);

	if (args->root && args->root == args->root->fs_info->tree_root &&
	    args->ino != BTRFS_BTREE_INODE_OBJECTID)
		set_bit(BTRFS_INODE_FREE_SPACE_INODE,
			&BTRFS_I(inode)->runtime_flags);
	return 0;
}

static int btrfs_find_actor(struct inode *inode, void *opaque)
{
	struct btrfs_iget_args *args = opaque;

	return args->ino == BTRFS_I(inode)->location.objectid &&
		args->root == BTRFS_I(inode)->root;
}

static struct inode *btrfs_iget_locked(struct super_block *s, u64 ino,
				       struct btrfs_root *root)
{
	struct inode *inode;
	struct btrfs_iget_args args;
	unsigned long hashval = btrfs_inode_hash(ino, root);

	args.ino = ino;
	args.root = root;

	inode = iget5_locked(s, hashval, btrfs_find_actor,
			     btrfs_init_locked_inode,
			     (void *)&args);
	return inode;
}
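
/*
 * The hash passed to iget5_locked() is derived from both the inode number
 * and the root, since the same inode number exists in every subvolume;
 * btrfs_find_actor() then disambiguates hash collisions by comparing both
 * fields.
 */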

/*
 * Get an inode object given its inode number and corresponding root.
 * Path can be preallocated to prevent recursing back to iget through
 * allocator. NULL is also valid but may require an additional allocation
 * later.
 */
struct inode *btrfs_iget_path(struct super_block *s, u64 ino,
			      struct btrfs_root *root, struct btrfs_path *path)
{
	struct inode *inode;

	inode = btrfs_iget_locked(s, ino, root);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (inode->i_state & I_NEW) {
		int ret;

		ret = btrfs_read_locked_inode(inode, path);
		if (!ret) {
			inode_tree_add(BTRFS_I(inode));
			unlock_new_inode(inode);
		} else {
			iget_failed(inode);
			/*
			 * ret > 0 can come from btrfs_search_slot called by
			 * btrfs_read_locked_inode, this means the inode item
			 * was not found.
			 */
			if (ret > 0)
				ret = -ENOENT;
			inode = ERR_PTR(ret);
		}
	}

	return inode;
}

struct inode *btrfs_iget(struct super_block *s, u64 ino, struct btrfs_root *root)
{
	return btrfs_iget_path(s, ino, root, NULL);
}

static struct inode *new_simple_dir(struct inode *dir,
				    struct btrfs_key *key,
				    struct btrfs_root *root)
{
	struct timespec64 ts;
	struct inode *inode = new_inode(dir->i_sb);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	BTRFS_I(inode)->root = btrfs_grab_root(root);
	memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);

	inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
	/*
	 * We only need lookup, the rest is read-only and there's no inode
	 * associated with the dentry
	 */
	inode->i_op = &simple_dir_inode_operations;
	inode->i_opflags &= ~IOP_XATTR;
	inode->i_fop = &simple_dir_operations;
	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;

	ts = inode_set_ctime_current(inode);
	inode_set_mtime_to_ts(inode, ts);
	inode_set_atime_to_ts(inode, inode_get_atime(dir));
	BTRFS_I(inode)->i_otime_sec = ts.tv_sec;
	BTRFS_I(inode)->i_otime_nsec = ts.tv_nsec;

	inode->i_uid = dir->i_uid;
	inode->i_gid = dir->i_gid;

	return inode;
}

static_assert(BTRFS_FT_UNKNOWN == FT_UNKNOWN);
static_assert(BTRFS_FT_REG_FILE == FT_REG_FILE);
static_assert(BTRFS_FT_DIR == FT_DIR);
static_assert(BTRFS_FT_CHRDEV == FT_CHRDEV);
static_assert(BTRFS_FT_BLKDEV == FT_BLKDEV);
static_assert(BTRFS_FT_FIFO == FT_FIFO);
static_assert(BTRFS_FT_SOCK == FT_SOCK);
static_assert(BTRFS_FT_SYMLINK == FT_SYMLINK);

static inline u8 btrfs_inode_type(struct inode *inode)
{
	return fs_umode_to_ftype(inode->i_mode);
}
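
/*
 * The asserts above pin the on-disk BTRFS_FT_* values to the generic FT_*
 * values from fs_types.h, which is what makes the direct
 * fs_umode_to_ftype()/fs_ftype_to_dtype() conversions here and in readdir
 * valid without a translation table.
 */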

struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct inode *inode;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_root *sub_root = root;
	struct btrfs_key location;
	u8 di_type = 0;
	int ret = 0;

	if (dentry->d_name.len > BTRFS_NAME_LEN)
		return ERR_PTR(-ENAMETOOLONG);

	ret = btrfs_inode_by_name(BTRFS_I(dir), dentry, &location, &di_type);
	if (ret < 0)
		return ERR_PTR(ret);

	if (location.type == BTRFS_INODE_ITEM_KEY) {
		inode = btrfs_iget(dir->i_sb, location.objectid, root);
		if (IS_ERR(inode))
			return inode;

		/* Do extra check against inode mode with di_type */
		if (btrfs_inode_type(inode) != di_type) {
			btrfs_crit(fs_info,
"inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir type=%u",
				   inode->i_mode, btrfs_inode_type(inode),
				   di_type);
			iput(inode);
			return ERR_PTR(-EUCLEAN);
		}
		return inode;
	}

	ret = fixup_tree_root_location(fs_info, BTRFS_I(dir), dentry,
				       &location, &sub_root);
	if (ret < 0) {
		if (ret != -ENOENT)
			inode = ERR_PTR(ret);
		else
			inode = new_simple_dir(dir, &location, root);
	} else {
		inode = btrfs_iget(dir->i_sb, location.objectid, sub_root);
		btrfs_put_root(sub_root);

		if (IS_ERR(inode))
			return inode;

		down_read(&fs_info->cleanup_work_sem);
		if (!sb_rdonly(inode->i_sb))
			ret = btrfs_orphan_cleanup(sub_root);
		up_read(&fs_info->cleanup_work_sem);
		if (ret) {
			iput(inode);
			inode = ERR_PTR(ret);
		}
	}

	return inode;
}

static int btrfs_dentry_delete(const struct dentry *dentry)
{
	struct btrfs_root *root;
	struct inode *inode = d_inode(dentry);

	if (!inode && !IS_ROOT(dentry))
		inode = d_inode(dentry->d_parent);

	if (inode) {
		root = BTRFS_I(inode)->root;
		if (btrfs_root_refs(&root->root_item) == 0)
			return 1;

		if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
			return 1;
	}
	return 0;
}

static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
				   unsigned int flags)
{
	struct inode *inode = btrfs_lookup_dentry(dir, dentry);

	if (inode == ERR_PTR(-ENOENT))
		inode = NULL;
	return d_splice_alias(inode, dentry);
}

/*
 * Find the highest existing sequence number in a directory and then set the
 * in-memory index_cnt variable to the first free sequence number.
 */
static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_key key, found_key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = btrfs_ino(inode);
	key.type = BTRFS_DIR_INDEX_KEY;
	key.offset = (u64)-1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	/* FIXME: we should be able to handle this */
	if (ret == 0)
		goto out;
	ret = 0;

	if (path->slots[0] == 0) {
		inode->index_cnt = BTRFS_DIR_START_INDEX;
		goto out;
	}

	path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

	if (found_key.objectid != btrfs_ino(inode) ||
	    found_key.type != BTRFS_DIR_INDEX_KEY) {
		inode->index_cnt = BTRFS_DIR_START_INDEX;
		goto out;
	}

	inode->index_cnt = found_key.offset + 1;
out:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_get_dir_last_index(struct btrfs_inode *dir, u64 *index)
{
	int ret = 0;

	btrfs_inode_lock(dir, 0);
	if (dir->index_cnt == (u64)-1) {
		ret = btrfs_inode_delayed_dir_index_count(dir);
		if (ret) {
			ret = btrfs_set_inode_index_count(dir);
			if (ret)
				goto out;
		}
	}

	/* index_cnt is the index number of next new entry, so decrement it. */
	*index = dir->index_cnt - 1;
out:
	btrfs_inode_unlock(dir, 0);

	return ret;
}

/*
 * All this infrastructure exists because dir_emit can fault, and we are holding
 * the tree lock when doing readdir. For now just allocate a buffer and copy
 * our information into that, and then dir_emit from the buffer. This is
 * similar to what NFS does, only we don't keep the buffer around in pagecache
 * because I'm afraid I'll mess that up. Long term we need to make filldir do
 * copy_to_user_inatomic so we don't have to worry about page faulting under the
 * tree lock.
 */
static int btrfs_opendir(struct inode *inode, struct file *file)
{
	struct btrfs_file_private *private;
	u64 last_index;
	int ret;

	ret = btrfs_get_dir_last_index(BTRFS_I(inode), &last_index);
	if (ret)
		return ret;

	private = kzalloc(sizeof(struct btrfs_file_private), GFP_KERNEL);
	if (!private)
		return -ENOMEM;
	private->last_index = last_index;
	private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!private->filldir_buf) {
		kfree(private);
		return -ENOMEM;
	}
	file->private_data = private;
	return 0;
}

static loff_t btrfs_dir_llseek(struct file *file, loff_t offset, int whence)
{
	struct btrfs_file_private *private = file->private_data;
	int ret;

	ret = btrfs_get_dir_last_index(BTRFS_I(file_inode(file)),
				       &private->last_index);
	if (ret)
		return ret;

	return generic_file_llseek(file, offset, whence);
}

struct dir_entry {
	u64 ino;
	u64 offset;
	unsigned type;
	int name_len;
};

static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx)
{
	while (entries--) {
		struct dir_entry *entry = addr;
		char *name = (char *)(entry + 1);

		ctx->pos = get_unaligned(&entry->offset);
		if (!dir_emit(ctx, name, get_unaligned(&entry->name_len),
			      get_unaligned(&entry->ino),
			      get_unaligned(&entry->type)))
			return 1;
		addr += sizeof(struct dir_entry) +
			get_unaligned(&entry->name_len);
		ctx->pos++;
	}
	return 0;
}

static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_private *private = file->private_data;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	void *addr;
	LIST_HEAD(ins_list);
	LIST_HEAD(del_list);
	int ret;
	char *name_ptr;
	int name_len;
	int entries = 0;
	int total_len = 0;
	bool put = false;
	struct btrfs_key location;

	if (!dir_emit_dots(file, ctx))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	addr = private->filldir_buf;
	path->reada = READA_FORWARD;

	put = btrfs_readdir_get_delayed_items(inode, private->last_index,
					      &ins_list, &del_list);

again:
	key.type = BTRFS_DIR_INDEX_KEY;
	key.offset = ctx->pos;
	key.objectid = btrfs_ino(BTRFS_I(inode));

	btrfs_for_each_slot(root, &key, &found_key, path, ret) {
		struct dir_entry *entry;
		struct extent_buffer *leaf = path->nodes[0];
		u8 ftype;

		if (found_key.objectid != key.objectid)
			break;
		if (found_key.type != BTRFS_DIR_INDEX_KEY)
			break;
		if (found_key.offset < ctx->pos)
			continue;
		if (found_key.offset > private->last_index)
			break;
		if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
			continue;
		di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
		name_len = btrfs_dir_name_len(leaf, di);
		if ((total_len + sizeof(struct dir_entry) + name_len) >=
		    PAGE_SIZE) {
			btrfs_release_path(path);
			ret = btrfs_filldir(private->filldir_buf, entries, ctx);
			if (ret)
				goto nopos;
			addr = private->filldir_buf;
			entries = 0;
			total_len = 0;
			goto again;
		}

		ftype = btrfs_dir_flags_to_ftype(btrfs_dir_flags(leaf, di));
		entry = addr;
		name_ptr = (char *)(entry + 1);
		read_extent_buffer(leaf, name_ptr,
				   (unsigned long)(di + 1), name_len);
		put_unaligned(name_len, &entry->name_len);
		put_unaligned(fs_ftype_to_dtype(ftype), &entry->type);
		btrfs_dir_item_key_to_cpu(leaf, di, &location);
		put_unaligned(location.objectid, &entry->ino);
		put_unaligned(found_key.offset, &entry->offset);
		entries++;
		addr += sizeof(struct dir_entry) + name_len;
		total_len += sizeof(struct dir_entry) + name_len;
	}
	/* Catch error encountered during iteration */
	if (ret < 0)
		goto err;

	btrfs_release_path(path);

	ret = btrfs_filldir(private->filldir_buf, entries, ctx);
	if (ret)
		goto nopos;

	ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
	if (ret)
		goto nopos;

	/*
	 * Stop new entries from being returned after we return the last
	 * entry.
	 *
	 * New directory entries are assigned a strictly increasing
	 * offset. This means that new entries created during readdir
	 * are *guaranteed* to be seen in the future by that readdir.
	 * This has broken buggy programs which operate on names as
	 * they're returned by readdir. Until we re-use freed offsets
	 * we have this hack to stop new entries from being returned
	 * under the assumption that they'll never reach this huge
	 * offset.
	 *
	 * This is being careful not to overflow 32bit loff_t unless the
	 * last entry requires it because doing so has broken 32bit apps
	 * in the past.
	 */
	if (ctx->pos >= INT_MAX)
		ctx->pos = LLONG_MAX;
	else
		ctx->pos = INT_MAX;
nopos:
	ret = 0;
err:
	if (put)
		btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
	btrfs_free_path(path);
	return ret;
}

/*
 * This is somewhat expensive, updating the tree every time the
 * inode changes. But, it is most likely to find the inode in cache.
 * FIXME, needs more benchmarking...there are no reasons other than performance
 * to keep or drop this code.
 */
static int btrfs_dirty_inode(struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret;

	if (test_bit(BTRFS_INODE_DUMMY, &inode->runtime_flags))
		return 0;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	ret = btrfs_update_inode(trans, inode);
	if (ret == -ENOSPC || ret == -EDQUOT) {
		/* whoops, lets try again with the full transaction */
		btrfs_end_transaction(trans);
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_update_inode(trans, inode);
	}
	btrfs_end_transaction(trans);
	if (inode->delayed_node)
		btrfs_balance_delayed_items(fs_info);

	return ret;
}

/*
 * This is a copy of file_update_time. We need this so we can return error on
 * ENOSPC for updating the inode in the case of file write and mmap writes.
 */
static int btrfs_update_time(struct inode *inode, int flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	bool dirty;

	if (btrfs_root_readonly(root))
		return -EROFS;

	dirty = inode_update_timestamps(inode, flags);
	return dirty ? btrfs_dirty_inode(BTRFS_I(inode)) : 0;
}

/*
 * helper to find a free sequence number in a given directory. This current
 * code is very simple, later versions will do smarter things in the btree
 */
int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index)
{
	int ret = 0;

	if (dir->index_cnt == (u64)-1) {
		ret = btrfs_inode_delayed_dir_index_count(dir);
		if (ret) {
			ret = btrfs_set_inode_index_count(dir);
			if (ret)
				return ret;
		}
	}

	*index = dir->index_cnt;
	dir->index_cnt++;

	return ret;
}

static int btrfs_insert_inode_locked(struct inode *inode)
{
	struct btrfs_iget_args args;

	args.ino = BTRFS_I(inode)->location.objectid;
	args.root = BTRFS_I(inode)->root;

	return insert_inode_locked4(inode,
		   btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root),
		   btrfs_find_actor, &args);
}
int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args,
			    unsigned int *trans_num_items)
{
	struct inode *dir = args->dir;
	struct inode *inode = args->inode;
	int ret;

	if (!args->orphan) {
		ret = fscrypt_setup_filename(dir, &args->dentry->d_name, 0,
					     &args->fname);
		if (ret)
			return ret;
	}

	ret = posix_acl_create(dir, &inode->i_mode, &args->default_acl, &args->acl);
	if (ret) {
		fscrypt_free_filename(&args->fname);
		return ret;
	}

	/* 1 to add inode item */
	*trans_num_items = 1;
	/* 1 to add compression property */
	if (BTRFS_I(dir)->prop_compress)
		(*trans_num_items)++;
	/* 1 to add default ACL xattr */
	if (args->default_acl)
		(*trans_num_items)++;
	/* 1 to add access ACL xattr */
	if (args->acl)
		(*trans_num_items)++;
#ifdef CONFIG_SECURITY
	/* 1 to add LSM xattr */
	if (dir->i_security)
		(*trans_num_items)++;
#endif
	if (args->orphan) {
		/* 1 to add orphan item */
		(*trans_num_items)++;
	} else {
		/*
		 * 1 to add dir item
		 * 1 to add dir index
		 * 1 to update parent inode item
		 *
		 * No need for 1 unit for the inode ref item because it is
		 * inserted in a batch together with the inode item at
		 * btrfs_create_new_inode().
		 */
		*trans_num_items += 3;
	}
	return 0;
}
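
/*
 * Worked example for the accounting above: a regular file created in a
 * directory with a compression property, a default ACL and an access ACL
 * needs 1 (inode item) + 1 + 1 + 1 = 4 units, plus 3 for the dir item, dir
 * index and parent inode update, so *trans_num_items ends up as 7 (plus one
 * more for the LSM xattr when CONFIG_SECURITY is enabled and dir->i_security
 * is set).
 */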
void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args)
{
	posix_acl_release(args->acl);
	posix_acl_release(args->default_acl);
	fscrypt_free_filename(&args->fname);
}
/*
 * Inherit flags from the parent inode.
 *
 * Currently only the compression flags and the cow flags are inherited.
 */
static void btrfs_inherit_iflags(struct btrfs_inode *inode, struct btrfs_inode *dir)
{
	unsigned int flags;

	flags = dir->flags;

	if (flags & BTRFS_INODE_NOCOMPRESS) {
		inode->flags &= ~BTRFS_INODE_COMPRESS;
		inode->flags |= BTRFS_INODE_NOCOMPRESS;
	} else if (flags & BTRFS_INODE_COMPRESS) {
		inode->flags &= ~BTRFS_INODE_NOCOMPRESS;
		inode->flags |= BTRFS_INODE_COMPRESS;
	}

	if (flags & BTRFS_INODE_NODATACOW) {
		inode->flags |= BTRFS_INODE_NODATACOW;
		if (S_ISREG(inode->vfs_inode.i_mode))
			inode->flags |= BTRFS_INODE_NODATASUM;
	}

	btrfs_sync_inode_flags_to_i_flags(&inode->vfs_inode);
}
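
/*
 * NODATACOW implies NODATASUM for regular files because an overwrite in
 * place cannot update the checksum tree atomically with the data blocks;
 * nodatacow data is therefore left unchecksummed.
 */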
int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_new_inode_args *args)
{
	struct timespec64 ts;
	struct inode *dir = args->dir;
	struct inode *inode = args->inode;
	const struct fscrypt_str *name = args->orphan ? NULL : &args->fname.disk_name;
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct btrfs_root *root;
	struct btrfs_inode_item *inode_item;
	struct btrfs_key *location;
	struct btrfs_path *path;
	u64 objectid;
	struct btrfs_inode_ref *ref;
	struct btrfs_key key[2];
	u32 sizes[2];
	struct btrfs_item_batch batch;
	unsigned long ptr;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (!args->subvol)
		BTRFS_I(inode)->root = btrfs_grab_root(BTRFS_I(dir)->root);
	root = BTRFS_I(inode)->root;

	ret = btrfs_get_free_objectid(root, &objectid);
	if (ret)
		goto out;
	inode->i_ino = objectid;

	if (args->orphan) {
		/*
		 * O_TMPFILE, set link count to 0, so that after this point, we
		 * fill in an inode item with the correct link count.
		 */
		set_nlink(inode, 0);
	} else {
		trace_btrfs_inode_request(dir);

		ret = btrfs_set_inode_index(BTRFS_I(dir), &BTRFS_I(inode)->dir_index);
		if (ret)
			goto out;
	}
	/* index_cnt is ignored for everything but a dir. */
	BTRFS_I(inode)->index_cnt = BTRFS_DIR_START_INDEX;
	BTRFS_I(inode)->generation = trans->transid;
	inode->i_generation = BTRFS_I(inode)->generation;

	/*
	 * We don't have any capability xattrs set here yet, shortcut any
	 * queries for the xattrs here. If we add them later via the inode
	 * security init path or any other path this flag will be cleared.
	 */
	set_bit(BTRFS_INODE_NO_CAP_XATTR, &BTRFS_I(inode)->runtime_flags);

	/*
	 * Subvolumes don't inherit flags from their parent directory.
	 * Originally this was probably by accident, but we probably can't
	 * change it now without compatibility issues.
	 */
	if (!args->subvol)
		btrfs_inherit_iflags(BTRFS_I(inode), BTRFS_I(dir));

	if (S_ISREG(inode->i_mode)) {
		if (btrfs_test_opt(fs_info, NODATASUM))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
		if (btrfs_test_opt(fs_info, NODATACOW))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
				BTRFS_INODE_NODATASUM;
	}

	location = &BTRFS_I(inode)->location;
	location->objectid = objectid;
	location->offset = 0;
	location->type = BTRFS_INODE_ITEM_KEY;

	ret = btrfs_insert_inode_locked(inode);
	if (ret < 0) {
		if (!args->orphan)
			BTRFS_I(dir)->index_cnt--;
		goto out;
	}

	/*
	 * We could have gotten an inode number from somebody who was fsynced
	 * and then removed in this same transaction, so let's just set full
	 * sync since it will be a full sync anyway and this will blow away the
	 * old info in the log.
	 */
	btrfs_set_inode_full_sync(BTRFS_I(inode));

	key[0].objectid = objectid;
	key[0].type = BTRFS_INODE_ITEM_KEY;
	key[0].offset = 0;

	sizes[0] = sizeof(struct btrfs_inode_item);

	if (!args->orphan) {
		/*
		 * Start new inodes with an inode_ref. This is slightly more
		 * efficient for small numbers of hard links since they will
		 * be packed into one item. Extended refs will kick in if we
		 * add more hard links than can fit in the ref item.
		 */
		key[1].objectid = objectid;
		key[1].type = BTRFS_INODE_REF_KEY;
		if (args->subvol) {
			key[1].offset = objectid;
			sizes[1] = 2 + sizeof(*ref);
		} else {
			key[1].offset = btrfs_ino(BTRFS_I(dir));
			sizes[1] = name->len + sizeof(*ref);
		}
	}

	batch.keys = &key[0];
	batch.data_sizes = &sizes[0];
	batch.total_data_size = sizes[0] + (args->orphan ? 0 : sizes[1]);
	batch.nr = args->orphan ? 1 : 2;
	ret = btrfs_insert_empty_items(trans, root, path, &batch);
	if (ret != 0) {
		btrfs_abort_transaction(trans, ret);
		goto discard;
	}

	ts = simple_inode_init_ts(inode);
	BTRFS_I(inode)->i_otime_sec = ts.tv_sec;
	BTRFS_I(inode)->i_otime_nsec = ts.tv_nsec;

	/*
	 * We're going to fill the inode item now, so at this point the inode
	 * must be fully initialized.
	 */

	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_inode_item);
	memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item,
			      sizeof(*inode_item));
	fill_inode_item(trans, path->nodes[0], inode_item, inode);

	if (!args->orphan) {
		ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
				     struct btrfs_inode_ref);
		ptr = (unsigned long)(ref + 1);
		if (args->subvol) {
			btrfs_set_inode_ref_name_len(path->nodes[0], ref, 2);
			btrfs_set_inode_ref_index(path->nodes[0], ref, 0);
			write_extent_buffer(path->nodes[0], "..", ptr, 2);
		} else {
			btrfs_set_inode_ref_name_len(path->nodes[0], ref,
						     name->len);
			btrfs_set_inode_ref_index(path->nodes[0], ref,
						  BTRFS_I(inode)->dir_index);
			write_extent_buffer(path->nodes[0], name->name, ptr,
					    name->len);
		}
	}

	btrfs_mark_buffer_dirty(trans, path->nodes[0]);
	/*
	 * We don't need the path anymore, plus inheriting properties, adding
	 * ACLs, security xattrs, orphan item or adding the link, will result in
	 * allocating yet another path. So just free our path.
	 */
	btrfs_free_path(path);
	path = NULL;

	if (args->subvol) {
		struct inode *parent;

		/*
		 * Subvolumes inherit properties from their parent subvolume,
		 * not the directory they were created in.
		 */
		parent = btrfs_iget(fs_info->sb, BTRFS_FIRST_FREE_OBJECTID,
				    BTRFS_I(dir)->root);
		if (IS_ERR(parent)) {
			ret = PTR_ERR(parent);
		} else {
			ret = btrfs_inode_inherit_props(trans, inode, parent);
			iput(parent);
		}
	} else {
		ret = btrfs_inode_inherit_props(trans, inode, dir);
	}
	if (ret) {
		btrfs_err(fs_info,
			  "error inheriting props for ino %llu (root %llu): %d",
			  btrfs_ino(BTRFS_I(inode)), root->root_key.objectid,
			  ret);
	}

	/*
	 * Subvolumes don't inherit ACLs or get passed to the LSM. This is
	 * probably a bug that we should fix.
	 */
	if (!args->subvol) {
		ret = btrfs_init_inode_security(trans, args);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto discard;
		}
	}

	inode_tree_add(BTRFS_I(inode));

	trace_btrfs_inode_new(inode);
	btrfs_set_inode_last_trans(trans, BTRFS_I(inode));

	btrfs_update_root_times(trans, root);

	if (args->orphan) {
		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
	} else {
		ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
				     0, BTRFS_I(inode)->dir_index);
	}
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto discard;
	}

	return 0;

discard:
	/*
	 * discard_new_inode() calls iput(), but the caller owns the reference
	 * to the inode.
	 */
	ihold(inode);
	discard_new_inode(inode);
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * utility function to add 'inode' into 'parent_inode' with
 * a given name and a given sequence number.
 * if 'add_backref' is true, also insert a backref from the
 * inode to the parent directory.
 */
int btrfs_add_link(struct btrfs_trans_handle *trans,
		   struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
		   const struct fscrypt_str *name, int add_backref, u64 index)
{
	int ret = 0;
	struct btrfs_key key;
	struct btrfs_root *root = parent_inode->root;
	u64 ino = btrfs_ino(inode);
	u64 parent_ino = btrfs_ino(parent_inode);

	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		memcpy(&key, &inode->root->root_key, sizeof(key));
	} else {
		key.objectid = ino;
		key.type = BTRFS_INODE_ITEM_KEY;
		key.offset = 0;
	}

	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		ret = btrfs_add_root_ref(trans, key.objectid,
					 root->root_key.objectid, parent_ino,
					 index, name);
	} else if (add_backref) {
		ret = btrfs_insert_inode_ref(trans, root, name,
					     ino, parent_ino, index);
	}

	/* Nothing to clean up yet */
	if (ret)
		return ret;

	ret = btrfs_insert_dir_item(trans, name, parent_inode, &key,
				    btrfs_inode_type(&inode->vfs_inode), index);
	if (ret == -EEXIST || ret == -EOVERFLOW)
		goto fail_dir_item;
	else if (ret) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size +
			   name->len * 2);
	inode_inc_iversion(&parent_inode->vfs_inode);
	/*
	 * If we are replaying a log tree, we do not want to update the mtime
	 * and ctime of the parent directory with the current time, since the
	 * log replay procedure is responsible for setting them to their correct
	 * values (the ones it had when the fsync was done).
	 */
	if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags))
		inode_set_mtime_to_ts(&parent_inode->vfs_inode,
				      inode_set_ctime_current(&parent_inode->vfs_inode));

	ret = btrfs_update_inode(trans, parent_inode);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;

fail_dir_item:
	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		u64 local_index;
		int err;

		err = btrfs_del_root_ref(trans, key.objectid,
					 root->root_key.objectid, parent_ino,
					 &local_index, name);
		if (err)
			btrfs_abort_transaction(trans, err);
	} else if (add_backref) {
		u64 local_index;
		int err;

		err = btrfs_del_inode_ref(trans, root, name, ino, parent_ino,
					  &local_index);
		if (err)
			btrfs_abort_transaction(trans, err);
	}

	/* Return the original error code */
	return ret;
}
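
/*
 * The i_size bump of name->len * 2 above reflects the two directory items
 * added per entry: a DIR_ITEM keyed by the name hash and a DIR_INDEX item
 * keyed by the sequence number, each of which stores the name.
 */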
static int btrfs_create_common(struct inode *dir, struct dentry *dentry,
			       struct inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_new_inode_args new_inode_args = {
		.dir = dir,
		.dentry = dentry,
		.inode = inode,
	};
	unsigned int trans_num_items;
	struct btrfs_trans_handle *trans;
	int err;

	err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
	if (err)
		goto out_inode;

	trans = btrfs_start_transaction(root, trans_num_items);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_new_inode_args;
	}

	err = btrfs_create_new_inode(trans, &new_inode_args);
	if (!err)
		d_instantiate_new(dentry, inode);

	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out_new_inode_args:
	btrfs_new_inode_args_destroy(&new_inode_args);
out_inode:
	if (err)
		iput(inode);
	return err;
}
static int btrfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
		       struct dentry *dentry, umode_t mode, dev_t rdev)
{
	struct inode *inode;

	inode = new_inode(dir->i_sb);
	if (!inode)
		return -ENOMEM;
	inode_init_owner(idmap, inode, dir, mode);
	inode->i_op = &btrfs_special_inode_operations;
	init_special_inode(inode, inode->i_mode, rdev);
	return btrfs_create_common(dir, dentry, inode);
}
static int btrfs_create(struct mnt_idmap *idmap, struct inode *dir,
			struct dentry *dentry, umode_t mode, bool excl)
{
	struct inode *inode;

	inode = new_inode(dir->i_sb);
	if (!inode)
		return -ENOMEM;
	inode_init_owner(idmap, inode, dir, mode);
	inode->i_fop = &btrfs_file_operations;
	inode->i_op = &btrfs_file_inode_operations;
	inode->i_mapping->a_ops = &btrfs_aops;
	return btrfs_create_common(dir, dentry, inode);
}
static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
		      struct dentry *dentry)
{
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = d_inode(old_dentry);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct fscrypt_name fname;
	u64 index;
	int err;
	int drop_inode = 0;

	/* do not allow sys_link's with other subvols of the same device */
	if (root->root_key.objectid != BTRFS_I(inode)->root->root_key.objectid)
		return -EXDEV;

	if (inode->i_nlink >= BTRFS_LINK_MAX)
		return -EMLINK;

	err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname);
	if (err)
		goto fail;

	err = btrfs_set_inode_index(BTRFS_I(dir), &index);
	if (err)
		goto fail;

	/*
	 * 2 items for inode and inode ref
	 * 2 items for dir items
	 * 1 item for parent inode
	 * 1 item for orphan item deletion if O_TMPFILE
	 */
	trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		trans = NULL;
		goto fail;
	}

	/* There are several dir indexes for this inode, clear the cache. */
	BTRFS_I(inode)->dir_index = 0ULL;
	inc_nlink(inode);
	inode_inc_iversion(inode);
	inode_set_ctime_current(inode);
	ihold(inode);
	set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);

	err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
			     &fname.disk_name, 1, index);

	if (err) {
		drop_inode = 1;
	} else {
		struct dentry *parent = dentry->d_parent;

		err = btrfs_update_inode(trans, BTRFS_I(inode));
		if (err)
			goto fail;
		if (inode->i_nlink == 1) {
			/*
			 * If new hard link count is 1, it's a file created
			 * with open(2) O_TMPFILE flag.
			 */
			err = btrfs_orphan_del(trans, BTRFS_I(inode));
			if (err)
				goto fail;
		}
		d_instantiate(dentry, inode);
		btrfs_log_new_name(trans, old_dentry, NULL, 0, parent);
	}

fail:
	fscrypt_free_filename(&fname);
	if (trans)
		btrfs_end_transaction(trans);
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	btrfs_btree_balance_dirty(fs_info);
	return err;
}
static int btrfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
		       struct dentry *dentry, umode_t mode)
{
	struct inode *inode;

	inode = new_inode(dir->i_sb);
	if (!inode)
		return -ENOMEM;
	inode_init_owner(idmap, inode, dir, S_IFDIR | mode);
	inode->i_op = &btrfs_dir_inode_operations;
	inode->i_fop = &btrfs_dir_file_operations;
	return btrfs_create_common(dir, dentry, inode);
}
static noinline int uncompress_inline(struct btrfs_path *path,
				      struct page *page,
				      struct btrfs_file_extent_item *item)
{
	int ret;
	struct extent_buffer *leaf = path->nodes[0];
	char *tmp;
	size_t max_size;
	unsigned long inline_size;
	unsigned long ptr;
	int compress_type;

	compress_type = btrfs_file_extent_compression(leaf, item);
	max_size = btrfs_file_extent_ram_bytes(leaf, item);
	inline_size = btrfs_file_extent_inline_item_len(leaf, path->slots[0]);
	tmp = kmalloc(inline_size, GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	ptr = btrfs_file_extent_inline_start(item);

	read_extent_buffer(leaf, tmp, ptr, inline_size);

	max_size = min_t(unsigned long, PAGE_SIZE, max_size);
	ret = btrfs_decompress(compress_type, tmp, page, 0, inline_size, max_size);

	/*
	 * decompression code contains a memset to fill in any space between the end
	 * of the uncompressed data and the end of max_size in case the decompressed
	 * data ends up shorter than ram_bytes.  That doesn't cover the hole between
	 * the end of an inline extent and the beginning of the next block, so we
	 * cover that region here.
	 */
	if (max_size < PAGE_SIZE)
		memzero_page(page, max_size, PAGE_SIZE - max_size);
	kfree(tmp);
	return ret;
}
static int read_inline_extent(struct btrfs_inode *inode, struct btrfs_path *path,
			      struct page *page)
{
	struct btrfs_file_extent_item *fi;
	void *kaddr;
	size_t copy_size;

	if (!page || PageUptodate(page))
		return 0;

	ASSERT(page_offset(page) == 0);

	fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_file_extent_item);
	if (btrfs_file_extent_compression(path->nodes[0], fi) != BTRFS_COMPRESS_NONE)
		return uncompress_inline(path, page, fi);

	copy_size = min_t(u64, PAGE_SIZE,
			  btrfs_file_extent_ram_bytes(path->nodes[0], fi));
	kaddr = kmap_local_page(page);
	read_extent_buffer(path->nodes[0], kaddr,
			   btrfs_file_extent_inline_start(fi), copy_size);
	kunmap_local(kaddr);
	if (copy_size < PAGE_SIZE)
		memzero_page(page, copy_size, PAGE_SIZE - copy_size);
	return 0;
}
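
/*
 * Both paths above zero the tail of the page because an inline extent never
 * extends past the first block: ram_bytes may be smaller than PAGE_SIZE and
 * the remainder of the page must read back as zeroes.
 */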
/*
 * Lookup the first extent overlapping a range in a file.
 *
 * @inode:	file to search in
 * @page:	page to read extent data into if the extent is inline
 * @pg_offset:	offset into @page to copy to
 * @start:	file offset
 * @len:	length of range starting at @start
 *
 * Return the first &struct extent_map which overlaps the given range, reading
 * it from the B-tree and caching it if necessary. Note that there may be more
 * extents which overlap the given range after the returned extent_map.
 *
 * If @page is not NULL and the extent is inline, this also reads the extent
 * data directly into the page and marks the extent up to date in the io_tree.
 *
 * Return: ERR_PTR on error, non-NULL extent_map on success.
 */
struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
				    struct page *page, size_t pg_offset,
				    u64 start, u64 len)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	int ret = 0;
	u64 extent_start = 0;
	u64 extent_end = 0;
	u64 objectid = btrfs_ino(inode);
	int extent_type = -1;
	struct btrfs_path *path = NULL;
	struct btrfs_root *root = inode->root;
	struct btrfs_file_extent_item *item;
	struct extent_buffer *leaf;
	struct btrfs_key found_key;
	struct extent_map *em = NULL;
	struct extent_map_tree *em_tree = &inode->extent_tree;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	read_unlock(&em_tree->lock);

	if (em) {
		if (em->start > start || em->start + em->len <= start)
			free_extent_map(em);
		else if (em->block_start == EXTENT_MAP_INLINE && page)
			free_extent_map(em);
		else
			goto out;
	}
	em = alloc_extent_map();
	if (!em) {
		ret = -ENOMEM;
		goto out;
	}
	em->start = EXTENT_MAP_HOLE;
	em->orig_start = EXTENT_MAP_HOLE;
	em->len = (u64)-1;
	em->block_len = (u64)-1;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/* Chances are we'll be called again, so go ahead and do readahead */
	path->reada = READA_FORWARD;

	/*
	 * The same explanation in load_free_space_cache applies here as well,
	 * we only read when we're loading the free space cache, and at that
	 * point the commit_root has everything we need.
	 */
	if (btrfs_is_free_space_inode(inode)) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0);
	if (ret < 0) {
		goto out;
	} else if (ret > 0) {
		if (path->slots[0] == 0)
			goto not_found;
		path->slots[0]--;
		ret = 0;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	if (found_key.objectid != objectid ||
	    found_key.type != BTRFS_EXTENT_DATA_KEY) {
		/*
		 * If we backup past the first extent we want to move forward
		 * and see if there is an extent in front of us, otherwise we'll
		 * say there is a hole for our whole search range which can
		 * cause problems.
		 */
		extent_end = start;
		goto next;
	}

	extent_type = btrfs_file_extent_type(leaf, item);
	extent_start = found_key.offset;
	extent_end = btrfs_file_extent_end(path);
	if (extent_type == BTRFS_FILE_EXTENT_REG ||
	    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
		/* Only regular file could have regular/prealloc extent */
		if (!S_ISREG(inode->vfs_inode.i_mode)) {
			ret = -EUCLEAN;
			btrfs_crit(fs_info,
		"regular/prealloc extent found for non-regular inode %llu",
				   btrfs_ino(inode));
			goto out;
		}
		trace_btrfs_get_extent_show_fi_regular(inode, leaf, item,
						       extent_start);
	} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
		trace_btrfs_get_extent_show_fi_inline(inode, leaf, item,
						      path->slots[0],
						      extent_start);
	}
next:
	if (start >= extent_end) {
		path->slots[0]++;
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				goto not_found;

			leaf = path->nodes[0];
		}
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != objectid ||
		    found_key.type != BTRFS_EXTENT_DATA_KEY)
			goto not_found;
		if (start + len <= found_key.offset)
			goto not_found;
		if (start > found_key.offset)
			goto next;

		/* New extent overlaps with existing one */
		em->start = start;
		em->orig_start = start;
		em->len = found_key.offset - start;
		em->block_start = EXTENT_MAP_HOLE;
		goto insert;
	}

	btrfs_extent_item_to_extent_map(inode, path, item, em);

	if (extent_type == BTRFS_FILE_EXTENT_REG ||
	    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
		goto insert;
	} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
		/*
		 * Inline extent can only exist at file offset 0. This is
		 * ensured by tree-checker and inline extent creation path.
		 * Thus all members representing file offsets should be zero.
		 */
		ASSERT(pg_offset == 0);
		ASSERT(extent_start == 0);
		ASSERT(em->start == 0);

		/*
		 * btrfs_extent_item_to_extent_map() should have properly
		 * initialized em members already.
		 *
		 * Other members are not utilized for inline extents.
		 */
		ASSERT(em->block_start == EXTENT_MAP_INLINE);
		ASSERT(em->len == fs_info->sectorsize);

		ret = read_inline_extent(inode, path, page);
		if (ret < 0)
			goto out;
		goto insert;
	}
not_found:
	em->start = start;
	em->orig_start = start;
	em->len = len;
	em->block_start = EXTENT_MAP_HOLE;
insert:
	ret = 0;
	btrfs_release_path(path);
	if (em->start > start || extent_map_end(em) <= start) {
		btrfs_err(fs_info,
			  "bad extent! em: [%llu %llu] passed [%llu %llu]",
			  em->start, em->len, start, len);
		ret = -EIO;
		goto out;
	}

	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len);
	write_unlock(&em_tree->lock);
out:
	btrfs_free_path(path);

	trace_btrfs_get_extent(root, inode, em);

	if (ret) {
		free_extent_map(em);
		return ERR_PTR(ret);
	}
	return em;
}
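
/*
 * Typical usage: callers pass the range they care about and loop, advancing
 * start to extent_map_end(em), since only the first overlapping mapping is
 * returned.  Holes are not an error; they come back as an extent_map with
 * block_start == EXTENT_MAP_HOLE.
 */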
static struct extent_map *btrfs_create_dio_extent(struct btrfs_inode *inode,
						  struct btrfs_dio_data *dio_data,
						  const u64 start,
						  const u64 len,
						  const u64 orig_start,
						  const u64 block_start,
						  const u64 block_len,
						  const u64 orig_block_len,
						  const u64 ram_bytes,
						  const int type)
{
	struct extent_map *em = NULL;
	struct btrfs_ordered_extent *ordered;

	if (type != BTRFS_ORDERED_NOCOW) {
		em = create_io_em(inode, start, len, orig_start, block_start,
				  block_len, orig_block_len, ram_bytes,
				  BTRFS_COMPRESS_NONE, /* compress_type */
				  type);
		if (IS_ERR(em))
			goto out;
	}
	ordered = btrfs_alloc_ordered_extent(inode, start, len, len,
					     block_start, block_len, 0,
					     (1 << type) |
					     (1 << BTRFS_ORDERED_DIRECT),
					     BTRFS_COMPRESS_NONE);
	if (IS_ERR(ordered)) {
		if (em) {
			free_extent_map(em);
			btrfs_drop_extent_map_range(inode, start,
						    start + len - 1, false);
		}
		em = ERR_CAST(ordered);
	} else {
		ASSERT(!dio_data->ordered);
		dio_data->ordered = ordered;
	}
out:

	return em;
}
static struct extent_map *btrfs_new_extent_direct(struct btrfs_inode *inode,
						  struct btrfs_dio_data *dio_data,
						  u64 start, u64 len)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_map *em;
	struct btrfs_key ins;
	u64 alloc_hint;
	int ret;

	alloc_hint = get_extent_allocation_hint(inode, start, len);
again:
	ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize,
				   0, alloc_hint, &ins, 1, 1);
	if (ret == -EAGAIN) {
		ASSERT(btrfs_is_zoned(fs_info));
		wait_on_bit_io(&inode->root->fs_info->flags, BTRFS_FS_NEED_ZONE_FINISH,
			       TASK_UNINTERRUPTIBLE);
		goto again;
	}
	if (ret)
		return ERR_PTR(ret);

	em = btrfs_create_dio_extent(inode, dio_data, start, ins.offset, start,
				     ins.objectid, ins.offset, ins.offset,
				     ins.offset, BTRFS_ORDERED_REGULAR);
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	if (IS_ERR(em))
		btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset,
					   1);

	return em;
}
static bool btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group *block_group;
	bool readonly = false;

	block_group = btrfs_lookup_block_group(fs_info, bytenr);
	if (!block_group || block_group->ro)
		readonly = true;
	if (block_group)
		btrfs_put_block_group(block_group);
	return readonly;
}
/*
 * Check if we can do nocow write into the range [@offset, @offset + @len)
 *
 * @offset:	File offset
 * @len:	The length to write, will be updated to the nocow writeable
 *		range
 * @orig_start:	(optional) Return the original file offset of the file extent
 * @orig_len:	(optional) Return the original on-disk length of the file extent
 * @ram_bytes:	(optional) Return the ram_bytes of the file extent
 * @strict:	if true, omit optimizations that might force us into unnecessary
 *		cow. e.g., don't trust generation number.
 *
 * Return:
 * >0	and update @len if we can do nocow write
 *  0	if we can't do nocow write
 * <0	if error happened
 *
 * NOTE: This only checks the file extents, caller is responsible to wait for
 *	 any ordered extents.
 */
noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
			      u64 *orig_start, u64 *orig_block_len,
			      u64 *ram_bytes, bool nowait, bool strict)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct can_nocow_file_extent_args nocow_args = { 0 };
	struct btrfs_path *path;
	int ret;
	struct extent_buffer *leaf;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	int found_type;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->nowait = nowait;

	ret = btrfs_lookup_file_extent(NULL, root, path,
				       btrfs_ino(BTRFS_I(inode)), offset, 0);
	if (ret < 0)
		goto out;

	if (ret == 1) {
		if (path->slots[0] == 0) {
			/* can't find the item, must cow */
			ret = 0;
			goto out;
		}
		path->slots[0]--;
	}
	ret = 0;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != btrfs_ino(BTRFS_I(inode)) ||
	    key.type != BTRFS_EXTENT_DATA_KEY) {
		/* not our file or wrong item type, must cow */
		goto out;
	}

	if (key.offset > offset) {
		/* Wrong offset, must cow */
		goto out;
	}

	if (btrfs_file_extent_end(path) <= offset)
		goto out;

	fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(leaf, fi);
	if (ram_bytes)
		*ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);

	nocow_args.start = offset;
	nocow_args.end = offset + *len - 1;
	nocow_args.strict = strict;
	nocow_args.free_path = true;

	ret = can_nocow_file_extent(path, &key, BTRFS_I(inode), &nocow_args);
	/* can_nocow_file_extent() has freed the path. */
	path = NULL;

	if (ret != 1) {
		/* Treat errors as not being able to NOCOW. */
		ret = 0;
		goto out;
	}

	ret = 0;
	if (btrfs_extent_readonly(fs_info, nocow_args.disk_bytenr))
		goto out;

	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		u64 range_end;

		range_end = round_up(offset + nocow_args.num_bytes,
				     root->fs_info->sectorsize) - 1;
		ret = test_range_bit_exists(io_tree, offset, range_end, EXTENT_DELALLOC);
		if (ret) {
			ret = -EAGAIN;
			goto out;
		}
	}

	if (orig_start)
		*orig_start = key.offset - nocow_args.extent_offset;
	if (orig_block_len)
		*orig_block_len = nocow_args.disk_num_bytes;

	*len = nocow_args.num_bytes;
	ret = 1;
out:
	btrfs_free_path(path);
	return ret;
}
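
/*
 * The EXTENT_DELALLOC probe above handles a subtle case: a prealloc extent
 * with pending delalloc in the range may get a COW extent allocated for part
 * of it when the delalloc is flushed, so reporting the range as NOCOW-able
 * would be premature and we bail out with -EAGAIN instead.
 */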
static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
			      struct extent_state **cached_state,
			      unsigned int iomap_flags)
{
	const bool writing = (iomap_flags & IOMAP_WRITE);
	const bool nowait = (iomap_flags & IOMAP_NOWAIT);
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_ordered_extent *ordered;
	int ret = 0;

	while (1) {
		if (nowait) {
			if (!try_lock_extent(io_tree, lockstart, lockend,
					     cached_state))
				return -EAGAIN;
		} else {
			lock_extent(io_tree, lockstart, lockend, cached_state);
		}
		/*
		 * We're concerned with the entire range that we're going to be
		 * doing DIO to, so we need to make sure there's no ordered
		 * extents in this range.
		 */
		ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart,
						     lockend - lockstart + 1);

		/*
		 * We need to make sure there are no buffered pages in this
		 * range either, we could have raced between the invalidate in
		 * generic_file_direct_write and locking the extent.  The
		 * invalidate needs to happen so that reads after a write do not
		 * get stale data.
		 */
		if (!ordered &&
		    (!writing || !filemap_range_has_page(inode->i_mapping,
							 lockstart, lockend)))
			break;

		unlock_extent(io_tree, lockstart, lockend, cached_state);

		if (ordered) {
			if (nowait) {
				btrfs_put_ordered_extent(ordered);
				ret = -EAGAIN;
				break;
			}
			/*
			 * If we are doing a DIO read and the ordered extent we
			 * found is for a buffered write, we can not wait for it
			 * to complete and retry, because if we do so we can
			 * deadlock with concurrent buffered writes on page
			 * locks. This happens only if our DIO read covers more
			 * than one extent map, if at this point has already
			 * created an ordered extent for a previous extent map
			 * and locked its range in the inode's io tree, and a
			 * concurrent write against that previous extent map's
			 * range and this range started (we unlock the ranges
			 * in the io tree only when the bios complete and
			 * buffered writes always lock pages before attempting
			 * to lock range in the io tree).
			 */
			if (writing ||
			    test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags))
				btrfs_start_ordered_extent(ordered);
			else
				ret = nowait ? -EAGAIN : -ENOTBLK;
			btrfs_put_ordered_extent(ordered);
		} else {
			/*
			 * We could trigger writeback for this range (and wait
			 * for it to complete) and then invalidate the pages for
			 * this range (through invalidate_inode_pages2_range()),
			 * but that can lead us to a deadlock with a concurrent
			 * call to readahead (a buffered read or a defrag call
			 * triggered a readahead) on a page lock due to an
			 * ordered dio extent we created before but did not have
			 * yet a corresponding bio submitted (whence it can not
			 * complete), which makes readahead wait for that
			 * ordered extent to complete while holding a lock on
			 * that page.
			 */
			ret = nowait ? -EAGAIN : -ENOTBLK;
		}

		if (ret)
			break;

		cond_resched();
	}

	return ret;
}
/* The callers of this must take lock_extent() */
static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
				       u64 len, u64 orig_start, u64 block_start,
				       u64 block_len, u64 orig_block_len,
				       u64 ram_bytes, int compress_type,
				       int type)
{
	struct extent_map *em;
	int ret;

	ASSERT(type == BTRFS_ORDERED_PREALLOC ||
	       type == BTRFS_ORDERED_COMPRESSED ||
	       type == BTRFS_ORDERED_NOCOW ||
	       type == BTRFS_ORDERED_REGULAR);

	em = alloc_extent_map();
	if (!em)
		return ERR_PTR(-ENOMEM);

	em->start = start;
	em->orig_start = orig_start;
	em->len = len;
	em->block_len = block_len;
	em->block_start = block_start;
	em->orig_block_len = orig_block_len;
	em->ram_bytes = ram_bytes;
	em->generation = -1;
	em->flags |= EXTENT_FLAG_PINNED;
	if (type == BTRFS_ORDERED_PREALLOC)
		em->flags |= EXTENT_FLAG_FILLING;
	else if (type == BTRFS_ORDERED_COMPRESSED)
		extent_map_set_compression(em, compress_type);

	ret = btrfs_replace_extent_map_range(inode, em, true);
	if (ret) {
		free_extent_map(em);
		return ERR_PTR(ret);
	}

	/* em got 2 refs now, callers need to do free_extent_map once. */
	return em;
}
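
/*
 * The two references mentioned above are one held by the inode's extent map
 * tree (taken by btrfs_replace_extent_map_range()) and one returned to the
 * caller, which must be dropped with free_extent_map() when done.
 */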
static int btrfs_get_blocks_direct_write(struct extent_map **map,
					 struct inode *inode,
					 struct btrfs_dio_data *dio_data,
					 u64 start, u64 *lenp,
					 unsigned int iomap_flags)
{
	const bool nowait = (iomap_flags & IOMAP_NOWAIT);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map *em = *map;
	int type;
	u64 block_start, orig_start, orig_block_len, ram_bytes;
	struct btrfs_block_group *bg;
	bool can_nocow = false;
	bool space_reserved = false;
	u64 len = *lenp;
	u64 prev_len;
	int ret = 0;

	/*
	 * We don't allocate a new extent in the following cases
	 *
	 * 1) The inode is marked as NODATACOW. In this case we'll just use the
	 * existing extent.
	 * 2) The extent is marked as PREALLOC. We're good to go here and can
	 * just use the extent.
	 *
	 */
	if ((em->flags & EXTENT_FLAG_PREALLOC) ||
	    ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
	     em->block_start != EXTENT_MAP_HOLE)) {
		if (em->flags & EXTENT_FLAG_PREALLOC)
			type = BTRFS_ORDERED_PREALLOC;
		else
			type = BTRFS_ORDERED_NOCOW;
		len = min(len, em->len - (start - em->start));
		block_start = em->block_start + (start - em->start);

		if (can_nocow_extent(inode, start, &len, &orig_start,
				     &orig_block_len, &ram_bytes, false, false) == 1) {
			bg = btrfs_inc_nocow_writers(fs_info, block_start);
			if (bg)
				can_nocow = true;
		}
	}

	prev_len = len;
	if (can_nocow) {
		struct extent_map *em2;

		/* We can NOCOW, so only need to reserve metadata space. */
		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len,
						      nowait);
		if (ret < 0) {
			/* Our caller expects us to free the input extent map. */
			free_extent_map(em);
			*map = NULL;
			btrfs_dec_nocow_writers(bg);
			if (nowait && (ret == -ENOSPC || ret == -EDQUOT))
				ret = -EAGAIN;
			goto out;
		}
		space_reserved = true;

		em2 = btrfs_create_dio_extent(BTRFS_I(inode), dio_data, start, len,
					      orig_start, block_start,
					      len, orig_block_len,
					      ram_bytes, type);
		btrfs_dec_nocow_writers(bg);
		if (type == BTRFS_ORDERED_PREALLOC) {
			free_extent_map(em);
			*map = em = em2;
		}

		if (IS_ERR(em2)) {
			ret = PTR_ERR(em2);
			goto out;
		}

		dio_data->nocow_done = true;
	} else {
		/* Our caller expects us to free the input extent map. */
		free_extent_map(em);
		*map = NULL;

		if (nowait) {
			ret = -EAGAIN;
			goto out;
		}

		/*
		 * If we could not allocate data space before locking the file
		 * range and we can't do a NOCOW write, then we have to fail.
		 */
		if (!dio_data->data_space_reserved) {
			ret = -ENOSPC;
			goto out;
		}

		/*
		 * We have to COW and we have already reserved data space before,
		 * so now we reserve only metadata.
		 */
		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len,
						      false);
		if (ret < 0)
			goto out;
		space_reserved = true;

		em = btrfs_new_extent_direct(BTRFS_I(inode), dio_data, start, len);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}
		*map = em;
		len = min(len, em->len - (start - em->start));
		if (len < prev_len)
			btrfs_delalloc_release_metadata(BTRFS_I(inode),
							prev_len - len, true);
	}

	/*
	 * We have created our ordered extent, so we can now release our reservation
	 * for an outstanding extent.
	 */
	btrfs_delalloc_release_extents(BTRFS_I(inode), prev_len);

	/*
	 * Need to update the i_size under the extent lock so buffered
	 * readers will get the updated i_size when we unlock.
	 */
	if (start + len > i_size_read(inode))
		i_size_write(inode, start + len);
out:
	if (ret && space_reserved) {
		btrfs_delalloc_release_extents(BTRFS_I(inode), len);
		btrfs_delalloc_release_metadata(BTRFS_I(inode), len, true);
	}
	*lenp = len;
	return ret;
}
static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
		loff_t length, unsigned int flags, struct iomap *iomap,
		struct iomap *srcmap)
{
	struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map *em;
	struct extent_state *cached_state = NULL;
	struct btrfs_dio_data *dio_data = iter->private;
	u64 lockstart, lockend;
	const bool write = !!(flags & IOMAP_WRITE);
	int ret = 0;
	u64 len = length;
	const u64 data_alloc_len = length;
	bool unlock_extents = false;

	/*
	 * We could potentially fault if we have a buffer > PAGE_SIZE, and if
	 * we're NOWAIT we may submit a bio for a partial range and return
	 * EIOCBQUEUED, which would result in an errant short read.
	 *
	 * The best way to handle this would be to allow for partial completions
	 * of iocb's, so we could submit the partial bio, return and fault in
	 * the rest of the pages, and then submit the io for the rest of the
	 * range.  However we don't have that currently, so simply return
	 * -EAGAIN at this point so that the normal path is used.
	 */
	if (!write && (flags & IOMAP_NOWAIT) && length > PAGE_SIZE)
		return -EAGAIN;

	/*
	 * Cap the size of reads to that usually seen in buffered I/O as we need
	 * to allocate a contiguous array for the checksums.
	 */
	if (!write)
		len = min_t(u64, len, fs_info->sectorsize * BTRFS_MAX_BIO_SECTORS);

	lockstart = start;
	lockend = start + len - 1;

	/*
	 * iomap_dio_rw() only does filemap_write_and_wait_range(), which isn't
	 * enough if we've written compressed pages to this area, so we need to
	 * flush the dirty pages again to make absolutely sure that any
	 * outstanding dirty pages are on disk - the first flush only starts
	 * compression on the data, while keeping the pages locked, so by the
	 * time the second flush returns we know bios for the compressed pages
	 * were submitted and finished, and the pages no longer under writeback.
	 *
	 * If we have a NOWAIT request and we have any pages in the range that
	 * are locked, likely due to compression still in progress, we don't want
	 * to block on page locks. We also don't want to block on pages marked as
	 * dirty or under writeback (same as for the non-compression case).
	 * iomap_dio_rw() did the same check, but after that and before we got
	 * here, mmap'ed writes may have happened or buffered reads started
	 * (readpage() and readahead(), which lock pages), as we haven't locked
	 * the file range yet.
	 */
	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
		     &BTRFS_I(inode)->runtime_flags)) {
		if (flags & IOMAP_NOWAIT) {
			if (filemap_range_needs_writeback(inode->i_mapping,
							  lockstart, lockend))
				return -EAGAIN;
		} else {
			ret = filemap_fdatawrite_range(inode->i_mapping, start,
						       start + length - 1);
			if (ret)
				return ret;
		}
	}

	memset(dio_data, 0, sizeof(*dio_data));

	/*
	 * We always try to allocate data space and must do it before locking
	 * the file range, to avoid deadlocks with concurrent writes to the same
	 * range if the range has several extents and the writes don't expand the
	 * current i_size (the inode lock is taken in shared mode). If we fail to
	 * allocate data space here we continue and later, after locking the
	 * file range, we fail with ENOSPC only if we figure out we can not do a
	 * NOCOW write.
	 */
	if (write && !(flags & IOMAP_NOWAIT)) {
		ret = btrfs_check_data_free_space(BTRFS_I(inode),
						  &dio_data->data_reserved,
						  start, data_alloc_len, false);
		if (!ret)
			dio_data->data_space_reserved = true;
		else if (ret && !(BTRFS_I(inode)->flags &
				  (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
			goto err;
	}

	/*
	 * If this errors out it's because we couldn't invalidate pagecache for
	 * this range and we need to fallback to buffered IO, or we are doing a
	 * NOWAIT read/write and we need to block.
	 */
	ret = lock_extent_direct(inode, lockstart, lockend, &cached_state, flags);
	if (ret < 0)
		goto err;

	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto unlock_err;
	}

	/*
	 * Ok for INLINE and COMPRESSED extents we need to fallback on buffered
	 * io. INLINE is special, and we could probably kludge it in here, but
	 * it's still buffered so for safety lets just fall back to the generic
	 * buffered path.
	 *
	 * For COMPRESSED we _have_ to read the entire extent in so we can
	 * decompress it, so there will be buffering required no matter what we
	 * do, so go ahead and fallback to buffered.
	 *
	 * We return -ENOTBLK because that's what makes DIO go ahead and go back
	 * to buffered IO. Don't blame me, this is the price we pay for using
	 * the generic code.
	 */
	if (extent_map_is_compressed(em) ||
	    em->block_start == EXTENT_MAP_INLINE) {
		free_extent_map(em);
		/*
		 * If we are in a NOWAIT context, return -EAGAIN in order to
		 * fallback to buffered IO. This is not only because we can
		 * block with buffered IO (no support for NOWAIT semantics at
		 * the moment) but also to avoid returning short reads to user
		 * space - this happens if we were able to read some data from
		 * previous non-compressed extents and then when we fallback to
		 * buffered IO, at btrfs_file_read_iter() by calling
		 * filemap_read(), we fail to fault in pages for the read buffer,
		 * in which case filemap_read() returns a short read (the number
		 * of bytes previously read is > 0, so it does not return -EFAULT).
		 */
		ret = (flags & IOMAP_NOWAIT) ? -EAGAIN : -ENOTBLK;
		goto unlock_err;
	}

	len = min(len, em->len - (start - em->start));

	/*
	 * If we have a NOWAIT request and the range contains multiple extents
	 * (or a mix of extents and holes), then we return -EAGAIN to make the
	 * caller fallback to a context where it can do a blocking (without
	 * NOWAIT) request. This way we avoid doing partial IO and returning
	 * success to the caller, which is not optimal for writes and for reads
	 * it can result in unexpected behaviour for an application.
	 *
	 * When doing a read, because we use IOMAP_DIO_PARTIAL when calling
	 * iomap_dio_rw(), we can end up returning less data then what the caller
	 * asked for, resulting in an unexpected, and incorrect, short read.
	 * That is, the caller asked to read N bytes and we return less than that,
	 * which is wrong unless we are crossing EOF. This happens if we get a
	 * page fault error when trying to fault in pages for the buffer that is
	 * associated to the struct iov_iter passed to iomap_dio_rw(), and we
	 * have previously submitted bios for other extents in the range, in
	 * which case iomap_dio_rw() may return us EIOCBQUEUED if not all of
	 * those bios have completed by the time we get the page fault error,
	 * which we return back to our caller - we should only return EIOCBQUEUED
	 * after we have submitted bios for all the extents in the range.
	 */
	if ((flags & IOMAP_NOWAIT) && len < length) {
		free_extent_map(em);
		ret = -EAGAIN;
		goto unlock_err;
	}

	if (write) {
		ret = btrfs_get_blocks_direct_write(&em, inode, dio_data,
						    start, &len, flags);
		if (ret < 0)
			goto unlock_err;
		unlock_extents = true;
		/* Recalc len in case the new em is smaller than requested */
		len = min(len, em->len - (start - em->start));
		if (dio_data->data_space_reserved) {
			u64 release_offset;
			u64 release_len = 0;

			if (dio_data->nocow_done) {
				release_offset = start;
				release_len = data_alloc_len;
			} else if (len < data_alloc_len) {
				release_offset = start + len;
				release_len = data_alloc_len - len;
			}

			if (release_len > 0)
				btrfs_free_reserved_data_space(BTRFS_I(inode),
							       dio_data->data_reserved,
							       release_offset,
							       release_len);
		}
	} else {
		/*
		 * We need to unlock only the end area that we aren't using.
		 * The rest is going to be unlocked by the endio routine.
		 */
		lockstart = start + len;
		if (lockstart < lockend)
			unlock_extents = true;
	}

	if (unlock_extents)
		unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			      &cached_state);
	else
		free_extent_state(cached_state);

	/*
	 * Translate extent map information to iomap.
	 * We trim the extents (and move the addr) even though iomap code does
	 * that, since we have locked only the parts we are performing I/O in.
	 */
	if ((em->block_start == EXTENT_MAP_HOLE) ||
	    ((em->flags & EXTENT_FLAG_PREALLOC) && !write)) {
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_HOLE;
	} else {
		iomap->addr = em->block_start + (start - em->start);
		iomap->type = IOMAP_MAPPED;
	}
	iomap->offset = start;
	iomap->bdev = fs_info->fs_devices->latest_dev->bdev;
	iomap->length = len;
	free_extent_map(em);

	return 0;

unlock_err:
	unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
		      &cached_state);
err:
	if (dio_data->data_space_reserved) {
		btrfs_free_reserved_data_space(BTRFS_I(inode),
					       dio_data->data_reserved,
					       start, data_alloc_len);
		extent_changeset_free(dio_data->data_reserved);
	}

	return ret;
}
static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length,
		ssize_t written, unsigned int flags, struct iomap *iomap)
{
	struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
	struct btrfs_dio_data *dio_data = iter->private;
	size_t submitted = dio_data->submitted;
	const bool write = !!(flags & IOMAP_WRITE);
	int ret = 0;

	if (!write && (iomap->type == IOMAP_HOLE)) {
		/* If reading from a hole, unlock and return */
		unlock_extent(&BTRFS_I(inode)->io_tree, pos, pos + length - 1,
			      NULL);
		return ret;
	}

	if (submitted < length) {
		pos += submitted;
		length -= submitted;
		if (write)
			btrfs_finish_ordered_extent(dio_data->ordered, NULL,
						    pos, length, false);
		else
			unlock_extent(&BTRFS_I(inode)->io_tree, pos,
				      pos + length - 1, NULL);
		ret = -ENOTBLK;
	}
	if (write) {
		btrfs_put_ordered_extent(dio_data->ordered);
		dio_data->ordered = NULL;
	}

	if (write)
		extent_changeset_free(dio_data->data_reserved);
	return ret;
}
static void btrfs_dio_end_io(struct btrfs_bio *bbio)
{
	struct btrfs_dio_private *dip =
		container_of(bbio, struct btrfs_dio_private, bbio);
	struct btrfs_inode *inode = bbio->inode;
	struct bio *bio = &bbio->bio;

	if (bio->bi_status) {
		btrfs_warn(inode->root->fs_info,
		"direct IO failed ino %llu op 0x%0x offset %#llx len %u err no %d",
			   btrfs_ino(inode), bio->bi_opf,
			   dip->file_offset, dip->bytes, bio->bi_status);
	}

	if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
		btrfs_finish_ordered_extent(bbio->ordered, NULL,
					    dip->file_offset, dip->bytes,
					    !bio->bi_status);
	} else {
		unlock_extent(&inode->io_tree, dip->file_offset,
			      dip->file_offset + dip->bytes - 1, NULL);
	}

	bbio->bio.bi_private = bbio->private;
	iomap_dio_bio_end_io(bio);
}
static void btrfs_dio_submit_io(const struct iomap_iter *iter, struct bio *bio,
				loff_t file_offset)
{
	struct btrfs_bio *bbio = btrfs_bio(bio);
	struct btrfs_dio_private *dip =
		container_of(bbio, struct btrfs_dio_private, bbio);
	struct btrfs_dio_data *dio_data = iter->private;

	btrfs_bio_init(bbio, BTRFS_I(iter->inode)->root->fs_info,
		       btrfs_dio_end_io, bio->bi_private);
	bbio->inode = BTRFS_I(iter->inode);
	bbio->file_offset = file_offset;

	dip->file_offset = file_offset;
	dip->bytes = bio->bi_iter.bi_size;

	dio_data->submitted += bio->bi_iter.bi_size;

	/*
	 * Check if we are doing a partial write.  If we are, we need to split
	 * the ordered extent to match the submitted bio.  Hang on to the
	 * remaining unfinishable ordered_extent in dio_data so that it can be
	 * cancelled in iomap_end to avoid a deadlock wherein faulting the
	 * remaining pages is blocked on the outstanding ordered extent.
	 */
	if (iter->flags & IOMAP_WRITE) {
		int ret;

		ret = btrfs_extract_ordered_extent(bbio, dio_data->ordered);
		if (ret) {
			btrfs_finish_ordered_extent(dio_data->ordered, NULL,
						    file_offset, dip->bytes,
						    false);
			bio->bi_status = errno_to_blk_status(ret);
			iomap_dio_bio_end_io(bio);
			return;
		}
	}

	btrfs_submit_bio(bbio, 0);
}
static const struct iomap_ops btrfs_dio_iomap_ops = {
	.iomap_begin		= btrfs_dio_iomap_begin,
	.iomap_end		= btrfs_dio_iomap_end,
};

static const struct iomap_dio_ops btrfs_dio_ops = {
	.submit_io		= btrfs_dio_submit_io,
	.bio_set		= &btrfs_dio_bioset,
};
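
/*
 * iomap drives direct IO through these hooks: iomap_begin/iomap_end bracket
 * each mapped range and submit_io is called for every bio iomap builds, so a
 * single request may invoke btrfs_dio_submit_io() several times while all
 * invocations share one btrfs_dio_data via iter->private.
 */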
ssize_t btrfs_dio_read(struct kiocb *iocb, struct iov_iter *iter, size_t done_before)
{
	struct btrfs_dio_data data = { 0 };

	return iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
			    IOMAP_DIO_PARTIAL, &data, done_before);
}

struct iomap_dio *btrfs_dio_write(struct kiocb *iocb, struct iov_iter *iter,
				  size_t done_before)
{
	struct btrfs_dio_data data = { 0 };

	return __iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
			      IOMAP_DIO_PARTIAL, &data, done_before);
}
static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
			u64 start, u64 len)
{
	int ret;

	ret = fiemap_prep(inode, fieinfo, start, &len, 0);
	if (ret)
		return ret;

	/*
	 * fiemap_prep() called filemap_write_and_wait() for the whole possible
	 * file range (0 to LLONG_MAX), but that is not enough if we have
	 * compression enabled. The first filemap_fdatawrite_range() only kicks
	 * in the compression of data (in an async thread) and will return
	 * before the compression is done and writeback is started. A second
	 * filemap_fdatawrite_range() is needed to wait for the compression to
	 * complete and writeback to start. We also need to wait for ordered
	 * extents to complete, because our fiemap implementation uses mainly
	 * file extent items to list the extents, searching for extent maps
	 * only for file ranges with holes or prealloc extents to figure out
	 * if we have delalloc in those ranges.
	 */
	if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC) {
		ret = btrfs_wait_ordered_range(inode, 0, LLONG_MAX);
		if (ret)
			return ret;
	}

	return extent_fiemap(BTRFS_I(inode), fieinfo, start, len);
}
static int btrfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	return extent_writepages(mapping, wbc);
}

static void btrfs_readahead(struct readahead_control *rac)
{
	extent_readahead(rac);
}
/*
 * For release_folio() and invalidate_folio() we have a race window where
 * folio_end_writeback() is called but the subpage spinlock is not yet released.
 * If we continue to release/invalidate the page, we could cause use-after-free
 * for subpage spinlock.  So this function is to spin and wait for subpage
 * spinlock.
 */
static void wait_subpage_spinlock(struct page *page)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
	struct folio *folio = page_folio(page);
	struct btrfs_subpage *subpage;

	if (!btrfs_is_subpage(fs_info, page->mapping))
		return;

	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	subpage = folio_get_private(folio);

	/*
	 * This may look insane as we just acquire the spinlock and release it,
	 * without doing anything.  But we just want to make sure no one is
	 * still holding the subpage spinlock.
	 * And since the page is not dirty nor writeback, and we have page
	 * locked, the only possible way to hold a spinlock is from the endio
	 * function to clear page writeback.
	 *
	 * Here we just acquire the spinlock so that all existing callers
	 * should exit and we're safe to release/invalidate the page.
	 */
	spin_lock_irq(&subpage->lock);
	spin_unlock_irq(&subpage->lock);
}
static bool __btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
{
	int ret = try_release_extent_mapping(&folio->page, gfp_flags);

	if (ret == 1) {
		wait_subpage_spinlock(&folio->page);
		clear_page_extent_mapped(&folio->page);
	}
	return ret;
}

static bool btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
{
	if (folio_test_writeback(folio) || folio_test_dirty(folio))
		return false;
	return __btrfs_release_folio(folio, gfp_flags);
}
#ifdef CONFIG_MIGRATION
static int btrfs_migrate_folio(struct address_space *mapping,
			       struct folio *dst, struct folio *src,
			       enum migrate_mode mode)
{
	int ret = filemap_migrate_folio(mapping, dst, src, mode);

	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (folio_test_ordered(src)) {
		folio_clear_ordered(src);
		folio_set_ordered(dst);
	}

	return MIGRATEPAGE_SUCCESS;
}
#else
#define btrfs_migrate_folio NULL
#endif
static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
				   size_t length)
{
	struct btrfs_inode *inode = BTRFS_I(folio->mapping->host);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_io_tree *tree = &inode->io_tree;
	struct extent_state *cached_state = NULL;
	u64 page_start = folio_pos(folio);
	u64 page_end = page_start + folio_size(folio) - 1;
	u64 cur;
	int inode_evicting = inode->vfs_inode.i_state & I_FREEING;

	/*
	 * We have folio locked so no new ordered extent can be created on this
	 * page, nor bio can be submitted for this folio.
	 *
	 * But already submitted bio can still be finished on this folio.
	 * Furthermore, endio function won't skip folio which has Ordered
	 * (Private2) already cleared, so it's possible for endio and
	 * invalidate_folio to do the same ordered extent accounting twice
	 * on one folio.
	 *
	 * So here we wait for any submitted bios to finish, so that we won't
	 * do double ordered extent accounting on the same folio.
	 */
	folio_wait_writeback(folio);
	wait_subpage_spinlock(&folio->page);

	/*
	 * For subpage case, we have call sites like
	 * btrfs_punch_hole_lock_range() which passes range not aligned to
	 * sectorsize.
	 * If the range doesn't cover the full folio, we don't need to and
	 * shouldn't clear page extent mapped, as folio->private can still
	 * record subpage dirty bits for other part of the range.
	 *
	 * For cases that invalidate the full folio even the range doesn't
	 * cover the full folio, like invalidating the last folio, we're
	 * still safe to wait for ordered extent to finish.
	 */
	if (!(offset == 0 && length == folio_size(folio))) {
		btrfs_release_folio(folio, GFP_NOFS);
		return;
	}

	if (!inode_evicting)
		lock_extent(tree, page_start, page_end, &cached_state);

	cur = page_start;
	while (cur < page_end) {
		struct btrfs_ordered_extent *ordered;
		u64 range_end;
		u32 range_len;
		u32 extra_flags = 0;

		ordered = btrfs_lookup_first_ordered_range(inode, cur,
							   page_end + 1 - cur);
		if (!ordered) {
			range_end = page_end;
			/*
			 * No ordered extent covering this range, we are safe
			 * to delete all extent states in the range.
			 */
			extra_flags = EXTENT_CLEAR_ALL_BITS;
			goto next;
		}
		if (ordered->file_offset > cur) {
			/*
			 * There is a range between [cur, oe->file_offset) not
			 * covered by any ordered extent.
			 * We are safe to delete all extent states, and handle
			 * the ordered extent in the next iteration.
			 */
			range_end = ordered->file_offset - 1;
			extra_flags = EXTENT_CLEAR_ALL_BITS;
			goto next;
		}

		range_end = min(ordered->file_offset + ordered->num_bytes - 1,
				page_end);
		ASSERT(range_end + 1 - cur < U32_MAX);
		range_len = range_end + 1 - cur;
		if (!btrfs_folio_test_ordered(fs_info, folio, cur, range_len)) {
			/*
			 * If Ordered (Private2) is cleared, it means endio has
			 * already been executed for the range.
			 * We can't delete the extent states as
			 * btrfs_finish_ordered_io() may still use some of them.
			 */
			goto next;
		}
		btrfs_folio_clear_ordered(fs_info, folio, cur, range_len);

		/*
		 * IO on this page will never be started, so we need to account
		 * for any ordered extents now. Don't clear EXTENT_DELALLOC_NEW
		 * here, must leave that up for the ordered extent completion.
		 *
		 * This will also unlock the range for incoming
		 * btrfs_finish_ordered_io().
		 */
		if (!inode_evicting)
			clear_extent_bit(tree, cur, range_end,
					 EXTENT_DELALLOC |
					 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
					 EXTENT_DEFRAG, &cached_state);

		spin_lock_irq(&inode->ordered_tree_lock);
		set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
		ordered->truncated_len = min(ordered->truncated_len,
					     cur - ordered->file_offset);
		spin_unlock_irq(&inode->ordered_tree_lock);

		/*
		 * If the ordered extent has finished, we're safe to delete all
		 * the extent states of the range, otherwise
		 * btrfs_finish_ordered_io() will get executed by endio for
		 * other pages, so we can't delete extent states.
		 */
		if (btrfs_dec_test_ordered_pending(inode, &ordered,
						   cur, range_end + 1 - cur)) {
			btrfs_finish_ordered_io(ordered);
			/*
			 * The ordered extent has finished, now we're again
			 * safe to delete all extent states of the range.
			 */
			extra_flags = EXTENT_CLEAR_ALL_BITS;
		}
next:
		if (ordered)
			btrfs_put_ordered_extent(ordered);
		/*
		 * Qgroup reserved space handler
		 * Sector(s) here will be either:
		 *
		 * 1) Already written to disk or bio already finished
		 *    Then its QGROUP_RESERVED bit in io_tree is already cleared.
		 *    Qgroup will be handled by its qgroup_record then.
		 *    btrfs_qgroup_free_data() call will do nothing here.
		 *
		 * 2) Not written to disk yet
		 *    Then btrfs_qgroup_free_data() call will clear the
		 *    QGROUP_RESERVED bit of its io_tree, and free the qgroup
		 *    reserved data space.
		 *    Since the IO will never happen for this page.
		 */
		btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur, NULL);
		if (!inode_evicting) {
			clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED |
					 EXTENT_DELALLOC | EXTENT_UPTODATE |
					 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG |
					 extra_flags, &cached_state);
		}
		cur = range_end + 1;
	}
	/*
	 * We have iterated through all ordered extents of the page, the page
	 * should not have Ordered (Private2) anymore, or the above iteration
	 * did something wrong.
	 */
	ASSERT(!folio_test_ordered(folio));
	btrfs_folio_clear_checked(fs_info, folio, folio_pos(folio), folio_size(folio));
	if (!inode_evicting)
		__btrfs_release_folio(folio, GFP_NOFS);
	clear_page_extent_mapped(&folio->page);
}
/*
 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
 * called from a page fault handler when a page is first dirtied. Hence we must
 * be careful to check for EOF conditions here. We set the page up correctly
 * for a written page which means we get ENOSPC checking when writing into
 * holes and correct delalloc and unwritten extent mapping on filesystems that
 * support these features.
 *
 * We are not allowed to take the i_mutex here so we have to play games to
 * protect against truncate races as the page could now be beyond EOF. Because
 * truncate_setsize() writes the inode size before removing pages, once we have
 * the page lock we can determine safely if the page is beyond EOF. If it is not
 * beyond EOF, then the page is guaranteed safe against truncation until we
 * unlock the page.
 */
vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct folio *folio = page_folio(page);
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct extent_changeset *data_reserved = NULL;
	unsigned long zero_start;
	loff_t size;
	vm_fault_t ret;
	int ret2;
	int reserved = 0;
	u64 reserved_space;
	u64 page_start;
	u64 page_end;
	u64 end;

	ASSERT(folio_order(folio) == 0);

	reserved_space = PAGE_SIZE;

	sb_start_pagefault(inode->i_sb);
	page_start = page_offset(page);
	page_end = page_start + PAGE_SIZE - 1;
	end = page_end;

	/*
	 * Reserving delalloc space after obtaining the page lock can lead to
	 * deadlock. For example, if a dirty page is locked by this function
	 * and the call to btrfs_delalloc_reserve_space() ends up triggering
	 * dirty page write out, then the btrfs_writepages() function could
	 * end up waiting indefinitely to get a lock on the page currently
	 * being processed by btrfs_page_mkwrite() function.
	 */
	ret2 = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved,
					    page_start, reserved_space);
	if (!ret2) {
		ret2 = file_update_time(vmf->vma->vm_file);
		reserved = 1;
	}
	if (ret2) {
		ret = vmf_error(ret2);
		if (reserved)
			goto out;
		goto out_noreserve;
	}

	ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
again:
	down_read(&BTRFS_I(inode)->i_mmap_lock);
	lock_page(page);
	size = i_size_read(inode);

	if ((page->mapping != inode->i_mapping) ||
	    (page_start >= size)) {
		/* page got truncated out from underneath us */
		goto out_unlock;
	}
	wait_on_page_writeback(page);

	lock_extent(io_tree, page_start, page_end, &cached_state);
	ret2 = set_page_extent_mapped(page);
	if (ret2 < 0) {
		ret = vmf_error(ret2);
		unlock_extent(io_tree, page_start, page_end, &cached_state);
		goto out_unlock;
	}

	/*
	 * we can't set the delalloc bits if there are pending ordered
	 * extents.  Drop our locks and wait for them to finish
	 */
	ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start,
					     PAGE_SIZE);
	if (ordered) {
		unlock_extent(io_tree, page_start, page_end, &cached_state);
		unlock_page(page);
		up_read(&BTRFS_I(inode)->i_mmap_lock);
		btrfs_start_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	if (page->index == ((size - 1) >> PAGE_SHIFT)) {
		reserved_space = round_up(size - page_start,
					  fs_info->sectorsize);
		if (reserved_space < PAGE_SIZE) {
			end = page_start + reserved_space - 1;
			btrfs_delalloc_release_space(BTRFS_I(inode),
					data_reserved, page_start,
					PAGE_SIZE - reserved_space, true);
		}
	}

	/*
	 * page_mkwrite gets called when the page is firstly dirtied after it's
	 * faulted in, but write(2) could also dirty a page and set delalloc
	 * bits, thus in this case for space account reason, we still need to
	 * clear any delalloc bits within this page range since we have to
	 * reserve data&meta space before lock_page() (see above comments).
	 */
	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
			 EXTENT_DEFRAG, &cached_state);

	ret2 = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start, end, 0,
					 &cached_state);
	if (ret2) {
		unlock_extent(io_tree, page_start, page_end, &cached_state);
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (page_start + PAGE_SIZE > size)
		zero_start = offset_in_page(size);
	else
		zero_start = PAGE_SIZE;

	if (zero_start != PAGE_SIZE)
		memzero_page(page, zero_start, PAGE_SIZE - zero_start);

	btrfs_folio_clear_checked(fs_info, folio, page_start, PAGE_SIZE);
	btrfs_folio_set_dirty(fs_info, folio, page_start, end + 1 - page_start);
	btrfs_folio_set_uptodate(fs_info, folio, page_start, end + 1 - page_start);

	btrfs_set_inode_last_sub_trans(BTRFS_I(inode));

	unlock_extent(io_tree, page_start, page_end, &cached_state);
	up_read(&BTRFS_I(inode)->i_mmap_lock);

	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
	sb_end_pagefault(inode->i_sb);
	extent_changeset_free(data_reserved);
	return VM_FAULT_LOCKED;

out_unlock:
	unlock_page(page);
	up_read(&BTRFS_I(inode)->i_mmap_lock);
out:
	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
	btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, page_start,
				     reserved_space, (ret != 0));
out_noreserve:
	sb_end_pagefault(inode->i_sb);
	extent_changeset_free(data_reserved);
	return ret;
}
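
/*
 * Editorial note (not from the original source): btrfs_page_mkwrite() runs as
 * the ->page_mkwrite handler of the file's vm_operations_struct (wired up in
 * fs/btrfs/file.c), i.e. when a MAP_SHARED page is first written to. A minimal
 * userspace sketch that exercises this path, assuming a file at the
 * hypothetical path /mnt/btrfs/f:
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/mnt/btrfs/f", O_RDWR);
 *		char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			       MAP_SHARED, fd, 0);
 *
 *		if (fd < 0 || p == MAP_FAILED)
 *			return 1;
 *		p[0] = 'x';	// first dirtying fault -> ->page_mkwrite()
 *		munmap(p, 4096);
 *		close(fd);
 *		return 0;
 *	}
 */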
static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
{
	struct btrfs_truncate_control control = {
		.inode = inode,
		.ino = btrfs_ino(inode),
		.min_type = BTRFS_EXTENT_DATA_KEY,
		.clear_extent_range = true,
	};
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *rsv;
	int ret;
	struct btrfs_trans_handle *trans;
	u64 mask = fs_info->sectorsize - 1;
	const u64 min_size = btrfs_calc_metadata_size(fs_info, 1);

	if (!skip_writeback) {
		ret = btrfs_wait_ordered_range(&inode->vfs_inode,
					       inode->vfs_inode.i_size & (~mask),
					       (u64)-1);
		if (ret)
			return ret;
	}

	/*
	 * Yes ladies and gentlemen, this is indeed ugly.  We have a couple of
	 * things going on here:
	 *
	 * 1) We need to reserve space to update our inode.
	 *
	 * 2) We need to have something to cache all the space that is going to
	 * be free'd up by the truncate operation, but also have some slack
	 * space reserved in case it uses space during the truncate (thank you
	 * very much snapshotting).
	 *
	 * And we need these to be separate.  The fact is we can use a lot of
	 * space doing the truncate, and we have no earthly idea how much space
	 * we will use, so we need the truncate reservation to be separate so it
	 * doesn't end up using space reserved for updating the inode.  We also
	 * need to be able to stop the transaction and start a new one, which
	 * means we need to be able to update the inode several times, and we
	 * have no idea of knowing how many times that will be, so we can't just
	 * reserve 1 item for the entirety of the operation, so that has to be
	 * done separately as well.
	 *
	 * So that leaves us with
	 *
	 * 1) rsv - for the truncate reservation, which we will steal from the
	 * transaction reservation.
	 * 2) fs_info->trans_block_rsv - this will have 1 items worth left for
	 * updating the inode.
	 */
	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
	if (!rsv)
		return -ENOMEM;
	rsv->size = min_size;
	rsv->failfast = true;

	/*
	 * 1 for the truncate slack space
	 * 1 for updating the inode.
	 */
	trans = btrfs_start_transaction(root, 2);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	/* Migrate the slack space for the truncate to our reserve */
	ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
				      min_size, false);
	/*
	 * We have reserved 2 metadata units when we started the transaction and
	 * min_size matches 1 unit, so this should never fail, but if it does,
	 * it's not critical we just fail truncation.
	 */
	if (WARN_ON(ret)) {
		btrfs_end_transaction(trans);
		goto out;
	}

	trans->block_rsv = rsv;

	while (1) {
		struct extent_state *cached_state = NULL;
		const u64 new_size = inode->vfs_inode.i_size;
		const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize);

		control.new_size = new_size;
		lock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
		/*
		 * We want to drop from the next block forward in case this new
		 * size is not block aligned since we will be keeping the last
		 * block of the extent just the way it is.
		 */
		btrfs_drop_extent_map_range(inode,
					    ALIGN(new_size, fs_info->sectorsize),
					    (u64)-1, false);

		ret = btrfs_truncate_inode_items(trans, root, &control);

		inode_sub_bytes(&inode->vfs_inode, control.sub_bytes);
		btrfs_inode_safe_disk_i_size_write(inode, control.last_size);

		unlock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);

		trans->block_rsv = &fs_info->trans_block_rsv;
		if (ret != -ENOSPC && ret != -EAGAIN)
			break;

		ret = btrfs_update_inode(trans, inode);
		if (ret)
			break;

		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty(fs_info);

		trans = btrfs_start_transaction(root, 2);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			break;
		}

		btrfs_block_rsv_release(fs_info, rsv, -1, NULL);
		ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
					      rsv, min_size, false);
		/*
		 * We have reserved 2 metadata units when we started the
		 * transaction and min_size matches 1 unit, so this should never
		 * fail, but if it does, it's not critical we just fail truncation.
		 */
		if (WARN_ON(ret))
			break;

		trans->block_rsv = rsv;
	}

	/*
	 * We can't call btrfs_truncate_block inside a trans handle as we could
	 * deadlock with freeze, if we got BTRFS_NEED_TRUNCATE_BLOCK then we
	 * know we've truncated everything except the last little bit, and can
	 * do btrfs_truncate_block and then update the disk_i_size.
	 */
	if (ret == BTRFS_NEED_TRUNCATE_BLOCK) {
		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty(fs_info);

		ret = btrfs_truncate_block(inode, inode->vfs_inode.i_size, 0, 0);
		if (ret)
			goto out;
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			goto out;
		}
		btrfs_inode_safe_disk_i_size_write(inode, 0);
	}

	if (trans) {
		int ret2;

		trans->block_rsv = &fs_info->trans_block_rsv;
		ret2 = btrfs_update_inode(trans, inode);
		if (ret2 && !ret)
			ret = ret2;

		ret2 = btrfs_end_transaction(trans);
		if (ret2 && !ret)
			ret = ret2;
		btrfs_btree_balance_dirty(fs_info);
	}
out:
	btrfs_free_block_rsv(fs_info, rsv);
	/*
	 * So if we truncate and then write and fsync we normally would just
	 * write the extents that changed, which is a problem if we need to
	 * first truncate that entire inode.  So set this flag so we write out
	 * all of the extents in the inode to the sync log so we're completely
	 * safe.
	 *
	 * If no extents were dropped or trimmed we don't need to force the next
	 * fsync to truncate all the inode's items from the log and re-log them
	 * all. This means the truncate operation did not change the file size,
	 * or changed it to a smaller size but there was only an implicit hole
	 * between the old i_size and the new i_size, and there were no prealloc
	 * extents beyond i_size to drop.
	 */
	if (control.extents_found > 0)
		btrfs_set_inode_full_sync(inode);

	return ret;
}
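
/*
 * Editorial sketch (not from the original source): the reservation dance in
 * btrfs_truncate() above, reduced to its shape. Each transaction starts with
 * 2 metadata units and one unit is migrated into the private rsv, so the
 * truncate loop can never consume the unit kept for the final inode update:
 *
 *	trans = btrfs_start_transaction(root, 2);
 *	btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv, min_size, false);
 *	trans->block_rsv = rsv;				// truncate eats from rsv
 *	ret = btrfs_truncate_inode_items(trans, root, &control);
 *	trans->block_rsv = &fs_info->trans_block_rsv;	// the inode-update unit
 *	btrfs_update_inode(trans, inode);
 */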
struct inode *btrfs_new_subvol_inode(struct mnt_idmap *idmap,
				     struct inode *dir)
{
	struct inode *inode;

	inode = new_inode(dir->i_sb);
	if (inode) {
		/*
		 * Subvolumes don't inherit the sgid bit or the parent's gid if
		 * the parent's sgid bit is set. This is probably a bug.
		 */
		inode_init_owner(idmap, inode, NULL,
				 S_IFDIR | (~current_umask() & S_IRWXUGO));
		inode->i_op = &btrfs_dir_inode_operations;
		inode->i_fop = &btrfs_dir_file_operations;
	}
	return inode;
}
struct inode *btrfs_alloc_inode(struct super_block *sb)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
	struct btrfs_inode *ei;
	struct inode *inode;
	struct extent_io_tree *file_extent_tree = NULL;

	/* Self tests may pass a NULL fs_info. */
	if (fs_info && !btrfs_fs_incompat(fs_info, NO_HOLES)) {
		file_extent_tree = kmalloc(sizeof(struct extent_io_tree), GFP_KERNEL);
		if (!file_extent_tree)
			return NULL;
	}

	ei = alloc_inode_sb(sb, btrfs_inode_cachep, GFP_KERNEL);
	if (!ei) {
		kfree(file_extent_tree);
		return NULL;
	}

	ei->root = NULL;
	ei->generation = 0;
	ei->last_trans = 0;
	ei->last_sub_trans = 0;
	ei->logged_trans = 0;
	ei->delalloc_bytes = 0;
	ei->new_delalloc_bytes = 0;
	ei->defrag_bytes = 0;
	ei->disk_i_size = 0;
	ei->flags = 0;
	ei->ro_flags = 0;
	ei->csum_bytes = 0;
	ei->index_cnt = (u64)-1;
	ei->dir_index = 0;
	ei->last_unlink_trans = 0;
	ei->last_reflink_trans = 0;
	ei->last_log_commit = 0;

	spin_lock_init(&ei->lock);
	ei->outstanding_extents = 0;
	if (sb->s_magic != BTRFS_TEST_MAGIC)
		btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv,
					      BTRFS_BLOCK_RSV_DELALLOC);
	ei->runtime_flags = 0;
	ei->prop_compress = BTRFS_COMPRESS_NONE;
	ei->defrag_compress = BTRFS_COMPRESS_NONE;

	ei->delayed_node = NULL;

	ei->i_otime_sec = 0;
	ei->i_otime_nsec = 0;

	inode = &ei->vfs_inode;
	extent_map_tree_init(&ei->extent_tree);

	/* This io tree sets the valid inode. */
	extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO);
	ei->io_tree.inode = ei;

	ei->file_extent_tree = file_extent_tree;
	if (file_extent_tree) {
		extent_io_tree_init(fs_info, ei->file_extent_tree,
				    IO_TREE_INODE_FILE_EXTENT);
		/* Lockdep class is set only for the file extent tree. */
		lockdep_set_class(&ei->file_extent_tree->lock, &file_extent_tree_class);
	}
	mutex_init(&ei->log_mutex);
	spin_lock_init(&ei->ordered_tree_lock);
	ei->ordered_tree = RB_ROOT;
	ei->ordered_tree_last = NULL;
	INIT_LIST_HEAD(&ei->delalloc_inodes);
	INIT_LIST_HEAD(&ei->delayed_iput);
	RB_CLEAR_NODE(&ei->rb_node);
	init_rwsem(&ei->i_mmap_lock);

	return inode;
}
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
void btrfs_test_destroy_inode(struct inode *inode)
{
	btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);
	kfree(BTRFS_I(inode)->file_extent_tree);
	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}
#endif

void btrfs_free_inode(struct inode *inode)
{
	kfree(BTRFS_I(inode)->file_extent_tree);
	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}
void btrfs_destroy_inode(struct inode *vfs_inode)
{
	struct btrfs_ordered_extent *ordered;
	struct btrfs_inode *inode = BTRFS_I(vfs_inode);
	struct btrfs_root *root = inode->root;
	bool freespace_inode;

	WARN_ON(!hlist_empty(&vfs_inode->i_dentry));
	WARN_ON(vfs_inode->i_data.nrpages);
	WARN_ON(inode->block_rsv.reserved);
	WARN_ON(inode->block_rsv.size);
	WARN_ON(inode->outstanding_extents);
	if (!S_ISDIR(vfs_inode->i_mode)) {
		WARN_ON(inode->delalloc_bytes);
		WARN_ON(inode->new_delalloc_bytes);
	}
	WARN_ON(inode->csum_bytes);
	WARN_ON(inode->defrag_bytes);

	/*
	 * This can happen where we create an inode, but somebody else also
	 * created the same inode and we need to destroy the one we already
	 * created.
	 */
	if (!root)
		return;

	/*
	 * If this is a free space inode do not take the ordered extents lockdep
	 * map.
	 */
	freespace_inode = btrfs_is_free_space_inode(inode);

	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
		if (!ordered)
			break;
		else {
			btrfs_err(root->fs_info,
				  "found ordered extent %llu %llu on inode cleanup",
				  ordered->file_offset, ordered->num_bytes);

			if (!freespace_inode)
				btrfs_lockdep_acquire(root->fs_info, btrfs_ordered_extent);

			btrfs_remove_ordered_extent(inode, ordered);
			btrfs_put_ordered_extent(ordered);
			btrfs_put_ordered_extent(ordered);
		}
	}
	btrfs_qgroup_check_reserved_leak(inode);
	inode_tree_del(inode);
	btrfs_drop_extent_map_range(inode, 0, (u64)-1, false);
	btrfs_inode_clear_file_extent_range(inode, 0, (u64)-1);
	btrfs_put_root(inode->root);
}
int btrfs_drop_inode(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (root == NULL)
		return 1;

	/* the snap/subvol tree is on deleting */
	if (btrfs_root_refs(&root->root_item) == 0)
		return 1;
	else
		return generic_drop_inode(inode);
}
static void init_once(void *foo)
{
	struct btrfs_inode *ei = foo;

	inode_init_once(&ei->vfs_inode);
}
void __cold btrfs_destroy_cachep(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	bioset_exit(&btrfs_dio_bioset);
	kmem_cache_destroy(btrfs_inode_cachep);
}
int __init btrfs_init_cachep(void)
{
	btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
			sizeof(struct btrfs_inode), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT,
			init_once);
	if (!btrfs_inode_cachep)
		goto fail;

	if (bioset_init(&btrfs_dio_bioset, BIO_POOL_SIZE,
			offsetof(struct btrfs_dio_private, bbio.bio),
			BIOSET_NEED_BVECS))
		goto fail;

	return 0;
fail:
	btrfs_destroy_cachep();
	return -ENOMEM;
}
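
/*
 * Editorial note (not from the original source): init_once() above is the
 * slab constructor, so it runs once when a slab page is populated, not on
 * every allocation; btrfs_alloc_inode() is what re-initializes the per-inode
 * fields on each allocation. Only state that must stay valid across
 * free/alloc cycles of a cached object (here the embedded VFS inode set up
 * by inode_init_once()) belongs in the constructor.
 */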
static int btrfs_getattr(struct mnt_idmap *idmap,
			 const struct path *path, struct kstat *stat,
			 u32 request_mask, unsigned int flags)
{
	u64 delalloc_bytes;
	u64 inode_bytes;
	struct inode *inode = d_inode(path->dentry);
	u32 blocksize = inode->i_sb->s_blocksize;
	u32 bi_flags = BTRFS_I(inode)->flags;
	u32 bi_ro_flags = BTRFS_I(inode)->ro_flags;

	stat->result_mask |= STATX_BTIME;
	stat->btime.tv_sec = BTRFS_I(inode)->i_otime_sec;
	stat->btime.tv_nsec = BTRFS_I(inode)->i_otime_nsec;
	if (bi_flags & BTRFS_INODE_APPEND)
		stat->attributes |= STATX_ATTR_APPEND;
	if (bi_flags & BTRFS_INODE_COMPRESS)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (bi_flags & BTRFS_INODE_IMMUTABLE)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (bi_flags & BTRFS_INODE_NODUMP)
		stat->attributes |= STATX_ATTR_NODUMP;
	if (bi_ro_flags & BTRFS_INODE_RO_VERITY)
		stat->attributes |= STATX_ATTR_VERITY;

	stat->attributes_mask |= (STATX_ATTR_APPEND |
				  STATX_ATTR_COMPRESSED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP);

	generic_fillattr(idmap, request_mask, inode, stat);
	stat->dev = BTRFS_I(inode)->root->anon_dev;

	spin_lock(&BTRFS_I(inode)->lock);
	delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes;
	inode_bytes = inode_get_bytes(inode);
	spin_unlock(&BTRFS_I(inode)->lock);
	stat->blocks = (ALIGN(inode_bytes, blocksize) +
			ALIGN(delalloc_bytes, blocksize)) >> SECTOR_SHIFT;
	return 0;
}
static int btrfs_rename_exchange(struct inode *old_dir,
				 struct dentry *old_dentry,
				 struct inode *new_dir,
				 struct dentry *new_dentry)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
	struct btrfs_trans_handle *trans;
	unsigned int trans_num_items;
	struct btrfs_root *root = BTRFS_I(old_dir)->root;
	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
	struct inode *new_inode = new_dentry->d_inode;
	struct inode *old_inode = old_dentry->d_inode;
	struct btrfs_rename_ctx old_rename_ctx;
	struct btrfs_rename_ctx new_rename_ctx;
	u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
	u64 new_ino = btrfs_ino(BTRFS_I(new_inode));
	u64 old_idx = 0;
	u64 new_idx = 0;
	int ret;
	int ret2;
	bool need_abort = false;
	struct fscrypt_name old_fname, new_fname;
	struct fscrypt_str *old_name, *new_name;

	/*
	 * For non-subvolumes allow exchange only within one subvolume, in the
	 * same inode namespace. Two subvolumes (represented as directory) can
	 * be exchanged as they're a logical link and have a fixed inode number.
	 */
	if (root != dest &&
	    (old_ino != BTRFS_FIRST_FREE_OBJECTID ||
	     new_ino != BTRFS_FIRST_FREE_OBJECTID))
		return -EXDEV;

	ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname);
	if (ret)
		return ret;

	ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname);
	if (ret) {
		fscrypt_free_filename(&old_fname);
		return ret;
	}

	old_name = &old_fname.disk_name;
	new_name = &new_fname.disk_name;

	/* close the race window with snapshot create/destroy ioctl */
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID ||
	    new_ino == BTRFS_FIRST_FREE_OBJECTID)
		down_read(&fs_info->subvol_sem);

	/*
	 * For each inode:
	 * 1 to remove old dir item
	 * 1 to remove old dir index
	 * 1 to add new dir item
	 * 1 to add new dir index
	 * 1 to update parent inode
	 *
	 * If the parents are the same, we only need to account for one
	 */
	trans_num_items = (old_dir == new_dir ? 9 : 10);
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
		/*
		 * 1 to remove old root ref
		 * 1 to remove old root backref
		 * 1 to add new root ref
		 * 1 to add new root backref
		 */
		trans_num_items += 4;
	} else {
		/*
		 * 1 to update inode item
		 * 1 to remove old inode ref
		 * 1 to add new inode ref
		 */
		trans_num_items += 3;
	}
	if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
		trans_num_items += 4;
	else
		trans_num_items += 3;
	trans = btrfs_start_transaction(root, trans_num_items);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_notrans;
	}

	if (dest != root) {
		ret = btrfs_record_root_in_trans(trans, dest);
		if (ret)
			goto out_fail;
	}

	/*
	 * We need to find a free sequence number both in the source and
	 * in the destination directory for the exchange.
	 */
	ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx);
	if (ret)
		goto out_fail;
	ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx);
	if (ret)
		goto out_fail;

	BTRFS_I(old_inode)->dir_index = 0ULL;
	BTRFS_I(new_inode)->dir_index = 0ULL;

	/* Reference for the source. */
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
		/* force full log commit if subvolume involved. */
		btrfs_set_log_full_commit(trans);
	} else {
		ret = btrfs_insert_inode_ref(trans, dest, new_name, old_ino,
					     btrfs_ino(BTRFS_I(new_dir)),
					     old_idx);
		if (ret)
			goto out_fail;
		need_abort = true;
	}

	/* And now for the dest. */
	if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
		/* force full log commit if subvolume involved. */
		btrfs_set_log_full_commit(trans);
	} else {
		ret = btrfs_insert_inode_ref(trans, root, old_name, new_ino,
					     btrfs_ino(BTRFS_I(old_dir)),
					     new_idx);
		if (ret) {
			if (need_abort)
				btrfs_abort_transaction(trans, ret);
			goto out_fail;
		}
	}

	/* Update inode version and ctime/mtime. */
	inode_inc_iversion(old_dir);
	inode_inc_iversion(new_dir);
	inode_inc_iversion(old_inode);
	inode_inc_iversion(new_inode);
	simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);

	if (old_dentry->d_parent != new_dentry->d_parent) {
		btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
					BTRFS_I(old_inode), true);
		btrfs_record_unlink_dir(trans, BTRFS_I(new_dir),
					BTRFS_I(new_inode), true);
	}

	/* src is a subvolume */
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry);
	} else { /* src is an inode */
		ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
					   BTRFS_I(old_dentry->d_inode),
					   old_name, &old_rename_ctx);
		if (!ret)
			ret = btrfs_update_inode(trans, BTRFS_I(old_inode));
	}
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	/* dest is a subvolume */
	if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry);
	} else { /* dest is an inode */
		ret = __btrfs_unlink_inode(trans, BTRFS_I(new_dir),
					   BTRFS_I(new_dentry->d_inode),
					   new_name, &new_rename_ctx);
		if (!ret)
			ret = btrfs_update_inode(trans, BTRFS_I(new_inode));
	}
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
			     new_name, 0, old_idx);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode),
			     old_name, 0, new_idx);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	if (old_inode->i_nlink == 1)
		BTRFS_I(old_inode)->dir_index = old_idx;
	if (new_inode->i_nlink == 1)
		BTRFS_I(new_inode)->dir_index = new_idx;

	/*
	 * Now pin the logs of the roots. We do it to ensure that no other task
	 * can sync the logs while we are in progress with the rename, because
	 * that could result in an inconsistency in case any of the inodes that
	 * are part of this rename operation were logged before.
	 */
	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_pin_log_trans(root);
	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_pin_log_trans(dest);

	/* Do the log updates for all inodes. */
	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
				   old_rename_ctx.index, new_dentry->d_parent);
	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_log_new_name(trans, new_dentry, BTRFS_I(new_dir),
				   new_rename_ctx.index, old_dentry->d_parent);

	/* Now unpin the logs. */
	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_end_log_trans(root);
	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_end_log_trans(dest);
out_fail:
	ret2 = btrfs_end_transaction(trans);
	ret = ret ? ret : ret2;
out_notrans:
	if (new_ino == BTRFS_FIRST_FREE_OBJECTID ||
	    old_ino == BTRFS_FIRST_FREE_OBJECTID)
		up_read(&fs_info->subvol_sem);

	fscrypt_free_filename(&new_fname);
	fscrypt_free_filename(&old_fname);
	return ret;
}
static struct inode *new_whiteout_inode(struct mnt_idmap *idmap,
					struct inode *dir)
{
	struct inode *inode;

	inode = new_inode(dir->i_sb);
	if (inode) {
		inode_init_owner(idmap, inode, dir,
				 S_IFCHR | WHITEOUT_MODE);
		inode->i_op = &btrfs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, WHITEOUT_DEV);
	}
	return inode;
}
static int btrfs_rename(struct mnt_idmap *idmap,
			struct inode *old_dir, struct dentry *old_dentry,
			struct inode *new_dir, struct dentry *new_dentry,
			unsigned int flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
	struct btrfs_new_inode_args whiteout_args = {
		.dir = old_dir,
		.dentry = old_dentry,
	};
	struct btrfs_trans_handle *trans;
	unsigned int trans_num_items;
	struct btrfs_root *root = BTRFS_I(old_dir)->root;
	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
	struct inode *new_inode = d_inode(new_dentry);
	struct inode *old_inode = d_inode(old_dentry);
	struct btrfs_rename_ctx rename_ctx;
	u64 index = 0;
	int ret;
	int ret2;
	u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
	struct fscrypt_name old_fname, new_fname;

	if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
		return -EPERM;

	/* we only allow rename subvolume link between subvolumes */
	if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
		return -EXDEV;

	if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
	    (new_inode && btrfs_ino(BTRFS_I(new_inode)) == BTRFS_FIRST_FREE_OBJECTID))
		return -ENOTEMPTY;

	if (S_ISDIR(old_inode->i_mode) && new_inode &&
	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
		return -ENOTEMPTY;

	ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname);
	if (ret)
		return ret;

	ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname);
	if (ret) {
		fscrypt_free_filename(&old_fname);
		return ret;
	}

	/* check for collisions, even if the name isn't there */
	ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino, &new_fname.disk_name);
	if (ret) {
		if (ret == -EEXIST) {
			/* we shouldn't get
			 * eexist without a new_inode */
			if (WARN_ON(!new_inode)) {
				goto out_fscrypt_names;
			}
		} else {
			/* maybe -EOVERFLOW */
			goto out_fscrypt_names;
		}
	}
	ret = 0;

	/*
	 * we're using rename to replace one file with another.  Start IO on it
	 * now so we don't add too much work to the end of the transaction
	 */
	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size)
		filemap_flush(old_inode->i_mapping);

	if (flags & RENAME_WHITEOUT) {
		whiteout_args.inode = new_whiteout_inode(idmap, old_dir);
		if (!whiteout_args.inode) {
			ret = -ENOMEM;
			goto out_fscrypt_names;
		}
		ret = btrfs_new_inode_prepare(&whiteout_args, &trans_num_items);
		if (ret)
			goto out_whiteout_inode;
	} else {
		/* 1 to update the old parent inode. */
		trans_num_items = 1;
	}

	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
		/* Close the race window with snapshot create/destroy ioctl */
		down_read(&fs_info->subvol_sem);
		/*
		 * 1 to remove old root ref
		 * 1 to remove old root backref
		 * 1 to add new root ref
		 * 1 to add new root backref
		 */
		trans_num_items += 4;
	} else {
		/*
		 * 1 to update inode
		 * 1 to remove old inode ref
		 * 1 to add new inode ref
		 */
		trans_num_items += 3;
	}
	/*
	 * 1 to remove old dir item
	 * 1 to remove old dir index
	 * 1 to add new dir item
	 * 1 to add new dir index
	 */
	trans_num_items += 4;
	/* 1 to update new parent inode if it's not the same as the old parent */
	if (new_dir != old_dir)
		trans_num_items++;
	if (new_inode) {
		/*
		 * 1 to update inode
		 * 1 to remove inode ref
		 * 1 to remove dir item
		 * 1 to remove dir index
		 * 1 to possibly add orphan item
		 */
		trans_num_items += 5;
	}
	trans = btrfs_start_transaction(root, trans_num_items);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_notrans;
	}

	if (dest != root) {
		ret = btrfs_record_root_in_trans(trans, dest);
		if (ret)
			goto out_fail;
	}

	ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index);
	if (ret)
		goto out_fail;

	BTRFS_I(old_inode)->dir_index = 0ULL;
	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
		/* force full log commit if subvolume involved. */
		btrfs_set_log_full_commit(trans);
	} else {
		ret = btrfs_insert_inode_ref(trans, dest, &new_fname.disk_name,
					     old_ino, btrfs_ino(BTRFS_I(new_dir)),
					     index);
		if (ret)
			goto out_fail;
	}

	inode_inc_iversion(old_dir);
	inode_inc_iversion(new_dir);
	inode_inc_iversion(old_inode);
	simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);

	if (old_dentry->d_parent != new_dentry->d_parent)
		btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
					BTRFS_I(old_inode), true);

	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
		ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry);
	} else {
		ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
					   BTRFS_I(d_inode(old_dentry)),
					   &old_fname.disk_name, &rename_ctx);
		if (!ret)
			ret = btrfs_update_inode(trans, BTRFS_I(old_inode));
	}
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	if (new_inode) {
		inode_inc_iversion(new_inode);
		if (unlikely(btrfs_ino(BTRFS_I(new_inode)) ==
			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
			ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry);
			BUG_ON(new_inode->i_nlink == 0);
		} else {
			ret = btrfs_unlink_inode(trans, BTRFS_I(new_dir),
						 BTRFS_I(d_inode(new_dentry)),
						 &new_fname.disk_name);
		}
		if (!ret && new_inode->i_nlink == 0)
			ret = btrfs_orphan_add(trans,
					       BTRFS_I(d_inode(new_dentry)));
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out_fail;
		}
	}

	ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
			     &new_fname.disk_name, 0, index);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	if (old_inode->i_nlink == 1)
		BTRFS_I(old_inode)->dir_index = index;

	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
				   rename_ctx.index, new_dentry->d_parent);

	if (flags & RENAME_WHITEOUT) {
		ret = btrfs_create_new_inode(trans, &whiteout_args);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out_fail;
		} else {
			unlock_new_inode(whiteout_args.inode);
			iput(whiteout_args.inode);
			whiteout_args.inode = NULL;
		}
	}
out_fail:
	ret2 = btrfs_end_transaction(trans);
	ret = ret ? ret : ret2;
out_notrans:
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
		up_read(&fs_info->subvol_sem);
	if (flags & RENAME_WHITEOUT)
		btrfs_new_inode_args_destroy(&whiteout_args);
out_whiteout_inode:
	if (flags & RENAME_WHITEOUT)
		iput(whiteout_args.inode);
out_fscrypt_names:
	fscrypt_free_filename(&old_fname);
	fscrypt_free_filename(&new_fname);
	return ret;
}
static int btrfs_rename2(struct mnt_idmap *idmap, struct inode *old_dir,
			 struct dentry *old_dentry, struct inode *new_dir,
			 struct dentry *new_dentry, unsigned int flags)
{
	int ret;

	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
		return -EINVAL;

	if (flags & RENAME_EXCHANGE)
		ret = btrfs_rename_exchange(old_dir, old_dentry, new_dir,
					    new_dentry);
	else
		ret = btrfs_rename(idmap, old_dir, old_dentry, new_dir,
				   new_dentry, flags);

	btrfs_btree_balance_dirty(BTRFS_I(new_dir)->root->fs_info);

	return ret;
}
struct btrfs_delalloc_work {
	struct inode *inode;
	struct completion completion;
	struct list_head list;
	struct btrfs_work work;
};
static void btrfs_run_delalloc_work(struct btrfs_work *work)
{
	struct btrfs_delalloc_work *delalloc_work;
	struct inode *inode;

	delalloc_work = container_of(work, struct btrfs_delalloc_work,
				     work);
	inode = delalloc_work->inode;
	filemap_flush(inode->i_mapping);
	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
		     &BTRFS_I(inode)->runtime_flags))
		filemap_flush(inode->i_mapping);

	iput(inode);
	complete(&delalloc_work->completion);
}
static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode)
{
	struct btrfs_delalloc_work *work;

	work = kmalloc(sizeof(*work), GFP_NOFS);
	if (!work)
		return NULL;

	init_completion(&work->completion);
	INIT_LIST_HEAD(&work->list);
	work->inode = inode;
	btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL);

	return work;
}
/*
 * some fairly slow code that needs optimization. This walks the list
 * of all the inodes with pending delalloc and forces them to disk.
 */
static int start_delalloc_inodes(struct btrfs_root *root,
				 struct writeback_control *wbc, bool snapshot,
				 bool in_reclaim_context)
{
	struct btrfs_inode *binode;
	struct inode *inode;
	struct btrfs_delalloc_work *work, *next;
	LIST_HEAD(works);
	LIST_HEAD(splice);
	int ret = 0;
	bool full_flush = wbc->nr_to_write == LONG_MAX;

	mutex_lock(&root->delalloc_mutex);
	spin_lock(&root->delalloc_lock);
	list_splice_init(&root->delalloc_inodes, &splice);
	while (!list_empty(&splice)) {
		binode = list_entry(splice.next, struct btrfs_inode,
				    delalloc_inodes);

		list_move_tail(&binode->delalloc_inodes,
			       &root->delalloc_inodes);

		if (in_reclaim_context &&
		    test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &binode->runtime_flags))
			continue;

		inode = igrab(&binode->vfs_inode);
		if (!inode) {
			cond_resched_lock(&root->delalloc_lock);
			continue;
		}
		spin_unlock(&root->delalloc_lock);

		if (snapshot)
			set_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
				&binode->runtime_flags);
		if (full_flush) {
			work = btrfs_alloc_delalloc_work(inode);
			if (!work) {
				iput(inode);
				ret = -ENOMEM;
				goto out;
			}
			list_add_tail(&work->list, &works);
			btrfs_queue_work(root->fs_info->flush_workers,
					 &work->work);
		} else {
			ret = filemap_fdatawrite_wbc(inode->i_mapping, wbc);
			btrfs_add_delayed_iput(BTRFS_I(inode));
			if (ret || wbc->nr_to_write <= 0)
				goto out;
		}
		cond_resched();
		spin_lock(&root->delalloc_lock);
	}
	spin_unlock(&root->delalloc_lock);

out:
	list_for_each_entry_safe(work, next, &works, list) {
		list_del_init(&work->list);
		wait_for_completion(&work->completion);
		kfree(work);
	}

	if (!list_empty(&splice)) {
		spin_lock(&root->delalloc_lock);
		list_splice_tail(&splice, &root->delalloc_inodes);
		spin_unlock(&root->delalloc_lock);
	}
	mutex_unlock(&root->delalloc_mutex);
	return ret;
}
int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context)
{
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (BTRFS_FS_ERROR(fs_info))
		return -EROFS;

	return start_delalloc_inodes(root, &wbc, true, in_reclaim_context);
}
int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
			       bool in_reclaim_context)
{
	struct writeback_control wbc = {
		.nr_to_write = nr,
		.sync_mode = WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};
	struct btrfs_root *root;
	LIST_HEAD(splice);
	int ret;

	if (BTRFS_FS_ERROR(fs_info))
		return -EROFS;

	mutex_lock(&fs_info->delalloc_root_mutex);
	spin_lock(&fs_info->delalloc_root_lock);
	list_splice_init(&fs_info->delalloc_roots, &splice);
	while (!list_empty(&splice)) {
		/*
		 * Reset nr_to_write here so we know that we're doing a full
		 * flush.
		 */
		if (nr == LONG_MAX)
			wbc.nr_to_write = LONG_MAX;

		root = list_first_entry(&splice, struct btrfs_root,
					delalloc_root);
		root = btrfs_grab_root(root);
		BUG_ON(!root);
		list_move_tail(&root->delalloc_root,
			       &fs_info->delalloc_roots);
		spin_unlock(&fs_info->delalloc_root_lock);

		ret = start_delalloc_inodes(root, &wbc, false, in_reclaim_context);
		btrfs_put_root(root);
		if (ret < 0 || wbc.nr_to_write <= 0)
			goto out;
		spin_lock(&fs_info->delalloc_root_lock);
	}
	spin_unlock(&fs_info->delalloc_root_lock);

	ret = 0;
out:
	if (!list_empty(&splice)) {
		spin_lock(&fs_info->delalloc_root_lock);
		list_splice_tail(&splice, &fs_info->delalloc_roots);
		spin_unlock(&fs_info->delalloc_root_lock);
	}
	mutex_unlock(&fs_info->delalloc_root_mutex);
	return ret;
}
static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
			 struct dentry *dentry, const char *symname)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct inode *inode;
	struct btrfs_new_inode_args new_inode_args = {
		.dir = dir,
		.dentry = dentry,
	};
	unsigned int trans_num_items;
	int err;
	int name_len;
	int datasize;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	struct extent_buffer *leaf;

	name_len = strlen(symname);
	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
		return -ENAMETOOLONG;

	inode = new_inode(dir->i_sb);
	if (!inode)
		return -ENOMEM;
	inode_init_owner(idmap, inode, dir, S_IFLNK | S_IRWXUGO);
	inode->i_op = &btrfs_symlink_inode_operations;
	inode_nohighmem(inode);
	inode->i_mapping->a_ops = &btrfs_aops;
	btrfs_i_size_write(BTRFS_I(inode), name_len);
	inode_set_bytes(inode, name_len);

	new_inode_args.inode = inode;
	err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
	if (err)
		goto out_inode;
	/* 1 additional item for the inline extent */
	trans_num_items++;

	trans = btrfs_start_transaction(root, trans_num_items);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_new_inode_args;
	}

	err = btrfs_create_new_inode(trans, &new_inode_args);
	if (err)
		goto out;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		btrfs_abort_transaction(trans, err);
		discard_new_inode(inode);
		inode = NULL;
		goto out;
	}
	key.objectid = btrfs_ino(BTRFS_I(inode));
	key.offset = 0;
	key.type = BTRFS_EXTENT_DATA_KEY;
	datasize = btrfs_file_extent_calc_inline_size(name_len);
	err = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	if (err) {
		btrfs_abort_transaction(trans, err);
		btrfs_free_path(path);
		discard_new_inode(inode);
		inode = NULL;
		goto out;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei,
				   BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_compression(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);

	ptr = btrfs_file_extent_inline_start(ei);
	write_extent_buffer(leaf, symname, ptr, name_len);
	btrfs_mark_buffer_dirty(trans, leaf);
	btrfs_free_path(path);

	d_instantiate_new(dentry, inode);
	err = 0;
out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out_new_inode_args:
	btrfs_new_inode_args_destroy(&new_inode_args);
out_inode:
	if (err)
		iput(inode);
	return err;
}
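
/*
 * Editorial note (not from the original source): the symlink target created
 * above is stored as a single inline file extent item, keyed
 * (objectid = inode number, type = BTRFS_EXTENT_DATA_KEY, offset = 0), with
 * the target path as uncompressed inline data. That is why the target length
 * is capped at BTRFS_MAX_INLINE_DATA_SIZE(fs_info): the whole path must fit
 * in one leaf item, and longer targets fail with -ENAMETOOLONG instead of
 * spilling into a regular data extent.
 */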
static struct btrfs_trans_handle *insert_prealloc_file_extent(
				       struct btrfs_trans_handle *trans_in,
				       struct btrfs_inode *inode,
				       struct btrfs_key *ins,
				       u64 file_offset)
{
	struct btrfs_file_extent_item stack_fi;
	struct btrfs_replace_extent_info extent_info;
	struct btrfs_trans_handle *trans = trans_in;
	struct btrfs_path *path;
	u64 start = ins->objectid;
	u64 len = ins->offset;
	u64 qgroup_released = 0;
	int ret;

	memset(&stack_fi, 0, sizeof(stack_fi));

	btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_PREALLOC);
	btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, start);
	btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, len);
	btrfs_set_stack_file_extent_num_bytes(&stack_fi, len);
	btrfs_set_stack_file_extent_ram_bytes(&stack_fi, len);
	btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE);
	/* Encryption and other encoding is reserved and all 0 */

	ret = btrfs_qgroup_release_data(inode, file_offset, len, &qgroup_released);
	if (ret < 0)
		return ERR_PTR(ret);

	if (trans) {
		ret = insert_reserved_file_extent(trans, inode,
						  file_offset, &stack_fi,
						  true, qgroup_released);
		if (ret)
			goto free_qgroup;
		return trans;
	}

	extent_info.disk_offset = start;
	extent_info.disk_len = len;
	extent_info.data_offset = 0;
	extent_info.data_len = len;
	extent_info.file_offset = file_offset;
	extent_info.extent_buf = (char *)&stack_fi;
	extent_info.is_new_extent = true;
	extent_info.update_times = true;
	extent_info.qgroup_reserved = qgroup_released;
	extent_info.insertions = 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto free_qgroup;
	}

	ret = btrfs_replace_file_extents(inode, path, file_offset,
					 file_offset + len - 1, &extent_info,
					 &trans);
	btrfs_free_path(path);
	if (ret)
		goto free_qgroup;
	return trans;

free_qgroup:
	/*
	 * We have released qgroup data range at the beginning of the function,
	 * and normally qgroup_released bytes will be freed when committing
	 * transaction.
	 * But if we error out early, we have to free what we have released
	 * or we leak qgroup data reservation.
	 */
	btrfs_qgroup_free_refroot(inode->root->fs_info,
			inode->root->root_key.objectid, qgroup_released,
			BTRFS_QGROUP_RSV_DATA);
	return ERR_PTR(ret);
}
static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
				       u64 start, u64 num_bytes, u64 min_size,
				       loff_t actual_len, u64 *alloc_hint,
				       struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key ins;
	u64 cur_offset = start;
	u64 clear_offset = start;
	u64 i_size;
	u64 cur_bytes;
	u64 last_alloc = (u64)-1;
	int ret = 0;
	bool own_trans = true;
	u64 end = start + num_bytes - 1;

	if (trans)
		own_trans = false;
	while (num_bytes > 0) {
		cur_bytes = min_t(u64, num_bytes, SZ_256M);
		cur_bytes = max(cur_bytes, min_size);
		/*
		 * If we are severely fragmented we could end up with really
		 * small allocations, so if the allocator is returning small
		 * chunks lets make its job easier by only searching for those
		 * sized chunks.
		 */
		cur_bytes = min(cur_bytes, last_alloc);
		ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes,
				min_size, 0, *alloc_hint, &ins, 1, 0);
		if (ret)
			break;

		/*
		 * We've reserved this space, and thus converted it from
		 * ->bytes_may_use to ->bytes_reserved.  Any error that happens
		 * from here on out we will only need to clear our reservation
		 * for the remaining unreserved area, so advance our
		 * clear_offset by our extent size.
		 */
		clear_offset += ins.offset;

		last_alloc = ins.offset;
		trans = insert_prealloc_file_extent(trans, BTRFS_I(inode),
						    &ins, cur_offset);
		/*
		 * Now that we inserted the prealloc extent we can finally
		 * decrement the number of reservations in the block group.
		 * If we did it before, we could race with relocation and have
		 * relocation miss the reserved extent, making it fail later.
		 */
		btrfs_dec_block_group_reservations(fs_info, ins.objectid);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			btrfs_free_reserved_extent(fs_info, ins.objectid,
						   ins.offset, 0);
			break;
		}

		em = alloc_extent_map();
		if (!em) {
			btrfs_drop_extent_map_range(BTRFS_I(inode), cur_offset,
					    cur_offset + ins.offset - 1, false);
			btrfs_set_inode_full_sync(BTRFS_I(inode));
			goto next;
		}

		em->start = cur_offset;
		em->orig_start = cur_offset;
		em->len = ins.offset;
		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->orig_block_len = ins.offset;
		em->ram_bytes = ins.offset;
		em->flags |= EXTENT_FLAG_PREALLOC;
		em->generation = trans->transid;

		ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, true);
		free_extent_map(em);
next:
		num_bytes -= ins.offset;
		cur_offset += ins.offset;
		*alloc_hint = ins.objectid + ins.offset;

		inode_inc_iversion(inode);
		inode_set_ctime_current(inode);
		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    (actual_len > inode->i_size) &&
		    (cur_offset > inode->i_size)) {
			if (cur_offset > actual_len)
				i_size = actual_len;
			else
				i_size = cur_offset;
			i_size_write(inode, i_size);
			btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
		}

		ret = btrfs_update_inode(trans, BTRFS_I(inode));

		if (ret) {
			btrfs_abort_transaction(trans, ret);
			if (own_trans)
				btrfs_end_transaction(trans);
			break;
		}

		if (own_trans) {
			btrfs_end_transaction(trans);
			trans = NULL;
		}
	}
	if (clear_offset < end)
		btrfs_free_reserved_data_space(BTRFS_I(inode), NULL, clear_offset,
			end - clear_offset + 1);
	return ret;
}
int btrfs_prealloc_file_range(struct inode *inode, int mode,
			      u64 start, u64 num_bytes, u64 min_size,
			      loff_t actual_len, u64 *alloc_hint)
{
	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
					   min_size, actual_len, alloc_hint,
					   NULL);
}

int btrfs_prealloc_file_range_trans(struct inode *inode,
				    struct btrfs_trans_handle *trans, int mode,
				    u64 start, u64 num_bytes, u64 min_size,
				    loff_t actual_len, u64 *alloc_hint)
{
	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
					   min_size, actual_len, alloc_hint, trans);
}
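
/*
 * Editorial example (not from the original source): btrfs_prealloc_file_range()
 * is the backend for preallocating fallocate(2) requests. A minimal sketch
 * that preallocates 16 MiB without changing i_size (hypothetical path):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/mnt/btrfs/f", O_RDWR | O_CREAT, 0644);
 *
 *		if (fd < 0)
 *			return 1;
 *		if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 16 << 20))
 *			return 1;
 *		return 0;
 *	}
 *
 * With FALLOC_FL_KEEP_SIZE set, the i_size update branch in
 * __btrfs_prealloc_file_range() above is skipped.
 */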
static int btrfs_permission(struct mnt_idmap *idmap,
			    struct inode *inode, int mask)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	umode_t mode = inode->i_mode;

	if (mask & MAY_WRITE &&
	    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
		if (btrfs_root_readonly(root))
			return -EROFS;
		if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
			return -EACCES;
	}
	return generic_permission(idmap, inode, mask);
}
static int btrfs_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
			 struct file *file, umode_t mode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode;
	struct btrfs_new_inode_args new_inode_args = {
		.dir = dir,
		.dentry = file->f_path.dentry,
		.orphan = true,
	};
	unsigned int trans_num_items;
	int ret;

	inode = new_inode(dir->i_sb);
	if (!inode)
		return -ENOMEM;
	inode_init_owner(idmap, inode, dir, mode);
	inode->i_fop = &btrfs_file_operations;
	inode->i_op = &btrfs_file_inode_operations;
	inode->i_mapping->a_ops = &btrfs_aops;

	new_inode_args.inode = inode;
	ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
	if (ret)
		goto out_inode;

	trans = btrfs_start_transaction(root, trans_num_items);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_new_inode_args;
	}

	ret = btrfs_create_new_inode(trans, &new_inode_args);

	/*
	 * We set number of links to 0 in btrfs_create_new_inode(), and here we
	 * set it to 1 because d_tmpfile() will issue a warning if the count is
	 * 0, through:
	 *
	 *    d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
	 */
	set_nlink(inode, 1);

	if (!ret) {
		d_tmpfile(file, inode);
		unlock_new_inode(inode);
		mark_inode_dirty(inode);
	}

	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out_new_inode_args:
	btrfs_new_inode_args_destroy(&new_inode_args);
out_inode:
	if (ret)
		iput(inode);
	return finish_open_simple(file, ret);
}
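
/*
 * Editorial example (not from the original source): btrfs_tmpfile() backs
 * O_TMPFILE opens. The inode is created with an orphan item, so it is
 * reclaimed automatically if the fd is closed without ever being linked in:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/mnt/btrfs", O_TMPFILE | O_RDWR, 0600);
 *
 *		if (fd < 0)
 *			return 1;
 *		write(fd, "scratch", 7);	// unnamed file, gone on close
 *		close(fd);
 *		return 0;
 *	}
 */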
void btrfs_set_range_writeback(struct btrfs_inode *inode, u64 start, u64 end)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	unsigned long index = start >> PAGE_SHIFT;
	unsigned long end_index = end >> PAGE_SHIFT;
	struct page *page;
	u32 len;

	ASSERT(end + 1 - start <= U32_MAX);
	len = end + 1 - start;
	while (index <= end_index) {
		page = find_get_page(inode->vfs_inode.i_mapping, index);
		ASSERT(page); /* Pages should be in the extent_io_tree */

		/* This is for data, which doesn't yet support larger folio. */
		ASSERT(folio_order(page_folio(page)) == 0);
		btrfs_folio_set_writeback(fs_info, page_folio(page), start, len);
		put_page(page);
		index++;
	}
}
int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info,
					     int compress_type)
{
	switch (compress_type) {
	case BTRFS_COMPRESS_NONE:
		return BTRFS_ENCODED_IO_COMPRESSION_NONE;
	case BTRFS_COMPRESS_ZLIB:
		return BTRFS_ENCODED_IO_COMPRESSION_ZLIB;
	case BTRFS_COMPRESS_LZO:
		/*
		 * The LZO format depends on the sector size. 64K is the maximum
		 * sector size that we support.
		 */
		if (fs_info->sectorsize < SZ_4K || fs_info->sectorsize > SZ_64K)
			return -EINVAL;
		return BTRFS_ENCODED_IO_COMPRESSION_LZO_4K +
		       (fs_info->sectorsize_bits - 12);
	case BTRFS_COMPRESS_ZSTD:
		return BTRFS_ENCODED_IO_COMPRESSION_ZSTD;
	default:
		return -EUCLEAN;
	}
}
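
/*
 * Editorial note (not from the original source): the LZO arithmetic above
 * maps sectorsize_bits onto the per-sector LZO constants. For example:
 *
 *	sectorsize  4K: sectorsize_bits = 12 -> LZO_4K + (12 - 12) = LZO_4K
 *	sectorsize 16K: sectorsize_bits = 14 -> LZO_4K + (14 - 12) = LZO_16K
 *	sectorsize 64K: sectorsize_bits = 16 -> LZO_4K + (16 - 12) = LZO_64K
 *
 * which is why btrfs_do_encoded_write() below can invert the mapping with
 * "encoded->compression - BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 12".
 */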
static ssize_t btrfs_encoded_read_inline(
				struct kiocb *iocb,
				struct iov_iter *iter, u64 start,
				u64 lockend,
				struct extent_state **cached_state,
				u64 extent_start, size_t count,
				struct btrfs_ioctl_encoded_io_args *encoded,
				bool *unlocked)
{
	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *item;
	u64 ram_bytes;
	unsigned long ptr;
	void *tmp;
	ssize_t ret;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}
	ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode),
				       extent_start, 0);
	if (ret) {
		if (ret > 0) {
			/* The extent item disappeared? */
			ret = -EIO;
		}
		goto out;
	}
	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);

	ram_bytes = btrfs_file_extent_ram_bytes(leaf, item);
	ptr = btrfs_file_extent_inline_start(item);

	encoded->len = min_t(u64, extent_start + ram_bytes,
			     inode->vfs_inode.i_size) - iocb->ki_pos;
	ret = btrfs_encoded_io_compression_from_extent(fs_info,
				 btrfs_file_extent_compression(leaf, item));
	if (ret < 0)
		goto out;
	encoded->compression = ret;
	if (encoded->compression) {
		size_t inline_size;

		inline_size = btrfs_file_extent_inline_item_len(leaf,
								path->slots[0]);
		if (inline_size > count) {
			ret = -ENOBUFS;
			goto out;
		}
		count = inline_size;
		encoded->unencoded_len = ram_bytes;
		encoded->unencoded_offset = iocb->ki_pos - extent_start;
	} else {
		count = min_t(u64, count, encoded->len);
		encoded->len = count;
		encoded->unencoded_len = count;
		ptr += iocb->ki_pos - extent_start;
	}

	tmp = kmalloc(count, GFP_NOFS);
	if (!tmp) {
		ret = -ENOMEM;
		goto out;
	}
	read_extent_buffer(leaf, tmp, ptr, count);
	btrfs_release_path(path);
	unlock_extent(io_tree, start, lockend, cached_state);
	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
	*unlocked = true;

	ret = copy_to_iter(tmp, count, iter);
	if (ret != count)
		ret = -EFAULT;
	kfree(tmp);
out:
	btrfs_free_path(path);
	return ret;
}
struct btrfs_encoded_read_private {
	wait_queue_head_t wait;
	atomic_t pending;
	blk_status_t status;
};

static void btrfs_encoded_read_endio(struct btrfs_bio *bbio)
{
	struct btrfs_encoded_read_private *priv = bbio->private;

	if (bbio->bio.bi_status) {
		/*
		 * The memory barrier implied by the atomic_dec_return() here
		 * pairs with the memory barrier implied by the
		 * atomic_dec_return() or io_wait_event() in
		 * btrfs_encoded_read_regular_fill_pages() to ensure that this
		 * write is observed before the load of status in
		 * btrfs_encoded_read_regular_fill_pages().
		 */
		WRITE_ONCE(priv->status, bbio->bio.bi_status);
	}
	if (!atomic_dec_return(&priv->pending))
		wake_up(&priv->wait);
	bio_put(&bbio->bio);
}
int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
					  u64 file_offset, u64 disk_bytenr,
					  u64 disk_io_size, struct page **pages)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_encoded_read_private priv = {
		.pending = ATOMIC_INIT(1),
	};
	unsigned long i = 0;
	struct btrfs_bio *bbio;

	init_waitqueue_head(&priv.wait);

	bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info,
			       btrfs_encoded_read_endio, &priv);
	bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
	bbio->inode = inode;

	do {
		size_t bytes = min_t(u64, disk_io_size, PAGE_SIZE);

		if (bio_add_page(&bbio->bio, pages[i], bytes, 0) < bytes) {
			atomic_inc(&priv.pending);
			btrfs_submit_bio(bbio, 0);

			bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info,
					       btrfs_encoded_read_endio, &priv);
			bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
			bbio->inode = inode;
			continue;
		}

		i++;
		disk_bytenr += bytes;
		disk_io_size -= bytes;
	} while (disk_io_size);

	atomic_inc(&priv.pending);
	btrfs_submit_bio(bbio, 0);

	if (atomic_dec_return(&priv.pending))
		io_wait_event(priv.wait, !atomic_read(&priv.pending));
	/* See btrfs_encoded_read_endio() for ordering. */
	return blk_status_to_errno(READ_ONCE(priv.status));
}
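
/*
 * Editorial note (not from the original source): the function above uses the
 * classic "+1 bias" completion pattern: priv.pending starts at 1 so that early
 * bio completions can never race the submission loop down to zero; each
 * submitted bio adds one reference, and the final atomic_dec_return() drops
 * the bias. Only if that last decrement does not hit zero are completions
 * still outstanding, in which case we sleep in io_wait_event().
 */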
static ssize_t btrfs_encoded_read_regular(struct kiocb *iocb,
					  struct iov_iter *iter,
					  u64 start, u64 lockend,
					  struct extent_state **cached_state,
					  u64 disk_bytenr, u64 disk_io_size,
					  size_t count, bool compressed,
					  bool *unlocked)
{
	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct page **pages;
	unsigned long nr_pages, i;
	u64 cur;
	size_t page_offset;
	ssize_t ret;

	nr_pages = DIV_ROUND_UP(disk_io_size, PAGE_SIZE);
	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
	if (!pages)
		return -ENOMEM;
	ret = btrfs_alloc_page_array(nr_pages, pages, 0);
	if (ret) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_encoded_read_regular_fill_pages(inode, start, disk_bytenr,
						    disk_io_size, pages);
	if (ret)
		goto out;

	unlock_extent(io_tree, start, lockend, cached_state);
	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
	*unlocked = true;

	if (compressed) {
		i = 0;
		page_offset = 0;
	} else {
		i = (iocb->ki_pos - start) >> PAGE_SHIFT;
		page_offset = (iocb->ki_pos - start) & (PAGE_SIZE - 1);
	}
	cur = 0;
	while (cur < count) {
		size_t bytes = min_t(size_t, count - cur,
				     PAGE_SIZE - page_offset);

		if (copy_page_to_iter(pages[i], page_offset, bytes,
				      iter) != bytes) {
			ret = -EFAULT;
			goto out;
		}
		i++;
		cur += bytes;
		page_offset = 0;
	}
	ret = count;
out:
	for (i = 0; i < nr_pages; i++) {
		if (pages[i])
			__free_page(pages[i]);
	}
	kfree(pages);
	return ret;
}
ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
			   struct btrfs_ioctl_encoded_io_args *encoded)
{
	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_io_tree *io_tree = &inode->io_tree;
	ssize_t ret;
	size_t count = iov_iter_count(iter);
	u64 start, lockend, disk_bytenr, disk_io_size;
	struct extent_state *cached_state = NULL;
	struct extent_map *em;
	bool unlocked = false;

	file_accessed(iocb->ki_filp);

	btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);

	if (iocb->ki_pos >= inode->vfs_inode.i_size) {
		btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
		return 0;
	}
	start = ALIGN_DOWN(iocb->ki_pos, fs_info->sectorsize);
	/*
	 * We don't know how long the extent containing iocb->ki_pos is, but if
	 * it's compressed we know that it won't be longer than this.
	 */
	lockend = start + BTRFS_MAX_UNCOMPRESSED - 1;

	for (;;) {
		struct btrfs_ordered_extent *ordered;

		ret = btrfs_wait_ordered_range(&inode->vfs_inode, start,
					       lockend - start + 1);
		if (ret)
			goto out_unlock_inode;
		lock_extent(io_tree, start, lockend, &cached_state);
		ordered = btrfs_lookup_ordered_range(inode, start,
						     lockend - start + 1);
		if (!ordered)
			break;
		btrfs_put_ordered_extent(ordered);
		unlock_extent(io_tree, start, lockend, &cached_state);
		cond_resched();
	}

	em = btrfs_get_extent(inode, NULL, 0, start, lockend - start + 1);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_unlock_extent;
	}

	if (em->block_start == EXTENT_MAP_INLINE) {
		u64 extent_start = em->start;

		/*
		 * For inline extents we get everything we need out of the
		 * extent item.
		 */
		free_extent_map(em);
		em = NULL;
		ret = btrfs_encoded_read_inline(iocb, iter, start, lockend,
						&cached_state, extent_start,
						count, encoded, &unlocked);
		goto out;
	}

	/*
	 * We only want to return up to EOF even if the extent extends beyond
	 * that.
	 */
	encoded->len = min_t(u64, extent_map_end(em),
			     inode->vfs_inode.i_size) - iocb->ki_pos;
	if (em->block_start == EXTENT_MAP_HOLE ||
	    (em->flags & EXTENT_FLAG_PREALLOC)) {
		disk_bytenr = EXTENT_MAP_HOLE;
		count = min_t(u64, count, encoded->len);
		encoded->len = count;
		encoded->unencoded_len = count;
	} else if (extent_map_is_compressed(em)) {
		disk_bytenr = em->block_start;
		/*
		 * Bail if the buffer isn't large enough to return the whole
		 * compressed extent.
		 */
		if (em->block_len > count) {
			ret = -ENOBUFS;
			goto out_em;
		}
		disk_io_size = em->block_len;
		count = em->block_len;
		encoded->unencoded_len = em->ram_bytes;
		encoded->unencoded_offset = iocb->ki_pos - em->orig_start;
		ret = btrfs_encoded_io_compression_from_extent(fs_info,
						       extent_map_compression(em));
		if (ret < 0)
			goto out_em;
		encoded->compression = ret;
	} else {
		disk_bytenr = em->block_start + (start - em->start);
		if (encoded->len > count)
			encoded->len = count;
		/*
		 * Don't read beyond what we locked. This also limits the page
		 * allocations that we'll do.
		 */
		disk_io_size = min(lockend + 1, iocb->ki_pos + encoded->len) - start;
		count = start + disk_io_size - iocb->ki_pos;
		encoded->len = count;
		encoded->unencoded_len = count;
		disk_io_size = ALIGN(disk_io_size, fs_info->sectorsize);
	}
	free_extent_map(em);
	em = NULL;

	if (disk_bytenr == EXTENT_MAP_HOLE) {
		unlock_extent(io_tree, start, lockend, &cached_state);
		btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
		unlocked = true;
		ret = iov_iter_zero(count, iter);
		if (ret != count)
			ret = -EFAULT;
	} else {
		ret = btrfs_encoded_read_regular(iocb, iter, start, lockend,
						 &cached_state, disk_bytenr,
						 disk_io_size, count,
						 encoded->compression,
						 &unlocked);
	}

out:
	if (ret >= 0)
		iocb->ki_pos += encoded->len;
out_em:
	free_extent_map(em);
out_unlock_extent:
	if (!unlocked)
		unlock_extent(io_tree, start, lockend, &cached_state);
out_unlock_inode:
	if (!unlocked)
		btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
	return ret;
}
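
/*
 * Editorial note (not from the original source): this function implements the
 * read side of the privileged BTRFS_IOC_ENCODED_READ ioctl. Userspace (e.g.
 * send/receive tooling that streams compressed data) passes a struct
 * btrfs_ioctl_encoded_io_args with an iovec array and gets back the possibly
 * still-compressed extent payload plus the len/unencoded_len/unencoded_offset/
 * compression metadata filled in above, so compressed extents can be copied
 * without a decompress and recompress round trip.
 */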
10230 ssize_t
btrfs_do_encoded_write(struct kiocb
*iocb
, struct iov_iter
*from
,
10231 const struct btrfs_ioctl_encoded_io_args
*encoded
)
10233 struct btrfs_inode
*inode
= BTRFS_I(file_inode(iocb
->ki_filp
));
10234 struct btrfs_root
*root
= inode
->root
;
10235 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
10236 struct extent_io_tree
*io_tree
= &inode
->io_tree
;
10237 struct extent_changeset
*data_reserved
= NULL
;
10238 struct extent_state
*cached_state
= NULL
;
10239 struct btrfs_ordered_extent
*ordered
;
10243 u64 num_bytes
, ram_bytes
, disk_num_bytes
;
10244 unsigned long nr_pages
, i
;
10245 struct page
**pages
;
10246 struct btrfs_key ins
;
10247 bool extent_reserved
= false;
10248 struct extent_map
*em
;
	switch (encoded->compression) {
	case BTRFS_ENCODED_IO_COMPRESSION_ZLIB:
		compression = BTRFS_COMPRESS_ZLIB;
		break;
	case BTRFS_ENCODED_IO_COMPRESSION_ZSTD:
		compression = BTRFS_COMPRESS_ZSTD;
		break;
	case BTRFS_ENCODED_IO_COMPRESSION_LZO_4K:
	case BTRFS_ENCODED_IO_COMPRESSION_LZO_8K:
	case BTRFS_ENCODED_IO_COMPRESSION_LZO_16K:
	case BTRFS_ENCODED_IO_COMPRESSION_LZO_32K:
	case BTRFS_ENCODED_IO_COMPRESSION_LZO_64K:
		/* The sector size must match for LZO. */
		if (encoded->compression -
		    BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 12 !=
		    fs_info->sectorsize_bits)
			return -EINVAL;
		compression = BTRFS_COMPRESS_LZO;
		break;
	default:
		return -EINVAL;
	}
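	/*
	 * Worked example for the LZO sector size check above: the LZO_*
	 * constants are consecutive, so encoded->compression -
	 * BTRFS_ENCODED_IO_COMPRESSION_LZO_4K is 0 for LZO_4K, 1 for LZO_8K,
	 * ... 4 for LZO_64K. Adding 12 (ilog2(4096)) gives the log2 of the
	 * block size the data was compressed for: 12 for 4K up to 16 for 64K,
	 * which must equal fs_info->sectorsize_bits.
	 */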
	if (encoded->encryption != BTRFS_ENCODED_IO_ENCRYPTION_NONE)
		return -EINVAL;

	orig_count = iov_iter_count(from);

	/* The extent size must be sane. */
	if (encoded->unencoded_len > BTRFS_MAX_UNCOMPRESSED ||
	    orig_count > BTRFS_MAX_COMPRESSED || orig_count == 0)
		return -EINVAL;

	/*
	 * The compressed data must be smaller than the decompressed data.
	 *
	 * It's of course possible for data to compress to larger or the same
	 * size, but the buffered I/O path falls back to no compression for such
	 * data, and we don't want to break any assumptions by creating these
	 * extents.
	 *
	 * Note that this is less strict than the current check we have that the
	 * compressed data must be at least one sector smaller than the
	 * decompressed data. We only want to enforce the weaker requirement
	 * from old kernels that it is at least one byte smaller.
	 */
	if (orig_count >= encoded->unencoded_len)
		return -EINVAL;
	/* The extent must start on a sector boundary. */
	start = iocb->ki_pos;
	if (!IS_ALIGNED(start, fs_info->sectorsize))
		return -EINVAL;

	/*
	 * The extent must end on a sector boundary. However, we allow a write
	 * which ends at or extends i_size to have an unaligned length; we round
	 * up the extent size and set i_size to the unaligned end.
	 */
	if (start + encoded->len < inode->vfs_inode.i_size &&
	    !IS_ALIGNED(start + encoded->len, fs_info->sectorsize))
		return -EINVAL;

	/* Finally, the offset in the unencoded data must be sector-aligned. */
	if (!IS_ALIGNED(encoded->unencoded_offset, fs_info->sectorsize))
		return -EINVAL;

	num_bytes = ALIGN(encoded->len, fs_info->sectorsize);
	ram_bytes = ALIGN(encoded->unencoded_len, fs_info->sectorsize);
	end = start + num_bytes - 1;
	/*
	 * If the extent cannot be inline, the compressed data on disk must be
	 * sector-aligned. For convenience, we extend it with zeroes if it
	 * isn't.
	 */
	disk_num_bytes = ALIGN(orig_count, fs_info->sectorsize);
	nr_pages = DIV_ROUND_UP(disk_num_bytes, PAGE_SIZE);
	pages = kvcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL_ACCOUNT);
	if (!pages)
		return -ENOMEM;
	for (i = 0; i < nr_pages; i++) {
		size_t bytes = min_t(size_t, PAGE_SIZE, iov_iter_count(from));
		char *kaddr;

		pages[i] = alloc_page(GFP_KERNEL_ACCOUNT);
		if (!pages[i]) {
			ret = -ENOMEM;
			goto out_pages;
		}
		kaddr = kmap_local_page(pages[i]);
		if (copy_from_iter(kaddr, bytes, from) != bytes) {
			kunmap_local(kaddr);
			ret = -EFAULT;
			goto out_pages;
		}
		if (bytes < PAGE_SIZE)
			memset(kaddr + bytes, 0, PAGE_SIZE - bytes);
		kunmap_local(kaddr);
	}
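	/*
	 * Worked example for the copy loop above (illustrative numbers): with
	 * 4K pages and sectors and orig_count == 5000, disk_num_bytes ==
	 * ALIGN(5000, 4096) == 8192 and nr_pages == 2. The first page gets a
	 * full 4096 bytes from the iterator, the second gets the remaining
	 * 904, and the memset() zero-fills the trailing 3192 bytes so the
	 * compressed data written to disk is sector-aligned.
	 */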
	for (;;) {
		struct btrfs_ordered_extent *ordered;

		ret = btrfs_wait_ordered_range(&inode->vfs_inode, start, num_bytes);
		if (ret)
			goto out_pages;
		ret = invalidate_inode_pages2_range(inode->vfs_inode.i_mapping,
						    start >> PAGE_SHIFT,
						    end >> PAGE_SHIFT);
		if (ret)
			goto out_pages;
		lock_extent(io_tree, start, end, &cached_state);
		ordered = btrfs_lookup_ordered_range(inode, start, num_bytes);
		if (!ordered &&
		    !filemap_range_has_page(inode->vfs_inode.i_mapping, start, end))
			break;
		if (ordered)
			btrfs_put_ordered_extent(ordered);
		unlock_extent(io_tree, start, end, &cached_state);
		cond_resched();
	}
	/*
	 * We don't use the higher-level delalloc space functions because our
	 * num_bytes and disk_num_bytes are different.
	 */
	ret = btrfs_alloc_data_chunk_ondemand(inode, disk_num_bytes);
	if (ret)
		goto out_unlock;
	ret = btrfs_qgroup_reserve_data(inode, &data_reserved, start, num_bytes);
	if (ret)
		goto out_free_data_space;
	ret = btrfs_delalloc_reserve_metadata(inode, num_bytes, disk_num_bytes,
					      false);
	if (ret)
		goto out_qgroup_free_data;
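	/*
	 * Illustrative example of the asymmetry noted above: for a 1MiB file
	 * range whose data compressed down to 128KiB, num_bytes is 1MiB (the
	 * file range, used for the qgroup and metadata reservations) while
	 * disk_num_bytes is 128KiB (what will actually be allocated on disk),
	 * so the combined delalloc helpers, which assume the two are equal,
	 * cannot be used here.
	 */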
	/* Try an inline extent first. */
	if (start == 0 && encoded->unencoded_len == encoded->len &&
	    encoded->unencoded_offset == 0) {
		ret = cow_file_range_inline(inode, encoded->len, orig_count,
					    compression, pages, true);
		if (ret <= 0) {
			if (ret == 0)
				ret = orig_count;
			goto out_delalloc_release;
		}
	}

	ret = btrfs_reserve_extent(root, disk_num_bytes, disk_num_bytes,
				   disk_num_bytes, 0, 0, &ins, 1, 1);
	if (ret)
		goto out_delalloc_release;
	extent_reserved = true;
	em = create_io_em(inode, start, num_bytes,
			  start - encoded->unencoded_offset, ins.objectid,
			  ins.offset, ins.offset, ram_bytes, compression,
			  BTRFS_ORDERED_COMPRESSED);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_free_reserved;
	}
	free_extent_map(em);

	ordered = btrfs_alloc_ordered_extent(inode, start, num_bytes, ram_bytes,
					     ins.objectid, ins.offset,
					     encoded->unencoded_offset,
					     (1 << BTRFS_ORDERED_ENCODED) |
					     (1 << BTRFS_ORDERED_COMPRESSED),
					     compression);
	if (IS_ERR(ordered)) {
		btrfs_drop_extent_map_range(inode, start, end, false);
		ret = PTR_ERR(ordered);
		goto out_free_reserved;
	}
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);

	if (start + encoded->len > inode->vfs_inode.i_size)
		i_size_write(&inode->vfs_inode, start + encoded->len);

	unlock_extent(io_tree, start, end, &cached_state);

	btrfs_delalloc_release_extents(inode, num_bytes);

	btrfs_submit_compressed_write(ordered, pages, nr_pages, 0, false);
	ret = orig_count;
	goto out;
out_free_reserved:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_delalloc_release:
	btrfs_delalloc_release_extents(inode, num_bytes);
	btrfs_delalloc_release_metadata(inode, disk_num_bytes, ret < 0);
out_qgroup_free_data:
	if (ret < 0)
		btrfs_qgroup_free_data(inode, data_reserved, start, num_bytes, NULL);
out_free_data_space:
	/*
	 * If btrfs_reserve_extent() succeeded, then we already decremented
	 * bytes_may_use.
	 */
	if (!extent_reserved)
		btrfs_free_reserved_data_space_noquota(fs_info, disk_num_bytes);
out_unlock:
	unlock_extent(io_tree, start, end, &cached_state);
out_pages:
	for (i = 0; i < nr_pages; i++) {
		if (pages[i])
			__free_page(pages[i]);
	}
	kvfree(pages);
out:
	if (ret >= 0)
		iocb->ki_pos += encoded->len;
	return ret;
}
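/*
 * Illustrative userspace sketch (not part of this file): this path is reached
 * via the BTRFS_IOC_ENCODED_WRITE ioctl, roughly as below, with all values
 * hypothetical:
 *
 *	struct iovec iov = { .iov_base = zstd_buf, .iov_len = zstd_len };
 *	struct btrfs_ioctl_encoded_io_args args = {
 *		.iov = &iov,
 *		.iovcnt = 1,
 *		.offset = 0,			// sector-aligned file offset
 *		.len = file_len,		// logical extent length
 *		.unencoded_len = ram_len,	// decompressed size
 *		.compression = BTRFS_ENCODED_IO_COMPRESSION_ZSTD,
 *	};
 *	ret = ioctl(fd, BTRFS_IOC_ENCODED_WRITE, &args);
 *
 * This is how receivers of send streams generated with compressed data can
 * write pre-compressed extents without a decompress/recompress round trip.
 */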
#ifdef CONFIG_SWAP
/*
 * Add an entry indicating a block group or device which is pinned by a
 * swapfile. Returns 0 on success, 1 if there is already an entry for it, or a
 * negative errno on failure.
 */
static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr,
				  bool is_block_group)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct btrfs_swapfile_pin *sp, *entry;
	struct rb_node **p;
	struct rb_node *parent = NULL;

	sp = kmalloc(sizeof(*sp), GFP_NOFS);
	if (!sp)
		return -ENOMEM;
	sp->ptr = ptr;
	sp->inode = inode;
	sp->is_block_group = is_block_group;
	sp->bg_extent_count = 1;

	spin_lock(&fs_info->swapfile_pins_lock);
	p = &fs_info->swapfile_pins.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_swapfile_pin, node);
		if (sp->ptr < entry->ptr ||
		    (sp->ptr == entry->ptr && sp->inode < entry->inode)) {
			p = &(*p)->rb_left;
		} else if (sp->ptr > entry->ptr ||
			   (sp->ptr == entry->ptr && sp->inode > entry->inode)) {
			p = &(*p)->rb_right;
		} else {
			if (is_block_group)
				entry->bg_extent_count++;
			spin_unlock(&fs_info->swapfile_pins_lock);
			kfree(sp);
			return 1;
		}
	}
	rb_link_node(&sp->node, parent, p);
	rb_insert_color(&sp->node, &fs_info->swapfile_pins);
	spin_unlock(&fs_info->swapfile_pins_lock);
	return 0;
}
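/*
 * The swapfile_pins rbtree above is keyed on the (ptr, inode) pair, where ptr
 * is compared by address and may point to either a block group or a device.
 * A minimal sketch of the ordering predicate implied by the tree walk:
 *
 *	static bool swapfile_pin_less(const struct btrfs_swapfile_pin *a,
 *				      const struct btrfs_swapfile_pin *b)
 *	{
 *		return a->ptr < b->ptr ||
 *		       (a->ptr == b->ptr && a->inode < b->inode);
 *	}
 *
 * An exactly-equal key means the pin already exists; for block groups the
 * existing node's bg_extent_count is bumped instead of inserting a duplicate.
 */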
/* Free all of the entries pinned by this swapfile. */
static void btrfs_free_swapfile_pins(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct btrfs_swapfile_pin *sp;
	struct rb_node *node, *next;

	spin_lock(&fs_info->swapfile_pins_lock);
	node = rb_first(&fs_info->swapfile_pins);
	while (node) {
		next = rb_next(node);
		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
		if (sp->inode == inode) {
			rb_erase(&sp->node, &fs_info->swapfile_pins);
			if (sp->is_block_group) {
				btrfs_dec_block_group_swap_extents(sp->ptr,
							   sp->bg_extent_count);
				btrfs_put_block_group(sp->ptr);
			}
			kfree(sp);
		}
		node = next;
	}
	spin_unlock(&fs_info->swapfile_pins_lock);
}
struct btrfs_swap_info {
	u64 start;
	u64 block_start;
	u64 block_len;
	u64 lowest_ppage;
	u64 highest_ppage;
	unsigned long nr_pages;
	int nr_extents;
};
static int btrfs_add_swap_extent(struct swap_info_struct *sis,
				 struct btrfs_swap_info *bsi)
{
	unsigned long nr_pages;
	unsigned long max_pages;
	u64 first_ppage, first_ppage_reported, next_ppage;
	int ret;

	/*
	 * Our swapfile may have had its size extended after the swap header was
	 * written. In that case activating the swapfile should not go beyond
	 * the max size set in the swap header.
	 */
	if (bsi->nr_pages >= sis->max)
		return 0;

	max_pages = sis->max - bsi->nr_pages;
	first_ppage = PAGE_ALIGN(bsi->block_start) >> PAGE_SHIFT;
	next_ppage = PAGE_ALIGN_DOWN(bsi->block_start + bsi->block_len) >> PAGE_SHIFT;

	if (first_ppage >= next_ppage)
		return 0;
	nr_pages = next_ppage - first_ppage;
	nr_pages = min(nr_pages, max_pages);

	first_ppage_reported = first_ppage;
	if (bsi->start == 0)
		first_ppage_reported++;
	if (bsi->lowest_ppage > first_ppage_reported)
		bsi->lowest_ppage = first_ppage_reported;
	if (bsi->highest_ppage < (next_ppage - 1))
		bsi->highest_ppage = next_ppage - 1;

	ret = add_swap_extent(sis, bsi->nr_pages, nr_pages, first_ppage);
	if (ret < 0)
		return ret;
	bsi->nr_extents += ret;
	bsi->nr_pages += nr_pages;
	return 0;
}
static void btrfs_swap_deactivate(struct file *file)
{
	struct inode *inode = file_inode(file);

	btrfs_free_swapfile_pins(inode);
	atomic_dec(&BTRFS_I(inode)->root->nr_swapfiles);
}
static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
			       sector_t *span)
{
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_state *cached_state = NULL;
	struct extent_map *em = NULL;
	struct btrfs_chunk_map *map = NULL;
	struct btrfs_device *device = NULL;
	struct btrfs_swap_info bsi = {
		.lowest_ppage = (sector_t)-1ULL,
	};
	int ret = 0;
	u64 isize;
	u64 start;

	/*
	 * If the swap file was just created, make sure delalloc is done. If the
	 * file changes again after this, the user is doing something stupid and
	 * we don't really care.
	 */
	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
	if (ret)
		return ret;
	/*
	 * The inode is locked, so these flags won't change after we check them.
	 */
	if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) {
		btrfs_warn(fs_info, "swapfile must not be compressed");
		return -EINVAL;
	}
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) {
		btrfs_warn(fs_info, "swapfile must not be copy-on-write");
		return -EINVAL;
	}
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
		btrfs_warn(fs_info, "swapfile must not be checksummed");
		return -EINVAL;
	}
	/*
	 * Balance or device remove/replace/resize can move stuff around from
	 * under us. The exclop protection makes sure they aren't running/won't
	 * run concurrently while we are mapping the swap extents, and
	 * fs_info->swapfile_pins prevents them from running while the swap
	 * file is active and moving the extents. Note that this also prevents
	 * a concurrent device add which isn't actually necessary, but it's not
	 * really worth the trouble to allow it.
	 */
	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_SWAP_ACTIVATE)) {
		btrfs_warn(fs_info,
			   "cannot activate swapfile while exclusive operation is running");
		return -EBUSY;
	}

	/*
	 * Prevent snapshot creation while we are activating the swap file.
	 * We do not want to race with snapshot creation. If snapshot creation
	 * already started before we bumped nr_swapfiles from 0 to 1 and
	 * completes before the first write into the swap file after it is
	 * activated, then that write would fall back to COW.
	 */
	if (!btrfs_drew_try_write_lock(&root->snapshot_lock)) {
		btrfs_exclop_finish(fs_info);
		btrfs_warn(fs_info,
			   "cannot activate swapfile because snapshot creation is in progress");
		return -EINVAL;
	}
	/*
	 * Snapshots can create extents which require COW even if NODATACOW is
	 * set. We use this counter to prevent snapshots. We must increment it
	 * before walking the extents because we don't want a concurrent
	 * snapshot to run after we've already checked the extents.
	 *
	 * It is possible that the subvolume is marked for deletion but still
	 * not removed yet. To prevent this race, we check the root status
	 * before activating the swapfile.
	 */
	spin_lock(&root->root_item_lock);
	if (btrfs_root_dead(root)) {
		spin_unlock(&root->root_item_lock);

		btrfs_exclop_finish(fs_info);
		btrfs_warn(fs_info,
			   "cannot activate swapfile because subvolume %llu is being deleted",
			   root->root_key.objectid);
		return -EPERM;
	}
	atomic_inc(&root->nr_swapfiles);
	spin_unlock(&root->root_item_lock);
	isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);

	lock_extent(io_tree, 0, isize - 1, &cached_state);
	start = 0;
	while (start < isize) {
		u64 logical_block_start, physical_block_start;
		struct btrfs_block_group *bg;
		u64 len = isize - start;

		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}

		if (em->block_start == EXTENT_MAP_HOLE) {
			btrfs_warn(fs_info, "swapfile must not have holes");
			ret = -EINVAL;
			goto out;
		}
		if (em->block_start == EXTENT_MAP_INLINE) {
			/*
			 * It's unlikely we'll ever actually find ourselves
			 * here, as a file small enough to fit inline won't be
			 * big enough to store more than the swap header, but in
			 * case something changes in the future, let's catch it
			 * here rather than later.
			 */
			btrfs_warn(fs_info, "swapfile must not be inline");
			ret = -EINVAL;
			goto out;
		}
		if (extent_map_is_compressed(em)) {
			btrfs_warn(fs_info, "swapfile must not be compressed");
			ret = -EINVAL;
			goto out;
		}

		logical_block_start = em->block_start + (start - em->start);
		len = min(len, em->len - (start - em->start));
		free_extent_map(em);
		em = NULL;
		ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL,
				       false, true);
		if (ret < 0) {
			goto out;
		} else if (ret) {
			ret = 0;
		} else {
			btrfs_warn(fs_info,
				   "swapfile must not be copy-on-write");
			ret = -EINVAL;
			goto out;
		}
		map = btrfs_get_chunk_map(fs_info, logical_block_start, len);
		if (IS_ERR(map)) {
			ret = PTR_ERR(map);
			goto out;
		}

		if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
			btrfs_warn(fs_info,
				   "swapfile must have single data profile");
			ret = -EINVAL;
			goto out;
		}

		if (device == NULL) {
			device = map->stripes[0].dev;
			ret = btrfs_add_swapfile_pin(inode, device, false);
			if (ret == 1)
				ret = 0;
			else if (ret)
				goto out;
		} else if (device != map->stripes[0].dev) {
			btrfs_warn(fs_info, "swapfile must be on one device");
			ret = -EINVAL;
			goto out;
		}

		physical_block_start = (map->stripes[0].physical +
					(logical_block_start - map->start));
		len = min(len, map->chunk_len - (logical_block_start - map->start));
		btrfs_free_chunk_map(map);
		map = NULL;
		bg = btrfs_lookup_block_group(fs_info, logical_block_start);
		if (!bg) {
			btrfs_warn(fs_info,
				   "could not find block group containing swapfile");
			ret = -EINVAL;
			goto out;
		}

		if (!btrfs_inc_block_group_swap_extents(bg)) {
			btrfs_warn(fs_info,
				   "block group for swapfile at %llu is read-only%s",
				   bg->start,
				   atomic_read(&fs_info->scrubs_running) ?
					       " (scrub running)" : "");
			btrfs_put_block_group(bg);
			ret = -EINVAL;
			goto out;
		}

		ret = btrfs_add_swapfile_pin(inode, bg, true);
		if (ret) {
			btrfs_put_block_group(bg);
			if (ret == 1)
				ret = 0;
			else
				goto out;
		}
		if (bsi.block_len &&
		    bsi.block_start + bsi.block_len == physical_block_start) {
			bsi.block_len += len;
		} else {
			if (bsi.block_len) {
				ret = btrfs_add_swap_extent(sis, &bsi);
				if (ret)
					goto out;
			}
			bsi.start = start;
			bsi.block_start = physical_block_start;
			bsi.block_len = len;
		}

		start += len;
	}

	if (bsi.block_len)
		ret = btrfs_add_swap_extent(sis, &bsi);
out:
	if (!IS_ERR_OR_NULL(em))
		free_extent_map(em);
	if (!IS_ERR_OR_NULL(map))
		btrfs_free_chunk_map(map);

	unlock_extent(io_tree, 0, isize - 1, &cached_state);

	if (ret)
		btrfs_swap_deactivate(file);

	btrfs_drew_write_unlock(&root->snapshot_lock);

	btrfs_exclop_finish(fs_info);

	if (ret)
		return ret;

	if (device)
		sis->bdev = device->bdev;
	*span = bsi.highest_ppage - bsi.lowest_ppage + 1;
	sis->max = bsi.nr_pages;
	sis->pages = bsi.nr_pages - 1;
	sis->highest_bit = bsi.nr_pages - 1;
	return bsi.nr_extents;
}
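/*
 * The bsi bookkeeping in the loop above coalesces physically contiguous runs
 * before handing them to btrfs_add_swap_extent(). Illustrative example: two
 * file extents mapping to physical ranges [X, X + 1MiB) and
 * [X + 1MiB, X + 3MiB) satisfy bsi.block_start + bsi.block_len ==
 * physical_block_start, so they are reported as a single 3MiB swap extent
 * rather than two, keeping the number of swap extents small.
 */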
#else
static void btrfs_swap_deactivate(struct file *file)
{
}

static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
			       sector_t *span)
{
	return -EOPNOTSUPP;
}
#endif
/*
 * Update the number of bytes used in the VFS' inode. When we replace extents in
 * a range (clone, dedupe, fallocate's zero range), we must update the number of
 * bytes used by the inode in an atomic manner, so that concurrent stat(2) calls
 * always get a correct value.
 */
void btrfs_update_inode_bytes(struct btrfs_inode *inode,
			      const u64 add_bytes,
			      const u64 del_bytes)
{
	if (add_bytes == del_bytes)
		return;

	spin_lock(&inode->lock);
	if (del_bytes > 0)
		inode_sub_bytes(&inode->vfs_inode, del_bytes);
	if (add_bytes > 0)
		inode_add_bytes(&inode->vfs_inode, add_bytes);
	spin_unlock(&inode->lock);
}
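/*
 * Illustrative example: when a clone replaces a 64KiB extent in the middle of
 * a file with a 16KiB extent from another file, the caller passes
 * add_bytes = 16KiB and del_bytes = 64KiB. Doing the subtract and add under
 * inode->lock means a concurrent stat(2) sees either the old or the new
 * st_blocks value, never an intermediate state where the 64KiB has been
 * dropped but the 16KiB not yet added.
 */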
/*
 * Verify that there are no ordered extents for a given file range.
 *
 * @inode:   The target inode.
 * @start:   Start offset of the file range, should be sector size aligned.
 * @end:     End offset (inclusive) of the file range, its value + 1 should be
 *           sector size aligned.
 *
 * This should typically be used for cases where we locked an inode's VFS lock in
 * exclusive mode, we have also locked the inode's i_mmap_lock in exclusive mode,
 * we have flushed all delalloc in the range, we have waited for all ordered
 * extents in the range to complete and finally we have locked the file range in
 * the inode's io_tree.
 */
void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_ordered_extent *ordered;

	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
		return;

	ordered = btrfs_lookup_first_ordered_range(inode, start, end + 1 - start);
	if (ordered) {
		btrfs_err(root->fs_info,
"found unexpected ordered extent in file range [%llu, %llu] for inode %llu root %llu (ordered range [%llu, %llu])",
			  start, end, btrfs_ino(inode), root->root_key.objectid,
			  ordered->file_offset,
			  ordered->file_offset + ordered->num_bytes - 1);
		btrfs_put_ordered_extent(ordered);
	}

	ASSERT(ordered == NULL);
}
static const struct inode_operations btrfs_dir_inode_operations = {
	.getattr	= btrfs_getattr,
	.lookup		= btrfs_lookup,
	.create		= btrfs_create,
	.unlink		= btrfs_unlink,
	.link		= btrfs_link,
	.mkdir		= btrfs_mkdir,
	.rmdir		= btrfs_rmdir,
	.rename		= btrfs_rename2,
	.symlink	= btrfs_symlink,
	.setattr	= btrfs_setattr,
	.mknod		= btrfs_mknod,
	.listxattr	= btrfs_listxattr,
	.permission	= btrfs_permission,
	.get_inode_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
	.tmpfile	= btrfs_tmpfile,
	.fileattr_get	= btrfs_fileattr_get,
	.fileattr_set	= btrfs_fileattr_set,
};
static const struct file_operations btrfs_dir_file_operations = {
	.llseek		= btrfs_dir_llseek,
	.read		= generic_read_dir,
	.iterate_shared	= btrfs_real_readdir,
	.open		= btrfs_opendir,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_compat_ioctl,
#endif
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
};
/*
 * btrfs doesn't support the bmap operation because swapfiles
 * use bmap to make a mapping of extents in the file.  They assume
 * these extents won't change over the life of the file and they
 * use the bmap result to do IO directly to the drive.
 *
 * the btrfs bmap call would return logical addresses that aren't
 * suitable for IO and they also will change frequently as COW
 * operations happen.  So, swapfile + btrfs == corruption.
 *
 * For now we're avoiding this by dropping bmap.
 */
static const struct address_space_operations btrfs_aops = {
	.read_folio	= btrfs_read_folio,
	.writepages	= btrfs_writepages,
	.readahead	= btrfs_readahead,
	.invalidate_folio = btrfs_invalidate_folio,
	.release_folio	= btrfs_release_folio,
	.migrate_folio	= btrfs_migrate_folio,
	.dirty_folio	= filemap_dirty_folio,
	.error_remove_folio = generic_error_remove_folio,
	.swap_activate	= btrfs_swap_activate,
	.swap_deactivate = btrfs_swap_deactivate,
};
static const struct inode_operations btrfs_file_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.listxattr	= btrfs_listxattr,
	.permission	= btrfs_permission,
	.fiemap		= btrfs_fiemap,
	.get_inode_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
	.fileattr_get	= btrfs_fileattr_get,
	.fileattr_set	= btrfs_fileattr_set,
};
static const struct inode_operations btrfs_special_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.listxattr	= btrfs_listxattr,
	.get_inode_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
};
static const struct inode_operations btrfs_symlink_inode_operations = {
	.get_link	= page_get_link,
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.listxattr	= btrfs_listxattr,
	.update_time	= btrfs_update_time,
};
const struct dentry_operations btrfs_dentry_operations = {
	.d_delete	= btrfs_dentry_delete,
};