// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */
#include <crypto/hash.h>
#include <linux/kernel.h>
#include <linux/blk-cgroup.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include <linux/uio.h>
#include <linux/magic.h>
#include <linux/iversion.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/sched/mm.h>
#include <linux/iomap.h>
#include <asm/unaligned.h>
#include <linux/fsverity.h>
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "compression.h"
#include "free-space-cache.h"
#include "delalloc-space.h"
#include "block-group.h"
#include "space-info.h"
#include "inode-item.h"
#include "accessors.h"
#include "extent-tree.h"
#include "root-tree.h"
#include "file-item.h"
#include "uuid-tree.h"
#include "relocation.h"
struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};
struct btrfs_dio_data {
	struct extent_changeset *data_reserved;
	struct btrfs_ordered_extent *ordered;
	bool data_space_reserved;
};
struct btrfs_dio_private {
	/* This must be last */
	struct btrfs_bio bbio;
};
static struct bio_set btrfs_dio_bioset;
struct btrfs_rename_ctx {
	/* Output field. Stores the index number of the old directory entry. */
	u64 index;
};
/*
 * Used by data_reloc_print_warning_inode() to pass needed info for filename
 * resolution and output of error message.
 */
struct data_reloc_warn {
	struct btrfs_path path;
	struct btrfs_fs_info *fs_info;
	u64 extent_item_size;
	u64 logical;
	int mirror_num;
};
static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct file_operations btrfs_dir_file_operations;

static struct kmem_cache *btrfs_inode_cachep;
static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback);

static noinline int run_delalloc_cow(struct btrfs_inode *inode,
				     struct page *locked_page, u64 start,
				     u64 end, struct writeback_control *wbc,
				     bool pages_dirty);
static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
				       u64 len, u64 orig_start, u64 block_start,
				       u64 block_len, u64 orig_block_len,
				       u64 ram_bytes, int compress_type,
				       int type);
static int data_reloc_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
					  u64 root, void *warn_ctx)
{
	struct data_reloc_warn *warn = warn_ctx;
	struct btrfs_fs_info *fs_info = warn->fs_info;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key key;
	unsigned int nofs_flag;
	u32 nlink;
	int ret;

	local_root = btrfs_get_fs_root(fs_info, root, true);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	/* This makes the path point to (inum INODE_ITEM ioff). */
	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, local_root, &key, &warn->path, 0, 0);
	if (ret) {
		btrfs_put_root(local_root);
		btrfs_release_path(&warn->path);
		goto err;
	}

	eb = warn->path.nodes[0];
	inode_item = btrfs_item_ptr(eb, warn->path.slots[0], struct btrfs_inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(&warn->path);

	nofs_flag = memalloc_nofs_save();
	ipath = init_ipath(4096, local_root, &warn->path);
	memalloc_nofs_restore(nofs_flag);
	if (IS_ERR(ipath)) {
		btrfs_put_root(local_root);
		ret = PTR_ERR(ipath);
		ipath = NULL;
		/*
		 * -ENOMEM, not a critical error, just output a generic error
		 * and continue.
		 */
		btrfs_warn_rl(fs_info,
"checksum error at logical %llu mirror %u root %llu, inode %llu offset %llu",
			      warn->logical, warn->mirror_num, root, inum, offset);
		return ret;
	}
	ret = paths_from_inode(inum, ipath);
	if (ret < 0)
		goto err;

	/*
	 * We deliberately ignore the fact that ipath might have been too small
	 * to hold all of the paths here.
	 */
	for (int i = 0; i < ipath->fspath->elem_cnt; i++) {
		btrfs_warn_rl(fs_info,
"checksum error at logical %llu mirror %u root %llu inode %llu offset %llu length %u links %u (path: %s)",
			      warn->logical, warn->mirror_num, root, inum, offset,
			      fs_info->sectorsize, nlink,
			      (char *)(unsigned long)ipath->fspath->val[i]);
	}

	btrfs_put_root(local_root);
	free_ipath(ipath);
	return 0;

err:
	btrfs_warn_rl(fs_info,
"checksum error at logical %llu mirror %u root %llu inode %llu offset %llu, path resolving failed with ret=%d",
		      warn->logical, warn->mirror_num, root, inum, offset, ret);

	free_ipath(ipath);
	return ret;
}
/*
 * Do extra user-friendly error output (e.g. lookup all the affected files).
 *
 * If the backref lookup succeeds, a detailed message is printed for every
 * affected file; otherwise we fall back to the old, less detailed error
 * message.
 */
static void print_data_reloc_error(const struct btrfs_inode *inode, u64 file_off,
				   const u8 *csum, const u8 *csum_expected,
				   int mirror_num)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_path path = { 0 };
	struct btrfs_key found_key = { 0 };
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	const u32 csum_size = fs_info->csum_size;
	u64 logical;
	u64 flags;
	u32 item_size;
	int ret;

	mutex_lock(&fs_info->reloc_mutex);
	logical = btrfs_get_reloc_bg_bytenr(fs_info);
	mutex_unlock(&fs_info->reloc_mutex);

	if (logical == U64_MAX) {
		btrfs_warn_rl(fs_info, "has data reloc tree but no running relocation");
		btrfs_warn_rl(fs_info,
"csum failed root %lld ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			inode->root->root_key.objectid, btrfs_ino(inode), file_off,
			CSUM_FMT_VALUE(csum_size, csum),
			CSUM_FMT_VALUE(csum_size, csum_expected),
			mirror_num);
		return;
	}

	logical += file_off;
	btrfs_warn_rl(fs_info,
"csum failed root %lld ino %llu off %llu logical %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			inode->root->root_key.objectid,
			btrfs_ino(inode), file_off, logical,
			CSUM_FMT_VALUE(csum_size, csum),
			CSUM_FMT_VALUE(csum_size, csum_expected),
			mirror_num);

	ret = extent_from_logical(fs_info, logical, &path, &found_key, &flags);
	if (ret < 0) {
		btrfs_err_rl(fs_info, "failed to lookup extent item for logical %llu: %d",
			     logical, ret);
		return;
	}
	eb = path.nodes[0];
	ei = btrfs_item_ptr(eb, path.slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size(eb, path.slots[0]);
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		unsigned long ptr = 0;
		u64 ref_root;
		u8 ref_level;

		while (true) {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			if (ret < 0) {
				btrfs_warn_rl(fs_info,
				"failed to resolve tree backref for logical %llu: %d",
					      logical, ret);
				break;
			}
			if (ret > 0)
				break;

			btrfs_warn_rl(fs_info,
"csum error at logical %llu mirror %u: metadata %s (level %d) in tree %llu",
				logical, mirror_num,
				(ref_level ? "node" : "leaf"),
				ref_level, ref_root);
		}
		btrfs_release_path(&path);
	} else {
		struct btrfs_backref_walk_ctx ctx = { 0 };
		struct data_reloc_warn reloc_warn = { 0 };

		btrfs_release_path(&path);

		ctx.bytenr = found_key.objectid;
		ctx.extent_item_pos = logical - found_key.objectid;
		ctx.fs_info = fs_info;

		reloc_warn.logical = logical;
		reloc_warn.extent_item_size = found_key.offset;
		reloc_warn.mirror_num = mirror_num;
		reloc_warn.fs_info = fs_info;

		iterate_extent_inodes(&ctx, true,
				data_reloc_print_warning_inode, &reloc_warn);
	}
}
static void __cold btrfs_print_data_csum_error(struct btrfs_inode *inode,
		u64 logical_start, u8 *csum, u8 *csum_expected, int mirror_num)
{
	struct btrfs_root *root = inode->root;
	const u32 csum_size = root->fs_info->csum_size;

	/* For data reloc tree, it's better to do a backref lookup instead. */
	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
		return print_data_reloc_error(inode, logical_start, csum,
					      csum_expected, mirror_num);

	/* Output without objectid, which is more meaningful */
	if (root->root_key.objectid >= BTRFS_LAST_FREE_OBJECTID) {
		btrfs_warn_rl(root->fs_info,
"csum failed root %lld ino %lld off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			root->root_key.objectid, btrfs_ino(inode),
			logical_start,
			CSUM_FMT_VALUE(csum_size, csum),
			CSUM_FMT_VALUE(csum_size, csum_expected),
			mirror_num);
	} else {
		btrfs_warn_rl(root->fs_info,
"csum failed root %llu ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			root->root_key.objectid, btrfs_ino(inode),
			logical_start,
			CSUM_FMT_VALUE(csum_size, csum),
			CSUM_FMT_VALUE(csum_size, csum_expected),
			mirror_num);
	}
}
/*
 * btrfs_inode_lock - lock inode i_rwsem based on arguments passed
 *
 * ilock_flags can have the following bits set:
 *
 * BTRFS_ILOCK_SHARED - acquire a shared lock on the inode
 * BTRFS_ILOCK_TRY - try to acquire the lock; if it fails on the first attempt,
 *		     return -EAGAIN
 * BTRFS_ILOCK_MMAP - acquire a write lock on the i_mmap_lock
 */
int btrfs_inode_lock(struct btrfs_inode *inode, unsigned int ilock_flags)
{
	if (ilock_flags & BTRFS_ILOCK_SHARED) {
		if (ilock_flags & BTRFS_ILOCK_TRY) {
			if (!inode_trylock_shared(&inode->vfs_inode))
				return -EAGAIN;
			else
				return 0;
		}
		inode_lock_shared(&inode->vfs_inode);
	} else {
		if (ilock_flags & BTRFS_ILOCK_TRY) {
			if (!inode_trylock(&inode->vfs_inode))
				return -EAGAIN;
			else
				return 0;
		}
		inode_lock(&inode->vfs_inode);
	}
	if (ilock_flags & BTRFS_ILOCK_MMAP)
		down_write(&inode->i_mmap_lock);
	return 0;
}
/*
 * btrfs_inode_unlock - unlock inode i_rwsem
 *
 * ilock_flags should contain the same bits set as passed to btrfs_inode_lock()
 * to decide whether the lock acquired is shared or exclusive.
 */
void btrfs_inode_unlock(struct btrfs_inode *inode, unsigned int ilock_flags)
{
	if (ilock_flags & BTRFS_ILOCK_MMAP)
		up_write(&inode->i_mmap_lock);
	if (ilock_flags & BTRFS_ILOCK_SHARED)
		inode_unlock_shared(&inode->vfs_inode);
	else
		inode_unlock(&inode->vfs_inode);
}
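
/*
 * Illustrative usage sketch (editor's addition, not from the original file):
 * a caller that wants the exclusive i_rwsem plus the mmap lock without
 * blocking would pair the flags like this; the surrounding logic is
 * hypothetical.
 *
 *	if (btrfs_inode_lock(inode, BTRFS_ILOCK_TRY | BTRFS_ILOCK_MMAP))
 *		return -EAGAIN;
 *	// ... modify the inode's mapping ...
 *	btrfs_inode_unlock(inode, BTRFS_ILOCK_TRY | BTRFS_ILOCK_MMAP);
 *
 * The same flag set must be passed to both calls so the unlock side releases
 * exactly the locks that were taken.
 */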
/*
 * Cleanup all submitted ordered extents in specified range to handle errors
 * from the btrfs_run_delalloc_range() callback.
 *
 * NOTE: caller must ensure that when an error happens, it can not call
 * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
 * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata
 * to be released, which we want to happen only when finishing the ordered
 * extent (btrfs_finish_ordered_io()).
 */
static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
						 struct page *locked_page,
						 u64 offset, u64 bytes)
{
	unsigned long index = offset >> PAGE_SHIFT;
	unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
	u64 page_start = 0, page_end = 0;
	struct page *page;

	if (locked_page) {
		page_start = page_offset(locked_page);
		page_end = page_start + PAGE_SIZE - 1;
	}

	while (index <= end_index) {
		/*
		 * For the locked page, btrfs_mark_ordered_io_finished() is
		 * called on it in run_delalloc_range() for the error handling,
		 * which will clear page Ordered and run the ordered extent
		 * accounting.
		 *
		 * Here we can't just clear the Ordered bit, or
		 * btrfs_mark_ordered_io_finished() would skip the accounting
		 * for the page range, and the ordered extent will never finish.
		 */
		if (locked_page && index == (page_start >> PAGE_SHIFT)) {
			index++;
			continue;
		}
		page = find_get_page(inode->vfs_inode.i_mapping, index);
		index++;
		if (!page)
			continue;

		/*
		 * Here we just clear all Ordered bits for every page in the
		 * range, then btrfs_mark_ordered_io_finished() will handle
		 * the ordered extent accounting for the range.
		 */
		btrfs_page_clamp_clear_ordered(inode->root->fs_info, page,
					       offset, bytes);
		put_page(page);
	}

	if (locked_page) {
		/* The locked page covers the full range, nothing needs to be done */
		if (bytes + offset <= page_start + PAGE_SIZE)
			return;
		/*
		 * In case this page belongs to the delalloc range being
		 * instantiated then skip it, since the first page of a range is
		 * going to be properly cleaned up by the caller of
		 * run_delalloc_range
		 */
		if (page_start >= offset && page_end <= (offset + bytes - 1)) {
			bytes = offset + bytes - page_offset(locked_page) - PAGE_SIZE;
			offset = page_offset(locked_page) + PAGE_SIZE;
		}
	}

	return btrfs_mark_ordered_io_finished(inode, NULL, offset, bytes, false);
}
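
/*
 * Worked example for the trimming above (editor's addition, illustrative):
 * with 4K pages, offset == 0, bytes == 16K and locked_page covering [0, 4K),
 * the adjustment yields offset == 4K and bytes == 12K before calling
 * btrfs_mark_ordered_io_finished(), since the first page of the delalloc
 * range is cleaned up by the caller of run_delalloc_range().
 */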
static int btrfs_dirty_inode(struct btrfs_inode *inode);
static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct btrfs_new_inode_args *args)
{
	int err;

	if (args->default_acl) {
		err = __btrfs_set_acl(trans, args->inode, args->default_acl,
				      ACL_TYPE_DEFAULT);
		if (err)
			return err;
	}
	if (args->acl) {
		err = __btrfs_set_acl(trans, args->inode, args->acl, ACL_TYPE_ACCESS);
		if (err)
			return err;
	}
	if (!args->default_acl && !args->acl)
		cache_no_acl(args->inode);
	return btrfs_xattr_security_init(trans, args->inode, args->dir,
					 &args->dentry->d_name);
}
/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_path *path,
				struct btrfs_inode *inode, bool extent_inserted,
				size_t size, size_t compressed_size,
				int compress_type,
				struct page **compressed_pages,
				bool update_i_size)
{
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int ret;
	size_t cur_size = size;
	u64 i_size;

	ASSERT((compressed_size > 0 && compressed_pages) ||
	       (compressed_size == 0 && !compressed_pages));

	if (compressed_size && compressed_pages)
		cur_size = compressed_size;

	if (!extent_inserted) {
		struct btrfs_key key;
		size_t datasize;

		key.objectid = btrfs_ino(inode);
		key.offset = 0;
		key.type = BTRFS_EXTENT_DATA_KEY;

		datasize = btrfs_file_extent_calc_inline_size(cur_size);
		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      datasize);
		if (ret)
			goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (compress_type != BTRFS_COMPRESS_NONE) {
		struct page *cpage;
		int i = 0;

		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min_t(unsigned long, compressed_size,
					 PAGE_SIZE);

			kaddr = kmap_local_page(cpage);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap_local(kaddr);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  compress_type);
	} else {
		page = find_get_page(inode->vfs_inode.i_mapping, 0);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_local_page(page);
		write_extent_buffer(leaf, kaddr, ptr, size);
		kunmap_local(kaddr);
		put_page(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	/*
	 * We align size to sectorsize for inline extents just for simplicity
	 * sake.
	 */
	ret = btrfs_inode_set_file_extent_range(inode, 0,
					ALIGN(size, root->fs_info->sectorsize));
	if (ret)
		goto fail;

	/*
	 * We're an inline extent, so nobody can extend the file past i_size
	 * without locking a page we already have locked.
	 *
	 * We must do any i_size and inode updates before we unlock the pages.
	 * Otherwise we could end up racing with unlink.
	 */
	i_size = i_size_read(&inode->vfs_inode);
	if (update_i_size && size > i_size) {
		i_size_write(&inode->vfs_inode, size);
		i_size = size;
	}
	inode->disk_i_size = i_size;

fail:
	return ret;
}
/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 size,
					  size_t compressed_size,
					  int compress_type,
					  struct page **compressed_pages,
					  bool update_i_size)
{
	struct btrfs_drop_extents_args drop_args = { 0 };
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	u64 data_len = (compressed_size ?: size);
	int ret;
	struct btrfs_path *path;

	/*
	 * We can create an inline extent if it ends at or beyond the current
	 * i_size, is no larger than a sector (decompressed), and the (possibly
	 * compressed) data fits in a leaf and the configured maximum inline
	 * size.
	 */
	if (size < i_size_read(&inode->vfs_inode) ||
	    size > fs_info->sectorsize ||
	    data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
	    data_len > fs_info->max_inline)
		return 1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	trans->block_rsv = &inode->block_rsv;

	drop_args.path = path;
	drop_args.start = 0;
	drop_args.end = fs_info->sectorsize;
	drop_args.drop_cache = true;
	drop_args.replace_extent = true;
	drop_args.extent_item_size = btrfs_file_extent_calc_inline_size(data_len);
	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = insert_inline_extent(trans, path, inode, drop_args.extent_inserted,
				   size, compressed_size, compress_type,
				   compressed_pages, update_i_size);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	btrfs_update_inode_bytes(inode, size, drop_args.bytes_found);
	ret = btrfs_update_inode(trans, root, inode);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	btrfs_set_inode_full_sync(inode);
out:
	/*
	 * Don't forget to free the reserved space, as for inlined extent
	 * it won't count as data extent, free them directly here.
	 * And at reserve time, it's always aligned to page size, so
	 * just free one page here.
	 */
	btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE);
	btrfs_free_path(path);
	btrfs_end_transaction(trans);
	return ret;
}
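
/*
 * Worked example for the eligibility checks above (editor's addition; the
 * concrete numbers are assumptions, max_inline defaults to 2048 bytes on
 * current kernels): with a 4K sectorsize, a 1500 byte file written at offset
 * 0 passes all four checks (it ends at i_size, is smaller than a sector, fits
 * in a leaf and is below max_inline), so it can be stored inline.  A 3000
 * byte file fails only the max_inline check and falls back to a regular,
 * sector sized extent.
 */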
struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
	int compress_type;
	struct list_head list;
};

struct async_chunk {
	struct btrfs_inode *inode;
	struct page *locked_page;
	u64 start;
	u64 end;
	blk_opf_t write_flags;
	struct list_head extents;
	struct cgroup_subsys_state *blkcg_css;
	struct btrfs_work work;
	struct async_cow *async_cow;
};

struct async_cow {
	atomic_t num_chunks;
	struct async_chunk chunks[];
};
static noinline int add_async_extent(struct async_chunk *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages,
				     int compress_type)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	BUG_ON(!async_extent); /* -ENOMEM */
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->pages = pages;
	async_extent->nr_pages = nr_pages;
	async_extent->compress_type = compress_type;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}
/*
 * Check if the inode needs to be submitted to compression, based on mount
 * options, defragmentation, properties or heuristics.
 */
static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
				      u64 end)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	if (!btrfs_inode_can_compress(inode)) {
		WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
			KERN_ERR "BTRFS: unexpected compression for ino %llu\n",
			btrfs_ino(inode));
		return 0;
	}
	/*
	 * Special check for subpage.
	 *
	 * We lock the full page then run each delalloc range in the page, thus
	 * for the following case, we will hit some subpage specific corner case:
	 *
	 * 0		32K		64K
	 * |	|///////|	|///////|
	 *		\- A		\- B
	 *
	 * In above case, both range A and range B will try to unlock the full
	 * page [0, 64K), causing the one finished later will have page
	 * unlocked already, triggering various page lock requirement BUG_ON()s.
	 *
	 * So here we add an artificial limit that subpage compression can only
	 * happen if the range is fully page aligned.
	 *
	 * In theory we only need to ensure the first page is fully covered, but
	 * the tailing partial page will be locked until the full compression
	 * finishes, delaying the write of other range.
	 *
	 * TODO: Make btrfs_run_delalloc_range() lock all delalloc ranges
	 * first to prevent any submitted async extent from unlocking the full
	 * page.  By this, we can ensure for subpage case that only the last
	 * async_cow will unlock the full page.
	 */
	if (fs_info->sectorsize < PAGE_SIZE) {
		if (!PAGE_ALIGNED(start) ||
		    !PAGE_ALIGNED(end + 1))
			return 0;
	}

	/* force compress */
	if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
		return 1;
	/* defrag ioctl */
	if (inode->defrag_compress)
		return 1;
	/* bad compression ratios */
	if (inode->flags & BTRFS_INODE_NOCOMPRESS)
		return 0;
	if (btrfs_test_opt(fs_info, COMPRESS) ||
	    inode->flags & BTRFS_INODE_COMPRESS ||
	    inode->prop_compress)
		return btrfs_compress_heuristic(&inode->vfs_inode, start, end);
	return 0;
}
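
/*
 * Example for the subpage alignment limit above (editor's addition,
 * illustrative): on a 64K page / 4K sectorsize setup, a dirty range
 * [16K, 48K) is sector aligned but not page aligned, so the check refuses
 * compression for it; a range covering [0, 64K) is accepted.  This sidesteps
 * the double-unlock problem shown in the diagram.
 */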
static inline void inode_should_defrag(struct btrfs_inode *inode,
				       u64 start, u64 end, u64 num_bytes, u32 small_write)
{
	/* If this is a small write inside eof, kick off a defrag */
	if (num_bytes < small_write &&
	    (start > 0 || end + 1 < inode->disk_i_size))
		btrfs_add_inode_defrag(NULL, inode, small_write);
}
/*
 * Work queue callback to start compression on a file and pages.
 *
 * This is done inside an ordered work queue, and the compression is spread
 * across many cpus.  The actual IO submission is step two, and the ordered work
 * queue takes care of making sure that happens in the same order things were
 * put onto the queue by writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an entry onto the
 * work queue to write the uncompressed bytes.  This makes sure that both
 * compressed inodes and uncompressed inodes are written in the same order that
 * the flusher thread sent them down.
 */
static void compress_file_range(struct btrfs_work *work)
{
	struct async_chunk *async_chunk =
		container_of(work, struct async_chunk, work);
	struct btrfs_inode *inode = async_chunk->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	u64 blocksize = fs_info->sectorsize;
	u64 start = async_chunk->start;
	u64 end = async_chunk->end;
	u64 actual_end;
	u64 i_size;
	int ret = 0;
	struct page **pages;
	unsigned long nr_pages;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	unsigned int poff;
	int i;
	int compress_type = fs_info->compress_type;

	inode_should_defrag(inode, start, end, end - start + 1, SZ_16K);

	/*
	 * We need to call clear_page_dirty_for_io on each page in the range.
	 * Otherwise applications with the file mmap'd can wander in and change
	 * the page contents while we are compressing them.
	 */
	extent_range_clear_dirty_for_io(&inode->vfs_inode, start, end);

	/*
	 * We need to save i_size before now because it could change in between
	 * us evaluating the size and assigning it.  This is because we lock and
	 * unlock the page in truncate and fallocate, and then modify the i_size
	 * later on.
	 *
	 * The barriers are to emulate READ_ONCE, remove that once i_size_read
	 * does that for us.
	 */
	barrier();
	i_size = i_size_read(&inode->vfs_inode);
	barrier();
	actual_end = min_t(u64, i_size, end + 1);

again:
	pages = NULL;
	nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
	nr_pages = min_t(unsigned long, nr_pages, BTRFS_MAX_COMPRESSED_PAGES);

	/*
	 * we don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time.  So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/*
	 * Skip compression for a small file range (<= blocksize) that
	 * isn't an inline extent, since it doesn't save disk space at all.
	 */
	if (total_compressed <= blocksize &&
	    (start > 0 || end + 1 < inode->disk_i_size))
		goto cleanup_and_bail_uncompressed;

	/*
	 * For subpage case, we require full page alignment for the sector
	 * aligned range.
	 * Thus we must also check against @actual_end, not just @end.
	 */
	if (blocksize < PAGE_SIZE) {
		if (!PAGE_ALIGNED(start) ||
		    !PAGE_ALIGNED(round_up(actual_end, blocksize)))
			goto cleanup_and_bail_uncompressed;
	}

	total_compressed = min_t(unsigned long, total_compressed,
			BTRFS_MAX_UNCOMPRESSED);
	total_in = 0;
	ret = 0;

	/*
	 * We do compression for mount -o compress and when the inode has not
	 * been flagged as NOCOMPRESS.  This flag can change at any time if we
	 * discover bad compression ratios.
	 */
	if (!inode_need_compress(inode, start, end))
		goto cleanup_and_bail_uncompressed;

	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
	if (!pages) {
		/*
		 * Memory allocation failure is not a fatal error, we can fall
		 * back to uncompressed code.
		 */
		goto cleanup_and_bail_uncompressed;
	}

	if (inode->defrag_compress)
		compress_type = inode->defrag_compress;
	else if (inode->prop_compress)
		compress_type = inode->prop_compress;

	/* Compression level is applied here. */
	ret = btrfs_compress_pages(compress_type | (fs_info->compress_level << 4),
				   mapping, start, pages, &nr_pages, &total_in,
				   &total_compressed);
	if (ret)
		goto mark_incompressible;

	/*
	 * Zero the tail end of the last page, as we might be sending it down
	 * to disk.
	 */
	poff = offset_in_page(total_compressed);
	if (poff)
		memzero_page(pages[nr_pages - 1], poff, PAGE_SIZE - poff);

	/*
	 * Try to create an inline extent.
	 *
	 * If we didn't compress the entire range, try to create an uncompressed
	 * inline extent, else a compressed one.
	 *
	 * Check cow_file_range() for why we don't even try to create inline
	 * extent for the subpage case.
	 */
	if (start == 0 && fs_info->sectorsize == PAGE_SIZE) {
		if (total_in < actual_end) {
			ret = cow_file_range_inline(inode, actual_end, 0,
						    BTRFS_COMPRESS_NONE, NULL,
						    false);
		} else {
			ret = cow_file_range_inline(inode, actual_end,
						    total_compressed,
						    compress_type, pages,
						    false);
		}
		if (ret <= 0) {
			unsigned long clear_flags = EXTENT_DELALLOC |
				EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
				EXTENT_DO_ACCOUNTING;

			if (ret < 0)
				mapping_set_error(mapping, -EIO);

			/*
			 * inline extent creation worked or returned error,
			 * we don't need to create any more async work items.
			 * Unlock and free up our temp pages.
			 *
			 * We use DO_ACCOUNTING here because we need the
			 * delalloc_release_metadata to be done _after_ we drop
			 * our outstanding extent for clearing delalloc for this
			 * range.
			 */
			extent_clear_unlock_delalloc(inode, start, end,
						     NULL,
						     clear_flags,
						     PAGE_UNLOCK |
						     PAGE_START_WRITEBACK |
						     PAGE_END_WRITEBACK);
			goto free_pages;
		}
	}

	/*
	 * We aren't doing an inline extent. Round the compressed size up to a
	 * block size boundary so the allocator does sane things.
	 */
	total_compressed = ALIGN(total_compressed, blocksize);

	/*
	 * One last check to make sure the compression is really a win, compare
	 * the page count read with the blocks on disk, compression must free at
	 * least one sector.
	 */
	total_in = round_up(total_in, fs_info->sectorsize);
	if (total_compressed + blocksize > total_in)
		goto mark_incompressible;

	/*
	 * The async work queues will take care of doing actual allocation on
	 * disk for these compressed pages, and will submit the bios.
	 */
	add_async_extent(async_chunk, start, total_in, total_compressed, pages,
			 nr_pages, compress_type);
	if (start + total_in < end) {
		start += total_in;
		cond_resched();
		goto again;
	}
	return;

mark_incompressible:
	if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) && !inode->prop_compress)
		inode->flags |= BTRFS_INODE_NOCOMPRESS;
cleanup_and_bail_uncompressed:
	add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0,
			 BTRFS_COMPRESS_NONE);
free_pages:
	if (pages) {
		for (i = 0; i < nr_pages; i++) {
			WARN_ON(pages[i]->mapping);
			put_page(pages[i]);
		}
		kfree(pages);
	}
}
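
/*
 * Worked example for the "is compression a win" check above (editor's
 * addition, illustrative): with a 4K blocksize, a 16K range that compresses
 * to 13K is rounded up to total_compressed == 16K by the ALIGN() above, and
 * 16K + 4K > 16K, so the range is marked incompressible.  Compressing 16K
 * down to 12K keeps 12K + 4K <= 16K and the compressed extent is queued.
 */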
static void free_async_extent_pages(struct async_extent *async_extent)
{
	int i;

	if (!async_extent->pages)
		return;

	for (i = 0; i < async_extent->nr_pages; i++) {
		WARN_ON(async_extent->pages[i]->mapping);
		put_page(async_extent->pages[i]);
	}
	kfree(async_extent->pages);
	async_extent->nr_pages = 0;
	async_extent->pages = NULL;
}
static void submit_uncompressed_range(struct btrfs_inode *inode,
				      struct async_extent *async_extent,
				      struct page *locked_page)
{
	u64 start = async_extent->start;
	u64 end = async_extent->start + async_extent->ram_size - 1;
	int ret;
	struct writeback_control wbc = {
		.sync_mode		= WB_SYNC_ALL,
		.range_start		= start,
		.range_end		= end,
		.no_cgroup_owner	= 1,
	};

	wbc_attach_fdatawrite_inode(&wbc, &inode->vfs_inode);
	ret = run_delalloc_cow(inode, locked_page, start, end, &wbc, false);
	wbc_detach_inode(&wbc);
	if (ret < 0) {
		btrfs_cleanup_ordered_extents(inode, locked_page, start, end - start + 1);
		if (locked_page) {
			const u64 page_start = page_offset(locked_page);

			set_page_writeback(locked_page);
			end_page_writeback(locked_page);
			btrfs_mark_ordered_io_finished(inode, locked_page,
						       page_start, PAGE_SIZE,
						       !ret);
			mapping_set_error(locked_page->mapping, ret);
			unlock_page(locked_page);
		}
	}
}
static void submit_one_async_extent(struct async_chunk *async_chunk,
				    struct async_extent *async_extent,
				    u64 *alloc_hint)
{
	struct btrfs_inode *inode = async_chunk->inode;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_key ins;
	struct page *locked_page = NULL;
	struct extent_map *em;
	int ret = 0;
	u64 start = async_extent->start;
	u64 end = async_extent->start + async_extent->ram_size - 1;

	if (async_chunk->blkcg_css)
		kthread_associate_blkcg(async_chunk->blkcg_css);

	/*
	 * If async_chunk->locked_page is in the async_extent range, we need to
	 * handle it.
	 */
	if (async_chunk->locked_page) {
		u64 locked_page_start = page_offset(async_chunk->locked_page);
		u64 locked_page_end = locked_page_start + PAGE_SIZE - 1;

		if (!(start >= locked_page_end || end <= locked_page_start))
			locked_page = async_chunk->locked_page;
	}
	lock_extent(io_tree, start, end, NULL);

	if (async_extent->compress_type == BTRFS_COMPRESS_NONE) {
		submit_uncompressed_range(inode, async_extent, locked_page);
		goto done;
	}

	ret = btrfs_reserve_extent(root, async_extent->ram_size,
				   async_extent->compressed_size,
				   async_extent->compressed_size,
				   0, *alloc_hint, &ins, 1, 1);
	if (ret) {
		/*
		 * Here we used to try again by going back to non-compressed
		 * path for ENOSPC.  But we can't reserve space even for
		 * compressed size, how could it work for uncompressed size
		 * which requires larger size?  So here we directly go error
		 * path.
		 */
		goto out_free;
	}

	/* Here we're doing allocation and writeback of the compressed pages */
	em = create_io_em(inode, start,
			  async_extent->ram_size,	/* len */
			  start,			/* orig_start */
			  ins.objectid,			/* block_start */
			  ins.offset,			/* block_len */
			  ins.offset,			/* orig_block_len */
			  async_extent->ram_size,	/* ram_bytes */
			  async_extent->compress_type,
			  BTRFS_ORDERED_COMPRESSED);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_free_reserve;
	}
	free_extent_map(em);

	ordered = btrfs_alloc_ordered_extent(inode, start,	/* file_offset */
				       async_extent->ram_size,	/* num_bytes */
				       async_extent->ram_size,	/* ram_bytes */
				       ins.objectid,		/* disk_bytenr */
				       ins.offset,		/* disk_num_bytes */
				       0,			/* offset */
				       1 << BTRFS_ORDERED_COMPRESSED,
				       async_extent->compress_type);
	if (IS_ERR(ordered)) {
		btrfs_drop_extent_map_range(inode, start, end, false);
		ret = PTR_ERR(ordered);
		goto out_free_reserve;
	}
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);

	/* Clear dirty, set writeback and unlock the pages. */
	extent_clear_unlock_delalloc(inode, start, end,
			NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
			PAGE_UNLOCK | PAGE_START_WRITEBACK);
	btrfs_submit_compressed_write(ordered,
			    async_extent->pages,	/* compressed_pages */
			    async_extent->nr_pages,
			    async_chunk->write_flags, true);
	*alloc_hint = ins.objectid + ins.offset;
done:
	if (async_chunk->blkcg_css)
		kthread_associate_blkcg(NULL);
	kfree(async_extent);
	return;

out_free_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_free:
	mapping_set_error(inode->vfs_inode.i_mapping, -EIO);
	extent_clear_unlock_delalloc(inode, start, end,
				     NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DELALLOC_NEW |
				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
				     PAGE_UNLOCK | PAGE_START_WRITEBACK |
				     PAGE_END_WRITEBACK);
	free_async_extent_pages(async_extent);
	if (async_chunk->blkcg_css)
		kthread_associate_blkcg(NULL);
	btrfs_debug(fs_info,
"async extent submission failed root=%lld inode=%llu start=%llu len=%llu ret=%d",
		    root->root_key.objectid, btrfs_ino(inode), start,
		    async_extent->ram_size, ret);
	kfree(async_extent);
}
static u64 get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
				      u64 num_bytes)
{
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	u64 alloc_hint = 0;

	read_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, start, num_bytes);
	if (em) {
		/*
		 * if block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint.  If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			em = search_extent_mapping(em_tree, 0, 0);
			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
				alloc_hint = em->block_start;
			if (em)
				free_extent_map(em);
		} else {
			alloc_hint = em->block_start;
			free_extent_map(em);
		}
	}
	read_unlock(&em_tree->lock);

	return alloc_hint;
}
/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the call backs end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * When this function fails, it unlocks all pages except @locked_page.
 *
 * When this function successfully creates an inline extent, it returns 1 and
 * unlocks all pages including locked_page and starts I/O on them.
 * (In reality inline extents are limited to a single page, so locked_page is
 * the only page handled anyway).
 *
 * When this function succeeds and creates a normal extent, the page locking
 * status depends on the passed in flags:
 *
 * - If @keep_locked is set, all pages are kept locked.
 * - Else all pages except for @locked_page are unlocked.
 *
 * When a failure happens in the second or later iteration of the
 * while-loop, the ordered extents created in previous iterations are kept
 * intact.  So, the caller must clean them up by calling
 * btrfs_cleanup_ordered_extents().  See btrfs_run_delalloc_range() for
 * example.
 */
static noinline int cow_file_range(struct btrfs_inode *inode,
				   struct page *locked_page, u64 start, u64 end,
				   u64 *done_offset,
				   bool keep_locked, bool no_inline)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 alloc_hint = 0;
	u64 orig_start = start;
	u64 num_bytes;
	unsigned long ram_size;
	u64 cur_alloc_size = 0;
	u64 min_alloc_size;
	u64 blocksize = fs_info->sectorsize;
	struct btrfs_key ins;
	struct extent_map *em;
	unsigned clear_bits;
	unsigned long page_ops;
	bool extent_reserved = false;
	int ret = 0;

	if (btrfs_is_free_space_inode(inode)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	num_bytes = ALIGN(end - start + 1, blocksize);
	num_bytes = max(blocksize, num_bytes);
	ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy));

	inode_should_defrag(inode, start, end, num_bytes, SZ_64K);

	/*
	 * Due to the page size limit, for subpage we can only trigger the
	 * writeback for the dirty sectors of page, that means data writeback
	 * is doing more writeback than what we want.
	 *
	 * This is especially unexpected for some call sites like fallocate,
	 * where we only increase i_size after everything is done.
	 * This means we can trigger inline extent even if we didn't want to.
	 * So here we skip inline extent creation completely.
	 */
	if (start == 0 && fs_info->sectorsize == PAGE_SIZE && !no_inline) {
		u64 actual_end = min_t(u64, i_size_read(&inode->vfs_inode),
				       end + 1);

		/* lets try to make an inline extent */
		ret = cow_file_range_inline(inode, actual_end, 0,
					    BTRFS_COMPRESS_NONE, NULL, false);
		if (ret == 0) {
			/*
			 * We use DO_ACCOUNTING here because we need the
			 * delalloc_release_metadata to be run _after_ we drop
			 * our outstanding extent for clearing delalloc for this
			 * range.
			 */
			extent_clear_unlock_delalloc(inode, start, end,
				     locked_page,
				     EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
				     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
				     PAGE_START_WRITEBACK | PAGE_END_WRITEBACK);
			/*
			 * locked_page is locked by the caller of
			 * writepage_delalloc(), not locked by
			 * __process_pages_contig().
			 *
			 * We can't let __process_pages_contig() unlock it,
			 * as it doesn't have any subpage::writers recorded.
			 *
			 * Here we manually unlock the page, since the caller
			 * can't determine if it's an inline extent or a
			 * compressed extent.
			 */
			unlock_page(locked_page);
			ret = 1;
			goto done;
		} else if (ret < 0) {
			goto out_unlock;
		}
	}

	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);

	/*
	 * Relocation relies on the relocated extents to have exactly the same
	 * size as the original extents. Normally writeback for relocation data
	 * extents follows a NOCOW path because relocation preallocates the
	 * extents. However, due to an operation such as scrub turning a block
	 * group to RO mode, it may fallback to COW mode, so we must make sure
	 * an extent allocated during COW has exactly the requested size and can
	 * not be split into smaller extents, otherwise relocation breaks and
	 * fails during the stage where it updates the bytenr of file extent
	 * items.
	 */
	if (btrfs_is_data_reloc_root(root))
		min_alloc_size = num_bytes;
	else
		min_alloc_size = fs_info->sectorsize;

	while (num_bytes > 0) {
		struct btrfs_ordered_extent *ordered;

		cur_alloc_size = num_bytes;
		ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
					   min_alloc_size, 0, alloc_hint,
					   &ins, 1, 1);
		if (ret == -EAGAIN) {
			/*
			 * btrfs_reserve_extent only returns -EAGAIN for zoned
			 * file systems, which is an indication that there are
			 * no active zones to allocate from at the moment.
			 *
			 * If this is the first loop iteration, wait for at
			 * least one zone to finish before retrying the
			 * allocation.  Otherwise ask the caller to write out
			 * the already allocated blocks before coming back to
			 * us, or return -ENOSPC if it can't handle retries.
			 */
			ASSERT(btrfs_is_zoned(fs_info));
			if (start == orig_start) {
				wait_on_bit_io(&inode->root->fs_info->flags,
					       BTRFS_FS_NEED_ZONE_FINISH,
					       TASK_UNINTERRUPTIBLE);
				continue;
			}
			if (done_offset) {
				*done_offset = start - 1;
				return 0;
			}
			ret = -ENOSPC;
		}
		if (ret < 0)
			goto out_unlock;
		cur_alloc_size = ins.offset;
		extent_reserved = true;

		ram_size = ins.offset;
		em = create_io_em(inode, start, ins.offset, /* len */
				  start, /* orig_start */
				  ins.objectid, /* block_start */
				  ins.offset, /* block_len */
				  ins.offset, /* orig_block_len */
				  ram_size, /* ram_bytes */
				  BTRFS_COMPRESS_NONE, /* compress_type */
				  BTRFS_ORDERED_REGULAR /* type */);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out_reserve;
		}
		free_extent_map(em);

		ordered = btrfs_alloc_ordered_extent(inode, start, ram_size,
					ram_size, ins.objectid, cur_alloc_size,
					0, 1 << BTRFS_ORDERED_REGULAR,
					BTRFS_COMPRESS_NONE);
		if (IS_ERR(ordered)) {
			ret = PTR_ERR(ordered);
			goto out_drop_extent_cache;
		}

		if (btrfs_is_data_reloc_root(root)) {
			ret = btrfs_reloc_clone_csums(ordered);

			/*
			 * Only drop cache here, and process as normal.
			 *
			 * We must not allow extent_clear_unlock_delalloc()
			 * at out_unlock label to free meta of this ordered
			 * extent, as its meta should be freed by
			 * btrfs_finish_ordered_io().
			 *
			 * So we must continue until @start is increased to
			 * skip current ordered extent.
			 */
			if (ret)
				btrfs_drop_extent_map_range(inode, start,
							    start + ram_size - 1,
							    false);
		}
		btrfs_put_ordered_extent(ordered);

		btrfs_dec_block_group_reservations(fs_info, ins.objectid);

		/*
		 * We're not doing compressed IO, don't unlock the first page
		 * (which the caller expects to stay locked), don't clear any
		 * dirty bits and don't set any writeback bits
		 *
		 * Do set the Ordered (Private2) bit so we know this page was
		 * properly setup for writepage.
		 */
		page_ops = (keep_locked ? 0 : PAGE_UNLOCK);
		page_ops |= PAGE_SET_ORDERED;

		extent_clear_unlock_delalloc(inode, start, start + ram_size - 1,
					     locked_page,
					     EXTENT_LOCKED | EXTENT_DELALLOC,
					     page_ops);
		if (num_bytes < cur_alloc_size)
			num_bytes = 0;
		else
			num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
		extent_reserved = false;

		/*
		 * btrfs_reloc_clone_csums() error, since start is increased
		 * extent_clear_unlock_delalloc() at out_unlock label won't
		 * free metadata of current ordered extent, we're OK to exit.
		 */
		if (ret)
			goto out_unlock;
		cond_resched();
	}
done:
	if (done_offset)
		*done_offset = end;
	return ret;

out_drop_extent_cache:
	btrfs_drop_extent_map_range(inode, start, start + ram_size - 1, false);
out_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_unlock:
	/*
	 * Now, we have three regions to clean up:
	 *
	 * |-------(1)----|---(2)---|-------------(3)----------|
	 * `- orig_start  `- start  `- start + cur_alloc_size  `- end
	 *
	 * We process each region below.
	 */

	clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
		EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
	page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;

	/*
	 * For the range (1). We have already instantiated the ordered extents
	 * for this region. They are cleaned up by
	 * btrfs_cleanup_ordered_extents() in e.g,
	 * btrfs_run_delalloc_range(). EXTENT_LOCKED | EXTENT_DELALLOC are
	 * already cleared in the above loop. And, EXTENT_DELALLOC_NEW |
	 * EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV are handled by the cleanup
	 * function.
	 *
	 * However, in case of @keep_locked, we still need to unlock the pages
	 * (except @locked_page) to ensure all the pages are unlocked.
	 */
	if (keep_locked && orig_start < start) {
		if (!locked_page)
			mapping_set_error(inode->vfs_inode.i_mapping, ret);
		extent_clear_unlock_delalloc(inode, orig_start, start - 1,
					     locked_page, 0, page_ops);
	}

	/*
	 * For the range (2). If we reserved an extent for our delalloc range
	 * (or a subrange) and failed to create the respective ordered extent,
	 * then it means that when we reserved the extent we decremented the
	 * extent's size from the data space_info's bytes_may_use counter and
	 * incremented the space_info's bytes_reserved counter by the same
	 * amount. We must make sure extent_clear_unlock_delalloc() does not try
	 * to decrement again the data space_info's bytes_may_use counter,
	 * therefore we do not pass it the flag EXTENT_CLEAR_DATA_RESV.
	 */
	if (extent_reserved) {
		extent_clear_unlock_delalloc(inode, start,
					     start + cur_alloc_size - 1,
					     locked_page,
					     clear_bits,
					     page_ops);
		start += cur_alloc_size;
	}

	/*
	 * For the range (3). We never touched the region. In addition to the
	 * clear_bits above, we add EXTENT_CLEAR_DATA_RESV to release the data
	 * space_info's bytes_may_use counter, reserved in
	 * btrfs_check_data_free_space().
	 */
	if (start < end) {
		clear_bits |= EXTENT_CLEAR_DATA_RESV;
		extent_clear_unlock_delalloc(inode, start, end, locked_page,
					     clear_bits, page_ops);
	}
	return ret;
}
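
/*
 * Worked example for the error cleanup above (editor's addition,
 * illustrative): suppose the delalloc range is [0, 1M), the loop finished one
 * 256K allocation (start == 256K) and a second 256K reservation then fails to
 * get its ordered extent.  Region (1) is [0, 256K) and is handled by the
 * caller's btrfs_cleanup_ordered_extents(); region (2) is [256K, 512K) and is
 * cleared without EXTENT_CLEAR_DATA_RESV because the space already moved to
 * bytes_reserved at reservation time; region (3) is [512K, 1M), whose data
 * reservation still sits in bytes_may_use, so EXTENT_CLEAR_DATA_RESV is added.
 */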
/*
 * Phase two of compressed writeback.  This is the ordered portion of the code,
 * which only gets called in the order the work was queued.  We walk all the
 * async extents created by compress_file_range and send them down to the disk.
 */
static noinline void submit_compressed_extents(struct btrfs_work *work)
{
	struct async_chunk *async_chunk = container_of(work, struct async_chunk,
						       work);
	struct btrfs_fs_info *fs_info = btrfs_work_owner(work);
	struct async_extent *async_extent;
	unsigned long nr_pages;
	u64 alloc_hint = 0;

	nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
		PAGE_SHIFT;

	while (!list_empty(&async_chunk->extents)) {
		async_extent = list_entry(async_chunk->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);
		submit_one_async_extent(async_chunk, async_extent, &alloc_hint);
	}

	/* atomic_sub_return implies a barrier */
	if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
	    5 * SZ_1M)
		cond_wake_up_nomb(&fs_info->async_submit_wait);
}
static noinline void async_cow_free(struct btrfs_work *work)
{
	struct async_chunk *async_chunk;
	struct async_cow *async_cow;

	async_chunk = container_of(work, struct async_chunk, work);
	btrfs_add_delayed_iput(async_chunk->inode);
	if (async_chunk->blkcg_css)
		css_put(async_chunk->blkcg_css);

	async_cow = async_chunk->async_cow;
	if (atomic_dec_and_test(&async_cow->num_chunks))
		kvfree(async_cow);
}
static bool run_delalloc_compressed(struct btrfs_inode *inode,
				    struct page *locked_page, u64 start,
				    u64 end, struct writeback_control *wbc)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc);
	struct async_cow *ctx;
	struct async_chunk *async_chunk;
	unsigned long nr_pages;
	u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K);
	int i;
	unsigned nofs_flag;
	const blk_opf_t write_flags = wbc_to_write_flags(wbc);

	nofs_flag = memalloc_nofs_save();
	ctx = kvmalloc(struct_size(ctx, chunks, num_chunks), GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);
	if (!ctx)
		return false;

	unlock_extent(&inode->io_tree, start, end, NULL);
	set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags);

	async_chunk = ctx->chunks;
	atomic_set(&ctx->num_chunks, num_chunks);

	for (i = 0; i < num_chunks; i++) {
		u64 cur_end = min(end, start + SZ_512K - 1);

		/*
		 * igrab is called higher up in the call chain, take only the
		 * lightweight reference for the callback lifetime
		 */
		ihold(&inode->vfs_inode);
		async_chunk[i].async_cow = ctx;
		async_chunk[i].inode = inode;
		async_chunk[i].start = start;
		async_chunk[i].end = cur_end;
		async_chunk[i].write_flags = write_flags;
		INIT_LIST_HEAD(&async_chunk[i].extents);

		/*
		 * The locked_page comes all the way from writepage and its
		 * the original page we were actually given.  As we spread
		 * this large delalloc region across multiple async_chunk
		 * structs, only the first struct needs a pointer to
		 * locked_page.
		 *
		 * This way we don't need racey decisions about who is supposed
		 * to unlock it.
		 */
		if (locked_page) {
			/*
			 * Depending on the compressibility, the pages might or
			 * might not go through async.  We want all of them to
			 * be accounted against wbc once.  Let's do it here
			 * before the paths diverge.  wbc accounting is used
			 * only for foreign writeback detection and doesn't
			 * need full accuracy.  Just account the whole thing
			 * against the first page.
			 */
			wbc_account_cgroup_owner(wbc, locked_page,
						 cur_end - start);
			async_chunk[i].locked_page = locked_page;
			locked_page = NULL;
		} else {
			async_chunk[i].locked_page = NULL;
		}

		if (blkcg_css != blkcg_root_css) {
			css_get(blkcg_css);
			async_chunk[i].blkcg_css = blkcg_css;
			async_chunk[i].write_flags |= REQ_BTRFS_CGROUP_PUNT;
		} else {
			async_chunk[i].blkcg_css = NULL;
		}

		btrfs_init_work(&async_chunk[i].work, compress_file_range,
				submit_compressed_extents, async_cow_free);

		nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
		atomic_add(nr_pages, &fs_info->async_delalloc_pages);

		btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work);

		start = cur_end + 1;
	}
	return true;
}
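
/*
 * Example for the chunking above (editor's addition, illustrative): a 1M
 * delalloc range is split into DIV_ROUND_UP(1M, 512K) == 2 async chunks
 * covering [start, start + 512K) and [start + 512K, start + 1M), each queued
 * as separate work so compression can run on multiple CPUs.
 */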
/*
 * Run the delalloc range from start to end, and write back any dirty pages
 * covered by the range.
 */
static noinline int run_delalloc_cow(struct btrfs_inode *inode,
				     struct page *locked_page, u64 start,
				     u64 end, struct writeback_control *wbc,
				     bool pages_dirty)
{
	u64 done_offset = end;
	int ret;

	while (start <= end) {
		ret = cow_file_range(inode, locked_page, start, end, &done_offset,
				     true, false);
		if (ret)
			return ret;
		extent_write_locked_range(&inode->vfs_inode, locked_page, start,
					  done_offset, wbc, pages_dirty);
		start = done_offset + 1;
	}

	return 1;
}
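
/*
 * Example (editor's addition, illustrative): on a zoned filesystem
 * cow_file_range() may stop early and report, say, done_offset == start +
 * 256K - 1 when it runs out of active zones.  The loop above then writes back
 * the finished [start, done_offset] chunk and retries the remainder, so the
 * whole range is eventually submitted in pieces.
 */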
static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
					u64 bytenr, u64 num_bytes, bool nowait)
{
	struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bytenr);
	struct btrfs_ordered_sum *sums;
	int ret;
	LIST_HEAD(list);

	ret = btrfs_lookup_csums_list(csum_root, bytenr, bytenr + num_bytes - 1,
				      &list, 0, nowait);
	if (ret == 0 && list_empty(&list))
		return 0;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	if (ret < 0)
		return ret;
	return 1;
}
static int fallback_to_cow(struct btrfs_inode *inode, struct page *locked_page,
			   const u64 start, const u64 end)
{
	const bool is_space_ino = btrfs_is_free_space_inode(inode);
	const bool is_reloc_ino = btrfs_is_data_reloc_root(inode->root);
	const u64 range_bytes = end + 1 - start;
	struct extent_io_tree *io_tree = &inode->io_tree;
	u64 range_start = start;
	u64 count;
	int ret;

	/*
	 * If EXTENT_NORESERVE is set it means that when the buffered write was
	 * made we had not enough available data space and therefore we did not
	 * reserve data space for it, since we thought we could do NOCOW for the
	 * respective file range (either there is prealloc extent or the inode
	 * has the NOCOW bit set).
	 *
	 * However when we need to fallback to COW mode (because for example the
	 * block group for the corresponding extent was turned to RO mode by a
	 * scrub or relocation) we need to do the following:
	 *
	 * 1) We increment the bytes_may_use counter of the data space info.
	 *    If COW succeeds, it allocates a new data extent and after doing
	 *    that it decrements the space info's bytes_may_use counter and
	 *    increments its bytes_reserved counter by the same amount (we do
	 *    this at btrfs_add_reserved_bytes()).  So we need to increment the
	 *    bytes_may_use counter to compensate (when space is reserved at
	 *    buffered write time, the bytes_may_use counter is incremented);
	 *
	 * 2) We clear the EXTENT_NORESERVE bit from the range.  We do this so
	 *    that if the COW path fails for any reason, it decrements (through
	 *    extent_clear_unlock_delalloc()) the bytes_may_use counter of the
	 *    data space info, which we incremented in the step above.
	 *
	 * If we need to fallback to cow and the inode corresponds to a free
	 * space cache inode or an inode of the data relocation tree, we must
	 * also increment bytes_may_use of the data space_info for the same
	 * reason. Space caches and relocated data extents always get a prealloc
	 * extent for them, however scrub or balance may have set the block
	 * group that contains that extent to RO mode and therefore force COW
	 * when starting writeback.
	 */
	count = count_range_bits(io_tree, &range_start, end, range_bytes,
				 EXTENT_NORESERVE, 0, NULL);
	if (count > 0 || is_space_ino || is_reloc_ino) {
		u64 bytes = count;
		struct btrfs_fs_info *fs_info = inode->root->fs_info;
		struct btrfs_space_info *sinfo = fs_info->data_sinfo;

		if (is_space_ino || is_reloc_ino)
			bytes = range_bytes;

		spin_lock(&sinfo->lock);
		btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes);
		spin_unlock(&sinfo->lock);

		if (count > 0)
			clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE,
					 NULL);
	}

	/*
	 * Don't try to create inline extents, as a mix of inline extent that
	 * is written out and unlocked directly and a normal NOCOW extent
	 * doesn't work.
	 */
	ret = cow_file_range(inode, locked_page, start, end, NULL, false, true);
	return ret;
}
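
/*
 * Worked example for the accounting above (editor's addition, illustrative):
 * a 128K buffered write done as NOCOW left EXTENT_NORESERVE set on the whole
 * range, so count == 128K and bytes_may_use is bumped by 128K before COWing.
 * When the COW allocation happens, btrfs_add_reserved_bytes() moves the same
 * 128K from bytes_may_use to bytes_reserved, so the counters balance out as
 * if the space had been reserved at write time.
 */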
struct can_nocow_file_extent_args {
	/* Input fields. */

	/* Start file offset of the range we want to NOCOW. */
	u64 start;
	/* End file offset (inclusive) of the range we want to NOCOW. */
	u64 end;
	bool writeback_path;
	bool strict;
	/*
	 * Free the path passed to can_nocow_file_extent() once it's not needed
	 * anymore.
	 */
	bool free_path;

	/* Output fields. Only set when can_nocow_file_extent() returns 1. */

	u64 disk_bytenr;
	u64 disk_num_bytes;
	u64 extent_offset;
	/* Number of bytes that can be written to in NOCOW mode. */
	u64 num_bytes;
};

/*
 * Check if we can NOCOW the file extent that the path points to.
 * This function may return with the path released, so the caller should check
 * if path->nodes[0] is NULL or not if it needs to use the path afterwards.
 *
 * Returns: < 0 on error
 *            0 if we can not NOCOW
 *            1 if we can NOCOW
 */
static int can_nocow_file_extent(struct btrfs_path *path,
				 struct btrfs_key *key,
				 struct btrfs_inode *inode,
				 struct can_nocow_file_extent_args *args)
{
	const bool is_freespace_inode = btrfs_is_free_space_inode(inode);
	struct extent_buffer *leaf = path->nodes[0];
	struct btrfs_root *root = inode->root;
	struct btrfs_file_extent_item *fi;
	u64 extent_end;
	u8 extent_type;
	int ret = 0;
	bool can_nocow = false;
	bool nowait = path->nowait;

	fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
	extent_type = btrfs_file_extent_type(leaf, fi);

	if (extent_type == BTRFS_FILE_EXTENT_INLINE)
		goto out;

	/* Can't access these fields unless we know it's not an inline extent. */
	args->disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	args->disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	args->extent_offset = btrfs_file_extent_offset(leaf, fi);

	if (!(inode->flags & BTRFS_INODE_NODATACOW) &&
	    extent_type == BTRFS_FILE_EXTENT_REG)
		goto out;

	/*
	 * If the extent was created before the generation where the last snapshot
	 * for its subvolume was created, then this implies the extent is shared,
	 * hence we must COW.
	 */
	if (!args->strict &&
	    btrfs_file_extent_generation(leaf, fi) <=
	    btrfs_root_last_snapshot(&root->root_item))
		goto out;

	/* An explicit hole, must COW. */
	if (args->disk_bytenr == 0)
		goto out;

	/* Compressed/encrypted/encoded extents must be COWed. */
	if (btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		goto out;

	extent_end = btrfs_file_extent_end(path);

	/*
	 * The following checks can be expensive, as they need to take other
	 * locks and do btree or rbtree searches, so release the path to avoid
	 * blocking other tasks for too long.
	 */
	btrfs_release_path(path);

	ret = btrfs_cross_ref_exist(root, btrfs_ino(inode),
				    key->offset - args->extent_offset,
				    args->disk_bytenr, args->strict, path);
	WARN_ON_ONCE(ret > 0 && is_freespace_inode);
	if (ret != 0)
		goto out;

	if (args->free_path) {
		/*
		 * We don't need the path anymore, plus through the
		 * csum_exist_in_range() call below we will end up allocating
		 * another path.  So free the path to avoid unnecessary extra
		 * memory usage.
		 */
		btrfs_free_path(path);
		path = NULL;
	}

	/* If there are pending snapshots for this root, we must COW. */
	if (args->writeback_path && !is_freespace_inode &&
	    atomic_read(&root->snapshot_force_cow))
		goto out;

	args->disk_bytenr += args->extent_offset;
	args->disk_bytenr += args->start - key->offset;
	args->num_bytes = min(args->end + 1, extent_end) - args->start;

	/*
	 * Force COW if csums exist in the range. This ensures that csums for a
	 * given extent are either valid or do not exist.
	 */
	ret = csum_exist_in_range(root->fs_info, args->disk_bytenr, args->num_bytes,
				  nowait);
	WARN_ON_ONCE(ret > 0 && is_freespace_inode);
	if (ret != 0)
		goto out;

	can_nocow = true;
out:
	if (args->free_path && path)
		btrfs_free_path(path);

	return ret < 0 ? ret : can_nocow;
}
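
/*
 * Worked example for the output fields above (editor's addition; the numbers
 * are assumptions): consider a file extent item at key->offset == 1M pointing
 * into a 1M extent at disk_bytenr == 10M with extent_offset == 64K, and a
 * NOCOW request for args->start == 1M + 128K.  Then args->disk_bytenr becomes
 * 10M + 64K + 128K, i.e. the on-disk byte that backs args->start, and
 * args->num_bytes is clamped to whatever ends first: the extent or the
 * requested range.
 */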
/*
 * NOCOW writeback callback. This checks for snapshots or COW copies of the
 * extents that exist in the file, and COWs the file as required.
 *
 * If no COW copies or snapshots exist, we write directly to the existing
 * blocks on disk.
 */
static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
				       struct page *locked_page,
				       const u64 start, const u64 end)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_root *root = inode->root;
	struct btrfs_path *path;
	u64 cow_start = (u64)-1;
	u64 cur_offset = start;
	int ret;
	bool check_prev = true;
	u64 ino = btrfs_ino(inode);
	struct can_nocow_file_extent_args nocow_args = { 0 };

	/*
	 * Normally on a zoned device we only do COW writes, but relocation on
	 * a zoned filesystem serializes I/O so that we're only writing
	 * sequentially and can end up here as well.
	 */
	ASSERT(!btrfs_is_zoned(fs_info) || btrfs_is_data_reloc_root(root));

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}

	nocow_args.end = end;
	nocow_args.writeback_path = true;

	while (1) {
		struct btrfs_block_group *nocow_bg = NULL;
		struct btrfs_ordered_extent *ordered;
		struct btrfs_key found_key;
		struct btrfs_file_extent_item *fi;
		struct extent_buffer *leaf;
		u64 extent_end;
		u64 ram_bytes;
		u64 nocow_end;
		int extent_type;
		bool is_prealloc;

		ret = btrfs_lookup_file_extent(NULL, root, path, ino,
					       cur_offset, 0);
		if (ret < 0)
			goto error;

		/*
		 * If there is no extent for our range when doing the initial
		 * search, then go back to the previous slot as it will be the
		 * one containing the search offset
		 */
		if (ret > 0 && path->slots[0] > 0 && check_prev) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);
			if (found_key.objectid == ino &&
			    found_key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		check_prev = false;
next_slot:
		/* Go to next leaf if we have exhausted the current one */
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto error;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		/* Didn't find anything for our INO */
		if (found_key.objectid > ino)
			break;
		/*
		 * Keep searching until we find an EXTENT_ITEM or there are no
		 * more extents for this inode
		 */
		if (WARN_ON_ONCE(found_key.objectid < ino) ||
		    found_key.type < BTRFS_EXTENT_DATA_KEY) {
			path->slots[0]++;
			goto next_slot;
		}

		/* Found key is not EXTENT_DATA_KEY or starts after req range */
		if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
		    found_key.offset > end)
			break;

		/*
		 * If the found extent starts after requested offset, then
		 * adjust extent_end to be right before this extent begins
		 */
		if (found_key.offset > cur_offset) {
			extent_end = found_key.offset;
			extent_type = 0;
			goto must_cow;
		}

		/*
		 * Found extent which begins before our range and potentially
		 * intersects it
		 */
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);
		/* If this is triggered then we have a memory corruption. */
		ASSERT(extent_type < BTRFS_NR_FILE_EXTENT_TYPES);
		if (WARN_ON(extent_type >= BTRFS_NR_FILE_EXTENT_TYPES)) {
			ret = -EUCLEAN;
			goto error;
		}
		ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
		extent_end = btrfs_file_extent_end(path);

		/*
		 * If the extent we got ends before our current offset, skip to
		 * the next extent.
		 */
		if (extent_end <= cur_offset) {
			path->slots[0]++;
			goto next_slot;
		}

		nocow_args.start = cur_offset;
		ret = can_nocow_file_extent(path, &found_key, inode, &nocow_args);
		if (ret < 0)
			goto error;
		else if (ret == 0)
			goto must_cow;

		ret = 0;
		nocow_bg = btrfs_inc_nocow_writers(fs_info, nocow_args.disk_bytenr);
		if (!nocow_bg) {
must_cow:
			/*
			 * If we can't perform NOCOW writeback for the range,
			 * then record the beginning of the range that needs to
			 * be COWed. It will be written out before the next
			 * NOCOW range if we find one, or when exiting this
			 * loop.
			 */
			if (cow_start == (u64)-1)
				cow_start = cur_offset;
			cur_offset = extent_end;
			if (cur_offset > end)
				break;
			if (!path->nodes[0])
				continue;
			path->slots[0]++;
			goto next_slot;
		}

		/*
		 * COW range from cow_start to found_key.offset - 1. As the key
		 * will contain the beginning of the first extent that can be
		 * NOCOW, following one which needs to be COW'ed
		 */
		if (cow_start != (u64)-1) {
			ret = fallback_to_cow(inode, locked_page,
					      cow_start, found_key.offset - 1);
			cow_start = (u64)-1;
			if (ret) {
				btrfs_dec_nocow_writers(nocow_bg);
				goto error;
			}
		}

		nocow_end = cur_offset + nocow_args.num_bytes - 1;
		is_prealloc = extent_type == BTRFS_FILE_EXTENT_PREALLOC;
		if (is_prealloc) {
			u64 orig_start = found_key.offset - nocow_args.extent_offset;
			struct extent_map *em;

			em = create_io_em(inode, cur_offset, nocow_args.num_bytes,
					  orig_start,
					  nocow_args.disk_bytenr, /* block_start */
					  nocow_args.num_bytes, /* block_len */
					  nocow_args.disk_num_bytes, /* orig_block_len */
					  ram_bytes, BTRFS_COMPRESS_NONE,
					  BTRFS_ORDERED_PREALLOC);
			if (IS_ERR(em)) {
				btrfs_dec_nocow_writers(nocow_bg);
				ret = PTR_ERR(em);
				goto error;
			}
			free_extent_map(em);
		}

		ordered = btrfs_alloc_ordered_extent(inode, cur_offset,
				nocow_args.num_bytes, nocow_args.num_bytes,
				nocow_args.disk_bytenr, nocow_args.num_bytes, 0,
				is_prealloc
				? (1 << BTRFS_ORDERED_PREALLOC)
				: (1 << BTRFS_ORDERED_NOCOW),
				BTRFS_COMPRESS_NONE);
		btrfs_dec_nocow_writers(nocow_bg);
		if (IS_ERR(ordered)) {
			if (is_prealloc)
				btrfs_drop_extent_map_range(inode, cur_offset,
							    nocow_end, false);
			ret = PTR_ERR(ordered);
			goto error;
		}

		if (btrfs_is_data_reloc_root(root))
			/*
			 * Error handled later, as we must prevent
			 * extent_clear_unlock_delalloc() in error handler
			 * from freeing metadata of created ordered extent.
			 */
			ret = btrfs_reloc_clone_csums(ordered);
		btrfs_put_ordered_extent(ordered);

		extent_clear_unlock_delalloc(inode, cur_offset, nocow_end,
					     locked_page, EXTENT_LOCKED |
					     EXTENT_DELALLOC |
					     EXTENT_CLEAR_DATA_RESV,
					     PAGE_UNLOCK | PAGE_SET_ORDERED);

		cur_offset = extent_end;

		/*
		 * btrfs_reloc_clone_csums() error, now we're OK to call error
		 * handler, as metadata for created ordered extent will only
		 * be freed by btrfs_finish_ordered_io().
		 */
		if (ret)
			goto error;

		if (cur_offset > end)
			break;
	}
	btrfs_release_path(path);

	if (cur_offset <= end && cow_start == (u64)-1)
		cow_start = cur_offset;

	if (cow_start != (u64)-1) {
		cur_offset = end;
		ret = fallback_to_cow(inode, locked_page, cow_start, end);
		cow_start = (u64)-1;
		if (ret)
			goto error;
	}

	btrfs_free_path(path);
	return 0;

error:
	/*
	 * If an error happened while a COW region is outstanding, cur_offset
	 * needs to be reset to cow_start to ensure the COW region is unlocked
	 * as well.
	 */
	if (cow_start != (u64)-1)
		cur_offset = cow_start;
	if (cur_offset < end)
		extent_clear_unlock_delalloc(inode, cur_offset, end,
					     locked_page, EXTENT_LOCKED |
					     EXTENT_DELALLOC | EXTENT_DEFRAG |
					     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
					     PAGE_START_WRITEBACK |
					     PAGE_END_WRITEBACK);
	btrfs_free_path(path);
	return ret;
}
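
/*
 * Editor's note: an illustrative walk of the loop above (not from the
 * original source). For a write to [0, 256K) where [0, 128K) maps to a
 * shared (must-COW) extent and [128K, 256K) to an unshared NOCOW-able one:
 * the first iteration fails can_nocow_file_extent(), records cow_start = 0
 * and advances cur_offset to 128K; the second iteration succeeds, so the
 * pending [0, 128K) range is flushed through fallback_to_cow() before a
 * NOCOW ordered extent is created for [128K, 256K).
 */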
static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end)
{
	if (inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) {
		if (inode->defrag_bytes &&
		    test_range_bit(&inode->io_tree, start, end, EXTENT_DEFRAG,
				   0, NULL))
			return false;
		return true;
	}
	return false;
}
/*
 * Function to process delayed allocation (create CoW) for ranges which are
 * being touched for the first time.
 */
int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page,
			     u64 start, u64 end, struct writeback_control *wbc)
{
	const bool zoned = btrfs_is_zoned(inode->root->fs_info);
	int ret;

	/*
	 * The range must cover part of the @locked_page, or a return of 1
	 * can confuse the caller.
	 */
	ASSERT(!(end <= page_offset(locked_page) ||
		 start >= page_offset(locked_page) + PAGE_SIZE));

	if (should_nocow(inode, start, end)) {
		ret = run_delalloc_nocow(inode, locked_page, start, end);
		goto out;
	}

	if (btrfs_inode_can_compress(inode) &&
	    inode_need_compress(inode, start, end) &&
	    run_delalloc_compressed(inode, locked_page, start, end, wbc))
		return 1;

	if (zoned)
		ret = run_delalloc_cow(inode, locked_page, start, end, wbc,
				       true);
	else
		ret = cow_file_range(inode, locked_page, start, end, NULL,
				     false, false);

out:
	if (ret < 0)
		btrfs_cleanup_ordered_extents(inode, locked_page, start,
					      end - start + 1);
	return ret;
}
void btrfs_split_delalloc_extent(struct btrfs_inode *inode,
				 struct extent_state *orig, u64 split)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 size;

	/* not delalloc, ignore it */
	if (!(orig->state & EXTENT_DELALLOC))
		return;

	size = orig->end - orig->start + 1;
	if (size > fs_info->max_extent_size) {
		u32 num_extents;
		u64 new_size;

		/*
		 * See the explanation in btrfs_merge_delalloc_extent, the same
		 * applies here, just in reverse.
		 */
		new_size = orig->end - split + 1;
		num_extents = count_max_extents(fs_info, new_size);
		new_size = split - orig->start;
		num_extents += count_max_extents(fs_info, new_size);
		if (count_max_extents(fs_info, size) >= num_extents)
			return;
	}

	spin_lock(&inode->lock);
	btrfs_mod_outstanding_extents(inode, 1);
	spin_unlock(&inode->lock);
}
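
/*
 * Editor's note: worked numbers for the split accounting above
 * (illustrative, assuming the default 128M max_extent_size). A 256M
 * delalloc extent is reserved as ceil(256M / 128M) = 2 outstanding
 * extents. Splitting it at an unaligned point into 128M-4K and 128M+4K
 * pieces needs 1 + 2 = 3 reservations, so one outstanding extent is
 * added. Splitting 192M at 64M gives 64M and 128M pieces, still
 * 1 + 1 = 2, so nothing changes and we return early.
 */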
/*
 * Handle merged delayed allocation extents so we can keep track of new extents
 * that are just merged onto old extents, such as when we are doing sequential
 * writes, so we can properly account for the metadata space we'll need.
 */
void btrfs_merge_delalloc_extent(struct btrfs_inode *inode, struct extent_state *new,
				 struct extent_state *other)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 new_size, old_size;
	u32 num_extents;

	/* not delalloc, ignore it */
	if (!(other->state & EXTENT_DELALLOC))
		return;

	if (new->start > other->start)
		new_size = new->end - other->start + 1;
	else
		new_size = other->end - new->start + 1;

	/* we're not bigger than the max, unreserve the space and go */
	if (new_size <= fs_info->max_extent_size) {
		spin_lock(&inode->lock);
		btrfs_mod_outstanding_extents(inode, -1);
		spin_unlock(&inode->lock);
		return;
	}

	/*
	 * We have to add up either side to figure out how many extents were
	 * accounted for before we merged into one big extent. If the number of
	 * extents we accounted for is <= the amount we need for the new range
	 * then we can return, otherwise drop. Think of it like this
	 *
	 * [ 4k][MAX_SIZE]
	 *
	 * So we've grown the extent by a MAX_SIZE extent, this would mean we
	 * need 2 outstanding extents, on one side we have 1 and the other side
	 * we have 1 so they are == and we can return. But in this case
	 *
	 * [MAX_SIZE+4k][MAX_SIZE+4k]
	 *
	 * Each range on their own accounts for 2 extents, but merged together
	 * they are only 3 extents worth of accounting, so we need to drop in
	 * this case.
	 */
	old_size = other->end - other->start + 1;
	num_extents = count_max_extents(fs_info, old_size);
	old_size = new->end - new->start + 1;
	num_extents += count_max_extents(fs_info, old_size);
	if (count_max_extents(fs_info, new_size) >= num_extents)
		return;

	spin_lock(&inode->lock);
	btrfs_mod_outstanding_extents(inode, -1);
	spin_unlock(&inode->lock);
}
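
/*
 * Editor's note: worked numbers for the merge accounting above
 * (illustrative, assuming the default 128M max_extent_size). Two adjacent
 * ranges of 128M+4K each were reserved as 2 outstanding extents apiece,
 * 4 in total. Merged they span 256M+8K, which only needs
 * ceil((256M+8K) / 128M) = 3, so one outstanding extent is dropped. For
 * [4K][128M] the sides account for 1 + 1 = 2 and the merged 128M+4K
 * range also needs 2, so nothing is dropped.
 */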
static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
				      struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	spin_lock(&root->delalloc_lock);
	if (list_empty(&inode->delalloc_inodes)) {
		list_add_tail(&inode->delalloc_inodes, &root->delalloc_inodes);
		set_bit(BTRFS_INODE_IN_DELALLOC_LIST, &inode->runtime_flags);
		root->nr_delalloc_inodes++;
		if (root->nr_delalloc_inodes == 1) {
			spin_lock(&fs_info->delalloc_root_lock);
			BUG_ON(!list_empty(&root->delalloc_root));
			list_add_tail(&root->delalloc_root,
				      &fs_info->delalloc_roots);
			spin_unlock(&fs_info->delalloc_root_lock);
		}
	}
	spin_unlock(&root->delalloc_lock);
}
void __btrfs_del_delalloc_inode(struct btrfs_root *root,
				struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!list_empty(&inode->delalloc_inodes)) {
		list_del_init(&inode->delalloc_inodes);
		clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
			  &inode->runtime_flags);
		root->nr_delalloc_inodes--;
		if (!root->nr_delalloc_inodes) {
			ASSERT(list_empty(&root->delalloc_inodes));
			spin_lock(&fs_info->delalloc_root_lock);
			BUG_ON(list_empty(&root->delalloc_root));
			list_del_init(&root->delalloc_root);
			spin_unlock(&fs_info->delalloc_root_lock);
		}
	}
}
static void btrfs_del_delalloc_inode(struct btrfs_root *root,
				     struct btrfs_inode *inode)
{
	spin_lock(&root->delalloc_lock);
	__btrfs_del_delalloc_inode(root, inode);
	spin_unlock(&root->delalloc_lock);
}
/*
 * Properly track delayed allocation bytes in the inode and to maintain the
 * list of inodes that have pending delalloc work to be done.
 */
void btrfs_set_delalloc_extent(struct btrfs_inode *inode, struct extent_state *state,
			       u32 bits)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	if ((bits & EXTENT_DEFRAG) && !(bits & EXTENT_DELALLOC))
		WARN_ON(1);
	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if (!(state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = inode->root;
		u64 len = state->end + 1 - state->start;
		u32 num_extents = count_max_extents(fs_info, len);
		bool do_list = !btrfs_is_free_space_inode(inode);

		spin_lock(&inode->lock);
		btrfs_mod_outstanding_extents(inode, num_extents);
		spin_unlock(&inode->lock);

		/* For sanity tests */
		if (btrfs_is_testing(fs_info))
			return;

		percpu_counter_add_batch(&fs_info->delalloc_bytes, len,
					 fs_info->delalloc_batch);
		spin_lock(&inode->lock);
		inode->delalloc_bytes += len;
		if (bits & EXTENT_DEFRAG)
			inode->defrag_bytes += len;
		if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
					 &inode->runtime_flags))
			btrfs_add_delalloc_inodes(root, inode);
		spin_unlock(&inode->lock);
	}

	if (!(state->state & EXTENT_DELALLOC_NEW) &&
	    (bits & EXTENT_DELALLOC_NEW)) {
		spin_lock(&inode->lock);
		inode->new_delalloc_bytes += state->end + 1 - state->start;
		spin_unlock(&inode->lock);
	}
}
/*
 * Once a range is no longer delalloc this function ensures that proper
 * accounting happens.
 */
void btrfs_clear_delalloc_extent(struct btrfs_inode *inode,
				 struct extent_state *state, u32 bits)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 len = state->end + 1 - state->start;
	u32 num_extents = count_max_extents(fs_info, len);

	if ((state->state & EXTENT_DEFRAG) && (bits & EXTENT_DEFRAG)) {
		spin_lock(&inode->lock);
		inode->defrag_bytes -= len;
		spin_unlock(&inode->lock);
	}

	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = inode->root;
		bool do_list = !btrfs_is_free_space_inode(inode);

		spin_lock(&inode->lock);
		btrfs_mod_outstanding_extents(inode, -num_extents);
		spin_unlock(&inode->lock);

		/*
		 * We don't reserve metadata space for space cache inodes so we
		 * don't need to call delalloc_release_metadata if there is an
		 * error.
		 */
		if (bits & EXTENT_CLEAR_META_RESV &&
		    root != fs_info->tree_root)
			btrfs_delalloc_release_metadata(inode, len, false);

		/* For sanity tests. */
		if (btrfs_is_testing(fs_info))
			return;

		if (!btrfs_is_data_reloc_root(root) &&
		    do_list && !(state->state & EXTENT_NORESERVE) &&
		    (bits & EXTENT_CLEAR_DATA_RESV))
			btrfs_free_reserved_data_space_noquota(fs_info, len);

		percpu_counter_add_batch(&fs_info->delalloc_bytes, -len,
					 fs_info->delalloc_batch);
		spin_lock(&inode->lock);
		inode->delalloc_bytes -= len;
		if (do_list && inode->delalloc_bytes == 0 &&
		    test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
			     &inode->runtime_flags))
			btrfs_del_delalloc_inode(root, inode);
		spin_unlock(&inode->lock);
	}

	if ((state->state & EXTENT_DELALLOC_NEW) &&
	    (bits & EXTENT_DELALLOC_NEW)) {
		spin_lock(&inode->lock);
		ASSERT(inode->new_delalloc_bytes >= len);
		inode->new_delalloc_bytes -= len;
		if (bits & EXTENT_ADD_INODE_BYTES)
			inode_add_bytes(&inode->vfs_inode, len);
		spin_unlock(&inode->lock);
	}
}
static int btrfs_extract_ordered_extent(struct btrfs_bio *bbio,
					struct btrfs_ordered_extent *ordered)
{
	u64 start = (u64)bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
	u64 len = bbio->bio.bi_iter.bi_size;
	struct btrfs_ordered_extent *new;
	int ret = 0;

	/* Must always be called for the beginning of an ordered extent. */
	if (WARN_ON_ONCE(start != ordered->disk_bytenr))
		return -EINVAL;

	/* No need to split if the ordered extent covers the entire bio. */
	if (ordered->disk_num_bytes == len) {
		refcount_inc(&ordered->refs);
		bbio->ordered = ordered;
		return 0;
	}

	/*
	 * Don't split the extent_map for NOCOW extents, as we're writing into
	 * a pre-existing one.
	 */
	if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
		ret = split_extent_map(bbio->inode, bbio->file_offset,
				       ordered->num_bytes, len,
				       ordered->disk_bytenr);
		if (ret)
			return ret;
	}

	new = btrfs_split_ordered_extent(ordered, len);
	if (IS_ERR(new))
		return PTR_ERR(new);
	bbio->ordered = new;
	return 0;
}
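
/*
 * Editor's note: an illustrative case for the split above (not from the
 * original source). If an ordered extent covers 1M of disk space but the
 * bio being submitted only spans its first 256K, btrfs_split_ordered_extent()
 * carves off a 256K head that this bio completes on its own, while the
 * remaining 768K stays in the original ordered extent for later bios.
 */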
/*
 * Given a list of ordered sums, record them in the inode. This happens
 * at IO completion time based on sums calculated at bio submission time.
 */
static int add_pending_csums(struct btrfs_trans_handle *trans,
			     struct list_head *list)
{
	struct btrfs_ordered_sum *sum;
	struct btrfs_root *csum_root = NULL;
	int ret;

	list_for_each_entry(sum, list, list) {
		trans->adding_csums = true;
		if (!csum_root)
			csum_root = btrfs_csum_root(trans->fs_info,
						    sum->logical);
		ret = btrfs_csum_file_blocks(trans, csum_root, sum);
		trans->adding_csums = false;
		if (ret)
			return ret;
	}
	return 0;
}
static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
					 const u64 start,
					 const u64 len,
					 struct extent_state **cached_state)
{
	u64 search_start = start;
	const u64 end = start + len - 1;

	while (search_start < end) {
		const u64 search_len = end - search_start + 1;
		struct extent_map *em;
		u64 em_len;
		int ret = 0;

		em = btrfs_get_extent(inode, NULL, 0, search_start, search_len);
		if (IS_ERR(em))
			return PTR_ERR(em);

		if (em->block_start != EXTENT_MAP_HOLE)
			goto next;

		em_len = em->len;
		if (em->start < search_start)
			em_len -= search_start - em->start;
		if (em_len > search_len)
			em_len = search_len;

		ret = set_extent_bit(&inode->io_tree, search_start,
				     search_start + em_len - 1,
				     EXTENT_DELALLOC_NEW, cached_state);
next:
		search_start = extent_map_end(em);
		free_extent_map(em);
		if (ret)
			return ret;
	}
	return 0;
}
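
/*
 * Editor's note: an illustrative case for the hole scan above (not from
 * the original source). For a buffered write into [0, 1M) where only
 * [256K, 512K) is a hole (EXTENT_MAP_HOLE), the loop marks just that 256K
 * subrange with EXTENT_DELALLOC_NEW; the already-allocated parts keep
 * their inode byte accounting unchanged at ordered extent completion.
 */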
int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
			      unsigned int extra_bits,
			      struct extent_state **cached_state)
{
	WARN_ON(PAGE_ALIGNED(end));

	if (start >= i_size_read(&inode->vfs_inode) &&
	    !(inode->flags & BTRFS_INODE_PREALLOC)) {
		/*
		 * There can't be any extents following eof in this case so just
		 * set the delalloc new bit for the range directly.
		 */
		extra_bits |= EXTENT_DELALLOC_NEW;
	} else {
		int ret;

		ret = btrfs_find_new_delalloc_bytes(inode, start,
						    end + 1 - start,
						    cached_state);
		if (ret)
			return ret;
	}

	return set_extent_bit(&inode->io_tree, start, end,
			      EXTENT_DELALLOC | extra_bits, cached_state);
}
/* see btrfs_writepage_start_hook for details on why this is required */
struct btrfs_writepage_fixup {
	struct page *page;
	struct btrfs_inode *inode;
	struct btrfs_work work;
};
static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
	struct btrfs_writepage_fixup *fixup =
		container_of(work, struct btrfs_writepage_fixup, work);
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct extent_changeset *data_reserved = NULL;
	struct page *page = fixup->page;
	struct btrfs_inode *inode = fixup->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 page_start = page_offset(page);
	u64 page_end = page_offset(page) + PAGE_SIZE - 1;
	int ret = 0;
	bool free_delalloc_space = true;

	/*
	 * This is similar to page_mkwrite, we need to reserve the space before
	 * we take the page lock.
	 */
	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
					   PAGE_SIZE);
again:
	lock_page(page);

	/*
	 * Before we queued this fixup, we took a reference on the page.
	 * page->mapping may go NULL, but it shouldn't be moved to a different
	 * address space.
	 */
	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
		/*
		 * Unfortunately this is a little tricky, either
		 *
		 * 1) We got here and our page had already been dealt with and
		 *    we reserved our space, thus ret == 0, so we need to just
		 *    drop our space reservation and bail. This can happen the
		 *    first time we come into the fixup worker, or could happen
		 *    while waiting for the ordered extent.
		 * 2) Our page was already dealt with, but we happened to get an
		 *    ENOSPC above from the btrfs_delalloc_reserve_space. In
		 *    this case we obviously don't have anything to release, but
		 *    because the page was already dealt with we don't want to
		 *    mark the page with an error, so make sure we're resetting
		 *    ret to 0. This is why we have this check _before_ the ret
		 *    check, because we do not want to have a surprise ENOSPC
		 *    when the page was already properly dealt with.
		 */
		if (!ret) {
			btrfs_delalloc_release_extents(inode, PAGE_SIZE);
			btrfs_delalloc_release_space(inode, data_reserved,
						     page_start, PAGE_SIZE,
						     true);
		}
		ret = 0;
		goto out_page;
	}

	/*
	 * We can't mess with the page state unless it is locked, so now that
	 * it is locked bail if we failed to make our space reservation.
	 */
	if (ret)
		goto out_page;

	lock_extent(&inode->io_tree, page_start, page_end, &cached_state);

	/* already ordered? We're done */
	if (PageOrdered(page))
		goto out_reserved;

	ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
	if (ordered) {
		unlock_extent(&inode->io_tree, page_start, page_end,
			      &cached_state);
		unlock_page(page);
		btrfs_start_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
					&cached_state);
	if (ret)
		goto out_reserved;

	/*
	 * Everything went as planned, we're now the owner of a dirty page with
	 * delayed allocation bits set and space reserved for our COW
	 * destination.
	 *
	 * The page was dirty when we started, nothing should have cleaned it.
	 */
	BUG_ON(!PageDirty(page));
	free_delalloc_space = false;
out_reserved:
	btrfs_delalloc_release_extents(inode, PAGE_SIZE);
	if (free_delalloc_space)
		btrfs_delalloc_release_space(inode, data_reserved, page_start,
					     PAGE_SIZE, true);
	unlock_extent(&inode->io_tree, page_start, page_end, &cached_state);
out_page:
	if (ret) {
		/*
		 * We hit ENOSPC or other errors. Update the mapping and page
		 * to reflect the errors and clean the page.
		 */
		mapping_set_error(page->mapping, ret);
		btrfs_mark_ordered_io_finished(inode, page, page_start,
					       PAGE_SIZE, !ret);
		clear_page_dirty_for_io(page);
	}
	btrfs_page_clear_checked(fs_info, page, page_start, PAGE_SIZE);
	unlock_page(page);
	put_page(page);
	kfree(fixup);
	extent_changeset_free(data_reserved);
	/*
	 * As a precaution, do a delayed iput in case it would be the last iput
	 * that could need flushing space. Recursing back to fixup worker would
	 * deadlock.
	 */
	btrfs_add_delayed_iput(inode);
}
/*
 * There are a few paths in the higher layers of the kernel that directly
 * set the page dirty bit without asking the filesystem if it is a
 * good idea. This causes problems because we want to make sure COW
 * properly happens and the data=ordered rules are followed.
 *
 * In our case any range that doesn't have the ORDERED bit set
 * hasn't been properly setup for IO. We kick off an async process
 * to fix it up. The async helper will wait for ordered extents, set
 * the delalloc bit and make it safe to write the page.
 */
int btrfs_writepage_cow_fixup(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_writepage_fixup *fixup;

	/* This page has ordered extent covering it already */
	if (PageOrdered(page))
		return 0;

	/*
	 * PageChecked is set below when we create a fixup worker for this page,
	 * don't try to create another one if we're already PageChecked()
	 *
	 * The extent_io writepage code will redirty the page if we send back
	 * EAGAIN.
	 */
	if (PageChecked(page))
		return -EAGAIN;

	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
	if (!fixup)
		return -EAGAIN;

	/*
	 * We are already holding a reference to this inode from
	 * write_cache_pages. We need to hold it because the space reservation
	 * takes place outside of the page lock, and we can't trust
	 * page->mapping outside of the page lock.
	 */
	ihold(inode);
	btrfs_page_set_checked(fs_info, page, page_offset(page), PAGE_SIZE);
	get_page(page);
	btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL);
	fixup->page = page;
	fixup->inode = BTRFS_I(inode);
	btrfs_queue_work(fs_info->fixup_workers, &fixup->work);

	return -EAGAIN;
}
static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
				       struct btrfs_inode *inode, u64 file_pos,
				       struct btrfs_file_extent_item *stack_fi,
				       const bool update_inode_bytes,
				       u64 qgroup_reserved)
{
	struct btrfs_root *root = inode->root;
	const u64 sectorsize = root->fs_info->sectorsize;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key ins;
	u64 disk_num_bytes = btrfs_stack_file_extent_disk_num_bytes(stack_fi);
	u64 disk_bytenr = btrfs_stack_file_extent_disk_bytenr(stack_fi);
	u64 offset = btrfs_stack_file_extent_offset(stack_fi);
	u64 num_bytes = btrfs_stack_file_extent_num_bytes(stack_fi);
	u64 ram_bytes = btrfs_stack_file_extent_ram_bytes(stack_fi);
	struct btrfs_drop_extents_args drop_args = { 0 };
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * we may be replacing one extent in the tree with another.
	 * The new extent is pinned in the extent map, and we don't want
	 * to drop it from the cache until it is completely in the btree.
	 *
	 * So, tell btrfs_drop_extents to leave this extent in the cache.
	 * the caller is expected to unpin it and allow it to be merged
	 * with the others.
	 */
	drop_args.path = path;
	drop_args.start = file_pos;
	drop_args.end = file_pos + num_bytes;
	drop_args.replace_extent = true;
	drop_args.extent_item_size = sizeof(*stack_fi);
	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
	if (ret)
		goto out;

	if (!drop_args.extent_inserted) {
		ins.objectid = btrfs_ino(inode);
		ins.offset = file_pos;
		ins.type = BTRFS_EXTENT_DATA_KEY;

		ret = btrfs_insert_empty_item(trans, root, path, &ins,
					      sizeof(*stack_fi));
		if (ret)
			goto out;
	}
	leaf = path->nodes[0];
	btrfs_set_stack_file_extent_generation(stack_fi, trans->transid);
	write_extent_buffer(leaf, stack_fi,
			btrfs_item_ptr_offset(leaf, path->slots[0]),
			sizeof(struct btrfs_file_extent_item));

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	/*
	 * If we dropped an inline extent here, we know the range where it is
	 * was not marked with the EXTENT_DELALLOC_NEW bit, so we update the
	 * number of bytes only for that range containing the inline extent.
	 * The remainder of the range will be processed when clearing the
	 * EXTENT_DELALLOC_BIT bit through the ordered extent completion.
	 */
	if (file_pos == 0 && !IS_ALIGNED(drop_args.bytes_found, sectorsize)) {
		u64 inline_size = round_down(drop_args.bytes_found, sectorsize);

		inline_size = drop_args.bytes_found - inline_size;
		btrfs_update_inode_bytes(inode, sectorsize, inline_size);
		drop_args.bytes_found -= inline_size;
		num_bytes -= sectorsize;
	}

	if (update_inode_bytes)
		btrfs_update_inode_bytes(inode, num_bytes, drop_args.bytes_found);

	ins.objectid = disk_bytenr;
	ins.offset = disk_num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_inode_set_file_extent_range(inode, file_pos, ram_bytes);
	if (ret)
		goto out;

	ret = btrfs_alloc_reserved_file_extent(trans, root, btrfs_ino(inode),
					       file_pos - offset,
					       qgroup_reserved, &ins);
out:
	btrfs_free_path(path);

	return ret;
}
static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
					 u64 start, u64 len)
{
	struct btrfs_block_group *cache;

	cache = btrfs_lookup_block_group(fs_info, start);
	ASSERT(cache);

	spin_lock(&cache->lock);
	cache->delalloc_bytes -= len;
	spin_unlock(&cache->lock);

	btrfs_put_block_group(cache);
}
static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans,
					     struct btrfs_ordered_extent *oe)
{
	struct btrfs_file_extent_item stack_fi;
	bool update_inode_bytes;
	u64 num_bytes = oe->num_bytes;
	u64 ram_bytes = oe->ram_bytes;

	memset(&stack_fi, 0, sizeof(stack_fi));
	btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_REG);
	btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, oe->disk_bytenr);
	btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi,
						   oe->disk_num_bytes);
	btrfs_set_stack_file_extent_offset(&stack_fi, oe->offset);
	if (test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags)) {
		num_bytes = oe->truncated_len;
		ram_bytes = num_bytes;
	}
	btrfs_set_stack_file_extent_num_bytes(&stack_fi, num_bytes);
	btrfs_set_stack_file_extent_ram_bytes(&stack_fi, ram_bytes);
	btrfs_set_stack_file_extent_compression(&stack_fi, oe->compress_type);
	/* Encryption and other encoding is reserved and all 0 */

	/*
	 * For delalloc, when completing an ordered extent we update the inode's
	 * bytes when clearing the range in the inode's io tree, so pass false
	 * as the argument 'update_inode_bytes' to insert_reserved_file_extent(),
	 * except if the ordered extent was truncated.
	 */
	update_inode_bytes = test_bit(BTRFS_ORDERED_DIRECT, &oe->flags) ||
			     test_bit(BTRFS_ORDERED_ENCODED, &oe->flags) ||
			     test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags);

	return insert_reserved_file_extent(trans, BTRFS_I(oe->inode),
					   oe->file_offset, &stack_fi,
					   update_inode_bytes, oe->qgroup_rsv);
}
/*
 * As ordered data IO finishes, this gets called so we can finish
 * an ordered extent if the range of bytes in the file it covers are
 * fully written.
 */
int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
{
	struct btrfs_inode *inode = BTRFS_I(ordered_extent->inode);
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans = NULL;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct extent_state *cached_state = NULL;
	u64 start, end;
	int compress_type = 0;
	int ret = 0;
	u64 logical_len = ordered_extent->num_bytes;
	bool freespace_inode;
	bool truncated = false;
	bool clear_reserved_extent = true;
	unsigned int clear_bits = EXTENT_DEFRAG;

	start = ordered_extent->file_offset;
	end = start + ordered_extent->num_bytes - 1;

	if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
	    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) &&
	    !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags) &&
	    !test_bit(BTRFS_ORDERED_ENCODED, &ordered_extent->flags))
		clear_bits |= EXTENT_DELALLOC_NEW;

	freespace_inode = btrfs_is_free_space_inode(inode);
	if (!freespace_inode)
		btrfs_lockdep_acquire(fs_info, btrfs_ordered_extent);

	if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
		ret = -EIO;
		goto out;
	}

	if (btrfs_is_zoned(fs_info))
		btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr,
					ordered_extent->disk_num_bytes);

	if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
		truncated = true;
		logical_len = ordered_extent->truncated_len;
		/* Truncated the entire extent, don't bother adding */
		if (!logical_len)
			goto out;
	}

	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
		BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */

		btrfs_inode_safe_disk_i_size_write(inode, 0);
		if (freespace_inode)
			trans = btrfs_join_transaction_spacecache(root);
		else
			trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			goto out;
		}
		trans->block_rsv = &inode->block_rsv;
		ret = btrfs_update_inode_fallback(trans, root, inode);
		if (ret) /* -ENOMEM or corruption */
			btrfs_abort_transaction(trans, ret);
		goto out;
	}

	clear_bits |= EXTENT_LOCKED;
	lock_extent(io_tree, start, end, &cached_state);

	if (freespace_inode)
		trans = btrfs_join_transaction_spacecache(root);
	else
		trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}

	trans->block_rsv = &inode->block_rsv;

	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
		compress_type = ordered_extent->compress_type;
	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
		BUG_ON(compress_type);
		ret = btrfs_mark_extent_written(trans, inode,
						ordered_extent->file_offset,
						ordered_extent->file_offset +
						logical_len);
		btrfs_zoned_release_data_reloc_bg(fs_info, ordered_extent->disk_bytenr,
						  ordered_extent->disk_num_bytes);
	} else {
		BUG_ON(root == fs_info->tree_root);
		ret = insert_ordered_extent_file_extent(trans, ordered_extent);
		if (!ret) {
			clear_reserved_extent = false;
			btrfs_release_delalloc_bytes(fs_info,
						     ordered_extent->disk_bytenr,
						     ordered_extent->disk_num_bytes);
		}
	}
	unpin_extent_cache(&inode->extent_tree, ordered_extent->file_offset,
			   ordered_extent->num_bytes, trans->transid);
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = add_pending_csums(trans, &ordered_extent->list);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	/*
	 * If this is a new delalloc range, clear its new delalloc flag to
	 * update the inode's number of bytes. This needs to be done first
	 * before updating the inode item.
	 */
	if ((clear_bits & EXTENT_DELALLOC_NEW) &&
	    !test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags))
		clear_extent_bit(&inode->io_tree, start, end,
				 EXTENT_DELALLOC_NEW | EXTENT_ADD_INODE_BYTES,
				 &cached_state);

	btrfs_inode_safe_disk_i_size_write(inode, 0);
	ret = btrfs_update_inode_fallback(trans, root, inode);
	if (ret) { /* -ENOMEM or corruption */
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	ret = 0;
out:
	clear_extent_bit(&inode->io_tree, start, end, clear_bits,
			 &cached_state);

	if (trans)
		btrfs_end_transaction(trans);

	if (ret || truncated) {
		u64 unwritten_start = start;

		/*
		 * If we failed to finish this ordered extent for any reason we
		 * need to make sure BTRFS_ORDERED_IOERR is set on the ordered
		 * extent, and mark the inode with the error if it wasn't
		 * already set. Any error during writeback would have already
		 * set the mapping error, so we need to set it if we're the ones
		 * marking this ordered extent as failed.
		 */
		if (ret && !test_and_set_bit(BTRFS_ORDERED_IOERR,
					     &ordered_extent->flags))
			mapping_set_error(ordered_extent->inode->i_mapping, -EIO);

		if (truncated)
			unwritten_start += logical_len;
		clear_extent_uptodate(io_tree, unwritten_start, end, NULL);

		/* Drop extent maps for the part of the extent we didn't write. */
		btrfs_drop_extent_map_range(inode, unwritten_start, end, false);

		/*
		 * If the ordered extent had an IOERR or something else went
		 * wrong we need to return the space for this ordered extent
		 * back to the allocator. We only free the extent in the
		 * truncated case if we didn't write out the extent at all.
		 *
		 * If we made it past insert_reserved_file_extent before we
		 * errored out then we don't need to do this as the accounting
		 * has already been done.
		 */
		if ((ret || !logical_len) &&
		    clear_reserved_extent &&
		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
		    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
			/*
			 * Discard the range before returning it back to the
			 * free space pool
			 */
			if (ret && btrfs_test_opt(fs_info, DISCARD_SYNC))
				btrfs_discard_extent(fs_info,
						ordered_extent->disk_bytenr,
						ordered_extent->disk_num_bytes,
						NULL);
			btrfs_free_reserved_extent(fs_info,
					ordered_extent->disk_bytenr,
					ordered_extent->disk_num_bytes, 1);
			/*
			 * Actually free the qgroup rsv which was released when
			 * the ordered extent was created.
			 */
			btrfs_qgroup_free_refroot(fs_info, inode->root->root_key.objectid,
						  ordered_extent->qgroup_rsv,
						  BTRFS_QGROUP_RSV_DATA);
		}
	}

	/*
	 * This needs to be done to make sure anybody waiting knows we are done
	 * updating everything for this ordered extent.
	 */
	btrfs_remove_ordered_extent(inode, ordered_extent);

	/* once for us */
	btrfs_put_ordered_extent(ordered_extent);
	/* once for the tree */
	btrfs_put_ordered_extent(ordered_extent);

	return ret;
}
int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered)
{
	if (btrfs_is_zoned(btrfs_sb(ordered->inode->i_sb)) &&
	    !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
		btrfs_finish_ordered_zoned(ordered);
	return btrfs_finish_one_ordered(ordered);
}
/*
 * Verify the checksum for a single sector without any extra action that
 * depends on the type of I/O.
 */
int btrfs_check_sector_csum(struct btrfs_fs_info *fs_info, struct page *page,
			    u32 pgoff, u8 *csum, const u8 * const csum_expected)
{
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	char *kaddr;

	ASSERT(pgoff + fs_info->sectorsize <= PAGE_SIZE);

	shash->tfm = fs_info->csum_shash;

	kaddr = kmap_local_page(page) + pgoff;
	crypto_shash_digest(shash, kaddr, fs_info->sectorsize, csum);
	kunmap_local(kaddr);

	if (memcmp(csum, csum_expected, fs_info->csum_size))
		return -EIO;
	return 0;
}
/*
 * Verify the checksum of a single data sector.
 *
 * @bbio:	btrfs_io_bio which contains the csum
 * @dev:	device the sector is on
 * @bio_offset:	offset to the beginning of the bio (in bytes)
 * @bv:		bio_vec to check
 *
 * Check if the checksum on a data block is valid. When a checksum mismatch is
 * detected, report the error and fill the corrupted range with zero.
 *
 * Return %true if the sector is ok or had no checksum to start with, else %false.
 */
bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev,
			u32 bio_offset, struct bio_vec *bv)
{
	struct btrfs_inode *inode = bbio->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 file_offset = bbio->file_offset + bio_offset;
	u64 end = file_offset + bv->bv_len - 1;
	u8 *csum_expected;
	u8 csum[BTRFS_CSUM_SIZE];

	ASSERT(bv->bv_len == fs_info->sectorsize);

	if (!bbio->csum)
		return true;

	if (btrfs_is_data_reloc_root(inode->root) &&
	    test_range_bit(&inode->io_tree, file_offset, end, EXTENT_NODATASUM,
			   1, NULL)) {
		/* Skip the range without csum for data reloc inode */
		clear_extent_bits(&inode->io_tree, file_offset, end,
				  EXTENT_NODATASUM);
		return true;
	}

	csum_expected = bbio->csum + (bio_offset >> fs_info->sectorsize_bits) *
				fs_info->csum_size;
	if (btrfs_check_sector_csum(fs_info, bv->bv_page, bv->bv_offset, csum,
				    csum_expected))
		goto zeroit;
	return true;

zeroit:
	btrfs_print_data_csum_error(inode, file_offset, csum, csum_expected,
				    bbio->mirror_num);
	if (dev)
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS);
	memzero_bvec(bv);
	return false;
}
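
/*
 * Editor's note: worked numbers for the csum lookup above (illustrative,
 * assuming 4K sectors and the default CRC32C csums, i.e. sectorsize_bits
 * == 12 and csum_size == 4). For bio_offset == 16K the sector index is
 * 16K >> 12 == 4, so csum_expected points at byte offset 4 * 4 == 16 of
 * bbio->csum.
 */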
/*
 * btrfs_add_delayed_iput - perform a delayed iput on @inode
 *
 * @inode: The inode we want to perform iput on
 *
 * This function uses the generic vfs_inode::i_count to track whether we should
 * just decrement it (in case it's > 1) or if this is the last iput then link
 * the inode to the delayed iput machinery. Delayed iputs are processed at
 * transaction commit time/superblock commit/cleaner kthread.
 */
void btrfs_add_delayed_iput(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	unsigned long flags;

	if (atomic_add_unless(&inode->vfs_inode.i_count, -1, 1))
		return;

	atomic_inc(&fs_info->nr_delayed_iputs);
	/*
	 * Need to be irq safe here because we can be called from either an irq
	 * context (see bio.c and btrfs_put_ordered_extent()) or a non-irq
	 * context.
	 */
	spin_lock_irqsave(&fs_info->delayed_iput_lock, flags);
	ASSERT(list_empty(&inode->delayed_iput));
	list_add_tail(&inode->delayed_iput, &fs_info->delayed_iputs);
	spin_unlock_irqrestore(&fs_info->delayed_iput_lock, flags);
	if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags))
		wake_up_process(fs_info->cleaner_kthread);
}
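
/*
 * Editor's note (illustrative, not from the original source): if i_count
 * is 3, the atomic_add_unless() above simply drops it to 2 and returns;
 * only when i_count would reach zero is the inode linked onto
 * fs_info->delayed_iputs for the cleaner kthread, instead of running the
 * final iput() in a context (e.g. irq) where that is not safe.
 */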
static void run_delayed_iput_locked(struct btrfs_fs_info *fs_info,
				    struct btrfs_inode *inode)
{
	list_del_init(&inode->delayed_iput);
	spin_unlock_irq(&fs_info->delayed_iput_lock);
	iput(&inode->vfs_inode);
	if (atomic_dec_and_test(&fs_info->nr_delayed_iputs))
		wake_up(&fs_info->delayed_iputs_wait);
	spin_lock_irq(&fs_info->delayed_iput_lock);
}
static void btrfs_run_delayed_iput(struct btrfs_fs_info *fs_info,
				   struct btrfs_inode *inode)
{
	if (!list_empty(&inode->delayed_iput)) {
		spin_lock_irq(&fs_info->delayed_iput_lock);
		if (!list_empty(&inode->delayed_iput))
			run_delayed_iput_locked(fs_info, inode);
		spin_unlock_irq(&fs_info->delayed_iput_lock);
	}
}
void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
{
	/*
	 * btrfs_put_ordered_extent() can run in irq context (see bio.c), which
	 * calls btrfs_add_delayed_iput() and that needs to lock
	 * fs_info->delayed_iput_lock. So we need to disable irqs here to
	 * prevent a deadlock.
	 */
	spin_lock_irq(&fs_info->delayed_iput_lock);
	while (!list_empty(&fs_info->delayed_iputs)) {
		struct btrfs_inode *inode;

		inode = list_first_entry(&fs_info->delayed_iputs,
					 struct btrfs_inode, delayed_iput);
		run_delayed_iput_locked(fs_info, inode);
		if (need_resched()) {
			spin_unlock_irq(&fs_info->delayed_iput_lock);
			cond_resched();
			spin_lock_irq(&fs_info->delayed_iput_lock);
		}
	}
	spin_unlock_irq(&fs_info->delayed_iput_lock);
}
/*
 * Wait for flushing all delayed iputs
 *
 * @fs_info:  the filesystem
 *
 * This will wait on any delayed iputs that are currently running with KILLABLE
 * set. Once they are all done running we will return, unless we are killed in
 * which case we return EINTR. This helps in user operations like fallocate etc
 * that might get blocked on the iputs.
 *
 * Return EINTR if we were killed, 0 if nothing's pending
 */
int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info)
{
	int ret = wait_event_killable(fs_info->delayed_iputs_wait,
			atomic_read(&fs_info->nr_delayed_iputs) == 0);
	if (ret)
		return -EINTR;
	return 0;
}
/*
 * This creates an orphan entry for the given inode in case something goes wrong
 * in the middle of an unlink.
 */
int btrfs_orphan_add(struct btrfs_trans_handle *trans,
		     struct btrfs_inode *inode)
{
	int ret;

	ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode));
	if (ret && ret != -EEXIST) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	return 0;
}
/*
 * We have done the delete so we can go ahead and remove the orphan item for
 * this particular inode.
 */
static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
			    struct btrfs_inode *inode)
{
	return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode));
}
/*
 * this cleans up any orphans that may be left on the list from the last use
 * of this root.
 */
int btrfs_orphan_cleanup(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key, found_key;
	struct btrfs_trans_handle *trans;
	struct inode *inode;
	u64 last_objectid = 0;
	int ret = 0, nr_unlink = 0;

	if (test_and_set_bit(BTRFS_ROOT_ORPHAN_CLEANUP, &root->state))
		return 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}
	path->reada = READA_BACK;

	key.objectid = BTRFS_ORPHAN_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		/*
		 * if ret == 0 means we found what we were searching for, which
		 * is weird, but possible, so only screw with path if we didn't
		 * find the key and see if we have stuff that matches
		 */
		if (ret > 0) {
			ret = 0;
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		/* pull out the item */
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		/* make sure the item matches what we want */
		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
			break;
		if (found_key.type != BTRFS_ORPHAN_ITEM_KEY)
			break;

		/* release the path since we're done with it */
		btrfs_release_path(path);

		/*
		 * this is where we are basically btrfs_lookup, without the
		 * crossing root thing. we store the inode number in the
		 * offset of the orphan item.
		 */

		if (found_key.offset == last_objectid) {
			/*
			 * We found the same inode as before. This means we were
			 * not able to remove its items via eviction triggered
			 * by an iput(). A transaction abort may have happened,
			 * due to -ENOSPC for example, so try to grab the error
			 * that led to a transaction abort, if any.
			 */
			btrfs_err(fs_info,
				  "Error removing orphan entry, stopping orphan cleanup");
			ret = BTRFS_FS_ERROR(fs_info) ?: -EINVAL;
			goto out;
		}

		last_objectid = found_key.offset;

		found_key.objectid = found_key.offset;
		found_key.type = BTRFS_INODE_ITEM_KEY;
		found_key.offset = 0;
		inode = btrfs_iget(fs_info->sb, last_objectid, root);
		if (IS_ERR(inode)) {
			ret = PTR_ERR(inode);
			inode = NULL;
			if (ret != -ENOENT)
				goto out;
		}

		if (!inode && root == fs_info->tree_root) {
			struct btrfs_root *dead_root;
			int is_dead_root = 0;

			/*
			 * This is an orphan in the tree root. Currently these
			 * could come from 2 sources:
			 *  a) a root (snapshot/subvolume) deletion in progress
			 *  b) a free space cache inode
			 * We need to distinguish those two, as the orphan item
			 * for a root must not get deleted before the deletion
			 * of the snapshot/subvolume's tree completes.
			 *
			 * btrfs_find_orphan_roots() ran before us, which has
			 * found all deleted roots and loaded them into
			 * fs_info->fs_roots_radix. So here we can find if an
			 * orphan item corresponds to a deleted root by looking
			 * up the root from that radix tree.
			 */

			spin_lock(&fs_info->fs_roots_radix_lock);
			dead_root = radix_tree_lookup(&fs_info->fs_roots_radix,
					(unsigned long)found_key.objectid);
			if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0)
				is_dead_root = 1;
			spin_unlock(&fs_info->fs_roots_radix_lock);

			if (is_dead_root) {
				/* prevent this orphan from being found again */
				key.offset = found_key.objectid - 1;
				continue;
			}
		}

		/*
		 * If we have an inode with links, there are a couple of
		 * possibilities:
		 *
		 * 1. We were halfway through creating fsverity metadata for the
		 *    file. In that case, the orphan item represents incomplete
		 *    fsverity metadata which must be cleaned up with
		 *    btrfs_drop_verity_items and deleting the orphan item.
		 *
		 * 2. Old kernels (before v3.12) used to create an
		 *    orphan item for truncate indicating that there were possibly
		 *    extent items past i_size that needed to be deleted. In v3.12,
		 *    truncate was changed to update i_size in sync with the extent
		 *    items, but the (useless) orphan item was still created. Since
		 *    v4.18, we don't create the orphan item for truncate at all.
		 *
		 * So, this item could mean that we need to do a truncate, but
		 * only if this filesystem was last used on a pre-v3.12 kernel
		 * and was not cleanly unmounted. The odds of that are quite
		 * slim, and it's a pain to do the truncate now, so just delete
		 * the orphan item.
		 *
		 * It's also possible that this orphan item was supposed to be
		 * deleted but wasn't. The inode number may have been reused,
		 * but either way, we can delete the orphan item.
		 */
		if (!inode || inode->i_nlink) {
			if (inode) {
				ret = btrfs_drop_verity_items(BTRFS_I(inode));
				iput(inode);
				inode = NULL;
				if (ret)
					goto out;
			}
			trans = btrfs_start_transaction(root, 1);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				goto out;
			}
			btrfs_debug(fs_info, "auto deleting %Lu",
				    found_key.objectid);
			ret = btrfs_del_orphan_item(trans, root,
						    found_key.objectid);
			btrfs_end_transaction(trans);
			if (ret)
				goto out;
			continue;
		}

		nr_unlink++;

		/* this will do delete_inode and everything for us */
		iput(inode);
	}
	/* release the path since we're done with it */
	btrfs_release_path(path);

	if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
		trans = btrfs_join_transaction(root);
		if (!IS_ERR(trans))
			btrfs_end_transaction(trans);
	}

	if (nr_unlink)
		btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink);

out:
	if (ret)
		btrfs_err(fs_info, "could not do orphan cleanup %d", ret);
	btrfs_free_path(path);
	return ret;
}
/*
 * very simple check to peek ahead in the leaf looking for xattrs. If we
 * don't find any xattrs, we know there can't be any acls.
 *
 * slot is the slot the inode is in, objectid is the objectid of the inode
 */
static noinline int acls_after_inode_item(struct extent_buffer *leaf,
					  int slot, u64 objectid,
					  int *first_xattr_slot)
{
	u32 nritems = btrfs_header_nritems(leaf);
	struct btrfs_key found_key;
	static u64 xattr_access = 0;
	static u64 xattr_default = 0;
	int scanned = 0;

	if (!xattr_access) {
		xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS,
					strlen(XATTR_NAME_POSIX_ACL_ACCESS));
		xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT,
					strlen(XATTR_NAME_POSIX_ACL_DEFAULT));
	}

	slot++;
	*first_xattr_slot = -1;
	while (slot < nritems) {
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/* we found a different objectid, there must not be acls */
		if (found_key.objectid != objectid)
			return 0;

		/* we found an xattr, assume we've got an acl */
		if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
			if (*first_xattr_slot == -1)
				*first_xattr_slot = slot;
			if (found_key.offset == xattr_access ||
			    found_key.offset == xattr_default)
				return 1;
		}

		/*
		 * we found a key greater than an xattr key, there can't
		 * be any acls later on
		 */
		if (found_key.type > BTRFS_XATTR_ITEM_KEY)
			return 0;

		slot++;
		scanned++;

		/*
		 * it goes inode, inode backrefs, xattrs, extents,
		 * so if there are a ton of hard links to an inode there can
		 * be a lot of backrefs. Don't waste time searching too hard,
		 * this is just an optimization
		 */
		if (scanned >= 8)
			break;
	}
	/* we hit the end of the leaf before we found an xattr or
	 * something larger than an xattr. We have to assume the inode
	 * has acls
	 */
	if (*first_xattr_slot == -1)
		*first_xattr_slot = slot;
	return 1;
}
/*
 * read an inode from the btree into the in-memory inode
 */
static int btrfs_read_locked_inode(struct inode *inode,
				   struct btrfs_path *in_path)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_path *path = in_path;
	struct extent_buffer *leaf;
	struct btrfs_inode_item *inode_item;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key location;
	unsigned long ptr;
	int maybe_acls;
	u32 rdev;
	int ret;
	bool filled = false;
	int first_xattr_slot;

	ret = btrfs_fill_inode(inode, &rdev);
	if (!ret)
		filled = true;

	if (!path) {
		path = btrfs_alloc_path();
		if (!path)
			return -ENOMEM;
	}

	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));

	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
	if (ret) {
		if (path != in_path)
			btrfs_free_path(path);
		return ret;
	}

	leaf = path->nodes[0];

	if (filled)
		goto cache_index;

	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
	set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
	i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
	i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
	btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item));
	btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
			round_up(i_size_read(inode), fs_info->sectorsize));

	inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime);
	inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime);

	inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime);
	inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime);

	inode_set_ctime(inode, btrfs_timespec_sec(leaf, &inode_item->ctime),
			btrfs_timespec_nsec(leaf, &inode_item->ctime));

	BTRFS_I(inode)->i_otime.tv_sec =
		btrfs_timespec_sec(leaf, &inode_item->otime);
	BTRFS_I(inode)->i_otime.tv_nsec =
		btrfs_timespec_nsec(leaf, &inode_item->otime);

	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
	BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);

	inode_set_iversion_queried(inode,
				   btrfs_inode_sequence(leaf, inode_item));
	inode->i_generation = BTRFS_I(inode)->generation;
	inode->i_rdev = 0;
	rdev = btrfs_inode_rdev(leaf, inode_item);

	BTRFS_I(inode)->index_cnt = (u64)-1;
	btrfs_inode_split_flags(btrfs_inode_flags(leaf, inode_item),
				&BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags);

cache_index:
	/*
	 * If we were modified in the current generation and evicted from memory
	 * and then re-read we need to do a full sync since we don't have any
	 * idea about which extents were modified before we were evicted from
	 * memory.
	 *
	 * This is required for both inode re-read from disk and delayed inode
	 * in delayed_nodes_tree.
	 */
	if (BTRFS_I(inode)->last_trans == fs_info->generation)
		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			&BTRFS_I(inode)->runtime_flags);

	/*
	 * We don't persist the id of the transaction where an unlink operation
	 * against the inode was last made. So here we assume the inode might
	 * have been evicted, and therefore the exact value of last_unlink_trans
	 * lost, and set it to last_trans to avoid metadata inconsistencies
	 * between the inode and its parent if the inode is fsync'ed and the log
	 * replayed. For example, in the scenario:
	 *
	 * touch mydir/foo
	 * ln mydir/foo mydir/bar
	 * sync
	 * unlink mydir/bar
	 * echo 2 > /proc/sys/vm/drop_caches   # evicts inode
	 * xfs_io -c fsync mydir/foo
	 * <power failure>
	 * mount fs, triggers fsync log replay
	 *
	 * We must make sure that when we fsync our inode foo we also log its
	 * parent inode, otherwise after log replay the parent still has the
	 * dentry with the "bar" name but our inode foo has a link count of 1
	 * and doesn't have an inode ref with the name "bar" anymore.
	 *
	 * Setting last_unlink_trans to last_trans is a pessimistic approach,
	 * but it guarantees correctness at the expense of occasional full
	 * transaction commits on fsync if our inode is a directory, or if our
	 * inode is not a directory, logging its parent unnecessarily.
	 */
	BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;

	/*
	 * Same logic as for last_unlink_trans. We don't persist the generation
	 * of the last transaction where this inode was used for a reflink
	 * operation, so after eviction and reloading the inode we must be
	 * pessimistic and assume the last transaction that modified the inode.
	 */
	BTRFS_I(inode)->last_reflink_trans = BTRFS_I(inode)->last_trans;

	path->slots[0]++;
	if (inode->i_nlink != 1 ||
	    path->slots[0] >= btrfs_header_nritems(leaf))
		goto cache_acl;

	btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
	if (location.objectid != btrfs_ino(BTRFS_I(inode)))
		goto cache_acl;

	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	if (location.type == BTRFS_INODE_REF_KEY) {
		struct btrfs_inode_ref *ref;

		ref = (struct btrfs_inode_ref *)ptr;
		BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref);
	} else if (location.type == BTRFS_INODE_EXTREF_KEY) {
		struct btrfs_inode_extref *extref;

		extref = (struct btrfs_inode_extref *)ptr;
		BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf,
								     extref);
	}
cache_acl:
	/*
	 * try to precache a NULL acl entry for files that don't have
	 * any xattrs or acls
	 */
	maybe_acls = acls_after_inode_item(leaf, path->slots[0],
			btrfs_ino(BTRFS_I(inode)), &first_xattr_slot);
	if (first_xattr_slot != -1) {
		path->slots[0] = first_xattr_slot;
		ret = btrfs_load_inode_props(inode, path);
		if (ret)
			btrfs_err(fs_info,
				  "error loading props for ino %llu (root %llu): %d",
				  btrfs_ino(BTRFS_I(inode)),
				  root->root_key.objectid, ret);
	}
	if (path != in_path)
		btrfs_free_path(path);

	if (!maybe_acls)
		cache_no_acl(inode);

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_mapping->a_ops = &btrfs_aops;
		inode->i_fop = &btrfs_file_operations;
		inode->i_op = &btrfs_file_inode_operations;
		break;
	case S_IFDIR:
		inode->i_fop = &btrfs_dir_file_operations;
		inode->i_op = &btrfs_dir_inode_operations;
		break;
	case S_IFLNK:
		inode->i_op = &btrfs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &btrfs_aops;
		break;
	default:
		inode->i_op = &btrfs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, rdev);
		break;
	}

	btrfs_sync_inode_flags_to_i_flags(inode);
	return 0;
}
/*
 * given a leaf and an inode, copy the inode fields into the leaf
 */
static void fill_inode_item(struct btrfs_trans_handle *trans,
			    struct extent_buffer *leaf,
			    struct btrfs_inode_item *item,
			    struct inode *inode)
{
	struct btrfs_map_token token;
	u64 flags;

	btrfs_init_map_token(&token, leaf);

	btrfs_set_token_inode_uid(&token, item, i_uid_read(inode));
	btrfs_set_token_inode_gid(&token, item, i_gid_read(inode));
	btrfs_set_token_inode_size(&token, item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_token_inode_mode(&token, item, inode->i_mode);
	btrfs_set_token_inode_nlink(&token, item, inode->i_nlink);

	btrfs_set_token_timespec_sec(&token, &item->atime,
				     inode->i_atime.tv_sec);
	btrfs_set_token_timespec_nsec(&token, &item->atime,
				      inode->i_atime.tv_nsec);

	btrfs_set_token_timespec_sec(&token, &item->mtime,
				     inode->i_mtime.tv_sec);
	btrfs_set_token_timespec_nsec(&token, &item->mtime,
				      inode->i_mtime.tv_nsec);

	btrfs_set_token_timespec_sec(&token, &item->ctime,
				     inode_get_ctime(inode).tv_sec);
	btrfs_set_token_timespec_nsec(&token, &item->ctime,
				      inode_get_ctime(inode).tv_nsec);

	btrfs_set_token_timespec_sec(&token, &item->otime,
				     BTRFS_I(inode)->i_otime.tv_sec);
	btrfs_set_token_timespec_nsec(&token, &item->otime,
				      BTRFS_I(inode)->i_otime.tv_nsec);

	btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode));
	btrfs_set_token_inode_generation(&token, item,
					 BTRFS_I(inode)->generation);
	btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode));
	btrfs_set_token_inode_transid(&token, item, trans->transid);
	btrfs_set_token_inode_rdev(&token, item, inode->i_rdev);
	flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
					  BTRFS_I(inode)->ro_flags);
	btrfs_set_token_inode_flags(&token, item, flags);
	btrfs_set_token_inode_block_group(&token, item, 0);
}
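
/*
 * Illustrative note on the map token used above (a sketch, not additional
 * driver logic): btrfs_init_map_token() caches the currently mapped extent
 * buffer page, so a run of btrfs_set_token_*() setters against the same
 * leaf avoids redoing the page lookup for every field:
 *
 *	struct btrfs_map_token token;
 *
 *	btrfs_init_map_token(&token, leaf);
 *	btrfs_set_token_inode_uid(&token, item, i_uid_read(inode));
 *	btrfs_set_token_inode_gid(&token, item, i_gid_read(inode));
 */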
/*
 * copy everything in the in-memory inode into the btree.
 */
static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct btrfs_inode *inode)
{
	struct btrfs_inode_item *inode_item;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_lookup_inode(trans, root, path, &inode->location, 1);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto failed;
	}

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);

	fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_set_inode_last_trans(trans, inode);
	ret = 0;
failed:
	btrfs_free_path(path);
	return ret;
}
/*
 * copy everything in the in-memory inode into the btree.
 */
noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	/*
	 * If the inode is a free space inode, we can deadlock during commit
	 * if we put it into the delayed code.
	 *
	 * The data relocation inode should also be directly updated
	 * without delay
	 */
	if (!btrfs_is_free_space_inode(inode)
	    && !btrfs_is_data_reloc_root(root)
	    && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
		btrfs_update_root_times(trans, root);

		ret = btrfs_delayed_update_inode(trans, root, inode);
		if (!ret)
			btrfs_set_inode_last_trans(trans, inode);
		return ret;
	}

	return btrfs_update_inode_item(trans, root, inode);
}
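
/*
 * Illustrative usage sketch only, mirroring callers elsewhere in this file:
 * the in-memory inode is modified first and then mirrored to the btree
 * within a transaction, e.g.
 *
 *	inode_inc_iversion(&inode->vfs_inode);
 *	ret = btrfs_update_inode(trans, root, inode);
 *	if (ret)
 *		btrfs_abort_transaction(trans, ret);
 */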
int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct btrfs_inode *inode)
{
	int ret;

	ret = btrfs_update_inode(trans, root, inode);
	if (ret == -ENOSPC)
		return btrfs_update_inode_item(trans, root, inode);
	return ret;
}
/*
 * unlink helper that gets used here in inode.c and in the tree logging
 * recovery code.  It remove a link in a directory with a given name, and
 * also drops the back refs in the inode to the directory
 */
static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
				struct btrfs_inode *dir,
				struct btrfs_inode *inode,
				const struct fscrypt_str *name,
				struct btrfs_rename_ctx *rename_ctx)
{
	struct btrfs_root *root = dir->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	int ret = 0;
	struct btrfs_dir_item *di;
	u64 index;
	u64 ino = btrfs_ino(inode);
	u64 dir_ino = btrfs_ino(dir);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	di = btrfs_lookup_dir_item(trans, root, path, dir_ino, name, -1);
	if (IS_ERR_OR_NULL(di)) {
		ret = di ? PTR_ERR(di) : -ENOENT;
		goto err;
	}
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	if (ret)
		goto err;
	btrfs_release_path(path);

	/*
	 * If we don't have dir index, we have to get it by looking up
	 * the inode ref, since we get the inode ref, remove it directly,
	 * it is unnecessary to do delayed deletion.
	 *
	 * But if we have dir index, needn't search inode ref to get it.
	 * Since the inode ref is close to the inode item, it is better
	 * that we delay to delete it, and just do this deletion when
	 * we update the inode item.
	 */
	if (inode->dir_index) {
		ret = btrfs_delayed_delete_inode_ref(inode);
		if (!ret) {
			index = inode->dir_index;
			goto skip_backref;
		}
	}

	ret = btrfs_del_inode_ref(trans, root, name, ino, dir_ino, &index);
	if (ret) {
		btrfs_info(fs_info,
			"failed to delete reference to %.*s, inode %llu parent %llu",
			name->len, name->name, ino, dir_ino);
		btrfs_abort_transaction(trans, ret);
		goto err;
	}
skip_backref:
	if (rename_ctx)
		rename_ctx->index = index;

	ret = btrfs_delete_delayed_dir_index(trans, dir, index);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto err;
	}

	/*
	 * If we are in a rename context, we don't need to update anything in the
	 * log. That will be done later during the rename by btrfs_log_new_name().
	 * Besides that, doing it here would only cause extra unnecessary btree
	 * operations on the log tree, increasing latency for applications.
	 */
	if (!rename_ctx) {
		btrfs_del_inode_ref_in_log(trans, root, name, inode, dir_ino);
		btrfs_del_dir_entries_in_log(trans, root, name, dir, index);
	}

	/*
	 * If we have a pending delayed iput we could end up with the final iput
	 * being run in btrfs-cleaner context.  If we have enough of these built
	 * up we can end up burning a lot of time in btrfs-cleaner without any
	 * way to throttle the unlinks.  Since we're currently holding a ref on
	 * the inode we can run the delayed iput here without any issues as the
	 * final iput won't be done until after we drop the ref we're currently
	 * holding.
	 */
	btrfs_run_delayed_iput(fs_info, inode);
err:
	btrfs_free_path(path);
	if (ret)
		goto out;

	btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2);
	inode_inc_iversion(&inode->vfs_inode);
	inode_inc_iversion(&dir->vfs_inode);
	inode_set_ctime_current(&inode->vfs_inode);
	dir->vfs_inode.i_mtime = inode_set_ctime_current(&dir->vfs_inode);
	ret = btrfs_update_inode(trans, root, dir);
out:
	return ret;
}
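
/*
 * Note on the "name->len * 2" adjustment above: a directory's i_size in
 * btrfs counts every name twice, once for the DIR_ITEM and once for the
 * DIR_INDEX item, so removing one link shrinks i_size by twice the name
 * length.
 */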
int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
		       struct btrfs_inode *dir, struct btrfs_inode *inode,
		       const struct fscrypt_str *name)
{
	int ret;

	ret = __btrfs_unlink_inode(trans, dir, inode, name, NULL);
	if (!ret) {
		drop_nlink(&inode->vfs_inode);
		ret = btrfs_update_inode(trans, inode->root, inode);
	}
	return ret;
}
/*
 * helper to start transaction for unlink and rmdir.
 *
 * unlink and rmdir are special in btrfs, they do not always free space, so
 * if we cannot make our reservations the normal way try and see if there is
 * plenty of slack room in the global reserve to migrate, otherwise we cannot
 * allow the unlink to occur.
 */
static struct btrfs_trans_handle *__unlink_start_trans(struct btrfs_inode *dir)
{
	struct btrfs_root *root = dir->root;

	return btrfs_start_transaction_fallback_global_rsv(root,
						BTRFS_UNLINK_METADATA_UNITS);
}
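
/*
 * Illustrative only - callers pair this helper with the unlink itself,
 * roughly:
 *
 *	trans = __unlink_start_trans(BTRFS_I(dir));
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(inode), &name);
 *	btrfs_end_transaction(trans);
 */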
static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct btrfs_trans_handle *trans;
	struct inode *inode = d_inode(dentry);
	int ret;
	struct fscrypt_name fname;

	ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname);
	if (ret)
		return ret;

	/* This needs to handle no-key deletions later on */

	trans = __unlink_start_trans(BTRFS_I(dir));
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto fscrypt_free;
	}

	btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
				false);

	ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
				 &fname.disk_name);
	if (ret)
		goto end_trans;

	if (inode->i_nlink == 0) {
		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
		if (ret)
			goto end_trans;
	}

end_trans:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(BTRFS_I(dir)->root->fs_info);
fscrypt_free:
	fscrypt_free_filename(&fname);
	return ret;
}
static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
			       struct btrfs_inode *dir, struct dentry *dentry)
{
	struct btrfs_root *root = dir->root;
	struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	u64 index;
	int ret;
	u64 objectid;
	u64 dir_ino = btrfs_ino(dir);
	struct fscrypt_name fname;

	ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname);
	if (ret)
		return ret;

	/* This needs to handle no-key deletions later on */

	if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) {
		objectid = inode->root->root_key.objectid;
	} else if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
		objectid = inode->location.objectid;
	} else {
		WARN_ON(1);
		fscrypt_free_filename(&fname);
		return -EINVAL;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
				   &fname.disk_name, -1);
	if (IS_ERR_OR_NULL(di)) {
		ret = di ? PTR_ERR(di) : -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	btrfs_dir_item_key_to_cpu(leaf, di, &key);
	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	btrfs_release_path(path);

	/*
	 * This is a placeholder inode for a subvolume we didn't have a
	 * reference to at the time of the snapshot creation.  In the meantime
	 * we could have renamed the real subvol link into our snapshot, so
	 * depending on btrfs_del_root_ref to return -ENOENT here is incorrect.
	 * Instead simply lookup the dir_index_item for this entry so we can
	 * remove it.  Otherwise we know we have a ref to the root and we can
	 * call btrfs_del_root_ref, and it _shouldn't_ fail.
	 */
	if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
		di = btrfs_search_dir_index_item(root, path, dir_ino, &fname.disk_name);
		if (IS_ERR_OR_NULL(di)) {
			if (!di)
				ret = -ENOENT;
			else
				ret = PTR_ERR(di);
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		index = key.offset;
		btrfs_release_path(path);
	} else {
		ret = btrfs_del_root_ref(trans, objectid,
					 root->root_key.objectid, dir_ino,
					 &index, &fname.disk_name);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}

	ret = btrfs_delete_delayed_dir_index(trans, dir, index);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	btrfs_i_size_write(dir, dir->vfs_inode.i_size - fname.disk_name.len * 2);
	inode_inc_iversion(&dir->vfs_inode);
	dir->vfs_inode.i_mtime = inode_set_ctime_current(&dir->vfs_inode);
	ret = btrfs_update_inode_fallback(trans, root, dir);
	if (ret)
		btrfs_abort_transaction(trans, ret);
out:
	btrfs_free_path(path);
	fscrypt_free_filename(&fname);
	return ret;
}
/*
 * Helper to check if the subvolume references other subvolumes or if it's
 * default.
 */
static noinline int may_destroy_subvol(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	struct fscrypt_str name = FSTR_INIT("default", 7);
	u64 dir_id;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* Make sure this root isn't set as the default subvol */
	dir_id = btrfs_super_root_dir(fs_info->super_copy);
	di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path,
				   dir_id, &name, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
		if (key.objectid == root->root_key.objectid) {
			ret = -EPERM;
			btrfs_err(fs_info,
				  "deleting default subvolume %llu is not allowed",
				  key.objectid);
			goto out;
		}
		btrfs_release_path(path);
	}

	key.objectid = root->root_key.objectid;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	ret = 0;
	if (path->slots[0] > 0) {
		path->slots[0]--;
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid == root->root_key.objectid &&
		    key.type == BTRFS_ROOT_REF_KEY)
			ret = -ENOTEMPTY;
	}
out:
	btrfs_free_path(path);
	return ret;
}
/* Delete all dentries for inodes belonging to the root */
static void btrfs_prune_dentries(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;
	struct rb_node *prev;
	struct btrfs_inode *entry;
	struct inode *inode;
	u64 objectid = 0;

	if (!BTRFS_FS_ERROR(fs_info))
		WARN_ON(btrfs_root_refs(&root->root_item) != 0);

	spin_lock(&root->inode_lock);
again:
	node = root->inode_tree.rb_node;
	prev = NULL;
	while (node) {
		prev = node;
		entry = rb_entry(node, struct btrfs_inode, rb_node);

		if (objectid < btrfs_ino(entry))
			node = node->rb_left;
		else if (objectid > btrfs_ino(entry))
			node = node->rb_right;
		else
			break;
	}
	if (!node) {
		while (prev) {
			entry = rb_entry(prev, struct btrfs_inode, rb_node);
			if (objectid <= btrfs_ino(entry)) {
				node = prev;
				break;
			}
			prev = rb_next(prev);
		}
	}
	while (node) {
		entry = rb_entry(node, struct btrfs_inode, rb_node);
		objectid = btrfs_ino(entry) + 1;
		inode = igrab(&entry->vfs_inode);
		if (inode) {
			spin_unlock(&root->inode_lock);
			if (atomic_read(&inode->i_count) > 1)
				d_prune_aliases(inode);
			/*
			 * btrfs_drop_inode will have it removed from the inode
			 * cache when its usage count hits zero.
			 */
			iput(inode);
			cond_resched();
			spin_lock(&root->inode_lock);
			goto again;
		}

		if (cond_resched_lock(&root->inode_lock))
			goto again;

		node = rb_next(node);
	}
	spin_unlock(&root->inode_lock);
}
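
/*
 * Note on the loop above: inode_lock cannot be held across iput(), so
 * whenever an inode is grabbed the lock is dropped, the aliases are
 * pruned, and the rb-tree search restarts from the last objectid + 1.
 */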
int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb);
	struct btrfs_root *root = dir->root;
	struct inode *inode = d_inode(dentry);
	struct btrfs_root *dest = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct btrfs_block_rsv block_rsv;
	u64 root_flags;
	int ret;

	/*
	 * Don't allow to delete a subvolume with send in progress. This is
	 * inside the inode lock so the error handling that has to drop the bit
	 * again is not run concurrently.
	 */
	spin_lock(&dest->root_item_lock);
	if (dest->send_in_progress) {
		spin_unlock(&dest->root_item_lock);
		btrfs_warn(fs_info,
			   "attempt to delete subvolume %llu during send",
			   dest->root_key.objectid);
		return -EPERM;
	}
	if (atomic_read(&dest->nr_swapfiles)) {
		spin_unlock(&dest->root_item_lock);
		btrfs_warn(fs_info,
			   "attempt to delete subvolume %llu with active swapfile",
			   root->root_key.objectid);
		return -EPERM;
	}
	root_flags = btrfs_root_flags(&dest->root_item);
	btrfs_set_root_flags(&dest->root_item,
			     root_flags | BTRFS_ROOT_SUBVOL_DEAD);
	spin_unlock(&dest->root_item_lock);

	down_write(&fs_info->subvol_sem);

	ret = may_destroy_subvol(dest);
	if (ret)
		goto out_up_write;

	btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
	/*
	 * One for dir inode,
	 * two for dir entries,
	 * two for root ref/backref.
	 */
	ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
	if (ret)
		goto out_up_write;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_release;
	}
	trans->block_rsv = &block_rsv;
	trans->bytes_reserved = block_rsv.size;

	btrfs_record_snapshot_destroy(trans, dir);

	ret = btrfs_unlink_subvol(trans, dir, dentry);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_end_trans;
	}

	ret = btrfs_record_root_in_trans(trans, dest);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_end_trans;
	}

	memset(&dest->root_item.drop_progress, 0,
	       sizeof(dest->root_item.drop_progress));
	btrfs_set_root_drop_level(&dest->root_item, 0);
	btrfs_set_root_refs(&dest->root_item, 0);

	if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) {
		ret = btrfs_insert_orphan_item(trans,
					fs_info->tree_root,
					dest->root_key.objectid);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out_end_trans;
		}
	}

	ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid,
				  BTRFS_UUID_KEY_SUBVOL,
				  dest->root_key.objectid);
	if (ret && ret != -ENOENT) {
		btrfs_abort_transaction(trans, ret);
		goto out_end_trans;
	}
	if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) {
		ret = btrfs_uuid_tree_remove(trans,
					  dest->root_item.received_uuid,
					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
					  dest->root_key.objectid);
		if (ret && ret != -ENOENT) {
			btrfs_abort_transaction(trans, ret);
			goto out_end_trans;
		}
	}

	free_anon_bdev(dest->anon_dev);
	dest->anon_dev = 0;
out_end_trans:
	trans->block_rsv = NULL;
	trans->bytes_reserved = 0;
	ret = btrfs_end_transaction(trans);
	inode->i_flags |= S_DEAD;
out_release:
	btrfs_subvolume_release_metadata(root, &block_rsv);
out_up_write:
	up_write(&fs_info->subvol_sem);
	if (ret) {
		spin_lock(&dest->root_item_lock);
		root_flags = btrfs_root_flags(&dest->root_item);
		btrfs_set_root_flags(&dest->root_item,
				root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
		spin_unlock(&dest->root_item_lock);
	} else {
		d_invalidate(dentry);
		btrfs_prune_dentries(dest);
		ASSERT(dest->send_in_progress == 0);
	}

	return ret;
}
static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	int err = 0;
	struct btrfs_trans_handle *trans;
	u64 last_unlink_trans;
	struct fscrypt_name fname;

	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
		return -ENOTEMPTY;
	if (btrfs_ino(BTRFS_I(inode)) == BTRFS_FIRST_FREE_OBJECTID) {
		if (unlikely(btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))) {
			btrfs_err(fs_info,
			"extent tree v2 doesn't support snapshot deletion yet");
			return -EOPNOTSUPP;
		}
		return btrfs_delete_subvolume(BTRFS_I(dir), dentry);
	}

	err = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname);
	if (err)
		return err;

	/* This needs to handle no-key deletions later on */

	trans = __unlink_start_trans(BTRFS_I(dir));
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_notrans;
	}

	if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
		err = btrfs_unlink_subvol(trans, BTRFS_I(dir), dentry);
		goto out;
	}

	err = btrfs_orphan_add(trans, BTRFS_I(inode));
	if (err)
		goto out;

	last_unlink_trans = BTRFS_I(inode)->last_unlink_trans;

	/* now the directory is empty */
	err = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
				 &fname.disk_name);
	if (!err) {
		btrfs_i_size_write(BTRFS_I(inode), 0);
		/*
		 * Propagate the last_unlink_trans value of the deleted dir to
		 * its parent directory. This is to prevent an unrecoverable
		 * log tree in the case we do something like this:
		 * 1) create dir foo
		 * 2) create snapshot under dir foo
		 * 3) delete the snapshot
		 * 4) rmdir foo
		 * 5) mkdir foo
		 * 6) fsync foo or some file inside foo
		 */
		if (last_unlink_trans >= trans->transid)
			BTRFS_I(dir)->last_unlink_trans = last_unlink_trans;
	}
out:
	btrfs_end_transaction(trans);
out_notrans:
	btrfs_btree_balance_dirty(fs_info);
	fscrypt_free_filename(&fname);

	return err;
}
/*
 * btrfs_truncate_block - read, zero a chunk and write a block
 * @inode - inode that we're zeroing
 * @from - the offset to start zeroing
 * @len - the length to zero, 0 to zero the entire range respective to the
 *	offset
 * @front - zero up to the offset instead of from the offset on
 *
 * This will find the block for the "from" offset and cow the block and zero the
 * part we want to zero.  This is used with truncate and hole punching.
 */
int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
			 int front)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct extent_changeset *data_reserved = NULL;
	bool only_release_metadata = false;
	u32 blocksize = fs_info->sectorsize;
	pgoff_t index = from >> PAGE_SHIFT;
	unsigned offset = from & (blocksize - 1);
	struct page *page;
	gfp_t mask = btrfs_alloc_write_mask(mapping);
	size_t write_bytes = blocksize;
	int ret = 0;
	u64 block_start;
	u64 block_end;

	if (IS_ALIGNED(offset, blocksize) &&
	    (!len || IS_ALIGNED(len, blocksize)))
		goto out;

	block_start = round_down(from, blocksize);
	block_end = block_start + blocksize - 1;

	ret = btrfs_check_data_free_space(inode, &data_reserved, block_start,
					  blocksize, false);
	if (ret < 0) {
		if (btrfs_check_nocow_lock(inode, block_start, &write_bytes, false) > 0) {
			/* For nocow case, no need to reserve data space */
			only_release_metadata = true;
		} else {
			goto out;
		}
	}
	ret = btrfs_delalloc_reserve_metadata(inode, blocksize, blocksize, false);
	if (ret < 0) {
		if (!only_release_metadata)
			btrfs_free_reserved_data_space(inode, data_reserved,
						       block_start, blocksize);
		goto out;
	}
again:
	page = find_or_create_page(mapping, index, mask);
	if (!page) {
		btrfs_delalloc_release_space(inode, data_reserved, block_start,
					     blocksize, true);
		btrfs_delalloc_release_extents(inode, blocksize);
		ret = -ENOMEM;
		goto out;
	}

	if (!PageUptodate(page)) {
		ret = btrfs_read_folio(NULL, page_folio(page));
		lock_page(page);
		if (page->mapping != mapping) {
			unlock_page(page);
			put_page(page);
			goto again;
		}
		if (!PageUptodate(page)) {
			ret = -EIO;
			goto out_unlock;
		}
	}

	/*
	 * We unlock the page after the io is completed and then re-lock it
	 * above.  release_folio() could have come in between that and cleared
	 * PagePrivate(), but left the page in the mapping.  Set the page mapped
	 * here to make sure it's properly set for the subpage stuff.
	 */
	ret = set_page_extent_mapped(page);
	if (ret < 0)
		goto out_unlock;

	wait_on_page_writeback(page);

	lock_extent(io_tree, block_start, block_end, &cached_state);

	ordered = btrfs_lookup_ordered_extent(inode, block_start);
	if (ordered) {
		unlock_extent(io_tree, block_start, block_end, &cached_state);
		unlock_page(page);
		put_page(page);
		btrfs_start_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	clear_extent_bit(&inode->io_tree, block_start, block_end,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
			 &cached_state);

	ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
					&cached_state);
	if (ret) {
		unlock_extent(io_tree, block_start, block_end, &cached_state);
		goto out_unlock;
	}

	if (offset != blocksize) {
		if (!len)
			len = blocksize - offset;
		if (front)
			memzero_page(page, (block_start - page_offset(page)),
				     offset);
		else
			memzero_page(page, (block_start - page_offset(page)) + offset,
				     len);
	}
	btrfs_page_clear_checked(fs_info, page, block_start,
				 block_end + 1 - block_start);
	btrfs_page_set_dirty(fs_info, page, block_start, block_end + 1 - block_start);
	unlock_extent(io_tree, block_start, block_end, &cached_state);

	if (only_release_metadata)
		set_extent_bit(&inode->io_tree, block_start, block_end,
			       EXTENT_NORESERVE, NULL);

out_unlock:
	if (ret) {
		if (only_release_metadata)
			btrfs_delalloc_release_metadata(inode, blocksize, true);
		else
			btrfs_delalloc_release_space(inode, data_reserved,
					block_start, blocksize, true);
	}
	btrfs_delalloc_release_extents(inode, blocksize);
	unlock_page(page);
	put_page(page);
out:
	if (only_release_metadata)
		btrfs_check_nocow_unlock(inode);
	extent_changeset_free(data_reserved);
	return ret;
}
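
/*
 * Illustrative call sites only (sketches matching how truncate and hole
 * punching use this helper):
 *
 *	zero from @from to the end of its block, e.g. on expanding truncate:
 *		ret = btrfs_truncate_block(inode, oldsize, 0, 0);
 *
 *	zero the start of the block up to @from (@front != 0), e.g. for the
 *	boundary block of a punched hole:
 *		ret = btrfs_truncate_block(inode, offset, 0, 1);
 */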
static int maybe_insert_hole(struct btrfs_root *root, struct btrfs_inode *inode,
			     u64 offset, u64 len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_drop_extents_args drop_args = { 0 };
	int ret;

	/*
	 * If NO_HOLES is enabled, we don't need to do anything.
	 * Later, up in the call chain, either btrfs_set_inode_last_sub_trans()
	 * or btrfs_update_inode() will be called, which guarantee that the next
	 * fsync will know this inode was changed and needs to be logged.
	 */
	if (btrfs_fs_incompat(fs_info, NO_HOLES))
		return 0;

	/*
	 * 1 - for the one we're dropping
	 * 1 - for the one we're adding
	 * 1 - for updating the inode.
	 */
	trans = btrfs_start_transaction(root, 3);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	drop_args.start = offset;
	drop_args.end = offset + len;
	drop_args.drop_cache = true;

	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		return ret;
	}

	ret = btrfs_insert_hole_extent(trans, root, btrfs_ino(inode), offset, len);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
	} else {
		btrfs_update_inode_bytes(inode, 0, drop_args.bytes_found);
		btrfs_update_inode(trans, root, inode);
	}
	btrfs_end_transaction(trans);
	return ret;
}
/*
 * This function puts in dummy file extents for the area we're creating a hole
 * for.  So if we are truncating this file to a larger size we need to insert
 * these file extents so that btrfs_get_extent will return a EXTENT_MAP_HOLE for
 * the range between oldsize and size
 */
int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct extent_map *em = NULL;
	struct extent_state *cached_state = NULL;
	u64 hole_start = ALIGN(oldsize, fs_info->sectorsize);
	u64 block_end = ALIGN(size, fs_info->sectorsize);
	u64 last_byte;
	u64 cur_offset;
	u64 hole_size;
	int err = 0;

	/*
	 * If our size started in the middle of a block we need to zero out the
	 * rest of the block before we expand the i_size, otherwise we could
	 * expose stale data.
	 */
	err = btrfs_truncate_block(inode, oldsize, 0, 0);
	if (err)
		return err;

	if (size <= hole_start)
		return 0;

	btrfs_lock_and_flush_ordered_range(inode, hole_start, block_end - 1,
					   &cached_state);
	cur_offset = hole_start;
	while (1) {
		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				      block_end - cur_offset);
		if (IS_ERR(em)) {
			err = PTR_ERR(em);
			em = NULL;
			break;
		}
		last_byte = min(extent_map_end(em), block_end);
		last_byte = ALIGN(last_byte, fs_info->sectorsize);
		hole_size = last_byte - cur_offset;

		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
			struct extent_map *hole_em;

			err = maybe_insert_hole(root, inode, cur_offset,
						hole_size);
			if (err)
				break;

			err = btrfs_inode_set_file_extent_range(inode,
							cur_offset, hole_size);
			if (err)
				break;

			hole_em = alloc_extent_map();
			if (!hole_em) {
				btrfs_drop_extent_map_range(inode, cur_offset,
						    cur_offset + hole_size - 1,
						    false);
				btrfs_set_inode_full_sync(inode);
				goto next;
			}
			hole_em->start = cur_offset;
			hole_em->len = hole_size;
			hole_em->orig_start = cur_offset;

			hole_em->block_start = EXTENT_MAP_HOLE;
			hole_em->block_len = 0;
			hole_em->orig_block_len = 0;
			hole_em->ram_bytes = hole_size;
			hole_em->compress_type = BTRFS_COMPRESS_NONE;
			hole_em->generation = fs_info->generation;

			err = btrfs_replace_extent_map_range(inode, hole_em, true);
			free_extent_map(hole_em);
		} else {
			err = btrfs_inode_set_file_extent_range(inode,
							cur_offset, hole_size);
			if (err)
				break;
		}
next:
		free_extent_map(em);
		em = NULL;
		cur_offset = last_byte;
		if (cur_offset >= block_end)
			break;
	}
	free_extent_map(em);
	unlock_extent(io_tree, hole_start, block_end - 1, &cached_state);
	return err;
}
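
/*
 * Illustrative only - the expanding truncate path in btrfs_setsize()
 * below is the typical caller:
 *
 *	btrfs_drew_write_lock(&root->snapshot_lock);
 *	ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, newsize);
 */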
static int btrfs_setsize(struct inode *inode, struct iattr *attr)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	loff_t oldsize = i_size_read(inode);
	loff_t newsize = attr->ia_size;
	int mask = attr->ia_valid;
	int ret;

	/*
	 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
	 * special case where we need to update the times despite not having
	 * these flags set.  For all other operations the VFS set these flags
	 * explicitly if it wants a timestamp update.
	 */
	if (newsize != oldsize) {
		inode_inc_iversion(inode);
		if (!(mask & (ATTR_CTIME | ATTR_MTIME)))
			inode->i_mtime = inode_set_ctime_current(inode);
	}

	if (newsize > oldsize) {
		/*
		 * Don't do an expanding truncate while snapshotting is ongoing.
		 * This is to ensure the snapshot captures a fully consistent
		 * state of this file - if the snapshot captures this expanding
		 * truncation, it must capture all writes that happened before
		 * this truncation.
		 */
		btrfs_drew_write_lock(&root->snapshot_lock);
		ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, newsize);
		if (ret) {
			btrfs_drew_write_unlock(&root->snapshot_lock);
			return ret;
		}

		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			btrfs_drew_write_unlock(&root->snapshot_lock);
			return PTR_ERR(trans);
		}

		i_size_write(inode, newsize);
		btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
		pagecache_isize_extended(inode, oldsize, newsize);
		ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
		btrfs_drew_write_unlock(&root->snapshot_lock);
		btrfs_end_transaction(trans);
	} else {
		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);

		if (btrfs_is_zoned(fs_info)) {
			ret = btrfs_wait_ordered_range(inode,
					ALIGN(newsize, fs_info->sectorsize),
					(u64)-1);
			if (ret)
				return ret;
		}

		/*
		 * We're truncating a file that used to have good data down to
		 * zero. Make sure any new writes to the file get on disk
		 * on close.
		 */
		if (newsize == 0)
			set_bit(BTRFS_INODE_FLUSH_ON_CLOSE,
				&BTRFS_I(inode)->runtime_flags);

		truncate_setsize(inode, newsize);

		inode_dio_wait(inode);

		ret = btrfs_truncate(BTRFS_I(inode), newsize == oldsize);
		if (ret && inode->i_nlink) {
			int err;

			/*
			 * Truncate failed, so fix up the in-memory size. We
			 * adjusted disk_i_size down as we removed extents, so
			 * wait for disk_i_size to be stable and then update the
			 * in-memory size to match.
			 */
			err = btrfs_wait_ordered_range(inode, 0, (u64)-1);
			if (err)
				return err;
			i_size_write(inode, BTRFS_I(inode)->disk_i_size);
		}
	}

	return ret;
}
static int btrfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
			 struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int err;

	if (btrfs_root_readonly(root))
		return -EROFS;

	err = setattr_prepare(idmap, dentry, attr);
	if (err)
		return err;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		err = btrfs_setsize(inode, attr);
		if (err)
			return err;
	}

	if (attr->ia_valid) {
		setattr_copy(idmap, inode, attr);
		inode_inc_iversion(inode);
		err = btrfs_dirty_inode(BTRFS_I(inode));

		if (!err && attr->ia_valid & ATTR_MODE)
			err = posix_acl_chmod(idmap, dentry, inode->i_mode);
	}

	return err;
}
/*
 * While truncating the inode pages during eviction, we get the VFS
 * calling btrfs_invalidate_folio() against each folio of the inode. This
 * is slow because the calls to btrfs_invalidate_folio() result in a
 * huge amount of calls to lock_extent() and clear_extent_bit(),
 * which keep merging and splitting extent_state structures over and over,
 * wasting lots of time.
 *
 * Therefore if the inode is being evicted, let btrfs_invalidate_folio()
 * skip all those expensive operations on a per folio basis and do only
 * the ordered io finishing, while we release here the extent_map and
 * extent_state structures, without the excessive merging and splitting.
 */
static void evict_inode_truncate_pages(struct inode *inode)
{
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct rb_node *node;

	ASSERT(inode->i_state & I_FREEING);
	truncate_inode_pages_final(&inode->i_data);

	btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);

	/*
	 * Keep looping until we have no more ranges in the io tree.
	 * We can have ongoing bios started by readahead that have
	 * their endio callback (extent_io.c:end_bio_extent_readpage)
	 * still in progress (unlocked the pages in the bio but did not yet
	 * unlocked the ranges in the io tree). Therefore this means some
	 * ranges can still be locked and eviction started because before
	 * submitting those bios, which are executed by a separate task (work
	 * queue kthread), inode references (inode->i_count) were not taken
	 * (which would be dropped in the end io callback of each bio).
	 * Therefore here we effectively end up waiting for those bios and
	 * anyone else holding locked ranges without having bumped the inode's
	 * reference count - if we don't do it, when they access the inode's
	 * io_tree to unlock a range it may be too late, leading to an
	 * use-after-free issue.
	 */
	spin_lock(&io_tree->lock);
	while (!RB_EMPTY_ROOT(&io_tree->state)) {
		struct extent_state *state;
		struct extent_state *cached_state = NULL;
		u64 start;
		u64 end;
		unsigned state_flags;

		node = rb_first(&io_tree->state);
		state = rb_entry(node, struct extent_state, rb_node);
		start = state->start;
		end = state->end;
		state_flags = state->state;
		spin_unlock(&io_tree->lock);

		lock_extent(io_tree, start, end, &cached_state);

		/*
		 * If still has DELALLOC flag, the extent didn't reach disk,
		 * and its reserved space won't be freed by delayed_ref.
		 * So we need to free its reserved space here.
		 * (Refer to comment in btrfs_invalidate_folio, case 2)
		 *
		 * Note, end is the bytenr of last byte, so we need + 1 here.
		 */
		if (state_flags & EXTENT_DELALLOC)
			btrfs_qgroup_free_data(BTRFS_I(inode), NULL, start,
					       end - start + 1);

		clear_extent_bit(io_tree, start, end,
				 EXTENT_CLEAR_ALL_BITS | EXTENT_DO_ACCOUNTING,
				 &cached_state);

		cond_resched();
		spin_lock(&io_tree->lock);
	}
	spin_unlock(&io_tree->lock);
}
static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root,
							struct btrfs_block_rsv *rsv)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	u64 delayed_refs_extra = btrfs_calc_delayed_ref_bytes(fs_info, 1);
	int ret;

	/*
	 * Eviction should be taking place at some place safe because of our
	 * delayed iputs.  However the normal flushing code will run delayed
	 * iputs, so we cannot use FLUSH_ALL otherwise we'll deadlock.
	 *
	 * We reserve the delayed_refs_extra here again because we can't use
	 * btrfs_start_transaction(root, 0) for the same deadlocky reason as
	 * above.  We reserve our extra bit here because we generate a ton of
	 * delayed refs activity by truncating.
	 *
	 * BTRFS_RESERVE_FLUSH_EVICT will steal from the global_rsv if it can,
	 * if we fail to make this reservation we can re-try without the
	 * delayed_refs_extra so we can make some forward progress.
	 */
	ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size + delayed_refs_extra,
				     BTRFS_RESERVE_FLUSH_EVICT);
	if (ret) {
		ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size,
					     BTRFS_RESERVE_FLUSH_EVICT);
		if (ret) {
			btrfs_warn(fs_info,
				   "could not allocate space for delete; will truncate on mount");
			return ERR_PTR(-ENOSPC);
		}
		delayed_refs_extra = 0;
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return trans;

	if (delayed_refs_extra) {
		trans->block_rsv = &fs_info->trans_block_rsv;
		trans->bytes_reserved = delayed_refs_extra;
		btrfs_block_rsv_migrate(rsv, trans->block_rsv,
					delayed_refs_extra, true);
	}
	return trans;
}
void btrfs_evict_inode(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *rsv = NULL;
	int ret;

	trace_btrfs_inode_evict(inode);

	if (!root) {
		fsverity_cleanup_inode(inode);
		clear_inode(inode);
		return;
	}

	evict_inode_truncate_pages(inode);

	if (inode->i_nlink &&
	    ((btrfs_root_refs(&root->root_item) != 0 &&
	      root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) ||
	     btrfs_is_free_space_inode(BTRFS_I(inode))))
		goto no_delete;

	if (is_bad_inode(inode))
		goto no_delete;

	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		goto no_delete;

	if (inode->i_nlink > 0) {
		BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
		       root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID);
		goto no_delete;
	}

	/*
	 * This makes sure the inode item in tree is uptodate and the space for
	 * the inode update is released.
	 */
	ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode));
	if (ret)
		goto no_delete;

	/*
	 * This drops any pending insert or delete operations we have for this
	 * inode.  We could have a delayed dir index deletion queued up, but
	 * we're removing the inode completely so that'll be taken care of in
	 * the truncate.
	 */
	btrfs_kill_delayed_inode_items(BTRFS_I(inode));

	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
	if (!rsv)
		goto no_delete;
	rsv->size = btrfs_calc_metadata_size(fs_info, 1);
	rsv->failfast = true;

	btrfs_i_size_write(BTRFS_I(inode), 0);

	while (1) {
		struct btrfs_truncate_control control = {
			.inode = BTRFS_I(inode),
			.ino = btrfs_ino(BTRFS_I(inode)),
			.new_size = 0,
			.min_type = 0,
		};

		trans = evict_refill_and_join(root, rsv);
		if (IS_ERR(trans))
			goto free_rsv;

		trans->block_rsv = rsv;

		ret = btrfs_truncate_inode_items(trans, root, &control);
		trans->block_rsv = &fs_info->trans_block_rsv;
		btrfs_end_transaction(trans);
		/*
		 * We have not added new delayed items for our inode after we
		 * have flushed its delayed items, so no need to throttle on
		 * delayed items.  However we have modified extent buffers.
		 */
		btrfs_btree_balance_dirty_nodelay(fs_info);
		if (ret && ret != -ENOSPC && ret != -EAGAIN)
			goto free_rsv;
		else if (!ret)
			break;
	}

	/*
	 * Errors here aren't a big deal, it just means we leave orphan items in
	 * the tree. They will be cleaned up on the next mount. If the inode
	 * number gets reused, cleanup deletes the orphan item without doing
	 * anything, and unlink reuses the existing orphan item.
	 *
	 * If it turns out that we are dropping too many of these, we might want
	 * to add a mechanism for retrying these after a commit.
	 */
	trans = evict_refill_and_join(root, rsv);
	if (!IS_ERR(trans)) {
		trans->block_rsv = rsv;
		btrfs_orphan_del(trans, BTRFS_I(inode));
		trans->block_rsv = &fs_info->trans_block_rsv;
		btrfs_end_transaction(trans);
	}

free_rsv:
	btrfs_free_block_rsv(fs_info, rsv);
no_delete:
	/*
	 * If we didn't successfully delete, the orphan item will still be in
	 * the tree and we'll retry on the next mount. Again, we might also want
	 * to retry these periodically in the future.
	 */
	btrfs_remove_delayed_node(BTRFS_I(inode));
	fsverity_cleanup_inode(inode);
	clear_inode(inode);
}
/*
 * Return the key found in the dir entry in the location pointer, fill @type
 * with BTRFS_FT_*, and return 0.
 *
 * If no dir entries were found, returns -ENOENT.
 * If found a corrupted location in dir entry, returns -EUCLEAN.
 */
static int btrfs_inode_by_name(struct btrfs_inode *dir, struct dentry *dentry,
			       struct btrfs_key *location, u8 *type)
{
	struct btrfs_dir_item *di;
	struct btrfs_path *path;
	struct btrfs_root *root = dir->root;
	int ret = 0;
	struct fscrypt_name fname;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname);
	if (ret < 0)
		goto out;
	/*
	 * fscrypt_setup_filename() should never return a positive value, but
	 * gcc on sparc/parisc thinks it can, so assert that doesn't happen.
	 */
	ASSERT(ret == 0);

	/* This needs to handle no-key deletions later on */

	di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir),
				   &fname.disk_name, 0);
	if (IS_ERR_OR_NULL(di)) {
		ret = di ? PTR_ERR(di) : -ENOENT;
		goto out;
	}

	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
	if (location->type != BTRFS_INODE_ITEM_KEY &&
	    location->type != BTRFS_ROOT_ITEM_KEY) {
		ret = -EUCLEAN;
		btrfs_warn(root->fs_info,
"%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))",
			   __func__, fname.disk_name.name, btrfs_ino(dir),
			   location->objectid, location->type, location->offset);
	}
	if (!ret)
		*type = btrfs_dir_ftype(path->nodes[0], di);
out:
	fscrypt_free_filename(&fname);
	btrfs_free_path(path);
	return ret;
}
/*
 * when we hit a tree root in a directory, the btrfs part of the inode
 * needs to be changed to reflect the root directory of the tree root.  This
 * is kind of like crossing a mount point.
 */
static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
				    struct btrfs_inode *dir,
				    struct dentry *dentry,
				    struct btrfs_key *location,
				    struct btrfs_root **sub_root)
{
	struct btrfs_path *path;
	struct btrfs_root *new_root;
	struct btrfs_root_ref *ref;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret;
	int err = 0;
	struct fscrypt_name fname;

	ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 0, &fname);
	if (ret)
		return ret;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	err = -ENOENT;
	key.objectid = dir->root->root_key.objectid;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = location->objectid;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	if (ret) {
		if (ret < 0)
			err = ret;
		goto out;
	}

	leaf = path->nodes[0];
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
	if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
	    btrfs_root_ref_name_len(leaf, ref) != fname.disk_name.len)
		goto out;

	ret = memcmp_extent_buffer(leaf, fname.disk_name.name,
				   (unsigned long)(ref + 1), fname.disk_name.len);
	if (ret)
		goto out;

	btrfs_release_path(path);

	new_root = btrfs_get_fs_root(fs_info, location->objectid, true);
	if (IS_ERR(new_root)) {
		err = PTR_ERR(new_root);
		goto out;
	}

	*sub_root = new_root;
	location->objectid = btrfs_root_dirid(&new_root->root_item);
	location->type = BTRFS_INODE_ITEM_KEY;
	location->offset = 0;
	err = 0;
out:
	btrfs_free_path(path);
	fscrypt_free_filename(&fname);
	return err;
}
static void inode_tree_add(struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_inode *entry;
	struct rb_node **p;
	struct rb_node *parent;
	struct rb_node *new = &inode->rb_node;
	u64 ino = btrfs_ino(inode);

	if (inode_unhashed(&inode->vfs_inode))
		return;
	parent = NULL;
	spin_lock(&root->inode_lock);
	p = &root->inode_tree.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_inode, rb_node);

		if (ino < btrfs_ino(entry))
			p = &parent->rb_left;
		else if (ino > btrfs_ino(entry))
			p = &parent->rb_right;
		else {
			WARN_ON(!(entry->vfs_inode.i_state &
				  (I_WILL_FREE | I_FREEING)));
			rb_replace_node(parent, new, &root->inode_tree);
			RB_CLEAR_NODE(parent);
			spin_unlock(&root->inode_lock);
			return;
		}
	}
	rb_link_node(new, parent, p);
	rb_insert_color(new, &root->inode_tree);
	spin_unlock(&root->inode_lock);
}
static void inode_tree_del(struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	int empty = 0;

	spin_lock(&root->inode_lock);
	if (!RB_EMPTY_NODE(&inode->rb_node)) {
		rb_erase(&inode->rb_node, &root->inode_tree);
		RB_CLEAR_NODE(&inode->rb_node);
		empty = RB_EMPTY_ROOT(&root->inode_tree);
	}
	spin_unlock(&root->inode_lock);

	if (empty && btrfs_root_refs(&root->root_item) == 0) {
		spin_lock(&root->inode_lock);
		empty = RB_EMPTY_ROOT(&root->inode_tree);
		spin_unlock(&root->inode_lock);
		if (empty)
			btrfs_add_dead_root(root);
	}
}
static int btrfs_init_locked_inode(struct inode *inode, void *p)
{
	struct btrfs_iget_args *args = p;

	inode->i_ino = args->ino;
	BTRFS_I(inode)->location.objectid = args->ino;
	BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
	BTRFS_I(inode)->location.offset = 0;
	BTRFS_I(inode)->root = btrfs_grab_root(args->root);
	BUG_ON(args->root && !BTRFS_I(inode)->root);

	if (args->root && args->root == args->root->fs_info->tree_root &&
	    args->ino != BTRFS_BTREE_INODE_OBJECTID)
		set_bit(BTRFS_INODE_FREE_SPACE_INODE,
			&BTRFS_I(inode)->runtime_flags);
	return 0;
}
static int btrfs_find_actor(struct inode *inode, void *opaque)
{
	struct btrfs_iget_args *args = opaque;

	return args->ino == BTRFS_I(inode)->location.objectid &&
		args->root == BTRFS_I(inode)->root;
}
static struct inode *btrfs_iget_locked(struct super_block *s, u64 ino,
				       struct btrfs_root *root)
{
	struct inode *inode;
	struct btrfs_iget_args args;
	unsigned long hashval = btrfs_inode_hash(ino, root);

	args.ino = ino;
	args.root = root;

	inode = iget5_locked(s, hashval, btrfs_find_actor,
			     btrfs_init_locked_inode,
			     (void *)&args);
	return inode;
}
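
/*
 * iget5_locked() either returns an existing inode that satisfies
 * btrfs_find_actor() (same ino and root) or allocates a fresh one and
 * initializes it with btrfs_init_locked_inode() while it is still
 * locked and flagged I_NEW.
 */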
/*
 * Get an inode object given its inode number and corresponding root.
 * Path can be preallocated to prevent recursing back to iget through
 * allocator. NULL is also valid but may require an additional allocation
 * later.
 */
struct inode *btrfs_iget_path(struct super_block *s, u64 ino,
			      struct btrfs_root *root, struct btrfs_path *path)
{
	struct inode *inode;

	inode = btrfs_iget_locked(s, ino, root);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (inode->i_state & I_NEW) {
		int ret;

		ret = btrfs_read_locked_inode(inode, path);
		if (!ret) {
			inode_tree_add(BTRFS_I(inode));
			unlock_new_inode(inode);
		} else {
			iget_failed(inode);
			/*
			 * ret > 0 can come from btrfs_search_slot called by
			 * btrfs_read_locked_inode, this means the inode item
			 * was not found.
			 */
			if (ret > 0)
				ret = -ENOENT;
			inode = ERR_PTR(ret);
		}
	}

	return inode;
}
struct inode *btrfs_iget(struct super_block *s, u64 ino, struct btrfs_root *root)
{
	return btrfs_iget_path(s, ino, root, NULL);
}
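
/*
 * Illustrative example only - looking up an inode by number within a root,
 * as btrfs_lookup_dentry() below does:
 *
 *	inode = btrfs_iget(dir->i_sb, location.objectid, root);
 *	if (IS_ERR(inode))
 *		return inode;
 */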
static struct inode *new_simple_dir(struct inode *dir,
				    struct btrfs_key *key,
				    struct btrfs_root *root)
{
	struct inode *inode = new_inode(dir->i_sb);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	BTRFS_I(inode)->root = btrfs_grab_root(root);
	memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);

	inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
	/*
	 * We only need lookup, the rest is read-only and there's no inode
	 * associated with the dentry
	 */
	inode->i_op = &simple_dir_inode_operations;
	inode->i_opflags &= ~IOP_XATTR;
	inode->i_fop = &simple_dir_operations;
	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
	inode->i_mtime = inode_set_ctime_current(inode);
	inode->i_atime = dir->i_atime;
	BTRFS_I(inode)->i_otime = inode->i_mtime;
	inode->i_uid = dir->i_uid;
	inode->i_gid = dir->i_gid;

	return inode;
}
static_assert(BTRFS_FT_UNKNOWN == FT_UNKNOWN);
static_assert(BTRFS_FT_REG_FILE == FT_REG_FILE);
static_assert(BTRFS_FT_DIR == FT_DIR);
static_assert(BTRFS_FT_CHRDEV == FT_CHRDEV);
static_assert(BTRFS_FT_BLKDEV == FT_BLKDEV);
static_assert(BTRFS_FT_FIFO == FT_FIFO);
static_assert(BTRFS_FT_SOCK == FT_SOCK);
static_assert(BTRFS_FT_SYMLINK == FT_SYMLINK);

static inline u8 btrfs_inode_type(struct inode *inode)
{
	return fs_umode_to_ftype(inode->i_mode);
}
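
/*
 * The static_asserts above guarantee that the generic FT_* values
 * returned by fs_umode_to_ftype() are numerically identical to the
 * on-disk BTRFS_FT_* values, so the result can be stored in dir items
 * without translation.
 */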
struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct inode *inode;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_root *sub_root = root;
	struct btrfs_key location;
	u8 di_type = 0;
	int ret = 0;

	if (dentry->d_name.len > BTRFS_NAME_LEN)
		return ERR_PTR(-ENAMETOOLONG);

	ret = btrfs_inode_by_name(BTRFS_I(dir), dentry, &location, &di_type);
	if (ret < 0)
		return ERR_PTR(ret);

	if (location.type == BTRFS_INODE_ITEM_KEY) {
		inode = btrfs_iget(dir->i_sb, location.objectid, root);
		if (IS_ERR(inode))
			return inode;

		/* Do extra check against inode mode with di_type */
		if (btrfs_inode_type(inode) != di_type) {
			btrfs_crit(fs_info,
"inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir type=%u",
				  inode->i_mode, btrfs_inode_type(inode),
				  di_type);
			iput(inode);
			return ERR_PTR(-EUCLEAN);
		}
		return inode;
	}

	ret = fixup_tree_root_location(fs_info, BTRFS_I(dir), dentry,
				       &location, &sub_root);
	if (ret < 0) {
		if (ret != -ENOENT)
			inode = ERR_PTR(ret);
		else
			inode = new_simple_dir(dir, &location, root);
	} else {
		inode = btrfs_iget(dir->i_sb, location.objectid, sub_root);
		btrfs_put_root(sub_root);

		if (IS_ERR(inode))
			return inode;

		down_read(&fs_info->cleanup_work_sem);
		if (!sb_rdonly(inode->i_sb))
			ret = btrfs_orphan_cleanup(sub_root);
		up_read(&fs_info->cleanup_work_sem);
		if (ret) {
			iput(inode);
			inode = ERR_PTR(ret);
		}
	}

	return inode;
}
static int btrfs_dentry_delete(const struct dentry *dentry)
{
	struct btrfs_root *root;
	struct inode *inode = d_inode(dentry);

	if (!inode && !IS_ROOT(dentry))
		inode = d_inode(dentry->d_parent);

	if (inode) {
		root = BTRFS_I(inode)->root;
		if (btrfs_root_refs(&root->root_item) == 0)
			return 1;

		if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
			return 1;
	}
	return 0;
}
static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
				   unsigned int flags)
{
	struct inode *inode = btrfs_lookup_dentry(dir, dentry);

	if (inode == ERR_PTR(-ENOENT))
		inode = NULL;
	return d_splice_alias(inode, dentry);
}
/*
 * Find the highest existing sequence number in a directory and then set the
 * in-memory index_cnt variable to the first free sequence number.
 */
static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_key key, found_key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = btrfs_ino(inode);
	key.type = BTRFS_DIR_INDEX_KEY;
	key.offset = (u64)-1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	/* FIXME: we should be able to handle this */
	if (ret == 0)
		goto out;
	ret = 0;

	if (path->slots[0] == 0) {
		inode->index_cnt = BTRFS_DIR_START_INDEX;
		goto out;
	}

	path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

	if (found_key.objectid != btrfs_ino(inode) ||
	    found_key.type != BTRFS_DIR_INDEX_KEY) {
		inode->index_cnt = BTRFS_DIR_START_INDEX;
		goto out;
	}

	inode->index_cnt = found_key.offset + 1;
out:
	btrfs_free_path(path);
	return ret;
}
static int btrfs_get_dir_last_index(struct btrfs_inode *dir, u64 *index)
{
	int ret = 0;

	btrfs_inode_lock(dir, 0);
	if (dir->index_cnt == (u64)-1) {
		ret = btrfs_inode_delayed_dir_index_count(dir);
		if (ret) {
			ret = btrfs_set_inode_index_count(dir);
			if (ret)
				goto out;
		}
	}

	/* index_cnt is the index number of next new entry, so decrement it. */
	*index = dir->index_cnt - 1;
out:
	btrfs_inode_unlock(dir, 0);

	return ret;
}
/*
 * All this infrastructure exists because dir_emit can fault, and we are holding
 * the tree lock when doing readdir.  For now just allocate a buffer and copy
 * our information into that, and then dir_emit from the buffer.  This is
 * similar to what NFS does, only we don't keep the buffer around in pagecache
 * because I'm afraid I'll mess that up.  Long term we need to make filldir do
 * copy_to_user_inatomic so we don't have to worry about page faulting under the
 * tree lock.
 */
static int btrfs_opendir(struct inode *inode, struct file *file)
{
	struct btrfs_file_private *private;
	u64 last_index;
	int ret;

	ret = btrfs_get_dir_last_index(BTRFS_I(inode), &last_index);
	if (ret)
		return ret;

	private = kzalloc(sizeof(struct btrfs_file_private), GFP_KERNEL);
	if (!private)
		return -ENOMEM;
	private->last_index = last_index;
	private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!private->filldir_buf) {
		kfree(private);
		return -ENOMEM;
	}
	file->private_data = private;
	return 0;
}
static loff_t btrfs_dir_llseek(struct file *file, loff_t offset, int whence)
{
	struct btrfs_file_private *private = file->private_data;
	int ret;

	ret = btrfs_get_dir_last_index(BTRFS_I(file_inode(file)),
				       &private->last_index);
	if (ret)
		return ret;

	return generic_file_llseek(file, offset, whence);
}
struct dir_entry {
	u64 ino;
	u64 offset;
	unsigned type;
	int name_len;
};

static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx)
{
	while (entries--) {
		struct dir_entry *entry = addr;
		char *name = (char *)(entry + 1);

		ctx->pos = get_unaligned(&entry->offset);
		if (!dir_emit(ctx, name, get_unaligned(&entry->name_len),
					 get_unaligned(&entry->ino),
					 get_unaligned(&entry->type)))
			return 1;
		addr += sizeof(struct dir_entry) +
			get_unaligned(&entry->name_len);
		ctx->pos++;
	}
	return 0;
}
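
/*
 * Layout of the filldir buffer consumed above - entries are packed back
 * to back, each struct dir_entry immediately followed by the name bytes
 * (not NUL terminated):
 *
 *	[dir_entry][name][dir_entry][name]...
 *
 * hence the cursor advances by sizeof(struct dir_entry) + name_len.
 */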
static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_private *private = file->private_data;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	void *addr;
	LIST_HEAD(ins_list);
	LIST_HEAD(del_list);
	int ret;
	char *name_ptr;
	int name_len;
	int entries = 0;
	int total_len = 0;
	bool put = false;
	struct btrfs_key location;

	if (!dir_emit_dots(file, ctx))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	addr = private->filldir_buf;
	path->reada = READA_FORWARD;

	put = btrfs_readdir_get_delayed_items(inode, private->last_index,
					      &ins_list, &del_list);

again:
	key.type = BTRFS_DIR_INDEX_KEY;
	key.offset = ctx->pos;
	key.objectid = btrfs_ino(BTRFS_I(inode));

	btrfs_for_each_slot(root, &key, &found_key, path, ret) {
		struct dir_entry *entry;
		struct extent_buffer *leaf = path->nodes[0];
		u8 ftype;

		if (found_key.objectid != key.objectid)
			break;
		if (found_key.type != BTRFS_DIR_INDEX_KEY)
			break;
		if (found_key.offset < ctx->pos)
			continue;
		if (found_key.offset > private->last_index)
			break;
		if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
			continue;
		di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
		name_len = btrfs_dir_name_len(leaf, di);
		if ((total_len + sizeof(struct dir_entry) + name_len) >=
		    PAGE_SIZE) {
			btrfs_release_path(path);
			ret = btrfs_filldir(private->filldir_buf, entries, ctx);
			if (ret)
				goto nopos;
			addr = private->filldir_buf;
			entries = 0;
			total_len = 0;
			goto again;
		}

		ftype = btrfs_dir_flags_to_ftype(btrfs_dir_flags(leaf, di));
		entry = addr;
		name_ptr = (char *)(entry + 1);
		read_extent_buffer(leaf, name_ptr,
				   (unsigned long)(di + 1), name_len);
		put_unaligned(name_len, &entry->name_len);
		put_unaligned(fs_ftype_to_dtype(ftype), &entry->type);
		btrfs_dir_item_key_to_cpu(leaf, di, &location);
		put_unaligned(location.objectid, &entry->ino);
		put_unaligned(found_key.offset, &entry->offset);
		entries++;
		addr += sizeof(struct dir_entry) + name_len;
		total_len += sizeof(struct dir_entry) + name_len;
	}
	/* Catch error encountered during iteration */
	if (ret < 0)
		goto err;

	btrfs_release_path(path);

	ret = btrfs_filldir(private->filldir_buf, entries, ctx);
	if (ret)
		goto nopos;

	ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
	if (ret)
		goto nopos;

	/*
	 * Stop new entries from being returned after we return the last
	 * entry.
	 *
	 * New directory entries are assigned a strictly increasing
	 * offset.  This means that new entries created during readdir
	 * are *guaranteed* to be seen in the future by that readdir.
	 * This has broken buggy programs which operate on names as
	 * they're returned by readdir.  Until we re-use freed offsets
	 * we have this hack to stop new entries from being returned
	 * under the assumption that they'll never reach this huge
	 * offset.
	 *
	 * This is being careful not to overflow 32bit loff_t unless the
	 * last entry requires it because doing so has broken 32bit apps
	 * in the past.
	 */
	if (ctx->pos >= INT_MAX)
		ctx->pos = LLONG_MAX;
	else
		ctx->pos = INT_MAX;
nopos:
	ret = 0;
err:
	if (put)
		btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
	btrfs_free_path(path);

	return ret;
}
/*
 * This is somewhat expensive, updating the tree every time the
 * inode changes.  But, it is most likely to find the inode in cache.
 * FIXME, needs more benchmarking...there are no reasons other than performance
 * to keep or drop this code.
 */
static int btrfs_dirty_inode(struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret;

	if (test_bit(BTRFS_INODE_DUMMY, &inode->runtime_flags))
		return 0;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	ret = btrfs_update_inode(trans, root, inode);
	if (ret && (ret == -ENOSPC || ret == -EDQUOT)) {
		/* whoops, lets try again with the full transaction */
		btrfs_end_transaction(trans);
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_update_inode(trans, root, inode);
	}
	btrfs_end_transaction(trans);
	if (inode->delayed_node)
		btrfs_balance_delayed_items(fs_info);

	return ret;
}
/*
 * This is a copy of file_update_time.  We need this so we can return error on
 * ENOSPC for updating the inode in the case of file write and mmap writes.
 */
static int btrfs_update_time(struct inode *inode, int flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	bool dirty = flags & ~S_VERSION;

	if (btrfs_root_readonly(root))
		return -EROFS;

	dirty = inode_update_timestamps(inode, flags);
	return dirty ? btrfs_dirty_inode(BTRFS_I(inode)) : 0;
}
/*
 * helper to find a free sequence number in a given directory.  This current
 * code is very simple, later versions will do smarter things in the btree
 */
int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index)
{
	int ret = 0;

	if (dir->index_cnt == (u64)-1) {
		ret = btrfs_inode_delayed_dir_index_count(dir);
		if (ret) {
			ret = btrfs_set_inode_index_count(dir);
			if (ret)
				return ret;
		}
	}

	*index = dir->index_cnt;
	dir->index_cnt++;

	return ret;
}
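
/*
 * Illustrative only - new directory entries reserve their index with
 *
 *	ret = btrfs_set_inode_index(BTRFS_I(dir), &BTRFS_I(inode)->dir_index);
 *
 * as btrfs_create_new_inode() below does; on failure the caller undoes
 * the reservation by decrementing dir->index_cnt.
 */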
static int btrfs_insert_inode_locked(struct inode *inode)
{
	struct btrfs_iget_args args;

	args.ino = BTRFS_I(inode)->location.objectid;
	args.root = BTRFS_I(inode)->root;

	return insert_inode_locked4(inode,
		   btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root),
		   btrfs_find_actor, &args);
}
int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args,
			    unsigned int *trans_num_items)
{
	struct inode *dir = args->dir;
	struct inode *inode = args->inode;
	int ret;

	if (!args->orphan) {
		ret = fscrypt_setup_filename(dir, &args->dentry->d_name, 0,
					     &args->fname);
		if (ret)
			return ret;
	}

	ret = posix_acl_create(dir, &inode->i_mode, &args->default_acl, &args->acl);
	if (ret) {
		fscrypt_free_filename(&args->fname);
		return ret;
	}

	/* 1 to add inode item */
	*trans_num_items = 1;
	/* 1 to add compression property */
	if (BTRFS_I(dir)->prop_compress)
		(*trans_num_items)++;
	/* 1 to add default ACL xattr */
	if (args->default_acl)
		(*trans_num_items)++;
	/* 1 to add access ACL xattr */
	if (args->acl)
		(*trans_num_items)++;
#ifdef CONFIG_SECURITY
	/* 1 to add LSM xattr */
	if (dir->i_security)
		(*trans_num_items)++;
#endif
	if (args->orphan) {
		/* 1 to add orphan item */
		(*trans_num_items)++;
	} else {
		/*
		 * 1 to add dir item
		 * 1 to add dir index
		 * 1 to update parent inode item
		 *
		 * No need for 1 unit for the inode ref item because it is
		 * inserted in a batch together with the inode item at
		 * btrfs_create_new_inode().
		 */
		*trans_num_items += 3;
	}
	return 0;
}
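
/*
 * Worked example (illustrative, not from the original source): for a regular
 * file created in a directory with no compression property, with a default
 * ACL, an access ACL and an LSM xattr, the reservation adds up to
 * 1 (inode item) + 1 (default ACL) + 1 (access ACL) + 1 (LSM xattr) +
 * 3 (dir item, dir index, parent inode update) = 7 transaction units.
 */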
void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args)
{
	posix_acl_release(args->acl);
	posix_acl_release(args->default_acl);
	fscrypt_free_filename(&args->fname);
}
/*
 * Inherit flags from the parent inode.
 *
 * Currently only the compression flags and the cow flags are inherited.
 */
static void btrfs_inherit_iflags(struct btrfs_inode *inode, struct btrfs_inode *dir)
{
	unsigned int flags;

	flags = dir->flags;

	if (flags & BTRFS_INODE_NOCOMPRESS) {
		inode->flags &= ~BTRFS_INODE_COMPRESS;
		inode->flags |= BTRFS_INODE_NOCOMPRESS;
	} else if (flags & BTRFS_INODE_COMPRESS) {
		inode->flags &= ~BTRFS_INODE_NOCOMPRESS;
		inode->flags |= BTRFS_INODE_COMPRESS;
	}

	if (flags & BTRFS_INODE_NODATACOW) {
		inode->flags |= BTRFS_INODE_NODATACOW;
		if (S_ISREG(inode->vfs_inode.i_mode))
			inode->flags |= BTRFS_INODE_NODATASUM;
	}

	btrfs_sync_inode_flags_to_i_flags(&inode->vfs_inode);
}
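
/*
 * Worked example (illustrative, not from the original source): if the parent
 * directory has BTRFS_INODE_NODATACOW set, a new regular file inherits both
 * BTRFS_INODE_NODATACOW and BTRFS_INODE_NODATASUM, since nodatacow writes
 * cannot maintain checksums; a new subdirectory inherits only NODATACOW.
 */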
int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_new_inode_args *args)
{
	struct inode *dir = args->dir;
	struct inode *inode = args->inode;
	const struct fscrypt_str *name = args->orphan ? NULL : &args->fname.disk_name;
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct btrfs_root *root;
	struct btrfs_inode_item *inode_item;
	struct btrfs_key *location;
	struct btrfs_path *path;
	u64 objectid;
	struct btrfs_inode_ref *ref;
	struct btrfs_key key[2];
	u32 sizes[2];
	struct btrfs_item_batch batch;
	unsigned long ptr;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (!args->subvol)
		BTRFS_I(inode)->root = btrfs_grab_root(BTRFS_I(dir)->root);
	root = BTRFS_I(inode)->root;

	ret = btrfs_get_free_objectid(root, &objectid);
	if (ret)
		goto out;
	inode->i_ino = objectid;

	if (args->orphan) {
		/*
		 * O_TMPFILE, set link count to 0, so that after this point, we
		 * fill in an inode item with the correct link count.
		 */
		set_nlink(inode, 0);
	} else {
		trace_btrfs_inode_request(dir);

		ret = btrfs_set_inode_index(BTRFS_I(dir), &BTRFS_I(inode)->dir_index);
		if (ret)
			goto out;
	}
	/* index_cnt is ignored for everything but a dir. */
	BTRFS_I(inode)->index_cnt = BTRFS_DIR_START_INDEX;
	BTRFS_I(inode)->generation = trans->transid;
	inode->i_generation = BTRFS_I(inode)->generation;

	/*
	 * Subvolumes don't inherit flags from their parent directory.
	 * Originally this was probably by accident, but we probably can't
	 * change it now without compatibility issues.
	 */
	if (!args->subvol)
		btrfs_inherit_iflags(BTRFS_I(inode), BTRFS_I(dir));

	if (S_ISREG(inode->i_mode)) {
		if (btrfs_test_opt(fs_info, NODATASUM))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
		if (btrfs_test_opt(fs_info, NODATACOW))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
				BTRFS_INODE_NODATASUM;
	}

	location = &BTRFS_I(inode)->location;
	location->objectid = objectid;
	location->offset = 0;
	location->type = BTRFS_INODE_ITEM_KEY;

	ret = btrfs_insert_inode_locked(inode);
	if (ret < 0) {
		if (!args->orphan)
			BTRFS_I(dir)->index_cnt--;
		goto out;
	}

	/*
	 * We could have gotten an inode number from somebody who was fsynced
	 * and then removed in this same transaction, so let's just set full
	 * sync since it will be a full sync anyway and this will blow away the
	 * old info in the log.
	 */
	btrfs_set_inode_full_sync(BTRFS_I(inode));

	key[0].objectid = objectid;
	key[0].type = BTRFS_INODE_ITEM_KEY;
	key[0].offset = 0;

	sizes[0] = sizeof(struct btrfs_inode_item);

	if (!args->orphan) {
		/*
		 * Start new inodes with an inode_ref. This is slightly more
		 * efficient for small numbers of hard links since they will
		 * be packed into one item. Extended refs will kick in if we
		 * add more hard links than can fit in the ref item.
		 */
		key[1].objectid = objectid;
		key[1].type = BTRFS_INODE_REF_KEY;
		if (args->subvol) {
			key[1].offset = objectid;
			sizes[1] = 2 + sizeof(*ref);
		} else {
			key[1].offset = btrfs_ino(BTRFS_I(dir));
			sizes[1] = name->len + sizeof(*ref);
		}
	}

	batch.keys = &key[0];
	batch.data_sizes = &sizes[0];
	batch.total_data_size = sizes[0] + (args->orphan ? 0 : sizes[1]);
	batch.nr = args->orphan ? 1 : 2;
	ret = btrfs_insert_empty_items(trans, root, path, &batch);
	if (ret != 0) {
		btrfs_abort_transaction(trans, ret);
		goto discard;
	}

	inode->i_mtime = inode_set_ctime_current(inode);
	inode->i_atime = inode->i_mtime;
	BTRFS_I(inode)->i_otime = inode->i_mtime;

	/*
	 * We're going to fill the inode item now, so at this point the inode
	 * must be fully initialized.
	 */

	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_inode_item);
	memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item,
			      sizeof(*inode_item));
	fill_inode_item(trans, path->nodes[0], inode_item, inode);

	if (!args->orphan) {
		ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
				     struct btrfs_inode_ref);
		ptr = (unsigned long)(ref + 1);
		if (args->subvol) {
			btrfs_set_inode_ref_name_len(path->nodes[0], ref, 2);
			btrfs_set_inode_ref_index(path->nodes[0], ref, 0);
			write_extent_buffer(path->nodes[0], "..", ptr, 2);
		} else {
			btrfs_set_inode_ref_name_len(path->nodes[0], ref,
						     name->len);
			btrfs_set_inode_ref_index(path->nodes[0], ref,
						  BTRFS_I(inode)->dir_index);
			write_extent_buffer(path->nodes[0], name->name, ptr,
					    name->len);
		}
	}

	btrfs_mark_buffer_dirty(path->nodes[0]);
	/*
	 * We don't need the path anymore, plus inheriting properties, adding
	 * ACLs, security xattrs, orphan item or adding the link, will result in
	 * allocating yet another path. So just free our path.
	 */
	btrfs_free_path(path);
	path = NULL;

	if (args->subvol) {
		struct inode *parent;

		/*
		 * Subvolumes inherit properties from their parent subvolume,
		 * not the directory they were created in.
		 */
		parent = btrfs_iget(fs_info->sb, BTRFS_FIRST_FREE_OBJECTID,
				    BTRFS_I(dir)->root);
		if (IS_ERR(parent)) {
			ret = PTR_ERR(parent);
		} else {
			ret = btrfs_inode_inherit_props(trans, inode, parent);
			iput(parent);
		}
	} else {
		ret = btrfs_inode_inherit_props(trans, inode, dir);
	}
	if (ret) {
		btrfs_err(fs_info,
			  "error inheriting props for ino %llu (root %llu): %d",
			  btrfs_ino(BTRFS_I(inode)), root->root_key.objectid,
			  ret);
	}

	/*
	 * Subvolumes don't inherit ACLs or get passed to the LSM. This is
	 * probably a bug.
	 */
	if (!args->subvol) {
		ret = btrfs_init_inode_security(trans, args);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto discard;
		}
	}

	inode_tree_add(BTRFS_I(inode));

	trace_btrfs_inode_new(inode);
	btrfs_set_inode_last_trans(trans, BTRFS_I(inode));

	btrfs_update_root_times(trans, root);

	if (args->orphan) {
		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
	} else {
		ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
				     0, BTRFS_I(inode)->dir_index);
	}
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto discard;
	}

	return 0;

discard:
	/*
	 * discard_new_inode() calls iput(), but the caller owns the reference
	 * to the inode.
	 */
	ihold(inode);
	discard_new_inode(inode);
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * utility function to add 'inode' into 'parent_inode' with
 * a given name and a given sequence number.
 * if 'add_backref' is true, also insert a backref from the
 * inode to the parent directory.
 */
int btrfs_add_link(struct btrfs_trans_handle *trans,
		   struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
		   const struct fscrypt_str *name, int add_backref, u64 index)
{
	int ret = 0;
	struct btrfs_key key;
	struct btrfs_root *root = parent_inode->root;
	u64 ino = btrfs_ino(inode);
	u64 parent_ino = btrfs_ino(parent_inode);

	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		memcpy(&key, &inode->root->root_key, sizeof(key));
	} else {
		key.objectid = ino;
		key.type = BTRFS_INODE_ITEM_KEY;
		key.offset = 0;
	}

	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		ret = btrfs_add_root_ref(trans, key.objectid,
					 root->root_key.objectid, parent_ino,
					 index, name);
	} else if (add_backref) {
		ret = btrfs_insert_inode_ref(trans, root, name,
					     ino, parent_ino, index);
	}

	/* Nothing to clean up yet */
	if (ret)
		return ret;

	ret = btrfs_insert_dir_item(trans, name, parent_inode, &key,
				    btrfs_inode_type(&inode->vfs_inode), index);
	if (ret == -EEXIST || ret == -EOVERFLOW)
		goto fail_dir_item;
	else if (ret) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size +
			   name->len * 2);
	inode_inc_iversion(&parent_inode->vfs_inode);
	/*
	 * If we are replaying a log tree, we do not want to update the mtime
	 * and ctime of the parent directory with the current time, since the
	 * log replay procedure is responsible for setting them to their correct
	 * values (the ones it had when the fsync was done).
	 */
	if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags))
		parent_inode->vfs_inode.i_mtime =
			inode_set_ctime_current(&parent_inode->vfs_inode);

	ret = btrfs_update_inode(trans, root, parent_inode);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;

fail_dir_item:
	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		u64 local_index;
		int err;

		err = btrfs_del_root_ref(trans, key.objectid,
					 root->root_key.objectid, parent_ino,
					 &local_index, name);
		if (err)
			btrfs_abort_transaction(trans, err);
	} else if (add_backref) {
		u64 local_index;
		int err;

		err = btrfs_del_inode_ref(trans, root, name, ino, parent_ino,
					  &local_index);
		if (err)
			btrfs_abort_transaction(trans, err);
	}

	/* Return the original error code */
	return ret;
}
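
/*
 * Illustrative note (not from the original source): the key prepared above is
 * what the new dir item points at. For a subvolume (ino ==
 * BTRFS_FIRST_FREE_OBJECTID) it is the subvolume's root_key and the backref is
 * a root ref in the root tree, while for an ordinary inode it is the
 * (ino, BTRFS_INODE_ITEM_KEY, 0) key with an inode ref in the same subvolume.
 */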
static int btrfs_create_common(struct inode *dir, struct dentry *dentry,
			       struct inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_new_inode_args new_inode_args = {
		.dir = dir,
		.dentry = dentry,
		.inode = inode,
	};
	unsigned int trans_num_items;
	struct btrfs_trans_handle *trans;
	int err;

	err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
	if (err)
		goto out_inode;

	trans = btrfs_start_transaction(root, trans_num_items);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_new_inode_args;
	}

	err = btrfs_create_new_inode(trans, &new_inode_args);
	if (!err)
		d_instantiate_new(dentry, inode);

	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out_new_inode_args:
	btrfs_new_inode_args_destroy(&new_inode_args);
out_inode:
	if (err)
		iput(inode);
	return err;
}
static int btrfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
		       struct dentry *dentry, umode_t mode, dev_t rdev)
{
	struct inode *inode;

	inode = new_inode(dir->i_sb);
	if (!inode)
		return -ENOMEM;
	inode_init_owner(idmap, inode, dir, mode);
	inode->i_op = &btrfs_special_inode_operations;
	init_special_inode(inode, inode->i_mode, rdev);
	return btrfs_create_common(dir, dentry, inode);
}
static int btrfs_create(struct mnt_idmap *idmap, struct inode *dir,
			struct dentry *dentry, umode_t mode, bool excl)
{
	struct inode *inode;

	inode = new_inode(dir->i_sb);
	if (!inode)
		return -ENOMEM;
	inode_init_owner(idmap, inode, dir, mode);
	inode->i_fop = &btrfs_file_operations;
	inode->i_op = &btrfs_file_inode_operations;
	inode->i_mapping->a_ops = &btrfs_aops;
	return btrfs_create_common(dir, dentry, inode);
}
static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
		      struct dentry *dentry)
{
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = d_inode(old_dentry);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct fscrypt_name fname;
	u64 index;
	int err;
	int drop_inode = 0;

	/* do not allow sys_link's with other subvols of the same device */
	if (root->root_key.objectid != BTRFS_I(inode)->root->root_key.objectid)
		return -EXDEV;

	if (inode->i_nlink >= BTRFS_LINK_MAX)
		return -EMLINK;

	err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname);
	if (err)
		goto fail;

	err = btrfs_set_inode_index(BTRFS_I(dir), &index);
	if (err)
		goto fail;

	/*
	 * 2 items for inode and inode ref
	 * 2 items for dir items
	 * 1 item for parent inode
	 * 1 item for orphan item deletion if O_TMPFILE
	 */
	trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		trans = NULL;
		goto fail;
	}

	/* There are several dir indexes for this inode, clear the cache. */
	BTRFS_I(inode)->dir_index = 0ULL;
	inc_nlink(inode);
	inode_inc_iversion(inode);
	inode_set_ctime_current(inode);
	ihold(inode);
	set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);

	err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
			     &fname.disk_name, 1, index);

	if (err) {
		drop_inode = 1;
	} else {
		struct dentry *parent = dentry->d_parent;

		err = btrfs_update_inode(trans, root, BTRFS_I(inode));
		if (err)
			goto fail;
		if (inode->i_nlink == 1) {
			/*
			 * If new hard link count is 1, it's a file created
			 * with open(2) O_TMPFILE flag.
			 */
			err = btrfs_orphan_del(trans, BTRFS_I(inode));
			if (err)
				goto fail;
		}
		d_instantiate(dentry, inode);
		btrfs_log_new_name(trans, old_dentry, NULL, 0, parent);
	}

fail:
	fscrypt_free_filename(&fname);
	if (trans)
		btrfs_end_transaction(trans);
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	btrfs_btree_balance_dirty(fs_info);
	return err;
}
static int btrfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
		       struct dentry *dentry, umode_t mode)
{
	struct inode *inode;

	inode = new_inode(dir->i_sb);
	if (!inode)
		return -ENOMEM;
	inode_init_owner(idmap, inode, dir, S_IFDIR | mode);
	inode->i_op = &btrfs_dir_inode_operations;
	inode->i_fop = &btrfs_dir_file_operations;
	return btrfs_create_common(dir, dentry, inode);
}
static noinline int uncompress_inline(struct btrfs_path *path,
				      struct page *page,
				      struct btrfs_file_extent_item *item)
{
	int ret;
	struct extent_buffer *leaf = path->nodes[0];
	char *tmp;
	size_t max_size;
	unsigned long inline_size;
	unsigned long ptr;
	int compress_type;

	compress_type = btrfs_file_extent_compression(leaf, item);
	max_size = btrfs_file_extent_ram_bytes(leaf, item);
	inline_size = btrfs_file_extent_inline_item_len(leaf, path->slots[0]);
	tmp = kmalloc(inline_size, GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	ptr = btrfs_file_extent_inline_start(item);

	read_extent_buffer(leaf, tmp, ptr, inline_size);

	max_size = min_t(unsigned long, PAGE_SIZE, max_size);
	ret = btrfs_decompress(compress_type, tmp, page, 0, inline_size, max_size);

	/*
	 * decompression code contains a memset to fill in any space between the end
	 * of the uncompressed data and the end of max_size in case the decompressed
	 * data ends up shorter than ram_bytes.  That doesn't cover the hole between
	 * the end of an inline extent and the beginning of the next block, so we
	 * cover that region here.
	 */
	if (max_size < PAGE_SIZE)
		memzero_page(page, max_size, PAGE_SIZE - max_size);
	kfree(tmp);
	return ret;
}
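
/*
 * Worked example (illustrative, not from the original source): with a 4K page
 * and an inline extent whose ram_bytes is 1500, btrfs_decompress() fills bytes
 * 0..1499 (zeroing up to max_size itself if the data decompresses short), and
 * the memzero_page() above clears bytes 1500..4095 so no stale data leaks into
 * the tail of the page.
 */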
static int read_inline_extent(struct btrfs_inode *inode, struct btrfs_path *path,
			      struct page *page)
{
	struct btrfs_file_extent_item *fi;
	void *kaddr;
	size_t copy_size;

	if (!page || PageUptodate(page))
		return 0;

	ASSERT(page_offset(page) == 0);

	fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_file_extent_item);
	if (btrfs_file_extent_compression(path->nodes[0], fi) != BTRFS_COMPRESS_NONE)
		return uncompress_inline(path, page, fi);

	copy_size = min_t(u64, PAGE_SIZE,
			  btrfs_file_extent_ram_bytes(path->nodes[0], fi));
	kaddr = kmap_local_page(page);
	read_extent_buffer(path->nodes[0], kaddr,
			   btrfs_file_extent_inline_start(fi), copy_size);
	kunmap_local(kaddr);
	if (copy_size < PAGE_SIZE)
		memzero_page(page, copy_size, PAGE_SIZE - copy_size);
	return 0;
}
/*
 * Lookup the first extent overlapping a range in a file.
 *
 * @inode:	file to search in
 * @page:	page to read extent data into if the extent is inline
 * @pg_offset:	offset into @page to copy to
 * @start:	file offset
 * @len:	length of range starting at @start
 *
 * Return the first &struct extent_map which overlaps the given range, reading
 * it from the B-tree and caching it if necessary. Note that there may be more
 * extents which overlap the given range after the returned extent_map.
 *
 * If @page is not NULL and the extent is inline, this also reads the extent
 * data directly into the page and marks the extent up to date in the io_tree.
 *
 * Return: ERR_PTR on error, non-NULL extent_map on success.
 */
struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
				    struct page *page, size_t pg_offset,
				    u64 start, u64 len)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	int ret = 0;
	u64 extent_start = 0;
	u64 extent_end = 0;
	u64 objectid = btrfs_ino(inode);
	int extent_type = -1;
	struct btrfs_path *path = NULL;
	struct btrfs_root *root = inode->root;
	struct btrfs_file_extent_item *item;
	struct extent_buffer *leaf;
	struct btrfs_key found_key;
	struct extent_map *em = NULL;
	struct extent_map_tree *em_tree = &inode->extent_tree;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	read_unlock(&em_tree->lock);

	if (em) {
		if (em->start > start || em->start + em->len <= start)
			free_extent_map(em);
		else if (em->block_start == EXTENT_MAP_INLINE && page)
			free_extent_map(em);
		else
			goto out;
	}
	em = alloc_extent_map();
	if (!em) {
		ret = -ENOMEM;
		goto out;
	}
	em->start = EXTENT_MAP_HOLE;
	em->orig_start = EXTENT_MAP_HOLE;
	em->len = (u64)-1;
	em->block_len = (u64)-1;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/* Chances are we'll be called again, so go ahead and do readahead */
	path->reada = READA_FORWARD;

	/*
	 * The same explanation in load_free_space_cache applies here as well,
	 * we only read when we're loading the free space cache, and at that
	 * point the commit_root has everything we need.
	 */
	if (btrfs_is_free_space_inode(inode)) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0);
	if (ret < 0) {
		goto out;
	} else if (ret > 0) {
		if (path->slots[0] == 0)
			goto not_found;
		path->slots[0]--;
		ret = 0;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	if (found_key.objectid != objectid ||
	    found_key.type != BTRFS_EXTENT_DATA_KEY) {
		/*
		 * If we back up past the first extent we want to move forward
		 * and see if there is an extent in front of us, otherwise we'll
		 * say there is a hole for our whole search range which can
		 * cause problems.
		 */
		extent_end = start;
		goto next;
	}

	extent_type = btrfs_file_extent_type(leaf, item);
	extent_start = found_key.offset;
	extent_end = btrfs_file_extent_end(path);
	if (extent_type == BTRFS_FILE_EXTENT_REG ||
	    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
		/* Only regular file could have regular/prealloc extent */
		if (!S_ISREG(inode->vfs_inode.i_mode)) {
			ret = -EUCLEAN;
			btrfs_crit(fs_info,
		"regular/prealloc extent found for non-regular inode %llu",
				   btrfs_ino(inode));
			goto out;
		}
		trace_btrfs_get_extent_show_fi_regular(inode, leaf, item,
						       extent_start);
	} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
		trace_btrfs_get_extent_show_fi_inline(inode, leaf, item,
						      path->slots[0],
						      extent_start);
	}
next:
	if (start >= extent_end) {
		path->slots[0]++;
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				goto not_found;

			leaf = path->nodes[0];
		}
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != objectid ||
		    found_key.type != BTRFS_EXTENT_DATA_KEY)
			goto not_found;
		if (start + len <= found_key.offset)
			goto not_found;
		if (start > found_key.offset)
			goto next;

		/* New extent overlaps with existing one */
		em->start = start;
		em->orig_start = start;
		em->len = found_key.offset - start;
		em->block_start = EXTENT_MAP_HOLE;
		goto insert;
	}

	btrfs_extent_item_to_extent_map(inode, path, item, em);

	if (extent_type == BTRFS_FILE_EXTENT_REG ||
	    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
		goto insert;
	} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
		/*
		 * Inline extent can only exist at file offset 0. This is
		 * ensured by tree-checker and inline extent creation path.
		 * Thus all members representing file offsets should be zero.
		 */
		ASSERT(pg_offset == 0);
		ASSERT(extent_start == 0);
		ASSERT(em->start == 0);

		/*
		 * btrfs_extent_item_to_extent_map() should have properly
		 * initialized em members already.
		 *
		 * Other members are not utilized for inline extents.
		 */
		ASSERT(em->block_start == EXTENT_MAP_INLINE);
		ASSERT(em->len == fs_info->sectorsize);

		ret = read_inline_extent(inode, path, page);
		if (ret < 0)
			goto out;
		goto insert;
	}
not_found:
	em->start = start;
	em->orig_start = start;
	em->len = len;
	em->block_start = EXTENT_MAP_HOLE;
insert:
	ret = 0;
	btrfs_release_path(path);
	if (em->start > start || extent_map_end(em) <= start) {
		btrfs_err(fs_info,
			  "bad extent! em: [%llu %llu] passed [%llu %llu]",
			  em->start, em->len, start, len);
		ret = -EIO;
		goto out;
	}

	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len);
	write_unlock(&em_tree->lock);
out:
	btrfs_free_path(path);

	trace_btrfs_get_extent(root, inode, em);

	if (ret) {
		free_extent_map(em);
		return ERR_PTR(ret);
	}
	return em;
}
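
/*
 * Illustrative sketch (an assumption, not from the original source): a typical
 * caller probes the mapping for a file range and must drop the reference when
 * done, e.g.:
 *
 *	struct extent_map *em;
 *
 *	em = btrfs_get_extent(inode, NULL, 0, start, len);
 *	if (IS_ERR(em))
 *		return PTR_ERR(em);
 *	... inspect em->block_start, em->start, em->len ...
 *	free_extent_map(em);
 *
 * btrfs_dio_iomap_begin() below uses it in essentially this way.
 */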
static struct extent_map *btrfs_create_dio_extent(struct btrfs_inode *inode,
						  struct btrfs_dio_data *dio_data,
						  const u64 start,
						  const u64 len,
						  const u64 orig_start,
						  const u64 block_start,
						  const u64 block_len,
						  const u64 orig_block_len,
						  const u64 ram_bytes,
						  const int type)
{
	struct extent_map *em = NULL;
	struct btrfs_ordered_extent *ordered;

	if (type != BTRFS_ORDERED_NOCOW) {
		em = create_io_em(inode, start, len, orig_start, block_start,
				  block_len, orig_block_len, ram_bytes,
				  BTRFS_COMPRESS_NONE, /* compress_type */
				  type);
		if (IS_ERR(em))
			goto out;
	}
	ordered = btrfs_alloc_ordered_extent(inode, start, len, len,
					     block_start, block_len, 0,
					     (1 << type) |
					     (1 << BTRFS_ORDERED_DIRECT),
					     BTRFS_COMPRESS_NONE);
	if (IS_ERR(ordered)) {
		if (em) {
			free_extent_map(em);
			btrfs_drop_extent_map_range(inode, start,
						    start + len - 1, false);
		}
		em = ERR_CAST(ordered);
	} else {
		ASSERT(!dio_data->ordered);
		dio_data->ordered = ordered;
	}
out:
	return em;
}
static struct extent_map *btrfs_new_extent_direct(struct btrfs_inode *inode,
						  struct btrfs_dio_data *dio_data,
						  u64 start, u64 len)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_map *em;
	struct btrfs_key ins;
	u64 alloc_hint;
	int ret;

	alloc_hint = get_extent_allocation_hint(inode, start, len);
	ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize,
				   0, alloc_hint, &ins, 1, 1);
	if (ret)
		return ERR_PTR(ret);

	em = btrfs_create_dio_extent(inode, dio_data, start, ins.offset, start,
				     ins.objectid, ins.offset, ins.offset,
				     ins.offset, BTRFS_ORDERED_REGULAR);
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	if (IS_ERR(em))
		btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset,
					   1);

	return em;
}
static bool btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group *block_group;
	bool readonly = false;

	block_group = btrfs_lookup_block_group(fs_info, bytenr);
	if (!block_group || block_group->ro)
		readonly = true;
	if (block_group)
		btrfs_put_block_group(block_group);
	return readonly;
}
/*
 * Check if we can do nocow write into the range [@offset, @offset + @len)
 *
 * @offset:	File offset
 * @len:	The length to write, will be updated to the nocow writeable
 *		range
 * @orig_start:	(optional) Return the original file offset of the file extent
 * @orig_block_len: (optional) Return the original on-disk length of the file
 *		extent
 * @ram_bytes:	(optional) Return the ram_bytes of the file extent
 * @strict:	if true, omit optimizations that might force us into unnecessary
 *		cow. e.g., don't trust generation number.
 *
 * Return:
 * >0	and update @len if we can do nocow write
 *  0	if we can't do nocow write
 * <0	if error happened
 *
 * NOTE: This only checks the file extents, caller is responsible to wait for
 *	 any ordered extents.
 */
noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
			      u64 *orig_start, u64 *orig_block_len,
			      u64 *ram_bytes, bool nowait, bool strict)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct can_nocow_file_extent_args nocow_args = { 0 };
	struct btrfs_path *path;
	int ret;
	struct extent_buffer *leaf;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	int found_type;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->nowait = nowait;

	ret = btrfs_lookup_file_extent(NULL, root, path,
				       btrfs_ino(BTRFS_I(inode)), offset, 0);
	if (ret < 0)
		goto out;

	if (ret == 1) {
		if (path->slots[0] == 0) {
			/* can't find the item, must cow */
			ret = 0;
			goto out;
		}
		path->slots[0]--;
	}
	ret = 0;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != btrfs_ino(BTRFS_I(inode)) ||
	    key.type != BTRFS_EXTENT_DATA_KEY) {
		/* not our file or wrong item type, must cow */
		goto out;
	}

	if (key.offset > offset) {
		/* Wrong offset, must cow */
		goto out;
	}

	if (btrfs_file_extent_end(path) <= offset)
		goto out;

	fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(leaf, fi);
	if (ram_bytes)
		*ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);

	nocow_args.start = offset;
	nocow_args.end = offset + *len - 1;
	nocow_args.strict = strict;
	nocow_args.free_path = true;

	ret = can_nocow_file_extent(path, &key, BTRFS_I(inode), &nocow_args);
	/* can_nocow_file_extent() has freed the path. */
	path = NULL;

	if (ret != 1) {
		/* Treat errors as not being able to NOCOW. */
		ret = 0;
		goto out;
	}

	ret = 0;
	if (btrfs_extent_readonly(fs_info, nocow_args.disk_bytenr))
		goto out;

	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		u64 range_end;

		range_end = round_up(offset + nocow_args.num_bytes,
				     root->fs_info->sectorsize) - 1;
		ret = test_range_bit(io_tree, offset, range_end,
				     EXTENT_DELALLOC, 0, NULL);
		if (ret) {
			ret = -EAGAIN;
			goto out;
		}
	}

	if (orig_start)
		*orig_start = key.offset - nocow_args.extent_offset;
	if (orig_block_len)
		*orig_block_len = nocow_args.disk_num_bytes;

	*len = nocow_args.num_bytes;
	ret = 1;
out:
	btrfs_free_path(path);
	return ret;
}
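
/*
 * Illustrative sketch (an assumption, not from the original source): a direct
 * write path would call this after flushing ordered extents, shrinking @len to
 * the NOCOW-able prefix of the range:
 *
 *	u64 len = write_bytes;
 *
 *	ret = can_nocow_extent(inode, pos, &len, NULL, NULL, NULL,
 *			       false, false);
 *	if (ret > 0)
 *		... overwrite the first len bytes in place ...
 *	else
 *		... fall back to COW allocation ...
 *
 * btrfs_get_blocks_direct_write() below passes the non-NULL out parameters so
 * it can build the replacement extent map.
 */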
static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
			      struct extent_state **cached_state,
			      unsigned int iomap_flags)
{
	const bool writing = (iomap_flags & IOMAP_WRITE);
	const bool nowait = (iomap_flags & IOMAP_NOWAIT);
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_ordered_extent *ordered;
	int ret = 0;

	while (1) {
		if (nowait) {
			if (!try_lock_extent(io_tree, lockstart, lockend,
					     cached_state))
				return -EAGAIN;
		} else {
			lock_extent(io_tree, lockstart, lockend, cached_state);
		}
		/*
		 * We're concerned with the entire range that we're going to be
		 * doing DIO to, so we need to make sure there's no ordered
		 * extents in this range.
		 */
		ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart,
						     lockend - lockstart + 1);

		/*
		 * We need to make sure there are no buffered pages in this
		 * range either, we could have raced between the invalidate in
		 * generic_file_direct_write and locking the extent.  The
		 * invalidate needs to happen so that reads after a write do not
		 * get stale data.
		 */
		if (!ordered &&
		    (!writing || !filemap_range_has_page(inode->i_mapping,
							 lockstart, lockend)))
			break;

		unlock_extent(io_tree, lockstart, lockend, cached_state);

		if (ordered) {
			if (nowait) {
				btrfs_put_ordered_extent(ordered);
				ret = -EAGAIN;
				break;
			}
			/*
			 * If we are doing a DIO read and the ordered extent we
			 * found is for a buffered write, we can not wait for it
			 * to complete and retry, because if we do so we can
			 * deadlock with concurrent buffered writes on page
			 * locks. This happens only if our DIO read covers more
			 * than one extent map, if at this point has already
			 * created an ordered extent for a previous extent map
			 * and locked its range in the inode's io tree, and a
			 * concurrent write against that previous extent map's
			 * range and this range started (we unlock the ranges
			 * in the io tree only when the bios complete and
			 * buffered writes always lock pages before attempting
			 * to lock range in the io tree).
			 */
			if (writing ||
			    test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags))
				btrfs_start_ordered_extent(ordered);
			else
				ret = nowait ? -EAGAIN : -ENOTBLK;
			btrfs_put_ordered_extent(ordered);
		} else {
			/*
			 * We could trigger writeback for this range (and wait
			 * for it to complete) and then invalidate the pages for
			 * this range (through invalidate_inode_pages2_range()),
			 * but that can lead us to a deadlock with a concurrent
			 * call to readahead (a buffered read or a defrag call
			 * triggered a readahead) on a page lock due to an
			 * ordered dio extent we created before but did not have
			 * yet a corresponding bio submitted (whence it can not
			 * complete), which makes readahead wait for that
			 * ordered extent to complete while holding a lock on
			 * that page.
			 */
			ret = nowait ? -EAGAIN : -ENOTBLK;
		}

		if (ret)
			break;

		cond_resched();
	}

	return ret;
}
/* The callers of this must take lock_extent() */
static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
				       u64 len, u64 orig_start, u64 block_start,
				       u64 block_len, u64 orig_block_len,
				       u64 ram_bytes, int compress_type,
				       int type)
{
	struct extent_map *em;
	int ret;

	ASSERT(type == BTRFS_ORDERED_PREALLOC ||
	       type == BTRFS_ORDERED_COMPRESSED ||
	       type == BTRFS_ORDERED_NOCOW ||
	       type == BTRFS_ORDERED_REGULAR);

	em = alloc_extent_map();
	if (!em)
		return ERR_PTR(-ENOMEM);

	em->start = start;
	em->orig_start = orig_start;
	em->len = len;
	em->block_len = block_len;
	em->block_start = block_start;
	em->orig_block_len = orig_block_len;
	em->ram_bytes = ram_bytes;
	em->generation = -1;
	set_bit(EXTENT_FLAG_PINNED, &em->flags);
	if (type == BTRFS_ORDERED_PREALLOC) {
		set_bit(EXTENT_FLAG_FILLING, &em->flags);
	} else if (type == BTRFS_ORDERED_COMPRESSED) {
		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		em->compress_type = compress_type;
	}

	ret = btrfs_replace_extent_map_range(inode, em, true);
	if (ret) {
		free_extent_map(em);
		return ERR_PTR(ret);
	}

	/* em got 2 refs now, callers need to do free_extent_map once. */
	return em;
}
static int btrfs_get_blocks_direct_write(struct extent_map **map,
					 struct inode *inode,
					 struct btrfs_dio_data *dio_data,
					 u64 start, u64 *lenp,
					 unsigned int iomap_flags)
{
	const bool nowait = (iomap_flags & IOMAP_NOWAIT);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map *em = *map;
	int type;
	u64 block_start, orig_start, orig_block_len, ram_bytes;
	struct btrfs_block_group *bg;
	bool can_nocow = false;
	bool space_reserved = false;
	u64 len = *lenp;
	u64 prev_len;
	int ret = 0;

	/*
	 * We don't allocate a new extent in the following cases
	 *
	 * 1) The inode is marked as NODATACOW. In this case we'll just use the
	 * existing extent.
	 * 2) The extent is marked as PREALLOC. We're good to go here and can
	 * just use the extent.
	 */
	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
	    ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
	     em->block_start != EXTENT_MAP_HOLE)) {
		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
			type = BTRFS_ORDERED_PREALLOC;
		else
			type = BTRFS_ORDERED_NOCOW;
		len = min(len, em->len - (start - em->start));
		block_start = em->block_start + (start - em->start);

		if (can_nocow_extent(inode, start, &len, &orig_start,
				     &orig_block_len, &ram_bytes, false, false) == 1) {
			bg = btrfs_inc_nocow_writers(fs_info, block_start);
			if (bg)
				can_nocow = true;
		}
	}

	prev_len = len;
	if (can_nocow) {
		struct extent_map *em2;

		/* We can NOCOW, so only need to reserve metadata space. */
		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len,
						      nowait);
		if (ret < 0) {
			/* Our caller expects us to free the input extent map. */
			free_extent_map(em);
			*map = NULL;
			btrfs_dec_nocow_writers(bg);
			if (nowait && (ret == -ENOSPC || ret == -EDQUOT))
				ret = -EAGAIN;
			goto out;
		}
		space_reserved = true;

		em2 = btrfs_create_dio_extent(BTRFS_I(inode), dio_data, start, len,
					      orig_start, block_start,
					      len, orig_block_len,
					      ram_bytes, type);
		btrfs_dec_nocow_writers(bg);
		if (type == BTRFS_ORDERED_PREALLOC) {
			free_extent_map(em);
			*map = em2;
			em = *map;
		}

		if (IS_ERR(em2)) {
			ret = PTR_ERR(em2);
			goto out;
		}

		dio_data->nocow_done = true;
	} else {
		/* Our caller expects us to free the input extent map. */
		free_extent_map(em);
		*map = NULL;

		if (nowait) {
			ret = -EAGAIN;
			goto out;
		}

		/*
		 * If we could not allocate data space before locking the file
		 * range and we can't do a NOCOW write, then we have to fail.
		 */
		if (!dio_data->data_space_reserved) {
			ret = -ENOSPC;
			goto out;
		}

		/*
		 * We have to COW and we have already reserved data space before,
		 * so now we reserve only metadata.
		 */
		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len,
						      false);
		if (ret < 0)
			goto out;
		space_reserved = true;

		em = btrfs_new_extent_direct(BTRFS_I(inode), dio_data, start, len);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}
		*map = em;
		len = min(len, em->len - (start - em->start));
		if (len < prev_len)
			btrfs_delalloc_release_metadata(BTRFS_I(inode),
							prev_len - len, true);
	}

	/*
	 * We have created our ordered extent, so we can now release our reservation
	 * for an outstanding extent.
	 */
	btrfs_delalloc_release_extents(BTRFS_I(inode), prev_len);

	/*
	 * Need to update the i_size under the extent lock so buffered
	 * readers will get the updated i_size when we unlock.
	 */
	if (start + len > i_size_read(inode))
		i_size_write(inode, start + len);
out:
	if (ret && space_reserved) {
		btrfs_delalloc_release_extents(BTRFS_I(inode), len);
		btrfs_delalloc_release_metadata(BTRFS_I(inode), len, true);
	}
	*lenp = len;
	return ret;
}
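
/*
 * Illustrative note (not from the original source): the reservation dance
 * above is asymmetric on purpose. In the NOCOW branch only metadata is
 * reserved, because the data blocks already exist on disk; in the COW branch
 * data space was reserved earlier by btrfs_dio_iomap_begin(), so only metadata
 * is added here, and if the allocated extent ends up shorter than requested
 * the surplus metadata reservation is returned immediately.
 */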
static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
		loff_t length, unsigned int flags, struct iomap *iomap,
		struct iomap *srcmap)
{
	struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map *em;
	struct extent_state *cached_state = NULL;
	struct btrfs_dio_data *dio_data = iter->private;
	u64 lockstart, lockend;
	const bool write = !!(flags & IOMAP_WRITE);
	int ret = 0;
	u64 len = length;
	const u64 data_alloc_len = length;
	bool unlock_extents = false;

	/*
	 * We could potentially fault if we have a buffer > PAGE_SIZE, and if
	 * we're NOWAIT we may submit a bio for a partial range and return
	 * EIOCBQUEUED, which would result in an errant short read.
	 *
	 * The best way to handle this would be to allow for partial completions
	 * of iocb's, so we could submit the partial bio, return and fault in
	 * the rest of the pages, and then submit the io for the rest of the
	 * range.  However we don't have that currently, so simply return
	 * -EAGAIN at this point so that the normal path is used.
	 */
	if (!write && (flags & IOMAP_NOWAIT) && length > PAGE_SIZE)
		return -EAGAIN;

	/*
	 * Cap the size of reads to that usually seen in buffered I/O as we need
	 * to allocate a contiguous array for the checksums.
	 */
	if (!write)
		len = min_t(u64, len, fs_info->sectorsize * BTRFS_MAX_BIO_SECTORS);

	lockstart = start;
	lockend = start + len - 1;

	/*
	 * iomap_dio_rw() only does filemap_write_and_wait_range(), which isn't
	 * enough if we've written compressed pages to this area, so we need to
	 * flush the dirty pages again to make absolutely sure that any
	 * outstanding dirty pages are on disk - the first flush only starts
	 * compression on the data, while keeping the pages locked, so by the
	 * time the second flush returns we know bios for the compressed pages
	 * were submitted and finished, and the pages are no longer under
	 * writeback.
	 *
	 * If we have a NOWAIT request and we have any pages in the range that
	 * are locked, likely due to compression still in progress, we don't want
	 * to block on page locks. We also don't want to block on pages marked as
	 * dirty or under writeback (same as for the non-compression case).
	 * iomap_dio_rw() did the same check, but after that and before we got
	 * here, mmap'ed writes may have happened or buffered reads started
	 * (readpage() and readahead(), which lock pages), as we haven't locked
	 * the file range yet.
	 */
	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
		     &BTRFS_I(inode)->runtime_flags)) {
		if (flags & IOMAP_NOWAIT) {
			if (filemap_range_needs_writeback(inode->i_mapping,
							  lockstart, lockend))
				return -EAGAIN;
		} else {
			ret = filemap_fdatawrite_range(inode->i_mapping, start,
						       start + length - 1);
			if (ret)
				return ret;
		}
	}

	memset(dio_data, 0, sizeof(*dio_data));

	/*
	 * We always try to allocate data space and must do it before locking
	 * the file range, to avoid deadlocks with concurrent writes to the same
	 * range if the range has several extents and the writes don't expand the
	 * current i_size (the inode lock is taken in shared mode). If we fail to
	 * allocate data space here we continue and later, after locking the
	 * file range, we fail with ENOSPC only if we figure out we can not do a
	 * NOCOW write.
	 */
	if (write && !(flags & IOMAP_NOWAIT)) {
		ret = btrfs_check_data_free_space(BTRFS_I(inode),
						  &dio_data->data_reserved,
						  start, data_alloc_len, false);
		if (!ret)
			dio_data->data_space_reserved = true;
		else if (ret && !(BTRFS_I(inode)->flags &
				  (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
			goto err;
	}

	/*
	 * If this errors out it's because we couldn't invalidate pagecache for
	 * this range and we need to fallback to buffered IO, or we are doing a
	 * NOWAIT read/write and we need to block.
	 */
	ret = lock_extent_direct(inode, lockstart, lockend, &cached_state, flags);
	if (ret < 0)
		goto err;

	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto unlock_err;
	}

	/*
	 * Ok for INLINE and COMPRESSED extents we need to fallback on buffered
	 * io.  INLINE is special, and we could probably kludge it in here, but
	 * it's still buffered so for safety lets just fall back to the generic
	 * buffered path.
	 *
	 * For COMPRESSED we _have_ to read the entire extent in so we can
	 * decompress it, so there will be buffering required no matter what we
	 * do, so go ahead and fallback to buffered.
	 *
	 * We return -ENOTBLK because that's what makes DIO go ahead and go back
	 * to buffered IO.  Don't blame me, this is the price we pay for using
	 * the generic code.
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
	    em->block_start == EXTENT_MAP_INLINE) {
		free_extent_map(em);
		/*
		 * If we are in a NOWAIT context, return -EAGAIN in order to
		 * fallback to buffered IO. This is not only because we can
		 * block with buffered IO (no support for NOWAIT semantics at
		 * the moment) but also to avoid returning short reads to user
		 * space - this happens if we were able to read some data from
		 * previous non-compressed extents and then when we fallback to
		 * buffered IO, at btrfs_file_read_iter() by calling
		 * filemap_read(), we fail to fault in pages for the read buffer,
		 * in which case filemap_read() returns a short read (the number
		 * of bytes previously read is > 0, so it does not return -EFAULT).
		 */
		ret = (flags & IOMAP_NOWAIT) ? -EAGAIN : -ENOTBLK;
		goto unlock_err;
	}

	len = min(len, em->len - (start - em->start));

	/*
	 * If we have a NOWAIT request and the range contains multiple extents
	 * (or a mix of extents and holes), then we return -EAGAIN to make the
	 * caller fallback to a context where it can do a blocking (without
	 * NOWAIT) request. This way we avoid doing partial IO and returning
	 * success to the caller, which is not optimal for writes and for reads
	 * it can result in unexpected behaviour for an application.
	 *
	 * When doing a read, because we use IOMAP_DIO_PARTIAL when calling
	 * iomap_dio_rw(), we can end up returning less data than what the caller
	 * asked for, resulting in an unexpected, and incorrect, short read.
	 * That is, the caller asked to read N bytes and we return less than that,
	 * which is wrong unless we are crossing EOF. This happens if we get a
	 * page fault error when trying to fault in pages for the buffer that is
	 * associated to the struct iov_iter passed to iomap_dio_rw(), and we
	 * have previously submitted bios for other extents in the range, in
	 * which case iomap_dio_rw() may return us EIOCBQUEUED if not all of
	 * those bios have completed by the time we get the page fault error,
	 * which we return back to our caller - we should only return EIOCBQUEUED
	 * after we have submitted bios for all the extents in the range.
	 */
	if ((flags & IOMAP_NOWAIT) && len < length) {
		free_extent_map(em);
		ret = -EAGAIN;
		goto unlock_err;
	}

	if (write) {
		ret = btrfs_get_blocks_direct_write(&em, inode, dio_data,
						    start, &len, flags);
		if (ret < 0)
			goto unlock_err;
		unlock_extents = true;
		/* Recalc len in case the new em is smaller than requested */
		len = min(len, em->len - (start - em->start));
		if (dio_data->data_space_reserved) {
			u64 release_offset;
			u64 release_len = 0;

			if (dio_data->nocow_done) {
				release_offset = start;
				release_len = data_alloc_len;
			} else if (len < data_alloc_len) {
				release_offset = start + len;
				release_len = data_alloc_len - len;
			}

			if (release_len > 0)
				btrfs_free_reserved_data_space(BTRFS_I(inode),
							       dio_data->data_reserved,
							       release_offset,
							       release_len);
		}
	} else {
		/*
		 * We need to unlock only the end area that we aren't using.
		 * The rest is going to be unlocked by the endio routine.
		 */
		lockstart = start + len;
		if (lockstart < lockend)
			unlock_extents = true;
	}

	if (unlock_extents)
		unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			      &cached_state);
	else
		free_extent_state(cached_state);

	/*
	 * Translate extent map information to iomap.
	 * We trim the extents (and move the addr) even though iomap code does
	 * that, since we have locked only the parts we are performing I/O in.
	 */
	if ((em->block_start == EXTENT_MAP_HOLE) ||
	    (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) && !write)) {
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_HOLE;
	} else {
		iomap->addr = em->block_start + (start - em->start);
		iomap->type = IOMAP_MAPPED;
	}
	iomap->offset = start;
	iomap->bdev = fs_info->fs_devices->latest_dev->bdev;
	iomap->length = len;
	free_extent_map(em);

	return 0;

unlock_err:
	unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
		      &cached_state);
err:
	if (dio_data->data_space_reserved) {
		btrfs_free_reserved_data_space(BTRFS_I(inode),
					       dio_data->data_reserved,
					       start, data_alloc_len);
		extent_changeset_free(dio_data->data_reserved);
	}

	return ret;
}
static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length,
			       ssize_t written, unsigned int flags, struct iomap *iomap)
{
	struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
	struct btrfs_dio_data *dio_data = iter->private;
	size_t submitted = dio_data->submitted;
	const bool write = !!(flags & IOMAP_WRITE);
	int ret = 0;

	if (!write && (iomap->type == IOMAP_HOLE)) {
		/* If reading from a hole, unlock and return */
		unlock_extent(&BTRFS_I(inode)->io_tree, pos, pos + length - 1,
			      NULL);
		return 0;
	}

	if (submitted < length) {
		pos += submitted;
		length -= submitted;
		if (write)
			btrfs_finish_ordered_extent(dio_data->ordered, NULL,
						    pos, length, false);
		else
			unlock_extent(&BTRFS_I(inode)->io_tree, pos,
				      pos + length - 1, NULL);
		ret = -ENOTBLK;
	}
	if (write) {
		btrfs_put_ordered_extent(dio_data->ordered);
		dio_data->ordered = NULL;
	}

	if (write)
		extent_changeset_free(dio_data->data_reserved);
	return ret;
}
static void btrfs_dio_end_io(struct btrfs_bio *bbio)
{
	struct btrfs_dio_private *dip =
		container_of(bbio, struct btrfs_dio_private, bbio);
	struct btrfs_inode *inode = bbio->inode;
	struct bio *bio = &bbio->bio;

	if (bio->bi_status) {
		btrfs_warn(inode->root->fs_info,
		"direct IO failed ino %llu op 0x%0x offset %#llx len %u err no %d",
			   btrfs_ino(inode), bio->bi_opf,
			   dip->file_offset, dip->bytes, bio->bi_status);
	}

	if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
		btrfs_finish_ordered_extent(bbio->ordered, NULL,
					    dip->file_offset, dip->bytes,
					    !bio->bi_status);
	} else {
		unlock_extent(&inode->io_tree, dip->file_offset,
			      dip->file_offset + dip->bytes - 1, NULL);
	}

	bbio->bio.bi_private = bbio->private;
	iomap_dio_bio_end_io(bio);
}
static void btrfs_dio_submit_io(const struct iomap_iter *iter, struct bio *bio,
				loff_t file_offset)
{
	struct btrfs_bio *bbio = btrfs_bio(bio);
	struct btrfs_dio_private *dip =
		container_of(bbio, struct btrfs_dio_private, bbio);
	struct btrfs_dio_data *dio_data = iter->private;

	btrfs_bio_init(bbio, BTRFS_I(iter->inode)->root->fs_info,
		       btrfs_dio_end_io, bio->bi_private);
	bbio->inode = BTRFS_I(iter->inode);
	bbio->file_offset = file_offset;

	dip->file_offset = file_offset;
	dip->bytes = bio->bi_iter.bi_size;

	dio_data->submitted += bio->bi_iter.bi_size;

	/*
	 * Check if we are doing a partial write.  If we are, we need to split
	 * the ordered extent to match the submitted bio.  Hang on to the
	 * remaining unfinishable ordered_extent in dio_data so that it can be
	 * cancelled in iomap_end to avoid a deadlock wherein faulting the
	 * remaining pages is blocked on the outstanding ordered extent.
	 */
	if (iter->flags & IOMAP_WRITE) {
		int ret;

		ret = btrfs_extract_ordered_extent(bbio, dio_data->ordered);
		if (ret) {
			btrfs_finish_ordered_extent(dio_data->ordered, NULL,
						    file_offset, dip->bytes,
						    false);
			bio->bi_status = errno_to_blk_status(ret);
			iomap_dio_bio_end_io(bio);
			return;
		}
	}

	btrfs_submit_bio(bbio, 0);
}
static const struct iomap_ops btrfs_dio_iomap_ops = {
	.iomap_begin		= btrfs_dio_iomap_begin,
	.iomap_end		= btrfs_dio_iomap_end,
};

static const struct iomap_dio_ops btrfs_dio_ops = {
	.submit_io		= btrfs_dio_submit_io,
	.bio_set		= &btrfs_dio_bioset,
};
ssize_t btrfs_dio_read(struct kiocb *iocb, struct iov_iter *iter, size_t done_before)
{
	struct btrfs_dio_data data = { 0 };

	return iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
			    IOMAP_DIO_PARTIAL, &data, done_before);
}

struct iomap_dio *btrfs_dio_write(struct kiocb *iocb, struct iov_iter *iter,
				  size_t done_before)
{
	struct btrfs_dio_data data = { 0 };

	return __iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
			      IOMAP_DIO_PARTIAL, &data, done_before);
}
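
/*
 * Illustrative sketch (an assumption, not from the original source): a read
 * path might use these thin wrappers as:
 *
 *	ssize_t ret = btrfs_dio_read(iocb, to, read);
 *	if (ret == -ENOTBLK)
 *		... fall back to buffered IO ...
 *
 * IOMAP_DIO_PARTIAL lets iomap return after submitting only part of the range
 * so the caller can fault in the remaining user pages and retry, passing the
 * bytes already completed as @done_before on the next call.
 */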
static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
			u64 start, u64 len)
{
	int ret;

	ret = fiemap_prep(inode, fieinfo, start, &len, 0);
	if (ret)
		return ret;

	/*
	 * fiemap_prep() called filemap_write_and_wait() for the whole possible
	 * file range (0 to LLONG_MAX), but that is not enough if we have
	 * compression enabled. The first filemap_fdatawrite_range() only kicks
	 * in the compression of data (in an async thread) and will return
	 * before the compression is done and writeback is started. A second
	 * filemap_fdatawrite_range() is needed to wait for the compression to
	 * complete and writeback to start. We also need to wait for ordered
	 * extents to complete, because our fiemap implementation uses mainly
	 * file extent items to list the extents, searching for extent maps
	 * only for file ranges with holes or prealloc extents to figure out
	 * if we have delalloc in those ranges.
	 */
	if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC) {
		ret = btrfs_wait_ordered_range(inode, 0, LLONG_MAX);
		if (ret)
			return ret;
	}

	return extent_fiemap(BTRFS_I(inode), fieinfo, start, len);
}
static int btrfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	return extent_writepages(mapping, wbc);
}

static void btrfs_readahead(struct readahead_control *rac)
{
	extent_readahead(rac);
}
/*
 * For release_folio() and invalidate_folio() we have a race window where
 * folio_end_writeback() is called but the subpage spinlock is not yet released.
 * If we continue to release/invalidate the page, we could cause use-after-free
 * for the subpage spinlock. So this function spins and waits for the subpage
 * spinlock to be released.
 */
static void wait_subpage_spinlock(struct page *page)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
	struct btrfs_subpage *subpage;

	if (!btrfs_is_subpage(fs_info, page))
		return;

	ASSERT(PagePrivate(page) && page->private);
	subpage = (struct btrfs_subpage *)page->private;

	/*
	 * This may look insane as we just acquire the spinlock and release it,
	 * without doing anything.  But we just want to make sure no one is
	 * still holding the subpage spinlock.
	 * And since the page is not dirty nor writeback, and we have page
	 * locked, the only possible way to hold a spinlock is from the endio
	 * function to clear page writeback.
	 *
	 * Here we just acquire the spinlock so that all existing callers
	 * should exit and we're safe to release/invalidate the page.
	 */
	spin_lock_irq(&subpage->lock);
	spin_unlock_irq(&subpage->lock);
}
static bool __btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
{
	int ret = try_release_extent_mapping(&folio->page, gfp_flags);

	if (ret == 1) {
		wait_subpage_spinlock(&folio->page);
		clear_page_extent_mapped(&folio->page);
	}
	return ret;
}

static bool btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
{
	if (folio_test_writeback(folio) || folio_test_dirty(folio))
		return false;
	return __btrfs_release_folio(folio, gfp_flags);
}
#ifdef CONFIG_MIGRATION
static int btrfs_migrate_folio(struct address_space *mapping,
			       struct folio *dst, struct folio *src,
			       enum migrate_mode mode)
{
	int ret = filemap_migrate_folio(mapping, dst, src, mode);

	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (folio_test_ordered(src)) {
		folio_clear_ordered(src);
		folio_set_ordered(dst);
	}

	return MIGRATEPAGE_SUCCESS;
}
#else
#define btrfs_migrate_folio NULL
#endif
static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
				   size_t length)
{
	struct btrfs_inode *inode = BTRFS_I(folio->mapping->host);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_io_tree *tree = &inode->io_tree;
	struct extent_state *cached_state = NULL;
	u64 page_start = folio_pos(folio);
	u64 page_end = page_start + folio_size(folio) - 1;
	u64 cur;
	int inode_evicting = inode->vfs_inode.i_state & I_FREEING;

	/*
	 * We have folio locked so no new ordered extent can be created on this
	 * page, nor bio can be submitted for this folio.
	 *
	 * But already submitted bio can still be finished on this folio.
	 * Furthermore, endio function won't skip folio which has Ordered
	 * (Private2) already cleared, so it's possible for endio and
	 * invalidate_folio to do the same ordered extent accounting twice
	 * on one folio.
	 *
	 * So here we wait for any submitted bios to finish, so that we won't
	 * do double ordered extent accounting on the same folio.
	 */
	folio_wait_writeback(folio);
	wait_subpage_spinlock(&folio->page);

	/*
	 * For subpage case, we have call sites like
	 * btrfs_punch_hole_lock_range() which passes range not aligned to
	 * sectorsize.
	 * If the range doesn't cover the full folio, we don't need to and
	 * shouldn't clear page extent mapped, as folio->private can still
	 * record subpage dirty bits for other part of the range.
	 *
	 * For cases that invalidate the full folio even the range doesn't
	 * cover the full folio, like invalidating the last folio, we're
	 * still safe to wait for ordered extent to finish.
	 */
	if (!(offset == 0 && length == folio_size(folio))) {
		btrfs_release_folio(folio, GFP_NOFS);
		return;
	}

	if (!inode_evicting)
		lock_extent(tree, page_start, page_end, &cached_state);

	cur = page_start;
	while (cur < page_end) {
		struct btrfs_ordered_extent *ordered;
		u64 range_end;
		u32 range_len;
		u32 extra_flags = 0;

		ordered = btrfs_lookup_first_ordered_range(inode, cur,
							   page_end + 1 - cur);
		if (!ordered) {
			range_end = page_end;
			/*
			 * No ordered extent covering this range, we are safe
			 * to delete all extent states in the range.
			 */
			extra_flags = EXTENT_CLEAR_ALL_BITS;
			goto next;
		}
		if (ordered->file_offset > cur) {
			/*
			 * There is a range between [cur, oe->file_offset) not
			 * covered by any ordered extent.
			 * We are safe to delete all extent states, and handle
			 * the ordered extent in the next iteration.
			 */
			range_end = ordered->file_offset - 1;
			extra_flags = EXTENT_CLEAR_ALL_BITS;
			goto next;
		}

		range_end = min(ordered->file_offset + ordered->num_bytes - 1,
				page_end);
		ASSERT(range_end + 1 - cur < U32_MAX);
		range_len = range_end + 1 - cur;
		if (!btrfs_page_test_ordered(fs_info, &folio->page, cur, range_len)) {
			/*
			 * If Ordered (Private2) is cleared, it means endio has
			 * already been executed for the range.
			 * We can't delete the extent states as
			 * btrfs_finish_ordered_io() may still use some of them.
			 */
			goto next;
		}
		btrfs_page_clear_ordered(fs_info, &folio->page, cur, range_len);

		/*
		 * IO on this page will never be started, so we need to account
		 * for any ordered extents now. Don't clear EXTENT_DELALLOC_NEW
		 * here, must leave that up for the ordered extent completion.
		 *
		 * This will also unlock the range for incoming
		 * btrfs_finish_ordered_io().
		 */
		if (!inode_evicting)
			clear_extent_bit(tree, cur, range_end,
					 EXTENT_DELALLOC |
					 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
					 EXTENT_DEFRAG, &cached_state);

		spin_lock_irq(&inode->ordered_tree.lock);
		set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
		ordered->truncated_len = min(ordered->truncated_len,
					     cur - ordered->file_offset);
		spin_unlock_irq(&inode->ordered_tree.lock);

		/*
		 * If the ordered extent has finished, we're safe to delete all
		 * the extent states of the range, otherwise
		 * btrfs_finish_ordered_io() will get executed by endio for
		 * other pages, so we can't delete extent states.
		 */
		if (btrfs_dec_test_ordered_pending(inode, &ordered,
						   cur, range_end + 1 - cur)) {
			btrfs_finish_ordered_io(ordered);
			/*
			 * The ordered extent has finished, now we're again
			 * safe to delete all extent states of the range.
			 */
			extra_flags = EXTENT_CLEAR_ALL_BITS;
		}
next:
		if (ordered)
			btrfs_put_ordered_extent(ordered);
		/*
		 * Qgroup reserved space handler
		 * Sector(s) here will be either:
		 *
		 * 1) Already written to disk or bio already finished
		 *    Then its QGROUP_RESERVED bit in io_tree is already cleared.
		 *    Qgroup will be handled by its qgroup_record then.
		 *    btrfs_qgroup_free_data() call will do nothing here.
		 *
		 * 2) Not written to disk yet
		 *    Then btrfs_qgroup_free_data() call will clear the
		 *    QGROUP_RESERVED bit of its io_tree, and free the qgroup
		 *    reserved data space.
		 *    Since the IO will never happen for this page.
		 */
		btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur);
		if (!inode_evicting) {
			clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED |
					 EXTENT_DELALLOC | EXTENT_UPTODATE |
					 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG |
					 extra_flags, &cached_state);
		}
		cur = range_end + 1;
	}
	/*
	 * We have iterated through all ordered extents of the page, the page
	 * should not have Ordered (Private2) anymore, or the above iteration
	 * did something wrong.
	 */
	ASSERT(!folio_test_ordered(folio));
	btrfs_page_clear_checked(fs_info, &folio->page, folio_pos(folio), folio_size(folio));
	if (!inode_evicting)
		__btrfs_release_folio(folio, GFP_NOFS);
	clear_page_extent_mapped(&folio->page);
}
/*
 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
 * called from a page fault handler when a page is first dirtied. Hence we must
 * be careful to check for EOF conditions here. We set the page up correctly
 * for a written page which means we get ENOSPC checking when writing into
 * holes and correct delalloc and unwritten extent mapping on filesystems that
 * support these features.
 *
 * We are not allowed to take the i_mutex here so we have to play games to
 * protect against truncate races as the page could now be beyond EOF. Because
 * truncate_setsize() writes the inode size before removing pages, once we have
 * the page lock we can determine safely if the page is beyond EOF. If it is not
 * beyond EOF, then the page is guaranteed safe against truncation until we
 * unlock the page.
 */
vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct extent_changeset *data_reserved = NULL;
	unsigned long zero_start;
	loff_t size;
	vm_fault_t ret;
	int ret2;
	int reserved = 0;
	u64 reserved_space;
	u64 page_start;
	u64 page_end;
	u64 end;

	reserved_space = PAGE_SIZE;

	sb_start_pagefault(inode->i_sb);
	page_start = page_offset(page);
	page_end = page_start + PAGE_SIZE - 1;
	end = page_end;

	/*
	 * Reserving delalloc space after obtaining the page lock can lead to
	 * deadlock. For example, if a dirty page is locked by this function
	 * and the call to btrfs_delalloc_reserve_space() ends up triggering
	 * dirty page write out, then the btrfs_writepages() function could
	 * end up waiting indefinitely to get a lock on the page currently
	 * being processed by btrfs_page_mkwrite() function.
	 */
	ret2 = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved,
					    page_start, reserved_space);
	if (!ret2) {
		ret2 = file_update_time(vmf->vma->vm_file);
		reserved = 1;
	}
	if (ret2) {
		ret = vmf_error(ret2);
		if (reserved)
			goto out;
		goto out_noreserve;
	}

	ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
again:
	down_read(&BTRFS_I(inode)->i_mmap_lock);
	lock_page(page);
	size = i_size_read(inode);

	if ((page->mapping != inode->i_mapping) ||
	    (page_start >= size)) {
		/* page got truncated out from underneath us */
		goto out_unlock;
	}
	wait_on_page_writeback(page);

	lock_extent(io_tree, page_start, page_end, &cached_state);
	ret2 = set_page_extent_mapped(page);
	if (ret2 < 0) {
		ret = vmf_error(ret2);
		unlock_extent(io_tree, page_start, page_end, &cached_state);
		goto out_unlock;
	}

	/*
	 * we can't set the delalloc bits if there are pending ordered
	 * extents.  Drop our locks and wait for them to finish
	 */
	ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start,
					     PAGE_SIZE);
	if (ordered) {
		unlock_extent(io_tree, page_start, page_end, &cached_state);
		unlock_page(page);
		up_read(&BTRFS_I(inode)->i_mmap_lock);
		btrfs_start_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	if (page->index == ((size - 1) >> PAGE_SHIFT)) {
		reserved_space = round_up(size - page_start,
					  fs_info->sectorsize);
		if (reserved_space < PAGE_SIZE) {
			end = page_start + reserved_space - 1;
			btrfs_delalloc_release_space(BTRFS_I(inode),
					data_reserved, page_start,
					PAGE_SIZE - reserved_space, true);
		}
	}

	/*
	 * page_mkwrite gets called when the page is firstly dirtied after it's
	 * faulted in, but write(2) could also dirty a page and set delalloc
	 * bits, thus in this case for space account reason, we still need to
	 * clear any delalloc bits within this page range since we have to
	 * reserve data&meta space before lock_page() (see above comments).
	 */
	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
			 EXTENT_DEFRAG, &cached_state);

	ret2 = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start, end, 0,
					 &cached_state);
	if (ret2) {
		unlock_extent(io_tree, page_start, page_end, &cached_state);
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (page_start + PAGE_SIZE > size)
		zero_start = offset_in_page(size);
	else
		zero_start = PAGE_SIZE;

	if (zero_start != PAGE_SIZE)
		memzero_page(page, zero_start, PAGE_SIZE - zero_start);

	btrfs_page_clear_checked(fs_info, page, page_start, PAGE_SIZE);
	btrfs_page_set_dirty(fs_info, page, page_start, end + 1 - page_start);
	btrfs_page_set_uptodate(fs_info, page, page_start, end + 1 - page_start);

	btrfs_set_inode_last_sub_trans(BTRFS_I(inode));

	unlock_extent(io_tree, page_start, page_end, &cached_state);
	up_read(&BTRFS_I(inode)->i_mmap_lock);

	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
	sb_end_pagefault(inode->i_sb);
	extent_changeset_free(data_reserved);
	return VM_FAULT_LOCKED;

out_unlock:
	unlock_page(page);
	up_read(&BTRFS_I(inode)->i_mmap_lock);
out:
	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
	btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, page_start,
				     reserved_space, (ret != 0));
out_noreserve:
	sb_end_pagefault(inode->i_sb);
	extent_changeset_free(data_reserved);
	return ret;
}
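
/*
 * Illustration (not kernel code): btrfs_page_mkwrite() runs the first time a
 * shared, mapped page is dirtied through a memory store. A minimal userspace
 * sequence that exercises this path might look like the sketch below; the
 * file path is hypothetical.
 *
 *	int fd = open("/mnt/btrfs/file", O_RDWR);
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	p[0] = 'x';	// first store faults; delalloc space is reserved here
 *
 * If the reservation fails (e.g. ENOSPC), the faulting store receives SIGBUS
 * rather than silently losing the data at writeback time.
 */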
static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
{
	struct btrfs_truncate_control control = {
		.inode = inode,
		.ino = btrfs_ino(inode),
		.min_type = BTRFS_EXTENT_DATA_KEY,
		.clear_extent_range = true,
	};
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *rsv;
	int ret;
	struct btrfs_trans_handle *trans;
	u64 mask = fs_info->sectorsize - 1;
	const u64 min_size = btrfs_calc_metadata_size(fs_info, 1);

	if (!skip_writeback) {
		ret = btrfs_wait_ordered_range(&inode->vfs_inode,
					       inode->vfs_inode.i_size & (~mask),
					       (u64)-1);
		if (ret)
			return ret;
	}

	/*
	 * Yes ladies and gentlemen, this is indeed ugly.  We have a couple of
	 * things going on here:
	 *
	 * 1) We need to reserve space to update our inode.
	 *
	 * 2) We need to have something to cache all the space that is going to
	 * be free'd up by the truncate operation, but also have some slack
	 * space reserved in case it uses space during the truncate (thank you
	 * very much snapshotting).
	 *
	 * And we need these to be separate.  The fact is we can use a lot of
	 * space doing the truncate, and we have no earthly idea how much space
	 * we will use, so we need the truncate reservation to be separate so it
	 * doesn't end up using space reserved for updating the inode.  We also
	 * need to be able to stop the transaction and start a new one, which
	 * means we need to be able to update the inode several times, and we
	 * have no idea of knowing how many times that will be, so we can't just
	 * reserve 1 item for the entirety of the operation, so that has to be
	 * done separately as well.
	 *
	 * So that leaves us with
	 *
	 * 1) rsv - for the truncate reservation, which we will steal from the
	 * transaction reservation.
	 * 2) fs_info->trans_block_rsv - this will have 1 items worth left for
	 * updating the inode.
	 */
	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
	if (!rsv)
		return -ENOMEM;
	rsv->size = min_size;
	rsv->failfast = true;

	/*
	 * 1 for the truncate slack space
	 * 1 for updating the inode.
	 */
	trans = btrfs_start_transaction(root, 2);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	/* Migrate the slack space for the truncate to our reserve */
	ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
				      min_size, false);
	/*
	 * We have reserved 2 metadata units when we started the transaction and
	 * min_size matches 1 unit, so this should never fail, but if it does,
	 * it's not critical we just fail truncation.
	 */
	if (WARN_ON(ret)) {
		btrfs_end_transaction(trans);
		goto out;
	}

	trans->block_rsv = rsv;

	while (1) {
		struct extent_state *cached_state = NULL;
		const u64 new_size = inode->vfs_inode.i_size;
		const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize);

		control.new_size = new_size;
		lock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
		/*
		 * We want to drop from the next block forward in case this new
		 * size is not block aligned since we will be keeping the last
		 * block of the extent just the way it is.
		 */
		btrfs_drop_extent_map_range(inode,
					    ALIGN(new_size, fs_info->sectorsize),
					    (u64)-1, false);

		ret = btrfs_truncate_inode_items(trans, root, &control);

		inode_sub_bytes(&inode->vfs_inode, control.sub_bytes);
		btrfs_inode_safe_disk_i_size_write(inode, control.last_size);

		unlock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);

		trans->block_rsv = &fs_info->trans_block_rsv;
		if (ret != -ENOSPC && ret != -EAGAIN)
			break;

		ret = btrfs_update_inode(trans, root, inode);
		if (ret)
			break;

		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty(fs_info);

		trans = btrfs_start_transaction(root, 2);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			break;
		}

		btrfs_block_rsv_release(fs_info, rsv, -1, NULL);
		ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
					      rsv, min_size, false);
		/*
		 * We have reserved 2 metadata units when we started the
		 * transaction and min_size matches 1 unit, so this should never
		 * fail, but if it does, it's not critical we just fail truncation.
		 */
		if (WARN_ON(ret))
			break;

		trans->block_rsv = rsv;
	}

	/*
	 * We can't call btrfs_truncate_block inside a trans handle as we could
	 * deadlock with freeze, if we got BTRFS_NEED_TRUNCATE_BLOCK then we
	 * know we've truncated everything except the last little bit, and can
	 * do btrfs_truncate_block and then update the disk_i_size.
	 */
	if (ret == BTRFS_NEED_TRUNCATE_BLOCK) {
		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty(fs_info);

		ret = btrfs_truncate_block(inode, inode->vfs_inode.i_size, 0, 0);
		if (ret)
			goto out;
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			goto out;
		}
		btrfs_inode_safe_disk_i_size_write(inode, 0);
	}

	if (trans) {
		int ret2;

		trans->block_rsv = &fs_info->trans_block_rsv;
		ret2 = btrfs_update_inode(trans, root, inode);
		if (ret2 && !ret)
			ret = ret2;

		ret2 = btrfs_end_transaction(trans);
		if (ret2 && !ret)
			ret = ret2;
		btrfs_btree_balance_dirty(fs_info);
	}
out:
	btrfs_free_block_rsv(fs_info, rsv);
	/*
	 * So if we truncate and then write and fsync we normally would just
	 * write the extents that changed, which is a problem if we need to
	 * first truncate that entire inode.  So set this flag so we write out
	 * all of the extents in the inode to the sync log so we're completely
	 * safe.
	 *
	 * If no extents were dropped or trimmed we don't need to force the next
	 * fsync to truncate all the inode's items from the log and re-log them
	 * all. This means the truncate operation did not change the file size,
	 * or changed it to a smaller size but there was only an implicit hole
	 * between the old i_size and the new i_size, and there were no prealloc
	 * extents beyond i_size to drop.
	 */
	if (control.extents_found > 0)
		btrfs_set_inode_full_sync(inode);

	return ret;
}
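
/*
 * Illustration (not kernel code): because btrfs_truncate() may drop extents
 * and restart the transaction many times, a truncate followed by an fsync can
 * force a full re-log of the inode (see btrfs_set_inode_full_sync() above).
 * A userspace sequence that hits this, with a hypothetical path:
 *
 *	int fd = open("/mnt/btrfs/file", O_RDWR);
 *	ftruncate(fd, 4096);	// drops extents beyond 4KiB
 *	fsync(fd);		// logs all extents, not just the changed ones
 */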
struct inode *btrfs_new_subvol_inode(struct mnt_idmap *idmap,
				     struct inode *dir)
{
	struct inode *inode;

	inode = new_inode(dir->i_sb);
	if (inode) {
		/*
		 * Subvolumes don't inherit the sgid bit or the parent's gid if
		 * the parent's sgid bit is set. This is probably a bug.
		 */
		inode_init_owner(idmap, inode, NULL,
				 S_IFDIR | (~current_umask() & S_IRWXUGO));
		inode->i_op = &btrfs_dir_inode_operations;
		inode->i_fop = &btrfs_dir_file_operations;
	}
	return inode;
}
struct inode *btrfs_alloc_inode(struct super_block *sb)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
	struct btrfs_inode *ei;
	struct inode *inode;

	ei = alloc_inode_sb(sb, btrfs_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;

	ei->root = NULL;
	ei->generation = 0;
	ei->last_trans = 0;
	ei->last_sub_trans = 0;
	ei->logged_trans = 0;
	ei->delalloc_bytes = 0;
	ei->new_delalloc_bytes = 0;
	ei->defrag_bytes = 0;
	ei->disk_i_size = 0;
	ei->flags = 0;
	ei->ro_flags = 0;
	ei->csum_bytes = 0;
	ei->index_cnt = (u64)-1;
	ei->dir_index = 0;
	ei->last_unlink_trans = 0;
	ei->last_reflink_trans = 0;
	ei->last_log_commit = 0;

	spin_lock_init(&ei->lock);
	ei->outstanding_extents = 0;
	if (sb->s_magic != BTRFS_TEST_MAGIC)
		btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv,
					      BTRFS_BLOCK_RSV_DELALLOC);
	ei->runtime_flags = 0;
	ei->prop_compress = BTRFS_COMPRESS_NONE;
	ei->defrag_compress = BTRFS_COMPRESS_NONE;

	ei->delayed_node = NULL;

	ei->i_otime.tv_sec = 0;
	ei->i_otime.tv_nsec = 0;

	inode = &ei->vfs_inode;
	extent_map_tree_init(&ei->extent_tree);
	extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO);
	ei->io_tree.inode = ei;
	extent_io_tree_init(fs_info, &ei->file_extent_tree,
			    IO_TREE_INODE_FILE_EXTENT);
	mutex_init(&ei->log_mutex);
	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
	INIT_LIST_HEAD(&ei->delalloc_inodes);
	INIT_LIST_HEAD(&ei->delayed_iput);
	RB_CLEAR_NODE(&ei->rb_node);
	init_rwsem(&ei->i_mmap_lock);

	return inode;
}
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
void btrfs_test_destroy_inode(struct inode *inode)
{
	btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);
	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}
#endif

void btrfs_free_inode(struct inode *inode)
{
	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}
void btrfs_destroy_inode(struct inode *vfs_inode)
{
	struct btrfs_ordered_extent *ordered;
	struct btrfs_inode *inode = BTRFS_I(vfs_inode);
	struct btrfs_root *root = inode->root;
	bool freespace_inode;

	WARN_ON(!hlist_empty(&vfs_inode->i_dentry));
	WARN_ON(vfs_inode->i_data.nrpages);
	WARN_ON(inode->block_rsv.reserved);
	WARN_ON(inode->block_rsv.size);
	WARN_ON(inode->outstanding_extents);
	if (!S_ISDIR(vfs_inode->i_mode)) {
		WARN_ON(inode->delalloc_bytes);
		WARN_ON(inode->new_delalloc_bytes);
	}
	WARN_ON(inode->csum_bytes);
	WARN_ON(inode->defrag_bytes);

	/*
	 * This can happen where we create an inode, but somebody else also
	 * created the same inode and we need to destroy the one we already
	 * created.
	 */
	if (!root)
		return;

	/*
	 * If this is a free space inode do not take the ordered extents lockdep
	 * map.
	 */
	freespace_inode = btrfs_is_free_space_inode(inode);

	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
		if (!ordered)
			break;
		else {
			btrfs_err(root->fs_info,
				  "found ordered extent %llu %llu on inode cleanup",
				  ordered->file_offset, ordered->num_bytes);

			if (!freespace_inode)
				btrfs_lockdep_acquire(root->fs_info, btrfs_ordered_extent);

			btrfs_remove_ordered_extent(inode, ordered);
			btrfs_put_ordered_extent(ordered);
			btrfs_put_ordered_extent(ordered);
		}
	}
	btrfs_qgroup_check_reserved_leak(inode);
	inode_tree_del(inode);
	btrfs_drop_extent_map_range(inode, 0, (u64)-1, false);
	btrfs_inode_clear_file_extent_range(inode, 0, (u64)-1);
	btrfs_put_root(inode->root);
}
int btrfs_drop_inode(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (root == NULL)
		return 1;

	/* the snap/subvol tree is on deleting */
	if (btrfs_root_refs(&root->root_item) == 0)
		return 1;
	else
		return generic_drop_inode(inode);
}

static void init_once(void *foo)
{
	struct btrfs_inode *ei = foo;

	inode_init_once(&ei->vfs_inode);
}
void __cold btrfs_destroy_cachep(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	bioset_exit(&btrfs_dio_bioset);
	kmem_cache_destroy(btrfs_inode_cachep);
}

int __init btrfs_init_cachep(void)
{
	btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
				sizeof(struct btrfs_inode), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT,
				init_once);
	if (!btrfs_inode_cachep)
		goto fail;

	if (bioset_init(&btrfs_dio_bioset, BIO_POOL_SIZE,
			offsetof(struct btrfs_dio_private, bbio.bio),
			BIOSET_NEED_BVECS))
		goto fail;

	return 0;
fail:
	btrfs_destroy_cachep();
	return -ENOMEM;
}
static int btrfs_getattr(struct mnt_idmap *idmap,
			 const struct path *path, struct kstat *stat,
			 u32 request_mask, unsigned int flags)
{
	u64 delalloc_bytes;
	u64 inode_bytes;
	struct inode *inode = d_inode(path->dentry);
	u32 blocksize = inode->i_sb->s_blocksize;
	u32 bi_flags = BTRFS_I(inode)->flags;
	u32 bi_ro_flags = BTRFS_I(inode)->ro_flags;

	stat->result_mask |= STATX_BTIME;
	stat->btime.tv_sec = BTRFS_I(inode)->i_otime.tv_sec;
	stat->btime.tv_nsec = BTRFS_I(inode)->i_otime.tv_nsec;
	if (bi_flags & BTRFS_INODE_APPEND)
		stat->attributes |= STATX_ATTR_APPEND;
	if (bi_flags & BTRFS_INODE_COMPRESS)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (bi_flags & BTRFS_INODE_IMMUTABLE)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (bi_flags & BTRFS_INODE_NODUMP)
		stat->attributes |= STATX_ATTR_NODUMP;
	if (bi_ro_flags & BTRFS_INODE_RO_VERITY)
		stat->attributes |= STATX_ATTR_VERITY;

	stat->attributes_mask |= (STATX_ATTR_APPEND |
				  STATX_ATTR_COMPRESSED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP);

	generic_fillattr(idmap, request_mask, inode, stat);
	stat->dev = BTRFS_I(inode)->root->anon_dev;

	spin_lock(&BTRFS_I(inode)->lock);
	delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes;
	inode_bytes = inode_get_bytes(inode);
	spin_unlock(&BTRFS_I(inode)->lock);
	stat->blocks = (ALIGN(inode_bytes, blocksize) +
			ALIGN(delalloc_bytes, blocksize)) >> SECTOR_SHIFT;
	return 0;
}
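
/*
 * Illustration (not kernel code): the btime and attribute bits filled in
 * above are visible to userspace through statx(2). A minimal sketch, with a
 * hypothetical path:
 *
 *	struct statx stx;
 *
 *	statx(AT_FDCWD, "/mnt/btrfs/file", 0, STATX_BTIME, &stx);
 *	if (stx.stx_mask & STATX_BTIME)
 *		printf("created: %lld\n", (long long)stx.stx_btime.tv_sec);
 *	if (stx.stx_attributes & STATX_ATTR_COMPRESSED)
 *		printf("file has compressed extents\n");
 *
 * Note that st_blocks also accounts not-yet-flushed delalloc bytes, so a
 * freshly written file reports a non-zero block count before writeback.
 */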
static int btrfs_rename_exchange(struct inode *old_dir,
				 struct dentry *old_dentry,
				 struct inode *new_dir,
				 struct dentry *new_dentry)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
	struct btrfs_trans_handle *trans;
	unsigned int trans_num_items;
	struct btrfs_root *root = BTRFS_I(old_dir)->root;
	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
	struct inode *new_inode = new_dentry->d_inode;
	struct inode *old_inode = old_dentry->d_inode;
	struct btrfs_rename_ctx old_rename_ctx;
	struct btrfs_rename_ctx new_rename_ctx;
	u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
	u64 new_ino = btrfs_ino(BTRFS_I(new_inode));
	u64 old_idx = 0;
	u64 new_idx = 0;
	int ret;
	int ret2;
	bool need_abort = false;
	struct fscrypt_name old_fname, new_fname;
	struct fscrypt_str *old_name, *new_name;

	/*
	 * For non-subvolumes allow exchange only within one subvolume, in the
	 * same inode namespace. Two subvolumes (represented as directory) can
	 * be exchanged as they're a logical link and have a fixed inode number.
	 */
	if (root != dest &&
	    (old_ino != BTRFS_FIRST_FREE_OBJECTID ||
	     new_ino != BTRFS_FIRST_FREE_OBJECTID))
		return -EXDEV;

	ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname);
	if (ret)
		return ret;

	ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname);
	if (ret) {
		fscrypt_free_filename(&old_fname);
		return ret;
	}

	old_name = &old_fname.disk_name;
	new_name = &new_fname.disk_name;

	/* close the race window with snapshot create/destroy ioctl */
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID ||
	    new_ino == BTRFS_FIRST_FREE_OBJECTID)
		down_read(&fs_info->subvol_sem);

	/*
	 * For each inode:
	 * 1 to remove old dir item
	 * 1 to remove old dir index
	 * 1 to add new dir item
	 * 1 to add new dir index
	 * 1 to update parent inode
	 *
	 * If the parents are the same, we only need to account for one
	 */
	trans_num_items = (old_dir == new_dir ? 9 : 10);
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
		/*
		 * 1 to remove old root ref
		 * 1 to remove old root backref
		 * 1 to add new root ref
		 * 1 to add new root backref
		 */
		trans_num_items += 4;
	} else {
		/*
		 * 1 to update inode item
		 * 1 to remove old inode ref
		 * 1 to add new inode ref
		 */
		trans_num_items += 3;
	}
	if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
		trans_num_items += 4;
	else
		trans_num_items += 3;
	trans = btrfs_start_transaction(root, trans_num_items);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_notrans;
	}

	if (dest != root) {
		ret = btrfs_record_root_in_trans(trans, dest);
		if (ret)
			goto out_fail;
	}

	/*
	 * We need to find a free sequence number both in the source and
	 * in the destination directory for the exchange.
	 */
	ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx);
	if (ret)
		goto out_fail;
	ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx);
	if (ret)
		goto out_fail;

	BTRFS_I(old_inode)->dir_index = 0ULL;
	BTRFS_I(new_inode)->dir_index = 0ULL;

	/* Reference for the source. */
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
		/* force full log commit if subvolume involved. */
		btrfs_set_log_full_commit(trans);
	} else {
		ret = btrfs_insert_inode_ref(trans, dest, new_name, old_ino,
					     btrfs_ino(BTRFS_I(new_dir)),
					     old_idx);
		if (ret)
			goto out_fail;
		need_abort = true;
	}

	/* And now for the dest. */
	if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
		/* force full log commit if subvolume involved. */
		btrfs_set_log_full_commit(trans);
	} else {
		ret = btrfs_insert_inode_ref(trans, root, old_name, new_ino,
					     btrfs_ino(BTRFS_I(old_dir)),
					     new_idx);
		if (ret) {
			if (need_abort)
				btrfs_abort_transaction(trans, ret);
			goto out_fail;
		}
	}

	/* Update inode version and ctime/mtime. */
	inode_inc_iversion(old_dir);
	inode_inc_iversion(new_dir);
	inode_inc_iversion(old_inode);
	inode_inc_iversion(new_inode);
	simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);

	if (old_dentry->d_parent != new_dentry->d_parent) {
		btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
					BTRFS_I(old_inode), true);
		btrfs_record_unlink_dir(trans, BTRFS_I(new_dir),
					BTRFS_I(new_inode), true);
	}

	/* src is a subvolume */
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry);
	} else { /* src is an inode */
		ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
					   BTRFS_I(old_dentry->d_inode),
					   old_name, &old_rename_ctx);
		if (!ret)
			ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode));
	}
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	/* dest is a subvolume */
	if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry);
	} else { /* dest is an inode */
		ret = __btrfs_unlink_inode(trans, BTRFS_I(new_dir),
					   BTRFS_I(new_dentry->d_inode),
					   new_name, &new_rename_ctx);
		if (!ret)
			ret = btrfs_update_inode(trans, dest, BTRFS_I(new_inode));
	}
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
			     new_name, 0, old_idx);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode),
			     old_name, 0, new_idx);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	if (old_inode->i_nlink == 1)
		BTRFS_I(old_inode)->dir_index = old_idx;
	if (new_inode->i_nlink == 1)
		BTRFS_I(new_inode)->dir_index = new_idx;

	/*
	 * Now pin the logs of the roots. We do it to ensure that no other task
	 * can sync the logs while we are in progress with the rename, because
	 * that could result in an inconsistency in case any of the inodes that
	 * are part of this rename operation were logged before.
	 */
	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_pin_log_trans(root);
	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_pin_log_trans(dest);

	/* Do the log updates for all inodes. */
	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
				   old_rename_ctx.index, new_dentry->d_parent);
	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_log_new_name(trans, new_dentry, BTRFS_I(new_dir),
				   new_rename_ctx.index, old_dentry->d_parent);

	/* Now unpin the logs. */
	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_end_log_trans(root);
	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_end_log_trans(dest);
out_fail:
	ret2 = btrfs_end_transaction(trans);
	ret = ret ? ret : ret2;
out_notrans:
	if (new_ino == BTRFS_FIRST_FREE_OBJECTID ||
	    old_ino == BTRFS_FIRST_FREE_OBJECTID)
		up_read(&fs_info->subvol_sem);

	fscrypt_free_filename(&new_fname);
	fscrypt_free_filename(&old_fname);
	return ret;
}
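
/*
 * Illustration (not kernel code): this function backs renameat2(2) with
 * RENAME_EXCHANGE, which atomically swaps two names. Hypothetical paths:
 *
 *	renameat2(AT_FDCWD, "/mnt/btrfs/a",
 *		  AT_FDCWD, "/mnt/btrfs/b", RENAME_EXCHANGE);
 *
 * Cross-subvolume exchanges return -EXDEV unless both names are subvolumes,
 * matching the check at the top of the function.
 */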
static struct inode *new_whiteout_inode(struct mnt_idmap *idmap,
					struct inode *dir)
{
	struct inode *inode;

	inode = new_inode(dir->i_sb);
	if (inode) {
		inode_init_owner(idmap, inode, dir,
				 S_IFCHR | WHITEOUT_MODE);
		inode->i_op = &btrfs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, WHITEOUT_DEV);
	}
	return inode;
}
static int btrfs_rename(struct mnt_idmap *idmap,
			struct inode *old_dir, struct dentry *old_dentry,
			struct inode *new_dir, struct dentry *new_dentry,
			unsigned int flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
	struct btrfs_new_inode_args whiteout_args = {
		.dir = old_dir,
		.dentry = old_dentry,
	};
	struct btrfs_trans_handle *trans;
	unsigned int trans_num_items;
	struct btrfs_root *root = BTRFS_I(old_dir)->root;
	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
	struct inode *new_inode = d_inode(new_dentry);
	struct inode *old_inode = d_inode(old_dentry);
	struct btrfs_rename_ctx rename_ctx;
	u64 index = 0;
	int ret;
	int ret2;
	u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
	struct fscrypt_name old_fname, new_fname;

	if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
		return -EPERM;

	/* we only allow rename subvolume link between subvolumes */
	if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
		return -EXDEV;

	if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
	    (new_inode && btrfs_ino(BTRFS_I(new_inode)) == BTRFS_FIRST_FREE_OBJECTID))
		return -ENOTEMPTY;

	if (S_ISDIR(old_inode->i_mode) && new_inode &&
	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
		return -ENOTEMPTY;

	ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname);
	if (ret)
		return ret;

	ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname);
	if (ret) {
		fscrypt_free_filename(&old_fname);
		return ret;
	}

	/* check for collisions, even if the name isn't there */
	ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino, &new_fname.disk_name);
	if (ret) {
		if (ret == -EEXIST) {
			/* we shouldn't get
			 * eexist without a new_inode */
			if (WARN_ON(!new_inode)) {
				goto out_fscrypt_names;
			}
		} else {
			/* maybe -EOVERFLOW */
			goto out_fscrypt_names;
		}
	}
	ret = 0;

	/*
	 * we're using rename to replace one file with another.  Start IO on it
	 * now so we don't add too much work to the end of the transaction
	 */
	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size)
		filemap_flush(old_inode->i_mapping);

	if (flags & RENAME_WHITEOUT) {
		whiteout_args.inode = new_whiteout_inode(idmap, old_dir);
		if (!whiteout_args.inode) {
			ret = -ENOMEM;
			goto out_fscrypt_names;
		}
		ret = btrfs_new_inode_prepare(&whiteout_args, &trans_num_items);
		if (ret)
			goto out_whiteout_inode;
	} else {
		/* 1 to update the old parent inode. */
		trans_num_items = 1;
	}

	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
		/* Close the race window with snapshot create/destroy ioctl */
		down_read(&fs_info->subvol_sem);
		/*
		 * 1 to remove old root ref
		 * 1 to remove old root backref
		 * 1 to add new root ref
		 * 1 to add new root backref
		 */
		trans_num_items += 4;
	} else {
		/*
		 * 1 to update inode
		 * 1 to remove old inode ref
		 * 1 to add new inode ref
		 */
		trans_num_items += 3;
	}
	/*
	 * 1 to remove old dir item
	 * 1 to remove old dir index
	 * 1 to add new dir item
	 * 1 to add new dir index
	 */
	trans_num_items += 4;
	/* 1 to update new parent inode if it's not the same as the old parent */
	if (new_dir != old_dir)
		trans_num_items++;
	if (new_inode) {
		/*
		 * 1 to update inode
		 * 1 to remove inode ref
		 * 1 to remove dir item
		 * 1 to remove dir index
		 * 1 to possibly add orphan item
		 */
		trans_num_items += 5;
	}
	trans = btrfs_start_transaction(root, trans_num_items);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_notrans;
	}

	if (dest != root) {
		ret = btrfs_record_root_in_trans(trans, dest);
		if (ret)
			goto out_fail;
	}

	ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index);
	if (ret)
		goto out_fail;

	BTRFS_I(old_inode)->dir_index = 0ULL;
	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
		/* force full log commit if subvolume involved. */
		btrfs_set_log_full_commit(trans);
	} else {
		ret = btrfs_insert_inode_ref(trans, dest, &new_fname.disk_name,
					     old_ino, btrfs_ino(BTRFS_I(new_dir)),
					     index);
		if (ret)
			goto out_fail;
	}

	inode_inc_iversion(old_dir);
	inode_inc_iversion(new_dir);
	inode_inc_iversion(old_inode);
	simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);

	if (old_dentry->d_parent != new_dentry->d_parent)
		btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
					BTRFS_I(old_inode), true);

	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
		ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry);
	} else {
		ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
					   BTRFS_I(d_inode(old_dentry)),
					   &old_fname.disk_name, &rename_ctx);
		if (!ret)
			ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode));
	}
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	if (new_inode) {
		inode_inc_iversion(new_inode);
		if (unlikely(btrfs_ino(BTRFS_I(new_inode)) ==
			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
			ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry);
			BUG_ON(new_inode->i_nlink == 0);
		} else {
			ret = btrfs_unlink_inode(trans, BTRFS_I(new_dir),
						 BTRFS_I(d_inode(new_dentry)),
						 &new_fname.disk_name);
		}
		if (!ret && new_inode->i_nlink == 0)
			ret = btrfs_orphan_add(trans,
					BTRFS_I(d_inode(new_dentry)));
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out_fail;
		}
	}

	ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
			     &new_fname.disk_name, 0, index);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	if (old_inode->i_nlink == 1)
		BTRFS_I(old_inode)->dir_index = index;

	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
				   rename_ctx.index, new_dentry->d_parent);

	if (flags & RENAME_WHITEOUT) {
		ret = btrfs_create_new_inode(trans, &whiteout_args);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out_fail;
		} else {
			unlock_new_inode(whiteout_args.inode);
			iput(whiteout_args.inode);
			whiteout_args.inode = NULL;
		}
	}
out_fail:
	ret2 = btrfs_end_transaction(trans);
	ret = ret ? ret : ret2;
out_notrans:
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
		up_read(&fs_info->subvol_sem);
	if (flags & RENAME_WHITEOUT)
		btrfs_new_inode_args_destroy(&whiteout_args);
out_whiteout_inode:
	if (flags & RENAME_WHITEOUT)
		iput(whiteout_args.inode);
out_fscrypt_names:
	fscrypt_free_filename(&old_fname);
	fscrypt_free_filename(&new_fname);
	return ret;
}
static int btrfs_rename2(struct mnt_idmap *idmap, struct inode *old_dir,
			 struct dentry *old_dentry, struct inode *new_dir,
			 struct dentry *new_dentry, unsigned int flags)
{
	int ret;

	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
		return -EINVAL;

	if (flags & RENAME_EXCHANGE)
		ret = btrfs_rename_exchange(old_dir, old_dentry, new_dir,
					    new_dentry);
	else
		ret = btrfs_rename(idmap, old_dir, old_dentry, new_dir,
				   new_dentry, flags);

	btrfs_btree_balance_dirty(BTRFS_I(new_dir)->root->fs_info);

	return ret;
}
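
/*
 * Illustration (not kernel code): RENAME_WHITEOUT is used by overlayfs; the
 * old name is atomically replaced by a 0:0 character device (the whiteout).
 * A hedged sketch with hypothetical paths:
 *
 *	renameat2(AT_FDCWD, "/mnt/btrfs/upper/f",
 *		  AT_FDCWD, "/mnt/btrfs/work/f", RENAME_WHITEOUT);
 *
 * Any flag outside RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT is
 * rejected with -EINVAL before either helper above runs.
 */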
struct btrfs_delalloc_work {
	struct inode *inode;
	struct completion completion;
	struct list_head list;
	struct btrfs_work work;
};

static void btrfs_run_delalloc_work(struct btrfs_work *work)
{
	struct btrfs_delalloc_work *delalloc_work;
	struct inode *inode;

	delalloc_work = container_of(work, struct btrfs_delalloc_work,
				     work);
	inode = delalloc_work->inode;
	filemap_flush(inode->i_mapping);
	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
		     &BTRFS_I(inode)->runtime_flags))
		filemap_flush(inode->i_mapping);

	iput(inode);
	complete(&delalloc_work->completion);
}

static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode)
{
	struct btrfs_delalloc_work *work;

	work = kmalloc(sizeof(*work), GFP_NOFS);
	if (!work)
		return NULL;

	init_completion(&work->completion);
	INIT_LIST_HEAD(&work->list);
	work->inode = inode;
	btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL);

	return work;
}
/*
 * some fairly slow code that needs optimization. This walks the list
 * of all the inodes with pending delalloc and forces them to disk.
 */
static int start_delalloc_inodes(struct btrfs_root *root,
				 struct writeback_control *wbc, bool snapshot,
				 bool in_reclaim_context)
{
	struct btrfs_inode *binode;
	struct inode *inode;
	struct btrfs_delalloc_work *work, *next;
	LIST_HEAD(works);
	LIST_HEAD(splice);
	int ret = 0;
	bool full_flush = wbc->nr_to_write == LONG_MAX;

	mutex_lock(&root->delalloc_mutex);
	spin_lock(&root->delalloc_lock);
	list_splice_init(&root->delalloc_inodes, &splice);
	while (!list_empty(&splice)) {
		binode = list_entry(splice.next, struct btrfs_inode,
				    delalloc_inodes);

		list_move_tail(&binode->delalloc_inodes,
			       &root->delalloc_inodes);

		if (in_reclaim_context &&
		    test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &binode->runtime_flags))
			continue;

		inode = igrab(&binode->vfs_inode);
		if (!inode) {
			cond_resched_lock(&root->delalloc_lock);
			continue;
		}
		spin_unlock(&root->delalloc_lock);

		if (snapshot)
			set_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
				&binode->runtime_flags);
		if (full_flush) {
			work = btrfs_alloc_delalloc_work(inode);
			if (!work) {
				iput(inode);
				ret = -ENOMEM;
				goto out;
			}
			list_add_tail(&work->list, &works);
			btrfs_queue_work(root->fs_info->flush_workers,
					 &work->work);
		} else {
			ret = filemap_fdatawrite_wbc(inode->i_mapping, wbc);
			btrfs_add_delayed_iput(BTRFS_I(inode));
			if (ret || wbc->nr_to_write <= 0)
				goto out;
		}
		cond_resched();
		spin_lock(&root->delalloc_lock);
	}
	spin_unlock(&root->delalloc_lock);

out:
	list_for_each_entry_safe(work, next, &works, list) {
		list_del_init(&work->list);
		wait_for_completion(&work->completion);
		kfree(work);
	}

	if (!list_empty(&splice)) {
		spin_lock(&root->delalloc_lock);
		list_splice_tail(&splice, &root->delalloc_inodes);
		spin_unlock(&root->delalloc_lock);
	}
	mutex_unlock(&root->delalloc_mutex);
	return ret;
}
int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context)
{
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (BTRFS_FS_ERROR(fs_info))
		return -EROFS;

	return start_delalloc_inodes(root, &wbc, true, in_reclaim_context);
}
int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
			       bool in_reclaim_context)
{
	struct writeback_control wbc = {
		.nr_to_write = nr,
		.sync_mode = WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};
	struct btrfs_root *root;
	LIST_HEAD(splice);
	int ret;

	if (BTRFS_FS_ERROR(fs_info))
		return -EROFS;

	mutex_lock(&fs_info->delalloc_root_mutex);
	spin_lock(&fs_info->delalloc_root_lock);
	list_splice_init(&fs_info->delalloc_roots, &splice);
	while (!list_empty(&splice)) {
		/*
		 * Reset nr_to_write here so we know that we're doing a full
		 * flush.
		 */
		if (nr == LONG_MAX)
			wbc.nr_to_write = LONG_MAX;

		root = list_first_entry(&splice, struct btrfs_root,
					delalloc_root);
		root = btrfs_grab_root(root);
		BUG_ON(!root);
		list_move_tail(&root->delalloc_root,
			       &fs_info->delalloc_roots);
		spin_unlock(&fs_info->delalloc_root_lock);

		ret = start_delalloc_inodes(root, &wbc, false, in_reclaim_context);
		btrfs_put_root(root);
		if (ret < 0 || wbc.nr_to_write <= 0)
			goto out;
		spin_lock(&fs_info->delalloc_root_lock);
	}
	spin_unlock(&fs_info->delalloc_root_lock);

	ret = 0;
out:
	if (!list_empty(&splice)) {
		spin_lock(&fs_info->delalloc_root_lock);
		list_splice_tail(&splice, &fs_info->delalloc_roots);
		spin_unlock(&fs_info->delalloc_root_lock);
	}
	mutex_unlock(&fs_info->delalloc_root_mutex);
	return ret;
}
static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
			 struct dentry *dentry, const char *symname)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct inode *inode;
	struct btrfs_new_inode_args new_inode_args = {
		.dir = dir,
		.dentry = dentry,
	};
	unsigned int trans_num_items;
	int err;
	int name_len;
	int datasize;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	struct extent_buffer *leaf;

	name_len = strlen(symname);
	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
		return -ENAMETOOLONG;

	inode = new_inode(dir->i_sb);
	if (!inode)
		return -ENOMEM;
	inode_init_owner(idmap, inode, dir, S_IFLNK | S_IRWXUGO);
	inode->i_op = &btrfs_symlink_inode_operations;
	inode_nohighmem(inode);
	inode->i_mapping->a_ops = &btrfs_aops;
	btrfs_i_size_write(BTRFS_I(inode), name_len);
	inode_set_bytes(inode, name_len);

	new_inode_args.inode = inode;
	err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
	if (err)
		goto out_inode;
	/* 1 additional item for the inline extent */
	trans_num_items++;

	trans = btrfs_start_transaction(root, trans_num_items);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_new_inode_args;
	}

	err = btrfs_create_new_inode(trans, &new_inode_args);
	if (err)
		goto out;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		btrfs_abort_transaction(trans, err);
		discard_new_inode(inode);
		inode = NULL;
		goto out;
	}
	key.objectid = btrfs_ino(BTRFS_I(inode));
	key.offset = 0;
	key.type = BTRFS_EXTENT_DATA_KEY;
	datasize = btrfs_file_extent_calc_inline_size(name_len);
	err = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	if (err) {
		btrfs_abort_transaction(trans, err);
		btrfs_free_path(path);
		discard_new_inode(inode);
		inode = NULL;
		goto out;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei,
				   BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_compression(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);

	ptr = btrfs_file_extent_inline_start(ei);
	write_extent_buffer(leaf, symname, ptr, name_len);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	d_instantiate_new(dentry, inode);
	err = 0;
out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out_new_inode_args:
	btrfs_new_inode_args_destroy(&new_inode_args);
out_inode:
	if (err)
		iput(inode);
	return err;
}
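
/*
 * Illustration (not kernel code): the symlink target is stored as an inline
 * file extent, so its length is bounded by BTRFS_MAX_INLINE_DATA_SIZE()
 * (roughly the nodesize minus item headers), not by PATH_MAX. A target longer
 * than that limit fails early:
 *
 *	symlink(very_long_target, "/mnt/btrfs/link");	// -ENAMETOOLONG
 */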
static struct btrfs_trans_handle *insert_prealloc_file_extent(
				       struct btrfs_trans_handle *trans_in,
				       struct btrfs_inode *inode,
				       struct btrfs_key *ins,
				       u64 file_offset)
{
	struct btrfs_file_extent_item stack_fi;
	struct btrfs_replace_extent_info extent_info;
	struct btrfs_trans_handle *trans = trans_in;
	struct btrfs_path *path;
	u64 start = ins->objectid;
	u64 len = ins->offset;
	int qgroup_released;
	int ret;

	memset(&stack_fi, 0, sizeof(stack_fi));

	btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_PREALLOC);
	btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, start);
	btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, len);
	btrfs_set_stack_file_extent_num_bytes(&stack_fi, len);
	btrfs_set_stack_file_extent_ram_bytes(&stack_fi, len);
	btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE);
	/* Encryption and other encoding is reserved and all 0 */

	qgroup_released = btrfs_qgroup_release_data(inode, file_offset, len);
	if (qgroup_released < 0)
		return ERR_PTR(qgroup_released);

	if (trans) {
		ret = insert_reserved_file_extent(trans, inode,
						  file_offset, &stack_fi,
						  true, qgroup_released);
		if (ret)
			goto free_qgroup;
		return trans;
	}

	extent_info.disk_offset = start;
	extent_info.disk_len = len;
	extent_info.data_offset = 0;
	extent_info.data_len = len;
	extent_info.file_offset = file_offset;
	extent_info.extent_buf = (char *)&stack_fi;
	extent_info.is_new_extent = true;
	extent_info.update_times = true;
	extent_info.qgroup_reserved = qgroup_released;
	extent_info.insertions = 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto free_qgroup;
	}

	ret = btrfs_replace_file_extents(inode, path, file_offset,
					 file_offset + len - 1, &extent_info,
					 &trans);
	btrfs_free_path(path);
	if (ret)
		goto free_qgroup;
	return trans;

free_qgroup:
	/*
	 * We have released qgroup data range at the beginning of the function,
	 * and normally qgroup_released bytes will be freed when committing
	 * transaction.
	 * But if we error out early, we have to free what we have released
	 * or we leak qgroup data reservation.
	 */
	btrfs_qgroup_free_refroot(inode->root->fs_info,
			inode->root->root_key.objectid, qgroup_released,
			BTRFS_QGROUP_RSV_DATA);
	return ERR_PTR(ret);
}
static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
				       u64 start, u64 num_bytes, u64 min_size,
				       loff_t actual_len, u64 *alloc_hint,
				       struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key ins;
	u64 cur_offset = start;
	u64 clear_offset = start;
	u64 i_size;
	u64 cur_bytes;
	u64 last_alloc = (u64)-1;
	int ret = 0;
	bool own_trans = true;
	u64 end = start + num_bytes - 1;

	if (trans)
		own_trans = false;
	while (num_bytes > 0) {
		cur_bytes = min_t(u64, num_bytes, SZ_256M);
		cur_bytes = max(cur_bytes, min_size);
		/*
		 * If we are severely fragmented we could end up with really
		 * small allocations, so if the allocator is returning small
		 * chunks lets make its job easier by only searching for those
		 * sized chunks.
		 */
		cur_bytes = min(cur_bytes, last_alloc);
		ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes,
				min_size, 0, *alloc_hint, &ins, 1, 0);
		if (ret)
			break;

		/*
		 * We've reserved this space, and thus converted it from
		 * ->bytes_may_use to ->bytes_reserved.  Any error that happens
		 * from here on out we will only need to clear our reservation
		 * for the remaining unreserved area, so advance our
		 * clear_offset by our extent size.
		 */
		clear_offset += ins.offset;

		last_alloc = ins.offset;
		trans = insert_prealloc_file_extent(trans, BTRFS_I(inode),
						    &ins, cur_offset);
		/*
		 * Now that we inserted the prealloc extent we can finally
		 * decrement the number of reservations in the block group.
		 * If we did it before, we could race with relocation and have
		 * relocation miss the reserved extent, making it fail later.
		 */
		btrfs_dec_block_group_reservations(fs_info, ins.objectid);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			btrfs_free_reserved_extent(fs_info, ins.objectid,
						   ins.offset, 0);
			break;
		}

		em = alloc_extent_map();
		if (!em) {
			btrfs_drop_extent_map_range(BTRFS_I(inode), cur_offset,
					    cur_offset + ins.offset - 1, false);
			btrfs_set_inode_full_sync(BTRFS_I(inode));
			goto next;
		}

		em->start = cur_offset;
		em->orig_start = cur_offset;
		em->len = ins.offset;
		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->orig_block_len = ins.offset;
		em->ram_bytes = ins.offset;
		set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		em->generation = trans->transid;

		ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, true);
		free_extent_map(em);
next:
		num_bytes -= ins.offset;
		cur_offset += ins.offset;
		*alloc_hint = ins.objectid + ins.offset;

		inode_inc_iversion(inode);
		inode_set_ctime_current(inode);
		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    (actual_len > inode->i_size) &&
		    (cur_offset > inode->i_size)) {
			if (cur_offset > actual_len)
				i_size = actual_len;
			else
				i_size = cur_offset;
			i_size_write(inode, i_size);
			btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
		}

		ret = btrfs_update_inode(trans, root, BTRFS_I(inode));

		if (ret) {
			btrfs_abort_transaction(trans, ret);
			if (own_trans)
				btrfs_end_transaction(trans);
			break;
		}

		if (own_trans) {
			btrfs_end_transaction(trans);
			trans = NULL;
		}
	}
	if (clear_offset < end)
		btrfs_free_reserved_data_space(BTRFS_I(inode), NULL, clear_offset,
			end - clear_offset + 1);
	return ret;
}
int btrfs_prealloc_file_range(struct inode *inode, int mode,
			      u64 start, u64 num_bytes, u64 min_size,
			      loff_t actual_len, u64 *alloc_hint)
{
	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
					   min_size, actual_len, alloc_hint,
					   NULL);
}

int btrfs_prealloc_file_range_trans(struct inode *inode,
				    struct btrfs_trans_handle *trans, int mode,
				    u64 start, u64 num_bytes, u64 min_size,
				    loff_t actual_len, u64 *alloc_hint)
{
	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
					   min_size, actual_len, alloc_hint, trans);
}
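
/*
 * Illustration (not kernel code): the prealloc helpers above back
 * fallocate(2). With FALLOC_FL_KEEP_SIZE the extents are allocated but
 * i_size is left alone, matching the mode check in
 * __btrfs_prealloc_file_range(). Hypothetical path:
 *
 *	int fd = open("/mnt/btrfs/file", O_RDWR | O_CREAT, 0644);
 *
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20);	// 1MiB prealloc
 */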
static int btrfs_permission(struct mnt_idmap *idmap,
			    struct inode *inode, int mask)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	umode_t mode = inode->i_mode;

	if (mask & MAY_WRITE &&
	    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
		if (btrfs_root_readonly(root))
			return -EROFS;
		if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
			return -EACCES;
	}
	return generic_permission(idmap, inode, mask);
}
static int btrfs_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
			 struct file *file, umode_t mode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode;
	struct btrfs_new_inode_args new_inode_args = {
		.dir = dir,
		.dentry = file->f_path.dentry,
		.orphan = true,
	};
	unsigned int trans_num_items;
	int ret;

	inode = new_inode(dir->i_sb);
	if (!inode)
		return -ENOMEM;
	inode_init_owner(idmap, inode, dir, mode);
	inode->i_fop = &btrfs_file_operations;
	inode->i_op = &btrfs_file_inode_operations;
	inode->i_mapping->a_ops = &btrfs_aops;

	new_inode_args.inode = inode;
	ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
	if (ret)
		goto out_inode;

	trans = btrfs_start_transaction(root, trans_num_items);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_new_inode_args;
	}

	ret = btrfs_create_new_inode(trans, &new_inode_args);

	/*
	 * We set number of links to 0 in btrfs_create_new_inode(), and here we
	 * set it to 1 because d_tmpfile() will issue a warning if the count is
	 * 0, through:
	 *
	 *    d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
	 */
	set_nlink(inode, 1);

	if (!ret) {
		d_tmpfile(file, inode);
		unlock_new_inode(inode);
		mark_inode_dirty(inode);
	}

	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out_new_inode_args:
	btrfs_new_inode_args_destroy(&new_inode_args);
out_inode:
	if (ret)
		iput(inode);
	return finish_open_simple(file, ret);
}
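
/*
 * Illustration (not kernel code): O_TMPFILE creates an unnamed inode kept on
 * the orphan list until it is either linked in or released:
 *
 *	int fd = open("/mnt/btrfs", O_TMPFILE | O_RDWR, 0600);
 *
 *	write(fd, "data", 4);
 *	// linkat() could later give it a name; a plain close() lets the
 *	// orphan cleanup delete it.
 */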
void btrfs_set_range_writeback(struct btrfs_inode *inode, u64 start, u64 end)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	unsigned long index = start >> PAGE_SHIFT;
	unsigned long end_index = end >> PAGE_SHIFT;
	struct page *page;
	u32 len;

	ASSERT(end + 1 - start <= U32_MAX);
	len = end + 1 - start;
	while (index <= end_index) {
		page = find_get_page(inode->vfs_inode.i_mapping, index);
		ASSERT(page); /* Pages should be in the extent_io_tree */

		btrfs_page_set_writeback(fs_info, page, start, len);
		put_page(page);
		index++;
	}
}
int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info,
					     int compress_type)
{
	switch (compress_type) {
	case BTRFS_COMPRESS_NONE:
		return BTRFS_ENCODED_IO_COMPRESSION_NONE;
	case BTRFS_COMPRESS_ZLIB:
		return BTRFS_ENCODED_IO_COMPRESSION_ZLIB;
	case BTRFS_COMPRESS_LZO:
		/*
		 * The LZO format depends on the sector size. 64K is the maximum
		 * sector size that we support.
		 */
		if (fs_info->sectorsize < SZ_4K || fs_info->sectorsize > SZ_64K)
			return -EINVAL;
		return BTRFS_ENCODED_IO_COMPRESSION_LZO_4K +
		       (fs_info->sectorsize_bits - 12);
	case BTRFS_COMPRESS_ZSTD:
		return BTRFS_ENCODED_IO_COMPRESSION_ZSTD;
	default:
		return -EUCLEAN;
	}
}
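
/*
 * Worked example: the LZO encoding above is derived from the sector size,
 * since sectorsize == 1 << sectorsize_bits and the 4K variant corresponds to
 * sectorsize_bits == 12:
 *
 *	4K  sectors: LZO_4K + (12 - 12) = LZO_4K
 *	64K sectors: LZO_4K + (16 - 12) = LZO_64K
 */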
static ssize_t btrfs_encoded_read_inline(
				struct kiocb *iocb,
				struct iov_iter *iter, u64 start,
				u64 lockend,
				struct extent_state **cached_state,
				u64 extent_start, size_t count,
				struct btrfs_ioctl_encoded_io_args *encoded,
				bool *unlocked)
{
	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *item;
	u64 ram_bytes;
	unsigned long ptr;
	void *tmp;
	ssize_t ret;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}
	ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode),
				       extent_start, 0);
	if (ret) {
		if (ret > 0) {
			/* The extent item disappeared? */
			ret = -EIO;
		}
		goto out;
	}
	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);

	ram_bytes = btrfs_file_extent_ram_bytes(leaf, item);
	ptr = btrfs_file_extent_inline_start(item);

	encoded->len = min_t(u64, extent_start + ram_bytes,
			     inode->vfs_inode.i_size) - iocb->ki_pos;
	ret = btrfs_encoded_io_compression_from_extent(fs_info,
				 btrfs_file_extent_compression(leaf, item));
	if (ret < 0)
		goto out;
	encoded->compression = ret;
	if (encoded->compression) {
		size_t inline_size;

		inline_size = btrfs_file_extent_inline_item_len(leaf,
								path->slots[0]);
		if (inline_size > count) {
			ret = -ENOBUFS;
			goto out;
		}
		count = inline_size;
		encoded->unencoded_len = ram_bytes;
		encoded->unencoded_offset = iocb->ki_pos - extent_start;
	} else {
		count = min_t(u64, count, encoded->len);
		encoded->len = count;
		encoded->unencoded_len = count;
		ptr += iocb->ki_pos - extent_start;
	}

	tmp = kmalloc(count, GFP_NOFS);
	if (!tmp) {
		ret = -ENOMEM;
		goto out;
	}
	read_extent_buffer(leaf, tmp, ptr, count);
	btrfs_release_path(path);
	unlock_extent(io_tree, start, lockend, cached_state);
	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
	*unlocked = true;

	ret = copy_to_iter(tmp, count, iter);
	if (ret != count)
		ret = -EFAULT;
	kfree(tmp);
out:
	btrfs_free_path(path);
	return ret;
}
struct btrfs_encoded_read_private {
	wait_queue_head_t wait;
	atomic_t pending;
	blk_status_t status;
};

static void btrfs_encoded_read_endio(struct btrfs_bio *bbio)
{
	struct btrfs_encoded_read_private *priv = bbio->private;

	if (bbio->bio.bi_status) {
		/*
		 * The memory barrier implied by the atomic_dec_return() here
		 * pairs with the memory barrier implied by the
		 * atomic_dec_return() or io_wait_event() in
		 * btrfs_encoded_read_regular_fill_pages() to ensure that this
		 * write is observed before the load of status in
		 * btrfs_encoded_read_regular_fill_pages().
		 */
		WRITE_ONCE(priv->status, bbio->bio.bi_status);
	}
	if (!atomic_dec_return(&priv->pending))
		wake_up(&priv->wait);
	bio_put(&bbio->bio);
}
int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
					  u64 file_offset, u64 disk_bytenr,
					  u64 disk_io_size, struct page **pages)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_encoded_read_private priv = {
		.pending = ATOMIC_INIT(1),
	};
	unsigned long i = 0;
	struct btrfs_bio *bbio;

	init_waitqueue_head(&priv.wait);

	bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info,
			       btrfs_encoded_read_endio, &priv);
	bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
	bbio->inode = inode;

	do {
		size_t bytes = min_t(u64, disk_io_size, PAGE_SIZE);

		if (bio_add_page(&bbio->bio, pages[i], bytes, 0) < bytes) {
			atomic_inc(&priv.pending);
			btrfs_submit_bio(bbio, 0);

			bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info,
					       btrfs_encoded_read_endio, &priv);
			bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
			bbio->inode = inode;
			continue;
		}

		i++;
		disk_bytenr += bytes;
		disk_io_size -= bytes;
	} while (disk_io_size);

	atomic_inc(&priv.pending);
	btrfs_submit_bio(bbio, 0);

	if (atomic_dec_return(&priv.pending))
		io_wait_event(priv.wait, !atomic_read(&priv.pending));
	/* See btrfs_encoded_read_endio() for ordering. */
	return blk_status_to_errno(READ_ONCE(priv.status));
}
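
/*
 * Note on the completion scheme above: priv.pending starts at 1 so that bios
 * completing early cannot drop the count to zero while the submitter is still
 * building more bios. Each submission does atomic_inc(), and the final
 * atomic_dec_return() by the submitter only waits if some bio is still in
 * flight. This is the usual "bias the counter by one" pattern for fan-out
 * I/O.
 */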
static ssize_t btrfs_encoded_read_regular(struct kiocb *iocb,
					  struct iov_iter *iter,
					  u64 start, u64 lockend,
					  struct extent_state **cached_state,
					  u64 disk_bytenr, u64 disk_io_size,
					  size_t count, bool compressed,
					  bool *unlocked)
{
	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct page **pages;
	unsigned long nr_pages, i;
	u64 cur;
	size_t page_offset;
	ssize_t ret;

	nr_pages = DIV_ROUND_UP(disk_io_size, PAGE_SIZE);
	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
	if (!pages)
		return -ENOMEM;
	ret = btrfs_alloc_page_array(nr_pages, pages);
	if (ret) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_encoded_read_regular_fill_pages(inode, start, disk_bytenr,
						    disk_io_size, pages);
	if (ret)
		goto out;

	unlock_extent(io_tree, start, lockend, cached_state);
	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
	*unlocked = true;

	if (compressed) {
		i = 0;
		page_offset = 0;
	} else {
		i = (iocb->ki_pos - start) >> PAGE_SHIFT;
		page_offset = (iocb->ki_pos - start) & (PAGE_SIZE - 1);
	}
	cur = 0;
	while (cur < count) {
		size_t bytes = min_t(size_t, count - cur,
				     PAGE_SIZE - page_offset);

		if (copy_page_to_iter(pages[i], page_offset, bytes,
				      iter) != bytes) {
			ret = -EFAULT;
			goto out;
		}
		i++;
		cur += bytes;
		page_offset = 0;
	}
	ret = count;
out:
	for (i = 0; i < nr_pages; i++) {
		if (pages[i])
			__free_page(pages[i]);
	}
	kfree(pages);
	return ret;
}
ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
			   struct btrfs_ioctl_encoded_io_args *encoded)
{
	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_io_tree *io_tree = &inode->io_tree;
	ssize_t ret;
	size_t count = iov_iter_count(iter);
	u64 start, lockend, disk_bytenr, disk_io_size;
	struct extent_state *cached_state = NULL;
	struct extent_map *em;
	bool unlocked = false;

	file_accessed(iocb->ki_filp);

	btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);

	if (iocb->ki_pos >= inode->vfs_inode.i_size) {
		btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
		return 0;
	}
	start = ALIGN_DOWN(iocb->ki_pos, fs_info->sectorsize);
	/*
	 * We don't know how long the extent containing iocb->ki_pos is, but if
	 * it's compressed we know that it won't be longer than this.
	 */
	lockend = start + BTRFS_MAX_UNCOMPRESSED - 1;

	while (1) {
		struct btrfs_ordered_extent *ordered;

		ret = btrfs_wait_ordered_range(&inode->vfs_inode, start,
					       lockend - start + 1);
		if (ret)
			goto out_unlock_inode;
		lock_extent(io_tree, start, lockend, &cached_state);
		ordered = btrfs_lookup_ordered_range(inode, start,
						     lockend - start + 1);
		if (!ordered)
			break;
		btrfs_put_ordered_extent(ordered);
		unlock_extent(io_tree, start, lockend, &cached_state);
		cond_resched();
	}

	em = btrfs_get_extent(inode, NULL, 0, start, lockend - start + 1);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_unlock_extent;
	}

	if (em->block_start == EXTENT_MAP_INLINE) {
		u64 extent_start = em->start;

		/*
		 * For inline extents we get everything we need out of the
		 * extent item.
		 */
		free_extent_map(em);
		em = NULL;
		ret = btrfs_encoded_read_inline(iocb, iter, start, lockend,
						&cached_state, extent_start,
						count, encoded, &unlocked);
		goto out;
	}

	/*
	 * We only want to return up to EOF even if the extent extends beyond
	 * it.
	 */
	encoded->len = min_t(u64, extent_map_end(em),
			     inode->vfs_inode.i_size) - iocb->ki_pos;
	if (em->block_start == EXTENT_MAP_HOLE ||
	    test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
		disk_bytenr = EXTENT_MAP_HOLE;
		count = min_t(u64, count, encoded->len);
		encoded->len = count;
		encoded->unencoded_len = count;
	} else if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
		disk_bytenr = em->block_start;
		/*
		 * Bail if the buffer isn't large enough to return the whole
		 * compressed extent.
		 */
		if (em->block_len > count) {
			ret = -ENOBUFS;
			goto out_em;
		}
		disk_io_size = em->block_len;
		count = em->block_len;
		encoded->unencoded_len = em->ram_bytes;
		encoded->unencoded_offset = iocb->ki_pos - em->orig_start;
		ret = btrfs_encoded_io_compression_from_extent(fs_info,
							       em->compress_type);
		if (ret < 0)
			goto out_em;
		encoded->compression = ret;
	} else {
		disk_bytenr = em->block_start + (start - em->start);
		if (encoded->len > count)
			encoded->len = count;
		/*
		 * Don't read beyond what we locked. This also limits the page
		 * allocations that we'll do.
		 */
		disk_io_size = min(lockend + 1, iocb->ki_pos + encoded->len) - start;
		count = start + disk_io_size - iocb->ki_pos;
		encoded->len = count;
		encoded->unencoded_len = count;
		disk_io_size = ALIGN(disk_io_size, fs_info->sectorsize);
	}
	free_extent_map(em);
	em = NULL;

	if (disk_bytenr == EXTENT_MAP_HOLE) {
		unlock_extent(io_tree, start, lockend, &cached_state);
		btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
		unlocked = true;
		ret = iov_iter_zero(count, iter);
		if (ret != count)
			ret = -EFAULT;
	} else {
		ret = btrfs_encoded_read_regular(iocb, iter, start, lockend,
						 &cached_state, disk_bytenr,
						 disk_io_size, count,
						 encoded->compression,
						 &unlocked);
	}

out:
	if (ret >= 0)
		iocb->ki_pos += encoded->len;
out_em:
	free_extent_map(em);
out_unlock_extent:
	if (!unlocked)
		unlock_extent(io_tree, start, lockend, &cached_state);
out_unlock_inode:
	if (!unlocked)
		btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
	return ret;
}
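
/*
 * Illustration (not kernel code): this is the backend of the
 * BTRFS_IOC_ENCODED_READ ioctl, which returns (possibly compressed) extent
 * data as-is plus metadata describing how to decode it. A hedged userspace
 * sketch; buffer sizing and error handling omitted:
 *
 *	struct btrfs_ioctl_encoded_io_args args = {};
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *
 *	args.iov = &iov;
 *	args.iovcnt = 1;
 *	args.offset = 0;
 *	ioctl(fd, BTRFS_IOC_ENCODED_READ, &args);
 *	// args.len, args.unencoded_len and args.compression now describe
 *	// the returned data.
 */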
10169 ssize_t
btrfs_do_encoded_write(struct kiocb
*iocb
, struct iov_iter
*from
,
10170 const struct btrfs_ioctl_encoded_io_args
*encoded
)
10172 struct btrfs_inode
*inode
= BTRFS_I(file_inode(iocb
->ki_filp
));
10173 struct btrfs_root
*root
= inode
->root
;
10174 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
10175 struct extent_io_tree
*io_tree
= &inode
->io_tree
;
10176 struct extent_changeset
*data_reserved
= NULL
;
10177 struct extent_state
*cached_state
= NULL
;
10178 struct btrfs_ordered_extent
*ordered
;
10182 u64 num_bytes
, ram_bytes
, disk_num_bytes
;
10183 unsigned long nr_pages
, i
;
10184 struct page
**pages
;
10185 struct btrfs_key ins
;
10186 bool extent_reserved
= false;
10187 struct extent_map
*em
;
10190 switch (encoded
->compression
) {
10191 case BTRFS_ENCODED_IO_COMPRESSION_ZLIB
:
10192 compression
= BTRFS_COMPRESS_ZLIB
;
10194 case BTRFS_ENCODED_IO_COMPRESSION_ZSTD
:
10195 compression
= BTRFS_COMPRESS_ZSTD
;
10197 case BTRFS_ENCODED_IO_COMPRESSION_LZO_4K
:
10198 case BTRFS_ENCODED_IO_COMPRESSION_LZO_8K
:
10199 case BTRFS_ENCODED_IO_COMPRESSION_LZO_16K
:
10200 case BTRFS_ENCODED_IO_COMPRESSION_LZO_32K
:
10201 case BTRFS_ENCODED_IO_COMPRESSION_LZO_64K
:
10202 /* The sector size must match for LZO. */
10203 if (encoded
->compression
-
10204 BTRFS_ENCODED_IO_COMPRESSION_LZO_4K
+ 12 !=
10205 fs_info
->sectorsize_bits
)
10207 compression
= BTRFS_COMPRESS_LZO
;
	if (encoded->encryption != BTRFS_ENCODED_IO_ENCRYPTION_NONE)
		return -EINVAL;

	orig_count = iov_iter_count(from);

	/* The extent size must be sane. */
	if (encoded->unencoded_len > BTRFS_MAX_UNCOMPRESSED ||
	    orig_count > BTRFS_MAX_COMPRESSED || orig_count == 0)
		return -EINVAL;

	/*
	 * The compressed data must be smaller than the decompressed data.
	 *
	 * It's of course possible for data to compress to larger or the same
	 * size, but the buffered I/O path falls back to no compression for such
	 * data, and we don't want to break any assumptions by creating these
	 * extents.
	 *
	 * Note that this is less strict than the current check we have that the
	 * compressed data must be at least one sector smaller than the
	 * decompressed data. We only want to enforce the weaker requirement
	 * from old kernels that it is at least one byte smaller.
	 */
	if (orig_count >= encoded->unencoded_len)
		return -EINVAL;

	/* The extent must start on a sector boundary. */
	start = iocb->ki_pos;
	if (!IS_ALIGNED(start, fs_info->sectorsize))
		return -EINVAL;

	/*
	 * The extent must end on a sector boundary. However, we allow a write
	 * which ends at or extends i_size to have an unaligned length; we round
	 * up the extent size and set i_size to the unaligned end.
	 */
	if (start + encoded->len < inode->vfs_inode.i_size &&
	    !IS_ALIGNED(start + encoded->len, fs_info->sectorsize))
		return -EINVAL;

	/* Finally, the offset in the unencoded data must be sector-aligned. */
	if (!IS_ALIGNED(encoded->unencoded_offset, fs_info->sectorsize))
		return -EINVAL;
	num_bytes = ALIGN(encoded->len, fs_info->sectorsize);
	ram_bytes = ALIGN(encoded->unencoded_len, fs_info->sectorsize);
	end = start + num_bytes - 1;

	/*
	 * If the extent cannot be inline, the compressed data on disk must be
	 * sector-aligned. For convenience, we extend it with zeroes if it
	 * isn't.
	 */
	disk_num_bytes = ALIGN(orig_count, fs_info->sectorsize);
	nr_pages = DIV_ROUND_UP(disk_num_bytes, PAGE_SIZE);
	pages = kvcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL_ACCOUNT);
	if (!pages)
		return -ENOMEM;
	for (i = 0; i < nr_pages; i++) {
		size_t bytes = min_t(size_t, PAGE_SIZE, iov_iter_count(from));
		char *kaddr;

		pages[i] = alloc_page(GFP_KERNEL_ACCOUNT);
		if (!pages[i]) {
			ret = -ENOMEM;
			goto out_pages;
		}
		kaddr = kmap_local_page(pages[i]);
		if (copy_from_iter(kaddr, bytes, from) != bytes) {
			kunmap_local(kaddr);
			ret = -EFAULT;
			goto out_pages;
		}
		if (bytes < PAGE_SIZE)
			memset(kaddr + bytes, 0, PAGE_SIZE - bytes);
		kunmap_local(kaddr);
	}
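	/*
	 * Flush and retry until the range is quiescent: no ordered extents
	 * and no cached pages left between start and end. Only then is it
	 * safe to keep the extent range locked and create our own ordered
	 * extent below.
	 */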
	for (;;) {
		struct btrfs_ordered_extent *ordered;

		ret = btrfs_wait_ordered_range(&inode->vfs_inode, start,
					       num_bytes);
		if (ret)
			goto out_pages;
		ret = invalidate_inode_pages2_range(inode->vfs_inode.i_mapping,
						    start >> PAGE_SHIFT,
						    end >> PAGE_SHIFT);
		if (ret)
			goto out_pages;
		lock_extent(io_tree, start, end, &cached_state);
		ordered = btrfs_lookup_ordered_range(inode, start, num_bytes);
		if (!ordered &&
		    !filemap_range_has_page(inode->vfs_inode.i_mapping, start,
					    end))
			break;
		if (ordered)
			btrfs_put_ordered_extent(ordered);
		unlock_extent(io_tree, start, end, &cached_state);
		cond_resched();
	}
	/*
	 * We don't use the higher-level delalloc space functions because our
	 * num_bytes and disk_num_bytes are different.
	 */
	ret = btrfs_alloc_data_chunk_ondemand(inode, disk_num_bytes);
	if (ret)
		goto out_unlock;
	ret = btrfs_qgroup_reserve_data(inode, &data_reserved, start, num_bytes);
	if (ret)
		goto out_free_data_space;
	ret = btrfs_delalloc_reserve_metadata(inode, num_bytes, disk_num_bytes,
					      false);
	if (ret)
		goto out_qgroup_free_data;
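	/*
	 * An inline extent is only possible when the write starts at offset 0
	 * and covers the whole unencoded extent. cow_file_range_inline()
	 * returns 0 when it inserted the inline extent, a negative errno on
	 * failure, and a positive value when the data has to go through the
	 * regular reserved-extent path below instead.
	 */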
	/* Try an inline extent first. */
	if (start == 0 && encoded->unencoded_len == encoded->len &&
	    encoded->unencoded_offset == 0) {
		ret = cow_file_range_inline(inode, encoded->len, orig_count,
					    compression, pages, true);
		if (ret <= 0) {
			if (ret == 0)
				ret = orig_count;
			goto out_delalloc_release;
		}
	}

	ret = btrfs_reserve_extent(root, disk_num_bytes, disk_num_bytes,
				   disk_num_bytes, 0, 0, &ins, 1, 1);
	if (ret)
		goto out_delalloc_release;
	extent_reserved = true;
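	/*
	 * From here the flow mirrors the tail of the regular compressed write
	 * path: insert an extent map for the range, create an ordered extent
	 * flagged ENCODED + COMPRESSED, and hand the pre-filled pages to the
	 * compressed bio machinery.
	 */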
	em = create_io_em(inode, start, num_bytes,
			  start - encoded->unencoded_offset, ins.objectid,
			  ins.offset, ins.offset, ram_bytes, compression,
			  BTRFS_ORDERED_COMPRESSED);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_free_reserved;
	}
	free_extent_map(em);

	ordered = btrfs_alloc_ordered_extent(inode, start, num_bytes, ram_bytes,
				       ins.objectid, ins.offset,
				       encoded->unencoded_offset,
				       (1 << BTRFS_ORDERED_ENCODED) |
				       (1 << BTRFS_ORDERED_COMPRESSED),
				       compression);
	if (IS_ERR(ordered)) {
		btrfs_drop_extent_map_range(inode, start, end, false);
		ret = PTR_ERR(ordered);
		goto out_free_reserved;
	}
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);

	if (start + encoded->len > inode->vfs_inode.i_size)
		i_size_write(&inode->vfs_inode, start + encoded->len);

	unlock_extent(io_tree, start, end, &cached_state);

	btrfs_delalloc_release_extents(inode, num_bytes);

	btrfs_submit_compressed_write(ordered, pages, nr_pages, 0, false);
	ret = orig_count;
	goto out;
out_free_reserved:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_delalloc_release:
	btrfs_delalloc_release_extents(inode, num_bytes);
	btrfs_delalloc_release_metadata(inode, disk_num_bytes, ret < 0);
out_qgroup_free_data:
	if (ret < 0)
		btrfs_qgroup_free_data(inode, data_reserved, start, num_bytes);
out_free_data_space:
	/*
	 * If btrfs_reserve_extent() succeeded, then we already decremented
	 * bytes_may_use.
	 */
	if (!extent_reserved)
		btrfs_free_reserved_data_space_noquota(fs_info, disk_num_bytes);
out_unlock:
	unlock_extent(io_tree, start, end, &cached_state);
out_pages:
	for (i = 0; i < nr_pages; i++) {
		if (pages[i])
			__free_page(pages[i]);
	}
	kvfree(pages);
out:
	if (ret >= 0)
		iocb->ki_pos += encoded->len;
	return ret;
}
#ifdef CONFIG_SWAP
/*
 * Add an entry indicating a block group or device which is pinned by a
 * swapfile. Returns 0 on success, 1 if there is already an entry for it, or a
 * negative errno on failure.
 */
static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr,
				  bool is_block_group)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct btrfs_swapfile_pin *sp, *entry;
	struct rb_node **p;
	struct rb_node *parent = NULL;

	sp = kmalloc(sizeof(*sp), GFP_NOFS);
	if (!sp)
		return -ENOMEM;
	sp->ptr = ptr;
	sp->inode = inode;
	sp->is_block_group = is_block_group;
	sp->bg_extent_count = 1;

	spin_lock(&fs_info->swapfile_pins_lock);
	p = &fs_info->swapfile_pins.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_swapfile_pin, node);
		if (sp->ptr < entry->ptr ||
		    (sp->ptr == entry->ptr && sp->inode < entry->inode)) {
			p = &(*p)->rb_left;
		} else if (sp->ptr > entry->ptr ||
			   (sp->ptr == entry->ptr && sp->inode > entry->inode)) {
			p = &(*p)->rb_right;
		} else {
			if (is_block_group)
				entry->bg_extent_count++;
			spin_unlock(&fs_info->swapfile_pins_lock);
			kfree(sp);
			return 1;
		}
	}
	rb_link_node(&sp->node, parent, p);
	rb_insert_color(&sp->node, &fs_info->swapfile_pins);
	spin_unlock(&fs_info->swapfile_pins_lock);
	return 0;
}
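/*
 * The tree above is keyed on (ptr, inode): ptr is either a block group or a
 * device pointer, so each one is pinned at most once per swapfile inode, and
 * re-pinning a block group only bumps its bg_extent_count.
 */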
/* Free all of the entries pinned by this swapfile. */
static void btrfs_free_swapfile_pins(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct btrfs_swapfile_pin *sp;
	struct rb_node *node, *next;

	spin_lock(&fs_info->swapfile_pins_lock);
	node = rb_first(&fs_info->swapfile_pins);
	while (node) {
		next = rb_next(node);
		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
		if (sp->inode == inode) {
			rb_erase(&sp->node, &fs_info->swapfile_pins);
			if (sp->is_block_group) {
				btrfs_dec_block_group_swap_extents(sp->ptr,
							   sp->bg_extent_count);
				btrfs_put_block_group(sp->ptr);
			}
			kfree(sp);
		}
		node = next;
	}
	spin_unlock(&fs_info->swapfile_pins_lock);
}
struct btrfs_swap_info {
	u64 start;
	u64 block_start;
	u64 block_len;
	u64 lowest_ppage;
	u64 highest_ppage;
	unsigned long nr_pages;
	int nr_extents;
};
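/*
 * btrfs_add_swap_extent() below translates one contiguous physical run of the
 * file into swap device pages. A worked example, assuming 4K pages: with
 * block_start = 0x3000 and block_len = 0x5000 (bytes 0x3000..0x7fff),
 * PAGE_ALIGN rounds the start up and PAGE_ALIGN_DOWN rounds the end down, so
 * first_ppage = 3 and next_ppage = 8, yielding nr_pages = 5.
 */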
static int btrfs_add_swap_extent(struct swap_info_struct *sis,
				 struct btrfs_swap_info *bsi)
{
	unsigned long nr_pages;
	unsigned long max_pages;
	u64 first_ppage, first_ppage_reported, next_ppage;
	int ret;

	/*
	 * Our swapfile may have had its size extended after the swap header was
	 * written. In that case activating the swapfile should not go beyond
	 * the max size set in the swap header.
	 */
	if (bsi->nr_pages >= sis->max)
		return 0;

	max_pages = sis->max - bsi->nr_pages;
	first_ppage = PAGE_ALIGN(bsi->block_start) >> PAGE_SHIFT;
	next_ppage = PAGE_ALIGN_DOWN(bsi->block_start + bsi->block_len) >> PAGE_SHIFT;

	if (first_ppage >= next_ppage)
		return 0;
	nr_pages = next_ppage - first_ppage;
	nr_pages = min(nr_pages, max_pages);

	first_ppage_reported = first_ppage;
	if (bsi->start == 0)
		first_ppage_reported++;
	if (bsi->lowest_ppage > first_ppage_reported)
		bsi->lowest_ppage = first_ppage_reported;
	if (bsi->highest_ppage < (next_ppage - 1))
		bsi->highest_ppage = next_ppage - 1;

	ret = add_swap_extent(sis, bsi->nr_pages, nr_pages, first_ppage);
	if (ret < 0)
		return ret;
	bsi->nr_extents += ret;
	bsi->nr_pages += nr_pages;
	return 0;
}
static void btrfs_swap_deactivate(struct file *file)
{
	struct inode *inode = file_inode(file);

	btrfs_free_swapfile_pins(inode);
	atomic_dec(&BTRFS_I(inode)->root->nr_swapfiles);
}
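/*
 * Called from the generic swapfile code through the address space operations
 * below. On success btrfs_swap_activate() returns the number of extents it
 * added via add_swap_extent(); on any failure it unwinds its pins with
 * btrfs_swap_deactivate() before returning.
 */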
static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
			       sector_t *span)
{
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_state *cached_state = NULL;
	struct extent_map *em = NULL;
	struct btrfs_device *device = NULL;
	struct btrfs_swap_info bsi = {
		.lowest_ppage = (sector_t)-1ULL,
	};
	int ret = 0;
	u64 isize;
	u64 start;

	/*
	 * If the swap file was just created, make sure delalloc is done. If the
	 * file changes again after this, the user is doing something stupid and
	 * we don't really care.
	 */
	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
	if (ret)
		return ret;

	/*
	 * The inode is locked, so these flags won't change after we check them.
	 */
	if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) {
		btrfs_warn(fs_info, "swapfile must not be compressed");
		return -EINVAL;
	}
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) {
		btrfs_warn(fs_info, "swapfile must not be copy-on-write");
		return -EINVAL;
	}
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
		btrfs_warn(fs_info, "swapfile must not be checksummed");
		return -EINVAL;
	}

	/*
	 * Balance or device remove/replace/resize can move stuff around from
	 * under us. The exclop protection makes sure they aren't running/won't
	 * run concurrently while we are mapping the swap extents, and
	 * fs_info->swapfile_pins prevents them from running while the swap
	 * file is active and moving the extents. Note that this also prevents
	 * a concurrent device add which isn't actually necessary, but it's not
	 * really worth the trouble to allow it.
	 */
	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_SWAP_ACTIVATE)) {
		btrfs_warn(fs_info,
	   "cannot activate swapfile while exclusive operation is running");
		return -EBUSY;
	}

	/*
	 * Prevent snapshot creation while we are activating the swap file.
	 * We do not want to race with snapshot creation. If snapshot creation
	 * already started before we bumped nr_swapfiles from 0 to 1 and
	 * completes before the first write into the swap file after it is
	 * activated, than that write would fallback to COW.
	 */
	if (!btrfs_drew_try_write_lock(&root->snapshot_lock)) {
		btrfs_exclop_finish(fs_info);
		btrfs_warn(fs_info,
	   "cannot activate swapfile because snapshot creation is in progress");
		return -EINVAL;
	}
	/*
	 * Snapshots can create extents which require COW even if NODATACOW is
	 * set. We use this counter to prevent snapshots. We must increment it
	 * before walking the extents because we don't want a concurrent
	 * snapshot to run after we've already checked the extents.
	 *
	 * It is possible that subvolume is marked for deletion but still not
	 * removed yet. To prevent this race, we check the root status before
	 * activating the swapfile.
	 */
	spin_lock(&root->root_item_lock);
	if (btrfs_root_dead(root)) {
		spin_unlock(&root->root_item_lock);

		btrfs_exclop_finish(fs_info);
		btrfs_warn(fs_info,
		"cannot activate swapfile because subvolume %llu is being deleted",
			root->root_key.objectid);
		return -EPERM;
	}
	atomic_inc(&root->nr_swapfiles);
	spin_unlock(&root->root_item_lock);

	isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);

	lock_extent(io_tree, 0, isize - 1, &cached_state);
	start = 0;
	while (start < isize) {
		u64 logical_block_start, physical_block_start;
		struct btrfs_block_group *bg;
		u64 len = isize - start;

		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}

		if (em->block_start == EXTENT_MAP_HOLE) {
			btrfs_warn(fs_info, "swapfile must not have holes");
			ret = -EINVAL;
			goto out;
		}
		if (em->block_start == EXTENT_MAP_INLINE) {
			/*
			 * It's unlikely we'll ever actually find ourselves
			 * here, as a file small enough to fit inline won't be
			 * big enough to store more than the swap header, but in
			 * case something changes in the future, let's catch it
			 * here rather than later.
			 */
			btrfs_warn(fs_info, "swapfile must not be inline");
			ret = -EINVAL;
			goto out;
		}
		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
			btrfs_warn(fs_info, "swapfile must not be compressed");
			ret = -EINVAL;
			goto out;
		}

		logical_block_start = em->block_start + (start - em->start);
		len = min(len, em->len - (start - em->start));
		free_extent_map(em);
		em = NULL;

		ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL, false, true);
		if (ret < 0) {
			goto out;
		} else if (ret) {
			ret = 0;
		} else {
			btrfs_warn(fs_info,
				   "swapfile must not be copy-on-write");
			ret = -EINVAL;
			goto out;
		}

		em = btrfs_get_chunk_map(fs_info, logical_block_start, len);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}

		if (em->map_lookup->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
			btrfs_warn(fs_info,
				   "swapfile must have single data profile");
			ret = -EINVAL;
			goto out;
		}

		if (device == NULL) {
			device = em->map_lookup->stripes[0].dev;
			ret = btrfs_add_swapfile_pin(inode, device, false);
			if (ret == 1)
				ret = 0;
			else if (ret)
				goto out;
		} else if (device != em->map_lookup->stripes[0].dev) {
			btrfs_warn(fs_info, "swapfile must be on one device");
			ret = -EINVAL;
			goto out;
		}

		physical_block_start = (em->map_lookup->stripes[0].physical +
					(logical_block_start - em->start));
		len = min(len, em->len - (logical_block_start - em->start));
		free_extent_map(em);
		em = NULL;

		bg = btrfs_lookup_block_group(fs_info, logical_block_start);
		if (!bg) {
			btrfs_warn(fs_info,
			   "could not find block group containing swapfile");
			ret = -EINVAL;
			goto out;
		}

		if (!btrfs_inc_block_group_swap_extents(bg)) {
			btrfs_warn(fs_info,
			   "block group for swapfile at %llu is read-only%s",
			   bg->start,
			   atomic_read(&fs_info->scrubs_running) ?
				       " (scrub running)" : "");
			btrfs_put_block_group(bg);
			ret = -EINVAL;
			goto out;
		}

		ret = btrfs_add_swapfile_pin(inode, bg, true);
		if (ret) {
			btrfs_put_block_group(bg);
			if (ret == 1)
				ret = 0;
			else
				goto out;
		}

		if (bsi.block_len &&
		    bsi.block_start + bsi.block_len == physical_block_start) {
			bsi.block_len += len;
		} else {
			if (bsi.block_len) {
				ret = btrfs_add_swap_extent(sis, &bsi);
				if (ret)
					goto out;
			}
			bsi.start = start;
			bsi.block_start = physical_block_start;
			bsi.block_len = len;
		}

		start += len;
	}

	if (bsi.block_len)
		ret = btrfs_add_swap_extent(sis, &bsi);

out:
	if (!IS_ERR_OR_NULL(em))
		free_extent_map(em);

	unlock_extent(io_tree, 0, isize - 1, &cached_state);

	if (ret)
		btrfs_swap_deactivate(file);

	btrfs_drew_write_unlock(&root->snapshot_lock);

	btrfs_exclop_finish(fs_info);

	if (ret)
		return ret;

	if (device)
		sis->bdev = device->bdev;
	*span = bsi.highest_ppage - bsi.lowest_ppage + 1;
	sis->max = bsi.nr_pages;
	sis->pages = bsi.nr_pages - 1;
	sis->highest_bit = bsi.nr_pages - 1;
	return bsi.nr_extents;
}
#else
static void btrfs_swap_deactivate(struct file *file)
{
}

static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
			       sector_t *span)
{
	return -EOPNOTSUPP;
}
#endif
/*
 * Update the number of bytes used in the VFS' inode. When we replace extents in
 * a range (clone, dedupe, fallocate's zero range), we must update the number of
 * bytes used by the inode in an atomic manner, so that concurrent stat(2) calls
 * always get a correct value.
 */
void btrfs_update_inode_bytes(struct btrfs_inode *inode,
			      const u64 add_bytes,
			      const u64 del_bytes)
{
	if (add_bytes == del_bytes)
		return;

	spin_lock(&inode->lock);
	if (del_bytes > 0)
		inode_sub_bytes(&inode->vfs_inode, del_bytes);
	if (add_bytes > 0)
		inode_add_bytes(&inode->vfs_inode, add_bytes);
	spin_unlock(&inode->lock);
}
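/*
 * Doing the subtraction and addition under inode->lock keeps the pair atomic
 * with respect to readers such as btrfs_getattr(), which samples the inode's
 * byte counters under the same lock, so stat(2) never observes the
 * intermediate state where the old extent's bytes are gone but the new
 * extent's bytes have not been added yet.
 */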
/*
 * Verify that there are no ordered extents for a given file range.
 *
 * @inode:   The target inode.
 * @start:   Start offset of the file range, should be sector size aligned.
 * @end:     End offset (inclusive) of the file range, its value plus 1 should
 *           be sector size aligned.
 *
 * This should typically be used for cases where we locked an inode's VFS lock in
 * exclusive mode, we have also locked the inode's i_mmap_lock in exclusive mode,
 * we have flushed all delalloc in the range, we have waited for all ordered
 * extents in the range to complete and finally we have locked the file range in
 * the inode's io_tree.
 */
void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_ordered_extent *ordered;

	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
		return;

	ordered = btrfs_lookup_first_ordered_range(inode, start, end + 1 - start);
	if (ordered) {
		btrfs_err(root->fs_info,
"found unexpected ordered extent in file range [%llu, %llu] for inode %llu root %llu (ordered range [%llu, %llu])",
			  start, end, btrfs_ino(inode), root->root_key.objectid,
			  ordered->file_offset,
			  ordered->file_offset + ordered->num_bytes - 1);
		btrfs_put_ordered_extent(ordered);
	}

	ASSERT(ordered == NULL);
}
static const struct inode_operations btrfs_dir_inode_operations = {
	.getattr	= btrfs_getattr,
	.lookup		= btrfs_lookup,
	.create		= btrfs_create,
	.unlink		= btrfs_unlink,
	.link		= btrfs_link,
	.mkdir		= btrfs_mkdir,
	.rmdir		= btrfs_rmdir,
	.rename		= btrfs_rename2,
	.symlink	= btrfs_symlink,
	.setattr	= btrfs_setattr,
	.mknod		= btrfs_mknod,
	.listxattr	= btrfs_listxattr,
	.permission	= btrfs_permission,
	.get_inode_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
	.tmpfile	= btrfs_tmpfile,
	.fileattr_get	= btrfs_fileattr_get,
	.fileattr_set	= btrfs_fileattr_set,
};
static const struct file_operations btrfs_dir_file_operations = {
	.llseek		= btrfs_dir_llseek,
	.read		= generic_read_dir,
	.iterate_shared	= btrfs_real_readdir,
	.open		= btrfs_opendir,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_compat_ioctl,
#endif
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
};
/*
 * btrfs doesn't support the bmap operation because swapfiles
 * use bmap to make a mapping of extents in the file. They assume
 * these extents won't change over the life of the file and they
 * use the bmap result to do IO directly to the drive.
 *
 * the btrfs bmap call would return logical addresses that aren't
 * suitable for IO and they also will change frequently as COW
 * operations happen. So, swapfile + btrfs == corruption.
 *
 * For now we're avoiding this by dropping bmap.
 */
static const struct address_space_operations btrfs_aops = {
	.read_folio	= btrfs_read_folio,
	.writepages	= btrfs_writepages,
	.readahead	= btrfs_readahead,
	.invalidate_folio = btrfs_invalidate_folio,
	.release_folio	= btrfs_release_folio,
	.migrate_folio	= btrfs_migrate_folio,
	.dirty_folio	= filemap_dirty_folio,
	.error_remove_page = generic_error_remove_page,
	.swap_activate	= btrfs_swap_activate,
	.swap_deactivate = btrfs_swap_deactivate,
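};

/*
 * In place of bmap, the swapfile code invokes the swap_activate and
 * swap_deactivate hooks above, which let btrfs validate the file layout and
 * pin its extents itself.
 */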
static const struct inode_operations btrfs_file_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.listxattr	= btrfs_listxattr,
	.permission	= btrfs_permission,
	.fiemap		= btrfs_fiemap,
	.get_inode_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
	.fileattr_get	= btrfs_fileattr_get,
	.fileattr_set	= btrfs_fileattr_set,
};

static const struct inode_operations btrfs_special_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.listxattr	= btrfs_listxattr,
	.get_inode_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
};

static const struct inode_operations btrfs_symlink_inode_operations = {
	.get_link	= page_get_link,
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.listxattr	= btrfs_listxattr,
	.update_time	= btrfs_update_time,
};

const struct dentry_operations btrfs_dentry_operations = {
	.d_delete	= btrfs_dentry_delete,
};