// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <crypto/hash.h>
#include <linux/kernel.h>
#include <linux/blk-cgroup.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include <linux/uio.h>
#include <linux/magic.h>
#include <linux/iversion.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/sched/mm.h>
#include <linux/iomap.h>
#include <asm/unaligned.h>
#include <linux/fsverity.h>
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "compression.h"
#include "free-space-cache.h"
#include "delalloc-space.h"
#include "block-group.h"
#include "space-info.h"
#include "inode-item.h"
struct btrfs_iget_args {
	struct btrfs_root *root;
};

struct btrfs_dio_data {
	struct extent_changeset *data_reserved;
	bool data_space_reserved;
};

struct btrfs_dio_private {
	/*
	 * Since DIO can use anonymous page, we cannot use page_offset() to
	 * grab the file offset, thus need a dedicated member for file offset.
	 */
	u64 file_offset;
	/* Used for bio::bi_size */
	u32 bytes;

	/*
	 * References to this structure. There is one reference per in-flight
	 * bio plus one while we're still setting up.
	 */
	refcount_t refs;

	/* Array of checksums */
	u8 *csums;

	/* This must be last */
	struct bio bio;
};

static struct bio_set btrfs_dio_bioset;
struct btrfs_rename_ctx {
	/* Output field. Stores the index number of the old directory entry. */
	u64 index;
};

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct file_operations btrfs_dir_file_operations;

static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_path_cachep;
struct kmem_cache *btrfs_free_space_cachep;
struct kmem_cache *btrfs_free_space_bitmap_cachep;
static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct inode *inode, bool skip_writeback);
static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
static noinline int cow_file_range(struct btrfs_inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock);
static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
				       u64 len, u64 orig_start, u64 block_start,
				       u64 block_len, u64 orig_block_len,
				       u64 ram_bytes, int compress_type,
				       int type);
static void __endio_write_update_ordered(struct btrfs_inode *inode,
					 const u64 offset, const u64 bytes,
					 const bool uptodate);
/*
 * btrfs_inode_lock - lock inode i_rwsem based on arguments passed
 *
 * ilock_flags can have the following bit set:
 *
 * BTRFS_ILOCK_SHARED - acquire a shared lock on the inode
 * BTRFS_ILOCK_TRY - try to acquire the lock, if fails on first attempt
 *		     return -EAGAIN
 * BTRFS_ILOCK_MMAP - acquire a write lock on the i_mmap_lock
 */
int btrfs_inode_lock(struct inode *inode, unsigned int ilock_flags)
{
	if (ilock_flags & BTRFS_ILOCK_SHARED) {
		if (ilock_flags & BTRFS_ILOCK_TRY) {
			if (!inode_trylock_shared(inode))
				return -EAGAIN;
			return 0;
		}
		inode_lock_shared(inode);
	} else {
		if (ilock_flags & BTRFS_ILOCK_TRY) {
			if (!inode_trylock(inode))
				return -EAGAIN;
			return 0;
		}
		inode_lock(inode);
	}
	if (ilock_flags & BTRFS_ILOCK_MMAP)
		down_write(&BTRFS_I(inode)->i_mmap_lock);
	return 0;
}
/*
 * btrfs_inode_unlock - unlock inode i_rwsem
 *
 * ilock_flags should contain the same bits set as passed to btrfs_inode_lock()
 * to decide whether the lock acquired is shared or exclusive.
 */
void btrfs_inode_unlock(struct inode *inode, unsigned int ilock_flags)
{
	if (ilock_flags & BTRFS_ILOCK_MMAP)
		up_write(&BTRFS_I(inode)->i_mmap_lock);
	if (ilock_flags & BTRFS_ILOCK_SHARED)
		inode_unlock_shared(inode);
	else
		inode_unlock(inode);
}
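
/*
 * Illustrative usage sketch (not part of the original file): callers pair the
 * two helpers with the same flag set, e.g. a shared lock that also needs the
 * mmap lock held for writing:
 *
 *	btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED | BTRFS_ILOCK_MMAP);
 *	...
 *	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED | BTRFS_ILOCK_MMAP);
 *
 * With BTRFS_ILOCK_TRY the lock call may return -EAGAIN instead of blocking,
 * so its return value must be checked.
 */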
/*
 * Cleanup all submitted ordered extents in specified range to handle errors
 * from the btrfs_run_delalloc_range() callback.
 *
 * NOTE: caller must ensure that when an error happens, it can not call
 * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
 * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata
 * to be released, which we want to happen only when finishing the ordered
 * extent (btrfs_finish_ordered_io()).
 */
static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
						 struct page *locked_page,
						 u64 offset, u64 bytes)
{
	unsigned long index = offset >> PAGE_SHIFT;
	unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
	u64 page_start = page_offset(locked_page);
	u64 page_end = page_start + PAGE_SIZE - 1;
	struct page *page;

	while (index <= end_index) {
		/*
		 * For locked page, we will call end_extent_writepage() on it
		 * in run_delalloc_range() for the error handling.  That
		 * end_extent_writepage() function will call
		 * btrfs_mark_ordered_io_finished() to clear page Ordered and
		 * run the ordered extent accounting.
		 *
		 * Here we can't just clear the Ordered bit, or
		 * btrfs_mark_ordered_io_finished() would skip the accounting
		 * for the page range, and the ordered extent will never finish.
		 */
		if (index == (page_offset(locked_page) >> PAGE_SHIFT)) {
			index++;
			continue;
		}
		page = find_get_page(inode->vfs_inode.i_mapping, index);
		index++;
		if (!page)
			continue;

		/*
		 * Here we just clear all Ordered bits for every page in the
		 * range, then __endio_write_update_ordered() will handle
		 * the ordered extent accounting for the range.
		 */
		btrfs_page_clamp_clear_ordered(inode->root->fs_info, page,
					       offset, bytes);
		put_page(page);
	}

	/* The locked page covers the full range, nothing needs to be done */
	if (bytes + offset <= page_offset(locked_page) + PAGE_SIZE)
		return;
	/*
	 * In case this page belongs to the delalloc range being instantiated
	 * then skip it, since the first page of a range is going to be
	 * properly cleaned up by the caller of run_delalloc_range
	 */
	if (page_start >= offset && page_end <= (offset + bytes - 1)) {
		bytes = offset + bytes - page_offset(locked_page) - PAGE_SIZE;
		offset = page_offset(locked_page) + PAGE_SIZE;
	}

	return __endio_write_update_ordered(inode, offset, bytes, false);
}
static int btrfs_dirty_inode(struct inode *inode);

static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct btrfs_new_inode_args *args)
{
	int err;

	if (args->default_acl) {
		err = __btrfs_set_acl(trans, args->inode, args->default_acl,
				      ACL_TYPE_DEFAULT);
		if (err)
			return err;
	}
	if (args->acl) {
		err = __btrfs_set_acl(trans, args->inode, args->acl, ACL_TYPE_ACCESS);
		if (err)
			return err;
	}
	if (!args->default_acl && !args->acl)
		cache_no_acl(args->inode);
	return btrfs_xattr_security_init(trans, args->inode, args->dir,
					 &args->dentry->d_name);
}
/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_path *path,
				struct btrfs_inode *inode, bool extent_inserted,
				size_t size, size_t compressed_size,
				int compress_type,
				struct page **compressed_pages,
				bool update_i_size)
{
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int ret;
	size_t cur_size = size;
	u64 i_size;

	ASSERT((compressed_size > 0 && compressed_pages) ||
	       (compressed_size == 0 && !compressed_pages));

	if (compressed_size && compressed_pages)
		cur_size = compressed_size;

	if (!extent_inserted) {
		struct btrfs_key key;
		size_t datasize;

		key.objectid = btrfs_ino(inode);
		key.offset = 0;
		key.type = BTRFS_EXTENT_DATA_KEY;

		datasize = btrfs_file_extent_calc_inline_size(cur_size);
		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      datasize);
		if (ret)
			goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (compress_type != BTRFS_COMPRESS_NONE) {
		struct page *cpage;
		int i = 0;

		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min_t(unsigned long, compressed_size,
					 PAGE_SIZE);

			kaddr = kmap_atomic(cpage);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap_atomic(kaddr);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  compress_type);
	} else {
		page = find_get_page(inode->vfs_inode.i_mapping, 0);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_atomic(page);
		write_extent_buffer(leaf, kaddr, ptr, size);
		kunmap_atomic(kaddr);
		put_page(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	/*
	 * We align size to sectorsize for inline extents just for simplicity
	 * sake.
	 */
	ret = btrfs_inode_set_file_extent_range(inode, 0,
					ALIGN(size, root->fs_info->sectorsize));
	if (ret)
		goto fail;

	/*
	 * We're an inline extent, so nobody can extend the file past i_size
	 * without locking a page we already have locked.
	 *
	 * We must do any i_size and inode updates before we unlock the pages.
	 * Otherwise we could end up racing with unlink.
	 */
	i_size = i_size_read(&inode->vfs_inode);
	if (update_i_size && size > i_size) {
		i_size_write(&inode->vfs_inode, size);
		i_size = size;
	}
	inode->disk_i_size = i_size;

fail:
	return ret;
}
/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 size,
					  size_t compressed_size,
					  int compress_type,
					  struct page **compressed_pages,
					  bool update_i_size)
{
	struct btrfs_drop_extents_args drop_args = { 0 };
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	u64 data_len = (compressed_size ?: size);
	int ret;
	struct btrfs_path *path;

	/*
	 * We can create an inline extent if it ends at or beyond the current
	 * i_size, is no larger than a sector (decompressed), and the (possibly
	 * compressed) data fits in a leaf and the configured maximum inline
	 * size.
	 */
	if (size < i_size_read(&inode->vfs_inode) ||
	    size > fs_info->sectorsize ||
	    data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
	    data_len > fs_info->max_inline)
		return 1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	trans->block_rsv = &inode->block_rsv;

	drop_args.path = path;
	drop_args.start = 0;
	drop_args.end = fs_info->sectorsize;
	drop_args.drop_cache = true;
	drop_args.replace_extent = true;
	drop_args.extent_item_size = btrfs_file_extent_calc_inline_size(data_len);
	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = insert_inline_extent(trans, path, inode, drop_args.extent_inserted,
				   size, compressed_size, compress_type,
				   compressed_pages, update_i_size);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	btrfs_update_inode_bytes(inode, size, drop_args.bytes_found);
	ret = btrfs_update_inode(trans, root, inode);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	btrfs_set_inode_full_sync(inode);
out:
	/*
	 * Don't forget to free the reserved space, as for inlined extent
	 * it won't count as data extent, free them directly here.
	 * And at reserve time, it's always aligned to page size, so
	 * just free one page here.
	 */
	btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE);
	btrfs_free_path(path);
	btrfs_end_transaction(trans);
	return ret;
}
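
/*
 * Worked example (illustrative, not in the original): the size checks above
 * require the decompressed size to fit in one sector and data_len to fit both
 * BTRFS_MAX_INLINE_DATA_SIZE() (bounded by the leaf size) and
 * fs_info->max_inline (2048 by default). So on a 4K-sector filesystem with
 * default mount options, a 1900-byte file written at offset 0 can be inlined,
 * while a 3000-byte file exceeds max_inline and gets a regular data extent.
 */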
struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
	int compress_type;
	struct list_head list;
};

struct async_chunk {
	struct inode *inode;
	struct page *locked_page;
	u64 start;
	u64 end;
	unsigned int write_flags;
	struct list_head extents;
	struct cgroup_subsys_state *blkcg_css;
	struct btrfs_work work;
	struct async_cow *async_cow;
};

struct async_cow {
	atomic_t num_chunks;
	struct async_chunk chunks[];
};

static noinline int add_async_extent(struct async_chunk *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages,
				     int compress_type)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	BUG_ON(!async_extent); /* -ENOMEM */
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->pages = pages;
	async_extent->nr_pages = nr_pages;
	async_extent->compress_type = compress_type;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}
/*
 * Check if the inode needs to be submitted to compression, based on mount
 * options, defragmentation, properties or heuristics.
 */
static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
				      u64 end)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	if (!btrfs_inode_can_compress(inode)) {
		WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
			KERN_ERR "BTRFS: unexpected compression for ino %llu\n",
			btrfs_ino(inode));
		return 0;
	}
	/*
	 * Special check for subpage.
	 *
	 * We lock the full page then run each delalloc range in the page, thus
	 * for the following case, we will hit some subpage specific corner case:
	 *
	 * 0		32K		64K
	 * |	|///////|	|///////|
	 *		\- A		\- B
	 *
	 * In above case, both range A and range B will try to unlock the full
	 * page [0, 64K), causing the one finished later will have page
	 * unlocked already, triggering various page lock requirement BUG_ON()s.
	 *
	 * So here we add an artificial limit that subpage compression can only
	 * be enabled if the range is fully page aligned.
	 *
	 * In theory we only need to ensure the first page is fully covered, but
	 * the tailing partial page will be locked until the full compression
	 * finishes, delaying the write of other range.
	 *
	 * TODO: Make btrfs_run_delalloc_range() to lock all delalloc range
	 * first to prevent any submitted async extent to unlock the full page.
	 * By this, we can ensure for subpage case that only the last async_cow
	 * will unlock the full page.
	 */
	if (fs_info->sectorsize < PAGE_SIZE) {
		if (!IS_ALIGNED(start, PAGE_SIZE) ||
		    !IS_ALIGNED(end + 1, PAGE_SIZE))
			return 0;
	}

	/* force compress */
	if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
		return 1;
	/* defrag ioctl */
	if (inode->defrag_compress)
		return 1;
	/* bad compression ratios */
	if (inode->flags & BTRFS_INODE_NOCOMPRESS)
		return 0;
	if (btrfs_test_opt(fs_info, COMPRESS) ||
	    inode->flags & BTRFS_INODE_COMPRESS ||
	    inode->prop_compress)
		return btrfs_compress_heuristic(&inode->vfs_inode, start, end);
	return 0;
}
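
/*
 * Precedence note (illustrative, derived from the check order above):
 * compress-force wins over everything, a defrag request wins over the
 * per-inode NOCOMPRESS flag, and only the plain "compress" paths consult the
 * heuristic. E.g. an inode flagged NOCOMPRESS on a filesystem mounted with
 * -o compress-force=zstd is still compressed.
 */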
static inline void inode_should_defrag(struct btrfs_inode *inode,
		u64 start, u64 end, u64 num_bytes, u32 small_write)
{
	/* If this is a small write inside eof, kick off a defrag */
	if (num_bytes < small_write &&
	    (start > 0 || end + 1 < inode->disk_i_size))
		btrfs_add_inode_defrag(NULL, inode, small_write);
}
/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that the flusher thread sent them
 * down.
 */
static noinline int compress_file_range(struct async_chunk *async_chunk)
{
	struct inode *inode = async_chunk->inode;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	u64 blocksize = fs_info->sectorsize;
	u64 start = async_chunk->start;
	u64 end = async_chunk->end;
	u64 actual_end;
	u64 i_size;
	int ret = 0;
	struct page **pages = NULL;
	unsigned long nr_pages;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	unsigned long i;
	int will_compress;
	int compress_type = fs_info->compress_type;
	int compressed_extents = 0;
	int redirty = 0;

	inode_should_defrag(BTRFS_I(inode), start, end, end - start + 1,
			SZ_16K);

	/*
	 * We need to save i_size before now because it could change in between
	 * us evaluating the size and assigning it.  This is because we lock and
	 * unlock the page in truncate and fallocate, and then modify the i_size
	 * later on.
	 *
	 * The barriers are to emulate READ_ONCE, remove that once i_size_read
	 * does that for us.
	 */
	barrier();
	i_size = i_size_read(inode);
	barrier();
	actual_end = min_t(u64, i_size, end + 1);
again:
	will_compress = 0;
	nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
	nr_pages = min_t(unsigned long, nr_pages,
			BTRFS_MAX_COMPRESSED / PAGE_SIZE);

	/*
	 * we don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time.  So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/*
	 * Skip compression for a small file range(<=blocksize) that
	 * isn't an inline extent, since it doesn't save disk space at all.
	 */
	if (total_compressed <= blocksize &&
	   (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
		goto cleanup_and_bail_uncompressed;

	/*
	 * For subpage case, we require full page alignment for the sector
	 * aligned range.
	 * Thus we must also check against @actual_end, not just @end.
	 */
	if (blocksize < PAGE_SIZE) {
		if (!IS_ALIGNED(start, PAGE_SIZE) ||
		    !IS_ALIGNED(round_up(actual_end, blocksize), PAGE_SIZE))
			goto cleanup_and_bail_uncompressed;
	}

	total_compressed = min_t(unsigned long, total_compressed,
			BTRFS_MAX_UNCOMPRESSED);
	total_in = 0;
	ret = 0;

	/*
	 * we do compression for mount -o compress and when the
	 * inode has not been flagged as nocompress.  This flag can
	 * change at any time if we discover bad compression ratios.
	 */
	if (inode_need_compress(BTRFS_I(inode), start, end)) {
		WARN_ON(pages);
		pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
		if (!pages) {
			/* just bail out to the uncompressed code */
			nr_pages = 0;
			goto cont;
		}

		if (BTRFS_I(inode)->defrag_compress)
			compress_type = BTRFS_I(inode)->defrag_compress;
		else if (BTRFS_I(inode)->prop_compress)
			compress_type = BTRFS_I(inode)->prop_compress;

		/*
		 * we need to call clear_page_dirty_for_io on each
		 * page in the range.  Otherwise applications with the file
		 * mmap'd can wander in and change the page contents while
		 * we are compressing them.
		 *
		 * If the compression fails for any reason, we set the pages
		 * dirty again later on.
		 *
		 * Note that the remaining part is redirtied, the start pointer
		 * has moved, the end is the original one.
		 */
		if (!redirty) {
			extent_range_clear_dirty_for_io(inode, start, end);
			redirty = 1;
		}

		/* Compression level is applied here and only here */
		ret = btrfs_compress_pages(
			compress_type | (fs_info->compress_level << 4),
					   inode->i_mapping, start,
					   pages,
					   &nr_pages,
					   &total_in,
					   &total_compressed);

		if (!ret) {
			unsigned long offset = offset_in_page(total_compressed);
			struct page *page = pages[nr_pages - 1];

			/* zero the tail end of the last page, we might be
			 * sending it down to disk
			 */
			if (offset)
				memzero_page(page, offset, PAGE_SIZE - offset);
			will_compress = 1;
		}
	}
cont:
	/*
	 * Check cow_file_range() for why we don't even try to create inline
	 * extent for subpage case.
	 */
	if (start == 0 && fs_info->sectorsize == PAGE_SIZE) {
		/* lets try to make an inline extent */
		if (ret || total_in < actual_end) {
			/* we didn't compress the entire range, try
			 * to make an uncompressed inline extent.
			 */
			ret = cow_file_range_inline(BTRFS_I(inode), actual_end,
						    0, BTRFS_COMPRESS_NONE,
						    NULL, false);
		} else {
			/* try making a compressed inline extent */
			ret = cow_file_range_inline(BTRFS_I(inode), actual_end,
						    total_compressed,
						    compress_type, pages,
						    false);
		}
		if (ret <= 0) {
			unsigned long clear_flags = EXTENT_DELALLOC |
				EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
				EXTENT_DO_ACCOUNTING;
			unsigned long page_error_op;

			page_error_op = ret < 0 ? PAGE_SET_ERROR : 0;

			/*
			 * inline extent creation worked or returned error,
			 * we don't need to create any more async work items.
			 * Unlock and free up our temp pages.
			 *
			 * We use DO_ACCOUNTING here because we need the
			 * delalloc_release_metadata to be done _after_ we drop
			 * our outstanding extent for clearing delalloc for this
			 * range.
			 */
			extent_clear_unlock_delalloc(BTRFS_I(inode), start, end,
						     NULL,
						     clear_flags,
						     PAGE_UNLOCK |
						     PAGE_START_WRITEBACK |
						     page_error_op |
						     PAGE_END_WRITEBACK);

			/*
			 * Ensure we only free the compressed pages if we have
			 * them allocated, as we can still reach here with
			 * inode_need_compress() == false.
			 */
			if (pages) {
				for (i = 0; i < nr_pages; i++) {
					WARN_ON(pages[i]->mapping);
					put_page(pages[i]);
				}
				kfree(pages);
			}
			return 0;
		}
	}

	if (will_compress) {
		/*
		 * we aren't doing an inline extent round the compressed size
		 * up to a block size boundary so the allocator does sane
		 * things
		 */
		total_compressed = ALIGN(total_compressed, blocksize);

		/*
		 * one last check to make sure the compression is really a
		 * win, compare the page count read with the blocks on disk,
		 * compression must free at least one sector size
		 */
		total_in = round_up(total_in, fs_info->sectorsize);
		if (total_compressed + blocksize <= total_in) {
			compressed_extents++;

			/*
			 * The async work queues will take care of doing actual
			 * allocation on disk for these compressed pages, and
			 * will submit them to the elevator.
			 */
			add_async_extent(async_chunk, start, total_in,
					total_compressed, pages, nr_pages,
					compress_type);

			if (start + total_in < end) {
				start += total_in;
				pages = NULL;
				cond_resched();
				goto again;
			}
			return compressed_extents;
		}
	}
	if (pages) {
		/*
		 * the compression code ran but failed to make things smaller,
		 * free any pages it allocated and our page pointer array
		 */
		for (i = 0; i < nr_pages; i++) {
			WARN_ON(pages[i]->mapping);
			put_page(pages[i]);
		}
		kfree(pages);
		pages = NULL;
		total_compressed = 0;
		nr_pages = 0;

		/* flag the file so we don't compress in the future */
		if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) &&
		    !(BTRFS_I(inode)->prop_compress)) {
			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
		}
	}
cleanup_and_bail_uncompressed:
	/*
	 * No compression, but we still need to write the pages in the file
	 * we've been given so far.  redirty the locked page if it corresponds
	 * to our extent and set things up for the async work queue to run
	 * cow_file_range to do the normal delalloc dance.
	 */
	if (async_chunk->locked_page &&
	    (page_offset(async_chunk->locked_page) >= start &&
	     page_offset(async_chunk->locked_page)) <= end) {
		__set_page_dirty_nobuffers(async_chunk->locked_page);
		/* unlocked later on in the async handlers */
	}

	if (redirty)
		extent_range_redirty_for_io(inode, start, end);
	add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0,
			 BTRFS_COMPRESS_NONE);
	compressed_extents++;

	return compressed_extents;
}
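
/*
 * Worked example (illustrative, not in the original): with a 4K blocksize, a
 * 128K range must compress, after rounding up to the block size, to at most
 * 124K to pass the "total_compressed + blocksize <= total_in" check above
 * (124K + 4K <= 128K); anything that rounds up to 128K is written
 * uncompressed instead.
 */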
static void free_async_extent_pages(struct async_extent *async_extent)
{
	int i;

	if (!async_extent->pages)
		return;

	for (i = 0; i < async_extent->nr_pages; i++) {
		WARN_ON(async_extent->pages[i]->mapping);
		put_page(async_extent->pages[i]);
	}
	kfree(async_extent->pages);
	async_extent->nr_pages = 0;
	async_extent->pages = NULL;
}
static int submit_uncompressed_range(struct btrfs_inode *inode,
				     struct async_extent *async_extent,
				     struct page *locked_page)
{
	u64 start = async_extent->start;
	u64 end = async_extent->start + async_extent->ram_size - 1;
	unsigned long nr_written = 0;
	int page_started = 0;
	int ret;

	/*
	 * Call cow_file_range() to run the delalloc range directly, since we
	 * won't go to NOCOW or async path again.
	 *
	 * Also we call cow_file_range() with @unlock_page == 0, so that we
	 * can directly submit them without interruption.
	 */
	ret = cow_file_range(inode, locked_page, start, end, &page_started,
			     &nr_written, 0);
	/* Inline extent inserted, page gets unlocked and everything is done */
	if (page_started) {
		ret = 0;
		goto out;
	}
	if (ret < 0) {
		if (locked_page)
			unlock_page(locked_page);
		goto out;
	}

	ret = extent_write_locked_range(&inode->vfs_inode, start, end);
	/* All pages will be unlocked, including @locked_page */
out:
	kfree(async_extent);
	return ret;
}
static int submit_one_async_extent(struct btrfs_inode *inode,
				   struct async_chunk *async_chunk,
				   struct async_extent *async_extent,
				   u64 *alloc_hint)
{
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key ins;
	struct page *locked_page = NULL;
	struct extent_map *em;
	int ret = 0;
	u64 start = async_extent->start;
	u64 end = async_extent->start + async_extent->ram_size - 1;

	/*
	 * If async_chunk->locked_page is in the async_extent range, we need to
	 * handle it.
	 */
	if (async_chunk->locked_page) {
		u64 locked_page_start = page_offset(async_chunk->locked_page);
		u64 locked_page_end = locked_page_start + PAGE_SIZE - 1;

		if (!(start >= locked_page_end || end <= locked_page_start))
			locked_page = async_chunk->locked_page;
	}
	lock_extent(io_tree, start, end);

	/* We have fall back to uncompressed write */
	if (!async_extent->pages)
		return submit_uncompressed_range(inode, async_extent, locked_page);

	ret = btrfs_reserve_extent(root, async_extent->ram_size,
				   async_extent->compressed_size,
				   async_extent->compressed_size,
				   0, *alloc_hint, &ins, 1, 1);
	if (ret) {
		free_async_extent_pages(async_extent);
		/*
		 * Here we used to try again by going back to non-compressed
		 * path for ENOSPC.  But we can't reserve space even for
		 * compressed size, how could it work for uncompressed size
		 * which requires larger size?  So here we directly go error
		 * path.
		 */
		goto out_free;
	}

	/* Here we're doing allocation and writeback of the compressed pages */
	em = create_io_em(inode, start,
			  async_extent->ram_size,	/* len */
			  start,			/* orig_start */
			  ins.objectid,			/* block_start */
			  ins.offset,			/* block_len */
			  ins.offset,			/* orig_block_len */
			  async_extent->ram_size,	/* ram_bytes */
			  async_extent->compress_type,
			  BTRFS_ORDERED_COMPRESSED);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_free_reserve;
	}
	free_extent_map(em);

	ret = btrfs_add_ordered_extent(inode, start,		/* file_offset */
				       async_extent->ram_size,	/* num_bytes */
				       async_extent->ram_size,	/* ram_bytes */
				       ins.objectid,		/* disk_bytenr */
				       ins.offset,		/* disk_num_bytes */
				       0,			/* offset */
				       1 << BTRFS_ORDERED_COMPRESSED,
				       async_extent->compress_type);
	if (ret) {
		btrfs_drop_extent_cache(inode, start, end, 0);
		goto out_free_reserve;
	}
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);

	/* Clear dirty, set writeback and unlock the pages. */
	extent_clear_unlock_delalloc(inode, start, end,
			NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
			PAGE_UNLOCK | PAGE_START_WRITEBACK);
	if (btrfs_submit_compressed_write(inode, start,	/* file_offset */
			    async_extent->ram_size,	/* num_bytes */
			    ins.objectid,		/* disk_bytenr */
			    ins.offset,			/* compressed_len */
			    async_extent->pages,	/* compressed_pages */
			    async_extent->nr_pages,
			    async_chunk->write_flags,
			    async_chunk->blkcg_css, true)) {
		const u64 start = async_extent->start;
		const u64 end = start + async_extent->ram_size - 1;

		btrfs_writepage_endio_finish_ordered(inode, NULL, start, end, 0);

		extent_clear_unlock_delalloc(inode, start, end, NULL, 0,
					     PAGE_END_WRITEBACK | PAGE_SET_ERROR);
		free_async_extent_pages(async_extent);
	}
	*alloc_hint = ins.objectid + ins.offset;
	kfree(async_extent);
	return ret;

out_free_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_free:
	extent_clear_unlock_delalloc(inode, start, end,
				     NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DELALLOC_NEW |
				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
				     PAGE_UNLOCK | PAGE_START_WRITEBACK |
				     PAGE_END_WRITEBACK | PAGE_SET_ERROR);
	free_async_extent_pages(async_extent);
	kfree(async_extent);
	return ret;
}
/*
 * Phase two of compressed writeback.  This is the ordered portion of the code,
 * which only gets called in the order the work was queued.  We walk all the
 * async extents created by compress_file_range and send them down to the disk.
 */
static noinline void submit_compressed_extents(struct async_chunk *async_chunk)
{
	struct btrfs_inode *inode = BTRFS_I(async_chunk->inode);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct async_extent *async_extent;
	u64 alloc_hint = 0;
	int ret = 0;

	while (!list_empty(&async_chunk->extents)) {
		u64 extent_start;
		u64 ram_size;

		async_extent = list_entry(async_chunk->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);
		extent_start = async_extent->start;
		ram_size = async_extent->ram_size;

		ret = submit_one_async_extent(inode, async_chunk, async_extent,
					      &alloc_hint);
		if (ret)
			btrfs_debug(fs_info,
"async extent submission failed root=%lld inode=%llu start=%llu len=%llu ret=%d",
				    inode->root->root_key.objectid,
				    btrfs_ino(inode), extent_start, ram_size, ret);
	}
}
static u64 get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
				      u64 num_bytes)
{
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	u64 alloc_hint = 0;

	read_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, start, num_bytes);
	if (em) {
		/*
		 * if block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint.  If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			em = search_extent_mapping(em_tree, 0, 0);
			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
				alloc_hint = em->block_start;
			if (em)
				free_extent_map(em);
		} else {
			alloc_hint = em->block_start;
			free_extent_map(em);
		}
	}
	read_unlock(&em_tree->lock);

	return alloc_hint;
}
/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the call backs end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
static noinline int cow_file_range(struct btrfs_inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 alloc_hint = 0;
	u64 num_bytes;
	unsigned long ram_size;
	u64 cur_alloc_size = 0;
	u64 min_alloc_size;
	u64 blocksize = fs_info->sectorsize;
	struct btrfs_key ins;
	struct extent_map *em;
	unsigned clear_bits;
	unsigned long page_ops;
	bool extent_reserved = false;
	int ret = 0;

	if (btrfs_is_free_space_inode(inode)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	num_bytes = ALIGN(end - start + 1, blocksize);
	num_bytes = max(blocksize, num_bytes);
	ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy));

	inode_should_defrag(inode, start, end, num_bytes, SZ_64K);

	/*
	 * Due to the page size limit, for subpage we can only trigger the
	 * writeback for the dirty sectors of page, that means data writeback
	 * is doing more writeback than what we want.
	 *
	 * This is especially unexpected for some call sites like fallocate,
	 * where we only increase i_size after everything is done.
	 * This means we can trigger inline extent even if we didn't want to.
	 * So here we skip inline extent creation completely.
	 */
	if (start == 0 && fs_info->sectorsize == PAGE_SIZE) {
		u64 actual_end = min_t(u64, i_size_read(&inode->vfs_inode),
				       end + 1);

		/* lets try to make an inline extent */
		ret = cow_file_range_inline(inode, actual_end, 0,
					    BTRFS_COMPRESS_NONE, NULL, false);
		if (ret == 0) {
			/*
			 * We use DO_ACCOUNTING here because we need the
			 * delalloc_release_metadata to be run _after_ we drop
			 * our outstanding extent for clearing delalloc for this
			 * range.
			 */
			extent_clear_unlock_delalloc(inode, start, end,
				     locked_page,
				     EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
				     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
				     PAGE_START_WRITEBACK | PAGE_END_WRITEBACK);
			*nr_written = *nr_written +
			     (end - start + PAGE_SIZE) / PAGE_SIZE;
			*page_started = 1;
			/*
			 * locked_page is locked by the caller of
			 * writepage_delalloc(), not locked by
			 * __process_pages_contig().
			 *
			 * We can't let __process_pages_contig() to unlock it,
			 * as it doesn't have any subpage::writers recorded.
			 *
			 * Here we manually unlock the page, since the caller
			 * can't use page_started to determine if it's an
			 * inline extent or a compressed extent.
			 */
			unlock_page(locked_page);
			goto out;
		} else if (ret < 0) {
			goto out_unlock;
		}
	}

	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

	/*
	 * Relocation relies on the relocated extents to have exactly the same
	 * size as the original extents. Normally writeback for relocation data
	 * extents follows a NOCOW path because relocation preallocates the
	 * extents. However, due to an operation such as scrub turning a block
	 * group to RO mode, it may fallback to COW mode, so we must make sure
	 * an extent allocated during COW has exactly the requested size and can
	 * not be split into smaller extents, otherwise relocation breaks and
	 * fails during the stage where it updates the bytenr of file extent
	 * items.
	 */
	if (btrfs_is_data_reloc_root(root))
		min_alloc_size = num_bytes;
	else
		min_alloc_size = fs_info->sectorsize;

	while (num_bytes > 0) {
		cur_alloc_size = num_bytes;
		ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
					   min_alloc_size, 0, alloc_hint,
					   &ins, 1, 1);
		if (ret < 0)
			goto out_unlock;
		cur_alloc_size = ins.offset;
		extent_reserved = true;

		ram_size = ins.offset;
		em = create_io_em(inode, start, ins.offset, /* len */
				  start, /* orig_start */
				  ins.objectid, /* block_start */
				  ins.offset, /* block_len */
				  ins.offset, /* orig_block_len */
				  ram_size, /* ram_bytes */
				  BTRFS_COMPRESS_NONE, /* compress_type */
				  BTRFS_ORDERED_REGULAR /* type */);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out_reserve;
		}
		free_extent_map(em);

		ret = btrfs_add_ordered_extent(inode, start, ram_size, ram_size,
					       ins.objectid, cur_alloc_size, 0,
					       1 << BTRFS_ORDERED_REGULAR,
					       BTRFS_COMPRESS_NONE);
		if (ret)
			goto out_drop_extent_cache;

		if (btrfs_is_data_reloc_root(root)) {
			ret = btrfs_reloc_clone_csums(inode, start,
						      cur_alloc_size);
			/*
			 * Only drop cache here, and process as normal.
			 *
			 * We must not allow extent_clear_unlock_delalloc()
			 * at out_unlock label to free meta of this ordered
			 * extent, as its meta should be freed by
			 * btrfs_finish_ordered_io().
			 *
			 * So we must continue until @start is increased to
			 * skip current ordered extent.
			 */
			if (ret)
				btrfs_drop_extent_cache(inode, start,
						start + ram_size - 1, 0);
		}

		btrfs_dec_block_group_reservations(fs_info, ins.objectid);

		/*
		 * We're not doing compressed IO, don't unlock the first page
		 * (which the caller expects to stay locked), don't clear any
		 * dirty bits and don't set any writeback bits
		 *
		 * Do set the Ordered (Private2) bit so we know this page was
		 * properly setup for writepage.
		 */
		page_ops = unlock ? PAGE_UNLOCK : 0;
		page_ops |= PAGE_SET_ORDERED;

		extent_clear_unlock_delalloc(inode, start, start + ram_size - 1,
					     locked_page,
					     EXTENT_LOCKED | EXTENT_DELALLOC,
					     page_ops);
		if (num_bytes < cur_alloc_size)
			num_bytes = 0;
		else
			num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
		extent_reserved = false;

		/*
		 * btrfs_reloc_clone_csums() error, since start is increased
		 * extent_clear_unlock_delalloc() at out_unlock label won't
		 * free metadata of current ordered extent, we're OK to exit.
		 */
		if (ret)
			goto out_unlock;
	}
out:
	return ret;

out_drop_extent_cache:
	btrfs_drop_extent_cache(inode, start, start + ram_size - 1, 0);
out_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_unlock:
	clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
		EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
	page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;
	/*
	 * If we reserved an extent for our delalloc range (or a subrange) and
	 * failed to create the respective ordered extent, then it means that
	 * when we reserved the extent we decremented the extent's size from
	 * the data space_info's bytes_may_use counter and incremented the
	 * space_info's bytes_reserved counter by the same amount. We must make
	 * sure extent_clear_unlock_delalloc() does not try to decrement again
	 * the data space_info's bytes_may_use counter, therefore we do not pass
	 * it the flag EXTENT_CLEAR_DATA_RESV.
	 */
	if (extent_reserved) {
		extent_clear_unlock_delalloc(inode, start,
					     start + cur_alloc_size - 1,
					     locked_page, clear_bits, page_ops);
		start += cur_alloc_size;
		if (start >= end)
			goto out;
	}
	extent_clear_unlock_delalloc(inode, start, end, locked_page,
				     clear_bits | EXTENT_CLEAR_DATA_RESV,
				     page_ops);
	goto out;
}
/*
 * work queue call back to started compression on a file and pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
	struct async_chunk *async_chunk;
	int compressed_extents;

	async_chunk = container_of(work, struct async_chunk, work);

	compressed_extents = compress_file_range(async_chunk);
	if (compressed_extents == 0) {
		btrfs_add_delayed_iput(async_chunk->inode);
		async_chunk->inode = NULL;
	}
}
/*
 * work queue call back to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
	struct async_chunk *async_chunk = container_of(work, struct async_chunk,
						       work);
	struct btrfs_fs_info *fs_info = btrfs_work_owner(work);
	unsigned long nr_pages;

	nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
		PAGE_SHIFT;

	/*
	 * ->inode could be NULL if async_chunk_start has failed to compress,
	 * in which case we don't have anything to submit, yet we need to
	 * always adjust ->async_delalloc_pages as its paired with the init
	 * happening in cow_file_range_async
	 */
	if (async_chunk->inode)
		submit_compressed_extents(async_chunk);

	/* atomic_sub_return implies a barrier */
	if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
	    5 * SZ_1M)
		cond_wake_up_nomb(&fs_info->async_submit_wait);
}
static noinline void async_cow_free(struct btrfs_work *work)
{
	struct async_chunk *async_chunk;
	struct async_cow *async_cow;

	async_chunk = container_of(work, struct async_chunk, work);
	if (async_chunk->inode)
		btrfs_add_delayed_iput(async_chunk->inode);
	if (async_chunk->blkcg_css)
		css_put(async_chunk->blkcg_css);

	async_cow = async_chunk->async_cow;
	if (atomic_dec_and_test(&async_cow->num_chunks))
		kvfree(async_cow);
}
static int cow_file_range_async(struct btrfs_inode *inode,
				struct writeback_control *wbc,
				struct page *locked_page,
				u64 start, u64 end, int *page_started,
				unsigned long *nr_written)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc);
	struct async_cow *ctx;
	struct async_chunk *async_chunk;
	unsigned long nr_pages;
	u64 cur_end;
	u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K);
	int i;
	bool should_compress;
	unsigned nofs_flag;
	const unsigned int write_flags = wbc_to_write_flags(wbc);

	unlock_extent(&inode->io_tree, start, end);

	if (inode->flags & BTRFS_INODE_NOCOMPRESS &&
	    !btrfs_test_opt(fs_info, FORCE_COMPRESS)) {
		num_chunks = 1;
		should_compress = false;
	} else {
		should_compress = true;
	}

	nofs_flag = memalloc_nofs_save();
	ctx = kvmalloc(struct_size(ctx, chunks, num_chunks), GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);

	if (!ctx) {
		unsigned clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC |
			EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
			EXTENT_DO_ACCOUNTING;
		unsigned long page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK |
					 PAGE_END_WRITEBACK | PAGE_SET_ERROR;

		extent_clear_unlock_delalloc(inode, start, end, locked_page,
					     clear_bits, page_ops);
		return -ENOMEM;
	}

	async_chunk = ctx->chunks;
	atomic_set(&ctx->num_chunks, num_chunks);

	for (i = 0; i < num_chunks; i++) {
		if (should_compress)
			cur_end = min(end, start + SZ_512K - 1);
		else
			cur_end = end;

		/*
		 * igrab is called higher up in the call chain, take only the
		 * lightweight reference for the callback lifetime
		 */
		ihold(&inode->vfs_inode);
		async_chunk[i].async_cow = ctx;
		async_chunk[i].inode = &inode->vfs_inode;
		async_chunk[i].start = start;
		async_chunk[i].end = cur_end;
		async_chunk[i].write_flags = write_flags;
		INIT_LIST_HEAD(&async_chunk[i].extents);

		/*
		 * The locked_page comes all the way from writepage and its
		 * the original page we were actually given.  As we spread
		 * this large delalloc region across multiple async_chunk
		 * structs, only the first struct needs a pointer to locked_page
		 *
		 * This way we don't need racey decisions about who is supposed
		 * to unlock it.
		 */
		if (locked_page) {
			/*
			 * Depending on the compressibility, the pages might or
			 * might not go through async.  We want all of them to
			 * be accounted against wbc once.  Let's do it here
			 * before the paths diverge.  wbc accounting is used
			 * only for foreign writeback detection and doesn't
			 * need full accuracy.  Just account the whole thing
			 * against the first page.
			 */
			wbc_account_cgroup_owner(wbc, locked_page,
						 cur_end - start);
			async_chunk[i].locked_page = locked_page;
			locked_page = NULL;
		} else {
			async_chunk[i].locked_page = NULL;
		}

		if (blkcg_css != blkcg_root_css) {
			css_get(blkcg_css);
			async_chunk[i].blkcg_css = blkcg_css;
		} else {
			async_chunk[i].blkcg_css = NULL;
		}

		btrfs_init_work(&async_chunk[i].work, async_cow_start,
				async_cow_submit, async_cow_free);

		nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
		atomic_add(nr_pages, &fs_info->async_delalloc_pages);

		btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work);

		*nr_written += nr_pages;
		start = cur_end + 1;
	}
	*page_started = 1;
	return 0;
}
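
/*
 * Sizing note (illustrative, not in the original): the range is carved into
 * SZ_512K chunks, so e.g. a 3MiB delalloc range becomes
 * DIV_ROUND_UP(3MiB, 512KiB) = 6 async_chunk work items, each compressed and
 * submitted independently on the delalloc_workers queue.
 */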
static noinline int run_delalloc_zoned(struct btrfs_inode *inode,
				       struct page *locked_page, u64 start,
				       u64 end, int *page_started,
				       unsigned long *nr_written)
{
	int ret;

	ret = cow_file_range(inode, locked_page, start, end, page_started,
			     nr_written, 0);
	if (ret)
		return ret;

	if (*page_started)
		return 0;

	__set_page_dirty_nobuffers(locked_page);
	account_page_redirty(locked_page);
	extent_write_locked_range(&inode->vfs_inode, start, end);
	*page_started = 1;

	return 0;
}
static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
					u64 bytenr, u64 num_bytes)
{
	struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bytenr);
	struct btrfs_ordered_sum *sums;
	int ret;
	LIST_HEAD(list);

	ret = btrfs_lookup_csums_range(csum_root, bytenr,
				       bytenr + num_bytes - 1, &list, 0);
	if (ret == 0 && list_empty(&list))
		return 0;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	if (ret < 0)
		return ret;
	return 1;
}
static int fallback_to_cow(struct btrfs_inode *inode, struct page *locked_page,
			   const u64 start, const u64 end,
			   int *page_started, unsigned long *nr_written)
{
	const bool is_space_ino = btrfs_is_free_space_inode(inode);
	const bool is_reloc_ino = btrfs_is_data_reloc_root(inode->root);
	const u64 range_bytes = end + 1 - start;
	struct extent_io_tree *io_tree = &inode->io_tree;
	u64 range_start = start;
	u64 count;

	/*
	 * If EXTENT_NORESERVE is set it means that when the buffered write was
	 * made we had not enough available data space and therefore we did not
	 * reserve data space for it, since we though we could do NOCOW for the
	 * respective file range (either there is prealloc extent or the inode
	 * has the NOCOW bit set).
	 *
	 * However when we need to fallback to COW mode (because for example the
	 * block group for the corresponding extent was turned to RO mode by a
	 * scrub or relocation) we need to do the following:
	 *
	 * 1) We increment the bytes_may_use counter of the data space info.
	 *    If COW succeeds, it allocates a new data extent and after doing
	 *    that it decrements the space info's bytes_may_use counter and
	 *    increments its bytes_reserved counter by the same amount (we do
	 *    this at btrfs_add_reserved_bytes()).  So we need to increment the
	 *    bytes_may_use counter to compensate (when space is reserved at
	 *    buffered write time, the bytes_may_use counter is incremented);
	 *
	 * 2) We clear the EXTENT_NORESERVE bit from the range.  We do this so
	 *    that if the COW path fails for any reason, it decrements (through
	 *    extent_clear_unlock_delalloc()) the bytes_may_use counter of the
	 *    data space info, which we incremented in the step above.
	 *
	 * If we need to fallback to cow and the inode corresponds to a free
	 * space cache inode or an inode of the data relocation tree, we must
	 * also increment bytes_may_use of the data space_info for the same
	 * reason.  Space caches and relocated data extents always get a prealloc
	 * extent for them, however scrub or balance may have set the block
	 * group that contains that extent to RO mode and therefore force COW
	 * when starting writeback.
	 */
	count = count_range_bits(io_tree, &range_start, end, range_bytes,
				 EXTENT_NORESERVE, 0);
	if (count > 0 || is_space_ino || is_reloc_ino) {
		u64 bytes = count;
		struct btrfs_fs_info *fs_info = inode->root->fs_info;
		struct btrfs_space_info *sinfo = fs_info->data_sinfo;

		if (is_space_ino || is_reloc_ino)
			bytes = range_bytes;

		spin_lock(&sinfo->lock);
		btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes);
		spin_unlock(&sinfo->lock);

		if (count > 0)
			clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE,
					 0, 0, NULL);
	}

	return cow_file_range(inode, locked_page, start, end, page_started,
			      nr_written, 1);
}
struct can_nocow_file_extent_args {
	/* Input fields. */

	/* Start file offset of the range we want to NOCOW. */
	u64 start;
	/* End file offset (inclusive) of the range we want to NOCOW. */
	u64 end;
	bool writeback_path;
	bool strict;
	/*
	 * Free the path passed to can_nocow_file_extent() once it's not needed
	 * anymore.
	 */
	bool free_path;

	/* Output fields. Only set when can_nocow_file_extent() returns 1. */

	u64 disk_bytenr;
	u64 disk_num_bytes;
	u64 extent_offset;
	/* Number of bytes that can be written to in NOCOW mode. */
	u64 num_bytes;
};
/*
 * Check if we can NOCOW the file extent that the path points to.
 * This function may return with the path released, so the caller should check
 * if path->nodes[0] is NULL or not if it needs to use the path afterwards.
 *
 * Returns: < 0 on error
 *            0 if we can not NOCOW
 *            1 if we can NOCOW
 */
static int can_nocow_file_extent(struct btrfs_path *path,
				 struct btrfs_key *key,
				 struct btrfs_inode *inode,
				 struct can_nocow_file_extent_args *args)
{
	const bool is_freespace_inode = btrfs_is_free_space_inode(inode);
	struct extent_buffer *leaf = path->nodes[0];
	struct btrfs_root *root = inode->root;
	struct btrfs_file_extent_item *fi;
	u64 extent_end;
	u8 extent_type;
	int can_nocow = 0;
	int ret = 0;

	fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
	extent_type = btrfs_file_extent_type(leaf, fi);

	if (extent_type == BTRFS_FILE_EXTENT_INLINE)
		goto out;

	/* Can't access these fields unless we know it's not an inline extent. */
	args->disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	args->disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	args->extent_offset = btrfs_file_extent_offset(leaf, fi);

	if (!(inode->flags & BTRFS_INODE_NODATACOW) &&
	    extent_type == BTRFS_FILE_EXTENT_REG)
		goto out;

	/*
	 * If the extent was created before the generation where the last snapshot
	 * for its subvolume was created, then this implies the extent is shared,
	 * hence we must COW.
	 */
	if (!args->strict &&
	    btrfs_file_extent_generation(leaf, fi) <=
	    btrfs_root_last_snapshot(&root->root_item))
		goto out;

	/* An explicit hole, must COW. */
	if (args->disk_bytenr == 0)
		goto out;

	/* Compressed/encrypted/encoded extents must be COWed. */
	if (btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		goto out;

	extent_end = btrfs_file_extent_end(path);

	/*
	 * The following checks can be expensive, as they need to take other
	 * locks and do btree or rbtree searches, so release the path to avoid
	 * blocking other tasks for too long.
	 */
	btrfs_release_path(path);

	ret = btrfs_cross_ref_exist(root, btrfs_ino(inode),
				    key->offset - args->extent_offset,
				    args->disk_bytenr, false, path);
	WARN_ON_ONCE(ret > 0 && is_freespace_inode);
	if (ret != 0)
		goto out;

	if (args->free_path) {
		/*
		 * We don't need the path anymore, plus through the
		 * csum_exist_in_range() call below we will end up allocating
		 * another path.  So free the path to avoid unnecessary extra
		 * memory usage.
		 */
		btrfs_free_path(path);
		path = NULL;
	}

	/* If there are pending snapshots for this root, we must COW. */
	if (args->writeback_path && !is_freespace_inode &&
	    atomic_read(&root->snapshot_force_cow))
		goto out;

	args->disk_bytenr += args->extent_offset;
	args->disk_bytenr += args->start - key->offset;
	args->num_bytes = min(args->end + 1, extent_end) - args->start;

	/*
	 * Force COW if csums exist in the range. This ensures that csums for a
	 * given extent are either valid or do not exist.
	 */
	ret = csum_exist_in_range(root->fs_info, args->disk_bytenr, args->num_bytes);
	WARN_ON_ONCE(ret > 0 && is_freespace_inode);
	if (ret != 0)
		goto out;

	can_nocow = 1;
out:
	if (args->free_path && path)
		btrfs_free_path(path);

	return ret < 0 ? ret : can_nocow;
}
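
/*
 * Illustrative example (not in the original): for a file extent item at
 * key->offset == 0 whose data starts at disk bytenr X with extent_offset 4K,
 * a NOCOW write at args->start == 8K ends up with
 * args->disk_bytenr == X + 4K + 8K, i.e. the two "+=" adjustments above map
 * the file offset onto the on-disk location of the existing extent.
 */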
/*
 * when nocow writeback call back.  This checks for snapshots or COW copies
 * of the extents that exist in the file, and COWs the file as required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 */
static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
				       struct page *locked_page,
				       const u64 start, const u64 end,
				       int *page_started,
				       unsigned long *nr_written)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_root *root = inode->root;
	struct btrfs_path *path;
	u64 cow_start = (u64)-1;
	u64 cur_offset = start;
	int ret;
	bool check_prev = true;
	u64 ino = btrfs_ino(inode);
	struct btrfs_block_group *bg;
	bool nocow = false;
	struct can_nocow_file_extent_args nocow_args = { 0 };

	path = btrfs_alloc_path();
	if (!path) {
		extent_clear_unlock_delalloc(inode, start, end, locked_page,
					     EXTENT_LOCKED | EXTENT_DELALLOC |
					     EXTENT_DO_ACCOUNTING |
					     EXTENT_DEFRAG, PAGE_UNLOCK |
					     PAGE_START_WRITEBACK |
					     PAGE_END_WRITEBACK);
		return -ENOMEM;
	}

	nocow_args.end = end;
	nocow_args.writeback_path = true;

	while (1) {
		struct btrfs_key found_key;
		struct btrfs_file_extent_item *fi;
		struct extent_buffer *leaf;
		u64 extent_end;
		u64 ram_bytes;
		u64 nocow_end;
		int extent_type;

		nocow = false;

		ret = btrfs_lookup_file_extent(NULL, root, path, ino,
					       cur_offset, 0);
		if (ret < 0)
			goto error;

		/*
		 * If there is no extent for our range when doing the initial
		 * search, then go back to the previous slot as it will be the
		 * one containing the search offset
		 */
		if (ret > 0 && path->slots[0] > 0 && check_prev) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);
			if (found_key.objectid == ino &&
			    found_key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		check_prev = false;
next_slot:
		/* Go to next leaf if we have exhausted the current one */
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				if (cow_start != (u64)-1)
					cur_offset = cow_start;
				goto error;
			}
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		/* Didn't find anything for our INO */
		if (found_key.objectid > ino)
			break;
		/*
		 * Keep searching until we find an EXTENT_ITEM or there are no
		 * more extents for this inode
		 */
		if (WARN_ON_ONCE(found_key.objectid < ino) ||
		    found_key.type < BTRFS_EXTENT_DATA_KEY) {
			path->slots[0]++;
			goto next_slot;
		}

		/* Found key is not EXTENT_DATA_KEY or starts after req range */
		if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
		    found_key.offset > end)
			break;

		/*
		 * If the found extent starts after requested offset, then
		 * adjust extent_end to be right before this extent begins
		 */
		if (found_key.offset > cur_offset) {
			extent_end = found_key.offset;
			extent_type = 0;
			goto out_check;
		}

		/*
		 * Found extent which begins before our range and potentially
		 * intersect it
		 */
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);
		/* If this is triggered then we have a memory corruption. */
		ASSERT(extent_type < BTRFS_NR_FILE_EXTENT_TYPES);
		if (WARN_ON(extent_type >= BTRFS_NR_FILE_EXTENT_TYPES)) {
			ret = -EUCLEAN;
			goto error;
		}
		ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
		extent_end = btrfs_file_extent_end(path);

		/*
		 * If the extent we got ends before our current offset, skip to
		 * the next extent.
		 */
		if (extent_end <= cur_offset) {
			path->slots[0]++;
			goto next_slot;
		}

		nocow_args.start = cur_offset;
		ret = can_nocow_file_extent(path, &found_key, inode, &nocow_args);
		if (ret < 0) {
			if (cow_start != (u64)-1)
				cur_offset = cow_start;
			goto error;
		} else if (ret == 0) {
			goto out_check;
		}

		ret = 0;
		bg = btrfs_inc_nocow_writers(fs_info, nocow_args.disk_bytenr);
		if (bg)
			nocow = true;
out_check:
		/*
		 * If nocow is false then record the beginning of the range
		 * that needs to be COWed
		 */
		if (!nocow) {
			if (cow_start == (u64)-1)
				cow_start = cur_offset;
			cur_offset = extent_end;
			if (cur_offset > end)
				break;
			if (!path->nodes[0])
				continue;
			path->slots[0]++;
			goto next_slot;
		}

		/*
		 * COW range from cow_start to found_key.offset - 1. As the key
		 * will contain the beginning of the first extent that can be
		 * NOCOW, following one which needs to be COW'ed
		 */
		if (cow_start != (u64)-1) {
			ret = fallback_to_cow(inode, locked_page,
					      cow_start, found_key.offset - 1,
					      page_started, nr_written);
			if (ret)
				goto error;
			cow_start = (u64)-1;
		}

		nocow_end = cur_offset + nocow_args.num_bytes - 1;

		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			u64 orig_start = found_key.offset - nocow_args.extent_offset;
			struct extent_map *em;

			em = create_io_em(inode, cur_offset, nocow_args.num_bytes,
					  orig_start,
					  nocow_args.disk_bytenr, /* block_start */
					  nocow_args.num_bytes, /* block_len */
					  nocow_args.disk_num_bytes, /* orig_block_len */
					  ram_bytes, BTRFS_COMPRESS_NONE,
					  BTRFS_ORDERED_PREALLOC);
			if (IS_ERR(em)) {
				ret = PTR_ERR(em);
				goto error;
			}
			free_extent_map(em);
			ret = btrfs_add_ordered_extent(inode,
					cur_offset, nocow_args.num_bytes,
					nocow_args.num_bytes,
					nocow_args.disk_bytenr,
					nocow_args.num_bytes, 0,
					1 << BTRFS_ORDERED_PREALLOC,
					BTRFS_COMPRESS_NONE);
			if (ret) {
				btrfs_drop_extent_cache(inode, cur_offset,
							nocow_end, 0);
				goto error;
			}
		} else {
			ret = btrfs_add_ordered_extent(inode, cur_offset,
						       nocow_args.num_bytes,
						       nocow_args.num_bytes,
						       nocow_args.disk_bytenr,
						       nocow_args.num_bytes,
						       0,
						       1 << BTRFS_ORDERED_NOCOW,
						       BTRFS_COMPRESS_NONE);
			if (ret)
				goto error;
		}

		if (nocow) {
			btrfs_dec_nocow_writers(bg);
			nocow = false;
		}

		if (btrfs_is_data_reloc_root(root))
			/*
			 * Error handled later, as we must prevent
			 * extent_clear_unlock_delalloc() in error handler
			 * from freeing metadata of created ordered extent.
			 */
			ret = btrfs_reloc_clone_csums(inode, cur_offset,
						      nocow_args.num_bytes);

		extent_clear_unlock_delalloc(inode, cur_offset, nocow_end,
					     locked_page, EXTENT_LOCKED |
					     EXTENT_DELALLOC |
					     EXTENT_CLEAR_DATA_RESV,
					     PAGE_UNLOCK | PAGE_SET_ORDERED);

		cur_offset = extent_end;

		/*
		 * btrfs_reloc_clone_csums() error, now we're OK to call error
		 * handler, as metadata for created ordered extent will only
		 * be freed by btrfs_finish_ordered_io().
		 */
		if (ret)
			goto error;
		if (cur_offset > end)
			break;
	}
	btrfs_release_path(path);

	if (cur_offset <= end && cow_start == (u64)-1)
		cow_start = cur_offset;

	if (cow_start != (u64)-1) {
		cur_offset = end;
		ret = fallback_to_cow(inode, locked_page, cow_start, end,
				      page_started, nr_written);
		if (ret)
			goto error;
	}

error:
	if (nocow)
		btrfs_dec_nocow_writers(bg);

	if (ret && cur_offset < end)
		extent_clear_unlock_delalloc(inode, cur_offset, end,
					     locked_page, EXTENT_LOCKED |
					     EXTENT_DELALLOC | EXTENT_DEFRAG |
					     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
					     PAGE_START_WRITEBACK |
					     PAGE_END_WRITEBACK);
	btrfs_free_path(path);
	return ret;
}
static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end)
{
	if (inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) {
		if (inode->defrag_bytes &&
		    test_range_bit(&inode->io_tree, start, end, EXTENT_DEFRAG,
				   0, NULL))
			return false;
		return true;
	}
	return false;
}
/*
 * Function to process delayed allocation (create CoW) for ranges which are
 * being touched for the first time.
 */
int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page,
		u64 start, u64 end, int *page_started, unsigned long *nr_written,
		struct writeback_control *wbc)
{
	int ret;
	const bool zoned = btrfs_is_zoned(inode->root->fs_info);

	/*
	 * The range must cover part of the @locked_page, or the returned
	 * @page_started can confuse the caller.
	 */
	ASSERT(!(end <= page_offset(locked_page) ||
		 start >= page_offset(locked_page) + PAGE_SIZE));

	if (should_nocow(inode, start, end)) {
		/*
		 * Normally on a zoned device we're only doing COW writes, but
		 * in case of relocation on a zoned filesystem we have taken
		 * precaution, that we're only writing sequentially. It's safe
		 * to use run_delalloc_nocow() here, like for regular
		 * preallocated inodes.
		 */
		ASSERT(!zoned || btrfs_is_data_reloc_root(inode->root));
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, nr_written);
	} else if (!btrfs_inode_can_compress(inode) ||
		   !inode_need_compress(inode, start, end)) {
		if (zoned)
			ret = run_delalloc_zoned(inode, locked_page, start, end,
						 page_started, nr_written);
		else
			ret = cow_file_range(inode, locked_page, start, end,
					     page_started, nr_written, 1);
	} else {
		set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags);
		ret = cow_file_range_async(inode, wbc, locked_page, start, end,
					   page_started, nr_written);
	}
	if (ret)
		btrfs_cleanup_ordered_extents(inode, locked_page, start,
					      end - start + 1);
	return ret;
}
void btrfs_split_delalloc_extent(struct inode *inode,
				 struct extent_state *orig, u64 split)
{
	u64 size;

	/* not delalloc, ignore it */
	if (!(orig->state & EXTENT_DELALLOC))
		return;

	size = orig->end - orig->start + 1;
	if (size > BTRFS_MAX_EXTENT_SIZE) {
		u32 num_extents;
		u64 new_size;

		/*
		 * See the explanation in btrfs_merge_delalloc_extent, the same
		 * applies here, just in reverse.
		 */
		new_size = orig->end - split + 1;
		num_extents = count_max_extents(new_size);
		new_size = split - orig->start;
		num_extents += count_max_extents(new_size);
		if (count_max_extents(size) >= num_extents)
			return;
	}

	spin_lock(&BTRFS_I(inode)->lock);
	btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
	spin_unlock(&BTRFS_I(inode)->lock);
}
/*
 * Handle merged delayed allocation extents so we can keep track of new extents
 * that are just merged onto old extents, such as when we are doing sequential
 * writes, so we can properly account for the metadata space we'll need.
 */
void btrfs_merge_delalloc_extent(struct inode *inode, struct extent_state *new,
				 struct extent_state *other)
{
	u64 new_size, old_size;
	u32 num_extents;

	/* not delalloc, ignore it */
	if (!(other->state & EXTENT_DELALLOC))
		return;

	if (new->start > other->start)
		new_size = new->end - other->start + 1;
	else
		new_size = other->end - new->start + 1;

	/* we're not bigger than the max, unreserve the space and go */
	if (new_size <= BTRFS_MAX_EXTENT_SIZE) {
		spin_lock(&BTRFS_I(inode)->lock);
		btrfs_mod_outstanding_extents(BTRFS_I(inode), -1);
		spin_unlock(&BTRFS_I(inode)->lock);
		return;
	}

	/*
	 * We have to add up either side to figure out how many extents were
	 * accounted for before we merged into one big extent. If the number of
	 * extents we accounted for is <= the amount we need for the new range
	 * then we can return, otherwise drop. Think of it like this
	 *
	 * [ 4k][MAX_SIZE]
	 *
	 * So we've grown the extent by a MAX_SIZE extent, this would mean we
	 * need 2 outstanding extents, on one side we have 1 and the other side
	 * we have 1 so they are == and we can return. But in this case
	 *
	 * [MAX_SIZE+4k][MAX_SIZE+4k]
	 *
	 * Each range on their own accounts for 2 extents, but merged together
	 * they are only 3 extents worth of accounting, so we need to drop in
	 * this case.
	 */
	old_size = other->end - other->start + 1;
	num_extents = count_max_extents(old_size);
	old_size = new->end - new->start + 1;
	num_extents += count_max_extents(old_size);
	if (count_max_extents(new_size) >= num_extents)
		return;

	spin_lock(&BTRFS_I(inode)->lock);
	btrfs_mod_outstanding_extents(BTRFS_I(inode), -1);
	spin_unlock(&BTRFS_I(inode)->lock);
}
static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
				      struct inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);

	spin_lock(&root->delalloc_lock);
	if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
		list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
			      &root->delalloc_inodes);
		set_bit(BTRFS_INODE_IN_DELALLOC_LIST,
			&BTRFS_I(inode)->runtime_flags);
		root->nr_delalloc_inodes++;
		if (root->nr_delalloc_inodes == 1) {
			spin_lock(&fs_info->delalloc_root_lock);
			BUG_ON(!list_empty(&root->delalloc_root));
			list_add_tail(&root->delalloc_root,
				      &fs_info->delalloc_roots);
			spin_unlock(&fs_info->delalloc_root_lock);
		}
	}
	spin_unlock(&root->delalloc_lock);
}
void __btrfs_del_delalloc_inode(struct btrfs_root *root,
				struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!list_empty(&inode->delalloc_inodes)) {
		list_del_init(&inode->delalloc_inodes);
		clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
			  &inode->runtime_flags);
		root->nr_delalloc_inodes--;
		if (!root->nr_delalloc_inodes) {
			ASSERT(list_empty(&root->delalloc_inodes));
			spin_lock(&fs_info->delalloc_root_lock);
			BUG_ON(list_empty(&root->delalloc_root));
			list_del_init(&root->delalloc_root);
			spin_unlock(&fs_info->delalloc_root_lock);
		}
	}
}
static void btrfs_del_delalloc_inode(struct btrfs_root *root,
				     struct btrfs_inode *inode)
{
	spin_lock(&root->delalloc_lock);
	__btrfs_del_delalloc_inode(root, inode);
	spin_unlock(&root->delalloc_lock);
}
/*
 * Properly track delayed allocation bytes in the inode and to maintain the
 * list of inodes that have pending delalloc work to be done.
 */
void btrfs_set_delalloc_extent(struct inode *inode, struct extent_state *state,
			       unsigned *bits)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);

	if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC))
		WARN_ON(1);
	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		u64 len = state->end + 1 - state->start;
		u32 num_extents = count_max_extents(len);
		bool do_list = !btrfs_is_free_space_inode(BTRFS_I(inode));

		spin_lock(&BTRFS_I(inode)->lock);
		btrfs_mod_outstanding_extents(BTRFS_I(inode), num_extents);
		spin_unlock(&BTRFS_I(inode)->lock);

		/* For sanity tests */
		if (btrfs_is_testing(fs_info))
			return;

		percpu_counter_add_batch(&fs_info->delalloc_bytes, len,
					 fs_info->delalloc_batch);
		spin_lock(&BTRFS_I(inode)->lock);
		BTRFS_I(inode)->delalloc_bytes += len;
		if (*bits & EXTENT_DEFRAG)
			BTRFS_I(inode)->defrag_bytes += len;
		if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
					 &BTRFS_I(inode)->runtime_flags))
			btrfs_add_delalloc_inodes(root, inode);
		spin_unlock(&BTRFS_I(inode)->lock);
	}

	if (!(state->state & EXTENT_DELALLOC_NEW) &&
	    (*bits & EXTENT_DELALLOC_NEW)) {
		spin_lock(&BTRFS_I(inode)->lock);
		BTRFS_I(inode)->new_delalloc_bytes += state->end + 1 -
			state->start;
		spin_unlock(&BTRFS_I(inode)->lock);
	}
}
/*
 * Once a range is no longer delalloc this function ensures that proper
 * accounting happens.
 */
void btrfs_clear_delalloc_extent(struct inode *vfs_inode,
				 struct extent_state *state, unsigned *bits)
{
	struct btrfs_inode *inode = BTRFS_I(vfs_inode);
	struct btrfs_fs_info *fs_info = btrfs_sb(vfs_inode->i_sb);
	u64 len = state->end + 1 - state->start;
	u32 num_extents = count_max_extents(len);

	if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG)) {
		spin_lock(&inode->lock);
		inode->defrag_bytes -= len;
		spin_unlock(&inode->lock);
	}

	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = inode->root;
		bool do_list = !btrfs_is_free_space_inode(inode);

		spin_lock(&inode->lock);
		btrfs_mod_outstanding_extents(inode, -num_extents);
		spin_unlock(&inode->lock);

		/*
		 * We don't reserve metadata space for space cache inodes so we
		 * don't need to call delalloc_release_metadata if there is an
		 * error.
		 */
		if (*bits & EXTENT_CLEAR_META_RESV &&
		    root != fs_info->tree_root)
			btrfs_delalloc_release_metadata(inode, len, false);

		/* For sanity tests. */
		if (btrfs_is_testing(fs_info))
			return;

		if (!btrfs_is_data_reloc_root(root) &&
		    do_list && !(state->state & EXTENT_NORESERVE) &&
		    (*bits & EXTENT_CLEAR_DATA_RESV))
			btrfs_free_reserved_data_space_noquota(fs_info, len);

		percpu_counter_add_batch(&fs_info->delalloc_bytes, -len,
					 fs_info->delalloc_batch);
		spin_lock(&inode->lock);
		inode->delalloc_bytes -= len;
		if (do_list && inode->delalloc_bytes == 0 &&
		    test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
			     &inode->runtime_flags))
			btrfs_del_delalloc_inode(root, inode);
		spin_unlock(&inode->lock);
	}

	if ((state->state & EXTENT_DELALLOC_NEW) &&
	    (*bits & EXTENT_DELALLOC_NEW)) {
		spin_lock(&inode->lock);
		ASSERT(inode->new_delalloc_bytes >= len);
		inode->new_delalloc_bytes -= len;
		if (*bits & EXTENT_ADD_INODE_BYTES)
			inode_add_bytes(&inode->vfs_inode, len);
		spin_unlock(&inode->lock);
	}
}
/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time. All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static blk_status_t btrfs_submit_bio_start(struct inode *inode, struct bio *bio,
					   u64 dio_file_offset)
{
	return btrfs_csum_one_bio(BTRFS_I(inode), bio, (u64)-1, false);
}
/*
 * Split an extent_map at [start, start + len]
 *
 * This function is intended to be used only for extract_ordered_extent().
 */
static int split_zoned_em(struct btrfs_inode *inode, u64 start, u64 len,
			  u64 pre, u64 post)
{
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	struct extent_map *split_pre = NULL;
	struct extent_map *split_mid = NULL;
	struct extent_map *split_post = NULL;
	int ret = 0;
	unsigned long flags;

	/* Sanity check */
	if (pre == 0 && post == 0)
		return 0;

	split_pre = alloc_extent_map();
	if (pre)
		split_mid = alloc_extent_map();
	if (post)
		split_post = alloc_extent_map();
	if (!split_pre || (pre && !split_mid) || (post && !split_post)) {
		ret = -ENOMEM;
		goto out;
	}

	ASSERT(pre + post < len);

	lock_extent(&inode->io_tree, start, start + len - 1);
	write_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (!em) {
		ret = -EIO;
		goto out_unlock;
	}

	ASSERT(em->len == len);
	ASSERT(!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags));
	ASSERT(em->block_start < EXTENT_MAP_LAST_BYTE);
	ASSERT(test_bit(EXTENT_FLAG_PINNED, &em->flags));
	ASSERT(!test_bit(EXTENT_FLAG_LOGGING, &em->flags));
	ASSERT(!list_empty(&em->list));

	flags = em->flags;
	clear_bit(EXTENT_FLAG_PINNED, &em->flags);

	/* First, replace the em with a new extent_map starting from em->start */
	split_pre->start = em->start;
	split_pre->len = (pre ? pre : em->len - post);
	split_pre->orig_start = split_pre->start;
	split_pre->block_start = em->block_start;
	split_pre->block_len = split_pre->len;
	split_pre->orig_block_len = split_pre->block_len;
	split_pre->ram_bytes = split_pre->len;
	split_pre->flags = flags;
	split_pre->compress_type = em->compress_type;
	split_pre->generation = em->generation;

	replace_extent_mapping(em_tree, em, split_pre, 1);

	/*
	 * Now we only have an extent_map at:
	 *     [em->start, em->start + pre] if pre != 0
	 *     [em->start, em->start + em->len - post] if pre == 0
	 */

	if (pre) {
		/* Insert the middle extent_map */
		split_mid->start = em->start + pre;
		split_mid->len = em->len - pre - post;
		split_mid->orig_start = split_mid->start;
		split_mid->block_start = em->block_start + pre;
		split_mid->block_len = split_mid->len;
		split_mid->orig_block_len = split_mid->block_len;
		split_mid->ram_bytes = split_mid->len;
		split_mid->flags = flags;
		split_mid->compress_type = em->compress_type;
		split_mid->generation = em->generation;
		add_extent_mapping(em_tree, split_mid, 1);
	}

	if (post) {
		split_post->start = em->start + em->len - post;
		split_post->len = post;
		split_post->orig_start = split_post->start;
		split_post->block_start = em->block_start + em->len - post;
		split_post->block_len = split_post->len;
		split_post->orig_block_len = split_post->block_len;
		split_post->ram_bytes = split_post->len;
		split_post->flags = flags;
		split_post->compress_type = em->compress_type;
		split_post->generation = em->generation;
		add_extent_mapping(em_tree, split_post, 1);
	}

	/* Once for us */
	free_extent_map(em);
	/* Once for the tree */
	free_extent_map(em);

out_unlock:
	write_unlock(&em_tree->lock);
	unlock_extent(&inode->io_tree, start, start + len - 1);
out:
	free_extent_map(split_pre);
	free_extent_map(split_mid);
	free_extent_map(split_post);

	return ret;
}
static blk_status_t extract_ordered_extent(struct btrfs_inode *inode,
					   struct bio *bio, loff_t file_offset)
{
	struct btrfs_ordered_extent *ordered;
	u64 start = (u64)bio->bi_iter.bi_sector << SECTOR_SHIFT;
	u64 file_len;
	u64 len = bio->bi_iter.bi_size;
	u64 end = start + len;
	u64 ordered_end;
	u64 pre, post;
	int ret = 0;

	ordered = btrfs_lookup_ordered_extent(inode, file_offset);
	if (WARN_ON_ONCE(!ordered))
		return BLK_STS_IOERR;

	/* No need to split */
	if (ordered->disk_num_bytes == len)
		goto out;

	/* We cannot split once end_bio'd ordered extent */
	if (WARN_ON_ONCE(ordered->bytes_left != ordered->disk_num_bytes)) {
		ret = -EINVAL;
		goto out;
	}

	/* We cannot split a compressed ordered extent */
	if (WARN_ON_ONCE(ordered->disk_num_bytes != ordered->num_bytes)) {
		ret = -EINVAL;
		goto out;
	}

	ordered_end = ordered->disk_bytenr + ordered->disk_num_bytes;
	/* bio must be in one ordered extent */
	if (WARN_ON_ONCE(start < ordered->disk_bytenr || end > ordered_end)) {
		ret = -EINVAL;
		goto out;
	}

	/* Checksum list should be empty */
	if (WARN_ON_ONCE(!list_empty(&ordered->list))) {
		ret = -EINVAL;
		goto out;
	}

	file_len = ordered->num_bytes;
	pre = start - ordered->disk_bytenr;
	post = ordered_end - end;

	ret = btrfs_split_ordered_extent(ordered, pre, post);
	if (ret)
		goto out;
	ret = split_zoned_em(inode, file_offset, file_len, pre, post);

out:
	btrfs_put_ordered_extent(ordered);

	return errno_to_blk_status(ret);
}
/*
 * extent_io.c submission hook. This does the right thing for csum calculation
 * on write, or reading the csums from the tree before a read.
 *
 * Rules about async/sync submit,
 * a) read:				sync submit
 *
 * b) write without checksum:		sync submit
 *
 * c) write with checksum:
 *    c-1) if bio is issued by fsync:	sync submit
 *         (sync_writers != 0)
 *
 *    c-2) if root is reloc root:	sync submit
 *         (only in case of buffered IO)
 *
 *    c-3) otherwise:			async submit
 */
void btrfs_submit_data_bio(struct inode *inode, struct bio *bio,
			   int mirror_num, enum btrfs_compression_type compress_type)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA;
	blk_status_t ret = 0;
	int skip_sum;
	int async = !atomic_read(&BTRFS_I(inode)->sync_writers);

	skip_sum = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) ||
		   test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state);

	if (btrfs_is_free_space_inode(BTRFS_I(inode)))
		metadata = BTRFS_WQ_ENDIO_FREE_SPACE;

	if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
		struct page *page = bio_first_bvec_all(bio)->bv_page;
		loff_t file_offset = page_offset(page);

		ret = extract_ordered_extent(BTRFS_I(inode), bio, file_offset);
		if (ret)
			goto out;
	}

	if (btrfs_op(bio) != BTRFS_MAP_WRITE) {
		ret = btrfs_bio_wq_end_io(fs_info, bio, metadata);
		if (ret)
			goto out;

		if (compress_type != BTRFS_COMPRESS_NONE) {
			/*
			 * btrfs_submit_compressed_read will handle completing
			 * the bio if there were any errors, so just return
			 * here.
			 */
			btrfs_submit_compressed_read(inode, bio, mirror_num);
			return;
		} else {
			/*
			 * Lookup bio sums does extra checks around whether we
			 * need to csum or not, which is why we ignore skip_sum
			 * here.
			 */
			ret = btrfs_lookup_bio_sums(inode, bio, NULL);
			if (ret)
				goto out;
		}
		goto mapit;
	} else if (async && !skip_sum) {
		/* csum items have already been cloned */
		if (btrfs_is_data_reloc_root(root))
			goto mapit;
		/* we're doing a write, do the async checksumming */
		ret = btrfs_wq_submit_bio(inode, bio, mirror_num,
					  0, btrfs_submit_bio_start);
		goto out;
	} else if (!skip_sum) {
		ret = btrfs_csum_one_bio(BTRFS_I(inode), bio, (u64)-1, false);
		if (ret)
			goto out;
	}

mapit:
	ret = btrfs_map_bio(fs_info, bio, mirror_num);

out:
	if (ret) {
		bio->bi_status = ret;
		bio_endio(bio);
	}
}
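/*
 * Note on rule c-1 above (illustrative, not part of the original file):
 * 'async' is false whenever sync_writers is elevated, which the fsync path
 * does, so fsync-issued writes take the synchronous btrfs_csum_one_bio()
 * branch instead of the workqueue submission.
 */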
/*
 * given a list of ordered sums record them in the inode. This happens
 * at IO completion time based on sums calculated at bio submission time.
 */
static int add_pending_csums(struct btrfs_trans_handle *trans,
			     struct list_head *list)
{
	struct btrfs_ordered_sum *sum;
	struct btrfs_root *csum_root = NULL;
	int ret;

	list_for_each_entry(sum, list, list) {
		trans->adding_csums = true;
		if (!csum_root)
			csum_root = btrfs_csum_root(trans->fs_info,
						    sum->bytenr);
		ret = btrfs_csum_file_blocks(trans, csum_root, sum);
		trans->adding_csums = false;
		if (ret)
			return ret;
	}
	return 0;
}
static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
					 const u64 start,
					 const u64 len,
					 struct extent_state **cached_state)
{
	u64 search_start = start;
	const u64 end = start + len - 1;

	while (search_start < end) {
		const u64 search_len = end - search_start + 1;
		struct extent_map *em;
		u64 em_len;
		int ret = 0;

		em = btrfs_get_extent(inode, NULL, 0, search_start, search_len);
		if (IS_ERR(em))
			return PTR_ERR(em);

		if (em->block_start != EXTENT_MAP_HOLE)
			goto next;

		em_len = em->len;
		if (em->start < search_start)
			em_len -= search_start - em->start;
		if (em_len > search_len)
			em_len = search_len;

		ret = set_extent_bit(&inode->io_tree, search_start,
				     search_start + em_len - 1,
				     EXTENT_DELALLOC_NEW, 0, NULL, cached_state,
				     GFP_NOFS, NULL);
next:
		search_start = extent_map_end(em);
		free_extent_map(em);
		if (ret)
			return ret;
	}
	return 0;
}
int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
			      unsigned int extra_bits,
			      struct extent_state **cached_state)
{
	WARN_ON(PAGE_ALIGNED(end));

	if (start >= i_size_read(&inode->vfs_inode) &&
	    !(inode->flags & BTRFS_INODE_PREALLOC)) {
		/*
		 * There can't be any extents following eof in this case so just
		 * set the delalloc new bit for the range directly.
		 */
		extra_bits |= EXTENT_DELALLOC_NEW;
	} else {
		int ret;

		ret = btrfs_find_new_delalloc_bytes(inode, start,
						    end + 1 - start,
						    cached_state);
		if (ret)
			return ret;
	}

	return set_extent_delalloc(&inode->io_tree, start, end, extra_bits,
				   cached_state);
}
/* see btrfs_writepage_start_hook for details on why this is required */
struct btrfs_writepage_fixup {
	struct page *page;
	struct inode *inode;
	struct btrfs_work work;
};
static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct extent_changeset *data_reserved = NULL;
	struct page *page;
	struct btrfs_inode *inode;
	u64 page_start;
	u64 page_end;
	int ret = 0;
	bool free_delalloc_space = true;

	fixup = container_of(work, struct btrfs_writepage_fixup, work);
	page = fixup->page;
	inode = BTRFS_I(fixup->inode);
	page_start = page_offset(page);
	page_end = page_offset(page) + PAGE_SIZE - 1;

	/*
	 * This is similar to page_mkwrite, we need to reserve the space before
	 * we take the page lock.
	 */
	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
					   PAGE_SIZE);
again:
	lock_page(page);

	/*
	 * Before we queued this fixup, we took a reference on the page.
	 * page->mapping may go NULL, but it shouldn't be moved to a different
	 * address space.
	 */
	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
		/*
		 * Unfortunately this is a little tricky, either
		 *
		 * 1) We got here and our page had already been dealt with and
		 *    we reserved our space, thus ret == 0, so we need to just
		 *    drop our space reservation and bail.  This can happen the
		 *    first time we come into the fixup worker, or could happen
		 *    while waiting for the ordered extent.
		 * 2) Our page was already dealt with, but we happened to get an
		 *    ENOSPC above from the btrfs_delalloc_reserve_space.  In
		 *    this case we obviously don't have anything to release, but
		 *    because the page was already dealt with we don't want to
		 *    mark the page with an error, so make sure we're resetting
		 *    ret to 0.  This is why we have this check _before_ the ret
		 *    check, because we do not want to have a surprise ENOSPC
		 *    when the page was already properly dealt with.
		 */
		if (!ret) {
			btrfs_delalloc_release_extents(inode, PAGE_SIZE);
			btrfs_delalloc_release_space(inode, data_reserved,
						     page_start, PAGE_SIZE,
						     true);
		}
		ret = 0;
		goto out_page;
	}

	/*
	 * We can't mess with the page state unless it is locked, so now that
	 * it is locked bail if we failed to make our space reservation.
	 */
	if (ret)
		goto out_page;

	lock_extent_bits(&inode->io_tree, page_start, page_end, &cached_state);

	/* already ordered? We're done */
	if (PageOrdered(page))
		goto out_reserved;

	ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
	if (ordered) {
		unlock_extent_cached(&inode->io_tree, page_start, page_end,
				     &cached_state);
		unlock_page(page);
		btrfs_start_ordered_extent(ordered, 1);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
					&cached_state);
	if (ret)
		goto out_reserved;

	/*
	 * Everything went as planned, we're now the owner of a dirty page with
	 * delayed allocation bits set and space reserved for our COW
	 * destination.
	 *
	 * The page was dirty when we started, nothing should have cleaned it.
	 */
	BUG_ON(!PageDirty(page));
	free_delalloc_space = false;
out_reserved:
	btrfs_delalloc_release_extents(inode, PAGE_SIZE);
	if (free_delalloc_space)
		btrfs_delalloc_release_space(inode, data_reserved, page_start,
					     PAGE_SIZE, true);
	unlock_extent_cached(&inode->io_tree, page_start, page_end,
			     &cached_state);
out_page:
	if (ret) {
		/*
		 * We hit ENOSPC or other errors. Update the mapping and page
		 * to reflect the errors and clean the page.
		 */
		mapping_set_error(page->mapping, ret);
		end_extent_writepage(page, ret, page_start, page_end);
		clear_page_dirty_for_io(page);
		SetPageError(page);
	}
	btrfs_page_clear_checked(inode->root->fs_info, page, page_start, PAGE_SIZE);
	unlock_page(page);
	put_page(page);
	kfree(fixup);
	extent_changeset_free(data_reserved);
	/*
	 * As a precaution, do a delayed iput in case it would be the last iput
	 * that could need flushing space. Recursing back to fixup worker would
	 * deadlock.
	 */
	btrfs_add_delayed_iput(&inode->vfs_inode);
}
/*
 * There are a few paths in the higher layers of the kernel that directly
 * set the page dirty bit without asking the filesystem if it is a
 * good idea. This causes problems because we want to make sure COW
 * properly happens and the data=ordered rules are followed.
 *
 * In our case any range that doesn't have the ORDERED bit set
 * hasn't been properly setup for IO. We kick off an async process
 * to fix it up. The async helper will wait for ordered extents, set
 * the delalloc bit and make it safe to write the page.
 */
int btrfs_writepage_cow_fixup(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_writepage_fixup *fixup;

	/* This page has ordered extent covering it already */
	if (PageOrdered(page))
		return 0;

	/*
	 * PageChecked is set below when we create a fixup worker for this page,
	 * don't try to create another one if we're already PageChecked()
	 *
	 * The extent_io writepage code will redirty the page if we send back
	 * EAGAIN.
	 */
	if (PageChecked(page))
		return -EAGAIN;

	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
	if (!fixup)
		return -EAGAIN;

	/*
	 * We are already holding a reference to this inode from
	 * write_cache_pages. We need to hold it because the space reservation
	 * takes place outside of the page lock, and we can't trust
	 * page->mapping outside of the page lock.
	 */
	ihold(inode);
	btrfs_page_set_checked(fs_info, page, page_offset(page), PAGE_SIZE);
	get_page(page);
	btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL);
	fixup->page = page;
	fixup->inode = inode;
	btrfs_queue_work(fs_info->fixup_workers, &fixup->work);

	return -EAGAIN;
}
static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
				       struct btrfs_inode *inode, u64 file_pos,
				       struct btrfs_file_extent_item *stack_fi,
				       const bool update_inode_bytes,
				       u64 qgroup_reserved)
{
	struct btrfs_root *root = inode->root;
	const u64 sectorsize = root->fs_info->sectorsize;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key ins;
	u64 disk_num_bytes = btrfs_stack_file_extent_disk_num_bytes(stack_fi);
	u64 disk_bytenr = btrfs_stack_file_extent_disk_bytenr(stack_fi);
	u64 offset = btrfs_stack_file_extent_offset(stack_fi);
	u64 num_bytes = btrfs_stack_file_extent_num_bytes(stack_fi);
	u64 ram_bytes = btrfs_stack_file_extent_ram_bytes(stack_fi);
	struct btrfs_drop_extents_args drop_args = { 0 };
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * we may be replacing one extent in the tree with another.
	 * The new extent is pinned in the extent map, and we don't want
	 * to drop it from the cache until it is completely in the btree.
	 *
	 * So, tell btrfs_drop_extents to leave this extent in the cache.
	 * the caller is expected to unpin it and allow it to be merged
	 * with the others.
	 */
	drop_args.path = path;
	drop_args.start = file_pos;
	drop_args.end = file_pos + num_bytes;
	drop_args.replace_extent = true;
	drop_args.extent_item_size = sizeof(*stack_fi);
	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
	if (ret)
		goto out;

	if (!drop_args.extent_inserted) {
		ins.objectid = btrfs_ino(inode);
		ins.offset = file_pos;
		ins.type = BTRFS_EXTENT_DATA_KEY;

		ret = btrfs_insert_empty_item(trans, root, path, &ins,
					      sizeof(*stack_fi));
		if (ret)
			goto out;
	}
	leaf = path->nodes[0];
	btrfs_set_stack_file_extent_generation(stack_fi, trans->transid);
	write_extent_buffer(leaf, stack_fi,
			btrfs_item_ptr_offset(leaf, path->slots[0]),
			sizeof(struct btrfs_file_extent_item));

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	/*
	 * If we dropped an inline extent here, we know the range where it is
	 * was not marked with the EXTENT_DELALLOC_NEW bit, so we update the
	 * number of bytes only for that range containing the inline extent.
	 * The remainder of the range will be processed when clearing the
	 * EXTENT_DELALLOC_BIT bit through the ordered extent completion.
	 */
	if (file_pos == 0 && !IS_ALIGNED(drop_args.bytes_found, sectorsize)) {
		u64 inline_size = round_down(drop_args.bytes_found, sectorsize);

		inline_size = drop_args.bytes_found - inline_size;
		btrfs_update_inode_bytes(inode, sectorsize, inline_size);
		drop_args.bytes_found -= inline_size;
		num_bytes -= sectorsize;
	}

	if (update_inode_bytes)
		btrfs_update_inode_bytes(inode, num_bytes, drop_args.bytes_found);

	ins.objectid = disk_bytenr;
	ins.offset = disk_num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_inode_set_file_extent_range(inode, file_pos, ram_bytes);
	if (ret)
		goto out;

	ret = btrfs_alloc_reserved_file_extent(trans, root, btrfs_ino(inode),
					       file_pos - offset,
					       qgroup_reserved, &ins);
out:
	btrfs_free_path(path);

	return ret;
}
static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
					 u64 start, u64 len)
{
	struct btrfs_block_group *cache;

	cache = btrfs_lookup_block_group(fs_info, start);
	ASSERT(cache);

	spin_lock(&cache->lock);
	cache->delalloc_bytes -= len;
	spin_unlock(&cache->lock);

	btrfs_put_block_group(cache);
}
static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans,
					     struct btrfs_ordered_extent *oe)
{
	struct btrfs_file_extent_item stack_fi;
	bool update_inode_bytes;
	u64 num_bytes = oe->num_bytes;
	u64 ram_bytes = oe->ram_bytes;

	memset(&stack_fi, 0, sizeof(stack_fi));
	btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_REG);
	btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, oe->disk_bytenr);
	btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi,
						   oe->disk_num_bytes);
	btrfs_set_stack_file_extent_offset(&stack_fi, oe->offset);
	if (test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags))
		num_bytes = ram_bytes = oe->truncated_len;
	btrfs_set_stack_file_extent_num_bytes(&stack_fi, num_bytes);
	btrfs_set_stack_file_extent_ram_bytes(&stack_fi, ram_bytes);
	btrfs_set_stack_file_extent_compression(&stack_fi, oe->compress_type);
	/* Encryption and other encoding is reserved and all 0 */

	/*
	 * For delalloc, when completing an ordered extent we update the inode's
	 * bytes when clearing the range in the inode's io tree, so pass false
	 * as the argument 'update_inode_bytes' to insert_reserved_file_extent(),
	 * except if the ordered extent was truncated.
	 */
	update_inode_bytes = test_bit(BTRFS_ORDERED_DIRECT, &oe->flags) ||
			     test_bit(BTRFS_ORDERED_ENCODED, &oe->flags) ||
			     test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags);

	return insert_reserved_file_extent(trans, BTRFS_I(oe->inode),
					   oe->file_offset, &stack_fi,
					   update_inode_bytes, oe->qgroup_rsv);
}
/*
 * As ordered data IO finishes, this gets called so we can finish
 * an ordered extent if the range of bytes in the file it covers are
 * fully written.
 */
static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
{
	struct btrfs_inode *inode = BTRFS_I(ordered_extent->inode);
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans = NULL;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct extent_state *cached_state = NULL;
	u64 start, end;
	int compress_type = 0;
	int ret = 0;
	u64 logical_len = ordered_extent->num_bytes;
	bool freespace_inode;
	bool truncated = false;
	bool clear_reserved_extent = true;
	unsigned int clear_bits = EXTENT_DEFRAG;

	start = ordered_extent->file_offset;
	end = start + ordered_extent->num_bytes - 1;

	if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
	    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) &&
	    !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags) &&
	    !test_bit(BTRFS_ORDERED_ENCODED, &ordered_extent->flags))
		clear_bits |= EXTENT_DELALLOC_NEW;

	freespace_inode = btrfs_is_free_space_inode(inode);

	if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
		ret = -EIO;
		goto out;
	}

	/* A valid bdev implies a write on a sequential zone */
	if (ordered_extent->bdev) {
		btrfs_rewrite_logical_zoned(ordered_extent);
		btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr,
					ordered_extent->disk_num_bytes);
	}

	btrfs_free_io_failure_record(inode, start, end);

	if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
		truncated = true;
		logical_len = ordered_extent->truncated_len;
		/* Truncated the entire extent, don't bother adding */
		if (!logical_len)
			goto out;
	}

	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
		BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */

		btrfs_inode_safe_disk_i_size_write(inode, 0);
		if (freespace_inode)
			trans = btrfs_join_transaction_spacecache(root);
		else
			trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			goto out;
		}
		trans->block_rsv = &inode->block_rsv;
		ret = btrfs_update_inode_fallback(trans, root, inode);
		if (ret) /* -ENOMEM or corruption */
			btrfs_abort_transaction(trans, ret);
		goto out;
	}

	clear_bits |= EXTENT_LOCKED;
	lock_extent_bits(io_tree, start, end, &cached_state);

	if (freespace_inode)
		trans = btrfs_join_transaction_spacecache(root);
	else
		trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}

	trans->block_rsv = &inode->block_rsv;

	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
		compress_type = ordered_extent->compress_type;
	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
		BUG_ON(compress_type);
		ret = btrfs_mark_extent_written(trans, inode,
						ordered_extent->file_offset,
						ordered_extent->file_offset +
						logical_len);
		btrfs_zoned_release_data_reloc_bg(fs_info, ordered_extent->disk_bytenr,
						  ordered_extent->disk_num_bytes);
	} else {
		BUG_ON(root == fs_info->tree_root);
		ret = insert_ordered_extent_file_extent(trans, ordered_extent);
		if (!ret) {
			clear_reserved_extent = false;
			btrfs_release_delalloc_bytes(fs_info,
						ordered_extent->disk_bytenr,
						ordered_extent->disk_num_bytes);
		}
	}
	unpin_extent_cache(&inode->extent_tree, ordered_extent->file_offset,
			   ordered_extent->num_bytes, trans->transid);
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = add_pending_csums(trans, &ordered_extent->list);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	/*
	 * If this is a new delalloc range, clear its new delalloc flag to
	 * update the inode's number of bytes. This needs to be done first
	 * before updating the inode item.
	 */
	if ((clear_bits & EXTENT_DELALLOC_NEW) &&
	    !test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags))
		clear_extent_bit(&inode->io_tree, start, end,
				 EXTENT_DELALLOC_NEW | EXTENT_ADD_INODE_BYTES,
				 0, 0, &cached_state);

	btrfs_inode_safe_disk_i_size_write(inode, 0);
	ret = btrfs_update_inode_fallback(trans, root, inode);
	if (ret) { /* -ENOMEM or corruption */
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	ret = 0;
out:
	clear_extent_bit(&inode->io_tree, start, end, clear_bits,
			 (clear_bits & EXTENT_LOCKED) ? 1 : 0, 0,
			 &cached_state);

	if (trans)
		btrfs_end_transaction(trans);

	if (ret || truncated) {
		u64 unwritten_start = start;

		/*
		 * If we failed to finish this ordered extent for any reason we
		 * need to make sure BTRFS_ORDERED_IOERR is set on the ordered
		 * extent, and mark the inode with the error if it wasn't
		 * already set. Any error during writeback would have already
		 * set the mapping error, so we need to set it if we're the ones
		 * marking this ordered extent as failed.
		 */
		if (ret && !test_and_set_bit(BTRFS_ORDERED_IOERR,
					     &ordered_extent->flags))
			mapping_set_error(ordered_extent->inode->i_mapping, -EIO);

		if (truncated)
			unwritten_start += logical_len;
		clear_extent_uptodate(io_tree, unwritten_start, end, NULL);

		/* Drop the cache for the part of the extent we didn't write. */
		btrfs_drop_extent_cache(inode, unwritten_start, end, 0);

		/*
		 * If the ordered extent had an IOERR or something else went
		 * wrong we need to return the space for this ordered extent
		 * back to the allocator. We only free the extent in the
		 * truncated case if we didn't write out the extent at all.
		 *
		 * If we made it past insert_reserved_file_extent before we
		 * errored out then we don't need to do this as the accounting
		 * has already been done.
		 */
		if ((ret || !logical_len) &&
		    clear_reserved_extent &&
		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
		    !test_bit(BTRFS_ORDERED_PREALLOC,
			      &ordered_extent->flags)) {
			/*
			 * Discard the range before returning it back to the
			 * free space pool
			 */
			if (ret && btrfs_test_opt(fs_info, DISCARD_SYNC))
				btrfs_discard_extent(fs_info,
						ordered_extent->disk_bytenr,
						ordered_extent->disk_num_bytes,
						NULL);
			btrfs_free_reserved_extent(fs_info,
					ordered_extent->disk_bytenr,
					ordered_extent->disk_num_bytes, 1);
		}
	}

	/*
	 * This needs to be done to make sure anybody waiting knows we are done
	 * updating everything for this ordered extent.
	 */
	btrfs_remove_ordered_extent(inode, ordered_extent);

	/* once for us */
	btrfs_put_ordered_extent(ordered_extent);
	/* once for the tree */
	btrfs_put_ordered_extent(ordered_extent);

	return ret;
}
static void finish_ordered_fn(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered_extent;

	ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
	btrfs_finish_ordered_io(ordered_extent);
}
void btrfs_writepage_endio_finish_ordered(struct btrfs_inode *inode,
					  struct page *page, u64 start,
					  u64 end, bool uptodate)
{
	trace_btrfs_writepage_end_io_hook(inode, start, end, uptodate);

	btrfs_mark_ordered_io_finished(inode, page, start, end + 1 - start,
				       finish_ordered_fn, uptodate);
}
/*
 * check_data_csum - verify checksum of one sector of uncompressed data
 * @inode:	inode
 * @bbio:	btrfs_bio which contains the csum
 * @bio_offset:	offset to the beginning of the bio (in bytes)
 * @page:	page where is the data to be verified
 * @pgoff:	offset inside the page
 * @start:	logical offset in the file
 *
 * The length of such check is always one sector size.
 */
static int check_data_csum(struct inode *inode, struct btrfs_bio *bbio,
			   u32 bio_offset, struct page *page, u32 pgoff,
			   u64 start)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	char *kaddr;
	u32 len = fs_info->sectorsize;
	const u32 csum_size = fs_info->csum_size;
	unsigned int offset_sectors;
	u8 *csum_expected;
	u8 csum[BTRFS_CSUM_SIZE];

	ASSERT(pgoff + len <= PAGE_SIZE);

	offset_sectors = bio_offset >> fs_info->sectorsize_bits;
	csum_expected = ((u8 *)bbio->csum) + offset_sectors * csum_size;

	kaddr = kmap_atomic(page);
	shash->tfm = fs_info->csum_shash;

	crypto_shash_digest(shash, kaddr + pgoff, len, csum);
	kunmap_atomic(kaddr);

	if (memcmp(csum, csum_expected, csum_size))
		goto zeroit;

	return 0;
zeroit:
	btrfs_print_data_csum_error(BTRFS_I(inode), start, csum, csum_expected,
				    bbio->mirror_num);
	if (bbio->device)
		btrfs_dev_stat_inc_and_print(bbio->device,
					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
	memzero_page(page, pgoff, len);
	return -EIO;
}
/*
 * When reads are done, we need to check csums to verify the data is correct.
 * if there's a match, we allow the bio to finish. If not, the code in
 * extent_io.c will try to find good copies for us.
 *
 * @bio_offset:	offset to the beginning of the bio (in bytes)
 * @start:	file offset of the range start
 * @end:	file offset of the range end (inclusive)
 *
 * Return a bitmap where bit set means a csum mismatch, and bit not set means
 * csum match.
 */
unsigned int btrfs_verify_data_csum(struct btrfs_bio *bbio,
				    u32 bio_offset, struct page *page,
				    u64 start, u64 end)
{
	struct inode *inode = page->mapping->host;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	const u32 sectorsize = root->fs_info->sectorsize;
	u32 pg_off;
	unsigned int result = 0;

	if (btrfs_page_test_checked(fs_info, page, start, end + 1 - start)) {
		btrfs_page_clear_checked(fs_info, page, start, end + 1 - start);
		return 0;
	}

	/*
	 * This only happens for NODATASUM or compressed read.
	 * Normally this should be covered by above check for compressed read
	 * or the next check for NODATASUM. Just do a quicker exit here.
	 */
	if (bbio->csum == NULL)
		return 0;

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
		return 0;

	if (unlikely(test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state)))
		return 0;

	ASSERT(page_offset(page) <= start &&
	       end <= page_offset(page) + PAGE_SIZE - 1);
	for (pg_off = offset_in_page(start);
	     pg_off < offset_in_page(end);
	     pg_off += sectorsize, bio_offset += sectorsize) {
		u64 file_offset = pg_off + page_offset(page);
		int ret;

		if (btrfs_is_data_reloc_root(root) &&
		    test_range_bit(io_tree, file_offset,
				   file_offset + sectorsize - 1,
				   EXTENT_NODATASUM, 1, NULL)) {
			/* Skip the range without csum for data reloc inode */
			clear_extent_bits(io_tree, file_offset,
					  file_offset + sectorsize - 1,
					  EXTENT_NODATASUM);
			continue;
		}
		ret = check_data_csum(inode, bbio, bio_offset, page, pg_off,
				      page_offset(page) + pg_off);
		if (ret < 0) {
			const int nr_bit = (pg_off - offset_in_page(start)) >>
				     root->fs_info->sectorsize_bits;

			result |= (1U << nr_bit);
		}
	}
	return result;
}
/*
 * btrfs_add_delayed_iput - perform a delayed iput on @inode
 *
 * @inode: The inode we want to perform iput on
 *
 * This function uses the generic vfs_inode::i_count to track whether we should
 * just decrement it (in case it's > 1) or if this is the last iput then link
 * the inode to the delayed iput machinery. Delayed iputs are processed at
 * transaction commit time/superblock commit/cleaner kthread.
 */
void btrfs_add_delayed_iput(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_inode *binode = BTRFS_I(inode);

	if (atomic_add_unless(&inode->i_count, -1, 1))
		return;

	atomic_inc(&fs_info->nr_delayed_iputs);
	spin_lock(&fs_info->delayed_iput_lock);
	ASSERT(list_empty(&binode->delayed_iput));
	list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs);
	spin_unlock(&fs_info->delayed_iput_lock);
	if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags))
		wake_up_process(fs_info->cleaner_kthread);
}
static void run_delayed_iput_locked(struct btrfs_fs_info *fs_info,
				    struct btrfs_inode *inode)
{
	list_del_init(&inode->delayed_iput);
	spin_unlock(&fs_info->delayed_iput_lock);
	iput(&inode->vfs_inode);
	if (atomic_dec_and_test(&fs_info->nr_delayed_iputs))
		wake_up(&fs_info->delayed_iputs_wait);
	spin_lock(&fs_info->delayed_iput_lock);
}
static void btrfs_run_delayed_iput(struct btrfs_fs_info *fs_info,
				   struct btrfs_inode *inode)
{
	if (!list_empty(&inode->delayed_iput)) {
		spin_lock(&fs_info->delayed_iput_lock);
		if (!list_empty(&inode->delayed_iput))
			run_delayed_iput_locked(fs_info, inode);
		spin_unlock(&fs_info->delayed_iput_lock);
	}
}
void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
{
	spin_lock(&fs_info->delayed_iput_lock);
	while (!list_empty(&fs_info->delayed_iputs)) {
		struct btrfs_inode *inode;

		inode = list_first_entry(&fs_info->delayed_iputs,
				struct btrfs_inode, delayed_iput);
		run_delayed_iput_locked(fs_info, inode);
		cond_resched_lock(&fs_info->delayed_iput_lock);
	}
	spin_unlock(&fs_info->delayed_iput_lock);
}
/*
 * Wait for flushing all delayed iputs
 *
 * @fs_info:  the filesystem
 *
 * This will wait on any delayed iputs that are currently running with KILLABLE
 * set. Once they are all done running we will return, unless we are killed in
 * which case we return EINTR. This helps in user operations like fallocate etc
 * that might get blocked on the iputs.
 *
 * Return EINTR if we were killed, 0 if nothing's pending
 */
int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info)
{
	int ret = wait_event_killable(fs_info->delayed_iputs_wait,
			atomic_read(&fs_info->nr_delayed_iputs) == 0);
	if (ret)
		return -EINTR;
	return 0;
}
/*
 * This creates an orphan entry for the given inode in case something goes wrong
 * in the middle of an unlink.
 */
int btrfs_orphan_add(struct btrfs_trans_handle *trans,
		     struct btrfs_inode *inode)
{
	int ret;

	ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode));
	if (ret && ret != -EEXIST) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	return 0;
}
/*
 * We have done the delete so we can go ahead and remove the orphan item for
 * this particular inode.
 */
static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
			    struct btrfs_inode *inode)
{
	return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode));
}
/*
 * this cleans up any orphans that may be left on the list from the last use
 * of this root.
 */
int btrfs_orphan_cleanup(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key, found_key;
	struct btrfs_trans_handle *trans;
	struct inode *inode;
	u64 last_objectid = 0;
	int ret = 0, nr_unlink = 0;

	if (test_and_set_bit(BTRFS_ROOT_ORPHAN_CLEANUP, &root->state))
		return 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}
	path->reada = READA_BACK;

	key.objectid = BTRFS_ORPHAN_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		/*
		 * if ret == 0 means we found what we were searching for, which
		 * is weird, but possible, so only screw with path if we didn't
		 * find the key and see if we have stuff that matches
		 */
		if (ret > 0) {
			ret = 0;
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		/* pull out the item */
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		/* make sure the item matches what we want */
		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
			break;
		if (found_key.type != BTRFS_ORPHAN_ITEM_KEY)
			break;

		/* release the path since we're done with it */
		btrfs_release_path(path);

		/*
		 * this is where we are basically btrfs_lookup, without the
		 * crossing root thing. we store the inode number in the
		 * offset of the orphan item.
		 */

		if (found_key.offset == last_objectid) {
			btrfs_err(fs_info,
				  "Error removing orphan entry, stopping orphan cleanup");
			ret = -EINVAL;
			goto out;
		}

		last_objectid = found_key.offset;

		found_key.objectid = found_key.offset;
		found_key.type = BTRFS_INODE_ITEM_KEY;
		found_key.offset = 0;
		inode = btrfs_iget(fs_info->sb, last_objectid, root);
		ret = PTR_ERR_OR_ZERO(inode);
		if (ret && ret != -ENOENT)
			goto out;

		if (ret == -ENOENT && root == fs_info->tree_root) {
			struct btrfs_root *dead_root;
			int is_dead_root = 0;

			/*
			 * This is an orphan in the tree root. Currently these
			 * could come from 2 sources:
			 *  a) a root (snapshot/subvolume) deletion in progress
			 *  b) a free space cache inode
			 * We need to distinguish those two, as the orphan item
			 * for a root must not get deleted before the deletion
			 * of the snapshot/subvolume's tree completes.
			 *
			 * btrfs_find_orphan_roots() ran before us, which has
			 * found all deleted roots and loaded them into
			 * fs_info->fs_roots_radix. So here we can find if an
			 * orphan item corresponds to a deleted root by looking
			 * up the root from that radix tree.
			 */

			spin_lock(&fs_info->fs_roots_radix_lock);
			dead_root = radix_tree_lookup(&fs_info->fs_roots_radix,
					(unsigned long)found_key.objectid);
			if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0)
				is_dead_root = 1;
			spin_unlock(&fs_info->fs_roots_radix_lock);

			if (is_dead_root) {
				/* prevent this orphan from being found again */
				key.offset = found_key.objectid - 1;
				continue;
			}
		}

		/*
		 * If we have an inode with links, there are a couple of
		 * possibilities:
		 *
		 * 1. We were halfway through creating fsverity metadata for the
		 *    file. In that case, the orphan item represents incomplete
		 *    fsverity metadata which must be cleaned up with
		 *    btrfs_drop_verity_items and deleting the orphan item.
		 *
		 * 2. Old kernels (before v3.12) used to create an
		 *    orphan item for truncate indicating that there were possibly
		 *    extent items past i_size that needed to be deleted. In v3.12,
		 *    truncate was changed to update i_size in sync with the extent
		 *    items, but the (useless) orphan item was still created. Since
		 *    v4.18, we don't create the orphan item for truncate at all.
		 *
		 * So, this item could mean that we need to do a truncate, but
		 * only if this filesystem was last used on a pre-v3.12 kernel
		 * and was not cleanly unmounted. The odds of that are quite
		 * slim, and it's a pain to do the truncate now, so just delete
		 * the orphan item.
		 *
		 * It's also possible that this orphan item was supposed to be
		 * deleted but wasn't. The inode number may have been reused,
		 * but either way, we can delete the orphan item.
		 */
		if (ret == -ENOENT || inode->i_nlink) {
			if (!ret) {
				ret = btrfs_drop_verity_items(BTRFS_I(inode));
				iput(inode);
				if (ret)
					goto out;
			}
			trans = btrfs_start_transaction(root, 1);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				goto out;
			}
			btrfs_debug(fs_info, "auto deleting %Lu",
				    found_key.objectid);
			ret = btrfs_del_orphan_item(trans, root,
						    found_key.objectid);
			btrfs_end_transaction(trans);
			if (ret)
				goto out;
			continue;
		}

		nr_unlink++;

		/* this will do delete_inode and everything for us */
		iput(inode);
	}
	/* release the path since we're done with it */
	btrfs_release_path(path);

	if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
		trans = btrfs_join_transaction(root);
		if (!IS_ERR(trans))
			btrfs_end_transaction(trans);
	}

	if (nr_unlink)
		btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink);

out:
	if (ret)
		btrfs_err(fs_info, "could not do orphan cleanup %d", ret);
	btrfs_free_path(path);
	return ret;
}
/*
 * very simple check to peek ahead in the leaf looking for xattrs. If we
 * don't find any xattrs, we know there can't be any acls.
 *
 * slot is the slot the inode is in, objectid is the objectid of the inode
 */
static noinline int acls_after_inode_item(struct extent_buffer *leaf,
					  int slot, u64 objectid,
					  int *first_xattr_slot)
{
	u32 nritems = btrfs_header_nritems(leaf);
	struct btrfs_key found_key;
	static u64 xattr_access = 0;
	static u64 xattr_default = 0;
	int scanned = 0;

	if (!xattr_access) {
		xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS,
					strlen(XATTR_NAME_POSIX_ACL_ACCESS));
		xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT,
					strlen(XATTR_NAME_POSIX_ACL_DEFAULT));
	}

	slot++;
	*first_xattr_slot = -1;
	while (slot < nritems) {
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/* we found a different objectid, there must not be acls */
		if (found_key.objectid != objectid)
			return 0;

		/* we found an xattr, assume we've got an acl */
		if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
			if (*first_xattr_slot == -1)
				*first_xattr_slot = slot;
			if (found_key.offset == xattr_access ||
			    found_key.offset == xattr_default)
				return 1;
		}

		/*
		 * we found a key greater than an xattr key, there can't
		 * be any acls later on
		 */
		if (found_key.type > BTRFS_XATTR_ITEM_KEY)
			return 0;

		slot++;
		scanned++;

		/*
		 * it goes inode, inode backrefs, xattrs, extents,
		 * so if there are a ton of hard links to an inode there can
		 * be a lot of backrefs. Don't waste time searching too hard,
		 * this is just an optimization
		 */
		if (scanned >= 8)
			break;
	}
	/* we hit the end of the leaf before we found an xattr or
	 * something larger than an xattr. We have to assume the inode
	 * has acls
	 */
	if (*first_xattr_slot == -1)
		*first_xattr_slot = slot;
	return 1;
}
/*
 * read an inode from the btree into the in-memory inode
 */
static int btrfs_read_locked_inode(struct inode *inode,
				   struct btrfs_path *in_path)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_path *path = in_path;
	struct extent_buffer *leaf;
	struct btrfs_inode_item *inode_item;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key location;
	unsigned long ptr;
	int maybe_acls;
	u32 rdev;
	int ret;
	bool filled = false;
	int first_xattr_slot;

	ret = btrfs_fill_inode(inode, &rdev);
	if (!ret)
		filled = true;

	if (!path) {
		path = btrfs_alloc_path();
		if (!path)
			return -ENOMEM;
	}

	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));

	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
	if (ret) {
		if (path != in_path)
			btrfs_free_path(path);
		return ret;
	}

	leaf = path->nodes[0];

	if (filled)
		goto cache_index;

	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
	set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
	i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
	i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
	btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item));
	btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
			round_up(i_size_read(inode), fs_info->sectorsize));

	inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime);
	inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime);

	inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime);
	inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime);

	inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime);
	inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime);

	BTRFS_I(inode)->i_otime.tv_sec =
		btrfs_timespec_sec(leaf, &inode_item->otime);
	BTRFS_I(inode)->i_otime.tv_nsec =
		btrfs_timespec_nsec(leaf, &inode_item->otime);

	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
	BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);

	inode_set_iversion_queried(inode,
				   btrfs_inode_sequence(leaf, inode_item));
	inode->i_generation = BTRFS_I(inode)->generation;
	inode->i_rdev = 0;
	rdev = btrfs_inode_rdev(leaf, inode_item);

	BTRFS_I(inode)->index_cnt = (u64)-1;
	btrfs_inode_split_flags(btrfs_inode_flags(leaf, inode_item),
				&BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags);

cache_index:
	/*
	 * If we were modified in the current generation and evicted from memory
	 * and then re-read we need to do a full sync since we don't have any
	 * idea about which extents were modified before we were evicted from
	 * cache.
	 *
	 * This is required for both inode re-read from disk and delayed inode
	 * in delayed_nodes_tree.
	 */
	if (BTRFS_I(inode)->last_trans == fs_info->generation)
		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			&BTRFS_I(inode)->runtime_flags);

	/*
	 * We don't persist the id of the transaction where an unlink operation
	 * against the inode was last made. So here we assume the inode might
	 * have been evicted, and therefore the exact value of last_unlink_trans
	 * lost, and set it to last_trans to avoid metadata inconsistencies
	 * between the inode and its parent if the inode is fsync'ed and the log
	 * replayed. For example, in the scenario:
	 *
	 * touch mydir/foo
	 * ln mydir/foo mydir/bar
	 * sync
	 * unlink mydir/bar
	 * echo 2 > /proc/sys/vm/drop_caches   # evicts inode
	 * xfs_io -c fsync mydir/foo
	 * <power failure>
	 * mount fs, triggers fsync log replay
	 *
	 * We must make sure that when we fsync our inode foo we also log its
	 * parent inode, otherwise after log replay the parent still has the
	 * dentry with the "bar" name but our inode foo has a link count of 1
	 * and doesn't have an inode ref with the name "bar" anymore.
	 *
	 * Setting last_unlink_trans to last_trans is a pessimistic approach,
	 * but it guarantees correctness at the expense of occasional full
	 * transaction commits on fsync if our inode is a directory, or if our
	 * inode is not a directory, logging its parent unnecessarily.
	 */
	BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;

	/*
	 * Same logic as for last_unlink_trans. We don't persist the generation
	 * of the last transaction where this inode was used for a reflink
	 * operation, so after eviction and reloading the inode we must be
	 * pessimistic and assume the last transaction that modified the inode.
	 */
	BTRFS_I(inode)->last_reflink_trans = BTRFS_I(inode)->last_trans;

	path->slots[0]++;
	if (inode->i_nlink != 1 ||
	    path->slots[0] >= btrfs_header_nritems(leaf))
		goto cache_acl;

	btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
	if (location.objectid != btrfs_ino(BTRFS_I(inode)))
		goto cache_acl;

	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	if (location.type == BTRFS_INODE_REF_KEY) {
		struct btrfs_inode_ref *ref;

		ref = (struct btrfs_inode_ref *)ptr;
		BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref);
	} else if (location.type == BTRFS_INODE_EXTREF_KEY) {
		struct btrfs_inode_extref *extref;

		extref = (struct btrfs_inode_extref *)ptr;
		BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf,
								     extref);
	}
cache_acl:
	/*
	 * try to precache a NULL acl entry for files that don't have
	 * any xattrs or acls
	 */
	maybe_acls = acls_after_inode_item(leaf, path->slots[0],
			btrfs_ino(BTRFS_I(inode)), &first_xattr_slot);
	if (first_xattr_slot != -1) {
		path->slots[0] = first_xattr_slot;
		ret = btrfs_load_inode_props(inode, path);
		if (ret)
			btrfs_err(fs_info,
				  "error loading props for ino %llu (root %llu): %d",
				  btrfs_ino(BTRFS_I(inode)),
				  root->root_key.objectid, ret);
	}
	if (path != in_path)
		btrfs_free_path(path);

	if (!maybe_acls)
		cache_no_acl(inode);

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_mapping->a_ops = &btrfs_aops;
		inode->i_fop = &btrfs_file_operations;
		inode->i_op = &btrfs_file_inode_operations;
		break;
	case S_IFDIR:
		inode->i_fop = &btrfs_dir_file_operations;
		inode->i_op = &btrfs_dir_inode_operations;
		break;
	case S_IFLNK:
		inode->i_op = &btrfs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &btrfs_aops;
		break;
	default:
		inode->i_op = &btrfs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, rdev);
		break;
	}

	btrfs_sync_inode_flags_to_i_flags(inode);
	return 0;
}
/*
 * given a leaf and an inode, copy the inode fields into the leaf
 */
static void fill_inode_item(struct btrfs_trans_handle *trans,
			    struct extent_buffer *leaf,
			    struct btrfs_inode_item *item,
			    struct inode *inode)
{
	struct btrfs_map_token token;
	u64 flags;

	btrfs_init_map_token(&token, leaf);

	btrfs_set_token_inode_uid(&token, item, i_uid_read(inode));
	btrfs_set_token_inode_gid(&token, item, i_gid_read(inode));
	btrfs_set_token_inode_size(&token, item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_token_inode_mode(&token, item, inode->i_mode);
	btrfs_set_token_inode_nlink(&token, item, inode->i_nlink);

	btrfs_set_token_timespec_sec(&token, &item->atime,
				     inode->i_atime.tv_sec);
	btrfs_set_token_timespec_nsec(&token, &item->atime,
				      inode->i_atime.tv_nsec);

	btrfs_set_token_timespec_sec(&token, &item->mtime,
				     inode->i_mtime.tv_sec);
	btrfs_set_token_timespec_nsec(&token, &item->mtime,
				      inode->i_mtime.tv_nsec);

	btrfs_set_token_timespec_sec(&token, &item->ctime,
				     inode->i_ctime.tv_sec);
	btrfs_set_token_timespec_nsec(&token, &item->ctime,
				      inode->i_ctime.tv_nsec);

	btrfs_set_token_timespec_sec(&token, &item->otime,
				     BTRFS_I(inode)->i_otime.tv_sec);
	btrfs_set_token_timespec_nsec(&token, &item->otime,
				      BTRFS_I(inode)->i_otime.tv_nsec);

	btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode));
	btrfs_set_token_inode_generation(&token, item,
					 BTRFS_I(inode)->generation);
	btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode));
	btrfs_set_token_inode_transid(&token, item, trans->transid);
	btrfs_set_token_inode_rdev(&token, item, inode->i_rdev);
	flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
					  BTRFS_I(inode)->ro_flags);
	btrfs_set_token_inode_flags(&token, item, flags);
	btrfs_set_token_inode_block_group(&token, item, 0);
}
/*
 * copy everything in the in-memory inode into the btree.
 */
static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct btrfs_inode *inode)
{
	struct btrfs_inode_item *inode_item;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_lookup_inode(trans, root, path, &inode->location, 1);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto failed;
	}

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);

	fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_set_inode_last_trans(trans, inode);
	ret = 0;
failed:
	btrfs_free_path(path);
	return ret;
}

/*
 * copy everything in the in-memory inode into the btree.
 */
noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	/*
	 * If the inode is a free space inode, we can deadlock during commit
	 * if we put it into the delayed code.
	 *
	 * The data relocation inode should also be directly updated
	 * without delay.
	 */
	if (!btrfs_is_free_space_inode(inode)
	    && !btrfs_is_data_reloc_root(root)
	    && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
		btrfs_update_root_times(trans, root);

		ret = btrfs_delayed_update_inode(trans, root, inode);
		if (!ret)
			btrfs_set_inode_last_trans(trans, inode);
		return ret;
	}

	return btrfs_update_inode_item(trans, root, inode);
}

int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct btrfs_inode *inode)
{
	int ret;

	ret = btrfs_update_inode(trans, root, inode);
	if (ret == -ENOSPC)
		return btrfs_update_inode_item(trans, root, inode);
	return ret;
}
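
/*
 * Note on the fallback above (a reading of the code, not authoritative):
 * the delayed-inode path has to reserve its own metadata space and can
 * therefore fail with -ENOSPC, while btrfs_update_inode_item() works
 * within the space already reserved by the running transaction, so
 * retrying the direct item update is a reasonable last resort.
 */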
/*
 * unlink helper that gets used here in inode.c and in the tree logging
 * recovery code. It removes a link in a directory with a given name, and
 * also drops the back refs in the inode to the directory.
 */
static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
				struct btrfs_inode *dir,
				struct btrfs_inode *inode,
				const char *name, int name_len,
				struct btrfs_rename_ctx *rename_ctx)
{
	struct btrfs_root *root = dir->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	int ret = 0;
	struct btrfs_dir_item *di;
	u64 index;
	u64 ino = btrfs_ino(inode);
	u64 dir_ino = btrfs_ino(dir);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
				   name, name_len, -1);
	if (IS_ERR_OR_NULL(di)) {
		ret = di ? PTR_ERR(di) : -ENOENT;
		goto err;
	}
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	if (ret)
		goto err;
	btrfs_release_path(path);

	/*
	 * If we don't have dir index, we have to get it by looking up
	 * the inode ref, since we get the inode ref, remove it directly,
	 * it is unnecessary to do delayed deletion.
	 *
	 * But if we have dir index, needn't search inode ref to get it.
	 * Since the inode ref is close to the inode item, it is better
	 * that we delay to delete it, and just do this deletion when
	 * we update the inode item.
	 */
	if (inode->dir_index) {
		ret = btrfs_delayed_delete_inode_ref(inode);
		if (!ret) {
			index = inode->dir_index;
			goto skip_backref;
		}
	}

	ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
				  dir_ino, &index);
	if (ret) {
		btrfs_info(fs_info,
			"failed to delete reference to %.*s, inode %llu parent %llu",
			name_len, name, ino, dir_ino);
		btrfs_abort_transaction(trans, ret);
		goto err;
	}
skip_backref:
	if (rename_ctx)
		rename_ctx->index = index;

	ret = btrfs_delete_delayed_dir_index(trans, dir, index);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto err;
	}

	/*
	 * If we are in a rename context, we don't need to update anything in the
	 * log. That will be done later during the rename by btrfs_log_new_name().
	 * Besides that, doing it here would only cause extra unnecessary btree
	 * operations on the log tree, increasing latency for applications.
	 */
	if (!rename_ctx) {
		btrfs_del_inode_ref_in_log(trans, root, name, name_len, inode,
					   dir_ino);
		btrfs_del_dir_entries_in_log(trans, root, name, name_len, dir,
					     index);
	}

	/*
	 * If we have a pending delayed iput we could end up with the final iput
	 * being run in btrfs-cleaner context. If we have enough of these built
	 * up we can end up burning a lot of time in btrfs-cleaner without any
	 * way to throttle the unlinks. Since we're currently holding a ref on
	 * the inode we can run the delayed iput here without any issues as the
	 * final iput won't be done until after we drop the ref we're currently
	 * holding.
	 */
	btrfs_run_delayed_iput(fs_info, inode);
err:
	btrfs_free_path(path);
	if (ret)
		goto out;

	btrfs_i_size_write(dir, dir->vfs_inode.i_size - name_len * 2);
	inode_inc_iversion(&inode->vfs_inode);
	inode_inc_iversion(&dir->vfs_inode);
	inode->vfs_inode.i_ctime = dir->vfs_inode.i_mtime =
		dir->vfs_inode.i_ctime = current_time(&inode->vfs_inode);
	ret = btrfs_update_inode(trans, root, dir);
out:
	return ret;
}
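
/*
 * Directory size accounting above: every name in a btrfs directory is
 * stored twice, once in a DIR_ITEM (keyed by the name's hash) and once
 * in a DIR_INDEX item (keyed by the allocation sequence), which is why
 * unlinking shrinks the directory's i_size by name_len * 2. For example,
 * removing the name "bar" takes 6 bytes off the parent directory's size.
 */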
int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
		       struct btrfs_inode *dir, struct btrfs_inode *inode,
		       const char *name, int name_len)
{
	int ret;

	ret = __btrfs_unlink_inode(trans, dir, inode, name, name_len, NULL);
	if (!ret) {
		drop_nlink(&inode->vfs_inode);
		ret = btrfs_update_inode(trans, inode->root, inode);
	}
	return ret;
}
/*
 * helper to start transaction for unlink and rmdir.
 *
 * unlink and rmdir are special in btrfs, they do not always free space, so
 * if we cannot make our reservations the normal way try and see if there is
 * plenty of slack room in the global reserve to migrate, otherwise we cannot
 * allow the unlink to occur.
 */
static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir)
{
	struct btrfs_root *root = BTRFS_I(dir)->root;

	/*
	 * 1 for the possible orphan item
	 * 1 for the dir item
	 * 1 for the dir index
	 * 1 for the inode ref
	 * 1 for the inode
	 * 1 for the parent inode
	 */
	return btrfs_start_transaction_fallback_global_rsv(root, 6);
}
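
/*
 * Each unit passed above is roughly the metadata reservation needed to
 * touch one btree item, matching the six items enumerated in the comment.
 * The "fallback" variant exists because unlink must be allowed to make
 * progress on a full filesystem: if the normal reservation fails it may
 * borrow from the global reserve, since the unlink ultimately frees space.
 */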
static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct btrfs_trans_handle *trans;
	struct inode *inode = d_inode(dentry);
	int ret;

	trans = __unlink_start_trans(dir);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
				0);

	ret = btrfs_unlink_inode(trans, BTRFS_I(dir),
				 BTRFS_I(d_inode(dentry)), dentry->d_name.name,
				 dentry->d_name.len);
	if (ret)
		goto out;

	if (inode->i_nlink == 0) {
		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
		if (ret)
			goto out;
	}

out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(BTRFS_I(dir)->root->fs_info);
	return ret;
}
static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
			       struct inode *dir, struct dentry *dentry)
{
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	const char *name = dentry->d_name.name;
	int name_len = dentry->d_name.len;
	u64 index;
	int ret;
	u64 objectid;
	u64 dir_ino = btrfs_ino(BTRFS_I(dir));

	if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) {
		objectid = inode->root->root_key.objectid;
	} else if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
		objectid = inode->location.objectid;
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
				   name, name_len, -1);
	if (IS_ERR_OR_NULL(di)) {
		ret = di ? PTR_ERR(di) : -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	btrfs_dir_item_key_to_cpu(leaf, di, &key);
	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	btrfs_release_path(path);

	/*
	 * This is a placeholder inode for a subvolume we didn't have a
	 * reference to at the time of the snapshot creation. In the meantime
	 * we could have renamed the real subvol link into our snapshot, so
	 * depending on btrfs_del_root_ref to return -ENOENT here is incorrect.
	 * Instead simply lookup the dir_index_item for this entry so we can
	 * remove it. Otherwise we know we have a ref to the root and we can
	 * call btrfs_del_root_ref, and it _shouldn't_ fail.
	 */
	if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
		di = btrfs_search_dir_index_item(root, path, dir_ino,
						 name, name_len);
		if (IS_ERR_OR_NULL(di)) {
			if (!di)
				ret = -ENOENT;
			else
				ret = PTR_ERR(di);
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		index = key.offset;
		btrfs_release_path(path);
	} else {
		ret = btrfs_del_root_ref(trans, objectid,
					 root->root_key.objectid, dir_ino,
					 &index, name, name_len);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}

	ret = btrfs_delete_delayed_dir_index(trans, BTRFS_I(dir), index);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	btrfs_i_size_write(BTRFS_I(dir), dir->i_size - name_len * 2);
	inode_inc_iversion(dir);
	dir->i_mtime = dir->i_ctime = current_time(dir);
	ret = btrfs_update_inode_fallback(trans, root, BTRFS_I(dir));
	if (ret)
		btrfs_abort_transaction(trans, ret);
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * Helper to check if the subvolume references other subvolumes or if it's
 * default.
 */
static noinline int may_destroy_subvol(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	u64 dir_id;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* Make sure this root isn't set as the default subvol */
	dir_id = btrfs_super_root_dir(fs_info->super_copy);
	di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path,
				   dir_id, "default", 7, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
		if (key.objectid == root->root_key.objectid) {
			ret = -EPERM;
			btrfs_err(fs_info,
				  "deleting default subvolume %llu is not allowed",
				  key.objectid);
			goto out;
		}
		btrfs_release_path(path);
	}

	key.objectid = root->root_key.objectid;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	ret = 0;
	if (path->slots[0] > 0) {
		path->slots[0]--;
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid == root->root_key.objectid &&
		    key.type == BTRFS_ROOT_REF_KEY)
			ret = -ENOTEMPTY;
	}
out:
	btrfs_free_path(path);
	return ret;
}
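
/*
 * The (u64)-1 offset in the search above positions the path just past the
 * last possible ROOT_REF item for this root. Since ROOT_REF items record
 * child subvolumes, finding one in the previous slot with a matching
 * objectid means the subvolume still references others and cannot be
 * destroyed (-ENOTEMPTY).
 */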
/* Delete all dentries for inodes belonging to the root */
static void btrfs_prune_dentries(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;
	struct rb_node *prev;
	struct btrfs_inode *entry;
	struct inode *inode;
	u64 objectid = 0;

	if (!BTRFS_FS_ERROR(fs_info))
		WARN_ON(btrfs_root_refs(&root->root_item) != 0);

	spin_lock(&root->inode_lock);
again:
	node = root->inode_tree.rb_node;
	prev = NULL;
	while (node) {
		prev = node;
		entry = rb_entry(node, struct btrfs_inode, rb_node);

		if (objectid < btrfs_ino(entry))
			node = node->rb_left;
		else if (objectid > btrfs_ino(entry))
			node = node->rb_right;
		else
			break;
	}
	if (!node) {
		while (prev) {
			entry = rb_entry(prev, struct btrfs_inode, rb_node);
			if (objectid <= btrfs_ino(entry)) {
				node = prev;
				break;
			}
			prev = rb_next(prev);
		}
	}
	while (node) {
		entry = rb_entry(node, struct btrfs_inode, rb_node);
		objectid = btrfs_ino(entry) + 1;
		inode = igrab(&entry->vfs_inode);
		if (inode) {
			spin_unlock(&root->inode_lock);
			if (atomic_read(&inode->i_count) > 1)
				d_prune_aliases(inode);
			/*
			 * btrfs_drop_inode will have it removed from the inode
			 * cache when its usage count hits zero.
			 */
			iput(inode);
			cond_resched();
			spin_lock(&root->inode_lock);
			goto again;
		}

		if (cond_resched_lock(&root->inode_lock))
			goto again;

		node = rb_next(node);
	}
	spin_unlock(&root->inode_lock);
}
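
/*
 * The walk above records the last visited inode number and restarts the
 * rb-tree search from objectid + 1 whenever it has to drop inode_lock
 * (to call iput() or to reschedule), so it stays correct even if the
 * tree is modified while the lock is released.
 */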
int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb);
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = d_inode(dentry);
	struct btrfs_root *dest = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct btrfs_block_rsv block_rsv;
	u64 root_flags;
	int ret;

	/*
	 * Don't allow to delete a subvolume with send in progress. This is
	 * inside the inode lock so the error handling that has to drop the bit
	 * again is not run concurrently.
	 */
	spin_lock(&dest->root_item_lock);
	if (dest->send_in_progress) {
		spin_unlock(&dest->root_item_lock);
		btrfs_warn(fs_info,
			   "attempt to delete subvolume %llu during send",
			   dest->root_key.objectid);
		return -EPERM;
	}
	if (atomic_read(&dest->nr_swapfiles)) {
		spin_unlock(&dest->root_item_lock);
		btrfs_warn(fs_info,
			   "attempt to delete subvolume %llu with active swapfile",
			   root->root_key.objectid);
		return -EPERM;
	}
	root_flags = btrfs_root_flags(&dest->root_item);
	btrfs_set_root_flags(&dest->root_item,
			     root_flags | BTRFS_ROOT_SUBVOL_DEAD);
	spin_unlock(&dest->root_item_lock);

	down_write(&fs_info->subvol_sem);

	ret = may_destroy_subvol(dest);
	if (ret)
		goto out_up_write;

	btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
	/*
	 * One for dir inode,
	 * two for dir entries,
	 * two for root ref/backref.
	 */
	ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
	if (ret)
		goto out_up_write;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_release;
	}
	trans->block_rsv = &block_rsv;
	trans->bytes_reserved = block_rsv.size;

	btrfs_record_snapshot_destroy(trans, BTRFS_I(dir));

	ret = btrfs_unlink_subvol(trans, dir, dentry);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_end_trans;
	}

	ret = btrfs_record_root_in_trans(trans, dest);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_end_trans;
	}

	memset(&dest->root_item.drop_progress, 0,
	       sizeof(dest->root_item.drop_progress));
	btrfs_set_root_drop_level(&dest->root_item, 0);
	btrfs_set_root_refs(&dest->root_item, 0);

	if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) {
		ret = btrfs_insert_orphan_item(trans,
					       fs_info->tree_root,
					       dest->root_key.objectid);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out_end_trans;
		}
	}

	ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid,
				     BTRFS_UUID_KEY_SUBVOL,
				     dest->root_key.objectid);
	if (ret && ret != -ENOENT) {
		btrfs_abort_transaction(trans, ret);
		goto out_end_trans;
	}
	if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) {
		ret = btrfs_uuid_tree_remove(trans,
					     dest->root_item.received_uuid,
					     BTRFS_UUID_KEY_RECEIVED_SUBVOL,
					     dest->root_key.objectid);
		if (ret && ret != -ENOENT) {
			btrfs_abort_transaction(trans, ret);
			goto out_end_trans;
		}
	}

	free_anon_bdev(dest->anon_dev);
	dest->anon_dev = 0;
out_end_trans:
	trans->block_rsv = NULL;
	trans->bytes_reserved = 0;
	ret = btrfs_end_transaction(trans);
	inode->i_flags |= S_DEAD;
out_release:
	btrfs_subvolume_release_metadata(root, &block_rsv);
out_up_write:
	up_write(&fs_info->subvol_sem);
	if (ret) {
		spin_lock(&dest->root_item_lock);
		root_flags = btrfs_root_flags(&dest->root_item);
		btrfs_set_root_flags(&dest->root_item,
				     root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
		spin_unlock(&dest->root_item_lock);
	} else {
		d_invalidate(dentry);
		btrfs_prune_dentries(dest);
		ASSERT(dest->send_in_progress == 0);
	}

	return ret;
}
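
/*
 * BTRFS_ROOT_SUBVOL_DEAD is set before any real work starts so that
 * concurrent operations which check it (for instance starting a send or
 * activating a swapfile on this subvolume) are fenced off while the
 * deletion is in flight; the error path above clears it again to
 * re-enable them.
 */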
static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	int err = 0;
	struct btrfs_trans_handle *trans;
	u64 last_unlink_trans;

	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
		return -ENOTEMPTY;
	if (btrfs_ino(BTRFS_I(inode)) == BTRFS_FIRST_FREE_OBJECTID) {
		if (unlikely(btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))) {
			btrfs_err(fs_info,
			"extent tree v2 doesn't support snapshot deletion yet");
			return -EOPNOTSUPP;
		}
		return btrfs_delete_subvolume(dir, dentry);
	}

	trans = __unlink_start_trans(dir);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
		err = btrfs_unlink_subvol(trans, dir, dentry);
		goto out;
	}

	err = btrfs_orphan_add(trans, BTRFS_I(inode));
	if (err)
		goto out;

	last_unlink_trans = BTRFS_I(inode)->last_unlink_trans;

	/* now the directory is empty */
	err = btrfs_unlink_inode(trans, BTRFS_I(dir),
				 BTRFS_I(d_inode(dentry)), dentry->d_name.name,
				 dentry->d_name.len);
	if (!err) {
		btrfs_i_size_write(BTRFS_I(inode), 0);
		/*
		 * Propagate the last_unlink_trans value of the deleted dir to
		 * its parent directory. This is to prevent an unrecoverable
		 * log tree in the case we do something like this:
		 * 1) create dir foo
		 * 2) create snapshot under dir foo
		 * 3) delete the snapshot
		 * 4) rmdir foo
		 * 5) mkdir foo
		 * 6) fsync foo or some file inside foo
		 */
		if (last_unlink_trans >= trans->transid)
			BTRFS_I(dir)->last_unlink_trans = last_unlink_trans;
	}
out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);

	return err;
}
/*
 * btrfs_truncate_block - read, zero a chunk and write a block
 * @inode - inode that we're zeroing
 * @from - the offset to start zeroing
 * @len - the length to zero, 0 to zero the entire range respective to the
 *	offset
 * @front - zero up to the offset instead of from the offset on
 *
 * This will find the block for the "from" offset and cow the block and zero the
 * part we want to zero. This is used with truncate and hole punching.
 */
int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
			 int front)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct extent_changeset *data_reserved = NULL;
	bool only_release_metadata = false;
	u32 blocksize = fs_info->sectorsize;
	pgoff_t index = from >> PAGE_SHIFT;
	unsigned offset = from & (blocksize - 1);
	struct page *page;
	gfp_t mask = btrfs_alloc_write_mask(mapping);
	size_t write_bytes = blocksize;
	int ret = 0;
	u64 block_start;
	u64 block_end;

	if (IS_ALIGNED(offset, blocksize) &&
	    (!len || IS_ALIGNED(len, blocksize)))
		goto out;

	block_start = round_down(from, blocksize);
	block_end = block_start + blocksize - 1;

	ret = btrfs_check_data_free_space(inode, &data_reserved, block_start,
					  blocksize);
	if (ret < 0) {
		if (btrfs_check_nocow_lock(inode, block_start, &write_bytes) > 0) {
			/* For nocow case, no need to reserve data space */
			only_release_metadata = true;
		} else {
			goto out;
		}
	}
	ret = btrfs_delalloc_reserve_metadata(inode, blocksize, blocksize, false);
	if (ret < 0) {
		if (!only_release_metadata)
			btrfs_free_reserved_data_space(inode, data_reserved,
						       block_start, blocksize);
		goto out;
	}
again:
	page = find_or_create_page(mapping, index, mask);
	if (!page) {
		btrfs_delalloc_release_space(inode, data_reserved, block_start,
					     blocksize, true);
		btrfs_delalloc_release_extents(inode, blocksize);
		ret = -ENOMEM;
		goto out;
	}

	ret = set_page_extent_mapped(page);
	if (ret < 0)
		goto out_unlock;

	if (!PageUptodate(page)) {
		ret = btrfs_read_folio(NULL, page_folio(page));
		lock_page(page);
		if (page->mapping != mapping) {
			unlock_page(page);
			put_page(page);
			goto again;
		}
		if (!PageUptodate(page)) {
			ret = -EIO;
			goto out_unlock;
		}
	}
	wait_on_page_writeback(page);

	lock_extent_bits(io_tree, block_start, block_end, &cached_state);

	ordered = btrfs_lookup_ordered_extent(inode, block_start);
	if (ordered) {
		unlock_extent_cached(io_tree, block_start, block_end,
				     &cached_state);
		unlock_page(page);
		put_page(page);
		btrfs_start_ordered_extent(ordered, 1);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	clear_extent_bit(&inode->io_tree, block_start, block_end,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
			 0, 0, &cached_state);

	ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
					&cached_state);
	if (ret) {
		unlock_extent_cached(io_tree, block_start, block_end,
				     &cached_state);
		goto out_unlock;
	}

	if (offset != blocksize) {
		if (!len)
			len = blocksize - offset;
		if (front)
			memzero_page(page, (block_start - page_offset(page)),
				     offset);
		else
			memzero_page(page, (block_start - page_offset(page)) + offset,
				     len);
		flush_dcache_page(page);
	}
	btrfs_page_clear_checked(fs_info, page, block_start,
				 block_end + 1 - block_start);
	btrfs_page_set_dirty(fs_info, page, block_start, block_end + 1 - block_start);
	unlock_extent_cached(io_tree, block_start, block_end, &cached_state);

	if (only_release_metadata)
		set_extent_bit(&inode->io_tree, block_start, block_end,
			       EXTENT_NORESERVE, 0, NULL, NULL, GFP_NOFS, NULL);

out_unlock:
	if (ret) {
		if (only_release_metadata)
			btrfs_delalloc_release_metadata(inode, blocksize, true);
		else
			btrfs_delalloc_release_space(inode, data_reserved,
						     block_start, blocksize, true);
	}
	btrfs_delalloc_release_extents(inode, blocksize);
	unlock_page(page);
	put_page(page);
out:
	if (only_release_metadata)
		btrfs_check_nocow_unlock(inode);
	extent_changeset_free(data_reserved);
	return ret;
}
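
/*
 * A worked example of the block math above, assuming a 4KiB sectorsize:
 * for from == 6000, offset = 6000 & 4095 = 1904, block_start =
 * round_down(6000, 4096) = 4096 and block_end = 8191. With front == 0
 * (and len == 0) the tail of the block, file bytes 6000..8191, is zeroed;
 * with front == 1 the head of the block, bytes 4096..5999, is zeroed
 * instead.
 */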
static int maybe_insert_hole(struct btrfs_root *root, struct btrfs_inode *inode,
			     u64 offset, u64 len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_drop_extents_args drop_args = { 0 };
	int ret;

	/*
	 * If NO_HOLES is enabled, we don't need to do anything.
	 * Later, up in the call chain, either btrfs_set_inode_last_sub_trans()
	 * or btrfs_update_inode() will be called, which guarantee that the next
	 * fsync will know this inode was changed and needs to be logged.
	 */
	if (btrfs_fs_incompat(fs_info, NO_HOLES))
		return 0;

	/*
	 * 1 - for the one we're dropping
	 * 1 - for the one we're adding
	 * 1 - for updating the inode.
	 */
	trans = btrfs_start_transaction(root, 3);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	drop_args.start = offset;
	drop_args.end = offset + len;
	drop_args.drop_cache = true;

	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		return ret;
	}

	ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode),
				       offset, 0, 0, len, 0, len, 0, 0, 0);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
	} else {
		btrfs_update_inode_bytes(inode, 0, drop_args.bytes_found);
		btrfs_update_inode(trans, root, inode);
	}
	btrfs_end_transaction(trans);
	return ret;
}
/*
 * This function puts in dummy file extents for the area we're creating a hole
 * for. So if we are truncating this file to a larger size we need to insert
 * these file extents so that btrfs_get_extent will return a EXTENT_MAP_HOLE for
 * the range between oldsize and size
 */
int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct extent_map *em = NULL;
	struct extent_state *cached_state = NULL;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	u64 hole_start = ALIGN(oldsize, fs_info->sectorsize);
	u64 block_end = ALIGN(size, fs_info->sectorsize);
	u64 last_byte;
	u64 cur_offset;
	u64 hole_size;
	int err = 0;

	/*
	 * If our size started in the middle of a block we need to zero out the
	 * rest of the block before we expand the i_size, otherwise we could
	 * expose stale data.
	 */
	err = btrfs_truncate_block(inode, oldsize, 0, 0);
	if (err)
		return err;

	if (size <= hole_start)
		return 0;

	btrfs_lock_and_flush_ordered_range(inode, hole_start, block_end - 1,
					   &cached_state);
	cur_offset = hole_start;
	while (1) {
		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				      block_end - cur_offset);
		if (IS_ERR(em)) {
			err = PTR_ERR(em);
			em = NULL;
			break;
		}
		last_byte = min(extent_map_end(em), block_end);
		last_byte = ALIGN(last_byte, fs_info->sectorsize);
		hole_size = last_byte - cur_offset;

		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
			struct extent_map *hole_em;

			err = maybe_insert_hole(root, inode, cur_offset,
						hole_size);
			if (err)
				break;

			err = btrfs_inode_set_file_extent_range(inode,
							cur_offset, hole_size);
			if (err)
				break;

			btrfs_drop_extent_cache(inode, cur_offset,
						cur_offset + hole_size - 1, 0);
			hole_em = alloc_extent_map();
			if (!hole_em) {
				btrfs_set_inode_full_sync(inode);
				goto next;
			}
			hole_em->start = cur_offset;
			hole_em->len = hole_size;
			hole_em->orig_start = cur_offset;

			hole_em->block_start = EXTENT_MAP_HOLE;
			hole_em->block_len = 0;
			hole_em->orig_block_len = 0;
			hole_em->ram_bytes = hole_size;
			hole_em->compress_type = BTRFS_COMPRESS_NONE;
			hole_em->generation = fs_info->generation;

			while (1) {
				write_lock(&em_tree->lock);
				err = add_extent_mapping(em_tree, hole_em, 1);
				write_unlock(&em_tree->lock);
				if (err != -EEXIST)
					break;
				btrfs_drop_extent_cache(inode, cur_offset,
							cur_offset +
							hole_size - 1, 0);
			}
			free_extent_map(hole_em);
		} else {
			err = btrfs_inode_set_file_extent_range(inode,
							cur_offset, hole_size);
			if (err)
				break;
		}
next:
		free_extent_map(em);
		em = NULL;
		cur_offset = last_byte;
		if (cur_offset >= block_end)
			break;
	}
	free_extent_map(em);
	unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state);
	return err;
}
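
/*
 * Example of the expansion above with a 4KiB sectorsize: growing a file
 * from oldsize == 10000 to size == 1MiB first zeroes the tail of the
 * partial block (bytes 10000..12287, since hole_start = ALIGN(10000, 4096)
 * = 12288), then covers 12288..1MiB with hole file extents (or just cached
 * extent map updates when NO_HOLES is enabled).
 */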
static int btrfs_setsize(struct inode *inode, struct iattr *attr)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	loff_t oldsize = i_size_read(inode);
	loff_t newsize = attr->ia_size;
	int mask = attr->ia_valid;
	int ret;

	/*
	 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
	 * special case where we need to update the times despite not having
	 * these flags set. For all other operations the VFS set these flags
	 * explicitly if it wants a timestamp update.
	 */
	if (newsize != oldsize) {
		inode_inc_iversion(inode);
		if (!(mask & (ATTR_CTIME | ATTR_MTIME)))
			inode->i_ctime = inode->i_mtime =
				current_time(inode);
	}

	if (newsize > oldsize) {
		/*
		 * Don't do an expanding truncate while snapshotting is ongoing.
		 * This is to ensure the snapshot captures a fully consistent
		 * state of this file - if the snapshot captures this expanding
		 * truncation, it must capture all writes that happened before
		 * this truncation.
		 */
		btrfs_drew_write_lock(&root->snapshot_lock);
		ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, newsize);
		if (ret) {
			btrfs_drew_write_unlock(&root->snapshot_lock);
			return ret;
		}

		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			btrfs_drew_write_unlock(&root->snapshot_lock);
			return PTR_ERR(trans);
		}

		i_size_write(inode, newsize);
		btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
		pagecache_isize_extended(inode, oldsize, newsize);
		ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
		btrfs_drew_write_unlock(&root->snapshot_lock);
		btrfs_end_transaction(trans);
	} else {
		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);

		if (btrfs_is_zoned(fs_info)) {
			ret = btrfs_wait_ordered_range(inode,
					ALIGN(newsize, fs_info->sectorsize),
					(u64)-1);
			if (ret)
				return ret;
		}

		/*
		 * We're truncating a file that used to have good data down to
		 * zero. Make sure any new writes to the file get on disk
		 * on close.
		 */
		if (newsize == 0)
			set_bit(BTRFS_INODE_FLUSH_ON_CLOSE,
				&BTRFS_I(inode)->runtime_flags);

		truncate_setsize(inode, newsize);

		inode_dio_wait(inode);

		ret = btrfs_truncate(inode, newsize == oldsize);
		if (ret && inode->i_nlink) {
			int err;

			/*
			 * Truncate failed, so fix up the in-memory size. We
			 * adjusted disk_i_size down as we removed extents, so
			 * wait for disk_i_size to be stable and then update the
			 * in-memory size to match.
			 */
			err = btrfs_wait_ordered_range(inode, 0, (u64)-1);
			if (err)
				return err;
			i_size_write(inode, BTRFS_I(inode)->disk_i_size);
		}
	}

	return ret;
}
static int btrfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
			 struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int err;

	if (btrfs_root_readonly(root))
		return -EROFS;

	err = setattr_prepare(mnt_userns, dentry, attr);
	if (err)
		return err;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		err = btrfs_setsize(inode, attr);
		if (err)
			return err;
	}

	if (attr->ia_valid) {
		setattr_copy(mnt_userns, inode, attr);
		inode_inc_iversion(inode);
		err = btrfs_dirty_inode(inode);

		if (!err && attr->ia_valid & ATTR_MODE)
			err = posix_acl_chmod(mnt_userns, inode, inode->i_mode);
	}

	return err;
}
/*
 * While truncating the inode pages during eviction, we get the VFS
 * calling btrfs_invalidate_folio() against each folio of the inode. This
 * is slow because the calls to btrfs_invalidate_folio() result in a
 * huge amount of calls to lock_extent_bits() and clear_extent_bit(),
 * which keep merging and splitting extent_state structures over and over,
 * wasting lots of time.
 *
 * Therefore if the inode is being evicted, let btrfs_invalidate_folio()
 * skip all those expensive operations on a per folio basis and do only
 * the ordered io finishing, while we release here the extent_map and
 * extent_state structures, without the excessive merging and splitting.
 */
static void evict_inode_truncate_pages(struct inode *inode)
{
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map_tree *map_tree = &BTRFS_I(inode)->extent_tree;
	struct rb_node *node;

	ASSERT(inode->i_state & I_FREEING);
	truncate_inode_pages_final(&inode->i_data);

	write_lock(&map_tree->lock);
	while (!RB_EMPTY_ROOT(&map_tree->map.rb_root)) {
		struct extent_map *em;

		node = rb_first_cached(&map_tree->map);
		em = rb_entry(node, struct extent_map, rb_node);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
		remove_extent_mapping(map_tree, em);
		free_extent_map(em);
		if (need_resched()) {
			write_unlock(&map_tree->lock);
			cond_resched();
			write_lock(&map_tree->lock);
		}
	}
	write_unlock(&map_tree->lock);

	/*
	 * Keep looping until we have no more ranges in the io tree.
	 * We can have ongoing bios started by readahead that have
	 * their endio callback (extent_io.c:end_bio_extent_readpage)
	 * still in progress (unlocked the pages in the bio but did not yet
	 * unlocked the ranges in the io tree). Therefore this means some
	 * ranges can still be locked and eviction started because before
	 * submitting those bios, which are executed by a separate task (work
	 * queue kthread), inode references (inode->i_count) were not taken
	 * (which would be dropped in the end io callback of each bio).
	 * Therefore here we effectively end up waiting for those bios and
	 * anyone else holding locked ranges without having bumped the inode's
	 * reference count - if we don't do it, when they access the inode's
	 * io_tree to unlock a range it may be too late, leading to an
	 * use-after-free issue.
	 */
	spin_lock(&io_tree->lock);
	while (!RB_EMPTY_ROOT(&io_tree->state)) {
		struct extent_state *state;
		struct extent_state *cached_state = NULL;
		u64 start;
		u64 end;
		unsigned state_flags;

		node = rb_first(&io_tree->state);
		state = rb_entry(node, struct extent_state, rb_node);
		start = state->start;
		end = state->end;
		state_flags = state->state;
		spin_unlock(&io_tree->lock);

		lock_extent_bits(io_tree, start, end, &cached_state);

		/*
		 * If still has DELALLOC flag, the extent didn't reach disk,
		 * and its reserved space won't be freed by delayed_ref.
		 * So we need to free its reserved space here.
		 * (Refer to comment in btrfs_invalidate_folio, case 2)
		 *
		 * Note, end is the bytenr of last byte, so we need + 1 here.
		 */
		if (state_flags & EXTENT_DELALLOC)
			btrfs_qgroup_free_data(BTRFS_I(inode), NULL, start,
					       end - start + 1);

		clear_extent_bit(io_tree, start, end,
				 EXTENT_LOCKED | EXTENT_DELALLOC |
				 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1, 1,
				 &cached_state);

		cond_resched();
		spin_lock(&io_tree->lock);
	}
	spin_unlock(&io_tree->lock);
}
static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root,
							struct btrfs_block_rsv *rsv)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	u64 delayed_refs_extra = btrfs_calc_insert_metadata_size(fs_info, 1);
	int ret;

	/*
	 * Eviction should be taking place at some place safe because of our
	 * delayed iputs. However the normal flushing code will run delayed
	 * iputs, so we cannot use FLUSH_ALL otherwise we'll deadlock.
	 *
	 * We reserve the delayed_refs_extra here again because we can't use
	 * btrfs_start_transaction(root, 0) for the same deadlocky reason as
	 * above. We reserve our extra bit here because we generate a ton of
	 * delayed refs activity by truncating.
	 *
	 * BTRFS_RESERVE_FLUSH_EVICT will steal from the global_rsv if it can,
	 * if we fail to make this reservation we can re-try without the
	 * delayed_refs_extra so we can make some forward progress.
	 */
	ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size + delayed_refs_extra,
				     BTRFS_RESERVE_FLUSH_EVICT);
	if (ret) {
		ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size,
					     BTRFS_RESERVE_FLUSH_EVICT);
		if (ret) {
			btrfs_warn(fs_info,
		"could not allocate space for delete; will truncate on mount");
			return ERR_PTR(-ENOSPC);
		}
		delayed_refs_extra = 0;
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return trans;

	if (delayed_refs_extra) {
		trans->block_rsv = &fs_info->trans_block_rsv;
		trans->bytes_reserved = delayed_refs_extra;
		btrfs_block_rsv_migrate(rsv, trans->block_rsv,
					delayed_refs_extra, 1);
	}
	return trans;
}
void btrfs_evict_inode(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *rsv;
	int ret;

	trace_btrfs_inode_evict(inode);

	if (!root) {
		fsverity_cleanup_inode(inode);
		clear_inode(inode);
		return;
	}

	evict_inode_truncate_pages(inode);

	if (inode->i_nlink &&
	    ((btrfs_root_refs(&root->root_item) != 0 &&
	      root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) ||
	     btrfs_is_free_space_inode(BTRFS_I(inode))))
		goto no_delete;

	if (is_bad_inode(inode))
		goto no_delete;

	btrfs_free_io_failure_record(BTRFS_I(inode), 0, (u64)-1);

	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		goto no_delete;

	if (inode->i_nlink > 0) {
		BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
		       root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID);
		goto no_delete;
	}

	/*
	 * This makes sure the inode item in tree is uptodate and the space for
	 * the inode update is released.
	 */
	ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode));
	if (ret)
		goto no_delete;

	/*
	 * This drops any pending insert or delete operations we have for this
	 * inode. We could have a delayed dir index deletion queued up, but
	 * we're removing the inode completely so that'll be taken care of in
	 * the truncate.
	 */
	btrfs_kill_delayed_inode_items(BTRFS_I(inode));

	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
	if (!rsv)
		goto no_delete;
	rsv->size = btrfs_calc_metadata_size(fs_info, 1);
	rsv->failfast = 1;

	btrfs_i_size_write(BTRFS_I(inode), 0);

	while (1) {
		struct btrfs_truncate_control control = {
			.inode = BTRFS_I(inode),
			.ino = btrfs_ino(BTRFS_I(inode)),
			.new_size = 0,
			.min_type = 0,
		};

		trans = evict_refill_and_join(root, rsv);
		if (IS_ERR(trans))
			goto free_rsv;

		trans->block_rsv = rsv;

		ret = btrfs_truncate_inode_items(trans, root, &control);
		trans->block_rsv = &fs_info->trans_block_rsv;
		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty(fs_info);
		if (ret && ret != -ENOSPC && ret != -EAGAIN)
			goto free_rsv;
		else if (!ret)
			break;
	}

	/*
	 * Errors here aren't a big deal, it just means we leave orphan items in
	 * the tree. They will be cleaned up on the next mount. If the inode
	 * number gets reused, cleanup deletes the orphan item without doing
	 * anything, and unlink reuses the existing orphan item.
	 *
	 * If it turns out that we are dropping too many of these, we might want
	 * to add a mechanism for retrying these after a commit.
	 */
	trans = evict_refill_and_join(root, rsv);
	if (!IS_ERR(trans)) {
		trans->block_rsv = rsv;
		btrfs_orphan_del(trans, BTRFS_I(inode));
		trans->block_rsv = &fs_info->trans_block_rsv;
		btrfs_end_transaction(trans);
	}

free_rsv:
	btrfs_free_block_rsv(fs_info, rsv);
no_delete:
	/*
	 * If we didn't successfully delete, the orphan item will still be in
	 * the tree and we'll retry on the next mount. Again, we might also want
	 * to retry these periodically in the future.
	 */
	btrfs_remove_delayed_node(BTRFS_I(inode));
	fsverity_cleanup_inode(inode);
	clear_inode(inode);
}
/*
 * Return the key found in the dir entry in the location pointer, fill @type
 * with BTRFS_FT_*, and return 0.
 *
 * If no dir entries were found, returns -ENOENT.
 * If found a corrupted location in dir entry, returns -EUCLEAN.
 */
static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
			       struct btrfs_key *location, u8 *type)
{
	const char *name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct btrfs_dir_item *di;
	struct btrfs_path *path;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(BTRFS_I(dir)),
				   name, namelen, 0);
	if (IS_ERR_OR_NULL(di)) {
		ret = di ? PTR_ERR(di) : -ENOENT;
		goto out;
	}

	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
	if (location->type != BTRFS_INODE_ITEM_KEY &&
	    location->type != BTRFS_ROOT_ITEM_KEY) {
		ret = -EUCLEAN;
		btrfs_warn(root->fs_info,
"%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))",
			   __func__, name, btrfs_ino(BTRFS_I(dir)),
			   location->objectid, location->type, location->offset);
	}
	if (!ret)
		*type = btrfs_dir_type(path->nodes[0], di);
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * when we hit a tree root in a directory, the btrfs part of the inode
 * needs to be changed to reflect the root directory of the tree root. This
 * is kind of like crossing a mount point.
 */
static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
				    struct inode *dir,
				    struct dentry *dentry,
				    struct btrfs_key *location,
				    struct btrfs_root **sub_root)
{
	struct btrfs_path *path;
	struct btrfs_root *new_root;
	struct btrfs_root_ref *ref;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	err = -ENOENT;
	key.objectid = BTRFS_I(dir)->root->root_key.objectid;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = location->objectid;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	if (ret) {
		if (ret < 0)
			err = ret;
		goto out;
	}

	leaf = path->nodes[0];
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
	if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(BTRFS_I(dir)) ||
	    btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
		goto out;

	ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
				   (unsigned long)(ref + 1),
				   dentry->d_name.len);
	if (ret)
		goto out;

	btrfs_release_path(path);

	new_root = btrfs_get_fs_root(fs_info, location->objectid, true);
	if (IS_ERR(new_root)) {
		err = PTR_ERR(new_root);
		goto out;
	}

	*sub_root = new_root;
	location->objectid = btrfs_root_dirid(&new_root->root_item);
	location->type = BTRFS_INODE_ITEM_KEY;
	location->offset = 0;
	err = 0;
out:
	btrfs_free_path(path);
	return err;
}
static void inode_tree_add(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_inode *entry;
	struct rb_node **p;
	struct rb_node *parent;
	struct rb_node *new = &BTRFS_I(inode)->rb_node;
	u64 ino = btrfs_ino(BTRFS_I(inode));

	if (inode_unhashed(inode))
		return;
	parent = NULL;
	spin_lock(&root->inode_lock);
	p = &root->inode_tree.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_inode, rb_node);

		if (ino < btrfs_ino(entry))
			p = &parent->rb_left;
		else if (ino > btrfs_ino(entry))
			p = &parent->rb_right;
		else {
			WARN_ON(!(entry->vfs_inode.i_state &
				  (I_WILL_FREE | I_FREEING)));
			rb_replace_node(parent, new, &root->inode_tree);
			RB_CLEAR_NODE(parent);
			spin_unlock(&root->inode_lock);
			return;
		}
	}
	rb_link_node(new, parent, p);
	rb_insert_color(new, &root->inode_tree);
	spin_unlock(&root->inode_lock);
}

static void inode_tree_del(struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	int empty = 0;

	spin_lock(&root->inode_lock);
	if (!RB_EMPTY_NODE(&inode->rb_node)) {
		rb_erase(&inode->rb_node, &root->inode_tree);
		RB_CLEAR_NODE(&inode->rb_node);
		empty = RB_EMPTY_ROOT(&root->inode_tree);
	}
	spin_unlock(&root->inode_lock);

	if (empty && btrfs_root_refs(&root->root_item) == 0) {
		spin_lock(&root->inode_lock);
		empty = RB_EMPTY_ROOT(&root->inode_tree);
		spin_unlock(&root->inode_lock);
		if (empty)
			btrfs_add_dead_root(root);
	}
}
static int btrfs_init_locked_inode(struct inode *inode, void *p)
{
	struct btrfs_iget_args *args = p;

	inode->i_ino = args->ino;
	BTRFS_I(inode)->location.objectid = args->ino;
	BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
	BTRFS_I(inode)->location.offset = 0;
	BTRFS_I(inode)->root = btrfs_grab_root(args->root);
	BUG_ON(args->root && !BTRFS_I(inode)->root);
	return 0;
}

static int btrfs_find_actor(struct inode *inode, void *opaque)
{
	struct btrfs_iget_args *args = opaque;

	return args->ino == BTRFS_I(inode)->location.objectid &&
		args->root == BTRFS_I(inode)->root;
}

static struct inode *btrfs_iget_locked(struct super_block *s, u64 ino,
				       struct btrfs_root *root)
{
	struct inode *inode;
	struct btrfs_iget_args args;
	unsigned long hashval = btrfs_inode_hash(ino, root);

	args.ino = ino;
	args.root = root;

	inode = iget5_locked(s, hashval, btrfs_find_actor,
			     btrfs_init_locked_inode,
			     (void *)&args);
	return inode;
}
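
/*
 * The hash passed to iget5_locked() above mixes the inode number with the
 * root, and btrfs_find_actor() compares both, so the same inode number in
 * two different subvolumes maps to two distinct in-core inodes.
 */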
/*
 * Get an inode object given its inode number and corresponding root.
 * Path can be preallocated to prevent recursing back to iget through
 * allocator. NULL is also valid but may require an additional allocation
 * later.
 */
struct inode *btrfs_iget_path(struct super_block *s, u64 ino,
			      struct btrfs_root *root, struct btrfs_path *path)
{
	struct inode *inode;

	inode = btrfs_iget_locked(s, ino, root);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (inode->i_state & I_NEW) {
		int ret;

		ret = btrfs_read_locked_inode(inode, path);
		if (!ret) {
			inode_tree_add(inode);
			unlock_new_inode(inode);
		} else {
			iget_failed(inode);
			/*
			 * ret > 0 can come from btrfs_search_slot called by
			 * btrfs_read_locked_inode, this means the inode item
			 * was not found.
			 */
			if (ret > 0)
				ret = -ENOENT;
			inode = ERR_PTR(ret);
		}
	}

	return inode;
}

struct inode *btrfs_iget(struct super_block *s, u64 ino, struct btrfs_root *root)
{
	return btrfs_iget_path(s, ino, root, NULL);
}
static struct inode *new_simple_dir(struct super_block *s,
				    struct btrfs_key *key,
				    struct btrfs_root *root)
{
	struct inode *inode = new_inode(s);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	BTRFS_I(inode)->root = btrfs_grab_root(root);
	memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);

	inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
	/*
	 * We only need lookup, the rest is read-only and there's no inode
	 * associated with the dentry
	 */
	inode->i_op = &simple_dir_inode_operations;
	inode->i_opflags &= ~IOP_XATTR;
	inode->i_fop = &simple_dir_operations;
	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
	inode->i_mtime = current_time(inode);
	inode->i_atime = inode->i_mtime;
	inode->i_ctime = inode->i_mtime;
	BTRFS_I(inode)->i_otime = inode->i_mtime;

	return inode;
}

static_assert(BTRFS_FT_UNKNOWN == FT_UNKNOWN);
static_assert(BTRFS_FT_REG_FILE == FT_REG_FILE);
static_assert(BTRFS_FT_DIR == FT_DIR);
static_assert(BTRFS_FT_CHRDEV == FT_CHRDEV);
static_assert(BTRFS_FT_BLKDEV == FT_BLKDEV);
static_assert(BTRFS_FT_FIFO == FT_FIFO);
static_assert(BTRFS_FT_SOCK == FT_SOCK);
static_assert(BTRFS_FT_SYMLINK == FT_SYMLINK);

static inline u8 btrfs_inode_type(struct inode *inode)
{
	return fs_umode_to_ftype(inode->i_mode);
}
struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct inode *inode;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_root *sub_root = root;
	struct btrfs_key location;
	u8 di_type = 0;
	int ret = 0;

	if (dentry->d_name.len > BTRFS_NAME_LEN)
		return ERR_PTR(-ENAMETOOLONG);

	ret = btrfs_inode_by_name(dir, dentry, &location, &di_type);
	if (ret < 0)
		return ERR_PTR(ret);

	if (location.type == BTRFS_INODE_ITEM_KEY) {
		inode = btrfs_iget(dir->i_sb, location.objectid, root);
		if (IS_ERR(inode))
			return inode;

		/* Do extra check against inode mode with di_type */
		if (btrfs_inode_type(inode) != di_type) {
			btrfs_crit(fs_info,
"inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir type=%u",
				   inode->i_mode, btrfs_inode_type(inode),
				   di_type);
			iput(inode);
			return ERR_PTR(-EUCLEAN);
		}
		return inode;
	}

	ret = fixup_tree_root_location(fs_info, dir, dentry,
				       &location, &sub_root);
	if (ret < 0) {
		if (ret != -ENOENT)
			inode = ERR_PTR(ret);
		else
			inode = new_simple_dir(dir->i_sb, &location, sub_root);
	} else {
		inode = btrfs_iget(dir->i_sb, location.objectid, sub_root);
	}
	if (root != sub_root)
		btrfs_put_root(sub_root);

	if (!IS_ERR(inode) && root != sub_root) {
		down_read(&fs_info->cleanup_work_sem);
		if (!sb_rdonly(inode->i_sb))
			ret = btrfs_orphan_cleanup(sub_root);
		up_read(&fs_info->cleanup_work_sem);
		if (ret) {
			iput(inode);
			inode = ERR_PTR(ret);
		}
	}

	return inode;
}

static int btrfs_dentry_delete(const struct dentry *dentry)
{
	struct btrfs_root *root;
	struct inode *inode = d_inode(dentry);

	if (!inode && !IS_ROOT(dentry))
		inode = d_inode(dentry->d_parent);

	if (inode) {
		root = BTRFS_I(inode)->root;
		if (btrfs_root_refs(&root->root_item) == 0)
			return 1;

		if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
			return 1;
	}
	return 0;
}

static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
				   unsigned int flags)
{
	struct inode *inode = btrfs_lookup_dentry(dir, dentry);

	if (inode == ERR_PTR(-ENOENT))
		inode = NULL;
	return d_splice_alias(inode, dentry);
}
/*
 * All this infrastructure exists because dir_emit can fault, and we are holding
 * the tree lock when doing readdir. For now just allocate a buffer and copy
 * our information into that, and then dir_emit from the buffer. This is
 * similar to what NFS does, only we don't keep the buffer around in pagecache
 * because I'm afraid I'll mess that up. Long term we need to make filldir do
 * copy_to_user_inatomic so we don't have to worry about page faulting under the
 * tree lock.
 */
static int btrfs_opendir(struct inode *inode, struct file *file)
{
	struct btrfs_file_private *private;

	private = kzalloc(sizeof(struct btrfs_file_private), GFP_KERNEL);
	if (!private)
		return -ENOMEM;
	private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!private->filldir_buf) {
		kfree(private);
		return -ENOMEM;
	}
	file->private_data = private;
	return 0;
}

struct dir_entry {
	u64 ino;
	u64 offset;
	unsigned type;
	int name_len;
};

static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx)
{
	while (entries--) {
		struct dir_entry *entry = addr;
		char *name = (char *)(entry + 1);

		ctx->pos = get_unaligned(&entry->offset);
		if (!dir_emit(ctx, name, get_unaligned(&entry->name_len),
					 get_unaligned(&entry->ino),
					 get_unaligned(&entry->type)))
			return 1;
		addr += sizeof(struct dir_entry) +
			get_unaligned(&entry->name_len);
		ctx->pos++;
	}
	return 0;
}
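
/*
 * Layout of the readdir copy buffer used above: entries are packed back to
 * back in the PAGE_SIZE filldir_buf as a struct dir_entry header
 * immediately followed by the name bytes. Because names have arbitrary
 * lengths, subsequent headers are not naturally aligned, hence all the
 * get_unaligned()/put_unaligned() accessors.
 */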
static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_private *private = file->private_data;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	void *addr;
	struct list_head ins_list;
	struct list_head del_list;
	int ret;
	char *name_ptr;
	int name_len;
	int entries = 0;
	int total_len = 0;
	bool put = false;
	struct btrfs_key location;

	if (!dir_emit_dots(file, ctx))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	addr = private->filldir_buf;
	path->reada = READA_FORWARD;

	INIT_LIST_HEAD(&ins_list);
	INIT_LIST_HEAD(&del_list);
	put = btrfs_readdir_get_delayed_items(inode, &ins_list, &del_list);

again:
	key.type = BTRFS_DIR_INDEX_KEY;
	key.offset = ctx->pos;
	key.objectid = btrfs_ino(BTRFS_I(inode));

	btrfs_for_each_slot(root, &key, &found_key, path, ret) {
		struct dir_entry *entry;
		struct extent_buffer *leaf = path->nodes[0];

		if (found_key.objectid != key.objectid)
			break;
		if (found_key.type != BTRFS_DIR_INDEX_KEY)
			break;
		if (found_key.offset < ctx->pos)
			continue;
		if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
			continue;
		di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
		name_len = btrfs_dir_name_len(leaf, di);
		if ((total_len + sizeof(struct dir_entry) + name_len) >=
		    PAGE_SIZE) {
			btrfs_release_path(path);
			ret = btrfs_filldir(private->filldir_buf, entries, ctx);
			if (ret)
				goto nopos;
			addr = private->filldir_buf;
			entries = 0;
			total_len = 0;
			goto again;
		}

		entry = addr;
		put_unaligned(name_len, &entry->name_len);
		name_ptr = (char *)(entry + 1);
		read_extent_buffer(leaf, name_ptr, (unsigned long)(di + 1),
				   name_len);
		put_unaligned(fs_ftype_to_dtype(btrfs_dir_type(leaf, di)),
				&entry->type);
		btrfs_dir_item_key_to_cpu(leaf, di, &location);
		put_unaligned(location.objectid, &entry->ino);
		put_unaligned(found_key.offset, &entry->offset);
		entries++;
		addr += sizeof(struct dir_entry) + name_len;
		total_len += sizeof(struct dir_entry) + name_len;
	}
	/* Catch error encountered during iteration */
	if (ret < 0)
		goto err;

	btrfs_release_path(path);

	ret = btrfs_filldir(private->filldir_buf, entries, ctx);
	if (ret)
		goto nopos;

	ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
	if (ret)
		goto nopos;

	/*
	 * Stop new entries from being returned after we return the last
	 * entry.
	 *
	 * New directory entries are assigned a strictly increasing
	 * offset. This means that new entries created during readdir
	 * are *guaranteed* to be seen in the future by that readdir.
	 * This has broken buggy programs which operate on names as
	 * they're returned by readdir. Until we re-use freed offsets
	 * we have this hack to stop new entries from being returned
	 * under the assumption that they'll never reach this huge
	 * offset.
	 *
	 * This is being careful not to overflow 32bit loff_t unless the
	 * last entry requires it because doing so has broken 32bit apps
	 * in the past.
	 */
	if (ctx->pos >= INT_MAX)
		ctx->pos = LLONG_MAX;
	else
		ctx->pos = INT_MAX;
nopos:
	ret = 0;
err:
	if (put)
		btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
	btrfs_free_path(path);
	return ret;
}
/*
 * This is somewhat expensive, updating the tree every time the
 * inode changes. But, it is most likely to find the inode in cache.
 * FIXME, needs more benchmarking...there are no reasons other than performance
 * to keep or drop this code.
 */
static int btrfs_dirty_inode(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	int ret;

	if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
		return 0;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
	if (ret && (ret == -ENOSPC || ret == -EDQUOT)) {
		/* whoops, lets try again with the full transaction */
		btrfs_end_transaction(trans);
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
	}
	btrfs_end_transaction(trans);
	if (BTRFS_I(inode)->delayed_node)
		btrfs_balance_delayed_items(fs_info);

	return ret;
}

/*
 * This is a copy of file_update_time. We need this so we can return error on
 * ENOSPC for updating the inode in the case of file write and mmap writes.
 */
static int btrfs_update_time(struct inode *inode, struct timespec64 *now,
			     int flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	bool dirty = flags & ~S_VERSION;

	if (btrfs_root_readonly(root))
		return -EROFS;

	if (flags & S_VERSION)
		dirty |= inode_maybe_inc_iversion(inode, dirty);
	if (flags & S_CTIME)
		inode->i_ctime = *now;
	if (flags & S_MTIME)
		inode->i_mtime = *now;
	if (flags & S_ATIME)
		inode->i_atime = *now;
	return dirty ? btrfs_dirty_inode(inode) : 0;
}
/*
 * find the highest existing sequence number in a directory
 * and then set the in-memory index_cnt variable to reflect
 * free sequence numbers
 */
static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_key key, found_key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = btrfs_ino(inode);
	key.type = BTRFS_DIR_INDEX_KEY;
	key.offset = (u64)-1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	/* FIXME: we should be able to handle this */
	if (ret == 0)
		goto out;
	ret = 0;

	if (path->slots[0] == 0) {
		inode->index_cnt = BTRFS_DIR_START_INDEX;
		goto out;
	}

	path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

	if (found_key.objectid != btrfs_ino(inode) ||
	    found_key.type != BTRFS_DIR_INDEX_KEY) {
		inode->index_cnt = BTRFS_DIR_START_INDEX;
		goto out;
	}

	inode->index_cnt = found_key.offset + 1;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * helper to find a free sequence number in a given directory. This current
 * code is very simple, later versions will do smarter things in the btree
 */
int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index)
{
	int ret = 0;

	if (dir->index_cnt == (u64)-1) {
		ret = btrfs_inode_delayed_dir_index_count(dir);
		if (ret) {
			ret = btrfs_set_inode_index_count(dir);
			if (ret)
				return ret;
		}
	}

	*index = dir->index_cnt;
	dir->index_cnt++;

	return ret;
}
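
/*
 * index_cnt == (u64)-1 acts as a "not yet known" sentinel: it is what
 * btrfs_read_locked_inode() stores when an inode comes off disk, and the
 * first request for a new index then recounts it, preferably from the
 * delayed items and otherwise with the tree search above.
 */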
static int btrfs_insert_inode_locked(struct inode *inode)
{
	struct btrfs_iget_args args;

	args.ino = BTRFS_I(inode)->location.objectid;
	args.root = BTRFS_I(inode)->root;

	return insert_inode_locked4(inode,
		   btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root),
		   btrfs_find_actor, &args);
}
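
/*
 * Compute the number of transaction items needed to create a new inode:
 * the inode item itself plus any compression property, ACL and LSM xattrs,
 * and the directory entries (or orphan item) that go with it.
 */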
int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args,
			    unsigned int *trans_num_items)
{
	struct inode *dir = args->dir;
	struct inode *inode = args->inode;
	int ret;

	ret = posix_acl_create(dir, &inode->i_mode, &args->default_acl, &args->acl);
	if (ret)
		return ret;

	/* 1 to add inode item */
	*trans_num_items = 1;
	/* 1 to add compression property */
	if (BTRFS_I(dir)->prop_compress)
		(*trans_num_items)++;
	/* 1 to add default ACL xattr */
	if (args->default_acl)
		(*trans_num_items)++;
	/* 1 to add access ACL xattr */
	if (args->acl)
		(*trans_num_items)++;
#ifdef CONFIG_SECURITY
	/* 1 to add LSM xattr */
	if (dir->i_security)
		(*trans_num_items)++;
#endif
	if (args->orphan) {
		/* 1 to add orphan item */
		(*trans_num_items)++;
	} else {
		/*
		 * 1 to add dir item
		 * 1 to add dir index
		 * 1 to update parent inode item
		 *
		 * No need for 1 unit for the inode ref item because it is
		 * inserted in a batch together with the inode item at
		 * btrfs_create_new_inode().
		 */
		*trans_num_items += 3;
	}
	return 0;
}
void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args)
{
	posix_acl_release(args->acl);
	posix_acl_release(args->default_acl);
}
/*
 * Inherit flags from the parent inode.
 *
 * Currently only the compression flags and the cow flags are inherited.
 */
static void btrfs_inherit_iflags(struct inode *inode, struct inode *dir)
{
	unsigned int flags;

	flags = BTRFS_I(dir)->flags;

	if (flags & BTRFS_INODE_NOCOMPRESS) {
		BTRFS_I(inode)->flags &= ~BTRFS_INODE_COMPRESS;
		BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
	} else if (flags & BTRFS_INODE_COMPRESS) {
		BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS;
		BTRFS_I(inode)->flags |= BTRFS_INODE_COMPRESS;
	}

	if (flags & BTRFS_INODE_NODATACOW) {
		BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
		if (S_ISREG(inode->i_mode))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
	}

	btrfs_sync_inode_flags_to_i_flags(inode);
}
int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_new_inode_args *args)
{
	struct inode *dir = args->dir;
	struct inode *inode = args->inode;
	const char *name = args->orphan ? NULL : args->dentry->d_name.name;
	int name_len = args->orphan ? 0 : args->dentry->d_name.len;
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct btrfs_root *root;
	struct btrfs_inode_item *inode_item;
	struct btrfs_key *location;
	struct btrfs_path *path;
	u64 objectid;
	struct btrfs_inode_ref *ref;
	struct btrfs_key key[2];
	u32 sizes[2];
	struct btrfs_item_batch batch;
	unsigned long ptr;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (!args->subvol)
		BTRFS_I(inode)->root = btrfs_grab_root(BTRFS_I(dir)->root);
	root = BTRFS_I(inode)->root;

	ret = btrfs_get_free_objectid(root, &objectid);
	if (ret)
		goto out;
	inode->i_ino = objectid;

	if (args->orphan) {
		/*
		 * O_TMPFILE, set link count to 0, so that after this point, we
		 * fill in an inode item with the correct link count.
		 */
		set_nlink(inode, 0);
	} else {
		trace_btrfs_inode_request(dir);

		ret = btrfs_set_inode_index(BTRFS_I(dir), &BTRFS_I(inode)->dir_index);
		if (ret)
			goto out;
	}
	/* index_cnt is ignored for everything but a dir. */
	BTRFS_I(inode)->index_cnt = BTRFS_DIR_START_INDEX;
	BTRFS_I(inode)->generation = trans->transid;
	inode->i_generation = BTRFS_I(inode)->generation;

	/*
	 * Subvolumes don't inherit flags from their parent directory.
	 * Originally this was probably by accident, but we probably can't
	 * change it now without compatibility issues.
	 */
	if (!args->subvol)
		btrfs_inherit_iflags(inode, dir);

	if (S_ISREG(inode->i_mode)) {
		if (btrfs_test_opt(fs_info, NODATASUM))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
		if (btrfs_test_opt(fs_info, NODATACOW))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
				BTRFS_INODE_NODATASUM;
	}

	location = &BTRFS_I(inode)->location;
	location->objectid = objectid;
	location->offset = 0;
	location->type = BTRFS_INODE_ITEM_KEY;

	ret = btrfs_insert_inode_locked(inode);
	if (ret < 0) {
		if (!args->orphan)
			BTRFS_I(dir)->index_cnt--;
		goto out;
	}

	/*
	 * We could have gotten an inode number from somebody who was fsynced
	 * and then removed in this same transaction, so let's just set full
	 * sync since it will be a full sync anyway and this will blow away the
	 * old info in the log.
	 */
	btrfs_set_inode_full_sync(BTRFS_I(inode));

	key[0].objectid = objectid;
	key[0].type = BTRFS_INODE_ITEM_KEY;
	key[0].offset = 0;

	sizes[0] = sizeof(struct btrfs_inode_item);

	if (!args->orphan) {
		/*
		 * Start new inodes with an inode_ref. This is slightly more
		 * efficient for small numbers of hard links since they will
		 * be packed into one item. Extended refs will kick in if we
		 * add more hard links than can fit in the ref item.
		 */
		key[1].objectid = objectid;
		key[1].type = BTRFS_INODE_REF_KEY;
		if (args->subvol) {
			key[1].offset = objectid;
			sizes[1] = 2 + sizeof(*ref);
		} else {
			key[1].offset = btrfs_ino(BTRFS_I(dir));
			sizes[1] = name_len + sizeof(*ref);
		}
	}

	batch.keys = &key[0];
	batch.data_sizes = &sizes[0];
	batch.total_data_size = sizes[0] + (args->orphan ? 0 : sizes[1]);
	batch.nr = args->orphan ? 1 : 2;
	ret = btrfs_insert_empty_items(trans, root, path, &batch);
	if (ret != 0) {
		btrfs_abort_transaction(trans, ret);
		goto discard;
	}

	inode->i_mtime = current_time(inode);
	inode->i_atime = inode->i_mtime;
	inode->i_ctime = inode->i_mtime;
	BTRFS_I(inode)->i_otime = inode->i_mtime;

	/*
	 * We're going to fill the inode item now, so at this point the inode
	 * must be fully initialized.
	 */

	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_inode_item);
	memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item,
			      sizeof(*inode_item));
	fill_inode_item(trans, path->nodes[0], inode_item, inode);

	if (!args->orphan) {
		ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
				     struct btrfs_inode_ref);
		ptr = (unsigned long)(ref + 1);
		if (args->subvol) {
			btrfs_set_inode_ref_name_len(path->nodes[0], ref, 2);
			btrfs_set_inode_ref_index(path->nodes[0], ref, 0);
			write_extent_buffer(path->nodes[0], "..", ptr, 2);
		} else {
			btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
			btrfs_set_inode_ref_index(path->nodes[0], ref,
						  BTRFS_I(inode)->dir_index);
			write_extent_buffer(path->nodes[0], name, ptr, name_len);
		}
	}

	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);

	if (args->subvol) {
		struct inode *parent;

		/*
		 * Subvolumes inherit properties from their parent subvolume,
		 * not the directory they were created in.
		 */
		parent = btrfs_iget(fs_info->sb, BTRFS_FIRST_FREE_OBJECTID,
				    BTRFS_I(dir)->root);
		if (IS_ERR(parent)) {
			ret = PTR_ERR(parent);
		} else {
			ret = btrfs_inode_inherit_props(trans, inode, parent);
			iput(parent);
		}
	} else {
		ret = btrfs_inode_inherit_props(trans, inode, dir);
	}
	if (ret) {
		btrfs_err(fs_info,
			  "error inheriting props for ino %llu (root %llu): %d",
			  btrfs_ino(BTRFS_I(inode)), root->root_key.objectid,
			  ret);
	}

	/*
	 * Subvolumes don't inherit ACLs or get passed to the LSM. This is
	 * probably a bug.
	 */
	if (!args->subvol) {
		ret = btrfs_init_inode_security(trans, args);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto discard;
		}
	}

	inode_tree_add(inode);

	trace_btrfs_inode_new(inode);
	btrfs_set_inode_last_trans(trans, BTRFS_I(inode));

	btrfs_update_root_times(trans, root);

	if (args->orphan) {
		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
	} else {
		ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
				     name_len, 0, BTRFS_I(inode)->dir_index);
	}
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto discard;
	}

	return 0;

discard:
	/*
	 * discard_new_inode() calls iput(), but the caller owns the reference
	 * to the inode.
	 */
	ihold(inode);
	discard_new_inode(inode);
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * utility function to add 'inode' into 'parent_inode' with
 * a give name and a given sequence number.
 * if 'add_backref' is true, also insert a backref from the
 * inode to the parent directory.
 */
int btrfs_add_link(struct btrfs_trans_handle *trans,
		   struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
		   const char *name, int name_len, int add_backref, u64 index)
{
	int ret = 0;
	struct btrfs_key key;
	struct btrfs_root *root = parent_inode->root;
	u64 ino = btrfs_ino(inode);
	u64 parent_ino = btrfs_ino(parent_inode);

	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		memcpy(&key, &inode->root->root_key, sizeof(key));
	} else {
		key.objectid = ino;
		key.type = BTRFS_INODE_ITEM_KEY;
		key.offset = 0;
	}

	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		ret = btrfs_add_root_ref(trans, key.objectid,
					 root->root_key.objectid, parent_ino,
					 index, name, name_len);
	} else if (add_backref) {
		ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
					     parent_ino, index);
	}

	/* Nothing to clean up yet */
	if (ret)
		return ret;

	ret = btrfs_insert_dir_item(trans, name, name_len, parent_inode, &key,
				    btrfs_inode_type(&inode->vfs_inode), index);
	if (ret == -EEXIST || ret == -EOVERFLOW)
		goto fail_dir_item;
	else if (ret) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size +
			   name_len * 2);
	inode_inc_iversion(&parent_inode->vfs_inode);
	/*
	 * If we are replaying a log tree, we do not want to update the mtime
	 * and ctime of the parent directory with the current time, since the
	 * log replay procedure is responsible for setting them to their correct
	 * values (the ones it had when the fsync was done).
	 */
	if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) {
		struct timespec64 now = current_time(&parent_inode->vfs_inode);

		parent_inode->vfs_inode.i_mtime = now;
		parent_inode->vfs_inode.i_ctime = now;
	}
	ret = btrfs_update_inode(trans, root, parent_inode);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;

fail_dir_item:
	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		u64 local_index;
		int err;

		err = btrfs_del_root_ref(trans, key.objectid,
					 root->root_key.objectid, parent_ino,
					 &local_index, name, name_len);
		if (err)
			btrfs_abort_transaction(trans, err);
	} else if (add_backref) {
		u64 local_index;
		int err;

		err = btrfs_del_inode_ref(trans, root, name, name_len,
					  ino, parent_ino, &local_index);
		if (err)
			btrfs_abort_transaction(trans, err);
	}

	/* Return the original error code */
	return ret;
}
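
/*
 * Common helper for create/mknod/mkdir: prepare the new inode args, start a
 * transaction sized by btrfs_new_inode_prepare(), create the inode and
 * instantiate the dentry on success.
 */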
static int btrfs_create_common(struct inode *dir, struct dentry *dentry,
			       struct inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_new_inode_args new_inode_args = {
		.dir = dir,
		.dentry = dentry,
		.inode = inode,
	};
	unsigned int trans_num_items;
	struct btrfs_trans_handle *trans;
	int err;

	err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
	if (err)
		goto out_inode;

	trans = btrfs_start_transaction(root, trans_num_items);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_new_inode_args;
	}

	err = btrfs_create_new_inode(trans, &new_inode_args);
	if (!err)
		d_instantiate_new(dentry, inode);

	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out_new_inode_args:
	btrfs_new_inode_args_destroy(&new_inode_args);
out_inode:
	if (err)
		iput(inode);
	return err;
}
static int btrfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
		       struct dentry *dentry, umode_t mode, dev_t rdev)
{
	struct inode *inode;

	inode = new_inode(dir->i_sb);
	if (!inode)
		return -ENOMEM;
	inode_init_owner(mnt_userns, inode, dir, mode);
	inode->i_op = &btrfs_special_inode_operations;
	init_special_inode(inode, inode->i_mode, rdev);
	return btrfs_create_common(dir, dentry, inode);
}
static int btrfs_create(struct user_namespace *mnt_userns, struct inode *dir,
			struct dentry *dentry, umode_t mode, bool excl)
{
	struct inode *inode;

	inode = new_inode(dir->i_sb);
	if (!inode)
		return -ENOMEM;
	inode_init_owner(mnt_userns, inode, dir, mode);
	inode->i_fop = &btrfs_file_operations;
	inode->i_op = &btrfs_file_inode_operations;
	inode->i_mapping->a_ops = &btrfs_aops;
	return btrfs_create_common(dir, dentry, inode);
}
static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
		      struct dentry *dentry)
{
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = d_inode(old_dentry);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	u64 index;
	int err;
	int drop_inode = 0;

	/* do not allow sys_link's with other subvols of the same device */
	if (root->root_key.objectid != BTRFS_I(inode)->root->root_key.objectid)
		return -EXDEV;

	if (inode->i_nlink >= BTRFS_LINK_MAX)
		return -EMLINK;

	err = btrfs_set_inode_index(BTRFS_I(dir), &index);
	if (err)
		goto fail;

	/*
	 * 2 items for inode and inode ref
	 * 2 items for dir items
	 * 1 item for parent inode
	 * 1 item for orphan item deletion if O_TMPFILE
	 */
	trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		trans = NULL;
		goto fail;
	}

	/* There are several dir indexes for this inode, clear the cache. */
	BTRFS_I(inode)->dir_index = 0ULL;
	inc_nlink(inode);
	inode_inc_iversion(inode);
	inode->i_ctime = current_time(inode);
	ihold(inode);
	set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);

	err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
			     dentry->d_name.name, dentry->d_name.len, 1, index);

	if (err) {
		drop_inode = 1;
	} else {
		struct dentry *parent = dentry->d_parent;

		err = btrfs_update_inode(trans, root, BTRFS_I(inode));
		if (err)
			goto fail;
		if (inode->i_nlink == 1) {
			/*
			 * If new hard link count is 1, it's a file created
			 * with open(2) O_TMPFILE flag.
			 */
			err = btrfs_orphan_del(trans, BTRFS_I(inode));
			if (err)
				goto fail;
		}
		d_instantiate(dentry, inode);
		btrfs_log_new_name(trans, old_dentry, NULL, 0, parent);
	}

fail:
	if (trans)
		btrfs_end_transaction(trans);
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	btrfs_btree_balance_dirty(fs_info);
	return err;
}
static int btrfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
		       struct dentry *dentry, umode_t mode)
{
	struct inode *inode;

	inode = new_inode(dir->i_sb);
	if (!inode)
		return -ENOMEM;
	inode_init_owner(mnt_userns, inode, dir, S_IFDIR | mode);
	inode->i_op = &btrfs_dir_inode_operations;
	inode->i_fop = &btrfs_dir_file_operations;
	return btrfs_create_common(dir, dentry, inode);
}
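
/*
 * Read the data of a compressed inline extent into @page: the compressed
 * bytes are first copied out of the leaf into a temporary buffer and then
 * decompressed into the page, zeroing any tail of the page the data does
 * not cover.
 */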
static noinline int uncompress_inline(struct btrfs_path *path,
				      struct page *page,
				      size_t pg_offset, u64 extent_offset,
				      struct btrfs_file_extent_item *item)
{
	int ret;
	struct extent_buffer *leaf = path->nodes[0];
	char *tmp;
	size_t max_size;
	unsigned long inline_size;
	unsigned long ptr;
	int compress_type;

	WARN_ON(pg_offset != 0);
	compress_type = btrfs_file_extent_compression(leaf, item);
	max_size = btrfs_file_extent_ram_bytes(leaf, item);
	inline_size = btrfs_file_extent_inline_item_len(leaf, path->slots[0]);
	tmp = kmalloc(inline_size, GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	ptr = btrfs_file_extent_inline_start(item);

	read_extent_buffer(leaf, tmp, ptr, inline_size);

	max_size = min_t(unsigned long, PAGE_SIZE, max_size);
	ret = btrfs_decompress(compress_type, tmp, page,
			       extent_offset, inline_size, max_size);

	/*
	 * decompression code contains a memset to fill in any space between the end
	 * of the uncompressed data and the end of max_size in case the decompressed
	 * data ends up shorter than ram_bytes.  That doesn't cover the hole between
	 * the end of an inline extent and the beginning of the next block, so we
	 * cover that region here.
	 */

	if (max_size + pg_offset < PAGE_SIZE)
		memzero_page(page, pg_offset + max_size,
			     PAGE_SIZE - max_size - pg_offset);
	kfree(tmp);
	return ret;
}
/**
 * btrfs_get_extent - Lookup the first extent overlapping a range in a file.
 * @inode:	file to search in
 * @page:	page to read extent data into if the extent is inline
 * @pg_offset:	offset into @page to copy to
 * @start:	file offset
 * @len:	length of range starting at @start
 *
 * This returns the first &struct extent_map which overlaps with the given
 * range, reading it from the B-tree and caching it if necessary. Note that
 * there may be more extents which overlap the given range after the returned
 * extent_map.
 *
 * If @page is not NULL and the extent is inline, this also reads the extent
 * data directly into the page and marks the extent up to date in the io_tree.
 *
 * Return: ERR_PTR on error, non-NULL extent_map on success.
 */
struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
				    struct page *page, size_t pg_offset,
				    u64 start, u64 len)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	int ret = 0;
	u64 extent_start = 0;
	u64 extent_end = 0;
	u64 objectid = btrfs_ino(inode);
	int extent_type = -1;
	struct btrfs_path *path = NULL;
	struct btrfs_root *root = inode->root;
	struct btrfs_file_extent_item *item;
	struct extent_buffer *leaf;
	struct btrfs_key found_key;
	struct extent_map *em = NULL;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_io_tree *io_tree = &inode->io_tree;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	read_unlock(&em_tree->lock);

	if (em) {
		if (em->start > start || em->start + em->len <= start)
			free_extent_map(em);
		else if (em->block_start == EXTENT_MAP_INLINE && page)
			free_extent_map(em);
		else
			goto out;
	}
	em = alloc_extent_map();
	if (!em) {
		ret = -ENOMEM;
		goto out;
	}
	em->start = EXTENT_MAP_HOLE;
	em->orig_start = EXTENT_MAP_HOLE;
	em->len = (u64)-1;
	em->block_len = (u64)-1;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/* Chances are we'll be called again, so go ahead and do readahead */
	path->reada = READA_FORWARD;

	/*
	 * The same explanation in load_free_space_cache applies here as well,
	 * we only read when we're loading the free space cache, and at that
	 * point the commit_root has everything we need.
	 */
	if (btrfs_is_free_space_inode(inode)) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0);
	if (ret < 0) {
		goto out;
	} else if (ret > 0) {
		if (path->slots[0] == 0)
			goto not_found;
		path->slots[0]--;
		ret = 0;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	if (found_key.objectid != objectid ||
	    found_key.type != BTRFS_EXTENT_DATA_KEY) {
		/*
		 * If we backup past the first extent we want to move forward
		 * and see if there is an extent in front of us, otherwise we'll
		 * say there is a hole for our whole search range which can
		 * cause problems.
		 */
		extent_end = start;
		goto next;
	}

	extent_type = btrfs_file_extent_type(leaf, item);
	extent_start = found_key.offset;
	extent_end = btrfs_file_extent_end(path);
	if (extent_type == BTRFS_FILE_EXTENT_REG ||
	    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
		/* Only regular file could have regular/prealloc extent */
		if (!S_ISREG(inode->vfs_inode.i_mode)) {
			ret = -EUCLEAN;
			btrfs_crit(fs_info,
		"regular/prealloc extent found for non-regular inode %llu",
				   btrfs_ino(inode));
			goto out;
		}
		trace_btrfs_get_extent_show_fi_regular(inode, leaf, item,
						       extent_start);
	} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
		trace_btrfs_get_extent_show_fi_inline(inode, leaf, item,
						      path->slots[0],
						      extent_start);
	}
next:
	if (start >= extent_end) {
		path->slots[0]++;
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				goto not_found;

			leaf = path->nodes[0];
		}
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != objectid ||
		    found_key.type != BTRFS_EXTENT_DATA_KEY)
			goto not_found;
		if (start + len <= found_key.offset)
			goto not_found;
		if (start > found_key.offset)
			goto next;

		/* New extent overlaps with existing one */
		em->start = start;
		em->orig_start = start;
		em->len = found_key.offset - start;
		em->block_start = EXTENT_MAP_HOLE;
		goto insert;
	}

	btrfs_extent_item_to_extent_map(inode, path, item, !page, em);

	if (extent_type == BTRFS_FILE_EXTENT_REG ||
	    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
		goto insert;
	} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
		unsigned long ptr;
		char *map;
		size_t size;
		size_t extent_offset;
		size_t copy_size;

		if (!page)
			goto out;

		size = btrfs_file_extent_ram_bytes(leaf, item);
		extent_offset = page_offset(page) + pg_offset - extent_start;
		copy_size = min_t(u64, PAGE_SIZE - pg_offset,
				  size - extent_offset);
		em->start = extent_start + extent_offset;
		em->len = ALIGN(copy_size, fs_info->sectorsize);
		em->orig_block_len = em->len;
		em->orig_start = em->start;
		ptr = btrfs_file_extent_inline_start(item) + extent_offset;

		if (!PageUptodate(page)) {
			if (btrfs_file_extent_compression(leaf, item) !=
			    BTRFS_COMPRESS_NONE) {
				ret = uncompress_inline(path, page, pg_offset,
							extent_offset, item);
				if (ret)
					goto out;
			} else {
				map = kmap_local_page(page);
				read_extent_buffer(leaf, map + pg_offset, ptr,
						   copy_size);
				if (pg_offset + copy_size < PAGE_SIZE) {
					memset(map + pg_offset + copy_size, 0,
					       PAGE_SIZE - pg_offset -
					       copy_size);
				}
				kunmap_local(map);
			}
			flush_dcache_page(page);
		}
		set_extent_uptodate(io_tree, em->start,
				    extent_map_end(em) - 1, NULL, GFP_NOFS);
		goto insert;
	}
not_found:
	em->start = start;
	em->orig_start = start;
	em->len = len;
	em->block_start = EXTENT_MAP_HOLE;
insert:
	ret = 0;
	btrfs_release_path(path);
	if (em->start > start || extent_map_end(em) <= start) {
		btrfs_err(fs_info,
			  "bad extent! em: [%llu %llu] passed [%llu %llu]",
			  em->start, em->len, start, len);
		ret = -EIO;
		goto out;
	}

	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len);
	write_unlock(&em_tree->lock);
out:
	btrfs_free_path(path);

	trace_btrfs_get_extent(root, inode, em);

	if (ret) {
		free_extent_map(em);
		return ERR_PTR(ret);
	}
	return em;
}
struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode,
					   u64 start, u64 len)
{
	struct extent_map *em;
	struct extent_map *hole_em = NULL;
	u64 delalloc_start = start;
	u64 end;
	u64 delalloc_len;
	u64 delalloc_end;
	int err = 0;

	em = btrfs_get_extent(inode, NULL, 0, start, len);
	if (IS_ERR(em))
		return em;
	/*
	 * If our em maps to:
	 * - a hole or
	 * - a pre-alloc extent,
	 * there might actually be delalloc bytes behind it.
	 */
	if (em->block_start != EXTENT_MAP_HOLE &&
	    !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
		return em;
	else
		hole_em = em;

	/* check to see if we've wrapped (len == -1 or similar) */
	end = start + len;
	if (end < start)
		end = (u64)-1;
	else
		end -= 1;

	em = NULL;

	/* ok, we didn't find anything, lets look for delalloc */
	delalloc_len = count_range_bits(&inode->io_tree, &delalloc_start,
					end, len, EXTENT_DELALLOC, 1);
	delalloc_end = delalloc_start + delalloc_len;
	if (delalloc_end < delalloc_start)
		delalloc_end = (u64)-1;

	/*
	 * We didn't find anything useful, return the original results from
	 * get_extent()
	 */
	if (delalloc_start > end || delalloc_end <= start) {
		em = hole_em;
		hole_em = NULL;
		goto out;
	}

	/*
	 * Adjust the delalloc_start to make sure it doesn't go backwards from
	 * the start they passed in
	 */
	delalloc_start = max(start, delalloc_start);
	delalloc_len = delalloc_end - delalloc_start;

	if (delalloc_len > 0) {
		u64 hole_start;
		u64 hole_len;
		const u64 hole_end = extent_map_end(hole_em);

		em = alloc_extent_map();
		if (!em) {
			err = -ENOMEM;
			goto out;
		}

		ASSERT(hole_em);
		/*
		 * When btrfs_get_extent can't find anything it returns one
		 * huge hole
		 *
		 * Make sure what it found really fits our range, and adjust to
		 * make sure it is based on the start from the caller
		 */
		if (hole_end <= start || hole_em->start > end) {
			free_extent_map(hole_em);
			hole_em = NULL;
		} else {
			hole_start = max(hole_em->start, start);
			hole_len = hole_end - hole_start;
		}

		if (hole_em && delalloc_start > hole_start) {
			/*
			 * Our hole starts before our delalloc, so we have to
			 * return just the parts of the hole that go until the
			 * delalloc starts
			 */
			em->len = min(hole_len, delalloc_start - hole_start);
			em->start = hole_start;
			em->orig_start = hole_start;
			/*
			 * Don't adjust block start at all, it is fixed at
			 * EXTENT_MAP_HOLE
			 */
			em->block_start = hole_em->block_start;
			em->block_len = hole_len;
			if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags))
				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		} else {
			/*
			 * Hole is out of passed range or it starts after
			 * delalloc range
			 */
			em->start = delalloc_start;
			em->len = delalloc_len;
			em->orig_start = delalloc_start;
			em->block_start = EXTENT_MAP_DELALLOC;
			em->block_len = delalloc_len;
		}
	} else {
		return hole_em;
	}
out:

	free_extent_map(hole_em);
	if (err) {
		free_extent_map(em);
		return ERR_PTR(err);
	}
	return em;
}
static struct extent_map *btrfs_create_dio_extent(struct btrfs_inode *inode,
						  const u64 start,
						  const u64 len,
						  const u64 orig_start,
						  const u64 block_start,
						  const u64 block_len,
						  const u64 orig_block_len,
						  const u64 ram_bytes,
						  const int type)
{
	struct extent_map *em = NULL;
	int ret;

	if (type != BTRFS_ORDERED_NOCOW) {
		em = create_io_em(inode, start, len, orig_start, block_start,
				  block_len, orig_block_len, ram_bytes,
				  BTRFS_COMPRESS_NONE, /* compress_type */
				  type);
		if (IS_ERR(em))
			goto out;
	}
	ret = btrfs_add_ordered_extent(inode, start, len, len, block_start,
				       block_len, 0,
				       (1 << type) |
				       (1 << BTRFS_ORDERED_DIRECT),
				       BTRFS_COMPRESS_NONE);
	if (ret) {
		if (em) {
			free_extent_map(em);
			btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
		}
		em = ERR_PTR(ret);
	}
out:

	return em;
}
static struct extent_map *btrfs_new_extent_direct(struct btrfs_inode *inode,
						  u64 start, u64 len)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_map *em;
	struct btrfs_key ins;
	u64 alloc_hint;
	int ret;

	alloc_hint = get_extent_allocation_hint(inode, start, len);
	ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize,
				   0, alloc_hint, &ins, 1, 1);
	if (ret)
		return ERR_PTR(ret);

	em = btrfs_create_dio_extent(inode, start, ins.offset, start,
				     ins.objectid, ins.offset, ins.offset,
				     ins.offset, BTRFS_ORDERED_REGULAR);
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	if (IS_ERR(em))
		btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset,
					   1);

	return em;
}
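
/* Check whether the block group containing @bytenr is currently read-only. */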
static bool btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group *block_group;
	bool readonly = false;

	block_group = btrfs_lookup_block_group(fs_info, bytenr);
	if (!block_group || block_group->ro)
		readonly = true;
	if (block_group)
		btrfs_put_block_group(block_group);
	return readonly;
}
/*
 * Check if we can do nocow write into the range [@offset, @offset + @len)
 *
 * @offset:	File offset
 * @len:	The length to write, will be updated to the nocow writeable
 *		range
 * @orig_start:	(optional) Return the original file offset of the file extent
 * @orig_len:	(optional) Return the original on-disk length of the file extent
 * @ram_bytes:	(optional) Return the ram_bytes of the file extent
 * @strict:	if true, omit optimizations that might force us into unnecessary
 *		cow. e.g., don't trust generation number.
 *
 * Return:
 * >0	and update @len if we can do nocow write
 *  0	if we can't do nocow write
 * <0	if error happened
 *
 * NOTE: This only checks the file extents, caller is responsible to wait for
 *	 any ordered extents.
 */
noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
			      u64 *orig_start, u64 *orig_block_len,
			      u64 *ram_bytes, bool strict)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct can_nocow_file_extent_args nocow_args = { 0 };
	struct btrfs_path *path;
	int ret;
	struct extent_buffer *leaf;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	int found_type;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_lookup_file_extent(NULL, root, path,
			btrfs_ino(BTRFS_I(inode)), offset, 0);
	if (ret < 0)
		goto out;

	if (ret == 1) {
		if (path->slots[0] == 0) {
			/* can't find the item, must cow */
			ret = 0;
			goto out;
		}
		path->slots[0]--;
	}
	ret = 0;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != btrfs_ino(BTRFS_I(inode)) ||
	    key.type != BTRFS_EXTENT_DATA_KEY) {
		/* not our file or wrong item type, must cow */
		goto out;
	}

	if (key.offset > offset) {
		/* Wrong offset, must cow */
		goto out;
	}

	if (btrfs_file_extent_end(path) <= offset)
		goto out;

	fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(leaf, fi);
	if (ram_bytes)
		*ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);

	nocow_args.start = offset;
	nocow_args.end = offset + *len - 1;
	nocow_args.strict = strict;
	nocow_args.free_path = true;

	ret = can_nocow_file_extent(path, &key, BTRFS_I(inode), &nocow_args);
	/* can_nocow_file_extent() has freed the path. */
	path = NULL;

	if (ret != 1) {
		/* Treat errors as not being able to NOCOW. */
		ret = 0;
		goto out;
	}

	ret = 0;
	if (btrfs_extent_readonly(fs_info, nocow_args.disk_bytenr))
		goto out;

	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		u64 range_end;

		range_end = round_up(offset + nocow_args.num_bytes,
				     root->fs_info->sectorsize) - 1;
		ret = test_range_bit(io_tree, offset, range_end,
				     EXTENT_DELALLOC, 0, NULL);
		if (ret) {
			ret = -EAGAIN;
			goto out;
		}
	}

	if (orig_start)
		*orig_start = key.offset - nocow_args.extent_offset;
	if (orig_block_len)
		*orig_block_len = nocow_args.disk_num_bytes;

	*len = nocow_args.num_bytes;
	ret = 1;
out:
	btrfs_free_path(path);
	return ret;
}
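
/*
 * Lock the io_tree range for direct IO, looping until no ordered extents or
 * conflicting page cache pages remain in the range.  Returns 0 on success,
 * -EAGAIN for NOWAIT requests that would block, or -ENOTBLK to make the
 * caller fall back to buffered IO.
 */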
static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
			      struct extent_state **cached_state,
			      unsigned int iomap_flags)
{
	const bool writing = (iomap_flags & IOMAP_WRITE);
	const bool nowait = (iomap_flags & IOMAP_NOWAIT);
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_ordered_extent *ordered;
	int ret = 0;

	while (1) {
		if (nowait) {
			if (!try_lock_extent(io_tree, lockstart, lockend))
				return -EAGAIN;
		} else {
			lock_extent_bits(io_tree, lockstart, lockend, cached_state);
		}
		/*
		 * We're concerned with the entire range that we're going to be
		 * doing DIO to, so we need to make sure there's no ordered
		 * extents in this range.
		 */
		ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart,
						     lockend - lockstart + 1);

		/*
		 * We need to make sure there are no buffered pages in this
		 * range either, we could have raced between the invalidate in
		 * generic_file_direct_write and locking the extent.  The
		 * invalidate needs to happen so that reads after a write do not
		 * get stale data.
		 */
		if (!ordered &&
		    (!writing || !filemap_range_has_page(inode->i_mapping,
							 lockstart, lockend)))
			break;

		unlock_extent_cached(io_tree, lockstart, lockend, cached_state);

		if (ordered) {
			if (nowait) {
				btrfs_put_ordered_extent(ordered);
				ret = -EAGAIN;
				break;
			}
			/*
			 * If we are doing a DIO read and the ordered extent we
			 * found is for a buffered write, we can not wait for it
			 * to complete and retry, because if we do so we can
			 * deadlock with concurrent buffered writes on page
			 * locks. This happens only if our DIO read covers more
			 * than one extent map, if at this point has already
			 * created an ordered extent for a previous extent map
			 * and locked its range in the inode's io tree, and a
			 * concurrent write against that previous extent map's
			 * range and this range started (we unlock the ranges
			 * in the io tree only when the bios complete and
			 * buffered writes always lock pages before attempting
			 * to lock range in the io tree).
			 */
			if (writing ||
			    test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags))
				btrfs_start_ordered_extent(ordered, 1);
			else
				ret = nowait ? -EAGAIN : -ENOTBLK;
			btrfs_put_ordered_extent(ordered);
		} else {
			/*
			 * We could trigger writeback for this range (and wait
			 * for it to complete) and then invalidate the pages for
			 * this range (through invalidate_inode_pages2_range()),
			 * but that can lead us to a deadlock with a concurrent
			 * call to readahead (a buffered read or a defrag call
			 * triggered a readahead) on a page lock due to an
			 * ordered dio extent we created before but did not have
			 * yet a corresponding bio submitted (whence it can not
			 * complete), which makes readahead wait for that
			 * ordered extent to complete while holding a lock on
			 * that page.
			 */
			ret = nowait ? -EAGAIN : -ENOTBLK;
		}

		if (ret)
			break;

		cond_resched();
	}

	return ret;
}
/* The callers of this must take lock_extent() */
static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
				       u64 len, u64 orig_start, u64 block_start,
				       u64 block_len, u64 orig_block_len,
				       u64 ram_bytes, int compress_type,
				       int type)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	int ret;

	ASSERT(type == BTRFS_ORDERED_PREALLOC ||
	       type == BTRFS_ORDERED_COMPRESSED ||
	       type == BTRFS_ORDERED_NOCOW ||
	       type == BTRFS_ORDERED_REGULAR);

	em_tree = &inode->extent_tree;
	em = alloc_extent_map();
	if (!em)
		return ERR_PTR(-ENOMEM);

	em->start = start;
	em->orig_start = orig_start;
	em->len = len;
	em->block_len = block_len;
	em->block_start = block_start;
	em->orig_block_len = orig_block_len;
	em->ram_bytes = ram_bytes;
	em->generation = -1;
	set_bit(EXTENT_FLAG_PINNED, &em->flags);
	if (type == BTRFS_ORDERED_PREALLOC) {
		set_bit(EXTENT_FLAG_FILLING, &em->flags);
	} else if (type == BTRFS_ORDERED_COMPRESSED) {
		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		em->compress_type = compress_type;
	}

	do {
		btrfs_drop_extent_cache(inode, em->start,
					em->start + em->len - 1, 0);
		write_lock(&em_tree->lock);
		ret = add_extent_mapping(em_tree, em, 1);
		write_unlock(&em_tree->lock);
		/*
		 * The caller has taken lock_extent(), who could race with us
		 * to add em?
		 */
	} while (ret == -EEXIST);

	if (ret) {
		free_extent_map(em);
		return ERR_PTR(ret);
	}

	/* em got 2 refs now, callers needs to do free_extent_map once. */
	return em;
}
static int btrfs_get_blocks_direct_write(struct extent_map **map,
					 struct inode *inode,
					 struct btrfs_dio_data *dio_data,
					 u64 start, u64 len,
					 unsigned int iomap_flags)
{
	const bool nowait = (iomap_flags & IOMAP_NOWAIT);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map *em = *map;
	int type;
	u64 block_start, orig_start, orig_block_len, ram_bytes;
	struct btrfs_block_group *bg;
	bool can_nocow = false;
	bool space_reserved = false;
	u64 prev_len;
	int ret = 0;

	/*
	 * We don't allocate a new extent in the following cases
	 *
	 * 1) The inode is marked as NODATACOW. In this case we'll just use the
	 * existing extent.
	 * 2) The extent is marked as PREALLOC. We're good to go here and can
	 * just use the extent.
	 *
	 */
	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
	    ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
	     em->block_start != EXTENT_MAP_HOLE)) {
		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
			type = BTRFS_ORDERED_PREALLOC;
		else
			type = BTRFS_ORDERED_NOCOW;
		len = min(len, em->len - (start - em->start));
		block_start = em->block_start + (start - em->start);

		if (can_nocow_extent(inode, start, &len, &orig_start,
				     &orig_block_len, &ram_bytes, false) == 1) {
			bg = btrfs_inc_nocow_writers(fs_info, block_start);
			if (bg)
				can_nocow = true;
		}
	}

	prev_len = len;
	if (can_nocow) {
		struct extent_map *em2;

		/* We can NOCOW, so only need to reserve metadata space. */
		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len,
						      nowait);
		if (ret < 0) {
			/* Our caller expects us to free the input extent map. */
			free_extent_map(em);
			*map = NULL;
			btrfs_dec_nocow_writers(bg);
			if (nowait && (ret == -ENOSPC || ret == -EDQUOT))
				ret = -EAGAIN;
			goto out;
		}
		space_reserved = true;

		em2 = btrfs_create_dio_extent(BTRFS_I(inode), start, len,
					      orig_start, block_start,
					      len, orig_block_len,
					      ram_bytes, type);
		btrfs_dec_nocow_writers(bg);
		if (type == BTRFS_ORDERED_PREALLOC) {
			free_extent_map(em);
			*map = em = em2;
		}

		if (IS_ERR(em2)) {
			ret = PTR_ERR(em2);
			goto out;
		}

		dio_data->nocow_done = true;
	} else {
		/* Our caller expects us to free the input extent map. */
		free_extent_map(em);
		*map = NULL;

		if (nowait) {
			ret = -EAGAIN;
			goto out;
		}

		/*
		 * If we could not allocate data space before locking the file
		 * range and we can't do a NOCOW write, then we have to fail.
		 */
		if (!dio_data->data_space_reserved) {
			ret = -ENOSPC;
			goto out;
		}

		/*
		 * We have to COW and we have already reserved data space before,
		 * so now we reserve only metadata.
		 */
		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len,
						      false);
		if (ret < 0)
			goto out;
		space_reserved = true;

		em = btrfs_new_extent_direct(BTRFS_I(inode), start, len);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}
		*map = em;
		len = min(len, em->len - (start - em->start));
		if (len < prev_len)
			btrfs_delalloc_release_metadata(BTRFS_I(inode),
							prev_len - len, true);
	}

	/*
	 * We have created our ordered extent, so we can now release our reservation
	 * for an outstanding extent.
	 */
	btrfs_delalloc_release_extents(BTRFS_I(inode), prev_len);

	/*
	 * Need to update the i_size under the extent lock so buffered
	 * readers will get the updated i_size when we unlock.
	 */
	if (start + len > i_size_read(inode))
		i_size_write(inode, start + len);
out:
	if (ret && space_reserved) {
		btrfs_delalloc_release_extents(BTRFS_I(inode), len);
		btrfs_delalloc_release_metadata(BTRFS_I(inode), len, true);
	}
	return ret;
}
*inode
, loff_t start
,
7576 loff_t length
, unsigned int flags
, struct iomap
*iomap
,
7577 struct iomap
*srcmap
)
7579 struct iomap_iter
*iter
= container_of(iomap
, struct iomap_iter
, iomap
);
7580 struct btrfs_fs_info
*fs_info
= btrfs_sb(inode
->i_sb
);
7581 struct extent_map
*em
;
7582 struct extent_state
*cached_state
= NULL
;
7583 struct btrfs_dio_data
*dio_data
= iter
->private;
7584 u64 lockstart
, lockend
;
7585 const bool write
= !!(flags
& IOMAP_WRITE
);
7588 const u64 data_alloc_len
= length
;
7589 bool unlock_extents
= false;
7592 len
= min_t(u64
, len
, fs_info
->sectorsize
);
7595 lockend
= start
+ len
- 1;
7598 * iomap_dio_rw() only does filemap_write_and_wait_range(), which isn't
7599 * enough if we've written compressed pages to this area, so we need to
7600 * flush the dirty pages again to make absolutely sure that any
7601 * outstanding dirty pages are on disk - the first flush only starts
7602 * compression on the data, while keeping the pages locked, so by the
7603 * time the second flush returns we know bios for the compressed pages
7604 * were submitted and finished, and the pages no longer under writeback.
7606 * If we have a NOWAIT request and we have any pages in the range that
7607 * are locked, likely due to compression still in progress, we don't want
7608 * to block on page locks. We also don't want to block on pages marked as
7609 * dirty or under writeback (same as for the non-compression case).
7610 * iomap_dio_rw() did the same check, but after that and before we got
7611 * here, mmap'ed writes may have happened or buffered reads started
7612 * (readpage() and readahead(), which lock pages), as we haven't locked
7613 * the file range yet.
7615 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT
,
7616 &BTRFS_I(inode
)->runtime_flags
)) {
7617 if (flags
& IOMAP_NOWAIT
) {
7618 if (filemap_range_needs_writeback(inode
->i_mapping
,
7619 lockstart
, lockend
))
7622 ret
= filemap_fdatawrite_range(inode
->i_mapping
, start
,
7623 start
+ length
- 1);
7629 memset(dio_data
, 0, sizeof(*dio_data
));
7632 * We always try to allocate data space and must do it before locking
7633 * the file range, to avoid deadlocks with concurrent writes to the same
7634 * range if the range has several extents and the writes don't expand the
7635 * current i_size (the inode lock is taken in shared mode). If we fail to
7636 * allocate data space here we continue and later, after locking the
7637 * file range, we fail with ENOSPC only if we figure out we can not do a
7640 if (write
&& !(flags
& IOMAP_NOWAIT
)) {
7641 ret
= btrfs_check_data_free_space(BTRFS_I(inode
),
7642 &dio_data
->data_reserved
,
7643 start
, data_alloc_len
);
7645 dio_data
->data_space_reserved
= true;
7646 else if (ret
&& !(BTRFS_I(inode
)->flags
&
7647 (BTRFS_INODE_NODATACOW
| BTRFS_INODE_PREALLOC
)))
7652 * If this errors out it's because we couldn't invalidate pagecache for
7653 * this range and we need to fallback to buffered IO, or we are doing a
7654 * NOWAIT read/write and we need to block.
7656 ret
= lock_extent_direct(inode
, lockstart
, lockend
, &cached_state
, flags
);
7660 em
= btrfs_get_extent(BTRFS_I(inode
), NULL
, 0, start
, len
);
7667 * Ok for INLINE and COMPRESSED extents we need to fallback on buffered
7668 * io. INLINE is special, and we could probably kludge it in here, but
7669 * it's still buffered so for safety lets just fall back to the generic
7672 * For COMPRESSED we _have_ to read the entire extent in so we can
7673 * decompress it, so there will be buffering required no matter what we
7674 * do, so go ahead and fallback to buffered.
7676 * We return -ENOTBLK because that's what makes DIO go ahead and go back
7677 * to buffered IO. Don't blame me, this is the price we pay for using
7680 if (test_bit(EXTENT_FLAG_COMPRESSED
, &em
->flags
) ||
7681 em
->block_start
== EXTENT_MAP_INLINE
) {
7682 free_extent_map(em
);
7684 * If we are in a NOWAIT context, return -EAGAIN in order to
7685 * fallback to buffered IO. This is not only because we can
7686 * block with buffered IO (no support for NOWAIT semantics at
7687 * the moment) but also to avoid returning short reads to user
7688 * space - this happens if we were able to read some data from
7689 * previous non-compressed extents and then when we fallback to
7690 * buffered IO, at btrfs_file_read_iter() by calling
7691 * filemap_read(), we fail to fault in pages for the read buffer,
7692 * in which case filemap_read() returns a short read (the number
7693 * of bytes previously read is > 0, so it does not return -EFAULT).
7695 ret
= (flags
& IOMAP_NOWAIT
) ? -EAGAIN
: -ENOTBLK
;
7699 len
= min(len
, em
->len
- (start
- em
->start
));
7702 * If we have a NOWAIT request and the range contains multiple extents
7703 * (or a mix of extents and holes), then we return -EAGAIN to make the
7704 * caller fallback to a context where it can do a blocking (without
7705 * NOWAIT) request. This way we avoid doing partial IO and returning
7706 * success to the caller, which is not optimal for writes and for reads
7707 * it can result in unexpected behaviour for an application.
7709 * When doing a read, because we use IOMAP_DIO_PARTIAL when calling
7710 * iomap_dio_rw(), we can end up returning less data then what the caller
7711 * asked for, resulting in an unexpected, and incorrect, short read.
7712 * That is, the caller asked to read N bytes and we return less than that,
7713 * which is wrong unless we are crossing EOF. This happens if we get a
7714 * page fault error when trying to fault in pages for the buffer that is
7715 * associated to the struct iov_iter passed to iomap_dio_rw(), and we
7716 * have previously submitted bios for other extents in the range, in
7717 * which case iomap_dio_rw() may return us EIOCBQUEUED if not all of
7718 * those bios have completed by the time we get the page fault error,
7719 * which we return back to our caller - we should only return EIOCBQUEUED
7720 * after we have submitted bios for all the extents in the range.
7722 if ((flags
& IOMAP_NOWAIT
) && len
< length
) {
7723 free_extent_map(em
);
7729 ret
= btrfs_get_blocks_direct_write(&em
, inode
, dio_data
,
7733 unlock_extents
= true;
7734 /* Recalc len in case the new em is smaller than requested */
7735 len
= min(len
, em
->len
- (start
- em
->start
));
7736 if (dio_data
->data_space_reserved
) {
7738 u64 release_len
= 0;
7740 if (dio_data
->nocow_done
) {
7741 release_offset
= start
;
7742 release_len
= data_alloc_len
;
7743 } else if (len
< data_alloc_len
) {
7744 release_offset
= start
+ len
;
7745 release_len
= data_alloc_len
- len
;
7748 if (release_len
> 0)
7749 btrfs_free_reserved_data_space(BTRFS_I(inode
),
7750 dio_data
->data_reserved
,
7756 * We need to unlock only the end area that we aren't using.
7757 * The rest is going to be unlocked by the endio routine.
7759 lockstart
= start
+ len
;
7760 if (lockstart
< lockend
)
7761 unlock_extents
= true;
7765 unlock_extent_cached(&BTRFS_I(inode
)->io_tree
,
7766 lockstart
, lockend
, &cached_state
);
7768 free_extent_state(cached_state
);
7771 * Translate extent map information to iomap.
7772 * We trim the extents (and move the addr) even though iomap code does
7773 * that, since we have locked only the parts we are performing I/O in.
7775 if ((em
->block_start
== EXTENT_MAP_HOLE
) ||
7776 (test_bit(EXTENT_FLAG_PREALLOC
, &em
->flags
) && !write
)) {
7777 iomap
->addr
= IOMAP_NULL_ADDR
;
7778 iomap
->type
= IOMAP_HOLE
;
7780 iomap
->addr
= em
->block_start
+ (start
- em
->start
);
7781 iomap
->type
= IOMAP_MAPPED
;
7783 iomap
->offset
= start
;
7784 iomap
->bdev
= fs_info
->fs_devices
->latest_dev
->bdev
;
7785 iomap
->length
= len
;
7787 if (write
&& btrfs_use_zone_append(BTRFS_I(inode
), em
->block_start
))
7788 iomap
->flags
|= IOMAP_F_ZONE_APPEND
;
7790 free_extent_map(em
);
7795 unlock_extent_cached(&BTRFS_I(inode
)->io_tree
, lockstart
, lockend
,
7798 if (dio_data
->data_space_reserved
) {
7799 btrfs_free_reserved_data_space(BTRFS_I(inode
),
7800 dio_data
->data_reserved
,
7801 start
, data_alloc_len
);
7802 extent_changeset_free(dio_data
->data_reserved
);
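
/*
 * The iomap_end callback for direct IO: if less was submitted than asked
 * for, finish the ordered extent for the unsubmitted tail of a write (or
 * unlock it for a read) and report -ENOTBLK so the caller can fall back.
 */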
static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length,
		ssize_t written, unsigned int flags, struct iomap *iomap)
{
	struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
	struct btrfs_dio_data *dio_data = iter->private;
	size_t submitted = dio_data->submitted;
	const bool write = !!(flags & IOMAP_WRITE);
	int ret = 0;

	if (!write && (iomap->type == IOMAP_HOLE)) {
		/* If reading from a hole, unlock and return */
		unlock_extent(&BTRFS_I(inode)->io_tree, pos, pos + length - 1);
		return 0;
	}

	if (submitted < length) {
		pos += submitted;
		length -= submitted;
		if (write)
			__endio_write_update_ordered(BTRFS_I(inode), pos,
						     length, false);
		else
			unlock_extent(&BTRFS_I(inode)->io_tree, pos,
				      pos + length - 1);
		ret = -ENOTBLK;
	}

	if (write)
		extent_changeset_free(dio_data->data_reserved);
	return ret;
}
static void btrfs_dio_private_put(struct btrfs_dio_private *dip)
{
	/*
	 * This implies a barrier so that stores to dio_bio->bi_status before
	 * this and loads of dio_bio->bi_status after this are fully ordered.
	 */
	if (!refcount_dec_and_test(&dip->refs))
		return;

	if (btrfs_op(&dip->bio) == BTRFS_MAP_WRITE) {
		__endio_write_update_ordered(BTRFS_I(dip->inode),
					     dip->file_offset,
					     dip->bytes,
					     !dip->bio.bi_status);
	} else {
		unlock_extent(&BTRFS_I(dip->inode)->io_tree,
			      dip->file_offset,
			      dip->file_offset + dip->bytes - 1);
	}

	kfree(dip->csums);
	bio_endio(&dip->bio);
}
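
/* Submit a repair bio for a failed DIO read sector, reading from @mirror_num. */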
static void submit_dio_repair_bio(struct inode *inode, struct bio *bio,
				  int mirror_num,
				  enum btrfs_compression_type compress_type)
{
	struct btrfs_dio_private *dip = bio->bi_private;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);

	BUG_ON(bio_op(bio) == REQ_OP_WRITE);

	if (btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA))
		return;

	refcount_inc(&dip->refs);
	if (btrfs_map_bio(fs_info, bio, mirror_num))
		refcount_dec(&dip->refs);
}
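
/*
 * Verify a completed DIO read bio sector by sector: on checksum success
 * clean any recorded IO failures, otherwise queue a repair read from a
 * different mirror.
 */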
static blk_status_t btrfs_check_read_dio_bio(struct btrfs_dio_private *dip,
					     struct btrfs_bio *bbio,
					     const bool uptodate)
{
	struct inode *inode = dip->inode;
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	const u32 sectorsize = fs_info->sectorsize;
	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	const bool csum = !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM);
	struct bio_vec bvec;
	struct bvec_iter iter;
	u32 bio_offset = 0;
	blk_status_t err = BLK_STS_OK;

	__bio_for_each_segment(bvec, &bbio->bio, iter, bbio->iter) {
		unsigned int i, nr_sectors, pgoff;

		nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec.bv_len);
		pgoff = bvec.bv_offset;
		for (i = 0; i < nr_sectors; i++) {
			u64 start = bbio->file_offset + bio_offset;

			ASSERT(pgoff < PAGE_SIZE);
			if (uptodate &&
			    (!csum || !check_data_csum(inode, bbio,
						       bio_offset, bvec.bv_page,
						       pgoff))) {
				clean_io_failure(fs_info, failure_tree, io_tree,
						 start, bvec.bv_page,
						 btrfs_ino(BTRFS_I(inode)),
						 pgoff);
			} else {
				int ret;

				ret = btrfs_repair_one_sector(inode, &bbio->bio,
						bio_offset, bvec.bv_page, pgoff,
						start, bbio->mirror_num,
						submit_dio_repair_bio);
				if (ret)
					err = errno_to_blk_status(ret);
			}
			ASSERT(bio_offset + sectorsize > bio_offset);
			bio_offset += sectorsize;
			pgoff += sectorsize;
		}
	}
	return err;
}
static void __endio_write_update_ordered(struct btrfs_inode *inode,
					 const u64 offset, const u64 bytes,
					 const bool uptodate)
{
	btrfs_mark_ordered_io_finished(inode, NULL, offset, bytes,
				       finish_ordered_fn, uptodate);
}
static blk_status_t btrfs_submit_bio_start_direct_io(struct inode *inode,
						     struct bio *bio,
						     u64 dio_file_offset)
{
	return btrfs_csum_one_bio(BTRFS_I(inode), bio, dio_file_offset, false);
}
static void btrfs_end_dio_bio(struct bio *bio)
{
	struct btrfs_dio_private *dip = bio->bi_private;
	struct btrfs_bio *bbio = btrfs_bio(bio);
	blk_status_t err = bio->bi_status;

	if (err)
		btrfs_warn(BTRFS_I(dip->inode)->root->fs_info,
			   "direct IO failed ino %llu rw %d,%u sector %#Lx len %u err no %d",
			   btrfs_ino(BTRFS_I(dip->inode)), bio_op(bio),
			   bio->bi_opf, bio->bi_iter.bi_sector,
			   bio->bi_iter.bi_size, err);

	if (bio_op(bio) == REQ_OP_READ)
		err = btrfs_check_read_dio_bio(dip, bbio, !err);

	if (err)
		dip->bio.bi_status = err;

	btrfs_record_physical_zoned(dip->inode, bbio->file_offset, bio);

	bio_put(bio);
	btrfs_dio_private_put(dip);
}
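
/*
 * Submit one cloned DIO bio: hook up the endio workqueue for reads, compute
 * checksums for writes (possibly via the async workqueues) or point read
 * bios at the preloaded checksum array, then map and submit the bio.
 */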
static inline blk_status_t btrfs_submit_dio_bio(struct bio *bio,
		struct inode *inode, u64 file_offset, int async_submit)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_dio_private *dip = bio->bi_private;
	bool write = btrfs_op(bio) == BTRFS_MAP_WRITE;
	blk_status_t ret;

	/* Check btrfs_submit_bio_hook() for rules about async submit. */
	if (async_submit)
		async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers);

	if (!write) {
		ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
		if (ret)
			goto err;
	}

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
		goto map;

	if (write && async_submit) {
		ret = btrfs_wq_submit_bio(inode, bio, 0, file_offset,
					  btrfs_submit_bio_start_direct_io);
		goto err;
	} else if (write) {
		/*
		 * If we aren't doing async submit, calculate the csum of the
		 * bio now.
		 */
		ret = btrfs_csum_one_bio(BTRFS_I(inode), bio, file_offset, false);
		if (ret)
			goto err;
	} else {
		u64 csum_offset;

		csum_offset = file_offset - dip->file_offset;
		csum_offset >>= fs_info->sectorsize_bits;
		csum_offset *= fs_info->csum_size;
		btrfs_bio(bio)->csum = dip->csums + csum_offset;
	}
map:
	ret = btrfs_map_bio(fs_info, bio, 0);
err:
	return ret;
}
static void btrfs_submit_direct(const struct iomap_iter *iter,
		struct bio *dio_bio, loff_t file_offset)
{
	struct btrfs_dio_private *dip =
		container_of(dio_bio, struct btrfs_dio_private, bio);
	struct inode *inode = iter->inode;
	const bool write = (btrfs_op(dio_bio) == BTRFS_MAP_WRITE);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	const bool raid56 = (btrfs_data_alloc_profile(fs_info) &
			     BTRFS_BLOCK_GROUP_RAID56_MASK);
	struct bio *bio;
	u64 start_sector;
	int async_submit = 0;
	u64 submit_len;
	u64 clone_offset = 0;
	u64 clone_len;
	u64 logical;
	int ret;
	blk_status_t status;
	struct btrfs_io_geometry geom;
	struct btrfs_dio_data *dio_data = iter->private;
	struct extent_map *em = NULL;

	dip->inode = inode;
	dip->file_offset = file_offset;
	dip->bytes = dio_bio->bi_iter.bi_size;
	refcount_set(&dip->refs, 1);
	dip->csums = NULL;

	if (!write && !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
		unsigned int nr_sectors =
			(dio_bio->bi_iter.bi_size >> fs_info->sectorsize_bits);

		/*
		 * Load the csums up front to reduce csum tree searches and
		 * contention when submitting bios.
		 */
		status = BLK_STS_RESOURCE;
		dip->csums = kcalloc(nr_sectors, fs_info->csum_size, GFP_NOFS);
		if (!dip->csums)
			goto out_err;

		status = btrfs_lookup_bio_sums(inode, dio_bio, dip->csums);
		if (status != BLK_STS_OK)
			goto out_err;
	}

	start_sector = dio_bio->bi_iter.bi_sector;
	submit_len = dio_bio->bi_iter.bi_size;

	do {
		logical = start_sector << 9;
		em = btrfs_get_chunk_map(fs_info, logical, submit_len);
		if (IS_ERR(em)) {
			status = errno_to_blk_status(PTR_ERR(em));
			em = NULL;
			goto out_err_em;
		}
		ret = btrfs_get_io_geometry(fs_info, em, btrfs_op(dio_bio),
					    logical, &geom);
		if (ret) {
			status = errno_to_blk_status(ret);
			goto out_err_em;
		}

		clone_len = min(submit_len, geom.len);
		ASSERT(clone_len <= UINT_MAX);

		/*
		 * This will never fail as it's passing GPF_NOFS and
		 * the allocation is backed by btrfs_bioset.
		 */
		bio = btrfs_bio_clone_partial(dio_bio, clone_offset, clone_len);
		bio->bi_private = dip;
		bio->bi_end_io = btrfs_end_dio_bio;
		btrfs_bio(bio)->file_offset = file_offset;

		if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
			status = extract_ordered_extent(BTRFS_I(inode), bio,
							file_offset);
			if (status) {
				bio_put(bio);
				goto out_err_em;
			}
		}

		ASSERT(submit_len >= clone_len);
		submit_len -= clone_len;

		/*
		 * Increase the count before we submit the bio so we know
		 * the end IO handler won't happen before we increase the
		 * count. Otherwise, the dip might get freed before we're
		 * done setting it up.
		 *
		 * We transfer the initial reference to the last bio, so we
		 * don't need to increment the reference count for the last one.
		 */
		if (submit_len > 0) {
			refcount_inc(&dip->refs);
			/*
			 * If we are submitting more than one bio, submit them
			 * all asynchronously. The exception is RAID 5 or 6, as
			 * asynchronous checksums make it difficult to collect
			 * full stripe writes.
			 */
			if (!raid56)
				async_submit = 1;
		}

		status = btrfs_submit_dio_bio(bio, inode, file_offset,
					      async_submit);
		if (status) {
			bio_put(bio);
			if (submit_len > 0)
				refcount_dec(&dip->refs);
			goto out_err_em;
		}

		dio_data->submitted += clone_len;
		clone_offset += clone_len;
		start_sector += clone_len >> 9;
		file_offset += clone_len;

		free_extent_map(em);
	} while (submit_len > 0);
	return;

out_err_em:
	free_extent_map(em);
out_err:
	dio_bio->bi_status = status;
	btrfs_dio_private_put(dip);
}
static const struct iomap_ops btrfs_dio_iomap_ops = {
	.iomap_begin            = btrfs_dio_iomap_begin,
	.iomap_end              = btrfs_dio_iomap_end,
};

static const struct iomap_dio_ops btrfs_dio_ops = {
	.submit_io		= btrfs_submit_direct,
	.bio_set		= &btrfs_dio_bioset,
};
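
/*
 * Entry point for DIO reads and writes: wires the btrfs iomap ops into
 * iomap_dio_rw() with IOMAP_DIO_PARTIAL so a short DIO can be continued
 * after faulting in the remaining pages (@done_before bytes already done).
 */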
ssize_t btrfs_dio_rw(struct kiocb *iocb, struct iov_iter *iter, size_t done_before)
{
	struct btrfs_dio_data data;

	return iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
			    IOMAP_DIO_PARTIAL, &data, done_before);
}
static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
			u64 start, u64 len)
{
	int ret;

	ret = fiemap_prep(inode, fieinfo, start, &len, 0);
	if (ret)
		return ret;

	return extent_fiemap(BTRFS_I(inode), fieinfo, start, len);
}
static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	int ret;

	if (current->flags & PF_MEMALLOC) {
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
		return 0;
	}

	/*
	 * If we are under memory pressure we will call this directly from the
	 * VM, we need to make sure we have the inode referenced for the ordered
	 * extent.  If not just return like we didn't do anything.
	 */
	if (!igrab(inode)) {
		redirty_page_for_writepage(wbc, page);
		return AOP_WRITEPAGE_ACTIVATE;
	}
	ret = extent_write_full_page(page, wbc);
	btrfs_add_delayed_iput(inode);
	return ret;
}
static int btrfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	return extent_writepages(mapping, wbc);
}
static void btrfs_readahead(struct readahead_control *rac)
{
	extent_readahead(rac);
}
/*
 * For release_folio() and invalidate_folio() we have a race window where
 * folio_end_writeback() is called but the subpage spinlock is not yet released.
 * If we continue to release/invalidate the page, we could cause use-after-free
 * for subpage spinlock.  So this function is to spin and wait for subpage
 * spinlock.
 */
static void wait_subpage_spinlock(struct page *page)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
	struct btrfs_subpage *subpage;

	if (!btrfs_is_subpage(fs_info, page))
		return;

	ASSERT(PagePrivate(page) && page->private);
	subpage = (struct btrfs_subpage *)page->private;

	/*
	 * This may look insane as we just acquire the spinlock and release it,
	 * without doing anything.  But we just want to make sure no one is
	 * still holding the subpage spinlock.
	 * And since the page is not dirty nor writeback, and we have page
	 * locked, the only possible way to hold a spinlock is from the endio
	 * function to clear page writeback.
	 *
	 * Here we just acquire the spinlock so that all existing callers
	 * should exit and we're safe to release/invalidate the page.
	 */
	spin_lock_irq(&subpage->lock);
	spin_unlock_irq(&subpage->lock);
}
static bool __btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
{
	int ret = try_release_extent_mapping(&folio->page, gfp_flags);

	if (ret == 1) {
		wait_subpage_spinlock(&folio->page);
		clear_page_extent_mapped(&folio->page);
	}
	return ret;
}
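
/* Never release a folio that is still dirty or under writeback. */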
static bool btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
{
	if (folio_test_writeback(folio) || folio_test_dirty(folio))
		return false;
	return __btrfs_release_folio(folio, gfp_flags);
}
#ifdef CONFIG_MIGRATION
static int btrfs_migratepage(struct address_space *mapping,
			     struct page *newpage, struct page *page,
			     enum migrate_mode mode)
{
	int ret;

	ret = migrate_page_move_mapping(mapping, newpage, page, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (page_has_private(page))
		attach_page_private(newpage, detach_page_private(page));

	if (PageOrdered(page)) {
		ClearPageOrdered(page);
		SetPageOrdered(newpage);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
#endif
static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
				   size_t length)
{
	struct btrfs_inode *inode = BTRFS_I(folio->mapping->host);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_io_tree *tree = &inode->io_tree;
	struct extent_state *cached_state = NULL;
	u64 page_start = folio_pos(folio);
	u64 page_end = page_start + folio_size(folio) - 1;
	u64 cur;
	int inode_evicting = inode->vfs_inode.i_state & I_FREEING;

	/*
	 * We have folio locked so no new ordered extent can be created on this
	 * page, nor bio can be submitted for this folio.
	 *
	 * But already submitted bio can still be finished on this folio.
	 * Furthermore, endio function won't skip folio which has Ordered
	 * (Private2) already cleared, so it's possible for endio and
	 * invalidate_folio to do the same ordered extent accounting twice
	 * on one folio.
	 *
	 * So here we wait for any submitted bios to finish, so that we won't
	 * do double ordered extent accounting on the same folio.
	 */
	folio_wait_writeback(folio);
	wait_subpage_spinlock(&folio->page);

	/*
	 * For subpage case, we have call sites like
	 * btrfs_punch_hole_lock_range() which passes range not aligned to
	 * sectorsize.
	 * If the range doesn't cover the full folio, we don't need to and
	 * shouldn't clear page extent mapped, as folio->private can still
	 * record subpage dirty bits for other part of the range.
	 *
	 * For cases that invalidate the full folio even the range doesn't
	 * cover the full folio, like invalidating the last folio, we're
	 * still safe to wait for ordered extent to finish.
	 */
	if (!(offset == 0 && length == folio_size(folio))) {
		btrfs_release_folio(folio, GFP_NOFS);
		return;
	}

	if (!inode_evicting)
		lock_extent_bits(tree, page_start, page_end, &cached_state);

	cur = page_start;
	while (cur < page_end) {
		struct btrfs_ordered_extent *ordered;
		bool delete_states;
		u64 range_end;
		u32 range_len;

		ordered = btrfs_lookup_first_ordered_range(inode, cur,
							   page_end + 1 - cur);
		if (!ordered) {
			range_end = page_end;
			/*
			 * No ordered extent covering this range, we are safe
			 * to delete all extent states in the range.
			 */
			delete_states = true;
			goto next;
		}
		if (ordered->file_offset > cur) {
			/*
			 * There is a range between [cur, oe->file_offset) not
			 * covered by any ordered extent.
			 * We are safe to delete all extent states, and handle
			 * the ordered extent in the next iteration.
			 */
			range_end = ordered->file_offset - 1;
			delete_states = true;
			goto next;
		}

		range_end = min(ordered->file_offset + ordered->num_bytes - 1,
				page_end);
		ASSERT(range_end + 1 - cur < U32_MAX);
		range_len = range_end + 1 - cur;
		if (!btrfs_page_test_ordered(fs_info, &folio->page, cur, range_len)) {
			/*
			 * If Ordered (Private2) is cleared, it means endio has
			 * already been executed for the range.
			 * We can't delete the extent states as
			 * btrfs_finish_ordered_io() may still use some of them.
			 */
			delete_states = false;
			goto next;
		}
		btrfs_page_clear_ordered(fs_info, &folio->page, cur, range_len);

		/*
		 * IO on this page will never be started, so we need to account
		 * for any ordered extents now. Don't clear EXTENT_DELALLOC_NEW
		 * here, must leave that up for the ordered extent completion.
		 *
		 * This will also unlock the range for incoming
		 * btrfs_finish_ordered_io().
		 */
		if (!inode_evicting)
			clear_extent_bit(tree, cur, range_end,
					 EXTENT_DELALLOC |
					 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
					 EXTENT_DEFRAG, 1, 0, &cached_state);

		spin_lock_irq(&inode->ordered_tree.lock);
		set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
		ordered->truncated_len = min(ordered->truncated_len,
					     cur - ordered->file_offset);
		spin_unlock_irq(&inode->ordered_tree.lock);

		if (btrfs_dec_test_ordered_pending(inode, &ordered,
						   cur, range_end + 1 - cur)) {
			btrfs_finish_ordered_io(ordered);
			/*
			 * The ordered extent has finished, now we're again
			 * safe to delete all extent states of the range.
			 */
			delete_states = true;
		} else {
			/*
			 * btrfs_finish_ordered_io() will get executed by endio
			 * of other pages, thus we can't delete extent states
			 * anymore
			 */
			delete_states = false;
		}
next:
		if (ordered)
			btrfs_put_ordered_extent(ordered);
		/*
		 * Qgroup reserved space handler
		 * Sector(s) here will be either:
		 *
		 * 1) Already written to disk or bio already finished
		 *    Then its QGROUP_RESERVED bit in io_tree is already cleared.
		 *    Qgroup will be handled by its qgroup_record then.
		 *    btrfs_qgroup_free_data() call will do nothing here.
		 *
		 * 2) Not written to disk yet
		 *    Then btrfs_qgroup_free_data() call will clear the
		 *    QGROUP_RESERVED bit of its io_tree, and free the qgroup
		 *    reserved data space.
		 *    Since the IO will never happen for this page.
		 */
		btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur);
		if (!inode_evicting) {
			clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED |
					 EXTENT_DELALLOC | EXTENT_UPTODATE |
					 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1,
					 delete_states, &cached_state);
		}
		cur = range_end + 1;
	}
	/*
	 * We have iterated through all ordered extents of the page, the page
	 * should not have Ordered (Private2) anymore, or the above iteration
	 * did something wrong.
	 */
	ASSERT(!folio_test_ordered(folio));
	btrfs_page_clear_checked(fs_info, &folio->page, folio_pos(folio), folio_size(folio));
	if (!inode_evicting)
		__btrfs_release_folio(folio, GFP_NOFS);
	clear_page_extent_mapped(&folio->page);
}
/*
 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
 * called from a page fault handler when a page is first dirtied. Hence we must
 * be careful to check for EOF conditions here. We set the page up correctly
 * for a written page which means we get ENOSPC checking when writing into
 * holes and correct delalloc and unwritten extent mapping on filesystems that
 * support these features.
 *
 * We are not allowed to take the i_mutex here so we have to play games to
 * protect against truncate races as the page could now be beyond EOF. Because
 * truncate_setsize() writes the inode size before removing pages, once we have
 * the page lock we can determine safely if the page is beyond EOF. If it is not
 * beyond EOF, then the page is guaranteed safe against truncation until we
 * unlock the page.
 */
vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct extent_changeset *data_reserved = NULL;
	unsigned long zero_start;
	loff_t size;
	vm_fault_t ret;
	int ret2;
	int reserved = 0;
	u64 reserved_space;
	u64 page_start;
	u64 page_end;
	u64 end;

	reserved_space = PAGE_SIZE;

	sb_start_pagefault(inode->i_sb);
	page_start = page_offset(page);
	page_end = page_start + PAGE_SIZE - 1;
	end = page_end;

	/*
	 * Reserving delalloc space after obtaining the page lock can lead to
	 * deadlock. For example, if a dirty page is locked by this function
	 * and the call to btrfs_delalloc_reserve_space() ends up triggering
	 * dirty page write out, then the btrfs_writepage() function could
	 * end up waiting indefinitely to get a lock on the page currently
	 * being processed by btrfs_page_mkwrite() function.
	 */
	ret2 = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved,
					    page_start, reserved_space);
	if (!ret2) {
		ret2 = file_update_time(vmf->vma->vm_file);
		reserved = 1;
	}
	if (ret2) {
		ret = vmf_error(ret2);
		if (reserved)
			goto out;
		goto out_noreserve;
	}

	ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
again:
	down_read(&BTRFS_I(inode)->i_mmap_lock);
	lock_page(page);
	size = i_size_read(inode);

	if ((page->mapping != inode->i_mapping) ||
	    (page_start >= size)) {
		/* page got truncated out from underneath us */
		goto out_unlock;
	}
	wait_on_page_writeback(page);

	lock_extent_bits(io_tree, page_start, page_end, &cached_state);
	ret2 = set_page_extent_mapped(page);
	if (ret2 < 0) {
		ret = vmf_error(ret2);
		unlock_extent_cached(io_tree, page_start, page_end, &cached_state);
		goto out_unlock;
	}

	/*
	 * we can't set the delalloc bits if there are pending ordered
	 * extents. Drop our locks and wait for them to finish
	 */
	ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start,
					     PAGE_SIZE);
	if (ordered) {
		unlock_extent_cached(io_tree, page_start, page_end,
				     &cached_state);
		unlock_page(page);
		up_read(&BTRFS_I(inode)->i_mmap_lock);
		btrfs_start_ordered_extent(ordered, 1);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	if (page->index == ((size - 1) >> PAGE_SHIFT)) {
		reserved_space = round_up(size - page_start,
					  fs_info->sectorsize);
		if (reserved_space < PAGE_SIZE) {
			end = page_start + reserved_space - 1;
			btrfs_delalloc_release_space(BTRFS_I(inode),
					data_reserved, page_start,
					PAGE_SIZE - reserved_space, true);
		}
	}

	/*
	 * page_mkwrite gets called when the page is firstly dirtied after it's
	 * faulted in, but write(2) could also dirty a page and set delalloc
	 * bits, thus in this case for space account reason, we still need to
	 * clear any delalloc bits within this page range since we have to
	 * reserve data&meta space before lock_page() (see above comments).
	 */
	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
			 EXTENT_DEFRAG, 0, 0, &cached_state);

	ret2 = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start, end, 0,
					 &cached_state);
	if (ret2) {
		unlock_extent_cached(io_tree, page_start, page_end,
				     &cached_state);
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (page_start + PAGE_SIZE > size)
		zero_start = offset_in_page(size);
	else
		zero_start = PAGE_SIZE;

	if (zero_start != PAGE_SIZE) {
		memzero_page(page, zero_start, PAGE_SIZE - zero_start);
		flush_dcache_page(page);
	}
	btrfs_page_clear_checked(fs_info, page, page_start, PAGE_SIZE);
	btrfs_page_set_dirty(fs_info, page, page_start, end + 1 - page_start);
	btrfs_page_set_uptodate(fs_info, page, page_start, end + 1 - page_start);

	btrfs_set_inode_last_sub_trans(BTRFS_I(inode));

	unlock_extent_cached(io_tree, page_start, page_end, &cached_state);
	up_read(&BTRFS_I(inode)->i_mmap_lock);

	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
	sb_end_pagefault(inode->i_sb);
	extent_changeset_free(data_reserved);
	return VM_FAULT_LOCKED;

out_unlock:
	unlock_page(page);
	up_read(&BTRFS_I(inode)->i_mmap_lock);
out:
	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
	btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, page_start,
				     reserved_space, (ret != 0));
out_noreserve:
	sb_end_pagefault(inode->i_sb);
	extent_changeset_free(data_reserved);
	return ret;
}
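
/*
 * Truncate the inode's items from disk down to the current i_size, restarting
 * the transaction as needed so a single truncate cannot pin a huge
 * reservation. See the block reservation comment inside for the details.
 */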
static int btrfs_truncate(struct inode *inode, bool skip_writeback)
{
	struct btrfs_truncate_control control = {
		.inode = BTRFS_I(inode),
		.ino = btrfs_ino(BTRFS_I(inode)),
		.min_type = BTRFS_EXTENT_DATA_KEY,
		.clear_extent_range = true,
	};
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *rsv;
	int ret;
	struct btrfs_trans_handle *trans;
	u64 mask = fs_info->sectorsize - 1;
	u64 min_size = btrfs_calc_metadata_size(fs_info, 1);

	if (!skip_writeback) {
		ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask),
					       (u64)-1);
		if (ret)
			return ret;
	}

	/*
	 * Yes ladies and gentlemen, this is indeed ugly. We have a couple of
	 * things going on here:
	 *
	 * 1) We need to reserve space to update our inode.
	 *
	 * 2) We need to have something to cache all the space that is going to
	 * be free'd up by the truncate operation, but also have some slack
	 * space reserved in case it uses space during the truncate (thank you
	 * very much snapshotting).
	 *
	 * And we need these to be separate. The fact is we can use a lot of
	 * space doing the truncate, and we have no earthly idea how much space
	 * we will use, so we need the truncate reservation to be separate so it
	 * doesn't end up using space reserved for updating the inode. We also
	 * need to be able to stop the transaction and start a new one, which
	 * means we need to be able to update the inode several times, and we
	 * have no idea of knowing how many times that will be, so we can't just
	 * reserve 1 item for the entirety of the operation, so that has to be
	 * done separately as well.
	 *
	 * So that leaves us with
	 *
	 * 1) rsv - for the truncate reservation, which we will steal from the
	 * transaction reservation.
	 * 2) fs_info->trans_block_rsv - this will have 1 items worth left for
	 * updating the inode.
	 */
	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
	if (!rsv)
		return -ENOMEM;
	rsv->size = min_size;
	rsv->failfast = 1;

	/*
	 * 1 for the truncate slack space
	 * 1 for updating the inode.
	 */
	trans = btrfs_start_transaction(root, 2);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	/* Migrate the slack space for the truncate to our reserve */
	ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
				      min_size, false);
	BUG_ON(ret);

	trans->block_rsv = rsv;

	while (1) {
		struct extent_state *cached_state = NULL;
		const u64 new_size = inode->i_size;
		const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize);

		control.new_size = new_size;
		lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, (u64)-1,
				 &cached_state);
		/*
		 * We want to drop from the next block forward in case this new
		 * size is not block aligned since we will be keeping the last
		 * block of the extent just the way it is.
		 */
		btrfs_drop_extent_cache(BTRFS_I(inode),
					ALIGN(new_size, fs_info->sectorsize),
					(u64)-1, 0);

		ret = btrfs_truncate_inode_items(trans, root, &control);

		inode_sub_bytes(inode, control.sub_bytes);
		btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), control.last_size);

		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start,
				     (u64)-1, &cached_state);

		trans->block_rsv = &fs_info->trans_block_rsv;
		if (ret != -ENOSPC && ret != -EAGAIN)
			break;

		ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
		if (ret)
			break;

		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty(fs_info);

		trans = btrfs_start_transaction(root, 2);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			break;
		}

		btrfs_block_rsv_release(fs_info, rsv, -1, NULL);
		ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
					      rsv, min_size, false);
		BUG_ON(ret);	/* shouldn't happen */
		trans->block_rsv = rsv;
	}

	/*
	 * We can't call btrfs_truncate_block inside a trans handle as we could
	 * deadlock with freeze, if we got BTRFS_NEED_TRUNCATE_BLOCK then we
	 * know we've truncated everything except the last little bit, and can
	 * do btrfs_truncate_block and then update the disk_i_size.
	 */
	if (ret == BTRFS_NEED_TRUNCATE_BLOCK) {
		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty(fs_info);

		ret = btrfs_truncate_block(BTRFS_I(inode), inode->i_size, 0, 0);
		if (ret)
			goto out;
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			goto out;
		}
		btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
	}

	if (trans) {
		int ret2;

		trans->block_rsv = &fs_info->trans_block_rsv;
		ret2 = btrfs_update_inode(trans, root, BTRFS_I(inode));
		if (ret2 && !ret)
			ret = ret2;

		ret2 = btrfs_end_transaction(trans);
		if (ret2 && !ret)
			ret = ret2;
		btrfs_btree_balance_dirty(fs_info);
	}
out:
	btrfs_free_block_rsv(fs_info, rsv);
	/*
	 * So if we truncate and then write and fsync we normally would just
	 * write the extents that changed, which is a problem if we need to
	 * first truncate that entire inode. So set this flag so we write out
	 * all of the extents in the inode to the sync log so we're completely
	 * safe.
	 *
	 * If no extents were dropped or trimmed we don't need to force the next
	 * fsync to truncate all the inode's items from the log and re-log them
	 * all. This means the truncate operation did not change the file size,
	 * or changed it to a smaller size but there was only an implicit hole
	 * between the old i_size and the new i_size, and there were no prealloc
	 * extents beyond i_size to drop.
	 */
	if (control.extents_found > 0)
		btrfs_set_inode_full_sync(BTRFS_I(inode));

	return ret;
}
struct inode *btrfs_new_subvol_inode(struct user_namespace *mnt_userns,
				     struct inode *dir)
{
	struct inode *inode;

	inode = new_inode(dir->i_sb);
	if (inode) {
		/*
		 * Subvolumes don't inherit the sgid bit or the parent's gid if
		 * the parent's sgid bit is set. This is probably a bug.
		 */
		inode_init_owner(mnt_userns, inode, NULL,
				 S_IFDIR | (~current_umask() & S_IRWXUGO));
		inode->i_op = &btrfs_dir_inode_operations;
		inode->i_fop = &btrfs_dir_file_operations;
	}
	return inode;
}
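
/*
 * Allocate the in-memory btrfs_inode from the inode slab and initialize the
 * runtime-only fields; on-disk fields are filled in later by the read/create
 * paths. Freed through btrfs_free_inode().
 */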
struct inode *btrfs_alloc_inode(struct super_block *sb)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
	struct btrfs_inode *ei;
	struct inode *inode;

	ei = alloc_inode_sb(sb, btrfs_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;

	ei->root = NULL;
	ei->generation = 0;
	ei->last_trans = 0;
	ei->last_sub_trans = 0;
	ei->logged_trans = 0;
	ei->delalloc_bytes = 0;
	ei->new_delalloc_bytes = 0;
	ei->defrag_bytes = 0;
	ei->disk_i_size = 0;
	ei->flags = 0;
	ei->ro_flags = 0;
	ei->csum_bytes = 0;
	ei->index_cnt = (u64)-1;
	ei->dir_index = 0;
	ei->last_unlink_trans = 0;
	ei->last_reflink_trans = 0;
	ei->last_log_commit = 0;

	spin_lock_init(&ei->lock);
	ei->outstanding_extents = 0;
	if (sb->s_magic != BTRFS_TEST_MAGIC)
		btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv,
					      BTRFS_BLOCK_RSV_DELALLOC);
	ei->runtime_flags = 0;
	ei->prop_compress = BTRFS_COMPRESS_NONE;
	ei->defrag_compress = BTRFS_COMPRESS_NONE;

	ei->delayed_node = NULL;

	ei->i_otime.tv_sec = 0;
	ei->i_otime.tv_nsec = 0;

	inode = &ei->vfs_inode;
	extent_map_tree_init(&ei->extent_tree);
	extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO, inode);
	extent_io_tree_init(fs_info, &ei->io_failure_tree,
			    IO_TREE_INODE_IO_FAILURE, inode);
	extent_io_tree_init(fs_info, &ei->file_extent_tree,
			    IO_TREE_INODE_FILE_EXTENT, inode);
	ei->io_tree.track_uptodate = true;
	ei->io_failure_tree.track_uptodate = true;
	atomic_set(&ei->sync_writers, 0);
	mutex_init(&ei->log_mutex);
	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
	INIT_LIST_HEAD(&ei->delalloc_inodes);
	INIT_LIST_HEAD(&ei->delayed_iput);
	RB_CLEAR_NODE(&ei->rb_node);
	init_rwsem(&ei->i_mmap_lock);

	return inode;
}
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
void btrfs_test_destroy_inode(struct inode *inode)
{
	btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0);
	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}
#endif
void btrfs_free_inode(struct inode *inode)
{
	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}
void btrfs_destroy_inode(struct inode *vfs_inode)
{
	struct btrfs_ordered_extent *ordered;
	struct btrfs_inode *inode = BTRFS_I(vfs_inode);
	struct btrfs_root *root = inode->root;

	WARN_ON(!hlist_empty(&vfs_inode->i_dentry));
	WARN_ON(vfs_inode->i_data.nrpages);
	WARN_ON(inode->block_rsv.reserved);
	WARN_ON(inode->block_rsv.size);
	WARN_ON(inode->outstanding_extents);
	if (!S_ISDIR(vfs_inode->i_mode)) {
		WARN_ON(inode->delalloc_bytes);
		WARN_ON(inode->new_delalloc_bytes);
	}
	WARN_ON(inode->csum_bytes);
	WARN_ON(inode->defrag_bytes);

	/*
	 * This can happen where we create an inode, but somebody else also
	 * created the same inode and we need to destroy the one we already
	 * created.
	 */
	if (!root)
		return;

	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
		if (!ordered)
			break;
		else {
			btrfs_err(root->fs_info,
				  "found ordered extent %llu %llu on inode cleanup",
				  ordered->file_offset, ordered->num_bytes);
			btrfs_remove_ordered_extent(inode, ordered);
			btrfs_put_ordered_extent(ordered);
			btrfs_put_ordered_extent(ordered);
		}
	}
	btrfs_qgroup_check_reserved_leak(inode);
	inode_tree_del(inode);
	btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
	btrfs_inode_clear_file_extent_range(inode, 0, (u64)-1);
	btrfs_put_root(inode->root);
}
int btrfs_drop_inode(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (root == NULL)
		return 1;

	/* the snap/subvol tree is on deleting */
	if (btrfs_root_refs(&root->root_item) == 0)
		return 1;
	else
		return generic_drop_inode(inode);
}
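
/*
 * Slab constructor: runs once when a slab object is first created, not on
 * every allocation, which is why only inode_init_once() belongs here and all
 * per-instance state is (re)set in btrfs_alloc_inode().
 */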
static void init_once(void *foo)
{
	struct btrfs_inode *ei = foo;

	inode_init_once(&ei->vfs_inode);
}
void __cold btrfs_destroy_cachep(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	bioset_exit(&btrfs_dio_bioset);
	kmem_cache_destroy(btrfs_inode_cachep);
	kmem_cache_destroy(btrfs_trans_handle_cachep);
	kmem_cache_destroy(btrfs_path_cachep);
	kmem_cache_destroy(btrfs_free_space_cachep);
	kmem_cache_destroy(btrfs_free_space_bitmap_cachep);
}
int __init btrfs_init_cachep(void)
{
	btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
			sizeof(struct btrfs_inode), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT,
			init_once);
	if (!btrfs_inode_cachep)
		goto fail;

	btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle",
			sizeof(struct btrfs_trans_handle), 0,
			SLAB_TEMPORARY | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_trans_handle_cachep)
		goto fail;

	btrfs_path_cachep = kmem_cache_create("btrfs_path",
			sizeof(struct btrfs_path), 0,
			SLAB_MEM_SPREAD, NULL);
	if (!btrfs_path_cachep)
		goto fail;

	btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space",
			sizeof(struct btrfs_free_space), 0,
			SLAB_MEM_SPREAD, NULL);
	if (!btrfs_free_space_cachep)
		goto fail;

	btrfs_free_space_bitmap_cachep = kmem_cache_create("btrfs_free_space_bitmap",
			PAGE_SIZE, PAGE_SIZE,
			SLAB_MEM_SPREAD, NULL);
	if (!btrfs_free_space_bitmap_cachep)
		goto fail;

	if (bioset_init(&btrfs_dio_bioset, BIO_POOL_SIZE,
			offsetof(struct btrfs_dio_private, bio),
			BIOSET_NEED_BVECS))
		goto fail;

	return 0;
fail:
	btrfs_destroy_cachep();
	return -ENOMEM;
}
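
/*
 * statx()/stat() hook: reports the inode's birth time (i_otime) via
 * STATX_BTIME, maps btrfs inode flags to STATX attributes, and folds not yet
 * written delalloc bytes into st_blocks so size accounting looks coherent.
 */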
static int btrfs_getattr(struct user_namespace *mnt_userns,
			 const struct path *path, struct kstat *stat,
			 u32 request_mask, unsigned int flags)
{
	u64 delalloc_bytes;
	u64 inode_bytes;
	struct inode *inode = d_inode(path->dentry);
	u32 blocksize = inode->i_sb->s_blocksize;
	u32 bi_flags = BTRFS_I(inode)->flags;
	u32 bi_ro_flags = BTRFS_I(inode)->ro_flags;

	stat->result_mask |= STATX_BTIME;
	stat->btime.tv_sec = BTRFS_I(inode)->i_otime.tv_sec;
	stat->btime.tv_nsec = BTRFS_I(inode)->i_otime.tv_nsec;
	if (bi_flags & BTRFS_INODE_APPEND)
		stat->attributes |= STATX_ATTR_APPEND;
	if (bi_flags & BTRFS_INODE_COMPRESS)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (bi_flags & BTRFS_INODE_IMMUTABLE)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (bi_flags & BTRFS_INODE_NODUMP)
		stat->attributes |= STATX_ATTR_NODUMP;
	if (bi_ro_flags & BTRFS_INODE_RO_VERITY)
		stat->attributes |= STATX_ATTR_VERITY;

	stat->attributes_mask |= (STATX_ATTR_APPEND |
				  STATX_ATTR_COMPRESSED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP);

	generic_fillattr(mnt_userns, inode, stat);
	stat->dev = BTRFS_I(inode)->root->anon_dev;

	spin_lock(&BTRFS_I(inode)->lock);
	delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes;
	inode_bytes = inode_get_bytes(inode);
	spin_unlock(&BTRFS_I(inode)->lock);
	stat->blocks = (ALIGN(inode_bytes, blocksize) +
			ALIGN(delalloc_bytes, blocksize)) >> 9;
	return 0;
}
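
/*
 * Worked example for the st_blocks computation above (illustrative values):
 * with a 4096-byte blocksize, inode_bytes = 6000 and delalloc_bytes = 100
 * give ALIGN(6000, 4096) = 8192 and ALIGN(100, 4096) = 4096, so
 * stat->blocks = (8192 + 4096) >> 9 = 24 sectors of 512 bytes.
 */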
static int btrfs_rename_exchange(struct inode *old_dir,
				 struct dentry *old_dentry,
				 struct inode *new_dir,
				 struct dentry *new_dentry)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
	struct btrfs_trans_handle *trans;
	unsigned int trans_num_items;
	struct btrfs_root *root = BTRFS_I(old_dir)->root;
	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
	struct inode *new_inode = new_dentry->d_inode;
	struct inode *old_inode = old_dentry->d_inode;
	struct timespec64 ctime = current_time(old_inode);
	struct btrfs_rename_ctx old_rename_ctx;
	struct btrfs_rename_ctx new_rename_ctx;
	u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
	u64 new_ino = btrfs_ino(BTRFS_I(new_inode));
	u64 old_idx = 0;
	u64 new_idx = 0;
	int ret;
	int ret2;
	bool need_abort = false;

	/*
	 * For non-subvolumes allow exchange only within one subvolume, in the
	 * same inode namespace. Two subvolumes (represented as directory) can
	 * be exchanged as they're a logical link and have a fixed inode number.
	 */
	if (root != dest &&
	    (old_ino != BTRFS_FIRST_FREE_OBJECTID ||
	     new_ino != BTRFS_FIRST_FREE_OBJECTID))
		return -EXDEV;

	/* close the race window with snapshot create/destroy ioctl */
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID ||
	    new_ino == BTRFS_FIRST_FREE_OBJECTID)
		down_read(&fs_info->subvol_sem);

	/*
	 * For each inode:
	 * 1 to remove old dir item
	 * 1 to remove old dir index
	 * 1 to add new dir item
	 * 1 to add new dir index
	 * 1 to update parent inode
	 *
	 * If the parents are the same, we only need to account for one
	 */
	trans_num_items = (old_dir == new_dir ? 9 : 10);
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
		/*
		 * 1 to remove old root ref
		 * 1 to remove old root backref
		 * 1 to add new root ref
		 * 1 to add new root backref
		 */
		trans_num_items += 4;
	} else {
		/*
		 * 1 to update inode item
		 * 1 to remove old inode ref
		 * 1 to add new inode ref
		 */
		trans_num_items += 3;
	}
	if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
		trans_num_items += 4;
	else
		trans_num_items += 3;
	trans = btrfs_start_transaction(root, trans_num_items);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_notrans;
	}

	if (dest != root) {
		ret = btrfs_record_root_in_trans(trans, dest);
		if (ret)
			goto out_fail;
	}

	/*
	 * We need to find a free sequence number both in the source and
	 * in the destination directory for the exchange.
	 */
	ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx);
	if (ret)
		goto out_fail;
	ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx);
	if (ret)
		goto out_fail;

	BTRFS_I(old_inode)->dir_index = 0ULL;
	BTRFS_I(new_inode)->dir_index = 0ULL;

	/* Reference for the source. */
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
		/* force full log commit if subvolume involved. */
		btrfs_set_log_full_commit(trans);
	} else {
		ret = btrfs_insert_inode_ref(trans, dest,
					     new_dentry->d_name.name,
					     new_dentry->d_name.len,
					     old_ino,
					     btrfs_ino(BTRFS_I(new_dir)),
					     old_idx);
		if (ret)
			goto out_fail;
		need_abort = true;
	}

	/* And now for the dest. */
	if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
		/* force full log commit if subvolume involved. */
		btrfs_set_log_full_commit(trans);
	} else {
		ret = btrfs_insert_inode_ref(trans, root,
					     old_dentry->d_name.name,
					     old_dentry->d_name.len,
					     new_ino,
					     btrfs_ino(BTRFS_I(old_dir)),
					     new_idx);
		if (ret) {
			if (need_abort)
				btrfs_abort_transaction(trans, ret);
			goto out_fail;
		}
	}

	/* Update inode version and ctime/mtime. */
	inode_inc_iversion(old_dir);
	inode_inc_iversion(new_dir);
	inode_inc_iversion(old_inode);
	inode_inc_iversion(new_inode);
	old_dir->i_ctime = old_dir->i_mtime = ctime;
	new_dir->i_ctime = new_dir->i_mtime = ctime;
	old_inode->i_ctime = ctime;
	new_inode->i_ctime = ctime;

	if (old_dentry->d_parent != new_dentry->d_parent) {
		btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
					BTRFS_I(old_inode), 1);
		btrfs_record_unlink_dir(trans, BTRFS_I(new_dir),
					BTRFS_I(new_inode), 1);
	}

	/* src is a subvolume */
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_unlink_subvol(trans, old_dir, old_dentry);
	} else { /* src is an inode */
		ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
					   BTRFS_I(old_dentry->d_inode),
					   old_dentry->d_name.name,
					   old_dentry->d_name.len,
					   &old_rename_ctx);
		if (!ret)
			ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode));
	}
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	/* dest is a subvolume */
	if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_unlink_subvol(trans, new_dir, new_dentry);
	} else { /* dest is an inode */
		ret = __btrfs_unlink_inode(trans, BTRFS_I(new_dir),
					   BTRFS_I(new_dentry->d_inode),
					   new_dentry->d_name.name,
					   new_dentry->d_name.len,
					   &new_rename_ctx);
		if (!ret)
			ret = btrfs_update_inode(trans, dest, BTRFS_I(new_inode));
	}
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
			     new_dentry->d_name.name,
			     new_dentry->d_name.len, 0, old_idx);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode),
			     old_dentry->d_name.name,
			     old_dentry->d_name.len, 0, new_idx);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	if (old_inode->i_nlink == 1)
		BTRFS_I(old_inode)->dir_index = old_idx;
	if (new_inode->i_nlink == 1)
		BTRFS_I(new_inode)->dir_index = new_idx;

	/*
	 * Now pin the logs of the roots. We do it to ensure that no other task
	 * can sync the logs while we are in progress with the rename, because
	 * that could result in an inconsistency in case any of the inodes that
	 * are part of this rename operation were logged before.
	 */
	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_pin_log_trans(root);
	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_pin_log_trans(dest);

	/* Do the log updates for all inodes. */
	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
				   old_rename_ctx.index, new_dentry->d_parent);
	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_log_new_name(trans, new_dentry, BTRFS_I(new_dir),
				   new_rename_ctx.index, old_dentry->d_parent);

	/* Now unpin the logs. */
	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_end_log_trans(root);
	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_end_log_trans(dest);
out_fail:
	ret2 = btrfs_end_transaction(trans);
	ret = ret ? ret : ret2;
out_notrans:
	if (new_ino == BTRFS_FIRST_FREE_OBJECTID ||
	    old_ino == BTRFS_FIRST_FREE_OBJECTID)
		up_read(&fs_info->subvol_sem);

	return ret;
}
static struct inode *new_whiteout_inode(struct user_namespace *mnt_userns,
					struct inode *dir)
{
	struct inode *inode;

	inode = new_inode(dir->i_sb);
	if (inode) {
		inode_init_owner(mnt_userns, inode, dir,
				 S_IFCHR | WHITEOUT_MODE);
		inode->i_op = &btrfs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, WHITEOUT_DEV);
	}
	return inode;
}
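
/*
 * For RENAME_WHITEOUT the helper above creates the whiteout as a character
 * device with device number WHITEOUT_DEV, which is the VFS convention for
 * marking a name as deleted in union/overlay setups.
 */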
static int btrfs_rename(struct user_namespace *mnt_userns,
			struct inode *old_dir, struct dentry *old_dentry,
			struct inode *new_dir, struct dentry *new_dentry,
			unsigned int flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
	struct btrfs_new_inode_args whiteout_args = {
		.dir = old_dir,
		.dentry = old_dentry,
	};
	struct btrfs_trans_handle *trans;
	unsigned int trans_num_items;
	struct btrfs_root *root = BTRFS_I(old_dir)->root;
	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
	struct inode *new_inode = d_inode(new_dentry);
	struct inode *old_inode = d_inode(old_dentry);
	struct btrfs_rename_ctx rename_ctx;
	u64 index = 0;
	int ret;
	int ret2;
	u64 old_ino = btrfs_ino(BTRFS_I(old_inode));

	if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
		return -EPERM;

	/* we only allow rename subvolume link between subvolumes */
	if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
		return -EXDEV;

	if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
	    (new_inode && btrfs_ino(BTRFS_I(new_inode)) == BTRFS_FIRST_FREE_OBJECTID))
		return -ENOTEMPTY;

	if (S_ISDIR(old_inode->i_mode) && new_inode &&
	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
		return -ENOTEMPTY;

	/* check for collisions, even if the name isn't there */
	ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino,
					     new_dentry->d_name.name,
					     new_dentry->d_name.len);

	if (ret) {
		if (ret == -EEXIST) {
			/* we shouldn't get
			 * eexist without a new_inode */
			if (WARN_ON(!new_inode)) {
				return ret;
			}
		} else {
			/* maybe -EOVERFLOW */
			return ret;
		}
	}
	ret = 0;

	/*
	 * we're using rename to replace one file with another. Start IO on it
	 * now so we don't add too much work to the end of the transaction
	 */
	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size)
		filemap_flush(old_inode->i_mapping);

	if (flags & RENAME_WHITEOUT) {
		whiteout_args.inode = new_whiteout_inode(mnt_userns, old_dir);
		if (!whiteout_args.inode)
			return -ENOMEM;
		ret = btrfs_new_inode_prepare(&whiteout_args, &trans_num_items);
		if (ret)
			goto out_whiteout_inode;
	} else {
		/* 1 to update the old parent inode. */
		trans_num_items = 1;
	}

	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
		/* Close the race window with snapshot create/destroy ioctl */
		down_read(&fs_info->subvol_sem);
		/*
		 * 1 to remove old root ref
		 * 1 to remove old root backref
		 * 1 to add new root ref
		 * 1 to add new root backref
		 */
		trans_num_items += 4;
	} else {
		/*
		 * 1 to update inode
		 * 1 to remove old inode ref
		 * 1 to add new inode ref
		 */
		trans_num_items += 3;
	}
	/*
	 * 1 to remove old dir item
	 * 1 to remove old dir index
	 * 1 to add new dir item
	 * 1 to add new dir index
	 */
	trans_num_items += 4;
	/* 1 to update new parent inode if it's not the same as the old parent */
	if (new_dir != old_dir)
		trans_num_items++;
	if (new_inode) {
		/*
		 * 1 to update inode
		 * 1 to remove inode ref
		 * 1 to remove dir item
		 * 1 to remove dir index
		 * 1 to possibly add orphan item
		 */
		trans_num_items += 5;
	}
	trans = btrfs_start_transaction(root, trans_num_items);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_notrans;
	}

	if (dest != root) {
		ret = btrfs_record_root_in_trans(trans, dest);
		if (ret)
			goto out_fail;
	}

	ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index);
	if (ret)
		goto out_fail;

	BTRFS_I(old_inode)->dir_index = 0ULL;
	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
		/* force full log commit if subvolume involved. */
		btrfs_set_log_full_commit(trans);
	} else {
		ret = btrfs_insert_inode_ref(trans, dest,
					     new_dentry->d_name.name,
					     new_dentry->d_name.len,
					     old_ino,
					     btrfs_ino(BTRFS_I(new_dir)), index);
		if (ret)
			goto out_fail;
	}

	inode_inc_iversion(old_dir);
	inode_inc_iversion(new_dir);
	inode_inc_iversion(old_inode);
	old_dir->i_ctime = old_dir->i_mtime =
	new_dir->i_ctime = new_dir->i_mtime =
	old_inode->i_ctime = current_time(old_dir);

	if (old_dentry->d_parent != new_dentry->d_parent)
		btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
					BTRFS_I(old_inode), 1);

	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
		ret = btrfs_unlink_subvol(trans, old_dir, old_dentry);
	} else {
		ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
					   BTRFS_I(d_inode(old_dentry)),
					   old_dentry->d_name.name,
					   old_dentry->d_name.len,
					   &rename_ctx);
		if (!ret)
			ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode));
	}
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	if (new_inode) {
		inode_inc_iversion(new_inode);
		new_inode->i_ctime = current_time(new_inode);
		if (unlikely(btrfs_ino(BTRFS_I(new_inode)) ==
			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
			ret = btrfs_unlink_subvol(trans, new_dir, new_dentry);
			BUG_ON(new_inode->i_nlink == 0);
		} else {
			ret = btrfs_unlink_inode(trans, BTRFS_I(new_dir),
						 BTRFS_I(d_inode(new_dentry)),
						 new_dentry->d_name.name,
						 new_dentry->d_name.len);
		}
		if (!ret && new_inode->i_nlink == 0)
			ret = btrfs_orphan_add(trans,
					BTRFS_I(d_inode(new_dentry)));
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out_fail;
		}
	}

	ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
			     new_dentry->d_name.name,
			     new_dentry->d_name.len, 0, index);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	if (old_inode->i_nlink == 1)
		BTRFS_I(old_inode)->dir_index = index;

	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
				   rename_ctx.index, new_dentry->d_parent);

	if (flags & RENAME_WHITEOUT) {
		ret = btrfs_create_new_inode(trans, &whiteout_args);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out_fail;
		} else {
			unlock_new_inode(whiteout_args.inode);
			iput(whiteout_args.inode);
			whiteout_args.inode = NULL;
		}
	}
out_fail:
	ret2 = btrfs_end_transaction(trans);
	ret = ret ? ret : ret2;
out_notrans:
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
		up_read(&fs_info->subvol_sem);
	if (flags & RENAME_WHITEOUT)
		btrfs_new_inode_args_destroy(&whiteout_args);
out_whiteout_inode:
	if (flags & RENAME_WHITEOUT)
		iput(whiteout_args.inode);
	return ret;
}
static int btrfs_rename2(struct user_namespace *mnt_userns, struct inode *old_dir,
			 struct dentry *old_dentry, struct inode *new_dir,
			 struct dentry *new_dentry, unsigned int flags)
{
	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
		return -EINVAL;

	if (flags & RENAME_EXCHANGE)
		return btrfs_rename_exchange(old_dir, old_dentry, new_dir,
					     new_dentry);

	return btrfs_rename(mnt_userns, old_dir, old_dentry, new_dir,
			    new_dentry, flags);
}
struct btrfs_delalloc_work {
	struct inode *inode;
	struct completion completion;
	struct list_head list;
	struct btrfs_work work;
};
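
/*
 * Lifecycle: allocated by btrfs_alloc_delalloc_work(), queued on the
 * fs_info->flush_workers workqueue, and reaped by the submitter once
 * 'completion' fires (see start_delalloc_inodes()).
 */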
static void btrfs_run_delalloc_work(struct btrfs_work *work)
{
	struct btrfs_delalloc_work *delalloc_work;
	struct inode *inode;

	delalloc_work = container_of(work, struct btrfs_delalloc_work,
				     work);
	inode = delalloc_work->inode;
	filemap_flush(inode->i_mapping);
	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
		     &BTRFS_I(inode)->runtime_flags))
		filemap_flush(inode->i_mapping);

	iput(inode);
	complete(&delalloc_work->completion);
}
static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode)
{
	struct btrfs_delalloc_work *work;

	work = kmalloc(sizeof(*work), GFP_NOFS);
	if (!work)
		return NULL;

	init_completion(&work->completion);
	INIT_LIST_HEAD(&work->list);
	work->inode = inode;
	btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL);

	return work;
}
/*
 * some fairly slow code that needs optimization. This walks the list
 * of all the inodes with pending delalloc and forces them to disk.
 */
static int start_delalloc_inodes(struct btrfs_root *root,
				 struct writeback_control *wbc, bool snapshot,
				 bool in_reclaim_context)
{
	struct btrfs_inode *binode;
	struct inode *inode;
	struct btrfs_delalloc_work *work, *next;
	struct list_head works;
	struct list_head splice;
	int ret = 0;
	bool full_flush = wbc->nr_to_write == LONG_MAX;

	INIT_LIST_HEAD(&works);
	INIT_LIST_HEAD(&splice);

	mutex_lock(&root->delalloc_mutex);
	spin_lock(&root->delalloc_lock);
	list_splice_init(&root->delalloc_inodes, &splice);
	while (!list_empty(&splice)) {
		binode = list_entry(splice.next, struct btrfs_inode,
				    delalloc_inodes);

		list_move_tail(&binode->delalloc_inodes,
			       &root->delalloc_inodes);

		if (in_reclaim_context &&
		    test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &binode->runtime_flags))
			continue;

		inode = igrab(&binode->vfs_inode);
		if (!inode) {
			cond_resched_lock(&root->delalloc_lock);
			continue;
		}
		spin_unlock(&root->delalloc_lock);

		if (snapshot)
			set_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
				&binode->runtime_flags);
		if (full_flush) {
			work = btrfs_alloc_delalloc_work(inode);
			if (!work) {
				iput(inode);
				ret = -ENOMEM;
				goto out;
			}
			list_add_tail(&work->list, &works);
			btrfs_queue_work(root->fs_info->flush_workers,
					 &work->work);
		} else {
			ret = filemap_fdatawrite_wbc(inode->i_mapping, wbc);
			btrfs_add_delayed_iput(inode);
			if (ret || wbc->nr_to_write <= 0)
				goto out;
		}
		cond_resched();
		spin_lock(&root->delalloc_lock);
	}
	spin_unlock(&root->delalloc_lock);

out:
	list_for_each_entry_safe(work, next, &works, list) {
		list_del_init(&work->list);
		wait_for_completion(&work->completion);
		kfree(work);
	}

	if (!list_empty(&splice)) {
		spin_lock(&root->delalloc_lock);
		list_splice_tail(&splice, &root->delalloc_inodes);
		spin_unlock(&root->delalloc_lock);
	}
	mutex_unlock(&root->delalloc_mutex);
	return ret;
}
int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context)
{
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (BTRFS_FS_ERROR(fs_info))
		return -EROFS;

	return start_delalloc_inodes(root, &wbc, true, in_reclaim_context);
}
int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
			       bool in_reclaim_context)
{
	struct writeback_control wbc = {
		.nr_to_write = nr,
		.sync_mode = WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};
	struct btrfs_root *root;
	struct list_head splice;
	int ret;

	if (BTRFS_FS_ERROR(fs_info))
		return -EROFS;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&fs_info->delalloc_root_mutex);
	spin_lock(&fs_info->delalloc_root_lock);
	list_splice_init(&fs_info->delalloc_roots, &splice);
	while (!list_empty(&splice)) {
		/*
		 * Reset nr_to_write here so we know that we're doing a full
		 * flush.
		 */
		if (nr == LONG_MAX)
			wbc.nr_to_write = LONG_MAX;

		root = list_first_entry(&splice, struct btrfs_root,
					delalloc_root);
		root = btrfs_grab_root(root);
		BUG_ON(!root);
		list_move_tail(&root->delalloc_root,
			       &fs_info->delalloc_roots);
		spin_unlock(&fs_info->delalloc_root_lock);

		ret = start_delalloc_inodes(root, &wbc, false, in_reclaim_context);
		btrfs_put_root(root);
		if (ret < 0 || wbc.nr_to_write <= 0)
			goto out;
		spin_lock(&fs_info->delalloc_root_lock);
	}
	spin_unlock(&fs_info->delalloc_root_lock);

	ret = 0;
out:
	if (!list_empty(&splice)) {
		spin_lock(&fs_info->delalloc_root_lock);
		list_splice_tail(&splice, &fs_info->delalloc_roots);
		spin_unlock(&fs_info->delalloc_root_lock);
	}
	mutex_unlock(&fs_info->delalloc_root_mutex);
	return ret;
}
*mnt_userns
, struct inode
*dir
,
9767 struct dentry
*dentry
, const char *symname
)
9769 struct btrfs_fs_info
*fs_info
= btrfs_sb(dir
->i_sb
);
9770 struct btrfs_trans_handle
*trans
;
9771 struct btrfs_root
*root
= BTRFS_I(dir
)->root
;
9772 struct btrfs_path
*path
;
9773 struct btrfs_key key
;
9774 struct inode
*inode
;
9775 struct btrfs_new_inode_args new_inode_args
= {
9779 unsigned int trans_num_items
;
9784 struct btrfs_file_extent_item
*ei
;
9785 struct extent_buffer
*leaf
;
9787 name_len
= strlen(symname
);
9788 if (name_len
> BTRFS_MAX_INLINE_DATA_SIZE(fs_info
))
9789 return -ENAMETOOLONG
;
9791 inode
= new_inode(dir
->i_sb
);
9794 inode_init_owner(mnt_userns
, inode
, dir
, S_IFLNK
| S_IRWXUGO
);
9795 inode
->i_op
= &btrfs_symlink_inode_operations
;
9796 inode_nohighmem(inode
);
9797 inode
->i_mapping
->a_ops
= &btrfs_aops
;
9798 btrfs_i_size_write(BTRFS_I(inode
), name_len
);
9799 inode_set_bytes(inode
, name_len
);
9801 new_inode_args
.inode
= inode
;
9802 err
= btrfs_new_inode_prepare(&new_inode_args
, &trans_num_items
);
9805 /* 1 additional item for the inline extent */
9808 trans
= btrfs_start_transaction(root
, trans_num_items
);
9809 if (IS_ERR(trans
)) {
9810 err
= PTR_ERR(trans
);
9811 goto out_new_inode_args
;
9814 err
= btrfs_create_new_inode(trans
, &new_inode_args
);
9818 path
= btrfs_alloc_path();
9821 btrfs_abort_transaction(trans
, err
);
9822 discard_new_inode(inode
);
9826 key
.objectid
= btrfs_ino(BTRFS_I(inode
));
9828 key
.type
= BTRFS_EXTENT_DATA_KEY
;
9829 datasize
= btrfs_file_extent_calc_inline_size(name_len
);
9830 err
= btrfs_insert_empty_item(trans
, root
, path
, &key
,
9833 btrfs_abort_transaction(trans
, err
);
9834 btrfs_free_path(path
);
9835 discard_new_inode(inode
);
9839 leaf
= path
->nodes
[0];
9840 ei
= btrfs_item_ptr(leaf
, path
->slots
[0],
9841 struct btrfs_file_extent_item
);
9842 btrfs_set_file_extent_generation(leaf
, ei
, trans
->transid
);
9843 btrfs_set_file_extent_type(leaf
, ei
,
9844 BTRFS_FILE_EXTENT_INLINE
);
9845 btrfs_set_file_extent_encryption(leaf
, ei
, 0);
9846 btrfs_set_file_extent_compression(leaf
, ei
, 0);
9847 btrfs_set_file_extent_other_encoding(leaf
, ei
, 0);
9848 btrfs_set_file_extent_ram_bytes(leaf
, ei
, name_len
);
9850 ptr
= btrfs_file_extent_inline_start(ei
);
9851 write_extent_buffer(leaf
, symname
, ptr
, name_len
);
9852 btrfs_mark_buffer_dirty(leaf
);
9853 btrfs_free_path(path
);
9855 d_instantiate_new(dentry
, inode
);
9858 btrfs_end_transaction(trans
);
9859 btrfs_btree_balance_dirty(fs_info
);
9861 btrfs_new_inode_args_destroy(&new_inode_args
);
static struct btrfs_trans_handle *insert_prealloc_file_extent(
				  struct btrfs_trans_handle *trans_in,
				  struct btrfs_inode *inode,
				  struct btrfs_key *ins,
				  u64 file_offset)
{
	struct btrfs_file_extent_item stack_fi;
	struct btrfs_replace_extent_info extent_info;
	struct btrfs_trans_handle *trans = trans_in;
	struct btrfs_path *path;
	u64 start = ins->objectid;
	u64 len = ins->offset;
	int qgroup_released;
	int ret;

	memset(&stack_fi, 0, sizeof(stack_fi));

	btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_PREALLOC);
	btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, start);
	btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, len);
	btrfs_set_stack_file_extent_num_bytes(&stack_fi, len);
	btrfs_set_stack_file_extent_ram_bytes(&stack_fi, len);
	btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE);
	/* Encryption and other encoding is reserved and all 0 */

	qgroup_released = btrfs_qgroup_release_data(inode, file_offset, len);
	if (qgroup_released < 0)
		return ERR_PTR(qgroup_released);

	if (trans) {
		ret = insert_reserved_file_extent(trans, inode,
						  file_offset, &stack_fi,
						  true, qgroup_released);
		if (ret)
			goto free_qgroup;
		return trans;
	}

	extent_info.disk_offset = start;
	extent_info.disk_len = len;
	extent_info.data_offset = 0;
	extent_info.data_len = len;
	extent_info.file_offset = file_offset;
	extent_info.extent_buf = (char *)&stack_fi;
	extent_info.is_new_extent = true;
	extent_info.update_times = true;
	extent_info.qgroup_reserved = qgroup_released;
	extent_info.insertions = 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto free_qgroup;
	}

	ret = btrfs_replace_file_extents(inode, path, file_offset,
					 file_offset + len - 1, &extent_info,
					 &trans);
	btrfs_free_path(path);
	if (ret)
		goto free_qgroup;
	return trans;

free_qgroup:
	/*
	 * We have released qgroup data range at the beginning of the function,
	 * and normally qgroup_released bytes will be freed when committing
	 * transaction.
	 * But if we error out early, we have to free what we have released
	 * or we leak qgroup data reservation.
	 */
	btrfs_qgroup_free_refroot(inode->root->fs_info,
			inode->root->root_key.objectid, qgroup_released,
			BTRFS_QGROUP_RSV_DATA);
	return ERR_PTR(ret);
}
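
/*
 * Preallocate the byte range in chunks of at most 256MiB (SZ_256M), shrinking
 * the request towards min_size when the allocator starts returning small
 * extents, and cleaning up any still-reserved tail on error.
 */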
static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
				       u64 start, u64 num_bytes, u64 min_size,
				       loff_t actual_len, u64 *alloc_hint,
				       struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key ins;
	u64 cur_offset = start;
	u64 clear_offset = start;
	u64 i_size;
	u64 cur_bytes;
	u64 last_alloc = (u64)-1;
	int ret = 0;
	bool own_trans = true;
	u64 end = start + num_bytes - 1;

	if (trans)
		own_trans = false;
	while (num_bytes > 0) {
		cur_bytes = min_t(u64, num_bytes, SZ_256M);
		cur_bytes = max(cur_bytes, min_size);
		/*
		 * If we are severely fragmented we could end up with really
		 * small allocations, so if the allocator is returning small
		 * chunks lets make its job easier by only searching for those
		 * sized chunks.
		 */
		cur_bytes = min(cur_bytes, last_alloc);
		ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes,
				min_size, 0, *alloc_hint, &ins, 1, 0);
		if (ret)
			break;

		/*
		 * We've reserved this space, and thus converted it from
		 * ->bytes_may_use to ->bytes_reserved. Any error that happens
		 * from here on out we will only need to clear our reservation
		 * for the remaining unreserved area, so advance our
		 * clear_offset by our extent size.
		 */
		clear_offset += ins.offset;

		last_alloc = ins.offset;
		trans = insert_prealloc_file_extent(trans, BTRFS_I(inode),
						    &ins, cur_offset);
		/*
		 * Now that we inserted the prealloc extent we can finally
		 * decrement the number of reservations in the block group.
		 * If we did it before, we could race with relocation and have
		 * relocation miss the reserved extent, making it fail later.
		 */
		btrfs_dec_block_group_reservations(fs_info, ins.objectid);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			btrfs_free_reserved_extent(fs_info, ins.objectid,
						   ins.offset, 0);
			break;
		}

		btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset,
					cur_offset + ins.offset -1, 0);

		em = alloc_extent_map();
		if (!em) {
			btrfs_set_inode_full_sync(BTRFS_I(inode));
			goto next;
		}

		em->start = cur_offset;
		em->orig_start = cur_offset;
		em->len = ins.offset;
		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->orig_block_len = ins.offset;
		em->ram_bytes = ins.offset;
		set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		em->generation = trans->transid;

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em, 1);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST)
				break;
			btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset,
						cur_offset + ins.offset - 1,
						0);
		}
		free_extent_map(em);
next:
		num_bytes -= ins.offset;
		cur_offset += ins.offset;
		*alloc_hint = ins.objectid + ins.offset;

		inode_inc_iversion(inode);
		inode->i_ctime = current_time(inode);
		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    (actual_len > inode->i_size) &&
		    (cur_offset > inode->i_size)) {
			if (cur_offset > actual_len)
				i_size = actual_len;
			else
				i_size = cur_offset;
			i_size_write(inode, i_size);
			btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
		}

		ret = btrfs_update_inode(trans, root, BTRFS_I(inode));

		if (ret) {
			btrfs_abort_transaction(trans, ret);
			if (own_trans)
				btrfs_end_transaction(trans);
			break;
		}

		if (own_trans) {
			btrfs_end_transaction(trans);
			trans = NULL;
		}
	}
	if (clear_offset < end)
		btrfs_free_reserved_data_space(BTRFS_I(inode), NULL, clear_offset,
			end - clear_offset + 1);
	return ret;
}
int btrfs_prealloc_file_range(struct inode *inode, int mode,
			      u64 start, u64 num_bytes, u64 min_size,
			      loff_t actual_len, u64 *alloc_hint)
{
	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
					   min_size, actual_len, alloc_hint,
					   NULL);
}

int btrfs_prealloc_file_range_trans(struct inode *inode,
				    struct btrfs_trans_handle *trans, int mode,
				    u64 start, u64 num_bytes, u64 min_size,
				    loff_t actual_len, u64 *alloc_hint)
{
	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
					   min_size, actual_len, alloc_hint, trans);
}
static int btrfs_permission(struct user_namespace *mnt_userns,
			    struct inode *inode, int mask)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	umode_t mode = inode->i_mode;

	if (mask & MAY_WRITE &&
	    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
		if (btrfs_root_readonly(root))
			return -EROFS;
		if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
			return -EACCES;
	}
	return generic_permission(mnt_userns, inode, mask);
}
static int btrfs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
			 struct dentry *dentry, umode_t mode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode;
	struct btrfs_new_inode_args new_inode_args = {
		.dir = dir,
		.dentry = dentry,
		.orphan = true,
	};
	unsigned int trans_num_items;
	int ret;

	inode = new_inode(dir->i_sb);
	if (!inode)
		return -ENOMEM;
	inode_init_owner(mnt_userns, inode, dir, mode);
	inode->i_fop = &btrfs_file_operations;
	inode->i_op = &btrfs_file_inode_operations;
	inode->i_mapping->a_ops = &btrfs_aops;

	new_inode_args.inode = inode;
	ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
	if (ret)
		goto out_inode;

	trans = btrfs_start_transaction(root, trans_num_items);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_new_inode_args;
	}

	ret = btrfs_create_new_inode(trans, &new_inode_args);
	if (ret)
		goto out;

	/*
	 * We set number of links to 0 in btrfs_create_new_inode(), and here we
	 * set it to 1 because d_tmpfile() will issue a warning if the count is
	 * 0, through:
	 *
	 *    d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
	 */
	set_nlink(inode, 1);
	d_tmpfile(dentry, inode);
	unlock_new_inode(inode);
	mark_inode_dirty(inode);
out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out_new_inode_args:
	btrfs_new_inode_args_destroy(&new_inode_args);
out_inode:
	if (ret)
		iput(inode);
	return ret;
}
void btrfs_set_range_writeback(struct btrfs_inode *inode, u64 start, u64 end)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	unsigned long index = start >> PAGE_SHIFT;
	unsigned long end_index = end >> PAGE_SHIFT;
	struct page *page;
	u32 len;

	ASSERT(end + 1 - start <= U32_MAX);
	len = end + 1 - start;
	while (index <= end_index) {
		page = find_get_page(inode->vfs_inode.i_mapping, index);
		ASSERT(page); /* Pages should be in the extent_io_tree */

		btrfs_page_set_writeback(fs_info, page, start, len);
		put_page(page);
		index++;
	}
}
static int btrfs_encoded_io_compression_from_extent(
				struct btrfs_fs_info *fs_info,
				int compress_type)
{
	switch (compress_type) {
	case BTRFS_COMPRESS_NONE:
		return BTRFS_ENCODED_IO_COMPRESSION_NONE;
	case BTRFS_COMPRESS_ZLIB:
		return BTRFS_ENCODED_IO_COMPRESSION_ZLIB;
	case BTRFS_COMPRESS_LZO:
		/*
		 * The LZO format depends on the sector size. 64K is the maximum
		 * sector size that we support.
		 */
		if (fs_info->sectorsize < SZ_4K || fs_info->sectorsize > SZ_64K)
			return -EINVAL;
		return BTRFS_ENCODED_IO_COMPRESSION_LZO_4K +
		       (fs_info->sectorsize_bits - 12);
	case BTRFS_COMPRESS_ZSTD:
		return BTRFS_ENCODED_IO_COMPRESSION_ZSTD;
	default:
		return -EUCLEAN;
	}
}
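
/*
 * Worked example for the LZO mapping above: the encoding value is
 * BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + (sectorsize_bits - 12), so a 4K
 * sector size (bits = 12) maps to LZO_4K + 0, 16K (bits = 14) to LZO_4K + 2,
 * and 64K (bits = 16) to LZO_4K + 4.
 */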
static ssize_t btrfs_encoded_read_inline(
				struct kiocb *iocb,
				struct iov_iter *iter, u64 start,
				u64 lockend,
				struct extent_state **cached_state,
				u64 extent_start, size_t count,
				struct btrfs_ioctl_encoded_io_args *encoded,
				bool *unlocked)
{
	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *item;
	u64 ram_bytes;
	unsigned long ptr;
	void *tmp;
	ssize_t ret;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}
	ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode),
				       extent_start, 0);
	if (ret) {
		if (ret > 0) {
			/* The extent item disappeared? */
			ret = -EIO;
		}
		goto out;
	}
	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);

	ram_bytes = btrfs_file_extent_ram_bytes(leaf, item);
	ptr = btrfs_file_extent_inline_start(item);

	encoded->len = min_t(u64, extent_start + ram_bytes,
			     inode->vfs_inode.i_size) - iocb->ki_pos;
	ret = btrfs_encoded_io_compression_from_extent(fs_info,
				 btrfs_file_extent_compression(leaf, item));
	if (ret < 0)
		goto out;
	encoded->compression = ret;
	if (encoded->compression) {
		size_t inline_size;

		inline_size = btrfs_file_extent_inline_item_len(leaf,
								path->slots[0]);
		if (inline_size > count) {
			ret = -ENOBUFS;
			goto out;
		}
		count = inline_size;
		encoded->unencoded_len = ram_bytes;
		encoded->unencoded_offset = iocb->ki_pos - extent_start;
	} else {
		count = min_t(u64, count, encoded->len);
		encoded->len = count;
		encoded->unencoded_len = count;
		ptr += iocb->ki_pos - extent_start;
	}

	tmp = kmalloc(count, GFP_NOFS);
	if (!tmp) {
		ret = -ENOMEM;
		goto out;
	}
	read_extent_buffer(leaf, tmp, ptr, count);
	btrfs_release_path(path);
	unlock_extent_cached(io_tree, start, lockend, cached_state);
	btrfs_inode_unlock(&inode->vfs_inode, BTRFS_ILOCK_SHARED);
	*unlocked = true;

	ret = copy_to_iter(tmp, count, iter);
	if (ret != count)
		ret = -EFAULT;
	kfree(tmp);
out:
	btrfs_free_path(path);
	return ret;
}
{
10304 struct btrfs_inode
*inode
;
10306 wait_queue_head_t wait
;
10308 blk_status_t status
;
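
/*
 * 'pending' is initialized to 1 by the submitter as a bias reference so the
 * wakeup in the endio handler cannot fire before all bios are submitted; the
 * submitter drops its reference and waits in
 * btrfs_encoded_read_regular_fill_pages().
 */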
static blk_status_t submit_encoded_read_bio(struct btrfs_inode *inode,
					    struct bio *bio, int mirror_num)
{
	struct btrfs_encoded_read_private *priv = bio->bi_private;
	struct btrfs_bio *bbio = btrfs_bio(bio);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	blk_status_t ret;

	if (!priv->skip_csum) {
		ret = btrfs_lookup_bio_sums(&inode->vfs_inode, bio, NULL);
		if (ret)
			return ret;
	}

	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
	if (ret) {
		btrfs_bio_free_csum(bbio);
		return ret;
	}

	atomic_inc(&priv->pending);
	ret = btrfs_map_bio(fs_info, bio, mirror_num);
	if (ret) {
		atomic_dec(&priv->pending);
		btrfs_bio_free_csum(bbio);
	}
	return ret;
}
btrfs_encoded_read_verify_csum(struct btrfs_bio
*bbio
)
10343 const bool uptodate
= (bbio
->bio
.bi_status
== BLK_STS_OK
);
10344 struct btrfs_encoded_read_private
*priv
= bbio
->bio
.bi_private
;
10345 struct btrfs_inode
*inode
= priv
->inode
;
10346 struct btrfs_fs_info
*fs_info
= inode
->root
->fs_info
;
10347 u32 sectorsize
= fs_info
->sectorsize
;
10348 struct bio_vec
*bvec
;
10349 struct bvec_iter_all iter_all
;
10350 u64 start
= priv
->file_offset
;
10351 u32 bio_offset
= 0;
10353 if (priv
->skip_csum
|| !uptodate
)
10354 return bbio
->bio
.bi_status
;
10356 bio_for_each_segment_all(bvec
, &bbio
->bio
, iter_all
) {
10357 unsigned int i
, nr_sectors
, pgoff
;
10359 nr_sectors
= BTRFS_BYTES_TO_BLKS(fs_info
, bvec
->bv_len
);
10360 pgoff
= bvec
->bv_offset
;
10361 for (i
= 0; i
< nr_sectors
; i
++) {
10362 ASSERT(pgoff
< PAGE_SIZE
);
10363 if (check_data_csum(&inode
->vfs_inode
, bbio
, bio_offset
,
10364 bvec
->bv_page
, pgoff
, start
))
10365 return BLK_STS_IOERR
;
10366 start
+= sectorsize
;
10367 bio_offset
+= sectorsize
;
10368 pgoff
+= sectorsize
;
static void btrfs_encoded_read_endio(struct bio *bio)
{
	struct btrfs_encoded_read_private *priv = bio->bi_private;
	struct btrfs_bio *bbio = btrfs_bio(bio);
	blk_status_t status;

	status = btrfs_encoded_read_verify_csum(bbio);
	if (status) {
		/*
		 * The memory barrier implied by the atomic_dec_return() here
		 * pairs with the memory barrier implied by the
		 * atomic_dec_return() or io_wait_event() in
		 * btrfs_encoded_read_regular_fill_pages() to ensure that this
		 * write is observed before the load of status in
		 * btrfs_encoded_read_regular_fill_pages().
		 */
		WRITE_ONCE(priv->status, status);
	}
	if (!atomic_dec_return(&priv->pending))
		wake_up(&priv->wait);
	btrfs_bio_free_csum(bbio);
	bio_put(bio);
}
static int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
						 u64 file_offset,
						 u64 disk_bytenr,
						 u64 disk_io_size,
						 struct page **pages)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_encoded_read_private priv = {
		.inode = inode,
		.file_offset = file_offset,
		.pending = ATOMIC_INIT(1),
		.skip_csum = (inode->flags & BTRFS_INODE_NODATASUM),
	};
	unsigned long i = 0;
	u64 cur = 0;
	int ret;

	init_waitqueue_head(&priv.wait);
	/*
	 * Submit bios for the extent, splitting due to bio or stripe limits as
	 * necessary.
	 */
	while (cur < disk_io_size) {
		struct extent_map *em;
		struct btrfs_io_geometry geom;
		struct bio *bio = NULL;
		u64 remaining;

		em = btrfs_get_chunk_map(fs_info, disk_bytenr + cur,
					 disk_io_size - cur);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
		} else {
			ret = btrfs_get_io_geometry(fs_info, em, BTRFS_MAP_READ,
						    disk_bytenr + cur, &geom);
			free_extent_map(em);
		}
		if (ret) {
			WRITE_ONCE(priv.status, errno_to_blk_status(ret));
			break;
		}
		remaining = min(geom.len, disk_io_size - cur);
		while (bio || remaining) {
			size_t bytes = min_t(u64, remaining, PAGE_SIZE);

			if (!bio) {
				bio = btrfs_bio_alloc(BIO_MAX_VECS);
				bio->bi_iter.bi_sector =
					(disk_bytenr + cur) >> SECTOR_SHIFT;
				bio->bi_end_io = btrfs_encoded_read_endio;
				bio->bi_private = &priv;
				bio->bi_opf = REQ_OP_READ;
			}

			if (!bytes ||
			    bio_add_page(bio, pages[i], bytes, 0) < bytes) {
				blk_status_t status;

				status = submit_encoded_read_bio(inode, bio, 0);
				if (status) {
					WRITE_ONCE(priv.status, status);
					break;
				}
				bio = NULL;
				continue;
			}

			i++;
			cur += bytes;
			remaining -= bytes;
		}
	}

	if (atomic_dec_return(&priv.pending))
		io_wait_event(priv.wait, !atomic_read(&priv.pending));
	/* See btrfs_encoded_read_endio() for ordering. */
	return blk_status_to_errno(READ_ONCE(priv.status));
}
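
/*
 * In the submission loop above, a bio is filled until either bio_add_page()
 * cannot take a whole page or the stripe returned by btrfs_get_io_geometry()
 * is exhausted (bytes == 0); the bio is then submitted and a new one is
 * started at the next disk offset on the following iteration.
 */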
static ssize_t btrfs_encoded_read_regular(struct kiocb *iocb,
					  struct iov_iter *iter,
					  u64 start, u64 lockend,
					  struct extent_state **cached_state,
					  u64 disk_bytenr, u64 disk_io_size,
					  size_t count, bool compressed,
					  bool *unlocked)
{
	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct page **pages;
	unsigned long nr_pages, i;
	u64 cur;
	size_t page_offset;
	ssize_t ret;

	nr_pages = DIV_ROUND_UP(disk_io_size, PAGE_SIZE);
	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
	if (!pages)
		return -ENOMEM;
	ret = btrfs_alloc_page_array(nr_pages, pages);
	if (ret) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_encoded_read_regular_fill_pages(inode, start, disk_bytenr,
						    disk_io_size, pages);
	if (ret)
		goto out;

	unlock_extent_cached(io_tree, start, lockend, cached_state);
	btrfs_inode_unlock(&inode->vfs_inode, BTRFS_ILOCK_SHARED);
	*unlocked = true;

	if (compressed) {
		i = 0;
		page_offset = 0;
	} else {
		i = (iocb->ki_pos - start) >> PAGE_SHIFT;
		page_offset = (iocb->ki_pos - start) & (PAGE_SIZE - 1);
	}
	cur = 0;
	while (cur < count) {
		size_t bytes = min_t(size_t, count - cur,
				     PAGE_SIZE - page_offset);

		if (copy_page_to_iter(pages[i], page_offset, bytes,
				      iter) != bytes) {
			ret = -EFAULT;
			goto out;
		}
		i++;
		cur += bytes;
		page_offset = 0;
	}
	ret = count;
out:
	for (i = 0; i < nr_pages; i++) {
		if (pages[i])
			__free_page(pages[i]);
	}
	kfree(pages);
	return ret;
}
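
/*
 * Example of the page index math above for an uncompressed read: with 4K
 * pages, start == 0 and iocb->ki_pos == 6144, copying begins at page i == 1
 * (6144 >> PAGE_SHIFT) and page_offset == 2048 (6144 & 4095), i.e. halfway
 * into the second page. Compressed reads always start at page 0 because the
 * caller receives the whole compressed extent.
 */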
ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
			   struct btrfs_ioctl_encoded_io_args *encoded)
{
	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_io_tree *io_tree = &inode->io_tree;
	ssize_t ret;
	size_t count = iov_iter_count(iter);
	u64 start, lockend, disk_bytenr, disk_io_size;
	struct extent_state *cached_state = NULL;
	struct extent_map *em;
	bool unlocked = false;

	file_accessed(iocb->ki_filp);

	btrfs_inode_lock(&inode->vfs_inode, BTRFS_ILOCK_SHARED);

	if (iocb->ki_pos >= inode->vfs_inode.i_size) {
		btrfs_inode_unlock(&inode->vfs_inode, BTRFS_ILOCK_SHARED);
		return 0;
	}
	start = ALIGN_DOWN(iocb->ki_pos, fs_info->sectorsize);
	/*
	 * We don't know how long the extent containing iocb->ki_pos is, but if
	 * it's compressed we know that it won't be longer than this.
	 */
	lockend = start + BTRFS_MAX_UNCOMPRESSED - 1;

	for (;;) {
		struct btrfs_ordered_extent *ordered;

		ret = btrfs_wait_ordered_range(&inode->vfs_inode, start,
					       lockend - start + 1);
		if (ret)
			goto out_unlock_inode;
		lock_extent_bits(io_tree, start, lockend, &cached_state);
		ordered = btrfs_lookup_ordered_range(inode, start,
						     lockend - start + 1);
		if (!ordered)
			break;
		btrfs_put_ordered_extent(ordered);
		unlock_extent_cached(io_tree, start, lockend, &cached_state);
		cond_resched();
	}

	em = btrfs_get_extent(inode, NULL, 0, start, lockend - start + 1);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_unlock_extent;
	}

	if (em->block_start == EXTENT_MAP_INLINE) {
		u64 extent_start = em->start;

		/*
		 * For inline extents we get everything we need out of the
		 * extent item.
		 */
		free_extent_map(em);
		em = NULL;
		ret = btrfs_encoded_read_inline(iocb, iter, start, lockend,
						&cached_state, extent_start,
						count, encoded, &unlocked);
		goto out;
	}

	/*
	 * We only want to return up to EOF even if the extent extends beyond
	 * it.
	 */
	encoded->len = min_t(u64, extent_map_end(em),
			     inode->vfs_inode.i_size) - iocb->ki_pos;
	if (em->block_start == EXTENT_MAP_HOLE ||
	    test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
		disk_bytenr = EXTENT_MAP_HOLE;
		count = min_t(u64, count, encoded->len);
		encoded->len = count;
		encoded->unencoded_len = count;
	} else if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
		disk_bytenr = em->block_start;
		/*
		 * Bail if the buffer isn't large enough to return the whole
		 * compressed extent.
		 */
		if (em->block_len > count) {
			ret = -ENOBUFS;
			goto out_em;
		}
		disk_io_size = count = em->block_len;
		encoded->unencoded_len = em->ram_bytes;
		encoded->unencoded_offset = iocb->ki_pos - em->orig_start;
		ret = btrfs_encoded_io_compression_from_extent(fs_info,
							     em->compress_type);
		if (ret < 0)
			goto out_em;
		encoded->compression = ret;
	} else {
		disk_bytenr = em->block_start + (start - em->start);
		if (encoded->len > count)
			encoded->len = count;
		/*
		 * Don't read beyond what we locked. This also limits the page
		 * allocations that we'll do.
		 */
		disk_io_size = min(lockend + 1, iocb->ki_pos + encoded->len) - start;
		count = start + disk_io_size - iocb->ki_pos;
		encoded->len = count;
		encoded->unencoded_len = count;
		disk_io_size = ALIGN(disk_io_size, fs_info->sectorsize);
	}
	free_extent_map(em);
	em = NULL;

	if (disk_bytenr == EXTENT_MAP_HOLE) {
		unlock_extent_cached(io_tree, start, lockend, &cached_state);
		btrfs_inode_unlock(&inode->vfs_inode, BTRFS_ILOCK_SHARED);
		unlocked = true;
		ret = iov_iter_zero(count, iter);
		if (ret != count)
			ret = -EFAULT;
	} else {
		ret = btrfs_encoded_read_regular(iocb, iter, start, lockend,
						 &cached_state, disk_bytenr,
						 disk_io_size, count,
						 encoded->compression,
						 &unlocked);
	}

out:
	if (ret >= 0)
		iocb->ki_pos += encoded->len;
out_em:
	free_extent_map(em);
out_unlock_extent:
	if (!unlocked)
		unlock_extent_cached(io_tree, start, lockend, &cached_state);
out_unlock_inode:
	if (!unlocked)
		btrfs_inode_unlock(&inode->vfs_inode, BTRFS_ILOCK_SHARED);
	return ret;
}
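
/*
 * Worked example for the uncompressed case above, assuming a 4K sector size:
 * with iocb->ki_pos == 6144 and encoded->len == 10000, start is 4096,
 * disk_io_size is min(lockend + 1, 6144 + 10000) - 4096 == 12048, count
 * becomes 4096 + 12048 - 6144 == 10000, and disk_io_size is then rounded up
 * to 12288 so that only whole sectors are read from disk.
 */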
ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
			       const struct btrfs_ioctl_encoded_io_args *encoded)
{
	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct extent_changeset *data_reserved = NULL;
	struct extent_state *cached_state = NULL;
	int compression;
	size_t orig_count;
	u64 start, end;
	u64 num_bytes, ram_bytes, disk_num_bytes;
	unsigned long nr_pages, i;
	struct page **pages;
	struct btrfs_key ins;
	bool extent_reserved = false;
	struct extent_map *em;
	ssize_t ret;

	switch (encoded->compression) {
	case BTRFS_ENCODED_IO_COMPRESSION_ZLIB:
		compression = BTRFS_COMPRESS_ZLIB;
		break;
	case BTRFS_ENCODED_IO_COMPRESSION_ZSTD:
		compression = BTRFS_COMPRESS_ZSTD;
		break;
	case BTRFS_ENCODED_IO_COMPRESSION_LZO_4K:
	case BTRFS_ENCODED_IO_COMPRESSION_LZO_8K:
	case BTRFS_ENCODED_IO_COMPRESSION_LZO_16K:
	case BTRFS_ENCODED_IO_COMPRESSION_LZO_32K:
	case BTRFS_ENCODED_IO_COMPRESSION_LZO_64K:
		/* The sector size must match for LZO. */
		if (encoded->compression -
		    BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 12 !=
		    fs_info->sectorsize_bits)
			return -EINVAL;
		compression = BTRFS_COMPRESS_LZO;
		break;
	default:
		return -EINVAL;
	}
	if (encoded->encryption != BTRFS_ENCODED_IO_ENCRYPTION_NONE)
		return -EINVAL;

	orig_count = iov_iter_count(from);

	/* The extent size must be sane. */
	if (encoded->unencoded_len > BTRFS_MAX_UNCOMPRESSED ||
	    orig_count > BTRFS_MAX_COMPRESSED || orig_count == 0)
		return -EINVAL;

	/*
	 * The compressed data must be smaller than the decompressed data.
	 *
	 * It's of course possible for data to compress to larger or the same
	 * size, but the buffered I/O path falls back to no compression for such
	 * data, and we don't want to break any assumptions by creating these
	 * extents.
	 *
	 * Note that this is less strict than the current check we have that the
	 * compressed data must be at least one sector smaller than the
	 * decompressed data. We only want to enforce the weaker requirement
	 * from old kernels that it is at least one byte smaller.
	 */
	if (orig_count >= encoded->unencoded_len)
		return -EINVAL;

	/* The extent must start on a sector boundary. */
	start = iocb->ki_pos;
	if (!IS_ALIGNED(start, fs_info->sectorsize))
		return -EINVAL;

	/*
	 * The extent must end on a sector boundary. However, we allow a write
	 * which ends at or extends i_size to have an unaligned length; we round
	 * up the extent size and set i_size to the unaligned end.
	 */
	if (start + encoded->len < inode->vfs_inode.i_size &&
	    !IS_ALIGNED(start + encoded->len, fs_info->sectorsize))
		return -EINVAL;

	/* Finally, the offset in the unencoded data must be sector-aligned. */
	if (!IS_ALIGNED(encoded->unencoded_offset, fs_info->sectorsize))
		return -EINVAL;

	num_bytes = ALIGN(encoded->len, fs_info->sectorsize);
	ram_bytes = ALIGN(encoded->unencoded_len, fs_info->sectorsize);
	end = start + num_bytes - 1;

	/*
	 * If the extent cannot be inline, the compressed data on disk must be
	 * sector-aligned. For convenience, we extend it with zeroes if it
	 * isn't.
	 */
	disk_num_bytes = ALIGN(orig_count, fs_info->sectorsize);
	nr_pages = DIV_ROUND_UP(disk_num_bytes, PAGE_SIZE);
	pages = kvcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL_ACCOUNT);
	if (!pages)
		return -ENOMEM;
	for (i = 0; i < nr_pages; i++) {
		size_t bytes = min_t(size_t, PAGE_SIZE, iov_iter_count(from));
		char *kaddr;

		pages[i] = alloc_page(GFP_KERNEL_ACCOUNT);
		if (!pages[i]) {
			ret = -ENOMEM;
			goto out_pages;
		}
		kaddr = kmap(pages[i]);
		if (copy_from_iter(kaddr, bytes, from) != bytes) {
			kunmap(pages[i]);
			ret = -EFAULT;
			goto out_pages;
		}
		if (bytes < PAGE_SIZE)
			memset(kaddr + bytes, 0, PAGE_SIZE - bytes);
		kunmap(pages[i]);
	}

	for (;;) {
		struct btrfs_ordered_extent *ordered;

		ret = btrfs_wait_ordered_range(&inode->vfs_inode, start, num_bytes);
		if (ret)
			goto out_pages;
		ret = invalidate_inode_pages2_range(inode->vfs_inode.i_mapping,
						    start >> PAGE_SHIFT,
						    end >> PAGE_SHIFT);
		if (ret)
			goto out_pages;
		lock_extent_bits(io_tree, start, end, &cached_state);
		ordered = btrfs_lookup_ordered_range(inode, start, num_bytes);
		if (!ordered &&
		    !filemap_range_has_page(inode->vfs_inode.i_mapping, start, end))
			break;
		if (ordered)
			btrfs_put_ordered_extent(ordered);
		unlock_extent_cached(io_tree, start, end, &cached_state);
		cond_resched();
	}

	/*
	 * We don't use the higher-level delalloc space functions because our
	 * num_bytes and disk_num_bytes are different.
	 */
	ret = btrfs_alloc_data_chunk_ondemand(inode, disk_num_bytes);
	if (ret)
		goto out_unlock;
	ret = btrfs_qgroup_reserve_data(inode, &data_reserved, start, num_bytes);
	if (ret)
		goto out_free_data_space;
	ret = btrfs_delalloc_reserve_metadata(inode, num_bytes, disk_num_bytes,
					      false);
	if (ret)
		goto out_qgroup_free_data;

	/* Try an inline extent first. */
	if (start == 0 && encoded->unencoded_len == encoded->len &&
	    encoded->unencoded_offset == 0) {
		ret = cow_file_range_inline(inode, encoded->len, orig_count,
					    compression, pages, true);
		if (ret <= 0) {
			if (ret == 0)
				ret = orig_count;
			goto out_delalloc_release;
		}
	}

	ret = btrfs_reserve_extent(root, disk_num_bytes, disk_num_bytes,
				   disk_num_bytes, 0, 0, &ins, 1, 1);
	if (ret)
		goto out_delalloc_release;
	extent_reserved = true;

	em = create_io_em(inode, start, num_bytes,
			  start - encoded->unencoded_offset, ins.objectid,
			  ins.offset, ins.offset, ram_bytes, compression,
			  BTRFS_ORDERED_COMPRESSED);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_free_reserved;
	}
	free_extent_map(em);

	ret = btrfs_add_ordered_extent(inode, start, num_bytes, ram_bytes,
				       ins.objectid, ins.offset,
				       encoded->unencoded_offset,
				       (1 << BTRFS_ORDERED_ENCODED) |
				       (1 << BTRFS_ORDERED_COMPRESSED),
				       compression);
	if (ret) {
		btrfs_drop_extent_cache(inode, start, end, 0);
		goto out_free_reserved;
	}
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);

	if (start + encoded->len > inode->vfs_inode.i_size)
		i_size_write(&inode->vfs_inode, start + encoded->len);

	unlock_extent_cached(io_tree, start, end, &cached_state);

	btrfs_delalloc_release_extents(inode, num_bytes);

	if (btrfs_submit_compressed_write(inode, start, num_bytes, ins.objectid,
					  ins.offset, pages, nr_pages, 0, NULL,
					  false)) {
		btrfs_writepage_endio_finish_ordered(inode, pages[0], start, end, 0);
		ret = -EIO;
		goto out_pages;
	}
	ret = orig_count;
	goto out;

out_free_reserved:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_delalloc_release:
	btrfs_delalloc_release_extents(inode, num_bytes);
	btrfs_delalloc_release_metadata(inode, disk_num_bytes, ret < 0);
out_qgroup_free_data:
	if (ret < 0)
		btrfs_qgroup_free_data(inode, data_reserved, start, num_bytes);
out_free_data_space:
	/*
	 * If btrfs_reserve_extent() succeeded, then we already decremented
	 * bytes_may_use.
	 */
	if (!extent_reserved)
		btrfs_free_reserved_data_space_noquota(fs_info, disk_num_bytes);
out_unlock:
	unlock_extent_cached(io_tree, start, end, &cached_state);
out_pages:
	for (i = 0; i < nr_pages; i++) {
		if (pages[i])
			__free_page(pages[i]);
	}
	kvfree(pages);
out:
	if (ret >= 0)
		iocb->ki_pos += encoded->len;
	return ret;
}
#ifdef CONFIG_SWAP
/*
 * Add an entry indicating a block group or device which is pinned by a
 * swapfile. Returns 0 on success, 1 if there is already an entry for it, or a
 * negative errno on failure.
 */
static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr,
				  bool is_block_group)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct btrfs_swapfile_pin *sp, *entry;
	struct rb_node **p;
	struct rb_node *parent = NULL;

	sp = kmalloc(sizeof(*sp), GFP_NOFS);
	if (!sp)
		return -ENOMEM;
	sp->ptr = ptr;
	sp->inode = inode;
	sp->is_block_group = is_block_group;
	sp->bg_extent_count = 1;

	spin_lock(&fs_info->swapfile_pins_lock);
	p = &fs_info->swapfile_pins.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_swapfile_pin, node);
		if (sp->ptr < entry->ptr ||
		    (sp->ptr == entry->ptr && sp->inode < entry->inode)) {
			p = &(*p)->rb_left;
		} else if (sp->ptr > entry->ptr ||
			   (sp->ptr == entry->ptr && sp->inode > entry->inode)) {
			p = &(*p)->rb_right;
		} else {
			if (is_block_group)
				entry->bg_extent_count++;
			spin_unlock(&fs_info->swapfile_pins_lock);
			kfree(sp);
			return 1;
		}
	}
	rb_link_node(&sp->node, parent, p);
	rb_insert_color(&sp->node, &fs_info->swapfile_pins);
	spin_unlock(&fs_info->swapfile_pins_lock);
	return 0;
}
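
/*
 * The tree above is keyed on (ptr, inode): ptr is either a struct
 * btrfs_device or a struct btrfs_block_group depending on is_block_group,
 * and inserting a duplicate key just bumps bg_extent_count and returns 1,
 * letting callers distinguish "already pinned" from a fresh pin.
 */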
/* Free all of the entries pinned by this swapfile. */
static void btrfs_free_swapfile_pins(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct btrfs_swapfile_pin *sp;
	struct rb_node *node, *next;

	spin_lock(&fs_info->swapfile_pins_lock);
	node = rb_first(&fs_info->swapfile_pins);
	while (node) {
		next = rb_next(node);
		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
		if (sp->inode == inode) {
			rb_erase(&sp->node, &fs_info->swapfile_pins);
			if (sp->is_block_group) {
				btrfs_dec_block_group_swap_extents(sp->ptr,
							   sp->bg_extent_count);
				btrfs_put_block_group(sp->ptr);
			}
			kfree(sp);
		}
		node = next;
	}
	spin_unlock(&fs_info->swapfile_pins_lock);
}
struct btrfs_swap_info {
	u64 start;
	u64 block_start;
	u64 block_len;
	u64 lowest_ppage;
	u64 highest_ppage;
	unsigned long nr_pages;
	int nr_extents;
};
static int btrfs_add_swap_extent(struct swap_info_struct *sis,
				 struct btrfs_swap_info *bsi)
{
	unsigned long nr_pages;
	unsigned long max_pages;
	u64 first_ppage, first_ppage_reported, next_ppage;
	int ret;

	/*
	 * Our swapfile may have had its size extended after the swap header was
	 * written. In that case activating the swapfile should not go beyond
	 * the max size set in the swap header.
	 */
	if (bsi->nr_pages >= sis->max)
		return 0;

	max_pages = sis->max - bsi->nr_pages;
	first_ppage = ALIGN(bsi->block_start, PAGE_SIZE) >> PAGE_SHIFT;
	next_ppage = ALIGN_DOWN(bsi->block_start + bsi->block_len,
				PAGE_SIZE) >> PAGE_SHIFT;

	if (first_ppage >= next_ppage)
		return 0;
	nr_pages = next_ppage - first_ppage;
	nr_pages = min(nr_pages, max_pages);

	first_ppage_reported = first_ppage;
	if (bsi->start == 0)
		first_ppage_reported++;
	if (bsi->lowest_ppage > first_ppage_reported)
		bsi->lowest_ppage = first_ppage_reported;
	if (bsi->highest_ppage < (next_ppage - 1))
		bsi->highest_ppage = next_ppage - 1;

	ret = add_swap_extent(sis, bsi->nr_pages, nr_pages, first_ppage);
	if (ret < 0)
		return ret;
	bsi->nr_extents += ret;
	bsi->nr_pages += nr_pages;
	return 0;
}
*file
)
11058 struct inode
*inode
= file_inode(file
);
11060 btrfs_free_swapfile_pins(inode
);
11061 atomic_dec(&BTRFS_I(inode
)->root
->nr_swapfiles
);
11064 static int btrfs_swap_activate(struct swap_info_struct
*sis
, struct file
*file
,
11067 struct inode
*inode
= file_inode(file
);
11068 struct btrfs_root
*root
= BTRFS_I(inode
)->root
;
11069 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
11070 struct extent_io_tree
*io_tree
= &BTRFS_I(inode
)->io_tree
;
11071 struct extent_state
*cached_state
= NULL
;
11072 struct extent_map
*em
= NULL
;
11073 struct btrfs_device
*device
= NULL
;
11074 struct btrfs_swap_info bsi
= {
11075 .lowest_ppage
= (sector_t
)-1ULL,
11082 * If the swap file was just created, make sure delalloc is done. If the
11083 * file changes again after this, the user is doing something stupid and
11084 * we don't really care.
11086 ret
= btrfs_wait_ordered_range(inode
, 0, (u64
)-1);
11091 * The inode is locked, so these flags won't change after we check them.
11093 if (BTRFS_I(inode
)->flags
& BTRFS_INODE_COMPRESS
) {
11094 btrfs_warn(fs_info
, "swapfile must not be compressed");
11097 if (!(BTRFS_I(inode
)->flags
& BTRFS_INODE_NODATACOW
)) {
11098 btrfs_warn(fs_info
, "swapfile must not be copy-on-write");
11101 if (!(BTRFS_I(inode
)->flags
& BTRFS_INODE_NODATASUM
)) {
11102 btrfs_warn(fs_info
, "swapfile must not be checksummed");
11107 * Balance or device remove/replace/resize can move stuff around from
11108 * under us. The exclop protection makes sure they aren't running/won't
11109 * run concurrently while we are mapping the swap extents, and
11110 * fs_info->swapfile_pins prevents them from running while the swap
11111 * file is active and moving the extents. Note that this also prevents
11112 * a concurrent device add which isn't actually necessary, but it's not
11113 * really worth the trouble to allow it.
11115 if (!btrfs_exclop_start(fs_info
, BTRFS_EXCLOP_SWAP_ACTIVATE
)) {
11116 btrfs_warn(fs_info
,
11117 "cannot activate swapfile while exclusive operation is running");
11122 * Prevent snapshot creation while we are activating the swap file.
11123 * We do not want to race with snapshot creation. If snapshot creation
11124 * already started before we bumped nr_swapfiles from 0 to 1 and
11125 * completes before the first write into the swap file after it is
11126 * activated, than that write would fallback to COW.
11128 if (!btrfs_drew_try_write_lock(&root
->snapshot_lock
)) {
11129 btrfs_exclop_finish(fs_info
);
11130 btrfs_warn(fs_info
,
11131 "cannot activate swapfile because snapshot creation is in progress");
11135 * Snapshots can create extents which require COW even if NODATACOW is
11136 * set. We use this counter to prevent snapshots. We must increment it
11137 * before walking the extents because we don't want a concurrent
11138 * snapshot to run after we've already checked the extents.
11140 * It is possible that subvolume is marked for deletion but still not
11141 * removed yet. To prevent this race, we check the root status before
11142 * activating the swapfile.
11144 spin_lock(&root
->root_item_lock
);
11145 if (btrfs_root_dead(root
)) {
11146 spin_unlock(&root
->root_item_lock
);
11148 btrfs_exclop_finish(fs_info
);
11149 btrfs_warn(fs_info
,
11150 "cannot activate swapfile because subvolume %llu is being deleted",
11151 root
->root_key
.objectid
);
11154 atomic_inc(&root
->nr_swapfiles
);
11155 spin_unlock(&root
->root_item_lock
);
11157 isize
= ALIGN_DOWN(inode
->i_size
, fs_info
->sectorsize
);
11159 lock_extent_bits(io_tree
, 0, isize
- 1, &cached_state
);
11161 while (start
< isize
) {
11162 u64 logical_block_start
, physical_block_start
;
11163 struct btrfs_block_group
*bg
;
11164 u64 len
= isize
- start
;
11166 em
= btrfs_get_extent(BTRFS_I(inode
), NULL
, 0, start
, len
);
11172 if (em
->block_start
== EXTENT_MAP_HOLE
) {
11173 btrfs_warn(fs_info
, "swapfile must not have holes");
11177 if (em
->block_start
== EXTENT_MAP_INLINE
) {
11179 * It's unlikely we'll ever actually find ourselves
11180 * here, as a file small enough to fit inline won't be
11181 * big enough to store more than the swap header, but in
11182 * case something changes in the future, let's catch it
11183 * here rather than later.
11185 btrfs_warn(fs_info
, "swapfile must not be inline");
11189 if (test_bit(EXTENT_FLAG_COMPRESSED
, &em
->flags
)) {
11190 btrfs_warn(fs_info
, "swapfile must not be compressed");
11195 logical_block_start
= em
->block_start
+ (start
- em
->start
);
11196 len
= min(len
, em
->len
- (start
- em
->start
));
11197 free_extent_map(em
);
11200 ret
= can_nocow_extent(inode
, start
, &len
, NULL
, NULL
, NULL
, true);
11206 btrfs_warn(fs_info
,
11207 "swapfile must not be copy-on-write");
11212 em
= btrfs_get_chunk_map(fs_info
, logical_block_start
, len
);
11218 if (em
->map_lookup
->type
& BTRFS_BLOCK_GROUP_PROFILE_MASK
) {
11219 btrfs_warn(fs_info
,
11220 "swapfile must have single data profile");
11225 if (device
== NULL
) {
11226 device
= em
->map_lookup
->stripes
[0].dev
;
11227 ret
= btrfs_add_swapfile_pin(inode
, device
, false);
11232 } else if (device
!= em
->map_lookup
->stripes
[0].dev
) {
11233 btrfs_warn(fs_info
, "swapfile must be on one device");
11238 physical_block_start
= (em
->map_lookup
->stripes
[0].physical
+
11239 (logical_block_start
- em
->start
));
11240 len
= min(len
, em
->len
- (logical_block_start
- em
->start
));
11241 free_extent_map(em
);
11244 bg
= btrfs_lookup_block_group(fs_info
, logical_block_start
);
11246 btrfs_warn(fs_info
,
11247 "could not find block group containing swapfile");
11252 if (!btrfs_inc_block_group_swap_extents(bg
)) {
11253 btrfs_warn(fs_info
,
11254 "block group for swapfile at %llu is read-only%s",
11256 atomic_read(&fs_info
->scrubs_running
) ?
11257 " (scrub running)" : "");
11258 btrfs_put_block_group(bg
);
11263 ret
= btrfs_add_swapfile_pin(inode
, bg
, true);
11265 btrfs_put_block_group(bg
);
11272 if (bsi
.block_len
&&
11273 bsi
.block_start
+ bsi
.block_len
== physical_block_start
) {
11274 bsi
.block_len
+= len
;
11276 if (bsi
.block_len
) {
11277 ret
= btrfs_add_swap_extent(sis
, &bsi
);
11282 bsi
.block_start
= physical_block_start
;
11283 bsi
.block_len
= len
;
11290 ret
= btrfs_add_swap_extent(sis
, &bsi
);
11293 if (!IS_ERR_OR_NULL(em
))
11294 free_extent_map(em
);
11296 unlock_extent_cached(io_tree
, 0, isize
- 1, &cached_state
);
11299 btrfs_swap_deactivate(file
);
11301 btrfs_drew_write_unlock(&root
->snapshot_lock
);
11303 btrfs_exclop_finish(fs_info
);
11309 sis
->bdev
= device
->bdev
;
11310 *span
= bsi
.highest_ppage
- bsi
.lowest_ppage
+ 1;
11311 sis
->max
= bsi
.nr_pages
;
11312 sis
->pages
= bsi
.nr_pages
- 1;
11313 sis
->highest_bit
= bsi
.nr_pages
- 1;
11314 return bsi
.nr_extents
;
11317 static void btrfs_swap_deactivate(struct file
*file
)
11321 static int btrfs_swap_activate(struct swap_info_struct
*sis
, struct file
*file
,
11324 return -EOPNOTSUPP
;
11329 * Update the number of bytes used in the VFS' inode. When we replace extents in
11330 * a range (clone, dedupe, fallocate's zero range), we must update the number of
11331 * bytes used by the inode in an atomic manner, so that concurrent stat(2) calls
11332 * always get a correct value.
11334 void btrfs_update_inode_bytes(struct btrfs_inode
*inode
,
11335 const u64 add_bytes
,
11336 const u64 del_bytes
)
11338 if (add_bytes
== del_bytes
)
11341 spin_lock(&inode
->lock
);
11343 inode_sub_bytes(&inode
->vfs_inode
, del_bytes
);
11345 inode_add_bytes(&inode
->vfs_inode
, add_bytes
);
11346 spin_unlock(&inode
->lock
);
11350 * Verify that there are no ordered extents for a given file range.
11352 * @inode: The target inode.
11353 * @start: Start offset of the file range, should be sector size aligned.
11354 * @end: End offset (inclusive) of the file range, its value +1 should be
11355 * sector size aligned.
11357 * This should typically be used for cases where we locked an inode's VFS lock in
11358 * exclusive mode, we have also locked the inode's i_mmap_lock in exclusive mode,
11359 * we have flushed all delalloc in the range, we have waited for all ordered
11360 * extents in the range to complete and finally we have locked the file range in
11361 * the inode's io_tree.
11363 void btrfs_assert_inode_range_clean(struct btrfs_inode
*inode
, u64 start
, u64 end
)
11365 struct btrfs_root
*root
= inode
->root
;
11366 struct btrfs_ordered_extent
*ordered
;
11368 if (!IS_ENABLED(CONFIG_BTRFS_ASSERT
))
11371 ordered
= btrfs_lookup_first_ordered_range(inode
, start
, end
+ 1 - start
);
11373 btrfs_err(root
->fs_info
,
11374 "found unexpected ordered extent in file range [%llu, %llu] for inode %llu root %llu (ordered range [%llu, %llu])",
11375 start
, end
, btrfs_ino(inode
), root
->root_key
.objectid
,
11376 ordered
->file_offset
,
11377 ordered
->file_offset
+ ordered
->num_bytes
- 1);
11378 btrfs_put_ordered_extent(ordered
);
11381 ASSERT(ordered
== NULL
);
11384 static const struct inode_operations btrfs_dir_inode_operations
= {
11385 .getattr
= btrfs_getattr
,
11386 .lookup
= btrfs_lookup
,
11387 .create
= btrfs_create
,
11388 .unlink
= btrfs_unlink
,
11389 .link
= btrfs_link
,
11390 .mkdir
= btrfs_mkdir
,
11391 .rmdir
= btrfs_rmdir
,
11392 .rename
= btrfs_rename2
,
11393 .symlink
= btrfs_symlink
,
11394 .setattr
= btrfs_setattr
,
11395 .mknod
= btrfs_mknod
,
11396 .listxattr
= btrfs_listxattr
,
11397 .permission
= btrfs_permission
,
11398 .get_acl
= btrfs_get_acl
,
11399 .set_acl
= btrfs_set_acl
,
11400 .update_time
= btrfs_update_time
,
11401 .tmpfile
= btrfs_tmpfile
,
11402 .fileattr_get
= btrfs_fileattr_get
,
11403 .fileattr_set
= btrfs_fileattr_set
,
11406 static const struct file_operations btrfs_dir_file_operations
= {
11407 .llseek
= generic_file_llseek
,
11408 .read
= generic_read_dir
,
11409 .iterate_shared
= btrfs_real_readdir
,
11410 .open
= btrfs_opendir
,
11411 .unlocked_ioctl
= btrfs_ioctl
,
11412 #ifdef CONFIG_COMPAT
11413 .compat_ioctl
= btrfs_compat_ioctl
,
11415 .release
= btrfs_release_file
,
11416 .fsync
= btrfs_sync_file
,
11420 * btrfs doesn't support the bmap operation because swapfiles
11421 * use bmap to make a mapping of extents in the file. They assume
11422 * these extents won't change over the life of the file and they
11423 * use the bmap result to do IO directly to the drive.
11425 * the btrfs bmap call would return logical addresses that aren't
11426 * suitable for IO and they also will change frequently as COW
11427 * operations happen. So, swapfile + btrfs == corruption.
11429 * For now we're avoiding this by dropping bmap.
11431 static const struct address_space_operations btrfs_aops
= {
11432 .read_folio
= btrfs_read_folio
,
11433 .writepage
= btrfs_writepage
,
11434 .writepages
= btrfs_writepages
,
11435 .readahead
= btrfs_readahead
,
11436 .direct_IO
= noop_direct_IO
,
11437 .invalidate_folio
= btrfs_invalidate_folio
,
11438 .release_folio
= btrfs_release_folio
,
11439 #ifdef CONFIG_MIGRATION
11440 .migratepage
= btrfs_migratepage
,
11442 .dirty_folio
= filemap_dirty_folio
,
11443 .error_remove_page
= generic_error_remove_page
,
11444 .swap_activate
= btrfs_swap_activate
,
11445 .swap_deactivate
= btrfs_swap_deactivate
,
11448 static const struct inode_operations btrfs_file_inode_operations
= {
11449 .getattr
= btrfs_getattr
,
11450 .setattr
= btrfs_setattr
,
11451 .listxattr
= btrfs_listxattr
,
11452 .permission
= btrfs_permission
,
11453 .fiemap
= btrfs_fiemap
,
11454 .get_acl
= btrfs_get_acl
,
11455 .set_acl
= btrfs_set_acl
,
11456 .update_time
= btrfs_update_time
,
11457 .fileattr_get
= btrfs_fileattr_get
,
11458 .fileattr_set
= btrfs_fileattr_set
,
11460 static const struct inode_operations btrfs_special_inode_operations
= {
11461 .getattr
= btrfs_getattr
,
11462 .setattr
= btrfs_setattr
,
11463 .permission
= btrfs_permission
,
11464 .listxattr
= btrfs_listxattr
,
11465 .get_acl
= btrfs_get_acl
,
11466 .set_acl
= btrfs_set_acl
,
11467 .update_time
= btrfs_update_time
,
11469 static const struct inode_operations btrfs_symlink_inode_operations
= {
11470 .get_link
= page_get_link
,
11471 .getattr
= btrfs_getattr
,
11472 .setattr
= btrfs_setattr
,
11473 .permission
= btrfs_permission
,
11474 .listxattr
= btrfs_listxattr
,
11475 .update_time
= btrfs_update_time
,
11478 const struct dentry_operations btrfs_dentry_operations
= {
11479 .d_delete
= btrfs_dentry_delete
,