// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/blkdev.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
#include <linux/uuid.h>
#include <linux/semaphore.h>
#include <linux/error-injection.h>
#include <linux/crc32c.h>
#include <linux/sched/mm.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "compression.h"
#include "tree-checker.h"
#include "ref-verify.h"
#include "block-group.h"
#include "space-info.h"
#include "accessors.h"
#include "extent-tree.h"
#include "root-tree.h"
#include "uuid-tree.h"
#include "relocation.h"

#define BTRFS_SUPER_FLAG_SUPP	(BTRFS_HEADER_FLAG_WRITTEN |\
				 BTRFS_HEADER_FLAG_RELOC |\
				 BTRFS_SUPER_FLAG_ERROR |\
				 BTRFS_SUPER_FLAG_SEEDING |\
				 BTRFS_SUPER_FLAG_METADUMP |\
				 BTRFS_SUPER_FLAG_METADUMP_V2)

static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);

static void btrfs_free_csum_hash(struct btrfs_fs_info *fs_info)
{
	if (fs_info->csum_shash)
		crypto_free_shash(fs_info->csum_shash);
}

/*
 * Compute the csum of a btree block and store the result to provided buffer.
 */
static void csum_tree_block(struct extent_buffer *buf, u8 *result)
{
	struct btrfs_fs_info *fs_info = buf->fs_info;
	const int num_pages = num_extent_pages(buf);
	const int first_page_part = min_t(u32, PAGE_SIZE, fs_info->nodesize);
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	char *kaddr;
	int i;

	shash->tfm = fs_info->csum_shash;
	crypto_shash_init(shash);
	kaddr = page_address(buf->pages[0]) + offset_in_page(buf->start);
	crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE,
			    first_page_part - BTRFS_CSUM_SIZE);

	for (i = 1; i < num_pages && INLINE_EXTENT_BUFFER_PAGES > 1; i++) {
		kaddr = page_address(buf->pages[i]);
		crypto_shash_update(shash, kaddr, PAGE_SIZE);
	}
	memset(result, 0, BTRFS_CSUM_SIZE);
	crypto_shash_final(shash, result);
}

/*
 * We can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer. This is how we
 * detect blocks that either didn't get written at all or got written
 * in the wrong place.
 */
int btrfs_buffer_uptodate(struct extent_buffer *eb, u64 parent_transid, int atomic)
{
	if (!extent_buffer_uptodate(eb))
		return 0;

	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
		return 1;

	if (atomic)
		return -EAGAIN;

	if (!extent_buffer_uptodate(eb) ||
	    btrfs_header_generation(eb) != parent_transid) {
		btrfs_err_rl(eb->fs_info,
"parent transid verify failed on logical %llu mirror %u wanted %llu found %llu",
			     eb->start, eb->read_mirror,
			     parent_transid, btrfs_header_generation(eb));
		clear_extent_buffer_uptodate(eb);
		return 0;
	}
	return 1;
}

static bool btrfs_supported_super_csum(u16 csum_type)
{
	switch (csum_type) {
	case BTRFS_CSUM_TYPE_CRC32:
	case BTRFS_CSUM_TYPE_XXHASH:
	case BTRFS_CSUM_TYPE_SHA256:
	case BTRFS_CSUM_TYPE_BLAKE2:
		return true;
	default:
		return false;
	}
}

/*
 * Return 0 if the superblock checksum type matches the checksum value of that
 * algorithm. Pass the raw disk superblock data.
 */
int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
			   const struct btrfs_super_block *disk_sb)
{
	char result[BTRFS_CSUM_SIZE];
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);

	shash->tfm = fs_info->csum_shash;

	/*
	 * The super_block structure does not span the whole
	 * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space is
	 * filled with zeros and is included in the checksum.
	 */
	crypto_shash_digest(shash, (const u8 *)disk_sb + BTRFS_CSUM_SIZE,
			    BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, result);

	if (memcmp(disk_sb->csum, result, fs_info->csum_size))
		return 1;

	return 0;
}

static int btrfs_repair_eb_io_failure(const struct extent_buffer *eb,
				      int mirror_num)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	int i, num_pages = num_extent_pages(eb);
	int ret = 0;

	if (sb_rdonly(fs_info->sb))
		return -EROFS;

	for (i = 0; i < num_pages; i++) {
		struct page *p = eb->pages[i];
		u64 start = max_t(u64, eb->start, page_offset(p));
		u64 end = min_t(u64, eb->start + eb->len, page_offset(p) + PAGE_SIZE);
		u32 len = end - start;

		ret = btrfs_repair_io_failure(fs_info, 0, start, len,
					      start, p, offset_in_page(start), mirror_num);
		if (ret)
			break;
	}

	return ret;
}

/*
 * helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 *
 * @check:	expected tree parentness check, see the comments of the
 *		structure for details.
 */
int btrfs_read_extent_buffer(struct extent_buffer *eb,
			     struct btrfs_tree_parent_check *check)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	int failed = 0;
	int ret;
	int num_copies = 0;
	int mirror_num = 0;
	int failed_mirror = 0;

	while (1) {
		clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
		ret = read_extent_buffer_pages(eb, WAIT_COMPLETE, mirror_num, check);
		if (!ret)
			break;

		num_copies = btrfs_num_copies(fs_info,
					      eb->start, eb->len);
		if (num_copies == 1)
			break;

		if (!failed_mirror) {
			failed = 1;
			failed_mirror = eb->read_mirror;
		}

		mirror_num++;
		if (mirror_num == failed_mirror)
			mirror_num++;

		if (mirror_num > num_copies)
			break;
	}

	if (failed && !ret && failed_mirror)
		btrfs_repair_eb_io_failure(eb, failed_mirror);

	return ret;
}

/*
 * Checksum a dirty tree block before IO.
 */
blk_status_t btree_csum_one_bio(struct btrfs_bio *bbio)
{
	struct extent_buffer *eb = bbio->private;
	struct btrfs_fs_info *fs_info = eb->fs_info;
	u64 found_start = btrfs_header_bytenr(eb);
	u8 result[BTRFS_CSUM_SIZE];
	int ret;

	/* Btree blocks are always contiguous on disk. */
	if (WARN_ON_ONCE(bbio->file_offset != eb->start))
		return BLK_STS_IOERR;
	if (WARN_ON_ONCE(bbio->bio.bi_iter.bi_size != eb->len))
		return BLK_STS_IOERR;

	if (test_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags)) {
		WARN_ON_ONCE(found_start != 0);
		return BLK_STS_OK;
	}

	if (WARN_ON_ONCE(found_start != eb->start))
		return BLK_STS_IOERR;
	if (WARN_ON(!btrfs_page_test_uptodate(fs_info, eb->pages[0], eb->start,
					      eb->len)))
		return BLK_STS_IOERR;

	ASSERT(memcmp_extent_buffer(eb, fs_info->fs_devices->metadata_uuid,
				    offsetof(struct btrfs_header, fsid),
				    BTRFS_FSID_SIZE) == 0);
	csum_tree_block(eb, result);

	if (btrfs_header_level(eb))
		ret = btrfs_check_node(eb);
	else
		ret = btrfs_check_leaf(eb);
	if (ret < 0)
		goto error;

	/*
	 * Also check the generation, the eb reached here must be newer than
	 * last committed. Or something seriously wrong happened.
	 */
	if (unlikely(btrfs_header_generation(eb) <= fs_info->last_trans_committed)) {
		ret = -EUCLEAN;
		btrfs_err(fs_info,
			  "block=%llu bad generation, have %llu expect > %llu",
			  eb->start, btrfs_header_generation(eb),
			  fs_info->last_trans_committed);
		goto error;
	}
	write_extent_buffer(eb, result, 0, fs_info->csum_size);

	return BLK_STS_OK;

error:
	btrfs_print_tree(eb, 0);
	btrfs_err(fs_info, "block=%llu write time tree block corruption detected",
		  eb->start);
	/*
	 * Be noisy if this is an extent buffer from a log tree. We don't abort
	 * a transaction in case there's a bad log tree extent buffer, we just
	 * fallback to a transaction commit. Still we want to know when there is
	 * a bad log tree extent buffer, as that may signal a bug somewhere.
	 */
	WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG) ||
		btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID);
	return errno_to_blk_status(ret);
}

static bool check_tree_block_fsid(struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
	u8 fsid[BTRFS_FSID_SIZE];

	read_extent_buffer(eb, fsid, offsetof(struct btrfs_header, fsid),
			   BTRFS_FSID_SIZE);

	/*
	 * alloc_fsid_devices() copies the fsid into fs_devices::metadata_uuid.
	 * This is then overwritten by metadata_uuid if it is present in the
	 * device_list_add(). The same is true for a seed device as well. So
	 * use of fs_devices::metadata_uuid is appropriate here.
	 */
	if (memcmp(fsid, fs_info->fs_devices->metadata_uuid, BTRFS_FSID_SIZE) == 0)
		return false;

	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list)
		if (!memcmp(fsid, seed_devs->fsid, BTRFS_FSID_SIZE))
			return false;

	return true;
}

/* Do basic extent buffer checks at read time */
int btrfs_validate_extent_buffer(struct extent_buffer *eb,
				 struct btrfs_tree_parent_check *check)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	u64 found_start;
	const u32 csum_size = fs_info->csum_size;
	u8 found_level;
	u8 result[BTRFS_CSUM_SIZE];
	const u8 *header_csum;
	int ret = 0;

	found_start = btrfs_header_bytenr(eb);
	if (found_start != eb->start) {
		btrfs_err_rl(fs_info,
			"bad tree block start, mirror %u want %llu have %llu",
			     eb->read_mirror, eb->start, found_start);
		ret = -EIO;
		goto out;
	}
	if (check_tree_block_fsid(eb)) {
		btrfs_err_rl(fs_info, "bad fsid on logical %llu mirror %u",
			     eb->start, eb->read_mirror);
		ret = -EIO;
		goto out;
	}
	found_level = btrfs_header_level(eb);
	if (found_level >= BTRFS_MAX_LEVEL) {
		btrfs_err(fs_info,
			"bad tree block level, mirror %u level %d on logical %llu",
			eb->read_mirror, btrfs_header_level(eb), eb->start);
		ret = -EIO;
		goto out;
	}

	csum_tree_block(eb, result);
	header_csum = page_address(eb->pages[0]) +
		get_eb_offset_in_page(eb, offsetof(struct btrfs_header, csum));

	if (memcmp(result, header_csum, csum_size) != 0) {
		btrfs_warn_rl(fs_info,
"checksum verify failed on logical %llu mirror %u wanted " CSUM_FMT " found " CSUM_FMT " level %d",
			      eb->start, eb->read_mirror,
			      CSUM_FMT_VALUE(csum_size, header_csum),
			      CSUM_FMT_VALUE(csum_size, result),
			      btrfs_header_level(eb));
		ret = -EUCLEAN;
		goto out;
	}

	if (found_level != check->level) {
		btrfs_err(fs_info,
		"level verify failed on logical %llu mirror %u wanted %u found %u",
			  eb->start, eb->read_mirror, check->level, found_level);
		ret = -EIO;
		goto out;
	}
	if (unlikely(check->transid &&
		     btrfs_header_generation(eb) != check->transid)) {
		btrfs_err_rl(eb->fs_info,
"parent transid verify failed on logical %llu mirror %u wanted %llu found %llu",
			     eb->start, eb->read_mirror, check->transid,
			     btrfs_header_generation(eb));
		ret = -EIO;
		goto out;
	}
	if (check->has_first_key) {
		struct btrfs_key *expect_key = &check->first_key;
		struct btrfs_key found_key;

		if (found_level)
			btrfs_node_key_to_cpu(eb, &found_key, 0);
		else
			btrfs_item_key_to_cpu(eb, &found_key, 0);
		if (unlikely(btrfs_comp_cpu_keys(expect_key, &found_key))) {
			btrfs_err(fs_info,
"tree first key mismatch detected, bytenr=%llu parent_transid=%llu key expected=(%llu,%u,%llu) has=(%llu,%u,%llu)",
				  eb->start, check->transid,
				  expect_key->objectid,
				  expect_key->type, expect_key->offset,
				  found_key.objectid, found_key.type,
				  found_key.offset);
			ret = -EUCLEAN;
			goto out;
		}
	}
	if (check->owner_root) {
		ret = btrfs_check_eb_owner(eb, check->owner_root);
		if (ret < 0)
			goto out;
	}

	/*
	 * If this is a leaf block and it is corrupt, set the corrupt bit so
	 * that we don't try and read the other copies of this block, just
	 * return -EIO.
	 */
	if (found_level == 0 && btrfs_check_leaf(eb)) {
		set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
		ret = -EIO;
	}

	if (found_level > 0 && btrfs_check_node(eb))
		ret = -EIO;

	if (ret)
		btrfs_err(fs_info,
		"read time tree block corruption detected on logical %llu mirror %u",
			  eb->start, eb->read_mirror);
out:
	return ret;
}

#ifdef CONFIG_MIGRATION
static int btree_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	/*
	 * we can't safely write a btree page from here,
	 * we haven't done the locking hook
	 */
	if (folio_test_dirty(src))
		return -EAGAIN;
	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (folio_get_private(src) &&
	    !filemap_release_folio(src, GFP_KERNEL))
		return -EAGAIN;
	return migrate_folio(mapping, dst, src, mode);
}
#else
#define btree_migrate_folio NULL
#endif

static int btree_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct btrfs_fs_info *fs_info;
	int ret;

	if (wbc->sync_mode == WB_SYNC_NONE) {

		if (wbc->for_kupdate)
			return 0;

		fs_info = BTRFS_I(mapping->host)->root->fs_info;
		/* this is a bit racy, but that's ok */
		ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
					       BTRFS_DIRTY_METADATA_THRESH,
					       fs_info->dirty_metadata_batch);
		if (ret < 0)
			return 0;
	}
	return btree_write_cache_pages(mapping, wbc);
}

static bool btree_release_folio(struct folio *folio, gfp_t gfp_flags)
{
	if (folio_test_writeback(folio) || folio_test_dirty(folio))
		return false;

	return try_release_extent_buffer(&folio->page);
}

static void btree_invalidate_folio(struct folio *folio, size_t offset,
				   size_t length)
{
	struct extent_io_tree *tree;

	tree = &BTRFS_I(folio->mapping->host)->io_tree;
	extent_invalidate_folio(tree, folio, offset);
	btree_release_folio(folio, GFP_NOFS);
	if (folio_get_private(folio)) {
		btrfs_warn(BTRFS_I(folio->mapping->host)->root->fs_info,
			   "folio private not zero on folio %llu",
			   (unsigned long long)folio_pos(folio));
		folio_detach_private(folio);
	}
}

#ifdef DEBUG
static bool btree_dirty_folio(struct address_space *mapping,
			      struct folio *folio)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(mapping->host->i_sb);
	struct btrfs_subpage_info *spi = fs_info->subpage_info;
	struct btrfs_subpage *subpage;
	struct extent_buffer *eb;
	int cur_bit = 0;
	u64 page_start = folio_pos(folio);

	if (fs_info->sectorsize == PAGE_SIZE) {
		eb = folio_get_private(folio);
		BUG_ON(!eb);
		BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
		BUG_ON(!atomic_read(&eb->refs));
		btrfs_assert_tree_write_locked(eb);
		return filemap_dirty_folio(mapping, folio);
	}

	ASSERT(spi);
	subpage = folio_get_private(folio);

	for (cur_bit = spi->dirty_offset;
	     cur_bit < spi->dirty_offset + spi->bitmap_nr_bits;
	     cur_bit++) {
		unsigned long flags;
		u64 cur;

		spin_lock_irqsave(&subpage->lock, flags);
		if (!test_bit(cur_bit, subpage->bitmaps)) {
			spin_unlock_irqrestore(&subpage->lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&subpage->lock, flags);
		cur = page_start + cur_bit * fs_info->sectorsize;

		eb = find_extent_buffer(fs_info, cur);
		ASSERT(eb);
		ASSERT(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
		ASSERT(atomic_read(&eb->refs));
		btrfs_assert_tree_write_locked(eb);
		free_extent_buffer(eb);

		cur_bit += (fs_info->nodesize >> fs_info->sectorsize_bits) - 1;
	}
	return filemap_dirty_folio(mapping, folio);
}
#else
#define btree_dirty_folio filemap_dirty_folio
#endif

static const struct address_space_operations btree_aops = {
	.writepages	= btree_writepages,
	.release_folio	= btree_release_folio,
	.invalidate_folio = btree_invalidate_folio,
	.migrate_folio	= btree_migrate_folio,
	.dirty_folio	= btree_dirty_folio,
};

struct extent_buffer *btrfs_find_create_tree_block(
						struct btrfs_fs_info *fs_info,
						u64 bytenr, u64 owner_root,
						int level)
{
	if (btrfs_is_testing(fs_info))
		return alloc_test_extent_buffer(fs_info, bytenr);
	return alloc_extent_buffer(fs_info, bytenr, owner_root, level);
}

/*
 * Read tree block at logical address @bytenr and do basic but critical
 * verification.
 *
 * @check:	expected tree parentness check, see comments of the
 *		structure for details.
 */
struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
				      struct btrfs_tree_parent_check *check)
{
	struct extent_buffer *buf = NULL;
	int ret;

	buf = btrfs_find_create_tree_block(fs_info, bytenr, check->owner_root,
					   check->level);
	if (IS_ERR(buf))
		return buf;

	ret = btrfs_read_extent_buffer(buf, check);
	if (ret) {
		free_extent_buffer_stale(buf);
		return ERR_PTR(ret);
	}
	if (btrfs_check_eb_owner(buf, check->owner_root)) {
		free_extent_buffer_stale(buf);
		return ERR_PTR(-EUCLEAN);
	}
	return buf;
}

static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
			 u64 objectid)
{
	bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);

	memset(&root->root_key, 0, sizeof(root->root_key));
	memset(&root->root_item, 0, sizeof(root->root_item));
	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
	root->fs_info = fs_info;
	root->root_key.objectid = objectid;
	root->node = NULL;
	root->commit_root = NULL;
	root->state = 0;
	RB_CLEAR_NODE(&root->rb_node);

	root->last_trans = 0;
	root->free_objectid = 0;
	root->nr_delalloc_inodes = 0;
	root->nr_ordered_extents = 0;
	root->inode_tree = RB_ROOT;
	INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);

	btrfs_init_root_block_rsv(root);

	INIT_LIST_HEAD(&root->dirty_list);
	INIT_LIST_HEAD(&root->root_list);
	INIT_LIST_HEAD(&root->delalloc_inodes);
	INIT_LIST_HEAD(&root->delalloc_root);
	INIT_LIST_HEAD(&root->ordered_extents);
	INIT_LIST_HEAD(&root->ordered_root);
	INIT_LIST_HEAD(&root->reloc_dirty_list);
	INIT_LIST_HEAD(&root->logged_list[0]);
	INIT_LIST_HEAD(&root->logged_list[1]);
	spin_lock_init(&root->inode_lock);
	spin_lock_init(&root->delalloc_lock);
	spin_lock_init(&root->ordered_extent_lock);
	spin_lock_init(&root->accounting_lock);
	spin_lock_init(&root->log_extents_lock[0]);
	spin_lock_init(&root->log_extents_lock[1]);
	spin_lock_init(&root->qgroup_meta_rsv_lock);
	mutex_init(&root->objectid_mutex);
	mutex_init(&root->log_mutex);
	mutex_init(&root->ordered_extent_mutex);
	mutex_init(&root->delalloc_mutex);
	init_waitqueue_head(&root->qgroup_flush_wait);
	init_waitqueue_head(&root->log_writer_wait);
	init_waitqueue_head(&root->log_commit_wait[0]);
	init_waitqueue_head(&root->log_commit_wait[1]);
	INIT_LIST_HEAD(&root->log_ctxs[0]);
	INIT_LIST_HEAD(&root->log_ctxs[1]);
	atomic_set(&root->log_commit[0], 0);
	atomic_set(&root->log_commit[1], 0);
	atomic_set(&root->log_writers, 0);
	atomic_set(&root->log_batch, 0);
	refcount_set(&root->refs, 1);
	atomic_set(&root->snapshot_force_cow, 0);
	atomic_set(&root->nr_swapfiles, 0);
	root->log_transid = 0;
	root->log_transid_committed = -1;
	root->last_log_commit = 0;
	root->anon_dev = 0;
	if (!dummy) {
		extent_io_tree_init(fs_info, &root->dirty_log_pages,
				    IO_TREE_ROOT_DIRTY_LOG_PAGES);
		extent_io_tree_init(fs_info, &root->log_csum_range,
				    IO_TREE_LOG_CSUM_RANGE);
	}

	spin_lock_init(&root->root_item_lock);
	btrfs_qgroup_init_swapped_blocks(&root->swapped_blocks);
#ifdef CONFIG_BTRFS_DEBUG
	INIT_LIST_HEAD(&root->leak_list);
	spin_lock(&fs_info->fs_roots_radix_lock);
	list_add_tail(&root->leak_list, &fs_info->allocated_roots);
	spin_unlock(&fs_info->fs_roots_radix_lock);
#endif
}

static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
					   u64 objectid, gfp_t flags)
{
	struct btrfs_root *root = kzalloc(sizeof(*root), flags);

	if (root)
		__setup_root(root, fs_info, objectid);
	return root;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/* Should only be used by the testing infrastructure */
struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;

	if (!fs_info)
		return ERR_PTR(-EINVAL);

	root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID, GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	/* We don't use the stripesize in selftest, set it as sectorsize */
	root->alloc_bytenr = 0;

	return root;
}
#endif

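/* rb-tree comparators used to index the global roots by their root key. */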
static int global_root_cmp(struct rb_node *a_node, const struct rb_node *b_node)
{
	const struct btrfs_root *a = rb_entry(a_node, struct btrfs_root, rb_node);
	const struct btrfs_root *b = rb_entry(b_node, struct btrfs_root, rb_node);

	return btrfs_comp_cpu_keys(&a->root_key, &b->root_key);
}

static int global_root_key_cmp(const void *k, const struct rb_node *node)
{
	const struct btrfs_key *key = k;
	const struct btrfs_root *root = rb_entry(node, struct btrfs_root, rb_node);

	return btrfs_comp_cpu_keys(key, &root->root_key);
}

int btrfs_global_root_insert(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *tmp;
	int ret = 0;

	write_lock(&fs_info->global_root_lock);
	tmp = rb_find_add(&root->rb_node, &fs_info->global_root_tree, global_root_cmp);
	write_unlock(&fs_info->global_root_lock);

	if (tmp) {
		ret = -EEXIST;
		btrfs_warn(fs_info, "global root %llu %llu already exists",
			   root->root_key.objectid, root->root_key.offset);
	}
	return ret;
}

void btrfs_global_root_delete(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	write_lock(&fs_info->global_root_lock);
	rb_erase(&root->rb_node, &fs_info->global_root_tree);
	write_unlock(&fs_info->global_root_lock);
}

struct btrfs_root *btrfs_global_root(struct btrfs_fs_info *fs_info,
				     struct btrfs_key *key)
{
	struct rb_node *node;
	struct btrfs_root *root = NULL;

	read_lock(&fs_info->global_root_lock);
	node = rb_find(key, &fs_info->global_root_tree, global_root_key_cmp);
	if (node)
		root = container_of(node, struct btrfs_root, rb_node);
	read_unlock(&fs_info->global_root_lock);

	return root;
}

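/*
 * Map a logical address to the id of the global root that covers it. Without
 * the EXTENT_TREE_V2 incompat bit there is only one copy of each global tree,
 * so the id is always 0.
 */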
static u64 btrfs_global_root_id(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group *block_group;
	u64 ret;

	if (!btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))
		return 0;

	if (bytenr)
		block_group = btrfs_lookup_block_group(fs_info, bytenr);
	else
		block_group = btrfs_lookup_first_block_group(fs_info, bytenr);
	ASSERT(block_group);
	ret = block_group->global_root_id;
	btrfs_put_block_group(block_group);

	return ret;
}

struct btrfs_root *btrfs_csum_root(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_key key = {
		.objectid = BTRFS_CSUM_TREE_OBJECTID,
		.type = BTRFS_ROOT_ITEM_KEY,
		.offset = btrfs_global_root_id(fs_info, bytenr),
	};

	return btrfs_global_root(fs_info, &key);
}

struct btrfs_root *btrfs_extent_root(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_key key = {
		.objectid = BTRFS_EXTENT_TREE_OBJECTID,
		.type = BTRFS_ROOT_ITEM_KEY,
		.offset = btrfs_global_root_id(fs_info, bytenr),
	};

	return btrfs_global_root(fs_info, &key);
}

struct btrfs_root *btrfs_block_group_root(struct btrfs_fs_info *fs_info)
{
	if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE))
		return fs_info->block_group_root;
	return btrfs_extent_root(fs_info, 0);
}

struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
				     u64 objectid)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct extent_buffer *leaf;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root;
	struct btrfs_key key;
	unsigned int nofs_flag;
	int ret = 0;

	/*
	 * We're holding a transaction handle, so use a NOFS memory allocation
	 * context to avoid deadlock if reclaim happens.
	 */
	nofs_flag = memalloc_nofs_save();
	root = btrfs_alloc_root(fs_info, objectid, GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);
	if (!root)
		return ERR_PTR(-ENOMEM);

	root->root_key.objectid = objectid;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = 0;

	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0,
				      0, BTRFS_NESTING_NORMAL);
	if (IS_ERR(leaf)) {
		ret = PTR_ERR(leaf);
		leaf = NULL;
		goto fail;
	}

	root->node = leaf;
	btrfs_mark_buffer_dirty(trans, leaf);

	root->commit_root = btrfs_root_node(root);
	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);

	btrfs_set_root_flags(&root->root_item, 0);
	btrfs_set_root_limit(&root->root_item, 0);
	btrfs_set_root_bytenr(&root->root_item, leaf->start);
	btrfs_set_root_generation(&root->root_item, trans->transid);
	btrfs_set_root_level(&root->root_item, 0);
	btrfs_set_root_refs(&root->root_item, 1);
	btrfs_set_root_used(&root->root_item, leaf->len);
	btrfs_set_root_last_snapshot(&root->root_item, 0);
	btrfs_set_root_dirid(&root->root_item, 0);
	if (is_fstree(objectid))
		generate_random_guid(root->root_item.uuid);
	else
		export_guid(root->root_item.uuid, &guid_null);
	btrfs_set_root_drop_level(&root->root_item, 0);

	btrfs_tree_unlock(leaf);

	key.objectid = objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
	if (ret)
		goto fail;

	return root;

fail:
	btrfs_put_root(root);

	return ERR_PTR(ret);
}

static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
					 struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;

	root = btrfs_alloc_root(fs_info, BTRFS_TREE_LOG_OBJECTID, GFP_NOFS);
	if (!root)
		return ERR_PTR(-ENOMEM);

	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;

	return root;
}

int btrfs_alloc_log_tree_node(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root)
{
	struct extent_buffer *leaf;

	/*
	 * DON'T set SHAREABLE bit for log trees.
	 *
	 * Log trees are not exposed to user space thus can't be snapshotted,
	 * and they go away before a real commit is actually done.
	 *
	 * They do store pointers to file data extents, and those reference
	 * counts still get updated (along with back refs to the log tree).
	 */

	leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
			NULL, 0, 0, 0, 0, BTRFS_NESTING_NORMAL);
	if (IS_ERR(leaf))
		return PTR_ERR(leaf);

	root->node = leaf;

	btrfs_mark_buffer_dirty(trans, root->node);
	btrfs_tree_unlock(root->node);

	return 0;
}

int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *log_root;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);

	if (!btrfs_is_zoned(fs_info)) {
		int ret = btrfs_alloc_log_tree_node(trans, log_root);

		if (ret) {
			btrfs_put_root(log_root);
			return ret;
		}
	}

	WARN_ON(fs_info->log_root_tree);
	fs_info->log_root_tree = log_root;
	return 0;
}

int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *log_root;
	struct btrfs_inode_item *inode_item;
	int ret;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);

	ret = btrfs_alloc_log_tree_node(trans, log_root);
	if (ret) {
		btrfs_put_root(log_root);
		return ret;
	}

	log_root->last_trans = trans->transid;
	log_root->root_key.offset = root->root_key.objectid;

	inode_item = &log_root->root_item.inode;
	btrfs_set_stack_inode_generation(inode_item, 1);
	btrfs_set_stack_inode_size(inode_item, 3);
	btrfs_set_stack_inode_nlink(inode_item, 1);
	btrfs_set_stack_inode_nbytes(inode_item,
				     fs_info->nodesize);
	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);

	btrfs_set_root_node(&log_root->root_item, log_root->node);

	WARN_ON(root->log_root);
	root->log_root = log_root;
	root->log_transid = 0;
	root->log_transid_committed = -1;
	root->last_log_commit = 0;
	return 0;
}

static struct btrfs_root *read_tree_root_path(struct btrfs_root *tree_root,
					      struct btrfs_path *path,
					      struct btrfs_key *key)
{
	struct btrfs_root *root;
	struct btrfs_tree_parent_check check = { 0 };
	struct btrfs_fs_info *fs_info = tree_root->fs_info;
	u64 generation;
	int ret;
	int level;

	root = btrfs_alloc_root(fs_info, key->objectid, GFP_NOFS);
	if (!root)
		return ERR_PTR(-ENOMEM);

	ret = btrfs_find_root(tree_root, key, path,
			      &root->root_item, &root->root_key);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto fail;
	}

	generation = btrfs_root_generation(&root->root_item);
	level = btrfs_root_level(&root->root_item);
	check.level = level;
	check.transid = generation;
	check.owner_root = key->objectid;
	root->node = read_tree_block(fs_info, btrfs_root_bytenr(&root->root_item),
				     &check);
	if (IS_ERR(root->node)) {
		ret = PTR_ERR(root->node);
		root->node = NULL;
		goto fail;
	}
	if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
		ret = -EIO;
		goto fail;
	}

	/*
	 * For real fs, and not log/reloc trees, root owner must
	 * match its root node owner
	 */
	if (!test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state) &&
	    root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID &&
	    root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	    root->root_key.objectid != btrfs_header_owner(root->node)) {
		btrfs_crit(fs_info,
"root=%llu block=%llu, tree root owner mismatch, have %llu expect %llu",
			   root->root_key.objectid, root->node->start,
			   btrfs_header_owner(root->node),
			   root->root_key.objectid);
		ret = -EUCLEAN;
		goto fail;
	}
	root->commit_root = btrfs_root_node(root);
	return root;
fail:
	btrfs_put_root(root);
	return ERR_PTR(ret);
}

struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
					struct btrfs_key *key)
{
	struct btrfs_root *root;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return ERR_PTR(-ENOMEM);
	root = read_tree_root_path(tree_root, path, key);
	btrfs_free_path(path);

	return root;
}

/*
 * Initialize subvolume root in-memory structure
 *
 * @anon_dev:	anonymous device to attach to the root, if zero, allocate new
 */
static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev)
{
	int ret;

	btrfs_drew_lock_init(&root->snapshot_lock);

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID &&
	    !btrfs_is_data_reloc_root(root) &&
	    is_fstree(root->root_key.objectid)) {
		set_bit(BTRFS_ROOT_SHAREABLE, &root->state);
		btrfs_check_and_init_root_item(&root->root_item);
	}

	/*
	 * Don't assign anonymous block device to roots that are not exposed to
	 * userspace, the id pool is limited to 1M
	 */
	if (is_fstree(root->root_key.objectid) &&
	    btrfs_root_refs(&root->root_item) > 0) {
		if (!anon_dev) {
			ret = get_anon_bdev(&root->anon_dev);
			if (ret)
				goto fail;
		} else {
			root->anon_dev = anon_dev;
		}
	}

	mutex_lock(&root->objectid_mutex);
	ret = btrfs_init_root_free_objectid(root);
	if (ret) {
		mutex_unlock(&root->objectid_mutex);
		goto fail;
	}

	ASSERT(root->free_objectid <= BTRFS_LAST_FREE_OBJECTID);

	mutex_unlock(&root->objectid_mutex);

	return 0;
fail:
	/* The caller is responsible to call btrfs_free_fs_root */
	return ret;
}

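/*
 * Look up a cached fs root in the fs_roots radix tree, grabbing a reference,
 * or return NULL if it is not cached.
 */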
static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
					       u64 root_id)
{
	struct btrfs_root *root;

	spin_lock(&fs_info->fs_roots_radix_lock);
	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)root_id);
	root = btrfs_grab_root(root);
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return root;
}

static struct btrfs_root *btrfs_get_global_root(struct btrfs_fs_info *fs_info,
						u64 objectid)
{
	struct btrfs_key key = {
		.objectid = objectid,
		.type = BTRFS_ROOT_ITEM_KEY,
		.offset = 0,
	};

	switch (objectid) {
	case BTRFS_ROOT_TREE_OBJECTID:
		return btrfs_grab_root(fs_info->tree_root);
	case BTRFS_EXTENT_TREE_OBJECTID:
		return btrfs_grab_root(btrfs_global_root(fs_info, &key));
	case BTRFS_CHUNK_TREE_OBJECTID:
		return btrfs_grab_root(fs_info->chunk_root);
	case BTRFS_DEV_TREE_OBJECTID:
		return btrfs_grab_root(fs_info->dev_root);
	case BTRFS_CSUM_TREE_OBJECTID:
		return btrfs_grab_root(btrfs_global_root(fs_info, &key));
	case BTRFS_QUOTA_TREE_OBJECTID:
		return btrfs_grab_root(fs_info->quota_root);
	case BTRFS_UUID_TREE_OBJECTID:
		return btrfs_grab_root(fs_info->uuid_root);
	case BTRFS_BLOCK_GROUP_TREE_OBJECTID:
		return btrfs_grab_root(fs_info->block_group_root);
	case BTRFS_FREE_SPACE_TREE_OBJECTID:
		return btrfs_grab_root(btrfs_global_root(fs_info, &key));
	case BTRFS_RAID_STRIPE_TREE_OBJECTID:
		return btrfs_grab_root(fs_info->stripe_root);
	default:
		return NULL;
	}
}

int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
			 struct btrfs_root *root)
{
	int ret;

	ret = radix_tree_preload(GFP_NOFS);
	if (ret)
		return ret;

	spin_lock(&fs_info->fs_roots_radix_lock);
	ret = radix_tree_insert(&fs_info->fs_roots_radix,
				(unsigned long)root->root_key.objectid,
				root);
	if (ret == 0) {
		btrfs_grab_root(root);
		set_bit(BTRFS_ROOT_IN_RADIX, &root->state);
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	radix_tree_preload_end();

	return ret;
}

void btrfs_check_leaked_roots(struct btrfs_fs_info *fs_info)
{
#ifdef CONFIG_BTRFS_DEBUG
	struct btrfs_root *root;

	while (!list_empty(&fs_info->allocated_roots)) {
		char buf[BTRFS_ROOT_NAME_BUF_LEN];

		root = list_first_entry(&fs_info->allocated_roots,
					struct btrfs_root, leak_list);
		btrfs_err(fs_info, "leaked root %s refcount %d",
			  btrfs_root_name(&root->root_key, buf),
			  refcount_read(&root->refs));
		while (refcount_read(&root->refs) > 1)
			btrfs_put_root(root);
		btrfs_put_root(root);
	}
#endif
}

static void free_global_roots(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct rb_node *node;

	while ((node = rb_first_postorder(&fs_info->global_root_tree)) != NULL) {
		root = rb_entry(node, struct btrfs_root, rb_node);
		rb_erase(&root->rb_node, &fs_info->global_root_tree);
		btrfs_put_root(root);
	}
}

void btrfs_free_fs_info(struct btrfs_fs_info *fs_info)
{
	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
	percpu_counter_destroy(&fs_info->delalloc_bytes);
	percpu_counter_destroy(&fs_info->ordered_bytes);
	percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
	btrfs_free_csum_hash(fs_info);
	btrfs_free_stripe_hash_table(fs_info);
	btrfs_free_ref_cache(fs_info);
	kfree(fs_info->balance_ctl);
	kfree(fs_info->delayed_root);
	free_global_roots(fs_info);
	btrfs_put_root(fs_info->tree_root);
	btrfs_put_root(fs_info->chunk_root);
	btrfs_put_root(fs_info->dev_root);
	btrfs_put_root(fs_info->quota_root);
	btrfs_put_root(fs_info->uuid_root);
	btrfs_put_root(fs_info->fs_root);
	btrfs_put_root(fs_info->data_reloc_root);
	btrfs_put_root(fs_info->block_group_root);
	btrfs_put_root(fs_info->stripe_root);
	btrfs_check_leaked_roots(fs_info);
	btrfs_extent_buffer_leak_debug_check(fs_info);
	kfree(fs_info->super_copy);
	kfree(fs_info->super_for_commit);
	kfree(fs_info->subpage_info);
	kvfree(fs_info);
}

/*
 * Get an in-memory reference of a root structure.
 *
 * For essential trees like root/extent tree, we grab it from fs_info directly.
 * For subvolume trees, we check the cached filesystem roots first. If not
 * found, then read it from disk and add it to cached fs roots.
 *
 * Caller should release the root by calling btrfs_put_root() after the usage.
 *
 * NOTE: Reloc and log trees can't be read by this function as they share the
 *	 same root objectid.
 *
 * @objectid:	root id
 * @anon_dev:	preallocated anonymous block device number for new roots,
 *		pass 0 for new allocation.
 * @check_ref:	whether to check root item references; if true, return -ENOENT
 *		for orphan roots
 */
static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info,
					     u64 objectid, dev_t anon_dev,
					     bool check_ref)
{
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	root = btrfs_get_global_root(fs_info, objectid);
	if (root)
		return root;

	/*
	 * If we're called for non-subvolume trees, and above function didn't
	 * find one, do not try to read it from disk.
	 *
	 * This is namely for free-space-tree and quota tree, which can change
	 * at runtime and should only be grabbed from fs_info.
	 */
	if (!is_fstree(objectid) && objectid != BTRFS_DATA_RELOC_TREE_OBJECTID)
		return ERR_PTR(-ENOENT);
again:
	root = btrfs_lookup_fs_root(fs_info, objectid);
	if (root) {
		/* Shouldn't get preallocated anon_dev for cached roots */
		ASSERT(!anon_dev);
		if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
			btrfs_put_root(root);
			return ERR_PTR(-ENOENT);
		}
		return root;
	}

	key.objectid = objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	root = btrfs_read_tree_root(fs_info->tree_root, &key);
	if (IS_ERR(root))
		return root;

	if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
		ret = -ENOENT;
		goto fail;
	}

	ret = btrfs_init_fs_root(root, anon_dev);
	if (ret)
		goto fail;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto fail;
	}
	key.objectid = BTRFS_ORPHAN_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = objectid;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	btrfs_free_path(path);
	if (ret < 0)
		goto fail;
	if (ret == 0)
		set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);

	ret = btrfs_insert_fs_root(fs_info, root);
	if (ret) {
		if (ret == -EEXIST) {
			btrfs_put_root(root);
			goto again;
		}
		goto fail;
	}
	return root;
fail:
	/*
	 * If our caller provided us an anonymous device, then it's the caller's
	 * responsibility to free it in case we fail. So we have to set our
	 * root's anon_dev to 0 to avoid a double free, once by btrfs_put_root()
	 * and once again by our caller.
	 */
	if (anon_dev)
		root->anon_dev = 0;
	btrfs_put_root(root);
	return ERR_PTR(ret);
}

/*
 * Get in-memory reference of a root structure
 *
 * @objectid:	tree objectid
 * @check_ref:	if set, verify that the tree exists and the item has at least
 *		one reference
 */
struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
				     u64 objectid, bool check_ref)
{
	return btrfs_get_root_ref(fs_info, objectid, 0, check_ref);
}

/*
 * Get in-memory reference of a root structure, created as new, optionally pass
 * the anonymous block device id
 *
 * @objectid:	tree objectid
 * @anon_dev:	if zero, allocate a new anonymous block device or use the
 *		parameter value
 */
struct btrfs_root *btrfs_get_new_fs_root(struct btrfs_fs_info *fs_info,
					 u64 objectid, dev_t anon_dev)
{
	return btrfs_get_root_ref(fs_info, objectid, anon_dev, true);
}

/*
 * Return a root for the given objectid.
 *
 * @fs_info:	the fs_info
 * @objectid:	the objectid we need to lookup
 *
 * This is exclusively used for backref walking, and exists specifically because
 * of how qgroups does lookups. Qgroups will do a backref lookup at delayed ref
 * creation time, which means we may have to read the tree_root in order to look
 * up a fs root that is not in memory. If the root is not in memory we will
 * read the tree root commit root and look up the fs root from there. This is a
 * temporary root, it will not be inserted into the radix tree as it doesn't
 * have the most uptodate information, it'll simply be discarded once the
 * backref code is finished using the root.
 */
struct btrfs_root *btrfs_get_fs_root_commit_root(struct btrfs_fs_info *fs_info,
						 struct btrfs_path *path,
						 u64 objectid)
{
	struct btrfs_root *root;
	struct btrfs_key key;

	ASSERT(path->search_commit_root && path->skip_locking);

	/*
	 * This can return -ENOENT if we ask for a root that doesn't exist, but
	 * since this is called via the backref walking code we won't be looking
	 * up a root that doesn't exist, unless there's corruption. So if root
	 * != NULL just return it.
	 */
	root = btrfs_get_global_root(fs_info, objectid);
	if (root)
		return root;

	root = btrfs_lookup_fs_root(fs_info, objectid);
	if (root)
		return root;

	key.objectid = objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	root = read_tree_root_path(fs_info->tree_root, path, &key);
	btrfs_release_path(path);

	return root;
}

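/*
 * Background thread doing the periodic cleanup work: delayed iputs, deleted
 * snapshot cleaning, inode defrag and unused block group deletion/reclaim.
 */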
static int cleaner_kthread(void *arg)
{
	struct btrfs_fs_info *fs_info = arg;
	int again;

	while (1) {
		again = 0;

		set_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);

		/* Make the cleaner go to sleep early. */
		if (btrfs_need_cleaner_sleep(fs_info))
			goto sleep;

		/*
		 * Do not do anything if we might cause open_ctree() to block
		 * before we have finished mounting the filesystem.
		 */
		if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
			goto sleep;

		if (!mutex_trylock(&fs_info->cleaner_mutex))
			goto sleep;

		/*
		 * Avoid the problem that we change the status of the fs
		 * during the above check and trylock.
		 */
		if (btrfs_need_cleaner_sleep(fs_info)) {
			mutex_unlock(&fs_info->cleaner_mutex);
			goto sleep;
		}

		if (test_and_clear_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags))
			btrfs_sysfs_feature_update(fs_info);

		btrfs_run_delayed_iputs(fs_info);

		again = btrfs_clean_one_deleted_snapshot(fs_info);
		mutex_unlock(&fs_info->cleaner_mutex);

		/*
		 * The defragger has dealt with the R/O remount and umount,
		 * needn't do anything special here.
		 */
		btrfs_run_defrag_inodes(fs_info);

		/*
		 * Acquires fs_info->reclaim_bgs_lock to avoid racing
		 * with relocation (btrfs_relocate_chunk) and relocation
		 * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group)
		 * after acquiring fs_info->reclaim_bgs_lock. So we
		 * can't hold, nor need to, fs_info->cleaner_mutex when deleting
		 * unused block groups.
		 */
		btrfs_delete_unused_bgs(fs_info);

		/*
		 * Reclaim block groups in the reclaim_bgs list after we deleted
		 * all unused block_groups. This possibly gives us some more free
		 * space.
		 */
		btrfs_reclaim_bgs(fs_info);
sleep:
		clear_and_wake_up_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
		if (kthread_should_park())
			kthread_parkme();
		if (kthread_should_stop())
			return 0;
		if (!again) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
			__set_current_state(TASK_RUNNING);
		}
	}
}

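/*
 * Background thread that commits the running transaction once the commit
 * interval has elapsed or a commit was explicitly requested.
 */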
static int transaction_kthread(void *arg)
{
	struct btrfs_root *root = arg;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_transaction *cur;
	u64 transid;
	time64_t delta;
	unsigned long delay;
	bool cannot_commit;

	do {
		cannot_commit = false;
		delay = msecs_to_jiffies(fs_info->commit_interval * 1000);
		mutex_lock(&fs_info->transaction_kthread_mutex);

		spin_lock(&fs_info->trans_lock);
		cur = fs_info->running_transaction;
		if (!cur) {
			spin_unlock(&fs_info->trans_lock);
			goto sleep;
		}

		delta = ktime_get_seconds() - cur->start_time;
		if (!test_and_clear_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags) &&
		    cur->state < TRANS_STATE_COMMIT_PREP &&
		    delta < fs_info->commit_interval) {
			spin_unlock(&fs_info->trans_lock);
			delay -= msecs_to_jiffies((delta - 1) * 1000);
			delay = min(delay,
				    msecs_to_jiffies(fs_info->commit_interval * 1000));
			goto sleep;
		}
		transid = cur->transid;
		spin_unlock(&fs_info->trans_lock);

		/* If the file system is aborted, this will always fail. */
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) != -ENOENT)
				cannot_commit = true;
			goto sleep;
		}
		if (transid == trans->transid) {
			btrfs_commit_transaction(trans);
		} else {
			btrfs_end_transaction(trans);
		}
sleep:
		wake_up_process(fs_info->cleaner_kthread);
		mutex_unlock(&fs_info->transaction_kthread_mutex);

		if (BTRFS_FS_ERROR(fs_info))
			btrfs_cleanup_transaction(fs_info);
		if (!kthread_should_stop() &&
		    (!btrfs_transaction_blocked(fs_info) ||
		     cannot_commit))
			schedule_timeout_interruptible(delay);
	} while (!kthread_should_stop());
	return 0;
}

/*
 * This will find the highest generation in the array of root backups. The
 * index of the highest array is returned, or -EINVAL if we can't find
 * anything.
 *
 * We check to make sure the array is valid by comparing the
 * generation of the latest root in the array with the generation
 * in the super block. If they don't match we pitch it.
 */
static int find_newest_super_backup(struct btrfs_fs_info *info)
{
	const u64 newest_gen = btrfs_super_generation(info->super_copy);
	u64 cur;
	struct btrfs_root_backup *root_backup;
	int i;

	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
		root_backup = info->super_copy->super_roots + i;
		cur = btrfs_backup_tree_root_gen(root_backup);
		if (cur == newest_gen)
			return i;
	}

	return -EINVAL;
}

/*
 * copy all the root pointers into the super backup array.
 * this will bump the backup pointer by one when it is
 * done
 */
static void backup_super_roots(struct btrfs_fs_info *info)
{
	const int next_backup = info->backup_root_index;
	struct btrfs_root_backup *root_backup;

	root_backup = info->super_for_commit->super_roots + next_backup;

	/*
	 * make sure all of our padding and empty slots get zero filled
	 * regardless of which ones we use today
	 */
	memset(root_backup, 0, sizeof(*root_backup));

	info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;

	btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
	btrfs_set_backup_tree_root_gen(root_backup,
			       btrfs_header_generation(info->tree_root->node));

	btrfs_set_backup_tree_root_level(root_backup,
			       btrfs_header_level(info->tree_root->node));

	btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
	btrfs_set_backup_chunk_root_gen(root_backup,
			       btrfs_header_generation(info->chunk_root->node));
	btrfs_set_backup_chunk_root_level(root_backup,
			       btrfs_header_level(info->chunk_root->node));

	if (!btrfs_fs_compat_ro(info, BLOCK_GROUP_TREE)) {
		struct btrfs_root *extent_root = btrfs_extent_root(info, 0);
		struct btrfs_root *csum_root = btrfs_csum_root(info, 0);

		btrfs_set_backup_extent_root(root_backup,
					     extent_root->node->start);
		btrfs_set_backup_extent_root_gen(root_backup,
				btrfs_header_generation(extent_root->node));
		btrfs_set_backup_extent_root_level(root_backup,
					btrfs_header_level(extent_root->node));

		btrfs_set_backup_csum_root(root_backup, csum_root->node->start);
		btrfs_set_backup_csum_root_gen(root_backup,
				btrfs_header_generation(csum_root->node));
		btrfs_set_backup_csum_root_level(root_backup,
				btrfs_header_level(csum_root->node));
	}

	/*
	 * we might commit during log recovery, which happens before we set
	 * the fs_root.  Make sure it is valid before we fill it in.
	 */
	if (info->fs_root && info->fs_root->node) {
		btrfs_set_backup_fs_root(root_backup,
					 info->fs_root->node->start);
		btrfs_set_backup_fs_root_gen(root_backup,
			       btrfs_header_generation(info->fs_root->node));
		btrfs_set_backup_fs_root_level(root_backup,
			       btrfs_header_level(info->fs_root->node));
	}

	btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
	btrfs_set_backup_dev_root_gen(root_backup,
			       btrfs_header_generation(info->dev_root->node));
	btrfs_set_backup_dev_root_level(root_backup,
			       btrfs_header_level(info->dev_root->node));

	btrfs_set_backup_total_bytes(root_backup,
			     btrfs_super_total_bytes(info->super_copy));
	btrfs_set_backup_bytes_used(root_backup,
			     btrfs_super_bytes_used(info->super_copy));
	btrfs_set_backup_num_devices(root_backup,
			     btrfs_super_num_devices(info->super_copy));

	/*
	 * if we don't copy this out to the super_copy, it won't get remembered
	 * for the next commit
	 */
	memcpy(&info->super_copy->super_roots,
	       &info->super_for_commit->super_roots,
	       sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
}

/*
 * Reads a backup root based on the passed priority. Prio 0 is the newest, prio
 * 1/2/3 are 2nd newest/3rd newest/4th (oldest) backup roots
 *
 * @fs_info:  filesystem whose backup roots need to be read
 * @priority: priority of backup root required
 *
 * Returns backup root index on success and -EINVAL otherwise.
 */
static int read_backup_root(struct btrfs_fs_info *fs_info, u8 priority)
{
	int backup_index = find_newest_super_backup(fs_info);
	struct btrfs_super_block *super = fs_info->super_copy;
	struct btrfs_root_backup *root_backup;

	if (priority < BTRFS_NUM_BACKUP_ROOTS && backup_index >= 0) {
		if (priority == 0)
			return backup_index;

		backup_index = backup_index + BTRFS_NUM_BACKUP_ROOTS - priority;
		backup_index %= BTRFS_NUM_BACKUP_ROOTS;
	} else {
		return -EINVAL;
	}

	root_backup = super->super_roots + backup_index;

	btrfs_set_super_generation(super,
				   btrfs_backup_tree_root_gen(root_backup));
	btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
	btrfs_set_super_root_level(super,
				   btrfs_backup_tree_root_level(root_backup));
	btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));

	/*
	 * Fixme: the total bytes and num_devices need to match or we should
	 * need a fsck
	 */
	btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
	btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));

	return backup_index;
}

/* helper to cleanup workers */
static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
{
	btrfs_destroy_workqueue(fs_info->fixup_workers);
	btrfs_destroy_workqueue(fs_info->delalloc_workers);
	btrfs_destroy_workqueue(fs_info->workers);
	if (fs_info->endio_workers)
		destroy_workqueue(fs_info->endio_workers);
	if (fs_info->rmw_workers)
		destroy_workqueue(fs_info->rmw_workers);
	if (fs_info->compressed_write_workers)
		destroy_workqueue(fs_info->compressed_write_workers);
	btrfs_destroy_workqueue(fs_info->endio_write_workers);
	btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
	btrfs_destroy_workqueue(fs_info->delayed_workers);
	btrfs_destroy_workqueue(fs_info->caching_workers);
	btrfs_destroy_workqueue(fs_info->flush_workers);
	btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
	if (fs_info->discard_ctl.discard_workers)
		destroy_workqueue(fs_info->discard_ctl.discard_workers);
	/*
	 * Now that all other work queues are destroyed, we can safely destroy
	 * the queues used for metadata I/O, since tasks from those other work
	 * queues can do metadata I/O operations.
	 */
	if (fs_info->endio_meta_workers)
		destroy_workqueue(fs_info->endio_meta_workers);
}

static void free_root_extent_buffers(struct btrfs_root *root)
{
	if (root) {
		free_extent_buffer(root->node);
		free_extent_buffer(root->commit_root);
		root->node = NULL;
		root->commit_root = NULL;
	}
}

static void free_global_root_pointers(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root, *tmp;

	rbtree_postorder_for_each_entry_safe(root, tmp,
					     &fs_info->global_root_tree,
					     rb_node)
		free_root_extent_buffers(root);
}

/* helper to cleanup tree roots */
static void free_root_pointers(struct btrfs_fs_info *info, bool free_chunk_root)
{
	free_root_extent_buffers(info->tree_root);

	free_global_root_pointers(info);
	free_root_extent_buffers(info->dev_root);
	free_root_extent_buffers(info->quota_root);
	free_root_extent_buffers(info->uuid_root);
	free_root_extent_buffers(info->fs_root);
	free_root_extent_buffers(info->data_reloc_root);
	free_root_extent_buffers(info->block_group_root);
	free_root_extent_buffers(info->stripe_root);
	if (free_chunk_root)
		free_root_extent_buffers(info->chunk_root);
}

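/* Drop one reference on a root and free it once the last reference is gone. */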
void btrfs_put_root(struct btrfs_root *root)
{
	if (!root)
		return;

	if (refcount_dec_and_test(&root->refs)) {
		WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
		WARN_ON(test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state));
		if (root->anon_dev)
			free_anon_bdev(root->anon_dev);
		free_root_extent_buffers(root);
#ifdef CONFIG_BTRFS_DEBUG
		spin_lock(&root->fs_info->fs_roots_radix_lock);
		list_del_init(&root->leak_list);
		spin_unlock(&root->fs_info->fs_roots_radix_lock);
#endif
		kfree(root);
	}
}

void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
{
	int ret;
	struct btrfs_root *gang[8];
	int i;

	while (!list_empty(&fs_info->dead_roots)) {
		gang[0] = list_entry(fs_info->dead_roots.next,
				     struct btrfs_root, root_list);
		list_del(&gang[0]->root_list);

		if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state))
			btrfs_drop_and_free_fs_root(fs_info, gang[0]);
		btrfs_put_root(gang[0]);
	}

	while (1) {
		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
					     (void **)gang, 0,
					     ARRAY_SIZE(gang));
		if (!ret)
			break;
		for (i = 0; i < ret; i++)
			btrfs_drop_and_free_fs_root(fs_info, gang[i]);
	}
}

static void btrfs_init_scrub(struct btrfs_fs_info *fs_info)
{
	mutex_init(&fs_info->scrub_lock);
	atomic_set(&fs_info->scrubs_running, 0);
	atomic_set(&fs_info->scrub_pause_req, 0);
	atomic_set(&fs_info->scrubs_paused, 0);
	atomic_set(&fs_info->scrub_cancel_req, 0);
	init_waitqueue_head(&fs_info->scrub_pause_wait);
	refcount_set(&fs_info->scrub_workers_refcnt, 0);
}

static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
{
	spin_lock_init(&fs_info->balance_lock);
	mutex_init(&fs_info->balance_mutex);
	atomic_set(&fs_info->balance_pause_req, 0);
	atomic_set(&fs_info->balance_cancel_req, 0);
	fs_info->balance_ctl = NULL;
	init_waitqueue_head(&fs_info->balance_wait_q);
	atomic_set(&fs_info->reloc_cancel_req, 0);
}

static int btrfs_init_btree_inode(struct super_block *sb)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
	unsigned long hash = btrfs_inode_hash(BTRFS_BTREE_INODE_OBJECTID,
					      fs_info->tree_root);
	struct inode *inode;

	inode = new_inode(sb);
	if (!inode)
		return -ENOMEM;

	inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
	set_nlink(inode, 1);
	/*
	 * we set the i_size on the btree inode to the max possible int.
	 * the real end of the address space is determined by all of
	 * the devices in the system
	 */
	inode->i_size = OFFSET_MAX;
	inode->i_mapping->a_ops = &btree_aops;
	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);

	RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
	extent_io_tree_init(fs_info, &BTRFS_I(inode)->io_tree,
			    IO_TREE_BTREE_INODE_IO);
	extent_map_tree_init(&BTRFS_I(inode)->extent_tree);

	BTRFS_I(inode)->root = btrfs_grab_root(fs_info->tree_root);
	BTRFS_I(inode)->location.objectid = BTRFS_BTREE_INODE_OBJECTID;
	BTRFS_I(inode)->location.type = 0;
	BTRFS_I(inode)->location.offset = 0;
	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
	__insert_inode_hash(inode, hash);
	fs_info->btree_inode = inode;

	return 0;
}

static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
{
	mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
	init_rwsem(&fs_info->dev_replace.rwsem);
	init_waitqueue_head(&fs_info->dev_replace.replace_wait);
}

static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
{
	spin_lock_init(&fs_info->qgroup_lock);
	mutex_init(&fs_info->qgroup_ioctl_lock);
	fs_info->qgroup_tree = RB_ROOT;
	INIT_LIST_HEAD(&fs_info->dirty_qgroups);
	fs_info->qgroup_seq = 1;
	fs_info->qgroup_ulist = NULL;
	fs_info->qgroup_rescan_running = false;
	fs_info->qgroup_drop_subtree_thres = BTRFS_MAX_LEVEL;
	mutex_init(&fs_info->qgroup_rescan_lock);
}

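/*
 * Allocate all workqueues used by the filesystem; returns -ENOMEM if any
 * allocation failed (teardown is left to btrfs_stop_all_workers()).
 */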
static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info)
{
	u32 max_active = fs_info->thread_pool_size;
	unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
	unsigned int ordered_flags = WQ_MEM_RECLAIM | WQ_FREEZABLE;

	fs_info->workers =
		btrfs_alloc_workqueue(fs_info, "worker", flags, max_active, 16);

	fs_info->delalloc_workers =
		btrfs_alloc_workqueue(fs_info, "delalloc",
				      flags, max_active, 2);

	fs_info->flush_workers =
		btrfs_alloc_workqueue(fs_info, "flush_delalloc",
				      flags, max_active, 0);

	fs_info->caching_workers =
		btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0);

	fs_info->fixup_workers =
		btrfs_alloc_ordered_workqueue(fs_info, "fixup", ordered_flags);

	fs_info->endio_workers =
		alloc_workqueue("btrfs-endio", flags, max_active);
	fs_info->endio_meta_workers =
		alloc_workqueue("btrfs-endio-meta", flags, max_active);
	fs_info->rmw_workers = alloc_workqueue("btrfs-rmw", flags, max_active);
	fs_info->endio_write_workers =
		btrfs_alloc_workqueue(fs_info, "endio-write", flags,
				      max_active, 2);
	fs_info->compressed_write_workers =
		alloc_workqueue("btrfs-compressed-write", flags, max_active);
	fs_info->endio_freespace_worker =
		btrfs_alloc_workqueue(fs_info, "freespace-write", flags,
				      max_active, 0);
	fs_info->delayed_workers =
		btrfs_alloc_workqueue(fs_info, "delayed-meta", flags,
				      max_active, 0);
	fs_info->qgroup_rescan_workers =
		btrfs_alloc_ordered_workqueue(fs_info, "qgroup-rescan",
					      ordered_flags);
	fs_info->discard_ctl.discard_workers =
		alloc_ordered_workqueue("btrfs_discard", WQ_FREEZABLE);

	if (!(fs_info->workers &&
	      fs_info->delalloc_workers && fs_info->flush_workers &&
	      fs_info->endio_workers && fs_info->endio_meta_workers &&
	      fs_info->compressed_write_workers &&
	      fs_info->endio_write_workers &&
	      fs_info->endio_freespace_worker && fs_info->rmw_workers &&
	      fs_info->caching_workers && fs_info->fixup_workers &&
	      fs_info->delayed_workers && fs_info->qgroup_rescan_workers &&
	      fs_info->discard_ctl.discard_workers)) {
		return -ENOMEM;
	}

	return 0;
}

static int btrfs_init_csum_hash(struct btrfs_fs_info *fs_info, u16 csum_type)
{
	struct crypto_shash *csum_shash;
	const char *csum_driver = btrfs_super_csum_driver(csum_type);

	csum_shash = crypto_alloc_shash(csum_driver, 0, 0);

	if (IS_ERR(csum_shash)) {
		btrfs_err(fs_info, "error allocating %s hash for checksum",
			  csum_driver);
		return PTR_ERR(csum_shash);
	}

	fs_info->csum_shash = csum_shash;

	/*
	 * Check if the checksum implementation is a fast accelerated one.
	 * As-is this is a bit of a hack and should be replaced once the csum
	 * implementations provide that information themselves.
	 */
	switch (csum_type) {
	case BTRFS_CSUM_TYPE_CRC32:
		if (!strstr(crypto_shash_driver_name(csum_shash), "generic"))
			set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
		break;
	case BTRFS_CSUM_TYPE_XXHASH:
		set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
		break;
	default:
		break;
	}

	btrfs_info(fs_info, "using %s (%s) checksum algorithm",
		   btrfs_super_csum_name(csum_type),
		   crypto_shash_driver_name(csum_shash));
	return 0;
}

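/*
 * Read and replay the tree log referenced by the super block, committing
 * afterwards when the filesystem is mounted read-only.
 */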
static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
			    struct btrfs_fs_devices *fs_devices)
{
	int ret;
	struct btrfs_tree_parent_check check = { 0 };
	struct btrfs_root *log_tree_root;
	struct btrfs_super_block *disk_super = fs_info->super_copy;
	u64 bytenr = btrfs_super_log_root(disk_super);
	int level = btrfs_super_log_root_level(disk_super);

	if (fs_devices->rw_devices == 0) {
		btrfs_warn(fs_info, "log replay required on RO media");
		return -EIO;
	}

	log_tree_root = btrfs_alloc_root(fs_info, BTRFS_TREE_LOG_OBJECTID,
					 GFP_KERNEL);
	if (!log_tree_root)
		return -ENOMEM;

	check.level = level;
	check.transid = fs_info->generation + 1;
	check.owner_root = BTRFS_TREE_LOG_OBJECTID;
	log_tree_root->node = read_tree_block(fs_info, bytenr, &check);
	if (IS_ERR(log_tree_root->node)) {
		btrfs_warn(fs_info, "failed to read log tree");
		ret = PTR_ERR(log_tree_root->node);
		log_tree_root->node = NULL;
		btrfs_put_root(log_tree_root);
		return ret;
	}
	if (!extent_buffer_uptodate(log_tree_root->node)) {
		btrfs_err(fs_info, "failed to read log tree");
		btrfs_put_root(log_tree_root);
		return -EIO;
	}

	/* returns with log_tree_root freed on success */
	ret = btrfs_recover_log_trees(log_tree_root);
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret,
				      "Failed to recover log tree");
		btrfs_put_root(log_tree_root);
		return ret;
	}

	if (sb_rdonly(fs_info->sb)) {
		ret = btrfs_commit_super(fs_info);
		if (ret)
			return ret;
	}

	return 0;
}
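/*
 * The expected transid above is generation + 1: the log tree is written by
 * the transaction following the last one committed to disk, so a log root
 * carrying any other generation is stale or misplaced and must be rejected
 * by the tree checker instead of being replayed.
 */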
static int load_global_roots_objectid(struct btrfs_root *tree_root,
				      struct btrfs_path *path, u64 objectid,
				      const char *name)
{
	struct btrfs_fs_info *fs_info = tree_root->fs_info;
	struct btrfs_root *root;
	u64 max_global_id = 0;
	int ret;
	struct btrfs_key key = {
		.objectid = objectid,
		.type = BTRFS_ROOT_ITEM_KEY,
		.offset = 0,
	};
	bool found = false;

	/* If we have IGNOREDATACSUMS skip loading these roots. */
	if (objectid == BTRFS_CSUM_TREE_OBJECTID &&
	    btrfs_test_opt(fs_info, IGNOREDATACSUMS)) {
		set_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state);
		return 0;
	}

	while (1) {
		ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
		if (ret < 0)
			break;

		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(tree_root, path);
			if (ret) {
				if (ret > 0)
					ret = 0;
				break;
			}
		}
		ret = 0;

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid != objectid)
			break;
		btrfs_release_path(path);

		/*
		 * Just worry about this for extent tree, it'll be the same for
		 * everybody.
		 */
		if (objectid == BTRFS_EXTENT_TREE_OBJECTID)
			max_global_id = max(max_global_id, key.offset);

		found = true;
		root = read_tree_root_path(tree_root, path, &key);
		if (IS_ERR(root)) {
			if (!btrfs_test_opt(fs_info, IGNOREBADROOTS))
				ret = PTR_ERR(root);
			break;
		}
		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
		ret = btrfs_global_root_insert(root);
		if (ret) {
			btrfs_put_root(root);
			break;
		}
		key.offset++;
	}
	btrfs_release_path(path);

	if (objectid == BTRFS_EXTENT_TREE_OBJECTID)
		fs_info->nr_global_roots = max_global_id + 1;

	if (!found || ret) {
		if (objectid == BTRFS_CSUM_TREE_OBJECTID)
			set_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state);

		if (!btrfs_test_opt(fs_info, IGNOREBADROOTS))
			ret = ret ? ret : -ENOENT;
		else
			ret = 0;
		btrfs_err(fs_info, "failed to load root %s", name);
	}
	return ret;
}
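/*
 * There can be multiple global roots per objectid (with the extent-tree-v2
 * layout), distinguished by key.offset acting as a global root id. That is
 * why the loop above walks every ROOT_ITEM with the given objectid and, for
 * the extent tree, remembers the highest id seen to size
 * fs_info->nr_global_roots.
 */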
static int load_global_roots(struct btrfs_root *tree_root)
{
	struct btrfs_path *path;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = load_global_roots_objectid(tree_root, path,
					 BTRFS_EXTENT_TREE_OBJECTID, "extent");
	if (ret)
		goto out;
	ret = load_global_roots_objectid(tree_root, path,
					 BTRFS_CSUM_TREE_OBJECTID, "csum");
	if (ret)
		goto out;
	if (!btrfs_fs_compat_ro(tree_root->fs_info, FREE_SPACE_TREE))
		goto out;
	ret = load_global_roots_objectid(tree_root, path,
					 BTRFS_FREE_SPACE_TREE_OBJECTID,
					 "free space");
out:
	btrfs_free_path(path);
	return ret;
}
static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root;
	struct btrfs_key location;
	int ret;

	BUG_ON(!fs_info->tree_root);

	ret = load_global_roots(tree_root);
	if (ret)
		return ret;

	location.type = BTRFS_ROOT_ITEM_KEY;
	location.offset = 0;

	if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE)) {
		location.objectid = BTRFS_BLOCK_GROUP_TREE_OBJECTID;
		root = btrfs_read_tree_root(tree_root, &location);
		if (IS_ERR(root)) {
			if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
				ret = PTR_ERR(root);
				goto out;
			}
		} else {
			set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
			fs_info->block_group_root = root;
		}
	}

	location.objectid = BTRFS_DEV_TREE_OBJECTID;
	root = btrfs_read_tree_root(tree_root, &location);
	if (IS_ERR(root)) {
		if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
			ret = PTR_ERR(root);
			goto out;
		}
	} else {
		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
		fs_info->dev_root = root;
	}
	/* Initialize fs_info for all devices in any case */
	ret = btrfs_init_devices_late(fs_info);
	if (ret)
		goto out;

	/*
	 * This tree can share blocks with some other fs tree during relocation
	 * and we need a proper setup by btrfs_get_fs_root
	 */
	root = btrfs_get_fs_root(tree_root->fs_info,
				 BTRFS_DATA_RELOC_TREE_OBJECTID, true);
	if (IS_ERR(root)) {
		if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
			ret = PTR_ERR(root);
			goto out;
		}
	} else {
		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
		fs_info->data_reloc_root = root;
	}

	location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
	root = btrfs_read_tree_root(tree_root, &location);
	if (!IS_ERR(root)) {
		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
		fs_info->quota_root = root;
	}

	location.objectid = BTRFS_UUID_TREE_OBJECTID;
	root = btrfs_read_tree_root(tree_root, &location);
	if (IS_ERR(root)) {
		if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
			ret = PTR_ERR(root);
			if (ret != -ENOENT)
				goto out;
		}
	} else {
		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
		fs_info->uuid_root = root;
	}

	if (btrfs_fs_incompat(fs_info, RAID_STRIPE_TREE)) {
		location.objectid = BTRFS_RAID_STRIPE_TREE_OBJECTID;
		root = btrfs_read_tree_root(tree_root, &location);
		if (IS_ERR(root)) {
			if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
				ret = PTR_ERR(root);
				goto out;
			}
		} else {
			set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
			fs_info->stripe_root = root;
		}
	}

	return 0;
out:
	btrfs_warn(fs_info, "failed to read root (objectid=%llu): %d",
		   location.objectid, ret);
	return ret;
}
/*
 * Real super block validation
 * NOTE: super csum type and incompat features will not be checked here.
 *
 * @sb:		super block to check
 * @mirror_num:	the super block number to check its bytenr:
 *		0	the primary (1st) sb
 *		1, 2	2nd and 3rd backup copy
 *		-1	skip bytenr check
 */
int btrfs_validate_super(struct btrfs_fs_info *fs_info,
			 struct btrfs_super_block *sb, int mirror_num)
{
	u64 nodesize = btrfs_super_nodesize(sb);
	u64 sectorsize = btrfs_super_sectorsize(sb);
	int ret = 0;

	if (btrfs_super_magic(sb) != BTRFS_MAGIC) {
		btrfs_err(fs_info, "no valid FS found");
		ret = -EINVAL;
	}
	if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP) {
		btrfs_err(fs_info, "unrecognized or unsupported super flag: %llu",
			  btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
		ret = -EINVAL;
	}
	if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
		btrfs_err(fs_info, "tree_root level too big: %d >= %d",
			  btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
		ret = -EINVAL;
	}
	if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
		btrfs_err(fs_info, "chunk_root level too big: %d >= %d",
			  btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
		ret = -EINVAL;
	}
	if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
		btrfs_err(fs_info, "log_root level too big: %d >= %d",
			  btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
		ret = -EINVAL;
	}

	/*
	 * Check sectorsize and nodesize first, other checks will need them.
	 * Check all possible sectorsizes (4K, 8K, 16K, 32K, 64K) here.
	 */
	if (!is_power_of_2(sectorsize) || sectorsize < 4096 ||
	    sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
		btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize);
		ret = -EINVAL;
	}

	/*
	 * We only support at most two sectorsizes: 4K and PAGE_SIZE.
	 *
	 * We can support 16K sectorsize with 64K page size without problem,
	 * but such sectorsize/pagesize combination doesn't make much sense.
	 * 4K will be our future standard, PAGE_SIZE is supported from the very
	 * beginning.
	 */
	if (sectorsize > PAGE_SIZE || (sectorsize != SZ_4K && sectorsize != PAGE_SIZE)) {
		btrfs_err(fs_info,
			"sectorsize %llu not yet supported for page size %lu",
			sectorsize, PAGE_SIZE);
		ret = -EINVAL;
	}

	if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
	    nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) {
		btrfs_err(fs_info, "invalid nodesize %llu", nodesize);
		ret = -EINVAL;
	}
	if (nodesize != le32_to_cpu(sb->__unused_leafsize)) {
		btrfs_err(fs_info, "invalid leafsize %u, should be %llu",
			  le32_to_cpu(sb->__unused_leafsize), nodesize);
		ret = -EINVAL;
	}

	/* Root alignment check */
	if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) {
		btrfs_warn(fs_info, "tree_root block unaligned: %llu",
			   btrfs_super_root(sb));
		ret = -EINVAL;
	}
	if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) {
		btrfs_warn(fs_info, "chunk_root block unaligned: %llu",
			   btrfs_super_chunk_root(sb));
		ret = -EINVAL;
	}
	if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) {
		btrfs_warn(fs_info, "log_root block unaligned: %llu",
			   btrfs_super_log_root(sb));
		ret = -EINVAL;
	}

	if (memcmp(fs_info->fs_devices->fsid, sb->fsid, BTRFS_FSID_SIZE) != 0) {
		btrfs_err(fs_info,
		"superblock fsid doesn't match fsid of fs_devices: %pU != %pU",
			  sb->fsid, fs_info->fs_devices->fsid);
		ret = -EINVAL;
	}

	if (memcmp(fs_info->fs_devices->metadata_uuid, btrfs_sb_fsid_ptr(sb),
		   BTRFS_FSID_SIZE) != 0) {
		btrfs_err(fs_info,
"superblock metadata_uuid doesn't match metadata uuid of fs_devices: %pU != %pU",
			  btrfs_sb_fsid_ptr(sb), fs_info->fs_devices->metadata_uuid);
		ret = -EINVAL;
	}

	if (memcmp(fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid,
		   BTRFS_FSID_SIZE) != 0) {
		btrfs_err(fs_info,
			"dev_item UUID does not match metadata fsid: %pU != %pU",
			fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid);
		ret = -EINVAL;
	}

	/*
	 * Artificial requirement for block-group-tree to force newer features
	 * (free-space-tree, no-holes) so the test matrix is smaller.
	 */
	if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE) &&
	    (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID) ||
	     !btrfs_fs_incompat(fs_info, NO_HOLES))) {
		btrfs_err(fs_info,
		"block-group-tree feature requires free-space-tree and no-holes");
		ret = -EINVAL;
	}

	/*
	 * Hint to catch really bogus numbers, bitflips or so, more exact checks are
	 * done later.
	 */
	if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
		btrfs_err(fs_info, "bytes_used is too small %llu",
			  btrfs_super_bytes_used(sb));
		ret = -EINVAL;
	}
	if (!is_power_of_2(btrfs_super_stripesize(sb))) {
		btrfs_err(fs_info, "invalid stripesize %u",
			  btrfs_super_stripesize(sb));
		ret = -EINVAL;
	}
	if (btrfs_super_num_devices(sb) > (1UL << 31))
		btrfs_warn(fs_info, "suspicious number of devices: %llu",
			   btrfs_super_num_devices(sb));
	if (btrfs_super_num_devices(sb) == 0) {
		btrfs_err(fs_info, "number of devices is 0");
		ret = -EINVAL;
	}

	if (mirror_num >= 0 &&
	    btrfs_super_bytenr(sb) != btrfs_sb_offset(mirror_num)) {
		btrfs_err(fs_info, "super offset mismatch %llu != %u",
			  btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
		ret = -EINVAL;
	}

	/*
	 * Obvious sys_chunk_array corruptions, it must hold at least one key
	 * and one chunk.
	 */
	if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
		btrfs_err(fs_info, "system chunk array too big %u > %u",
			  btrfs_super_sys_array_size(sb),
			  BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
		ret = -EINVAL;
	}
	if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
			+ sizeof(struct btrfs_chunk)) {
		btrfs_err(fs_info, "system chunk array too small %u < %zu",
			  btrfs_super_sys_array_size(sb),
			  sizeof(struct btrfs_disk_key)
			  + sizeof(struct btrfs_chunk));
		ret = -EINVAL;
	}

	/*
	 * The generation is a global counter, we'll trust it more than the others
	 * but it's still possible that it's the one that's wrong.
	 */
	if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
		btrfs_warn(fs_info,
			"suspicious: generation < chunk_root_generation: %llu < %llu",
			btrfs_super_generation(sb),
			btrfs_super_chunk_root_generation(sb));
	if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb)
	    && btrfs_super_cache_generation(sb) != (u64)-1)
		btrfs_warn(fs_info,
			"suspicious: generation < cache_generation: %llu < %llu",
			btrfs_super_generation(sb),
			btrfs_super_cache_generation(sb));

	return ret;
}
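/*
 * The checks above deliberately keep going after the first failure and only
 * accumulate -EINVAL into ret, so a badly corrupted super block reports all
 * of its problems in one pass instead of one per mount attempt. The two
 * generation comparisons at the end only warn, since an out-of-order
 * generation is suspicious but not provably fatal.
 */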
/*
 * Validation of super block at mount time.
 * Some checks already done early at mount time, like csum type and incompat
 * flags, will be skipped.
 */
static int btrfs_validate_mount_super(struct btrfs_fs_info *fs_info)
{
	return btrfs_validate_super(fs_info, fs_info->super_copy, 0);
}
/*
 * Validation of super block at write time.
 * Some checks like the bytenr check will be skipped as their values will be
 * overwritten soon.
 * Extra checks like csum type and incompat flags will be done here.
 */
static int btrfs_validate_write_super(struct btrfs_fs_info *fs_info,
				      struct btrfs_super_block *sb)
{
	int ret;

	ret = btrfs_validate_super(fs_info, sb, -1);
	if (ret < 0)
		goto out;
	if (!btrfs_supported_super_csum(btrfs_super_csum_type(sb))) {
		ret = -EUCLEAN;
		btrfs_err(fs_info, "invalid csum type, has %u want %u",
			  btrfs_super_csum_type(sb), BTRFS_CSUM_TYPE_CRC32);
		goto out;
	}
	if (btrfs_super_incompat_flags(sb) & ~BTRFS_FEATURE_INCOMPAT_SUPP) {
		ret = -EUCLEAN;
		btrfs_err(fs_info,
		"invalid incompat flags, has 0x%llx valid mask 0x%llx",
			  btrfs_super_incompat_flags(sb),
			  (unsigned long long)BTRFS_FEATURE_INCOMPAT_SUPP);
		goto out;
	}
out:
	if (ret < 0)
		btrfs_err(fs_info,
		"super block corruption detected before writing it to disk");
	return ret;
}
static int load_super_root(struct btrfs_root *root, u64 bytenr, u64 gen, int level)
{
	int ret = 0;
	struct btrfs_tree_parent_check check = {
		.level = level,
		.transid = gen,
		.owner_root = root->root_key.objectid
	};

	root->node = read_tree_block(root->fs_info, bytenr, &check);
	if (IS_ERR(root->node)) {
		ret = PTR_ERR(root->node);
		root->node = NULL;
		return ret;
	}
	if (!extent_buffer_uptodate(root->node)) {
		free_extent_buffer(root->node);
		root->node = NULL;
		return -EIO;
	}

	btrfs_set_root_node(&root->root_item, root->node);
	root->commit_root = btrfs_root_node(root);
	btrfs_set_root_refs(&root->root_item, 1);
	return ret;
}
static int load_important_roots(struct btrfs_fs_info *fs_info)
{
	struct btrfs_super_block *sb = fs_info->super_copy;
	u64 gen, bytenr;
	int level, ret;

	bytenr = btrfs_super_root(sb);
	gen = btrfs_super_generation(sb);
	level = btrfs_super_root_level(sb);
	ret = load_super_root(fs_info->tree_root, bytenr, gen, level);
	if (ret)
		btrfs_warn(fs_info, "couldn't read tree root");
	return ret;
}
static int __cold init_tree_roots(struct btrfs_fs_info *fs_info)
{
	int backup_index = find_newest_super_backup(fs_info);
	struct btrfs_super_block *sb = fs_info->super_copy;
	struct btrfs_root *tree_root = fs_info->tree_root;
	bool handle_error = false;
	int ret = 0;
	int i;

	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
		if (handle_error) {
			if (!IS_ERR(tree_root->node))
				free_extent_buffer(tree_root->node);
			tree_root->node = NULL;

			if (!btrfs_test_opt(fs_info, USEBACKUPROOT))
				break;

			free_root_pointers(fs_info, 0);

			/*
			 * Don't use the log in recovery mode, it won't be
			 * valid.
			 */
			btrfs_set_super_log_root(sb, 0);

			/* We can't trust the free space cache either */
			btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);

			btrfs_warn(fs_info, "try to load backup roots slot %d", i);
			ret = read_backup_root(fs_info, i);
			backup_index = ret;
			if (ret < 0)
				return ret;
		}

		ret = load_important_roots(fs_info);
		if (ret) {
			handle_error = true;
			continue;
		}

		/*
		 * No need to hold btrfs_root::objectid_mutex since the fs
		 * hasn't been fully initialised and we are the only user.
		 */
		ret = btrfs_init_root_free_objectid(tree_root);
		if (ret < 0) {
			handle_error = true;
			continue;
		}

		ASSERT(tree_root->free_objectid <= BTRFS_LAST_FREE_OBJECTID);

		ret = btrfs_read_roots(fs_info);
		if (ret < 0) {
			handle_error = true;
			continue;
		}

		/* All successful */
		fs_info->generation = btrfs_header_generation(tree_root->node);
		fs_info->last_trans_committed = fs_info->generation;
		fs_info->last_reloc_trans = 0;

		/* Always begin writing backup roots after the one being used */
		if (backup_index < 0) {
			fs_info->backup_root_index = 0;
		} else {
			fs_info->backup_root_index = backup_index + 1;
			fs_info->backup_root_index %= BTRFS_NUM_BACKUP_ROOTS;
		}
		break;
	}

	return ret;
}
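/*
 * Backup root rotation example: with BTRFS_NUM_BACKUP_ROOTS == 4, if the
 * newest backup was found in slot 2 the next commit writes its backup to
 * slot (2 + 1) % 4 == 3, and a wrap from slot 3 lands back in slot 0. When
 * no usable backup was found (backup_index < 0) writing restarts at slot 0.
 */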
void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
{
	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
	INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
	INIT_LIST_HEAD(&fs_info->trans_list);
	INIT_LIST_HEAD(&fs_info->dead_roots);
	INIT_LIST_HEAD(&fs_info->delayed_iputs);
	INIT_LIST_HEAD(&fs_info->delalloc_roots);
	INIT_LIST_HEAD(&fs_info->caching_block_groups);
	spin_lock_init(&fs_info->delalloc_root_lock);
	spin_lock_init(&fs_info->trans_lock);
	spin_lock_init(&fs_info->fs_roots_radix_lock);
	spin_lock_init(&fs_info->delayed_iput_lock);
	spin_lock_init(&fs_info->defrag_inodes_lock);
	spin_lock_init(&fs_info->super_lock);
	spin_lock_init(&fs_info->buffer_lock);
	spin_lock_init(&fs_info->unused_bgs_lock);
	spin_lock_init(&fs_info->treelog_bg_lock);
	spin_lock_init(&fs_info->zone_active_bgs_lock);
	spin_lock_init(&fs_info->relocation_bg_lock);
	rwlock_init(&fs_info->tree_mod_log_lock);
	rwlock_init(&fs_info->global_root_lock);
	mutex_init(&fs_info->unused_bg_unpin_mutex);
	mutex_init(&fs_info->reclaim_bgs_lock);
	mutex_init(&fs_info->reloc_mutex);
	mutex_init(&fs_info->delalloc_root_mutex);
	mutex_init(&fs_info->zoned_meta_io_lock);
	mutex_init(&fs_info->zoned_data_reloc_io_lock);
	seqlock_init(&fs_info->profiles_lock);

	btrfs_lockdep_init_map(fs_info, btrfs_trans_num_writers);
	btrfs_lockdep_init_map(fs_info, btrfs_trans_num_extwriters);
	btrfs_lockdep_init_map(fs_info, btrfs_trans_pending_ordered);
	btrfs_lockdep_init_map(fs_info, btrfs_ordered_extent);
	btrfs_state_lockdep_init_map(fs_info, btrfs_trans_commit_prep,
				     BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
	btrfs_state_lockdep_init_map(fs_info, btrfs_trans_unblocked,
				     BTRFS_LOCKDEP_TRANS_UNBLOCKED);
	btrfs_state_lockdep_init_map(fs_info, btrfs_trans_super_committed,
				     BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED);
	btrfs_state_lockdep_init_map(fs_info, btrfs_trans_completed,
				     BTRFS_LOCKDEP_TRANS_COMPLETED);

	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
	INIT_LIST_HEAD(&fs_info->space_info);
	INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
	INIT_LIST_HEAD(&fs_info->unused_bgs);
	INIT_LIST_HEAD(&fs_info->reclaim_bgs);
	INIT_LIST_HEAD(&fs_info->zone_active_bgs);
#ifdef CONFIG_BTRFS_DEBUG
	INIT_LIST_HEAD(&fs_info->allocated_roots);
	INIT_LIST_HEAD(&fs_info->allocated_ebs);
	spin_lock_init(&fs_info->eb_leak_lock);
#endif
	extent_map_tree_init(&fs_info->mapping_tree);
	btrfs_init_block_rsv(&fs_info->global_block_rsv,
			     BTRFS_BLOCK_RSV_GLOBAL);
	btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
	btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
	btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
	btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
			     BTRFS_BLOCK_RSV_DELOPS);
	btrfs_init_block_rsv(&fs_info->delayed_refs_rsv,
			     BTRFS_BLOCK_RSV_DELREFS);

	atomic_set(&fs_info->async_delalloc_pages, 0);
	atomic_set(&fs_info->defrag_running, 0);
	atomic_set(&fs_info->nr_delayed_iputs, 0);
	atomic64_set(&fs_info->tree_mod_seq, 0);
	fs_info->global_root_tree = RB_ROOT;
	fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
	fs_info->metadata_ratio = 0;
	fs_info->defrag_inodes = RB_ROOT;
	atomic64_set(&fs_info->free_chunk_space, 0);
	fs_info->tree_mod_log = RB_ROOT;
	fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
	btrfs_init_ref_verify(fs_info);

	fs_info->thread_pool_size = min_t(unsigned long,
					  num_online_cpus() + 2, 8);

	INIT_LIST_HEAD(&fs_info->ordered_roots);
	spin_lock_init(&fs_info->ordered_root_lock);

	btrfs_init_scrub(fs_info);
	btrfs_init_balance(fs_info);
	btrfs_init_async_reclaim_work(fs_info);

	rwlock_init(&fs_info->block_group_cache_lock);
	fs_info->block_group_cache_tree = RB_ROOT_CACHED;

	extent_io_tree_init(fs_info, &fs_info->excluded_extents,
			    IO_TREE_FS_EXCLUDED_EXTENTS);

	mutex_init(&fs_info->ordered_operations_mutex);
	mutex_init(&fs_info->tree_log_mutex);
	mutex_init(&fs_info->chunk_mutex);
	mutex_init(&fs_info->transaction_kthread_mutex);
	mutex_init(&fs_info->cleaner_mutex);
	mutex_init(&fs_info->ro_block_group_mutex);
	init_rwsem(&fs_info->commit_root_sem);
	init_rwsem(&fs_info->cleanup_work_sem);
	init_rwsem(&fs_info->subvol_sem);
	sema_init(&fs_info->uuid_tree_rescan_sem, 1);

	btrfs_init_dev_replace_locks(fs_info);
	btrfs_init_qgroup(fs_info);
	btrfs_discard_init(fs_info);

	btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
	btrfs_init_free_cluster(&fs_info->data_alloc_cluster);

	init_waitqueue_head(&fs_info->transaction_throttle);
	init_waitqueue_head(&fs_info->transaction_wait);
	init_waitqueue_head(&fs_info->transaction_blocked_wait);
	init_waitqueue_head(&fs_info->async_submit_wait);
	init_waitqueue_head(&fs_info->delayed_iputs_wait);

	/* Usable values until the real ones are cached from the superblock */
	fs_info->nodesize = 4096;
	fs_info->sectorsize = 4096;
	fs_info->sectorsize_bits = ilog2(4096);
	fs_info->stripesize = 4096;

	fs_info->max_extent_size = BTRFS_MAX_EXTENT_SIZE;

	spin_lock_init(&fs_info->swapfile_pins_lock);
	fs_info->swapfile_pins = RB_ROOT;

	fs_info->bg_reclaim_threshold = BTRFS_DEFAULT_RECLAIM_THRESH;
	INIT_WORK(&fs_info->reclaim_bgs_work, btrfs_reclaim_bgs_work);
}
static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block *sb)
{
	int ret;

	fs_info->sb = sb;
	sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE;
	sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE);

	ret = percpu_counter_init(&fs_info->ordered_bytes, 0, GFP_KERNEL);
	if (ret)
		return ret;

	ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
	if (ret)
		return ret;

	fs_info->dirty_metadata_batch = PAGE_SIZE *
					(1 + ilog2(nr_cpu_ids));

	ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
	if (ret)
		return ret;

	ret = percpu_counter_init(&fs_info->dev_replace.bio_counter, 0,
				  GFP_KERNEL);
	if (ret)
		return ret;

	fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
					GFP_KERNEL);
	if (!fs_info->delayed_root)
		return -ENOMEM;
	btrfs_init_delayed_root(fs_info->delayed_root);

	if (sb_rdonly(sb))
		set_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state);

	return btrfs_alloc_stripe_hash_table(fs_info);
}
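/*
 * The percpu counter batch scales with CPU count to trade global-counter
 * accuracy for less cross-CPU contention. Worked example: with 4096-byte
 * pages and nr_cpu_ids == 16 the dirty metadata batch is
 * 4096 * (1 + ilog2(16)) = 4096 * 5 = 20480 bytes per CPU before the
 * shared counter is touched.
 */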
static int btrfs_uuid_rescan_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = data;
	int ret;

	/*
	 * 1st step is to iterate through the existing UUID tree and
	 * to delete all entries that contain outdated data.
	 * 2nd step is to add all missing entries to the UUID tree.
	 */
	ret = btrfs_uuid_tree_iterate(fs_info);
	if (ret < 0) {
		if (ret != -EINTR)
			btrfs_warn(fs_info, "iterating uuid_tree failed %d",
				   ret);
		up(&fs_info->uuid_tree_rescan_sem);
		return ret;
	}
	return btrfs_uuid_scan_kthread(data);
}
static int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
{
	struct task_struct *task;

	down(&fs_info->uuid_tree_rescan_sem);
	task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
	if (IS_ERR(task)) {
		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
		btrfs_warn(fs_info, "failed to start uuid_rescan task");
		up(&fs_info->uuid_tree_rescan_sem);
		return PTR_ERR(task);
	}

	return 0;
}
static int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
{
	u64 root_objectid = 0;
	struct btrfs_root *gang[8];
	int i = 0;
	int err = 0;
	unsigned int ret = 0;

	while (1) {
		spin_lock(&fs_info->fs_roots_radix_lock);
		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
					     (void **)gang, root_objectid,
					     ARRAY_SIZE(gang));
		if (!ret) {
			spin_unlock(&fs_info->fs_roots_radix_lock);
			break;
		}
		root_objectid = gang[ret - 1]->root_key.objectid + 1;

		for (i = 0; i < ret; i++) {
			/* Avoid grabbing roots in dead_roots. */
			if (btrfs_root_refs(&gang[i]->root_item) == 0) {
				gang[i] = NULL;
				continue;
			}
			/* Grab all the search results for later use. */
			gang[i] = btrfs_grab_root(gang[i]);
		}
		spin_unlock(&fs_info->fs_roots_radix_lock);

		for (i = 0; i < ret; i++) {
			if (!gang[i])
				continue;
			root_objectid = gang[i]->root_key.objectid;
			err = btrfs_orphan_cleanup(gang[i]);
			if (err)
				goto out;
			btrfs_put_root(gang[i]);
		}
		root_objectid++;
	}
out:
	/* Release the uncleaned roots due to error. */
	for (; i < ret; i++) {
		if (gang[i])
			btrfs_put_root(gang[i]);
	}
	return err;
}
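/*
 * The radix tree is scanned in batches of ARRAY_SIZE(gang) roots, and
 * advancing root_objectid past the last key returned turns the gang lookup
 * into a simple pagination cursor. Grabbing references under the lock and
 * processing afterwards means the spinlock is never held across a
 * btrfs_orphan_cleanup() call, which can do IO.
 */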
/*
 * Some options only have meaning at mount time and shouldn't persist across
 * remounts, or be displayed. Clear these at the end of mount and remount
 * code paths.
 */
void btrfs_clear_oneshot_options(struct btrfs_fs_info *fs_info)
{
	btrfs_clear_opt(fs_info->mount_opt, USEBACKUPROOT);
	btrfs_clear_opt(fs_info->mount_opt, CLEAR_CACHE);
}
/*
 * Mounting logic specific to read-write file systems. Shared by open_ctree
 * and btrfs_remount when remounting from read-only to read-write.
 */
int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info)
{
	int ret;
	const bool cache_opt = btrfs_test_opt(fs_info, SPACE_CACHE);
	bool rebuild_free_space_tree = false;

	if (btrfs_test_opt(fs_info, CLEAR_CACHE) &&
	    btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
		rebuild_free_space_tree = true;
	} else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
		   !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) {
		btrfs_warn(fs_info, "free space tree is invalid");
		rebuild_free_space_tree = true;
	}

	if (rebuild_free_space_tree) {
		btrfs_info(fs_info, "rebuilding free space tree");
		ret = btrfs_rebuild_free_space_tree(fs_info);
		if (ret) {
			btrfs_warn(fs_info,
				   "failed to rebuild free space tree: %d", ret);
			goto out;
		}
	}

	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
	    !btrfs_test_opt(fs_info, FREE_SPACE_TREE)) {
		btrfs_info(fs_info, "disabling free space tree");
		ret = btrfs_delete_free_space_tree(fs_info);
		if (ret) {
			btrfs_warn(fs_info,
				   "failed to disable free space tree: %d", ret);
			goto out;
		}
	}

	/*
	 * btrfs_find_orphan_roots() is responsible for finding all the dead
	 * roots (with 0 refs), flag them with BTRFS_ROOT_DEAD_TREE and load
	 * them into the fs_info->fs_roots_radix tree. This must be done before
	 * calling btrfs_orphan_cleanup() on the tree root. If we don't do it
	 * first, then btrfs_orphan_cleanup() will delete a dead root's orphan
	 * item before the root's tree is deleted - this means that if we unmount
	 * or crash before the deletion completes, on the next mount we will not
	 * delete what remains of the tree because the orphan item does not
	 * exist anymore, which is what tells us we have a pending deletion.
	 */
	ret = btrfs_find_orphan_roots(fs_info);
	if (ret)
		goto out;

	ret = btrfs_cleanup_fs_roots(fs_info);
	if (ret)
		goto out;

	down_read(&fs_info->cleanup_work_sem);
	if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
	    (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
		up_read(&fs_info->cleanup_work_sem);
		goto out;
	}
	up_read(&fs_info->cleanup_work_sem);

	mutex_lock(&fs_info->cleaner_mutex);
	ret = btrfs_recover_relocation(fs_info);
	mutex_unlock(&fs_info->cleaner_mutex);
	if (ret < 0) {
		btrfs_warn(fs_info, "failed to recover relocation: %d", ret);
		goto out;
	}

	if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) &&
	    !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
		btrfs_info(fs_info, "creating free space tree");
		ret = btrfs_create_free_space_tree(fs_info);
		if (ret) {
			btrfs_warn(fs_info,
				   "failed to create free space tree: %d", ret);
			goto out;
		}
	}

	if (cache_opt != btrfs_free_space_cache_v1_active(fs_info)) {
		ret = btrfs_set_free_space_cache_v1_active(fs_info, cache_opt);
		if (ret)
			goto out;
	}

	ret = btrfs_resume_balance_async(fs_info);
	if (ret)
		goto out;

	ret = btrfs_resume_dev_replace_async(fs_info);
	if (ret) {
		btrfs_warn(fs_info, "failed to resume dev_replace");
		goto out;
	}

	btrfs_qgroup_rescan_resume(fs_info);

	if (!fs_info->uuid_root) {
		btrfs_info(fs_info, "creating UUID tree");
		ret = btrfs_create_uuid_tree(fs_info);
		if (ret) {
			btrfs_warn(fs_info,
				   "failed to create the UUID tree %d", ret);
			goto out;
		}
	}

out:
	return ret;
}
/*
 * Do various sanity and dependency checks of different features.
 *
 * @is_rw_mount:	If the mount is read-write.
 *
 * This is the place for less strict checks (like for subpage or artificial
 * feature dependencies).
 *
 * For strict checks or possible corruption detection, see
 * btrfs_validate_super().
 *
 * This should be called after btrfs_parse_options(), as some mount options
 * (space cache related) can modify on-disk format like free space tree and
 * screw up certain feature dependencies.
 */
int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount)
{
	struct btrfs_super_block *disk_super = fs_info->super_copy;
	u64 incompat = btrfs_super_incompat_flags(disk_super);
	const u64 compat_ro = btrfs_super_compat_ro_flags(disk_super);
	const u64 compat_ro_unsupp = (compat_ro & ~BTRFS_FEATURE_COMPAT_RO_SUPP);

	if (incompat & ~BTRFS_FEATURE_INCOMPAT_SUPP) {
		btrfs_err(fs_info,
		"cannot mount because of unknown incompat features (0x%llx)",
			  incompat);
		return -EINVAL;
	}

	/* Runtime limitation for mixed block groups. */
	if ((incompat & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
	    (fs_info->sectorsize != fs_info->nodesize)) {
		btrfs_err(fs_info,
"unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups",
			  fs_info->nodesize, fs_info->sectorsize);
		return -EINVAL;
	}

	/* Mixed backref is an always-enabled feature. */
	incompat |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;

	/* Set compression related flags just in case. */
	if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
		incompat |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
	else if (fs_info->compress_type == BTRFS_COMPRESS_ZSTD)
		incompat |= BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD;

	/*
	 * An ancient flag, which should really be marked deprecated.
	 * Such a runtime limitation doesn't really need an incompat flag.
	 */
	if (btrfs_super_nodesize(disk_super) > PAGE_SIZE)
		incompat |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;

	if (compat_ro_unsupp && is_rw_mount) {
		btrfs_err(fs_info,
	"cannot mount read-write because of unknown compat_ro features (0x%llx)",
			  compat_ro);
		return -EINVAL;
	}

	/*
	 * We have unsupported RO compat features, although RO mounted, we
	 * should not cause any metadata writes, including log replay.
	 * Or we could screw up whatever the new feature requires.
	 */
	if (compat_ro_unsupp && btrfs_super_log_root(disk_super) &&
	    !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
		btrfs_err(fs_info,
"cannot replay dirty log with unsupported compat_ro features (0x%llx), try rescue=nologreplay",
			  compat_ro);
		return -EINVAL;
	}

	/*
	 * Artificial limitations for block group tree, to force
	 * block-group-tree to rely on no-holes and free-space-tree.
	 */
	if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE) &&
	    (!btrfs_fs_incompat(fs_info, NO_HOLES) ||
	     !btrfs_test_opt(fs_info, FREE_SPACE_TREE))) {
		btrfs_err(fs_info,
	"block-group-tree feature requires no-holes and free-space-tree features");
		return -EINVAL;
	}

	/*
	 * Subpage runtime limitation on v1 cache.
	 *
	 * V1 space cache still has some hard coded PAGE_SIZE usage, while
	 * we're already defaulting to v2 cache, no need to bother v1 as it's
	 * going to be deprecated anyway.
	 */
	if (fs_info->sectorsize < PAGE_SIZE && btrfs_test_opt(fs_info, SPACE_CACHE)) {
		btrfs_warn(fs_info,
	"v1 space cache is not supported for page size %lu with sectorsize %u",
			   PAGE_SIZE, fs_info->sectorsize);
		return -EINVAL;
	}

	/* This can be called by remount, we need to protect the super block. */
	spin_lock(&fs_info->super_lock);
	btrfs_set_super_incompat_flags(disk_super, incompat);
	spin_unlock(&fs_info->super_lock);

	return 0;
}
int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices,
		      char *options)
{
	u32 sectorsize;
	u32 nodesize;
	u32 stripesize;
	u64 generation;
	u16 csum_type;
	struct btrfs_super_block *disk_super;
	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
	struct btrfs_root *tree_root;
	struct btrfs_root *chunk_root;
	int ret;
	int level;

	ret = init_mount_fs_info(fs_info, sb);
	if (ret)
		goto fail;

	/* These need to be init'ed before we start creating inodes and such. */
	tree_root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID,
				     GFP_KERNEL);
	fs_info->tree_root = tree_root;
	chunk_root = btrfs_alloc_root(fs_info, BTRFS_CHUNK_TREE_OBJECTID,
				      GFP_KERNEL);
	fs_info->chunk_root = chunk_root;
	if (!tree_root || !chunk_root) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = btrfs_init_btree_inode(sb);
	if (ret)
		goto fail;

	invalidate_bdev(fs_devices->latest_dev->bdev);

	/*
	 * Read super block and check the signature bytes only.
	 */
	disk_super = btrfs_read_dev_super(fs_devices->latest_dev->bdev);
	if (IS_ERR(disk_super)) {
		ret = PTR_ERR(disk_super);
		goto fail_alloc;
	}

	/*
	 * Verify the type first, if that or the checksum value are
	 * corrupted, we'll find out.
	 */
	csum_type = btrfs_super_csum_type(disk_super);
	if (!btrfs_supported_super_csum(csum_type)) {
		btrfs_err(fs_info, "unsupported checksum algorithm: %u",
			  csum_type);
		ret = -EINVAL;
		btrfs_release_disk_super(disk_super);
		goto fail_alloc;
	}

	fs_info->csum_size = btrfs_super_csum_size(disk_super);

	ret = btrfs_init_csum_hash(fs_info, csum_type);
	if (ret) {
		btrfs_release_disk_super(disk_super);
		goto fail_alloc;
	}

	/*
	 * We want to check superblock checksum, the type is stored inside.
	 * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
	 */
	if (btrfs_check_super_csum(fs_info, disk_super)) {
		btrfs_err(fs_info, "superblock checksum mismatch");
		ret = -EINVAL;
		btrfs_release_disk_super(disk_super);
		goto fail_alloc;
	}

	/*
	 * super_copy is zeroed at allocation time and we never touch the
	 * following bytes up to INFO_SIZE, the checksum is calculated from
	 * the whole block of INFO_SIZE.
	 */
	memcpy(fs_info->super_copy, disk_super, sizeof(*fs_info->super_copy));
	btrfs_release_disk_super(disk_super);

	disk_super = fs_info->super_copy;

	memcpy(fs_info->super_for_commit, fs_info->super_copy,
	       sizeof(*fs_info->super_for_commit));

	ret = btrfs_validate_mount_super(fs_info);
	if (ret) {
		btrfs_err(fs_info, "superblock contains fatal errors");
		ret = -EINVAL;
		goto fail_alloc;
	}

	if (!btrfs_super_root(disk_super)) {
		btrfs_err(fs_info, "invalid superblock tree root bytenr");
		ret = -EINVAL;
		goto fail_alloc;
	}

	/* Check FS state, whether FS is broken. */
	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
		WRITE_ONCE(fs_info->fs_error, -EUCLEAN);

	/*
	 * In the long term, we'll store the compression type in the super
	 * block, and it'll be used for per file compression control.
	 */
	fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
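	/*
	 * Ordering note for the sequence above: the csum *type* must be
	 * validated and the hash transform allocated before
	 * btrfs_check_super_csum() can verify the block, and only a block
	 * whose checksum has already been proven good is worth the full
	 * btrfs_validate_mount_super() pass.
	 */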
	/* Set up fs_info before parsing mount options */
	nodesize = btrfs_super_nodesize(disk_super);
	sectorsize = btrfs_super_sectorsize(disk_super);
	stripesize = sectorsize;
	fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
	fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));

	fs_info->nodesize = nodesize;
	fs_info->sectorsize = sectorsize;
	fs_info->sectorsize_bits = ilog2(sectorsize);
	fs_info->csums_per_leaf = BTRFS_MAX_ITEM_SIZE(fs_info) / fs_info->csum_size;
	fs_info->stripesize = stripesize;

	ret = btrfs_parse_options(fs_info, options, sb->s_flags);
	if (ret)
		goto fail_alloc;

	ret = btrfs_check_features(fs_info, !sb_rdonly(sb));
	if (ret < 0)
		goto fail_alloc;

	if (sectorsize < PAGE_SIZE) {
		struct btrfs_subpage_info *subpage_info;

		/*
		 * V1 space cache has some hardcoded PAGE_SIZE usage, and is
		 * going to be deprecated.
		 *
		 * Force to use v2 cache for subpage case.
		 */
		btrfs_clear_opt(fs_info->mount_opt, SPACE_CACHE);
		btrfs_set_and_info(fs_info, FREE_SPACE_TREE,
			"forcing free space tree for sector size %u with page size %lu",
			sectorsize, PAGE_SIZE);
		btrfs_warn(fs_info,
			"read-write for sector size %u with page size %lu is experimental",
			sectorsize, PAGE_SIZE);
		subpage_info = kzalloc(sizeof(*subpage_info), GFP_KERNEL);
		if (!subpage_info) {
			ret = -ENOMEM;
			goto fail_alloc;
		}
		btrfs_init_subpage_info(subpage_info, sectorsize);
		fs_info->subpage_info = subpage_info;
	}

	ret = btrfs_init_workqueues(fs_info);
	if (ret)
		goto fail_sb_buffer;

	sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super);
	sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE);

	sb->s_blocksize = sectorsize;
	sb->s_blocksize_bits = blksize_bits(sectorsize);
	memcpy(&sb->s_uuid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE);

	mutex_lock(&fs_info->chunk_mutex);
	ret = btrfs_read_sys_array(fs_info);
	mutex_unlock(&fs_info->chunk_mutex);
	if (ret) {
		btrfs_err(fs_info, "failed to read the system array: %d", ret);
		goto fail_sb_buffer;
	}

	generation = btrfs_super_chunk_root_generation(disk_super);
	level = btrfs_super_chunk_root_level(disk_super);
	ret = load_super_root(chunk_root, btrfs_super_chunk_root(disk_super),
			      generation, level);
	if (ret) {
		btrfs_err(fs_info, "failed to read chunk root");
		goto fail_tree_roots;
	}

	read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
			   offsetof(struct btrfs_header, chunk_tree_uuid),
			   BTRFS_UUID_SIZE);

	ret = btrfs_read_chunk_tree(fs_info);
	if (ret) {
		btrfs_err(fs_info, "failed to read chunk tree: %d", ret);
		goto fail_tree_roots;
	}

	/*
	 * At this point we know all the devices that make this filesystem,
	 * including the seed devices but we don't know yet if the replace
	 * target is required. So free devices that are not part of this
	 * filesystem but skip the replace target device which is checked
	 * below in btrfs_init_dev_replace().
	 */
	btrfs_free_extra_devids(fs_devices);
	if (!fs_devices->latest_dev->bdev) {
		btrfs_err(fs_info, "failed to read devices");
		ret = -EIO;
		goto fail_tree_roots;
	}

	ret = init_tree_roots(fs_info);
	if (ret)
		goto fail_tree_roots;
	/*
	 * Get zone type information of zoned block devices. This will also
	 * handle emulation of a zoned filesystem if a regular device has the
	 * zoned incompat feature flag set.
	 */
	ret = btrfs_get_dev_zone_info_all_devices(fs_info);
	if (ret) {
		btrfs_err(fs_info,
			  "zoned: failed to read device zone info: %d", ret);
		goto fail_block_groups;
	}

	/*
	 * If we have a uuid root and we're not being told to rescan we need to
	 * check the generation here so we can set the
	 * BTRFS_FS_UPDATE_UUID_TREE_GEN bit. Otherwise we could commit the
	 * transaction during a balance or the log replay without updating the
	 * uuid generation, and then if we crash we would rescan the uuid tree,
	 * even though it was perfectly fine.
	 */
	if (fs_info->uuid_root && !btrfs_test_opt(fs_info, RESCAN_UUID_TREE) &&
	    fs_info->generation == btrfs_super_uuid_tree_generation(disk_super))
		set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);

	ret = btrfs_verify_dev_extents(fs_info);
	if (ret) {
		btrfs_err(fs_info,
			  "failed to verify dev extents against chunks: %d",
			  ret);
		goto fail_block_groups;
	}
	ret = btrfs_recover_balance(fs_info);
	if (ret) {
		btrfs_err(fs_info, "failed to recover balance: %d", ret);
		goto fail_block_groups;
	}

	ret = btrfs_init_dev_stats(fs_info);
	if (ret) {
		btrfs_err(fs_info, "failed to init dev_stats: %d", ret);
		goto fail_block_groups;
	}

	ret = btrfs_init_dev_replace(fs_info);
	if (ret) {
		btrfs_err(fs_info, "failed to init dev_replace: %d", ret);
		goto fail_block_groups;
	}

	ret = btrfs_check_zoned_mode(fs_info);
	if (ret) {
		btrfs_err(fs_info, "failed to initialize zoned mode: %d",
			  ret);
		goto fail_block_groups;
	}

	ret = btrfs_sysfs_add_fsid(fs_devices);
	if (ret) {
		btrfs_err(fs_info, "failed to init sysfs fsid interface: %d",
			  ret);
		goto fail_block_groups;
	}

	ret = btrfs_sysfs_add_mounted(fs_info);
	if (ret) {
		btrfs_err(fs_info, "failed to init sysfs interface: %d", ret);
		goto fail_fsdev_sysfs;
	}

	ret = btrfs_init_space_info(fs_info);
	if (ret) {
		btrfs_err(fs_info, "failed to initialize space info: %d", ret);
		goto fail_sysfs;
	}

	ret = btrfs_read_block_groups(fs_info);
	if (ret) {
		btrfs_err(fs_info, "failed to read block groups: %d", ret);
		goto fail_sysfs;
	}

	btrfs_free_zone_cache(fs_info);

	btrfs_check_active_zone_reservation(fs_info);

	if (!sb_rdonly(sb) && fs_info->fs_devices->missing_devices &&
	    !btrfs_check_rw_degradable(fs_info, NULL)) {
		btrfs_warn(fs_info,
		"writable mount is not allowed due to too many missing devices");
		ret = -EINVAL;
		goto fail_sysfs;
	}

	fs_info->cleaner_kthread = kthread_run(cleaner_kthread, fs_info,
					       "btrfs-cleaner");
	if (IS_ERR(fs_info->cleaner_kthread)) {
		ret = PTR_ERR(fs_info->cleaner_kthread);
		goto fail_sysfs;
	}

	fs_info->transaction_kthread = kthread_run(transaction_kthread,
						   tree_root,
						   "btrfs-transaction");
	if (IS_ERR(fs_info->transaction_kthread)) {
		ret = PTR_ERR(fs_info->transaction_kthread);
		goto fail_cleaner;
	}

	if (!btrfs_test_opt(fs_info, NOSSD) &&
	    !fs_info->fs_devices->rotating) {
		btrfs_set_and_info(fs_info, SSD, "enabling ssd optimizations");
	}

	/*
	 * For devices supporting discard turn on discard=async automatically,
	 * unless it's already set or disabled. This could be turned off by
	 * nodiscard for the same mount.
	 *
	 * The zoned mode piggybacks on the discard functionality for
	 * resetting a zone. There is no reason to delay the zone reset as it is
	 * fast enough. So, do not enable async discard for zoned mode.
	 */
	if (!(btrfs_test_opt(fs_info, DISCARD_SYNC) ||
	      btrfs_test_opt(fs_info, DISCARD_ASYNC) ||
	      btrfs_test_opt(fs_info, NODISCARD)) &&
	    fs_info->fs_devices->discardable &&
	    !btrfs_is_zoned(fs_info)) {
		btrfs_set_and_info(fs_info, DISCARD_ASYNC,
				   "auto enabling async discard");
	}
	ret = btrfs_read_qgroup_config(fs_info);
	if (ret)
		goto fail_trans_kthread;

	if (btrfs_build_ref_tree(fs_info))
		btrfs_err(fs_info, "couldn't build ref tree");

	/* Do not make disk changes in broken FS or when nologreplay is given. */
	if (btrfs_super_log_root(disk_super) != 0 &&
	    !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
		btrfs_info(fs_info, "start tree-log replay");
		ret = btrfs_replay_log(fs_info, fs_devices);
		if (ret)
			goto fail_qgroup;
	}

	fs_info->fs_root = btrfs_get_fs_root(fs_info, BTRFS_FS_TREE_OBJECTID, true);
	if (IS_ERR(fs_info->fs_root)) {
		ret = PTR_ERR(fs_info->fs_root);
		btrfs_warn(fs_info, "failed to read fs tree: %d", ret);
		fs_info->fs_root = NULL;
		goto fail_qgroup;
	}

	if (sb_rdonly(sb))
		goto clear_oneshot;

	ret = btrfs_start_pre_rw_mount(fs_info);
	if (ret) {
		close_ctree(fs_info);
		return ret;
	}
	btrfs_discard_resume(fs_info);

	if (fs_info->uuid_root &&
	    (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) ||
	     fs_info->generation != btrfs_super_uuid_tree_generation(disk_super))) {
		btrfs_info(fs_info, "checking UUID tree");
		ret = btrfs_check_uuid_tree(fs_info);
		if (ret) {
			btrfs_warn(fs_info,
				   "failed to check the UUID tree: %d", ret);
			close_ctree(fs_info);
			return ret;
		}
	}

	set_bit(BTRFS_FS_OPEN, &fs_info->flags);

	/* Kick the cleaner thread so it'll start deleting snapshots. */
	if (test_bit(BTRFS_FS_UNFINISHED_DROPS, &fs_info->flags))
		wake_up_process(fs_info->cleaner_kthread);

clear_oneshot:
	btrfs_clear_oneshot_options(fs_info);
	return 0;

fail_qgroup:
	btrfs_free_qgroup_config(fs_info);
fail_trans_kthread:
	kthread_stop(fs_info->transaction_kthread);
	btrfs_cleanup_transaction(fs_info);
	btrfs_free_fs_roots(fs_info);
fail_cleaner:
	kthread_stop(fs_info->cleaner_kthread);

	/*
	 * Make sure we're done with the btree inode before we stop our
	 * kthreads.
	 */
	filemap_write_and_wait(fs_info->btree_inode->i_mapping);

fail_sysfs:
	btrfs_sysfs_remove_mounted(fs_info);

fail_fsdev_sysfs:
	btrfs_sysfs_remove_fsid(fs_info->fs_devices);

fail_block_groups:
	btrfs_put_block_group_cache(fs_info);

fail_tree_roots:
	if (fs_info->data_reloc_root)
		btrfs_drop_and_free_fs_root(fs_info, fs_info->data_reloc_root);
	free_root_pointers(fs_info, true);
	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);

fail_sb_buffer:
	btrfs_stop_all_workers(fs_info);
	btrfs_free_block_groups(fs_info);
fail_alloc:
	btrfs_mapping_tree_free(&fs_info->mapping_tree);

	iput(fs_info->btree_inode);
fail:
	btrfs_close_devices(fs_info->fs_devices);
	ASSERT(ret < 0);
	return ret;
}
ALLOW_ERROR_INJECTION(open_ctree, ERRNO);
static void btrfs_end_super_write(struct bio *bio)
{
	struct btrfs_device *device = bio->bi_private;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;
	struct page *page;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		page = bvec->bv_page;

		if (bio->bi_status) {
			btrfs_warn_rl_in_rcu(device->fs_info,
				"lost page write due to IO error on %s (%d)",
				btrfs_dev_name(device),
				blk_status_to_errno(bio->bi_status));
			ClearPageUptodate(page);
			SetPageError(page);
			btrfs_dev_stat_inc_and_print(device,
						     BTRFS_DEV_STAT_WRITE_ERRS);
		} else {
			SetPageUptodate(page);
		}

		put_page(page);
		unlock_page(page);
	}

	bio_put(bio);
}
struct btrfs_super_block *btrfs_read_dev_one_super(struct block_device *bdev,
						   int copy_num, bool drop_cache)
{
	struct btrfs_super_block *super;
	int ret;
	u64 bytenr, bytenr_orig;
	struct address_space *mapping = bdev->bd_inode->i_mapping;
	struct page *page;

	bytenr_orig = btrfs_sb_offset(copy_num);
	ret = btrfs_sb_log_location_bdev(bdev, copy_num, READ, &bytenr);
	if (ret == -ENOENT)
		return ERR_PTR(-EINVAL);
	else if (ret)
		return ERR_PTR(ret);

	if (bytenr + BTRFS_SUPER_INFO_SIZE >= bdev_nr_bytes(bdev))
		return ERR_PTR(-EINVAL);

	if (drop_cache) {
		/* This should only be called with the primary sb. */
		ASSERT(copy_num == 0);

		/*
		 * Drop the page of the primary superblock, so later read will
		 * always read from the device.
		 */
		invalidate_inode_pages2_range(mapping,
				bytenr >> PAGE_SHIFT,
				(bytenr + BTRFS_SUPER_INFO_SIZE) >> PAGE_SHIFT);
	}

	page = read_cache_page_gfp(mapping, bytenr >> PAGE_SHIFT, GFP_NOFS);
	if (IS_ERR(page))
		return ERR_CAST(page);

	super = page_address(page);
	if (btrfs_super_magic(super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(super);
		return ERR_PTR(-ENODATA);
	}

	if (btrfs_super_bytenr(super) != bytenr_orig) {
		btrfs_release_disk_super(super);
		return ERR_PTR(-EINVAL);
	}

	return super;
}
struct btrfs_super_block *btrfs_read_dev_super(struct block_device *bdev)
{
	struct btrfs_super_block *super, *latest = NULL;
	int i;
	u64 transid = 0;

	/*
	 * We would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead.
	 */
	for (i = 0; i < 1; i++) {
		super = btrfs_read_dev_one_super(bdev, i, false);
		if (IS_ERR(super))
			continue;

		if (!latest || btrfs_super_generation(super) > transid) {
			if (latest)
				btrfs_release_disk_super(super);

			latest = super;
			transid = btrfs_super_generation(super);
		}
	}

	return super;
}
/*
 * Write superblock @sb to the @device. Do not wait for completion, all the
 * pages we use for writing are locked.
 *
 * Write @max_mirrors copies of the superblock, where 0 means default that fit
 * the expected device size at commit time. Note that max_mirrors must be the
 * same for the write and wait phases.
 *
 * Return number of errors when page is not found or submission fails.
 */
static int write_dev_supers(struct btrfs_device *device,
			    struct btrfs_super_block *sb, int max_mirrors)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct address_space *mapping = device->bdev->bd_inode->i_mapping;
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	int i;
	int errors = 0;
	int ret;
	u64 bytenr, bytenr_orig;

	if (max_mirrors == 0)
		max_mirrors = BTRFS_SUPER_MIRROR_MAX;

	shash->tfm = fs_info->csum_shash;

	for (i = 0; i < max_mirrors; i++) {
		struct page *page;
		struct bio *bio;
		struct btrfs_super_block *disk_super;

		bytenr_orig = btrfs_sb_offset(i);
		ret = btrfs_sb_log_location(device, i, WRITE, &bytenr);
		if (ret == -ENOENT) {
			continue;
		} else if (ret < 0) {
			btrfs_err(device->fs_info,
				"couldn't get super block location for mirror %d",
				i);
			errors++;
			continue;
		}
		if (bytenr + BTRFS_SUPER_INFO_SIZE >=
		    device->commit_total_bytes)
			break;

		btrfs_set_super_bytenr(sb, bytenr_orig);

		crypto_shash_digest(shash, (const char *)sb + BTRFS_CSUM_SIZE,
				    BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE,
				    sb->csum);

		page = find_or_create_page(mapping, bytenr >> PAGE_SHIFT,
					   GFP_NOFS);
		if (!page) {
			btrfs_err(device->fs_info,
				"couldn't get super block page for bytenr %llu",
				bytenr);
			errors++;
			continue;
		}

		/* Bump the refcount for wait_dev_supers() */
		get_page(page);

		disk_super = page_address(page);
		memcpy(disk_super, sb, BTRFS_SUPER_INFO_SIZE);

		/*
		 * Directly use bios here instead of relying on the page cache
		 * to do I/O, so we don't lose the ability to do integrity
		 * checking.
		 */
		bio = bio_alloc(device->bdev, 1,
				REQ_OP_WRITE | REQ_SYNC | REQ_META | REQ_PRIO,
				GFP_NOFS);
		bio->bi_iter.bi_sector = bytenr >> SECTOR_SHIFT;
		bio->bi_private = device;
		bio->bi_end_io = btrfs_end_super_write;
		__bio_add_page(bio, page, BTRFS_SUPER_INFO_SIZE,
			       offset_in_page(bytenr));

		/*
		 * We FUA only the first super block. The others we allow to
		 * go down lazy and there's a short window where the on-disk
		 * copies might still contain the older version.
		 */
		if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER))
			bio->bi_opf |= REQ_FUA;

		submit_bio(bio);

		if (btrfs_advance_sb_log(device, i))
			errors++;
	}
	return errors < i ? 0 : -1;
}
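/*
 * The mirror copies live at the fixed offsets returned by btrfs_sb_offset():
 * 64KiB for the primary and 64MiB / 256GiB for the two backups. That is why
 * the loop bails out once bytenr would fall beyond
 * device->commit_total_bytes: small devices simply carry fewer copies.
 */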
/*
 * Wait for write completion of superblocks done by write_dev_supers,
 * @max_mirrors same for write and wait phases.
 *
 * Return number of errors when page is not found or not marked up to
 * date.
 */
static int wait_dev_supers(struct btrfs_device *device, int max_mirrors)
{
	int i;
	int errors = 0;
	bool primary_failed = false;
	int ret;
	u64 bytenr;

	if (max_mirrors == 0)
		max_mirrors = BTRFS_SUPER_MIRROR_MAX;

	for (i = 0; i < max_mirrors; i++) {
		struct page *page;

		ret = btrfs_sb_log_location(device, i, READ, &bytenr);
		if (ret == -ENOENT) {
			break;
		} else if (ret < 0) {
			errors++;
			if (i == 0)
				primary_failed = true;
			continue;
		}
		if (bytenr + BTRFS_SUPER_INFO_SIZE >=
		    device->commit_total_bytes)
			break;

		page = find_get_page(device->bdev->bd_inode->i_mapping,
				     bytenr >> PAGE_SHIFT);
		if (!page) {
			errors++;
			if (i == 0)
				primary_failed = true;
			continue;
		}
		/* Page is submitted locked and unlocked once the IO completes */
		wait_on_page_locked(page);
		if (PageError(page)) {
			errors++;
			if (i == 0)
				primary_failed = true;
		}

		/* Drop our reference */
		put_page(page);

		/* Drop the reference from the writing run */
		put_page(page);
	}

	/* log error, force error return */
	if (primary_failed) {
		btrfs_err(device->fs_info,
			  "error writing primary super block to device %llu",
			  device->devid);
		return -1;
	}

	return errors < i ? 0 : -1;
}
/*
 * endio for the write_dev_flush, this will wake anyone waiting
 * for the barrier when it is done
 */
static void btrfs_end_empty_barrier(struct bio *bio)
{
	complete(bio->bi_private);
}

/*
 * Submit a flush request to the device if it supports it. Error handling is
 * done in the waiting counterpart.
 */
static void write_dev_flush(struct btrfs_device *device)
{
	struct bio *bio = &device->flush_bio;

	device->last_flush_error = BLK_STS_OK;

	bio_init(bio, device->bdev, NULL, 0,
		 REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH);
	bio->bi_end_io = btrfs_end_empty_barrier;
	init_completion(&device->flush_wait);
	bio->bi_private = &device->flush_wait;
	submit_bio(bio);
	set_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
}
/*
 * If the flush bio has been submitted by write_dev_flush, wait for it.
 * Return true for any error, and false otherwise.
 */
static bool wait_dev_flush(struct btrfs_device *device)
{
	struct bio *bio = &device->flush_bio;

	if (!test_and_clear_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state))
		return false;

	wait_for_completion_io(&device->flush_wait);

	if (bio->bi_status) {
		device->last_flush_error = bio->bi_status;
		btrfs_dev_stat_inc_and_print(device,
					     BTRFS_DEV_STAT_FLUSH_ERRS);
		return true;
	}

	return false;
}
/*
 * Send an empty flush down to each device in parallel,
 * then wait for them.
 */
static int barrier_all_devices(struct btrfs_fs_info *info)
{
	struct list_head *head;
	struct btrfs_device *dev;
	int errors_wait = 0;

	lockdep_assert_held(&info->fs_devices->device_list_mutex);
	/* send down all the barriers */
	head = &info->fs_devices->devices;
	list_for_each_entry(dev, head, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
			continue;
		if (!dev->bdev)
			continue;
		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
			continue;

		write_dev_flush(dev);
	}

	/* wait for all the barriers */
	list_for_each_entry(dev, head, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
			continue;
		if (!dev->bdev) {
			errors_wait++;
			continue;
		}
		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
			continue;

		if (wait_dev_flush(dev))
			errors_wait++;
	}

	/*
	 * Checks last_flush_error of disks in order to determine the device
	 * state.
	 */
	if (errors_wait && !btrfs_check_rw_degradable(info, NULL))
		return -EIO;

	return 0;
}
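/*
 * The two-phase structure (issue every flush, then wait on every flush)
 * lets the per-device cache flushes run in parallel instead of
 * serializing one device at a time. A failed flush does not fail the
 * commit outright: the filesystem stays writable as long as
 * btrfs_check_rw_degradable() says the surviving devices can still
 * satisfy the redundancy profile.
 */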
int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
{
	int raid_type;
	int min_tolerated = INT_MAX;

	if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 ||
	    (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE))
		min_tolerated = min_t(int, min_tolerated,
				      btrfs_raid_array[BTRFS_RAID_SINGLE].
				      tolerated_failures);

	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
		if (raid_type == BTRFS_RAID_SINGLE)
			continue;
		if (!(flags & btrfs_raid_array[raid_type].bg_flag))
			continue;
		min_tolerated = min_t(int, min_tolerated,
				      btrfs_raid_array[raid_type].
				      tolerated_failures);
	}

	if (min_tolerated == INT_MAX) {
		pr_warn("BTRFS: unknown raid flag: %llu", flags);
		min_tolerated = 0;
	}

	return min_tolerated;
}
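/*
 * Worked example (values from btrfs_raid_array): a filesystem carrying
 * RAID1 block groups (tolerated_failures == 1) together with single block
 * groups (tolerated_failures == 0) gets min(1, 0) == 0, i.e. it cannot
 * tolerate losing any device, because the weakest profile wins.
 */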
int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
{
	struct list_head *head;
	struct btrfs_device *dev;
	struct btrfs_super_block *sb;
	struct btrfs_dev_item *dev_item;
	int ret;
	int do_barriers;
	int max_errors;
	int total_errors = 0;
	u64 flags;

	do_barriers = !btrfs_test_opt(fs_info, NOBARRIER);

	/*
	 * max_mirrors == 0 indicates we're from commit_transaction,
	 * not from fsync where the tree roots in fs_info have not
	 * been consistent on disk.
	 */
	if (max_mirrors == 0)
		backup_super_roots(fs_info);

	sb = fs_info->super_for_commit;
	dev_item = &sb->dev_item;

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	head = &fs_info->fs_devices->devices;
	max_errors = btrfs_super_num_devices(fs_info->super_copy) - 1;

	if (do_barriers) {
		ret = barrier_all_devices(fs_info);
		if (ret) {
			mutex_unlock(
				&fs_info->fs_devices->device_list_mutex);
			btrfs_handle_fs_error(fs_info, ret,
					      "errors while submitting device barriers.");
			return ret;
		}
	}

	list_for_each_entry(dev, head, dev_list) {
		if (!dev->bdev) {
			total_errors++;
			continue;
		}
		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
			continue;

		btrfs_set_stack_device_generation(dev_item, 0);
		btrfs_set_stack_device_type(dev_item, dev->type);
		btrfs_set_stack_device_id(dev_item, dev->devid);
		btrfs_set_stack_device_total_bytes(dev_item,
						   dev->commit_total_bytes);
		btrfs_set_stack_device_bytes_used(dev_item,
						  dev->commit_bytes_used);
		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
		memcpy(dev_item->fsid, dev->fs_devices->metadata_uuid,
		       BTRFS_FSID_SIZE);

		flags = btrfs_super_flags(sb);
		btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);

		ret = btrfs_validate_write_super(fs_info, sb);
		if (ret < 0) {
			mutex_unlock(&fs_info->fs_devices->device_list_mutex);
			btrfs_handle_fs_error(fs_info, -EUCLEAN,
				"unexpected superblock corruption detected");
			return -EUCLEAN;
		}

		ret = write_dev_supers(dev, sb, max_mirrors);
		if (ret)
			total_errors++;
	}
	if (total_errors > max_errors) {
		btrfs_err(fs_info, "%d errors while writing supers",
			  total_errors);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);

		/* FUA is masked off if unsupported and can't be the reason */
		btrfs_handle_fs_error(fs_info, -EIO,
				      "%d errors while writing supers",
				      total_errors);
		return -EIO;
	}

	total_errors = 0;
	list_for_each_entry(dev, head, dev_list) {
		if (!dev->bdev)
			continue;
		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
			continue;

		ret = wait_dev_supers(dev, max_mirrors);
		if (ret)
			total_errors++;
	}
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
	if (total_errors > max_errors) {
		btrfs_handle_fs_error(fs_info, -EIO,
				      "%d errors while writing supers",
				      total_errors);
		return -EIO;
	}
	return 0;
}
/* Drop a fs root from the radix tree and free it. */
void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
				 struct btrfs_root *root)
{
	bool drop_ref = false;

	spin_lock(&fs_info->fs_roots_radix_lock);
	radix_tree_delete(&fs_info->fs_roots_radix,
			  (unsigned long)root->root_key.objectid);
	if (test_and_clear_bit(BTRFS_ROOT_IN_RADIX, &root->state))
		drop_ref = true;
	spin_unlock(&fs_info->fs_roots_radix_lock);

	if (BTRFS_FS_ERROR(fs_info)) {
		ASSERT(root->log_root == NULL);
		if (root->reloc_root) {
			btrfs_put_root(root->reloc_root);
			root->reloc_root = NULL;
		}
	}

	if (drop_ref)
		btrfs_put_root(root);
}
int btrfs_commit_super(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_trans_handle *trans;

	mutex_lock(&fs_info->cleaner_mutex);
	btrfs_run_delayed_iputs(fs_info);
	mutex_unlock(&fs_info->cleaner_mutex);
	wake_up_process(fs_info->cleaner_kthread);

	/* wait until ongoing cleanup work done */
	down_write(&fs_info->cleanup_work_sem);
	up_write(&fs_info->cleanup_work_sem);

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
	return btrfs_commit_transaction(trans);
}
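/*
 * Illustrative unmount-time sequence (see close_ctree()):
 *
 *	btrfs_run_delayed_iputs()  - drain pending inode puts first
 *	btrfs_join_transaction()   - attach to or start a transaction
 *	btrfs_commit_transaction() - persist the final consistent state
 *
 * btrfs_join_transaction() can return an ERR_PTR(), which is propagated
 * to the caller unchanged.
 */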
static void warn_about_uncommitted_trans(struct btrfs_fs_info *fs_info)
{
	struct btrfs_transaction *trans;
	struct btrfs_transaction *tmp;

	if (list_empty(&fs_info->trans_list))
		return;

	/*
	 * This function is only called at the very end of close_ctree(),
	 * thus no other running transaction, no need to take trans_lock.
	 */
	ASSERT(test_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags));
	list_for_each_entry_safe(trans, tmp, &fs_info->trans_list, list) {
		struct extent_state *cached = NULL;
		u64 dirty_bytes = 0;
		u64 cur = 0;
		u64 found_start;
		u64 found_end;

		while (find_first_extent_bit(&trans->dirty_pages, cur,
			&found_start, &found_end, EXTENT_DIRTY, &cached)) {
			dirty_bytes += found_end + 1 - found_start;
			cur = found_end + 1;
		}
		btrfs_warn(fs_info,
	"transaction %llu (with %llu dirty metadata bytes) is not committed",
			   trans->transid, dirty_bytes);
		btrfs_cleanup_one_transaction(trans, fs_info);

		if (trans == fs_info->running_transaction)
			fs_info->running_transaction = NULL;
		list_del_init(&trans->list);

		btrfs_put_transaction(trans);
		trace_btrfs_transaction_commit(fs_info);
	}
}
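/*
 * The dirty byte count reported above is accumulated by walking the
 * transaction's dirty_pages tree: the returned ranges are inclusive on
 * both ends, so each one contributes found_end + 1 - found_start bytes
 * and the next search resumes at found_end + 1.
 */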
void __cold close_ctree(struct btrfs_fs_info *fs_info)
{
	int ret;

	set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);

	/*
	 * If we had UNFINISHED_DROPS we could still be processing them, so
	 * clear that bit and wake up relocation so it can stop.
	 * We must do this before stopping the block group reclaim task, because
	 * at btrfs_relocate_block_group() we wait for this bit, and after the
	 * wait we stop with -EINTR if btrfs_fs_closing() returns non-zero - we
	 * have just set BTRFS_FS_CLOSING_START, so btrfs_fs_closing() will
	 * return 1.
	 */
	btrfs_wake_unfinished_drop(fs_info);

	/*
	 * We may have the reclaim task running and relocating a data block group,
	 * in which case it may create delayed iputs. So stop it before we park
	 * the cleaner kthread, otherwise we can get new delayed iputs after
	 * parking the cleaner, and that can make the async reclaim task hang
	 * if it's waiting for delayed iputs to complete, since the cleaner is
	 * parked and can not run delayed iputs - this will make us hang when
	 * trying to stop the async reclaim task.
	 */
	cancel_work_sync(&fs_info->reclaim_bgs_work);

	/*
	 * We don't want the cleaner to start new transactions, add more delayed
	 * iputs, etc. while we're closing. We can't use kthread_stop() yet
	 * because that frees the task_struct, and the transaction kthread might
	 * still try to wake up the cleaner.
	 */
	kthread_park(fs_info->cleaner_kthread);

	/* wait for the qgroup rescan worker to stop */
	btrfs_qgroup_wait_for_completion(fs_info, false);

	/* wait for the uuid_scan task to finish */
	down(&fs_info->uuid_tree_rescan_sem);
	/* avoid complaints from lockdep et al., set sem back to initial state */
	up(&fs_info->uuid_tree_rescan_sem);

	/* pause restriper - we want to resume on mount */
	btrfs_pause_balance(fs_info);

	btrfs_dev_replace_suspend_for_unmount(fs_info);

	btrfs_scrub_cancel(fs_info);

	/* wait for any defraggers to finish */
	wait_event(fs_info->transaction_wait,
		   (atomic_read(&fs_info->defrag_running) == 0));

	/* clear out the rbtree of defraggable inodes */
	btrfs_cleanup_defrag_inodes(fs_info);

	/*
	 * After we parked the cleaner kthread, ordered extents may have
	 * completed and created new delayed iputs. If one of the async reclaim
	 * tasks is running and in the RUN_DELAYED_IPUTS flush state, then we
	 * can hang forever trying to stop it, because if a delayed iput is
	 * added after it ran btrfs_run_delayed_iputs() and before it called
	 * btrfs_wait_on_delayed_iputs(), it will hang forever since there is
	 * no one else to run iputs.
	 *
	 * So wait for all ongoing ordered extents to complete and then run
	 * delayed iputs. This works because once we reach this point no one
	 * can either create new ordered extents nor create delayed iputs
	 * through some other means.
	 *
	 * Also note that btrfs_wait_ordered_roots() is not safe here, because
	 * it waits for BTRFS_ORDERED_COMPLETE to be set on an ordered extent,
	 * but the delayed iput for the respective inode is made only when doing
	 * the final btrfs_put_ordered_extent() (which must happen at
	 * btrfs_finish_ordered_io() when we are unmounting).
	 */
	btrfs_flush_workqueue(fs_info->endio_write_workers);
	/* Ordered extents for free space inodes. */
	btrfs_flush_workqueue(fs_info->endio_freespace_worker);
	btrfs_run_delayed_iputs(fs_info);

	cancel_work_sync(&fs_info->async_reclaim_work);
	cancel_work_sync(&fs_info->async_data_reclaim_work);
	cancel_work_sync(&fs_info->preempt_reclaim_work);

	/* Cancel or finish ongoing discard work */
	btrfs_discard_cleanup(fs_info);

	if (!sb_rdonly(fs_info->sb)) {
		/*
		 * The cleaner kthread is stopped, so do one final pass over
		 * unused block groups.
		 */
		btrfs_delete_unused_bgs(fs_info);

		/*
		 * There might be existing delayed inode workers still running
		 * and holding an empty delayed inode item. We must wait for
		 * them to complete first because they can create a transaction.
		 * This happens when someone calls btrfs_balance_delayed_items()
		 * and then a transaction commit runs the same delayed nodes
		 * before any delayed worker has done something with the nodes.
		 * We must wait for any worker here and not at transaction
		 * commit time since that could cause a deadlock.
		 * This is a very rare case.
		 */
		btrfs_flush_workqueue(fs_info->delayed_workers);

		ret = btrfs_commit_super(fs_info);
		if (ret)
			btrfs_err(fs_info, "commit super ret %d", ret);
	}

	if (BTRFS_FS_ERROR(fs_info))
		btrfs_error_commit_super(fs_info);

	kthread_stop(fs_info->transaction_kthread);
	kthread_stop(fs_info->cleaner_kthread);

	ASSERT(list_empty(&fs_info->delayed_iputs));
	set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);

	if (btrfs_check_quota_leak(fs_info)) {
		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
		btrfs_err(fs_info, "qgroup reserved space leaked");
	}

	btrfs_free_qgroup_config(fs_info);
	ASSERT(list_empty(&fs_info->delalloc_roots));

	if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
		btrfs_info(fs_info, "at unmount delalloc count %lld",
			   percpu_counter_sum(&fs_info->delalloc_bytes));
	}

	if (percpu_counter_sum(&fs_info->ordered_bytes))
		btrfs_info(fs_info, "at unmount dio bytes count %lld",
			   percpu_counter_sum(&fs_info->ordered_bytes));

	btrfs_sysfs_remove_mounted(fs_info);
	btrfs_sysfs_remove_fsid(fs_info->fs_devices);

	btrfs_put_block_group_cache(fs_info);

	/*
	 * Make sure there is no read request to submit after we have
	 * stopped all workers.
	 */
	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
	btrfs_stop_all_workers(fs_info);

	/* We shouldn't have any transaction open at this point */
	warn_about_uncommitted_trans(fs_info);

	clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
	free_root_pointers(fs_info, true);
	btrfs_free_fs_roots(fs_info);

	/*
	 * We must free the block groups after dropping the fs_roots as we could
	 * have had an IO error and have left over tree log blocks that aren't
	 * cleaned up until the fs roots are freed. This makes the block group
	 * accounting appear to be wrong because there's pending reserved bytes,
	 * so make sure we do the block group cleanup afterwards.
	 */
	btrfs_free_block_groups(fs_info);

	iput(fs_info->btree_inode);

	btrfs_mapping_tree_free(&fs_info->mapping_tree);
	btrfs_close_devices(fs_info->fs_devices);
}
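/*
 * A rough map of the teardown ordering above: the block group reclaim
 * work is cancelled before the cleaner is parked (so no new delayed
 * iputs appear once the cleaner can no longer run them), ordered extent
 * completion is flushed before the async reclaim workers are cancelled
 * (so none of them can block on a delayed iput that nobody would run),
 * and kthread_stop() is delayed until after the final commit because
 * earlier stages still dereference the kthreads' task_structs.
 */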
void btrfs_mark_buffer_dirty(struct btrfs_trans_handle *trans,
			     struct extent_buffer *buf)
{
	struct btrfs_fs_info *fs_info = buf->fs_info;
	u64 transid = btrfs_header_generation(buf);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	/*
	 * This is a fast path so only do this check if we have sanity tests
	 * enabled. Normal people shouldn't be using unmapped buffers as dirty
	 * outside of the sanity tests.
	 */
	if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags)))
		return;
#endif
	/* This is an active transaction (its state < TRANS_STATE_UNBLOCKED). */
	ASSERT(trans->transid == fs_info->generation);
	btrfs_assert_tree_write_locked(buf);
	if (unlikely(transid != fs_info->generation)) {
		btrfs_abort_transaction(trans, -EUCLEAN);
		btrfs_crit(fs_info,
"dirty buffer transid mismatch, logical %llu found transid %llu running transid %llu",
			   buf->start, transid, fs_info->generation);
	}
	set_extent_buffer_dirty(buf);
}
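/*
 * Callers must hold the tree write lock and must already have CoW'ed
 * the buffer into the running transaction, so that its header
 * generation equals trans->transid. The ASSERT and the transid check
 * above enforce exactly that invariant and abort the transaction if it
 * is violated.
 */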
static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
					int flush_delayed)
{
	/*
	 * Looks as though older kernels can get into trouble with
	 * this code; they end up stuck in balance_dirty_pages forever.
	 */
	int ret;

	if (current->flags & PF_MEMALLOC)
		return;

	if (flush_delayed)
		btrfs_balance_delayed_items(fs_info);

	ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
				       BTRFS_DIRTY_METADATA_THRESH,
				       fs_info->dirty_metadata_batch);
	if (ret > 0) {
		balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping);
	}
}

void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info)
{
	__btrfs_btree_balance_dirty(fs_info, 1);
}

void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info)
{
	__btrfs_btree_balance_dirty(fs_info, 0);
}
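/*
 * Both wrappers throttle metadata writers: once dirty_metadata_bytes
 * exceeds BTRFS_DIRTY_METADATA_THRESH (__percpu_counter_compare()
 * returns > 0), the caller is diverted into
 * balance_dirty_pages_ratelimited() on the btree inode. The nodelay
 * variant skips btrfs_balance_delayed_items() for call sites where
 * flushing delayed items is not safe or not wanted.
 */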
static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
{
	/* cleanup FS via transaction */
	btrfs_cleanup_transaction(fs_info);

	mutex_lock(&fs_info->cleaner_mutex);
	btrfs_run_delayed_iputs(fs_info);
	mutex_unlock(&fs_info->cleaner_mutex);

	down_write(&fs_info->cleanup_work_sem);
	up_write(&fs_info->cleanup_work_sem);
}
static void btrfs_drop_all_logs(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *gang[8];
	u64 root_objectid = 0;
	int ret;

	spin_lock(&fs_info->fs_roots_radix_lock);
	while ((ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
					     (void **)gang, root_objectid,
					     ARRAY_SIZE(gang))) != 0) {
		int i;

		for (i = 0; i < ret; i++)
			gang[i] = btrfs_grab_root(gang[i]);
		spin_unlock(&fs_info->fs_roots_radix_lock);

		for (i = 0; i < ret; i++) {
			if (!gang[i])
				continue;
			root_objectid = gang[i]->root_key.objectid;
			btrfs_free_log(NULL, gang[i]);
			btrfs_put_root(gang[i]);
		}
		root_objectid++;
		spin_lock(&fs_info->fs_roots_radix_lock);
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	btrfs_free_log_root_tree(NULL, fs_info);
}
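/*
 * Iteration pattern above: radix_tree_gang_lookup() returns up to
 * ARRAY_SIZE(gang) roots starting at root_objectid. References are
 * grabbed under fs_roots_radix_lock, the lock is dropped for the actual
 * log freeing, and the next pass resumes one past the last objectid
 * processed, so every root is visited exactly once.
 */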
static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
{
	struct btrfs_ordered_extent *ordered;

	spin_lock(&root->ordered_extent_lock);
	/*
	 * This will just short circuit the ordered completion stuff, which
	 * will make sure the ordered extent gets properly cleaned up.
	 */
	list_for_each_entry(ordered, &root->ordered_extents,
			    root_extent_list)
		set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
	spin_unlock(&root->ordered_extent_lock);
}
static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	LIST_HEAD(splice);

	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice)) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);

		spin_unlock(&fs_info->ordered_root_lock);
		btrfs_destroy_ordered_extents(root);

		cond_resched();
		spin_lock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&fs_info->ordered_root_lock);

	/*
	 * We need this here because if we've been flipped read-only we won't
	 * get sync() from the umount, so we need to make sure any ordered
	 * extents that haven't had their dirty pages start writeout yet
	 * actually get run and error out properly.
	 */
	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
}
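/*
 * The list splice above lets us walk the ordered roots without holding
 * ordered_root_lock across btrfs_destroy_ordered_extents(): each root
 * is moved back onto fs_info->ordered_roots before the lock is dropped,
 * so the list remains consistent even if new roots are queued
 * concurrently.
 */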
static void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
				       struct btrfs_fs_info *fs_info)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;

	delayed_refs = &trans->delayed_refs;

	spin_lock(&delayed_refs->lock);
	if (atomic_read(&delayed_refs->num_entries) == 0) {
		spin_unlock(&delayed_refs->lock);
		btrfs_debug(fs_info, "delayed_refs has NO entry");
		return;
	}

	while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) {
		struct btrfs_delayed_ref_head *head;
		struct rb_node *n;
		bool pin_bytes = false;

		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
		if (btrfs_delayed_ref_lock(delayed_refs, head))
			continue;

		spin_lock(&head->lock);
		while ((n = rb_first_cached(&head->ref_tree)) != NULL) {
			ref = rb_entry(n, struct btrfs_delayed_ref_node,
				       ref_node);
			rb_erase_cached(&ref->ref_node, &head->ref_tree);
			RB_CLEAR_NODE(&ref->ref_node);
			if (!list_empty(&ref->add_list))
				list_del(&ref->add_list);
			atomic_dec(&delayed_refs->num_entries);
			btrfs_put_delayed_ref(ref);
			btrfs_delayed_refs_rsv_release(fs_info, 1, 0);
		}
		if (head->must_insert_reserved)
			pin_bytes = true;
		btrfs_free_delayed_extent_op(head->extent_op);
		btrfs_delete_ref_head(delayed_refs, head);
		spin_unlock(&head->lock);
		spin_unlock(&delayed_refs->lock);
		mutex_unlock(&head->mutex);

		if (pin_bytes) {
			struct btrfs_block_group *cache;

			cache = btrfs_lookup_block_group(fs_info, head->bytenr);
			BUG_ON(!cache);

			spin_lock(&cache->space_info->lock);
			spin_lock(&cache->lock);
			cache->pinned += head->num_bytes;
			btrfs_space_info_update_bytes_pinned(fs_info,
				cache->space_info, head->num_bytes);
			cache->reserved -= head->num_bytes;
			cache->space_info->bytes_reserved -= head->num_bytes;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);

			btrfs_put_block_group(cache);

			btrfs_error_unpin_extent_range(fs_info, head->bytenr,
				head->bytenr + head->num_bytes - 1);
		}
		btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
		btrfs_put_delayed_ref_head(head);
		cond_resched();
		spin_lock(&delayed_refs->lock);
	}
	btrfs_qgroup_destroy_extent_records(trans);

	spin_unlock(&delayed_refs->lock);
}
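/*
 * Lock hierarchy in the loop above: delayed_refs->lock is the outer
 * lock; head->mutex (taken via btrfs_delayed_ref_lock()) and head->lock
 * nest inside it. For heads that still had must_insert_reserved set,
 * the bytes are moved from the block group's "reserved" counter back to
 * "pinned" and then released through the error unpin path, so the space
 * accounting stays balanced even though the extent item was never
 * inserted.
 */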
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
{
	struct btrfs_inode *btrfs_inode;
	LIST_HEAD(splice);

	spin_lock(&root->delalloc_lock);
	list_splice_init(&root->delalloc_inodes, &splice);

	while (!list_empty(&splice)) {
		struct inode *inode = NULL;

		btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
					       delalloc_inodes);
		__btrfs_del_delalloc_inode(root, btrfs_inode);
		spin_unlock(&root->delalloc_lock);

		/*
		 * Make sure we get a live inode and that it'll not disappear
		 * meanwhile.
		 */
		inode = igrab(&btrfs_inode->vfs_inode);
		if (inode) {
			unsigned int nofs_flag;

			nofs_flag = memalloc_nofs_save();
			invalidate_inode_pages2(inode->i_mapping);
			memalloc_nofs_restore(nofs_flag);
			iput(inode);
		}
		spin_lock(&root->delalloc_lock);
	}
	spin_unlock(&root->delalloc_lock);
}
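/*
 * invalidate_inode_pages2() runs under memalloc_nofs_save()/restore()
 * above so that any allocation it performs cannot recurse into
 * filesystem reclaim while we are tearing the filesystem down.
 */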
static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	LIST_HEAD(splice);

	spin_lock(&fs_info->delalloc_root_lock);
	list_splice_init(&fs_info->delalloc_roots, &splice);
	while (!list_empty(&splice)) {
		root = list_first_entry(&splice, struct btrfs_root,
					delalloc_root);
		root = btrfs_grab_root(root);
		BUG_ON(!root);
		spin_unlock(&fs_info->delalloc_root_lock);

		btrfs_destroy_delalloc_inodes(root);
		btrfs_put_root(root);

		spin_lock(&fs_info->delalloc_root_lock);
	}
	spin_unlock(&fs_info->delalloc_root_lock);
}
static void btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
					 struct extent_io_tree *dirty_pages,
					 int mark)
{
	struct extent_buffer *eb;
	u64 start = 0;
	u64 end;

	while (find_first_extent_bit(dirty_pages, start, &start, &end,
				     mark, NULL)) {
		clear_extent_bits(dirty_pages, start, end, mark);
		while (start <= end) {
			eb = find_extent_buffer(fs_info, start);
			start += fs_info->nodesize;
			if (!eb)
				continue;

			btrfs_tree_lock(eb);
			wait_on_extent_buffer_writeback(eb);
			btrfs_clear_buffer_dirty(NULL, eb);
			btrfs_tree_unlock(eb);

			free_extent_buffer_stale(eb);
		}
	}
}
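/*
 * The inner loop above steps through each dirty range one tree block at
 * a time: with a 16K nodesize, for example, a 64K dirty range yields
 * four find_extent_buffer() lookups. Blocks that were never cached
 * return NULL and are simply skipped.
 */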
static void btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
					struct extent_io_tree *unpin)
{
	u64 start;
	u64 end;

	while (1) {
		struct extent_state *cached_state = NULL;

		/*
		 * btrfs_finish_extent_commit() may get the same range as
		 * ours between find_first_extent_bit and clear_extent_dirty.
		 * Hence, hold the unused_bg_unpin_mutex to avoid double
		 * unpinning the same extent range.
		 */
		mutex_lock(&fs_info->unused_bg_unpin_mutex);
		if (!find_first_extent_bit(unpin, 0, &start, &end,
					   EXTENT_DIRTY, &cached_state)) {
			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
			break;
		}

		clear_extent_dirty(unpin, start, end, &cached_state);
		free_extent_state(cached_state);
		btrfs_error_unpin_extent_range(fs_info, start, end);
		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
		cond_resched();
	}
}
static void btrfs_cleanup_bg_io(struct btrfs_block_group *cache)
{
	struct inode *inode;

	inode = cache->io_ctl.inode;
	if (inode) {
		unsigned int nofs_flag;

		nofs_flag = memalloc_nofs_save();
		invalidate_inode_pages2(inode->i_mapping);
		memalloc_nofs_restore(nofs_flag);

		BTRFS_I(inode)->generation = 0;
		cache->io_ctl.inode = NULL;
		iput(inode);
	}
	ASSERT(cache->io_ctl.pages == NULL);
	btrfs_put_block_group(cache);
}
void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
			     struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group *cache;

	spin_lock(&cur_trans->dirty_bgs_lock);
	while (!list_empty(&cur_trans->dirty_bgs)) {
		cache = list_first_entry(&cur_trans->dirty_bgs,
					 struct btrfs_block_group,
					 dirty_list);

		if (!list_empty(&cache->io_list)) {
			spin_unlock(&cur_trans->dirty_bgs_lock);
			list_del_init(&cache->io_list);
			btrfs_cleanup_bg_io(cache);
			spin_lock(&cur_trans->dirty_bgs_lock);
		}

		list_del_init(&cache->dirty_list);
		spin_lock(&cache->lock);
		cache->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&cache->lock);

		spin_unlock(&cur_trans->dirty_bgs_lock);
		btrfs_put_block_group(cache);
		btrfs_delayed_refs_rsv_release(fs_info, 1, 0);
		spin_lock(&cur_trans->dirty_bgs_lock);
	}
	spin_unlock(&cur_trans->dirty_bgs_lock);

	/*
	 * Refer to the definition of the io_bgs member for details on why
	 * it's safe to use it without any locking.
	 */
	while (!list_empty(&cur_trans->io_bgs)) {
		cache = list_first_entry(&cur_trans->io_bgs,
					 struct btrfs_block_group,
					 io_list);

		list_del_init(&cache->io_list);
		spin_lock(&cache->lock);
		cache->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&cache->lock);
		btrfs_cleanup_bg_io(cache);
	}
}
void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
				   struct btrfs_fs_info *fs_info)
{
	struct btrfs_device *dev, *tmp;

	btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
	ASSERT(list_empty(&cur_trans->dirty_bgs));
	ASSERT(list_empty(&cur_trans->io_bgs));

	list_for_each_entry_safe(dev, tmp, &cur_trans->dev_update_list,
				 post_commit_list) {
		list_del_init(&dev->post_commit_list);
	}

	btrfs_destroy_delayed_refs(cur_trans, fs_info);

	cur_trans->state = TRANS_STATE_COMMIT_START;
	wake_up(&fs_info->transaction_blocked_wait);

	cur_trans->state = TRANS_STATE_UNBLOCKED;
	wake_up(&fs_info->transaction_wait);

	btrfs_destroy_delayed_inodes(fs_info);

	btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
				     EXTENT_DIRTY);
	btrfs_destroy_pinned_extent(fs_info, &cur_trans->pinned_extents);

	cur_trans->state = TRANS_STATE_COMPLETED;
	wake_up(&cur_trans->commit_wait);
}
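/*
 * The state transitions above deliberately mirror a normal commit so
 * that anything blocked on the usual wait queues can make progress:
 * TRANS_STATE_COMMIT_START wakes transaction_blocked_wait,
 * TRANS_STATE_UNBLOCKED wakes transaction_wait, and
 * TRANS_STATE_COMPLETED finally wakes commit_wait.
 */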
static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
{
	struct btrfs_transaction *t;

	mutex_lock(&fs_info->transaction_kthread_mutex);

	spin_lock(&fs_info->trans_lock);
	while (!list_empty(&fs_info->trans_list)) {
		t = list_first_entry(&fs_info->trans_list,
				     struct btrfs_transaction, list);
		if (t->state >= TRANS_STATE_COMMIT_PREP) {
			refcount_inc(&t->use_count);
			spin_unlock(&fs_info->trans_lock);
			btrfs_wait_for_commit(fs_info, t->transid);
			btrfs_put_transaction(t);
			spin_lock(&fs_info->trans_lock);
			continue;
		}
		if (t == fs_info->running_transaction) {
			t->state = TRANS_STATE_COMMIT_DOING;
			spin_unlock(&fs_info->trans_lock);
			/*
			 * We wait for 0 num_writers since we don't hold a trans
			 * handle open currently for this transaction.
			 */
			wait_event(t->writer_wait,
				   atomic_read(&t->num_writers) == 0);
		} else {
			spin_unlock(&fs_info->trans_lock);
		}
		btrfs_cleanup_one_transaction(t, fs_info);

		spin_lock(&fs_info->trans_lock);
		if (t == fs_info->running_transaction)
			fs_info->running_transaction = NULL;
		list_del_init(&t->list);
		spin_unlock(&fs_info->trans_lock);

		btrfs_put_transaction(t);
		trace_btrfs_transaction_commit(fs_info);
		spin_lock(&fs_info->trans_lock);
	}
	spin_unlock(&fs_info->trans_lock);
	btrfs_destroy_all_ordered_extents(fs_info);
	btrfs_destroy_delayed_inodes(fs_info);
	btrfs_assert_delayed_root_empty(fs_info);
	btrfs_destroy_all_delalloc_inodes(fs_info);
	btrfs_drop_all_logs(fs_info);
	mutex_unlock(&fs_info->transaction_kthread_mutex);

	return 0;
}
int btrfs_init_root_free_objectid(struct btrfs_root *root)
{
	struct btrfs_path *path;
	int ret;
	struct extent_buffer *l;
	struct btrfs_key search_key;
	struct btrfs_key found_key;
	int slot;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	search_key.objectid = BTRFS_LAST_FREE_OBJECTID;
	search_key.type = -1;
	search_key.offset = (u64)-1;
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret < 0)
		goto error;
	BUG_ON(ret == 0); /* Corruption */
	if (path->slots[0] > 0) {
		slot = path->slots[0] - 1;
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);
		root->free_objectid = max_t(u64, found_key.objectid + 1,
					    BTRFS_FIRST_FREE_OBJECTID);
	} else {
		root->free_objectid = BTRFS_FIRST_FREE_OBJECTID;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
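/*
 * The search above keys on { BTRFS_LAST_FREE_OBJECTID, (u8)-1, (u64)-1 },
 * which sorts after every item that can exist in the tree, so
 * btrfs_search_slot() is expected to return 1 (key not found) with
 * path->slots[0] pointing just past the last item. The preceding slot
 * therefore holds the highest objectid in use, and free_objectid is set
 * one past it, clamped up to BTRFS_FIRST_FREE_OBJECTID.
 */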
int btrfs_get_free_objectid(struct btrfs_root *root, u64 *objectid)
{
	int ret;

	mutex_lock(&root->objectid_mutex);

	if (unlikely(root->free_objectid >= BTRFS_LAST_FREE_OBJECTID)) {
		btrfs_warn(root->fs_info,
			   "the objectid of root %llu reaches its highest value",
			   root->root_key.objectid);
		ret = -ENOSPC;
		goto out;
	}

	*objectid = root->free_objectid++;
	ret = 0;
out:
	mutex_unlock(&root->objectid_mutex);
	return ret;
}