// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014 Facebook.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/stacktrace.h>
#include "delayed-ref.h"
#include "ref-verify.h"
#include "accessors.h"

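/*
 * Overview: ref-verify keeps an in-memory mirror of every extent reference
 * the extent tree is supposed to contain and checks each ref modification
 * against it, dumping the recorded history when the two disagree.  All of
 * this is debugging machinery guarded by the REF_VERIFY mount option (see
 * the btrfs_test_opt() checks below).
 */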
/*
 * Used to keep track of the roots and number of refs each root has for a given
 * bytenr.  This just tracks the number of direct references, no shared
 * references.
 */
struct root_entry {
	u64 root_objectid;
	u64 num_refs;
	struct rb_node node;
};

/*
 * These are meant to represent what should exist in the extent tree, these can
 * be used to verify the extent tree is consistent as these should all match
 * what the extent tree says.
 */
struct ref_entry {
	u64 root_objectid;
	u64 parent;
	u64 owner;
	u64 offset;
	u64 num_refs;
	struct rb_node node;
};

#define MAX_TRACE	16

/*
 * Whenever we add/remove a reference we record the action.  The action maps
 * back to the delayed ref action.  We hold the ref we are changing in the
 * action so we can account for the history properly, and we record the root we
 * were called with since it could be different from ref_root.  We also store
 * stack traces because that's how I roll.
 */
struct ref_action {
	int action;
	u64 root;
	struct ref_entry ref;
	struct list_head list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
};

/*
 * One of these for every block we reference, it holds the roots and references
 * to it as well as all of the ref actions that have occurred to it.  We never
 * free it until we unmount the file system in order to make sure re-allocations
 * are happening properly.
 */
struct block_entry {
	u64 bytenr;
	u64 len;
	u64 num_refs;
	int metadata;
	int from_disk;
	struct rb_root roots;
	struct rb_root refs;
	struct rb_node node;
	struct list_head actions;
};

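/*
 * The cache itself: fs_info->block_tree is an rbtree of block_entries keyed by
 * bytenr.  Each block_entry carries an rbtree of root_entries (which roots
 * hold a direct ref), an rbtree of ref_entries (every individual ref, ordered
 * by comp_refs() below), and a list of ref_actions recording the history of
 * modifications to that block.
 */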
static struct block_entry *insert_block_entry(struct rb_root *root,
					      struct block_entry *be)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct block_entry *entry;

	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct block_entry, node);
		if (entry->bytenr > be->bytenr)
			p = &(*p)->rb_left;
		else if (entry->bytenr < be->bytenr)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(&be->node, parent_node, p);
	rb_insert_color(&be->node, root);
	return NULL;
}

static struct block_entry *lookup_block_entry(struct rb_root *root, u64 bytenr)
{
	struct rb_node *n;
	struct block_entry *entry = NULL;

	n = root->rb_node;
	while (n) {
		entry = rb_entry(n, struct block_entry, node);
		if (entry->bytenr < bytenr)
			n = n->rb_right;
		else if (entry->bytenr > bytenr)
			n = n->rb_left;
		else
			return entry;
	}
	return NULL;
}

static struct root_entry *insert_root_entry(struct rb_root *root,
					    struct root_entry *re)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct root_entry *entry;

	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct root_entry, node);
		if (entry->root_objectid > re->root_objectid)
			p = &(*p)->rb_left;
		else if (entry->root_objectid < re->root_objectid)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(&re->node, parent_node, p);
	rb_insert_color(&re->node, root);
	return NULL;
}

static int comp_refs(struct ref_entry *ref1, struct ref_entry *ref2)
{
	if (ref1->root_objectid < ref2->root_objectid)
		return -1;
	if (ref1->root_objectid > ref2->root_objectid)
		return 1;
	if (ref1->parent < ref2->parent)
		return -1;
	if (ref1->parent > ref2->parent)
		return 1;
	if (ref1->owner < ref2->owner)
		return -1;
	if (ref1->owner > ref2->owner)
		return 1;
	if (ref1->offset < ref2->offset)
		return -1;
	if (ref1->offset > ref2->offset)
		return 1;
	return 0;
}

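/*
 * comp_refs() defines the sort order of the per-block refs tree: by
 * root_objectid, then parent, then owner, then offset.  insert_ref_entry()
 * returns the already existing entry on an exact match so callers can bump
 * its count instead of inserting a duplicate.
 */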
static struct ref_entry *insert_ref_entry(struct rb_root *root,
					  struct ref_entry *ref)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct ref_entry *entry;
	int cmp;

	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct ref_entry, node);
		cmp = comp_refs(entry, ref);
		if (cmp > 0)
			p = &(*p)->rb_left;
		else if (cmp < 0)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(&ref->node, parent_node, p);
	rb_insert_color(&ref->node, root);
	return NULL;
}

static struct root_entry *lookup_root_entry(struct rb_root *root, u64 objectid)
{
	struct rb_node *n;
	struct root_entry *entry = NULL;

	n = root->rb_node;
	while (n) {
		entry = rb_entry(n, struct root_entry, node);
		if (entry->root_objectid < objectid)
			n = n->rb_right;
		else if (entry->root_objectid > objectid)
			n = n->rb_left;
		else
			return entry;
	}
	return NULL;
}

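/*
 * Stack traces are only recorded when CONFIG_STACKTRACE is available;
 * otherwise saving is a no-op and printing just notes the missing support.
 */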
#ifdef CONFIG_STACKTRACE
static void __save_stack_trace(struct ref_action *ra)
{
	ra->trace_len = stack_trace_save(ra->trace, MAX_TRACE, 2);
}

static void __print_stack_trace(struct btrfs_fs_info *fs_info,
				struct ref_action *ra)
{
	if (ra->trace_len == 0) {
		btrfs_err(fs_info, " ref-verify: no stacktrace");
		return;
	}
	stack_trace_print(ra->trace, ra->trace_len, 2);
}
#else
static inline void __save_stack_trace(struct ref_action *ra)
{
}

static inline void __print_stack_trace(struct btrfs_fs_info *fs_info,
				       struct ref_action *ra)
{
	btrfs_err(fs_info, " ref-verify: no stacktrace support");
}
#endif

static void free_block_entry(struct block_entry *be)
{
	struct root_entry *re;
	struct ref_entry *ref;
	struct ref_action *ra;
	struct rb_node *n;

	while ((n = rb_first(&be->roots))) {
		re = rb_entry(n, struct root_entry, node);
		rb_erase(&re->node, &be->roots);
		kfree(re);
	}

	while ((n = rb_first(&be->refs))) {
		ref = rb_entry(n, struct ref_entry, node);
		rb_erase(&ref->node, &be->refs);
		kfree(ref);
	}

	while (!list_empty(&be->actions)) {
		ra = list_first_entry(&be->actions, struct ref_action,
				      list);
		list_del(&ra->list);
		kfree(ra);
	}
	kfree(be);
}

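/*
 * Find or allocate the block_entry for a bytenr and insert a root_entry for
 * root_objectid into it.  Note that on success this returns with
 * fs_info->ref_verify_lock held; the caller is responsible for unlocking.
 */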
static struct block_entry *add_block_entry(struct btrfs_fs_info *fs_info,
					   u64 bytenr, u64 len,
					   u64 root_objectid)
{
	struct block_entry *be = NULL, *exist;
	struct root_entry *re = NULL;

	re = kzalloc(sizeof(struct root_entry), GFP_NOFS);
	be = kzalloc(sizeof(struct block_entry), GFP_NOFS);
	if (!be || !re) {
		kfree(re);
		kfree(be);
		return ERR_PTR(-ENOMEM);
	}
	be->bytenr = bytenr;
	be->len = len;

	re->root_objectid = root_objectid;
	re->num_refs = 0;

	spin_lock(&fs_info->ref_verify_lock);
	exist = insert_block_entry(&fs_info->block_tree, be);
	if (exist) {
		if (root_objectid) {
			struct root_entry *exist_re;

			exist_re = insert_root_entry(&exist->roots, re);
			if (exist_re)
				kfree(re);
		} else {
			kfree(re);
		}
		kfree(be);
		return exist;
	}

	be->num_refs = 0;
	be->metadata = 0;
	be->from_disk = 0;
	be->roots = RB_ROOT;
	be->refs = RB_ROOT;
	INIT_LIST_HEAD(&be->actions);
	if (root_objectid)
		insert_root_entry(&be->roots, re);
	else
		kfree(re);
	return be;
}

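/*
 * Record a tree block ref found while pre-loading the cache from the extent
 * tree at mount time.  A non-zero parent means a shared (full backref) ref;
 * otherwise the ref is keyed on ref_root.
 */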
static int add_tree_block(struct btrfs_fs_info *fs_info, u64 ref_root,
			  u64 parent, u64 bytenr, int level)
{
	struct block_entry *be;
	struct root_entry *re;
	struct ref_entry *ref = NULL, *exist;

	ref = kmalloc(sizeof(struct ref_entry), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	if (parent)
		ref->root_objectid = 0;
	else
		ref->root_objectid = ref_root;
	ref->parent = parent;
	ref->owner = level;
	ref->offset = 0;
	ref->num_refs = 1;

	be = add_block_entry(fs_info, bytenr, fs_info->nodesize, ref_root);
	if (IS_ERR(be)) {
		kfree(ref);
		return PTR_ERR(be);
	}
	be->num_refs++;
	be->from_disk = 1;
	be->metadata = 1;

	if (!parent) {
		re = lookup_root_entry(&be->roots, ref_root);
		re->num_refs++;
	}
	exist = insert_ref_entry(&be->refs, ref);
	if (exist) {
		exist->num_refs++;
		kfree(ref);
	}
	spin_unlock(&fs_info->ref_verify_lock);

	return 0;
}

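/*
 * Record an on-disk SHARED_DATA_REF item: a single ref_entry keyed on the
 * parent bytenr, carrying the full ref count from the item.
 */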
static int add_shared_data_ref(struct btrfs_fs_info *fs_info,
			       u64 parent, u32 num_refs, u64 bytenr,
			       u64 num_bytes)
{
	struct block_entry *be;
	struct ref_entry *ref;

	ref = kzalloc(sizeof(struct ref_entry), GFP_NOFS);
	if (!ref)
		return -ENOMEM;
	be = add_block_entry(fs_info, bytenr, num_bytes, 0);
	if (IS_ERR(be)) {
		kfree(ref);
		return PTR_ERR(be);
	}
	be->num_refs += num_refs;

	ref->parent = parent;
	ref->num_refs = num_refs;
	if (insert_ref_entry(&be->refs, ref)) {
		spin_unlock(&fs_info->ref_verify_lock);
		btrfs_err(fs_info, "existing shared ref when reading from disk?");
		kfree(ref);
		return -EINVAL;
	}
	spin_unlock(&fs_info->ref_verify_lock);
	return 0;
}

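/*
 * Record an on-disk EXTENT_DATA_REF item: the root/objectid/offset triple is
 * read out of the item and both the per-block and per-root ref counts are
 * bumped by the item's count.
 */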
static int add_extent_data_ref(struct btrfs_fs_info *fs_info,
			       struct extent_buffer *leaf,
			       struct btrfs_extent_data_ref *dref,
			       u64 bytenr, u64 num_bytes)
{
	struct block_entry *be;
	struct ref_entry *ref;
	struct root_entry *re;
	u64 ref_root = btrfs_extent_data_ref_root(leaf, dref);
	u64 owner = btrfs_extent_data_ref_objectid(leaf, dref);
	u64 offset = btrfs_extent_data_ref_offset(leaf, dref);
	u32 num_refs = btrfs_extent_data_ref_count(leaf, dref);

	ref = kzalloc(sizeof(struct ref_entry), GFP_NOFS);
	if (!ref)
		return -ENOMEM;
	be = add_block_entry(fs_info, bytenr, num_bytes, ref_root);
	if (IS_ERR(be)) {
		kfree(ref);
		return PTR_ERR(be);
	}
	be->num_refs += num_refs;

	ref->owner = owner;
	ref->root_objectid = ref_root;
	ref->offset = offset;
	ref->num_refs = num_refs;
	if (insert_ref_entry(&be->refs, ref)) {
		spin_unlock(&fs_info->ref_verify_lock);
		btrfs_err(fs_info, "existing ref when reading from disk?");
		kfree(ref);
		return -EINVAL;
	}

	re = lookup_root_entry(&be->roots, ref_root);
	if (!re) {
		spin_unlock(&fs_info->ref_verify_lock);
		btrfs_err(fs_info, "missing root in new block entry?");
		return -EINVAL;
	}
	re->num_refs += num_refs;
	spin_unlock(&fs_info->ref_verify_lock);
	return 0;
}

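/*
 * Walk the inline refs of an EXTENT_ITEM/METADATA_ITEM and feed each one to
 * the add_* helpers above.  For EXTENT_ITEMs of tree blocks the level comes
 * from the btrfs_tree_block_info; for METADATA_ITEMs it is the key offset.
 */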
static int process_extent_item(struct btrfs_fs_info *fs_info,
			       struct btrfs_path *path, struct btrfs_key *key,
			       int slot, int *tree_block_level)
{
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_extent_data_ref *dref;
	struct btrfs_shared_data_ref *sref;
	struct extent_buffer *leaf = path->nodes[0];
	u32 item_size = btrfs_item_size(leaf, slot);
	unsigned long end, ptr;
	u64 offset, flags, count;
	int type, ret = 0;

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	if ((key->type == BTRFS_EXTENT_ITEM_KEY) &&
	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)(ei + 1);
		*tree_block_level = btrfs_tree_block_level(leaf, info);
		iref = (struct btrfs_extent_inline_ref *)(info + 1);
	} else {
		if (key->type == BTRFS_METADATA_ITEM_KEY)
			*tree_block_level = key->offset;
		iref = (struct btrfs_extent_inline_ref *)(ei + 1);
	}

	ptr = (unsigned long)iref;
	end = (unsigned long)ei + item_size;
	while (ptr < end) {
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		offset = btrfs_extent_inline_ref_offset(leaf, iref);
		switch (type) {
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = add_tree_block(fs_info, offset, 0, key->objectid,
					     *tree_block_level);
			break;
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = add_tree_block(fs_info, 0, offset, key->objectid,
					     *tree_block_level);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY:
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			ret = add_extent_data_ref(fs_info, leaf, dref,
						  key->objectid, key->offset);
			break;
		case BTRFS_SHARED_DATA_REF_KEY:
			sref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sref);
			ret = add_shared_data_ref(fs_info, offset, count,
						  key->objectid, key->offset);
			break;
		case BTRFS_EXTENT_OWNER_REF_KEY:
			WARN_ON(!btrfs_fs_incompat(fs_info, SIMPLE_QUOTA));
			break;
		default:
			btrfs_err(fs_info, "invalid key type in iref");
			ret = -EINVAL;
			break;
		}
		if (ret)
			break;
		ptr += btrfs_extent_inline_ref_size(type);
	}
	return ret;
}

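/*
 * Process every item in an extent tree leaf, dispatching on the key type to
 * either process_extent_item() for the extent items themselves or the add_*
 * helpers for standalone (non-inline) ref items.
 */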
static int process_leaf(struct btrfs_root *root,
			struct btrfs_path *path, u64 *bytenr, u64 *num_bytes,
			int *tree_block_level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf = path->nodes[0];
	struct btrfs_extent_data_ref *dref;
	struct btrfs_shared_data_ref *sref;
	u32 count;
	int i = 0, ret = 0;
	struct btrfs_key key;
	int nritems = btrfs_header_nritems(leaf);

	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(leaf, &key, i);
		switch (key.type) {
		case BTRFS_EXTENT_ITEM_KEY:
			*num_bytes = key.offset;
			fallthrough;
		case BTRFS_METADATA_ITEM_KEY:
			*bytenr = key.objectid;
			ret = process_extent_item(fs_info, path, &key, i,
						  tree_block_level);
			break;
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = add_tree_block(fs_info, key.offset, 0,
					     key.objectid, *tree_block_level);
			break;
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = add_tree_block(fs_info, 0, key.offset,
					     key.objectid, *tree_block_level);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY:
			dref = btrfs_item_ptr(leaf, i,
					      struct btrfs_extent_data_ref);
			ret = add_extent_data_ref(fs_info, leaf, dref,
						  *bytenr, *num_bytes);
			break;
		case BTRFS_SHARED_DATA_REF_KEY:
			sref = btrfs_item_ptr(leaf, i,
					      struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sref);
			ret = add_shared_data_ref(fs_info, key.offset, count,
						  *bytenr, *num_bytes);
			break;
		default:
			break;
		}
		if (ret)
			break;
	}
	return ret;
}

/* Walk down to the leaf from the given level */
static int walk_down_tree(struct btrfs_root *root, struct btrfs_path *path,
			  int level, u64 *bytenr, u64 *num_bytes,
			  int *tree_block_level)
{
	struct extent_buffer *eb;
	int ret = 0;

	while (level >= 0) {
		if (level) {
			eb = btrfs_read_node_slot(path->nodes[level],
						  path->slots[level]);
			if (IS_ERR(eb))
				return PTR_ERR(eb);
			btrfs_tree_read_lock(eb);
			path->nodes[level-1] = eb;
			path->slots[level-1] = 0;
			path->locks[level-1] = BTRFS_READ_LOCK;
		} else {
			ret = process_leaf(root, path, bytenr, num_bytes,
					   tree_block_level);
			if (ret)
				break;
		}
		level--;
	}
	return ret;
}

/* Walk up to the next node that needs to be processed */
static int walk_up_tree(struct btrfs_path *path, int *level)
{
	int l;

	for (l = 0; l < BTRFS_MAX_LEVEL; l++) {
		if (!path->nodes[l])
			continue;
		if (l) {
			path->slots[l]++;
			if (path->slots[l] <
			    btrfs_header_nritems(path->nodes[l])) {
				*level = l;
				return 0;
			}
		}
		btrfs_tree_unlock_rw(path->nodes[l], path->locks[l]);
		free_extent_buffer(path->nodes[l]);
		path->nodes[l] = NULL;
		path->slots[l] = 0;
		path->locks[l] = 0;
	}

	return 1;
}

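/*
 * Print a single recorded ref_action, including the stack trace captured
 * when the ref was modified.
 */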
static void dump_ref_action(struct btrfs_fs_info *fs_info,
			    struct ref_action *ra)
{
	btrfs_err(fs_info,
" Ref action %d, root %llu, ref_root %llu, parent %llu, owner %llu, offset %llu, num_refs %llu",
		  ra->action, ra->root, ra->ref.root_objectid, ra->ref.parent,
		  ra->ref.owner, ra->ref.offset, ra->ref.num_refs);
	__print_stack_trace(fs_info, ra);
}

/*
 * Dumps all the information from the block entry to printk, it's going to be
 * awesome.
 */
static void dump_block_entry(struct btrfs_fs_info *fs_info,
			     struct block_entry *be)
{
	struct ref_entry *ref;
	struct root_entry *re;
	struct ref_action *ra;
	struct rb_node *n;

	btrfs_err(fs_info,
"dumping block entry [%llu %llu], num_refs %llu, metadata %d, from disk %d",
		  be->bytenr, be->len, be->num_refs, be->metadata,
		  be->from_disk);

	for (n = rb_first(&be->refs); n; n = rb_next(n)) {
		ref = rb_entry(n, struct ref_entry, node);
		btrfs_err(fs_info,
" ref root %llu, parent %llu, owner %llu, offset %llu, num_refs %llu",
			  ref->root_objectid, ref->parent, ref->owner,
			  ref->offset, ref->num_refs);
	}

	for (n = rb_first(&be->roots); n; n = rb_next(n)) {
		re = rb_entry(n, struct root_entry, node);
		btrfs_err(fs_info, " root entry %llu, num_refs %llu",
			  re->root_objectid, re->num_refs);
	}

	list_for_each_entry(ra, &be->actions, list)
		dump_ref_action(fs_info, ra);
}

/*
 * Called when we modify a ref for a bytenr.
 *
 * This will add an action item to the given bytenr and do sanity checks to make
 * sure we haven't messed something up. If we are making a new allocation and
 * this block entry has history we will delete all previous actions as long as
 * our sanity checks pass as they are no longer needed.
 */
int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
		       struct btrfs_ref *generic_ref)
{
	struct ref_entry *ref = NULL, *exist;
	struct ref_action *ra = NULL;
	struct block_entry *be = NULL;
	struct root_entry *re = NULL;
	int action = generic_ref->action;
	int ret = 0;
	bool metadata;
	u64 bytenr = generic_ref->bytenr;
	u64 num_bytes = generic_ref->len;
	u64 parent = generic_ref->parent;
	u64 ref_root = 0;
	u64 owner = 0;
	u64 offset = 0;

	if (!btrfs_test_opt(fs_info, REF_VERIFY))
		return 0;

	if (generic_ref->type == BTRFS_REF_METADATA) {
		if (!parent)
			ref_root = generic_ref->tree_ref.ref_root;
		owner = generic_ref->tree_ref.level;
	} else if (!parent) {
		ref_root = generic_ref->data_ref.ref_root;
		owner = generic_ref->data_ref.ino;
		offset = generic_ref->data_ref.offset;
	}
	metadata = owner < BTRFS_FIRST_FREE_OBJECTID;

	ref = kzalloc(sizeof(struct ref_entry), GFP_NOFS);
	ra = kmalloc(sizeof(struct ref_action), GFP_NOFS);
	if (!ra || !ref) {
		kfree(ref);
		kfree(ra);
		ret = -ENOMEM;
		goto out;
	}

	ref->parent = parent;
	ref->owner = owner;
	ref->root_objectid = ref_root;
	ref->offset = offset;
	ref->num_refs = (action == BTRFS_DROP_DELAYED_REF) ? -1 : 1;

	memcpy(&ra->ref, ref, sizeof(struct ref_entry));
	/*
	 * Save the extra info from the delayed ref in the ref action to make it
	 * easier to figure out what is happening. The real ref's we add to the
	 * ref tree need to reflect what we save on disk so it matches any
	 * on-disk refs we pre-loaded.
	 */
	ra->ref.owner = owner;
	ra->ref.offset = offset;
	ra->ref.root_objectid = ref_root;
	__save_stack_trace(ra);

	INIT_LIST_HEAD(&ra->list);
	ra->action = action;
	ra->root = generic_ref->real_root;

	/*
	 * This is an allocation, preallocate the block_entry in case we haven't
	 * used it before.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT) {
		/*
		 * For subvol_create we'll just pass in whatever the parent root
		 * is and the new root objectid, so let's not treat the passed
		 * in root as if it really has a ref for this bytenr.
		 */
		be = add_block_entry(fs_info, bytenr, num_bytes, ref_root);
		if (IS_ERR(be)) {
			kfree(ref);
			kfree(ra);
			ret = PTR_ERR(be);
			goto out;
		}
		be->num_refs++;
		if (metadata)
			be->metadata = 1;

		if (be->num_refs != 1) {
			btrfs_err(fs_info,
			"re-allocated a block that still has references to it!");
			dump_block_entry(fs_info, be);
			dump_ref_action(fs_info, ra);
			kfree(ref);
			kfree(ra);
			goto out_unlock;
		}

		while (!list_empty(&be->actions)) {
			struct ref_action *tmp;

			tmp = list_first_entry(&be->actions, struct ref_action,
					       list);
			list_del(&tmp->list);
			kfree(tmp);
		}
	} else {
		struct root_entry *tmp;

		if (!parent) {
			re = kmalloc(sizeof(struct root_entry), GFP_NOFS);
			if (!re) {
				kfree(ref);
				kfree(ra);
				ret = -ENOMEM;
				goto out;
			}
			/*
			 * This is the root that is modifying us, so it's the
			 * one we want to lookup below when we modify the
			 * re->num_refs.
			 */
			ref_root = generic_ref->real_root;
			re->root_objectid = generic_ref->real_root;
			re->num_refs = 0;
		}

		spin_lock(&fs_info->ref_verify_lock);
		be = lookup_block_entry(&fs_info->block_tree, bytenr);
		if (!be) {
			btrfs_err(fs_info,
"trying to do action %d to bytenr %llu num_bytes %llu but there is no existing entry!",
				  action, bytenr, num_bytes);
			dump_ref_action(fs_info, ra);
			kfree(ref);
			kfree(ra);
			goto out_unlock;
		} else if (be->num_refs == 0) {
			btrfs_err(fs_info,
		"trying to do action %d for a bytenr that has 0 total references",
				action);
			dump_block_entry(fs_info, be);
			dump_ref_action(fs_info, ra);
			kfree(ref);
			kfree(ra);
			goto out_unlock;
		}

		if (!parent) {
			tmp = insert_root_entry(&be->roots, re);
			if (tmp) {
				kfree(re);
				re = tmp;
			}
		}
	}

	exist = insert_ref_entry(&be->refs, ref);
	if (exist) {
		if (action == BTRFS_DROP_DELAYED_REF) {
			if (exist->num_refs == 0) {
				btrfs_err(fs_info,
"dropping a ref for a existing root that doesn't have a ref on the block");
				dump_block_entry(fs_info, be);
				dump_ref_action(fs_info, ra);
				kfree(ref);
				kfree(ra);
				goto out_unlock;
			}
			exist->num_refs--;
			if (exist->num_refs == 0) {
				rb_erase(&exist->node, &be->refs);
				kfree(exist);
			}
		} else if (!be->metadata) {
			exist->num_refs++;
		} else {
			btrfs_err(fs_info,
"attempting to add another ref for an existing ref on a tree block");
			dump_block_entry(fs_info, be);
			dump_ref_action(fs_info, ra);
			kfree(ref);
			kfree(ra);
			goto out_unlock;
		}
		kfree(ref);
	} else {
		if (action == BTRFS_DROP_DELAYED_REF) {
			btrfs_err(fs_info,
"dropping a ref for a root that doesn't have a ref on the block");
			dump_block_entry(fs_info, be);
			dump_ref_action(fs_info, ra);
			kfree(ref);
			kfree(ra);
			goto out_unlock;
		}
	}

	if (!parent && !re) {
		re = lookup_root_entry(&be->roots, ref_root);
		if (!re) {
			/*
			 * This shouldn't happen because we will add our re
			 * above when we lookup the be with !parent, but just in
			 * case catch this case so we don't panic because I
			 * didn't think of some other corner case.
			 */
			btrfs_err(fs_info, "failed to find root %llu for %llu",
				  generic_ref->real_root, be->bytenr);
			dump_block_entry(fs_info, be);
			dump_ref_action(fs_info, ra);
			kfree(ra);
			goto out_unlock;
		}
	}
	if (action == BTRFS_DROP_DELAYED_REF) {
		if (re)
			re->num_refs--;
		be->num_refs--;
	} else if (action == BTRFS_ADD_DELAYED_REF) {
		be->num_refs++;
		if (re)
			re->num_refs++;
	}
	list_add_tail(&ra->list, &be->actions);
out_unlock:
	spin_unlock(&fs_info->ref_verify_lock);
out:
	if (ret)
		btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
	return ret;
}

/* Free up the ref cache */
void btrfs_free_ref_cache(struct btrfs_fs_info *fs_info)
{
	struct block_entry *be;
	struct rb_node *n;

	if (!btrfs_test_opt(fs_info, REF_VERIFY))
		return;

	spin_lock(&fs_info->ref_verify_lock);
	while ((n = rb_first(&fs_info->block_tree))) {
		be = rb_entry(n, struct block_entry, node);
		rb_erase(&be->node, &fs_info->block_tree);
		free_block_entry(be);
		cond_resched_lock(&fs_info->ref_verify_lock);
	}
	spin_unlock(&fs_info->ref_verify_lock);
}

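/*
 * Drop all block entries that fall inside [start, start + len); used when a
 * range of the space (the error messages below suggest a block group) should
 * no longer be tracked.  Entries that overlap the boundaries of the range are
 * dumped since that should not happen.
 */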
void btrfs_free_ref_tree_range(struct btrfs_fs_info *fs_info, u64 start,
			       u64 len)
{
	struct block_entry *be = NULL, *entry;
	struct rb_node *n;

	if (!btrfs_test_opt(fs_info, REF_VERIFY))
		return;

	spin_lock(&fs_info->ref_verify_lock);
	n = fs_info->block_tree.rb_node;
	while (n) {
		entry = rb_entry(n, struct block_entry, node);
		if (entry->bytenr < start) {
			n = n->rb_right;
		} else if (entry->bytenr > start) {
			n = n->rb_left;
		} else {
			be = entry;
			break;
		}
		/* We want to get as close to start as possible */
		if (!be ||
		    (entry->bytenr < start && be->bytenr > start) ||
		    (entry->bytenr < start && entry->bytenr > be->bytenr))
			be = entry;
	}

	/*
	 * Could have an empty block group, maybe have something to check for
	 * this case to verify we were actually empty?
	 */
	if (!be) {
		spin_unlock(&fs_info->ref_verify_lock);
		return;
	}

	n = &be->node;
	while (n) {
		be = rb_entry(n, struct block_entry, node);
		n = rb_next(n);
		if (be->bytenr < start && be->bytenr + be->len > start) {
			btrfs_err(fs_info,
				"block entry overlaps a block group [%llu,%llu]!",
				start, len);
			dump_block_entry(fs_info, be);
			continue;
		}
		if (be->bytenr < start)
			continue;
		if (be->bytenr >= start + len)
			break;
		if (be->bytenr + be->len > start + len) {
			btrfs_err(fs_info,
				"block entry overlaps a block group [%llu,%llu]!",
				start, len);
			dump_block_entry(fs_info, be);
		}
		rb_erase(&be->node, &fs_info->block_tree);
		free_block_entry(be);
	}
	spin_unlock(&fs_info->ref_verify_lock);
}

/* Walk down all roots and build the ref tree, meant to be called at mount */
int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *extent_root;
	struct btrfs_path *path;
	struct extent_buffer *eb;
	int tree_block_level = 0;
	u64 bytenr = 0, num_bytes = 0;
	int ret, level;

	if (!btrfs_test_opt(fs_info, REF_VERIFY))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	extent_root = btrfs_extent_root(fs_info, 0);
	eb = btrfs_read_lock_root_node(extent_root);
	level = btrfs_header_level(eb);
	path->nodes[level] = eb;
	path->slots[level] = 0;
	path->locks[level] = BTRFS_READ_LOCK;

	while (true) {
		/*
		 * We have to keep track of the bytenr/num_bytes we last hit
		 * because we could have run out of space for an inline ref, and
		 * would have had to added a ref key item which may appear on a
		 * different leaf from the original extent item.
		 */
		ret = walk_down_tree(extent_root, path, level,
				     &bytenr, &num_bytes, &tree_block_level);
		if (ret)
			break;
		ret = walk_up_tree(path, &level);
		if (ret < 0)
			break;
		if (ret > 0) {
			ret = 0;
			break;
		}
	}
	if (ret) {
		btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
		btrfs_free_ref_cache(fs_info);
	}
	btrfs_free_path(path);
	return ret;
}