// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "messages.h"
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"
#include "space-info.h"
#include "tree-mod-log.h"
#include "fs.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;

/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	bool ret = false;
	u64 reserved;

	spin_lock(&global_rsv->lock);
	reserved = global_rsv->reserved;
	spin_unlock(&global_rsv->lock);

	/*
	 * Since the global reserve is just kind of magic we don't really want
	 * to rely on it to save our bacon, so if our size is more than the
	 * delayed_refs_rsv and the global rsv then it's time to think about
	 * bailing.
	 */
	spin_lock(&delayed_refs_rsv->lock);
	reserved += delayed_refs_rsv->reserved;
	if (delayed_refs_rsv->size >= reserved)
		ret = true;
	spin_unlock(&delayed_refs_rsv->lock);
	return ret;
}
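
/*
 * Illustrative caller sketch (not verbatim kernel code): a long-running
 * operation can poll this to decide when to stop and let the delayed refs
 * be flushed, roughly:
 *
 *	if (btrfs_check_space_for_delayed_refs(trans->fs_info))
 *		return btrfs_end_transaction(trans);
 *
 * The early return above assumes the caller is at a point where it can
 * safely end its transaction.
 */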

/*
 * Release a ref head's reservation.
 *
 * @fs_info:  the filesystem
 * @nr:       number of items to drop
 *
 * Drops the delayed ref head's count from the delayed refs rsv and frees any
 * excess reservation we had.
 */
void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
	const u64 num_bytes = btrfs_calc_delayed_ref_bytes(fs_info, nr);
	u64 released;

	released = btrfs_block_rsv_release(fs_info, block_rsv, num_bytes, NULL);
	if (released)
		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
					      0, released, 0);
}

/*
 * Adjust the size of the delayed refs rsv.
 *
 * This is to be called anytime we may have adjusted trans->delayed_ref_updates;
 * it calculates the additional size and adds it to the delayed_refs_rsv.
 */
void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
	u64 num_bytes;

	if (!trans->delayed_ref_updates)
		return;

	num_bytes = btrfs_calc_delayed_ref_bytes(fs_info,
						 trans->delayed_ref_updates);
	spin_lock(&delayed_rsv->lock);
	delayed_rsv->size += num_bytes;
	delayed_rsv->full = false;
	spin_unlock(&delayed_rsv->lock);
	trans->delayed_ref_updates = 0;
}
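
/*
 * Typical usage sketch (illustrative, not a verbatim caller): bump
 * trans->delayed_ref_updates for each item queued, then fold that into the
 * rsv size:
 *
 *	trans->delayed_ref_updates += nr_new_items;
 *	btrfs_update_delayed_refs_rsv(trans);
 *
 * After the call trans->delayed_ref_updates is zero again, so calling it
 * repeatedly is safe.
 */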

/*
 * Transfer bytes to our delayed refs rsv.
 *
 * @fs_info:   the filesystem
 * @num_bytes: number of bytes to transfer
 *
 * This transfers up to the num_bytes amount, previously reserved, to the
 * delayed_refs_rsv.  Any extra bytes are returned to the space info.
 */
void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
				       u64 num_bytes)
{
	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	u64 to_free = 0;

	spin_lock(&delayed_refs_rsv->lock);
	if (delayed_refs_rsv->size > delayed_refs_rsv->reserved) {
		u64 delta = delayed_refs_rsv->size -
			    delayed_refs_rsv->reserved;
		if (num_bytes > delta) {
			to_free = num_bytes - delta;
			num_bytes = delta;
		}
	} else {
		to_free = num_bytes;
		num_bytes = 0;
	}

	if (num_bytes)
		delayed_refs_rsv->reserved += num_bytes;
	if (delayed_refs_rsv->reserved >= delayed_refs_rsv->size)
		delayed_refs_rsv->full = true;
	spin_unlock(&delayed_refs_rsv->lock);

	if (num_bytes)
		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
					      0, num_bytes, 1);
	if (to_free)
		btrfs_space_info_free_bytes_may_use(fs_info,
				delayed_refs_rsv->space_info, to_free);
}
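
/*
 * Worked example (illustrative numbers): if size is 96K and reserved is 64K,
 * delta is 32K.  Migrating num_bytes = 48K stores 32K in the rsv (making it
 * full) and returns to_free = 16K to the space info; migrating 16K stores
 * all 16K and frees nothing.
 */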

/*
 * Refill based on our delayed refs usage.
 *
 * @fs_info: the filesystem
 * @flush:   control how we can flush for this reservation.
 *
 * This will refill the delayed block_rsv up to one item's worth of space and
 * will return -ENOSPC if we can't make the reservation.
 */
int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
				  enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
	u64 limit = btrfs_calc_delayed_ref_bytes(fs_info, 1);
	u64 num_bytes = 0;
	u64 refilled_bytes;
	u64 to_free;
	int ret;

	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved < block_rsv->size) {
		num_bytes = block_rsv->size - block_rsv->reserved;
		num_bytes = min(num_bytes, limit);
	}
	spin_unlock(&block_rsv->lock);

	if (!num_bytes)
		return 0;

	ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, num_bytes, flush);
	if (ret)
		return ret;

	/*
	 * We may have raced with someone else, so check again if the block
	 * reserve is still not full and release any excess space.
	 */
	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved < block_rsv->size) {
		u64 needed = block_rsv->size - block_rsv->reserved;

		if (num_bytes >= needed) {
			block_rsv->reserved += needed;
			block_rsv->full = true;
			to_free = num_bytes - needed;
			refilled_bytes = needed;
		} else {
			block_rsv->reserved += num_bytes;
			to_free = 0;
			refilled_bytes = num_bytes;
		}
	} else {
		to_free = num_bytes;
		refilled_bytes = 0;
	}
	spin_unlock(&block_rsv->lock);

	if (to_free > 0)
		btrfs_space_info_free_bytes_may_use(fs_info, block_rsv->space_info,
						    to_free);

	if (refilled_bytes > 0)
		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv", 0,
					      refilled_bytes, 1);
	return 0;
}

/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref1,
			  struct btrfs_delayed_tree_ref *ref2)
{
	if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref1,
			  struct btrfs_delayed_data_ref *ref2)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

static int comp_refs(struct btrfs_delayed_ref_node *ref1,
		     struct btrfs_delayed_ref_node *ref2,
		     bool check_seq)
{
	int ret = 0;

	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref1),
				     btrfs_delayed_node_to_tree_ref(ref2));
	else
		ret = comp_data_refs(btrfs_delayed_node_to_data_ref(ref1),
				     btrfs_delayed_node_to_data_ref(ref2));
	if (ret)
		return ret;
	if (check_seq) {
		if (ref1->seq < ref2->seq)
			return -1;
		if (ref1->seq > ref2->seq)
			return 1;
	}
	return 0;
}
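
/*
 * The resulting order of refs under one head is: type first, then the
 * type-specific fields (root or parent for tree refs; root, objectid and
 * offset for keyed data refs, parent for shared data refs), and finally seq
 * when check_seq is set.  Two nodes that compare equal with check_seq ==
 * false describe the same backref and are candidates for merging.
 */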

/* insert a new ref to head ref rbtree */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root_cached *root,
						   struct rb_node *node)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_head *entry;
	struct btrfs_delayed_ref_head *ins;
	u64 bytenr;
	bool leftmost = true;

	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
	bytenr = ins->bytenr;
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
				 href_node);

		if (bytenr < entry->bytenr) {
			p = &(*p)->rb_left;
		} else if (bytenr > entry->bytenr) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			return entry;
		}
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color_cached(node, root, leftmost);
	return NULL;
}

static struct btrfs_delayed_ref_node *tree_insert(struct rb_root_cached *root,
		struct btrfs_delayed_ref_node *ins)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *node = &ins->ref_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;
	bool leftmost = true;

	while (*p) {
		int comp;

		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 ref_node);
		comp = comp_refs(ins, entry, true);
		if (comp < 0) {
			p = &(*p)->rb_left;
		} else if (comp > 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			return entry;
		}
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color_cached(node, root, leftmost);
	return NULL;
}

static struct btrfs_delayed_ref_head *find_first_ref_head(
		struct btrfs_delayed_ref_root *dr)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = rb_first_cached(&dr->href_root);
	if (!n)
		return NULL;

	entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

	return entry;
}

/*
 * Find a head entry based on bytenr. This returns the delayed ref head if it
 * was able to find one, or NULL if nothing was in that spot.  If return_bigger
 * is given, the next bigger entry is returned if no exact match is found.
 */
static struct btrfs_delayed_ref_head *find_ref_head(
		struct btrfs_delayed_ref_root *dr, u64 bytenr,
		bool return_bigger)
{
	struct rb_root *root = &dr->href_root.rb_root;
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

		if (bytenr < entry->bytenr)
			n = n->rb_left;
		else if (bytenr > entry->bytenr)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (bytenr > entry->bytenr) {
			n = rb_next(&entry->href_node);
			if (!n)
				return NULL;
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
		}
		return entry;
	}
	return NULL;
}

int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
			   struct btrfs_delayed_ref_head *head)
{
	lockdep_assert_held(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	refcount_inc(&head->refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (RB_EMPTY_NODE(&head->href_node)) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref_head(head);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref_head(head);
	return 0;
}
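
/*
 * Callers generally retry on -EAGAIN; a simplified sketch (real loops also
 * handle a NULL head):
 *
 *	head = btrfs_select_ref_head(delayed_refs);
 *	while (head && btrfs_delayed_ref_lock(delayed_refs, head) == -EAGAIN)
 *		head = btrfs_select_ref_head(delayed_refs);
 *
 * -EAGAIN means the head was removed from the rbtree while we slept on its
 * mutex, so a fresh head must be picked under delayed_refs->lock.
 */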

static inline void drop_delayed_ref(struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head,
				    struct btrfs_delayed_ref_node *ref)
{
	lockdep_assert_held(&head->lock);
	rb_erase_cached(&ref->ref_node, &head->ref_tree);
	RB_CLEAR_NODE(&ref->ref_node);
	if (!list_empty(&ref->add_list))
		list_del(&ref->add_list);
	btrfs_put_delayed_ref(ref);
	atomic_dec(&delayed_refs->num_entries);
}

static bool merge_ref(struct btrfs_delayed_ref_root *delayed_refs,
		      struct btrfs_delayed_ref_head *head,
		      struct btrfs_delayed_ref_node *ref,
		      u64 seq)
{
	struct btrfs_delayed_ref_node *next;
	struct rb_node *node = rb_next(&ref->ref_node);
	bool done = false;

	while (!done && node) {
		int mod;

		next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		node = rb_next(node);
		if (seq && next->seq >= seq)
			break;
		if (comp_refs(ref, next, false))
			break;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				swap(ref, next);
				done = true;
			}
			mod = -next->ref_mod;
		}

		drop_delayed_ref(delayed_refs, head, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(delayed_refs, head, ref);
			done = true;
		} else {
			/*
			 * Can't have multiples of the same ref on a tree block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
	}

	return done;
}
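
/*
 * Worked example: an ADD with ref_mod == 1 followed by an otherwise equal
 * DROP with ref_mod == 1 gives mod == -1, the DROP node is dropped, and the
 * surviving ADD reaches ref_mod == 0, so it is dropped too and the pair
 * cancels out entirely.
 */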

void btrfs_merge_delayed_refs(struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_node *ref;
	struct rb_node *node;
	u64 seq = 0;

	lockdep_assert_held(&head->lock);

	if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
		return;

	/* We don't have too many refs to merge for data. */
	if (head->is_data)
		return;

	seq = btrfs_tree_mod_log_lowest_seq(fs_info);
again:
	for (node = rb_first_cached(&head->ref_tree); node;
	     node = rb_next(node)) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		if (seq && ref->seq >= seq)
			continue;
		if (merge_ref(delayed_refs, head, ref, seq))
			goto again;
	}
}

int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq)
{
	int ret = 0;
	u64 min_seq = btrfs_tree_mod_log_lowest_seq(fs_info);

	if (min_seq != 0 && seq >= min_seq) {
		btrfs_debug(fs_info,
			    "holding back delayed_ref %llu, lowest is %llu",
			    seq, min_seq);
		ret = 1;
	}

	return ret;
}

struct btrfs_delayed_ref_head *btrfs_select_ref_head(
		struct btrfs_delayed_ref_root *delayed_refs)
{
	struct btrfs_delayed_ref_head *head;

	lockdep_assert_held(&delayed_refs->lock);
again:
	head = find_ref_head(delayed_refs, delayed_refs->run_delayed_start,
			     true);
	if (!head && delayed_refs->run_delayed_start != 0) {
		delayed_refs->run_delayed_start = 0;
		head = find_first_ref_head(delayed_refs);
	}
	if (!head)
		return NULL;

	while (head->processing) {
		struct rb_node *node;

		node = rb_next(&head->href_node);
		if (!node) {
			if (delayed_refs->run_delayed_start == 0)
				return NULL;
			delayed_refs->run_delayed_start = 0;
			goto again;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
	}

	head->processing = true;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->bytenr +
		head->num_bytes;
	return head;
}
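
/*
 * Note that run_delayed_start makes successive calls walk the heads in
 * bytenr order, wrapping around to the first head at most once, so that
 * concurrent workers spread across the rbtree instead of contending on the
 * same unprocessed head.
 */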

void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
			   struct btrfs_delayed_ref_head *head)
{
	lockdep_assert_held(&delayed_refs->lock);
	lockdep_assert_held(&head->lock);

	rb_erase_cached(&head->href_node, &delayed_refs->href_root);
	RB_CLEAR_NODE(&head->href_node);
	atomic_dec(&delayed_refs->num_entries);
	delayed_refs->num_heads--;
	if (!head->processing)
		delayed_refs->num_heads_ready--;
}

/*
 * Helper to insert the ref_node to the tail or merge with tail.
 *
 * Return false if the ref was inserted.
 * Return true if the ref was merged into an existing one (and therefore can be
 * freed by the caller).
 */
static bool insert_delayed_ref(struct btrfs_delayed_ref_root *root,
			       struct btrfs_delayed_ref_head *href,
			       struct btrfs_delayed_ref_node *ref)
{
	struct btrfs_delayed_ref_node *exist;
	int mod;

	spin_lock(&href->lock);
	exist = tree_insert(&href->ref_tree, ref);
	if (!exist) {
		if (ref->action == BTRFS_ADD_DELAYED_REF)
			list_add_tail(&ref->add_list, &href->ref_add_list);
		atomic_inc(&root->num_entries);
		spin_unlock(&href->lock);
		return false;
	}

	/* Now we are sure we can merge */
	if (exist->action == ref->action) {
		mod = ref->ref_mod;
	} else {
		/* Need to change action */
		if (exist->ref_mod < ref->ref_mod) {
			exist->action = ref->action;
			mod = -exist->ref_mod;
			exist->ref_mod = ref->ref_mod;
			if (ref->action == BTRFS_ADD_DELAYED_REF)
				list_add_tail(&exist->add_list,
					      &href->ref_add_list);
			else if (ref->action == BTRFS_DROP_DELAYED_REF) {
				ASSERT(!list_empty(&exist->add_list));
				list_del(&exist->add_list);
			} else {
				ASSERT(0);
			}
		} else {
			mod = -ref->ref_mod;
		}
	}
	exist->ref_mod += mod;

	/* remove existing tail if its ref_mod is zero */
	if (exist->ref_mod == 0)
		drop_delayed_ref(root, href, exist);
	spin_unlock(&href->lock);
	return true;
}
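
/*
 * Worked example of the action flip above: an existing DROP with
 * ref_mod == 1 merged with an ADD of ref_mod == 2 flips the node to ADD with
 * mod == -1 and ref_mod set to 2, ending at ref_mod == 1, i.e. one net
 * addition, which matches -1 + 2.
 */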

/*
 * helper function to update the accounting in the head ref.
 * existing and update must have the same bytenr
 */
static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_delayed_ref_head *existing,
			 struct btrfs_delayed_ref_head *update)
{
	struct btrfs_delayed_ref_root *delayed_refs =
		&trans->transaction->delayed_refs;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int old_ref_mod;

	BUG_ON(existing->is_data != update->is_data);

	spin_lock(&existing->lock);
	if (update->must_insert_reserved) {
		/*
		 * if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing->must_insert_reserved = update->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;

	}

	if (update->extent_op) {
		if (!existing->extent_op) {
			existing->extent_op = update->extent_op;
		} else {
			if (update->extent_op->update_key) {
				memcpy(&existing->extent_op->key,
				       &update->extent_op->key,
				       sizeof(update->extent_op->key));
				existing->extent_op->update_key = true;
			}
			if (update->extent_op->update_flags) {
				existing->extent_op->flags_to_set |=
					update->extent_op->flags_to_set;
				existing->extent_op->update_flags = true;
			}
			btrfs_free_delayed_extent_op(update->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation,
	 * only need the lock for this case cause we could be processing it
	 * currently, for refs we just added we know we're a-ok.
	 */
	old_ref_mod = existing->total_ref_mod;
	existing->ref_mod += update->ref_mod;
	existing->total_ref_mod += update->ref_mod;

	/*
	 * If we are going from a positive ref mod to a negative or vice
	 * versa we need to make sure to adjust pending_csums accordingly.
	 */
	if (existing->is_data) {
		u64 csum_leaves =
			btrfs_csum_bytes_to_leaves(fs_info,
						   existing->num_bytes);

		if (existing->total_ref_mod >= 0 && old_ref_mod < 0) {
			delayed_refs->pending_csums -= existing->num_bytes;
			btrfs_delayed_refs_rsv_release(fs_info, csum_leaves);
		}
		if (existing->total_ref_mod < 0 && old_ref_mod >= 0) {
			delayed_refs->pending_csums += existing->num_bytes;
			trans->delayed_ref_updates += csum_leaves;
		}
	}

	spin_unlock(&existing->lock);
}

static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
				  struct btrfs_qgroup_extent_record *qrecord,
				  u64 bytenr, u64 num_bytes, u64 ref_root,
				  u64 reserved, int action, bool is_data,
				  bool is_system)
{
	int count_mod = 1;
	bool must_insert_reserved = false;

	/* If reserved is provided, it must be a data extent. */
	BUG_ON(!is_data && reserved);

	switch (action) {
	case BTRFS_UPDATE_DELAYED_HEAD:
		count_mod = 0;
		break;
	case BTRFS_DROP_DELAYED_REF:
		/*
		 * The head node stores the sum of all the mods, so dropping a ref
		 * should drop the sum in the head node by one.
		 */
		count_mod = -1;
		break;
	case BTRFS_ADD_DELAYED_EXTENT:
		/*
		 * BTRFS_ADD_DELAYED_EXTENT means that we need to update the
		 * reserved accounting when the extent is finally added, or if a
		 * later modification deletes the delayed ref without ever
		 * inserting the extent into the extent allocation tree.
		 * ref->must_insert_reserved is the flag used to record that
		 * accounting mods are required.
		 *
		 * Once we record must_insert_reserved, switch the action to
		 * BTRFS_ADD_DELAYED_REF because other special casing is not
		 * required.
		 */
		must_insert_reserved = true;
		break;
	}

	refcount_set(&head_ref->refs, 1);
	head_ref->bytenr = bytenr;
	head_ref->num_bytes = num_bytes;
	head_ref->ref_mod = count_mod;
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;
	head_ref->is_system = is_system;
	head_ref->ref_tree = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&head_ref->ref_add_list);
	RB_CLEAR_NODE(&head_ref->href_node);
	head_ref->processing = false;
	head_ref->total_ref_mod = count_mod;
	spin_lock_init(&head_ref->lock);
	mutex_init(&head_ref->mutex);

	if (qrecord) {
		if (ref_root && reserved) {
			qrecord->data_rsv = reserved;
			qrecord->data_rsv_refroot = ref_root;
		}
		qrecord->bytenr = bytenr;
		qrecord->num_bytes = num_bytes;
		qrecord->old_roots = NULL;
	}
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_qgroup_extent_record *qrecord,
		     int action, bool *qrecord_inserted_ret)
{
	struct btrfs_delayed_ref_head *existing;
	struct btrfs_delayed_ref_root *delayed_refs;
	bool qrecord_inserted = false;

	delayed_refs = &trans->transaction->delayed_refs;

	/* Record qgroup extent info if provided */
	if (qrecord) {
		if (btrfs_qgroup_trace_extent_nolock(trans->fs_info,
					delayed_refs, qrecord))
			kfree(qrecord);
		else
			qrecord_inserted = true;
	}

	trace_add_delayed_ref_head(trans->fs_info, head_ref, action);

	existing = htree_insert(&delayed_refs->href_root,
				&head_ref->href_node);
	if (existing) {
		update_existing_head_ref(trans, existing, head_ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		if (head_ref->is_data && head_ref->ref_mod < 0) {
			delayed_refs->pending_csums += head_ref->num_bytes;
			trans->delayed_ref_updates +=
				btrfs_csum_bytes_to_leaves(trans->fs_info,
							   head_ref->num_bytes);
		}
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	if (qrecord_inserted_ret)
		*qrecord_inserted_ret = qrecord_inserted;

	return head_ref;
}

/*
 * init_delayed_ref_common - Initialize the structure which represents a
 *			     modification to an extent.
 *
 * @fs_info:    Internal to the mounted filesystem mount structure.
 *
 * @ref:	The structure which is going to be initialized.
 *
 * @bytenr:	The logical address of the extent for which a modification is
 *		going to be recorded.
 *
 * @num_bytes:  Size of the extent whose modification is being recorded.
 *
 * @ref_root:	The id of the root where this modification has originated, this
 *		can be either one of the well-known metadata trees or the
 *		subvolume id which references this extent.
 *
 * @action:	Can be one of BTRFS_ADD_DELAYED_REF/BTRFS_DROP_DELAYED_REF or
 *		BTRFS_ADD_DELAYED_EXTENT
 *
 * @ref_type:	Holds the type of the extent which is being recorded, can be
 *		one of BTRFS_SHARED_BLOCK_REF_KEY/BTRFS_TREE_BLOCK_REF_KEY
 *		when recording a metadata extent or BTRFS_SHARED_DATA_REF_KEY/
 *		BTRFS_EXTENT_DATA_REF_KEY when recording a data extent
 */
static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
				    struct btrfs_delayed_ref_node *ref,
				    u64 bytenr, u64 num_bytes, u64 ref_root,
				    int action, u8 ref_type)
{
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);

	refcount_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->seq = seq;
	ref->type = ref_type;
	RB_CLEAR_NODE(&ref->ref_node);
	INIT_LIST_HEAD(&ref->add_list);
}

/*
 * add a delayed tree ref. This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_ref *generic_ref,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	bool qrecord_inserted;
	bool is_system;
	bool merged;
	int action = generic_ref->action;
	int level = generic_ref->tree_ref.level;
	u64 bytenr = generic_ref->bytenr;
	u64 num_bytes = generic_ref->len;
	u64 parent = generic_ref->parent;
	u8 ref_type;

	is_system = (generic_ref->tree_ref.owning_root == BTRFS_CHUNK_TREE_OBJECTID);

	ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
		return -ENOMEM;
	}

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    !generic_ref->skip_qgroup) {
		record = kzalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
			return -ENOMEM;
		}
	}

	if (parent)
		ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref_type = BTRFS_TREE_BLOCK_REF_KEY;

	init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
				generic_ref->tree_ref.owning_root, action,
				ref_type);
	ref->root = generic_ref->tree_ref.owning_root;
	ref->parent = parent;
	ref->level = level;

	init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
			      generic_ref->tree_ref.owning_root, 0, action,
			      false, is_system);
	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(trans, head_ref, record,
					action, &qrecord_inserted);

	merged = insert_delayed_ref(delayed_refs, head_ref, &ref->node);
	spin_unlock(&delayed_refs->lock);

	/*
	 * Need to update the delayed_refs_rsv with any changes we may have
	 * made.
	 */
	btrfs_update_delayed_refs_rsv(trans);

	trace_add_delayed_tree_ref(fs_info, &ref->node, ref,
				   action == BTRFS_ADD_DELAYED_EXTENT ?
				   BTRFS_ADD_DELAYED_REF : action);
	if (merged)
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

	if (qrecord_inserted)
		btrfs_qgroup_trace_extent_post(trans, record);

	return 0;
}

/*
 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_ref *generic_ref,
			       u64 reserved)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	bool qrecord_inserted;
	int action = generic_ref->action;
	bool merged;
	u64 bytenr = generic_ref->bytenr;
	u64 num_bytes = generic_ref->len;
	u64 parent = generic_ref->parent;
	u64 ref_root = generic_ref->data_ref.owning_root;
	u64 owner = generic_ref->data_ref.ino;
	u64 offset = generic_ref->data_ref.offset;
	u8 ref_type;

	ASSERT(generic_ref->type == BTRFS_REF_DATA && action);
	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	if (parent)
		ref_type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref_type = BTRFS_EXTENT_DATA_REF_KEY;
	init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
				ref_root, action, ref_type);
	ref->root = ref_root;
	ref->parent = parent;
	ref->objectid = owner;
	ref->offset = offset;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    !generic_ref->skip_qgroup) {
		record = kzalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep,
					head_ref);
			return -ENOMEM;
		}
	}

	init_delayed_ref_head(head_ref, record, bytenr, num_bytes, ref_root,
			      reserved, action, true, false);
	head_ref->extent_op = NULL;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(trans, head_ref, record,
					action, &qrecord_inserted);

	merged = insert_delayed_ref(delayed_refs, head_ref, &ref->node);
	spin_unlock(&delayed_refs->lock);

	/*
	 * Need to update the delayed_refs_rsv with any changes we may have
	 * made.
	 */
	btrfs_update_delayed_refs_rsv(trans);

	trace_add_delayed_data_ref(trans->fs_info, &ref->node, ref,
				   action == BTRFS_ADD_DELAYED_EXTENT ?
				   BTRFS_ADD_DELAYED_REF : action);
	if (merged)
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);

	if (qrecord_inserted)
		return btrfs_qgroup_trace_extent_post(trans, record);
	return 0;
}

int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	init_delayed_ref_head(head_ref, NULL, bytenr, num_bytes, 0, 0,
			      BTRFS_UPDATE_DELAYED_HEAD, false, false);
	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(trans, head_ref, NULL, BTRFS_UPDATE_DELAYED_HEAD,
			     NULL);

	spin_unlock(&delayed_refs->lock);

	/*
	 * Need to update the delayed_refs_rsv with any changes we may have
	 * made.
	 */
	btrfs_update_delayed_refs_rsv(trans);
	return 0;
}

/*
 * This does a simple search for the head node for a given extent. Returns the
 * head node if found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
{
	lockdep_assert_held(&delayed_refs->lock);

	return find_ref_head(delayed_refs, bytenr, false);
}

void __cold btrfs_delayed_ref_exit(void)
{
	kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

int __init btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}