/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#ifndef BTRFS_DELAYED_REF_H
#define BTRFS_DELAYED_REF_H

#include <linux/refcount.h>

/* these are the possible values of struct btrfs_delayed_ref_node->action */
#define BTRFS_ADD_DELAYED_REF     1 /* add one backref to the tree */
#define BTRFS_DROP_DELAYED_REF    2 /* delete one backref from the tree */
#define BTRFS_ADD_DELAYED_EXTENT  3 /* record a full extent allocation */
#define BTRFS_UPDATE_DELAYED_HEAD 4 /* not changing ref count on head ref */

struct btrfs_delayed_ref_node {
	struct rb_node ref_node;
	/*
	 * If action is BTRFS_ADD_DELAYED_REF, also link this node to
	 * ref_head->ref_add_list, then we do not need to iterate the
	 * whole ref_head->ref_list to find BTRFS_ADD_DELAYED_REF nodes.
	 */
	struct list_head add_list;

	/* the starting bytenr of the extent */
	u64 bytenr;

	/* the size of the extent */
	u64 num_bytes;

	/* seq number to keep track of insertion order */
	u64 seq;

	/* ref count on this data structure */
	refcount_t refs;

	/*
	 * how many refs is this entry adding or deleting.  For
	 * head refs, this may be a negative number because it is keeping
	 * track of the total mods done to the reference count.
	 * For individual refs, this will always be a positive number.
	 *
	 * It may be more than one, since it is possible for a single
	 * parent to have more than one ref on an extent.
	 */
	int ref_mod;

	unsigned int action:8;
	unsigned int type:8;
};
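
/*
 * Worked example of the ref_mod accounting described above (illustrative
 * numbers): if a transaction queues two BTRFS_ADD_DELAYED_REF entries and one
 * BTRFS_DROP_DELAYED_REF entry for the same extent, each individual ref node
 * carries ref_mod == 1, while the head ref accumulates the signed sum,
 * +2 - 1 = +1, so running the delayed refs ends up adding exactly one
 * reference on disk.
 */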

struct btrfs_delayed_extent_op {
	struct btrfs_disk_key key;
	u8 level;
	bool update_key;
	bool update_flags;
	u64 flags_to_set;
};

/*
 * the head refs are used to hold a lock on a given extent, which allows us
 * to make sure that only one process is running the delayed refs
 * at a time for a single extent.  They also store the sum of all the
 * reference count modifications we've queued up.
 */
struct btrfs_delayed_ref_head {
	u64 bytenr;
	u64 num_bytes;
	/*
	 * For insertion into struct btrfs_delayed_ref_root::href_root.
	 * Keep it in the same cache line as 'bytenr' for more efficient
	 * searches in the rbtree.
	 */
	struct rb_node href_node;
	/*
	 * the mutex is held while running the refs, and it is also
	 * held when checking the sum of reference modifications.
	 */
	struct mutex mutex;

	refcount_t refs;

	/* Protects 'ref_tree' and 'ref_add_list'. */
	spinlock_t lock;
	struct rb_root_cached ref_tree;
	/* Accumulates BTRFS_ADD_DELAYED_REF nodes on this ref_add_list. */
	struct list_head ref_add_list;

	struct btrfs_delayed_extent_op *extent_op;

	/*
	 * This is used to track the final ref_mod from all the refs associated
	 * with this head ref, this is not adjusted as delayed refs are run,
	 * this is meant to track if we need to do the csum accounting or not.
	 */
	int total_ref_mod;

	/*
	 * This is the current outstanding mod references for this bytenr.  This
	 * is used with lookup_extent_info to get an accurate reference count
	 * for a bytenr, so it is adjusted as delayed refs are run so that any
	 * on disk reference count + ref_mod is accurate.
	 */
	int ref_mod;

	/*
	 * when a new extent is allocated, it is just reserved in memory.
	 * The actual extent isn't inserted into the extent allocation tree
	 * until the delayed ref is processed.  must_insert_reserved is
	 * used to flag a delayed ref so the accounting can be updated
	 * when a full insert is done.
	 *
	 * It is possible the extent will be freed before it is ever
	 * inserted into the extent allocation tree.  In this case
	 * we need to update the in ram accounting to properly reflect
	 * the free has happened.
	 */
	bool must_insert_reserved;
	bool is_data;
	bool is_system;
};
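
/*
 * Worked example of the ref_mod bookkeeping above (illustrative numbers): if
 * an extent currently has 3 references on disk and its head ref carries
 * ref_mod == -1, a lookup_extent_info() style query reports 3 + (-1) = 2
 * references.  As each queued ref is run, head->ref_mod moves toward 0 while
 * the on-disk count absorbs the change, so the sum stays accurate.
 */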

struct btrfs_delayed_tree_ref {
	struct btrfs_delayed_ref_node node;
	u64 root;
	u64 parent;
	int level;
};

struct btrfs_delayed_data_ref {
	struct btrfs_delayed_ref_node node;
	u64 root;
	u64 parent;
	u64 objectid;
	u64 offset;
};

enum btrfs_delayed_ref_flags {
	/* Indicate that we are flushing delayed refs for the commit */
	BTRFS_DELAYED_REFS_FLUSHING,
};

struct btrfs_delayed_ref_root {
	/* head ref rbtree */
	struct rb_root_cached href_root;

	/* dirty extent records */
	struct rb_root dirty_extent_root;

	/* this spin lock protects the rbtree and the entries inside */
	spinlock_t lock;

	/* how many delayed ref updates we've queued, used by the
	 * throttling code
	 */
	atomic_t num_entries;

	/* total number of head nodes in tree */
	unsigned long num_heads;

	/* total number of head nodes ready for processing */
	unsigned long num_heads_ready;

	unsigned long flags;

	u64 run_delayed_start;

	/*
	 * To make qgroup skip a given root.
	 *
	 * This is for snapshot, as btrfs_qgroup_inherit() will manually
	 * modify counters for snapshot and its source, so we should skip
	 * the snapshot in new_root/old_roots or it will get calculated twice.
	 */
	u64 qgroup_to_skip;
};

enum btrfs_ref_type {
	BTRFS_REF_NOT_SET,
	BTRFS_REF_DATA,
	BTRFS_REF_METADATA,
	BTRFS_REF_LAST,
};

struct btrfs_data_ref {
	/* For EXTENT_DATA_REF */

	/* Original root this data extent belongs to */
	u64 owning_root;

	/* Inode which refers to this data extent */
	u64 ino;

	/*
	 * file_offset - extent_offset
	 *
	 * file_offset is the key.offset of the EXTENT_DATA key.
	 * extent_offset is btrfs_file_extent_offset() of the EXTENT_DATA data.
	 */
	u64 offset;
};
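
/*
 * Worked example for the offset member above (illustrative numbers): a file
 * extent item keyed at file_offset 1M that points 256K into its extent
 * (btrfs_file_extent_offset() == 256K) stores offset = 1M - 256K = 768K,
 * i.e. the file position the extent would start at if it were referenced
 * from its very beginning.
 */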

struct btrfs_tree_ref {
	/*
	 * Level of this tree block.
	 *
	 * Shared for skinny (TREE_BLOCK_REF) and normal tree ref.
	 */
	int level;

	/*
	 * Root which owns this tree block.
	 *
	 * For TREE_BLOCK_REF (skinny metadata, either inline or keyed).
	 */
	u64 owning_root;

	/* For non-skinny metadata, no special member needed */
};

struct btrfs_ref {
	enum btrfs_ref_type type;
	int action;

	/*
	 * Whether this extent should go through qgroup record.
	 *
	 * Normally false, but for certain cases like delayed subtree scan,
	 * setting this flag can hugely reduce qgroup overhead.
	 */
	bool skip_qgroup;

#ifdef CONFIG_BTRFS_FS_REF_VERIFY
	/* Through which root is this modification. */
	u64 real_root;
#endif

	u64 bytenr;
	u64 len;

	/* Bytenr of the parent tree block */
	u64 parent;
	union {
		struct btrfs_data_ref data_ref;
		struct btrfs_tree_ref tree_ref;
	};
};

extern struct kmem_cache *btrfs_delayed_ref_head_cachep;
extern struct kmem_cache *btrfs_delayed_tree_ref_cachep;
extern struct kmem_cache *btrfs_delayed_data_ref_cachep;
extern struct kmem_cache *btrfs_delayed_extent_op_cachep;

int __init btrfs_delayed_ref_init(void);
void __cold btrfs_delayed_ref_exit(void);

static inline u64 btrfs_calc_delayed_ref_bytes(const struct btrfs_fs_info *fs_info,
					       int num_delayed_refs)
{
	u64 num_bytes;

	num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_delayed_refs);

	/*
	 * We have to check the mount option here because we could be enabling
	 * the free space tree for the first time and don't have the compat_ro
	 * bit set yet.
	 *
	 * We need extra reservations if we have the free space tree because
	 * we'll have to modify that tree as well.
	 */
	if (btrfs_test_opt(fs_info, FREE_SPACE_TREE))
		num_bytes *= 2;

	return num_bytes;
}
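
/*
 * Example use (sketch): a caller that queues @nr delayed refs would reserve
 *
 *	u64 bytes = btrfs_calc_delayed_ref_bytes(fs_info, nr);
 *
 * worth of metadata space up front, and later drop the reservation for those
 * refs with btrfs_delayed_refs_rsv_release(fs_info, nr), declared below, once
 * they have been run.
 */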

static inline void btrfs_init_generic_ref(struct btrfs_ref *generic_ref,
					  int action, u64 bytenr, u64 len,
					  u64 parent)
{
	generic_ref->action = action;
	generic_ref->bytenr = bytenr;
	generic_ref->len = len;
	generic_ref->parent = parent;
}

static inline void btrfs_init_tree_ref(struct btrfs_ref *generic_ref, int level,
				       u64 root, u64 mod_root, bool skip_qgroup)
{
#ifdef CONFIG_BTRFS_FS_REF_VERIFY
	/* If @real_root not set, use @root as fallback */
	generic_ref->real_root = mod_root ?: root;
#endif
	generic_ref->tree_ref.level = level;
	generic_ref->tree_ref.owning_root = root;
	generic_ref->type = BTRFS_REF_METADATA;
	if (skip_qgroup || !(is_fstree(root) &&
			     (!mod_root || is_fstree(mod_root))))
		generic_ref->skip_qgroup = true;
	else
		generic_ref->skip_qgroup = false;
}

static inline void btrfs_init_data_ref(struct btrfs_ref *generic_ref,
				       u64 ref_root, u64 ino, u64 offset,
				       u64 mod_root, bool skip_qgroup)
{
#ifdef CONFIG_BTRFS_FS_REF_VERIFY
	/* If @real_root not set, use @root as fallback */
	generic_ref->real_root = mod_root ?: ref_root;
#endif
	generic_ref->data_ref.owning_root = ref_root;
	generic_ref->data_ref.ino = ino;
	generic_ref->data_ref.offset = offset;
	generic_ref->type = BTRFS_REF_DATA;
	if (skip_qgroup || !(is_fstree(ref_root) &&
			     (!mod_root || is_fstree(mod_root))))
		generic_ref->skip_qgroup = true;
	else
		generic_ref->skip_qgroup = false;
}
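
/*
 * Typical initialization sequence (sketch, with placeholder values): the
 * generic part is filled in first and the type-specific part second, then the
 * ref is queued with the matching helper declared below:
 *
 *	struct btrfs_ref ref = { 0 };
 *
 *	btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr, num_bytes, 0);
 *	btrfs_init_data_ref(&ref, root_objectid, ino, file_offset, 0, false);
 *	ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
 *
 * Metadata refs follow the same pattern with btrfs_init_tree_ref() and
 * btrfs_add_delayed_tree_ref().
 */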

static inline struct btrfs_delayed_extent_op *
btrfs_alloc_delayed_extent_op(void)
{
	return kmem_cache_alloc(btrfs_delayed_extent_op_cachep, GFP_NOFS);
}

static inline void
btrfs_free_delayed_extent_op(struct btrfs_delayed_extent_op *op)
{
	if (op)
		kmem_cache_free(btrfs_delayed_extent_op_cachep, op);
}

static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
{
	WARN_ON(refcount_read(&ref->refs) == 0);
	if (refcount_dec_and_test(&ref->refs)) {
		WARN_ON(!RB_EMPTY_NODE(&ref->ref_node));
		switch (ref->type) {
		case BTRFS_TREE_BLOCK_REF_KEY:
		case BTRFS_SHARED_BLOCK_REF_KEY:
			kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY:
		case BTRFS_SHARED_DATA_REF_KEY:
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			break;
		default:
			BUG();
		}
	}
}

static inline u64 btrfs_ref_head_to_space_flags(
				struct btrfs_delayed_ref_head *head_ref)
{
	if (head_ref->is_data)
		return BTRFS_BLOCK_GROUP_DATA;
	else if (head_ref->is_system)
		return BTRFS_BLOCK_GROUP_SYSTEM;
	return BTRFS_BLOCK_GROUP_METADATA;
}

static inline void btrfs_put_delayed_ref_head(struct btrfs_delayed_ref_head *head)
{
	if (refcount_dec_and_test(&head->refs))
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head);
}

int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_ref *generic_ref,
			       struct btrfs_delayed_extent_op *extent_op);
int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_ref *generic_ref,
			       u64 reserved);
int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op);
void btrfs_merge_delayed_refs(struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head);

struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
			    u64 bytenr);
int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
			   struct btrfs_delayed_ref_head *head);
static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
{
	mutex_unlock(&head->mutex);
}
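
/*
 * Sketch of how the locking helpers above and below fit together when
 * running delayed refs (simplified; spinlock handling, error paths and the
 * actual ref processing are omitted):
 *
 *	head = btrfs_select_ref_head(delayed_refs);
 *	if (head && btrfs_delayed_ref_lock(delayed_refs, head) == 0) {
 *		... run and remove the refs queued on this head ...
 *		btrfs_delayed_ref_unlock(head);
 *		btrfs_put_delayed_ref_head(head);
 *	}
 *
 * Holding head->mutex is what guarantees that only one task runs the refs
 * for a given extent at a time.
 */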

void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
			   struct btrfs_delayed_ref_head *head);

struct btrfs_delayed_ref_head *btrfs_select_ref_head(
		struct btrfs_delayed_ref_root *delayed_refs);

int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq);

void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr);
void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans);
int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
				  enum btrfs_reserve_flush_enum flush);
void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
				       u64 num_bytes);
bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info);

/*
 * helper functions to cast a node into its container
 */
static inline struct btrfs_delayed_tree_ref *
btrfs_delayed_node_to_tree_ref(struct btrfs_delayed_ref_node *node)
{
	return container_of(node, struct btrfs_delayed_tree_ref, node);
}

static inline struct btrfs_delayed_data_ref *
btrfs_delayed_node_to_data_ref(struct btrfs_delayed_ref_node *node)
{
	return container_of(node, struct btrfs_delayed_data_ref, node);
}

#endif