/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#ifndef BTRFS_CTREE_H
#define BTRFS_CTREE_H

#include <linux/pagemap.h>
#include "locking.h"
#include "fs.h"
#include "accessors.h"

struct btrfs_trans_handle;
struct btrfs_transaction;
struct btrfs_pending_snapshot;
struct btrfs_delayed_ref_root;
struct btrfs_space_info;
struct btrfs_block_group;
struct btrfs_ordered_sum;
struct btrfs_ref;
struct btrfs_bio;
struct btrfs_ioctl_encoded_io_args;
struct btrfs_device;
struct btrfs_fs_devices;
struct btrfs_balance_control;
struct btrfs_delayed_root;
struct reloc_control;

/* Read ahead values for struct btrfs_path.reada */
enum {
	READA_NONE,
	READA_BACK,
	READA_FORWARD,
	/*
	 * Similar to READA_FORWARD but unlike it:
	 *
	 * 1) It will trigger readahead even for leaves that are not close to
	 *    each other on disk;
	 * 2) It also triggers readahead for nodes;
	 * 3) During a search, even when a node or leaf is already in memory, it
	 *    will still trigger readahead for other nodes and leaves that follow
	 *    it.
	 *
	 * This is meant to be used only when we know we are iterating over the
	 * entire tree or a very large part of it.
	 */
	READA_FORWARD_ALWAYS,
};

/*
 * btrfs_paths remember the path taken from the root down to the leaf.
 * level 0 is always the leaf, and nodes[1...BTRFS_MAX_LEVEL] will point
 * to any other levels that are present.
 *
 * The slots array records the index of the item or block pointer
 * used while walking the tree.
 */
struct btrfs_path {
	struct extent_buffer *nodes[BTRFS_MAX_LEVEL];
	int slots[BTRFS_MAX_LEVEL];
	/* if there is real range locking, this locks field will change */
	u8 locks[BTRFS_MAX_LEVEL];
	u8 reada;
	/* keep some upper locks as we walk down */
	u8 lowest_level;

	/*
	 * set by btrfs_split_item, tells search_slot to keep all locks
	 * and to force calls to keep space in the nodes
	 */
	unsigned int search_for_split:1;
	unsigned int keep_locks:1;
	unsigned int skip_locking:1;
	unsigned int search_commit_root:1;
	unsigned int need_commit_sem:1;
	unsigned int skip_release_on_error:1;
	/*
	 * Indicate that the new item (passed to btrfs_search_slot()) is
	 * extending an already existing item, so ins_len contains only the
	 * data size and not the item header (i.e. sizeof(struct btrfs_item)
	 * is not included).
	 */
	unsigned int search_for_extension:1;
	/* Stop search if any locks need to be taken (for read) */
	unsigned int nowait:1;
};

/*
 * The state of a btrfs root
 */
enum {
	/*
	 * btrfs_record_root_in_trans() is a multi-step process and it can
	 * race with the balancing code.  But the race window is very small
	 * and it only happens the first time the root is added to each
	 * transaction, so IN_TRANS_SETUP is used to tell us when more checks
	 * are required.
	 */
	BTRFS_ROOT_IN_TRANS_SETUP,

	/*
	 * Set if tree blocks of this root can be shared by other roots.
	 * Only subvolume trees and their reloc trees have this bit set.
	 * Conflicts with the TRACK_DIRTY bit.
	 *
	 * This affects two things:
	 *
	 * - How balance works
	 *   For shareable roots, we need to use the reloc tree and do path
	 *   replacement for balance, and need various pre/post hooks for
	 *   snapshot creation to handle them.
	 *
	 *   For non-shareable trees, we just simply do a tree search with COW.
	 *
	 * - How dirty roots are tracked
	 *   For shareable roots, btrfs_record_root_in_trans() is needed to
	 *   track them, while non-subvolume roots have the TRACK_DIRTY bit
	 *   and don't need to set this manually.
	 */
	BTRFS_ROOT_SHAREABLE,
	BTRFS_ROOT_TRACK_DIRTY,
	BTRFS_ROOT_IN_RADIX,
	BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
	BTRFS_ROOT_DEFRAG_RUNNING,
	BTRFS_ROOT_FORCE_COW,
	BTRFS_ROOT_MULTI_LOG_TASKS,
	BTRFS_ROOT_DIRTY,
	BTRFS_ROOT_DELETING,

	/*
	 * Reloc tree is orphan, only kept here for qgroup delayed subtree scan.
	 *
	 * Set for the subvolume tree owning the reloc tree.
	 */
	BTRFS_ROOT_DEAD_RELOC_TREE,
	/* Mark dead root stored on device whose cleanup needs to be resumed */
	BTRFS_ROOT_DEAD_TREE,
	/* The root has a log tree. Used for subvolume roots and the tree root. */
	BTRFS_ROOT_HAS_LOG_TREE,
	/* Qgroup flushing is in progress */
	BTRFS_ROOT_QGROUP_FLUSHING,
	/* We started the orphan cleanup for this root. */
	BTRFS_ROOT_ORPHAN_CLEANUP,
	/* This root has a drop operation that was started previously. */
	BTRFS_ROOT_UNFINISHED_DROP,
	/* This reloc root needs to have its buffers lockdep class reset. */
	BTRFS_ROOT_RESET_LOCKDEP_CLASS,
};

/*
 * Record swapped tree blocks of a subvolume tree for delayed subtree trace
 * code. For details check the comment in fs/btrfs/qgroup.c.
 */
struct btrfs_qgroup_swapped_blocks {
	spinlock_t lock;
	/* RB_EMPTY_ROOT() of above blocks[] */
	bool swapped;
	struct rb_root blocks[BTRFS_MAX_LEVEL];
};

/*
 * In-memory representation of a tree.  The extent root is used for all
 * allocations and for the extent tree root.
 */
struct btrfs_root {
	struct rb_node rb_node;

	struct extent_buffer *node;

	struct extent_buffer *commit_root;
	struct btrfs_root *log_root;
	struct btrfs_root *reloc_root;

	unsigned long state;
	struct btrfs_root_item root_item;
	struct btrfs_key root_key;
	struct btrfs_fs_info *fs_info;
	struct extent_io_tree dirty_log_pages;

	struct mutex objectid_mutex;

	spinlock_t accounting_lock;
	struct btrfs_block_rsv *block_rsv;

	struct mutex log_mutex;
	wait_queue_head_t log_writer_wait;
	wait_queue_head_t log_commit_wait[2];
	struct list_head log_ctxs[2];
	/* Used only for log trees of subvolumes, not for the log root tree */
	atomic_t log_writers;
	atomic_t log_commit[2];
	/* Used only for log trees of subvolumes, not for the log root tree */
	atomic_t log_batch;
	int log_transid;
	/* Updated no matter whether the log commit succeeds or not */
	int log_transid_committed;
	/* Only updated when the log commit succeeds. */
	int last_log_commit;
	pid_t log_start_pid;

	u64 last_trans;

	u32 type;

	u64 free_objectid;

	struct btrfs_key defrag_progress;
	struct btrfs_key defrag_max;

	/* The dirty list is only used by non-shareable roots */
	struct list_head dirty_list;

	struct list_head root_list;

	spinlock_t log_extents_lock[2];
	struct list_head logged_list[2];

	spinlock_t inode_lock;
	/* red-black tree that keeps track of in-memory inodes */
	struct rb_root inode_tree;

	/*
	 * Radix tree that keeps track of delayed nodes of every inode,
	 * protected by inode_lock.
	 */
	struct radix_tree_root delayed_nodes_tree;
	/*
	 * Right now this just gets used so that a root has its own devid
	 * for stat.  It may be used for more later.
	 */
	dev_t anon_dev;

	spinlock_t root_item_lock;
	refcount_t refs;

	struct mutex delalloc_mutex;
	spinlock_t delalloc_lock;
	/*
	 * All of the inodes that have delalloc bytes.  It is possible for
	 * this list to be empty even when there are still dirty data=ordered
	 * extents waiting to finish IO.
	 */
	struct list_head delalloc_inodes;
	struct list_head delalloc_root;
	u64 nr_delalloc_inodes;

	struct mutex ordered_extent_mutex;
	/*
	 * This is used by the balancing code to wait for all the pending
	 * ordered extents.
	 */
	spinlock_t ordered_extent_lock;

	/*
	 * All of the data=ordered extents pending writeback.
	 * These can span multiple transactions and basically include
	 * every dirty data page that isn't from nodatacow.
	 */
	struct list_head ordered_extents;
	struct list_head ordered_root;
	u64 nr_ordered_extents;

	/*
	 * Not empty if this subvolume root has gone through tree block swap
	 * (relocation).
	 *
	 * Will be used by reloc_control::dirty_subvol_roots.
	 */
	struct list_head reloc_dirty_list;

	/*
	 * Number of currently running SEND ioctls to prevent
	 * manipulation of the read-only status via SUBVOL_SETFLAGS.
	 */
	int send_in_progress;
	/*
	 * Number of currently running deduplication operations that have a
	 * destination inode belonging to this root. Protected by
	 * root_item_lock.
	 */
	int dedupe_in_progress;
	/* For exclusion of snapshot creation and nocow writes */
	struct btrfs_drew_lock snapshot_lock;

	atomic_t snapshot_force_cow;

	/* For qgroup metadata reserved space */
	spinlock_t qgroup_meta_rsv_lock;
	u64 qgroup_meta_rsv_pertrans;
	u64 qgroup_meta_rsv_prealloc;
	wait_queue_head_t qgroup_flush_wait;

	/* Number of active swapfiles */
	atomic_t nr_swapfiles;

	/* Record pairs of swapped blocks for qgroup */
	struct btrfs_qgroup_swapped_blocks swapped_blocks;

	/* Used only by log trees, when logging csum items */
	struct extent_io_tree log_csum_range;

	/* Used in simple quotas, track root during relocation. */
	u64 relocation_src_root;

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	u64 alloc_bytenr;
#endif

#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};

static inline bool btrfs_root_readonly(const struct btrfs_root *root)
{
	/* Byte-swap the constant at compile time, root_item::flags is LE */
	return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_RDONLY)) != 0;
}

static inline bool btrfs_root_dead(const struct btrfs_root *root)
{
	/* Byte-swap the constant at compile time, root_item::flags is LE */
	return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_DEAD)) != 0;
}

static inline u64 btrfs_root_id(const struct btrfs_root *root)
{
	return root->root_key.objectid;
}

/*
 * Structure that conveys information about an extent that is going to replace
 * all the extents in a file range.
 */
struct btrfs_replace_extent_info {
	u64 disk_offset;
	u64 disk_len;
	u64 data_offset;
	u64 data_len;
	u64 file_offset;
	/* Pointer to a file extent item of type regular or prealloc. */
	char *extent_buf;
	/*
	 * Set to true when attempting to replace a file range with a new extent
	 * described by this structure, set to false when attempting to clone an
	 * existing extent into a file range.
	 */
	bool is_new_extent;
	/* Indicate if we should update the inode's mtime and ctime. */
	bool update_times;
	/* Meaningful only if is_new_extent is true. */
	int qgroup_reserved;
	/*
	 * Meaningful only if is_new_extent is true.
	 * Used to track how many extent items we have already inserted in a
	 * subvolume tree that refer to the extent described by this structure,
	 * so that we know when to create a new delayed ref or update an existing
	 * one.
	 */
	int insertions;
};

/* Arguments for btrfs_drop_extents() */
struct btrfs_drop_extents_args {
	/* Input parameters */

	/*
	 * If NULL, btrfs_drop_extents() will allocate and free its own path.
	 * If 'replace_extent' is true, this must not be NULL. Also the path
	 * is always released except if 'replace_extent' is true and
	 * btrfs_drop_extents() sets 'extent_inserted' to true, in which case
	 * the path is kept locked.
	 */
	struct btrfs_path *path;
	/* Start offset of the range to drop extents from */
	u64 start;
	/* End (exclusive, last byte + 1) of the range to drop extents from */
	u64 end;
	/* If true drop all the extent maps in the range */
	bool drop_cache;
	/*
	 * If true it means we want to insert a new extent after dropping all
	 * the extents in the range. If this is true, the 'extent_item_size'
	 * parameter must be set as well and the 'extent_inserted' field will
	 * be set to true by btrfs_drop_extents() if it could insert the new
	 * extent.
	 * Note: when this is set to true the path must not be NULL.
	 */
	bool replace_extent;
	/*
	 * Used if 'replace_extent' is true. Size of the file extent item to
	 * insert after dropping all existing extents in the range.
	 */
	u32 extent_item_size;

	/* Output parameters */

	/*
	 * Set to the minimum between the input parameter 'end' and the end
	 * (exclusive, last byte + 1) of the last dropped extent. This is always
	 * set even if btrfs_drop_extents() returns an error.
	 */
	u64 drop_end;
	/*
	 * The number of allocated bytes found in the range. This can be smaller
	 * than the range's length when there are holes in the range.
	 */
	u64 bytes_found;
	/*
	 * Only set if 'replace_extent' is true. Set to true if we were able
	 * to insert a replacement extent after dropping all extents in the
	 * range, otherwise set to false by btrfs_drop_extents().
	 * Also, if btrfs_drop_extents() has set this to true it means it
	 * returned with the path locked, otherwise if it has set this to
	 * false it has returned with the path released.
	 */
	bool extent_inserted;
};

struct btrfs_file_private {
	void *filldir_buf;
	u64 last_index;
	struct extent_state *llseek_cached_state;
};

static inline u32 BTRFS_LEAF_DATA_SIZE(const struct btrfs_fs_info *info)
{
	return info->nodesize - sizeof(struct btrfs_header);
}

static inline u32 BTRFS_MAX_ITEM_SIZE(const struct btrfs_fs_info *info)
{
	return BTRFS_LEAF_DATA_SIZE(info) - sizeof(struct btrfs_item);
}

static inline u32 BTRFS_NODEPTRS_PER_BLOCK(const struct btrfs_fs_info *info)
{
	return BTRFS_LEAF_DATA_SIZE(info) / sizeof(struct btrfs_key_ptr);
}

static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_fs_info *info)
{
	return BTRFS_MAX_ITEM_SIZE(info) - sizeof(struct btrfs_dir_item);
}

#define BTRFS_BYTES_TO_BLKS(fs_info, bytes) \
	((bytes) >> (fs_info)->sectorsize_bits)

static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping)
{
	return mapping_gfp_constraint(mapping, ~__GFP_FS);
}

int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
				   u64 start, u64 end);
int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 num_bytes, u64 *actual_bytes);
int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range);

/* ctree.c */
int __init btrfs_ctree_init(void);
void __cold btrfs_ctree_exit(void);

int btrfs_bin_search(struct extent_buffer *eb, int first_slot,
		     const struct btrfs_key *key, int *slot);

int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2);

#ifdef __LITTLE_ENDIAN

/*
 * Compare two keys, on little-endian the disk order is the same as the CPU
 * order and we can avoid the conversion.
 */
static inline int btrfs_comp_keys(const struct btrfs_disk_key *disk_key,
				  const struct btrfs_key *k2)
{
	const struct btrfs_key *k1 = (const struct btrfs_key *)disk_key;

	return btrfs_comp_cpu_keys(k1, k2);
}

#else

/* Compare two keys in a memcmp fashion. */
static inline int btrfs_comp_keys(const struct btrfs_disk_key *disk,
				  const struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}

#endif
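
/*
 * Illustrative example (a minimal sketch, not part of the original header):
 * a linear scan over a leaf using btrfs_comp_keys() to find the first slot
 * whose key is >= a target key.  The real helper for this is
 * btrfs_bin_search(); the function name below is hypothetical and only shows
 * how the disk-key comparator is meant to be used.
 */
static inline int example_linear_key_search(const struct extent_buffer *leaf,
					    const struct btrfs_key *target)
{
	const u32 nritems = btrfs_header_nritems(leaf);
	u32 slot;

	for (slot = 0; slot < nritems; slot++) {
		struct btrfs_disk_key disk_key;

		/* Read the on-disk (little-endian) key of this slot. */
		btrfs_item_key(leaf, &disk_key, slot);
		/* btrfs_comp_keys() returns <0, 0 or >0, memcmp style. */
		if (btrfs_comp_keys(&disk_key, target) >= 0)
			return slot;
	}
	return nritems;
}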

int btrfs_previous_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid,
			int type);
int btrfs_previous_extent_item(struct btrfs_root *root,
			       struct btrfs_path *path, u64 min_objectid);
void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
			     struct btrfs_path *path,
			     const struct btrfs_key *new_key);
struct extent_buffer *btrfs_root_node(struct btrfs_root *root);
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
			struct btrfs_key *key, int lowest_level,
			u64 min_trans);
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
			 struct btrfs_path *path,
			 u64 min_trans);
struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
					   int slot);

int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret,
		    enum btrfs_lock_nesting nest);
int btrfs_force_cow_block(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  struct extent_buffer *buf,
			  struct extent_buffer *parent, int parent_slot,
			  struct extent_buffer **cow_ret,
			  u64 search_start, u64 empty_size,
			  enum btrfs_lock_nesting nest);
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root,
		    struct extent_buffer *buf,
		    struct extent_buffer **cow_ret, u64 new_root_objectid);
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf);
int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct btrfs_path *path, int level, int slot);
void btrfs_extend_item(struct btrfs_trans_handle *trans,
		       struct btrfs_path *path, u32 data_size);
void btrfs_truncate_item(struct btrfs_trans_handle *trans,
			 struct btrfs_path *path, u32 new_size, int from_end);
int btrfs_split_item(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_path *path,
		     const struct btrfs_key *new_key,
		     unsigned long split_offset);
int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path,
			 const struct btrfs_key *new_key);
int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
		    u64 inum, u64 ioff, u8 key_type, struct btrfs_key *found_key);
int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *key, struct btrfs_path *p,
		      int ins_len, int cow);
int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
			  struct btrfs_path *p, u64 time_seq);
int btrfs_search_slot_for_read(struct btrfs_root *root,
			       const struct btrfs_key *key,
			       struct btrfs_path *p, int find_higher,
			       int return_any);
void btrfs_release_path(struct btrfs_path *p);
struct btrfs_path *btrfs_alloc_path(void);
void btrfs_free_path(struct btrfs_path *p);

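/*
 * Illustrative example (a minimal sketch, not part of the original header):
 * the typical read-only lifetime of a struct btrfs_path - allocate, search
 * without COW (trans == NULL, ins_len == 0, cow == 0), inspect the slot and
 * free the path.  The function name is hypothetical; READA_FORWARD_ALWAYS is
 * set only because the imaginary caller intends to visit a large part of the
 * tree, as described for struct btrfs_path.reada.
 */
static inline int example_read_only_lookup(struct btrfs_root *root,
					   const struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	/* Aggressive readahead, meant for (nearly) full tree iterations. */
	path->reada = READA_FORWARD_ALWAYS;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	/*
	 * ret == 0: exact match at path->nodes[0], path->slots[0].
	 * ret > 0:  key not found, the path points at the insertion position.
	 * ret < 0:  error.
	 */
	btrfs_free_path(path);
	return ret;
}
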
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int slot, int nr);
static inline int btrfs_del_item(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path)
{
	return btrfs_del_items(trans, root, path, path->slots[0], 1);
}

/*
 * Describes a batch of items to insert in a btree. This is used by
 * btrfs_insert_empty_items().
 */
struct btrfs_item_batch {
	/*
	 * Pointer to an array containing the keys of the items to insert (in
	 * sorted order).
	 */
	const struct btrfs_key *keys;
	/* Pointer to an array containing the data size for each item to insert. */
	const u32 *data_sizes;
	/*
	 * The sum of data sizes for all items. The caller can compute this while
	 * setting up the data_sizes array, so it ends up being more efficient
	 * than having btrfs_insert_empty_items() or setup_item_for_insert()
	 * doing it, as it would avoid an extra loop over a potentially large
	 * array, and in the case of setup_item_for_insert(), we would be doing
	 * it while holding a write lock on a leaf and often on upper level nodes
	 * too, unnecessarily increasing the size of a critical section.
	 */
	u32 total_data_size;
	/* Size of the keys and data_sizes arrays (number of items in the batch). */
	int nr;
};

void btrfs_setup_item_for_insert(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 const struct btrfs_key *key,
				 u32 data_size);
int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *key, void *data, u32 data_size);
int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path,
			     const struct btrfs_item_batch *batch);

static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  const struct btrfs_key *key,
					  u32 data_size)
{
	struct btrfs_item_batch batch;

	batch.keys = key;
	batch.data_sizes = &data_size;
	batch.total_data_size = data_size;
	batch.nr = 1;

	return btrfs_insert_empty_items(trans, root, path, &batch);
}

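/*
 * Illustrative example (a minimal sketch, not part of the original header):
 * a two-item batch insert built on struct btrfs_item_batch, with
 * total_data_size computed by the caller as the struct's comment recommends.
 * The function name is hypothetical and the keys are assumed to already be
 * in sorted order.
 */
static inline int example_insert_two_empty_items(struct btrfs_trans_handle *trans,
						 struct btrfs_root *root,
						 struct btrfs_path *path,
						 const struct btrfs_key keys[2],
						 u32 size0, u32 size1)
{
	const u32 data_sizes[2] = { size0, size1 };
	const struct btrfs_item_batch batch = {
		.keys = keys,
		.data_sizes = data_sizes,
		/* Summed here to avoid an extra loop under the leaf's write lock. */
		.total_data_size = size0 + size1,
		.nr = 2,
	};

	return btrfs_insert_empty_items(trans, root, path, &batch);
}
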
int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
			u64 time_seq);

int btrfs_search_backwards(struct btrfs_root *root, struct btrfs_key *key,
			   struct btrfs_path *path);

int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key,
			      struct btrfs_path *path);

/*
 * Search in @root for a given @key, and store the key of the slot found in
 * @found_key.
 *
 * @root:       The root node of the tree.
 * @key:        The key we are looking for.
 * @found_key:  Will hold the key of the found item.
 * @path:       Holds the current slot/leaf.
 * @iter_ret:   Contains the value returned from btrfs_search_slot or
 *              btrfs_get_next_valid_item, whichever was executed last.
 *
 * The @iter_ret is an output variable that will contain the return value of
 * btrfs_search_slot, if it encountered an error, or the value returned from
 * btrfs_get_next_valid_item otherwise. That return value can be 0, if a valid
 * slot was found, 1 if there were no more leaves, and <0 if there was an error.
 *
 * It's recommended to use a separate variable for iter_ret and then use it to
 * set the function return value so there's no confusion of the 0/1/errno
 * values stemming from btrfs_search_slot.
 */
#define btrfs_for_each_slot(root, key, found_key, path, iter_ret)		\
	for (iter_ret = btrfs_search_slot(NULL, (root), (key), (path), 0, 0);	\
		(iter_ret) >= 0 &&						\
		(iter_ret = btrfs_get_next_valid_item((root), (found_key), (path))) == 0; \
		(path)->slots[0]++						\
	)

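/*
 * Illustrative example (a minimal sketch, not part of the original header):
 * iterating a tree with btrfs_for_each_slot(), using a separate iter_ret
 * variable to derive the function's return value as recommended above.  The
 * function name and the all-zero start key are hypothetical.
 */
static inline int example_iterate_whole_tree(struct btrfs_root *root)
{
	struct btrfs_key key = { .objectid = 0, .type = 0, .offset = 0 };
	struct btrfs_key found_key;
	struct btrfs_path *path;
	int iter_ret = 0;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
		/* Process the item at path->nodes[0], path->slots[0] here. */
	}
	/* iter_ret is 1 when there are no more leaves, < 0 on error. */
	if (iter_ret < 0)
		ret = iter_ret;

	btrfs_free_path(path);
	return ret;
}
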
int btrfs_next_old_item(struct btrfs_root *root, struct btrfs_path *path, u64 time_seq);

/*
 * Search the tree again to find a leaf with greater keys.
 *
 * Returns 0 if it found something or 1 if there are no greater leaves.
 * Returns < 0 on error.
 */
static inline int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	return btrfs_next_old_leaf(root, path, 0);
}

static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p)
{
	return btrfs_next_old_item(root, p, 0);
}
int btrfs_leaf_free_space(const struct extent_buffer *leaf);

static inline int is_fstree(u64 rootid)
{
	if (rootid == BTRFS_FS_TREE_OBJECTID ||
	    ((s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID &&
	     !btrfs_qgroup_level(rootid)))
		return 1;
	return 0;
}

static inline bool btrfs_is_data_reloc_root(const struct btrfs_root *root)
{
	return root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID;
}

u16 btrfs_csum_type_size(u16 type);
int btrfs_super_csum_size(const struct btrfs_super_block *s);
const char *btrfs_super_csum_name(u16 csum_type);
const char *btrfs_super_csum_driver(u16 csum_type);
size_t __attribute_const__ btrfs_get_num_csums(void);

/*
 * We use page status Private2 to indicate there is an ordered extent with
 * unfinished IO.
 *
 * Rename the Private2 accessors to Ordered, to improve readability.
 */
#define PageOrdered(page)		PagePrivate2(page)
#define SetPageOrdered(page)		SetPagePrivate2(page)
#define ClearPageOrdered(page)		ClearPagePrivate2(page)
#define folio_test_ordered(folio)	folio_test_private_2(folio)
#define folio_set_ordered(folio)	folio_set_private_2(folio)
#define folio_clear_ordered(folio)	folio_clear_private_2(folio)

#endif