/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#ifndef BTRFS_CTREE_H
#define BTRFS_CTREE_H

#include <linux/pagemap.h>
#include "locking.h"
#include "fs.h"
#include "accessors.h"

struct btrfs_trans_handle;
struct btrfs_transaction;
struct btrfs_pending_snapshot;
struct btrfs_delayed_ref_root;
struct btrfs_space_info;
struct btrfs_block_group;
struct btrfs_ordered_sum;
struct btrfs_ref;
struct btrfs_bio;
struct btrfs_ioctl_encoded_io_args;
struct btrfs_device;
struct btrfs_fs_devices;
struct btrfs_balance_control;
struct btrfs_delayed_root;
struct reloc_control;

/* Read ahead values for struct btrfs_path.reada */
enum {
	READA_NONE,
	READA_BACK,
	READA_FORWARD,
	/*
	 * Similar to READA_FORWARD but unlike it:
	 *
	 * 1) It will trigger readahead even for leaves that are not close to
	 *    each other on disk;
	 * 2) It also triggers readahead for nodes;
	 * 3) During a search, even when a node or leaf is already in memory,
	 *    it will still trigger readahead for other nodes and leaves that
	 *    follow it.
	 *
	 * This is meant to be used only when we know we are iterating over the
	 * entire tree or a very large part of it.
	 */
	READA_FORWARD_ALWAYS,
};
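
/*
 * A minimal usage sketch (not part of this header): to get aggressive
 * readahead for a full-tree iteration, set the mode on the path before
 * searching. Here "root", "key", "path" and "ret" are placeholders for an
 * existing root, search key, allocated path and return variable:
 *
 *	path->reada = READA_FORWARD_ALWAYS;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 */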

/*
 * btrfs_paths remember the path taken from the root down to the leaf.
 * level 0 is always the leaf, and nodes[1...BTRFS_MAX_LEVEL - 1] will
 * point to any other levels that are present.
 *
 * The slots array records the index of the item or block pointer
 * used while walking the tree.
 */
struct btrfs_path {
	struct extent_buffer *nodes[BTRFS_MAX_LEVEL];
	int slots[BTRFS_MAX_LEVEL];
	/* if there is real range locking, this locks field will change */
	u8 locks[BTRFS_MAX_LEVEL];
	u8 reada;
	/* keep some upper locks as we walk down */
	u8 lowest_level;

	/*
	 * set by btrfs_split_item, tells search_slot to keep all locks
	 * and to force calls to keep space in the nodes
	 */
	unsigned int search_for_split:1;
	unsigned int keep_locks:1;
	unsigned int skip_locking:1;
	unsigned int search_commit_root:1;
	unsigned int need_commit_sem:1;
	unsigned int skip_release_on_error:1;
	/*
	 * Indicate that the new item (btrfs_search_slot) is extending an
	 * already existing item and ins_len contains only the data size, not
	 * the item header (i.e. sizeof(struct btrfs_item) is not included).
	 */
	unsigned int search_for_extension:1;
	/* Stop search if any locks need to be taken (for read) */
	unsigned int nowait:1;
};
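
/*
 * A minimal lifecycle sketch (not part of this header): a path is
 * allocated, used for one or more searches and then released. The
 * transaction handle is NULL for a read-only search; "root" and "key"
 * are placeholders:
 *
 *	struct btrfs_path *path;
 *	int ret;
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0)
 *		... item is at path->nodes[0], slot path->slots[0] ...
 *	btrfs_free_path(path);
 *	return ret;
 */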

/*
 * The state of a btrfs root
 */
enum {
	/*
	 * btrfs_record_root_in_trans() is a multi-step process, and it can
	 * race with the balancing code. But the race window is very small,
	 * and it only exists the first time the root is added to each
	 * transaction. So IN_TRANS_SETUP is used to tell us when more
	 * checks are required.
	 */
	BTRFS_ROOT_IN_TRANS_SETUP,

	/*
	 * Set if tree blocks of this root can be shared by other roots.
	 * Only subvolume trees and their reloc trees have this bit set.
	 * Conflicts with the TRACK_DIRTY bit.
	 *
	 * This affects two things:
	 *
	 * - How balance works
	 *   For shareable roots, we need to use the reloc tree and do path
	 *   replacement for balance, and need various pre/post hooks for
	 *   snapshot creation to handle them.
	 *
	 *   For non-shareable trees, we simply do a tree search with COW.
	 *
	 * - How dirty roots are tracked
	 *   For shareable roots, btrfs_record_root_in_trans() is needed to
	 *   track them, while non-subvolume roots have the TRACK_DIRTY bit
	 *   and don't need to be tracked manually.
	 */
	BTRFS_ROOT_SHAREABLE,
	BTRFS_ROOT_TRACK_DIRTY,
	BTRFS_ROOT_IN_RADIX,
	BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
	BTRFS_ROOT_DEFRAG_RUNNING,
	BTRFS_ROOT_FORCE_COW,
	BTRFS_ROOT_MULTI_LOG_TASKS,
	BTRFS_ROOT_DIRTY,
	BTRFS_ROOT_DELETING,

	/*
	 * The reloc tree is orphaned, only kept here for the qgroup delayed
	 * subtree scan.
	 *
	 * Set for the subvolume tree owning the reloc tree.
	 */
	BTRFS_ROOT_DEAD_RELOC_TREE,
	/* Mark dead root stored on device whose cleanup needs to be resumed */
	BTRFS_ROOT_DEAD_TREE,
	/* The root has a log tree. Used for subvolume roots and the tree root. */
	BTRFS_ROOT_HAS_LOG_TREE,
	/* Qgroup flushing is in progress */
	BTRFS_ROOT_QGROUP_FLUSHING,
	/* We started the orphan cleanup for this root. */
	BTRFS_ROOT_ORPHAN_CLEANUP,
	/* This root has a drop operation that was started previously. */
	BTRFS_ROOT_UNFINISHED_DROP,
	/* This reloc root needs to have its buffers' lockdep class reset. */
	BTRFS_ROOT_RESET_LOCKDEP_CLASS,
};

/*
 * Record swapped tree blocks of a subvolume tree for delayed subtree trace
 * code. For details check the comment in fs/btrfs/qgroup.c.
 */
struct btrfs_qgroup_swapped_blocks {
	spinlock_t lock;
	/* RB_EMPTY_ROOT() of above blocks[] */
	bool swapped;
	struct rb_root blocks[BTRFS_MAX_LEVEL];
};

/*
 * In-memory representation of a tree. The extent root is used for all
 * allocations, including those of the extent tree itself.
 */
struct btrfs_root {
	struct rb_node rb_node;

	struct extent_buffer *node;

	struct extent_buffer *commit_root;
	struct btrfs_root *log_root;
	struct btrfs_root *reloc_root;

	unsigned long state;
	struct btrfs_root_item root_item;
	struct btrfs_key root_key;
	struct btrfs_fs_info *fs_info;
	struct extent_io_tree dirty_log_pages;

	struct mutex objectid_mutex;

	spinlock_t accounting_lock;
	struct btrfs_block_rsv *block_rsv;

	struct mutex log_mutex;
	wait_queue_head_t log_writer_wait;
	wait_queue_head_t log_commit_wait[2];
	struct list_head log_ctxs[2];
	/* Used only for log trees of subvolumes, not for the log root tree */
	atomic_t log_writers;
	atomic_t log_commit[2];
	/* Used only for log trees of subvolumes, not for the log root tree */
	atomic_t log_batch;
	int log_transid;
	/* Updated whether or not the commit succeeds. */
	int log_transid_committed;
	/* Only updated when the commit succeeds. */
	int last_log_commit;
	pid_t log_start_pid;

	u64 last_trans;

	u32 type;

	u64 free_objectid;

	struct btrfs_key defrag_progress;
	struct btrfs_key defrag_max;

	/* The dirty list is only used by non-shareable roots */
	struct list_head dirty_list;

	struct list_head root_list;

	spinlock_t log_extents_lock[2];
	struct list_head logged_list[2];

	spinlock_t inode_lock;
	/* red-black tree that keeps track of in-memory inodes */
	struct rb_root inode_tree;

	/*
	 * radix tree that keeps track of delayed nodes of every inode,
	 * protected by inode_lock
	 */
	struct radix_tree_root delayed_nodes_tree;
	/*
	 * Right now this just gets used so that a root has its own devid
	 * for stat. It may be used for more later.
	 */
	dev_t anon_dev;

	spinlock_t root_item_lock;
	refcount_t refs;

	struct mutex delalloc_mutex;
	spinlock_t delalloc_lock;
	/*
	 * All of the inodes that have delalloc bytes. It is possible for
	 * this list to be empty even when there are still dirty data=ordered
	 * extents waiting to finish IO.
	 */
	struct list_head delalloc_inodes;
	struct list_head delalloc_root;
	u64 nr_delalloc_inodes;

	struct mutex ordered_extent_mutex;
	/*
	 * This is used by the balancing code to wait for all the pending
	 * ordered extents.
	 */
	spinlock_t ordered_extent_lock;

	/*
	 * All of the data=ordered extents pending writeback. These can span
	 * multiple transactions and basically include every dirty data page
	 * that isn't from nodatacow.
	 */
	struct list_head ordered_extents;
	struct list_head ordered_root;
	u64 nr_ordered_extents;

	/*
	 * Not empty if this subvolume root has gone through tree block swap
	 * (relocation)
	 *
	 * Will be used by reloc_control::dirty_subvol_roots.
	 */
	struct list_head reloc_dirty_list;

	/*
	 * Number of currently running SEND ioctls to prevent
	 * manipulation of the read-only status via SUBVOL_SETFLAGS
	 */
	int send_in_progress;
	/*
	 * Number of currently running deduplication operations that have a
	 * destination inode belonging to this root. Protected by the lock
	 * root_item_lock.
	 */
	int dedupe_in_progress;
	/* For exclusion of snapshot creation and nocow writes */
	struct btrfs_drew_lock snapshot_lock;

	atomic_t snapshot_force_cow;

	/* For qgroup metadata reserved space */
	spinlock_t qgroup_meta_rsv_lock;
	u64 qgroup_meta_rsv_pertrans;
	u64 qgroup_meta_rsv_prealloc;
	wait_queue_head_t qgroup_flush_wait;

	/* Number of active swapfiles */
	atomic_t nr_swapfiles;

	/* Record pairs of swapped blocks for qgroup */
	struct btrfs_qgroup_swapped_blocks swapped_blocks;

	/* Used only by log trees, when logging csum items */
	struct extent_io_tree log_csum_range;

	/* Used in simple quotas, track root during relocation. */
	u64 relocation_src_root;

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	u64 alloc_bytenr;
#endif

#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};

static inline bool btrfs_root_readonly(const struct btrfs_root *root)
{
	/* Byte-swap the constant at compile time, root_item::flags is LE */
	return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_RDONLY)) != 0;
}

static inline bool btrfs_root_dead(const struct btrfs_root *root)
{
	/* Byte-swap the constant at compile time, root_item::flags is LE */
	return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_DEAD)) != 0;
}

static inline u64 btrfs_root_id(const struct btrfs_root *root)
{
	return root->root_key.objectid;
}

/*
 * Structure that conveys information about an extent that is going to replace
 * all the extents in a file range.
 */
struct btrfs_replace_extent_info {
	u64 disk_offset;
	u64 disk_len;
	u64 data_offset;
	u64 data_len;
	u64 file_offset;
	/* Pointer to a file extent item of type regular or prealloc. */
	char *extent_buf;
	/*
	 * Set to true when attempting to replace a file range with a new extent
	 * described by this structure, set to false when attempting to clone an
	 * existing extent into a file range.
	 */
	bool is_new_extent;
	/* Indicate if we should update the inode's mtime and ctime. */
	bool update_times;
	/* Meaningful only if is_new_extent is true. */
	int qgroup_reserved;
	/*
	 * Meaningful only if is_new_extent is true.
	 * Used to track how many extent items we have already inserted in a
	 * subvolume tree that refer to the extent described by this structure,
	 * so that we know when to create a new delayed ref or update an existing
	 * one.
	 */
	int insertions;
};

/* Arguments for btrfs_drop_extents() */
struct btrfs_drop_extents_args {
	/* Input parameters */

	/*
	 * If NULL, btrfs_drop_extents() will allocate and free its own path.
	 * If 'replace_extent' is true, this must not be NULL. Also the path
	 * is always released except if 'replace_extent' is true and
	 * btrfs_drop_extents() sets 'extent_inserted' to true, in which case
	 * the path is kept locked.
	 */
	struct btrfs_path *path;
	/* Start offset of the range to drop extents from */
	u64 start;
	/* End (exclusive, last byte + 1) of the range to drop extents from */
	u64 end;
	/* If true drop all the extent maps in the range */
	bool drop_cache;
	/*
	 * If true it means we want to insert a new extent after dropping all
	 * the extents in the range. If this is true, the 'extent_item_size'
	 * parameter must be set as well and the 'extent_inserted' field will
	 * be set to true by btrfs_drop_extents() if it could insert the new
	 * extent.
	 * Note: when this is set to true the path must not be NULL.
	 */
	bool replace_extent;
	/*
	 * Used if 'replace_extent' is true. Size of the file extent item to
	 * insert after dropping all existing extents in the range.
	 */
	u32 extent_item_size;

	/* Output parameters */

	/*
	 * Set to the minimum between the input parameter 'end' and the end
	 * (exclusive, last byte + 1) of the last dropped extent. This is always
	 * set even if btrfs_drop_extents() returns an error.
	 */
	u64 drop_end;
	/*
	 * The number of allocated bytes found in the range. This can be smaller
	 * than the range's length when there are holes in the range.
	 */
	u64 bytes_found;
	/*
	 * Only set if 'replace_extent' is true. Set to true if we were able
	 * to insert a replacement extent after dropping all extents in the
	 * range, otherwise set to false by btrfs_drop_extents().
	 * Also, if btrfs_drop_extents() has set this to true it means it
	 * returned with the path locked, otherwise if it has set this to
	 * false it has returned with the path released.
	 */
	bool extent_inserted;
};
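
/*
 * A minimal usage sketch (not part of this header), assuming the
 * btrfs_drop_extents() declaration from file.h; "trans", "root", "inode",
 * "start" and "len" are placeholders:
 *
 *	struct btrfs_drop_extents_args args = { 0 };
 *	int ret;
 *
 *	args.start = start;
 *	args.end = start + len;
 *	args.drop_cache = true;
 *	ret = btrfs_drop_extents(trans, root, inode, &args);
 *	(on success, args.drop_end and args.bytes_found describe what was
 *	dropped within the range)
 */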

struct btrfs_file_private {
	void *filldir_buf;
	u64 last_index;
	struct extent_state *llseek_cached_state;
};

static inline u32 BTRFS_LEAF_DATA_SIZE(const struct btrfs_fs_info *info)
{
	return info->nodesize - sizeof(struct btrfs_header);
}

static inline u32 BTRFS_MAX_ITEM_SIZE(const struct btrfs_fs_info *info)
{
	return BTRFS_LEAF_DATA_SIZE(info) - sizeof(struct btrfs_item);
}

static inline u32 BTRFS_NODEPTRS_PER_BLOCK(const struct btrfs_fs_info *info)
{
	return BTRFS_LEAF_DATA_SIZE(info) / sizeof(struct btrfs_key_ptr);
}

static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_fs_info *info)
{
	return BTRFS_MAX_ITEM_SIZE(info) - sizeof(struct btrfs_dir_item);
}
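
/*
 * Worked example (not part of this header): assuming a 16 KiB nodesize and
 * the current on-disk structure sizes (sizeof(struct btrfs_header) == 101,
 * sizeof(struct btrfs_item) == 25, sizeof(struct btrfs_key_ptr) == 33,
 * sizeof(struct btrfs_dir_item) == 30):
 *
 *	BTRFS_LEAF_DATA_SIZE     = 16384 - 101 = 16283 bytes
 *	BTRFS_MAX_ITEM_SIZE      = 16283 - 25  = 16258 bytes
 *	BTRFS_NODEPTRS_PER_BLOCK = 16283 / 33  = 493 key pointers
 *	BTRFS_MAX_XATTR_SIZE     = 16258 - 30  = 16228 bytes
 */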

#define BTRFS_BYTES_TO_BLKS(fs_info, bytes) \
				((bytes) >> (fs_info)->sectorsize_bits)
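
/*
 * For example, with a 4 KiB sectorsize (sectorsize_bits == 12):
 * BTRFS_BYTES_TO_BLKS(fs_info, 8192) == 2.
 */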

static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping)
{
	return mapping_gfp_constraint(mapping, ~__GFP_FS);
}

int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
				   u64 start, u64 end);
int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 num_bytes, u64 *actual_bytes);
int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range);

/* ctree.c */
int __init btrfs_ctree_init(void);
void __cold btrfs_ctree_exit(void);

int btrfs_bin_search(struct extent_buffer *eb, int first_slot,
		     const struct btrfs_key *key, int *slot);

int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2);

#ifdef __LITTLE_ENDIAN

/*
 * Compare two keys. On little-endian the disk order is the same as the CPU
 * order and we can avoid the conversion.
 */
static inline int btrfs_comp_keys(const struct btrfs_disk_key *disk_key,
				  const struct btrfs_key *k2)
{
	const struct btrfs_key *k1 = (const struct btrfs_key *)disk_key;

	return btrfs_comp_cpu_keys(k1, k2);
}

#else

/* Compare two keys in a memcmp fashion. */
static inline int btrfs_comp_keys(const struct btrfs_disk_key *disk,
				  const struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}

#endif

int btrfs_previous_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid,
			int type);
int btrfs_previous_extent_item(struct btrfs_root *root,
			       struct btrfs_path *path, u64 min_objectid);
void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
			     struct btrfs_path *path,
			     const struct btrfs_key *new_key);
struct extent_buffer *btrfs_root_node(struct btrfs_root *root);
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
			struct btrfs_key *key, int lowest_level,
			u64 min_trans);
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
			 struct btrfs_path *path,
			 u64 min_trans);
struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
					   int slot);

int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret,
		    enum btrfs_lock_nesting nest);
int btrfs_force_cow_block(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  struct extent_buffer *buf,
			  struct extent_buffer *parent, int parent_slot,
			  struct extent_buffer **cow_ret,
			  u64 search_start, u64 empty_size,
			  enum btrfs_lock_nesting nest);
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root,
		    struct extent_buffer *buf,
		    struct extent_buffer **cow_ret, u64 new_root_objectid);
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf);
int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct btrfs_path *path, int level, int slot);
void btrfs_extend_item(struct btrfs_trans_handle *trans,
		       struct btrfs_path *path, u32 data_size);
void btrfs_truncate_item(struct btrfs_trans_handle *trans,
			 struct btrfs_path *path, u32 new_size, int from_end);
int btrfs_split_item(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_path *path,
		     const struct btrfs_key *new_key,
		     unsigned long split_offset);
int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path,
			 const struct btrfs_key *new_key);
int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
		    u64 inum, u64 ioff, u8 key_type, struct btrfs_key *found_key);
int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *key, struct btrfs_path *p,
		      int ins_len, int cow);
int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
			  struct btrfs_path *p, u64 time_seq);
int btrfs_search_slot_for_read(struct btrfs_root *root,
			       const struct btrfs_key *key,
			       struct btrfs_path *p, int find_higher,
			       int return_any);
void btrfs_release_path(struct btrfs_path *p);
struct btrfs_path *btrfs_alloc_path(void);
void btrfs_free_path(struct btrfs_path *p);

int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int slot, int nr);
static inline int btrfs_del_item(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path)
{
	return btrfs_del_items(trans, root, path, path->slots[0], 1);
}
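
/*
 * A minimal deletion sketch (not part of this header): search with a
 * negative ins_len and cow enabled, then delete the item the path points
 * at. "trans", "root", "key" and "path" are placeholders:
 *
 *	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
 *	if (ret == 0)
 *		ret = btrfs_del_item(trans, root, path);
 *	btrfs_release_path(path);
 */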

/*
 * Describes a batch of items to insert in a btree. This is used by
 * btrfs_insert_empty_items().
 */
struct btrfs_item_batch {
	/*
	 * Pointer to an array containing the keys of the items to insert (in
	 * sorted order).
	 */
	const struct btrfs_key *keys;
	/* Pointer to an array containing the data size for each item to insert. */
	const u32 *data_sizes;
	/*
	 * The sum of data sizes for all items. The caller can compute this while
	 * setting up the data_sizes array, so it ends up being more efficient
	 * than having btrfs_insert_empty_items() or setup_item_for_insert()
	 * doing it, as it would avoid an extra loop over a potentially large
	 * array, and in the case of setup_item_for_insert(), we would be doing
	 * it while holding a write lock on a leaf and often on upper level nodes
	 * too, unnecessarily increasing the size of a critical section.
	 */
	u32 total_data_size;
	/* Size of the keys and data_sizes arrays (number of items in the batch). */
	int nr;
};

void btrfs_setup_item_for_insert(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 const struct btrfs_key *key,
				 u32 data_size);
int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *key, void *data, u32 data_size);
int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path,
			     const struct btrfs_item_batch *batch);

static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  const struct btrfs_key *key,
					  u32 data_size)
{
	struct btrfs_item_batch batch;

	batch.keys = key;
	batch.data_sizes = &data_size;
	batch.total_data_size = data_size;
	batch.nr = 1;

	return btrfs_insert_empty_items(trans, root, path, &batch);
}
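
/*
 * A minimal multi-item sketch (not part of this header): inserting two
 * empty items in one batch. The keys must be in sorted order; "trans",
 * "root", "path" and the keys/sizes are placeholders:
 *
 *	struct btrfs_key keys[2] = { ... };	(sorted)
 *	u32 sizes[2] = { first_size, second_size };
 *	struct btrfs_item_batch batch;
 *
 *	batch.keys = keys;
 *	batch.data_sizes = sizes;
 *	batch.total_data_size = sizes[0] + sizes[1];
 *	batch.nr = 2;
 *	ret = btrfs_insert_empty_items(trans, root, path, &batch);
 */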

int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
			u64 time_seq);

int btrfs_search_backwards(struct btrfs_root *root, struct btrfs_key *key,
			   struct btrfs_path *path);

int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key,
			      struct btrfs_path *path);

/*
 * Search in @root for a given @key, and store the key of the slot found in
 * @found_key.
 *
 * @root:	The root of the btree to search.
 * @key:	The key we are looking for.
 * @found_key:	Will hold the key of the found item.
 * @path:	Holds the current slot/leaf.
 * @iter_ret:	Contains the value returned from btrfs_search_slot or
 *		btrfs_get_next_valid_item, whichever was executed last.
 *
 * The @iter_ret is an output variable that will contain the return value of
 * btrfs_search_slot, if it encountered an error, or the value returned from
 * btrfs_get_next_valid_item otherwise. That return value can be 0, if a valid
 * slot was found, 1 if there were no more leaves, and <0 if there was an error.
 *
 * It's recommended to use a separate variable for iter_ret and then use it to
 * set the function return value so there's no confusion of the 0/1/errno
 * values stemming from btrfs_search_slot.
 */
#define btrfs_for_each_slot(root, key, found_key, path, iter_ret)		\
	for (iter_ret = btrfs_search_slot(NULL, (root), (key), (path), 0, 0);	\
	     (iter_ret) >= 0 &&							\
	     (iter_ret = btrfs_get_next_valid_item((root), (found_key), (path))) == 0; \
	     (path)->slots[0]++							\
	)
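
/*
 * A minimal iteration sketch (not part of this header); "root", "path",
 * "key" and "found_key" are placeholders, with "key" setting the starting
 * point of the iteration:
 *
 *	struct btrfs_key key = { 0 };
 *	struct btrfs_key found_key;
 *	int iter_ret = 0;
 *	int ret = 0;
 *
 *	btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
 *		... process the item at path->slots[0] ...
 *	}
 *	(as recommended above, propagate errors from the separate variable)
 *	if (iter_ret < 0)
 *		ret = iter_ret;
 */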

int btrfs_next_old_item(struct btrfs_root *root, struct btrfs_path *path, u64 time_seq);

/*
 * Search the tree again to find a leaf with greater keys.
 *
 * Returns 0 if it found something or 1 if there are no greater leaves.
 * Returns < 0 on error.
 */
static inline int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	return btrfs_next_old_leaf(root, path, 0);
}

static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p)
{
	return btrfs_next_old_item(root, p, 0);
}
int btrfs_leaf_free_space(const struct extent_buffer *leaf);

static inline int is_fstree(u64 rootid)
{
	if (rootid == BTRFS_FS_TREE_OBJECTID ||
	    ((s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID &&
	     !btrfs_qgroup_level(rootid)))
		return 1;
	return 0;
}

static inline bool btrfs_is_data_reloc_root(const struct btrfs_root *root)
{
	return root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID;
}

u16 btrfs_csum_type_size(u16 type);
int btrfs_super_csum_size(const struct btrfs_super_block *s);
const char *btrfs_super_csum_name(u16 csum_type);
const char *btrfs_super_csum_driver(u16 csum_type);
size_t __attribute_const__ btrfs_get_num_csums(void);

/*
 * We use page status Private2 to indicate there is an ordered extent with
 * unfinished IO.
 *
 * Rename the Private2 accessors to Ordered, to improve readability.
 */
#define PageOrdered(page)		PagePrivate2(page)
#define SetPageOrdered(page)		SetPagePrivate2(page)
#define ClearPageOrdered(page)		ClearPagePrivate2(page)
#define folio_test_ordered(folio)	folio_test_private_2(folio)
#define folio_set_ordered(folio)	folio_set_private_2(folio)
#define folio_clear_ordered(folio)	folio_clear_private_2(folio)

#endif