/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_FS_H
#define BTRFS_FS_H

#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/btrfs_tree.h>
#include <linux/sizes.h>
#include "extent-io-tree.h"
#include "extent_map.h"
#include "async-thread.h"
#include "block-rsv.h"

#define BTRFS_MAX_EXTENT_SIZE SZ_128M

#define BTRFS_OLDEST_GENERATION 0ULL

#define BTRFS_EMPTY_DIR_SIZE 0

#define BTRFS_DIRTY_METADATA_THRESH SZ_32M

#define BTRFS_SUPER_INFO_OFFSET SZ_64K
#define BTRFS_SUPER_INFO_SIZE 4096
static_assert(sizeof(struct btrfs_super_block) == BTRFS_SUPER_INFO_SIZE);

/*
 * Number of metadata items necessary for an unlink operation:
 *
 * 1 for the possible orphan item
 * 1 for the dir item
 * 1 for the dir index
 * 1 for the inode ref
 * 1 for the inode
 * 1 for the parent inode
 */
#define BTRFS_UNLINK_METADATA_UNITS 6

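/*
 * Illustrative sketch (not part of the original header): a caller reserving
 * metadata space for an unlink could size the reservation by combining the
 * unit count above with btrfs_calc_metadata_size(), defined later in this
 * file. The variable name is hypothetical:
 *
 *	u64 to_reserve = btrfs_calc_metadata_size(fs_info,
 *						  BTRFS_UNLINK_METADATA_UNITS);
 */
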
/*
 * The reserved space at the beginning of each device. It covers the primary
 * super block, leaves space for potential use by other tools like
 * bootloaders, and lowers the potential damage of an accidental overwrite.
 */
#define BTRFS_DEVICE_RANGE_RESERVED (SZ_1M)
/*
 * Runtime (in-memory) states of filesystem
 */
enum {
	/*
	 * Filesystem is being remounted, allowing some operations, like
	 * defrag, to be skipped
	 */
	BTRFS_FS_STATE_REMOUNTING,
	/* Filesystem in RO mode */
	BTRFS_FS_STATE_RO,
	/* Track if a transaction abort has been reported on this filesystem */
	BTRFS_FS_STATE_TRANS_ABORTED,
	/*
	 * Bio operations should be blocked on this filesystem because a source
	 * or target device is being destroyed as part of a device replace
	 */
	BTRFS_FS_STATE_DEV_REPLACING,
	/* The btrfs_fs_info created for self-tests */
	BTRFS_FS_STATE_DUMMY_FS_INFO,

	BTRFS_FS_STATE_NO_CSUMS,

	/* Indicates there was an error cleaning up a log tree. */
	BTRFS_FS_STATE_LOG_CLEANUP_ERROR,

	BTRFS_FS_STATE_COUNT
};

enum {
	BTRFS_FS_CLOSING_START,
	BTRFS_FS_CLOSING_DONE,
	BTRFS_FS_LOG_RECOVERING,
	BTRFS_FS_OPEN,
	BTRFS_FS_QUOTA_ENABLED,
	BTRFS_FS_UPDATE_UUID_TREE_GEN,
	BTRFS_FS_CREATING_FREE_SPACE_TREE,
	BTRFS_FS_BTREE_ERR,
	BTRFS_FS_LOG1_ERR,
	BTRFS_FS_LOG2_ERR,
	BTRFS_FS_QUOTA_OVERRIDE,
	/* Used to record internally whether fs has been frozen */
	BTRFS_FS_FROZEN,
	/*
	 * Indicate that balance has been set up from the ioctl and is in the
	 * main phase. The fs_info::balance_ctl is initialized.
	 */
	BTRFS_FS_BALANCE_RUNNING,

	/*
	 * Indicate that relocation of a chunk has started; it's set per chunk
	 * and is toggled between chunks.
	 */
	BTRFS_FS_RELOC_RUNNING,

	/* Indicate that the cleaner thread is awake and doing something. */
	BTRFS_FS_CLEANER_RUNNING,

	/*
	 * The checksumming has an optimized version and is considered fast,
	 * so we don't need to offload checksums to workqueues.
	 */
	BTRFS_FS_CSUM_IMPL_FAST,

	/* Indicate that the discard workqueue can service discards. */
	BTRFS_FS_DISCARD_RUNNING,

	/* Indicate that we need to clean up space cache v1 */
	BTRFS_FS_CLEANUP_SPACE_CACHE_V1,

	/* Indicate that we can't trust the free space tree for caching yet */
	BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED,

	/* Indicate whether there are any tree modification log users */
	BTRFS_FS_TREE_MOD_LOG_USERS,

	/* Indicate that we want the transaction kthread to commit right now. */
	BTRFS_FS_COMMIT_TRANS,

	/* Indicate we have half completed snapshot deletions pending. */
	BTRFS_FS_UNFINISHED_DROPS,

	/* Indicate we have to finish a zone to do next allocation. */
	BTRFS_FS_NEED_ZONE_FINISH,

	/* Indicate that we want to commit the transaction. */
	BTRFS_FS_NEED_TRANS_COMMIT,

	/* This is set when active zone tracking is needed. */
	BTRFS_FS_ACTIVE_ZONE_TRACKING,

	/*
	 * Indicate that some features changed; this is mostly for the cleaner
	 * thread to update the sysfs interface.
	 */
	BTRFS_FS_FEATURE_CHANGED,

#if BITS_PER_LONG == 32
	/* Indicate whether the error/warn message has been printed on 32bit systems */
	BTRFS_FS_32BIT_ERROR,
	BTRFS_FS_32BIT_WARN,
#endif
};

/*
 * Flags for mount options.
 *
 * Note: don't forget to add new options to btrfs_show_options()
 */
enum {
	BTRFS_MOUNT_NODATASUM = (1UL << 0),
	BTRFS_MOUNT_NODATACOW = (1UL << 1),
	BTRFS_MOUNT_NOBARRIER = (1UL << 2),
	BTRFS_MOUNT_SSD = (1UL << 3),
	BTRFS_MOUNT_DEGRADED = (1UL << 4),
	BTRFS_MOUNT_COMPRESS = (1UL << 5),
	BTRFS_MOUNT_NOTREELOG = (1UL << 6),
	BTRFS_MOUNT_FLUSHONCOMMIT = (1UL << 7),
	BTRFS_MOUNT_SSD_SPREAD = (1UL << 8),
	BTRFS_MOUNT_NOSSD = (1UL << 9),
	BTRFS_MOUNT_DISCARD_SYNC = (1UL << 10),
	BTRFS_MOUNT_FORCE_COMPRESS = (1UL << 11),
	BTRFS_MOUNT_SPACE_CACHE = (1UL << 12),
	BTRFS_MOUNT_CLEAR_CACHE = (1UL << 13),
	BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED = (1UL << 14),
	BTRFS_MOUNT_ENOSPC_DEBUG = (1UL << 15),
	BTRFS_MOUNT_AUTO_DEFRAG = (1UL << 16),
	BTRFS_MOUNT_USEBACKUPROOT = (1UL << 17),
	BTRFS_MOUNT_SKIP_BALANCE = (1UL << 18),
	BTRFS_MOUNT_CHECK_INTEGRITY = (1UL << 19),
	BTRFS_MOUNT_CHECK_INTEGRITY_DATA = (1UL << 20),
	BTRFS_MOUNT_PANIC_ON_FATAL_ERROR = (1UL << 21),
	BTRFS_MOUNT_RESCAN_UUID_TREE = (1UL << 22),
	BTRFS_MOUNT_FRAGMENT_DATA = (1UL << 23),
	BTRFS_MOUNT_FRAGMENT_METADATA = (1UL << 24),
	BTRFS_MOUNT_FREE_SPACE_TREE = (1UL << 25),
	BTRFS_MOUNT_NOLOGREPLAY = (1UL << 26),
	BTRFS_MOUNT_REF_VERIFY = (1UL << 27),
	BTRFS_MOUNT_DISCARD_ASYNC = (1UL << 28),
	BTRFS_MOUNT_IGNOREBADROOTS = (1UL << 29),
	BTRFS_MOUNT_IGNOREDATACSUMS = (1UL << 30),
	BTRFS_MOUNT_NODISCARD = (1UL << 31),
};

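/*
 * A note for illustration (not part of the original header): the two enums
 * above define bit numbers, operated on with test_bit()/set_bit() against
 * fs_info->fs_state and fs_info->flags respectively, while the BTRFS_MOUNT_*
 * values are pre-shifted masks combined into fs_info->mount_opt and tested
 * with the btrfs_test_opt() macro defined later in this file. A hypothetical
 * caller mixing the two styles:
 *
 *	if (test_bit(BTRFS_FS_OPEN, &fs_info->flags) &&
 *	    btrfs_test_opt(fs_info, SSD))
 *		...;
 */
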
/*
 * Compat flags that we support. If any incompat flags are set other than the
 * ones specified below, then we will fail to mount.
 */
#define BTRFS_FEATURE_COMPAT_SUPP 0ULL
#define BTRFS_FEATURE_COMPAT_SAFE_SET 0ULL
#define BTRFS_FEATURE_COMPAT_SAFE_CLEAR 0ULL

#define BTRFS_FEATURE_COMPAT_RO_SUPP \
	(BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE | \
	 BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE_VALID | \
	 BTRFS_FEATURE_COMPAT_RO_VERITY | \
	 BTRFS_FEATURE_COMPAT_RO_BLOCK_GROUP_TREE)

#define BTRFS_FEATURE_COMPAT_RO_SAFE_SET 0ULL
#define BTRFS_FEATURE_COMPAT_RO_SAFE_CLEAR 0ULL

#define BTRFS_FEATURE_INCOMPAT_SUPP_STABLE \
	(BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF | \
	 BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL | \
	 BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS | \
	 BTRFS_FEATURE_INCOMPAT_BIG_METADATA | \
	 BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO | \
	 BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD | \
	 BTRFS_FEATURE_INCOMPAT_RAID56 | \
	 BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF | \
	 BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA | \
	 BTRFS_FEATURE_INCOMPAT_NO_HOLES | \
	 BTRFS_FEATURE_INCOMPAT_METADATA_UUID | \
	 BTRFS_FEATURE_INCOMPAT_RAID1C34 | \
	 BTRFS_FEATURE_INCOMPAT_ZONED)

#ifdef CONFIG_BTRFS_DEBUG
/*
 * Features under development, like the extent tree v2 support, are enabled
 * only under CONFIG_BTRFS_DEBUG.
 */
#define BTRFS_FEATURE_INCOMPAT_SUPP \
	(BTRFS_FEATURE_INCOMPAT_SUPP_STABLE | \
	 BTRFS_FEATURE_INCOMPAT_EXTENT_TREE_V2)

#else

#define BTRFS_FEATURE_INCOMPAT_SUPP \
	(BTRFS_FEATURE_INCOMPAT_SUPP_STABLE)

#endif

#define BTRFS_FEATURE_INCOMPAT_SAFE_SET \
	(BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF)
#define BTRFS_FEATURE_INCOMPAT_SAFE_CLEAR 0ULL

#define BTRFS_DEFAULT_COMMIT_INTERVAL (30)
#define BTRFS_DEFAULT_MAX_INLINE (2048)

struct btrfs_dev_replace {
	/* See #define above */
	u64 replace_state;
	/* Seconds since 1-Jan-1970 */
	time64_t time_started;
	/* Seconds since 1-Jan-1970 */
	time64_t time_stopped;
	atomic64_t num_write_errors;
	atomic64_t num_uncorrectable_read_errors;

	u64 cursor_left;
	u64 committed_cursor_left;
	u64 cursor_left_last_write_of_item;
	u64 cursor_right;

	/* See #define above */
	u64 cont_reading_from_srcdev_mode;

	int is_valid;
	int item_needs_writeback;
	struct btrfs_device *srcdev;
	struct btrfs_device *tgtdev;

	struct mutex lock_finishing_cancel_unmount;
	struct rw_semaphore rwsem;

	struct btrfs_scrub_progress scrub_progress;

	struct percpu_counter bio_counter;
	wait_queue_head_t replace_wait;
};

/*
 * Free clusters are used to claim free space in relatively large chunks,
 * allowing us to do less seeky writes. They are used for all metadata
 * allocations. In ssd_spread mode they are also used for data allocations.
 */
struct btrfs_free_cluster {
	spinlock_t lock;
	spinlock_t refill_lock;
	struct rb_root root;

	/* Largest extent in this cluster */
	u64 max_size;

	/* First extent starting offset */
	u64 window_start;

	/* We did a full search and couldn't create a cluster */
	bool fragmented;

	struct btrfs_block_group *block_group;
	/*
	 * When a cluster is allocated from a block group, we put the cluster
	 * onto a list in the block group so that it can be freed before the
	 * block group is freed.
	 */
	struct list_head block_group_list;
};

/*
 * Discard control.
 *
 * Async discard uses multiple lists to differentiate the discard filter
 * parameters. Index 0 is for completely free block groups where we need to
 * ensure the entire block group is trimmed without being lossy. Indices
 * afterwards represent monotonically decreasing discard filter sizes to
 * prioritize what should be discarded next.
 */
#define BTRFS_NR_DISCARD_LISTS 3
#define BTRFS_DISCARD_INDEX_UNUSED 0
#define BTRFS_DISCARD_INDEX_START 1

struct btrfs_discard_ctl {
	struct workqueue_struct *discard_workers;
	struct delayed_work work;
	spinlock_t lock;
	struct btrfs_block_group *block_group;
	struct list_head discard_list[BTRFS_NR_DISCARD_LISTS];
	u64 prev_discard;
	u64 prev_discard_time;
	atomic_t discardable_extents;
	atomic64_t discardable_bytes;
	u64 max_discard_size;
	u64 delay_ms;
	u32 iops_limit;
	u32 kbps_limit;
	u64 discard_extent_bytes;
	u64 discard_bitmap_bytes;
	atomic64_t discard_bytes_saved;
};

/*
 * Exclusive operations (device replace, resize, device add/remove, balance)
 */
enum btrfs_exclusive_operation {
	BTRFS_EXCLOP_NONE,
	BTRFS_EXCLOP_BALANCE_PAUSED,
	BTRFS_EXCLOP_BALANCE,
	BTRFS_EXCLOP_DEV_ADD,
	BTRFS_EXCLOP_DEV_REMOVE,
	BTRFS_EXCLOP_DEV_REPLACE,
	BTRFS_EXCLOP_RESIZE,
	BTRFS_EXCLOP_SWAP_ACTIVATE,
};

/* Store data about transaction commits, exported via sysfs. */
struct btrfs_commit_stats {
	/* Total number of commits */
	u64 commit_count;
	/* The maximum commit duration so far in ns */
	u64 max_commit_dur;
	/* The last commit duration in ns */
	u64 last_commit_dur;
	/* The total commit duration in ns */
	u64 total_commit_dur;
};

struct btrfs_fs_info {
	u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
	unsigned long flags;
	struct btrfs_root *tree_root;
	struct btrfs_root *chunk_root;
	struct btrfs_root *dev_root;
	struct btrfs_root *fs_root;
	struct btrfs_root *quota_root;
	struct btrfs_root *uuid_root;
	struct btrfs_root *data_reloc_root;
	struct btrfs_root *block_group_root;

	/* The log root tree is a directory of all the other log roots */
	struct btrfs_root *log_root_tree;

	/* The tree that holds the global roots (csum, extent, etc) */
	rwlock_t global_root_lock;
	struct rb_root global_root_tree;

	spinlock_t fs_roots_radix_lock;
	struct radix_tree_root fs_roots_radix;

	/* Block group cache stuff */
	rwlock_t block_group_cache_lock;
	struct rb_root_cached block_group_cache_tree;

	/* Keep track of unallocated space */
	atomic64_t free_chunk_space;

	/* Track ranges which are used by log tree blocks/logged data extents */
	struct extent_io_tree excluded_extents;

	/* logical->physical extent mapping */
	struct extent_map_tree mapping_tree;

	/*
	 * Block reservation for extent, checksum, root tree and delayed dir
	 * index item.
	 */
	struct btrfs_block_rsv global_block_rsv;
	/* Block reservation for metadata operations */
	struct btrfs_block_rsv trans_block_rsv;
	/* Block reservation for chunk tree */
	struct btrfs_block_rsv chunk_block_rsv;
	/* Block reservation for delayed operations */
	struct btrfs_block_rsv delayed_block_rsv;
	/* Block reservation for delayed refs */
	struct btrfs_block_rsv delayed_refs_rsv;

	struct btrfs_block_rsv empty_block_rsv;

	u64 generation;
	u64 last_trans_committed;
	/*
	 * Generation of the last transaction used for block group relocation
	 * since the filesystem was last mounted (or 0 if none happened yet).
	 * Must be written and read while holding btrfs_fs_info::commit_root_sem.
	 */
	u64 last_reloc_trans;

	/*
	 * This is updated to the current transaction every time a full commit
	 * is required instead of the faster short fsync log commits.
	 */
	u64 last_trans_log_full_commit;
	unsigned long mount_opt;

	unsigned long compress_type:4;
	unsigned int compress_level;
	u32 commit_interval;
	/*
	 * It is a suggestive number; the read side is safe even if it gets a
	 * wrong number because we will write out the data into a regular
	 * extent. The write side (mount/remount) is under the ->s_umount lock,
	 * so it is also safe.
	 */
	u64 max_inline;

	struct btrfs_transaction *running_transaction;
	wait_queue_head_t transaction_throttle;
	wait_queue_head_t transaction_wait;
	wait_queue_head_t transaction_blocked_wait;
	wait_queue_head_t async_submit_wait;

	/*
	 * Used to protect the incompat_flags, compat_flags, compat_ro_flags
	 * when they are updated.
	 *
	 * Because we never clear the flags, we needn't use the lock on the
	 * read side.
	 *
	 * We also needn't use the lock when we mount the fs, because there is
	 * no other task which will update the flags.
	 */
	spinlock_t super_lock;
	struct btrfs_super_block *super_copy;
	struct btrfs_super_block *super_for_commit;
	struct super_block *sb;
	struct inode *btree_inode;
	struct mutex tree_log_mutex;
	struct mutex transaction_kthread_mutex;
	struct mutex cleaner_mutex;
	struct mutex chunk_mutex;

	/*
	 * This is taken to make sure we don't set block groups ro after the
	 * free space cache has been allocated on them.
	 */
	struct mutex ro_block_group_mutex;

	/*
	 * This is used during read/modify/write to make sure no two ios are
	 * trying to mod the same stripe at the same time.
	 */
	struct btrfs_stripe_hash_table *stripe_hash_table;

	/*
	 * This protects the ordered operations list only while we are
	 * processing all of the entries on it. This way we make sure the
	 * commit code doesn't find the list temporarily empty because another
	 * function happens to be doing non-waiting preflush before jumping
	 * into the main commit.
	 */
	struct mutex ordered_operations_mutex;

	struct rw_semaphore commit_root_sem;

	struct rw_semaphore cleanup_work_sem;

	struct rw_semaphore subvol_sem;

	spinlock_t trans_lock;
	/*
	 * The reloc mutex goes with the trans lock; it is taken during commit
	 * to protect us from the relocation code.
	 */
	struct mutex reloc_mutex;

	struct list_head trans_list;
	struct list_head dead_roots;
	struct list_head caching_block_groups;

	spinlock_t delayed_iput_lock;
	struct list_head delayed_iputs;
	atomic_t nr_delayed_iputs;
	wait_queue_head_t delayed_iputs_wait;

	atomic64_t tree_mod_seq;

	/* This protects tree_mod_log and tree_mod_seq_list */
	rwlock_t tree_mod_log_lock;
	struct rb_root tree_mod_log;
	struct list_head tree_mod_seq_list;

	atomic_t async_delalloc_pages;

	/* This is used to protect the following list -- ordered_roots. */
	spinlock_t ordered_root_lock;

	/*
	 * All fs/file tree roots in which there are data=ordered extents
	 * pending writeback are added into this list.
	 *
	 * These can span multiple transactions and basically include every
	 * dirty data page that isn't from nodatacow.
	 */
	struct list_head ordered_roots;

	struct mutex delalloc_root_mutex;
	spinlock_t delalloc_root_lock;
	/* All fs/file tree roots that have delalloc inodes. */
	struct list_head delalloc_roots;

	/*
	 * There is a pool of worker threads for checksumming during writes and
	 * a pool for checksumming after reads. This is because readers can
	 * run with FS locks held, and the writers may be waiting for those
	 * locks. We don't want ordering in the pending list to cause
	 * deadlocks, and so the two are serviced separately.
	 *
	 * A third pool does submit_bio to avoid deadlocking with the other two.
	 */
	struct btrfs_workqueue *workers;
	struct btrfs_workqueue *delalloc_workers;
	struct btrfs_workqueue *flush_workers;
	struct workqueue_struct *endio_workers;
	struct workqueue_struct *endio_meta_workers;
	struct workqueue_struct *rmw_workers;
	struct workqueue_struct *compressed_write_workers;
	struct btrfs_workqueue *endio_write_workers;
	struct btrfs_workqueue *endio_freespace_worker;
	struct btrfs_workqueue *caching_workers;

	/*
	 * Fixup workers take dirty pages that didn't properly go through the
	 * cow mechanism and make them safe to write. It happens for the
	 * sys_munmap function call path.
	 */
	struct btrfs_workqueue *fixup_workers;
	struct btrfs_workqueue *delayed_workers;

	struct task_struct *transaction_kthread;
	struct task_struct *cleaner_kthread;
	u32 thread_pool_size;

	struct kobject *space_info_kobj;
	struct kobject *qgroups_kobj;
	struct kobject *discard_kobj;

	/* Used to keep from writing metadata until there is a nice batch */
	struct percpu_counter dirty_metadata_bytes;
	struct percpu_counter delalloc_bytes;
	struct percpu_counter ordered_bytes;
	s32 dirty_metadata_batch;
	s32 delalloc_batch;

	/* Protected by 'trans_lock'. */
	struct list_head dirty_cowonly_roots;

	struct btrfs_fs_devices *fs_devices;

	/*
	 * The space_info list is effectively read only after initial setup.
	 * It is populated at mount time and cleaned up after all block groups
	 * are removed. RCU is used to protect it.
	 */
	struct list_head space_info;

	struct btrfs_space_info *data_sinfo;

	struct reloc_control *reloc_ctl;

	/* data_alloc_cluster is only used in ssd_spread mode */
	struct btrfs_free_cluster data_alloc_cluster;

	/* All metadata allocations go through this cluster. */
	struct btrfs_free_cluster meta_alloc_cluster;

	/* Auto defrag inodes go here. */
	spinlock_t defrag_inodes_lock;
	struct rb_root defrag_inodes;
	atomic_t defrag_running;

	/* Used to protect avail_{data, metadata, system}_alloc_bits */
	seqlock_t profiles_lock;
	/*
	 * These three are in extended format (availability of single chunks is
	 * denoted by BTRFS_AVAIL_ALLOC_BIT_SINGLE bit, other types are denoted
	 * by corresponding BTRFS_BLOCK_GROUP_* bits)
	 */
	u64 avail_data_alloc_bits;
	u64 avail_metadata_alloc_bits;
	u64 avail_system_alloc_bits;

	/* Balance state */
	spinlock_t balance_lock;
	struct mutex balance_mutex;
	atomic_t balance_pause_req;
	atomic_t balance_cancel_req;
	struct btrfs_balance_control *balance_ctl;
	wait_queue_head_t balance_wait_q;

	/* Cancellation requests for chunk relocation */
	atomic_t reloc_cancel_req;

	u32 data_chunk_allocations;
	u32 metadata_ratio;

	void *bdev_holder;

	/* Private scrub information */
	struct mutex scrub_lock;
	atomic_t scrubs_running;
	atomic_t scrub_pause_req;
	atomic_t scrubs_paused;
	atomic_t scrub_cancel_req;
	wait_queue_head_t scrub_pause_wait;
	/*
	 * The worker pointers are NULL iff the refcount is 0, i.e. scrub is
	 * not running.
	 */
	refcount_t scrub_workers_refcnt;
	struct workqueue_struct *scrub_workers;
	struct btrfs_subpage_info *subpage_info;

	struct btrfs_discard_ctl discard_ctl;

#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	u32 check_integrity_print_mask;
#endif
	/* Is qgroup tracking in a consistent state? */
	u64 qgroup_flags;

	/* Holds configuration and tracking. Protected by qgroup_lock. */
	struct rb_root qgroup_tree;
	spinlock_t qgroup_lock;

	/*
	 * Used to avoid frequently calling ulist_alloc()/ulist_free() when
	 * doing qgroup accounting; it must be protected by qgroup_lock.
	 */
	struct ulist *qgroup_ulist;

	/*
	 * Protect user changes for quota operations. If a transaction is
	 * needed, it must be started before locking this lock.
	 */
	struct mutex qgroup_ioctl_lock;

	/* List of dirty qgroups to be written at next commit. */
	struct list_head dirty_qgroups;

	/* Used by qgroup for an efficient tree traversal. */
	u64 qgroup_seq;

	/* Qgroup rescan items. */
	/* Protects the progress item */
	struct mutex qgroup_rescan_lock;
	struct btrfs_key qgroup_rescan_progress;
	struct btrfs_workqueue *qgroup_rescan_workers;
	struct completion qgroup_rescan_completion;
	struct btrfs_work qgroup_rescan_work;
	/* Protected by qgroup_rescan_lock */
	bool qgroup_rescan_running;
	u8 qgroup_drop_subtree_thres;

	/*
	 * If this is not 0, then it indicates a serious filesystem error has
	 * happened and it contains that error (a negative errno value).
	 */
	int fs_error;

	/* Filesystem state */
	unsigned long fs_state;

	struct btrfs_delayed_root *delayed_root;

	/* Extent buffer radix tree */
	spinlock_t buffer_lock;
	/* Entries are eb->start / sectorsize */
	struct radix_tree_root buffer_radix;

	/* Next backup root to be overwritten */
	int backup_root_index;

	/* Device replace state */
	struct btrfs_dev_replace dev_replace;

	struct semaphore uuid_tree_rescan_sem;

	/* Used to reclaim the metadata space in the background. */
	struct work_struct async_reclaim_work;
	struct work_struct async_data_reclaim_work;
	struct work_struct preempt_reclaim_work;

	/* Reclaim partially filled block groups in the background */
	struct work_struct reclaim_bgs_work;
	struct list_head reclaim_bgs;
	int bg_reclaim_threshold;

	spinlock_t unused_bgs_lock;
	struct list_head unused_bgs;
	struct mutex unused_bg_unpin_mutex;
	/* Protect block groups that are going to be deleted */
	struct mutex reclaim_bgs_lock;

	/* Cached block sizes */
	u32 nodesize;
	u32 sectorsize;
	/* ilog2 of sectorsize, used to avoid 64bit division */
	u32 sectorsize_bits;
	u32 csum_size;
	u32 csums_per_leaf;
	u32 stripesize;

	/*
	 * Maximum size of an extent. BTRFS_MAX_EXTENT_SIZE on a regular
	 * filesystem; on zoned filesystems it depends on the device
	 * constraints.
	 */
	u64 max_extent_size;

	/* Block groups and devices containing active swapfiles. */
	spinlock_t swapfile_pins_lock;
	struct rb_root swapfile_pins;

	struct crypto_shash *csum_shash;

	/* Type of exclusive operation running, protected by super_lock */
	enum btrfs_exclusive_operation exclusive_operation;

	/*
	 * Zone size > 0 when in ZONED mode, otherwise it's 0 and serves as a
	 * check for whether the mode is enabled.
	 */
	u64 zone_size;

	/* Constraints for ZONE_APPEND commands: */
	struct queue_limits limits;
	u64 max_zone_append_size;

	struct mutex zoned_meta_io_lock;
	spinlock_t treelog_bg_lock;
	u64 treelog_bg;

	/*
	 * Start of the dedicated data relocation block group, protected by
	 * relocation_bg_lock.
	 */
	spinlock_t relocation_bg_lock;
	u64 data_reloc_bg;
	struct mutex zoned_data_reloc_io_lock;

	struct btrfs_block_group *active_meta_bg;
	struct btrfs_block_group *active_system_bg;

	u64 nr_global_roots;

	spinlock_t zone_active_bgs_lock;
	struct list_head zone_active_bgs;

	/* Updates are not protected by any lock */
	struct btrfs_commit_stats commit_stats;

	/*
	 * Last generation where we dropped a non-relocation root.
	 * Use btrfs_set_last_root_drop_gen() and btrfs_get_last_root_drop_gen()
	 * to change it and to read it, respectively.
	 */
	u64 last_root_drop_gen;

	/*
	 * Annotations for transaction events (structures are empty when
	 * compiled without lockdep).
	 */
	struct lockdep_map btrfs_trans_num_writers_map;
	struct lockdep_map btrfs_trans_num_extwriters_map;
	struct lockdep_map btrfs_state_change_map[4];
	struct lockdep_map btrfs_trans_pending_ordered_map;
	struct lockdep_map btrfs_ordered_extent_map;

#ifdef CONFIG_BTRFS_FS_REF_VERIFY
	spinlock_t ref_verify_lock;
	struct rb_root block_tree;
#endif

#ifdef CONFIG_BTRFS_DEBUG
	struct kobject *debug_kobj;
	struct list_head allocated_roots;

	spinlock_t eb_leak_lock;
	struct list_head allocated_ebs;
#endif
};

static inline void btrfs_set_last_root_drop_gen(struct btrfs_fs_info *fs_info,
						u64 gen)
{
	WRITE_ONCE(fs_info->last_root_drop_gen, gen);
}

static inline u64 btrfs_get_last_root_drop_gen(const struct btrfs_fs_info *fs_info)
{
	return READ_ONCE(fs_info->last_root_drop_gen);
}

/*
 * Take the number of bytes to be checksummed and figure out how many leaves
 * it would require to store the csums for that many bytes.
 */
static inline u64 btrfs_csum_bytes_to_leaves(
		const struct btrfs_fs_info *fs_info, u64 csum_bytes)
{
	const u64 num_csums = csum_bytes >> fs_info->sectorsize_bits;

	return DIV_ROUND_UP_ULL(num_csums, fs_info->csums_per_leaf);
}

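/*
 * Worked example for the helper above (illustrative numbers, not from the
 * original header): with 4 KiB sectors (sectorsize_bits == 12), checksumming
 * 1 MiB of data needs 1 MiB >> 12 = 256 csums; if a leaf held, say, 2000
 * csums (csums_per_leaf), DIV_ROUND_UP(256, 2000) rounds that up to a
 * single leaf.
 */
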
/*
 * Use this if we would be adding new items, as we could split nodes as we cow
 * down the tree.
 */
static inline u64 btrfs_calc_insert_metadata_size(const struct btrfs_fs_info *fs_info,
						  unsigned num_items)
{
	return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items;
}

/*
 * Doing a truncate or a modification won't result in new nodes or leaves, just
 * what we need for COW.
 */
static inline u64 btrfs_calc_metadata_size(const struct btrfs_fs_info *fs_info,
					   unsigned num_items)
{
	return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * num_items;
}

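/*
 * A note on the factor of 2 between the two helpers above (illustrative,
 * not from the original header): an insertion may split nodes on the way
 * down the tree, so it can dirty up to two nodes per level, while a plain
 * COW modification dirties at most one. With a hypothetical 16 KiB nodesize
 * and BTRFS_MAX_LEVEL == 8, that works out to 16 KiB * 8 * 2 = 256 KiB
 * reserved per item to insert, versus 128 KiB per item to modify.
 */
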
#define BTRFS_MAX_EXTENT_ITEM_SIZE(r) ((BTRFS_LEAF_DATA_SIZE(r->fs_info) >> 4) - \
					sizeof(struct btrfs_item))

static inline bool btrfs_is_zoned(const struct btrfs_fs_info *fs_info)
{
	return IS_ENABLED(CONFIG_BLK_DEV_ZONED) && fs_info->zone_size > 0;
}

/*
 * Count how many units of fs_info->max_extent_size it takes to cover @size.
 */
static inline u32 count_max_extents(struct btrfs_fs_info *fs_info, u64 size)
{
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (!fs_info)
		return div_u64(size + BTRFS_MAX_EXTENT_SIZE - 1, BTRFS_MAX_EXTENT_SIZE);
#endif

	return div_u64(size + fs_info->max_extent_size - 1, fs_info->max_extent_size);
}

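/*
 * Worked example for count_max_extents() (illustrative numbers): with
 * max_extent_size == 128 MiB (BTRFS_MAX_EXTENT_SIZE), a 300 MiB range is
 * counted as DIV_ROUND_UP(300 MiB, 128 MiB) = 3 extents.
 */
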
bool btrfs_exclop_start(struct btrfs_fs_info *fs_info,
			enum btrfs_exclusive_operation type);
bool btrfs_exclop_start_try_lock(struct btrfs_fs_info *fs_info,
				 enum btrfs_exclusive_operation type);
void btrfs_exclop_start_unlock(struct btrfs_fs_info *fs_info);
void btrfs_exclop_finish(struct btrfs_fs_info *fs_info);
void btrfs_exclop_balance(struct btrfs_fs_info *fs_info,
			  enum btrfs_exclusive_operation op);

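/*
 * Illustrative (hypothetical) use of the exclusive operation API above,
 * not from the original header; the -EBUSY return is an assumption for the
 * sketch. A caller would bracket, e.g., a device add like this:
 *
 *	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_DEV_ADD))
 *		return -EBUSY;
 *	... perform the device add ...
 *	btrfs_exclop_finish(fs_info);
 */
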
/* Compatibility and incompatibility defines */
void __btrfs_set_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag,
			     const char *name);
void __btrfs_clear_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag,
			       const char *name);
void __btrfs_set_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag,
			      const char *name);
void __btrfs_clear_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag,
				const char *name);

#define __btrfs_fs_incompat(fs_info, flags) \
	(!!(btrfs_super_incompat_flags((fs_info)->super_copy) & (flags)))

#define __btrfs_fs_compat_ro(fs_info, flags) \
	(!!(btrfs_super_compat_ro_flags((fs_info)->super_copy) & (flags)))

#define btrfs_set_fs_incompat(__fs_info, opt) \
	__btrfs_set_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt, #opt)

#define btrfs_clear_fs_incompat(__fs_info, opt) \
	__btrfs_clear_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt, #opt)

#define btrfs_fs_incompat(fs_info, opt) \
	__btrfs_fs_incompat((fs_info), BTRFS_FEATURE_INCOMPAT_##opt)

#define btrfs_set_fs_compat_ro(__fs_info, opt) \
	__btrfs_set_fs_compat_ro((__fs_info), BTRFS_FEATURE_COMPAT_RO_##opt, #opt)

#define btrfs_clear_fs_compat_ro(__fs_info, opt) \
	__btrfs_clear_fs_compat_ro((__fs_info), BTRFS_FEATURE_COMPAT_RO_##opt, #opt)

#define btrfs_fs_compat_ro(fs_info, opt) \
	__btrfs_fs_compat_ro((fs_info), BTRFS_FEATURE_COMPAT_RO_##opt)

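/*
 * Illustrative (hypothetical) use of the macros above, not from the original
 * header: the short feature name is token-pasted into the full flag, so a
 * zoned-mode check and a feature-flag set would look like:
 *
 *	if (btrfs_fs_incompat(fs_info, ZONED))
 *		...;
 *	btrfs_set_fs_incompat(fs_info, RAID1C34);
 */
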
#define btrfs_clear_opt(o, opt)		((o) &= ~BTRFS_MOUNT_##opt)
#define btrfs_set_opt(o, opt)		((o) |= BTRFS_MOUNT_##opt)
#define btrfs_raw_test_opt(o, opt)	((o) & BTRFS_MOUNT_##opt)
#define btrfs_test_opt(fs_info, opt)	((fs_info)->mount_opt & \
					 BTRFS_MOUNT_##opt)

#define btrfs_set_and_info(fs_info, opt, fmt, args...) \
do { \
	if (!btrfs_test_opt(fs_info, opt)) \
		btrfs_info(fs_info, fmt, ##args); \
	btrfs_set_opt(fs_info->mount_opt, opt); \
} while (0)

#define btrfs_clear_and_info(fs_info, opt, fmt, args...) \
do { \
	if (btrfs_test_opt(fs_info, opt)) \
		btrfs_info(fs_info, fmt, ##args); \
	btrfs_clear_opt(fs_info->mount_opt, opt); \
} while (0)

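/*
 * Illustrative (hypothetical) use from mount option parsing, not from the
 * original header: the message is logged only when the option actually
 * flips state.
 *
 *	btrfs_set_and_info(fs_info, FLUSHONCOMMIT, "turning on flush-on-commit");
 *	btrfs_clear_and_info(fs_info, DISCARD_SYNC, "turning off discard");
 */
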
static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info)
{
	/* Do it this way so we only ever do one test_bit in the normal case. */
	if (test_bit(BTRFS_FS_CLOSING_START, &fs_info->flags)) {
		if (test_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags))
			return 2;
		return 1;
	}
	return 0;
}

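/*
 * Illustrative (hypothetical) caller of btrfs_fs_closing(), not from the
 * original header: long-running background work polls it and bails out once
 * a close has started, treating any non-zero return as "closing".
 *
 *	if (btrfs_fs_closing(fs_info))
 *		return;
 */
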
/*
 * If we remount the fs to be R/O or umount the fs, the cleaner needn't do
 * anything except sleep. This function is used to check the status of the fs.
 *
 * We check for BTRFS_FS_STATE_RO to avoid races with a concurrent remount,
 * since setting and checking for SB_RDONLY in the superblock's flags is not
 * atomic.
 */
static inline int btrfs_need_cleaner_sleep(struct btrfs_fs_info *fs_info)
{
	return test_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state) ||
	       btrfs_fs_closing(fs_info);
}

static inline void btrfs_wake_unfinished_drop(struct btrfs_fs_info *fs_info)
{
	clear_and_wake_up_bit(BTRFS_FS_UNFINISHED_DROPS, &fs_info->flags);
}

#define BTRFS_FS_ERROR(fs_info)	(READ_ONCE((fs_info)->fs_error))

#define BTRFS_FS_LOG_CLEANUP_ERROR(fs_info) \
	(unlikely(test_bit(BTRFS_FS_STATE_LOG_CLEANUP_ERROR, \
			   &(fs_info)->fs_state)))

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS

#define EXPORT_FOR_TESTS

static inline int btrfs_is_testing(struct btrfs_fs_info *fs_info)
{
	return test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
}

void btrfs_test_destroy_inode(struct inode *inode);

#else

#define EXPORT_FOR_TESTS static

static inline int btrfs_is_testing(struct btrfs_fs_info *fs_info)
{
	return 0;
}
#endif

#endif