// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "tree-checker.h"
#include "space-info.h"

const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.devs_max	= 0,	/* 0 == as many as possible */
		.tolerated_failures = 1,
		.raid_name	= "raid10",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID10,
		.mindev_error	= BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1] = {
		.tolerated_failures = 1,
		.raid_name	= "raid1",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	},
	[BTRFS_RAID_DUP] = {
		.tolerated_failures = 0,
		.raid_name	= "dup",
		.bg_flag	= BTRFS_BLOCK_GROUP_DUP,
	},
	[BTRFS_RAID_RAID0] = {
		.tolerated_failures = 0,
		.raid_name	= "raid0",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID0,
	},
	[BTRFS_RAID_SINGLE] = {
		.tolerated_failures = 0,
		.raid_name	= "single",
	},
	[BTRFS_RAID_RAID5] = {
		.tolerated_failures = 1,
		.raid_name	= "raid5",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID5,
		.mindev_error	= BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID6] = {
		.tolerated_failures = 2,
		.raid_name	= "raid6",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID6,
		.mindev_error	= BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
	},
};
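
/*
 * For illustration only: the array above is indexed by the btrfs_raid_types
 * enum, so once a block group flag is converted to an index the profile
 * parameters can be looked up directly. A minimal sketch, assuming only the
 * fields shown above:
 *
 *	enum btrfs_raid_types index = btrfs_bg_flags_to_raid_index(flags);
 *	int max_failures = btrfs_raid_array[index].tolerated_failures;
 *
 * e.g. for BTRFS_BLOCK_GROUP_RAID6 this yields tolerated_failures == 2.
 */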

const char *btrfs_bg_type_to_raid_name(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	if (index >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_array[index].raid_name;
}
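
/*
 * Usage sketch (hypothetical caller): the returned string points into
 * btrfs_raid_array and must not be freed:
 *
 *	const char *name = btrfs_bg_type_to_raid_name(BTRFS_BLOCK_GROUP_RAID10);
 *	if (name)
 *		pr_info("profile: %s\n", name);	// prints "profile: raid10"
 */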

/*
 * Fill @buf with textual description of @bg_flags, no more than @size_buf
 * bytes including terminating null byte.
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{
	int i;
	int ret;
	char *bp = buf;
	u64 flags = bg_flags;
	u32 size_bp = size_buf;

	if (!flags) {
		strcpy(bp, "NONE");
		return;
	}

#define DESCRIBE_FLAG(flag, desc)					\
	do {								\
		if (flags & (flag)) {					\
			ret = snprintf(bp, size_bp, "%s|", (desc));	\
			if (ret < 0 || ret >= size_bp)			\
				goto out_overflow;			\
			size_bp -= ret;					\
			bp += ret;					\
			flags &= ~(flag);				\
		}							\
	} while (0)

	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");

	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
			      btrfs_raid_array[i].raid_name);
#undef DESCRIBE_FLAG

	if (flags) {
		ret = snprintf(bp, size_bp, "0x%llx|", flags);
		size_bp -= ret;
	}

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */

	/*
	 * The text is trimmed, it's up to the caller to provide sufficiently
	 * large buffer
	 */
out_overflow:;
}
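
/*
 * Example (illustrative, not taken from the source): for
 * bg_flags == (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID1) the
 * function fills @buf with "data|raid1" - the descriptions joined by '|'
 * with the trailing separator overwritten by the terminating NUL.
 */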

static int init_first_rw_device(struct btrfs_trans_handle *trans);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
			     enum btrfs_map_op op,
			     u64 logical, u64 *length,
			     struct btrfs_bio **bbio_ret,
			     int mirror_num, int need_raid_map);

/*
 * Device locking
 * ==============
 *
 * There are several mutexes that protect manipulation of devices and low-level
 * structures like chunks but not block groups, extents or files
 *
 * uuid_mutex (global lock)
 * ------------------------
 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 * device) or requested by the device= mount option
 *
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
 * seeding, structure cloning, opening/closing devices at mount/umount time
 *
 * global::fs_devs - add, remove, updates to the global list
 *
 * does not protect: manipulation of the fs_devices::devices list!
 *
 * btrfs_device::name - renames (write side), read is RCU
 *
 * fs_devices::device_list_mutex (per-fs, with RCU)
 * ------------------------------------------------
 * protects updates to fs_devices::devices, ie. adding and deleting
 *
 * simple list traversal with read-only actions can be done with RCU protection
 *
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
 * several places (internally, ioctl)
 *
 * chunk_mutex
 * -----------
 * protects chunks, adding or removing during allocation, trim or when a new
 * device is added/removed. Additionally it also protects post_commit_list of
 * individual devices, since they can be added to the transaction's
 * post_commit_list only with chunk_mutex held.
 *
 * cleaner_mutex
 * -------------
 * a big lock that is held by the cleaner thread and prevents running subvolume
 * cleaning together with relocation or delayed iputs
 *
 *
 * Exclusive operations, BTRFS_FS_EXCL_OP
 * ======================================
 *
 * Maintains the exclusivity of the following operations that apply to the
 * whole filesystem and cannot run in parallel.
 *
 * - Balance (*)
 * - Device add
 * - Device remove
 * - Device replace (*)
 * - Resize
 *
 * The device operations (as above) can be in one of the following states:
 *
 * - Running state
 * - Paused state
 * - Completed state
 *
 * Only device operations marked with (*) can go into the Paused state for the
 * following reasons:
 *
 * - ioctl (only Balance can be Paused through ioctl)
 * - filesystem remounted as read-only
 * - filesystem unmounted and mounted as read-only
 * - system power-cycle and filesystem mounted as read-only
 * - filesystem or device errors leading to forced read-only
 *
 * BTRFS_FS_EXCL_OP flag is set and cleared using atomic operations.
 * During the course of Paused state, the BTRFS_FS_EXCL_OP remains set.
 * A device operation in Paused or Running state can be canceled or resumed
 * either by ioctl (Balance only) or when remounted as read-write.
 * BTRFS_FS_EXCL_OP flag is cleared when the device operation is canceled or
 * completed.
 */
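
/*
 * Illustrative nesting sketch, drawn from the rules above rather than
 * stated verbatim in this file: a scan-path caller takes the global lock
 * first and the per-fs list lock inside it:
 *
 *	mutex_lock(&uuid_mutex);
 *	mutex_lock(&fs_devices->device_list_mutex);
 *	... manipulate fs_devices->devices ...
 *	mutex_unlock(&fs_devices->device_list_mutex);
 *	mutex_unlock(&uuid_mutex);
 */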

DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head *btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}

/*
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:		if not NULL, copy the UUID to fs_devices::fsid
 * @metadata_fsid:	if not NULL, copy the UUID to fs_devices::metadata_fsid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 * The returned struct is not linked onto any lists and can be destroyed with
 * kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
						 const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->fs_list);
	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);

	if (metadata_fsid)
		memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
	else if (fsid)
		memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);

	return fs_devs;
}

void btrfs_free_device(struct btrfs_device *device)
{
	WARN_ON(!list_empty(&device->post_commit_list));
	rcu_string_free(device->name);
	extent_io_tree_release(&device->alloc_state);
	bio_put(device->flush_bio);
	kfree(device);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_free_device(device);
	}
	kfree(fs_devices);
}

static void btrfs_kobject_uevent(struct block_device *bdev,
				 enum kobject_action action)
{
	int ret;

	ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
	if (ret)
		pr_warn("BTRFS: Sending event '%d' to kobject: '%s' (%p): failed\n",
			action,
			kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
			&disk_to_dev(bdev->bd_disk)->kobj);
}

void __exit btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, fs_list);
		list_del(&fs_devices->fs_list);
		free_fs_devices(fs_devices);
	}
}

/*
 * Returns a pointer to a new btrfs_device on success; ERR_PTR() on error.
 * Returned struct is not linked onto any lists and must be destroyed using
 * btrfs_free_device.
 */
static struct btrfs_device *__alloc_device(void)
{
	struct btrfs_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	/*
	 * Preallocate a bio that's always going to be used for flushing device
	 * barriers and matches the device lifespan
	 */
	dev->flush_bio = bio_alloc_bioset(GFP_KERNEL, 0, NULL);
	if (!dev->flush_bio) {
		kfree(dev);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_alloc_list);
	INIT_LIST_HEAD(&dev->post_commit_list);

	spin_lock_init(&dev->io_lock);

	atomic_set(&dev->reada_in_flight, 0);
	atomic_set(&dev->dev_stats_ccnt, 0);
	btrfs_device_data_ordered_init(dev);
	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	extent_io_tree_init(NULL, &dev->alloc_state, 0, NULL);

	return dev;
}

static noinline struct btrfs_fs_devices *find_fsid(
		const u8 *fsid, const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devices;

	ASSERT(fsid);

	if (metadata_fsid) {
		/*
		 * Handle scanned device having completed its fsid change but
		 * belonging to a fs_devices that was created by first scanning
		 * a device which didn't have its fsid/metadata_uuid changed
		 * at all and the CHANGING_FSID_V2 flag set.
		 */
		list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
			if (fs_devices->fsid_change &&
			    memcmp(metadata_fsid, fs_devices->fsid,
				   BTRFS_FSID_SIZE) == 0 &&
			    memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
				   BTRFS_FSID_SIZE) == 0) {
				return fs_devices;
			}
		}
		/*
		 * Handle scanned device having completed its fsid change but
		 * belonging to a fs_devices that was created by a device that
		 * has an outdated pair of fsid/metadata_uuid and
		 * CHANGING_FSID_V2 flag set.
		 */
		list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
			if (fs_devices->fsid_change &&
			    memcmp(fs_devices->metadata_uuid,
				   fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
			    memcmp(metadata_fsid, fs_devices->metadata_uuid,
				   BTRFS_FSID_SIZE) == 0) {
				return fs_devices;
			}
		}
	}

	/* Handle non-split brain cases */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (metadata_fsid) {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0
			    && memcmp(metadata_fsid, fs_devices->metadata_uuid,
				      BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		} else {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		}
	}
	return NULL;
}
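
/*
 * For illustration (hypothetical state): while a filesystem UUID change is
 * in flight, some member devices may carry the new fsid while their
 * metadata_uuid still names the original one. The two fsid_change loops
 * above match such devices against an fs_devices entry created from either
 * half, so both ends of the split-brain land in the same fs_devices.
 */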

static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct buffer_head **bh)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		goto error;
	}

	if (flush)
		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
	ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*bh = btrfs_read_dev_super(*bdev);
	if (IS_ERR(*bh)) {
		ret = PTR_ERR(*bh);
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	*bh = NULL;
	return ret;
}

static void requeue_list(struct btrfs_pending_bios *pending_bios,
			 struct bio *head, struct bio *tail)
{
	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}

/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device. This greatly
 * improves the schedulers ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block. The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested. This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline void run_scheduled_bios(struct btrfs_device *device)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_pending_bios *pending_bios;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run;
	unsigned long batch_run = 0;
	unsigned long last_waited = 0;
	int force_reg = 0;
	int sync_pending = 0;
	struct blk_plug plug;

	/*
	 * this function runs all the bios we've collected for
	 * a particular device.  We don't want to wander off to
	 * another device without first sending all of these down.
	 * So, setup a plug here and finish it off before we return
	 */
	blk_start_plug(&plug);

	bdi = device->bdev->bd_bdi;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;

	/* take all the bios off the list at once and process them
	 * later on (without the lock held).  But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop.  Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		again = 0;
		device->running_pending = 0;
	} else {
		again = 1;
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);

	while (pending) {

		rmb();
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		    pending_bios != &device->pending_sync_bios &&
		    device->pending_sync_bios.head) ||
		   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		    device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;

		BUG_ON(atomic_read(&cur->__bi_cnt) == 0);

		/*
		 * if we're doing the sync list, record that our
		 * plug has some sync requests on it
		 *
		 * If we're doing the regular list and there are
		 * sync requests sitting around, unplug before
		 * we add more
		 */
		if (pending_bios == &device->pending_sync_bios) {
			sync_pending = 1;
		} else if (sync_pending) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}

		btrfsic_submit_bio(cur);
		num_run++;
		batch_run++;

		cond_resched();

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested.  Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop.  So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				cond_resched();
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_queue_work(fs_info->submit_workers,
					 &device->work);
			goto done;
		}
	}

	cond_resched();
	if (again)
		goto loop;

	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	blk_finish_plug(&plug);
}

static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}

static bool device_path_matched(const char *path, struct btrfs_device *device)
{
	int found;

	rcu_read_lock();
	found = strcmp(rcu_str_deref(device->name), path);
	rcu_read_unlock();

	return found == 0;
}

/*
 *  Search and remove all stale (devices which are not mounted) devices.
 *  When both inputs are NULL, it will search and release all stale devices.
 *  path:	Optional. When provided will it release all unmounted devices
 *		matching this path only.
 *  skip_dev:	Optional. Will skip this device when searching for the stale
 *		devices.
 *  Return:	0 for success or if @path is NULL.
 *		-EBUSY if @path is a mounted device.
 *		-ENOENT if @path does not match any device in the list.
 */
static int btrfs_free_stale_devices(const char *path,
				     struct btrfs_device *skip_device)
{
	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
	struct btrfs_device *device, *tmp_device;
	int ret = 0;

	if (path)
		ret = -ENOENT;

	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {

		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry_safe(device, tmp_device,
					 &fs_devices->devices, dev_list) {
			if (skip_device && skip_device == device)
				continue;
			if (path && !device->name)
				continue;
			if (path && !device_path_matched(path, device))
				continue;
			if (fs_devices->opened) {
				/* for an already deleted device return 0 */
				if (path && ret != 0)
					ret = -EBUSY;
				break;
			}

			/* delete the stale device */
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);

			ret = 0;
			if (fs_devices->num_devices == 0)
				break;
		}
		mutex_unlock(&fs_devices->device_list_mutex);

		if (fs_devices->num_devices == 0) {
			btrfs_sysfs_remove_fsid(fs_devices);
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	return ret;
}
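
/*
 * Usage sketch (mirrors the callers later in this file): after a new
 * device is registered by a scan, stale records of the same path are
 * dropped; "forget" releases all unmounted devices:
 *
 *	btrfs_free_stale_devices(path, device);	(after device_list_add)
 *	btrfs_free_stale_devices(NULL, NULL);	(release all stale devices)
 */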

static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
			struct btrfs_device *device, fmode_t flags,
			void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int ret;

	if (device->bdev)
		return -EINVAL;
	if (!device->name)
		return -EINVAL;

	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
				    &bdev, &bh);
	if (ret)
		return ret;

	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (devid != device->devid)
		goto error_brelse;

	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
		goto error_brelse;

	device->generation = btrfs_super_generation(disk_super);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
		if (btrfs_super_incompat_flags(disk_super) &
		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
			pr_err(
		"BTRFS: Invalid seeding and uuid-changed device detected\n");
			goto error_brelse;
		}

		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		fs_devices->seeding = 1;
	} else {
		if (bdev_read_only(bdev))
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		else
			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	}

	q = bdev_get_queue(bdev);
	if (!blk_queue_nonrot(q))
		fs_devices->rotating = 1;

	device->bdev = bdev;
	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	device->mode = flags;

	fs_devices->open_devices++;
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		fs_devices->rw_devices++;
		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
	}
	brelse(bh);

	return 0;

error_brelse:
	brelse(bh);
	blkdev_put(bdev, flags);

	return -EINVAL;
}

/*
 * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices
 * being created with a disk that has already completed its fsid change.
 */
static struct btrfs_fs_devices *find_fsid_inprogress(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) {
			return fs_devices;
		}
	}

	return NULL;
}

static struct btrfs_fs_devices *find_fsid_changed(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handles the case where scanned device is part of an fs that had
	 * multiple successful changes of FSID but currently device didn't
	 * observe it. Meaning our fsid will be different than theirs.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->fsid,
			   BTRFS_FSID_SIZE) != 0) {
			return fs_devices;
		}
	}

	return NULL;
}

/*
 * Add new device to list of registered devices
 *
 * Returns:
 * device pointer which was just added or updated when successful
 * error pointer when failed
 */
static noinline struct btrfs_device *device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   bool *new_device_added)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = NULL;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);
	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
	bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
		BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
	bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
					BTRFS_SUPER_FLAG_CHANGING_FSID_V2);

	if (fsid_change_in_progress) {
		if (!has_metadata_uuid) {
			/*
			 * When we have an image which has CHANGING_FSID_V2 set
			 * it might belong to either a filesystem which has
			 * disks with completed fsid change or it might belong
			 * to fs with no UUID changes in effect, handle both.
			 */
			fs_devices = find_fsid_inprogress(disk_super);
			if (!fs_devices)
				fs_devices = find_fsid(disk_super->fsid, NULL);
		} else {
			fs_devices = find_fsid_changed(disk_super);
		}
	} else if (has_metadata_uuid) {
		fs_devices = find_fsid(disk_super->fsid,
				       disk_super->metadata_uuid);
	} else {
		fs_devices = find_fsid(disk_super->fsid, NULL);
	}

	if (!fs_devices) {
		if (has_metadata_uuid)
			fs_devices = alloc_fs_devices(disk_super->fsid,
						      disk_super->metadata_uuid);
		else
			fs_devices = alloc_fs_devices(disk_super->fsid, NULL);

		if (IS_ERR(fs_devices))
			return ERR_CAST(fs_devices);

		fs_devices->fsid_change = fsid_change_in_progress;

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&fs_devices->fs_list, &fs_uuids);

		device = NULL;
	} else {
		mutex_lock(&fs_devices->device_list_mutex);
		device = btrfs_find_device(fs_devices, devid,
				disk_super->dev_item.uuid, NULL, false);

		/*
		 * If this disk has been pulled into an fs devices created by
		 * a device which had the CHANGING_FSID_V2 flag then replace the
		 * metadata_uuid/fsid values of the fs_devices.
		 */
		if (has_metadata_uuid && fs_devices->fsid_change &&
		    found_transid > fs_devices->latest_generation) {
			memcpy(fs_devices->fsid, disk_super->fsid,
					BTRFS_FSID_SIZE);
			memcpy(fs_devices->metadata_uuid,
					disk_super->metadata_uuid, BTRFS_FSID_SIZE);
			fs_devices->fsid_change = false;
		}
	}

	if (!device) {
		if (fs_devices->opened) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EBUSY);
		}

		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid);
		if (IS_ERR(device)) {
			mutex_unlock(&fs_devices->device_list_mutex);
			/* we can safely leave the fs_devices entry around */
			return device;
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			btrfs_free_device(device);
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_assign_pointer(device->name, name);

		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;

		device->fs_devices = fs_devices;
		*new_device_added = true;

		if (disk_super->label[0])
			pr_info("BTRFS: device label %s devid %llu transid %llu %s\n",
				disk_super->label, devid, found_transid, path);
		else
			pr_info("BTRFS: device fsid %pU devid %llu transid %llu %s\n",
				disk_super->fsid, devid, found_transid, path);

	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *      a. The same device disappeared and reappeared with
		 *         different name. or
		 *      b. The missing-disk-which-was-replaced, has
		 *         reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be a spurious
		 * and unintentional.
		 *
		 * Further in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transaction when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at all time.
		 */

		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after FS has been mounted.  We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with same uuid and devid.We keep the one
			 * with larger generation number or the last-in if
			 * generation are equal.
			 */
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EEXIST);
		}

		/*
		 * We are going to replace the device path for a given devid,
		 * make sure it's the same device if the device is mounted
		 */
		if (device->bdev) {
			struct block_device *path_bdev;

			path_bdev = lookup_bdev(path);
			if (IS_ERR(path_bdev)) {
				mutex_unlock(&fs_devices->device_list_mutex);
				return ERR_CAST(path_bdev);
			}

			if (device->bdev != path_bdev) {
				bdput(path_bdev);
				mutex_unlock(&fs_devices->device_list_mutex);
				btrfs_warn_in_rcu(device->fs_info,
			"duplicate device fsid:devid for %pU:%llu old:%s new:%s",
					disk_super->fsid, devid,
					rcu_str_deref(device->name), path);
				return ERR_PTR(-EEXIST);
			}
			bdput(path_bdev);
			btrfs_info_in_rcu(device->fs_info,
				"device fsid %pU devid %llu moved old:%s new:%s",
				disk_super->fsid, devid,
				rcu_str_deref(device->name), path);
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			fs_devices->missing_devices--;
			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with largest generation
	 * (as above).
	 */
	if (!fs_devices->opened) {
		device->generation = found_transid;
		fs_devices->latest_generation = max_t(u64, found_transid,
						fs_devices->latest_generation);
	}

	fs_devices->total_devices = btrfs_super_num_devices(disk_super);

	mutex_unlock(&fs_devices->device_list_mutex);
	return device;
}
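
/*
 * A compressed view of the decision tree above (illustration only):
 *
 *	fs_devices = match by fsid/metadata_uuid (split-brain aware)
 *	if (!fs_devices)	-> allocate and register a new fs_devices
 *	if (!device)		-> allocate btrfs_device, *new_device_added
 *	else if (path changed)	-> validate and update the recorded path
 */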

static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = alloc_fs_devices(orig->fsid, NULL);
	if (IS_ERR(fs_devices))
		return fs_devices;

	mutex_lock(&orig->device_list_mutex);
	fs_devices->total_devices = orig->total_devices;

	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid);
		if (IS_ERR(device))
			goto error;

		/*
		 * This is ok to do without rcu read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		if (orig_dev->name) {
			name = rcu_string_strdup(orig_dev->name->str,
					GFP_KERNEL);
			if (!name) {
				btrfs_free_device(device);
				goto error;
			}
			rcu_assign_pointer(device->name, name);
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	mutex_unlock(&orig->device_list_mutex);
	return fs_devices;
error:
	mutex_unlock(&orig->device_list_mutex);
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}

/*
 * After we have read the system tree and know devids belonging to
 * this filesystem, remove the device which does not belong there.
 */
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, int step)
{
	struct btrfs_device *device, *next;
	struct btrfs_device *latest_dev = NULL;

	mutex_lock(&uuid_mutex);
again:
	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
							&device->dev_state)) {
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state) &&
			    (!latest_dev ||
			     device->generation > latest_dev->generation)) {
				latest_dev = device;
			}
			continue;
		}

		if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
			/*
			 * In the first step, keep the device which has
			 * the correct fsid and the devid that is used
			 * for the dev_replace procedure.
			 * In the second step, the dev_replace state is
			 * read from the device tree and it is known
			 * whether the procedure is really active or
			 * not, which means whether this device is
			 * used or whether it should be removed.
			 */
			if (step == 0 || test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
						  &device->dev_state)) {
				continue;
			}
		}
		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			list_del_init(&device->dev_alloc_list);
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state))
				fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		btrfs_free_device(device);
	}

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	fs_devices->latest_bdev = latest_dev->bdev;

	mutex_unlock(&uuid_mutex);
}

static void btrfs_close_bdev(struct btrfs_device *device)
{
	if (!device->bdev)
		return;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		sync_blockdev(device->bdev);
		invalidate_bdev(device->bdev);
	}

	blkdev_put(device->bdev, device->mode);
}

static void btrfs_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;
	struct btrfs_device *new_device;
	struct rcu_string *name;

	if (device->bdev)
		fs_devices->open_devices--;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
		fs_devices->missing_devices--;

	btrfs_close_bdev(device);

	new_device = btrfs_alloc_device(NULL, &device->devid,
					device->uuid);
	BUG_ON(IS_ERR(new_device)); /* -ENOMEM */

	/* Safe because we are under uuid_mutex */
	if (device->name) {
		name = rcu_string_strdup(device->name->str, GFP_NOFS);
		BUG_ON(!name); /* -ENOMEM */
		rcu_assign_pointer(new_device->name, name);
	}

	list_replace_rcu(&device->dev_list, &new_device->dev_list);
	new_device->fs_devices = device->fs_devices;

	synchronize_rcu();
	btrfs_free_device(device);
}

static int close_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;

	if (--fs_devices->opened > 0)
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) {
		btrfs_close_one_device(device);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = close_fs_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		close_fs_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	return ret;
}

static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;

	flags |= FMODE_EXCL;

	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		/* Just open everything we can; ignore failures here */
		if (btrfs_open_one_device(fs_devices, device, flags, holder))
			continue;

		if (!latest_dev ||
		    device->generation > latest_dev->generation)
			latest_dev = device;
	}
	if (fs_devices->open_devices == 0)
		return -EINVAL;

	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_dev->bdev;
	fs_devices->total_rw_bytes = 0;

	return 0;
}

static int devid_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct btrfs_device *dev1, *dev2;

	dev1 = list_entry(a, struct btrfs_device, dev_list);
	dev2 = list_entry(b, struct btrfs_device, dev_list);

	if (dev1->devid < dev2->devid)
		return -1;
	else if (dev1->devid > dev2->devid)
		return 1;
	return 0;
}
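
/*
 * devid_cmp() follows the list_sort() comparator contract: return a
 * negative value, zero or a positive value when @a sorts before, equal
 * to or after @b. btrfs_open_devices() below uses it so devices are
 * opened in ascending devid order:
 *
 *	list_sort(NULL, &fs_devices->devices, devid_cmp);
 */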

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	lockdep_assert_held(&uuid_mutex);

	mutex_lock(&fs_devices->device_list_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		list_sort(NULL, &fs_devices->devices, devid_cmp);
		ret = open_fs_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}

static void btrfs_release_disk_super(struct page *page)
{
	kunmap(page);
	put_page(page);
}

static int btrfs_read_disk_super(struct block_device *bdev, u64 bytenr,
				 struct page **page,
				 struct btrfs_super_block **disk_super)
{
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
		return 1;

	/* make sure our super fits in the page */
	if (sizeof(**disk_super) > PAGE_SIZE)
		return 1;

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(**disk_super) - 1) >> PAGE_SHIFT != index)
		return 1;

	/* pull in the page with our super */
	*page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
				   index, GFP_KERNEL);

	if (IS_ERR_OR_NULL(*page))
		return 1;

	p = kmap(*page);

	/* align our pointer to the offset of the super block */
	*disk_super = p + offset_in_page(bytenr);

	if (btrfs_super_bytenr(*disk_super) != bytenr ||
	    btrfs_super_magic(*disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(*page);
		return 1;
	}

	if ((*disk_super)->label[0] &&
		(*disk_super)->label[BTRFS_LABEL_SIZE - 1])
		(*disk_super)->label[BTRFS_LABEL_SIZE - 1] = '\0';

	return 0;
}
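
/*
 * Illustration: the primary superblock lives at btrfs_sb_offset(0), 64KiB
 * into the device, so with 4KiB pages the checks above amount to "the
 * super must sit whole inside page index 16 and the device must extend
 * past it". A sketch of the scan-time call:
 *
 *	ret = btrfs_read_disk_super(bdev, btrfs_sb_offset(0), &page,
 *				    &disk_super);
 */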

int btrfs_forget_devices(const char *path)
{
	int ret;

	mutex_lock(&uuid_mutex);
	ret = btrfs_free_stale_devices(strlen(path) ? path : NULL, NULL);
	mutex_unlock(&uuid_mutex);

	return ret;
}

/*
 * Look for a btrfs signature on a device. This may be called out of the mount path
 * and we are not allowed to call set_blocksize during the scan. The superblock
 * is read via pagecache
 */
struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
					   void *holder)
{
	struct btrfs_super_block *disk_super;
	bool new_device_added = false;
	struct btrfs_device *device = NULL;
	struct block_device *bdev;
	struct page *page;
	u64 bytenr;

	lockdep_assert_held(&uuid_mutex);

	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	bytenr = btrfs_sb_offset(0);
	flags |= FMODE_EXCL;

	bdev = blkdev_get_by_path(path, flags, holder);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	if (btrfs_read_disk_super(bdev, bytenr, &page, &disk_super)) {
		device = ERR_PTR(-EINVAL);
		goto error_bdev_put;
	}

	device = device_list_add(path, disk_super, &new_device_added);
	if (!IS_ERR(device)) {
		if (new_device_added)
			btrfs_free_stale_devices(path, device);
	}

	btrfs_release_disk_super(page);

error_bdev_put:
	blkdev_put(bdev, flags);

	return device;
}
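
/*
 * Sketch of the overall scan flow (device path is only an example; the
 * holder token is whatever the caller uses for blkdev_get_by_path()
 * exclusion):
 *
 *	mutex_lock(&uuid_mutex);
 *	device = btrfs_scan_one_device("/dev/sdb", FMODE_READ, holder);
 *	mutex_unlock(&uuid_mutex);
 */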

/*
 * Try to find a chunk that intersects [start, start + len] range and when one
 * such is found, record the end of it in *start
 */
static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
				    u64 len)
{
	u64 physical_start, physical_end;

	lockdep_assert_held(&device->fs_info->chunk_mutex);

	if (!find_first_extent_bit(&device->alloc_state, *start,
				   &physical_start, &physical_end,
				   CHUNK_ALLOCATED, NULL)) {

		if (in_range(physical_start, *start, len) ||
		    in_range(*start, physical_start,
			     physical_end - physical_start)) {
			*start = physical_end + 1;
			return true;
		}
	}
	return false;
}

/*
 * find_free_dev_extent_start - find free space in the specified device
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space. that we find, or the size
 *		  of the max free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find. But if we
 * don't find suitable free space, it will be used to store the start position
 * of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
int find_free_dev_extent_start(struct btrfs_device *device, u64 num_bytes,
			       u64 search_start, u64 *start, u64 *len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	/*
	 * We don't want to overwrite the superblock on the drive nor any area
	 * used by the boot loader (grub for example), so we make sure to start
	 * at an offset of at least 1MB.
	 */
	search_start = max_t(u64, search_start, SZ_1M);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	max_hole_start = search_start;
	max_hole_size = 0;

again:
	if (search_start >= search_end ||
		test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			/*
			 * Have to check before we set max_hole_start, otherwise
			 * we could end up sending back this offset anyway.
			 */
			if (contains_pending_extent(device, &search_start,
						    hole_size)) {
				if (key.offset >= search_start)
					hole_size = key.offset - search_start;
				else
					hole_size = 0;
			}

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than which we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size.  Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;

		if (contains_pending_extent(device, &search_start, hole_size)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}
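
/*
 * Worked example (illustrative numbers): with dev extents at [1MiB, 5MiB)
 * and [9MiB, 12MiB) on a 16MiB device, a search for 3MiB of space walks
 * the extents and finds the hole [5MiB, 9MiB) first: hole_size = 4MiB >=
 * 3MiB, so *start = 5MiB is returned. Had no hole been big enough,
 * *start/*len would describe the largest hole seen and the return value
 * would be -ENOSPC.
 */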

int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	/* FIXME use last free of some kind */
	return find_free_dev_extent_start(device, num_bytes, 0, start, len);
}

static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
			  struct btrfs_device *device,
			  u64 start, u64 *dev_extent_len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		btrfs_handle_fs_error(fs_info, ret, "Slot search failed");
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret,
				      "Failed to remove dev extent item");
	} else {
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
	}
out:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_device *device,
				  u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state));
	WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent,
					BTRFS_CHUNK_TREE_OBJECTID);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent,
					    BTRFS_FIRST_CHUNK_TREE_OBJECTID);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct rb_node *n;
	u64 ret = 0;

	em_tree = &fs_info->mapping_tree;
	read_lock(&em_tree->lock);
	n = rb_last(&em_tree->map.rb_root);
	if (n) {
		em = rb_entry(n, struct extent_map, rb_node);
		ret = em->start + em->len;
	}
	read_unlock(&em_tree->lock);

	return ret;
}

static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
				    u64 *devid_ret)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0); /* Corruption */

	ret = btrfs_previous_item(fs_info->chunk_root, path,
				  BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*devid_ret = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*devid_ret = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
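
/*
 * Illustration: device items are keyed (BTRFS_DEV_ITEMS_OBJECTID,
 * BTRFS_DEV_ITEM_KEY, devid), so seeking to offset (u64)-1 and stepping
 * back one item lands on the highest existing devid. With devids
 * {1, 2, 5} present, *devid_ret becomes 6; on an empty tree it is 1.
 */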

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
			    struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
				      &key, sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
			    ptr, BTRFS_FSID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Function to update ctime/mtime for a given device path.
 * Mainly used for ctime/mtime based probe like libblkid.
 */
static void update_dev_time(const char *path_name)
{
	struct file *filp;

	filp = filp_open(path_name, O_RDWR, 0);
	if (IS_ERR(filp))
		return;
	file_update_time(filp);
	filp_close(filp, NULL);
}

static int btrfs_rm_dev_item(struct btrfs_device *device)
{
	struct btrfs_root *root = device->fs_info->chunk_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
	}

out:
	btrfs_free_path(path);
	if (!ret)
		ret = btrfs_commit_transaction(trans);
	return ret;
}

/*
 * Verify that @num_devices satisfies the RAID profile constraints in the whole
 * filesystem. It's up to the caller to adjust that number regarding eg. device
 * replace.
 */
static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
		u64 num_devices)
{
	u64 all_avail;
	unsigned seq;
	int i;

	do {
		seq = read_seqbegin(&fs_info->profiles_lock);

		all_avail = fs_info->avail_data_alloc_bits |
			    fs_info->avail_system_alloc_bits |
			    fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
		if (!(all_avail & btrfs_raid_array[i].bg_flag))
			continue;

		if (num_devices < btrfs_raid_array[i].devs_min) {
			int ret = btrfs_raid_array[i].mindev_error;

			if (ret)
				return ret;
		}
	}

	return 0;
}

static struct btrfs_device *btrfs_find_next_active_device(
		struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
{
	struct btrfs_device *next_device;

	list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
		if (next_device != device &&
		    !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
		    && next_device->bdev)
			return next_device;
	}

	return NULL;
}

/*
 * Helper function to check if the given device is part of s_bdev / latest_bdev
 * and replace it with the provided or the next active device, in the context
 * where this function is called, there should always be another device (or
 * this_dev) which is active.
 */
void btrfs_assign_next_active_device(struct btrfs_device *device,
				     struct btrfs_device *this_dev)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_device *next_device;

	if (this_dev)
		next_device = this_dev;
	else
		next_device = btrfs_find_next_active_device(fs_info->fs_devices,
								device);
	ASSERT(next_device);

	if (fs_info->sb->s_bdev &&
			(fs_info->sb->s_bdev == device->bdev))
		fs_info->sb->s_bdev = next_device->bdev;

	if (fs_info->fs_devices->latest_bdev == device->bdev)
		fs_info->fs_devices->latest_bdev = next_device->bdev;
}

/*
 * Return btrfs_fs_devices::num_devices excluding the device that's being
 * currently replaced.
 */
static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
{
	u64 num_devices = fs_info->fs_devices->num_devices;

	down_read(&fs_info->dev_replace.rwsem);
	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
		ASSERT(num_devices > 1);
		num_devices--;
	}
	up_read(&fs_info->dev_replace.rwsem);

	return num_devices;
}

int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
		u64 devid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u64 num_devices;
	int ret = 0;

	mutex_lock(&uuid_mutex);

	num_devices = btrfs_num_devices(fs_info);

	ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
	if (ret)
		goto out;

	device = btrfs_find_device_by_devspec(fs_info, devid, device_path);

	if (IS_ERR(device)) {
		if (PTR_ERR(device) == -ENOENT &&
		    strcmp(device_path, "missing") == 0)
			ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
		else
			ret = PTR_ERR(device);
		goto out;
	}

	if (btrfs_pinned_by_swapfile(fs_info, device)) {
		btrfs_warn_in_rcu(fs_info,
		  "cannot remove device %s (devid %llu) due to active swapfile",
				  rcu_str_deref(device->name), device->devid);
		ret = -ETXTBSY;
		goto out;
	}

	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
		goto out;
	}

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    fs_info->fs_devices->rw_devices == 1) {
		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
		goto out;
	}

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		mutex_lock(&fs_info->chunk_mutex);
		list_del_init(&device->dev_alloc_list);
		device->fs_devices->rw_devices--;
		mutex_unlock(&fs_info->chunk_mutex);
	}

	mutex_unlock(&uuid_mutex);
	ret = btrfs_shrink_device(device, 0);
	mutex_lock(&uuid_mutex);
	if (ret)
		goto error_undo;

	/*
	 * TODO: the superblock still includes this device in its num_devices
	 * counter although write_all_supers() is not locked out. This
	 * could give a filesystem state which requires a degraded mount.
	 */
	ret = btrfs_rm_dev_item(device);
	if (ret)
		goto error_undo;

	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	btrfs_scrub_cancel_dev(device);

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers. Whoever is writing all supers, should
	 * lock the device list mutex before getting the number of
	 * devices in the super block (super_copy). Conversely,
	 * whoever updates the number of devices in the super block
	 * (super_copy) should hold the device list mutex.
	 */

	/*
	 * In normal cases the cur_devices == fs_devices. But in case
	 * of deleting a seed device, the cur_devices should point to
	 * its own fs_devices listed under the fs_devices->seed.
	 */
	cur_devices = device->fs_devices;
	mutex_lock(&fs_devices->device_list_mutex);
	list_del_rcu(&device->dev_list);

	cur_devices->num_devices--;
	cur_devices->total_devices--;
	/* Update total_devices of the parent fs_devices if it's seed */
	if (cur_devices != fs_devices)
		fs_devices->total_devices--;

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
		cur_devices->missing_devices--;

	btrfs_assign_next_active_device(device, NULL);

	if (device->bdev) {
		cur_devices->open_devices--;
		/* remove sysfs entry */
		btrfs_sysfs_rm_device_link(fs_devices, device);
	}

	num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
	mutex_unlock(&fs_devices->device_list_mutex);

	/*
	 * at this point, the device is zero sized and detached from
	 * the devices list.  All that's left is to zero out the old
	 * supers and free the device.
	 */
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
		btrfs_scratch_superblocks(device->bdev, device->name->str);

	btrfs_close_bdev(device);
	synchronize_rcu();
	btrfs_free_device(device);

	if (cur_devices->open_devices == 0) {
		while (fs_devices) {
			if (fs_devices->seed == cur_devices) {
				fs_devices->seed = cur_devices->seed;
				break;
			}
			fs_devices = fs_devices->seed;
		}
		cur_devices->seed = NULL;
		close_fs_devices(cur_devices);
		free_fs_devices(cur_devices);
	}

out:
	mutex_unlock(&uuid_mutex);
	return ret;

error_undo:
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		mutex_lock(&fs_info->chunk_mutex);
		list_add(&device->dev_alloc_list,
			 &fs_devices->alloc_list);
		device->fs_devices->rw_devices++;
		mutex_unlock(&fs_info->chunk_mutex);
	}
	goto out;
}
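
/*
 * Order of operations above, compressed (illustration only): check RAID
 * minimums -> shrink the device to zero -> delete its dev item -> unlink
 * it from the lists and fix up super_copy -> scratch the on-disk supers
 * and free the struct; error_undo re-adds it to the allocation list.
 */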

void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
{
	struct btrfs_fs_devices *fs_devices;

	lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);

	/*
	 * in case of fs with no seed, srcdev->fs_devices will point
	 * to fs_devices of fs_info. However when the dev being replaced is
	 * a seed dev it will point to the seed's local fs_devices. In short
	 * srcdev will have its correct fs_devices in both the cases.
	 */
	fs_devices = srcdev->fs_devices;

	list_del_rcu(&srcdev->dev_list);
	list_del(&srcdev->dev_alloc_list);
	fs_devices->num_devices--;
	if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
		fs_devices->missing_devices--;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
		fs_devices->rw_devices--;

	if (srcdev->bdev)
		fs_devices->open_devices--;
}

void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev)
{
	struct btrfs_fs_info *fs_info = srcdev->fs_info;
	struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state)) {
		/* zero out the old super if it is writable */
		btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str);
	}

	btrfs_close_bdev(srcdev);
	synchronize_rcu();
	btrfs_free_device(srcdev);

	/* if this is no devs we rather delete the fs_devices */
	if (!fs_devices->num_devices) {
		struct btrfs_fs_devices *tmp_fs_devices;

		/*
		 * On a mounted FS, num_devices can't be zero unless it's a
		 * seed. In case of a seed device being replaced, the replace
		 * target added to the sprout FS, so there will be no more
		 * device left under the seed FS.
		 */
		ASSERT(fs_devices->seeding);

		tmp_fs_devices = fs_info->fs_devices;
		while (tmp_fs_devices) {
			if (tmp_fs_devices->seed == fs_devices) {
				tmp_fs_devices->seed = fs_devices->seed;
				break;
			}
			tmp_fs_devices = tmp_fs_devices->seed;
		}
		fs_devices->seed = NULL;
		close_fs_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
}
void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
{
	struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;

	mutex_lock(&fs_devices->device_list_mutex);

	btrfs_sysfs_rm_device_link(fs_devices, tgtdev);

	if (tgtdev->bdev)
		fs_devices->open_devices--;

	fs_devices->num_devices--;

	btrfs_assign_next_active_device(tgtdev, NULL);

	list_del_rcu(&tgtdev->dev_list);

	mutex_unlock(&fs_devices->device_list_mutex);

	/*
	 * The update_dev_time() within btrfs_scratch_superblocks()
	 * may lead to a call to btrfs_show_devname() which will try
	 * to hold device_list_mutex. And here this device
	 * is already out of the device list, so we don't have to hold
	 * the device_list_mutex lock.
	 */
	btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str);

	btrfs_close_bdev(tgtdev);

	btrfs_free_device(tgtdev);
}
static struct btrfs_device *btrfs_find_device_by_path(
		struct btrfs_fs_info *fs_info, const char *device_path)
{
	int ret = 0;
	struct btrfs_super_block *disk_super;
	u64 devid;
	u8 *dev_uuid;
	struct block_device *bdev;
	struct buffer_head *bh;
	struct btrfs_device *device;

	ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
				    fs_info->bdev_holder, 0, &bdev, &bh);
	if (ret)
		return ERR_PTR(ret);
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	dev_uuid = disk_super->dev_item.uuid;
	if (btrfs_fs_incompat(fs_info, METADATA_UUID))
		device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
					   disk_super->metadata_uuid, true);
	else
		device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
					   disk_super->fsid, true);

	brelse(bh);
	if (!device)
		device = ERR_PTR(-ENOENT);
	blkdev_put(bdev, FMODE_READ);
	return device;
}
/*
 * Lookup a device given by device id, or the path if the id is 0.
 */
struct btrfs_device *btrfs_find_device_by_devspec(
		struct btrfs_fs_info *fs_info, u64 devid,
		const char *device_path)
{
	struct btrfs_device *device;

	if (devid) {
		device = btrfs_find_device(fs_info->fs_devices, devid, NULL,
					   NULL, true);
		if (!device)
			return ERR_PTR(-ENOENT);
		return device;
	}

	if (!device_path || !device_path[0])
		return ERR_PTR(-EINVAL);

	if (strcmp(device_path, "missing") == 0) {
		/* Find first missing device */
		list_for_each_entry(device, &fs_info->fs_devices->devices,
				    dev_list) {
			if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
				     &device->dev_state) && !device->bdev)
				return device;
		}
		return ERR_PTR(-ENOENT);
	}

	return btrfs_find_device_by_path(fs_info, device_path);
}
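
/*
 * Illustrative usage sketch (an assumption, not part of the original
 * file): callers resolve a user-supplied device spec roughly like
 *
 *	device = btrfs_find_device_by_devspec(fs_info, devid, path);
 *	if (IS_ERR(device))
 *		return PTR_ERR(device);
 *
 * where devid != 0 selects lookup by id, the literal string "missing"
 * selects the first device recorded in the metadata that has no bdev,
 * and any other non-empty string is treated as a path.
 */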
/*
 * Does all the dirty work required for changing the file system's UUID.
 */
static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_super_block *disk_super = fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

	lockdep_assert_held(&uuid_mutex);
	if (!fs_devices->seeding)
		return -EINVAL;

	seed_devices = alloc_fs_devices(NULL, NULL);
	if (IS_ERR(seed_devices))
		return PTR_ERR(seed_devices);

	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return PTR_ERR(old_devices);
	}

	list_add(&old_devices->fs_list, &fs_uuids);

	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	mutex_init(&seed_devices->device_list_mutex);

	mutex_lock(&fs_devices->device_list_mutex);
	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
			      synchronize_rcu);
	list_for_each_entry(device, &seed_devices->devices, dev_list)
		device->fs_devices = seed_devices;

	mutex_lock(&fs_info->chunk_mutex);
	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
	mutex_unlock(&fs_info->chunk_mutex);

	fs_devices->seeding = 0;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->missing_devices = 0;
	fs_devices->rotating = 0;
	fs_devices->seed = seed_devices;

	generate_random_uuid(fs_devices->fsid);
	memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	mutex_unlock(&fs_devices->device_list_mutex);

	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);

	return 0;
}
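
/*
 * Illustrative note, not from the original source: sprouting is what
 * happens when a writable device is added to a mounted read-only seed
 * filesystem, e.g. (hypothetical shell session):
 *
 *	mount /dev/seed /mnt		# seed FS, read-only
 *	btrfs device add /dev/sdb /mnt
 *	mount -o remount,rw /mnt
 *
 * btrfs_prepare_sprout() moves the existing devices onto the new
 * seed_devices list, generates a fresh fsid for the sprout, and clears
 * BTRFS_SUPER_FLAG_SEEDING in the in-memory superblock flags.
 */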
/*
 * Store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dev_item *dev_item;
	struct btrfs_device *device;
	struct btrfs_key key;
	u8 fs_uuid[BTRFS_FSID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];
	u64 devid;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_DEV_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto error;

		leaf = path->nodes[0];
next_slot:
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret > 0)
				break;
			if (ret < 0)
				goto error;
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
			btrfs_release_path(path);
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
		    key.type != BTRFS_DEV_ITEM_KEY)
			break;

		dev_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_dev_item);
		devid = btrfs_device_id(leaf, dev_item);
		read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
				   BTRFS_UUID_SIZE);
		read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
				   BTRFS_FSID_SIZE);
		device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
					   fs_uuid, true);
		BUG_ON(!device); /* Logic error */

		if (device->fs_devices->seeding) {
			btrfs_set_device_generation(leaf, dev_item,
						    device->generation);
			btrfs_mark_buffer_dirty(leaf);
		}

		path->slots[0]++;
		goto next_slot;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path)
{
	struct btrfs_root *root = fs_info->dev_root;
	struct request_queue *q;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct super_block *sb = fs_info->sb;
	struct rcu_string *name;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u64 orig_super_total_bytes;
	u64 orig_super_num_devices;
	int seeding_dev = 0;
	int ret = 0;
	bool unlocked = false;

	if (sb_rdonly(sb) && !fs_devices->seeding)
		return -EROFS;

	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
				  fs_info->bdev_holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (fs_devices->seeding) {
		seeding_dev = 1;
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);
	}

	filemap_write_and_wait(bdev->bd_inode->i_mapping);

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (device->bdev == bdev) {
			ret = -EEXIST;
			mutex_unlock(
				&fs_devices->device_list_mutex);
			goto error;
		}
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	device = btrfs_alloc_device(fs_info, NULL, NULL);
	if (IS_ERR(device)) {
		/* we can safely leave the fs_devices entry around */
		ret = PTR_ERR(device);
		goto error;
	}

	name = rcu_string_strdup(device_path, GFP_KERNEL);
	if (!name) {
		ret = -ENOMEM;
		goto error_free_device;
	}
	rcu_assign_pointer(device->name, name);

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto error_free_device;
	}

	q = bdev_get_queue(bdev);
	set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	device->generation = trans->transid;
	device->io_width = fs_info->sectorsize;
	device->io_align = fs_info->sectorsize;
	device->sector_size = fs_info->sectorsize;
	device->total_bytes = round_down(i_size_read(bdev->bd_inode),
					 fs_info->sectorsize);
	device->disk_total_bytes = device->total_bytes;
	device->commit_total_bytes = device->total_bytes;
	device->fs_info = fs_info;
	device->bdev = bdev;
	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
	device->mode = FMODE_EXCL;
	device->dev_stats_valid = 1;
	set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);

	if (seeding_dev) {
		sb->s_flags &= ~SB_RDONLY;
		ret = btrfs_prepare_sprout(fs_info);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto error_trans;
		}
	}

	device->fs_devices = fs_devices;

	mutex_lock(&fs_devices->device_list_mutex);
	mutex_lock(&fs_info->chunk_mutex);
	list_add_rcu(&device->dev_list, &fs_devices->devices);
	list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
	fs_devices->num_devices++;
	fs_devices->open_devices++;
	fs_devices->rw_devices++;
	fs_devices->total_devices++;
	fs_devices->total_rw_bytes += device->total_bytes;

	atomic64_add(device->total_bytes, &fs_info->free_chunk_space);

	if (!blk_queue_nonrot(q))
		fs_devices->rotating = 1;

	orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
	btrfs_set_super_total_bytes(fs_info->super_copy,
		round_down(orig_super_total_bytes + device->total_bytes,
			   fs_info->sectorsize));

	orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy);
	btrfs_set_super_num_devices(fs_info->super_copy,
				    orig_super_num_devices + 1);

	/* add sysfs device entry */
	btrfs_sysfs_add_device_link(fs_devices, device);

	/*
	 * we've got more storage, clear any full flags on the space
	 * infos
	 */
	btrfs_clear_space_info_full(fs_info);

	mutex_unlock(&fs_info->chunk_mutex);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (seeding_dev) {
		mutex_lock(&fs_info->chunk_mutex);
		ret = init_first_rw_device(trans);
		mutex_unlock(&fs_info->chunk_mutex);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto error_sysfs;
		}
	}

	ret = btrfs_add_dev_item(trans, device);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto error_sysfs;
	}

	if (seeding_dev) {
		char fsid_buf[BTRFS_UUID_UNPARSED_SIZE];

		ret = btrfs_finish_sprout(trans);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto error_sysfs;
		}

		/* Sprouting would change fsid of the mounted root,
		 * so rename the fsid on the sysfs
		 */
		snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU",
						fs_info->fs_devices->fsid);
		if (kobject_rename(&fs_devices->fsid_kobj, fsid_buf))
			btrfs_warn(fs_info,
				   "sysfs: failed to create fsid for sprout");
	}

	ret = btrfs_commit_transaction(trans);

	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
		unlocked = true;

		if (ret) /* transaction commit */
			return ret;

		ret = btrfs_relocate_sys_chunks(fs_info);
		if (ret < 0)
			btrfs_handle_fs_error(fs_info, ret,
				    "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) == -ENOENT)
				return 0;
			ret = PTR_ERR(trans);
			trans = NULL;
			goto error_sysfs;
		}
		ret = btrfs_commit_transaction(trans);
	}

	/* Update ctime/mtime for libblkid */
	update_dev_time(device_path);
	return ret;

error_sysfs:
	btrfs_sysfs_rm_device_link(fs_devices, device);
	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	mutex_lock(&fs_info->chunk_mutex);
	list_del_rcu(&device->dev_list);
	list_del(&device->dev_alloc_list);
	fs_info->fs_devices->num_devices--;
	fs_info->fs_devices->open_devices--;
	fs_info->fs_devices->rw_devices--;
	fs_info->fs_devices->total_devices--;
	fs_info->fs_devices->total_rw_bytes -= device->total_bytes;
	atomic64_sub(device->total_bytes, &fs_info->free_chunk_space);
	btrfs_set_super_total_bytes(fs_info->super_copy,
				    orig_super_total_bytes);
	btrfs_set_super_num_devices(fs_info->super_copy,
				    orig_super_num_devices);
	mutex_unlock(&fs_info->chunk_mutex);
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
error_trans:
	if (seeding_dev)
		sb->s_flags |= SB_RDONLY;
	if (trans)
		btrfs_end_transaction(trans);
error_free_device:
	btrfs_free_device(device);
error:
	blkdev_put(bdev, FMODE_EXCL);
	if (seeding_dev && !unlocked) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
	}
	return ret;
}
static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
					struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->fs_info->chunk_root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}
int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	u64 old_total;
	u64 diff;

	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
		return -EACCES;

	new_size = round_down(new_size, fs_info->sectorsize);

	mutex_lock(&fs_info->chunk_mutex);
	old_total = btrfs_super_total_bytes(super_copy);
	diff = round_down(new_size - device->total_bytes, fs_info->sectorsize);

	if (new_size <= device->total_bytes ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		mutex_unlock(&fs_info->chunk_mutex);
		return -EINVAL;
	}

	btrfs_set_super_total_bytes(super_copy,
			round_down(old_total + diff, fs_info->sectorsize));
	device->fs_devices->total_rw_bytes += diff;

	btrfs_device_set_total_bytes(device, new_size);
	btrfs_device_set_disk_total_bytes(device, new_size);
	btrfs_clear_space_info_full(device->fs_info);
	if (list_empty(&device->post_commit_list))
		list_add_tail(&device->post_commit_list,
			      &trans->transaction->dev_update_list);
	mutex_unlock(&fs_info->chunk_mutex);

	return btrfs_update_device(trans, device);
}
static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = fs_info->chunk_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	else if (ret > 0) { /* Logic error or corruption */
		btrfs_handle_fs_error(fs_info, -ENOENT,
				      "Failed lookup while freeing chunk.");
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret < 0)
		btrfs_handle_fs_error(fs_info, ret,
				      "Failed to delete chunk item.");
out:
	btrfs_free_path(path);
	return ret;
}
static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	mutex_lock(&fs_info->chunk_mutex);
	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}

		if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	mutex_unlock(&fs_info->chunk_mutex);
	return ret;
}
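
/*
 * Illustrative layout sketch, not from the original source: the
 * sys_chunk_array is a packed sequence of (disk_key, chunk item) pairs:
 *
 *	| disk_key 0 | chunk 0 (n stripes) | disk_key 1 | chunk 1 | ...
 *
 * so removing the entry at offset 'cur' of length 'len' is a single
 * memmove() of the remaining tail, array_size - (cur + len) bytes, as
 * done above, followed by shrinking the recorded array size.
 */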
/*
 * btrfs_get_chunk_map() - Find the mapping containing the given logical extent.
 * @fs_info: the filesystem
 * @logical: Logical block offset in bytes.
 * @length:  Length of extent in bytes.
 *
 * Return: Chunk mapping or ERR_PTR.
 */
struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
				       u64 logical, u64 length)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;

	em_tree = &fs_info->mapping_tree;
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, length);
	read_unlock(&em_tree->lock);

	if (!em) {
		btrfs_crit(fs_info, "unable to find logical %llu length %llu",
			   logical, length);
		return ERR_PTR(-EINVAL);
	}

	if (em->start > logical || em->start + em->len < logical) {
		btrfs_crit(fs_info,
			   "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
			   logical, length, em->start, em->start + em->len);
		free_extent_map(em);
		return ERR_PTR(-EINVAL);
	}

	/* callers are responsible for dropping em's ref. */
	return em;
}
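
/*
 * Illustrative caller sketch (an assumption, not part of the original
 * file): a typical user of the lookup looks like
 *
 *	em = btrfs_get_chunk_map(fs_info, logical, len);
 *	if (IS_ERR(em))
 *		return PTR_ERR(em);
 *	map = em->map_lookup;
 *	...
 *	free_extent_map(em);	// drop the reference taken by the lookup
 */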
int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct extent_map *em;
	struct map_lookup *map;
	u64 dev_extent_len = 0;
	int i, ret = 0;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;

	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
	if (IS_ERR(em)) {
		/*
		 * This is a logic error, but we don't want to just rely on the
		 * user having built with ASSERT enabled, so if ASSERT doesn't
		 * do anything we still error out.
		 */
		ASSERT(0);
		return PTR_ERR(em);
	}
	map = em->map_lookup;
	mutex_lock(&fs_info->chunk_mutex);
	check_system_chunk(trans, map->type);
	mutex_unlock(&fs_info->chunk_mutex);

	/*
	 * Take the device list mutex to prevent races with the final phase of
	 * a device replace operation that replaces the device object associated
	 * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()).
	 */
	mutex_lock(&fs_devices->device_list_mutex);
	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_device *device = map->stripes[i].dev;
		ret = btrfs_free_dev_extent(trans, device,
					    map->stripes[i].physical,
					    &dev_extent_len);
		if (ret) {
			mutex_unlock(&fs_devices->device_list_mutex);
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		if (device->bytes_used > 0) {
			mutex_lock(&fs_info->chunk_mutex);
			btrfs_device_set_bytes_used(device,
					device->bytes_used - dev_extent_len);
			atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
			btrfs_clear_space_info_full(fs_info);
			mutex_unlock(&fs_info->chunk_mutex);
		}

		ret = btrfs_update_device(trans, device);
		if (ret) {
			mutex_unlock(&fs_devices->device_list_mutex);
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	ret = btrfs_free_chunk(trans, chunk_offset);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(fs_info, chunk_offset);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}

	ret = btrfs_remove_block_group(trans, chunk_offset, em);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

out:
	/* once for us */
	free_extent_map(em);
	return ret;
}
static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_trans_handle *trans;
	int ret;

	/*
	 * Prevent races with automatic removal of unused block groups.
	 * After we relocate and before we remove the chunk with offset
	 * chunk_offset, automatic removal of the block group can kick in,
	 * resulting in a failure when calling btrfs_remove_chunk() below.
	 *
	 * Make sure to acquire this mutex before doing a tree search (dev
	 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
	 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
	 * we release the path used to search the chunk/dev tree and before
	 * the current task acquires this mutex and calls us.
	 */
	lockdep_assert_held(&fs_info->delete_unused_bgs_mutex);

	ret = btrfs_can_relocate(fs_info, chunk_offset);
	if (ret)
		return -ENOSPC;

	/* step one, relocate all the extents inside this chunk */
	btrfs_scrub_pause(fs_info);
	ret = btrfs_relocate_block_group(fs_info, chunk_offset);
	btrfs_scrub_continue(fs_info);
	if (ret)
		return ret;

	/*
	 * We add the kobjects here (and after forcing data chunk creation)
	 * since relocation is the only place we'll create chunks of a new
	 * type at runtime. The only place where we'll remove the last
	 * chunk of a type is the call immediately below this one. Even
	 * so, we're protected against races with the cleaner thread since
	 * we're covered by the delete_unused_bgs_mutex.
	 */
	btrfs_add_raid_kobjects(fs_info);

	trans = btrfs_start_trans_remove_block_group(root->fs_info,
						     chunk_offset);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		btrfs_handle_fs_error(root->fs_info, ret, NULL);
		return ret;
	}

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	ret = btrfs_remove_chunk(trans, chunk_offset);
	btrfs_end_transaction(trans);
	return ret;
}
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 chunk_type;
	bool retried = false;
	int failed = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

again:
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		mutex_lock(&fs_info->delete_unused_bgs_mutex);
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto error;
		}
		BUG_ON(ret == 0); /* Corruption */

		ret = btrfs_previous_item(chunk_root, path, key.objectid,
					  key.type);
		if (ret)
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		if (ret < 0)
			goto error;
		if (ret > 0)
			break;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		chunk = btrfs_item_ptr(leaf, path->slots[0],
				       struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);
		btrfs_release_path(path);

		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
			ret = btrfs_relocate_chunk(fs_info, found_key.offset);
			if (ret == -ENOSPC)
				failed++;
			else
				BUG_ON(ret);
		}
		mutex_unlock(&fs_info->delete_unused_bgs_mutex);

		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}
	ret = 0;
	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (WARN_ON(failed && retried)) {
		ret = -ENOSPC;
	}
error:
	btrfs_free_path(path);
	return ret;
}
/*
 * return 1 : allocated a data chunk successfully,
 * return <0: errors during allocating a data chunk,
 * return 0 : no need to allocate a data chunk.
 */
static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
				      u64 chunk_offset)
{
	struct btrfs_block_group_cache *cache;
	u64 bytes_used;
	u64 chunk_type;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	ASSERT(cache);
	chunk_type = cache->flags;
	btrfs_put_block_group(cache);

	if (chunk_type & BTRFS_BLOCK_GROUP_DATA) {
		spin_lock(&fs_info->data_sinfo->lock);
		bytes_used = fs_info->data_sinfo->bytes_used;
		spin_unlock(&fs_info->data_sinfo->lock);

		if (!bytes_used) {
			struct btrfs_trans_handle *trans;
			int ret;

			trans = btrfs_join_transaction(fs_info->tree_root);
			if (IS_ERR(trans))
				return PTR_ERR(trans);

			ret = btrfs_force_chunk_alloc(trans,
						      BTRFS_BLOCK_GROUP_DATA);
			btrfs_end_transaction(trans);
			if (ret < 0)
				return ret;

			btrfs_add_raid_kobjects(fs_info);

			return 1;
		}
	}
	return 0;
}
static int insert_balance_item(struct btrfs_fs_info *fs_info,
			       struct btrfs_balance_control *bctl)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_balance_item *item;
	struct btrfs_disk_balance_args disk_bargs;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_TEMPORARY_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);

	memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));

	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
	btrfs_set_balance_data(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
	btrfs_set_balance_meta(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
	btrfs_set_balance_sys(leaf, item, &disk_bargs);

	btrfs_set_balance_flags(leaf, item, bctl->flags);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	err = btrfs_commit_transaction(trans);
	if (err && !ret)
		ret = err;
	return ret;
}
static int del_balance_item(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_TEMPORARY_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	err = btrfs_commit_transaction(trans);
	if (err && !ret)
		ret = err;
	return ret;
}
/*
 * This is a heuristic used to reduce the number of chunks balanced on
 * resume after balance was interrupted.
 */
static void update_balance_args(struct btrfs_balance_control *bctl)
{
	/*
	 * Turn on soft mode for chunk types that were being converted.
	 */
	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;

	/*
	 * Turn on usage filter if it is not already used. The idea is
	 * that chunks that we have already balanced should be
	 * reasonably full. Don't do it for chunks that are being
	 * converted - that will keep us from relocating unconverted
	 * (albeit full) chunks.
	 */
	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->data.usage = 90;
	}
	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->sys.usage = 90;
	}
	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->meta.usage = 90;
	}
}
/*
 * Clear the balance status in fs_info and delete the balance item from disk.
 */
static void reset_balance_state(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	int ret;

	BUG_ON(!fs_info->balance_ctl);

	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = NULL;
	spin_unlock(&fs_info->balance_lock);

	kfree(bctl);
	ret = del_balance_item(fs_info);
	if (ret)
		btrfs_handle_fs_error(fs_info, ret, NULL);
}
/*
 * Balance filters. Return 1 if chunk should be filtered out
 * (should not be balanced).
 */
static int chunk_profiles_filter(u64 chunk_type,
				 struct btrfs_balance_args *bargs)
{
	chunk_type = chunk_to_extended(chunk_type) &
				BTRFS_EXTENDED_PROFILE_MASK;

	if (bargs->profiles & chunk_type)
		return 0;

	return 1;
}
static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
				    struct btrfs_balance_args *bargs)
{
	struct btrfs_block_group_cache *cache;
	u64 chunk_used;
	u64 user_thresh_min;
	u64 user_thresh_max;
	int ret = 1;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	chunk_used = btrfs_block_group_used(&cache->item);

	if (bargs->usage_min == 0)
		user_thresh_min = 0;
	else
		user_thresh_min = div_factor_fine(cache->key.offset,
						  bargs->usage_min);

	if (bargs->usage_max == 0)
		user_thresh_max = 1;
	else if (bargs->usage_max > 100)
		user_thresh_max = cache->key.offset;
	else
		user_thresh_max = div_factor_fine(cache->key.offset,
						  bargs->usage_max);

	if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
		ret = 0;

	btrfs_put_block_group(cache);
	return ret;
}
static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
			      u64 chunk_offset, struct btrfs_balance_args *bargs)
{
	struct btrfs_block_group_cache *cache;
	u64 chunk_used, user_thresh;
	int ret = 1;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	chunk_used = btrfs_block_group_used(&cache->item);

	if (bargs->usage_min == 0)
		user_thresh = 1;
	else if (bargs->usage > 100)
		user_thresh = cache->key.offset;
	else
		user_thresh = div_factor_fine(cache->key.offset,
					      bargs->usage);

	if (chunk_used < user_thresh)
		ret = 0;

	btrfs_put_block_group(cache);
	return ret;
}
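
/*
 * Illustrative arithmetic, not from the original source: for a 1GiB chunk
 * (cache->key.offset == 1073741824) and usage=50, div_factor_fine() yields
 * user_thresh = 1073741824 * 50 / 100 = 536870912, so the chunk passes the
 * filter (gets balanced) only while chunk_used < ~512MiB, i.e. while it is
 * under 50% full. With usage_min == 0 the threshold becomes 1, so only
 * completely empty chunks pass.
 */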
static int chunk_devid_filter(struct extent_buffer *leaf,
			      struct btrfs_chunk *chunk,
			      struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	int i;

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
			return 0;
	}

	return 1;
}
static u64 calc_data_stripes(u64 type, int num_stripes)
{
	const int index = btrfs_bg_flags_to_raid_index(type);
	const int ncopies = btrfs_raid_array[index].ncopies;
	const int nparity = btrfs_raid_array[index].nparity;

	if (nparity)
		return num_stripes - nparity;
	else
		return num_stripes / ncopies;
}
/* [pstart, pend) */
static int chunk_drange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	u64 stripe_offset;
	u64 stripe_length;
	u64 type;
	int factor;
	int i;

	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
		return 0;

	type = btrfs_chunk_type(leaf, chunk);
	factor = calc_data_stripes(type, num_stripes);

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
			continue;

		stripe_offset = btrfs_stripe_offset(leaf, stripe);
		stripe_length = btrfs_chunk_length(leaf, chunk);
		stripe_length = div_u64(stripe_length, factor);

		if (stripe_offset < bargs->pend &&
		    stripe_offset + stripe_length > bargs->pstart)
			return 0;
	}

	return 1;
}
/* [vstart, vend) */
static int chunk_vrange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       u64 chunk_offset,
			       struct btrfs_balance_args *bargs)
{
	if (chunk_offset < bargs->vend &&
	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
		/* at least part of the chunk is inside this vrange */
		return 0;

	return 1;
}
static int chunk_stripes_range_filter(struct extent_buffer *leaf,
				      struct btrfs_chunk *chunk,
				      struct btrfs_balance_args *bargs)
{
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);

	if (bargs->stripes_min <= num_stripes
			&& num_stripes <= bargs->stripes_max)
		return 0;

	return 1;
}
static int chunk_soft_convert_filter(u64 chunk_type,
				     struct btrfs_balance_args *bargs)
{
	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
		return 0;

	chunk_type = chunk_to_extended(chunk_type) &
				BTRFS_EXTENDED_PROFILE_MASK;

	if (bargs->target == chunk_type)
		return 1;

	return 0;
}
static int should_balance_chunk(struct extent_buffer *leaf,
				struct btrfs_chunk *chunk, u64 chunk_offset)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	struct btrfs_balance_args *bargs = NULL;
	u64 chunk_type = btrfs_chunk_type(leaf, chunk);

	/* type filter */
	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
		return 0;
	}

	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
		bargs = &bctl->data;
	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
		bargs = &bctl->sys;
	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
		bargs = &bctl->meta;

	/* profiles filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
	    chunk_profiles_filter(chunk_type, bargs)) {
		return 0;
	}

	/* usage filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    chunk_usage_filter(fs_info, chunk_offset, bargs)) {
		return 0;
	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
		return 0;
	}

	/* devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
	    chunk_devid_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* drange filter, makes sense only with devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
	    chunk_drange_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* vrange filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
		return 0;
	}

	/* stripes filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
	    chunk_stripes_range_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* soft profile changing mode */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
	    chunk_soft_convert_filter(chunk_type, bargs)) {
		return 0;
	}

	/*
	 * limited by count, must be the last filter
	 */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
		if (bargs->limit == 0)
			return 0;
		else
			bargs->limit--;
	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
		/*
		 * Same logic as the 'limit' filter; the minimum cannot be
		 * determined here because we do not have the global information
		 * about the count of all chunks that satisfy the filters.
		 */
		if (bargs->limit_max == 0)
			return 0;
		else
			bargs->limit_max--;
	}

	return 1;
}
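
/*
 * Illustrative note, not from the original source: the filters above act
 * as a short-circuiting AND; a chunk is balanced only if every configured
 * filter accepts it. The limit filter must run last because it mutates
 * bargs (decrementing limit/limit_max), so it may only consume its budget
 * for chunks that have already passed all the other filters.
 */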
static int __btrfs_balance(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	u64 chunk_type;
	struct btrfs_chunk *chunk;
	struct btrfs_path *path = NULL;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int slot;
	int ret;
	int enospc_errors = 0;
	bool counting = true;
	/*
	 * The single value limit and min/max limits use the same bytes in
	 * the btrfs_balance_args union, so stash the single-value limits
	 * here and restore them for the second (relocation) pass.
	 */
	u64 limit_data = bctl->data.limit;
	u64 limit_meta = bctl->meta.limit;
	u64 limit_sys = bctl->sys.limit;
	u32 count_data = 0;
	u32 count_meta = 0;
	u32 count_sys = 0;
	int chunk_reserved = 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}

	/* zero out stat counters */
	spin_lock(&fs_info->balance_lock);
	memset(&bctl->stat, 0, sizeof(bctl->stat));
	spin_unlock(&fs_info->balance_lock);
again:
	if (!counting) {
		/*
		 * The single value limit and min/max limits use the same
		 * bytes in the union; restore the saved single values.
		 */
		bctl->data.limit = limit_data;
		bctl->meta.limit = limit_meta;
		bctl->sys.limit = limit_sys;
	}
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
		    atomic_read(&fs_info->balance_cancel_req)) {
			ret = -ECANCELED;
			goto error;
		}

		mutex_lock(&fs_info->delete_unused_bgs_mutex);
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto error;
		}

		/*
		 * this shouldn't happen, it means the last relocate
		 * failed
		 */
		if (ret == 0)
			BUG(); /* FIXME break ? */

		ret = btrfs_previous_item(chunk_root, path, 0,
					  BTRFS_CHUNK_ITEM_KEY);
		if (ret) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			ret = 0;
			break;
		}

		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid != key.objectid) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			break;
		}

		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);

		if (!counting) {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.considered++;
			spin_unlock(&fs_info->balance_lock);
		}

		ret = should_balance_chunk(leaf, chunk, found_key.offset);

		btrfs_release_path(path);
		if (!ret) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto loop;
		}

		if (counting) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			spin_lock(&fs_info->balance_lock);
			bctl->stat.expected++;
			spin_unlock(&fs_info->balance_lock);

			if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
				count_data++;
			else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
				count_sys++;
			else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
				count_meta++;

			goto loop;
		}

		/*
		 * Apply limit_min filter, no need to check if the LIMITS
		 * filter is used, limit_min is 0 by default
		 */
		if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
					count_data < bctl->data.limit_min)
				|| ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
					count_meta < bctl->meta.limit_min)
				|| ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
					count_sys < bctl->sys.limit_min)) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto loop;
		}

		if (!chunk_reserved) {
			/*
			 * We may be relocating the only data chunk we have,
			 * which could potentially end up with losing data's
			 * raid profile, so lets allocate an empty one in
			 * advance.
			 */
			ret = btrfs_may_alloc_data_chunk(fs_info,
							 found_key.offset);
			if (ret < 0) {
				mutex_unlock(&fs_info->delete_unused_bgs_mutex);
				goto error;
			} else if (ret == 1) {
				chunk_reserved = 1;
			}
		}

		ret = btrfs_relocate_chunk(fs_info, found_key.offset);
		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		if (ret == -ENOSPC) {
			enospc_errors++;
		} else if (ret == -ETXTBSY) {
			btrfs_info(fs_info,
	   "skipping relocation of block group %llu due to active swapfile",
				   found_key.offset);
			ret = 0;
		} else if (ret) {
			goto error;
		} else {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.completed++;
			spin_unlock(&fs_info->balance_lock);
		}
loop:
		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}

	if (counting) {
		btrfs_release_path(path);
		counting = false;
		goto again;
	}
error:
	btrfs_free_path(path);
	if (enospc_errors) {
		btrfs_info(fs_info, "%d enospc errors during balance",
			   enospc_errors);
		if (!ret)
			ret = -ENOSPC;
	}

	return ret;
}
/**
 * alloc_profile_is_valid - see if a given profile is valid and reduced
 * @flags: profile to validate
 * @extended: if true @flags is treated as an extended profile
 */
static int alloc_profile_is_valid(u64 flags, int extended)
{
	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
			       BTRFS_BLOCK_GROUP_PROFILE_MASK);

	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;

	/* 1) check that all other bits are zeroed */
	if (flags & ~mask)
		return 0;

	/* 2) see if profile is reduced */
	if (flags == 0)
		return !extended; /* "0" is valid for usual profiles */

	/* true if exactly one bit set */
	return is_power_of_2(flags);
}
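
/*
 * Illustrative check, not from the original source: for a non-extended
 * profile, flags == BTRFS_BLOCK_GROUP_RAID1 has exactly one profile bit
 * set, so is_power_of_2() accepts it; RAID1|RAID10 has two bits set and
 * is rejected as unreduced; flags == 0 is accepted (implicit SINGLE).
 */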
static inline int balance_need_close(struct btrfs_fs_info *fs_info)
{
	/* cancel requested || normal exit path */
	return atomic_read(&fs_info->balance_cancel_req) ||
		(atomic_read(&fs_info->balance_pause_req) == 0 &&
		 atomic_read(&fs_info->balance_cancel_req) == 0);
}
/* Non-zero return value signifies invalidity */
static inline int validate_convert_profile(struct btrfs_balance_args *bctl_arg,
					   u64 allowed)
{
	return ((bctl_arg->flags & BTRFS_BALANCE_ARGS_CONVERT) &&
		(!alloc_profile_is_valid(bctl_arg->target, 1) ||
		 (bctl_arg->target & ~allowed)));
}
/*
 * Fill @buf with textual description of balance filter flags @bargs, up to
 * @size_buf including the terminating null. The output may be trimmed if it
 * does not fit into the provided buffer.
 */
static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf,
				 u32 size_buf)
{
	int ret;
	u32 size_bp = size_buf;
	char *bp = buf;
	u64 flags = bargs->flags;
	char tmp_buf[128] = {'\0'};

	if (!flags)
		return;

#define CHECK_APPEND_NOARG(a)						\
	do {								\
		ret = snprintf(bp, size_bp, (a));			\
		if (ret < 0 || ret >= size_bp)				\
			goto out_overflow;				\
		size_bp -= ret;						\
		bp += ret;						\
	} while (0)

#define CHECK_APPEND_1ARG(a, v1)					\
	do {								\
		ret = snprintf(bp, size_bp, (a), (v1));			\
		if (ret < 0 || ret >= size_bp)				\
			goto out_overflow;				\
		size_bp -= ret;						\
		bp += ret;						\
	} while (0)

#define CHECK_APPEND_2ARG(a, v1, v2)					\
	do {								\
		ret = snprintf(bp, size_bp, (a), (v1), (v2));		\
		if (ret < 0 || ret >= size_bp)				\
			goto out_overflow;				\
		size_bp -= ret;						\
		bp += ret;						\
	} while (0)

	if (flags & BTRFS_BALANCE_ARGS_CONVERT)
		CHECK_APPEND_1ARG("convert=%s,",
				  btrfs_bg_type_to_raid_name(bargs->target));

	if (flags & BTRFS_BALANCE_ARGS_SOFT)
		CHECK_APPEND_NOARG("soft,");

	if (flags & BTRFS_BALANCE_ARGS_PROFILES) {
		btrfs_describe_block_groups(bargs->profiles, tmp_buf,
					    sizeof(tmp_buf));
		CHECK_APPEND_1ARG("profiles=%s,", tmp_buf);
	}

	if (flags & BTRFS_BALANCE_ARGS_USAGE)
		CHECK_APPEND_1ARG("usage=%llu,", bargs->usage);

	if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE)
		CHECK_APPEND_2ARG("usage=%u..%u,",
				  bargs->usage_min, bargs->usage_max);

	if (flags & BTRFS_BALANCE_ARGS_DEVID)
		CHECK_APPEND_1ARG("devid=%llu,", bargs->devid);

	if (flags & BTRFS_BALANCE_ARGS_DRANGE)
		CHECK_APPEND_2ARG("drange=%llu..%llu,",
				  bargs->pstart, bargs->pend);

	if (flags & BTRFS_BALANCE_ARGS_VRANGE)
		CHECK_APPEND_2ARG("vrange=%llu..%llu,",
				  bargs->vstart, bargs->vend);

	if (flags & BTRFS_BALANCE_ARGS_LIMIT)
		CHECK_APPEND_1ARG("limit=%llu,", bargs->limit);

	if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)
		CHECK_APPEND_2ARG("limit=%u..%u,",
				  bargs->limit_min, bargs->limit_max);

	if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE)
		CHECK_APPEND_2ARG("stripes=%u..%u,",
				  bargs->stripes_min, bargs->stripes_max);

#undef CHECK_APPEND_2ARG
#undef CHECK_APPEND_1ARG
#undef CHECK_APPEND_NOARG

out_overflow:

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last , */
	else
		buf[0] = '\0';
}
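
/*
 * Illustrative output, not from the original source: for bargs with
 * CONVERT to raid1, SOFT and usage=90 set, the buffer ends up holding
 * "convert=raid1,soft,usage=90" after the trailing comma is removed.
 */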
static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info)
{
	u32 size_buf = 1024;
	char tmp_buf[192] = {'\0'};
	char *buf;
	char *bp;
	u32 size_bp = size_buf;
	int ret;
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;

	buf = kzalloc(size_buf, GFP_KERNEL);
	if (!buf)
		return;

	bp = buf;

#define CHECK_APPEND_1ARG(a, v1)					\
	do {								\
		ret = snprintf(bp, size_bp, (a), (v1));			\
		if (ret < 0 || ret >= size_bp)				\
			goto out_overflow;				\
		size_bp -= ret;						\
		bp += ret;						\
	} while (0)

	if (bctl->flags & BTRFS_BALANCE_FORCE)
		CHECK_APPEND_1ARG("%s", "-f ");

	if (bctl->flags & BTRFS_BALANCE_DATA) {
		describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf));
		CHECK_APPEND_1ARG("-d%s ", tmp_buf);
	}

	if (bctl->flags & BTRFS_BALANCE_METADATA) {
		describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf));
		CHECK_APPEND_1ARG("-m%s ", tmp_buf);
	}

	if (bctl->flags & BTRFS_BALANCE_SYSTEM) {
		describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf));
		CHECK_APPEND_1ARG("-s%s ", tmp_buf);
	}

#undef CHECK_APPEND_1ARG

out_overflow:

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */
	btrfs_info(fs_info, "balance: %s %s",
		   (bctl->flags & BTRFS_BALANCE_RESUME) ?
		   "resume" : "start", buf);

	kfree(buf);
}
/*
 * Should be called with balance mutex held
 */
int btrfs_balance(struct btrfs_fs_info *fs_info,
		  struct btrfs_balance_control *bctl,
		  struct btrfs_ioctl_balance_args *bargs)
{
	u64 meta_target, data_target;
	u64 allowed;
	int mixed = 0;
	int ret;
	u64 num_devices;
	unsigned seq;
	bool reducing_integrity;
	int i;

	if (btrfs_fs_closing(fs_info) ||
	    atomic_read(&fs_info->balance_pause_req) ||
	    atomic_read(&fs_info->balance_cancel_req)) {
		ret = -EINVAL;
		goto out;
	}

	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	/*
	 * In case of mixed groups both data and meta should be picked,
	 * and identical options should be given for both of them.
	 */
	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
	if (mixed && (bctl->flags & allowed)) {
		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
			btrfs_err(fs_info,
	  "balance: mixed groups data and metadata options must be the same");
			ret = -EINVAL;
			goto out;
		}
	}

	num_devices = btrfs_num_devices(fs_info);
	allowed = 0;
	for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++)
		if (num_devices >= btrfs_raid_array[i].devs_min)
			allowed |= btrfs_raid_array[i].bg_flag;

	if (validate_convert_profile(&bctl->data, allowed)) {
		btrfs_err(fs_info,
			  "balance: invalid convert data profile %s",
			  btrfs_bg_type_to_raid_name(bctl->data.target));
		ret = -EINVAL;
		goto out;
	}
	if (validate_convert_profile(&bctl->meta, allowed)) {
		btrfs_err(fs_info,
			  "balance: invalid convert metadata profile %s",
			  btrfs_bg_type_to_raid_name(bctl->meta.target));
		ret = -EINVAL;
		goto out;
	}
	if (validate_convert_profile(&bctl->sys, allowed)) {
		btrfs_err(fs_info,
			  "balance: invalid convert system profile %s",
			  btrfs_bg_type_to_raid_name(bctl->sys.target));
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Allow to reduce metadata or system integrity only if force set for
	 * profiles with redundancy (copies, parity)
	 */
	allowed = 0;
	for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) {
		if (btrfs_raid_array[i].ncopies >= 2 ||
		    btrfs_raid_array[i].tolerated_failures >= 1)
			allowed |= btrfs_raid_array[i].bg_flag;
	}
	do {
		seq = read_seqbegin(&fs_info->profiles_lock);

		if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
		     (fs_info->avail_system_alloc_bits & allowed) &&
		     !(bctl->sys.target & allowed)) ||
		    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
		     (fs_info->avail_metadata_alloc_bits & allowed) &&
		     !(bctl->meta.target & allowed)))
			reducing_integrity = true;
		else
			reducing_integrity = false;

		/* if we're not converting, the target field is uninitialized */
		meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
			bctl->meta.target : fs_info->avail_metadata_alloc_bits;
		data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
			bctl->data.target : fs_info->avail_data_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	if (reducing_integrity) {
		if (bctl->flags & BTRFS_BALANCE_FORCE) {
			btrfs_info(fs_info,
				   "balance: force reducing metadata integrity");
		} else {
			btrfs_err(fs_info,
	  "balance: reduces metadata integrity, use --force if you want this");
			ret = -EINVAL;
			goto out;
		}
	}

	if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
		btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
		btrfs_warn(fs_info,
	"balance: metadata profile %s has lower redundancy than data profile %s",
			   btrfs_bg_type_to_raid_name(meta_target),
			   btrfs_bg_type_to_raid_name(data_target));
	}

	if (fs_info->send_in_progress) {
		btrfs_warn_rl(fs_info,
"cannot run balance while send operations are in progress (%d in progress)",
			      fs_info->send_in_progress);
		ret = -EAGAIN;
		goto out;
	}

	ret = insert_balance_item(fs_info, bctl);
	if (ret && ret != -EEXIST)
		goto out;

	if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
		BUG_ON(ret == -EEXIST);
		BUG_ON(fs_info->balance_ctl);
		spin_lock(&fs_info->balance_lock);
		fs_info->balance_ctl = bctl;
		spin_unlock(&fs_info->balance_lock);
	} else {
		BUG_ON(ret != -EEXIST);
		spin_lock(&fs_info->balance_lock);
		update_balance_args(bctl);
		spin_unlock(&fs_info->balance_lock);
	}

	ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
	set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
	describe_balance_start_or_resume(fs_info);
	mutex_unlock(&fs_info->balance_mutex);

	ret = __btrfs_balance(fs_info);

	mutex_lock(&fs_info->balance_mutex);
	if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req))
		btrfs_info(fs_info, "balance: paused");
	else if (ret == -ECANCELED && atomic_read(&fs_info->balance_cancel_req))
		btrfs_info(fs_info, "balance: canceled");
	else
		btrfs_info(fs_info, "balance: ended with status: %d", ret);

	clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);

	if (bargs) {
		memset(bargs, 0, sizeof(*bargs));
		btrfs_update_ioctl_balance_args(fs_info, bargs);
	}

	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
	    balance_need_close(fs_info)) {
		reset_balance_state(fs_info);
		clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
	}

	wake_up(&fs_info->balance_wait_q);

	return ret;
out:
	if (bctl->flags & BTRFS_BALANCE_RESUME)
		reset_balance_state(fs_info);
	else
		kfree(bctl);
	clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);

	return ret;
}
static int balance_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = data;
	int ret = 0;

	mutex_lock(&fs_info->balance_mutex);
	if (fs_info->balance_ctl)
		ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
	mutex_unlock(&fs_info->balance_mutex);

	return ret;
}
int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
{
	struct task_struct *tsk;

	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return 0;
	}
	mutex_unlock(&fs_info->balance_mutex);

	if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
		btrfs_info(fs_info, "balance: resume skipped");
		return 0;
	}

	/*
	 * A ro->rw remount sequence should continue with the paused balance
	 * regardless of who pauses it, system or the user as of now, so set
	 * the resume flag.
	 */
	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
	spin_unlock(&fs_info->balance_lock);

	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
	return PTR_ERR_OR_ZERO(tsk);
}
int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl;
	struct btrfs_balance_item *item;
	struct btrfs_disk_balance_args disk_bargs;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_TEMPORARY_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) { /* ret = -ENOENT; */
		ret = 0;
		goto out;
	}

	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
	if (!bctl) {
		ret = -ENOMEM;
		goto out;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);

	bctl->flags = btrfs_balance_flags(leaf, item);
	bctl->flags |= BTRFS_BALANCE_RESUME;

	btrfs_balance_data(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
	btrfs_balance_meta(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
	btrfs_balance_sys(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);

	/*
	 * This should never happen, as the paused balance state is recovered
	 * during mount without any chance of other exclusive ops to collide.
	 *
	 * This gives the exclusive op status to balance and keeps it in paused
	 * state until user intervention (cancel or umount). If the ownership
	 * cannot be assigned, show a message but do not fail. The balance
	 * is in a paused state and must have fs_info::balance_ctl properly
	 * set up.
	 */
	if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags))
		btrfs_warn(fs_info,
	"balance: cannot set exclusive op status, resume manually");

	mutex_lock(&fs_info->balance_mutex);
	BUG_ON(fs_info->balance_ctl);
	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = bctl;
	spin_unlock(&fs_info->balance_lock);
	mutex_unlock(&fs_info->balance_mutex);
out:
	btrfs_free_path(path);
	return ret;
}
int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
{
	int ret = 0;

	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return -ENOTCONN;
	}

	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
		atomic_inc(&fs_info->balance_pause_req);
		mutex_unlock(&fs_info->balance_mutex);

		wait_event(fs_info->balance_wait_q,
			   !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));

		mutex_lock(&fs_info->balance_mutex);
		/* we are good with balance_ctl ripped off from under us */
		BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
		atomic_dec(&fs_info->balance_pause_req);
	} else {
		ret = -ENOTCONN;
	}

	mutex_unlock(&fs_info->balance_mutex);
	return ret;
}
int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return -ENOTCONN;
	}

	/*
	 * A paused balance with the item stored on disk can be resumed at
	 * mount time if the mount is read-write. Otherwise it's still paused
	 * and we must not allow cancelling as it deletes the item.
	 */
	if (sb_rdonly(fs_info->sb)) {
		mutex_unlock(&fs_info->balance_mutex);
		return -EROFS;
	}

	atomic_inc(&fs_info->balance_cancel_req);
	/*
	 * if we are running just wait and return, balance item is
	 * deleted in btrfs_balance in this case
	 */
	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
		mutex_unlock(&fs_info->balance_mutex);
		wait_event(fs_info->balance_wait_q,
			   !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
		mutex_lock(&fs_info->balance_mutex);
	} else {
		mutex_unlock(&fs_info->balance_mutex);
		/*
		 * Lock released to allow other waiters to continue, we'll
		 * reexamine the status again.
		 */
		mutex_lock(&fs_info->balance_mutex);

		if (fs_info->balance_ctl) {
			reset_balance_state(fs_info);
			clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
			btrfs_info(fs_info, "balance: canceled");
		}
	}

	BUG_ON(fs_info->balance_ctl ||
		test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
	atomic_dec(&fs_info->balance_cancel_req);
	mutex_unlock(&fs_info->balance_mutex);
	return 0;
}
static int btrfs_uuid_scan_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = data;
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_key key;
	struct btrfs_path *path = NULL;
	int ret = 0;
	struct extent_buffer *eb;
	int slot;
	struct btrfs_root_item root_item;
	u32 item_size;
	struct btrfs_trans_handle *trans = NULL;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	key.objectid = 0;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = 0;

	while (1) {
		ret = btrfs_search_forward(root, &key, path,
					   BTRFS_OLDEST_GENERATION);
		if (ret) {
			if (ret > 0)
				ret = 0;
			break;
		}

		if (key.type != BTRFS_ROOT_ITEM_KEY ||
		    (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
		     key.objectid != BTRFS_FS_TREE_OBJECTID) ||
		    key.objectid > BTRFS_LAST_FREE_OBJECTID)
			goto skip;

		eb = path->nodes[0];
		slot = path->slots[0];
		item_size = btrfs_item_size_nr(eb, slot);
		if (item_size < sizeof(root_item))
			goto skip;

		read_extent_buffer(eb, &root_item,
				   btrfs_item_ptr_offset(eb, slot),
				   (int)sizeof(root_item));
		if (btrfs_root_refs(&root_item) == 0)
			goto skip;

		if (!btrfs_is_empty_uuid(root_item.uuid) ||
		    !btrfs_is_empty_uuid(root_item.received_uuid)) {
			if (trans)
				goto update_tree;

			btrfs_release_path(path);
			/*
			 * 1 - subvol uuid item
			 * 1 - received_subvol uuid item
			 */
			trans = btrfs_start_transaction(fs_info->uuid_root, 2);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				break;
			}
			continue;
		} else {
			goto skip;
		}
update_tree:
		if (!btrfs_is_empty_uuid(root_item.uuid)) {
			ret = btrfs_uuid_tree_add(trans, root_item.uuid,
						  BTRFS_UUID_KEY_SUBVOL,
						  key.objectid);
			if (ret < 0) {
				btrfs_warn(fs_info, "uuid_tree_add failed %d",
					   ret);
				break;
			}
		}

		if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
			ret = btrfs_uuid_tree_add(trans,
						  root_item.received_uuid,
						 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
						  key.objectid);
			if (ret < 0) {
				btrfs_warn(fs_info, "uuid_tree_add failed %d",
					   ret);
				break;
			}
		}

skip:
		if (trans) {
			ret = btrfs_end_transaction(trans);
			trans = NULL;
			if (ret)
				break;
		}

		btrfs_release_path(path);
		if (key.offset < (u64)-1) {
			key.offset++;
		} else if (key.type < BTRFS_ROOT_ITEM_KEY) {
			key.offset = 0;
			key.type = BTRFS_ROOT_ITEM_KEY;
		} else if (key.objectid < (u64)-1) {
			key.offset = 0;
			key.type = BTRFS_ROOT_ITEM_KEY;
			key.objectid++;
		} else {
			break;
		}
		cond_resched();
	}

out:
	btrfs_free_path(path);
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans);
	if (ret)
		btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
	else
		set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
	up(&fs_info->uuid_tree_rescan_sem);
	return 0;
}
/*
 * Callback for btrfs_uuid_tree_iterate().
 * returns:
 * 0	check succeeded, the entry is not outdated.
 * < 0	if an error occurred.
 * > 0	if the check failed, which means the caller shall remove the entry.
 */
static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info,
				       u8 *uuid, u8 type, u64 subid)
{
	struct btrfs_key key;
	int ret = 0;
	struct btrfs_root *subvol_root;

	if (type != BTRFS_UUID_KEY_SUBVOL &&
	    type != BTRFS_UUID_KEY_RECEIVED_SUBVOL)
		goto out;

	key.objectid = subid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	subvol_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(subvol_root)) {
		ret = PTR_ERR(subvol_root);
		if (ret == -ENOENT)
			ret = 1;
		goto out;
	}

	switch (type) {
	case BTRFS_UUID_KEY_SUBVOL:
		if (memcmp(uuid, subvol_root->root_item.uuid, BTRFS_UUID_SIZE))
			ret = 1;
		break;
	case BTRFS_UUID_KEY_RECEIVED_SUBVOL:
		if (memcmp(uuid, subvol_root->root_item.received_uuid,
			   BTRFS_UUID_SIZE))
			ret = 1;
		break;
	}

out:
	return ret;
}
static int btrfs_uuid_rescan_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data;
	int ret;

	/*
	 * 1st step is to iterate through the existing UUID tree and
	 * to delete all entries that contain outdated data.
	 * 2nd step is to add all missing entries to the UUID tree.
	 */
	ret = btrfs_uuid_tree_iterate(fs_info, btrfs_check_uuid_tree_entry);
	if (ret < 0) {
		btrfs_warn(fs_info, "iterating uuid_tree failed %d", ret);
		up(&fs_info->uuid_tree_rescan_sem);
		return ret;
	}
	return btrfs_uuid_scan_kthread(data);
}
int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *uuid_root;
	struct task_struct *task;
	int ret;

	/*
	 * 1 - root node
	 * 1 - root item
	 */
	trans = btrfs_start_transaction(tree_root, 2);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID);
	if (IS_ERR(uuid_root)) {
		ret = PTR_ERR(uuid_root);
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		return ret;
	}

	fs_info->uuid_root = uuid_root;

	ret = btrfs_commit_transaction(trans);
	if (ret)
		return ret;

	down(&fs_info->uuid_tree_rescan_sem);
	task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
	if (IS_ERR(task)) {
		/* fs_info->update_uuid_tree_gen remains 0 in all error case */
		btrfs_warn(fs_info, "failed to start uuid_scan task");
		up(&fs_info->uuid_tree_rescan_sem);
		return PTR_ERR(task);
	}

	return 0;
}
int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
{
	struct task_struct *task;

	down(&fs_info->uuid_tree_rescan_sem);
	task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
	if (IS_ERR(task)) {
		/* fs_info->update_uuid_tree_gen remains 0 in all error case */
		btrfs_warn(fs_info, "failed to start uuid_rescan task");
		up(&fs_info->uuid_tree_rescan_sem);
		return PTR_ERR(task);
	}

	return 0;
}
/*
 * shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
 * The chunk relocation code actually frees the device extent.
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_offset;
	int ret;
	int slot;
	int failed = 0;
	bool retried = false;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 old_size = btrfs_device_get_total_bytes(device);
	u64 diff;
	u64 start;

	new_size = round_down(new_size, fs_info->sectorsize);
	start = new_size;
	diff = round_down(old_size - new_size, fs_info->sectorsize);

	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
		return -EINVAL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_BACK;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	mutex_lock(&fs_info->chunk_mutex);

	btrfs_device_set_total_bytes(device, new_size);
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		device->fs_devices->total_rw_bytes -= diff;
		atomic64_sub(diff, &fs_info->free_chunk_space);
	}

	/*
	 * Once the device's size has been set to the new size, ensure all
	 * in-memory chunks are synced to disk so that the loop below sees them
	 * and relocates them accordingly.
	 */
	if (contains_pending_extent(device, &start, diff)) {
		mutex_unlock(&fs_info->chunk_mutex);
		ret = btrfs_commit_transaction(trans);
		if (ret)
			goto done;
	} else {
		mutex_unlock(&fs_info->chunk_mutex);
		btrfs_end_transaction(trans);
	}

again:
	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

	do {
		mutex_lock(&fs_info->delete_unused_bgs_mutex);
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto done;
		}

		ret = btrfs_previous_item(root, path, 0, key.type);
		if (ret)
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		if (ret < 0)
			goto done;
		if (ret) {
			ret = 0;
			btrfs_release_path(path);
			break;
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

		if (key.objectid != device->devid) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			btrfs_release_path(path);
			break;
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (key.offset + length <= new_size) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			btrfs_release_path(path);
			break;
		}

		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
		btrfs_release_path(path);

		/*
		 * We may be relocating the only data chunk we have,
		 * which could potentially end up with losing data's
		 * raid profile, so let's allocate an empty one in
		 * advance.
		 */
		ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
		if (ret < 0) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto done;
		}

		ret = btrfs_relocate_chunk(fs_info, chunk_offset);
		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		if (ret == -ENOSPC) {
			failed++;
		} else if (ret) {
			if (ret == -ETXTBSY) {
				btrfs_warn(fs_info,
		   "could not shrink block group %llu due to active swapfile",
					   chunk_offset);
			}
			goto done;
		}
	} while (key.offset-- > 0);

	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		ret = -ENOSPC;
		goto done;
	}

	/* Shrinking succeeded, else we would be at "done". */
	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto done;
	}

	mutex_lock(&fs_info->chunk_mutex);
	btrfs_device_set_disk_total_bytes(device, new_size);
	if (list_empty(&device->post_commit_list))
		list_add_tail(&device->post_commit_list,
			      &trans->transaction->dev_update_list);

	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy,
			round_down(old_total - diff, fs_info->sectorsize));
	mutex_unlock(&fs_info->chunk_mutex);

	/* Now btrfs_update_device() will change the on-disk size. */
	ret = btrfs_update_device(trans, device);
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
	} else {
		ret = btrfs_commit_transaction(trans);
	}
done:
	btrfs_free_path(path);
	if (ret) {
		mutex_lock(&fs_info->chunk_mutex);
		btrfs_device_set_total_bytes(device, old_size);
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
			device->fs_devices->total_rw_bytes += diff;
		atomic64_add(diff, &fs_info->free_chunk_space);
		mutex_unlock(&fs_info->chunk_mutex);
	}
	return ret;
}
static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
				  struct btrfs_key *key,
				  struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	mutex_lock(&fs_info->chunk_mutex);
	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size + sizeof(disk_key)
			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
		mutex_unlock(&fs_info->chunk_mutex);
		return -EFBIG;
	}

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	mutex_unlock(&fs_info->chunk_mutex);

	return 0;
}
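/*
 * Worked example (illustrative byte counts assumed from the on-disk
 * format, not stated in this file): sys_chunk_array is a flat byte
 * array of (disk_key, chunk item) pairs, capped at
 * BTRFS_SYSTEM_CHUNK_ARRAY_SIZE bytes. For a one-stripe SYSTEM chunk,
 * sizeof(disk_key) is 17 bytes and the chunk item with its embedded
 * stripe is roughly 80 bytes, so each append above grows array_size by
 * about 97 bytes, and -EFBIG is returned once the next entry would no
 * longer fit.
 */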
/*
 * sort the devices in descending order by max_avail, total_avail
 */
static int btrfs_cmp_device_info(const void *a, const void *b)
{
	const struct btrfs_device_info *di_a = a;
	const struct btrfs_device_info *di_b = b;

	if (di_a->max_avail > di_b->max_avail)
		return -1;
	if (di_a->max_avail < di_b->max_avail)
		return 1;
	if (di_a->total_avail > di_b->total_avail)
		return -1;
	if (di_a->total_avail < di_b->total_avail)
		return 1;
	return 0;
}
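/*
 * Illustrative note (example values, not from the source): the
 * comparator above sorts e.g. holes of 10G (total 50G), 10G (total 20G)
 * and 4G into exactly that order -- descending by largest free hole,
 * ties broken by total free space -- so after sort() the allocator's
 * devices_info[ndevs - 1] is always the usable device with the smallest
 * hole, which bounds the stripe size.
 */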
static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
{
	if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
		return;

	btrfs_set_fs_incompat(info, RAID56);
}
static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
			       u64 start, u64 type)
{
	struct btrfs_fs_info *info = trans->fs_info;
	struct btrfs_fs_devices *fs_devices = info->fs_devices;
	struct btrfs_device *device;
	struct map_lookup *map = NULL;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct btrfs_device_info *devices_info = NULL;
	u64 total_avail;
	int num_stripes;	/* total number of stripes to allocate */
	int data_stripes;	/* number of stripes that count for
				   block group size */
	int sub_stripes;	/* sub_stripes info for map */
	int dev_stripes;	/* stripes per dev */
	int devs_max;		/* max devs to use */
	int devs_min;		/* min devs needed */
	int devs_increment;	/* ndevs has to be a multiple of this */
	int ncopies;		/* how many copies of the data we have */
	int nparity;		/* number of stripes worth of bytes to
				   store parity information */
	int ret;
	u64 max_stripe_size;
	u64 max_chunk_size;
	u64 stripe_size;
	u64 chunk_size;
	int ndevs;
	int i;
	int j;
	int index;

	BUG_ON(!alloc_profile_is_valid(type, 0));

	if (list_empty(&fs_devices->alloc_list)) {
		if (btrfs_test_opt(info, ENOSPC_DEBUG))
			btrfs_debug(info, "%s: no writable device", __func__);
		return -ENOSPC;
	}

	index = btrfs_bg_flags_to_raid_index(type);

	sub_stripes = btrfs_raid_array[index].sub_stripes;
	dev_stripes = btrfs_raid_array[index].dev_stripes;
	devs_max = btrfs_raid_array[index].devs_max;
	if (!devs_max)
		devs_max = BTRFS_MAX_DEVS(info);
	devs_min = btrfs_raid_array[index].devs_min;
	devs_increment = btrfs_raid_array[index].devs_increment;
	ncopies = btrfs_raid_array[index].ncopies;
	nparity = btrfs_raid_array[index].nparity;

	if (type & BTRFS_BLOCK_GROUP_DATA) {
		max_stripe_size = SZ_1G;
		max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE;
	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
		/* for larger filesystems, use larger metadata chunks */
		if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
			max_stripe_size = SZ_1G;
		else
			max_stripe_size = SZ_256M;
		max_chunk_size = max_stripe_size;
	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		max_stripe_size = SZ_32M;
		max_chunk_size = 2 * max_stripe_size;
	} else {
		btrfs_err(info, "invalid chunk type 0x%llx requested",
			  type);
		BUG();
	}

	/* We don't want a chunk larger than 10% of writable space */
	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
			     max_chunk_size);

	devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
			       GFP_NOFS);
	if (!devices_info)
		return -ENOMEM;

	/*
	 * in the first pass through the devices list, we gather information
	 * about the available holes on each device.
	 */
	ndevs = 0;
	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
		u64 max_avail;
		u64 dev_offset;

		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			WARN(1, KERN_ERR
			       "BTRFS: read-only device in alloc_list\n");
			continue;
		}

		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
					&device->dev_state) ||
		    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
			continue;

		if (device->total_bytes > device->bytes_used)
			total_avail = device->total_bytes - device->bytes_used;
		else
			total_avail = 0;

		/* If there is no space on this device, skip it. */
		if (total_avail == 0)
			continue;

		ret = find_free_dev_extent(device,
					   max_stripe_size * dev_stripes,
					   &dev_offset, &max_avail);
		if (ret && ret != -ENOSPC)
			goto error;

		if (ret == 0)
			max_avail = max_stripe_size * dev_stripes;

		if (max_avail < BTRFS_STRIPE_LEN * dev_stripes) {
			if (btrfs_test_opt(info, ENOSPC_DEBUG))
				btrfs_debug(info,
			"%s: devid %llu has no free space, have=%llu want=%u",
					    __func__, device->devid, max_avail,
					    BTRFS_STRIPE_LEN * dev_stripes);
			continue;
		}

		if (ndevs == fs_devices->rw_devices) {
			WARN(1, "%s: found more than %llu devices\n",
			     __func__, fs_devices->rw_devices);
			break;
		}
		devices_info[ndevs].dev_offset = dev_offset;
		devices_info[ndevs].max_avail = max_avail;
		devices_info[ndevs].total_avail = total_avail;
		devices_info[ndevs].dev = device;
		++ndevs;
	}

	/*
	 * now sort the devices by hole size / available space
	 */
	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
	     btrfs_cmp_device_info, NULL);

	/* round down to number of usable stripes */
	ndevs = round_down(ndevs, devs_increment);

	if (ndevs < devs_min) {
		ret = -ENOSPC;
		if (btrfs_test_opt(info, ENOSPC_DEBUG)) {
			btrfs_debug(info,
	"%s: not enough devices with free space: have=%d minimum required=%d",
				    __func__, ndevs, devs_min);
		}
		goto error;
	}

	ndevs = min(ndevs, devs_max);

	/*
	 * The primary goal is to maximize the number of stripes, so use as
	 * many devices as possible, even if the stripes are not maximum sized.
	 *
	 * The DUP profile stores more than one stripe per device, the
	 * max_avail is the total size so we have to adjust.
	 */
	stripe_size = div_u64(devices_info[ndevs - 1].max_avail, dev_stripes);
	num_stripes = ndevs * dev_stripes;

	/*
	 * this will have to be fixed for RAID1 and RAID10 over
	 * more drives
	 */
	data_stripes = (num_stripes - nparity) / ncopies;

	/*
	 * Use the number of data stripes to figure out how big this chunk
	 * is really going to be in terms of logical address space,
	 * and compare that answer with the max chunk size. If it's higher,
	 * we try to reduce stripe_size.
	 */
	if (stripe_size * data_stripes > max_chunk_size) {
		/*
		 * Reduce stripe_size, round it up to a 16MB boundary again and
		 * then use it, unless it ends up being even bigger than the
		 * previous value we had already.
		 */
		stripe_size = min(round_up(div_u64(max_chunk_size,
						   data_stripes), SZ_16M),
				  stripe_size);
	}

	/* align to BTRFS_STRIPE_LEN */
	stripe_size = round_down(stripe_size, BTRFS_STRIPE_LEN);

	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		ret = -ENOMEM;
		goto error;
	}
	map->num_stripes = num_stripes;

	for (i = 0; i < ndevs; ++i) {
		for (j = 0; j < dev_stripes; ++j) {
			int s = i * dev_stripes + j;
			map->stripes[s].dev = devices_info[i].dev;
			map->stripes[s].physical = devices_info[i].dev_offset +
						   j * stripe_size;
		}
	}
	map->stripe_len = BTRFS_STRIPE_LEN;
	map->io_align = BTRFS_STRIPE_LEN;
	map->io_width = BTRFS_STRIPE_LEN;
	map->type = type;
	map->sub_stripes = sub_stripes;

	chunk_size = stripe_size * data_stripes;

	trace_btrfs_chunk_alloc(info, map, start, chunk_size);

	em = alloc_extent_map();
	if (!em) {
		kfree(map);
		ret = -ENOMEM;
		goto error;
	}
	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
	em->map_lookup = map;
	em->start = start;
	em->len = chunk_size;
	em->block_start = 0;
	em->block_len = em->len;
	em->orig_block_len = stripe_size;

	em_tree = &info->mapping_tree;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	if (ret) {
		write_unlock(&em_tree->lock);
		free_extent_map(em);
		goto error;
	}
	write_unlock(&em_tree->lock);

	ret = btrfs_make_block_group(trans, 0, type, start, chunk_size);
	if (ret)
		goto error_del_extent;

	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_device *dev = map->stripes[i].dev;

		btrfs_device_set_bytes_used(dev, dev->bytes_used + stripe_size);
		if (list_empty(&dev->post_commit_list))
			list_add_tail(&dev->post_commit_list,
				      &trans->transaction->dev_update_list);
	}

	atomic64_sub(stripe_size * map->num_stripes, &info->free_chunk_space);

	free_extent_map(em);
	check_raid56_incompat_flag(info, type);

	kfree(devices_info);
	return 0;

error_del_extent:
	write_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);

	/* One for our allocation */
	free_extent_map(em);
	/* One for the tree reference */
	free_extent_map(em);
error:
	kfree(devices_info);
	return ret;
}
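/*
 * Worked example of the sizing logic above (illustrative numbers, not
 * from the source): a RAID1 metadata chunk on a small filesystem has
 * dev_stripes = 1, ncopies = 2, nparity = 0 and max_stripe_size = 256M.
 * With three candidate devices whose reported holes clamp to 256M,
 * 256M and 128M, ndevs = 3 rounds down to devs_increment = 2, then
 * devs_max caps it at 2. stripe_size is taken from the smaller of the
 * two chosen holes (256M), num_stripes = 2, data_stripes = (2 - 0) / 2
 * = 1, so the chunk occupies 256M of logical address space backed by a
 * 256M stripe on each of the two devices.
 */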
int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
			     u64 chunk_offset, u64 chunk_size)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	struct btrfs_key key;
	struct btrfs_device *device;
	struct btrfs_chunk *chunk;
	struct btrfs_stripe *stripe;
	struct extent_map *em;
	struct map_lookup *map;
	size_t item_size;
	u64 dev_offset;
	u64 stripe_size;
	int i = 0;
	int ret = 0;

	em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size);
	if (IS_ERR(em))
		return PTR_ERR(em);

	map = em->map_lookup;
	item_size = btrfs_chunk_item_size(map->num_stripes);
	stripe_size = em->orig_block_len;

	chunk = kzalloc(item_size, GFP_NOFS);
	if (!chunk) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Take the device list mutex to prevent races with the final phase of
	 * a device replace operation that replaces the device object associated
	 * with the map's stripes, because the device object's id can change
	 * at any time during that final phase of the device replace operation
	 * (dev-replace.c:btrfs_dev_replace_finishing()).
	 */
	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	for (i = 0; i < map->num_stripes; i++) {
		device = map->stripes[i].dev;
		dev_offset = map->stripes[i].physical;

		ret = btrfs_update_device(trans, device);
		if (ret)
			break;
		ret = btrfs_alloc_dev_extent(trans, device, chunk_offset,
					     dev_offset, stripe_size);
		if (ret)
			break;
	}
	if (ret) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		goto out;
	}

	stripe = &chunk->stripe;
	for (i = 0; i < map->num_stripes; i++) {
		device = map->stripes[i].dev;
		dev_offset = map->stripes[i].physical;

		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		stripe++;
	}
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	btrfs_set_stack_chunk_length(chunk, chunk_size);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
	btrfs_set_stack_chunk_type(chunk, map->type);
	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = chunk_offset;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
	if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		/*
		 * TODO: Cleanup of inserted chunk root in case of
		 * failure.
		 */
		ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
	}

out:
	kfree(chunk);
	free_extent_map(em);
	return ret;
}
/*
 * Chunk allocation falls into two parts. The first part does work
 * that makes the new allocated chunk usable, but does not do any operation
 * that modifies the chunk tree. The second part does the work that
 * requires modifying the chunk tree. This division is important for the
 * bootstrap process of adding storage to a seed btrfs.
 */
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type)
{
	u64 chunk_offset;

	lockdep_assert_held(&trans->fs_info->chunk_mutex);
	chunk_offset = find_next_chunk(trans->fs_info);
	return __btrfs_alloc_chunk(trans, chunk_offset, type);
}
static noinline int init_first_rw_device(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	u64 chunk_offset;
	u64 sys_chunk_offset;
	u64 alloc_profile;
	int ret;

	chunk_offset = find_next_chunk(fs_info);
	alloc_profile = btrfs_metadata_alloc_profile(fs_info);
	ret = __btrfs_alloc_chunk(trans, chunk_offset, alloc_profile);
	if (ret)
		return ret;

	sys_chunk_offset = find_next_chunk(fs_info);
	alloc_profile = btrfs_system_alloc_profile(fs_info);
	ret = __btrfs_alloc_chunk(trans, sys_chunk_offset, alloc_profile);
	return ret;
}
static inline int btrfs_chunk_max_errors(struct map_lookup *map)
{
	const int index = btrfs_bg_flags_to_raid_index(map->type);

	return btrfs_raid_array[index].tolerated_failures;
}
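/*
 * For example, per the tolerated_failures values in btrfs_raid_array:
 * RAID1, RAID10 and RAID5 tolerate one failed device, RAID6 tolerates
 * two, and RAID0/SINGLE/DUP tolerate none -- so a RAID6 chunk keeps
 * accepting writes until a third device drops out.
 */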
int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
	struct extent_map *em;
	struct map_lookup *map;
	int readonly = 0;
	int miss_ndevs = 0;
	int i;

	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
	if (IS_ERR(em))
		return 1;

	map = em->map_lookup;
	for (i = 0; i < map->num_stripes; i++) {
		if (test_bit(BTRFS_DEV_STATE_MISSING,
					&map->stripes[i].dev->dev_state)) {
			miss_ndevs++;
			continue;
		}
		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
					&map->stripes[i].dev->dev_state)) {
			readonly = 1;
			goto end;
		}
	}

	/*
	 * If the number of missing devices is larger than max errors,
	 * we can not write the data into that chunk successfully, so
	 * set it readonly.
	 */
	if (miss_ndevs > btrfs_chunk_max_errors(map))
		readonly = 1;
end:
	free_extent_map(em);
	return readonly;
}
void btrfs_mapping_tree_free(struct extent_map_tree *tree)
{
	struct extent_map *em;

	while (1) {
		write_lock(&tree->lock);
		em = lookup_extent_mapping(tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(tree, em);
		write_unlock(&tree->lock);
		if (!em)
			break;
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	int ret;

	em = btrfs_get_chunk_map(fs_info, logical, len);

	/*
	 * We could return errors for these cases, but that could get
	 * ugly and we'd probably do the same thing which is just not do
	 * anything else and exit, so return 1 so the callers don't try
	 * to use other copies.
	 */
	if (IS_ERR(em))
		return 1;

	map = em->map_lookup;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1_MASK))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
		ret = 2;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
		/*
		 * There could be two corrupted data stripes, we need
		 * to loop retry in order to rebuild the correct data.
		 *
		 * Fail a stripe at a time on every retry except the
		 * stripe under reconstruction.
		 */
		ret = map->num_stripes;
	else
		ret = 1;
	free_extent_map(em);

	down_read(&fs_info->dev_replace.rwsem);
	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) &&
	    fs_info->dev_replace.tgtdev)
		ret++;
	up_read(&fs_info->dev_replace.rwsem);

	return ret;
}
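/*
 * Illustrative summary of the returns above: DUP and the profiles in
 * BTRFS_BLOCK_GROUP_RAID1_MASK answer with num_stripes, RAID10 with
 * sub_stripes (typically 2), RAID5 with 2 (the data block plus one
 * reconstruction from parity), and RAID6 with num_stripes so the read
 * path can retry while failing one stripe at a time; an ongoing device
 * replace contributes one extra readable copy.
 */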
unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
				    u64 logical)
{
	struct extent_map *em;
	struct map_lookup *map;
	unsigned long len = fs_info->sectorsize;

	em = btrfs_get_chunk_map(fs_info, logical, len);

	if (!WARN_ON(IS_ERR(em))) {
		map = em->map_lookup;
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			len = map->stripe_len * nr_data_stripes(map);
		free_extent_map(em);
	}
	return len;
}
int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	int ret = 0;

	em = btrfs_get_chunk_map(fs_info, logical, len);

	if (!WARN_ON(IS_ERR(em))) {
		map = em->map_lookup;
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			ret = 1;
		free_extent_map(em);
	}
	return ret;
}
static int find_live_mirror(struct btrfs_fs_info *fs_info,
			    struct map_lookup *map, int first,
			    int dev_replace_is_ongoing)
{
	int i;
	int num_stripes;
	int preferred_mirror;
	int tolerance;
	struct btrfs_device *srcdev;

	ASSERT((map->type &
		 (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10)));

	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		num_stripes = map->sub_stripes;
	else
		num_stripes = map->num_stripes;

	preferred_mirror = first + current->pid % num_stripes;

	if (dev_replace_is_ongoing &&
	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
		srcdev = fs_info->dev_replace.srcdev;
	else
		srcdev = NULL;

	/*
	 * try to avoid the drive that is the source drive for a
	 * dev-replace procedure, only choose it if no other non-missing
	 * mirror is available
	 */
	for (tolerance = 0; tolerance < 2; tolerance++) {
		if (map->stripes[preferred_mirror].dev->bdev &&
		    (tolerance || map->stripes[preferred_mirror].dev != srcdev))
			return preferred_mirror;
		for (i = first; i < first + num_stripes; i++) {
			if (map->stripes[i].dev->bdev &&
			    (tolerance || map->stripes[i].dev != srcdev))
				return i;
		}
	}

	/* we couldn't find one that doesn't fail.  Just return something
	 * and the io error handling code will clean up eventually
	 */
	return preferred_mirror;
}
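/*
 * Illustrative example of the selection above: on a two-mirror RAID1
 * chunk, current->pid % 2 spreads concurrent readers between stripe 0
 * and stripe 1, statistically loading both copies. The tolerance loop
 * first skips the dev-replace source drive entirely, and only falls
 * back to it (tolerance == 1) when every other mirror lacks a bdev.
 */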
static inline int parity_smaller(u64 a, u64 b)
{
	return a > b;
}

/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
{
	struct btrfs_bio_stripe s;
	int i;
	u64 l;
	int again = 1;

	while (again) {
		again = 0;
		for (i = 0; i < num_stripes - 1; i++) {
			if (parity_smaller(bbio->raid_map[i],
					   bbio->raid_map[i+1])) {
				s = bbio->stripes[i];
				l = bbio->raid_map[i];
				bbio->stripes[i] = bbio->stripes[i+1];
				bbio->raid_map[i] = bbio->raid_map[i+1];
				bbio->stripes[i+1] = s;
				bbio->raid_map[i+1] = l;
				again = 1;
			}
		}
	}
}
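/*
 * Illustrative note (sentinel values assumed from the raid56 header):
 * for a 3-disk RAID5 full stripe the raid_map holds two real data
 * stripe logical addresses plus the RAID5_P_STRIPE sentinel, and since
 * the P/Q sentinels are the largest possible u64 values, the "a > b"
 * test above bubbles them past every real logical address -- which is
 * exactly how the parity/syndrome stripes end up last.
 */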
static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
{
	struct btrfs_bio *bbio = kzalloc(
		 /* the size of the btrfs_bio */
		sizeof(struct btrfs_bio) +
		/* plus the variable array for the stripes */
		sizeof(struct btrfs_bio_stripe) * (total_stripes) +
		/* plus the variable array for the tgt dev */
		sizeof(int) * (real_stripes) +
		/*
		 * plus the raid_map, which includes both the tgt dev
		 * and the stripes
		 */
		sizeof(u64) * (total_stripes),
		GFP_NOFS|__GFP_NOFAIL);

	atomic_set(&bbio->error, 0);
	refcount_set(&bbio->refs, 1);

	return bbio;
}

void btrfs_get_bbio(struct btrfs_bio *bbio)
{
	WARN_ON(!refcount_read(&bbio->refs));
	refcount_inc(&bbio->refs);
}

void btrfs_put_bbio(struct btrfs_bio *bbio)
{
	if (!bbio)
		return;
	if (refcount_dec_and_test(&bbio->refs))
		kfree(bbio);
}
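/*
 * Layout sketch for the single allocation above (illustrative): for
 * total_stripes = 4 and real_stripes = 4, the buffer is one struct
 * btrfs_bio immediately followed by 4 btrfs_bio_stripe entries, 4 ints
 * for tgtdev_map and 4 u64s for raid_map. Packing everything into one
 * allocation is why __btrfs_map_block can later derive bbio->tgtdev_map
 * and bbio->raid_map as offsets past bbio->stripes instead of
 * allocating them separately.
 */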
/* can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE? */
/*
 * Please note that, discard won't be sent to target device of device
 * replace.
 */
static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
					 u64 logical, u64 length,
					 struct btrfs_bio **bbio_ret)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_bio *bbio;
	u64 offset;
	u64 stripe_nr;
	u64 stripe_nr_end;
	u64 stripe_end_offset;
	u64 stripe_cnt;
	u64 stripe_len;
	u64 stripe_offset;
	u64 num_stripes;
	u32 stripe_index;
	u32 factor = 0;
	u32 sub_stripes = 0;
	u64 stripes_per_dev = 0;
	u32 remaining_stripes = 0;
	u32 last_stripe = 0;
	int ret = 0;
	int i;

	/* discard always returns a bbio */
	ASSERT(bbio_ret);

	em = btrfs_get_chunk_map(fs_info, logical, length);
	if (IS_ERR(em))
		return PTR_ERR(em);

	map = em->map_lookup;
	/* we don't discard raid56 yet */
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	offset = logical - em->start;
	length = min_t(u64, em->len - offset, length);

	stripe_len = map->stripe_len;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	stripe_nr = div64_u64(offset, stripe_len);

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_nr * stripe_len;

	stripe_nr_end = round_up(offset + length, map->stripe_len);
	stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len);
	stripe_cnt = stripe_nr_end - stripe_nr;
	stripe_end_offset = stripe_nr_end * map->stripe_len -
			    (offset + length);
	/*
	 * after this, stripe_nr is the number of stripes on this
	 * device we have to walk to find the data, and stripe_index is
	 * the number of our device in the stripe array
	 */
	num_stripes = 1;
	stripe_index = 0;
	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
			 BTRFS_BLOCK_GROUP_RAID10)) {
		if (map->type & BTRFS_BLOCK_GROUP_RAID0)
			sub_stripes = 1;
		else
			sub_stripes = map->sub_stripes;

		factor = map->num_stripes / sub_stripes;
		num_stripes = min_t(u64, map->num_stripes,
				    sub_stripes * stripe_cnt);
		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
		stripe_index *= sub_stripes;
		stripes_per_dev = div_u64_rem(stripe_cnt, factor,
					      &remaining_stripes);
		div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
		last_stripe *= sub_stripes;
	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK |
				BTRFS_BLOCK_GROUP_DUP)) {
		num_stripes = map->num_stripes;
	} else {
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
					&stripe_index);
	}

	bbio = alloc_btrfs_bio(num_stripes, 0);
	if (!bbio) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_stripes; i++) {
		bbio->stripes[i].physical =
			map->stripes[stripe_index].physical +
			stripe_offset + stripe_nr * map->stripe_len;
		bbio->stripes[i].dev = map->stripes[stripe_index].dev;

		if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
				 BTRFS_BLOCK_GROUP_RAID10)) {
			bbio->stripes[i].length = stripes_per_dev *
				map->stripe_len;

			if (i / sub_stripes < remaining_stripes)
				bbio->stripes[i].length +=
					map->stripe_len;

			/*
			 * Special for the first stripe and
			 * the last stripe:
			 *
			 * |-------|...|-------|
			 *     |----------|
			 *    off     end_off
			 */
			if (i < sub_stripes)
				bbio->stripes[i].length -=
					stripe_offset;

			if (stripe_index >= last_stripe &&
			    stripe_index <= (last_stripe +
					     sub_stripes - 1))
				bbio->stripes[i].length -=
					stripe_end_offset;

			if (i == sub_stripes - 1)
				stripe_offset = 0;
		} else {
			bbio->stripes[i].length = length;
		}

		stripe_index++;
		if (stripe_index == map->num_stripes) {
			stripe_index = 0;
			stripe_nr++;
		}
	}

	*bbio_ret = bbio;
	bbio->map_type = map->type;
	bbio->num_stripes = num_stripes;
out:
	free_extent_map(em);
	return ret;
}
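/*
 * Worked example of the stripe accounting above (illustrative values):
 * discarding 256K at chunk offset 192K of a 2-disk RAID0 chunk with 64K
 * stripe_len gives stripe_nr = 3, stripe_offset = 0, stripe_nr_end = 7
 * and stripe_cnt = 4. With factor = 2 the four 64K stripes alternate
 * between the two devices, so each of the two bbio stripes covers
 * stripes_per_dev = 2 runs (128K), and the first/last-stripe trims are
 * zero here because the range is stripe aligned.
 */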
/*
 * In dev-replace case, for repair case (that's the only case where the mirror
 * is selected explicitly when calling btrfs_map_block), blocks left of the
 * left cursor can also be read from the target drive.
 *
 * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the
 * array of stripes.
 * For READ, it also needs to be supported using the same mirror number.
 *
 * If the requested block is not left of the left cursor, EIO is returned. This
 * can happen because btrfs_num_copies() returns one more in the dev-replace
 * case.
 */
static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
					 u64 logical, u64 length,
					 u64 srcdev_devid, int *mirror_num,
					 u64 *physical)
{
	struct btrfs_bio *bbio = NULL;
	int num_stripes;
	int index_srcdev = 0;
	int found = 0;
	u64 physical_of_found = 0;
	int i;
	int ret = 0;

	ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
				logical, &length, &bbio, 0, 0);
	if (ret) {
		ASSERT(bbio == NULL);
		return ret;
	}

	num_stripes = bbio->num_stripes;
	if (*mirror_num > num_stripes) {
		/*
		 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror,
		 * that means that the requested area is not left of the left
		 * cursor
		 */
		btrfs_put_bbio(bbio);
		return -EIO;
	}

	/*
	 * process the rest of the function using the mirror_num of the source
	 * drive. Therefore look it up first.  At the end, patch the device
	 * pointer to the one of the target drive.
	 */
	for (i = 0; i < num_stripes; i++) {
		if (bbio->stripes[i].dev->devid != srcdev_devid)
			continue;

		/*
		 * In case of DUP, in order to keep it simple, only add the
		 * mirror with the lowest physical address
		 */
		if (found &&
		    physical_of_found <= bbio->stripes[i].physical)
			continue;

		index_srcdev = i;
		found = 1;
		physical_of_found = bbio->stripes[i].physical;
	}

	btrfs_put_bbio(bbio);

	ASSERT(found);
	if (!found)
		return -EIO;

	*mirror_num = index_srcdev + 1;
	*physical = physical_of_found;
	return ret;
}
static void handle_ops_on_dev_replace(enum btrfs_map_op op,
				      struct btrfs_bio **bbio_ret,
				      struct btrfs_dev_replace *dev_replace,
				      int *num_stripes_ret, int *max_errors_ret)
{
	struct btrfs_bio *bbio = *bbio_ret;
	u64 srcdev_devid = dev_replace->srcdev->devid;
	int tgtdev_indexes = 0;
	int num_stripes = *num_stripes_ret;
	int max_errors = *max_errors_ret;
	int i;

	if (op == BTRFS_MAP_WRITE) {
		int index_where_to_add;

		/*
		 * duplicate the write operations while the dev replace
		 * procedure is running. Since the copying of the old disk to
		 * the new disk takes place at run time while the filesystem is
		 * mounted writable, the regular write operations to the old
		 * disk have to be duplicated to go to the new disk as well.
		 *
		 * Note that device->missing is handled by the caller, and that
		 * the write to the old disk is already set up in the stripes
		 * array.
		 */
		index_where_to_add = num_stripes;
		for (i = 0; i < num_stripes; i++) {
			if (bbio->stripes[i].dev->devid == srcdev_devid) {
				/* write to new disk, too */
				struct btrfs_bio_stripe *new =
					bbio->stripes + index_where_to_add;
				struct btrfs_bio_stripe *old =
					bbio->stripes + i;

				new->physical = old->physical;
				new->length = old->length;
				new->dev = dev_replace->tgtdev;
				bbio->tgtdev_map[i] = index_where_to_add;
				index_where_to_add++;
				max_errors++;
				tgtdev_indexes++;
			}
		}
		num_stripes = index_where_to_add;
	} else if (op == BTRFS_MAP_GET_READ_MIRRORS) {
		int index_srcdev = 0;
		int found = 0;
		u64 physical_of_found = 0;

		/*
		 * During the dev-replace procedure, the target drive can also
		 * be used to read data in case it is needed to repair a corrupt
		 * block elsewhere. This is possible if the requested area is
		 * left of the left cursor. In this area, the target drive is a
		 * full copy of the source drive.
		 */
		for (i = 0; i < num_stripes; i++) {
			if (bbio->stripes[i].dev->devid == srcdev_devid) {
				/*
				 * In case of DUP, in order to keep it simple,
				 * only add the mirror with the lowest physical
				 * address
				 */
				if (found &&
				    physical_of_found <=
				     bbio->stripes[i].physical)
					continue;
				index_srcdev = i;
				found = 1;
				physical_of_found = bbio->stripes[i].physical;
			}
		}
		if (found) {
			struct btrfs_bio_stripe *tgtdev_stripe =
				bbio->stripes + num_stripes;

			tgtdev_stripe->physical = physical_of_found;
			tgtdev_stripe->length =
				bbio->stripes[index_srcdev].length;
			tgtdev_stripe->dev = dev_replace->tgtdev;
			bbio->tgtdev_map[index_srcdev] = num_stripes;

			tgtdev_indexes++;
			num_stripes++;
		}
	}

	*num_stripes_ret = num_stripes;
	*max_errors_ret = max_errors;
	bbio->num_tgtdevs = tgtdev_indexes;
	*bbio_ret = bbio;
}
static bool need_full_stripe(enum btrfs_map_op op)
{
	return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS);
}
/*
 * btrfs_get_io_geometry - calculates the geometry of a particular
 *			   (address, len) tuple. This information is used to
 *			   calculate how big a particular bio can get before it
 *			   straddles a stripe.
 *
 * @fs_info - the filesystem
 * @logical - address that we want to figure out the geometry of
 * @len	    - the length of IO we are going to perform, starting at @logical
 * @op      - type of operation - write or read
 * @io_geom - pointer used to return values
 *
 * Returns < 0 in case a chunk for the given logical address cannot be found,
 * usually shouldn't happen unless @logical is corrupted, 0 otherwise.
 */
int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
			  u64 logical, u64 len, struct btrfs_io_geometry *io_geom)
{
	struct extent_map *em;
	struct map_lookup *map;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_nr;
	u64 stripe_len;
	u64 raid56_full_stripe_start = (u64)-1;
	int data_stripes;
	int ret = 0;

	ASSERT(op != BTRFS_MAP_DISCARD);

	em = btrfs_get_chunk_map(fs_info, logical, len);
	if (IS_ERR(em))
		return PTR_ERR(em);

	map = em->map_lookup;
	/* Offset of this logical address in the chunk */
	offset = logical - em->start;
	/* Len of a stripe in a chunk */
	stripe_len = map->stripe_len;
	/* Stripe where this block falls in */
	stripe_nr = div64_u64(offset, stripe_len);
	/* Offset of stripe in the chunk */
	stripe_offset = stripe_nr * stripe_len;
	if (offset < stripe_offset) {
		btrfs_crit(fs_info,
"stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu",
			stripe_offset, offset, em->start, logical, stripe_len);
		ret = -EINVAL;
		goto out;
	}

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_offset;
	data_stripes = nr_data_stripes(map);

	if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
		u64 max_len = stripe_len - stripe_offset;

		/*
		 * In case of raid56, we need to know the stripe aligned start
		 */
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
			unsigned long full_stripe_len = stripe_len * data_stripes;
			raid56_full_stripe_start = offset;

			/*
			 * Allow a write of a full stripe, but make sure we
			 * don't allow straddling of stripes
			 */
			raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
					full_stripe_len);
			raid56_full_stripe_start *= full_stripe_len;

			/*
			 * For writes to RAID[56], allow a full stripeset across
			 * all disks. For other RAID types and for RAID[56]
			 * reads, just allow a single stripe (on a single disk).
			 */
			if (op == BTRFS_MAP_WRITE) {
				max_len = stripe_len * data_stripes -
					  (offset - raid56_full_stripe_start);
			}
		}
		len = min_t(u64, em->len - offset, max_len);
	} else {
		len = em->len - offset;
	}

	io_geom->len = len;
	io_geom->offset = offset;
	io_geom->stripe_len = stripe_len;
	io_geom->stripe_nr = stripe_nr;
	io_geom->stripe_offset = stripe_offset;
	io_geom->raid56_stripe_offset = raid56_full_stripe_start;

out:
	/* once for us */
	free_extent_map(em);
	return ret;
}
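/*
 * Worked example (illustrative): a read at logical = em->start + 200K
 * in a chunk with 64K stripe_len gives stripe_nr = 3 and stripe_offset
 * = 200K - 3 * 64K = 8K, so at most 56K (stripe_len - stripe_offset)
 * can be issued before the bio would straddle into stripe 4. Only
 * RAID5/6 writes may exceed that, up to the end of the full stripe.
 */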
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
			     enum btrfs_map_op op,
			     u64 logical, u64 *length,
			     struct btrfs_bio **bbio_ret,
			     int mirror_num, int need_raid_map)
{
	struct extent_map *em;
	struct map_lookup *map;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_nr;
	u64 stripe_len;
	u32 stripe_index;
	int data_stripes;
	int i;
	int ret = 0;
	int num_stripes;
	int max_errors = 0;
	int tgtdev_indexes = 0;
	struct btrfs_bio *bbio = NULL;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	int dev_replace_is_ongoing = 0;
	int num_alloc_stripes;
	int patch_the_first_stripe_for_dev_replace = 0;
	u64 physical_to_patch_in_first_stripe = 0;
	u64 raid56_full_stripe_start = (u64)-1;
	struct btrfs_io_geometry geom;

	ASSERT(bbio_ret);

	if (op == BTRFS_MAP_DISCARD)
		return __btrfs_map_block_for_discard(fs_info, logical,
						     *length, bbio_ret);

	ret = btrfs_get_io_geometry(fs_info, op, logical, *length, &geom);
	if (ret < 0)
		return ret;

	em = btrfs_get_chunk_map(fs_info, logical, *length);
	ASSERT(!IS_ERR(em));
	map = em->map_lookup;

	*length = geom.len;
	offset = geom.offset;
	stripe_len = geom.stripe_len;
	stripe_nr = geom.stripe_nr;
	stripe_offset = geom.stripe_offset;
	raid56_full_stripe_start = geom.raid56_stripe_offset;
	data_stripes = nr_data_stripes(map);

	down_read(&dev_replace->rwsem);
	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
	/*
	 * Hold the semaphore for read during the whole operation, write is
	 * requested at commit time but must wait.
	 */
	if (!dev_replace_is_ongoing)
		up_read(&dev_replace->rwsem);

	if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
	    !need_full_stripe(op) && dev_replace->tgtdev != NULL) {
		ret = get_extra_mirror_from_replace(fs_info, logical, *length,
						    dev_replace->srcdev->devid,
						    &mirror_num,
					    &physical_to_patch_in_first_stripe);
		if (ret)
			goto out;
		else
			patch_the_first_stripe_for_dev_replace = 1;
	} else if (mirror_num > map->num_stripes) {
		mirror_num = 0;
	}

	num_stripes = 1;
	stripe_index = 0;
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
				&stripe_index);
		if (!need_full_stripe(op))
			mirror_num = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) {
		if (need_full_stripe(op))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
		else {
			stripe_index = find_live_mirror(fs_info, map, 0,
					    dev_replace_is_ongoing);
			mirror_num = stripe_index + 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (need_full_stripe(op)) {
			num_stripes = map->num_stripes;
		} else if (mirror_num) {
			stripe_index = mirror_num - 1;
		} else {
			mirror_num = 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		u32 factor = map->num_stripes / map->sub_stripes;

		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
		stripe_index *= map->sub_stripes;

		if (need_full_stripe(op))
			num_stripes = map->sub_stripes;
		else if (mirror_num)
			stripe_index += mirror_num - 1;
		else {
			int old_stripe_index = stripe_index;
			stripe_index = find_live_mirror(fs_info, map,
					      stripe_index,
					      dev_replace_is_ongoing);
			mirror_num = stripe_index - old_stripe_index + 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) {
			/* push stripe_nr back to the start of the full stripe */
			stripe_nr = div64_u64(raid56_full_stripe_start,
					stripe_len * data_stripes);

			/* RAID[56] write or recovery. Return all stripes */
			num_stripes = map->num_stripes;
			max_errors = nr_parity_stripes(map);

			*length = map->stripe_len;
			stripe_index = 0;
			stripe_offset = 0;
		} else {
			/*
			 * Mirror #0 or #1 means the original data block.
			 * Mirror #2 is RAID5 parity block.
			 * Mirror #3 is RAID6 Q block.
			 */
			stripe_nr = div_u64_rem(stripe_nr,
					data_stripes, &stripe_index);
			if (mirror_num > 1)
				stripe_index = data_stripes + mirror_num - 2;

			/* We distribute the parity blocks across stripes */
			div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
					&stripe_index);
			if (!need_full_stripe(op) && mirror_num <= 1)
				mirror_num = 1;
		}
	} else {
		/*
		 * after this, stripe_nr is the number of stripes on this
		 * device we have to walk to find the data, and stripe_index is
		 * the number of our device in the stripe array
		 */
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
				&stripe_index);
		mirror_num = stripe_index + 1;
	}
	if (stripe_index >= map->num_stripes) {
		btrfs_crit(fs_info,
			   "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
			   stripe_index, map->num_stripes);
		ret = -EINVAL;
		goto out;
	}

	num_alloc_stripes = num_stripes;
	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) {
		if (op == BTRFS_MAP_WRITE)
			num_alloc_stripes <<= 1;
		if (op == BTRFS_MAP_GET_READ_MIRRORS)
			num_alloc_stripes++;
		tgtdev_indexes = num_stripes;
	}

	bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
	if (!bbio) {
		ret = -ENOMEM;
		goto out;
	}
	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
		bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes);

	/* build raid_map */
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
	    (need_full_stripe(op) || mirror_num > 1)) {
		u64 tmp;
		unsigned rot;

		bbio->raid_map = (u64 *)((void *)bbio->stripes +
				 sizeof(struct btrfs_bio_stripe) *
				 num_alloc_stripes +
				 sizeof(int) * tgtdev_indexes);

		/* Work out the disk rotation on this stripe-set */
		div_u64_rem(stripe_nr, num_stripes, &rot);

		/* Fill in the logical address of each stripe */
		tmp = stripe_nr * data_stripes;
		for (i = 0; i < data_stripes; i++)
			bbio->raid_map[(i+rot) % num_stripes] =
				em->start + (tmp + i) * map->stripe_len;

		bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
		if (map->type & BTRFS_BLOCK_GROUP_RAID6)
			bbio->raid_map[(i+rot+1) % num_stripes] =
				RAID6_Q_STRIPE;
	}

	for (i = 0; i < num_stripes; i++) {
		bbio->stripes[i].physical =
			map->stripes[stripe_index].physical +
			stripe_offset +
			stripe_nr * map->stripe_len;
		bbio->stripes[i].dev =
			map->stripes[stripe_index].dev;
		stripe_index++;
	}

	if (need_full_stripe(op))
		max_errors = btrfs_chunk_max_errors(map);

	if (bbio->raid_map)
		sort_parity_stripes(bbio, num_stripes);

	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
	    need_full_stripe(op)) {
		handle_ops_on_dev_replace(op, &bbio, dev_replace, &num_stripes,
					  &max_errors);
	}

	*bbio_ret = bbio;
	bbio->map_type = map->type;
	bbio->num_stripes = num_stripes;
	bbio->max_errors = max_errors;
	bbio->mirror_num = mirror_num;

	/*
	 * this is the case that REQ_READ && dev_replace_is_ongoing &&
	 * mirror_num == num_stripes + 1 && dev_replace target drive is
	 * available as a mirror
	 */
	if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
		WARN_ON(num_stripes > 1);
		bbio->stripes[0].dev = dev_replace->tgtdev;
		bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
		bbio->mirror_num = map->num_stripes + 1;
	}
out:
	if (dev_replace_is_ongoing) {
		lockdep_assert_held(&dev_replace->rwsem);
		/* Unlock and let waiting writers proceed */
		up_read(&dev_replace->rwsem);
	}
	free_extent_map(em);
	return ret;
}
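/*
 * Illustrative rotation example for the raid_map built above: on a
 * 3-disk RAID5 chunk, full stripe N has rot = N % 3, so the two data
 * stripes land in slots (0+rot) and (1+rot) mod 3 while RAID5_P_STRIPE
 * takes the remaining slot -- parity moves one disk to the right each
 * full stripe, and sort_parity_stripes() then reorders the stripes so
 * the parity entry comes last for the raid56 code.
 */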
int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		    u64 logical, u64 *length,
		    struct btrfs_bio **bbio_ret, int mirror_num)
{
	return __btrfs_map_block(fs_info, op, logical, length, bbio_ret,
				 mirror_num, 0);
}

/* For Scrub/replace */
int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		     u64 logical, u64 *length,
		     struct btrfs_bio **bbio_ret)
{
	return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, 0, 1);
}
int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
		     u64 physical, u64 **logical, int *naddrs, int *stripe_len)
{
	struct extent_map *em;
	struct map_lookup *map;
	u64 *buf;
	u64 bytenr;
	u64 length;
	u64 stripe_nr;
	u64 rmap_len;
	int i, j, nr = 0;

	em = btrfs_get_chunk_map(fs_info, chunk_start, 1);
	if (IS_ERR(em))
		return -EIO;

	map = em->map_lookup;
	length = em->len;
	rmap_len = map->stripe_len;

	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		length = div_u64(length, map->num_stripes / map->sub_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
		length = div_u64(length, map->num_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		length = div_u64(length, nr_data_stripes(map));
		rmap_len = map->stripe_len * nr_data_stripes(map);
	}

	buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
	BUG_ON(!buf); /* -ENOMEM */

	for (i = 0; i < map->num_stripes; i++) {
		if (map->stripes[i].physical > physical ||
		    map->stripes[i].physical + length <= physical)
			continue;

		stripe_nr = physical - map->stripes[i].physical;
		stripe_nr = div64_u64(stripe_nr, map->stripe_len);

		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripe_nr = stripe_nr * map->num_stripes + i;
			stripe_nr = div_u64(stripe_nr, map->sub_stripes);
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
			stripe_nr = stripe_nr * map->num_stripes + i;
		} /* else if RAID[56], multiply by nr_data_stripes().
		   * Alternatively, just use rmap_len below instead of
		   * map->stripe_len */

		bytenr = chunk_start + stripe_nr * rmap_len;
		WARN_ON(nr >= map->num_stripes);
		for (j = 0; j < nr; j++) {
			if (buf[j] == bytenr)
				break;
		}
		if (j == nr) {
			WARN_ON(nr >= map->num_stripes);
			buf[nr++] = bytenr;
		}
	}

	*logical = buf;
	*naddrs = nr;
	*stripe_len = rmap_len;

	free_extent_map(em);
	return 0;
}
static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
{
	bio->bi_private = bbio->private;
	bio->bi_end_io = bbio->end_io;
	bio_endio(bio);

	btrfs_put_bbio(bbio);
}
*bio
)
6374 struct btrfs_bio
*bbio
= bio
->bi_private
;
6375 int is_orig_bio
= 0;
6377 if (bio
->bi_status
) {
6378 atomic_inc(&bbio
->error
);
6379 if (bio
->bi_status
== BLK_STS_IOERR
||
6380 bio
->bi_status
== BLK_STS_TARGET
) {
6381 unsigned int stripe_index
=
6382 btrfs_io_bio(bio
)->stripe_index
;
6383 struct btrfs_device
*dev
;
6385 BUG_ON(stripe_index
>= bbio
->num_stripes
);
6386 dev
= bbio
->stripes
[stripe_index
].dev
;
6388 if (bio_op(bio
) == REQ_OP_WRITE
)
6389 btrfs_dev_stat_inc_and_print(dev
,
6390 BTRFS_DEV_STAT_WRITE_ERRS
);
6391 else if (!(bio
->bi_opf
& REQ_RAHEAD
))
6392 btrfs_dev_stat_inc_and_print(dev
,
6393 BTRFS_DEV_STAT_READ_ERRS
);
6394 if (bio
->bi_opf
& REQ_PREFLUSH
)
6395 btrfs_dev_stat_inc_and_print(dev
,
6396 BTRFS_DEV_STAT_FLUSH_ERRS
);
6401 if (bio
== bbio
->orig_bio
)
6404 btrfs_bio_counter_dec(bbio
->fs_info
);
6406 if (atomic_dec_and_test(&bbio
->stripes_pending
)) {
6409 bio
= bbio
->orig_bio
;
6412 btrfs_io_bio(bio
)->mirror_num
= bbio
->mirror_num
;
6413 /* only send an error to the higher layers if it is
6414 * beyond the tolerance of the btrfs bio
6416 if (atomic_read(&bbio
->error
) > bbio
->max_errors
) {
6417 bio
->bi_status
= BLK_STS_IOERR
;
6420 * this bio is actually up to date, we didn't
6421 * go over the max number of errors
6423 bio
->bi_status
= BLK_STS_OK
;
6426 btrfs_end_bbio(bbio
, bio
);
6427 } else if (!is_orig_bio
) {
/*
 * see run_scheduled_bios for a description of why bios are collected for
 * async submit.
 *
 * This will add one bio to the pending list for a device and make sure
 * the work struct is scheduled.
 */
static noinline void btrfs_schedule_bio(struct btrfs_device *device,
					struct bio *bio)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	int should_queue = 1;
	struct btrfs_pending_bios *pending_bios;

	/* don't bother with additional async steps for reads, right now */
	if (bio_op(bio) == REQ_OP_READ) {
		btrfsic_submit_bio(bio);
		return;
	}

	WARN_ON(bio->bi_next);
	bio->bi_next = NULL;

	spin_lock(&device->io_lock);
	if (op_is_sync(bio->bi_opf))
		pending_bios = &device->pending_sync_bios;
	else
		pending_bios = &device->pending_bios;

	if (pending_bios->tail)
		pending_bios->tail->bi_next = bio;

	pending_bios->tail = bio;
	if (!pending_bios->head)
		pending_bios->head = bio;
	if (device->running_pending)
		should_queue = 0;

	spin_unlock(&device->io_lock);

	if (should_queue)
		btrfs_queue_work(fs_info->submit_workers, &device->work);
}
static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
			      u64 physical, int dev_nr, int async)
{
	struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
	struct btrfs_fs_info *fs_info = bbio->fs_info;

	bio->bi_private = bbio;
	btrfs_io_bio(bio)->stripe_index = dev_nr;
	bio->bi_end_io = btrfs_end_bio;
	bio->bi_iter.bi_sector = physical >> 9;
	btrfs_debug_in_rcu(fs_info,
	"btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
		bio_op(bio), bio->bi_opf, (u64)bio->bi_iter.bi_sector,
		(u_long)dev->bdev->bd_dev, rcu_str_deref(dev->name), dev->devid,
		bio->bi_iter.bi_size);
	bio_set_dev(bio, dev->bdev);

	btrfs_bio_counter_inc_noblocked(fs_info);

	if (async)
		btrfs_schedule_bio(dev, bio);
	else
		btrfsic_submit_bio(bio);
}
static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
{
	atomic_inc(&bbio->error);
	if (atomic_dec_and_test(&bbio->stripes_pending)) {
		/* Should be the original bio. */
		WARN_ON(bio != bbio->orig_bio);

		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
		bio->bi_iter.bi_sector = logical >> 9;
		if (atomic_read(&bbio->error) > bbio->max_errors)
			bio->bi_status = BLK_STS_IOERR;
		else
			bio->bi_status = BLK_STS_OK;
		btrfs_end_bbio(bbio, bio);
	}
}
blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
			   int mirror_num, int async_submit)
{
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	int ret;
	int dev_nr;
	int total_devs;
	struct btrfs_bio *bbio = NULL;

	length = bio->bi_iter.bi_size;
	map_length = length;

	btrfs_bio_counter_inc_blocked(fs_info);
	ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical,
				&map_length, &bbio, mirror_num, 1);
	if (ret) {
		btrfs_bio_counter_dec(fs_info);
		return errno_to_blk_status(ret);
	}

	total_devs = bbio->num_stripes;
	bbio->orig_bio = first_bio;
	bbio->private = first_bio->bi_private;
	bbio->end_io = first_bio->bi_end_io;
	bbio->fs_info = fs_info;
	atomic_set(&bbio->stripes_pending, bbio->num_stripes);

	if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
	    ((bio_op(bio) == REQ_OP_WRITE) || (mirror_num > 1))) {
		/* In this case, map_length has been set to the length of
		   a single stripe; not the whole write */
		if (bio_op(bio) == REQ_OP_WRITE) {
			ret = raid56_parity_write(fs_info, bio, bbio,
						  map_length);
		} else {
			ret = raid56_parity_recover(fs_info, bio, bbio,
						    map_length, mirror_num, 1);
		}

		btrfs_bio_counter_dec(fs_info);
		return errno_to_blk_status(ret);
	}

	if (map_length < length) {
		btrfs_crit(fs_info,
			   "mapping failed logical %llu bio len %llu len %llu",
			   logical, length, map_length);
		BUG();
	}

	for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
		dev = bbio->stripes[dev_nr].dev;
		if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING,
						   &dev->dev_state) ||
		    (bio_op(first_bio) == REQ_OP_WRITE &&
		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
			bbio_error(bbio, first_bio, logical);
			continue;
		}

		if (dev_nr < total_devs - 1)
			bio = btrfs_bio_clone(first_bio);
		else
			bio = first_bio;

		submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical,
				  dev_nr, async_submit);
	}
	btrfs_bio_counter_dec(fs_info);
	return BLK_STS_OK;
}
/*
 * Find a device specified by @devid or @uuid in the list of @fs_devices, or
 * return NULL.
 *
 * If devid and uuid are both specified, the match must be exact, otherwise
 * only devid is used.
 *
 * If @seed is true, traverse through the seed devices.
 */
struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices,
				       u64 devid, u8 *uuid, u8 *fsid,
				       bool seed)
{
	struct btrfs_device *device;

	while (fs_devices) {
		if (!fsid ||
		    !memcmp(fs_devices->metadata_uuid, fsid, BTRFS_FSID_SIZE)) {
			list_for_each_entry(device, &fs_devices->devices,
					    dev_list) {
				if (device->devid == devid &&
				    (!uuid || memcmp(device->uuid, uuid,
						     BTRFS_UUID_SIZE) == 0))
					return device;
			}
		}
		if (seed)
			fs_devices = fs_devices->seed;
		else
			return NULL;
	}
	return NULL;
}
static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
					    u64 devid, u8 *dev_uuid)
{
	struct btrfs_device *device;

	device = btrfs_alloc_device(NULL, &devid, dev_uuid);
	if (IS_ERR(device))
		return device;

	list_add(&device->dev_list, &fs_devices->devices);
	device->fs_devices = fs_devices;
	fs_devices->num_devices++;

	set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
	fs_devices->missing_devices++;

	return device;
}
/**
 * btrfs_alloc_device - allocate struct btrfs_device
 * @fs_info:	used only for generating a new devid, can be NULL if
 *		devid is provided (i.e. @devid != NULL).
 * @devid:	a pointer to devid for this device.  If NULL a new devid
 *		is generated.
 * @uuid:	a pointer to UUID for this device.  If NULL a new UUID
 *		is generated.
 *
 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
 * on error.  Returned struct is not linked onto any lists and must be
 * destroyed with btrfs_free_device.
 */
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
					const u64 *devid,
					const u8 *uuid)
{
	struct btrfs_device *dev;
	u64 tmp;

	if (WARN_ON(!devid && !fs_info))
		return ERR_PTR(-EINVAL);

	dev = __alloc_device();
	if (IS_ERR(dev))
		return dev;

	if (devid)
		tmp = *devid;
	else {
		int ret;

		ret = find_next_devid(fs_info, &tmp);
		if (ret) {
			btrfs_free_device(dev);
			return ERR_PTR(ret);
		}
	}
	dev->devid = tmp;

	if (uuid)
		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
	else
		generate_random_uuid(dev->uuid);

	btrfs_init_work(&dev->work, btrfs_submit_helper,
			pending_bios_fn, NULL, NULL);

	return dev;
}
static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
					u64 devid, u8 *uuid, bool error)
{
	if (error)
		btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
			     devid, uuid);
	else
		btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
			      devid, uuid);
}
calc_stripe_length(u64 type
, u64 chunk_len
, int num_stripes
)
6711 int index
= btrfs_bg_flags_to_raid_index(type
);
6712 int ncopies
= btrfs_raid_array
[index
].ncopies
;
6715 switch (type
& BTRFS_BLOCK_GROUP_PROFILE_MASK
) {
6716 case BTRFS_BLOCK_GROUP_RAID5
:
6717 data_stripes
= num_stripes
- 1;
6719 case BTRFS_BLOCK_GROUP_RAID6
:
6720 data_stripes
= num_stripes
- 2;
6723 data_stripes
= num_stripes
/ ncopies
;
6726 return div_u64(chunk_len
, data_stripes
);
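/*
 * Worked example (illustrative values): a 2G RAID6 chunk item with
 * num_stripes = 6 has data_stripes = 4, so each device extent is
 * div_u64(2G, 4) = 512M; a 1G RAID1 chunk (num_stripes = 2, ncopies =
 * 2) has data_stripes = 1, so the stripe length equals the whole 1G
 * chunk length on each of the two devices.
 */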
static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);

	/*
	 * Only need to verify chunk item if we're reading from sys chunk array,
	 * as chunk item in tree block is already verified by tree-checker.
	 */
	if (leaf->start == BTRFS_SUPER_INFO_OFFSET) {
		ret = btrfs_check_chunk_valid(leaf, chunk, logical);
		if (ret)
			return ret;
	}

	read_lock(&map_tree->lock);
	em = lookup_extent_mapping(map_tree, logical, 1);
	read_unlock(&map_tree->lock);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}

	em = alloc_extent_map();
	if (!em)
		return -ENOMEM;
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
	em->map_lookup = map;
	em->start = logical;
	em->len = length;
	em->orig_start = 0;
	em->block_start = 0;
	em->block_len = em->len;

	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	map->verified_stripes = 0;
	em->orig_block_len = calc_stripe_length(map->type, em->len,
						map->num_stripes);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices,
							devid, uuid, NULL, true);
		if (!map->stripes[i].dev &&
		    !btrfs_test_opt(fs_info, DEGRADED)) {
			free_extent_map(em);
			btrfs_report_missing_device(fs_info, devid, uuid, true);
			return -ENOENT;
		}
		if (!map->stripes[i].dev) {
			map->stripes[i].dev =
				add_missing_dev(fs_info->fs_devices, devid,
						uuid);
			if (IS_ERR(map->stripes[i].dev)) {
				free_extent_map(em);
				btrfs_err(fs_info,
					"failed to init missing dev %llu: %ld",
					devid, PTR_ERR(map->stripes[i].dev));
				return PTR_ERR(map->stripes[i].dev);
			}
			btrfs_report_missing_device(fs_info, devid, uuid, false);
		}
		set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
			&(map->stripes[i].dev->dev_state));
	}

	write_lock(&map_tree->lock);
	ret = add_extent_mapping(map_tree, em, 0);
	write_unlock(&map_tree->lock);
	if (ret < 0) {
		btrfs_err(fs_info,
			  "failed to add chunk map, start=%llu len=%llu: %d",
			  em->start, em->len, ret);
	}
	free_extent_map(em);

	return ret;
}

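/* Copy the on-disk dev item fields into the in-memory btrfs_device. */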
static void fill_device_from_item(struct extent_buffer *leaf,
				  struct btrfs_dev_item *dev_item,
				  struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->total_bytes = device->disk_total_bytes;
	device->commit_total_bytes = device->disk_total_bytes;
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->commit_bytes_used = device->bytes_used;
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);

	ptr = btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
}

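/*
 * Find the fs_devices of the seed filesystem that @fsid belongs to,
 * opening and cloning it on first reference. The result is chained onto
 * fs_info->fs_devices->seed.
 */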
static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
						  u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	lockdep_assert_held(&uuid_mutex);

	fs_devices = fs_info->fs_devices->seed;
	while (fs_devices) {
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE))
			return fs_devices;

		fs_devices = fs_devices->seed;
	}

	fs_devices = find_fsid(fsid, NULL);
	if (!fs_devices) {
		if (!btrfs_test_opt(fs_info, DEGRADED))
			return ERR_PTR(-ENOENT);

		fs_devices = alloc_fs_devices(fsid, NULL);
		if (IS_ERR(fs_devices))
			return fs_devices;

		fs_devices->seeding = 1;
		fs_devices->opened = 1;
		return fs_devices;
	}

	fs_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(fs_devices))
		return fs_devices;

	ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder);
	if (ret) {
		free_fs_devices(fs_devices);
		fs_devices = ERR_PTR(ret);
		goto out;
	}

	if (!fs_devices->seeding) {
		close_fs_devices(fs_devices);
		free_fs_devices(fs_devices);
		fs_devices = ERR_PTR(-EINVAL);
		goto out;
	}

	fs_devices->seed = fs_info->fs_devices->seed;
	fs_info->fs_devices->seed = fs_devices;
out:
	return fs_devices;
}

static int read_one_dev(struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	u64 devid;
	u8 fs_uuid[BTRFS_FSID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
			   BTRFS_FSID_SIZE);

	if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) {
		fs_devices = open_seed_devices(fs_info, fs_uuid);
		if (IS_ERR(fs_devices))
			return PTR_ERR(fs_devices);
	}

	device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
				   fs_uuid, true);
	if (!device) {
		if (!btrfs_test_opt(fs_info, DEGRADED)) {
			btrfs_report_missing_device(fs_info, devid,
						    dev_uuid, true);
			return -ENOENT;
		}

		device = add_missing_dev(fs_devices, devid, dev_uuid);
		if (IS_ERR(device)) {
			btrfs_err(fs_info,
				  "failed to add missing dev %llu: %ld",
				  devid, PTR_ERR(device));
			return PTR_ERR(device);
		}
		btrfs_report_missing_device(fs_info, devid, dev_uuid, false);
	} else {
		if (!device->bdev) {
			if (!btrfs_test_opt(fs_info, DEGRADED)) {
				btrfs_report_missing_device(fs_info,
						devid, dev_uuid, true);
				return -ENOENT;
			}
			btrfs_report_missing_device(fs_info, devid,
						    dev_uuid, false);
		}

		if (!device->bdev &&
		    !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			/*
			 * this happens when a device that was properly set up
			 * in the device info lists suddenly goes bad.
			 * device->bdev is NULL, and so we have to set
			 * device->missing to one here
			 */
			device->fs_devices->missing_devices++;
			set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}

		/* Move the device to its own fs_devices */
		if (device->fs_devices != fs_devices) {
			ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
					&device->dev_state));

			list_move(&device->dev_list, &fs_devices->devices);
			device->fs_devices->num_devices--;
			fs_devices->num_devices++;

			device->fs_devices->missing_devices--;
			fs_devices->missing_devices++;

			device->fs_devices = fs_devices;
		}
	}

	if (device->fs_devices != fs_info->fs_devices) {
		BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
		if (device->generation !=
		    btrfs_device_generation(leaf, dev_item))
			return -EINVAL;
	}

	fill_device_from_item(leaf, dev_item, device);
	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		device->fs_devices->total_rw_bytes += device->total_bytes;
		atomic64_add(device->total_bytes - device->bytes_used,
			     &fs_info->free_chunk_space);
	}
	return 0;
}

int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *array_ptr;
	unsigned long sb_array_offset;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur_offset;
	u64 type;
	struct btrfs_key key;

	ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
	/*
	 * This will create extent buffer of nodesize, superblock size is
	 * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
	 * overallocate but we can keep it as-is, only the first page is used.
	 */
	sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET);
	if (IS_ERR(sb))
		return PTR_ERR(sb);
	set_extent_buffer_uptodate(sb);
	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
	/*
	 * The sb extent buffer is artificial and just used to read the system array.
	 * set_extent_buffer_uptodate() call does not properly mark all its
	 * pages up-to-date when the page is larger: extent does not cover the
	 * whole page and consequently check_page_uptodate does not find all
	 * the page's extents up-to-date (the hole beyond sb),
	 * write_extent_buffer then triggers a WARN_ON.
	 *
	 * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
	 * but sb spans only this function. Add an explicit SetPageUptodate call
	 * to silence the warning eg. on PowerPC 64.
	 */
	if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
		SetPageUptodate(sb->pages[0]);

	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	array_ptr = super_copy->sys_chunk_array;
	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur_offset = 0;

	while (cur_offset < array_size) {
		disk_key = (struct btrfs_disk_key *)array_ptr;
		len = sizeof(*disk_key);
		if (cur_offset + len > array_size)
			goto out_short_read;

		btrfs_disk_key_to_cpu(&key, disk_key);

		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_array_offset;
			/*
			 * At least one btrfs_chunk with one stripe must be
			 * present, exact stripe count check comes afterwards
			 */
			len = btrfs_chunk_item_size(1);
			if (cur_offset + len > array_size)
				goto out_short_read;

			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			if (!num_stripes) {
				btrfs_err(fs_info,
			"invalid number of stripes %u in sys_array at offset %u",
					  num_stripes, cur_offset);
				ret = -EIO;
				break;
			}

			type = btrfs_chunk_type(sb, chunk);
			if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
				btrfs_err(fs_info,
			"invalid chunk type %llu in sys_array at offset %u",
					  type, cur_offset);
				ret = -EIO;
				break;
			}

			len = btrfs_chunk_item_size(num_stripes);
			if (cur_offset + len > array_size)
				goto out_short_read;

			ret = read_one_chunk(&key, sb, chunk);
			if (ret)
				break;
		} else {
			btrfs_err(fs_info,
			"unexpected item type %u in sys_array at offset %u",
				  (u32)key.type, cur_offset);
			ret = -EIO;
			break;
		}
		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;
	}
	clear_extent_buffer_uptodate(sb);
	free_extent_buffer_stale(sb);
	return ret;

out_short_read:
	btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
		  len, cur_offset);
	clear_extent_buffer_uptodate(sb);
	free_extent_buffer_stale(sb);
	return -EIO;
}

/*
 * Check if all chunks in the fs are OK for read-write degraded mount
 *
 * If the @failing_dev is specified, it's accounted as missing.
 *
 * Return true if all chunks meet the minimal RW mount requirements.
 * Return false if any chunk doesn't meet the minimal RW mount requirements.
 */
bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
			       struct btrfs_device *failing_dev)
{
	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	u64 next_start = 0;
	bool ret = true;

	read_lock(&map_tree->lock);
	em = lookup_extent_mapping(map_tree, 0, (u64)-1);
	read_unlock(&map_tree->lock);
	/* No chunk at all? Return false anyway */
	if (!em) {
		ret = false;
		goto out;
	}
	while (em) {
		struct map_lookup *map;
		int missing = 0;
		int max_tolerated;
		int i;

		map = em->map_lookup;
		max_tolerated =
			btrfs_get_num_tolerated_disk_barrier_failures(
					map->type);
		for (i = 0; i < map->num_stripes; i++) {
			struct btrfs_device *dev = map->stripes[i].dev;

			if (!dev || !dev->bdev ||
			    test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
			    dev->last_flush_error)
				missing++;
			else if (failing_dev && failing_dev == dev)
				missing++;
		}
		if (missing > max_tolerated) {
			if (!failing_dev)
				btrfs_warn(fs_info,
	"chunk %llu missing %d devices, max tolerance is %d for writable mount",
					   em->start, missing, max_tolerated);
			free_extent_map(em);
			ret = false;
			goto out;
		}
		next_start = extent_map_end(em);
		free_extent_map(em);

		read_lock(&map_tree->lock);
		em = lookup_extent_mapping(map_tree, next_start,
					   (u64)(-1) - next_start);
		read_unlock(&map_tree->lock);
	}
out:
	return ret;
}

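/*
 * Read the whole chunk tree, populating the device list and the chunk
 * mapping tree, then cross-check the totals against the superblock.
 */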
int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;
	u64 total_dev = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * uuid_mutex is needed only if we are mounting a sprout FS
	 * otherwise we don't need it.
	 */
	mutex_lock(&uuid_mutex);
	mutex_lock(&fs_info->chunk_mutex);

	/*
	 * Read all device items, and then all the chunk items. All
	 * device items are found before any chunk item (their object id
	 * is smaller than the lowest possible object id for a chunk
	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
			struct btrfs_dev_item *dev_item;
			dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
			ret = read_one_dev(leaf, dev_item);
			if (ret)
				goto error;
			total_dev++;
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(&found_key, leaf, chunk);
			if (ret)
				goto error;
		}
		path->slots[0]++;
	}

	/*
	 * After loading chunk tree, we've got all device information,
	 * do another round of validation checks.
	 */
	if (total_dev != fs_info->fs_devices->total_devices) {
		btrfs_err(fs_info,
	"super_num_devices %llu mismatch with num_devices %llu found here",
			  btrfs_super_num_devices(fs_info->super_copy),
			  total_dev);
		ret = -EINVAL;
		goto error;
	}
	if (btrfs_super_total_bytes(fs_info->super_copy) <
	    fs_info->fs_devices->total_rw_bytes) {
		btrfs_err(fs_info,
	"super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
			  btrfs_super_total_bytes(fs_info->super_copy),
			  fs_info->fs_devices->total_rw_bytes);
		ret = -EINVAL;
		goto error;
	}
	ret = 0;
error:
	mutex_unlock(&fs_info->chunk_mutex);
	mutex_unlock(&uuid_mutex);

	btrfs_free_path(path);
	return ret;
}

void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;

	while (fs_devices) {
		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry(device, &fs_devices->devices, dev_list)
			device->fs_info = fs_info;
		mutex_unlock(&fs_devices->device_list_mutex);

		fs_devices = fs_devices->seed;
	}
}

static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_dev_stat_reset(dev, i);
}

int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct extent_buffer *eb;
	int slot;
	int ret = 0;
	struct btrfs_device *device;
	struct btrfs_path *path = NULL;
	int i;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		int item_size;
		struct btrfs_dev_stats_item *ptr;

		key.objectid = BTRFS_DEV_STATS_OBJECTID;
		key.type = BTRFS_PERSISTENT_ITEM_KEY;
		key.offset = device->devid;
		ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
		if (ret) {
			__btrfs_reset_dev_stats(device);
			device->dev_stats_valid = 1;
			btrfs_release_path(path);
			continue;
		}
		slot = path->slots[0];
		eb = path->nodes[0];
		btrfs_item_key_to_cpu(eb, &found_key, slot);
		item_size = btrfs_item_size_nr(eb, slot);

		ptr = btrfs_item_ptr(eb, slot,
				     struct btrfs_dev_stats_item);

		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (item_size >= (1 + i) * sizeof(__le64))
				btrfs_dev_stat_set(device, i,
					btrfs_dev_stats_value(eb, ptr, i));
			else
				btrfs_dev_stat_reset(device, i);
		}

		device->dev_stats_valid = 1;
		btrfs_dev_stat_print_on_load(device);
		btrfs_release_path(path);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

out:
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}

static int update_dev_stat_item(struct btrfs_trans_handle *trans,
				struct btrfs_device *device)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_stats_item *ptr;
	int ret;
	int i;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		btrfs_warn_in_rcu(fs_info,
			"error %d while searching for dev_stats item for device %s",
			ret, rcu_str_deref(device->name));
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/* need to delete old one and insert a new one */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			btrfs_warn_in_rcu(fs_info,
				"delete too small dev_stats item for device %s failed %d",
				rcu_str_deref(device->name), ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			btrfs_warn_in_rcu(fs_info,
				"insert dev_stats item for device %s failed %d",
				rcu_str_deref(device->name), ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_set_dev_stats_value(eb, ptr, i,
					  btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * called from commit_transaction. Writes all changed device stats to disk.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int stats_cnt;
	int ret = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		stats_cnt = atomic_read(&device->dev_stats_ccnt);
		if (!device->dev_stats_valid || stats_cnt == 0)
			continue;

		/*
		 * There is a LOAD-LOAD control dependency between the value of
		 * dev_stats_ccnt and updating the on-disk values which requires
		 * reading the in-memory counters. Such control dependencies
		 * require explicit read memory barriers.
		 *
		 * This memory barrier pairs with smp_mb__before_atomic in
		 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
		 * barrier implied by atomic_xchg in
		 * btrfs_dev_stats_read_and_reset
		 */
		smp_rmb();

		ret = update_dev_stat_item(trans, device);
		if (!ret)
			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}

void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
	btrfs_dev_stat_inc(dev, index);
	btrfs_dev_stat_print_on_error(dev);
}

static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
{
	if (!dev->dev_stats_valid)
		return;
	btrfs_err_rl_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
		rcu_str_deref(dev->name),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		if (btrfs_dev_stat_read(dev, i) != 0)
			break;
	if (i == BTRFS_DEV_STAT_VALUES_MAX)
		return; /* all values == 0, suppress message */

	btrfs_info_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
		rcu_str_deref(dev->name),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
			struct btrfs_ioctl_get_dev_stats *stats)
{
	struct btrfs_device *dev;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	int i;

	mutex_lock(&fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info->fs_devices, stats->devid, NULL, NULL,
				true);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (!dev) {
		btrfs_warn(fs_info, "get dev_stats failed, device not found");
		return -ENODEV;
	} else if (!dev->dev_stats_valid) {
		btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
		return -ENODEV;
	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (stats->nr_items > i)
				stats->values[i] =
					btrfs_dev_stat_read_and_reset(dev, i);
			else
				btrfs_dev_stat_reset(dev, i);
		}
	} else {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			if (stats->nr_items > i)
				stats->values[i] = btrfs_dev_stat_read(dev, i);
	}
	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	return 0;
}

void btrfs_scratch_superblocks(struct block_device *bdev, const char *device_path)
{
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	int copy_num;

	if (!bdev)
		return;

	for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) {
		if (btrfs_read_dev_one_super(bdev, copy_num, &bh))
			continue;

		disk_super = (struct btrfs_super_block *)bh->b_data;

		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
		brelse(bh);
	}

	/* Notify udev that device has changed */
	btrfs_kobject_uevent(bdev, KOBJ_CHANGE);

	/* Update ctime/mtime for device path for libblkid */
	update_dev_time(device_path);
}

/*
 * Update the size and bytes used for each device where it changed. This is
 * delayed since we would otherwise get errors while writing out the
 * superblocks.
 *
 * Must be invoked during transaction commit.
 */
void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
{
	struct btrfs_device *curr, *next;

	ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);

	if (list_empty(&trans->dev_update_list))
		return;

	/*
	 * We don't need the device_list_mutex here. This list is owned by the
	 * transaction and the transaction must complete before the device is
	 * released.
	 */
	mutex_lock(&trans->fs_info->chunk_mutex);
	list_for_each_entry_safe(curr, next, &trans->dev_update_list,
				 post_commit_list) {
		list_del_init(&curr->post_commit_list);
		curr->commit_total_bytes = curr->disk_total_bytes;
		curr->commit_bytes_used = curr->bytes_used;
	}
	mutex_unlock(&trans->fs_info->chunk_mutex);
}

void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;

	while (fs_devices) {
		fs_devices->fs_info = fs_info;
		fs_devices = fs_devices->seed;
	}
}

void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;

	while (fs_devices) {
		fs_devices->fs_info = NULL;
		fs_devices = fs_devices->seed;
	}
}

/*
 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
 */
int btrfs_bg_type_to_factor(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	return btrfs_raid_array[index].ncopies;
}

static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
				 u64 chunk_offset, u64 devid,
				 u64 physical_offset, u64 physical_len)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_device *dev;
	u64 stripe_len;
	bool found = false;
	int ret = 0;
	int i;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	if (!em) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
		goto out;
	}

	map = em->map_lookup;
	stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
	if (physical_len != stripe_len) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
			  physical_offset, devid, em->start, physical_len,
			  stripe_len);
		ret = -EUCLEAN;
		goto out;
	}

	for (i = 0; i < map->num_stripes; i++) {
		if (map->stripes[i].dev->devid == devid &&
		    map->stripes[i].physical == physical_offset) {
			found = true;
			if (map->verified_stripes >= map->num_stripes) {
				btrfs_err(fs_info,
				"too many dev extents for chunk %llu found",
					  em->start);
				ret = -EUCLEAN;
				goto out;
			}
			map->verified_stripes++;
			break;
		}
	}
	if (!found) {
		btrfs_err(fs_info,
	"dev extent physical offset %llu devid %llu has no corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
		goto out;
	}

	/* Make sure no dev extent is beyond device boundary */
	dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
	if (!dev) {
		btrfs_err(fs_info, "failed to find devid %llu", devid);
		ret = -EUCLEAN;
		goto out;
	}

	/* It's possible this device is a dummy for seed device */
	if (dev->disk_total_bytes == 0) {
		dev = btrfs_find_device(fs_info->fs_devices->seed, devid, NULL,
					NULL, false);
		if (!dev) {
			btrfs_err(fs_info, "failed to find seed devid %llu",
				  devid);
			ret = -EUCLEAN;
			goto out;
		}
	}

	if (physical_offset + physical_len > dev->disk_total_bytes) {
		btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
			  devid, physical_offset, physical_len,
			  dev->disk_total_bytes);
		ret = -EUCLEAN;
		goto out;
	}
out:
	free_extent_map(em);
	return ret;
}

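/*
 * Verify that every stripe of every chunk was matched by a dev extent,
 * as counted in map->verified_stripes by verify_one_dev_extent().
 */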
static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct rb_node *node;
	int ret = 0;

	read_lock(&em_tree->lock);
	for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
		em = rb_entry(node, struct extent_map, rb_node);
		if (em->map_lookup->num_stripes !=
		    em->map_lookup->verified_stripes) {
			btrfs_err(fs_info,
			"chunk %llu has missing dev extent, have %d expect %d",
				  em->start, em->map_lookup->verified_stripes,
				  em->map_lookup->num_stripes);
			ret = -EUCLEAN;
			goto out;
		}
	}
out:
	read_unlock(&em_tree->lock);
	return ret;
}

/*
 * Ensure that all dev extents are mapped to correct chunk, otherwise
 * later chunk allocation/free would cause unexpected behavior.
 *
 * NOTE: This will iterate through the whole device tree, which should be of
 * the same size level as the chunk tree.  This slightly increases mount time.
 */
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	u64 prev_devid = 0;
	u64 prev_dev_ext_end = 0;
	int ret = 0;

	key.objectid = 1;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		/* No dev extents at all? Not good */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
	while (1) {
		struct extent_buffer *leaf = path->nodes[0];
		struct btrfs_dev_extent *dext;
		int slot = path->slots[0];
		u64 chunk_offset;
		u64 physical_offset;
		u64 physical_len;
		u64 devid;

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type != BTRFS_DEV_EXTENT_KEY)
			break;
		devid = key.objectid;
		physical_offset = key.offset;

		dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
		physical_len = btrfs_dev_extent_length(leaf, dext);

		/* Check if this dev extent overlaps with the previous one */
		if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
			btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
				  devid, physical_offset, prev_dev_ext_end);
			ret = -EUCLEAN;
			goto out;
		}

		ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
					    physical_offset, physical_len);
		if (ret < 0)
			goto out;
		prev_devid = devid;
		prev_dev_ext_end = physical_offset + physical_len;

		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = 0;
			break;
		}
	}

	/* Ensure all chunks have corresponding dev extents */
	ret = verify_chunk_dev_extent_mapping(fs_info);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Check whether the given block group or device is pinned by any inode being
 * used as a swapfile.
 */
bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
{
	struct btrfs_swapfile_pin *sp;
	struct rb_node *node;

	spin_lock(&fs_info->swapfile_pins_lock);
	node = fs_info->swapfile_pins.rb_node;
	while (node) {
		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
		if (ptr < sp->ptr)
			node = node->rb_left;
		else if (ptr > sp->ptr)
			node = node->rb_right;
		else
			break;
	}
	spin_unlock(&fs_info->swapfile_pins_lock);
	return node != NULL;
}