btrfs: zoned: fix mounting with conventional zones
author    Johannes Thumshirn <johannes.thumshirn@wdc.com>
          Mon, 5 Sep 2022 12:38:24 +0000 (05:38 -0700)
committer David Sterba <dsterba@suse.com>
          Mon, 5 Sep 2022 13:39:10 +0000 (15:39 +0200)
Since commit 6a921de58992 ("btrfs: zoned: introduce
space_info->active_total_bytes"), we're only counting the bytes of a
block group on an active zone as usable for metadata writes. But on an
SMR drive, we don't have active zones and short-circuit some of the
logic.

This leads to an error on mount, because we cannot reserve space for
metadata writes.

Fix this by also setting the BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE bit in the
block group's runtime flags if the zone is a conventional zone.

Fixes: 6a921de58992 ("btrfs: zoned: introduce space_info->active_total_bytes")
Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: David Sterba <dsterba@suse.com>
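
As a minimal stand-alone model of the activation rule this patch introduces
(struct zone_model and zone_counts_as_active() are hypothetical names for
illustration only, not kernel API): a device reporting max_active_zones == 0
imposes no active-zone limit, so every zone, conventional ones included,
counts as active.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical model of a single device zone; not a kernel type. */
struct zone_model {
	bool sequential;         /* false => conventional zone */
	bool explicitly_active;  /* a sequential zone that was activated */
};

/*
 * The fixed rule: with max_active_zones == 0 there is no limit, so
 * every zone counts as active and its bytes remain usable for
 * metadata reservations.
 */
static bool zone_counts_as_active(unsigned int max_active_zones,
                                  const struct zone_model *zone)
{
	if (!max_active_zones)
		return true;
	return zone->sequential && zone->explicitly_active;
}

int main(void)
{
	const struct zone_model conv = { .sequential = false };
	const struct zone_model seq = { .sequential = true };

	/* SMR drive with no active-zone limit: conventional zones count. */
	printf("no limit, conventional: %d\n", zone_counts_as_active(0, &conv));
	/* Device with a limit: an unactivated sequential zone does not. */
	printf("limit=14, sequential: %d\n", zone_counts_as_active(14, &seq));
	return 0;
}

Before the fix, the conventional-zone path in the zone-info loop hit
"continue" before the max_active_zones check was reached, so those zones
never got the active bit, space_info->active_total_bytes under-counted,
and the metadata reservation at mount failed.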
fs/btrfs/zoned.c

index f576e30ceb33bce4e4e93b99027cb2ee91c41bcb..62e7007a7e46c6ea8bc04f4ba48638498052c11b 100644
@@ -1187,7 +1187,7 @@ int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size)
  * offset.
  */
 static int calculate_alloc_pointer(struct btrfs_block_group *cache,
-                                  u64 *offset_ret)
+                                  u64 *offset_ret, bool new)
 {
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_root *root;
@@ -1197,6 +1197,21 @@ static int calculate_alloc_pointer(struct btrfs_block_group *cache,
        int ret;
        u64 length;
 
+       /*
+        * Avoid tree lookups for a new block group, there's no use for it.
+        * It must always be 0.
+        *
+        * Also, we have a lock chain of extent buffer lock -> chunk mutex.
+        * For a new block group, this function is called from
+        * btrfs_make_block_group() which is already taking the chunk mutex.
+        * Thus, we cannot call calculate_alloc_pointer() which takes extent
+        * buffer locks to avoid deadlock.
+        */
+       if (new) {
+               *offset_ret = 0;
+               return 0;
+       }
+
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
@@ -1332,6 +1347,13 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
                else
                        num_conventional++;
 
+               /*
+                * Consider a zone as active if we can allow any number of
+                * active zones.
+                */
+               if (!device->zone_info->max_active_zones)
+                       __set_bit(i, active);
+
                if (!is_sequential) {
                        alloc_offsets[i] = WP_CONVENTIONAL;
                        continue;
@@ -1398,45 +1420,23 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
                        __set_bit(i, active);
                        break;
                }
-
-               /*
-                * Consider a zone as active if we can allow any number of
-                * active zones.
-                */
-               if (!device->zone_info->max_active_zones)
-                       __set_bit(i, active);
        }
 
        if (num_sequential > 0)
                cache->seq_zone = true;
 
        if (num_conventional > 0) {
-               /*
-                * Avoid calling calculate_alloc_pointer() for new BG. It
-                * is no use for new BG. It must be always 0.
-                *
-                * Also, we have a lock chain of extent buffer lock ->
-                * chunk mutex.  For new BG, this function is called from
-                * btrfs_make_block_group() which is already taking the
-                * chunk mutex. Thus, we cannot call
-                * calculate_alloc_pointer() which takes extent buffer
-                * locks to avoid deadlock.
-                */
-
                /* Zone capacity is always zone size in emulation */
                cache->zone_capacity = cache->length;
-               if (new) {
-                       cache->alloc_offset = 0;
-                       goto out;
-               }
-               ret = calculate_alloc_pointer(cache, &last_alloc);
-               if (ret || map->num_stripes == num_conventional) {
-                       if (!ret)
-                               cache->alloc_offset = last_alloc;
-                       else
-                               btrfs_err(fs_info,
+               ret = calculate_alloc_pointer(cache, &last_alloc, new);
+               if (ret) {
+                       btrfs_err(fs_info,
                        "zoned: failed to determine allocation offset of bg %llu",
-                                         cache->start);
+                                 cache->start);
+                       goto out;
+               } else if (map->num_stripes == num_conventional) {
+                       cache->alloc_offset = last_alloc;
+                       cache->zone_is_active = 1;
                        goto out;
                }
        }
@@ -1504,13 +1504,6 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
                goto out;
        }
 
-       if (cache->zone_is_active) {
-               btrfs_get_block_group(cache);
-               spin_lock(&fs_info->zone_active_bgs_lock);
-               list_add_tail(&cache->active_bg_list, &fs_info->zone_active_bgs);
-               spin_unlock(&fs_info->zone_active_bgs_lock);
-       }
-
 out:
        if (cache->alloc_offset > fs_info->zone_size) {
                btrfs_err(fs_info,
@@ -1535,10 +1528,16 @@ out:
                ret = -EIO;
        }
 
-       if (!ret)
+       if (!ret) {
                cache->meta_write_pointer = cache->alloc_offset + cache->start;
-
-       if (ret) {
+               if (cache->zone_is_active) {
+                       btrfs_get_block_group(cache);
+                       spin_lock(&fs_info->zone_active_bgs_lock);
+                       list_add_tail(&cache->active_bg_list,
+                                     &fs_info->zone_active_bgs);
+                       spin_unlock(&fs_info->zone_active_bgs_lock);
+               }
+       } else {
                kfree(cache->physical_map);
                cache->physical_map = NULL;
        }