git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
btrfs: zoned: re-flow prepare_allocation_zoned()
authorJohannes Thumshirn <johannes.thumshirn@wdc.com>
Mon, 15 Dec 2025 10:38:18 +0000 (11:38 +0100)
committerDavid Sterba <dsterba@suse.com>
Tue, 3 Feb 2026 06:49:10 +0000 (07:49 +0100)
Re-flow prepare allocation zoned to make it a bit more readable by
returning early and removing unnecessary indentations.

This patch does not change any functionality.

Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/extent-tree.c

index 3b840a4fdf1c0d5126c7a2e2f1a51a7ee409f4bf..1dcd69fe97ed529d3b8d1dfc0384ce20ac736813 100644 (file)
@@ -4284,36 +4284,43 @@ static int prepare_allocation_zoned(struct btrfs_fs_info *fs_info,
                                    struct find_free_extent_ctl *ffe_ctl,
                                    struct btrfs_space_info *space_info)
 {
+       struct btrfs_block_group *block_group;
+
        if (ffe_ctl->for_treelog) {
                spin_lock(&fs_info->treelog_bg_lock);
                if (fs_info->treelog_bg)
                        ffe_ctl->hint_byte = fs_info->treelog_bg;
                spin_unlock(&fs_info->treelog_bg_lock);
-       } else if (ffe_ctl->for_data_reloc) {
+               return 0;
+       }
+
+       if (ffe_ctl->for_data_reloc) {
                spin_lock(&fs_info->relocation_bg_lock);
                if (fs_info->data_reloc_bg)
                        ffe_ctl->hint_byte = fs_info->data_reloc_bg;
                spin_unlock(&fs_info->relocation_bg_lock);
-       } else if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) {
-               struct btrfs_block_group *block_group;
+               return 0;
+       }
 
-               spin_lock(&fs_info->zone_active_bgs_lock);
-               list_for_each_entry(block_group, &fs_info->zone_active_bgs, active_bg_list) {
-                       /*
-                        * No lock is OK here because avail is monotonically
-                        * decreasing, and this is just a hint.
-                        */
-                       u64 avail = block_group->zone_capacity - block_group->alloc_offset;
+       if (!(ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA))
+               return 0;
 
-                       if (block_group_bits(block_group, ffe_ctl->flags) &&
-                           block_group->space_info == space_info &&
-                           avail >= ffe_ctl->num_bytes) {
-                               ffe_ctl->hint_byte = block_group->start;
-                               break;
-                       }
+       spin_lock(&fs_info->zone_active_bgs_lock);
+       list_for_each_entry(block_group, &fs_info->zone_active_bgs, active_bg_list) {
+               /*
+                * No lock is OK here because avail is monotonically
+                * decreasing, and this is just a hint.
+                */
+               u64 avail = block_group->zone_capacity - block_group->alloc_offset;
+
+               if (block_group_bits(block_group, ffe_ctl->flags) &&
+                   block_group->space_info == space_info &&
+                   avail >= ffe_ctl->num_bytes) {
+                       ffe_ctl->hint_byte = block_group->start;
+                       break;
                }
-               spin_unlock(&fs_info->zone_active_bgs_lock);
        }
+       spin_unlock(&fs_info->zone_active_bgs_lock);
 
        return 0;
 }