 				    struct find_free_extent_ctl *ffe_ctl,
 				    struct btrfs_space_info *space_info)
 {
+	struct btrfs_block_group *block_group;
+
 	if (ffe_ctl->for_treelog) {
 		spin_lock(&fs_info->treelog_bg_lock);
 		if (fs_info->treelog_bg)
 			ffe_ctl->hint_byte = fs_info->treelog_bg;
 		spin_unlock(&fs_info->treelog_bg_lock);
-	} else if (ffe_ctl->for_data_reloc) {
+		return 0;
+	}
+
+	if (ffe_ctl->for_data_reloc) {
 		spin_lock(&fs_info->relocation_bg_lock);
 		if (fs_info->data_reloc_bg)
 			ffe_ctl->hint_byte = fs_info->data_reloc_bg;
 		spin_unlock(&fs_info->relocation_bg_lock);
-	} else if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) {
-		struct btrfs_block_group *block_group;
-
+		return 0;
+	}
+
-		spin_lock(&fs_info->zone_active_bgs_lock);
-		list_for_each_entry(block_group, &fs_info->zone_active_bgs, active_bg_list) {
-			/*
-			 * No lock is OK here because avail is monotonically
-			 * decreasing, and this is just a hint.
-			 */
-			u64 avail = block_group->zone_capacity - block_group->alloc_offset;
-
+	if (!(ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA))
+		return 0;
+
-			if (block_group_bits(block_group, ffe_ctl->flags) &&
-			    block_group->space_info == space_info &&
-			    avail >= ffe_ctl->num_bytes) {
-				ffe_ctl->hint_byte = block_group->start;
-				break;
-			}
+	spin_lock(&fs_info->zone_active_bgs_lock);
+	list_for_each_entry(block_group, &fs_info->zone_active_bgs, active_bg_list) {
+		/*
+		 * No lock is OK here because avail is monotonically
+		 * decreasing, and this is just a hint.
+		 */
+		u64 avail = block_group->zone_capacity - block_group->alloc_offset;
+
+		if (block_group_bits(block_group, ffe_ctl->flags) &&
+		    block_group->space_info == space_info &&
+		    avail >= ffe_ctl->num_bytes) {
+			ffe_ctl->hint_byte = block_group->start;
+			break;
+		}
 		}
-		spin_unlock(&fs_info->zone_active_bgs_lock);
 	}
+	spin_unlock(&fs_info->zone_active_bgs_lock);
 
 	return 0;
 }
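
The subtle part above is the unlocked read inside the loop: zone_active_bgs_lock protects the walk over fs_info->zone_active_bgs, but alloc_offset can still advance while we look at it. As the comment says, that is safe because alloc_offset only grows, so avail only shrinks, and hint_byte is advisory: a stale hint costs the allocator a retry, not correctness. Here is a minimal user-space sketch of that pattern, not the kernel code; pick_hint() and the array stand in for the block group list, and only the field names mirror the patch.

/*
 * Minimal user-space sketch of the hint scan above; the list walk is
 * simplified to an array and all names besides the field names are
 * invented for illustration.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct block_group {
	uint64_t start;		/* logical start of the block group */
	uint64_t zone_capacity;	/* usable bytes in the zone */
	uint64_t alloc_offset;	/* bytes already allocated; only grows */
};

/* Return the start of the first group with enough room, or 0 for "no hint". */
static uint64_t pick_hint(const struct block_group *bgs, size_t n,
			  uint64_t num_bytes)
{
	for (size_t i = 0; i < n; i++) {
		/*
		 * Reading alloc_offset without a lock is fine here for the
		 * same reason as in the patch: it only grows, so avail only
		 * shrinks, and a stale (too large) value merely yields a
		 * hint that the allocator will re-check properly later.
		 */
		uint64_t avail = bgs[i].zone_capacity - bgs[i].alloc_offset;

		if (avail >= num_bytes)
			return bgs[i].start;
	}
	return 0;
}

int main(void)
{
	struct block_group bgs[] = {
		{ .start = 0x100000, .zone_capacity = 256, .alloc_offset = 200 },
		{ .start = 0x200000, .zone_capacity = 256, .alloc_offset = 64 },
	};

	/* 128 bytes do not fit in the first group (56 left), so group 2 wins. */
	printf("hint: 0x%llx\n", (unsigned long long)pick_hint(bgs, 2, 128));
	return 0;
}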
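Compiled with cc -std=c99, the sketch prints hint: 0x200000: the first group has only 56 of 256 bytes left, so the 128-byte request falls through to the second group, just as the loop above skips active zones whose avail cannot cover num_bytes.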