goto out;
}
- num_bytes = cache->length - cache->reserved - cache->pinned -
- cache->bytes_super - cache->zone_unusable - cache->used;
+ num_bytes = btrfs_block_group_available_space(cache);
/*
* Data never overcommits, even in mixed mode, so do just the straight
void btrfs_dec_block_group_ro(struct btrfs_block_group *cache)
{
struct btrfs_space_info *sinfo = cache->space_info;
- u64 num_bytes;
BUG_ON(!cache->ro);
btrfs_space_info_update_bytes_zone_unusable(sinfo, cache->zone_unusable);
sinfo->bytes_readonly -= cache->zone_unusable;
}
- num_bytes = cache->length - cache->reserved -
- cache->pinned - cache->bytes_super -
- cache->zone_unusable - cache->used;
- sinfo->bytes_readonly -= num_bytes;
+ sinfo->bytes_readonly -= btrfs_block_group_available_space(cache);
list_del_init(&cache->ro_list);
}
spin_unlock(&cache->lock);
!(block_group->flags & BTRFS_BLOCK_GROUP_METADATA);
}
+/*
+ * Return the amount of space in @bg that is not accounted in any other
+ * category: the block group's length minus its used, pinned, reserved,
+ * super and zone_unusable bytes.
+ *
+ * Callers must hold @bg->lock, since the counters read here are updated
+ * under that lock (enforced via lockdep).
+ */
+static inline u64 btrfs_block_group_available_space(const struct btrfs_block_group *bg)
+{
+	lockdep_assert_held(&bg->lock);
+
+	return (bg->length - bg->used - bg->pinned - bg->reserved -
+		bg->bytes_super - bg->zone_unusable);
+}
+
#ifdef CONFIG_BTRFS_DEBUG
int btrfs_should_fragment_free_space(const struct btrfs_block_group *block_group);
#endif
u64 avail;
spin_lock(&cache->lock);
- avail = cache->length - cache->used - cache->pinned -
- cache->reserved - cache->bytes_super - cache->zone_unusable;
+ avail = btrfs_block_group_available_space(cache);
btrfs_info(fs_info,
"block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu delalloc %llu super %llu zone_unusable (%llu bytes available) %s",
cache->start, cache->length, cache->used, cache->pinned,