btrfs: remove pointless loop from btrfs_update_block_group()
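
When an extent is allocated or freed, it never crosses a block group
boundary, so the while loop in btrfs_update_block_group() that walked
consecutive block groups always ran exactly once. Remove the loop, look
up the single block group containing the extent, and ASSERT that the
extent fits inside it. This also drops the now unneeded 'total' and
'byte_in_group' variables and lets the function return 0 directly on
success (or -ENOENT when no block group is found).

As a quick illustration of the invariant the new ASSERT encodes, here is
a minimal, self-contained userspace sketch (hypothetical names, not
kernel code; the C library assert() stands in for the kernel's ASSERT()):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for struct btrfs_block_group: one contiguous range. */
struct block_group {
	uint64_t start;		/* first byte covered by this group */
	uint64_t length;	/* size of the group in bytes */
};

/* Single-pass accounting: the extent must fit in this one group. */
static void update_block_group(const struct block_group *bg,
			       uint64_t bytenr, uint64_t num_bytes)
{
	/* An extent can not span multiple block groups. */
	assert(bytenr >= bg->start);
	assert(bytenr + num_bytes <= bg->start + bg->length);

	/* ... the actual used/pinned/reserved updates would go here ... */
	printf("accounted %llu bytes at %llu\n",
	       (unsigned long long)num_bytes, (unsigned long long)bytenr);
}

int main(void)
{
	struct block_group bg = { .start = 1U << 20, .length = 256U << 20 };

	/* Lies entirely inside the group, so the assertions hold. */
	update_block_group(&bg, bg.start + 4096, 8192);
	return 0;
}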
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index 5ba57ea03f42881504f1d99af9bb0e2e1156ac80..72dbfb410e42ed190adf75f003ba9b6afb5b2485 100644
@@ -3542,12 +3542,11 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
                             u64 bytenr, u64 num_bytes, bool alloc)
 {
        struct btrfs_fs_info *info = trans->fs_info;
-       struct btrfs_block_group *cache = NULL;
-       u64 total = num_bytes;
+       struct btrfs_space_info *space_info;
+       struct btrfs_block_group *cache;
        u64 old_val;
-       u64 byte_in_group;
+       bool reclaim = false;
        int factor;
-       int ret = 0;
 
        /* Block accounting for super block */
        spin_lock(&info->delalloc_root_lock);
@@ -3559,97 +3558,85 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
        btrfs_set_super_bytes_used(info->super_copy, old_val);
        spin_unlock(&info->delalloc_root_lock);
 
-       while (total) {
-               struct btrfs_space_info *space_info;
-               bool reclaim = false;
-
-               cache = btrfs_lookup_block_group(info, bytenr);
-               if (!cache) {
-                       ret = -ENOENT;
-                       break;
-               }
-               space_info = cache->space_info;
-               factor = btrfs_bg_type_to_factor(cache->flags);
+       cache = btrfs_lookup_block_group(info, bytenr);
+       if (!cache)
+               return -ENOENT;
 
-               /*
-                * If this block group has free space cache written out, we
-                * need to make sure to load it if we are removing space.  This
-                * is because we need the unpinning stage to actually add the
-                * space back to the block group, otherwise we will leak space.
-                */
-               if (!alloc && !btrfs_block_group_done(cache))
-                       btrfs_cache_block_group(cache, true);
+       /* An extent can not span multiple block groups. */
+       ASSERT(bytenr + num_bytes <= cache->start + cache->length);
 
-               byte_in_group = bytenr - cache->start;
-               WARN_ON(byte_in_group > cache->length);
+       space_info = cache->space_info;
+       factor = btrfs_bg_type_to_factor(cache->flags);
 
-               spin_lock(&space_info->lock);
-               spin_lock(&cache->lock);
+       /*
+        * If this block group has free space cache written out, we need to make
+        * sure to load it if we are removing space.  This is because we need
+        * the unpinning stage to actually add the space back to the block group,
+        * otherwise we will leak space.
+        */
+       if (!alloc && !btrfs_block_group_done(cache))
+               btrfs_cache_block_group(cache, true);
 
-               if (btrfs_test_opt(info, SPACE_CACHE) &&
-                   cache->disk_cache_state < BTRFS_DC_CLEAR)
-                       cache->disk_cache_state = BTRFS_DC_CLEAR;
+       spin_lock(&space_info->lock);
+       spin_lock(&cache->lock);
 
-               old_val = cache->used;
-               num_bytes = min(total, cache->length - byte_in_group);
-               if (alloc) {
-                       old_val += num_bytes;
-                       cache->used = old_val;
-                       cache->reserved -= num_bytes;
-                       space_info->bytes_reserved -= num_bytes;
-                       space_info->bytes_used += num_bytes;
-                       space_info->disk_used += num_bytes * factor;
-                       spin_unlock(&cache->lock);
-                       spin_unlock(&space_info->lock);
-               } else {
-                       old_val -= num_bytes;
-                       cache->used = old_val;
-                       cache->pinned += num_bytes;
-                       btrfs_space_info_update_bytes_pinned(info, space_info,
-                                                            num_bytes);
-                       space_info->bytes_used -= num_bytes;
-                       space_info->disk_used -= num_bytes * factor;
+       if (btrfs_test_opt(info, SPACE_CACHE) &&
+           cache->disk_cache_state < BTRFS_DC_CLEAR)
+               cache->disk_cache_state = BTRFS_DC_CLEAR;
 
-                       reclaim = should_reclaim_block_group(cache, num_bytes);
+       old_val = cache->used;
+       if (alloc) {
+               old_val += num_bytes;
+               cache->used = old_val;
+               cache->reserved -= num_bytes;
+               space_info->bytes_reserved -= num_bytes;
+               space_info->bytes_used += num_bytes;
+               space_info->disk_used += num_bytes * factor;
+               spin_unlock(&cache->lock);
+               spin_unlock(&space_info->lock);
+       } else {
+               old_val -= num_bytes;
+               cache->used = old_val;
+               cache->pinned += num_bytes;
+               btrfs_space_info_update_bytes_pinned(info, space_info, num_bytes);
+               space_info->bytes_used -= num_bytes;
+               space_info->disk_used -= num_bytes * factor;
 
-                       spin_unlock(&cache->lock);
-                       spin_unlock(&space_info->lock);
+               reclaim = should_reclaim_block_group(cache, num_bytes);
 
-                       set_extent_bit(&trans->transaction->pinned_extents,
-                                      bytenr, bytenr + num_bytes - 1,
-                                      EXTENT_DIRTY, NULL);
-               }
+               spin_unlock(&cache->lock);
+               spin_unlock(&space_info->lock);
 
-               spin_lock(&trans->transaction->dirty_bgs_lock);
-               if (list_empty(&cache->dirty_list)) {
-                       list_add_tail(&cache->dirty_list,
-                                     &trans->transaction->dirty_bgs);
-                       trans->delayed_ref_updates++;
-                       btrfs_get_block_group(cache);
-               }
-               spin_unlock(&trans->transaction->dirty_bgs_lock);
+               set_extent_bit(&trans->transaction->pinned_extents, bytenr,
+                              bytenr + num_bytes - 1, EXTENT_DIRTY, NULL);
+       }
 
-               /*
-                * No longer have used bytes in this block group, queue it for
-                * deletion. We do this after adding the block group to the
-                * dirty list to avoid races between cleaner kthread and space
-                * cache writeout.
-                */
-               if (!alloc && old_val == 0) {
-                       if (!btrfs_test_opt(info, DISCARD_ASYNC))
-                               btrfs_mark_bg_unused(cache);
-               } else if (!alloc && reclaim) {
-                       btrfs_mark_bg_to_reclaim(cache);
-               }
+       spin_lock(&trans->transaction->dirty_bgs_lock);
+       if (list_empty(&cache->dirty_list)) {
+               list_add_tail(&cache->dirty_list, &trans->transaction->dirty_bgs);
+               trans->delayed_ref_updates++;
+               btrfs_get_block_group(cache);
+       }
+       spin_unlock(&trans->transaction->dirty_bgs_lock);
 
-               btrfs_put_block_group(cache);
-               total -= num_bytes;
-               bytenr += num_bytes;
+       /*
+        * No longer have used bytes in this block group, queue it for deletion.
+        * We do this after adding the block group to the dirty list to avoid
+        * races between cleaner kthread and space cache writeout.
+        */
+       if (!alloc && old_val == 0) {
+               if (!btrfs_test_opt(info, DISCARD_ASYNC))
+                       btrfs_mark_bg_unused(cache);
+       } else if (!alloc && reclaim) {
+               btrfs_mark_bg_to_reclaim(cache);
        }
 
+       btrfs_put_block_group(cache);
+
        /* Modified block groups are accounted for in the delayed_refs_rsv. */
        btrfs_update_delayed_refs_rsv(trans);
-       return ret;
+
+       return 0;
 }
 
 /*