btrfs: zoned: properly take lock to read/update block group's zoned variables
author     Naohiro Aota <naohiro.aota@wdc.com>              Thu, 1 Aug 2024 07:47:52 +0000 (16:47 +0900)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  Thu, 29 Aug 2024 15:35:42 +0000 (17:35 +0200)
commit e30729d4bd4001881be4d1ad4332a5d4985398f8 upstream.

__btrfs_add_free_space_zoned() references and modifies the block group's
alloc_offset, ro, and zone_unusable, but without taking block_group->lock.
This is mostly safe because these values only increase monotonically (at
least for now) and the function is mostly called from a transaction commit,
which is itself serialized.

Still, taking the lock is the safer and correct option, and I'm going to add
a change that resets zone_unusable while a block group is still alive. So,
add locking around the operations.

Fixes: 169e0da91a21 ("btrfs: zoned: track unusable bytes for zones")
CC: stable@vger.kernel.org # 5.15+
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
fs/btrfs/free-space-cache.c

index 62c3dea9572ab7681c4b8eefe3b6429cb3670b5e..1926a228d0ba0e4be9d8d956c2a538272fbca01f 100644
@@ -2698,15 +2698,16 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
        u64 offset = bytenr - block_group->start;
        u64 to_free, to_unusable;
        int bg_reclaim_threshold = 0;
-       bool initial = ((size == block_group->length) && (block_group->alloc_offset == 0));
+       bool initial;
        u64 reclaimable_unusable;
 
-       WARN_ON(!initial && offset + size > block_group->zone_capacity);
+       spin_lock(&block_group->lock);
 
+       initial = ((size == block_group->length) && (block_group->alloc_offset == 0));
+       WARN_ON(!initial && offset + size > block_group->zone_capacity);
        if (!initial)
                bg_reclaim_threshold = READ_ONCE(sinfo->bg_reclaim_threshold);
 
-       spin_lock(&ctl->tree_lock);
        if (!used)
                to_free = size;
        else if (initial)
@@ -2719,7 +2720,9 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
                to_free = offset + size - block_group->alloc_offset;
        to_unusable = size - to_free;
 
+       spin_lock(&ctl->tree_lock);
        ctl->free_space += to_free;
+       spin_unlock(&ctl->tree_lock);
        /*
         * If the block group is read-only, we should account freed space into
         * bytes_readonly.
@@ -2728,11 +2731,8 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
                block_group->zone_unusable += to_unusable;
                WARN_ON(block_group->zone_unusable > block_group->length);
        }
-       spin_unlock(&ctl->tree_lock);
        if (!used) {
-               spin_lock(&block_group->lock);
                block_group->alloc_offset -= size;
-               spin_unlock(&block_group->lock);
        }
 
        reclaimable_unusable = block_group->zone_unusable -
@@ -2746,6 +2746,8 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
                btrfs_mark_bg_to_reclaim(block_group);
        }
 
+       spin_unlock(&block_group->lock);
+
        return 0;
 }
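
For reference, a simplified sketch of __btrfs_add_free_space_zoned() as it reads
after this change, reconstructed from the hunks above. The parameter list beyond
block_group, the ctl/sinfo locals, and the parts marked as elided are assumptions
filled in for readability, not content of this diff:

static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
					u64 bytenr, u64 size, bool used)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; /* assumed */
	struct btrfs_space_info *sinfo = block_group->space_info;       /* assumed */
	u64 offset = bytenr - block_group->start;
	u64 to_free, to_unusable;
	int bg_reclaim_threshold = 0;
	bool initial;
	u64 reclaimable_unusable;

	/*
	 * The zoned fields (alloc_offset, ro, zone_unusable) are now read
	 * and updated under block_group->lock for the whole function.
	 */
	spin_lock(&block_group->lock);

	initial = ((size == block_group->length) &&
		   (block_group->alloc_offset == 0));
	WARN_ON(!initial && offset + size > block_group->zone_capacity);
	if (!initial)
		bg_reclaim_threshold = READ_ONCE(sinfo->bg_reclaim_threshold);

	/* ... compute to_free from used/initial/alloc_offset (unchanged, elided) ... */
	to_unusable = size - to_free;

	/*
	 * ctl->tree_lock now nests inside block_group->lock and only covers
	 * the free-space counter update.
	 */
	spin_lock(&ctl->tree_lock);
	ctl->free_space += to_free;
	spin_unlock(&ctl->tree_lock);

	/*
	 * ... account to_unusable into zone_unusable or bytes_readonly
	 * depending on block_group->ro (unchanged, elided) ...
	 */

	if (!used) {
		block_group->alloc_offset -= size;
	}

	/*
	 * ... reclaimable_unusable / bg_reclaim_threshold check, possibly
	 * calling btrfs_mark_bg_to_reclaim(block_group) (unchanged, elided) ...
	 */

	spin_unlock(&block_group->lock);

	return 0;
}

The net effect is that every access to alloc_offset, ro and zone_unusable in this
path happens under block_group->lock, while ctl->tree_lock is narrowed to the
free_space update and nests inside it.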