btrfs: fix use-after-free warning in btrfs_get_or_create_delayed_node()
Author:     Leo Martins <loemra.dev@gmail.com>
AuthorDate: Sat, 13 Dec 2025 01:26:26 +0000 (17:26 -0800)
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Sat, 17 Jan 2026 15:35:18 +0000 (16:35 +0100)
[ Upstream commit 83f59076a1ae6f5c6845d6f7ed3a1a373d883684 ]

Previously, btrfs_get_or_create_delayed_node() set the delayed_node's
refcount before acquiring the root->delayed_nodes lock.
Commit e8513c012de7 ("btrfs: implement ref_tracker for delayed_nodes")
moved the refcount_set() call inside the critical section, so there is
no longer a memory barrier between setting the refcount and setting
btrfs_inode->delayed_node.

Without that barrier, the stores to node->refs and
btrfs_inode->delayed_node may become visible out of order. Another
thread can then read btrfs_inode->delayed_node and attempt to
increment a refcount that hasn't been set yet, leading to a
refcounting bug and a use-after-free warning.
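
For illustration only, here is a minimal user-space model of this kind
of publication race, using C11 atomics as stand-ins for the kernel
primitives. Everything here (race_model.c, struct node, the thread
setup) is hypothetical and not the btrfs code; release ordering on the
pointer store plays the role of the implicit barrier from lock
acquisition:

    /* race_model.c - sketch of a refcount publication race.
     * Producer publishes a node; consumer takes a reference through
     * the published pointer. If the refcount store is not ordered
     * before the pointer store, the consumer can observe refs == 0.
     * Build with: cc -pthread race_model.c
     */
    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <pthread.h>

    struct node {
            atomic_int refs;
    };

    /* Models btrfs_inode->delayed_node. */
    static _Atomic(struct node *) delayed_node;

    static void *producer(void *arg)
    {
            struct node *n = malloc(sizeof(*n));

            (void)arg;
            atomic_store_explicit(&n->refs, 2, memory_order_relaxed);
            /* The buggy variant corresponds to memory_order_relaxed
             * here: the pointer may become visible before the
             * refcount store. Release ordering models the barrier
             * the kernel gets from lock acquisition. */
            atomic_store_explicit(&delayed_node, n, memory_order_release);
            return NULL;
    }

    static void *consumer(void *arg)
    {
            struct node *n;

            (void)arg;
            while (!(n = atomic_load_explicit(&delayed_node,
                                              memory_order_acquire)))
                    ;
            /* refcount_inc() warns when it sees zero; model that. */
            if (atomic_fetch_add_explicit(&n->refs, 1,
                                          memory_order_relaxed) == 0)
                    fprintf(stderr, "warning: refcount was 0\n");
            return NULL;
    }

    int main(void)
    {
            pthread_t p, c;

            pthread_create(&c, NULL, consumer, NULL);
            pthread_create(&p, NULL, producer, NULL);
            pthread_join(p, NULL);
            pthread_join(c, NULL);
            return 0;
    }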

The fix is to move the refcount_set() call back to before the lock is
acquired, taking advantage of the implicit memory barrier provided by
lock acquisition.

Because the ref_tracker allocations now happen outside the lock's
critical section, they can use GFP_NOFS instead of GFP_ATOMIC.
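
As a condensed sketch of the ordering the patch restores (simplified
from the hunk below, not the complete function):

    /* Refcount and trackers are set up before taking the lock; the
     * implicit barrier in xa_lock() orders these stores before the
     * publication of the node pointer. */
    refcount_set(&node->refs, 2);
    btrfs_delayed_node_ref_tracker_alloc(node, tracker, GFP_NOFS);

    xa_lock(&root->delayed_nodes);
    __xa_store(&root->delayed_nodes, ino, node, GFP_ATOMIC);
    btrfs_inode->delayed_node = node;   /* readers now see refs == 2 */
    xa_unlock(&root->delayed_nodes);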

Reported-by: kernel test robot <oliver.sang@intel.com>
Closes: https://lore.kernel.org/oe-lkp/202511262228.6dda231e-lkp@intel.com
Fixes: e8513c012de7 ("btrfs: implement ref_tracker for delayed_nodes")
Tested-by: kernel test robot <oliver.sang@intel.com>
Reviewed-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Leo Martins <loemra.dev@gmail.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 3df7b9d7fbe8d3854087c569b4d1e9309a517dd3..59b489d7e4b588748761b9d59ec3673c9834ba52 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -152,37 +152,39 @@ again:
                return ERR_PTR(-ENOMEM);
        btrfs_init_delayed_node(node, root, ino);
 
+       /* Cached in the inode and can be accessed. */
+       refcount_set(&node->refs, 2);
+       btrfs_delayed_node_ref_tracker_alloc(node, tracker, GFP_NOFS);
+       btrfs_delayed_node_ref_tracker_alloc(node, &node->inode_cache_tracker, GFP_NOFS);
+
        /* Allocate and reserve the slot, from now it can return a NULL from xa_load(). */
        ret = xa_reserve(&root->delayed_nodes, ino, GFP_NOFS);
-       if (ret == -ENOMEM) {
-               btrfs_delayed_node_ref_tracker_dir_exit(node);
-               kmem_cache_free(delayed_node_cache, node);
-               return ERR_PTR(-ENOMEM);
-       }
+       if (ret == -ENOMEM)
+               goto cleanup;
+
        xa_lock(&root->delayed_nodes);
        ptr = xa_load(&root->delayed_nodes, ino);
        if (ptr) {
                /* Somebody inserted it, go back and read it. */
                xa_unlock(&root->delayed_nodes);
-               btrfs_delayed_node_ref_tracker_dir_exit(node);
-               kmem_cache_free(delayed_node_cache, node);
-               node = NULL;
-               goto again;
+               goto cleanup;
        }
        ptr = __xa_store(&root->delayed_nodes, ino, node, GFP_ATOMIC);
        ASSERT(xa_err(ptr) != -EINVAL);
        ASSERT(xa_err(ptr) != -ENOMEM);
        ASSERT(ptr == NULL);
-
-       /* Cached in the inode and can be accessed. */
-       refcount_set(&node->refs, 2);
-       btrfs_delayed_node_ref_tracker_alloc(node, tracker, GFP_ATOMIC);
-       btrfs_delayed_node_ref_tracker_alloc(node, &node->inode_cache_tracker, GFP_ATOMIC);
-
        btrfs_inode->delayed_node = node;
        xa_unlock(&root->delayed_nodes);
 
        return node;
+cleanup:
+       btrfs_delayed_node_ref_tracker_free(node, tracker);
+       btrfs_delayed_node_ref_tracker_free(node, &node->inode_cache_tracker);
+       btrfs_delayed_node_ref_tracker_dir_exit(node);
+       kmem_cache_free(delayed_node_cache, node);
+       if (ret)
+               return ERR_PTR(ret);
+       goto again;
 }
 
 /*