btrfs: qgroup: fix race between quota disable and quota rescan ioctl
author     Filipe Manana <fdmanana@suse.com>
           Mon, 30 Jun 2025 12:19:20 +0000 (13:19 +0100)
committer  David Sterba <dsterba@suse.com>
           Mon, 21 Jul 2025 22:04:49 +0000 (00:04 +0200)
There's a race between a task disabling quotas and another running the
rescan ioctl that can result in a use-after-free of qgroup records from
the fs_info->qgroup_tree rbtree.

This happens as follows:

1) Task A enters btrfs_ioctl_quota_rescan() -> btrfs_qgroup_rescan();

2) Task B enters btrfs_quota_disable() and calls
   btrfs_qgroup_wait_for_completion(), which does nothing because at that
   point fs_info->qgroup_rescan_running is false (it wasn't set yet by
   task A);

3) Task B calls btrfs_free_qgroup_config() which starts freeing qgroups
   from fs_info->qgroup_tree without taking the lock fs_info->qgroup_lock;

4) Task A enters qgroup_rescan_zero_tracking(), which starts iterating
   the fs_info->qgroup_tree rbtree while holding fs_info->qgroup_lock,
   but task B is freeing qgroup records from that tree without holding
   the lock, resulting in a use-after-free.

Fix this by taking fs_info->qgroup_lock in btrfs_free_qgroup_config().
Also, in btrfs_qgroup_rescan(), don't start the rescan worker if quotas
were already disabled.
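
For illustration only, here is a minimal userspace analogue of the locking
pattern the fix applies (not kernel code: the names below are made up, and a
pthread mutex plus a linked list stand in for fs_info->qgroup_lock and the
qgroup rbtree). Teardown and iteration of the shared structure serialize on
the same lock, and the per-node cleanup is done after dropping it, mirroring
how the fix keeps btrfs_sysfs_del_one_qgroup(), which may sleep, outside the
spinlock:

  #include <pthread.h>
  #include <stdio.h>
  #include <stdlib.h>

  struct node {
          int value;
          struct node *next;
  };

  /* Stand-ins for fs_info->qgroup_tree and fs_info->qgroup_lock. */
  static struct node *tree_head;
  static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

  /* Analogue of btrfs_free_qgroup_config(): unlink under the lock, free outside it. */
  static void *teardown_thread(void *arg)
  {
          (void)arg;
          pthread_mutex_lock(&tree_lock);
          while (tree_head) {
                  struct node *n = tree_head;

                  tree_head = n->next;       /* unlink while holding the lock */
                  pthread_mutex_unlock(&tree_lock);
                  free(n);                   /* per-node cleanup without the lock held */
                  pthread_mutex_lock(&tree_lock);
          }
          pthread_mutex_unlock(&tree_lock);
          return NULL;
  }

  /* Analogue of qgroup_rescan_zero_tracking(): iterate only while holding the lock. */
  static void *rescan_thread(void *arg)
  {
          long sum = 0;

          (void)arg;
          pthread_mutex_lock(&tree_lock);
          for (struct node *n = tree_head; n; n = n->next)
                  sum += n->value;           /* safe: teardown cannot free nodes here */
          pthread_mutex_unlock(&tree_lock);
          printf("sum seen by rescan: %ld\n", sum);
          return NULL;
  }

  int main(void)
  {
          pthread_t a, b;

          for (int i = 0; i < 1000; i++) {
                  struct node *n = malloc(sizeof(*n));

                  n->value = i;
                  n->next = tree_head;
                  tree_head = n;
          }
          pthread_create(&a, NULL, rescan_thread, NULL);
          pthread_create(&b, NULL, teardown_thread, NULL);
          pthread_join(a, NULL);
          pthread_join(b, NULL);
          return 0;
  }

Before the fix, the teardown side ran its loop without taking the lock at
all, which is the use-after-free window described in steps 3) and 4) above.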

Reported-by: cen zhang <zzzccc427@gmail.com>
Link: https://lore.kernel.org/linux-btrfs/CAFRLqsV+cMDETFuzqdKSHk_FDm6tneea45krsHqPD6B3FetLpQ@mail.gmail.com/
CC: stable@vger.kernel.org # 6.1+
Reviewed-by: Boris Burkov <boris@bur.io>
Reviewed-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/qgroup.c

index b83d9534adaed3bffdea930a7f3357219b3c3dce..310ca2dd9f242f3be5769f7c7c348f83c87ede3e 100644 (file)
@@ -636,22 +636,30 @@ bool btrfs_check_quota_leak(const struct btrfs_fs_info *fs_info)
 
 /*
  * This is called from close_ctree() or open_ctree() or btrfs_quota_disable(),
- * first two are in single-threaded paths.And for the third one, we have set
- * quota_root to be null with qgroup_lock held before, so it is safe to clean
- * up the in-memory structures without qgroup_lock held.
+ * first two are in single-threaded paths.
  */
 void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
 {
        struct rb_node *n;
        struct btrfs_qgroup *qgroup;
 
+       /*
+        * btrfs_quota_disable() can be called concurrently with
+        * btrfs_qgroup_rescan() -> qgroup_rescan_zero_tracking(), so take the
+        * lock.
+        */
+       spin_lock(&fs_info->qgroup_lock);
        while ((n = rb_first(&fs_info->qgroup_tree))) {
                qgroup = rb_entry(n, struct btrfs_qgroup, node);
                rb_erase(n, &fs_info->qgroup_tree);
                __del_qgroup_rb(qgroup);
+               spin_unlock(&fs_info->qgroup_lock);
                btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
                kfree(qgroup);
+               spin_lock(&fs_info->qgroup_lock);
        }
+       spin_unlock(&fs_info->qgroup_lock);
+
        /*
         * We call btrfs_free_qgroup_config() when unmounting
         * filesystem and disabling quota, so we set qgroup_ulist
@@ -4036,12 +4044,21 @@ btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
        qgroup_rescan_zero_tracking(fs_info);
 
        mutex_lock(&fs_info->qgroup_rescan_lock);
-       fs_info->qgroup_rescan_running = true;
-       btrfs_queue_work(fs_info->qgroup_rescan_workers,
-                        &fs_info->qgroup_rescan_work);
+       /*
+        * The rescan worker is only for full accounting qgroups, check if it's
+        * enabled as it is pointless to queue it otherwise. A concurrent quota
+        * disable may also have just cleared BTRFS_FS_QUOTA_ENABLED.
+        */
+       if (btrfs_qgroup_full_accounting(fs_info)) {
+               fs_info->qgroup_rescan_running = true;
+               btrfs_queue_work(fs_info->qgroup_rescan_workers,
+                                &fs_info->qgroup_rescan_work);
+       } else {
+               ret = -ENOTCONN;
+       }
        mutex_unlock(&fs_info->qgroup_rescan_lock);
 
-       return 0;
+       return ret;
 }
 
 int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,