git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
blk-throttle: fix access race during throttle policy activation
author: Han Guangjiang <hanguangjiang@lixiang.com>
Fri, 5 Sep 2025 10:24:11 +0000 (18:24 +0800)
committer: Jens Axboe <axboe@kernel.dk>
Mon, 8 Sep 2025 14:24:44 +0000 (08:24 -0600)
On repeated cold boots we occasionally hit a NULL pointer crash in
blk_should_throtl() when throttling is consulted before the throttle
policy is fully enabled for the queue. Checking only q->td != NULL is
insufficient during early initialization, so blkg_to_pd() for the
throttle policy can still return NULL and blkg_to_tg() becomes NULL,
which later gets dereferenced.

 Unable to handle kernel NULL pointer dereference
 at virtual address 0000000000000156
 ...
 pc : submit_bio_noacct+0x14c/0x4c8
 lr : submit_bio_noacct+0x48/0x4c8
 sp : ffff800087f0b690
 x29: ffff800087f0b690 x28: 0000000000005f90 x27: ffff00068af393c0
 x26: 0000000000080000 x25: 000000000002fbc0 x24: ffff000684ddcc70
 x23: 0000000000000000 x22: 0000000000000000 x21: 0000000000000000
 x20: 0000000000080000 x19: ffff000684ddcd08 x18: ffffffffffffffff
 x17: 0000000000000000 x16: ffff80008132a550 x15: 0000ffff98020fff
 x14: 0000000000000000 x13: 1fffe000d11d7021 x12: ffff000688eb810c
 x11: ffff00077ec4bb80 x10: ffff000688dcb720 x9 : ffff80008068ef60
 x8 : 00000a6fb8a86e85 x7 : 000000000000111e x6 : 0000000000000002
 x5 : 0000000000000246 x4 : 0000000000015cff x3 : 0000000000394500
 x2 : ffff000682e35e40 x1 : 0000000000364940 x0 : 000000000000001a
 Call trace:
  submit_bio_noacct+0x14c/0x4c8
  verity_map+0x178/0x2c8
  __map_bio+0x228/0x250
  dm_submit_bio+0x1c4/0x678
  __submit_bio+0x170/0x230
  submit_bio_noacct_nocheck+0x16c/0x388
  submit_bio_noacct+0x16c/0x4c8
  submit_bio+0xb4/0x210
  f2fs_submit_read_bio+0x4c/0xf0
  f2fs_mpage_readpages+0x3b0/0x5f0
  f2fs_readahead+0x90/0xe8

Tighten blk_throtl_activated() to also require that the throttle policy
bit is set on the queue:

  return q->td != NULL &&
         test_bit(blkcg_policy_throtl.plid, q->blkcg_pols);

This prevents blk_should_throtl() from accessing throttle group state
until policy data has been attached to blkgs.

Fixes: a3166c51702b ("blk-throttle: delay initialization until configuration")
Co-developed-by: Liang Jie <liangjie@lixiang.com>
Signed-off-by: Liang Jie <liangjie@lixiang.com>
Signed-off-by: Han Guangjiang <hanguangjiang@lixiang.com>
Reviewed-by: Yu Kuai <yukuai3@huawei.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-cgroup.c
block/blk-cgroup.h
block/blk-throttle.c
block/blk-throttle.h

index fe9ebd6a2e14d1f2414acd48f28549c16717348a..7246fc2563152cfc6179f238bf5a3688661a38d6 100644 (file)
@@ -110,12 +110,6 @@ static struct cgroup_subsys_state *blkcg_css(void)
        return task_css(current, io_cgrp_id);
 }
 
-static bool blkcg_policy_enabled(struct request_queue *q,
-                                const struct blkcg_policy *pol)
-{
-       return pol && test_bit(pol->plid, q->blkcg_pols);
-}
-
 static void blkg_free_workfn(struct work_struct *work)
 {
        struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
index 81868ad86330cf01b64d50f1ea89a2b3678202d0..83367086cb6ae3fd6599a46b7ee08715866accc3 100644 (file)
@@ -459,6 +459,12 @@ static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio)
                bio_issue_as_root_blkg(rq->bio) == bio_issue_as_root_blkg(bio);
 }
 
+static inline bool blkcg_policy_enabled(struct request_queue *q,
+                               const struct blkcg_policy *pol)
+{
+       return pol && test_bit(pol->plid, q->blkcg_pols);
+}
+
 void blk_cgroup_bio_start(struct bio *bio);
 void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
 #else  /* CONFIG_BLK_CGROUP */
index 397b6a410f9e505a3b97448b9275e539e863a67e..cfa1cd60d2c5fa370d56bed1b58e61fe918588af 100644 (file)
@@ -1327,17 +1327,13 @@ static int blk_throtl_init(struct gendisk *disk)
        INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
        throtl_service_queue_init(&td->service_queue);
 
-       /*
-        * Freeze queue before activating policy, to synchronize with IO path,
-        * which is protected by 'q_usage_counter'.
-        */
        memflags = blk_mq_freeze_queue(disk->queue);
        blk_mq_quiesce_queue(disk->queue);
 
        q->td = td;
        td->queue = q;
 
-       /* activate policy */
+       /* activate policy, blk_throtl_activated() will return true */
        ret = blkcg_activate_policy(disk, &blkcg_policy_throtl);
        if (ret) {
                q->td = NULL;
index 3b27755bfbff1d39d501d85c64fe0d324da896b0..9d7a42c039a15e34d1c11421a142294ec501c189 100644 (file)
@@ -156,7 +156,13 @@ void blk_throtl_cancel_bios(struct gendisk *disk);
 
 static inline bool blk_throtl_activated(struct request_queue *q)
 {
-       return q->td != NULL;
+       /*
+        * q->td guarantees that the blk-throttle module is already loaded,
+        * and the plid of blk-throttle is assigned.
+        * blkcg_policy_enabled() guarantees that the policy is activated
+        * in the request_queue.
+        */
+       return q->td != NULL && blkcg_policy_enabled(q, &blkcg_policy_throtl);
 }
 
 static inline bool blk_should_throtl(struct bio *bio)
@@ -164,11 +170,6 @@ static inline bool blk_should_throtl(struct bio *bio)
        struct throtl_grp *tg;
        int rw = bio_data_dir(bio);
 
-       /*
-        * This is called under bio_queue_enter(), and it's synchronized with
-        * the activation of blk-throtl, which is protected by
-        * blk_mq_freeze_queue().
-        */
        if (!blk_throtl_activated(bio->bi_bdev->bd_queue))
                return false;
 
@@ -194,7 +195,10 @@ static inline bool blk_should_throtl(struct bio *bio)
 
 static inline bool blk_throtl_bio(struct bio *bio)
 {
-
+       /*
+        * block throttling takes effect if the policy is activated
+        * in the bio's request_queue.
+        */
        if (!blk_should_throtl(bio))
                return false;