block: don't acquire ->elevator_lock in blk_mq_map_swqueue and blk_mq_realloc_hw_ctxs
author     Ming Lei <ming.lei@redhat.com>
           Mon, 5 May 2025 14:18:01 +0000 (22:18 +0800)
committer  Jens Axboe <axboe@kernel.dk>
           Tue, 6 May 2025 13:43:43 +0000 (07:43 -0600)
Both blk_mq_map_swqueue() and blk_mq_realloc_hw_ctxs() are called before
the request queue is added to the tagset list, so neither can run
concurrently with blk_mq_update_nr_hw_queues().

Since the two functions are only called from queue initialization or from
blk_mq_update_nr_hw_queues() itself, an elevator switch can't happen while
either is running.

So remove the ->elevator_lock uses from the two functions.
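
To make the ordering argument concrete, here is a minimal stand-alone C
sketch (toy names, not kernel code: realloc_and_map() and
update_nr_hw_queues() are hypothetical stand-ins for
blk_mq_realloc_hw_ctxs()/blk_mq_map_swqueue() and
blk_mq_update_nr_hw_queues()). A queue only becomes reachable by the
updater once it is published on the tagset list, so touching a
still-private queue during init needs no lock:

/* Toy model of the visibility argument; not kernel code. */
#include <stdio.h>

struct queue {
        int on_tag_list;        /* published on the tagset list? */
        int nr_hw_queues;
};

/* stand-in for blk_mq_realloc_hw_ctxs()/blk_mq_map_swqueue() */
static void realloc_and_map(struct queue *q, int nr)
{
        q->nr_hw_queues = nr;   /* no lock: q may still be private */
}

/* stand-in for blk_mq_update_nr_hw_queues(): walks only listed queues */
static void update_nr_hw_queues(struct queue **list, int n, int nr)
{
        for (int i = 0; i < n; i++)
                if (list[i]->on_tag_list)
                        realloc_and_map(list[i], nr);
}

int main(void)
{
        struct queue q = { 0, 0 };
        struct queue *tag_list[1] = { &q };

        realloc_and_map(&q, 4);         /* init: q not yet visible */
        q.on_tag_list = 1;              /* publish on the tagset list */
        update_nr_hw_queues(tag_list, 1, 8);
        printf("nr_hw_queues=%d\n", q.nr_hw_queues);
        return 0;
}

The updater never sees the queue before it is published, which mirrors
why the init-time callers above can drop ->elevator_lock.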

Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Nilay Shroff <nilay@linux.ibm.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20250505141805.2751237-24-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-mq.c

index a4ab7779e0204ca8374c802662b48d2fda135de7..38d0a41ff1ae9852b14497623e96e464973db4e3 100644 (file)
@@ -4156,8 +4156,6 @@ static void blk_mq_map_swqueue(struct request_queue *q)
        struct blk_mq_ctx *ctx;
        struct blk_mq_tag_set *set = q->tag_set;
 
-       mutex_lock(&q->elevator_lock);
-
        queue_for_each_hw_ctx(q, hctx, i) {
                cpumask_clear(hctx->cpumask);
                hctx->nr_ctx = 0;
@@ -4262,8 +4260,6 @@ static void blk_mq_map_swqueue(struct request_queue *q)
                hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
                hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
        }
-
-       mutex_unlock(&q->elevator_lock);
 }
 
 /*
@@ -4567,16 +4563,9 @@ static void __blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 }
 
 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
-                                  struct request_queue *q, bool lock)
+                                  struct request_queue *q)
 {
-       if (lock) {
-               /* protect against switching io scheduler  */
-               mutex_lock(&q->elevator_lock);
-               __blk_mq_realloc_hw_ctxs(set, q);
-               mutex_unlock(&q->elevator_lock);
-       } else {
-               __blk_mq_realloc_hw_ctxs(set, q);
-       }
+       __blk_mq_realloc_hw_ctxs(set, q);
 
        /* unregister cpuhp callbacks for exited hctxs */
        blk_mq_remove_hw_queues_cpuhp(q);
@@ -4608,7 +4597,7 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 
        xa_init(&q->hctx_table);
 
-       blk_mq_realloc_hw_ctxs(set, q, false);
+       blk_mq_realloc_hw_ctxs(set, q);
        if (!q->nr_hw_queues)
                goto err_hctxs;
 
@@ -5019,7 +5008,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 fallback:
        blk_mq_update_queue_map(set);
        list_for_each_entry(q, &set->tag_list, tag_set_list) {
-               blk_mq_realloc_hw_ctxs(set, q, true);
+               blk_mq_realloc_hw_ctxs(set, q);
 
                if (q->nr_hw_queues != set->nr_hw_queues) {
                        int i = prev_nr_hw_queues;