blk-mq: factor out a helper blk_mq_limit_depth()
author     Yu Kuai <yukuai@fnnas.com>
           Tue, 3 Feb 2026 08:19:44 +0000 (16:19 +0800)
committer  Jens Axboe <axboe@kernel.dk>
           Tue, 3 Feb 2026 14:45:36 +0000 (07:45 -0700)
There are no functional changes; this just makes the code cleaner.

Signed-off-by: Yu Kuai <yukuai@fnnas.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-mq.c

index cf1daedbb39fddee6d638ae9ce622aeb3b730f24..b7b272e856b81491026748b3160048f6c0e70183 100644 (file)
@@ -498,6 +498,42 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data)
        return rq_list_pop(data->cached_rqs);
 }
 
+static void blk_mq_limit_depth(struct blk_mq_alloc_data *data)
+{
+       struct elevator_mq_ops *ops;
+
+       /* If no I/O scheduler has been configured, don't limit requests */
+       if (!data->q->elevator) {
+               blk_mq_tag_busy(data->hctx);
+               return;
+       }
+
+       /*
+        * All requests use scheduler tags when an I/O scheduler is
+        * enabled for the queue.
+        */
+       data->rq_flags |= RQF_SCHED_TAGS;
+
+       /*
+        * Flush/passthrough requests are special and go directly to the
+        * dispatch list; they are not subject to the async_depth limit.
+        */
+       if ((data->cmd_flags & REQ_OP_MASK) == REQ_OP_FLUSH ||
+           blk_op_is_passthrough(data->cmd_flags))
+               return;
+
+       WARN_ON_ONCE(data->flags & BLK_MQ_REQ_RESERVED);
+       data->rq_flags |= RQF_USE_SCHED;
+
+       /*
+        * By default, sync requests have no limit, and async requests are
+        * limited to async_depth.
+        */
+       ops = &data->q->elevator->type->ops;
+       if (ops->limit_depth)
+               ops->limit_depth(data->cmd_flags, data);
+}
+
 static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
 {
        struct request_queue *q = data->q;
@@ -516,31 +552,7 @@ retry:
        data->ctx = blk_mq_get_ctx(q);
        data->hctx = blk_mq_map_queue(data->cmd_flags, data->ctx);
 
-       if (q->elevator) {
-               /*
-                * All requests use scheduler tags when an I/O scheduler is
-                * enabled for the queue.
-                */
-               data->rq_flags |= RQF_SCHED_TAGS;
-
-               /*
-                * Flush/passthrough requests are special and go directly to the
-                * dispatch list.
-                */
-               if ((data->cmd_flags & REQ_OP_MASK) != REQ_OP_FLUSH &&
-                   !blk_op_is_passthrough(data->cmd_flags)) {
-                       struct elevator_mq_ops *ops = &q->elevator->type->ops;
-
-                       WARN_ON_ONCE(data->flags & BLK_MQ_REQ_RESERVED);
-
-                       data->rq_flags |= RQF_USE_SCHED;
-                       if (ops->limit_depth)
-                               ops->limit_depth(data->cmd_flags, data);
-               }
-       } else {
-               blk_mq_tag_busy(data->hctx);
-       }
-
+       blk_mq_limit_depth(data);
        if (data->flags & BLK_MQ_REQ_RESERVED)
                data->rq_flags |= RQF_RESV;
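
For context only (not part of this commit): below is a minimal sketch of what an elevator ->limit_depth hook can look like on the other side of the ops->limit_depth(data->cmd_flags, data) call above, loosely modeled on mq-deadline. The struct example_data type, its async_depth field, and the use of data->shallow_depth are illustrative assumptions and may differ from the scheduler code in this tree.

/*
 * Hypothetical elevator ->limit_depth hook, invoked from blk_mq_limit_depth()
 * as ops->limit_depth(data->cmd_flags, data). Sketch only; loosely modeled
 * on mq-deadline and not part of this commit.
 */
static void example_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
{
	/* example_data and async_depth are assumed per-scheduler state. */
	struct example_data *ed = data->q->elevator->elevator_data;

	/* Leave synchronous reads unthrottled. */
	if (op_is_sync(opf) && !op_is_write(opf))
		return;

	/*
	 * Cap the sbitmap allocation depth for async requests and writes so
	 * they cannot consume all scheduler tags and starve sync allocations.
	 */
	data->shallow_depth = ed->async_depth;
}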