git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
blk-mq: add a new queue sysfs attribute async_depth
author Yu Kuai <yukuai@fnnas.com>
Tue, 3 Feb 2026 08:19:45 +0000 (16:19 +0800)
committer Jens Axboe <axboe@kernel.dk>
Tue, 3 Feb 2026 14:45:36 +0000 (07:45 -0700)
Add a new field async_depth to request_queue, along with related APIs.
It is currently unused; following patches will convert elevators to use
it instead of their internal async_depth.

Signed-off-by: Yu Kuai <yukuai@fnnas.com>
Reviewed-by: Nilay Shroff <nilay@linux.ibm.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
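
As a rough, hypothetical sketch of the conversion the message refers to (not part of this patch, and the follow-up patches may well look different), an elevator's limit_depth hook could read the queue-wide field instead of a private per-elevator copy. The shape of mq-deadline's hook is used here only as a familiar reference point:

/*
 * Illustrative only: an elevator consuming q->async_depth directly instead
 * of its own async_depth copy. Loosely modelled on mq-deadline's
 * dd_limit_depth(); the actual conversion patches may differ.
 */
static void example_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
{
	struct request_queue *q = data->q;

	/* Do not throttle synchronous reads. */
	if (op_is_sync(opf) && !op_is_write(opf))
		return;

	/* Throttle async requests and writes with the queue-wide async_depth. */
	data->shallow_depth = q->async_depth;
}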
block/blk-core.c
block/blk-mq.c
block/blk-sysfs.c
block/elevator.c
include/linux/blkdev.h

diff --git a/block/blk-core.c b/block/blk-core.c
index d6732dc69dd9db0a26fa7bad5712627d704d0e1f..474700ffaa1c895cb28f5fb0ec9e6b1f5c601b74 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -463,6 +463,7 @@ struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id)
        fs_reclaim_release(GFP_KERNEL);
 
        q->nr_requests = BLKDEV_DEFAULT_RQ;
+       q->async_depth = BLKDEV_DEFAULT_RQ;
 
        return q;
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b7b272e856b81491026748b3160048f6c0e70183..0ad3dd3329db71132bb92ff80e9092819e78cd7c 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -4662,6 +4662,7 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
        spin_lock_init(&q->requeue_lock);
 
        q->nr_requests = set->queue_depth;
+       q->async_depth = set->queue_depth;
 
        blk_mq_init_cpu_queues(q, set->nr_hw_queues);
        blk_mq_map_swqueue(q);
@@ -5028,6 +5029,11 @@ struct elevator_tags *blk_mq_update_nr_requests(struct request_queue *q,
                q->elevator->et = et;
        }
 
+       /*
+        * Preserve the relative value; both nr and async_depth are at most
+        * 16-bit values, so there is no need to worry about overflow.
+        */
+       q->async_depth = max(q->async_depth * nr / q->nr_requests, 1);
        q->nr_requests = nr;
        if (q->elevator && q->elevator->type->ops.depth_updated)
                q->elevator->type->ops.depth_updated(q);
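
For a concrete feel of the scaling above (hypothetical numbers, not taken from the patch): with nr_requests = 256 and async_depth = 192, lowering the queue depth to nr = 64 keeps the 3/4 ratio, since 192 * 64 / 256 = 48. A standalone userspace rendering of the same expression:

/* Standalone illustration of the scaling rule above; not kernel code. */
#include <stdio.h>

int main(void)
{
	unsigned int nr_requests = 256;	/* current queue depth (hypothetical) */
	unsigned int async_depth = 192;	/* current async depth (hypothetical) */
	unsigned int nr = 64;		/* new queue depth being applied */

	/* Same computation as the hunk: preserve the relative value, floor at 1. */
	unsigned int scaled = async_depth * nr / nr_requests;

	if (scaled < 1)
		scaled = 1;

	printf("async_depth: %u -> %u\n", async_depth, scaled);	/* 192 -> 48 */
	return 0;
}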
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index a580688c3ad51ebb6209182fe630d2fb3a15a46e..003aa684e8543c7a4da93989da1f3d2f12706c84 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -127,6 +127,46 @@ unlock:
        return ret;
 }
 
+static ssize_t queue_async_depth_show(struct gendisk *disk, char *page)
+{
+       guard(mutex)(&disk->queue->elevator_lock);
+
+       return queue_var_show(disk->queue->async_depth, page);
+}
+
+static ssize_t
+queue_async_depth_store(struct gendisk *disk, const char *page, size_t count)
+{
+       struct request_queue *q = disk->queue;
+       unsigned int memflags;
+       unsigned long nr;
+       int ret;
+
+       if (!queue_is_mq(q))
+               return -EINVAL;
+
+       ret = queue_var_store(&nr, page, count);
+       if (ret < 0)
+               return ret;
+
+       if (nr == 0)
+               return -EINVAL;
+
+       memflags = blk_mq_freeze_queue(q);
+       scoped_guard(mutex, &q->elevator_lock) {
+               if (q->elevator) {
+                       q->async_depth = min(q->nr_requests, nr);
+                       if (q->elevator->type->ops.depth_updated)
+                               q->elevator->type->ops.depth_updated(q);
+               } else {
+                       ret = -EINVAL;
+               }
+       }
+       blk_mq_unfreeze_queue(q, memflags);
+
+       return ret;
+}
+
 static ssize_t queue_ra_show(struct gendisk *disk, char *page)
 {
        ssize_t ret;
@@ -532,6 +572,7 @@ static struct queue_sysfs_entry _prefix##_entry = { \
 }
 
 QUEUE_RW_ENTRY(queue_requests, "nr_requests");
+QUEUE_RW_ENTRY(queue_async_depth, "async_depth");
 QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
 QUEUE_LIM_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
 QUEUE_LIM_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
@@ -719,6 +760,7 @@ static struct attribute *blk_mq_queue_attrs[] = {
         */
        &elv_iosched_entry.attr,
        &queue_requests_entry.attr,
+       &queue_async_depth_entry.attr,
 #ifdef CONFIG_BLK_WBT
        &queue_wb_lat_entry.attr,
 #endif
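
A hypothetical userspace sketch of exercising the new attribute; the disk name is a placeholder, and the path simply follows the usual /sys/block/<disk>/queue/ attribute directory. Per the store handler above, a value of 0 is rejected, values above nr_requests are clamped, and the write fails with -EINVAL when no I/O scheduler is attached:

/* Hypothetical example: read and then lower async_depth through sysfs. */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/block/sda/queue/async_depth"; /* placeholder disk */
	char buf[32];
	FILE *f;

	/* Read the current value. */
	f = fopen(path, "r");
	if (!f) {
		perror("open async_depth");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("async_depth is %s", buf);
	fclose(f);

	/* Lower it; the kernel clamps to nr_requests and rejects 0. */
	f = fopen(path, "w");
	if (!f) {
		perror("open async_depth for writing");
		return 1;
	}
	if (fputs("32\n", f) == EOF || fclose(f) == EOF) {
		perror("write async_depth");
		return 1;
	}
	return 0;
}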
diff --git a/block/elevator.c b/block/elevator.c
index a2f8b2251dc6e65d4472ee714c5305dc58985b37..ebe2a1fcf011f9f1e573f73062b40ee24c4df96a 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -589,6 +589,7 @@ static int elevator_switch(struct request_queue *q, struct elv_change_ctx *ctx)
                blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
                q->elevator = NULL;
                q->nr_requests = q->tag_set->queue_depth;
+               q->async_depth = q->tag_set->queue_depth;
        }
        blk_add_trace_msg(q, "elv switch: %s", ctx->name);
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 67d8d9e03abc9b46cb8380bd0da13f27ce0cf705..99ef8cd7673c2b60e9f5bc9a8e531d1e1e8057b9 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -551,6 +551,7 @@ struct request_queue {
         * queue settings
         */
        unsigned int            nr_requests;    /* Max # of requests */
+       unsigned int            async_depth;    /* Max # of async requests */
 
 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
        struct blk_crypto_profile *crypto_profile;