blk-mq: use array to manage hctx map instead of xarray
author	Fengnan Chang <fengnanchang@gmail.com>
	Fri, 28 Nov 2025 08:53:13 +0000 (16:53 +0800)
committer	Jens Axboe <axboe@kernel.dk>
	Fri, 28 Nov 2025 16:09:19 +0000 (09:09 -0700)
After commit 4e5cc99e1e48 ("blk-mq: manage hctx map via xarray"), we use
an xarray instead of an array to store hctx pointers. In poll mode,
however, every blk_mq_poll call has to use xa_load to find the
corresponding hctx, which introduces some cost: in my test, xa_load
accounts for about 3.8% of CPU time.

This patch reverts the previous change, eliminating the xa_load overhead
and yielding about a 3% performance improvement.

Signed-off-by: Fengnan Chang <changfengnan@bytedance.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
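
Below is a minimal userspace sketch (not part of the patch, and not
kernel code) of the lookup pattern being restored: hardware contexts sit
in a plain pointer array indexed by hctx number, so the poll hot path is
a single dependent load instead of an xarray walk. The names demo_queue,
demo_hctx and demo_poll_lookup are illustrative only.

#include <stdio.h>
#include <stdlib.h>

struct demo_hctx {
	unsigned int queue_num;
};

struct demo_queue {
	unsigned int nr_hw_queues;
	struct demo_hctx **queue_hw_ctx;	/* plain array of hctx pointers */
};

/* Poll-path lookup: a direct array index, no tree traversal. */
static struct demo_hctx *demo_poll_lookup(struct demo_queue *q,
					  unsigned int cookie)
{
	return q->queue_hw_ctx[cookie];
}

int main(void)
{
	struct demo_queue q = { .nr_hw_queues = 4 };
	unsigned int i;

	q.queue_hw_ctx = calloc(q.nr_hw_queues, sizeof(*q.queue_hw_ctx));
	if (!q.queue_hw_ctx)
		return 1;

	for (i = 0; i < q.nr_hw_queues; i++) {
		q.queue_hw_ctx[i] = malloc(sizeof(*q.queue_hw_ctx[i]));
		if (!q.queue_hw_ctx[i])
			return 1;
		q.queue_hw_ctx[i]->queue_num = i;
	}

	printf("cookie 2 maps to hctx %u\n",
	       demo_poll_lookup(&q, 2)->queue_num);

	for (i = 0; i < q.nr_hw_queues; i++)
		free(q.queue_hw_ctx[i]);
	free(q.queue_hw_ctx);
	return 0;
}

The trade-off, visible in __blk_mq_realloc_hw_ctxs below, is that the
array has to be reallocated and the old pointers copied over when
set->nr_hw_queues grows, whereas the xarray could simply be populated in
place.
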
block/blk-mq-tag.c
block/blk-mq.c
block/blk-mq.h
include/linux/blk-mq.h
include/linux/blkdev.h

diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 5b664dbdf655575d092dc0789be56f42ca3c8be5..33946cdb571648a0ebaf0d8796af32c7759084f1 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -499,7 +499,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
        int srcu_idx;
 
        /*
-        * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and hctx_table
+        * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and queue_hw_ctx
         * while the queue is frozen. So we can use q_usage_counter to avoid
         * racing with it.
         */
diff --git a/block/blk-mq.c b/block/blk-mq.c
index f2650c97a75e235ecdb04e5110649457005e9458..1ef81110eb8acd092271dd7ffd22379ffa9663ce 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -730,7 +730,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
         * If not tell the caller that it should skip this queue.
         */
        ret = -EXDEV;
-       data.hctx = xa_load(&q->hctx_table, hctx_idx);
+       data.hctx = q->queue_hw_ctx[hctx_idx];
        if (!blk_mq_hw_queue_mapped(data.hctx))
                goto out_queue_exit;
        cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
@@ -3946,8 +3946,6 @@ static void blk_mq_exit_hctx(struct request_queue *q,
                        blk_free_flush_queue_callback);
        hctx->fq = NULL;
 
-       xa_erase(&q->hctx_table, hctx_idx);
-
        spin_lock(&q->unused_hctx_lock);
        list_add(&hctx->hctx_list, &q->unused_hctx_list);
        spin_unlock(&q->unused_hctx_lock);
@@ -3989,14 +3987,8 @@ static int blk_mq_init_hctx(struct request_queue *q,
                                hctx->numa_node))
                goto exit_hctx;
 
-       if (xa_insert(&q->hctx_table, hctx_idx, hctx, GFP_KERNEL))
-               goto exit_flush_rq;
-
        return 0;
 
- exit_flush_rq:
-       if (set->ops->exit_request)
-               set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
  exit_hctx:
        if (set->ops->exit_hctx)
                set->ops->exit_hctx(hctx, hctx_idx);
@@ -4385,7 +4377,7 @@ void blk_mq_release(struct request_queue *q)
                kobject_put(&hctx->kobj);
        }
 
-       xa_destroy(&q->hctx_table);
+       kfree(q->queue_hw_ctx);
 
        /*
         * release .mq_kobj and sw queue's kobject now because
@@ -4529,26 +4521,44 @@ static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
 static void __blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
                                     struct request_queue *q)
 {
-       struct blk_mq_hw_ctx *hctx;
-       unsigned long i, j;
+       int i, j, end;
+       struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
+
+       if (q->nr_hw_queues < set->nr_hw_queues) {
+               struct blk_mq_hw_ctx **new_hctxs;
+
+               new_hctxs = kcalloc_node(set->nr_hw_queues,
+                                      sizeof(*new_hctxs), GFP_KERNEL,
+                                      set->numa_node);
+               if (!new_hctxs)
+                       return;
+               if (hctxs)
+                       memcpy(new_hctxs, hctxs, q->nr_hw_queues *
+                              sizeof(*hctxs));
+               q->queue_hw_ctx = new_hctxs;
+               kfree(hctxs);
+               hctxs = new_hctxs;
+       }
 
        for (i = 0; i < set->nr_hw_queues; i++) {
                int old_node;
                int node = blk_mq_get_hctx_node(set, i);
-               struct blk_mq_hw_ctx *old_hctx = xa_load(&q->hctx_table, i);
+               struct blk_mq_hw_ctx *old_hctx = hctxs[i];
 
                if (old_hctx) {
                        old_node = old_hctx->numa_node;
                        blk_mq_exit_hctx(q, set, old_hctx, i);
                }
 
-               if (!blk_mq_alloc_and_init_hctx(set, q, i, node)) {
+               hctxs[i] = blk_mq_alloc_and_init_hctx(set, q, i, node);
+               if (!hctxs[i]) {
                        if (!old_hctx)
                                break;
                        pr_warn("Allocate new hctx on node %d fails, fallback to previous one on node %d\n",
                                        node, old_node);
-                       hctx = blk_mq_alloc_and_init_hctx(set, q, i, old_node);
-                       WARN_ON_ONCE(!hctx);
+                       hctxs[i] = blk_mq_alloc_and_init_hctx(set, q, i,
+                                       old_node);
+                       WARN_ON_ONCE(!hctxs[i]);
                }
        }
        /*
@@ -4557,13 +4567,21 @@ static void __blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
         */
        if (i != set->nr_hw_queues) {
                j = q->nr_hw_queues;
+               end = i;
        } else {
                j = i;
+               end = q->nr_hw_queues;
                q->nr_hw_queues = set->nr_hw_queues;
        }
 
-       xa_for_each_start(&q->hctx_table, j, hctx, j)
-               blk_mq_exit_hctx(q, set, hctx, j);
+       for (; j < end; j++) {
+               struct blk_mq_hw_ctx *hctx = hctxs[j];
+
+               if (hctx) {
+                       blk_mq_exit_hctx(q, set, hctx, j);
+                       hctxs[j] = NULL;
+               }
+       }
 }
 
 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
@@ -4599,8 +4617,6 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
        INIT_LIST_HEAD(&q->unused_hctx_list);
        spin_lock_init(&q->unused_hctx_lock);
 
-       xa_init(&q->hctx_table);
-
        blk_mq_realloc_hw_ctxs(set, q);
        if (!q->nr_hw_queues)
                goto err_hctxs;
@@ -5187,7 +5203,7 @@ int blk_mq_poll(struct request_queue *q, blk_qc_t cookie,
 {
        if (!blk_mq_can_poll(q))
                return 0;
-       return blk_hctx_poll(q, xa_load(&q->hctx_table, cookie), iob, flags);
+       return blk_hctx_poll(q, q->queue_hw_ctx[cookie], iob, flags);
 }
 
 int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
diff --git a/block/blk-mq.h b/block/blk-mq.h
index c4fccdeb54412978665f54ca44d8456095335009..80a3f0c2bce76e94901bb285eef95a8e0de08c83 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -84,7 +84,7 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *
                                                          enum hctx_type type,
                                                          unsigned int cpu)
 {
-       return xa_load(&q->hctx_table, q->tag_set->map[type].mq_map[cpu]);
+       return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
 }
 
 static inline enum hctx_type blk_mq_get_hctx_type(blk_opf_t opf)
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index b54506b3b76d9076b2a3fe7e8320dfe521902f66..9208ff90ae167882e0ceb0cbd0e109ea8d2bdc2e 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -1016,7 +1016,8 @@ static inline void *blk_mq_rq_to_pdu(struct request *rq)
 }
 
 #define queue_for_each_hw_ctx(q, hctx, i)                              \
-       xa_for_each(&(q)->hctx_table, (i), (hctx))
+       for ((i) = 0; (i) < (q)->nr_hw_queues &&                        \
+            ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)
 
 #define hctx_for_each_ctx(hctx, ctx, i)                                        \
        for ((i) = 0; (i) < (hctx)->nr_ctx &&                           \
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index cb4ba09959ee4d78d60cb5ec762215b1d1bf43d0..6195f89648dbce6c3d4d7e60b831591779877a36 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -503,7 +503,7 @@ struct request_queue {
 
        /* hw dispatch queues */
        unsigned int            nr_hw_queues;
-       struct xarray           hctx_table;
+       struct blk_mq_hw_ctx    **queue_hw_ctx;
 
        struct percpu_ref       q_usage_counter;
        struct lock_class_key   io_lock_cls_key;