 * If not, tell the caller that it should skip this queue.
*/
ret = -EXDEV;
- data.hctx = xa_load(&q->hctx_table, hctx_idx);
+ data.hctx = q->queue_hw_ctx[hctx_idx];
if (!blk_mq_hw_queue_mapped(data.hctx))
goto out_queue_exit;
cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
blk_free_flush_queue_callback);
hctx->fq = NULL;
- xa_erase(&q->hctx_table, hctx_idx);
-
spin_lock(&q->unused_hctx_lock);
list_add(&hctx->hctx_list, &q->unused_hctx_list);
spin_unlock(&q->unused_hctx_lock);
hctx->numa_node))
goto exit_hctx;
- if (xa_insert(&q->hctx_table, hctx_idx, hctx, GFP_KERNEL))
- goto exit_flush_rq;
-
return 0;
- exit_flush_rq:
- if (set->ops->exit_request)
- set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
exit_hctx:
if (set->ops->exit_hctx)
set->ops->exit_hctx(hctx, hctx_idx);
kobject_put(&hctx->kobj);
}
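/*
 * Note on the unwind labels above: the goto-based error handling pairs
 * each fallible setup step with a label that undoes the steps completed
 * before it, in reverse order. With the xa_insert() step gone, nothing
 * can fail after the flush request is initialised, so the exit_flush_rq
 * label has no remaining user and is dropped as well. A minimal,
 * self-contained sketch of that convention (hypothetical buffers, libc
 * malloc/free standing in for the kernel allocators):
 */
#include <stdlib.h>

static char *buf_a, *buf_b;

static int setup_both(void)
{
	buf_a = malloc(64);
	if (!buf_a)
		return -1;		/* nothing to undo yet */

	buf_b = malloc(64);
	if (!buf_b)
		goto free_a;		/* undo only the step that succeeded */

	return 0;

free_a:
	free(buf_a);
	buf_a = NULL;
	return -1;
}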
- xa_destroy(&q->hctx_table);
+ kfree(q->queue_hw_ctx);
/*
* release .mq_kobj and sw queue's kobject now because
static void __blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
struct request_queue *q)
{
- struct blk_mq_hw_ctx *hctx;
- unsigned long i, j;
+ int i, j, end;
+ struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
+
+ if (q->nr_hw_queues < set->nr_hw_queues) {
+ struct blk_mq_hw_ctx **new_hctxs;
+
+ new_hctxs = kcalloc_node(set->nr_hw_queues,
+ sizeof(*new_hctxs), GFP_KERNEL,
+ set->numa_node);
+ if (!new_hctxs)
+ return;
+ if (hctxs)
+ memcpy(new_hctxs, hctxs, q->nr_hw_queues *
+ sizeof(*hctxs));
+ q->queue_hw_ctx = new_hctxs;
+ kfree(hctxs);
+ hctxs = new_hctxs;
+ }
for (i = 0; i < set->nr_hw_queues; i++) {
int old_node;
int node = blk_mq_get_hctx_node(set, i);
- struct blk_mq_hw_ctx *old_hctx = xa_load(&q->hctx_table, i);
+ struct blk_mq_hw_ctx *old_hctx = hctxs[i];
if (old_hctx) {
old_node = old_hctx->numa_node;
blk_mq_exit_hctx(q, set, old_hctx, i);
}
- if (!blk_mq_alloc_and_init_hctx(set, q, i, node)) {
+ hctxs[i] = blk_mq_alloc_and_init_hctx(set, q, i, node);
+ if (!hctxs[i]) {
if (!old_hctx)
break;
pr_warn("Allocate new hctx on node %d fails, fallback to previous one on node %d\n",
node, old_node);
- hctx = blk_mq_alloc_and_init_hctx(set, q, i, old_node);
- WARN_ON_ONCE(!hctx);
+ hctxs[i] = blk_mq_alloc_and_init_hctx(set, q, i,
+ old_node);
+ WARN_ON_ONCE(!hctxs[i]);
}
}
/*
*/
if (i != set->nr_hw_queues) {
j = q->nr_hw_queues;
+ end = i;
} else {
j = i;
+ end = q->nr_hw_queues;
q->nr_hw_queues = set->nr_hw_queues;
}
- xa_for_each_start(&q->hctx_table, j, hctx, j)
- blk_mq_exit_hctx(q, set, hctx, j);
+ for (; j < end; j++) {
+ struct blk_mq_hw_ctx *hctx = hctxs[j];
+
+ if (hctx) {
+ blk_mq_exit_hctx(q, set, hctx, j);
+ hctxs[j] = NULL;
+ }
+ }
}
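/*
 * The hunk above restores grow-and-copy management of the queue_hw_ctx
 * pointer array: when the tag set wants more hardware queues than the
 * queue currently has, a larger array is allocated, the existing
 * pointers are copied across, the new array is published, and only then
 * is the old one freed; allocation failure simply keeps the old array.
 * A minimal userspace sketch of that pattern (illustrative names,
 * calloc/free standing in for kcalloc_node/kfree):
 */
#include <stdlib.h>
#include <string.h>

struct hw_ctx;				/* opaque per-queue context */

static struct hw_ctx **grow_ctx_array(struct hw_ctx **old, unsigned int old_nr,
				      unsigned int new_nr)
{
	struct hw_ctx **new_arr;

	if (new_nr <= old_nr)
		return old;		/* the array is only ever grown */

	new_arr = calloc(new_nr, sizeof(*new_arr));
	if (!new_arr)
		return old;		/* on failure, keep what we have */

	if (old)
		memcpy(new_arr, old, old_nr * sizeof(*old));
	free(old);
	return new_arr;
}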
static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
INIT_LIST_HEAD(&q->unused_hctx_list);
spin_lock_init(&q->unused_hctx_lock);
- xa_init(&q->hctx_table);
-
blk_mq_realloc_hw_ctxs(set, q);
if (!q->nr_hw_queues)
goto err_hctxs;
{
if (!blk_mq_can_poll(q))
return 0;
- return blk_hctx_poll(q, xa_load(&q->hctx_table, cookie), iob, flags);
+ return blk_hctx_poll(q, q->queue_hw_ctx[cookie], iob, flags);
}
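/*
 * With the xarray gone, the poll path above resolves the cookie by
 * indexing the queue_hw_ctx array directly; xa_load() would have
 * returned NULL for an unpopulated index. A hypothetical bounds-checked
 * wrapper (not part of the patch) showing the equivalent lookup:
 */
static inline struct blk_mq_hw_ctx *poll_hctx(struct request_queue *q,
					      unsigned int cookie)
{
	return cookie < q->nr_hw_queues ? q->queue_hw_ctx[cookie] : NULL;
}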
int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,