 	return NULL;
 }
-static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
-		struct request_queue *q)
+static void __blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
+		struct request_queue *q)
 {
 	struct blk_mq_hw_ctx *hctx;
 	unsigned long i, j;
-	/* protect against switching io scheduler */
-	mutex_lock(&q->elevator_lock);
 	for (i = 0; i < set->nr_hw_queues; i++) {
 		int old_node;
 		int node = blk_mq_get_hctx_node(set, i);
 	xa_for_each_start(&q->hctx_table, j, hctx, j)
 		blk_mq_exit_hctx(q, set, hctx, j);
-	mutex_unlock(&q->elevator_lock);
+}
+
+static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
+		struct request_queue *q, bool lock)
+{
+	if (lock) {
+		/* protect against switching io scheduler */
+		mutex_lock(&q->elevator_lock);
+		__blk_mq_realloc_hw_ctxs(set, q);
+		mutex_unlock(&q->elevator_lock);
+	} else {
+		__blk_mq_realloc_hw_ctxs(set, q);
+	}
 	/* unregister cpuhp callbacks for exited hctxs */
 	blk_mq_remove_hw_queues_cpuhp(q);
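
The hunks above factor the hctx reallocation into an unlocked helper, __blk_mq_realloc_hw_ctxs(), plus a wrapper that takes q->elevator_lock only when the caller asks for it; the two hunks below switch the callers over, with the queue-initialization path passing false and the nr_hw_queues update path passing true. As a rough user-space sketch of the same conditional-locking shape (all names here, struct queue, realloc_hw_ctxs and friends, are made up for illustration and are not blk-mq symbols):

#include <pthread.h>
#include <stdbool.h>

/* Illustrative stand-in for struct request_queue. */
struct queue {
        pthread_mutex_t elevator_lock;
        int nr_hw_queues;
};

/* Does the actual rework; assumes the caller has excluded racing
 * scheduler (elevator) switches by whatever means are appropriate. */
static void __realloc_hw_ctxs(struct queue *q)
{
        q->nr_hw_queues = 0;    /* placeholder for the real teardown/rebuild */
}

static void realloc_hw_ctxs(struct queue *q, bool lock)
{
        if (lock) {
                /* live queue: a scheduler switch could run concurrently */
                pthread_mutex_lock(&q->elevator_lock);
                __realloc_hw_ctxs(q);
                pthread_mutex_unlock(&q->elevator_lock);
        } else {
                /* caller guarantees exclusive access, e.g. the queue is
                 * still being built and is not visible to anyone else */
                __realloc_hw_ctxs(q);
        }
}

int main(void)
{
        struct queue q = { .nr_hw_queues = 4 };

        pthread_mutex_init(&q.elevator_lock, NULL);
        realloc_hw_ctxs(&q, false);     /* queue still being initialized */
        realloc_hw_ctxs(&q, true);      /* queue live, take the lock */
        pthread_mutex_destroy(&q.elevator_lock);
        return 0;
}

This follows the usual kernel convention that a __prefixed function leaves locking to its caller; passing false from the initialization path is presumably safe because the queue is not yet exposed to anything that could switch its I/O scheduler, while the nr_hw_queues update path still serializes against such a switch.
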
 	xa_init(&q->hctx_table);
-	blk_mq_realloc_hw_ctxs(set, q);
+	blk_mq_realloc_hw_ctxs(set, q, false);
 	if (!q->nr_hw_queues)
 		goto err_hctxs;
 fallback:
 	blk_mq_update_queue_map(set);
 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
-		blk_mq_realloc_hw_ctxs(set, q);
+		blk_mq_realloc_hw_ctxs(set, q, true);
 		if (q->nr_hw_queues != set->nr_hw_queues) {
 			int i = prev_nr_hw_queues;