if (!entry->show)
return -EIO;
- mutex_lock(&q->sysfs_lock);
+ mutex_lock(&q->elevator_lock);
res = entry->show(hctx, page);
- mutex_unlock(&q->sysfs_lock);
+ mutex_unlock(&q->elevator_lock);
return res;
}
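As a rough userspace sketch of the pattern in the hunk above (a pthread mutex standing in for ->elevator_lock; fake_hctx, hctx_nr_ctx_show and hctx_attr_read are made-up names, not kernel APIs), the show path simply takes the same lock the elevator updater holds around the formatting callback:

/* Userspace analogue only, not kernel code: guard a show() callback
 * with the mutex that protects the state it reads. */
#include <pthread.h>
#include <stdio.h>
#include <sys/types.h>

struct fake_hctx {
	int nr_ctx;	/* state a concurrent updater may change */
};

static pthread_mutex_t elevator_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for entry->show(hctx, page): format state into a buffer. */
static ssize_t hctx_nr_ctx_show(struct fake_hctx *hctx, char *page, size_t len)
{
	return snprintf(page, len, "%d\n", hctx->nr_ctx);
}

static ssize_t hctx_attr_read(struct fake_hctx *hctx, char *page, size_t len)
{
	ssize_t res;

	/* Take the same lock the updater holds, so the value read is
	 * consistent with any in-progress elevator/nr_hw_queues update. */
	pthread_mutex_lock(&elevator_lock);
	res = hctx_nr_ctx_show(hctx, page, len);
	pthread_mutex_unlock(&elevator_lock);
	return res;
}

int main(void)
{
	struct fake_hctx hctx = { .nr_ctx = 4 };
	char page[64];

	if (hctx_attr_read(&hctx, page, sizeof(page)) > 0)
		fputs(page, stdout);
	return 0;
}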
struct blk_mq_ctx *ctx;
struct blk_mq_tag_set *set = q->tag_set;
+ mutex_lock(&q->elevator_lock);
+
queue_for_each_hw_ctx(q, hctx, i) {
cpumask_clear(hctx->cpumask);
hctx->nr_ctx = 0;
hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
}
+
+ mutex_unlock(&q->elevator_lock);
}
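The remap hunk above follows the same idea from the writer side: re-initialize per-hctx state only while holding the lock that readers of that state take. A minimal userspace sketch (map_swqueues, fake_hctx and NR_HCTX are illustrative names, not the kernel implementation):

/* Userspace analogue only: reset per-hctx fields under the shared mutex
 * so a concurrent reader never observes a half-reset hctx. */
#include <pthread.h>
#include <stdio.h>

#define NR_HCTX 2

struct fake_hctx {
	unsigned long cpumask;	/* stand-in for hctx->cpumask */
	unsigned int nr_ctx;
	int next_cpu;
};

static pthread_mutex_t elevator_lock = PTHREAD_MUTEX_INITIALIZER;
static struct fake_hctx hctxs[NR_HCTX];

static void map_swqueues(void)
{
	pthread_mutex_lock(&elevator_lock);
	for (int i = 0; i < NR_HCTX; i++) {
		hctxs[i].cpumask = 0;
		hctxs[i].nr_ctx = 0;
		hctxs[i].next_cpu = i;	/* pretend CPU i is the first mapped CPU */
	}
	pthread_mutex_unlock(&elevator_lock);
}

int main(void)
{
	map_swqueues();
	printf("hctx0 next_cpu=%d nr_ctx=%u\n",
	       hctxs[0].next_cpu, hctxs[0].nr_ctx);
	return 0;
}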
struct list_head flush_list;
/*
- * Protects against I/O scheduler switching, particularly when
- * updating q->elevator. Since the elevator update code path may
- * also modify q->nr_requests and wbt latency, this lock also
- * protects the sysfs attributes nr_requests and wbt_lat_usec.
- * To ensure proper locking order during an elevator update, first
- * freeze the queue, then acquire ->elevator_lock.
+ * Protects against I/O scheduler switching, particularly when updating
+ * q->elevator. Since the elevator update code path may also modify
+ * q->nr_requests and wbt latency, this lock also protects the sysfs
+ * attrs nr_requests and wbt_lat_usec. Additionally the nr_hw_queues
+ * update may modify hctx tags, reserved-tags and cpumask, so this lock
+ * also helps protect the hctx attrs. To ensure proper locking order
+ * during an elevator or nr_hw_queues update, first freeze the queue,
+ * then acquire ->elevator_lock.
*/
struct mutex elevator_lock;
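The ordering rule stated in the comment (freeze first, then take ->elevator_lock) can be sketched in userspace as follows. This is an ordering illustration only: queue_freeze/queue_unfreeze and switch_elevator are assumed names, and a real queue freeze waits for in-flight I/O rather than taking a plain write lock.

/* Ordering sketch only: always "freeze" before taking elevator_lock. */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t queue_usage = PTHREAD_RWLOCK_INITIALIZER; /* freeze */
static pthread_mutex_t  elevator_lock = PTHREAD_MUTEX_INITIALIZER;

static void queue_freeze(void)   { pthread_rwlock_wrlock(&queue_usage); }
static void queue_unfreeze(void) { pthread_rwlock_unlock(&queue_usage); }

static const char *elevator_name = "none";

static void switch_elevator(const char *new_name)
{
	/* Lock order: freeze the queue first, then take elevator_lock,
	 * matching the rule in the comment above; taking them in the
	 * reverse order risks deadlock against I/O that needs the lock. */
	queue_freeze();
	pthread_mutex_lock(&elevator_lock);

	elevator_name = new_name;	/* stand-in for updating q->elevator */

	pthread_mutex_unlock(&elevator_lock);
	queue_unfreeze();
}

int main(void)
{
	switch_elevator("mq-deadline");
	printf("elevator: %s\n", elevator_name);
	return 0;
}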