int ret, err;
unsigned int memflags;
struct request_queue *q = disk->queue;
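+ /* Cache the tag_set: it provides update_nr_hwq_lock and the depth limits checked below. */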
+ struct blk_mq_tag_set *set = q->tag_set;
ret = queue_var_store(&nr, page, count);
if (ret < 0)
return ret;
- memflags = blk_mq_freeze_queue(q);
- mutex_lock(&q->elevator_lock);
+ /*
+ * Serialize this nr_requests update against concurrent
+ * queue_requests_store() callers and elevator switching.
+ */
+ down_write(&set->update_nr_hwq_lock);
if (nr == q->nr_requests)
goto unlock;
if (nr < BLKDEV_MIN_RQ)
nr = BLKDEV_MIN_RQ;
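+ /* Values below BLKDEV_MIN_RQ are clamped up rather than rejected. */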
- if (nr <= q->tag_set->reserved_tags ||
+ /*
+ * Switching elevator is protected by update_nr_hwq_lock:
+ * - the read lock is held while storing the elevator sysfs attribute;
+ * - the write lock is held while updating nr_hw_queues;
+ * Holding the write lock here therefore excludes any concurrent
+ * elevator switch, so it is safe to dereference q->elevator.
+ */
+ if (nr <= set->reserved_tags ||
(q->elevator && nr > MAX_SCHED_RQ) ||
- (!q->elevator && nr > q->tag_set->queue_depth)) {
+ (!q->elevator && nr > set->queue_depth)) {
ret = -EINVAL;
goto unlock;
}
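+ /*
+ * Only freeze the queue and take elevator_lock once the new value has
+ * been validated, so the early 'goto unlock' paths above never freeze
+ * the queue.
+ */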
+ memflags = blk_mq_freeze_queue(q);
+ mutex_lock(&q->elevator_lock);
+
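+ /* Queue is frozen and elevator_lock is held: apply the new depth. */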
err = blk_mq_update_nr_requests(disk->queue, nr);
if (err)
ret = err;
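+ /* On success, ret still holds the byte count from queue_var_store(). */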
-unlock:
mutex_unlock(&q->elevator_lock);
blk_mq_unfreeze_queue(q, memflags);
+
+unlock:
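+ /* Early 'goto unlock' exits arrive here without having frozen the queue. */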
+ up_write(&set->update_nr_hwq_lock);
return ret;
}