{
ssize_t ret;
- mutex_lock(&disk->queue->sysfs_lock);
+ mutex_lock(&disk->queue->elevator_lock);
ret = queue_var_show(disk->queue->nr_requests, page);
- mutex_unlock(&disk->queue->sysfs_lock);
+ mutex_unlock(&disk->queue->elevator_lock);
return ret;
}
if (ret < 0)
return ret;
- mutex_lock(&q->sysfs_lock);
memflags = blk_mq_freeze_queue(q);
+ mutex_lock(&q->elevator_lock);
if (nr < BLKDEV_MIN_RQ)
nr = BLKDEV_MIN_RQ;
err = blk_mq_update_nr_requests(disk->queue, nr);
if (err)
ret = err;
+ mutex_unlock(&q->elevator_lock);
blk_mq_unfreeze_queue(q, memflags);
- mutex_unlock(&q->sysfs_lock);
return ret;
}
/*
* Attributes which are protected with q->sysfs_lock.
*/
- &queue_requests_entry.attr,
#ifdef CONFIG_BLK_WBT
&queue_wb_lat_entry.attr,
#endif
* q->sysfs_lock.
*/
&elv_iosched_entry.attr,
+ &queue_requests_entry.attr,
/*
* Attributes which don't require locking.
struct list_head flush_list;
/*
- * Protects against I/O scheduler switching, specifically when
- * updating q->elevator. To ensure proper locking order during
- * an elevator update, first freeze the queue, then acquire
- * ->elevator_lock.
+ * Protects against I/O scheduler switching, particularly when
+ * updating q->elevator. Since the elevator update code path may
+ * also modify q->nr_requests, this lock also protects the sysfs
+ * attribute 'nr_requests'.
+ * To ensure the proper locking order during an elevator update,
+ * first freeze the queue, then acquire ->elevator_lock.
*/
struct mutex elevator_lock;