--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ ... @@
 }

 int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
-			    struct blk_mq_tags **tagsptr, unsigned int tdepth,
-			    bool can_grow)
+			    struct blk_mq_tags **tagsptr, unsigned int tdepth)
 {
 	struct blk_mq_tags *tags = *tagsptr;

-	if (tdepth <= tags->nr_reserved_tags)
-		return -EINVAL;
-
 	/*
 	 * If we are allowed to grow beyond the original size, allocate
 	 * a new set of tags before freeing the old one.
 	 */
 	if (tdepth > tags->nr_tags) {
 		struct blk_mq_tag_set *set = hctx->queue->tag_set;
 		struct blk_mq_tags *new;

-		if (!can_grow)
-			return -EINVAL;
-
-		/*
-		 * We need some sort of upper limit, set it high enough that
-		 * no valid use cases should require more.
-		 */
-		if (tdepth > MAX_SCHED_RQ)
-			return -EINVAL;
-
 		/*
 		 * Only the sbitmap needs resizing since we allocated the max
 		 * initially.
 		 */
 		if (blk_mq_is_shared_tags(set->flags))
 			return 0;
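
For orientation, this is roughly how blk_mq_tag_update_depth() reads with the
patch applied. Only the new signature and the shared-tags early return come
from the hunk above; the grow path and the shrink branch are a sketch
reconstructed from the usual upstream shape of this function, so treat those
details as assumptions rather than as part of the patch:

/* Sketch, not quoted from the tree: the function after this patch. */
int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
                            struct blk_mq_tags **tagsptr, unsigned int tdepth)
{
        struct blk_mq_tags *tags = *tagsptr;

        if (tdepth > tags->nr_tags) {
                struct blk_mq_tag_set *set = hctx->queue->tag_set;
                struct blk_mq_tags *new;

                /* Shared tags are allocated at max size up front; only the
                 * sbitmap needs resizing, via the shared-tags helpers. */
                if (blk_mq_is_shared_tags(set->flags))
                        return 0;

                /* Grow: allocate the new tags before freeing the old ones. */
                new = blk_mq_alloc_map_and_rqs(set, hctx->queue_num, tdepth);
                if (!new)
                        return -ENOMEM;

                blk_mq_free_map_and_rqs(set, *tagsptr, hctx->queue_num);
                *tagsptr = new;
        } else {
                /* Shrink: reserved tags stay static; only the normal
                 * sbitmap is resized. */
                sbitmap_queue_resize(&tags->bitmap_tags,
                                     tdepth - tags->nr_reserved_tags);
        }

        return 0;
}

Note that the function no longer returns -EINVAL at all: with the range
checks hoisted into the sysfs store path (the blk-sysfs.c hunk at the end),
it trusts the depth it is handed and only reports allocation failure.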

--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ ... @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
 	int ret = 0;
 	unsigned long i;

-	if (q->nr_requests == nr)
-		return 0;
-
 	blk_mq_quiesce_queue(q);

 	queue_for_each_hw_ctx(q, hctx, i) {
 		if (!hctx->tags)
 			continue;
 		/*
 		 * If we're using an MQ scheduler, just update the scheduler
 		 * queue depth. This is similar to what the old code would do.
 		 */
 		if (hctx->sched_tags) {
 			ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
-						      nr, true);
+						      nr);
 		} else {
-			ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
-						      false);
+			ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr);
 		}
 		if (ret)
 			goto out;
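
Both call sites now pass nr alone: grow versus shrink is decided inside
blk_mq_tag_update_depth() by comparing against tags->nr_tags, and the range
policy that used to hide behind can_grow moves to the sysfs entry point.
Condensed, the condition the store path enforces looks like the helper
below; nr_requests_valid() is a hypothetical name for illustration, not
something this patch adds:

/* Hypothetical helper condensing the caller-side checks added in the
 * blk-sysfs.c hunk below; all identifiers are taken from this patch. */
static bool nr_requests_valid(struct request_queue *q, unsigned int nr)
{
        if (nr <= q->tag_set->reserved_tags)
                return false;                   /* nothing left for normal tags */
        if (q->elevator)                        /* sched tags may grow ... */
                return nr <= MAX_SCHED_RQ;      /* ... up to the scheduler cap */
        return nr <= q->tag_set->queue_depth;   /* no elevator: no growing */
}

This is the old callee-side policy (reserved-tag floor, MAX_SCHED_RQ ceiling,
no growth without a scheduler) evaluated once per write instead of once per
hardware queue.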

--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ ... @@
 		unsigned int tag);
 void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags);
 int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
-		struct blk_mq_tags **tags, unsigned int depth, bool can_grow);
+		struct blk_mq_tags **tags, unsigned int depth);
 void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set,
 		unsigned int size);
 void blk_mq_tag_update_sched_shared_tags(struct request_queue *q);

--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ ... @@ queue_requests_store(...)
 	memflags = blk_mq_freeze_queue(q);
 	mutex_lock(&q->elevator_lock);
+
+	if (nr == q->nr_requests)
+		goto unlock;
+
 	if (nr < BLKDEV_MIN_RQ)
 		nr = BLKDEV_MIN_RQ;
+	if (nr <= q->tag_set->reserved_tags ||
+	    (q->elevator && nr > MAX_SCHED_RQ) ||
+	    (!q->elevator && nr > q->tag_set->queue_depth)) {
+		ret = -EINVAL;
+		goto unlock;
+	}
+
 	err = blk_mq_update_nr_requests(disk->queue, nr);
 	if (err)
 		ret = err;
+
+unlock:
 	mutex_unlock(&q->elevator_lock);
 	blk_mq_unfreeze_queue(q, memflags);
 	return ret;
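
Taken together, the store path now short-circuits a no-op write, clamps small
values up to BLKDEV_MIN_RQ, and rejects impossible depths before touching any
tags, all under the queue freeze and q->elevator_lock. A minimal sketch of
the whole function after the patch; the prologue (the queue_is_mq() check and
queue_var_store() parsing) is assumed from typical upstream code and is not
part of this hunk:

static ssize_t
queue_requests_store(struct gendisk *disk, const char *page, size_t count)
{
        struct request_queue *q = disk->queue;
        unsigned int memflags;
        unsigned long nr;
        int err, ret;

        if (!queue_is_mq(q))
                return -EINVAL;

        /* On success ret holds count, so a no-op write still reports it. */
        ret = queue_var_store(&nr, page, count);
        if (ret < 0)
                return ret;

        memflags = blk_mq_freeze_queue(q);
        mutex_lock(&q->elevator_lock);

        if (nr == q->nr_requests)
                goto unlock;

        if (nr < BLKDEV_MIN_RQ)
                nr = BLKDEV_MIN_RQ;

        if (nr <= q->tag_set->reserved_tags ||
            (q->elevator && nr > MAX_SCHED_RQ) ||
            (!q->elevator && nr > q->tag_set->queue_depth)) {
                ret = -EINVAL;
                goto unlock;
        }

        err = blk_mq_update_nr_requests(disk->queue, nr);
        if (err)
                ret = err;

unlock:
        mutex_unlock(&q->elevator_lock);
        blk_mq_unfreeze_queue(q, memflags);
        return ret;
}

One ordering detail: the nr == q->nr_requests check sits before the
BLKDEV_MIN_RQ clamp, so rewriting the current value is accepted as a no-op,
while a too-small new value is silently raised to the minimum rather than
rejected, which preserves the knob's existing clamping behavior.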