}
void blk_mq_free_sched_res(struct elevator_resources *res,
+ struct elevator_type *type,
struct blk_mq_tag_set *set)
{
if (res->et) {
blk_mq_free_sched_tags(res->et, set);
res->et = NULL;
}
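+ /* release the elevator private data allocated by blk_mq_alloc_sched_data() */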
+ if (res->data) {
+ blk_mq_free_sched_data(type, res->data);
+ res->data = NULL;
+ }
}
void blk_mq_free_sched_res_batch(struct xarray *elv_tbl,
WARN_ON_ONCE(1);
continue;
}
- blk_mq_free_sched_res(&ctx->res, set);
+ blk_mq_free_sched_res(&ctx->res, ctx->type, set);
}
}
}
}
int blk_mq_alloc_sched_res(struct request_queue *q,
- struct elevator_resources *res, unsigned int nr_hw_queues)
+ struct elevator_type *type,
+ struct elevator_resources *res,
+ unsigned int nr_hw_queues)
{
struct blk_mq_tag_set *set = q->tag_set;
if (!res->et)
return -ENOMEM;
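+ /* allocate the elevator private data to pair with the sched tags above */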
+ res->data = blk_mq_alloc_sched_data(q, type);
+ if (IS_ERR(res->data)) {
+ int ret = PTR_ERR(res->data);
+
+ res->data = NULL;
+ blk_mq_free_sched_tags(res->et, set);
+ res->et = NULL;
+ return ret;
+ }
+
return 0;
}
goto out_unwind;
}
- ret = blk_mq_alloc_sched_res(q, &ctx->res,
- nr_hw_queues);
+ ret = blk_mq_alloc_sched_res(q, q->elevator->type,
+ &ctx->res, nr_hw_queues);
if (ret)
goto out_unwind;
}
}
return 0;
+
out_unwind:
list_for_each_entry_continue_reverse(q, &set->tag_list, tag_set_list) {
if (q->elevator) {
ctx = xa_load(elv_tbl, q->id);
if (ctx)
- blk_mq_free_sched_res(&ctx->res, set);
+ blk_mq_free_sched_res(&ctx->res,
+ ctx->type, set);
}
}
return ret;
unsigned long i;
int ret;
- eq = elevator_alloc(q, e, et);
+ eq = elevator_alloc(q, e, res);
if (!eq)
return -ENOMEM;
struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
unsigned int nr_hw_queues, unsigned int nr_requests);
int blk_mq_alloc_sched_res(struct request_queue *q,
- struct elevator_resources *res, unsigned int nr_hw_queues);
+ struct elevator_type *type,
+ struct elevator_resources *res,
+ unsigned int nr_hw_queues);
int blk_mq_alloc_sched_res_batch(struct xarray *elv_tbl,
struct blk_mq_tag_set *set, unsigned int nr_hw_queues);
int blk_mq_alloc_sched_ctx_batch(struct xarray *elv_tbl,
void blk_mq_free_sched_tags(struct elevator_tags *et,
struct blk_mq_tag_set *set);
void blk_mq_free_sched_res(struct elevator_resources *res,
+ struct elevator_type *type,
struct blk_mq_tag_set *set);
void blk_mq_free_sched_res_batch(struct xarray *et_table,
struct blk_mq_tag_set *set);
static const struct kobj_type elv_ktype;
struct elevator_queue *elevator_alloc(struct request_queue *q,
- struct elevator_type *e, struct elevator_tags *et)
+ struct elevator_type *e, struct elevator_resources *res)
{
struct elevator_queue *eq;
kobject_init(&eq->kobj, &elv_ktype);
mutex_init(&eq->sysfs_lock);
hash_init(eq->hash);
- eq->et = et;
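+ /* adopt the pre-allocated scheduler tags and elevator private data */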
+ eq->et = res->et;
+ eq->elevator_data = res->data;
return eq;
}
mutex_unlock(&q->elevator_lock);
blk_mq_unfreeze_queue(q, memflags);
if (e) {
- blk_mq_free_sched_res(&ctx->res, q->tag_set);
+ blk_mq_free_sched_res(&ctx->res, ctx->type, q->tag_set);
kobject_put(&e->kobj);
}
}
int ret = 0;
if (ctx->old) {
- struct elevator_resources res = {.et = ctx->old->et};
+ struct elevator_resources res = {
+ .et = ctx->old->et,
+ .data = ctx->old->elevator_data
+ };
bool enable_wbt = test_bit(ELEVATOR_FLAG_ENABLE_WBT_ON_EXIT,
&ctx->old->flags);
elv_unregister_queue(q, ctx->old);
- blk_mq_free_sched_res(&res, q->tag_set);
+ blk_mq_free_sched_res(&res, ctx->old->type, q->tag_set);
kobject_put(&ctx->old->kobj);
if (enable_wbt)
wbt_enable_default(q->disk);
lockdep_assert_held(&set->update_nr_hwq_lock);
if (strncmp(ctx->name, "none", 4)) {
- ret = blk_mq_alloc_sched_res(q, &ctx->res, set->nr_hw_queues);
+ ret = blk_mq_alloc_sched_res(q, ctx->type, &ctx->res,
+ set->nr_hw_queues);
if (ret)
return ret;
}
blk_mq_unfreeze_queue(q, memflags);
if (!ret)
ret = elevator_change_done(q, ctx);
+
/*
* Free sched resource if it's allocated but we couldn't switch elevator.
*/
if (!ctx->new)
- blk_mq_free_sched_res(&ctx->res, set);
+ blk_mq_free_sched_res(&ctx->res, ctx->type, set);
return ret;
}
blk_mq_unfreeze_queue_nomemrestore(q);
if (!ret)
WARN_ON_ONCE(elevator_change_done(q, ctx));
+
/*
* Free sched resource if it's allocated but we couldn't switch elevator.
*/
if (!ctx->new)
- blk_mq_free_sched_res(&ctx->res, set);
+ blk_mq_free_sched_res(&ctx->res, ctx->type, set);
}
/*
.no_uevent = true,
};
int err;
- struct elevator_type *e;
/* now we allow to switch elevator */
blk_queue_flag_clear(QUEUE_FLAG_NO_ELV_SWITCH, q);
* have multiple queues or mq-deadline is not available, default
* to "none".
*/
- e = elevator_find_get(ctx.name);
- if (!e)
+ ctx.type = elevator_find_get(ctx.name);
+ if (!ctx.type)
return;
if ((q->nr_hw_queues == 1 ||
pr_warn("\"%s\" elevator initialization, failed %d, falling back to \"none\"\n",
ctx.name, err);
}
- elevator_put(e);
+ elevator_put(ctx.type);
}
void elevator_set_none(struct request_queue *q)
ctx.name = strstrip(elevator_name);
elv_iosched_load_module(ctx.name);
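+ /* look up the requested elevator type; the reference is dropped at the end of this function */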
+ ctx.type = elevator_find_get(ctx.name);
down_read(&set->update_nr_hwq_lock);
if (!blk_queue_no_elv_switch(q)) {
ret = -ENOENT;
}
up_read(&set->update_nr_hwq_lock);
+
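+ /* drop the reference taken by elevator_find_get() above */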
+ if (ctx.type)
+ elevator_put(ctx.type);
return ret;
}
};
struct elevator_resources {
+ /* holds elevator data */
+ void *data;
/* holds elevator tags */
struct elevator_tags *et;
};
extern bool elv_bio_merge_ok(struct request *, struct bio *);
struct elevator_queue *elevator_alloc(struct request_queue *,
- struct elevator_type *, struct elevator_tags *);
+ struct elevator_type *, struct elevator_resources *);
/*
* Helper functions.