// SPDX-License-Identifier: GPL-2.0
/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - loose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>

#include <trace/events/block.h>

#include "elevator.h"
#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-wbt.h"
#include "blk-cgroup.h"
static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);
/*
 * Merge hash stuff.
 */
#define rq_hash_key(rq)         (blk_rq_pos(rq) + blk_rq_sectors(rq))
#define ELV_ON_HASH(rq)         ((rq)->rq_flags & RQF_HASHED)
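/*
 * Illustrative note (not in the original file): rq_hash_key() is the
 * sector just past the end of the request. A bio whose bi_sector equals
 * that key starts exactly where the request ends, so hashing requests by
 * their end sector turns back-merge lookup into a plain hash lookup:
 *
 *      // rq covers sectors [blk_rq_pos(rq), rq_hash_key(rq))
 *      // bio is a back-merge candidate for rq iff
 *      //      bio->bi_iter.bi_sector == rq_hash_key(rq)
 */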
/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static bool elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
{
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.allow_merge)
                return e->type->ops.allow_merge(q, rq, bio);

        return true;
}
/*
 * can we safely merge with this request?
 */
bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
{
        if (!blk_rq_merge_ok(rq, bio))
                return false;

        if (!elv_iosched_allow_bio_merge(rq, bio))
                return false;

        return true;
}
EXPORT_SYMBOL(elv_bio_merge_ok);
static inline bool elv_support_features(struct request_queue *q,
                const struct elevator_type *e)
{
        return (q->required_elevator_features & e->elevator_features) ==
                q->required_elevator_features;
}
/**
 * elevator_match - Check whether @e's name or alias matches @name
 * @e: Scheduler to test
 * @name: Elevator name to test
 *
 * Return true if the elevator @e's name or alias matches @name.
 */
static bool elevator_match(const struct elevator_type *e, const char *name)
{
        return !strcmp(e->elevator_name, name) ||
                (e->elevator_alias && !strcmp(e->elevator_alias, name));
}
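/*
 * For instance, mq-deadline registers the alias "deadline", so writing
 * either name to the queue's "scheduler" attribute selects it.
 * (Illustrative note, not in the original file.)
 */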
static struct elevator_type *__elevator_find(const char *name)
{
        struct elevator_type *e;

        list_for_each_entry(e, &elv_list, list)
                if (elevator_match(e, name))
                        return e;

        return NULL;
}
static struct elevator_type *elevator_find_get(struct request_queue *q,
                const char *name)
{
        struct elevator_type *e;

        spin_lock(&elv_list_lock);
        e = __elevator_find(name);
        if (e && (!elv_support_features(q, e) || !elevator_tryget(e)))
                e = NULL;
        spin_unlock(&elv_list_lock);
        return e;
}
static const struct kobj_type elv_ktype;
struct elevator_queue *elevator_alloc(struct request_queue *q,
                                  struct elevator_type *e)
{
        struct elevator_queue *eq;

        eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
        if (unlikely(!eq))
                return NULL;

        __elevator_get(e);
        eq->type = e;
        kobject_init(&eq->kobj, &elv_ktype);
        mutex_init(&eq->sysfs_lock);
        hash_init(eq->hash);

        return eq;
}
EXPORT_SYMBOL(elevator_alloc);
static void elevator_release(struct kobject *kobj)
{
        struct elevator_queue *e;

        e = container_of(kobj, struct elevator_queue, kobj);
        elevator_put(e->type);
        kfree(e);
}
void elevator_exit(struct request_queue *q)
{
        struct elevator_queue *e = q->elevator;

        ioc_clear_queue(q);
        blk_mq_sched_free_rqs(q);

        mutex_lock(&e->sysfs_lock);
        blk_mq_exit_sched(q, e);
        mutex_unlock(&e->sysfs_lock);

        kobject_put(&e->kobj);
}
static inline void __elv_rqhash_del(struct request *rq)
{
        hash_del(&rq->hash);
        rq->rq_flags &= ~RQF_HASHED;
}
void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
        if (ELV_ON_HASH(rq))
                __elv_rqhash_del(rq);
}
EXPORT_SYMBOL_GPL(elv_rqhash_del);
void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        BUG_ON(ELV_ON_HASH(rq));
        hash_add(e->hash, &rq->hash, rq_hash_key(rq));
        rq->rq_flags |= RQF_HASHED;
}
EXPORT_SYMBOL_GPL(elv_rqhash_add);
void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
        __elv_rqhash_del(rq);
        elv_rqhash_add(q, rq);
}
struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
        struct elevator_queue *e = q->elevator;
        struct hlist_node *next;
        struct request *rq;

        hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
                BUG_ON(!ELV_ON_HASH(rq));

                if (unlikely(!rq_mergeable(rq))) {
                        __elv_rqhash_del(rq);
                        continue;
                }

                if (rq_hash_key(rq) == offset)
                        return rq;
        }

        return NULL;
}
/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
void elv_rb_add(struct rb_root *root, struct request *rq)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct request *__rq;

        while (*p) {
                parent = *p;
                __rq = rb_entry(parent, struct request, rb_node);

                if (blk_rq_pos(rq) < blk_rq_pos(__rq))
                        p = &(*p)->rb_left;
                else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
                        p = &(*p)->rb_right;
        }

        rb_link_node(&rq->rb_node, parent, p);
        rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);
void elv_rb_del(struct rb_root *root, struct request *rq)
{
        BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
        rb_erase(&rq->rb_node, root);
        RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);
struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
        struct rb_node *n = root->rb_node;
        struct request *rq;

        while (n) {
                rq = rb_entry(n, struct request, rb_node);

                if (sector < blk_rq_pos(rq))
                        n = n->rb_left;
                else if (sector > blk_rq_pos(rq))
                        n = n->rb_right;
                else
                        return rq;
        }

        return NULL;
}
EXPORT_SYMBOL(elv_rb_find);
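/*
 * Usage sketch (assumed; mirrors how a scheduler such as mq-deadline
 * keeps a per-direction sorted tree; names here are hypothetical):
 *
 *      struct rb_root sort_list = RB_ROOT;
 *
 *      elv_rb_add(&sort_list, rq);     // keep requests sorted by sector
 *
 *      // a front-merge candidate for 'bio' is the request that starts
 *      // exactly where the bio ends:
 *      struct request *__rq = elv_rb_find(&sort_list, bio_end_sector(bio));
 *
 *      elv_rb_del(&sort_list, rq);     // when rq leaves the tree
 */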
enum elv_merge elv_merge(struct request_queue *q, struct request **req,
                struct bio *bio)
{
        struct elevator_queue *e = q->elevator;
        struct request *__rq;

        /*
         * Levels of merges:
         *      nomerges:  No merges at all attempted
         *      noxmerges: Only simple one-hit cache try
         *      merges:    All merge tries attempted
         */
        if (blk_queue_nomerges(q) || !bio_mergeable(bio))
                return ELEVATOR_NO_MERGE;

        /*
         * First try one-hit cache.
         */
        if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
                enum elv_merge ret = blk_try_merge(q->last_merge, bio);

                if (ret != ELEVATOR_NO_MERGE) {
                        *req = q->last_merge;
                        return ret;
                }
        }

        if (blk_queue_noxmerges(q))
                return ELEVATOR_NO_MERGE;

        /*
         * See if our hash lookup can find a potential backmerge.
         */
        __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
        if (__rq && elv_bio_merge_ok(__rq, bio)) {
                *req = __rq;

                if (blk_discard_mergable(__rq))
                        return ELEVATOR_DISCARD_MERGE;
                return ELEVATOR_BACK_MERGE;
        }

        if (e->type->ops.request_merge)
                return e->type->ops.request_merge(q, req, bio);

        return ELEVATOR_NO_MERGE;
}
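/*
 * Illustrative note (not in the original file): the merge levels above
 * map to the queue's "nomerges" sysfs attribute:
 *
 *      echo 0 > /sys/block/<dev>/queue/nomerges    # all merge tries
 *      echo 1 > /sys/block/<dev>/queue/nomerges    # one-hit cache only
 *      echo 2 > /sys/block/<dev>/queue/nomerges    # no merging at all
 */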
/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise. 'free' will contain all
 * requests that need to be freed.
 */
bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq,
                              struct list_head *free)
{
        struct request *__rq;
        bool ret;

        if (blk_queue_nomerges(q))
                return false;

        /*
         * First try one-hit cache.
         */
        if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq)) {
                list_add(&rq->queuelist, free);
                return true;
        }

        if (blk_queue_noxmerges(q))
                return false;

        ret = false;
        /*
         * See if our hash lookup can find a potential backmerge.
         */
        while (1) {
                __rq = elv_rqhash_find(q, blk_rq_pos(rq));
                if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
                        break;

                list_add(&rq->queuelist, free);
                /* The merged request could be merged with others, try again */
                ret = true;
                rq = __rq;
        }

        return ret;
}
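/*
 * Caller-side sketch (assumed, based on the contract above): merged
 * requests are only moved onto 'free'; the caller frees them afterwards,
 * typically once any scheduler locks are dropped:
 *
 *      LIST_HEAD(free);
 *
 *      if (elv_attempt_insert_merge(q, rq, &free)) {
 *              blk_mq_free_requests(&free);
 *              return true;
 *      }
 */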
void elv_merged_request(struct request_queue *q, struct request *rq,
                enum elv_merge type)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.request_merged)
                e->type->ops.request_merged(q, rq, type);

        if (type == ELEVATOR_BACK_MERGE)
                elv_rqhash_reposition(q, rq);

        q->last_merge = rq;
}
void elv_merge_requests(struct request_queue *q, struct request *rq,
                             struct request *next)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.requests_merged)
                e->type->ops.requests_merged(q, rq, next);

        elv_rqhash_reposition(q, rq);
        q->last_merge = rq;
}
struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.next_request)
                return e->type->ops.next_request(q, rq);

        return NULL;
}
struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.former_request)
                return e->type->ops.former_request(q, rq);

        return NULL;
}
#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        struct elv_fs_entry *entry = to_elv(attr);
        struct elevator_queue *e;
        ssize_t error;

        if (!entry->show)
                return -EIO;

        e = container_of(kobj, struct elevator_queue, kobj);
        mutex_lock(&e->sysfs_lock);
        error = e->type ? entry->show(e, page) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
        return error;
}
static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
               const char *page, size_t length)
{
        struct elv_fs_entry *entry = to_elv(attr);
        struct elevator_queue *e;
        ssize_t error;

        if (!entry->store)
                return -EIO;

        e = container_of(kobj, struct elevator_queue, kobj);
        mutex_lock(&e->sysfs_lock);
        error = e->type ? entry->store(e, page, length) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
        return error;
}
static const struct sysfs_ops elv_sysfs_ops = {
        .show   = elv_attr_show,
        .store  = elv_attr_store,
};
static const struct kobj_type elv_ktype = {
        .sysfs_ops      = &elv_sysfs_ops,
        .release        = elevator_release,
};
int elv_register_queue(struct request_queue *q, bool uevent)
{
        struct elevator_queue *e = q->elevator;
        int error;

        lockdep_assert_held(&q->sysfs_lock);

        error = kobject_add(&e->kobj, &q->disk->queue_kobj, "iosched");
        if (!error) {
                struct elv_fs_entry *attr = e->type->elevator_attrs;

                if (attr) {
                        while (attr->attr.name) {
                                if (sysfs_create_file(&e->kobj, &attr->attr))
                                        break;
                                attr++;
                        }
                }
                if (uevent)
                        kobject_uevent(&e->kobj, KOBJ_ADD);

                set_bit(ELEVATOR_FLAG_REGISTERED, &e->flags);
        }
        return error;
}
void elv_unregister_queue(struct request_queue *q)
{
        struct elevator_queue *e = q->elevator;

        lockdep_assert_held(&q->sysfs_lock);

        if (e && test_and_clear_bit(ELEVATOR_FLAG_REGISTERED, &e->flags)) {
                kobject_uevent(&e->kobj, KOBJ_REMOVE);
                kobject_del(&e->kobj);
        }
}
int elv_register(struct elevator_type *e)
{
        /* finish request is mandatory */
        if (WARN_ON_ONCE(!e->ops.finish_request))
                return -EINVAL;
        /* insert_requests and dispatch_request are mandatory */
        if (WARN_ON_ONCE(!e->ops.insert_requests || !e->ops.dispatch_request))
                return -EINVAL;

        /* create icq_cache if requested */
        if (e->icq_size) {
                if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
                    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
                        return -EINVAL;

                snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
                         "%s_io_cq", e->elevator_name);
                e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
                                                 e->icq_align, 0, NULL);
                if (!e->icq_cache)
                        return -ENOMEM;
        }

        /* register, don't allow duplicate names */
        spin_lock(&elv_list_lock);
        if (__elevator_find(e->elevator_name)) {
                spin_unlock(&elv_list_lock);
                kmem_cache_destroy(e->icq_cache);
                return -EBUSY;
        }
        list_add_tail(&e->list, &elv_list);
        spin_unlock(&elv_list_lock);

        printk(KERN_INFO "io scheduler %s registered\n", e->elevator_name);

        return 0;
}
EXPORT_SYMBOL_GPL(elv_register);
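/*
 * Registration sketch (hypothetical "foo" scheduler, not part of this
 * file): a minimal elevator_type provides the mandatory ops checked at
 * the top of elv_register() and registers itself from module init.
 * Real schedulers typically also provide init_sched/exit_sched and
 * friends; this only shows the hard requirements:
 *
 *      static struct elevator_type foo_sched = {
 *              .ops = {
 *                      .insert_requests        = foo_insert_requests,
 *                      .dispatch_request       = foo_dispatch_request,
 *                      .finish_request         = foo_finish_request,
 *              },
 *              .elevator_name  = "foo",
 *              .elevator_owner = THIS_MODULE,
 *      };
 *
 *      static int __init foo_init(void)
 *      {
 *              return elv_register(&foo_sched);
 *      }
 */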
void elv_unregister(struct elevator_type *e)
{
        /* unregister */
        spin_lock(&elv_list_lock);
        list_del_init(&e->list);
        spin_unlock(&elv_list_lock);

        /*
         * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
         * sure all RCU operations are complete before proceeding.
         */
        if (e->icq_cache) {
                rcu_barrier();
                kmem_cache_destroy(e->icq_cache);
                e->icq_cache = NULL;
        }
}
EXPORT_SYMBOL_GPL(elv_unregister);
static inline bool elv_support_iosched(struct request_queue *q)
{
        if (!queue_is_mq(q) ||
            (q->tag_set && (q->tag_set->flags & BLK_MQ_F_NO_SCHED)))
                return false;
        return true;
}
/*
 * For single queue devices, default to using mq-deadline. If we have multiple
 * queues or mq-deadline is not available, default to "none".
 */
static struct elevator_type *elevator_get_default(struct request_queue *q)
{
        if (q->tag_set && q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT)
                return NULL;

        if (q->nr_hw_queues != 1 &&
            !blk_mq_is_shared_tags(q->tag_set->flags))
                return NULL;

        return elevator_find_get(q, "mq-deadline");
}
/*
 * Get the first elevator providing the features required by the request queue.
 * Default to "none" if no matching elevator is found.
 */
static struct elevator_type *elevator_get_by_features(struct request_queue *q)
{
        struct elevator_type *e, *found = NULL;

        spin_lock(&elv_list_lock);

        list_for_each_entry(e, &elv_list, list) {
                if (elv_support_features(q, e)) {
                        found = e;
                        break;
                }
        }

        if (found && !elevator_tryget(found))
                found = NULL;

        spin_unlock(&elv_list_lock);
        return found;
}
/*
 * For a device queue that has no required features, use the default elevator
 * settings. Otherwise, use the first elevator available matching the required
 * features. If no suitable elevator is found or if the chosen elevator
 * initialization fails, fall back to the "none" elevator (no elevator).
 */
void elevator_init_mq(struct request_queue *q)
{
        struct elevator_type *e;
        int err;

        if (!elv_support_iosched(q))
                return;

        WARN_ON_ONCE(blk_queue_registered(q));

        if (unlikely(q->elevator))
                return;

        if (!q->required_elevator_features)
                e = elevator_get_default(q);
        else
                e = elevator_get_by_features(q);
        if (!e)
                return;

        /*
         * We are called before adding disk, when there isn't any FS I/O,
         * so freezing queue plus canceling dispatch work is enough to
         * drain any dispatch activities originated from passthrough
         * requests, then no need to quiesce queue which may add long boot
         * latency, especially when lots of disks are involved.
         */
        blk_mq_freeze_queue(q);
        blk_mq_cancel_work_sync(q);

        err = blk_mq_init_sched(q, e);

        blk_mq_unfreeze_queue(q);

        if (err) {
                pr_warn("\"%s\" elevator initialization failed, "
                        "falling back to \"none\"\n", e->elevator_name);
        }

        elevator_put(e);
}
/*
 * Switch to new_e io scheduler.
 *
 * If switching fails, we are most likely running out of memory and not able
 * to restore the old io scheduler, so we leave the io scheduler as none.
 */
int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
        int ret;

        lockdep_assert_held(&q->sysfs_lock);

        blk_mq_freeze_queue(q);
        blk_mq_quiesce_queue(q);

        if (q->elevator) {
                elv_unregister_queue(q);
                elevator_exit(q);
        }

        ret = blk_mq_init_sched(q, new_e);
        if (ret)
                goto out_unfreeze;

        ret = elv_register_queue(q, true);
        if (ret) {
                elevator_exit(q);
                goto out_unfreeze;
        }
        blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);

out_unfreeze:
        blk_mq_unquiesce_queue(q);
        blk_mq_unfreeze_queue(q);

        if (ret) {
                pr_warn("elv: switch to \"%s\" failed, falling back to \"none\"\n",
                        new_e->elevator_name);
        }

        return ret;
}
void elevator_disable(struct request_queue *q)
{
        lockdep_assert_held(&q->sysfs_lock);

        blk_mq_freeze_queue(q);
        blk_mq_quiesce_queue(q);

        elv_unregister_queue(q);
        elevator_exit(q);
        blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
        q->elevator = NULL;
        q->nr_requests = q->tag_set->queue_depth;
        blk_add_trace_msg(q, "elv switch: none");

        blk_mq_unquiesce_queue(q);
        blk_mq_unfreeze_queue(q);
}
/*
 * Switch this queue to the given IO scheduler.
 */
static int elevator_change(struct request_queue *q, const char *elevator_name)
{
        struct elevator_type *e;
        int ret;

        /* Make sure queue is not in the middle of being removed */
        if (!blk_queue_registered(q))
                return -ENOENT;

        if (!strncmp(elevator_name, "none", 4)) {
                if (q->elevator)
                        elevator_disable(q);
                return 0;
        }

        if (q->elevator && elevator_match(q->elevator->type, elevator_name))
                return 0;

        e = elevator_find_get(q, elevator_name);
        if (!e) {
                request_module("%s-iosched", elevator_name);
                e = elevator_find_get(q, elevator_name);
                if (!e)
                        return -EINVAL;
        }
        ret = elevator_switch(q, e);
        elevator_put(e);
        return ret;
}
ssize_t elv_iosched_store(struct request_queue *q, const char *buf,
                          size_t count)
{
        char elevator_name[ELV_NAME_MAX];
        int ret;

        if (!elv_support_iosched(q))
                return count;

        strscpy(elevator_name, buf, sizeof(elevator_name));
        ret = elevator_change(q, strstrip(elevator_name));
        if (!ret)
                return count;
        return ret;
}
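/*
 * This is the store side of the queue's "scheduler" attribute, e.g.
 * (illustrative shell usage, not part of this file):
 *
 *      echo mq-deadline > /sys/block/sda/queue/scheduler
 *
 * strstrip() above removes the trailing newline from such writes before
 * the name is matched.
 */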
ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
        struct elevator_queue *eq = q->elevator;
        struct elevator_type *cur = NULL, *e;
        int len = 0;

        if (!elv_support_iosched(q))
                return sprintf(name, "none\n");

        if (!q->elevator) {
                len += sprintf(name+len, "[none] ");
        } else {
                len += sprintf(name+len, "none ");
                cur = eq->type;
        }

        spin_lock(&elv_list_lock);
        list_for_each_entry(e, &elv_list, list) {
                if (e == cur)
                        len += sprintf(name+len, "[%s] ", e->elevator_name);
                else if (elv_support_features(q, e))
                        len += sprintf(name+len, "%s ", e->elevator_name);
        }
        spin_unlock(&elv_list_lock);

        len += sprintf(name+len, "\n");
        return len;
}
struct request *elv_rb_former_request(struct request_queue *q,
                                      struct request *rq)
{
        struct rb_node *rbprev = rb_prev(&rq->rb_node);

        if (rbprev)
                return rb_entry_rq(rbprev);

        return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);
struct request *elv_rb_latter_request(struct request_queue *q,
                                      struct request *rq)
{
        struct rb_node *rbnext = rb_next(&rq->rb_node);

        if (rbnext)
                return rb_entry_rq(rbnext);

        return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);
static int __init elevator_setup(char *str)
{
        pr_warn("Kernel parameter elevator= does not have any effect anymore.\n"
                "Please use sysfs to set IO scheduler for individual devices.\n");
        return 1;
}

__setup("elevator=", elevator_setup);