// SPDX-License-Identifier: GPL-2.0
/*
 *  MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
 *  for the blk-mq scheduling framework
 *
 *  Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/sbitmap.h>

#include <trace/events/block.h>

#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"
/*
 * See Documentation/block/deadline-iosched.rst
 */
static const int read_expire = HZ / 2;	/* max time before a read is submitted. */
static const int write_expire = 5 * HZ;	/* ditto for writes, these limits are SOFT! */
/*
 * Time after which to dispatch lower priority requests even if higher
 * priority requests are pending.
 */
static const int prio_aging_expire = 10 * HZ;
static const int writes_starved = 2;	/* max times reads can starve a write */
static const int fifo_batch = 16;	/* # of sequential requests treated as one
					   by the above parameters. For throughput. */
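/*
 * Illustration (not in the original source): with CONFIG_HZ=1000 the defaults
 * above correspond to roughly 500 ms for reads, 5 s for writes and 10 s for
 * priority aging; the sysfs attributes below expose and accept these values
 * in milliseconds via jiffies_to_msecs()/msecs_to_jiffies().
 */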
enum dd_data_dir {
	DD_READ		= READ,
	DD_WRITE	= WRITE,
};

enum { DD_DIR_COUNT = 2 };

enum dd_prio {
	DD_RT_PRIO	= 0,
	DD_BE_PRIO	= 1,
	DD_IDLE_PRIO	= 2,
	DD_PRIO_MAX	= 2,
};

enum { DD_PRIO_COUNT = 3 };
/*
 * I/O statistics per I/O priority. It is fine if these counters overflow.
 * What matters is that these counters are at least as wide as
 * log2(max_outstanding_requests).
 */
struct io_stats_per_prio {
	uint32_t inserted;
	uint32_t merged;
	uint32_t dispatched;
	atomic_t completed;
};
/*
 * Deadline scheduler data per I/O priority (enum dd_prio). Requests are
 * present on both sort_list[] and fifo_list[].
 */
struct dd_per_prio {
	struct list_head dispatch;
	struct rb_root sort_list[DD_DIR_COUNT];
	struct list_head fifo_list[DD_DIR_COUNT];
	/* Next request in FIFO order. Read, write or both are NULL. */
	struct request *next_rq[DD_DIR_COUNT];
	struct io_stats_per_prio stats;
};
struct deadline_data {
	struct dd_per_prio per_prio[DD_PRIO_COUNT];

	/* Data direction of latest dispatched request. */
	enum dd_data_dir last_dir;
	unsigned int batching;		/* number of sequential requests made */
	unsigned int starved;		/* times reads have starved writes */

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	int fifo_expire[DD_DIR_COUNT];
	int fifo_batch;
	int writes_starved;
	int front_merges;
	u32 async_depth;
	int prio_aging_expire;

	spinlock_t lock;
	spinlock_t zone_lock;
};
/* Maps an I/O priority class to a deadline scheduler priority. */
static const enum dd_prio ioprio_class_to_prio[] = {
	[IOPRIO_CLASS_NONE]	= DD_BE_PRIO,
	[IOPRIO_CLASS_RT]	= DD_RT_PRIO,
	[IOPRIO_CLASS_BE]	= DD_BE_PRIO,
	[IOPRIO_CLASS_IDLE]	= DD_IDLE_PRIO,
};
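/*
 * Illustration (not in the original source): a request carrying
 * IOPRIO_CLASS_RT is scheduled from per_prio[DD_RT_PRIO], while requests
 * without an explicit priority class (IOPRIO_CLASS_NONE) are treated as
 * best-effort and scheduled from per_prio[DD_BE_PRIO].
 */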
static inline struct rb_root *
deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq)
{
	return &per_prio->sort_list[rq_data_dir(rq)];
}
/*
 * Returns the I/O priority class (IOPRIO_CLASS_*) that has been assigned to a
 * request.
 */
static u8 dd_rq_ioclass(struct request *rq)
{
	return IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
}
/*
 * get the request after `rq' in sector-sorted order
 */
static inline struct request *
deadline_latter_request(struct request *rq)
{
	struct rb_node *node = rb_next(&rq->rb_node);

	if (node)
		return rb_entry_rq(node);

	return NULL;
}
static void
deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
{
	struct rb_root *root = deadline_rb_root(per_prio, rq);

	elv_rb_add(root, rq);
}
static inline void
deadline_del_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
{
	const enum dd_data_dir data_dir = rq_data_dir(rq);

	if (per_prio->next_rq[data_dir] == rq)
		per_prio->next_rq[data_dir] = deadline_latter_request(rq);

	elv_rb_del(deadline_rb_root(per_prio, rq), rq);
}
/*
 * remove rq from rbtree and fifo.
 */
static void deadline_remove_request(struct request_queue *q,
				    struct dd_per_prio *per_prio,
				    struct request *rq)
{
	list_del_init(&rq->queuelist);

	/*
	 * We might not be on the rbtree, if we are doing an insert merge
	 */
	if (!RB_EMPTY_NODE(&rq->rb_node))
		deadline_del_rq_rb(per_prio, rq);

	elv_rqhash_del(q, rq);
	if (q->last_merge == rq)
		q->last_merge = NULL;
}
static void dd_request_merged(struct request_queue *q, struct request *req,
			      enum elv_merge type)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = dd_rq_ioclass(req);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
	struct dd_per_prio *per_prio = &dd->per_prio[prio];

	/*
	 * if the merge was a front merge, we need to reposition request
	 */
	if (type == ELEVATOR_FRONT_MERGE) {
		elv_rb_del(deadline_rb_root(per_prio, req), req);
		deadline_add_rq_rb(per_prio, req);
	}
}
/*
 * Callback function that is invoked after @next has been merged into @req.
 */
static void dd_merged_requests(struct request_queue *q, struct request *req,
			       struct request *next)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = dd_rq_ioclass(next);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];

	lockdep_assert_held(&dd->lock);

	dd->per_prio[prio].stats.merged++;

	/*
	 * if next expires before rq, assign its expire time to rq
	 * and move into next position (next will be deleted) in fifo
	 */
	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
		if (time_before((unsigned long)next->fifo_time,
				(unsigned long)req->fifo_time)) {
			list_move(&req->queuelist, &next->queuelist);
			req->fifo_time = next->fifo_time;
		}
	}

	/*
	 * kill knowledge of next, this one is a goner
	 */
	deadline_remove_request(q, &dd->per_prio[prio], next);
}
/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
		      struct request *rq)
{
	const enum dd_data_dir data_dir = rq_data_dir(rq);

	per_prio->next_rq[data_dir] = deadline_latter_request(rq);

	/*
	 * take it off the sort and fifo list
	 */
	deadline_remove_request(rq->q, per_prio, rq);
}
/* Number of requests queued for a given priority level. */
static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
{
	const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats;

	lockdep_assert_held(&dd->lock);

	return stats->inserted - atomic_read(&stats->completed);
}
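/*
 * Illustration (not in the original source): because both counters are
 * unsigned, the subtraction above stays correct even after the counters wrap
 * around, as long as fewer than 2^32 requests are outstanding at once (see
 * the comment above struct io_stats_per_prio).
 */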
/*
 * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
 * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
 */
static inline int deadline_check_fifo(struct dd_per_prio *per_prio,
				      enum dd_data_dir data_dir)
{
	struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);

	/*
	 * rq is expired!
	 */
	if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
		return 1;

	return 0;
}
/*
 * For the specified data direction, return the next request to
 * dispatch using arrival ordered lists.
 */
static struct request *
deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
		      enum dd_data_dir data_dir)
{
	struct request *rq;
	unsigned long flags;

	if (list_empty(&per_prio->fifo_list[data_dir]))
		return NULL;

	rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
	if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
		return rq;

	/*
	 * Look for a write request that can be dispatched, that is one with
	 * an unlocked target zone.
	 */
	spin_lock_irqsave(&dd->zone_lock, flags);
	list_for_each_entry(rq, &per_prio->fifo_list[DD_WRITE], queuelist) {
		if (blk_req_can_dispatch_to_zone(rq))
			goto out;
	}
	rq = NULL;
out:
	spin_unlock_irqrestore(&dd->zone_lock, flags);

	return rq;
}
/*
 * For the specified data direction, return the next request to
 * dispatch using sector position sorted lists.
 */
static struct request *
deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
		      enum dd_data_dir data_dir)
{
	struct request *rq;
	unsigned long flags;

	rq = per_prio->next_rq[data_dir];
	if (!rq)
		return NULL;

	if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
		return rq;

	/*
	 * Look for a write request that can be dispatched, that is one with
	 * an unlocked target zone.
	 */
	spin_lock_irqsave(&dd->zone_lock, flags);
	while (rq) {
		if (blk_req_can_dispatch_to_zone(rq))
			break;
		rq = deadline_latter_request(rq);
	}
	spin_unlock_irqrestore(&dd->zone_lock, flags);

	return rq;
}
/*
 * Returns true if and only if @rq started after @latest_start where
 * @latest_start is in jiffies.
 */
static bool started_after(struct deadline_data *dd, struct request *rq,
			  unsigned long latest_start)
{
	unsigned long start_time = (unsigned long)rq->fifo_time;

	start_time -= dd->fifo_expire[rq_data_dir(rq)];

	return time_after(start_time, latest_start);
}
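/*
 * Illustration (not in the original source): for a request on a FIFO list,
 * dd_insert_request() sets fifo_time = jiffies + fifo_expire[data_dir], so
 * subtracting fifo_expire[] above recovers the insertion time. A read
 * inserted at jiffies J therefore "started after" any latest_start < J.
 */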
/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc and with a start time <= @latest_start.
 */
static struct request *__dd_dispatch_request(struct deadline_data *dd,
					     struct dd_per_prio *per_prio,
					     unsigned long latest_start)
{
	struct request *rq, *next_rq;
	enum dd_data_dir data_dir;
	enum dd_prio prio;
	u8 ioprio_class;

	lockdep_assert_held(&dd->lock);

	if (!list_empty(&per_prio->dispatch)) {
		rq = list_first_entry(&per_prio->dispatch, struct request,
				      queuelist);
		if (started_after(dd, rq, latest_start))
			return NULL;
		list_del_init(&rq->queuelist);
		goto done;
	}

	/*
	 * batches are currently reads XOR writes
	 */
	rq = deadline_next_request(dd, per_prio, dd->last_dir);
	if (rq && dd->batching < dd->fifo_batch)
		/* we have a next request and are still entitled to batch */
		goto dispatch_request;

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */
	if (!list_empty(&per_prio->fifo_list[DD_READ])) {
		BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_READ]));

		if (deadline_fifo_request(dd, per_prio, DD_WRITE) &&
		    (dd->starved++ >= dd->writes_starved))
			goto dispatch_writes;

		data_dir = DD_READ;

		goto dispatch_find_request;
	}

	/*
	 * there are either no reads or writes have been starved
	 */
	if (!list_empty(&per_prio->fifo_list[DD_WRITE])) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_WRITE]));

		dd->starved = 0;

		data_dir = DD_WRITE;

		goto dispatch_find_request;
	}

	return NULL;

dispatch_find_request:
	/*
	 * we are not running a batch, find best request for selected data_dir
	 */
	next_rq = deadline_next_request(dd, per_prio, data_dir);
	if (deadline_check_fifo(per_prio, data_dir) || !next_rq) {
		/*
		 * A deadline has expired, the last request was in the other
		 * direction, or we have run out of higher-sectored requests.
		 * Start again from the request with the earliest expiry time.
		 */
		rq = deadline_fifo_request(dd, per_prio, data_dir);
	} else {
		/*
		 * The last req was the same dir and we have a next request in
		 * sort order. No expired requests so continue on from here.
		 */
		rq = next_rq;
	}

	/*
	 * For a zoned block device, if we only have writes queued and none of
	 * them can be dispatched, rq will be NULL.
	 */
	if (!rq)
		return NULL;

	dd->last_dir = data_dir;
	dd->batching = 0;

dispatch_request:
	if (started_after(dd, rq, latest_start))
		return NULL;

	/*
	 * rq is the selected appropriate request.
	 */
	dd->batching++;
	deadline_move_request(dd, per_prio, rq);
done:
	ioprio_class = dd_rq_ioclass(rq);
	prio = ioprio_class_to_prio[ioprio_class];
	dd->per_prio[prio].stats.dispatched++;
	/*
	 * If the request needs its target zone locked, do it.
	 */
	blk_req_zone_write_lock(rq);
	rq->rq_flags |= RQF_STARTED;
	return rq;
}
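/*
 * Illustration (not in the original source): with the default fifo_batch of
 * 16, once a data direction has been selected up to 16 requests can be
 * dispatched back to back in sort (sector) order before expired requests in
 * the other direction are considered again.
 */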
/*
 * Check whether there are any requests with priority other than DD_RT_PRIO
 * that were inserted more than prio_aging_expire jiffies ago.
 */
static struct request *dd_dispatch_prio_aged_requests(struct deadline_data *dd,
						      unsigned long now)
{
	struct request *rq;
	enum dd_prio prio;
	int prio_cnt;

	lockdep_assert_held(&dd->lock);

	prio_cnt = !!dd_queued(dd, DD_RT_PRIO) + !!dd_queued(dd, DD_BE_PRIO) +
		   !!dd_queued(dd, DD_IDLE_PRIO);
	if (prio_cnt < 2)
		return NULL;

	for (prio = DD_BE_PRIO; prio <= DD_PRIO_MAX; prio++) {
		rq = __dd_dispatch_request(dd, &dd->per_prio[prio],
					   now - dd->prio_aging_expire);
		if (rq)
			return rq;
	}

	return NULL;
}
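/*
 * Illustration (not in the original source): passing now - prio_aging_expire
 * as @latest_start means that only best-effort or idle requests that have
 * been queued for longer than prio_aging_expire (10 seconds by default) are
 * dispatched here, ahead of the normal priority-ordered pass below.
 */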
/*
 * Called from blk_mq_run_hw_queue() -> __blk_mq_sched_dispatch_requests().
 *
 * One confusing aspect here is that we get called for a specific
 * hardware queue, but we may return a request that is for a
 * different hardware queue. This is because mq-deadline has shared
 * state for all hardware queues, in terms of sorting, FIFOs, etc.
 */
static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
	const unsigned long now = jiffies;
	struct request *rq;
	enum dd_prio prio;

	spin_lock(&dd->lock);
	rq = dd_dispatch_prio_aged_requests(dd, now);
	if (rq)
		goto unlock;

	/*
	 * Next, dispatch requests in priority order. Ignore lower priority
	 * requests if any higher priority requests are pending.
	 */
	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
		rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now);
		if (rq || dd_queued(dd, prio))
			break;
	}

unlock:
	spin_unlock(&dd->lock);

	return rq;
}
/*
 * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
 * function is used by __blk_mq_get_tag().
 */
static void dd_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
{
	struct deadline_data *dd = data->q->elevator->elevator_data;

	/* Do not throttle synchronous reads. */
	if (op_is_sync(opf) && !op_is_write(opf))
		return;

	/*
	 * Throttle asynchronous requests and writes such that these requests
	 * do not block the allocation of synchronous requests.
	 */
	data->shallow_depth = dd->async_depth;
}
/* Called by blk_mq_update_nr_requests(). */
static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	struct blk_mq_tags *tags = hctx->sched_tags;

	dd->async_depth = max(1UL, 3 * q->nr_requests / 4);

	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, dd->async_depth);
}
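/*
 * Illustration (not in the original source): if q->nr_requests is 64,
 * async_depth becomes max(1UL, 3 * 64 / 4) = 48, so asynchronous requests and
 * writes can use at most three quarters of the scheduler tags while
 * synchronous reads are never throttled (see dd_limit_depth()).
 */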
/* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	dd_depth_updated(hctx);

	return 0;
}
static void dd_exit_sched(struct elevator_queue *e)
{
	struct deadline_data *dd = e->elevator_data;
	enum dd_prio prio;

	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
		struct dd_per_prio *per_prio = &dd->per_prio[prio];
		const struct io_stats_per_prio *stats = &per_prio->stats;
		uint32_t queued;

		WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ]));
		WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE]));

		spin_lock(&dd->lock);
		queued = dd_queued(dd, prio);
		spin_unlock(&dd->lock);

		WARN_ONCE(queued != 0,
			  "statistics for priority %d: i %u m %u d %u c %u\n",
			  prio, stats->inserted, stats->merged,
			  stats->dispatched, atomic_read(&stats->completed));
	}

	kfree(dd);
}
/*
 * initialize elevator private data (deadline_data).
 */
static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct deadline_data *dd;
	struct elevator_queue *eq;
	enum dd_prio prio;
	int ret = -ENOMEM;

	eq = elevator_alloc(q, e);
	if (!eq)
		return ret;

	dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
	if (!dd)
		goto put_eq;

	eq->elevator_data = dd;

	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
		struct dd_per_prio *per_prio = &dd->per_prio[prio];

		INIT_LIST_HEAD(&per_prio->dispatch);
		INIT_LIST_HEAD(&per_prio->fifo_list[DD_READ]);
		INIT_LIST_HEAD(&per_prio->fifo_list[DD_WRITE]);
		per_prio->sort_list[DD_READ] = RB_ROOT;
		per_prio->sort_list[DD_WRITE] = RB_ROOT;
	}

	dd->fifo_expire[DD_READ] = read_expire;
	dd->fifo_expire[DD_WRITE] = write_expire;
	dd->writes_starved = writes_starved;
	dd->front_merges = 1;
	dd->last_dir = DD_WRITE;
	dd->fifo_batch = fifo_batch;
	dd->prio_aging_expire = prio_aging_expire;
	spin_lock_init(&dd->lock);
	spin_lock_init(&dd->zone_lock);

	/* We dispatch from request queue wide instead of hw queue */
	blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);

	q->elevator = eq;
	return 0;

put_eq:
	kobject_put(&eq->kobj);
	return ret;
}
/*
 * Try to merge @bio into an existing request. If @bio has been merged into
 * an existing request, store the pointer to that request into *@rq.
 */
static int dd_request_merge(struct request_queue *q, struct request **rq,
			    struct bio *bio)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = IOPRIO_PRIO_CLASS(bio->bi_ioprio);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
	struct dd_per_prio *per_prio = &dd->per_prio[prio];
	sector_t sector = bio_end_sector(bio);
	struct request *__rq;

	if (!dd->front_merges)
		return ELEVATOR_NO_MERGE;

	__rq = elv_rb_find(&per_prio->sort_list[bio_data_dir(bio)], sector);
	if (__rq) {
		BUG_ON(sector != blk_rq_pos(__rq));

		if (elv_bio_merge_ok(__rq, bio)) {
			*rq = __rq;
			if (blk_discard_mergable(__rq))
				return ELEVATOR_DISCARD_MERGE;
			return ELEVATOR_FRONT_MERGE;
		}
	}

	return ELEVATOR_NO_MERGE;
}
/*
 * Attempt to merge a bio into an existing request. This function is called
 * before @bio is associated with a request.
 */
static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
			 unsigned int nr_segs)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct request *free = NULL;
	bool ret;

	spin_lock(&dd->lock);
	ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
	spin_unlock(&dd->lock);

	if (free)
		blk_mq_free_request(free);

	return ret;
}
/*
 * add rq to rbtree and fifo
 */
static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			      bool at_head)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	const enum dd_data_dir data_dir = rq_data_dir(rq);
	u16 ioprio = req_get_ioprio(rq);
	u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio);
	struct dd_per_prio *per_prio;
	enum dd_prio prio;
	LIST_HEAD(free);

	lockdep_assert_held(&dd->lock);

	/*
	 * This may be a requeue of a write request that has locked its
	 * target zone. If it is the case, this releases the zone lock.
	 */
	blk_req_zone_write_unlock(rq);

	prio = ioprio_class_to_prio[ioprio_class];
	per_prio = &dd->per_prio[prio];
	if (!rq->elv.priv[0]) {
		per_prio->stats.inserted++;
		rq->elv.priv[0] = (void *)(uintptr_t)1;
	}

	if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
		blk_mq_free_requests(&free);
		return;
	}

	trace_block_rq_insert(rq);

	if (at_head) {
		list_add(&rq->queuelist, &per_prio->dispatch);
		rq->fifo_time = jiffies;
	} else {
		deadline_add_rq_rb(per_prio, rq);

		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * set expire time and add to fifo list
		 */
		rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
		list_add_tail(&rq->queuelist, &per_prio->fifo_list[data_dir]);
	}
}
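/*
 * Illustration (not in the original source): an at_head insertion (e.g. a
 * requeue) goes straight onto the per-priority dispatch list with
 * fifo_time = jiffies, while a regular insertion is added to both the rb-tree
 * (sector order) and the FIFO tail with a deadline of
 * jiffies + fifo_expire[data_dir].
 */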
/*
 * Called from blk_mq_sched_insert_request() or blk_mq_sched_insert_requests().
 */
static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
			       struct list_head *list, bool at_head)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;

	spin_lock(&dd->lock);
	while (!list_empty(list)) {
		struct request *rq;

		rq = list_first_entry(list, struct request, queuelist);
		list_del_init(&rq->queuelist);
		dd_insert_request(hctx, rq, at_head);
	}
	spin_unlock(&dd->lock);
}
/* Callback from inside blk_mq_rq_ctx_init(). */
static void dd_prepare_request(struct request *rq)
{
	rq->elv.priv[0] = NULL;
}
/*
 * Callback from inside blk_mq_free_request().
 *
 * For zoned block devices, write unlock the target zone of
 * completed write requests. Do this while holding the zone lock
 * spinlock so that the zone is never unlocked while deadline_fifo_request()
 * or deadline_next_request() are executing. This function is called for
 * all requests, whether or not these requests complete successfully.
 *
 * For a zoned block device, __dd_dispatch_request() may have stopped
 * dispatching requests if all the queued requests are write requests directed
 * at zones that are already locked due to on-going write requests. To ensure
 * write request dispatch progress in this case, mark the queue as needing a
 * restart to ensure that the queue is run again after completion of the
 * request and zones being unlocked.
 */
static void dd_finish_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = dd_rq_ioclass(rq);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
	struct dd_per_prio *per_prio = &dd->per_prio[prio];

	/*
	 * The block layer core may call dd_finish_request() without having
	 * called dd_insert_requests(). Skip requests that bypassed I/O
	 * scheduling. See also blk_mq_request_bypass_insert().
	 */
	if (!rq->elv.priv[0])
		return;

	atomic_inc(&per_prio->stats.completed);

	if (blk_queue_is_zoned(q)) {
		unsigned long flags;

		spin_lock_irqsave(&dd->zone_lock, flags);
		blk_req_zone_write_unlock(rq);
		if (!list_empty(&per_prio->fifo_list[DD_WRITE]))
			blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
		spin_unlock_irqrestore(&dd->zone_lock, flags);
	}
}
static bool dd_has_work_for_prio(struct dd_per_prio *per_prio)
{
	return !list_empty_careful(&per_prio->dispatch) ||
		!list_empty_careful(&per_prio->fifo_list[DD_READ]) ||
		!list_empty_careful(&per_prio->fifo_list[DD_WRITE]);
}

static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
	enum dd_prio prio;

	for (prio = 0; prio <= DD_PRIO_MAX; prio++)
		if (dd_has_work_for_prio(&dd->per_prio[prio]))
			return true;

	return false;
}
#define SHOW_INT(__FUNC, __VAR)						\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct deadline_data *dd = e->elevator_data;			\
									\
	return sysfs_emit(page, "%d\n", __VAR);				\
}

#define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR))

SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]);
SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
SHOW_JIFFIES(deadline_prio_aging_expire_show, dd->prio_aging_expire);
SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
SHOW_INT(deadline_front_merges_show, dd->front_merges);
SHOW_INT(deadline_async_depth_show, dd->async_depth);
SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
#undef SHOW_INT
#undef SHOW_JIFFIES
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{									\
	struct deadline_data *dd = e->elevator_data;			\
	int __data, __ret;						\
									\
	__ret = kstrtoint(page, 0, &__data);				\
	if (__ret < 0)							\
		return __ret;						\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	*(__PTR) = __CONV(__data);					\
	return count;							\
}
#define STORE_INT(__FUNC, __PTR, MIN, MAX)				\
	STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, )
#define STORE_JIFFIES(__FUNC, __PTR, MIN, MAX)				\
	STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies)

STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX);
STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
STORE_JIFFIES(deadline_prio_aging_expire_store, &dd->prio_aging_expire, 0, INT_MAX);
STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
#undef STORE_FUNCTION
#undef STORE_INT
#undef STORE_JIFFIES
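/*
 * Illustration (not in the original source): writing to one of the *_expire
 * attributes, e.g. `echo 750 > /sys/block/<dev>/queue/iosched/read_expire`,
 * goes through STORE_JIFFIES above, so the value is parsed as milliseconds,
 * clamped to [0, INT_MAX] and stored as msecs_to_jiffies(750) in
 * dd->fifo_expire[DD_READ].
 */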
#define DD_ATTR(name) \
	__ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
	DD_ATTR(read_expire),
	DD_ATTR(write_expire),
	DD_ATTR(writes_starved),
	DD_ATTR(front_merges),
	DD_ATTR(async_depth),
	DD_ATTR(fifo_batch),
	DD_ATTR(prio_aging_expire),
	__ATTR_NULL
};
#ifdef CONFIG_BLK_DEBUG_FS
#define DEADLINE_DEBUGFS_DDIR_ATTRS(prio, data_dir, name)		\
static void *deadline_##name##_fifo_start(struct seq_file *m,		\
					  loff_t *pos)			\
	__acquires(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	spin_lock(&dd->lock);						\
	return seq_list_start(&per_prio->fifo_list[data_dir], *pos);	\
}									\
									\
static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,	\
					 loff_t *pos)			\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	return seq_list_next(v, &per_prio->fifo_list[data_dir], pos);	\
}									\
									\
static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)	\
	__releases(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	spin_unlock(&dd->lock);						\
}									\
									\
static const struct seq_operations deadline_##name##_fifo_seq_ops = {	\
	.start	= deadline_##name##_fifo_start,				\
	.next	= deadline_##name##_fifo_next,				\
	.stop	= deadline_##name##_fifo_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
};									\
									\
static int deadline_##name##_next_rq_show(void *data,			\
					  struct seq_file *m)		\
{									\
	struct request_queue *q = data;					\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
	struct request *rq = per_prio->next_rq[data_dir];		\
									\
	if (rq)								\
		__blk_mq_debugfs_rq_show(m, rq);			\
	return 0;							\
}
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_READ, read0);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_WRITE, write0);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_READ, read1);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_WRITE, write1);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_READ, read2);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_WRITE, write2);
#undef DEADLINE_DEBUGFS_DDIR_ATTRS
static int deadline_batching_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->batching);
	return 0;
}
static int deadline_starved_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->starved);
	return 0;
}
static int dd_async_depth_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->async_depth);
	return 0;
}
static int dd_queued_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;
	u32 rt, be, idle;

	spin_lock(&dd->lock);
	rt = dd_queued(dd, DD_RT_PRIO);
	be = dd_queued(dd, DD_BE_PRIO);
	idle = dd_queued(dd, DD_IDLE_PRIO);
	spin_unlock(&dd->lock);

	seq_printf(m, "%u %u %u\n", rt, be, idle);

	return 0;
}
/* Number of requests owned by the block driver for a given priority. */
static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio)
{
	const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats;

	lockdep_assert_held(&dd->lock);

	return stats->dispatched + stats->merged -
		atomic_read(&stats->completed);
}
static int dd_owned_by_driver_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;
	u32 rt, be, idle;

	spin_lock(&dd->lock);
	rt = dd_owned_by_driver(dd, DD_RT_PRIO);
	be = dd_owned_by_driver(dd, DD_BE_PRIO);
	idle = dd_owned_by_driver(dd, DD_IDLE_PRIO);
	spin_unlock(&dd->lock);

	seq_printf(m, "%u %u %u\n", rt, be, idle);

	return 0;
}
#define DEADLINE_DISPATCH_ATTR(prio)					\
static void *deadline_dispatch##prio##_start(struct seq_file *m,	\
					     loff_t *pos)		\
	__acquires(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	spin_lock(&dd->lock);						\
	return seq_list_start(&per_prio->dispatch, *pos);		\
}									\
									\
static void *deadline_dispatch##prio##_next(struct seq_file *m,	\
					    void *v, loff_t *pos)	\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	return seq_list_next(v, &per_prio->dispatch, pos);		\
}									\
									\
static void deadline_dispatch##prio##_stop(struct seq_file *m, void *v)	\
	__releases(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	spin_unlock(&dd->lock);						\
}									\
									\
static const struct seq_operations deadline_dispatch##prio##_seq_ops = { \
	.start	= deadline_dispatch##prio##_start,			\
	.next	= deadline_dispatch##prio##_next,			\
	.stop	= deadline_dispatch##prio##_stop,			\
	.show	= blk_mq_debugfs_rq_show,				\
};

DEADLINE_DISPATCH_ATTR(0);
DEADLINE_DISPATCH_ATTR(1);
DEADLINE_DISPATCH_ATTR(2);
#undef DEADLINE_DISPATCH_ATTR
#define DEADLINE_QUEUE_DDIR_ATTRS(name)					\
	{#name "_fifo_list", 0400,					\
	 .seq_ops = &deadline_##name##_fifo_seq_ops}
#define DEADLINE_NEXT_RQ_ATTR(name)					\
	{#name "_next_rq", 0400, deadline_##name##_next_rq_show}
static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
	DEADLINE_QUEUE_DDIR_ATTRS(read0),
	DEADLINE_QUEUE_DDIR_ATTRS(write0),
	DEADLINE_QUEUE_DDIR_ATTRS(read1),
	DEADLINE_QUEUE_DDIR_ATTRS(write1),
	DEADLINE_QUEUE_DDIR_ATTRS(read2),
	DEADLINE_QUEUE_DDIR_ATTRS(write2),
	DEADLINE_NEXT_RQ_ATTR(read0),
	DEADLINE_NEXT_RQ_ATTR(write0),
	DEADLINE_NEXT_RQ_ATTR(read1),
	DEADLINE_NEXT_RQ_ATTR(write1),
	DEADLINE_NEXT_RQ_ATTR(read2),
	DEADLINE_NEXT_RQ_ATTR(write2),
	{"batching", 0400, deadline_batching_show},
	{"starved", 0400, deadline_starved_show},
	{"async_depth", 0400, dd_async_depth_show},
	{"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops},
	{"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops},
	{"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops},
	{"owned_by_driver", 0400, dd_owned_by_driver_show},
	{"queued", 0400, dd_queued_show},
	{},
};
#undef DEADLINE_QUEUE_DDIR_ATTRS
#endif
static struct elevator_type mq_deadline = {
	.ops = {
		.depth_updated		= dd_depth_updated,
		.limit_depth		= dd_limit_depth,
		.insert_requests	= dd_insert_requests,
		.dispatch_request	= dd_dispatch_request,
		.prepare_request	= dd_prepare_request,
		.finish_request		= dd_finish_request,
		.next_request		= elv_rb_latter_request,
		.former_request		= elv_rb_former_request,
		.bio_merge		= dd_bio_merge,
		.request_merge		= dd_request_merge,
		.requests_merged	= dd_merged_requests,
		.request_merged		= dd_request_merged,
		.has_work		= dd_has_work,
		.init_sched		= dd_init_sched,
		.exit_sched		= dd_exit_sched,
		.init_hctx		= dd_init_hctx,
	},

#ifdef CONFIG_BLK_DEBUG_FS
	.queue_debugfs_attrs = deadline_queue_debugfs_attrs,
#endif
	.elevator_attrs = deadline_attrs,
	.elevator_name = "mq-deadline",
	.elevator_alias = "deadline",
	.elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
	.elevator_owner = THIS_MODULE,
};
MODULE_ALIAS("mq-deadline-iosched");
static int __init deadline_init(void)
{
	return elv_register(&mq_deadline);
}

static void __exit deadline_exit(void)
{
	elv_unregister(&mq_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe, Damien Le Moal and Bart Van Assche");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MQ deadline IO scheduler");