// SPDX-License-Identifier: GPL-2.0
/*
 * MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
 * for the blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/sbitmap.h>

#include <trace/events/block.h>

#include "elevator.h"
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"

/*
 * See Documentation/block/deadline-iosched.rst
 */
static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
/*
 * Time after which to dispatch lower priority requests even if higher
 * priority requests are pending.
 */
static const int prio_aging_expire = 10 * HZ;
static const int writes_starved = 2;    /* max times reads can starve a write */
static const int fifo_batch = 16;       /* # of sequential requests treated as one
                                           by the above parameters. For throughput. */
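
/*
 * In wall-clock terms these defaults are a 500 ms read deadline, a 5 s write
 * deadline and a 10 s priority aging interval, independent of CONFIG_HZ.
 */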

enum dd_data_dir {
        DD_READ         = READ,
        DD_WRITE        = WRITE,
};

enum { DD_DIR_COUNT = 2 };

enum dd_prio {
        DD_RT_PRIO      = 0,
        DD_BE_PRIO      = 1,
        DD_IDLE_PRIO    = 2,
        DD_PRIO_MAX     = 2,
};

enum { DD_PRIO_COUNT = 3 };

/*
 * I/O statistics per I/O priority. It is fine if these counters overflow.
 * What matters is that these counters are at least as wide as
 * log2(max_outstanding_requests).
 */
struct io_stats_per_prio {
        uint32_t inserted;
        uint32_t merged;
        uint32_t dispatched;
        atomic_t completed;
};

/*
 * Deadline scheduler data per I/O priority (enum dd_prio). Requests are
 * present on both sort_list[] and fifo_list[].
 */
struct dd_per_prio {
        struct list_head dispatch;
        struct rb_root sort_list[DD_DIR_COUNT];
        struct list_head fifo_list[DD_DIR_COUNT];
        /* Position of the most recently dispatched request. */
        sector_t latest_pos[DD_DIR_COUNT];
        struct io_stats_per_prio stats;
};

struct deadline_data {
        /*
         * run time data
         */

        struct dd_per_prio per_prio[DD_PRIO_COUNT];

        /* Data direction of latest dispatched request. */
        enum dd_data_dir last_dir;
        unsigned int batching;          /* number of sequential requests made */
        unsigned int starved;           /* times reads have starved writes */

        /*
         * settings that change how the i/o scheduler behaves
         */
        int fifo_expire[DD_DIR_COUNT];
        int fifo_batch;
        int writes_starved;
        int front_merges;
        u32 async_depth;
        int prio_aging_expire;

        spinlock_t lock;
};

/* Maps an I/O priority class to a deadline scheduler priority. */
static const enum dd_prio ioprio_class_to_prio[] = {
        [IOPRIO_CLASS_NONE]     = DD_BE_PRIO,
        [IOPRIO_CLASS_RT]       = DD_RT_PRIO,
        [IOPRIO_CLASS_BE]       = DD_BE_PRIO,
        [IOPRIO_CLASS_IDLE]     = DD_IDLE_PRIO,
};

static inline struct rb_root *
deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq)
{
        return &per_prio->sort_list[rq_data_dir(rq)];
}

/*
 * Returns the I/O priority class (IOPRIO_CLASS_*) that has been assigned to a
 * request.
 */
static u8 dd_rq_ioclass(struct request *rq)
{
        return IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
}

/*
 * Return the first request for which blk_rq_pos() >= @pos.
 */
static inline struct request *deadline_from_pos(struct dd_per_prio *per_prio,
                                enum dd_data_dir data_dir, sector_t pos)
{
        struct rb_node *node = per_prio->sort_list[data_dir].rb_node;
        struct request *rq, *res = NULL;

        if (!node)
                return NULL;

        rq = rb_entry_rq(node);
        while (node) {
                rq = rb_entry_rq(node);
                if (blk_rq_pos(rq) >= pos) {
                        res = rq;
                        node = node->rb_left;
                } else {
                        node = node->rb_right;
                }
        }
        return res;
}

static void
deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
{
        struct rb_root *root = deadline_rb_root(per_prio, rq);

        elv_rb_add(root, rq);
}

static inline void
deadline_del_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
{
        elv_rb_del(deadline_rb_root(per_prio, rq), rq);
}

/*
 * remove rq from rbtree and fifo.
 */
static void deadline_remove_request(struct request_queue *q,
                                    struct dd_per_prio *per_prio,
                                    struct request *rq)
{
        list_del_init(&rq->queuelist);

        /*
         * We might not be on the rbtree, if we are doing an insert merge
         */
        if (!RB_EMPTY_NODE(&rq->rb_node))
                deadline_del_rq_rb(per_prio, rq);

        elv_rqhash_del(q, rq);
        if (q->last_merge == rq)
                q->last_merge = NULL;
}

static void dd_request_merged(struct request_queue *q, struct request *req,
                              enum elv_merge type)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        const u8 ioprio_class = dd_rq_ioclass(req);
        const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
        struct dd_per_prio *per_prio = &dd->per_prio[prio];

        /*
         * if the merge was a front merge, we need to reposition request
         */
        if (type == ELEVATOR_FRONT_MERGE) {
                elv_rb_del(deadline_rb_root(per_prio, req), req);
                deadline_add_rq_rb(per_prio, req);
        }
}

/*
 * Callback function that is invoked after @next has been merged into @req.
 */
static void dd_merged_requests(struct request_queue *q, struct request *req,
                               struct request *next)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        const u8 ioprio_class = dd_rq_ioclass(next);
        const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];

        lockdep_assert_held(&dd->lock);

        dd->per_prio[prio].stats.merged++;

        /*
         * if next expires before rq, assign its expire time to rq
         * and move into next position (next will be deleted) in fifo
         */
        if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
                if (time_before((unsigned long)next->fifo_time,
                                (unsigned long)req->fifo_time)) {
                        list_move(&req->queuelist, &next->queuelist);
                        req->fifo_time = next->fifo_time;
                }
        }

        /*
         * kill knowledge of next, this one is a goner
         */
        deadline_remove_request(q, &dd->per_prio[prio], next);
}

/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
                      struct request *rq)
{
        /*
         * take it off the sort and fifo list
         */
        deadline_remove_request(rq->q, per_prio, rq);
}

/* Number of requests queued for a given priority level. */
static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
{
        const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats;

        lockdep_assert_held(&dd->lock);

        return stats->inserted - atomic_read(&stats->completed);
}

/*
 * deadline_check_fifo returns true if and only if there are expired requests
 * in the FIFO list. Requires !list_empty(&dd->fifo_list[data_dir]).
 */
static inline bool deadline_check_fifo(struct dd_per_prio *per_prio,
                                       enum dd_data_dir data_dir)
{
        struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);

        return time_is_before_eq_jiffies((unsigned long)rq->fifo_time);
}

/*
 * For the specified data direction, return the next request to
 * dispatch using arrival ordered lists.
 */
static struct request *
deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
                      enum dd_data_dir data_dir)
{
        if (list_empty(&per_prio->fifo_list[data_dir]))
                return NULL;

        return rq_entry_fifo(per_prio->fifo_list[data_dir].next);
}

/*
 * For the specified data direction, return the next request to
 * dispatch using sector position sorted lists.
 */
static struct request *
deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
                      enum dd_data_dir data_dir)
{
        return deadline_from_pos(per_prio, data_dir,
                                 per_prio->latest_pos[data_dir]);
}

/*
 * Returns true if and only if @rq started after @latest_start where
 * @latest_start is in jiffies.
 */
static bool started_after(struct deadline_data *dd, struct request *rq,
                          unsigned long latest_start)
{
        unsigned long start_time = (unsigned long)rq->fifo_time;

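        /*
         * For requests that were inserted through the FIFO, fifo_time is the
         * insertion time plus the per-direction expire (see
         * dd_insert_request()), so subtracting the expire recovers the
         * insertion time.
         */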
        start_time -= dd->fifo_expire[rq_data_dir(rq)];

        return time_after(start_time, latest_start);
}

/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc and with a start time <= @latest_start.
 */
static struct request *__dd_dispatch_request(struct deadline_data *dd,
                                             struct dd_per_prio *per_prio,
                                             unsigned long latest_start)
{
        struct request *rq, *next_rq;
        enum dd_data_dir data_dir;
        enum dd_prio prio;
        u8 ioprio_class;

        lockdep_assert_held(&dd->lock);

        if (!list_empty(&per_prio->dispatch)) {
                rq = list_first_entry(&per_prio->dispatch, struct request,
                                      queuelist);
                if (started_after(dd, rq, latest_start))
                        return NULL;
                list_del_init(&rq->queuelist);
                data_dir = rq_data_dir(rq);
                goto done;
        }

        /*
         * batches are currently reads XOR writes
         */
        rq = deadline_next_request(dd, per_prio, dd->last_dir);
        if (rq && dd->batching < dd->fifo_batch) {
                /* we have a next request and are still entitled to batch */
                data_dir = rq_data_dir(rq);
                goto dispatch_request;
        }

        /*
         * at this point we are not running a batch. select the appropriate
         * data direction (read / write)
         */

        if (!list_empty(&per_prio->fifo_list[DD_READ])) {
                BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_READ]));

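                /*
                 * Reads are preferred, but a pending write may only be passed
                 * over dd->writes_starved times in a row before the writes
                 * are dispatched instead.
                 */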
                if (deadline_fifo_request(dd, per_prio, DD_WRITE) &&
                    (dd->starved++ >= dd->writes_starved))
                        goto dispatch_writes;

                data_dir = DD_READ;

                goto dispatch_find_request;
        }

        /*
         * there are either no reads or writes have been starved
         */

        if (!list_empty(&per_prio->fifo_list[DD_WRITE])) {
dispatch_writes:
                BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_WRITE]));

                dd->starved = 0;

                data_dir = DD_WRITE;

                goto dispatch_find_request;
        }

        return NULL;

dispatch_find_request:
        /*
         * we are not running a batch, find best request for selected data_dir
         */
        next_rq = deadline_next_request(dd, per_prio, data_dir);
        if (deadline_check_fifo(per_prio, data_dir) || !next_rq) {
                /*
                 * A deadline has expired, the last request was in the other
                 * direction, or we have run out of higher-sectored requests.
                 * Start again from the request with the earliest expiry time.
                 */
                rq = deadline_fifo_request(dd, per_prio, data_dir);
        } else {
                /*
                 * The last req was the same dir and we have a next request in
                 * sort order. No expired requests so continue on from here.
                 */
                rq = next_rq;
        }

        if (!rq)
                return NULL;

        dd->last_dir = data_dir;
        dd->batching = 0;

dispatch_request:
        if (started_after(dd, rq, latest_start))
                return NULL;

        /*
         * rq is the selected appropriate request.
         */
        dd->batching++;
        deadline_move_request(dd, per_prio, rq);
done:
        ioprio_class = dd_rq_ioclass(rq);
        prio = ioprio_class_to_prio[ioprio_class];
        dd->per_prio[prio].latest_pos[data_dir] = blk_rq_pos(rq);
        dd->per_prio[prio].stats.dispatched++;
        rq->rq_flags |= RQF_STARTED;
        return rq;
}

/*
 * Check whether there are any requests with priority other than DD_RT_PRIO
 * that were inserted more than prio_aging_expire jiffies ago.
 */
static struct request *dd_dispatch_prio_aged_requests(struct deadline_data *dd,
                                                      unsigned long now)
{
        struct request *rq;
        enum dd_prio prio;
        int prio_cnt;

        lockdep_assert_held(&dd->lock);

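        /*
         * Aging only matters when requests from more than one priority level
         * are pending; with a single level nothing can be starved.
         */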
        prio_cnt = !!dd_queued(dd, DD_RT_PRIO) + !!dd_queued(dd, DD_BE_PRIO) +
                   !!dd_queued(dd, DD_IDLE_PRIO);
        if (prio_cnt < 2)
                return NULL;

        for (prio = DD_BE_PRIO; prio <= DD_PRIO_MAX; prio++) {
                rq = __dd_dispatch_request(dd, &dd->per_prio[prio],
                                           now - dd->prio_aging_expire);
                if (rq)
                        return rq;
        }

        return NULL;
}

/*
 * Called from blk_mq_run_hw_queue() -> __blk_mq_sched_dispatch_requests().
 *
 * One confusing aspect here is that we get called for a specific
 * hardware queue, but we may return a request that is for a
 * different hardware queue. This is because mq-deadline has shared
 * state for all hardware queues, in terms of sorting, FIFOs, etc.
 */
static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
        struct deadline_data *dd = hctx->queue->elevator->elevator_data;
        const unsigned long now = jiffies;
        struct request *rq;
        enum dd_prio prio;

        spin_lock(&dd->lock);
        rq = dd_dispatch_prio_aged_requests(dd, now);
        if (rq)
                goto unlock;

        /*
         * Next, dispatch requests in priority order. Ignore lower priority
         * requests if any higher priority requests are pending.
         */
        for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
                rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now);
                if (rq || dd_queued(dd, prio))
                        break;
        }

unlock:
        spin_unlock(&dd->lock);

        return rq;
}

/*
 * 'depth' is a number in the range 1..INT_MAX representing a number of
 * requests. Scale it with a factor (1 << bt->sb.shift) / q->nr_requests since
 * 1..(1 << bt->sb.shift) is the range expected by sbitmap_get_shallow().
 * Values larger than q->nr_requests have the same effect as q->nr_requests.
 */
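/*
 * Example (assuming 64-bit sbitmap words, i.e. bt->sb.shift == 6): with
 * q->nr_requests == 256, a depth of 192 maps to 48 bits per word.
 */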
static int dd_to_word_depth(struct blk_mq_hw_ctx *hctx, unsigned int qdepth)
{
        struct sbitmap_queue *bt = &hctx->sched_tags->bitmap_tags;
        const unsigned int nrr = hctx->queue->nr_requests;

        return ((qdepth << bt->sb.shift) + nrr - 1) / nrr;
}

/*
 * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
 * function is used by __blk_mq_get_tag().
 */
static void dd_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
{
        struct deadline_data *dd = data->q->elevator->elevator_data;

        /* Do not throttle synchronous reads. */
        if (op_is_sync(opf) && !op_is_write(opf))
                return;

        /*
         * Throttle asynchronous requests and writes such that these requests
         * do not block the allocation of synchronous requests.
         */
        data->shallow_depth = dd_to_word_depth(data->hctx, dd->async_depth);
}

/* Called by blk_mq_update_nr_requests(). */
static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        struct deadline_data *dd = q->elevator->elevator_data;
        struct blk_mq_tags *tags = hctx->sched_tags;

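        /*
         * By default async requests are not throttled: async_depth starts at
         * the full queue depth and can be lowered via the async_depth sysfs
         * attribute.
         */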
        dd->async_depth = q->nr_requests;

        sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, 1);
}

/* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
        dd_depth_updated(hctx);
        return 0;
}

static void dd_exit_sched(struct elevator_queue *e)
{
        struct deadline_data *dd = e->elevator_data;
        enum dd_prio prio;

        for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
                struct dd_per_prio *per_prio = &dd->per_prio[prio];
                const struct io_stats_per_prio *stats = &per_prio->stats;
                uint32_t queued;

                WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ]));
                WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE]));

                spin_lock(&dd->lock);
                queued = dd_queued(dd, prio);
                spin_unlock(&dd->lock);

                WARN_ONCE(queued != 0,
                          "statistics for priority %d: i %u m %u d %u c %u\n",
                          prio, stats->inserted, stats->merged,
                          stats->dispatched, atomic_read(&stats->completed));
        }

        kfree(dd);
}

/*
 * initialize elevator private data (deadline_data).
 */
static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
{
        struct deadline_data *dd;
        struct elevator_queue *eq;
        enum dd_prio prio;
        int ret = -ENOMEM;

        eq = elevator_alloc(q, e);
        if (!eq)
                return ret;

        dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
        if (!dd)
                goto put_eq;

        eq->elevator_data = dd;

        for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
                struct dd_per_prio *per_prio = &dd->per_prio[prio];

                INIT_LIST_HEAD(&per_prio->dispatch);
                INIT_LIST_HEAD(&per_prio->fifo_list[DD_READ]);
                INIT_LIST_HEAD(&per_prio->fifo_list[DD_WRITE]);
                per_prio->sort_list[DD_READ] = RB_ROOT;
                per_prio->sort_list[DD_WRITE] = RB_ROOT;
        }
        dd->fifo_expire[DD_READ] = read_expire;
        dd->fifo_expire[DD_WRITE] = write_expire;
        dd->writes_starved = writes_starved;
        dd->front_merges = 1;
        dd->last_dir = DD_WRITE;
        dd->fifo_batch = fifo_batch;
        dd->prio_aging_expire = prio_aging_expire;
        spin_lock_init(&dd->lock);

        /* We dispatch from request queue wide instead of hw queue */
        blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);

        q->elevator = eq;
        return 0;

put_eq:
        kobject_put(&eq->kobj);
        return ret;
}

/*
 * Try to merge @bio into an existing request. If @bio has been merged into
 * an existing request, store the pointer to that request into *@rq.
 */
static int dd_request_merge(struct request_queue *q, struct request **rq,
                            struct bio *bio)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        const u8 ioprio_class = IOPRIO_PRIO_CLASS(bio->bi_ioprio);
        const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
        struct dd_per_prio *per_prio = &dd->per_prio[prio];
        sector_t sector = bio_end_sector(bio);
        struct request *__rq;

        if (!dd->front_merges)
                return ELEVATOR_NO_MERGE;

        __rq = elv_rb_find(&per_prio->sort_list[bio_data_dir(bio)], sector);
        if (__rq) {
                BUG_ON(sector != blk_rq_pos(__rq));

                if (elv_bio_merge_ok(__rq, bio)) {
                        *rq = __rq;
                        if (blk_discard_mergable(__rq))
                                return ELEVATOR_DISCARD_MERGE;
                        return ELEVATOR_FRONT_MERGE;
                }
        }

        return ELEVATOR_NO_MERGE;
}

/*
 * Attempt to merge a bio into an existing request. This function is called
 * before @bio is associated with a request.
 */
static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
                         unsigned int nr_segs)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct request *free = NULL;
        bool ret;

        spin_lock(&dd->lock);
        ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
        spin_unlock(&dd->lock);

        if (free)
                blk_mq_free_request(free);

        return ret;
}

/*
 * add rq to rbtree and fifo
 */
static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
                              blk_insert_t flags, struct list_head *free)
{
        struct request_queue *q = hctx->queue;
        struct deadline_data *dd = q->elevator->elevator_data;
        const enum dd_data_dir data_dir = rq_data_dir(rq);
        u16 ioprio = req_get_ioprio(rq);
        u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio);
        struct dd_per_prio *per_prio;
        enum dd_prio prio;

        lockdep_assert_held(&dd->lock);

        prio = ioprio_class_to_prio[ioprio_class];
        per_prio = &dd->per_prio[prio];
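        /*
         * Mark the request as having been inserted through the scheduler so
         * that dd_finish_request() knows to account its completion.
         */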
        if (!rq->elv.priv[0]) {
                per_prio->stats.inserted++;
                rq->elv.priv[0] = (void *)(uintptr_t)1;
        }

        if (blk_mq_sched_try_insert_merge(q, rq, free))
                return;

        trace_block_rq_insert(rq);

        if (flags & BLK_MQ_INSERT_AT_HEAD) {
                list_add(&rq->queuelist, &per_prio->dispatch);
                rq->fifo_time = jiffies;
        } else {
                struct list_head *insert_before;

                deadline_add_rq_rb(per_prio, rq);

                if (rq_mergeable(rq)) {
                        elv_rqhash_add(q, rq);
                        if (!q->last_merge)
                                q->last_merge = rq;
                }

                /*
                 * set expire time and add to fifo list
                 */
                rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
                insert_before = &per_prio->fifo_list[data_dir];
                list_add_tail(&rq->queuelist, insert_before);
        }
}

/*
 * Called from blk_mq_insert_request() or blk_mq_dispatch_plug_list().
 */
static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
                               struct list_head *list,
                               blk_insert_t flags)
{
        struct request_queue *q = hctx->queue;
        struct deadline_data *dd = q->elevator->elevator_data;
        LIST_HEAD(free);

        spin_lock(&dd->lock);
        while (!list_empty(list)) {
                struct request *rq;

                rq = list_first_entry(list, struct request, queuelist);
                list_del_init(&rq->queuelist);
                dd_insert_request(hctx, rq, flags, &free);
        }
        spin_unlock(&dd->lock);

        blk_mq_free_requests(&free);
}

/* Callback from inside blk_mq_rq_ctx_init(). */
static void dd_prepare_request(struct request *rq)
{
        rq->elv.priv[0] = NULL;
}

/*
 * Callback from inside blk_mq_free_request().
 */
static void dd_finish_request(struct request *rq)
{
        struct request_queue *q = rq->q;
        struct deadline_data *dd = q->elevator->elevator_data;
        const u8 ioprio_class = dd_rq_ioclass(rq);
        const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
        struct dd_per_prio *per_prio = &dd->per_prio[prio];

        /*
         * The block layer core may call dd_finish_request() without having
         * called dd_insert_requests(). Skip requests that bypassed I/O
         * scheduling. See also blk_mq_request_bypass_insert().
         */
        if (rq->elv.priv[0])
                atomic_inc(&per_prio->stats.completed);
}

static bool dd_has_work_for_prio(struct dd_per_prio *per_prio)
{
        return !list_empty_careful(&per_prio->dispatch) ||
                !list_empty_careful(&per_prio->fifo_list[DD_READ]) ||
                !list_empty_careful(&per_prio->fifo_list[DD_WRITE]);
}

static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
{
        struct deadline_data *dd = hctx->queue->elevator->elevator_data;
        enum dd_prio prio;

        for (prio = 0; prio <= DD_PRIO_MAX; prio++)
                if (dd_has_work_for_prio(&dd->per_prio[prio]))
                        return true;

        return false;
}

/*
 * sysfs parts below
 */
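/*
 * Each attribute defined below is exposed as
 * /sys/block/<disk>/queue/iosched/<name>. The expire values are reported
 * and accepted in milliseconds.
 */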
#define SHOW_INT(__FUNC, __VAR)						\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct deadline_data *dd = e->elevator_data;			\
									\
	return sysfs_emit(page, "%d\n", __VAR);				\
}
#define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR))
SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]);
SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
SHOW_JIFFIES(deadline_prio_aging_expire_show, dd->prio_aging_expire);
SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
SHOW_INT(deadline_front_merges_show, dd->front_merges);
SHOW_INT(deadline_async_depth_show, dd->async_depth);
SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
#undef SHOW_INT
#undef SHOW_JIFFIES

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{									\
	struct deadline_data *dd = e->elevator_data;			\
	int __data, __ret;						\
									\
	__ret = kstrtoint(page, 0, &__data);				\
	if (__ret < 0)							\
		return __ret;						\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	*(__PTR) = __CONV(__data);					\
	return count;							\
}
#define STORE_INT(__FUNC, __PTR, MIN, MAX)				\
	STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, )
#define STORE_JIFFIES(__FUNC, __PTR, MIN, MAX)				\
	STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies)
STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX);
STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
STORE_JIFFIES(deadline_prio_aging_expire_store, &dd->prio_aging_expire, 0, INT_MAX);
STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
#undef STORE_FUNCTION
#undef STORE_INT
#undef STORE_JIFFIES

#define DD_ATTR(name) \
	__ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
	DD_ATTR(read_expire),
	DD_ATTR(write_expire),
	DD_ATTR(writes_starved),
	DD_ATTR(front_merges),
	DD_ATTR(async_depth),
	DD_ATTR(fifo_batch),
	DD_ATTR(prio_aging_expire),
	__ATTR_NULL
};

#ifdef CONFIG_BLK_DEBUG_FS
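/*
 * The debugfs attributes below appear under
 * /sys/kernel/debug/block/<disk>/sched/.
 */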
#define DEADLINE_DEBUGFS_DDIR_ATTRS(prio, data_dir, name)		\
static void *deadline_##name##_fifo_start(struct seq_file *m,		\
					  loff_t *pos)			\
	__acquires(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	spin_lock(&dd->lock);						\
	return seq_list_start(&per_prio->fifo_list[data_dir], *pos);	\
}									\
									\
static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,	\
					 loff_t *pos)			\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	return seq_list_next(v, &per_prio->fifo_list[data_dir], pos);	\
}									\
									\
static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)	\
	__releases(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	spin_unlock(&dd->lock);						\
}									\
									\
static const struct seq_operations deadline_##name##_fifo_seq_ops = {	\
	.start	= deadline_##name##_fifo_start,				\
	.next	= deadline_##name##_fifo_next,				\
	.stop	= deadline_##name##_fifo_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
};									\
									\
static int deadline_##name##_next_rq_show(void *data,			\
					  struct seq_file *m)		\
{									\
	struct request_queue *q = data;					\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
	struct request *rq;						\
									\
	rq = deadline_from_pos(per_prio, data_dir,			\
			       per_prio->latest_pos[data_dir]);		\
	if (rq)								\
		__blk_mq_debugfs_rq_show(m, rq);			\
	return 0;							\
}

DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_READ, read0);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_WRITE, write0);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_READ, read1);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_WRITE, write1);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_READ, read2);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_WRITE, write2);
#undef DEADLINE_DEBUGFS_DDIR_ATTRS

static int deadline_batching_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->batching);
	return 0;
}

static int deadline_starved_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->starved);
	return 0;
}

static int dd_async_depth_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->async_depth);
	return 0;
}

static int dd_queued_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;
	u32 rt, be, idle;

	spin_lock(&dd->lock);
	rt = dd_queued(dd, DD_RT_PRIO);
	be = dd_queued(dd, DD_BE_PRIO);
	idle = dd_queued(dd, DD_IDLE_PRIO);
	spin_unlock(&dd->lock);

	seq_printf(m, "%u %u %u\n", rt, be, idle);

	return 0;
}

/* Number of requests owned by the block driver for a given priority. */
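/*
 * A merged request is freed without ever being dispatched, but its completion
 * is still counted by dd_finish_request(); adding stats->merged keeps the two
 * sides of the subtraction balanced.
 */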
static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio)
{
	const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats;

	lockdep_assert_held(&dd->lock);

	return stats->dispatched + stats->merged -
		atomic_read(&stats->completed);
}

static int dd_owned_by_driver_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;
	u32 rt, be, idle;

	spin_lock(&dd->lock);
	rt = dd_owned_by_driver(dd, DD_RT_PRIO);
	be = dd_owned_by_driver(dd, DD_BE_PRIO);
	idle = dd_owned_by_driver(dd, DD_IDLE_PRIO);
	spin_unlock(&dd->lock);

	seq_printf(m, "%u %u %u\n", rt, be, idle);

	return 0;
}

#define DEADLINE_DISPATCH_ATTR(prio)					\
static void *deadline_dispatch##prio##_start(struct seq_file *m,	\
					     loff_t *pos)		\
	__acquires(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	spin_lock(&dd->lock);						\
	return seq_list_start(&per_prio->dispatch, *pos);		\
}									\
									\
static void *deadline_dispatch##prio##_next(struct seq_file *m,	\
					    void *v, loff_t *pos)	\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	return seq_list_next(v, &per_prio->dispatch, pos);		\
}									\
									\
static void deadline_dispatch##prio##_stop(struct seq_file *m, void *v)	\
	__releases(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	spin_unlock(&dd->lock);						\
}									\
									\
static const struct seq_operations deadline_dispatch##prio##_seq_ops = { \
	.start	= deadline_dispatch##prio##_start,			\
	.next	= deadline_dispatch##prio##_next,			\
	.stop	= deadline_dispatch##prio##_stop,			\
	.show	= blk_mq_debugfs_rq_show,				\
}

DEADLINE_DISPATCH_ATTR(0);
DEADLINE_DISPATCH_ATTR(1);
DEADLINE_DISPATCH_ATTR(2);
#undef DEADLINE_DISPATCH_ATTR

#define DEADLINE_QUEUE_DDIR_ATTRS(name)					\
	{#name "_fifo_list", 0400,					\
	 .seq_ops = &deadline_##name##_fifo_seq_ops}
#define DEADLINE_NEXT_RQ_ATTR(name)					\
	{#name "_next_rq", 0400, deadline_##name##_next_rq_show}
static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
	DEADLINE_QUEUE_DDIR_ATTRS(read0),
	DEADLINE_QUEUE_DDIR_ATTRS(write0),
	DEADLINE_QUEUE_DDIR_ATTRS(read1),
	DEADLINE_QUEUE_DDIR_ATTRS(write1),
	DEADLINE_QUEUE_DDIR_ATTRS(read2),
	DEADLINE_QUEUE_DDIR_ATTRS(write2),
	DEADLINE_NEXT_RQ_ATTR(read0),
	DEADLINE_NEXT_RQ_ATTR(write0),
	DEADLINE_NEXT_RQ_ATTR(read1),
	DEADLINE_NEXT_RQ_ATTR(write1),
	DEADLINE_NEXT_RQ_ATTR(read2),
	DEADLINE_NEXT_RQ_ATTR(write2),
	{"batching", 0400, deadline_batching_show},
	{"starved", 0400, deadline_starved_show},
	{"async_depth", 0400, dd_async_depth_show},
	{"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops},
	{"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops},
	{"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops},
	{"owned_by_driver", 0400, dd_owned_by_driver_show},
	{"queued", 0400, dd_queued_show},
	{},
};
#undef DEADLINE_QUEUE_DDIR_ATTRS
#endif

static struct elevator_type mq_deadline = {
	.ops = {
		.depth_updated		= dd_depth_updated,
		.limit_depth		= dd_limit_depth,
		.insert_requests	= dd_insert_requests,
		.dispatch_request	= dd_dispatch_request,
		.prepare_request	= dd_prepare_request,
		.finish_request		= dd_finish_request,
		.next_request		= elv_rb_latter_request,
		.former_request		= elv_rb_former_request,
		.bio_merge		= dd_bio_merge,
		.request_merge		= dd_request_merge,
		.requests_merged	= dd_merged_requests,
		.request_merged		= dd_request_merged,
		.has_work		= dd_has_work,
		.init_sched		= dd_init_sched,
		.exit_sched		= dd_exit_sched,
		.init_hctx		= dd_init_hctx,
	},

#ifdef CONFIG_BLK_DEBUG_FS
	.queue_debugfs_attrs = deadline_queue_debugfs_attrs,
#endif
	.elevator_attrs = deadline_attrs,
	.elevator_name = "mq-deadline",
	.elevator_alias = "deadline",
	.elevator_owner = THIS_MODULE,
};
MODULE_ALIAS("mq-deadline-iosched");

static int __init deadline_init(void)
{
	return elv_register(&mq_deadline);
}

static void __exit deadline_exit(void)
{
	elv_unregister(&mq_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe, Damien Le Moal and Bart Van Assche");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MQ deadline IO scheduler");