// SPDX-License-Identifier: GPL-2.0
/*
 * buffered writeback throttling. loosely based on CoDel. We can't drop
 * packets for IO scheduling, so the logic is something like this:
 *
 * - Monitor latencies in a defined window of time.
 * - If the minimum latency in the above window exceeds some target, increment
 *   scaling step and scale down queue depth by a factor of 2x. The monitoring
 *   window is then shrunk to 100 / sqrt(scaling step + 1).
 * - For any window where we don't have solid data on what the latencies
 *   look like, retain status quo.
 * - If latencies look good, decrement scaling step.
 * - If we're only doing writes, allow the scaling step to go negative. This
 *   will temporarily boost write performance, snapping back to a stable
 *   scaling step of 0 if reads show up or the heavy writers finish. Unlike
 *   positive scaling steps where we shrink the monitoring window, a negative
 *   scaling step retains the default step==0 window size.
 *
 * Copyright (C) 2016 Jens Axboe
 */
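/*
 * Worked example of the scaling above (illustrative numbers only): with the
 * default 100msec monitoring window and a write depth limit of 16, three
 * consecutive bad windows move the scaling step to 3, the allowed depth is
 * roughly halved on each step (16 -> 8 -> 4 -> 2), and the window shrinks to
 * 100 / sqrt(3 + 1) = 50msec so the state is re-evaluated more often while
 * throttled.
 */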
#include <linux/kernel.h>
#include <linux/blk_types.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>

#include "blk-stat.h"
#include "blk-wbt.h"
#include "blk-rq-qos.h"
#include "elevator.h"

#define CREATE_TRACE_POINTS
#include <trace/events/wbt.h>
enum wbt_flags {
        WBT_TRACKED             = 1,    /* write, tracked for throttling */
        WBT_READ                = 2,    /* read */
        WBT_KSWAPD              = 4,    /* write, from kswapd */
        WBT_DISCARD             = 8,    /* discard */

        WBT_NR_BITS             = 4,    /* number of bits */
};
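/*
 * Note that the flags above are bit flags and get OR'ed together when a
 * request is classified: a throttled write issued by kswapd carries
 * WBT_TRACKED | WBT_KSWAPD, for example. WBT_NR_BITS is simply the number of
 * flag bits defined above.
 */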
/*
 * If the current state is WBT_STATE_ON/OFF_DEFAULT, it can be changed to any
 * other state; if the current state is WBT_STATE_ON/OFF_MANUAL, it can only
 * be changed to WBT_STATE_OFF/ON_MANUAL.
 */
enum {
        WBT_STATE_ON_DEFAULT    = 1,    /* on by default */
        WBT_STATE_ON_MANUAL     = 2,    /* on manually by sysfs */
        WBT_STATE_OFF_DEFAULT   = 3,    /* off by default */
        WBT_STATE_OFF_MANUAL    = 4,    /* off manually by sysfs */
};
struct rq_wb {
        /*
         * Settings that govern how we throttle
         */
        unsigned int wb_background;             /* background writeback */
        unsigned int wb_normal;                 /* normal writeback */

        short enable_state;                     /* WBT_STATE_* */

        /*
         * Number of consecutive periods where we don't have enough
         * information to make a firm scale up/down decision.
         */
        unsigned int unknown_cnt;

        u64 win_nsec;                           /* default window size */
        u64 cur_win_nsec;                       /* current window size */

        struct blk_stat_callback *cb;

        u64 sync_issue;
        void *sync_cookie;

        unsigned long last_issue;               /* last non-throttled issue */
        unsigned long last_comp;                /* last non-throttled comp */
        unsigned long min_lat_nsec;
        struct rq_qos rqos;
        struct rq_wait rq_wait[WBT_NUM_RWQ];
        struct rq_depth rq_depth;
        bool wc;
};
static inline struct rq_wb *RQWB(struct rq_qos *rqos)
{
        return container_of(rqos, struct rq_wb, rqos);
}
static inline void wbt_clear_state(struct request *rq)
{
        rq->wbt_flags = 0;
}
static inline enum wbt_flags wbt_flags(struct request *rq)
{
        return rq->wbt_flags;
}

static inline bool wbt_is_tracked(struct request *rq)
{
        return rq->wbt_flags & WBT_TRACKED;
}

static inline bool wbt_is_read(struct request *rq)
{
        return rq->wbt_flags & WBT_READ;
}
enum {
        /*
         * Default setting, we'll scale up (to 75% of QD max) or down (min 1)
         * from here depending on device stats
         */
        RWB_DEF_DEPTH           = 16,

        /*
         * 100msec window
         */
        RWB_WINDOW_NSEC         = 100 * 1000 * 1000ULL,

        /*
         * Disregard stats, if we don't meet this minimum
         */
        RWB_MIN_WRITE_SAMPLES   = 3,

        /*
         * If we have this number of consecutive windows with not enough
         * information to scale up or down, scale up.
         */
        RWB_UNKNOWN_BUMP        = 5,
};
static inline bool rwb_enabled(struct rq_wb *rwb)
{
        return rwb && rwb->enable_state != WBT_STATE_OFF_DEFAULT &&
                      rwb->enable_state != WBT_STATE_OFF_MANUAL;
}
static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
{
        if (rwb_enabled(rwb)) {
                const unsigned long cur = jiffies;

                if (cur != *var)
                        *var = cur;
        }
}
/*
 * If a task was rate throttled in balance_dirty_pages() within the last
 * second or so, use that to indicate a higher cleaning rate.
 */
static bool wb_recent_wait(struct rq_wb *rwb)
{
        struct bdi_writeback *wb = &rwb->rqos.disk->bdi->wb;

        return time_before(jiffies, wb->dirty_sleep + HZ);
}

static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb,
                                          enum wbt_flags wb_acct)
{
        if (wb_acct & WBT_KSWAPD)
                return &rwb->rq_wait[WBT_RWQ_KSWAPD];
        else if (wb_acct & WBT_DISCARD)
                return &rwb->rq_wait[WBT_RWQ_DISCARD];

        return &rwb->rq_wait[WBT_RWQ_BG];
}

static void rwb_wake_all(struct rq_wb *rwb)
{
        int i;

        for (i = 0; i < WBT_NUM_RWQ; i++) {
                struct rq_wait *rqw = &rwb->rq_wait[i];

                if (wq_has_sleeper(&rqw->wait))
                        wake_up_all(&rqw->wait);
        }
}
static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw,
                         enum wbt_flags wb_acct)
{
        int inflight, limit;

        inflight = atomic_dec_return(&rqw->inflight);

        /*
         * For discards, our limit is always the background. For writes, if
         * the device does write back caching, drop further down before we
         * wake people up.
         */
        if (wb_acct & WBT_DISCARD)
                limit = rwb->wb_background;
        else if (rwb->wc && !wb_recent_wait(rwb))
                limit = 0;
        else
                limit = rwb->wb_normal;

        /*
         * Don't wake anyone up if we are above the normal limit.
         */
        if (inflight && inflight >= limit)
                return;

        if (wq_has_sleeper(&rqw->wait)) {
                int diff = limit - inflight;

                if (!inflight || diff >= rwb->wb_background / 2)
                        wake_up_all(&rqw->wait);
        }
}
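/*
 * Illustrative example of the wake batching above (numbers are made up):
 * with wb_normal = 8 and wb_background = 4, completions only wake sleepers
 * once limit - inflight >= 2 (half the background depth), or once nothing is
 * in flight at all, instead of waking a waiter on every single completion.
 */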
static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
{
        struct rq_wb *rwb = RQWB(rqos);
        struct rq_wait *rqw;

        if (!(wb_acct & WBT_TRACKED))
                return;

        rqw = get_rq_wait(rwb, wb_acct);
        wbt_rqw_done(rwb, rqw, wb_acct);
}

/*
 * Called on completion of a request. Note that it's also called when
 * a request is merged, when the request gets freed.
 */
static void wbt_done(struct rq_qos *rqos, struct request *rq)
{
        struct rq_wb *rwb = RQWB(rqos);

        if (!wbt_is_tracked(rq)) {
                if (rwb->sync_cookie == rq) {
                        rwb->sync_issue = 0;
                        rwb->sync_cookie = NULL;
                }

                if (wbt_is_read(rq))
                        wb_timestamp(rwb, &rwb->last_comp);
        } else {
                WARN_ON_ONCE(rq == rwb->sync_cookie);
                __wbt_done(rqos, wbt_flags(rq));
        }
        wbt_clear_state(rq);
}
static inline bool stat_sample_valid(struct blk_rq_stat *stat)
{
        /*
         * We need at least one read sample, and a minimum of
         * RWB_MIN_WRITE_SAMPLES. We require some write samples to know
         * that it's writes impacting us, and not just some sole read on
         * a device that is in a lower power state.
         */
        return (stat[READ].nr_samples >= 1 &&
                stat[WRITE].nr_samples >= RWB_MIN_WRITE_SAMPLES);
}
static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
{
        u64 now, issue = READ_ONCE(rwb->sync_issue);

        if (!issue || !rwb->sync_cookie)
                return 0;

        now = ktime_to_ns(ktime_get());
        return now - issue;
}

static inline unsigned int wbt_inflight(struct rq_wb *rwb)
{
        unsigned int i, ret = 0;

        for (i = 0; i < WBT_NUM_RWQ; i++)
                ret += atomic_read(&rwb->rq_wait[i].inflight);

        return ret;
}

enum {
        LAT_OK = 1,
        LAT_UNKNOWN,
        LAT_UNKNOWN_WRITES,
        LAT_EXCEEDED,
};
static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
{
        struct backing_dev_info *bdi = rwb->rqos.disk->bdi;
        struct rq_depth *rqd = &rwb->rq_depth;
        u64 thislat;

        /*
         * If our stored sync issue exceeds the window size, or it
         * exceeds our min target AND we haven't logged any entries,
         * flag the latency as exceeded. wbt works off completion latencies,
         * but for a flooded device, a single sync IO can take a long time
         * to complete after being issued. If this time exceeds our
         * monitoring window AND we didn't see any other completions in that
         * window, then count that sync IO as a violation of the latency.
         */
        thislat = rwb_sync_issue_lat(rwb);
        if (thislat > rwb->cur_win_nsec ||
            (thislat > rwb->min_lat_nsec && !stat[READ].nr_samples)) {
                trace_wbt_lat(bdi, thislat);
                return LAT_EXCEEDED;
        }

        /*
         * No read/write mix, if stat isn't valid
         */
        if (!stat_sample_valid(stat)) {
                /*
                 * If we had writes in this stat window and the window is
                 * current, we're only doing writes. If a task recently
                 * waited or still has writes in flight, consider us doing
                 * just writes as well.
                 */
                if (stat[WRITE].nr_samples || wb_recent_wait(rwb) ||
                    wbt_inflight(rwb))
                        return LAT_UNKNOWN_WRITES;
                return LAT_UNKNOWN;
        }

        /*
         * If the 'min' latency exceeds our target, step down.
         */
        if (stat[READ].min > rwb->min_lat_nsec) {
                trace_wbt_lat(bdi, stat[READ].min);
                trace_wbt_stat(bdi, stat);
                return LAT_EXCEEDED;
        }

        if (rqd->scale_step)
                trace_wbt_stat(bdi, stat);

        return LAT_OK;
}
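/*
 * To summarize the outcomes above: LAT_EXCEEDED means the read latency
 * target was missed (step down), LAT_OK means reads completed within the
 * target (step up), LAT_UNKNOWN_WRITES means the window only saw writes,
 * and LAT_UNKNOWN means there wasn't enough data to decide either way.
 */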
static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
{
        struct backing_dev_info *bdi = rwb->rqos.disk->bdi;
        struct rq_depth *rqd = &rwb->rq_depth;

        trace_wbt_step(bdi, msg, rqd->scale_step, rwb->cur_win_nsec,
                        rwb->wb_background, rwb->wb_normal, rqd->max_depth);
}
static void calc_wb_limits(struct rq_wb *rwb)
{
        if (rwb->min_lat_nsec == 0) {
                rwb->wb_normal = rwb->wb_background = 0;
        } else if (rwb->rq_depth.max_depth <= 2) {
                rwb->wb_normal = rwb->rq_depth.max_depth;
                rwb->wb_background = 1;
        } else {
                rwb->wb_normal = (rwb->rq_depth.max_depth + 1) / 2;
                rwb->wb_background = (rwb->rq_depth.max_depth + 3) / 4;
        }
}
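/*
 * Example of the limits above (illustrative): with max_depth = 16 this gives
 * wb_normal = (16 + 1) / 2 = 8 and wb_background = (16 + 3) / 4 = 4, so
 * normal writeback gets about half the allowed depth and background
 * writeback about a quarter. A min_lat_nsec of 0 zeroes both limits.
 */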
static void scale_up(struct rq_wb *rwb)
{
        if (!rq_depth_scale_up(&rwb->rq_depth))
                return;
        calc_wb_limits(rwb);
        rwb->unknown_cnt = 0;
        rwb_wake_all(rwb);
        rwb_trace_step(rwb, tracepoint_string("scale up"));
}

static void scale_down(struct rq_wb *rwb, bool hard_throttle)
{
        if (!rq_depth_scale_down(&rwb->rq_depth, hard_throttle))
                return;
        calc_wb_limits(rwb);
        rwb->unknown_cnt = 0;
        rwb_trace_step(rwb, tracepoint_string("scale down"));
}
static void rwb_arm_timer(struct rq_wb *rwb)
{
        struct rq_depth *rqd = &rwb->rq_depth;

        if (rqd->scale_step > 0) {
                /*
                 * We should speed this up, using some variant of a fast
                 * integer inverse square root calculation. Since we only do
                 * this for every window expiration, it's not a huge deal,
                 * though.
                 */
                rwb->cur_win_nsec = div_u64(rwb->win_nsec << 4,
                                        int_sqrt((rqd->scale_step + 1) << 8));
        } else {
                /*
                 * For step < 0, we don't want to increase/decrease the
                 * window size.
                 */
                rwb->cur_win_nsec = rwb->win_nsec;
        }

        blk_stat_activate_nsecs(rwb->cb, rwb->cur_win_nsec);
}
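/*
 * The fixed-point math above is just win_nsec / sqrt(scale_step + 1):
 * (win << 4) / int_sqrt((step + 1) << 8) == (win * 16) / (16 * sqrt(step + 1)),
 * which matches the "100 / sqrt(scaling step + 1)" rule described at the top
 * of this file.
 */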
static void wb_timer_fn(struct blk_stat_callback *cb)
{
        struct rq_wb *rwb = cb->data;
        struct rq_depth *rqd = &rwb->rq_depth;
        unsigned int inflight = wbt_inflight(rwb);
        int status;

        if (!rwb->rqos.disk)
                return;

        status = latency_exceeded(rwb, cb->stat);

        trace_wbt_timer(rwb->rqos.disk->bdi, status, rqd->scale_step, inflight);

        /*
         * If we exceeded the latency target, step down. If we did not,
         * step one level up. If we don't know enough to say either exceeded
         * or ok, then don't do anything.
         */
        switch (status) {
        case LAT_EXCEEDED:
                scale_down(rwb, true);
                break;
        case LAT_OK:
                scale_up(rwb);
                break;
        case LAT_UNKNOWN_WRITES:
                /*
                 * We started at the center step, don't have a valid
                 * read/write sample, but we do have writes going on.
                 * Allow step to go negative, to increase write perf.
                 */
                scale_up(rwb);
                break;
        case LAT_UNKNOWN:
                if (++rwb->unknown_cnt < RWB_UNKNOWN_BUMP)
                        break;
                /*
                 * We get here when we previously scaled the depth, and we
                 * currently don't have a valid read/write sample. For that
                 * case, slowly return to center state (step == 0).
                 */
                if (rqd->scale_step > 0)
                        scale_up(rwb);
                else if (rqd->scale_step < 0)
                        scale_down(rwb, false);
                break;
        default:
                break;
        }

        /*
         * Re-arm timer, if we have IO in flight
         */
        if (rqd->scale_step || inflight)
                rwb_arm_timer(rwb);
}
static void wbt_update_limits(struct rq_wb *rwb)
{
        struct rq_depth *rqd = &rwb->rq_depth;

        rqd->scale_step = 0;
        rqd->scaled_max = false;

        rq_depth_calc_max_depth(rqd);
        calc_wb_limits(rwb);

        rwb_wake_all(rwb);
}
bool wbt_disabled(struct request_queue *q)
{
        struct rq_qos *rqos = wbt_rq_qos(q);

        return !rqos || !rwb_enabled(RQWB(rqos));
}
u64 wbt_get_min_lat(struct request_queue *q)
{
        struct rq_qos *rqos = wbt_rq_qos(q);

        if (!rqos)
                return 0;

        return RQWB(rqos)->min_lat_nsec;
}

void wbt_set_min_lat(struct request_queue *q, u64 val)
{
        struct rq_qos *rqos = wbt_rq_qos(q);

        if (!rqos)
                return;

        RQWB(rqos)->min_lat_nsec = val;
        if (val)
                RQWB(rqos)->enable_state = WBT_STATE_ON_MANUAL;
        else
                RQWB(rqos)->enable_state = WBT_STATE_OFF_MANUAL;

        wbt_update_limits(RQWB(rqos));
}
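/*
 * Note on the sysfs behaviour above: writing a non-zero latency target
 * switches the state to WBT_STATE_ON_MANUAL, writing 0 switches it to
 * WBT_STATE_OFF_MANUAL. Once a manual state is set, the "by default"
 * enable/disable paths further down leave it alone.
 */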
static bool close_io(struct rq_wb *rwb)
{
        const unsigned long now = jiffies;

        return time_before(now, rwb->last_issue + HZ / 10) ||
                time_before(now, rwb->last_comp + HZ / 10);
}

#define REQ_HIPRIO      (REQ_SYNC | REQ_META | REQ_PRIO)
static inline unsigned int get_limit(struct rq_wb *rwb, blk_opf_t opf)
{
        unsigned int limit;

        if ((opf & REQ_OP_MASK) == REQ_OP_DISCARD)
                return rwb->wb_background;

        /*
         * At this point we know it's a buffered write. If this is
         * kswapd trying to free memory, or REQ_SYNC is set, then
         * it's WB_SYNC_ALL writeback, and we'll use the max limit for
         * that. If the write is marked as a background write, then use
         * the idle limit, or go to normal if we haven't had competing
         * IO for a bit.
         */
        if ((opf & REQ_HIPRIO) || wb_recent_wait(rwb) || current_is_kswapd())
                limit = rwb->rq_depth.max_depth;
        else if ((opf & REQ_BACKGROUND) || close_io(rwb)) {
                /*
                 * If less than 100ms since we completed unrelated IO,
                 * limit us to half the depth for background writeback.
                 */
                limit = rwb->wb_background;
        } else
                limit = rwb->wb_normal;

        return limit;
}
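/*
 * Putting the pieces together (illustrative numbers): with max_depth = 16,
 * calc_wb_limits() yields wb_normal = 8 and wb_background = 4, so a
 * REQ_SYNC or kswapd write may use the full depth of 16, ordinary buffered
 * writeback up to 8, and background writeback (or writes issued close to
 * other IO) only up to 4. Discards always get the background limit.
 */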
struct wbt_wait_data {
        struct rq_wb *rwb;
        enum wbt_flags wb_acct;
        blk_opf_t opf;
};
static bool wbt_inflight_cb(struct rq_wait *rqw, void *private_data)
{
        struct wbt_wait_data *data = private_data;

        return rq_wait_inc_below(rqw, get_limit(data->rwb, data->opf));
}

static void wbt_cleanup_cb(struct rq_wait *rqw, void *private_data)
{
        struct wbt_wait_data *data = private_data;

        wbt_rqw_done(data->rwb, rqw, data->wb_acct);
}
/*
 * Block if we will exceed our limit, or if we are currently waiting for
 * the timer to kick off queuing again.
 */
static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
                       blk_opf_t opf)
{
        struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
        struct wbt_wait_data data = {
                .rwb = rwb,
                .wb_acct = wb_acct,
                .opf = opf,
        };

        rq_qos_wait(rqw, &data, wbt_inflight_cb, wbt_cleanup_cb);
}
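/*
 * rq_qos_wait() parks the caller on the rq_wait queue: wbt_inflight_cb()
 * tries to take an inflight slot below the current limit, the task sleeps
 * until wbt_rqw_done() wakes the queue on completions, and wbt_cleanup_cb()
 * hands back a slot that ends up not being needed.
 */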
static inline bool wbt_should_throttle(struct bio *bio)
{
        switch (bio_op(bio)) {
        case REQ_OP_WRITE:
                /*
                 * Don't throttle WRITE_ODIRECT
                 */
                if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) ==
                    (REQ_SYNC | REQ_IDLE))
                        return false;
                fallthrough;
        case REQ_OP_DISCARD:
                return true;
        default:
                return false;
        }
}

static enum wbt_flags bio_to_wbt_flags(struct rq_wb *rwb, struct bio *bio)
{
        enum wbt_flags flags = 0;

        if (!rwb_enabled(rwb))
                return 0;

        if (bio_op(bio) == REQ_OP_READ) {
                flags = WBT_READ;
        } else if (wbt_should_throttle(bio)) {
                if (current_is_kswapd())
                        flags |= WBT_KSWAPD;
                if (bio_op(bio) == REQ_OP_DISCARD)
                        flags |= WBT_DISCARD;
                flags |= WBT_TRACKED;
        }
        return flags;
}
static void wbt_cleanup(struct rq_qos *rqos, struct bio *bio)
{
        struct rq_wb *rwb = RQWB(rqos);
        enum wbt_flags flags = bio_to_wbt_flags(rwb, bio);

        __wbt_done(rqos, flags);
}
/*
 * May sleep, if we have exceeded the writeback limits. Caller can pass
 * in an irq held spinlock, if it holds one when calling this function.
 * If we do sleep, we'll release and re-grab it.
 */
static void wbt_wait(struct rq_qos *rqos, struct bio *bio)
{
        struct rq_wb *rwb = RQWB(rqos);
        enum wbt_flags flags;

        flags = bio_to_wbt_flags(rwb, bio);
        if (!(flags & WBT_TRACKED)) {
                if (flags & WBT_READ)
                        wb_timestamp(rwb, &rwb->last_issue);
                return;
        }

        __wbt_wait(rwb, flags, bio->bi_opf);

        if (!blk_stat_is_active(rwb->cb))
                rwb_arm_timer(rwb);
}
static void wbt_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
{
        struct rq_wb *rwb = RQWB(rqos);

        rq->wbt_flags |= bio_to_wbt_flags(rwb, bio);
}
static void wbt_issue(struct rq_qos *rqos, struct request *rq)
{
        struct rq_wb *rwb = RQWB(rqos);

        if (!rwb_enabled(rwb))
                return;

        /*
         * Track sync issue, in case it takes a long time to complete. Allows
         * us to react quicker, if a sync IO takes a long time to complete.
         * Note that this is just a hint. The request can go away when it
         * completes, so it's important we never dereference it. We only use
         * the address to compare with, which is why we store the sync_issue
         * time locally.
         */
        if (wbt_is_read(rq) && !rwb->sync_issue) {
                rwb->sync_cookie = rq;
                rwb->sync_issue = rq->io_start_time_ns;
        }
}

static void wbt_requeue(struct rq_qos *rqos, struct request *rq)
{
        struct rq_wb *rwb = RQWB(rqos);

        if (!rwb_enabled(rwb))
                return;
        if (rq == rwb->sync_cookie) {
                rwb->sync_issue = 0;
                rwb->sync_cookie = NULL;
        }
}
void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
{
        struct rq_qos *rqos = wbt_rq_qos(q);

        if (rqos)
                RQWB(rqos)->wc = write_cache_on;
}
/*
 * Enable wbt if defaults are configured that way
 */
void wbt_enable_default(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;
        struct rq_qos *rqos;
        bool enable = IS_ENABLED(CONFIG_BLK_WBT_MQ);

        if (q->elevator &&
            test_bit(ELEVATOR_FLAG_DISABLE_WBT, &q->elevator->flags))
                enable = false;

        /* Throttling already enabled? */
        rqos = wbt_rq_qos(q);
        if (rqos) {
                if (enable && RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT)
                        RQWB(rqos)->enable_state = WBT_STATE_ON_DEFAULT;
                return;
        }

        /* Queue not registered? Maybe shutting down... */
        if (!blk_queue_registered(q))
                return;

        if (queue_is_mq(q) && enable)
                wbt_init(disk);
}
EXPORT_SYMBOL_GPL(wbt_enable_default);
u64 wbt_default_latency_nsec(struct request_queue *q)
{
        /*
         * We default to 2msec for non-rotational storage, and 75msec
         * for rotational storage.
         */
        if (blk_queue_nonrot(q))
                return 2000000ULL;
        else
                return 75000000ULL;
}

static int wbt_data_dir(const struct request *rq)
{
        const enum req_op op = req_op(rq);

        if (op == REQ_OP_READ)
                return READ;
        else if (op_is_write(op))
                return WRITE;

        /* don't account */
        return -1;
}
static void wbt_queue_depth_changed(struct rq_qos *rqos)
{
        RQWB(rqos)->rq_depth.queue_depth = blk_queue_depth(rqos->disk->queue);
        wbt_update_limits(RQWB(rqos));
}
static void wbt_exit(struct rq_qos *rqos)
{
        struct rq_wb *rwb = RQWB(rqos);

        blk_stat_remove_callback(rqos->disk->queue, rwb->cb);
        blk_stat_free_callback(rwb->cb);
        kfree(rwb);
}

/*
 * Disable wbt, if enabled by default.
 */
void wbt_disable_default(struct gendisk *disk)
{
        struct rq_qos *rqos = wbt_rq_qos(disk->queue);
        struct rq_wb *rwb;

        if (!rqos)
                return;

        rwb = RQWB(rqos);
        if (rwb->enable_state == WBT_STATE_ON_DEFAULT) {
                blk_stat_deactivate(rwb->cb);
                rwb->enable_state = WBT_STATE_OFF_DEFAULT;
        }
}
EXPORT_SYMBOL_GPL(wbt_disable_default);
#ifdef CONFIG_BLK_DEBUG_FS
static int wbt_curr_win_nsec_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);

        seq_printf(m, "%llu\n", rwb->cur_win_nsec);
        return 0;
}

static int wbt_enabled_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);

        seq_printf(m, "%d\n", rwb->enable_state);
        return 0;
}

static int wbt_id_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;

        seq_printf(m, "%u\n", rqos->id);
        return 0;
}

static int wbt_inflight_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);
        int i;

        for (i = 0; i < WBT_NUM_RWQ; i++)
                seq_printf(m, "%d: inflight %d\n", i,
                           atomic_read(&rwb->rq_wait[i].inflight));
        return 0;
}

static int wbt_min_lat_nsec_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);

        seq_printf(m, "%lu\n", rwb->min_lat_nsec);
        return 0;
}

static int wbt_unknown_cnt_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);

        seq_printf(m, "%u\n", rwb->unknown_cnt);
        return 0;
}

static int wbt_normal_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);

        seq_printf(m, "%u\n", rwb->wb_normal);
        return 0;
}

static int wbt_background_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);

        seq_printf(m, "%u\n", rwb->wb_background);
        return 0;
}

static const struct blk_mq_debugfs_attr wbt_debugfs_attrs[] = {
        {"curr_win_nsec", 0400, wbt_curr_win_nsec_show},
        {"enabled", 0400, wbt_enabled_show},
        {"id", 0400, wbt_id_show},
        {"inflight", 0400, wbt_inflight_show},
        {"min_lat_nsec", 0400, wbt_min_lat_nsec_show},
        {"unknown_cnt", 0400, wbt_unknown_cnt_show},
        {"wb_normal", 0400, wbt_normal_show},
        {"wb_background", 0400, wbt_background_show},
        {},
};
#endif
static const struct rq_qos_ops wbt_rqos_ops = {
        .throttle = wbt_wait,
        .issue = wbt_issue,
        .track = wbt_track,
        .requeue = wbt_requeue,
        .done = wbt_done,
        .cleanup = wbt_cleanup,
        .queue_depth_changed = wbt_queue_depth_changed,
        .exit = wbt_exit,
#ifdef CONFIG_BLK_DEBUG_FS
        .debugfs_attrs = wbt_debugfs_attrs,
#endif
};
int wbt_init(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;
        struct rq_wb *rwb;
        int i;
        int ret;

        rwb = kzalloc(sizeof(*rwb), GFP_KERNEL);
        if (!rwb)
                return -ENOMEM;

        rwb->cb = blk_stat_alloc_callback(wb_timer_fn, wbt_data_dir, 2, rwb);
        if (!rwb->cb) {
                kfree(rwb);
                return -ENOMEM;
        }

        for (i = 0; i < WBT_NUM_RWQ; i++)
                rq_wait_init(&rwb->rq_wait[i]);

        rwb->last_comp = rwb->last_issue = jiffies;
        rwb->win_nsec = RWB_WINDOW_NSEC;
        rwb->enable_state = WBT_STATE_ON_DEFAULT;
        rwb->wc = test_bit(QUEUE_FLAG_WC, &q->queue_flags);
        rwb->rq_depth.default_depth = RWB_DEF_DEPTH;
        rwb->min_lat_nsec = wbt_default_latency_nsec(q);
        rwb->rq_depth.queue_depth = blk_queue_depth(q);
        wbt_update_limits(rwb);

        /*
         * Assign rwb and add the stats callback.
         */
        mutex_lock(&q->rq_qos_mutex);
        ret = rq_qos_add(&rwb->rqos, disk, RQ_QOS_WBT, &wbt_rqos_ops);
        mutex_unlock(&q->rq_qos_mutex);
        if (ret)
                goto err_free;

        blk_stat_add_callback(q, rwb->cb);

        return 0;

err_free:
        blk_stat_free_callback(rwb->cb);
        kfree(rwb);
        return ret;
}