1 /*
2 * Block rq-qos base io controller
3 *
4  * This works similarly to wbt, with a few exceptions:
5 *
6 * - It's bio based, so the latency covers the whole block layer in addition to
7 * the actual io.
8 * - We will throttle all IO that comes in here if we need to.
9 * - We use the mean latency over the 100ms window. This is because writes can
10 * be particularly fast, which could give us a false sense of the impact of
11 * other workloads on our protected workload.
12  * - By default there's no throttling: we set the queue_depth to UINT_MAX so
13  *   that we can have as many outstanding bios as we're allowed to. Only at
14 * throttle time do we pay attention to the actual queue depth.
15 *
16  * The hierarchy works like the cpu controller does: we track the latency at
17  * every configured node, and each configured node has its own independent
18  * queue depth. This means that we only care about our latency targets at the
19  * peer level. Some group at the bottom of the hierarchy isn't going to affect
20  * a group at the end of some other path if we're only configured at leaf level.
21 *
22 * Consider the following
23 *
24 * root blkg
25 * / \
26 * fast (target=5ms) slow (target=10ms)
27 * / \ / \
28 * a b normal(15ms) unloved
29 *
30 * "a" and "b" have no target, but their combined io under "fast" cannot exceed
31 * an average latency of 5ms. If it does then we will throttle the "slow"
32 * group. In the case of "normal", if it exceeds its 15ms target, we will
33 * throttle "unloved", but nobody else.
34 *
35 * In this example "fast", "slow", and "normal" will be the only groups actually
36  * accounting their io latencies. We have to walk up the hierarchy to the root
37  * on every submit and complete so we can do the appropriate stat recording and
38  * adjust our own queue depth if needed.
39 *
40 * There are 2 ways we throttle IO.
41 *
42  * 1) Queue depth throttling. As we throttle down we will adjust the maximum
43  * number of IOs we're allowed to have in flight. This starts at UINT_MAX and
44  * can be scaled down to 1. If the group is only ever submitting IO for itself
45  * then this is the only way we throttle.
46 *
47  * 2) Induced delay throttling. This is for the case where a group is generating
48  * IO that has to be issued by the root cg to avoid priority inversion - think
49  * REQ_META or REQ_SWAP. If we are already at qd == 1, a lot of work is being
50  * done on our behalf by the root cg, and we are being asked to scale down
51  * further, then we induce a latency at userspace return. We accumulate the
52 * total amount of time we need to be punished by doing
53 *
54 * total_time += min_lat_nsec - actual_io_completion
55 *
56 * and then at throttle time will do
57 *
58 * throttle_time = min(total_time, NSEC_PER_SEC)
59 *
60  * This induced delay will throttle back the activity that is generating the
61  * root cg issued IOs, whether that's some metadata-intensive operation or the
62  * group using so much memory that it is pushing us into swap.
63 *
64 * Copyright (C) 2018 Josef Bacik
65 */
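/*
 * Illustrative usage (cgroup v2; the device number below is made up):
 *
 *   echo "254:0 target=2000" > /sys/fs/cgroup/protected/io.latency
 *
 * gives IO from the "protected" group on device 254:0 a 2ms (2000 usec)
 * latency target; writing "254:0 target=max" clears it again.  See
 * iolatency_set_limit() below for the exact parsing.
 */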
66 #include <linux/kernel.h>
67 #include <linux/blk_types.h>
68 #include <linux/backing-dev.h>
69 #include <linux/module.h>
70 #include <linux/timer.h>
71 #include <linux/memcontrol.h>
72 #include <linux/sched/loadavg.h>
73 #include <linux/sched/signal.h>
74 #include <trace/events/block.h>
75 #include <linux/blk-mq.h>
76 #include "blk-rq-qos.h"
77 #include "blk-stat.h"
78
79 #define DEFAULT_SCALE_COOKIE 1000000U
80
81 static struct blkcg_policy blkcg_policy_iolatency;
82 struct iolatency_grp;
83
84 struct blk_iolatency {
85 struct rq_qos rqos;
86 struct timer_list timer;
87 atomic_t enabled;
88 };
89
90 static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos)
91 {
92 return container_of(rqos, struct blk_iolatency, rqos);
93 }
94
95 static inline bool blk_iolatency_enabled(struct blk_iolatency *blkiolat)
96 {
97 return atomic_read(&blkiolat->enabled) > 0;
98 }
99
100 struct child_latency_info {
101 spinlock_t lock;
102
103 /* Last time we adjusted the scale of everybody. */
104 u64 last_scale_event;
105
106 /* The latency that we missed. */
107 u64 scale_lat;
108
109 	/* Total IOs from all of our children for the last summation. */
110 u64 nr_samples;
111
113 	/* The group that actually changed the latency numbers. */
113 struct iolatency_grp *scale_grp;
114
115 /* Cookie to tell if we need to scale up or down. */
116 atomic_t scale_cookie;
117 };
118
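/*
 * Two accounting modes: on SSDs we count how many requests missed the
 * latency target out of the total (percentile_stats); on rotational
 * devices we track the mean latency via blk_rq_stat.  latency_stat wraps
 * both so the rest of the code can stay device agnostic.
 */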
119 struct percentile_stats {
120 u64 total;
121 u64 missed;
122 };
123
124 struct latency_stat {
125 union {
126 struct percentile_stats ps;
127 struct blk_rq_stat rqs;
128 };
129 };
130
131 struct iolatency_grp {
132 struct blkg_policy_data pd;
133 struct latency_stat __percpu *stats;
134 struct latency_stat cur_stat;
135 struct blk_iolatency *blkiolat;
136 struct rq_depth rq_depth;
137 struct rq_wait rq_wait;
138 atomic64_t window_start;
139 atomic_t scale_cookie;
140 u64 min_lat_nsec;
141 u64 cur_win_nsec;
142
143 /* total running average of our io latency. */
144 u64 lat_avg;
145
146 	/* Our current number of IOs for the last summation. */
147 u64 nr_samples;
148
149 bool ssd;
150 struct child_latency_info child_lat;
151 };
152
153 #define BLKIOLATENCY_MIN_WIN_SIZE (100 * NSEC_PER_MSEC)
154 #define BLKIOLATENCY_MAX_WIN_SIZE NSEC_PER_SEC
155 /*
156 * These are the constants used to fake the fixed-point moving average
157 * calculation just like load average. The call to calc_load() folds
158 * (FIXED_1 (2048) - exp_factor) * new_sample into lat_avg. The sampling
159 * window size is bucketed to try to approximately calculate average
160 * latency such that 1/exp (decay rate) is [1 min, 2.5 min) when windows
161 * elapse immediately. Note, windows only elapse with IO activity. Idle
162 * periods extend the most recent window.
163 */
164 #define BLKIOLATENCY_NR_EXP_FACTORS 5
165 #define BLKIOLATENCY_EXP_BUCKET_SIZE (BLKIOLATENCY_MAX_WIN_SIZE / \
166 (BLKIOLATENCY_NR_EXP_FACTORS - 1))
167 static const u64 iolatency_exp_factors[BLKIOLATENCY_NR_EXP_FACTORS] = {
168 2045, // exp(1/600) - 600 samples
169 2039, // exp(1/240) - 240 samples
170 2031, // exp(1/120) - 120 samples
171 2023, // exp(1/80) - 80 samples
172 2014, // exp(1/60) - 60 samples
173 };
174
175 static inline struct iolatency_grp *pd_to_lat(struct blkg_policy_data *pd)
176 {
177 return pd ? container_of(pd, struct iolatency_grp, pd) : NULL;
178 }
179
180 static inline struct iolatency_grp *blkg_to_lat(struct blkcg_gq *blkg)
181 {
182 return pd_to_lat(blkg_to_pd(blkg, &blkcg_policy_iolatency));
183 }
184
185 static inline struct blkcg_gq *lat_to_blkg(struct iolatency_grp *iolat)
186 {
187 return pd_to_blkg(&iolat->pd);
188 }
189
190 static inline void latency_stat_init(struct iolatency_grp *iolat,
191 struct latency_stat *stat)
192 {
193 if (iolat->ssd) {
194 stat->ps.total = 0;
195 stat->ps.missed = 0;
196 } else
197 blk_rq_stat_init(&stat->rqs);
198 }
199
200 static inline void latency_stat_sum(struct iolatency_grp *iolat,
201 struct latency_stat *sum,
202 struct latency_stat *stat)
203 {
204 if (iolat->ssd) {
205 sum->ps.total += stat->ps.total;
206 sum->ps.missed += stat->ps.missed;
207 } else
208 blk_rq_stat_sum(&sum->rqs, &stat->rqs);
209 }
210
211 static inline void latency_stat_record_time(struct iolatency_grp *iolat,
212 u64 req_time)
213 {
214 struct latency_stat *stat = get_cpu_ptr(iolat->stats);
215 if (iolat->ssd) {
216 if (req_time >= iolat->min_lat_nsec)
217 stat->ps.missed++;
218 stat->ps.total++;
219 } else
220 blk_rq_stat_add(&stat->rqs, req_time);
221 put_cpu_ptr(stat);
222 }
223
224 static inline bool latency_sum_ok(struct iolatency_grp *iolat,
225 struct latency_stat *stat)
226 {
227 if (iolat->ssd) {
228 u64 thresh = div64_u64(stat->ps.total, 10);
229 thresh = max(thresh, 1ULL);
230 return stat->ps.missed < thresh;
231 }
232 return stat->rqs.mean <= iolat->min_lat_nsec;
233 }
234
235 static inline u64 latency_stat_samples(struct iolatency_grp *iolat,
236 struct latency_stat *stat)
237 {
238 if (iolat->ssd)
239 return stat->ps.total;
240 return stat->rqs.nr_samples;
241 }
242
243 static inline void iolat_update_total_lat_avg(struct iolatency_grp *iolat,
244 struct latency_stat *stat)
245 {
246 int exp_idx;
247
248 if (iolat->ssd)
249 return;
250
251 /*
252 * calc_load() takes in a number stored in fixed point representation.
253 * Because we are using this for IO time in ns, the values stored
254 * are significantly larger than the FIXED_1 denominator (2048).
255 * Therefore, rounding errors in the calculation are negligible and
256 * can be ignored.
257 */
258 exp_idx = min_t(int, BLKIOLATENCY_NR_EXP_FACTORS - 1,
259 div64_u64(iolat->cur_win_nsec,
260 BLKIOLATENCY_EXP_BUCKET_SIZE));
261 iolat->lat_avg = calc_load(iolat->lat_avg,
262 iolatency_exp_factors[exp_idx],
263 stat->rqs.mean);
264 }
265
266 static void iolat_cleanup_cb(struct rq_wait *rqw, void *private_data)
267 {
268 atomic_dec(&rqw->inflight);
269 wake_up(&rqw->wait);
270 }
271
272 static bool iolat_acquire_inflight(struct rq_wait *rqw, void *private_data)
273 {
274 struct iolatency_grp *iolat = private_data;
275 return rq_wait_inc_below(rqw, iolat->rq_depth.max_depth);
276 }
277
278 static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
279 struct iolatency_grp *iolat,
280 bool issue_as_root,
281 bool use_memdelay)
282 {
283 struct rq_wait *rqw = &iolat->rq_wait;
284 unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);
285
286 if (use_delay)
287 blkcg_schedule_throttle(rqos->q, use_memdelay);
288
289 /*
290 * To avoid priority inversions we want to just take a slot if we are
291 * issuing as root. If we're being killed off there's no point in
292 	 * delaying things; we may have been killed by the OOM killer, and
293 	 * throttling would only make recovery take longer, so just let the IOs
294 	 * through so the task can go away.
295 */
296 if (issue_as_root || fatal_signal_pending(current)) {
297 atomic_inc(&rqw->inflight);
298 return;
299 }
300
301 rq_qos_wait(rqw, iolat, iolat_acquire_inflight, iolat_cleanup_cb);
302 }
303
304 #define SCALE_DOWN_FACTOR 2
305 #define SCALE_UP_FACTOR 4
306
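/*
 * Step size for a scale_cookie / queue depth adjustment: 1/16th of the
 * queue depth when scaling up, 1/4th when scaling down, but never less
 * than 1.
 */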
307 static inline unsigned long scale_amount(unsigned long qd, bool up)
308 {
309 return max(up ? qd >> SCALE_UP_FACTOR : qd >> SCALE_DOWN_FACTOR, 1UL);
310 }
311
312 /*
313 * We scale the qd down faster than we scale up, so we need to use this helper
314  * to adjust the scale_cookie accordingly so the scale_cookie doesn't get back
315  * to DEFAULT_SCALE_COOKIE prematurely and unthrottle too much.
316  *
317  * Each group has its own local copy of the last scale cookie it saw, so if
318  * the global scale cookie goes up or down it knows which way it needs to go
319  * based on its last knowledge of it.
320 */
321 static void scale_cookie_change(struct blk_iolatency *blkiolat,
322 struct child_latency_info *lat_info,
323 bool up)
324 {
325 unsigned long qd = blkiolat->rqos.q->nr_requests;
326 unsigned long scale = scale_amount(qd, up);
327 unsigned long old = atomic_read(&lat_info->scale_cookie);
328 unsigned long max_scale = qd << 1;
329 unsigned long diff = 0;
330
331 if (old < DEFAULT_SCALE_COOKIE)
332 diff = DEFAULT_SCALE_COOKIE - old;
333
334 if (up) {
335 if (scale + old > DEFAULT_SCALE_COOKIE)
336 atomic_set(&lat_info->scale_cookie,
337 DEFAULT_SCALE_COOKIE);
338 else if (diff > qd)
339 atomic_inc(&lat_info->scale_cookie);
340 else
341 atomic_add(scale, &lat_info->scale_cookie);
342 } else {
343 /*
344 * We don't want to dig a hole so deep that it takes us hours to
345 * dig out of it. Just enough that we don't throttle/unthrottle
346 * with jagged workloads but can still unthrottle once pressure
347 * has sufficiently dissipated.
348 */
349 if (diff > qd) {
350 if (diff < max_scale)
351 atomic_dec(&lat_info->scale_cookie);
352 } else {
353 atomic_sub(scale, &lat_info->scale_cookie);
354 }
355 }
356 }
357
358 /*
359  * Change the queue depth of the iolatency_grp. We add 1/16th of the queue
360  * depth when scaling up, and halve the depth when scaling down, so we don't
361  * get wild swings and hopefully dial in to a fairer overall distribution.
362 */
363 static void scale_change(struct iolatency_grp *iolat, bool up)
364 {
365 unsigned long qd = iolat->blkiolat->rqos.q->nr_requests;
366 unsigned long scale = scale_amount(qd, up);
367 unsigned long old = iolat->rq_depth.max_depth;
368
369 if (old > qd)
370 old = qd;
371
372 if (up) {
373 if (old == 1 && blkcg_unuse_delay(lat_to_blkg(iolat)))
374 return;
375
376 if (old < qd) {
377 old += scale;
378 old = min(old, qd);
379 iolat->rq_depth.max_depth = old;
380 wake_up_all(&iolat->rq_wait.wait);
381 }
382 } else {
383 old >>= 1;
384 iolat->rq_depth.max_depth = max(old, 1UL);
385 }
386 }
387
388 /* Check our parent and see if the scale cookie has changed. */
389 static void check_scale_change(struct iolatency_grp *iolat)
390 {
391 struct iolatency_grp *parent;
392 struct child_latency_info *lat_info;
393 unsigned int cur_cookie;
394 unsigned int our_cookie = atomic_read(&iolat->scale_cookie);
395 u64 scale_lat;
396 unsigned int old;
397 int direction = 0;
398
399 if (lat_to_blkg(iolat)->parent == NULL)
400 return;
401
402 parent = blkg_to_lat(lat_to_blkg(iolat)->parent);
403 if (!parent)
404 return;
405
406 lat_info = &parent->child_lat;
407 cur_cookie = atomic_read(&lat_info->scale_cookie);
408 scale_lat = READ_ONCE(lat_info->scale_lat);
409
410 if (cur_cookie < our_cookie)
411 direction = -1;
412 else if (cur_cookie > our_cookie)
413 direction = 1;
414 else
415 return;
416
417 old = atomic_cmpxchg(&iolat->scale_cookie, our_cookie, cur_cookie);
418
419 /* Somebody beat us to the punch, just bail. */
420 if (old != our_cookie)
421 return;
422
423 if (direction < 0 && iolat->min_lat_nsec) {
424 u64 samples_thresh;
425
426 if (!scale_lat || iolat->min_lat_nsec <= scale_lat)
427 return;
428
429 /*
430 * Sometimes high priority groups are their own worst enemy, so
431 * instead of taking it out on some poor other group that did 5%
432 		 * or less of the IOs for the last summation, just skip this
433 * scale down event.
434 */
435 samples_thresh = lat_info->nr_samples * 5;
436 samples_thresh = max(1ULL, div64_u64(samples_thresh, 100));
437 if (iolat->nr_samples <= samples_thresh)
438 return;
439 }
440
441 /* We're as low as we can go. */
442 if (iolat->rq_depth.max_depth == 1 && direction < 0) {
443 blkcg_use_delay(lat_to_blkg(iolat));
444 return;
445 }
446
447 /* We're back to the default cookie, unthrottle all the things. */
448 if (cur_cookie == DEFAULT_SCALE_COOKIE) {
449 blkcg_clear_delay(lat_to_blkg(iolat));
450 iolat->rq_depth.max_depth = UINT_MAX;
451 wake_up_all(&iolat->rq_wait.wait);
452 return;
453 }
454
455 scale_change(iolat, direction > 0);
456 }
457
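/*
 * rq_qos ->throttle hook, called on bio submission.  Walk from the bio's
 * blkg up to (but not including) the root, re-check each level's scale
 * cookie, and sleep on that level's rq_wait if it is over its queue depth.
 * Also arm the housekeeping timer to fire a second from now if it isn't
 * already pending.
 */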
458 static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio)
459 {
460 struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
461 struct blkcg_gq *blkg = bio->bi_blkg;
462 bool issue_as_root = bio_issue_as_root_blkg(bio);
463
464 if (!blk_iolatency_enabled(blkiolat))
465 return;
466
467 while (blkg && blkg->parent) {
468 struct iolatency_grp *iolat = blkg_to_lat(blkg);
469 if (!iolat) {
470 blkg = blkg->parent;
471 continue;
472 }
473
474 check_scale_change(iolat);
475 __blkcg_iolatency_throttle(rqos, iolat, issue_as_root,
476 (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
477 blkg = blkg->parent;
478 }
479 if (!timer_pending(&blkiolat->timer))
480 mod_timer(&blkiolat->timer, jiffies + HZ);
481 }
482
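/*
 * Account one bio's completion latency in this group's per-cpu stats.
 * Bios issued as root aren't counted; instead, if the group is already
 * being throttled and the root-issued bio finished faster than the
 * target, the time saved is charged back to the group as induced delay
 * via blkcg_add_delay().
 */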
483 static void iolatency_record_time(struct iolatency_grp *iolat,
484 struct bio_issue *issue, u64 now,
485 bool issue_as_root)
486 {
487 u64 start = bio_issue_time(issue);
488 u64 req_time;
489
490 /*
491 	 * Truncate "now" the same way the issue time was truncated so the two
492 	 * timestamps are directly comparable.
493 */
494 now = __bio_issue_time(now);
495
496 if (now <= start)
497 return;
498
499 req_time = now - start;
500
501 /*
502 	 * We don't want to count issue_as_root bios in the cgroup's latency
503 	 * statistics as they could skew the numbers downwards.
504 */
505 if (unlikely(issue_as_root && iolat->rq_depth.max_depth != UINT_MAX)) {
506 u64 sub = iolat->min_lat_nsec;
507 if (req_time < sub)
508 blkcg_add_delay(lat_to_blkg(iolat), now, sub - req_time);
509 return;
510 }
511
512 latency_stat_record_time(iolat, req_time);
513 }
514
515 #define BLKIOLATENCY_MIN_ADJUST_TIME (500 * NSEC_PER_MSEC)
516 #define BLKIOLATENCY_MIN_GOOD_SAMPLES 5
517
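/*
 * Runs at most once per window for a group.  Fold the per-cpu stats into a
 * single sample, update the running latency average, then, under the
 * parent's lock, decide whether the parent's scale cookie should move up
 * or down based on whether we met our latency target.
 */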
518 static void iolatency_check_latencies(struct iolatency_grp *iolat, u64 now)
519 {
520 struct blkcg_gq *blkg = lat_to_blkg(iolat);
521 struct iolatency_grp *parent;
522 struct child_latency_info *lat_info;
523 struct latency_stat stat;
524 unsigned long flags;
525 int cpu;
526
527 latency_stat_init(iolat, &stat);
528 preempt_disable();
529 for_each_online_cpu(cpu) {
530 struct latency_stat *s;
531 s = per_cpu_ptr(iolat->stats, cpu);
532 latency_stat_sum(iolat, &stat, s);
533 latency_stat_init(iolat, s);
534 }
535 preempt_enable();
536
537 parent = blkg_to_lat(blkg->parent);
538 if (!parent)
539 return;
540
541 lat_info = &parent->child_lat;
542
543 iolat_update_total_lat_avg(iolat, &stat);
544
545 /* Everything is ok and we don't need to adjust the scale. */
546 if (latency_sum_ok(iolat, &stat) &&
547 atomic_read(&lat_info->scale_cookie) == DEFAULT_SCALE_COOKIE)
548 return;
549
550 	/* Somebody may have beaten us to a scale event; the checks below bail if so. */
551 spin_lock_irqsave(&lat_info->lock, flags);
552
553 latency_stat_sum(iolat, &iolat->cur_stat, &stat);
554 lat_info->nr_samples -= iolat->nr_samples;
555 lat_info->nr_samples += latency_stat_samples(iolat, &iolat->cur_stat);
556 iolat->nr_samples = latency_stat_samples(iolat, &iolat->cur_stat);
557
558 if ((lat_info->last_scale_event >= now ||
559 now - lat_info->last_scale_event < BLKIOLATENCY_MIN_ADJUST_TIME))
560 goto out;
561
562 if (latency_sum_ok(iolat, &iolat->cur_stat) &&
563 latency_sum_ok(iolat, &stat)) {
564 if (latency_stat_samples(iolat, &iolat->cur_stat) <
565 BLKIOLATENCY_MIN_GOOD_SAMPLES)
566 goto out;
567 if (lat_info->scale_grp == iolat) {
568 lat_info->last_scale_event = now;
569 scale_cookie_change(iolat->blkiolat, lat_info, true);
570 }
571 } else if (lat_info->scale_lat == 0 ||
572 lat_info->scale_lat >= iolat->min_lat_nsec) {
573 lat_info->last_scale_event = now;
574 if (!lat_info->scale_grp ||
575 lat_info->scale_lat > iolat->min_lat_nsec) {
576 WRITE_ONCE(lat_info->scale_lat, iolat->min_lat_nsec);
577 lat_info->scale_grp = iolat;
578 }
579 scale_cookie_change(iolat->blkiolat, lat_info, false);
580 }
581 latency_stat_init(iolat, &iolat->cur_stat);
582 out:
583 spin_unlock_irqrestore(&lat_info->lock, flags);
584 }
585
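/*
 * rq_qos ->done_bio hook, called on bio completion.  Walk up the
 * hierarchy, release the in-flight slot taken at throttle time, record
 * the latency, and trigger a latency check once the current window for a
 * level has elapsed.
 */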
586 static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
587 {
588 struct blkcg_gq *blkg;
589 struct rq_wait *rqw;
590 struct iolatency_grp *iolat;
591 u64 window_start;
592 u64 now = ktime_to_ns(ktime_get());
593 bool issue_as_root = bio_issue_as_root_blkg(bio);
594 bool enabled = false;
595 int inflight = 0;
596
597 blkg = bio->bi_blkg;
598 if (!blkg || !bio_flagged(bio, BIO_TRACKED))
599 return;
600
601 iolat = blkg_to_lat(bio->bi_blkg);
602 if (!iolat)
603 return;
604
605 enabled = blk_iolatency_enabled(iolat->blkiolat);
606 if (!enabled)
607 return;
608
609 while (blkg && blkg->parent) {
610 iolat = blkg_to_lat(blkg);
611 if (!iolat) {
612 blkg = blkg->parent;
613 continue;
614 }
615 rqw = &iolat->rq_wait;
616
617 inflight = atomic_dec_return(&rqw->inflight);
618 WARN_ON_ONCE(inflight < 0);
619 if (iolat->min_lat_nsec == 0)
620 goto next;
621 iolatency_record_time(iolat, &bio->bi_issue, now,
622 issue_as_root);
623 window_start = atomic64_read(&iolat->window_start);
624 if (now > window_start &&
625 (now - window_start) >= iolat->cur_win_nsec) {
626 if (atomic64_cmpxchg(&iolat->window_start,
627 window_start, now) == window_start)
628 iolatency_check_latencies(iolat, now);
629 }
630 next:
631 wake_up(&rqw->wait);
632 blkg = blkg->parent;
633 }
634 }
635
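/*
 * rq_qos ->cleanup hook: the bio went through ->throttle but will not
 * reach ->done_bio (e.g. no request could be allocated for it), so give
 * back the in-flight slots we took on the way down.
 */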
636 static void blkcg_iolatency_cleanup(struct rq_qos *rqos, struct bio *bio)
637 {
638 struct blkcg_gq *blkg;
639
640 blkg = bio->bi_blkg;
641 while (blkg && blkg->parent) {
642 struct rq_wait *rqw;
643 struct iolatency_grp *iolat;
644
645 iolat = blkg_to_lat(blkg);
646 if (!iolat)
647 goto next;
648
649 rqw = &iolat->rq_wait;
650 atomic_dec(&rqw->inflight);
651 wake_up(&rqw->wait);
652 next:
653 blkg = blkg->parent;
654 }
655 }
656
657 static void blkcg_iolatency_exit(struct rq_qos *rqos)
658 {
659 struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
660
661 del_timer_sync(&blkiolat->timer);
662 blkcg_deactivate_policy(rqos->q, &blkcg_policy_iolatency);
663 kfree(blkiolat);
664 }
665
666 static struct rq_qos_ops blkcg_iolatency_ops = {
667 .throttle = blkcg_iolatency_throttle,
668 .cleanup = blkcg_iolatency_cleanup,
669 .done_bio = blkcg_iolatency_done_bio,
670 .exit = blkcg_iolatency_exit,
671 };
672
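/*
 * Housekeeping timer, armed from the throttle path to fire a second
 * later.  For every group that is currently scaled down, scale back up if
 * there is no longer a scale_grp responsible for the scale-down, and
 * clear the scale_grp after 5 seconds without a scale event so an idle
 * group can't keep everybody throttled.
 */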
673 static void blkiolatency_timer_fn(struct timer_list *t)
674 {
675 struct blk_iolatency *blkiolat = from_timer(blkiolat, t, timer);
676 struct blkcg_gq *blkg;
677 struct cgroup_subsys_state *pos_css;
678 u64 now = ktime_to_ns(ktime_get());
679
680 rcu_read_lock();
681 blkg_for_each_descendant_pre(blkg, pos_css,
682 blkiolat->rqos.q->root_blkg) {
683 struct iolatency_grp *iolat;
684 struct child_latency_info *lat_info;
685 unsigned long flags;
686 u64 cookie;
687
688 /*
689 		 * We could be exiting; don't access the pd unless we have a
690 * ref on the blkg.
691 */
692 if (!blkg_tryget(blkg))
693 continue;
694
695 iolat = blkg_to_lat(blkg);
696 if (!iolat)
697 goto next;
698
699 lat_info = &iolat->child_lat;
700 cookie = atomic_read(&lat_info->scale_cookie);
701
702 if (cookie >= DEFAULT_SCALE_COOKIE)
703 goto next;
704
705 spin_lock_irqsave(&lat_info->lock, flags);
706 if (lat_info->last_scale_event >= now)
707 goto next_lock;
708
709 /*
710 		 * We scaled down but don't have a scale_grp, so scale up and
711 		 * carry on.
712 */
713 if (lat_info->scale_grp == NULL) {
714 scale_cookie_change(iolat->blkiolat, lat_info, true);
715 goto next_lock;
716 }
717
718 /*
719 		 * It's been 5 seconds since our last scale event; clear the
720 * scale grp in case the group that needed the scale down isn't
721 * doing any IO currently.
722 */
723 if (now - lat_info->last_scale_event >=
724 ((u64)NSEC_PER_SEC * 5))
725 lat_info->scale_grp = NULL;
726 next_lock:
727 spin_unlock_irqrestore(&lat_info->lock, flags);
728 next:
729 blkg_put(blkg);
730 }
731 rcu_read_unlock();
732 }
733
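/*
 * Attach the iolatency rq_qos policy to a request_queue.  Throttling
 * stays inert (enabled == 0) until some cgroup actually sets a latency
 * target via iolatency_set_limit().
 */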
734 int blk_iolatency_init(struct request_queue *q)
735 {
736 struct blk_iolatency *blkiolat;
737 struct rq_qos *rqos;
738 int ret;
739
740 blkiolat = kzalloc(sizeof(*blkiolat), GFP_KERNEL);
741 if (!blkiolat)
742 return -ENOMEM;
743
744 rqos = &blkiolat->rqos;
745 rqos->id = RQ_QOS_CGROUP;
746 rqos->ops = &blkcg_iolatency_ops;
747 rqos->q = q;
748
749 rq_qos_add(q, rqos);
750
751 ret = blkcg_activate_policy(q, &blkcg_policy_iolatency);
752 if (ret) {
753 rq_qos_del(q, rqos);
754 kfree(blkiolat);
755 return ret;
756 }
757
758 timer_setup(&blkiolat->timer, blkiolatency_timer_fn, 0);
759
760 return 0;
761 }
762
763 /*
764  * Return 1 if this change enables iolatency, -1 if it disables it, and 0
765  * otherwise.
766 */
767 static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
768 {
769 struct iolatency_grp *iolat = blkg_to_lat(blkg);
770 u64 oldval = iolat->min_lat_nsec;
771
772 iolat->min_lat_nsec = val;
773 iolat->cur_win_nsec = max_t(u64, val << 4, BLKIOLATENCY_MIN_WIN_SIZE);
774 iolat->cur_win_nsec = min_t(u64, iolat->cur_win_nsec,
775 BLKIOLATENCY_MAX_WIN_SIZE);
776
777 if (!oldval && val)
778 return 1;
779 if (oldval && !val)
780 return -1;
781 return 0;
782 }
783
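/*
 * Reset the parent's scaling state.  Called when a group's target changes
 * or the group goes offline, so stale scale_lat/scale_grp values don't
 * keep influencing the siblings.
 */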
784 static void iolatency_clear_scaling(struct blkcg_gq *blkg)
785 {
786 if (blkg->parent) {
787 struct iolatency_grp *iolat = blkg_to_lat(blkg->parent);
788 struct child_latency_info *lat_info;
789 if (!iolat)
790 return;
791
792 lat_info = &iolat->child_lat;
793 spin_lock(&lat_info->lock);
794 atomic_set(&lat_info->scale_cookie, DEFAULT_SCALE_COOKIE);
795 lat_info->last_scale_event = 0;
796 lat_info->scale_grp = NULL;
797 lat_info->scale_lat = 0;
798 spin_unlock(&lat_info->lock);
799 }
800 }
801
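/*
 * Write handler for the cgroup v2 "io.latency" file.  The input format is
 * "MAJ:MIN target=<usecs>", or "target=max" to clear the target.  Setting
 * the first target on a queue enables throttling and clearing the last
 * one disables it, with the queue frozen around the transition.
 */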
802 static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
803 size_t nbytes, loff_t off)
804 {
805 struct blkcg *blkcg = css_to_blkcg(of_css(of));
806 struct blkcg_gq *blkg;
807 struct blkg_conf_ctx ctx;
808 struct iolatency_grp *iolat;
809 char *p, *tok;
810 u64 lat_val = 0;
811 u64 oldval;
812 int ret;
813 int enable = 0;
814
815 ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
816 if (ret)
817 return ret;
818
819 iolat = blkg_to_lat(ctx.blkg);
820 p = ctx.body;
821
822 ret = -EINVAL;
823 while ((tok = strsep(&p, " "))) {
824 char key[16];
825 		char val[21];	/* 18446744073709551615 (U64_MAX) */
826
827 if (sscanf(tok, "%15[^=]=%20s", key, val) != 2)
828 goto out;
829
830 if (!strcmp(key, "target")) {
831 u64 v;
832
833 if (!strcmp(val, "max"))
834 lat_val = 0;
835 else if (sscanf(val, "%llu", &v) == 1)
836 lat_val = v * NSEC_PER_USEC;
837 else
838 goto out;
839 } else {
840 goto out;
841 }
842 }
843
844 	/* Apply the new target; remember the old one so we know whether scaling state needs a reset. */
845 blkg = ctx.blkg;
846 oldval = iolat->min_lat_nsec;
847
848 enable = iolatency_set_min_lat_nsec(blkg, lat_val);
849 if (enable) {
850 WARN_ON_ONCE(!blk_get_queue(blkg->q));
851 blkg_get(blkg);
852 }
853
854 if (oldval != iolat->min_lat_nsec) {
855 iolatency_clear_scaling(blkg);
856 }
857
858 ret = 0;
859 out:
860 blkg_conf_finish(&ctx);
861 if (ret == 0 && enable) {
862 struct iolatency_grp *tmp = blkg_to_lat(blkg);
863 struct blk_iolatency *blkiolat = tmp->blkiolat;
864
865 blk_mq_freeze_queue(blkg->q);
866
867 if (enable == 1)
868 atomic_inc(&blkiolat->enabled);
869 else if (enable == -1)
870 atomic_dec(&blkiolat->enabled);
871 else
872 WARN_ON_ONCE(1);
873
874 blk_mq_unfreeze_queue(blkg->q);
875
876 blkg_put(blkg);
877 blk_put_queue(blkg->q);
878 }
879 return ret ?: nbytes;
880 }
881
882 static u64 iolatency_prfill_limit(struct seq_file *sf,
883 struct blkg_policy_data *pd, int off)
884 {
885 struct iolatency_grp *iolat = pd_to_lat(pd);
886 const char *dname = blkg_dev_name(pd->blkg);
887
888 if (!dname || !iolat->min_lat_nsec)
889 return 0;
890 seq_printf(sf, "%s target=%llu\n",
891 dname, div_u64(iolat->min_lat_nsec, NSEC_PER_USEC));
892 return 0;
893 }
894
895 static int iolatency_print_limit(struct seq_file *sf, void *v)
896 {
897 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
898 iolatency_prfill_limit,
899 &blkcg_policy_iolatency, seq_cft(sf)->private, false);
900 return 0;
901 }
902
903 static size_t iolatency_ssd_stat(struct iolatency_grp *iolat, char *buf,
904 size_t size)
905 {
906 struct latency_stat stat;
907 int cpu;
908
909 latency_stat_init(iolat, &stat);
910 preempt_disable();
911 for_each_online_cpu(cpu) {
912 struct latency_stat *s;
913 s = per_cpu_ptr(iolat->stats, cpu);
914 latency_stat_sum(iolat, &stat, s);
915 }
916 preempt_enable();
917
918 if (iolat->rq_depth.max_depth == UINT_MAX)
919 return scnprintf(buf, size, " missed=%llu total=%llu depth=max",
920 (unsigned long long)stat.ps.missed,
921 (unsigned long long)stat.ps.total);
922 return scnprintf(buf, size, " missed=%llu total=%llu depth=%u",
923 (unsigned long long)stat.ps.missed,
924 (unsigned long long)stat.ps.total,
925 iolat->rq_depth.max_depth);
926 }
927
928 static size_t iolatency_pd_stat(struct blkg_policy_data *pd, char *buf,
929 size_t size)
930 {
931 struct iolatency_grp *iolat = pd_to_lat(pd);
932 unsigned long long avg_lat;
933 unsigned long long cur_win;
934
935 if (iolat->ssd)
936 return iolatency_ssd_stat(iolat, buf, size);
937
938 avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
939 cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);
940 if (iolat->rq_depth.max_depth == UINT_MAX)
941 return scnprintf(buf, size, " depth=max avg_lat=%llu win=%llu",
942 avg_lat, cur_win);
943
944 return scnprintf(buf, size, " depth=%u avg_lat=%llu win=%llu",
945 iolat->rq_depth.max_depth, avg_lat, cur_win);
946 }
947
948
949 static struct blkg_policy_data *iolatency_pd_alloc(gfp_t gfp, int node)
950 {
951 struct iolatency_grp *iolat;
952
953 iolat = kzalloc_node(sizeof(*iolat), gfp, node);
954 if (!iolat)
955 return NULL;
956 iolat->stats = __alloc_percpu_gfp(sizeof(struct latency_stat),
957 __alignof__(struct latency_stat), gfp);
958 if (!iolat->stats) {
959 kfree(iolat);
960 return NULL;
961 }
962 return &iolat->pd;
963 }
964
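/*
 * Per-blkg init: pick SSD vs rotational accounting based on the queue's
 * nonrot flag, start out unthrottled (max_depth == UINT_MAX), and inherit
 * the parent's current scale cookie so a freshly created group starts
 * from the same scaling state as its peers.
 */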
965 static void iolatency_pd_init(struct blkg_policy_data *pd)
966 {
967 struct iolatency_grp *iolat = pd_to_lat(pd);
968 struct blkcg_gq *blkg = lat_to_blkg(iolat);
969 struct rq_qos *rqos = blkcg_rq_qos(blkg->q);
970 struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
971 u64 now = ktime_to_ns(ktime_get());
972 int cpu;
973
974 if (blk_queue_nonrot(blkg->q))
975 iolat->ssd = true;
976 else
977 iolat->ssd = false;
978
979 for_each_possible_cpu(cpu) {
980 struct latency_stat *stat;
981 stat = per_cpu_ptr(iolat->stats, cpu);
982 latency_stat_init(iolat, stat);
983 }
984
985 latency_stat_init(iolat, &iolat->cur_stat);
986 rq_wait_init(&iolat->rq_wait);
987 spin_lock_init(&iolat->child_lat.lock);
988 iolat->rq_depth.queue_depth = blkg->q->nr_requests;
989 iolat->rq_depth.max_depth = UINT_MAX;
990 iolat->rq_depth.default_depth = iolat->rq_depth.queue_depth;
991 iolat->blkiolat = blkiolat;
992 iolat->cur_win_nsec = 100 * NSEC_PER_MSEC;
993 atomic64_set(&iolat->window_start, now);
994
995 /*
996 * We init things in list order, so the pd for the parent may not be
997 * init'ed yet for whatever reason.
998 */
999 if (blkg->parent && blkg_to_pd(blkg->parent, &blkcg_policy_iolatency)) {
1000 struct iolatency_grp *parent = blkg_to_lat(blkg->parent);
1001 atomic_set(&iolat->scale_cookie,
1002 atomic_read(&parent->child_lat.scale_cookie));
1003 } else {
1004 atomic_set(&iolat->scale_cookie, DEFAULT_SCALE_COOKIE);
1005 }
1006
1007 atomic_set(&iolat->child_lat.scale_cookie, DEFAULT_SCALE_COOKIE);
1008 }
1009
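/*
 * The group is going offline: clear its latency target (adjusting the
 * queue's enable count to match) and reset the parent's scaling state.
 */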
1010 static void iolatency_pd_offline(struct blkg_policy_data *pd)
1011 {
1012 struct iolatency_grp *iolat = pd_to_lat(pd);
1013 struct blkcg_gq *blkg = lat_to_blkg(iolat);
1014 struct blk_iolatency *blkiolat = iolat->blkiolat;
1015 int ret;
1016
1017 ret = iolatency_set_min_lat_nsec(blkg, 0);
1018 if (ret == 1)
1019 atomic_inc(&blkiolat->enabled);
1020 if (ret == -1)
1021 atomic_dec(&blkiolat->enabled);
1022 iolatency_clear_scaling(blkg);
1023 }
1024
1025 static void iolatency_pd_free(struct blkg_policy_data *pd)
1026 {
1027 struct iolatency_grp *iolat = pd_to_lat(pd);
1028 free_percpu(iolat->stats);
1029 kfree(iolat);
1030 }
1031
1032 static struct cftype iolatency_files[] = {
1033 {
1034 .name = "latency",
1035 .flags = CFTYPE_NOT_ON_ROOT,
1036 .seq_show = iolatency_print_limit,
1037 .write = iolatency_set_limit,
1038 },
1039 {}
1040 };
1041
1042 static struct blkcg_policy blkcg_policy_iolatency = {
1043 .dfl_cftypes = iolatency_files,
1044 .pd_alloc_fn = iolatency_pd_alloc,
1045 .pd_init_fn = iolatency_pd_init,
1046 .pd_offline_fn = iolatency_pd_offline,
1047 .pd_free_fn = iolatency_pd_free,
1048 .pd_stat_fn = iolatency_pd_stat,
1049 };
1050
1051 static int __init iolatency_init(void)
1052 {
1053 return blkcg_policy_register(&blkcg_policy_iolatency);
1054 }
1055
1056 static void __exit iolatency_exit(void)
1057 {
1058 	blkcg_policy_unregister(&blkcg_policy_iolatency);
1059 }
1060
1061 module_init(iolatency_init);
1062 module_exit(iolatency_exit);