// SPDX-License-Identifier: GPL-2.0
/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include "blk.h"
#include "blk-cgroup-rwstat.h"
#include "blk-stat.h"
#include "blk-throttle.h"

/* Max dispatch from a group in 1 round */
#define THROTL_GRP_QUANTUM 8

/* Total max dispatch from all groups in one round */
#define THROTL_QUANTUM 32

/* Throttling is performed over a slice and after that slice is renewed */
#define DFL_THROTL_SLICE_HD (HZ / 10)
#define DFL_THROTL_SLICE_SSD (HZ / 50)
#define MAX_THROTL_SLICE (HZ)
#define MAX_IDLE_TIME (5L * 1000 * 1000) /* 5 s */
#define MIN_THROTL_BPS (320 * 1024)
#define MIN_THROTL_IOPS (10)
#define DFL_LATENCY_TARGET (-1L)
#define DFL_IDLE_THRESHOLD (0)
#define DFL_HD_BASELINE_LATENCY (4000L) /* 4ms */
#define LATENCY_FILTERED_SSD (0)
/*
 * For HD, very small latencies come from sequential IO. Such IO doesn't help
 * determine whether a group's IO is being impacted by others, so latencies
 * below this threshold are ignored.
 */
#define LATENCY_FILTERED_HD (1000L) /* 1ms */

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)

/* We measure latency for request size from <= 4k to >= 1M */
#define LATENCY_BUCKET_SIZE 9

struct latency_bucket {
	unsigned long total_latency; /* ns / 1024 */
	int samples;
};

struct avg_latency_bucket {
	unsigned long latency; /* ns / 1024 */
	bool valid;
};

struct throtl_data
{
	/* service tree for active throtl groups */
	struct throtl_service_queue service_queue;

	struct request_queue *queue;

	/* Total Number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	unsigned int throtl_slice;

	/* Work for dispatching throttled bios */
	struct work_struct dispatch_work;
	unsigned int limit_index;
	bool limit_valid[LIMIT_CNT];

	unsigned long low_upgrade_time;
	unsigned long low_downgrade_time;

	unsigned int scale;

	struct latency_bucket tmp_buckets[2][LATENCY_BUCKET_SIZE];
	struct avg_latency_bucket avg_buckets[2][LATENCY_BUCKET_SIZE];
	struct latency_bucket __percpu *latency_buckets[2];
	unsigned long last_calculate_time;
	unsigned long filtered_latency;

	bool track_bio_latency;
};

static void throtl_pending_timer_fn(struct timer_list *t);

static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
{
	return pd_to_blkg(&tg->pd);
}

/**
 * sq_to_tg - return the throtl_grp the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * Return the throtl_grp @sq belongs to.  If @sq is the top-level one
 * embedded in throtl_data, %NULL is returned.
 */
static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
{
	if (sq && sq->parent_sq)
		return container_of(sq, struct throtl_grp, service_queue);
	else
		return NULL;
}

/**
 * sq_to_td - return throtl_data the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * A service_queue can be embedded in either a throtl_grp or throtl_data.
 * Determine the associated throtl_data accordingly and return it.
 */
static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
{
	struct throtl_grp *tg = sq_to_tg(sq);

	if (tg)
		return tg->td;
	else
		return container_of(sq, struct throtl_data, service_queue);
}

/*
 * cgroup's limit in LIMIT_MAX is scaled if low limit is set. This scale is to
 * make the IO dispatch more smooth.
 * Scale up: linearly scale up according to elapsed time since upgrade. For
 *           every throtl_slice, the limit scales up 1/2 .low limit till the
 *           limit hits .max limit
 * Scale down: exponentially scale down if a cgroup doesn't hit its .low limit
 */
static uint64_t throtl_adjusted_limit(uint64_t low, struct throtl_data *td)
{
	/* arbitrary value to avoid too big scale */
	if (td->scale < 4096 && time_after_eq(jiffies,
	    td->low_upgrade_time + td->scale * td->throtl_slice))
		td->scale = (jiffies - td->low_upgrade_time) / td->throtl_slice;

	return low + (low >> 1) * td->scale;
}

static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
{
	struct blkcg_gq *blkg = tg_to_blkg(tg);
	struct throtl_data *td;
	uint64_t ret;

	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
		return U64_MAX;

	td = tg->td;
	ret = tg->bps[rw][td->limit_index];
	if (ret == 0 && td->limit_index == LIMIT_LOW) {
		/* intermediate node or iops isn't 0 */
		if (!list_empty(&blkg->blkcg->css.children) ||
		    tg->iops[rw][td->limit_index])
			return U64_MAX;
		else
			return MIN_THROTL_BPS;
	}

	if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] &&
	    tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) {
		uint64_t adjusted;

		adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td);
		ret = min(tg->bps[rw][LIMIT_MAX], adjusted);
	}
	return ret;
}

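/*
 * Note on the LIMIT_LOW special case above: a leaf group with a zero bps low
 * limit but a non-zero iops low limit is treated as unlimited for bytes,
 * while a leaf with neither low limit configured is capped at MIN_THROTL_BPS,
 * so that groups which do have .low limits can actually reach them while the
 * queue is in the LIMIT_LOW state.
 */
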
static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
{
	struct blkcg_gq *blkg = tg_to_blkg(tg);
	struct throtl_data *td;
	unsigned int ret;

	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
		return UINT_MAX;

	td = tg->td;
	ret = tg->iops[rw][td->limit_index];
	if (ret == 0 && tg->td->limit_index == LIMIT_LOW) {
		/* intermediate node or bps isn't 0 */
		if (!list_empty(&blkg->blkcg->css.children) ||
		    tg->bps[rw][td->limit_index])
			return UINT_MAX;
		else
			return MIN_THROTL_IOPS;
	}

	if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] &&
	    tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) {
		uint64_t adjusted;

		adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td);
		if (adjusted > UINT_MAX)
			adjusted = UINT_MAX;
		ret = min_t(unsigned int, tg->iops[rw][LIMIT_MAX], adjusted);
	}
	return ret;
}

#define request_bucket_index(sectors) \
	clamp_t(int, order_base_2(sectors) - 3, 0, LATENCY_BUCKET_SIZE - 1)

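/*
 * Example: an 8-sector (4KB) request maps to bucket order_base_2(8) - 3 = 0,
 * a 2048-sector (1MB) request maps to bucket 11 - 3 = 8, and anything larger
 * is clamped to the last of the LATENCY_BUCKET_SIZE buckets.
 */
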
/**
 * throtl_log - log debug message via blktrace
 * @sq: the service_queue being reported
 * @fmt: printf format string
 * @args: printf args
 *
 * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
 * throtl_grp; otherwise, just "throtl".
 */
#define throtl_log(sq, fmt, args...)	do {				\
	struct throtl_grp *__tg = sq_to_tg((sq));			\
	struct throtl_data *__td = sq_to_td((sq));			\
									\
	(void)__td;							\
	if (likely(!blk_trace_note_message_enabled(__td->queue)))	\
		break;							\
	if ((__tg)) {							\
		blk_add_cgroup_trace_msg(__td->queue,			\
			&tg_to_blkg(__tg)->blkcg->css, "throtl " fmt, ##args);\
	} else {							\
		blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);	\
	}								\
} while (0)

static inline unsigned int throtl_bio_data_size(struct bio *bio)
{
	/* assume it's one sector */
	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
		return 512;
	return bio->bi_iter.bi_size;
}

static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
{
	INIT_LIST_HEAD(&qn->node);
	bio_list_init(&qn->bios);
	qn->tg = tg;
}

/**
 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
 * @bio: bio being added
 * @qn: qnode to add bio to
 * @queued: the service_queue->queued[] list @qn belongs to
 *
 * Add @bio to @qn and put @qn on @queued if it's not already on.
 * @qn->tg's reference count is bumped when @qn is activated.  See the
 * comment on top of throtl_qnode definition for details.
 */
static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
				 struct list_head *queued)
{
	bio_list_add(&qn->bios, bio);
	if (list_empty(&qn->node)) {
		list_add_tail(&qn->node, queued);
		blkg_get(tg_to_blkg(qn->tg));
	}
}

/**
 * throtl_peek_queued - peek the first bio on a qnode list
 * @queued: the qnode list to peek
 */
static struct bio *throtl_peek_queued(struct list_head *queued)
{
	struct throtl_qnode *qn;
	struct bio *bio;

	if (list_empty(queued))
		return NULL;

	qn = list_first_entry(queued, struct throtl_qnode, node);
	bio = bio_list_peek(&qn->bios);
	WARN_ON_ONCE(!bio);
	return bio;
}

/**
 * throtl_pop_queued - pop the first bio from a qnode list
 * @queued: the qnode list to pop a bio from
 * @tg_to_put: optional out argument for throtl_grp to put
 *
 * Pop the first bio from the qnode list @queued.  After popping, the first
 * qnode is removed from @queued if empty or moved to the end of @queued so
 * that the popping order is round-robin.
 *
 * When the first qnode is removed, its associated throtl_grp should be put
 * too.  If @tg_to_put is NULL, this function automatically puts it;
 * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
 * responsible for putting it.
 */
static struct bio *throtl_pop_queued(struct list_head *queued,
				     struct throtl_grp **tg_to_put)
{
	struct throtl_qnode *qn;
	struct bio *bio;

	if (list_empty(queued))
		return NULL;

	qn = list_first_entry(queued, struct throtl_qnode, node);
	bio = bio_list_pop(&qn->bios);
	WARN_ON_ONCE(!bio);

	if (bio_list_empty(&qn->bios)) {
		list_del_init(&qn->node);
		if (tg_to_put)
			*tg_to_put = qn->tg;
		else
			blkg_put(tg_to_blkg(qn->tg));
	} else {
		list_move_tail(&qn->node, queued);
	}

	return bio;
}

/* init a service_queue, assumes the caller zeroed it */
static void throtl_service_queue_init(struct throtl_service_queue *sq)
{
	INIT_LIST_HEAD(&sq->queued[READ]);
	INIT_LIST_HEAD(&sq->queued[WRITE]);
	sq->pending_tree = RB_ROOT_CACHED;
	timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0);
}

static struct blkg_policy_data *throtl_pd_alloc(struct gendisk *disk,
		struct blkcg *blkcg, gfp_t gfp)
{
	struct throtl_grp *tg;
	int rw;

	tg = kzalloc_node(sizeof(*tg), gfp, disk->node_id);
	if (!tg)
		return NULL;

	if (blkg_rwstat_init(&tg->stat_bytes, gfp))
		goto err_free_tg;

	if (blkg_rwstat_init(&tg->stat_ios, gfp))
		goto err_exit_stat_bytes;

	throtl_service_queue_init(&tg->service_queue);

	for (rw = READ; rw <= WRITE; rw++) {
		throtl_qnode_init(&tg->qnode_on_self[rw], tg);
		throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
	}

	RB_CLEAR_NODE(&tg->rb_node);
	tg->bps[READ][LIMIT_MAX] = U64_MAX;
	tg->bps[WRITE][LIMIT_MAX] = U64_MAX;
	tg->iops[READ][LIMIT_MAX] = UINT_MAX;
	tg->iops[WRITE][LIMIT_MAX] = UINT_MAX;
	tg->bps_conf[READ][LIMIT_MAX] = U64_MAX;
	tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX;
	tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX;
	tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX;
	/* LIMIT_LOW will have default value 0 */

	tg->latency_target = DFL_LATENCY_TARGET;
	tg->latency_target_conf = DFL_LATENCY_TARGET;
	tg->idletime_threshold = DFL_IDLE_THRESHOLD;
	tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD;

	return &tg->pd;

err_exit_stat_bytes:
	blkg_rwstat_exit(&tg->stat_bytes);
err_free_tg:
	kfree(tg);
	return NULL;
}

static void throtl_pd_init(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	struct blkcg_gq *blkg = tg_to_blkg(tg);
	struct throtl_data *td = blkg->q->td;
	struct throtl_service_queue *sq = &tg->service_queue;

	/*
	 * If on the default hierarchy, we switch to properly hierarchical
	 * behavior where limits on a given throtl_grp are applied to the
	 * whole subtree rather than just the group itself.  e.g. If 16M
	 * read_bps limit is set on a parent group, summary bps of
	 * parent group and its subtree groups can't exceed 16M for the
	 * device.
	 *
	 * If not on the default hierarchy, the broken flat hierarchy
	 * behavior is retained where all throtl_grps are treated as if
	 * they're all separate root groups right below throtl_data.
	 * Limits of a group don't interact with limits of other groups
	 * regardless of the position of the group in the hierarchy.
	 */
	sq->parent_sq = &td->service_queue;
	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
		sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
	tg->td = td;
}

/*
 * Set has_rules[] if @tg or any of its parents have limits configured.
 * This doesn't require walking up to the top of the hierarchy as the
 * parent's has_rules[] is guaranteed to be correct.
 */
static void tg_update_has_rules(struct throtl_grp *tg)
{
	struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
	struct throtl_data *td = tg->td;
	int rw;

	for (rw = READ; rw <= WRITE; rw++) {
		tg->has_rules_iops[rw] =
			(parent_tg && parent_tg->has_rules_iops[rw]) ||
			(td->limit_valid[td->limit_index] &&
			 tg_iops_limit(tg, rw) != UINT_MAX);
		tg->has_rules_bps[rw] =
			(parent_tg && parent_tg->has_rules_bps[rw]) ||
			(td->limit_valid[td->limit_index] &&
			 (tg_bps_limit(tg, rw) != U64_MAX));
	}
}

static void throtl_pd_online(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);

	/*
	 * We don't want new groups to escape the limits of their ancestors.
	 * Update has_rules[] after a new group is brought online.
	 */
	tg_update_has_rules(tg);
}

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static void blk_throtl_update_limit_valid(struct throtl_data *td)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;
	bool low_valid = false;

	rcu_read_lock();
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
		struct throtl_grp *tg = blkg_to_tg(blkg);

		if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
		    tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) {
			low_valid = true;
			break;
		}
	}
	rcu_read_unlock();

	td->limit_valid[LIMIT_LOW] = low_valid;
}
#else
static inline void blk_throtl_update_limit_valid(struct throtl_data *td)
{
}
#endif

static void throtl_upgrade_state(struct throtl_data *td);
static void throtl_pd_offline(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);

	tg->bps[READ][LIMIT_LOW] = 0;
	tg->bps[WRITE][LIMIT_LOW] = 0;
	tg->iops[READ][LIMIT_LOW] = 0;
	tg->iops[WRITE][LIMIT_LOW] = 0;

	blk_throtl_update_limit_valid(tg->td);

	if (!tg->td->limit_valid[tg->td->limit_index])
		throtl_upgrade_state(tg->td);
}

static void throtl_pd_free(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);

	del_timer_sync(&tg->service_queue.pending_timer);
	blkg_rwstat_exit(&tg->stat_bytes);
	blkg_rwstat_exit(&tg->stat_ios);
	kfree(tg);
}

static struct throtl_grp *
throtl_rb_first(struct throtl_service_queue *parent_sq)
{
	struct rb_node *n;

	n = rb_first_cached(&parent_sq->pending_tree);
	WARN_ON_ONCE(!n);
	if (!n)
		return NULL;
	return rb_entry_tg(n);
}

static void throtl_rb_erase(struct rb_node *n,
			    struct throtl_service_queue *parent_sq)
{
	rb_erase_cached(n, &parent_sq->pending_tree);
	RB_CLEAR_NODE(n);
}

static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
{
	struct throtl_grp *tg;

	tg = throtl_rb_first(parent_sq);
	if (!tg)
		return;

	parent_sq->first_pending_disptime = tg->disptime;
}

static void tg_service_queue_add(struct throtl_grp *tg)
{
	struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
	struct rb_node **node = &parent_sq->pending_tree.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;
	bool leftmost = true;

	while (*node != NULL) {
		parent = *node;
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color_cached(&tg->rb_node, &parent_sq->pending_tree,
			       leftmost);
}

static void throtl_enqueue_tg(struct throtl_grp *tg)
{
	if (!(tg->flags & THROTL_TG_PENDING)) {
		tg_service_queue_add(tg);
		tg->flags |= THROTL_TG_PENDING;
		tg->service_queue.parent_sq->nr_pending++;
	}
}

static void throtl_dequeue_tg(struct throtl_grp *tg)
{
	if (tg->flags & THROTL_TG_PENDING) {
		struct throtl_service_queue *parent_sq =
			tg->service_queue.parent_sq;

		throtl_rb_erase(&tg->rb_node, parent_sq);
		--parent_sq->nr_pending;
		tg->flags &= ~THROTL_TG_PENDING;
	}
}

/* Call with queue lock held */
static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
					  unsigned long expires)
{
	unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;

	/*
	 * Since we are adjusting the throttle limit dynamically, the sleep
	 * time calculated according to the previous limit might be invalid.
	 * It's possible the cgroup sleep time is very long and no other
	 * cgroup has IO running, so the limit change would go unnoticed.
	 * Make sure the cgroup doesn't sleep too long so that the change is
	 * picked up.
	 */
	if (time_after(expires, max_expire))
		expires = max_expire;
	mod_timer(&sq->pending_timer, expires);
	throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
		   expires - jiffies, jiffies);
}

/**
 * throtl_schedule_next_dispatch - schedule the next dispatch cycle
 * @sq: the service_queue to schedule dispatch for
 * @force: force scheduling
 *
 * Arm @sq->pending_timer so that the next dispatch cycle starts on the
 * dispatch time of the first pending child.  Returns %true if either timer
 * is armed or there's no pending child left.  %false if the current
 * dispatch window is still open and the caller should continue
 * dispatching.
 *
 * If @force is %true, the dispatch timer is always scheduled and this
 * function is guaranteed to return %true.  This is to be used when the
 * caller can't dispatch itself and needs to invoke pending_timer
 * unconditionally.  Note that forced scheduling is likely to induce short
 * delay before dispatch starts even if @sq->first_pending_disptime is not
 * in the future and thus shouldn't be used in hot paths.
 */
static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
					  bool force)
{
	/* any pending children left? */
	if (!sq->nr_pending)
		return true;

	update_min_dispatch_time(sq);

	/* is the next dispatch time in the future? */
	if (force || time_after(sq->first_pending_disptime, jiffies)) {
		throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
		return true;
	}

	/* tell the caller to continue dispatching */
	return false;
}

static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
					bool rw, unsigned long start)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->carryover_bytes[rw] = 0;
	tg->carryover_ios[rw] = 0;

	/*
	 * Previous slice has expired. We must have trimmed it after last
	 * bio dispatch. That means since start of last slice, we never used
	 * that bandwidth. Do try to make use of that bandwidth while giving
	 * credit.
	 */
	if (time_after(start, tg->slice_start[rw]))
		tg->slice_start[rw] = start;

	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
	throtl_log(&tg->service_queue,
		   "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw,
					  bool clear_carryover)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
	if (clear_carryover) {
		tg->carryover_bytes[rw] = 0;
		tg->carryover_ios[rw] = 0;
	}

	throtl_log(&tg->service_queue,
		   "[%c] new slice start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
					unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
				       unsigned long jiffy_end)
{
	throtl_set_slice_end(tg, rw, jiffy_end);
	throtl_log(&tg->service_queue,
		   "[%c] extend slice start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

/* Determine if previously allocated or extended slice is complete or not */
static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
{
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
		return false;

	return true;
}

static unsigned int calculate_io_allowed(u32 iops_limit,
					 unsigned long jiffy_elapsed)
{
	unsigned int io_allowed;
	u64 tmp;

	/*
	 * jiffy_elapsed should not be a big value as minimum iops can be
	 * 1 then at max jiffy elapsed should be equivalent of 1 second as we
	 * will allow dispatch after 1 second and after that slice should
	 * have been trimmed.
	 */

	tmp = (u64)iops_limit * jiffy_elapsed;
	do_div(tmp, HZ);

	if (tmp > UINT_MAX)
		io_allowed = UINT_MAX;
	else
		io_allowed = tmp;

	return io_allowed;
}

static u64 calculate_bytes_allowed(u64 bps_limit, unsigned long jiffy_elapsed)
{
	/*
	 * Can result be wider than 64 bits?
	 * We check against 62, not 64, due to ilog2 truncation.
	 */
	if (ilog2(bps_limit) + ilog2(jiffy_elapsed) - ilog2(HZ) > 62)
		return U64_MAX;
	return mul_u64_u64_div_u64(bps_limit, (u64)jiffy_elapsed, (u64)HZ);
}

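/*
 * Example: with HZ=1000, bps_limit = 1 GiB/s (2^30) and jiffy_elapsed = 100,
 * the ilog2 check gives 30 + 6 - 9 = 27, well below 62, so the exact
 * mul_u64_u64_div_u64() path returns 2^30 * 100 / 1000, roughly 107 MiB.
 * Only absurdly large limit/elapsed combinations short-circuit to U64_MAX.
 */
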
/* Trim the used slices and adjust slice start accordingly */
static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
{
	unsigned long time_elapsed;
	long long bytes_trim;
	int io_trim;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps are unlimited (-1), then time slices don't get renewed.
	 * Don't try to trim the slice if the slice has expired. A new
	 * slice will start when appropriate.
	 */
	if (throtl_slice_used(tg, rw))
		return;

	/*
	 * A bio has been dispatched. Also adjust slice_end. It might happen
	 * that initially cgroup limit was very low resulting in high
	 * slice_end, but later limit was bumped up and bio was dispatched
	 * sooner, then we need to reduce slice_end. A high bogus slice_end
	 * is bad because it does not allow new slice to start.
	 */
	throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);

	time_elapsed = rounddown(jiffies - tg->slice_start[rw],
				 tg->td->throtl_slice);
	if (!time_elapsed)
		return;

	bytes_trim = calculate_bytes_allowed(tg_bps_limit(tg, rw),
					     time_elapsed) +
		     tg->carryover_bytes[rw];
	io_trim = calculate_io_allowed(tg_iops_limit(tg, rw), time_elapsed) +
		  tg->carryover_ios[rw];
	if (bytes_trim <= 0 && io_trim <= 0)
		return;

	tg->carryover_bytes[rw] = 0;
	if ((long long)tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
	else
		tg->bytes_disp[rw] = 0;

	tg->carryover_ios[rw] = 0;
	if ((int)tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;
	else
		tg->io_disp[rw] = 0;

	tg->slice_start[rw] += time_elapsed;

	throtl_log(&tg->service_queue,
		   "[%c] trim slice nr=%lu bytes=%lld io=%d start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', time_elapsed / tg->td->throtl_slice,
		   bytes_trim, io_trim, tg->slice_start[rw], tg->slice_end[rw],
		   jiffies);
}

static void __tg_update_carryover(struct throtl_grp *tg, bool rw)
{
	unsigned long jiffy_elapsed = jiffies - tg->slice_start[rw];
	u64 bps_limit = tg_bps_limit(tg, rw);
	u32 iops_limit = tg_iops_limit(tg, rw);

	/*
	 * If config is updated while bios are still throttled, calculate and
	 * accumulate how many bytes/ios are waited across changes. And
	 * carryover_bytes/ios will be used to calculate new wait time under
	 * new configuration.
	 */
	if (bps_limit != U64_MAX)
		tg->carryover_bytes[rw] +=
			calculate_bytes_allowed(bps_limit, jiffy_elapsed) -
			tg->bytes_disp[rw];
	if (iops_limit != UINT_MAX)
		tg->carryover_ios[rw] +=
			calculate_io_allowed(iops_limit, jiffy_elapsed) -
			tg->io_disp[rw];
}

static void tg_update_carryover(struct throtl_grp *tg)
{
	if (tg->service_queue.nr_queued[READ])
		__tg_update_carryover(tg, READ);
	if (tg->service_queue.nr_queued[WRITE])
		__tg_update_carryover(tg, WRITE);

	/* see comments in struct throtl_grp for meaning of these fields. */
	throtl_log(&tg->service_queue, "%s: %lld %lld %d %d\n", __func__,
		   tg->carryover_bytes[READ], tg->carryover_bytes[WRITE],
		   tg->carryover_ios[READ], tg->carryover_ios[WRITE]);
}

static unsigned long tg_within_iops_limit(struct throtl_grp *tg, struct bio *bio,
					  u32 iops_limit)
{
	bool rw = bio_data_dir(bio);
	int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

	if (iops_limit == UINT_MAX) {
		return 0;
	}

	jiffy_elapsed = jiffies - tg->slice_start[rw];

	/* Round up to the next throttle slice, wait time must be nonzero */
	jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice);
	io_allowed = calculate_io_allowed(iops_limit, jiffy_elapsed_rnd) +
		     tg->carryover_ios[rw];
	if (io_allowed > 0 && tg->io_disp[rw] + 1 <= io_allowed)
		return 0;

	/* Calc approx time to dispatch */
	jiffy_wait = jiffy_elapsed_rnd - jiffy_elapsed;
	return jiffy_wait;
}

static unsigned long tg_within_bps_limit(struct throtl_grp *tg, struct bio *bio,
					 u64 bps_limit)
{
	bool rw = bio_data_dir(bio);
	long long bytes_allowed;
	u64 extra_bytes;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	unsigned int bio_size = throtl_bio_data_size(bio);

	/* no need to throttle if this bio's bytes have been accounted */
	if (bps_limit == U64_MAX || bio_flagged(bio, BIO_BPS_THROTTLED)) {
		return 0;
	}

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = tg->td->throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
	bytes_allowed = calculate_bytes_allowed(bps_limit, jiffy_elapsed_rnd) +
			tg->carryover_bytes[rw];
	if (bytes_allowed > 0 && tg->bytes_disp[rw] + bio_size <= bytes_allowed)
		return 0;

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, bps_limit);

	if (!jiffy_wait)
		jiffy_wait = 1;

	/*
	 * This wait time is without taking into consideration the rounding
	 * up we did. Add that time also.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
	return jiffy_wait;
}

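/*
 * Example: with bps_limit = 1 MiB/s and bytes_allowed = 1 MiB for the rounded
 * slice, a group that already dispatched 900 KiB and now sees a 256 KiB bio
 * overshoots by about 132 KiB (extra_bytes), giving a wait of roughly
 * 0.13 * HZ jiffies plus the slice-rounding term.
 */
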
/*
 * Returns whether one can dispatch a bio or not. Also returns the approximate
 * number of jiffies to wait before this bio is within the IO rate and can be
 * dispatched.
 */
static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
			    unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
	u64 bps_limit = tg_bps_limit(tg, rw);
	u32 iops_limit = tg_iops_limit(tg, rw);

	/*
	 * Currently whole state machine of group depends on first bio
	 * queued in the group bio list. So one should not be calling
	 * this function with a different bio if there are other bios
	 * queued.
	 */
	BUG_ON(tg->service_queue.nr_queued[rw] &&
	       bio != throtl_peek_queued(&tg->service_queue.queued[rw]));

	/* If tg->bps = -1, then BW is unlimited */
	if ((bps_limit == U64_MAX && iops_limit == UINT_MAX) ||
	    tg->flags & THROTL_TG_CANCELING) {
		if (wait)
			*wait = 0;
		return true;
	}

	/*
	 * If previous slice expired, start a new one otherwise renew/extend
	 * existing slice to make sure it is at least throtl_slice interval
	 * long since now. New slice is started only for empty throttle group.
	 * If there is queued bio, that means there should be an active
	 * slice and it should be extended instead.
	 */
	if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
		throtl_start_new_slice(tg, rw, true);
	else {
		if (time_before(tg->slice_end[rw],
		    jiffies + tg->td->throtl_slice))
			throtl_extend_slice(tg, rw,
				jiffies + tg->td->throtl_slice);
	}

	bps_wait = tg_within_bps_limit(tg, bio, bps_limit);
	iops_wait = tg_within_iops_limit(tg, bio, iops_limit);
	if (bps_wait + iops_wait == 0) {
		if (wait)
			*wait = 0;
		return true;
	}

	max_wait = max(bps_wait, iops_wait);

	if (wait)
		*wait = max_wait;

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(tg, rw, jiffies + max_wait);

	return false;
}

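/*
 * The caller ends up waiting for the stricter of the two constraints: if the
 * bps budget would be ready in 20 jiffies but the iops budget needs 50,
 * max_wait is 50, since dispatching earlier would violate the iops limit.
 */
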
static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);
	unsigned int bio_size = throtl_bio_data_size(bio);

	/* Charge the bio to the group */
	if (!bio_flagged(bio, BIO_BPS_THROTTLED)) {
		tg->bytes_disp[rw] += bio_size;
		tg->last_bytes_disp[rw] += bio_size;
	}

	tg->io_disp[rw]++;
	tg->last_io_disp[rw]++;
}

/**
 * throtl_add_bio_tg - add a bio to the specified throtl_grp
 * @bio: bio to add
 * @qn: qnode to use
 * @tg: the target throtl_grp
 *
 * Add @bio to @tg's service_queue using @qn.  If @qn is not specified,
 * tg->qnode_on_self[] is used.
 */
static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
			      struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	bool rw = bio_data_dir(bio);

	if (!qn)
		qn = &tg->qnode_on_self[rw];

	/*
	 * If @tg doesn't currently have any bios queued in the same
	 * direction, queueing @bio can change when @tg should be
	 * dispatched.  Mark that @tg was empty.  This is automatically
	 * cleared on the next tg_update_disptime().
	 */
	if (!sq->nr_queued[rw])
		tg->flags |= THROTL_TG_WAS_EMPTY;

	throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);

	sq->nr_queued[rw]++;
	throtl_enqueue_tg(tg);
}

static void tg_update_disptime(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
	struct bio *bio;

	bio = throtl_peek_queued(&sq->queued[READ]);
	if (bio)
		tg_may_dispatch(tg, bio, &read_wait);

	bio = throtl_peek_queued(&sq->queued[WRITE]);
	if (bio)
		tg_may_dispatch(tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
	tg->disptime = disptime;
	tg_service_queue_add(tg);

	/* see throtl_add_bio_tg() */
	tg->flags &= ~THROTL_TG_WAS_EMPTY;
}

static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
					struct throtl_grp *parent_tg, bool rw)
{
	if (throtl_slice_used(parent_tg, rw)) {
		throtl_start_new_slice_with_credit(parent_tg, rw,
				child_tg->slice_start[rw]);
	}
}

static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	struct throtl_service_queue *parent_sq = sq->parent_sq;
	struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
	struct throtl_grp *tg_to_put = NULL;
	struct bio *bio;

	/*
	 * @bio is being transferred from @tg to @parent_sq.  Popping a bio
	 * from @tg may put its reference and @parent_sq might end up
	 * getting released prematurely.  Remember the tg to put and put it
	 * after @bio is transferred to @parent_sq.
	 */
	bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
	sq->nr_queued[rw]--;

	throtl_charge_bio(tg, bio);

	/*
	 * If our parent is another tg, we just need to transfer @bio to
	 * the parent using throtl_add_bio_tg().  If our parent is
	 * @td->service_queue, @bio is ready to be issued.  Put it on its
	 * bio_lists[] and decrease total number queued.  The caller is
	 * responsible for issuing these bios.
	 */
	if (parent_tg) {
		throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
		start_parent_slice_with_credit(tg, parent_tg, rw);
	} else {
		bio_set_flag(bio, BIO_BPS_THROTTLED);
		throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
				     &parent_sq->queued[rw]);
		BUG_ON(tg->td->nr_queued[rw] <= 0);
		tg->td->nr_queued[rw]--;
	}

	throtl_trim_slice(tg, rw);

	if (tg_to_put)
		blkg_put(tg_to_blkg(tg_to_put));
}

static int throtl_dispatch_tg(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = THROTL_GRP_QUANTUM * 3 / 4;
	unsigned int max_nr_writes = THROTL_GRP_QUANTUM - max_nr_reads;
	struct bio *bio;

	/* Try to dispatch 75% READS and 25% WRITES */

	while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio));
		nr_reads++;

		if (nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio));
		nr_writes++;

		if (nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}

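/*
 * With THROTL_GRP_QUANTUM == 8 this works out to at most 6 reads and 2 writes
 * per group per round, and THROTL_QUANTUM == 32 caps a whole dispatch round
 * at 32 bios across all groups before the remaining work is deferred.
 */
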
static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
{
	unsigned int nr_disp = 0;

	while (1) {
		struct throtl_grp *tg;
		struct throtl_service_queue *sq;

		if (!parent_sq->nr_pending)
			break;

		tg = throtl_rb_first(parent_sq);
		if (!tg)
			break;

		if (time_before(jiffies, tg->disptime))
			break;

		nr_disp += throtl_dispatch_tg(tg);

		sq = &tg->service_queue;
		if (sq->nr_queued[READ] || sq->nr_queued[WRITE])
			tg_update_disptime(tg);
		else
			throtl_dequeue_tg(tg);

		if (nr_disp >= THROTL_QUANTUM)
			break;
	}

	return nr_disp;
}

static bool throtl_can_upgrade(struct throtl_data *td,
	struct throtl_grp *this_tg);

/**
 * throtl_pending_timer_fn - timer function for service_queue->pending_timer
 * @t: the pending_timer member of the throtl_service_queue being serviced
 *
 * This timer is armed when a child throtl_grp with active bio's becomes
 * pending and queued on the service_queue's pending_tree and expires when
 * the first child throtl_grp should be dispatched.  This function
 * dispatches bio's from the children throtl_grps to the parent
 * service_queue.
 *
 * If the parent's parent is another throtl_grp, dispatching is propagated
 * by either arming its pending_timer or repeating dispatch directly.  If
 * the top-level service_tree is reached, throtl_data->dispatch_work is
 * kicked so that the ready bio's are issued.
 */
static void throtl_pending_timer_fn(struct timer_list *t)
{
	struct throtl_service_queue *sq = from_timer(sq, t, pending_timer);
	struct throtl_grp *tg = sq_to_tg(sq);
	struct throtl_data *td = sq_to_td(sq);
	struct throtl_service_queue *parent_sq;
	struct request_queue *q;
	bool dispatched;
	int ret;

	/* throtl_data may be gone, so figure out request queue by blkg */
	if (tg)
		q = tg->pd.blkg->q;
	else
		q = td->queue;

	spin_lock_irq(&q->queue_lock);

	if (!q->root_blkg)
		goto out_unlock;

	if (throtl_can_upgrade(td, NULL))
		throtl_upgrade_state(td);

again:
	parent_sq = sq->parent_sq;
	dispatched = false;

	while (true) {
		throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
			   sq->nr_queued[READ] + sq->nr_queued[WRITE],
			   sq->nr_queued[READ], sq->nr_queued[WRITE]);

		ret = throtl_select_dispatch(sq);
		if (ret) {
			throtl_log(sq, "bios disp=%u", ret);
			dispatched = true;
		}

		if (throtl_schedule_next_dispatch(sq, false))
			break;

		/* this dispatch window is still open, relax and repeat */
		spin_unlock_irq(&q->queue_lock);
		cpu_relax();
		spin_lock_irq(&q->queue_lock);
	}

	if (!dispatched)
		goto out_unlock;

	if (parent_sq) {
		/* @parent_sq is another throtl_grp, propagate dispatch */
		if (tg->flags & THROTL_TG_WAS_EMPTY) {
			tg_update_disptime(tg);
			if (!throtl_schedule_next_dispatch(parent_sq, false)) {
				/* window is already open, repeat dispatching */
				sq = parent_sq;
				tg = sq_to_tg(sq);
				goto again;
			}
		}
	} else {
		/* reached the top-level, queue issuing */
		queue_work(kthrotld_workqueue, &td->dispatch_work);
	}
out_unlock:
	spin_unlock_irq(&q->queue_lock);
}

/**
 * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
 * @work: work item being executed
 *
 * This function is queued for execution when bios reach the bio_lists[]
 * of throtl_data->service_queue.  Those bios are ready and issued by this
 * function.
 */
static void blk_throtl_dispatch_work_fn(struct work_struct *work)
{
	struct throtl_data *td = container_of(work, struct throtl_data,
					      dispatch_work);
	struct throtl_service_queue *td_sq = &td->service_queue;
	struct request_queue *q = td->queue;
	struct bio_list bio_list_on_stack;
	struct bio *bio;
	struct blk_plug plug;
	int rw;

	bio_list_init(&bio_list_on_stack);

	spin_lock_irq(&q->queue_lock);
	for (rw = READ; rw <= WRITE; rw++)
		while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
			bio_list_add(&bio_list_on_stack, bio);
	spin_unlock_irq(&q->queue_lock);

	if (!bio_list_empty(&bio_list_on_stack)) {
		blk_start_plug(&plug);
		while ((bio = bio_list_pop(&bio_list_on_stack)))
			submit_bio_noacct_nocheck(bio);
		blk_finish_plug(&plug);
	}
}

static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
			      int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	u64 v = *(u64 *)((void *)tg + off);

	if (v == U64_MAX)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	unsigned int v = *(unsigned int *)((void *)tg + off);

	if (v == UINT_MAX)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static int tg_print_conf_u64(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}

static int tg_print_conf_uint(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}

static void tg_conf_updated(struct throtl_grp *tg, bool global)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	throtl_log(&tg->service_queue,
		   "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
		   tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
		   tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));

	/*
	 * Update has_rules[] flags for the updated tg's subtree.  A tg is
	 * considered to have rules if either the tg itself or any of its
	 * ancestors has rules.  This identifies groups without any
	 * restrictions in the whole hierarchy and allows them to bypass
	 * blk-throttle.
	 */
	blkg_for_each_descendant_pre(blkg, pos_css,
			global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
		struct throtl_grp *this_tg = blkg_to_tg(blkg);
		struct throtl_grp *parent_tg;

		tg_update_has_rules(this_tg);
		/* ignore root/second level */
		if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
		    !blkg->parent->parent)
			continue;
		parent_tg = blkg_to_tg(blkg->parent);
		/*
		 * make sure all children have a lower idle time threshold and
		 * a higher latency target
		 */
		this_tg->idletime_threshold = min(this_tg->idletime_threshold,
				parent_tg->idletime_threshold);
		this_tg->latency_target = max(this_tg->latency_target,
				parent_tg->latency_target);
	}

	/*
	 * We're already holding queue_lock and know @tg is valid.  Let's
	 * apply the new config directly.
	 *
	 * Restart the slices for both READ and WRITES. It might happen
	 * that a group's limits are dropped suddenly and we don't want to
	 * account recently dispatched IO with the new low rate.
	 */
	throtl_start_new_slice(tg, READ, false);
	throtl_start_new_slice(tg, WRITE, false);

	if (tg->flags & THROTL_TG_PENDING) {
		tg_update_disptime(tg);
		throtl_schedule_next_dispatch(sq->parent_sq, true);
	}
}

static ssize_t tg_set_conf(struct kernfs_open_file *of,
			   char *buf, size_t nbytes, loff_t off, bool is_u64)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkg_conf_ctx ctx;
	struct throtl_grp *tg;
	int ret;
	u64 v;

	blkg_conf_init(&ctx, buf);

	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, &ctx);
	if (ret)
		goto out_finish;

	ret = -EINVAL;
	if (sscanf(ctx.body, "%llu", &v) != 1)
		goto out_finish;
	if (!v)
		v = U64_MAX;

	tg = blkg_to_tg(ctx.blkg);
	tg_update_carryover(tg);

	if (is_u64)
		*(u64 *)((void *)tg + of_cft(of)->private) = v;
	else
		*(unsigned int *)((void *)tg + of_cft(of)->private) = v;

	tg_conf_updated(tg, false);
	ret = 0;
out_finish:
	blkg_conf_exit(&ctx);
	return ret ?: nbytes;
}

static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
			       char *buf, size_t nbytes, loff_t off)
{
	return tg_set_conf(of, buf, nbytes, off, true);
}

static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	return tg_set_conf(of, buf, nbytes, off, false);
}

static int tg_print_rwstat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat, &blkcg_policy_throtl,
			  seq_cft(sf)->private, true);
	return 0;
}

static u64 tg_prfill_rwstat_recursive(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat_sample sum;

	blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_throtl, off,
				  &sum);
	return __blkg_prfill_rwstat(sf, pd, &sum);
}

static int tg_print_rwstat_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  tg_prfill_rwstat_recursive, &blkcg_policy_throtl,
			  seq_cft(sf)->private, true);
	return 0;
}

static struct cftype throtl_legacy_files[] = {
	{
		.name = "throttle.read_bps_device",
		.private = offsetof(struct throtl_grp, bps[READ][LIMIT_MAX]),
		.seq_show = tg_print_conf_u64,
		.write = tg_set_conf_u64,
	},
	{
		.name = "throttle.write_bps_device",
		.private = offsetof(struct throtl_grp, bps[WRITE][LIMIT_MAX]),
		.seq_show = tg_print_conf_u64,
		.write = tg_set_conf_u64,
	},
	{
		.name = "throttle.read_iops_device",
		.private = offsetof(struct throtl_grp, iops[READ][LIMIT_MAX]),
		.seq_show = tg_print_conf_uint,
		.write = tg_set_conf_uint,
	},
	{
		.name = "throttle.write_iops_device",
		.private = offsetof(struct throtl_grp, iops[WRITE][LIMIT_MAX]),
		.seq_show = tg_print_conf_uint,
		.write = tg_set_conf_uint,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = offsetof(struct throtl_grp, stat_bytes),
		.seq_show = tg_print_rwstat,
	},
	{
		.name = "throttle.io_service_bytes_recursive",
		.private = offsetof(struct throtl_grp, stat_bytes),
		.seq_show = tg_print_rwstat_recursive,
	},
	{
		.name = "throttle.io_serviced",
		.private = offsetof(struct throtl_grp, stat_ios),
		.seq_show = tg_print_rwstat,
	},
	{
		.name = "throttle.io_serviced_recursive",
		.private = offsetof(struct throtl_grp, stat_ios),
		.seq_show = tg_print_rwstat_recursive,
	},
	{ }	/* terminate */
};

static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
			   int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	const char *dname = blkg_dev_name(pd->blkg);
	char bufs[4][21] = { "max", "max", "max", "max" };
	u64 bps_dft;
	unsigned int iops_dft;
	char idle_time[26] = "";
	char latency_time[26] = "";

	if (!dname)
		return 0;

	if (off == LIMIT_LOW) {
		bps_dft = 0;
		iops_dft = 0;
	} else {
		bps_dft = U64_MAX;
		iops_dft = UINT_MAX;
	}

	if (tg->bps_conf[READ][off] == bps_dft &&
	    tg->bps_conf[WRITE][off] == bps_dft &&
	    tg->iops_conf[READ][off] == iops_dft &&
	    tg->iops_conf[WRITE][off] == iops_dft &&
	    (off != LIMIT_LOW ||
	     (tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD &&
	      tg->latency_target_conf == DFL_LATENCY_TARGET)))
		return 0;

	if (tg->bps_conf[READ][off] != U64_MAX)
		snprintf(bufs[0], sizeof(bufs[0]), "%llu",
			tg->bps_conf[READ][off]);
	if (tg->bps_conf[WRITE][off] != U64_MAX)
		snprintf(bufs[1], sizeof(bufs[1]), "%llu",
			tg->bps_conf[WRITE][off]);
	if (tg->iops_conf[READ][off] != UINT_MAX)
		snprintf(bufs[2], sizeof(bufs[2]), "%u",
			tg->iops_conf[READ][off]);
	if (tg->iops_conf[WRITE][off] != UINT_MAX)
		snprintf(bufs[3], sizeof(bufs[3]), "%u",
			tg->iops_conf[WRITE][off]);
	if (off == LIMIT_LOW) {
		if (tg->idletime_threshold_conf == ULONG_MAX)
			strcpy(idle_time, " idle=max");
		else
			snprintf(idle_time, sizeof(idle_time), " idle=%lu",
				tg->idletime_threshold_conf);

		if (tg->latency_target_conf == ULONG_MAX)
			strcpy(latency_time, " latency=max");
		else
			snprintf(latency_time, sizeof(latency_time),
				" latency=%lu", tg->latency_target_conf);
	}

	seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s%s%s\n",
		   dname, bufs[0], bufs[1], bufs[2], bufs[3], idle_time,
		   latency_time);
	return 0;
}

static int tg_print_limit(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}

static ssize_t tg_set_limit(struct kernfs_open_file *of,
			    char *buf, size_t nbytes, loff_t off)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkg_conf_ctx ctx;
	struct throtl_grp *tg;
	u64 v[4];
	unsigned long idle_time;
	unsigned long latency_time;
	int ret;
	int index = of_cft(of)->private;

	blkg_conf_init(&ctx, buf);

	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, &ctx);
	if (ret)
		goto out_finish;

	tg = blkg_to_tg(ctx.blkg);
	tg_update_carryover(tg);

	v[0] = tg->bps_conf[READ][index];
	v[1] = tg->bps_conf[WRITE][index];
	v[2] = tg->iops_conf[READ][index];
	v[3] = tg->iops_conf[WRITE][index];

	idle_time = tg->idletime_threshold_conf;
	latency_time = tg->latency_target_conf;
	while (true) {
		char tok[27];	/* wiops=18446744073709551616 */
		char *p;
		u64 val = U64_MAX;
		int len;

		if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
			break;
		if (tok[0] == '\0')
			break;
		ctx.body += len;

		ret = -EINVAL;
		p = tok;
		strsep(&p, "=");
		if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
			goto out_finish;

		ret = -ERANGE;
		if (!val)
			goto out_finish;

		ret = -EINVAL;
		if (!strcmp(tok, "rbps") && val > 1)
			v[0] = val;
		else if (!strcmp(tok, "wbps") && val > 1)
			v[1] = val;
		else if (!strcmp(tok, "riops") && val > 1)
			v[2] = min_t(u64, val, UINT_MAX);
		else if (!strcmp(tok, "wiops") && val > 1)
			v[3] = min_t(u64, val, UINT_MAX);
		else if (off == LIMIT_LOW && !strcmp(tok, "idle"))
			idle_time = val;
		else if (off == LIMIT_LOW && !strcmp(tok, "latency"))
			latency_time = val;
		else
			goto out_finish;
	}

	tg->bps_conf[READ][index] = v[0];
	tg->bps_conf[WRITE][index] = v[1];
	tg->iops_conf[READ][index] = v[2];
	tg->iops_conf[WRITE][index] = v[3];

	if (index == LIMIT_MAX) {
		tg->bps[READ][index] = v[0];
		tg->bps[WRITE][index] = v[1];
		tg->iops[READ][index] = v[2];
		tg->iops[WRITE][index] = v[3];
	}
	tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW],
		tg->bps_conf[READ][LIMIT_MAX]);
	tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW],
		tg->bps_conf[WRITE][LIMIT_MAX]);
	tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW],
		tg->iops_conf[READ][LIMIT_MAX]);
	tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],
		tg->iops_conf[WRITE][LIMIT_MAX]);
	tg->idletime_threshold_conf = idle_time;
	tg->latency_target_conf = latency_time;

	/* force user to configure all settings for low limit */
	if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] ||
	      tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) ||
	    tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD ||
	    tg->latency_target_conf == DFL_LATENCY_TARGET) {
		tg->bps[READ][LIMIT_LOW] = 0;
		tg->bps[WRITE][LIMIT_LOW] = 0;
		tg->iops[READ][LIMIT_LOW] = 0;
		tg->iops[WRITE][LIMIT_LOW] = 0;
		tg->idletime_threshold = DFL_IDLE_THRESHOLD;
		tg->latency_target = DFL_LATENCY_TARGET;
	} else if (index == LIMIT_LOW) {
		tg->idletime_threshold = tg->idletime_threshold_conf;
		tg->latency_target = tg->latency_target_conf;
	}

	blk_throtl_update_limit_valid(tg->td);
	if (tg->td->limit_valid[LIMIT_LOW]) {
		if (index == LIMIT_LOW)
			tg->td->limit_index = LIMIT_LOW;
	} else
		tg->td->limit_index = LIMIT_MAX;
	tg_conf_updated(tg, index == LIMIT_LOW &&
		tg->td->limit_valid[LIMIT_LOW]);
	ret = 0;
out_finish:
	blkg_conf_exit(&ctx);
	return ret ?: nbytes;
}

static struct cftype throtl_files[] = {
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	{
		.name = "low",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = tg_print_limit,
		.write = tg_set_limit,
		.private = LIMIT_LOW,
	},
#endif
	{
		.name = "max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = tg_print_limit,
		.write = tg_set_limit,
		.private = LIMIT_MAX,
	},
	{ }	/* terminate */
};

static void throtl_shutdown_wq(struct request_queue *q)
{
	struct throtl_data *td = q->td;

	cancel_work_sync(&td->dispatch_work);
}

struct blkcg_policy blkcg_policy_throtl = {
	.dfl_cftypes		= throtl_files,
	.legacy_cftypes		= throtl_legacy_files,

	.pd_alloc_fn		= throtl_pd_alloc,
	.pd_init_fn		= throtl_pd_init,
	.pd_online_fn		= throtl_pd_online,
	.pd_offline_fn		= throtl_pd_offline,
	.pd_free_fn		= throtl_pd_free,
};

void blk_throtl_cancel_bios(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	spin_lock_irq(&q->queue_lock);
	/*
	 * queue_lock is held, so the rcu lock is not technically needed here.
	 * However, it is still taken to emphasize that the following path
	 * needs RCU protection and to prevent a lockdep warning.
	 */
	rcu_read_lock();
	blkg_for_each_descendant_post(blkg, pos_css, q->root_blkg) {
		struct throtl_grp *tg = blkg_to_tg(blkg);
		struct throtl_service_queue *sq = &tg->service_queue;

		/*
		 * Set the flag to make sure throtl_pending_timer_fn() won't
		 * stop until all throttled bios are dispatched.
		 */
		tg->flags |= THROTL_TG_CANCELING;

		/*
		 * Do not dispatch cgroup without THROTL_TG_PENDING or cgroup
		 * will be inserted to service queue without THROTL_TG_PENDING
		 * set in tg_update_disptime below. Then IO dispatched from
		 * child in tg_dispatch_one_bio will trigger double insertion
		 * and corrupt the tree.
		 */
		if (!(tg->flags & THROTL_TG_PENDING))
			continue;

		/*
		 * Update disptime after setting the above flag to make sure
		 * throtl_select_dispatch() won't exit without dispatching.
		 */
		tg_update_disptime(tg);

		throtl_schedule_pending_timer(sq, jiffies + 1);
	}
	rcu_read_unlock();
	spin_unlock_irq(&q->queue_lock);
}

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg)
{
	unsigned long rtime = jiffies, wtime = jiffies;

	if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW])
		rtime = tg->last_low_overflow_time[READ];
	if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
		wtime = tg->last_low_overflow_time[WRITE];
	return min(rtime, wtime);
}

static unsigned long tg_last_low_overflow_time(struct throtl_grp *tg)
{
	struct throtl_service_queue *parent_sq;
	struct throtl_grp *parent = tg;
	unsigned long ret = __tg_last_low_overflow_time(tg);

	while (true) {
		parent_sq = parent->service_queue.parent_sq;
		parent = sq_to_tg(parent_sq);
		if (!parent)
			break;

		/*
		 * If the parent doesn't have a low limit, it always reaches
		 * its low limit. Its overflow time is useless for its
		 * children.
		 */
		if (!parent->bps[READ][LIMIT_LOW] &&
		    !parent->iops[READ][LIMIT_LOW] &&
		    !parent->bps[WRITE][LIMIT_LOW] &&
		    !parent->iops[WRITE][LIMIT_LOW])
			continue;
		if (time_after(__tg_last_low_overflow_time(parent), ret))
			ret = __tg_last_low_overflow_time(parent);
	}
	return ret;
}

static bool throtl_tg_is_idle(struct throtl_grp *tg)
{
	/*
	 * cgroup is idle if:
	 * - a single idle period is too long, longer than a fixed value (in
	 *   case the user configured too big a threshold) or 4 times the
	 *   idletime threshold
	 * - average think time is more than threshold
	 * - IO latency is largely below threshold
	 */
	unsigned long time;
	bool ret;

	time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold);
	ret = tg->latency_target == DFL_LATENCY_TARGET ||
	      tg->idletime_threshold == DFL_IDLE_THRESHOLD ||
	      (ktime_get_ns() >> 10) - tg->last_finish_time > time ||
	      tg->avg_idletime > tg->idletime_threshold ||
	      (tg->latency_target && tg->bio_cnt &&
		tg->bad_bio_cnt * 5 < tg->bio_cnt);
	throtl_log(&tg->service_queue,
		"avg_idle=%ld, idle_threshold=%ld, bad_bio=%d, total_bio=%d, is_idle=%d, scale=%d",
		tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt,
		tg->bio_cnt, ret, tg->td->scale);
	return ret;
}

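/*
 * The last condition above means fewer than 20% of recent bios exceeded the
 * group's latency target (bad_bio_cnt * 5 < bio_cnt), which is what "IO
 * latency is largely below threshold" refers to.
 */
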
static bool throtl_low_limit_reached(struct throtl_grp *tg, int rw)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	bool limit = tg->bps[rw][LIMIT_LOW] || tg->iops[rw][LIMIT_LOW];

	/*
	 * if low limit is zero, low limit is always reached.
	 * if low limit is non-zero, we can check whether any request is
	 * queued to determine if the low limit is reached, as we throttle
	 * requests according to the limit.
	 */
	return !limit || sq->nr_queued[rw];
}

static bool throtl_tg_can_upgrade(struct throtl_grp *tg)
{
	/*
	 * cgroup reaches low limit when low limit of READ and WRITE are
	 * both reached, it's ok to upgrade to next limit if cgroup reaches
	 * low limit
	 */
	if (throtl_low_limit_reached(tg, READ) &&
	    throtl_low_limit_reached(tg, WRITE))
		return true;

	if (time_after_eq(jiffies,
		tg_last_low_overflow_time(tg) + tg->td->throtl_slice) &&
	    throtl_tg_is_idle(tg))
		return true;
	return false;
}

static bool throtl_hierarchy_can_upgrade(struct throtl_grp *tg)
{
	while (true) {
		if (throtl_tg_can_upgrade(tg))
			return true;
		tg = sq_to_tg(tg->service_queue.parent_sq);
		if (!tg || !tg_to_blkg(tg)->parent)
			return false;
	}
	return false;
}

static bool throtl_can_upgrade(struct throtl_data *td,
	struct throtl_grp *this_tg)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	if (td->limit_index != LIMIT_LOW)
		return false;

	if (time_before(jiffies, td->low_downgrade_time + td->throtl_slice))
		return false;

	rcu_read_lock();
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
		struct throtl_grp *tg = blkg_to_tg(blkg);

		if (tg == this_tg)
			continue;
		if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
			continue;
		if (!throtl_hierarchy_can_upgrade(tg)) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

static void throtl_upgrade_check(struct throtl_grp *tg)
{
	unsigned long now = jiffies;

	if (tg->td->limit_index != LIMIT_LOW)
		return;

	if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
		return;

	tg->last_check_time = now;

	if (!time_after_eq(now,
	     __tg_last_low_overflow_time(tg) + tg->td->throtl_slice))
		return;

	if (throtl_can_upgrade(tg->td, NULL))
		throtl_upgrade_state(tg->td);
}

static void throtl_upgrade_state(struct throtl_data *td)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	throtl_log(&td->service_queue, "upgrade to max");
	td->limit_index = LIMIT_MAX;
	td->low_upgrade_time = jiffies;
	td->scale = 0;
	rcu_read_lock();
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
		struct throtl_grp *tg = blkg_to_tg(blkg);
		struct throtl_service_queue *sq = &tg->service_queue;

		tg->disptime = jiffies - 1;
		throtl_select_dispatch(sq);
		throtl_schedule_next_dispatch(sq, true);
	}
	rcu_read_unlock();
	throtl_select_dispatch(&td->service_queue);
	throtl_schedule_next_dispatch(&td->service_queue, true);
	queue_work(kthrotld_workqueue, &td->dispatch_work);
}

static void throtl_downgrade_state(struct throtl_data *td)
{
	td->scale /= 2;

	throtl_log(&td->service_queue, "downgrade, scale %d", td->scale);
	if (td->scale) {
		td->low_upgrade_time = jiffies - td->scale * td->throtl_slice;
		return;
	}

	td->limit_index = LIMIT_LOW;
	td->low_downgrade_time = jiffies;
}

static bool throtl_tg_can_downgrade(struct throtl_grp *tg)
{
	struct throtl_data *td = tg->td;
	unsigned long now = jiffies;

	/*
	 * If cgroup is below low limit, consider downgrade and throttle other
	 * cgroups
	 */
	if (time_after_eq(now, tg_last_low_overflow_time(tg) +
					td->throtl_slice) &&
	    (!throtl_tg_is_idle(tg) ||
	     !list_empty(&tg_to_blkg(tg)->blkcg->css.children)))
		return true;
	return false;
}

static bool throtl_hierarchy_can_downgrade(struct throtl_grp *tg)
{
	struct throtl_data *td = tg->td;

	if (time_before(jiffies, td->low_upgrade_time + td->throtl_slice))
		return false;

	while (true) {
		if (!throtl_tg_can_downgrade(tg))
			return false;
		tg = sq_to_tg(tg->service_queue.parent_sq);
		if (!tg || !tg_to_blkg(tg)->parent)
			break;
	}
	return true;
}

static void throtl_downgrade_check(struct throtl_grp *tg)
{
	uint64_t bps;
	unsigned int iops;
	unsigned long elapsed_time;
	unsigned long now = jiffies;

	if (tg->td->limit_index != LIMIT_MAX ||
	    !tg->td->limit_valid[LIMIT_LOW])
		return;
	if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
		return;
	if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
		return;

	elapsed_time = now - tg->last_check_time;
	tg->last_check_time = now;

	if (time_before(now, tg_last_low_overflow_time(tg) +
			tg->td->throtl_slice))
		return;

	if (tg->bps[READ][LIMIT_LOW]) {
		bps = tg->last_bytes_disp[READ] * HZ;
		do_div(bps, elapsed_time);
		if (bps >= tg->bps[READ][LIMIT_LOW])
			tg->last_low_overflow_time[READ] = now;
	}

	if (tg->bps[WRITE][LIMIT_LOW]) {
		bps = tg->last_bytes_disp[WRITE] * HZ;
		do_div(bps, elapsed_time);
		if (bps >= tg->bps[WRITE][LIMIT_LOW])
			tg->last_low_overflow_time[WRITE] = now;
	}

	if (tg->iops[READ][LIMIT_LOW]) {
		iops = tg->last_io_disp[READ] * HZ / elapsed_time;
		if (iops >= tg->iops[READ][LIMIT_LOW])
			tg->last_low_overflow_time[READ] = now;
	}

	if (tg->iops[WRITE][LIMIT_LOW]) {
		iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
		if (iops >= tg->iops[WRITE][LIMIT_LOW])
			tg->last_low_overflow_time[WRITE] = now;
	}

	/*
	 * If cgroup is below low limit, consider downgrade and throttle other
	 * cgroups
	 */
	if (throtl_hierarchy_can_downgrade(tg))
		throtl_downgrade_state(tg->td);

	tg->last_bytes_disp[READ] = 0;
	tg->last_bytes_disp[WRITE] = 0;
	tg->last_io_disp[READ] = 0;
	tg->last_io_disp[WRITE] = 0;
}

static void blk_throtl_update_idletime(struct throtl_grp *tg)
{
	unsigned long now;
	unsigned long last_finish_time = tg->last_finish_time;

	if (last_finish_time == 0)
		return;

	now = ktime_get_ns() >> 10;
	if (now <= last_finish_time ||
	    last_finish_time == tg->checked_last_finish_time)
		return;

	tg->avg_idletime = (tg->avg_idletime * 7 + now - last_finish_time) >> 3;
	tg->checked_last_finish_time = last_finish_time;
}

static void throtl_update_latency_buckets(struct throtl_data *td)
{
	struct avg_latency_bucket avg_latency[2][LATENCY_BUCKET_SIZE];
	int i, cpu, rw;
	unsigned long last_latency[2] = { 0 };
	unsigned long latency[2];

	if (!blk_queue_nonrot(td->queue) || !td->limit_valid[LIMIT_LOW])
		return;
	if (time_before(jiffies, td->last_calculate_time + HZ))
		return;
	td->last_calculate_time = jiffies;

	memset(avg_latency, 0, sizeof(avg_latency));
	for (rw = READ; rw <= WRITE; rw++) {
		for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
			struct latency_bucket *tmp = &td->tmp_buckets[rw][i];

			for_each_possible_cpu(cpu) {
				struct latency_bucket *bucket;

				/* this isn't race free, but ok in practice */
				bucket = per_cpu_ptr(td->latency_buckets[rw],
					cpu);
				tmp->total_latency += bucket[i].total_latency;
				tmp->samples += bucket[i].samples;
				bucket[i].total_latency = 0;
				bucket[i].samples = 0;
			}

			if (tmp->samples >= 32) {
				int samples = tmp->samples;

				latency[rw] = tmp->total_latency;

				tmp->total_latency = 0;
				tmp->samples = 0;
				latency[rw] /= samples;
				if (latency[rw] == 0)
					continue;
				avg_latency[rw][i].latency = latency[rw];
			}
		}
	}

	for (rw = READ; rw <= WRITE; rw++) {
		for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
			if (!avg_latency[rw][i].latency) {
				if (td->avg_buckets[rw][i].latency < last_latency[rw])
					td->avg_buckets[rw][i].latency =
						last_latency[rw];
				continue;
			}

			if (!td->avg_buckets[rw][i].valid)
				latency[rw] = avg_latency[rw][i].latency;
			else
				latency[rw] = (td->avg_buckets[rw][i].latency * 7 +
					avg_latency[rw][i].latency) >> 3;

			td->avg_buckets[rw][i].latency = max(latency[rw],
				last_latency[rw]);
			td->avg_buckets[rw][i].valid = true;
			last_latency[rw] = td->avg_buckets[rw][i].latency;
		}
	}

	for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
		throtl_log(&td->service_queue,
			"Latency bucket %d: read latency=%ld, read valid=%d, "
			"write latency=%ld, write valid=%d", i,
			td->avg_buckets[READ][i].latency,
			td->avg_buckets[READ][i].valid,
			td->avg_buckets[WRITE][i].latency,
			td->avg_buckets[WRITE][i].valid);
}

#else
static inline void throtl_update_latency_buckets(struct throtl_data *td)
{
}

static void blk_throtl_update_idletime(struct throtl_grp *tg)
{
}

static void throtl_downgrade_check(struct throtl_grp *tg)
{
}

static void throtl_upgrade_check(struct throtl_grp *tg)
{
}

static bool throtl_can_upgrade(struct throtl_data *td,
	struct throtl_grp *this_tg)
{
	return false;
}

static void throtl_upgrade_state(struct throtl_data *td)
{
}
#endif
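/*
 * Throttle path for a submitted bio: walk from the bio's group towards the
 * root, charging the bio at each level while it fits within the limits. As
 * soon as a level cannot accept it (or already has bios queued), the bio
 * is queued there and dispatched later by the timer/work machinery.
 */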
bool __blk_throtl_bio(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	struct blkcg_gq *blkg = bio->bi_blkg;
	struct throtl_qnode *qn = NULL;
	struct throtl_grp *tg = blkg_to_tg(blkg);
	struct throtl_service_queue *sq;
	bool rw = bio_data_dir(bio);
	bool throttled = false;
	struct throtl_data *td = tg->td;

	rcu_read_lock();

	spin_lock_irq(&q->queue_lock);

	throtl_update_latency_buckets(td);

	blk_throtl_update_idletime(tg);

	sq = &tg->service_queue;

again:
	while (true) {
		if (tg->last_low_overflow_time[rw] == 0)
			tg->last_low_overflow_time[rw] = jiffies;
		throtl_downgrade_check(tg);
		throtl_upgrade_check(tg);
		/* throtl is FIFO - if bios are already queued, should queue */
		if (sq->nr_queued[rw])
			break;

		/* if above limits, break to queue */
		if (!tg_may_dispatch(tg, bio, NULL)) {
			tg->last_low_overflow_time[rw] = jiffies;
			if (throtl_can_upgrade(td, tg)) {
				throtl_upgrade_state(td);
				goto again;
			}
			break;
		}

		/* within limits, let's charge and dispatch directly */
		throtl_charge_bio(tg, bio);

		/*
		 * We need to trim slice even when bios are not being queued,
		 * otherwise it might happen that a bio is not queued for
		 * a long time and slice keeps on extending and trim is not
		 * called for a long time. Now if limits are reduced suddenly
		 * we take into account all the IO dispatched so far at the new
		 * low rate and newly queued IO gets a really long dispatch
		 * time.
		 *
		 * So keep on trimming slice even if bio is not queued.
		 */
		throtl_trim_slice(tg, rw);

		/*
		 * @bio passed through this layer without being throttled.
		 * Climb up the ladder. If we're already at the top, it
		 * can be executed directly.
		 */
		qn = &tg->qnode_on_parent[rw];
		sq = sq->parent_sq;
		tg = sq_to_tg(sq);
		if (!tg) {
			bio_set_flag(bio, BIO_BPS_THROTTLED);
			goto out_unlock;
		}
	}

	/* out-of-limit, queue to @tg */
	throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
		   rw == READ ? 'R' : 'W',
		   tg->bytes_disp[rw], bio->bi_iter.bi_size,
		   tg_bps_limit(tg, rw),
		   tg->io_disp[rw], tg_iops_limit(tg, rw),
		   sq->nr_queued[READ], sq->nr_queued[WRITE]);

	tg->last_low_overflow_time[rw] = jiffies;

	td->nr_queued[rw]++;
	throtl_add_bio_tg(bio, qn, tg);
	throttled = true;

	/*
	 * Update @tg's dispatch time and force schedule dispatch if @tg
	 * was empty before @bio. The forced scheduling isn't likely to
	 * cause undue delay as @bio is likely to be dispatched directly if
	 * its @tg's disptime is not in the future.
	 */
	if (tg->flags & THROTL_TG_WAS_EMPTY) {
		tg_update_disptime(tg);
		throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
	}

out_unlock:
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	if (throttled || !td->track_bio_latency)
		bio->bi_issue.value |= BIO_ISSUE_THROTL_SKIP_LATENCY;
#endif
	spin_unlock_irq(&q->queue_lock);

	rcu_read_unlock();
	return throttled;
}
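/*
 * The latency tracking below is only built with
 * CONFIG_BLK_DEV_THROTTLING_LOW: completion latencies are binned by I/O
 * size and accumulated in the per-cpu buckets that
 * throtl_update_latency_buckets() later folds into the averages.
 */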
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static void throtl_track_latency(struct throtl_data *td, sector_t size,
				 enum req_op op, unsigned long time)
{
	const bool rw = op_is_write(op);
	struct latency_bucket *latency;
	int index;

	if (!td || td->limit_index != LIMIT_LOW ||
	    !(op == REQ_OP_READ || op == REQ_OP_WRITE) ||
	    !blk_queue_nonrot(td->queue))
		return;

	index = request_bucket_index(size);

	latency = get_cpu_ptr(td->latency_buckets[rw]);
	latency[index].total_latency += time;
	latency[index].samples++;
	put_cpu_ptr(td->latency_buckets[rw]);
}
void blk_throtl_stat_add(struct request *rq, u64 time_ns)
{
	struct request_queue *q = rq->q;
	struct throtl_data *td = q->td;

	throtl_track_latency(td, blk_rq_stats_sectors(rq), req_op(rq),
			     time_ns >> 10);
}
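/*
 * Completion hook: record the group's last finish time for idle tracking,
 * feed the bio's latency into the size buckets, and count bios that miss
 * the group's latency target so upgrade/downgrade decisions can use them.
 */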
void blk_throtl_bio_endio(struct bio *bio)
{
	struct blkcg_gq *blkg;
	struct throtl_grp *tg;
	u64 finish_time_ns;
	unsigned long finish_time;
	unsigned long start_time;
	unsigned long lat;
	int rw = bio_data_dir(bio);

	blkg = bio->bi_blkg;
	if (!blkg)
		return;
	tg = blkg_to_tg(blkg);
	if (!tg->td->limit_valid[LIMIT_LOW])
		return;

	finish_time_ns = ktime_get_ns();
	tg->last_finish_time = finish_time_ns >> 10;

	start_time = bio_issue_time(&bio->bi_issue) >> 10;
	finish_time = __bio_issue_time(finish_time_ns) >> 10;
	if (!start_time || finish_time <= start_time)
		return;

	lat = finish_time - start_time;
	/* this is only for bio based driver */
	if (!(bio->bi_issue.value & BIO_ISSUE_THROTL_SKIP_LATENCY))
		throtl_track_latency(tg->td, bio_issue_size(&bio->bi_issue),
				     bio_op(bio), lat);

	if (tg->latency_target && lat >= tg->td->filtered_latency) {
		int bucket;
		unsigned int threshold;

		bucket = request_bucket_index(bio_issue_size(&bio->bi_issue));
		threshold = tg->td->avg_buckets[rw][bucket].latency +
			tg->latency_target;
		if (lat > threshold)
			tg->bad_bio_cnt++;
		/*
		 * Not race free, could get wrong count, which means cgroups
		 * will be throttled.
		 */
		tg->bio_cnt++;
	}

	if (time_after(jiffies, tg->bio_cnt_reset_time) || tg->bio_cnt > 1024) {
		tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies;
		tg->bio_cnt /= 2;
		tg->bad_bio_cnt /= 2;
	}
}
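/*
 * Allocate the per-queue throttle data and per-cpu latency buckets, then
 * activate the throttle policy on the disk. The queue starts in LIMIT_MAX.
 */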
int blk_throtl_init(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct throtl_data *td;
	int ret;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;
	td->latency_buckets[READ] = __alloc_percpu(sizeof(struct latency_bucket) *
		LATENCY_BUCKET_SIZE, __alignof__(u64));
	if (!td->latency_buckets[READ]) {
		kfree(td);
		return -ENOMEM;
	}
	td->latency_buckets[WRITE] = __alloc_percpu(sizeof(struct latency_bucket) *
		LATENCY_BUCKET_SIZE, __alignof__(u64));
	if (!td->latency_buckets[WRITE]) {
		free_percpu(td->latency_buckets[READ]);
		kfree(td);
		return -ENOMEM;
	}

	INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
	throtl_service_queue_init(&td->service_queue);

	q->td = td;
	td->queue = q;

	td->limit_valid[LIMIT_MAX] = true;
	td->limit_index = LIMIT_MAX;
	td->low_upgrade_time = jiffies;
	td->low_downgrade_time = jiffies;

	/* activate policy */
	ret = blkcg_activate_policy(disk, &blkcg_policy_throtl);
	if (ret) {
		free_percpu(td->latency_buckets[READ]);
		free_percpu(td->latency_buckets[WRITE]);
		kfree(td);
	}
	return ret;
}
void blk_throtl_exit(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	BUG_ON(!q->td);
	del_timer_sync(&q->td->service_queue.pending_timer);
	throtl_shutdown_wq(q);
	blkcg_deactivate_policy(disk, &blkcg_policy_throtl);
	free_percpu(q->td->latency_buckets[READ]);
	free_percpu(q->td->latency_buckets[WRITE]);
	kfree(q->td);
}
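/*
 * Called once the disk is registered: pick the slice length and latency
 * filtering defaults based on whether the device is rotational, and
 * enable blk-stat accounting when bio latencies cannot be tracked here.
 */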
void blk_throtl_register(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct throtl_data *td;
	int i;

	td = q->td;
	BUG_ON(!td);

	if (blk_queue_nonrot(q)) {
		td->throtl_slice = DFL_THROTL_SLICE_SSD;
		td->filtered_latency = LATENCY_FILTERED_SSD;
	} else {
		td->throtl_slice = DFL_THROTL_SLICE_HD;
		td->filtered_latency = LATENCY_FILTERED_HD;
		for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
			td->avg_buckets[READ][i].latency = DFL_HD_BASELINE_LATENCY;
			td->avg_buckets[WRITE][i].latency = DFL_HD_BASELINE_LATENCY;
		}
	}
#ifndef CONFIG_BLK_DEV_THROTTLING_LOW
	/* if no low limit, use previous default */
	td->throtl_slice = DFL_THROTL_SLICE_HD;
#endif

	td->track_bio_latency = !queue_is_mq(q);
	if (!td->track_bio_latency)
		blk_stat_enable_accounting(q);
}
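/*
 * sysfs hooks: expose and update the throttle slice length in
 * milliseconds; zero and values above MAX_THROTL_SLICE are rejected.
 */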
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page)
{
	if (!q->td)
		return -EINVAL;
	return sprintf(page, "%u\n", jiffies_to_msecs(q->td->throtl_slice));
}

ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count)
{
	unsigned long v;
	unsigned long t;

	if (!q->td)
		return -EINVAL;
	if (kstrtoul(page, 10, &v))
		return -EINVAL;
	t = msecs_to_jiffies(v);
	if (t == 0 || t > MAX_THROTL_SLICE)
		return -EINVAL;
	q->td->throtl_slice = t;
	return count;
}
#endif
static int __init throtl_init(void)
{
	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
	if (!kthrotld_workqueue)
		panic("Failed to create kthrotld\n");

	return blkcg_policy_register(&blkcg_policy_throtl);
}

module_init(throtl_init);