#ifndef BLK_THROTTLE_H
#define BLK_THROTTLE_H

#include "blk-cgroup-rwstat.h"
/*
 * To implement hierarchical throttling, throtl_grps form a tree and bios
 * are dispatched upwards level by level until they reach the top and get
 * issued. When dispatching bios from the children and local group at each
 * level, if the bios are dispatched into a single bio_list, there's a risk
 * that a local or child group which can queue many bios at once will fill
 * up the list, starving others.
 *
 * To avoid such starvation, dispatched bios are queued separately
 * according to where they came from. When they are again dispatched to
 * the parent, they're popped in round-robin order so that no single source
 * hogs the dispatch window.
 *
 * throtl_qnode is used to keep the queued bios separated by their sources.
 * Bios are queued to throtl_qnode which in turn is queued to
 * throtl_service_queue and then dispatched in round-robin order.
 *
 * It's also used to track the reference counts on blkg's. A qnode always
 * belongs to a throtl_grp and gets queued on itself or the parent, so
 * incrementing the reference of the associated throtl_grp when a qnode is
 * queued and decrementing when dequeued is enough to keep the whole blkg
 * tree pinned while bios are in flight.
 */
struct throtl_qnode {
	struct list_head	node;		/* service_queue->queued[] */
	struct bio_list		bios;		/* queued bios */
	struct throtl_grp	*tg;		/* tg this qnode belongs to */
};
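
/*
 * A minimal sketch of the round-robin pop described above. Illustrative
 * only: the name is made up, and the real helper (throtl_pop_queued() in
 * blk-throttle.c) additionally hands back the throtl_grp reference to be
 * dropped once a qnode is drained.
 */
static inline struct bio *throtl_qnode_pop_sketch(struct list_head *queued)
{
	struct throtl_qnode *qn;
	struct bio *bio;

	if (list_empty(queued))
		return NULL;

	/* always serve the head qnode first */
	qn = list_first_entry(queued, struct throtl_qnode, node);
	bio = bio_list_pop(&qn->bios);

	if (bio_list_empty(&qn->bios))
		list_del_init(&qn->node);		/* source drained */
	else
		list_move_tail(&qn->node, queued);	/* rotate for fairness */

	return bio;
}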
struct throtl_service_queue {
	struct throtl_service_queue *parent_sq;	/* the parent service_queue */

	/*
	 * Bios queued directly to this service_queue or dispatched from
	 * children throtl_grp's.
	 */
	struct list_head	queued[2];	/* throtl_qnode [READ/WRITE] */
	unsigned int		nr_queued[2];	/* number of queued bios */

	/*
	 * RB tree of active children throtl_grp's, which are sorted by
	 * their ->disptime.
	 */
	struct rb_root_cached	pending_tree;	/* RB tree of active tgs */
	unsigned int		nr_pending;	/* # queued in the tree */
	unsigned long		first_pending_disptime;	/* disptime of the first tg */
	struct timer_list	pending_timer;	/* fires on first_pending_disptime */
};
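
/*
 * Illustrative sketch (assumed name): the dispatcher peeks at the
 * leftmost group in pending_tree and (re)arms pending_timer for that
 * group's disptime, so the timer always fires when the next group is
 * due to unthrottle.
 */
static inline void
throtl_schedule_pending_timer_sketch(struct throtl_service_queue *sq,
				     unsigned long disptime)
{
	sq->first_pending_disptime = disptime;
	mod_timer(&sq->pending_timer, disptime);
}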
enum tg_state_flags {
	THROTL_TG_PENDING	= 1 << 0,	/* on parent's pending tree */
	THROTL_TG_WAS_EMPTY	= 1 << 1,	/* bio_lists[] became non-empty */
	THROTL_TG_CANCELING	= 1 << 2,	/* starts to cancel bios */
};
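
/*
 * Illustrative example: blk-throttle.c tests and sets these bits in
 * throtl_grp->flags non-atomically under the queue_lock, roughly:
 *
 *	if (!(tg->flags & THROTL_TG_PENDING)) {
 *		tg->flags |= THROTL_TG_PENDING;
 *		... insert tg into its parent's pending_tree ...
 *	}
 */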
enum {
	LIMIT_LOW,
	LIMIT_MAX,
	LIMIT_CNT,
};

struct throtl_grp {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* active throtl group service_queue member */
	struct rb_node rb_node;

	/* throtl_data this group belongs to */
	struct throtl_data *td;

	/* this group's service queue */
	struct throtl_service_queue service_queue;
	/*
	 * qnode_on_self is used when bios are directly queued to this
	 * throtl_grp so that local bios compete fairly with bios
	 * dispatched from children. qnode_on_parent is used when bios are
	 * dispatched from this throtl_grp into its parent and will compete
	 * with the sibling qnode_on_parents and the parent's
	 * qnode_on_self.
	 */
	struct throtl_qnode qnode_on_self[2];
	struct throtl_qnode qnode_on_parent[2];
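
	/*
	 * Illustrative usage (approximate; see throtl_add_bio_tg() and its
	 * callers in blk-throttle.c):
	 *
	 *	throtl_add_bio_tg(bio, &tg->qnode_on_self[rw], tg);
	 *	throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
	 */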
	/*
	 * Dispatch time in jiffies. This is the estimated time when the
	 * group will unthrottle and be ready to dispatch more bios. It is
	 * used as the key to sort active groups in the service tree.
	 */
	unsigned long disptime;
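
	/*
	 * For example (approximate; see tg_update_disptime() in
	 * blk-throttle.c): disptime = jiffies + min(read_wait, write_wait).
	 */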
	unsigned int flags;

	/* are there any throtl rules between this group and td? */
	bool has_rules_bps[2];
	bool has_rules_iops[2];
	/* internally used bytes per second rate limits */
	uint64_t bps[2][LIMIT_CNT];
	/* user configured bps limits */
	uint64_t bps_conf[2][LIMIT_CNT];

	/* internally used IOPS limits */
	unsigned int iops[2][LIMIT_CNT];
	/* user configured IOPS limits */
	unsigned int iops_conf[2][LIMIT_CNT];
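
	/*
	 * Example lookup (approximate; see tg_bps_limit() in
	 * blk-throttle.c): the limit currently enforced for a direction is
	 * bps[rw][td->limit_index], with rw selecting READ/WRITE and the
	 * second index selecting LIMIT_LOW or LIMIT_MAX.
	 */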
	/* Number of bytes dispatched in current slice */
	uint64_t bytes_disp[2];
	/* Number of bio's dispatched in current slice */
	unsigned int io_disp[2];

	unsigned long last_low_overflow_time[2];

	uint64_t last_bytes_disp[2];
	unsigned int last_io_disp[2];
	/*
	 * The following two fields are updated when a new configuration is
	 * submitted while some bios are still throttled. They record how
	 * many bytes/ios were already waited for under the previous
	 * configuration and are used to calculate the wait time under the
	 * new configuration.
	 */
	long long carryover_bytes[2];
	int carryover_ios[2];
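
	/*
	 * Roughly (see tg_update_carryover() in blk-throttle.c):
	 *
	 *	carryover_bytes[rw] += bytes_allowed_so_far - bytes_disp[rw];
	 *
	 * so a group enters the new configuration with the credit (or debt)
	 * it accumulated under the old one.
	 */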
	unsigned long last_check_time;

	unsigned long latency_target;		/* us */
	unsigned long latency_target_conf;	/* us */
	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	unsigned long last_finish_time;		/* ns / 1024 */
	unsigned long checked_last_finish_time;	/* ns / 1024 */
	unsigned long avg_idletime;		/* ns / 1024 */
	unsigned long idletime_threshold;	/* us */
	unsigned long idletime_threshold_conf;	/* us */

	unsigned int bio_cnt;			/* total bios */
	unsigned int bad_bio_cnt;		/* bios exceeding latency threshold */
	unsigned long bio_cnt_reset_time;

	struct blkg_rwstat stat_bytes;
	struct blkg_rwstat stat_ios;
};
extern struct blkcg_policy blkcg_policy_throtl;
static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
}
static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
{
	return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
}
/*
 * Internal throttling interface
 */
#ifndef CONFIG_BLK_DEV_THROTTLING
static inline int blk_throtl_init(struct gendisk *disk) { return 0; }
static inline void blk_throtl_exit(struct gendisk *disk) { }
static inline void blk_throtl_register(struct gendisk *disk) { }
static inline bool blk_throtl_bio(struct bio *bio) { return false; }
static inline void blk_throtl_cancel_bios(struct gendisk *disk) { }
#else /* CONFIG_BLK_DEV_THROTTLING */
int blk_throtl_init(struct gendisk *disk);
void blk_throtl_exit(struct gendisk *disk);
void blk_throtl_register(struct gendisk *disk);
bool __blk_throtl_bio(struct bio *bio);
void blk_throtl_cancel_bios(struct gendisk *disk);
static inline bool blk_should_throtl(struct bio *bio)
{
	struct throtl_grp *tg = blkg_to_tg(bio->bi_blkg);
	int rw = bio_data_dir(bio);

	/* on the legacy (v1) hierarchy, account bytes/ios here */
	if (!cgroup_subsys_on_dfl(io_cgrp_subsys)) {
		if (!bio_flagged(bio, BIO_CGROUP_ACCT)) {
			bio_set_flag(bio, BIO_CGROUP_ACCT);
			blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf,
					bio->bi_iter.bi_size);
		}
		blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1);
	}

	/* iops limit is always counted */
	if (tg->has_rules_iops[rw])
		return true;

	/* a bio already charged for bps (BIO_BPS_THROTTLED) isn't charged again */
	if (tg->has_rules_bps[rw] && !bio_flagged(bio, BIO_BPS_THROTTLED))
		return true;

	return false;
}
static inline bool blk_throtl_bio(struct bio *bio)
{
	if (!blk_should_throtl(bio))
		return false;

	return __blk_throtl_bio(bio);
}
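
/*
 * Typical caller pattern (approximate; see submit_bio_noacct() in
 * blk-core.c): a true return means the throttler consumed the bio,
 * which must not be submitted further by the caller:
 *
 *	if (blk_throtl_bio(bio))
 *		return;
 */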
#endif /* CONFIG_BLK_DEV_THROTTLING */

#endif /* BLK_THROTTLE_H */