#ifndef BLK_THROTTLE_H
#define BLK_THROTTLE_H

#include "blk-cgroup-rwstat.h"
/*
 * To implement hierarchical throttling, throtl_grps form a tree and bios
 * are dispatched upwards level by level until they reach the top and get
 * issued.  When dispatching bios from the children and local group at each
 * level, if the bios are dispatched into a single bio_list, a local or
 * child group that can queue many bios at once may fill up the list,
 * starving the others.
 *
 * To avoid such starvation, dispatched bios are queued separately
 * according to where they came from.  When they are again dispatched to
 * the parent, they're popped in round-robin order so that no single source
 * hogs the dispatch window.
 *
 * throtl_qnode is used to keep the queued bios separated by their sources.
 * Bios are queued to throtl_qnode which in turn is queued to
 * throtl_service_queue and then dispatched in round-robin order.
 *
 * It's also used to track the reference counts on blkg's.  A qnode always
 * belongs to a throtl_grp and gets queued on itself or the parent, so
 * incrementing the reference of the associated throtl_grp when a qnode is
 * queued and decrementing when dequeued is enough to keep the whole blkg
 * tree pinned while bios are in flight.
 */
struct throtl_qnode {
	struct list_head	node;		/* service_queue->queued[] */
	struct bio_list		bios;		/* queued bios */
	struct throtl_grp	*tg;		/* tg this qnode belongs to */
};
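
/*
 * A minimal sketch of the round-robin pop described above (modeled on
 * throtl_pop_queued() in blk-throttle.c, with the blkg reference handling
 * omitted): take one bio from the head qnode, then either drop the
 * drained qnode or rotate it to the tail so the next source gets a turn.
 */
static inline struct bio *throtl_pop_queued_sketch(struct list_head *queued)
{
	struct throtl_qnode *qn;
	struct bio *bio;

	if (list_empty(queued))
		return NULL;

	qn = list_first_entry(queued, struct throtl_qnode, node);
	bio = bio_list_pop(&qn->bios);

	if (bio_list_empty(&qn->bios))
		list_del_init(&qn->node);	   /* source drained, drop it */
	else
		list_move_tail(&qn->node, queued); /* round-robin rotate */

	return bio;
}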
struct throtl_service_queue {
	struct throtl_service_queue *parent_sq;	/* the parent service_queue */

	/*
	 * Bios queued directly to this service_queue or dispatched from
	 * children throtl_grp's.
	 */
	struct list_head	queued[2];	/* throtl_qnode [READ/WRITE] */
	unsigned int		nr_queued[2];	/* number of queued bios */

	/*
	 * RB tree of active children throtl_grp's, which are sorted by
	 * their ->disptime.
	 */
	struct rb_root_cached	pending_tree;	/* RB tree of active tgs */
	unsigned int		nr_pending;	/* # queued in the tree */
	unsigned long		first_pending_disptime;	/* disptime of the first tg */
	struct timer_list	pending_timer;	/* fires on first_pending_disptime */
};
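
/*
 * Sketch: (re)arming the dispatch timer for the earliest pending child,
 * as throtl_schedule_pending_timer() does in blk-throttle.c (simplified;
 * the real code also clamps the expiry against the throttle slice).
 */
static inline void throtl_schedule_sketch(struct throtl_service_queue *sq)
{
	mod_timer(&sq->pending_timer, sq->first_pending_disptime);
}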
enum tg_state_flags {
	THROTL_TG_PENDING	= 1 << 0,	/* on parent's pending tree */
	THROTL_TG_WAS_EMPTY	= 1 << 1,	/* bio_lists[] became non-empty */
	THROTL_TG_HAS_IOPS_LIMIT = 1 << 2,	/* tg has iops limit */
	THROTL_TG_CANCELING	= 1 << 3,	/* starts to cancel bio */
};

enum {
	LIMIT_LOW,
	LIMIT_MAX,
	LIMIT_CNT,
};
struct throtl_grp {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* active throtl group service_queue member */
	struct rb_node rb_node;

	/* throtl_data this group belongs to */
	struct throtl_data *td;

	/* this group's service queue */
	struct throtl_service_queue service_queue;

	/*
	 * qnode_on_self is used when bios are directly queued to this
	 * throtl_grp so that local bios compete fairly with bios
	 * dispatched from children.  qnode_on_parent is used when bios are
	 * dispatched from this throtl_grp into its parent and will compete
	 * with the sibling qnode_on_parents and the parent's
	 * qnode_on_self.
	 */
	struct throtl_qnode qnode_on_self[2];
	struct throtl_qnode qnode_on_parent[2];

	/*
	 * Dispatch time in jiffies.  This is the estimated time when the
	 * group will unthrottle and be ready to dispatch more bios.  It is
	 * used as the key to sort active groups in the service tree.
	 */
	unsigned long disptime;

	unsigned int flags;
	/* are there any throtl rules between this group and td? */
	bool has_rules[2];

	/* internally used bytes per second rate limits */
	uint64_t bps[2][LIMIT_CNT];
	/* user configured bps limits */
	uint64_t bps_conf[2][LIMIT_CNT];

	/* internally used IOPS limits */
	unsigned int iops[2][LIMIT_CNT];
	/* user configured IOPS limits */
	unsigned int iops_conf[2][LIMIT_CNT];

	/* Number of bytes dispatched in current slice */
	uint64_t bytes_disp[2];
	/* Number of bios dispatched in current slice */
	unsigned int io_disp[2];

	unsigned long last_low_overflow_time[2];

	uint64_t last_bytes_disp[2];
	unsigned int last_io_disp[2];

	unsigned long last_check_time;
	unsigned long latency_target;		/* us */
	unsigned long latency_target_conf;	/* us */
	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	unsigned long last_finish_time;		/* ns / 1024 */
	unsigned long checked_last_finish_time;	/* ns / 1024 */
	unsigned long avg_idletime;		/* ns / 1024 */
	unsigned long idletime_threshold;	/* us */
	unsigned long idletime_threshold_conf;	/* us */

	unsigned int bio_cnt;		/* total bios */
	unsigned int bad_bio_cnt;	/* bios exceeding latency threshold */
	unsigned long bio_cnt_reset_time;

	struct blkg_rwstat stat_bytes;
	struct blkg_rwstat stat_ios;
};
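
/*
 * Sketch of the slice accounting the fields above support (loosely based
 * on the bps check in blk-throttle.c; simplified, no slice extension or
 * rounding): a bio fits if the bytes dispatched in the current slice plus
 * its size stay within what the configured rate allows so far.
 */
static inline bool tg_bps_fits_sketch(struct throtl_grp *tg,
				      struct bio *bio, int rw)
{
	u64 allowed = tg->bps[rw][LIMIT_MAX] *
		      (jiffies - tg->slice_start[rw]);

	do_div(allowed, HZ);
	return tg->bytes_disp[rw] + bio->bi_iter.bi_size <= allowed;
}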
extern struct blkcg_policy blkcg_policy_throtl;
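
/*
 * Sketch: because pending_tree is keyed by ->disptime, the next group to
 * dispatch is simply the leftmost node (cf. throtl_rb_first() in
 * blk-throttle.c).
 */
static inline struct throtl_grp *
throtl_rb_first_sketch(struct throtl_service_queue *parent_sq)
{
	struct rb_node *n = rb_first_cached(&parent_sq->pending_tree);

	return n ? rb_entry(n, struct throtl_grp, rb_node) : NULL;
}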
static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
}
static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
{
	return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
}
/*
 * Internal throttling interface
 */
#ifndef CONFIG_BLK_DEV_THROTTLING
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
static inline void blk_throtl_register_queue(struct request_queue *q) { }
static inline bool blk_throtl_bio(struct bio *bio) { return false; }
static inline void blk_throtl_cancel_bios(struct request_queue *q) { }
#else /* CONFIG_BLK_DEV_THROTTLING */
int blk_throtl_init(struct request_queue *q);
void blk_throtl_exit(struct request_queue *q);
void blk_throtl_register_queue(struct request_queue *q);
bool __blk_throtl_bio(struct bio *bio);
void blk_throtl_cancel_bios(struct request_queue *q);
static inline bool blk_throtl_bio(struct bio *bio)
{
	struct throtl_grp *tg = blkg_to_tg(bio->bi_blkg);

	/* no need to throttle bps any more if the bio has been throttled */
	if (bio_flagged(bio, BIO_THROTTLED) &&
	    !(tg->flags & THROTL_TG_HAS_IOPS_LIMIT))
		return false;

	if (!tg->has_rules[bio_data_dir(bio)])
		return false;

	return __blk_throtl_bio(bio);
}
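
/*
 * Sketch of the caller side (hypothetical helper; the real check sits in
 * the bio submission path): a true return from blk_throtl_bio() means the
 * throttler took ownership of the bio and will dispatch it later, so the
 * caller must not issue it now.
 */
static inline bool example_submit_checks(struct bio *bio)
{
	if (blk_throtl_bio(bio))
		return false;	/* throttled and queued for later */
	/* ... continue down the submission path ... */
	return true;
}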
#endif /* CONFIG_BLK_DEV_THROTTLING */

#endif /* BLK_THROTTLE_H */