/* SPDX-License-Identifier: GPL-2.0 */
#ifndef RQ_QOS_H
#define RQ_QOS_H

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk_types.h>
#include <linux/atomic.h>
#include <linux/wait.h>

#include "blk-mq-debugfs.h"

struct blk_mq_debugfs_attr;

enum rq_qos_id {
	RQ_QOS_WBT,
	RQ_QOS_CGROUP,
};

struct rq_wait {
	wait_queue_head_t wait;
	atomic_t inflight;
};

struct rq_qos {
	struct rq_qos_ops *ops;
	struct request_queue *q;
	enum rq_qos_id id;
	struct rq_qos *next;
#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry *debugfs_dir;
#endif
};

struct rq_qos_ops {
	void (*throttle)(struct rq_qos *, struct bio *);
	void (*track)(struct rq_qos *, struct request *, struct bio *);
	void (*issue)(struct rq_qos *, struct request *);
	void (*requeue)(struct rq_qos *, struct request *);
	void (*done)(struct rq_qos *, struct request *);
	void (*done_bio)(struct rq_qos *, struct bio *);
	void (*cleanup)(struct rq_qos *, struct bio *);
	void (*exit)(struct rq_qos *);
	const struct blk_mq_debugfs_attr *debugfs_attrs;
};
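
/*
 * Example: skeleton of a minimal rq_qos policy (an illustrative sketch,
 * not kernel code; every "example_*" name is hypothetical). A policy
 * embeds struct rq_qos and recovers its private state with
 * container_of() in each hook it implements:
 *
 *	struct example_qos {
 *		struct rq_qos rqos;
 *		struct rq_wait rqw;
 *		unsigned int limit;
 *	};
 *
 *	static void example_throttle(struct rq_qos *rqos, struct bio *bio)
 *	{
 *		struct example_qos *eq =
 *			container_of(rqos, struct example_qos, rqos);
 *
 *		(block here if over budget, e.g. via rq_qos_wait())
 *	}
 *
 *	static void example_exit(struct rq_qos *rqos)
 *	{
 *		kfree(container_of(rqos, struct example_qos, rqos));
 *	}
 *
 *	static struct rq_qos_ops example_ops = {
 *		.throttle	= example_throttle,
 *		.exit		= example_exit,
 *	};
 *
 * Unimplemented hooks may be left NULL; the __rq_qos_*() dispatchers
 * check for that before calling.
 */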

struct rq_depth {
	unsigned int max_depth;		/* current max allowed queue depth */

	int scale_step;			/* > 0: scaled down, < 0: scaled up */
	bool scaled_max;		/* hit the ceiling while scaling up */

	unsigned int queue_depth;	/* depth of the underlying queue */
	unsigned int default_depth;	/* fallback if queue_depth is unknown */
};

static inline struct rq_qos *rq_qos_id(struct request_queue *q,
				       enum rq_qos_id id)
{
	struct rq_qos *rqos;
	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->id == id)
			break;
	}
	return rqos;
}

static inline struct rq_qos *wbt_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_WBT);
}

static inline struct rq_qos *blkcg_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_CGROUP);
}

static inline const char *rq_qos_id_to_name(enum rq_qos_id id)
{
	switch (id) {
	case RQ_QOS_WBT:
		return "wbt";
	case RQ_QOS_CGROUP:
		return "cgroup";
	}
	return "unknown";
}

static inline void rq_wait_init(struct rq_wait *rq_wait)
{
	atomic_set(&rq_wait->inflight, 0);
	init_waitqueue_head(&rq_wait->wait);
}
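
/*
 * Example: the accounting pattern rq_wait supports (an illustrative
 * sketch; "my_rqw" and "my_limit" are hypothetical). The submission
 * side takes an inflight slot, and the completion side releases it and
 * wakes anyone who was throttled:
 *
 *	rq_wait_init(&my_rqw);
 *
 *	(submission: take one of my_limit slots, else throttle)
 *		if (!rq_wait_inc_below(&my_rqw, my_limit))
 *			rq_qos_wait(&my_rqw, ...);
 *
 *	(completion: release the slot and wake throttled submitters)
 *		atomic_dec(&my_rqw.inflight);
 *		wake_up_all(&my_rqw.wait);
 */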

static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
{
	rqos->next = q->rq_qos;
	q->rq_qos = rqos;

	if (rqos->ops->debugfs_attrs)
		blk_mq_debugfs_register_rqos(rqos);
}

static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
{
	struct rq_qos *cur, *prev = NULL;
	for (cur = q->rq_qos; cur; cur = cur->next) {
		if (cur == rqos) {
			if (prev)
				prev->next = rqos->next;
			else
				q->rq_qos = rqos->next;
			break;
		}
		prev = cur;
	}

	blk_mq_debugfs_unregister_rqos(rqos);
}
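
/*
 * Example: attaching and detaching a policy (an illustrative sketch;
 * "example_qos" and "example_ops" are the hypothetical names from the
 * sketch above). The lookup helpers assume at most one policy per id:
 *
 *	struct example_qos *eq = kzalloc(sizeof(*eq), GFP_KERNEL);
 *
 *	eq->rqos.id = RQ_QOS_WBT;
 *	eq->rqos.ops = &example_ops;
 *	eq->rqos.q = q;
 *	rq_wait_init(&eq->rqw);
 *	rq_qos_add(q, &eq->rqos);
 *
 * A single policy is removed with rq_qos_del(q, &eq->rqos); tearing
 * down the whole queue goes through rq_qos_exit(q), which walks the
 * list and invokes each policy's ->exit() hook.
 */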

typedef bool (acquire_inflight_cb_t)(struct rq_wait *rqw, void *private_data);
typedef void (cleanup_cb_t)(struct rq_wait *rqw, void *private_data);

void rq_qos_wait(struct rq_wait *rqw, void *private_data,
		 acquire_inflight_cb_t *acquire_inflight_cb,
		 cleanup_cb_t *cleanup_cb);
bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit);
void rq_depth_scale_up(struct rq_depth *rqd);
void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
bool rq_depth_calc_max_depth(struct rq_depth *rqd);
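
/*
 * Example: how rq_qos_wait() is typically driven (an illustrative
 * sketch; everything except the declared functions is hypothetical).
 * The acquire callback tries to take an inflight slot without
 * sleeping; rq_qos_wait() parks the caller on rqw->wait until it
 * succeeds. The cleanup callback releases a surplus slot for the case
 * where the waiter raced with a waker and acquired twice:
 *
 *	static bool example_inflight_cb(struct rq_wait *rqw, void *private_data)
 *	{
 *		struct example_qos *eq = private_data;
 *
 *		return rq_wait_inc_below(rqw, eq->limit);
 *	}
 *
 *	static void example_cleanup_cb(struct rq_wait *rqw, void *private_data)
 *	{
 *		atomic_dec(&rqw->inflight);
 *		wake_up_all(&rqw->wait);
 *	}
 *
 *	rq_qos_wait(&eq->rqw, eq, example_inflight_cb, example_cleanup_cb);
 */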

void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_done(struct rq_qos *rqos, struct request *rq);
void __rq_qos_issue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio);

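/*
 * The wrappers below are the hook points the block layer calls over the
 * life of a bio/request (a rough summary, not a contract): throttle
 * before a request is allocated for the bio, track when the bio is
 * attached to a request, issue when the request is dispatched to the
 * driver, requeue if it bounces back, and done on completion. cleanup
 * covers a throttled bio that never becomes a request, and done_bio
 * runs when the bio itself completes.
 */
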
static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_cleanup(q->rq_qos, bio);
}

static inline void rq_qos_done(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_done(q->rq_qos, rq);
}

static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_issue(q->rq_qos, rq);
}

static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_requeue(q->rq_qos, rq);
}

static inline void rq_qos_done_bio(struct request_queue *q, struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_done_bio(q->rq_qos, bio);
}

static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
{
	/*
	 * BIO_TRACKED lets controllers know that a bio went through the
	 * normal rq_qos path.
	 */
	bio_set_flag(bio, BIO_TRACKED);
	if (q->rq_qos)
		__rq_qos_throttle(q->rq_qos, bio);
}

static inline void rq_qos_track(struct request_queue *q, struct request *rq,
				struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_track(q->rq_qos, rq, bio);
}

void rq_qos_exit(struct request_queue *);

#endif