1#include "blk-rq-qos.h"
2
a7905043
JB
3/*
4 * Increment 'v', if 'v' is below 'below'. Returns true if we succeeded,
5 * false if 'v' + 1 would be bigger than 'below'.
6 */
22f17952 7static bool atomic_inc_below(atomic_t *v, unsigned int below)
a7905043 8{
22f17952 9 unsigned int cur = atomic_read(v);
a7905043
JB
10
11 for (;;) {
22f17952 12 unsigned int old;
a7905043
JB
13
14 if (cur >= below)
15 return false;
16 old = atomic_cmpxchg(v, cur, cur + 1);
17 if (old == cur)
18 break;
19 cur = old;
20 }
21
22 return true;
23}
24
bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit)
{
	return atomic_inc_below(&rq_wait->inflight, limit);
}

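/*
 * Illustrative sketch, not part of the original file: one way a QoS
 * policy could pair rq_wait_inc_below() with the wait queue embedded in
 * struct rq_wait to cap inflight requests. The example_* names and the
 * split into a throttle side and a completion side are hypothetical;
 * wbt implements a more elaborate version of the same idea.
 */
static void example_throttle(struct rq_wait *rqw, unsigned int limit)
{
	/* Sleep until an inflight slot below 'limit' can be claimed. */
	wait_event(rqw->wait, rq_wait_inc_below(rqw, limit));
}

static void example_completion(struct rq_wait *rqw)
{
	/* Drop our slot and wake one throttled submitter, if any. */
	atomic_dec(&rqw->inflight);
	if (wq_has_sleeper(&rqw->wait))
		wake_up(&rqw->wait);
}
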
void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
{
	struct rq_qos *rqos;

	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->ops->cleanup)
			rqos->ops->cleanup(rqos, bio);
	}
}

void rq_qos_done(struct request_queue *q, struct request *rq)
{
	struct rq_qos *rqos;

	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->ops->done)
			rqos->ops->done(rqos, rq);
	}
}

void rq_qos_issue(struct request_queue *q, struct request *rq)
{
	struct rq_qos *rqos;

	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->ops->issue)
			rqos->ops->issue(rqos, rq);
	}
}

void rq_qos_requeue(struct request_queue *q, struct request *rq)
{
	struct rq_qos *rqos;

	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->ops->requeue)
			rqos->ops->requeue(rqos, rq);
	}
}

void rq_qos_throttle(struct request_queue *q, struct bio *bio)
{
	struct rq_qos *rqos;

	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->ops->throttle)
			rqos->ops->throttle(rqos, bio);
	}
}

void rq_qos_track(struct request_queue *q, struct request *rq, struct bio *bio)
{
	struct rq_qos *rqos;

	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->ops->track)
			rqos->ops->track(rqos, rq, bio);
	}
}

void rq_qos_done_bio(struct request_queue *q, struct bio *bio)
{
	struct rq_qos *rqos;

	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->ops->done_bio)
			rqos->ops->done_bio(rqos, bio);
	}
}

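/*
 * Illustrative sketch, not part of the original file: roughly how a QoS
 * policy attaches itself so the dispatch helpers above can find it. The
 * example_* names are hypothetical; the fields used (ops, q, next) are
 * taken from struct rq_qos in blk-rq-qos.h. Hooks left NULL in the ops
 * table are simply skipped by the NULL checks in the loops above.
 */
static void example_exit(struct rq_qos *rqos)
{
	/* A real policy would tear down its private state here. */
}

static struct rq_qos_ops example_ops = {
	.exit	= example_exit,
};

static void example_attach(struct request_queue *q, struct rq_qos *rqos)
{
	rqos->ops = &example_ops;
	rqos->q = q;
	/* Link at the head of the queue's singly linked rq_qos chain. */
	rqos->next = q->rq_qos;
	q->rq_qos = rqos;
}
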
/*
 * Return true if we can't increase the depth further by scaling
 */
bool rq_depth_calc_max_depth(struct rq_depth *rqd)
{
	unsigned int depth;
	bool ret = false;

	/*
	 * For QD=1 devices, this is a special case. It's important for those
	 * to have one request ready when one completes, so force a depth of
	 * 2 for those devices. On the backend, it'll be a depth of 1 anyway,
	 * since the device can't have more than that in flight. If we're
	 * scaling down, then keep a setting of 1/1/1.
	 */
	if (rqd->queue_depth == 1) {
		if (rqd->scale_step > 0)
			rqd->max_depth = 1;
		else {
			rqd->max_depth = 2;
			ret = true;
		}
	} else {
		/*
		 * scale_step == 0 is our default state. If we have suffered
		 * latency spikes, step will be > 0, and we shrink the
		 * allowed write depths. If step is < 0, we're only doing
		 * writes, and we allow a temporarily higher depth to
		 * increase performance.
		 */
		depth = min_t(unsigned int, rqd->default_depth,
			      rqd->queue_depth);
		if (rqd->scale_step > 0)
			depth = 1 + ((depth - 1) >> min(31, rqd->scale_step));
		else if (rqd->scale_step < 0) {
			unsigned int maxd = 3 * rqd->queue_depth / 4;

			depth = 1 + ((depth - 1) << -rqd->scale_step);
			if (depth > maxd) {
				depth = maxd;
				ret = true;
			}
		}

		rqd->max_depth = depth;
	}

	return ret;
}

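/*
 * Worked example with made-up numbers, not from the original file: with
 * default_depth = 64 and queue_depth = 128, depth starts at 64. Scaling
 * down with scale_step = 2 gives 1 + ((64 - 1) >> 2) = 16. Scaling up
 * with scale_step = -1 gives 1 + ((64 - 1) << 1) = 127, which exceeds
 * maxd = 3 * 128 / 4 = 96, so the depth is clamped to 96 and the
 * function returns true to flag that it can't be raised any further.
 */
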
void rq_depth_scale_up(struct rq_depth *rqd)
{
	/*
	 * Hit max in previous round, stop here
	 */
	if (rqd->scaled_max)
		return;

	rqd->scale_step--;

	rqd->scaled_max = rq_depth_calc_max_depth(rqd);
}

/*
 * Scale rwb down. If 'hard_throttle' is set, do it quicker, since we
 * had a latency violation.
 */
void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
{
	/*
	 * Stop scaling down when we've hit the limit. This also prevents
	 * ->scale_step from going to crazy values, if the device can't
	 * keep up.
	 */
	if (rqd->max_depth == 1)
		return;

	if (rqd->scale_step < 0 && hard_throttle)
		rqd->scale_step = 0;
	else
		rqd->scale_step++;

	rqd->scaled_max = false;
	rq_depth_calc_max_depth(rqd);
}

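/*
 * Illustrative sketch, not part of the original file: how a policy built
 * on struct rq_depth might drive the two scaling helpers from its own
 * latency measurements. example_adjust() and the nanosecond arguments
 * are hypothetical; wbt's logic is considerably more involved.
 */
static void example_adjust(struct rq_depth *rqd, u64 lat_nsec, u64 target_nsec)
{
	if (lat_nsec > target_nsec)
		/* Latency violation: shrink the allowed depth quickly. */
		rq_depth_scale_down(rqd, true);
	else
		/* Under the target: cautiously allow more requests in flight. */
		rq_depth_scale_up(rqd);
}
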
void rq_qos_exit(struct request_queue *q)
{
	while (q->rq_qos) {
		struct rq_qos *rqos = q->rq_qos;

		q->rq_qos = rqos->next;
		rqos->ops->exit(rqos);
	}
}