// SPDX-License-Identifier: GPL-2.0
/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
        if (!tags)
                return true;

        return sbitmap_any_bit_clear(&tags->bitmap_tags.sb);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 * We need to do this before trying to allocate a driver tag, so that even
 * if the first allocation attempt fails, the other shared-tag users can
 * still reserve budget for it.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
        if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
            !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                atomic_inc(&hctx->tags->active_queues);

        return true;
}

/*
 * Wake up all tasks potentially sleeping on tags.
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
        sbitmap_queue_wake_all(&tags->bitmap_tags);
        if (include_reserve)
                sbitmap_queue_wake_all(&tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_tags *tags = hctx->tags;

        if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                return;

        atomic_dec(&tags->active_queues);

        blk_mq_tag_wakeup_all(tags, false);
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
                                  struct sbitmap_queue *bt)
{
        unsigned int depth, users;

        if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
                return true;
        if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                return true;

        /*
         * Don't try dividing an ant
         */
        if (bt->sb.depth == 1)
                return true;

        users = atomic_read(&hctx->tags->active_queues);
        if (!users)
                return true;

        /*
         * Allow at least some tags
         */
        depth = max((bt->sb.depth + users - 1) / users, 4U);
        return atomic_read(&hctx->nr_active) < depth;
}
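
/*
 * Worked example (illustrative numbers, not from the original source):
 * with bt->sb.depth == 256 and 3 active shared users, the ceiling
 * division above gives each hctx up to max((256 + 3 - 1) / 3, 4) =
 * max(86, 4) = 86 tags. With 128 users the division yields only 2, so
 * the max() floor of 4 still guarantees every queue a few tags and
 * thereby forward progress.
 */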

static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
                            struct sbitmap_queue *bt)
{
        if (!(data->flags & BLK_MQ_REQ_INTERNAL) &&
            !hctx_may_queue(data->hctx, bt))
                return -1;
        if (data->shallow_depth)
                return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
        else
                return __sbitmap_queue_get(bt);
}
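
/*
 * Context note (based on general familiarity with this kernel's I/O
 * schedulers, not stated in this file): data->shallow_depth is set via
 * the elevator's ->limit_depth() hook; for example, BFQ and Kyber use it
 * to cap how many tags async writes may consume.
 */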

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
        struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
        struct sbitmap_queue *bt;
        struct sbq_wait_state *ws;
        DEFINE_SBQ_WAIT(wait);
        unsigned int tag_offset;
        bool drop_ctx;
        int tag;

        if (data->flags & BLK_MQ_REQ_RESERVED) {
                if (unlikely(!tags->nr_reserved_tags)) {
                        WARN_ON_ONCE(1);
                        return BLK_MQ_TAG_FAIL;
                }
                bt = &tags->breserved_tags;
                tag_offset = 0;
        } else {
                bt = &tags->bitmap_tags;
                tag_offset = tags->nr_reserved_tags;
        }

        tag = __blk_mq_get_tag(data, bt);
        if (tag != -1)
                goto found_tag;

        if (data->flags & BLK_MQ_REQ_NOWAIT)
                return BLK_MQ_TAG_FAIL;

        ws = bt_wait_ptr(bt, data->hctx);
        drop_ctx = data->ctx == NULL;
        do {
                struct sbitmap_queue *bt_prev;

                /*
                 * We're out of tags on this hardware queue, kick any
                 * pending IO submits before going to sleep waiting for
                 * some to complete.
                 */
                blk_mq_run_hw_queue(data->hctx, false);

                /*
                 * Retry tag allocation after running the hardware queue,
                 * as running the queue may also have found completions.
                 */
                tag = __blk_mq_get_tag(data, bt);
                if (tag != -1)
                        break;

                sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);

                tag = __blk_mq_get_tag(data, bt);
                if (tag != -1)
                        break;

                if (data->ctx)
                        blk_mq_put_ctx(data->ctx);

                bt_prev = bt;
                io_schedule();

                sbitmap_finish_wait(bt, ws, &wait);

                data->ctx = blk_mq_get_ctx(data->q);
                data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
                                              data->ctx);
                tags = blk_mq_tags_from_data(data);
                if (data->flags & BLK_MQ_REQ_RESERVED)
                        bt = &tags->breserved_tags;
                else
                        bt = &tags->bitmap_tags;

                /*
                 * If the destination hw queue has changed, issue a fake
                 * wakeup on the previous queue to compensate for the
                 * missed wakeup, so other allocations on the previous
                 * queue won't be starved.
                 */
                if (bt != bt_prev)
                        sbitmap_queue_wake_up(bt_prev);

                ws = bt_wait_ptr(bt, data->hctx);
        } while (1);

        if (drop_ctx && data->ctx)
                blk_mq_put_ctx(data->ctx);

        sbitmap_finish_wait(bt, ws, &wait);

found_tag:
        return tag + tag_offset;
}

void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
                    struct blk_mq_ctx *ctx, unsigned int tag)
{
        if (!blk_mq_tag_is_reserved(tags, tag)) {
                const int real_tag = tag - tags->nr_reserved_tags;

                BUG_ON(real_tag >= tags->nr_tags);
                sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
        } else {
                BUG_ON(tag >= tags->nr_reserved_tags);
                sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
        }
}
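
/*
 * Illustrative example (numbers invented for clarity): with
 * nr_reserved_tags == 2, external tags 0 and 1 clear bits 0 and 1 of
 * breserved_tags, while external tag 5 clears bit 3 (5 - 2) of
 * bitmap_tags. blk_mq_get_tag() applies the same offset in the other
 * direction via tag_offset.
 */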

struct bt_iter_data {
        struct blk_mq_hw_ctx *hctx;
        busy_iter_fn *fn;
        void *data;
        bool reserved;
};

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
        struct bt_iter_data *iter_data = data;
        struct blk_mq_hw_ctx *hctx = iter_data->hctx;
        struct blk_mq_tags *tags = hctx->tags;
        bool reserved = iter_data->reserved;
        struct request *rq;

        if (!reserved)
                bitnr += tags->nr_reserved_tags;
        rq = tags->rqs[bitnr];

        /*
         * We can hit rq == NULL here, because the tagging functions
         * test and set the bit before assigning ->rqs[].
         */
        if (rq && rq->q == hctx->queue)
                return iter_data->fn(hctx, rq, iter_data->data, reserved);
        return true;
}

/**
 * bt_for_each - iterate over the requests associated with a hardware queue
 * @hctx:       Hardware queue to examine.
 * @bt:         sbitmap to examine. This is either the breserved_tags member
 *              or the bitmap_tags member of struct blk_mq_tags.
 * @fn:         Pointer to the function that will be called for each request
 *              associated with @hctx that has been assigned a driver tag.
 *              @fn will be called as follows: @fn(@hctx, rq, @data, @reserved)
 *              where rq is a pointer to a request. Return true to continue
 *              iterating tags, false to stop.
 * @data:       Will be passed as third argument to @fn.
 * @reserved:   Indicates whether @bt is the breserved_tags member or the
 *              bitmap_tags member of struct blk_mq_tags.
 */
static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
                        busy_iter_fn *fn, void *data, bool reserved)
{
        struct bt_iter_data iter_data = {
                .hctx = hctx,
                .fn = fn,
                .data = data,
                .reserved = reserved,
        };

        sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

struct bt_tags_iter_data {
        struct blk_mq_tags *tags;
        busy_tag_iter_fn *fn;
        void *data;
        bool reserved;
};

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
        struct bt_tags_iter_data *iter_data = data;
        struct blk_mq_tags *tags = iter_data->tags;
        bool reserved = iter_data->reserved;
        struct request *rq;

        if (!reserved)
                bitnr += tags->nr_reserved_tags;

        /*
         * We can hit rq == NULL here, because the tagging functions
         * test and set the bit before assigning ->rqs[].
         */
        rq = tags->rqs[bitnr];
        if (rq && blk_mq_request_started(rq))
                return iter_data->fn(rq, iter_data->data, reserved);

        return true;
}

/**
 * bt_tags_for_each - iterate over the requests in a tag map
 * @tags:       Tag map to iterate over.
 * @bt:         sbitmap to examine. This is either the breserved_tags member
 *              or the bitmap_tags member of struct blk_mq_tags.
 * @fn:         Pointer to the function that will be called for each started
 *              request. @fn will be called as follows: @fn(rq, @data,
 *              @reserved) where rq is a pointer to a request. Return true
 *              to continue iterating tags, false to stop.
 * @data:       Will be passed as second argument to @fn.
 * @reserved:   Indicates whether @bt is the breserved_tags member or the
 *              bitmap_tags member of struct blk_mq_tags.
 */
static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
                             busy_tag_iter_fn *fn, void *data, bool reserved)
{
        struct bt_tags_iter_data iter_data = {
                .tags = tags,
                .fn = fn,
                .data = data,
                .reserved = reserved,
        };

        if (tags->rqs)
                sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

/**
 * blk_mq_all_tag_busy_iter - iterate over all started requests in a tag map
 * @tags:       Tag map to iterate over.
 * @fn:         Pointer to the function that will be called for each started
 *              request. @fn will be called as follows: @fn(rq, @priv,
 *              reserved) where rq is a pointer to a request. 'reserved'
 *              indicates whether or not @rq is a reserved request. Return
 *              true to continue iterating tags, false to stop.
 * @priv:       Will be passed as second argument to @fn.
 */
static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
                busy_tag_iter_fn *fn, void *priv)
{
        if (tags->nr_reserved_tags)
                bt_tags_for_each(tags, &tags->breserved_tags, fn, priv, true);
        bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, false);
}

/**
 * blk_mq_tagset_busy_iter - iterate over all started requests in a tag set
 * @tagset:     Tag set to iterate over.
 * @fn:         Pointer to the function that will be called for each started
 *              request. @fn will be called as follows: @fn(rq, @priv,
 *              reserved) where rq is a pointer to a request. 'reserved'
 *              indicates whether or not @rq is a reserved request. Return
 *              true to continue iterating tags, false to stop.
 * @priv:       Will be passed as second argument to @fn.
 */
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
                busy_tag_iter_fn *fn, void *priv)
{
        int i;

        for (i = 0; i < tagset->nr_hw_queues; i++) {
                if (tagset->tags && tagset->tags[i])
                        blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
        }
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
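
/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * driver tearing down a controller might cancel every started request:
 *
 *      static bool my_cancel_rq(struct request *rq, void *data,
 *                               bool reserved)
 *      {
 *              blk_mq_complete_request(rq);    // fail/complete the request
 *              return true;                    // keep iterating
 *      }
 *
 *      blk_mq_tagset_busy_iter(&dev->tagset, my_cancel_rq, dev);
 *
 * Here my_cancel_rq and dev are invented names for illustration.
 */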

/**
 * blk_mq_queue_tag_busy_iter - iterate over all requests with a driver tag
 * @q:          Request queue to examine.
 * @fn:         Pointer to the function that will be called for each request
 *              on @q. @fn will be called as follows: @fn(hctx, rq, @priv,
 *              reserved) where rq is a pointer to a request and hctx points
 *              to the hardware queue associated with the request. 'reserved'
 *              indicates whether or not @rq is a reserved request.
 * @priv:       Will be passed as third argument to @fn.
 *
 * Note: if @q->tag_set is shared with other request queues then @fn will be
 * called for all requests on all queues that share that tag set and not only
 * for requests associated with @q.
 */
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
                void *priv)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        /*
         * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and queue_hw_ctx
         * while the queue is frozen. So we can use q_usage_counter to avoid
         * racing with it. __blk_mq_update_nr_hw_queues() uses
         * synchronize_rcu() to ensure this function leaves the critical
         * section below before the update proceeds.
         */
        if (!percpu_ref_tryget(&q->q_usage_counter))
                return;

        queue_for_each_hw_ctx(q, hctx, i) {
                struct blk_mq_tags *tags = hctx->tags;

                /*
                 * If no software queues are currently mapped to this
                 * hardware queue, there's nothing to check.
                 */
                if (!blk_mq_hw_queue_mapped(hctx))
                        continue;

                if (tags->nr_reserved_tags)
                        bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
                bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
        }
        blk_queue_exit(q);
}
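
/*
 * Context note (stated from general familiarity with blk-mq.c in this
 * kernel, not from this file): the request timeout handler
 * (blk_mq_check_expired) and in-flight accounting (blk_mq_check_inflight)
 * drive their per-request checks through this iterator.
 */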

static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
                    bool round_robin, int node)
{
        return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
                                       node);
}

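/*
 * Note (an assumption based on the sbitmap API, not stated here): the -1
 * shift argument asks sbitmap_queue_init_node() to pick a default
 * bits-per-word shift derived from the depth.
 */
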
static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
                                                   int node, int alloc_policy)
{
        unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
        bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

        if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node))
                goto free_tags;
        if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, round_robin,
                     node))
                goto free_bitmap_tags;

        return tags;
free_bitmap_tags:
        sbitmap_queue_free(&tags->bitmap_tags);
free_tags:
        kfree(tags);
        return NULL;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
                                     unsigned int reserved_tags,
                                     int node, int alloc_policy)
{
        struct blk_mq_tags *tags;

        if (total_tags > BLK_MQ_TAG_MAX) {
                pr_err("blk-mq: tag depth too large\n");
                return NULL;
        }

        tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
        if (!tags)
                return NULL;

        tags->nr_tags = total_tags;
        tags->nr_reserved_tags = reserved_tags;

        return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
}
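
/*
 * Context note (from blk-mq.c in this kernel, added for orientation):
 * blk_mq_alloc_rq_map() is the primary caller, passing the tag set's
 * queue depth and reserved tag count along with the allocation policy
 * derived from set->flags.
 */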

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
        sbitmap_queue_free(&tags->bitmap_tags);
        sbitmap_queue_free(&tags->breserved_tags);
        kfree(tags);
}

int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
                            struct blk_mq_tags **tagsptr, unsigned int tdepth,
                            bool can_grow)
{
        struct blk_mq_tags *tags = *tagsptr;

        if (tdepth <= tags->nr_reserved_tags)
                return -EINVAL;

        /*
         * If we are allowed to grow beyond the original size, allocate
         * a new set of tags before freeing the old one.
         */
        if (tdepth > tags->nr_tags) {
                struct blk_mq_tag_set *set = hctx->queue->tag_set;
                struct blk_mq_tags *new;
                int ret;

                if (!can_grow)
                        return -EINVAL;

                /*
                 * We need some sort of upper limit, set it high enough that
                 * no valid use cases should require more.
                 */
                if (tdepth > 16 * BLKDEV_MAX_RQ)
                        return -EINVAL;

                new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth,
                                          tags->nr_reserved_tags);
                if (!new)
                        return -ENOMEM;
                ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
                if (ret) {
                        blk_mq_free_rq_map(new);
                        return -ENOMEM;
                }

                blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
                blk_mq_free_rq_map(*tagsptr);
                *tagsptr = new;
        } else {
                /*
                 * Don't need (or can't) update reserved tags here, they
                 * remain static and should never need resizing.
                 */
                sbitmap_queue_resize(&tags->bitmap_tags,
                                     tdepth - tags->nr_reserved_tags);
        }

        return 0;
}
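
/*
 * Sizing note (BLKDEV_MAX_RQ is 128 in this kernel's <linux/blkdev.h>,
 * stated from memory rather than this file): the growth cap above works
 * out to 16 * 128 = 2048 tags per hardware queue.
 */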

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function that returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
        return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
                (rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);
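
/*
 * Example (illustrative, assuming the BLK_MQ_UNIQUE_TAG_BITS == 16 split
 * from <linux/blk-mq.h>): a request on hardware queue 2 holding per-queue
 * tag 5 yields (2 << 16) | 5 == 0x00020005. blk_mq_unique_tag_to_hwq()
 * and blk_mq_unique_tag_to_tag() recover the two halves.
 */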