// SPDX-License-Identifier: GPL-2.0
/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/delay.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"

/*
 * Recalculate the wakeup batch when tags are shared between hardware queues.
 */
static void blk_mq_update_wake_batch(struct blk_mq_tags *tags,
		unsigned int users)
{
	if (!users)
		return;

	sbitmap_queue_recalculate_wake_batch(&tags->bitmap_tags,
			users);
	sbitmap_queue_recalculate_wake_batch(&tags->breserved_tags,
			users);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 * We need to do this before trying to allocate a driver tag, so that even
 * if the first attempt to get a tag fails, the other shared-tag users can
 * reserve budget for it.
 */
void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	unsigned int users;
	struct blk_mq_tags *tags = hctx->tags;

	/*
	 * Calling test_bit() prior to test_and_set_bit() is intentional:
	 * it avoids dirtying the cacheline if the queue is already active.
	 */
	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) ||
		    test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
			return;
	} else {
		if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) ||
		    test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return;
	}

	spin_lock_irq(&tags->lock);
	users = tags->active_queues + 1;
	WRITE_ONCE(tags->active_queues, users);
	blk_mq_update_wake_batch(tags, users);
	spin_unlock_irq(&tags->lock);
}

/*
 * Wake up all tasks potentially sleeping on tags.
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
	sbitmap_queue_wake_all(&tags->bitmap_tags);
	if (include_reserve)
		sbitmap_queue_wake_all(&tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;
	unsigned int users;

	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		if (!test_and_clear_bit(QUEUE_FLAG_HCTX_ACTIVE,
					&q->queue_flags))
			return;
	} else {
		if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return;
	}

	spin_lock_irq(&tags->lock);
	users = tags->active_queues - 1;
	WRITE_ONCE(tags->active_queues, users);
	blk_mq_update_wake_batch(tags, users);
	spin_unlock_irq(&tags->lock);

	blk_mq_tag_wakeup_all(tags, false);
}

static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
			    struct sbitmap_queue *bt)
{
	if (!data->q->elevator && !(data->flags & BLK_MQ_REQ_RESERVED) &&
			!hctx_may_queue(data->hctx, bt))
		return BLK_MQ_NO_TAG;

	if (data->shallow_depth)
		return sbitmap_queue_get_shallow(bt, data->shallow_depth);
	else
		return __sbitmap_queue_get(bt);
}
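
/*
 * For reference, the fairness check used above lives in hctx_may_queue()
 * (blk-mq.h): when a tag map is shared, each active queue only gets a
 * proportional slice of the map. A simplified sketch of that rule, assuming
 * 'users' active queues sharing 'depth' total tags (illustrative only, not
 * the exact upstream implementation):
 */
static inline unsigned int example_fair_tag_share(unsigned int depth,
						  unsigned int users)
{
	if (!users)
		return depth;

	/* divide the map evenly, rounding up, but allow at least 4 tags */
	return max((depth + users - 1) / users, 4U);
}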

unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags,
			      unsigned int *offset)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct sbitmap_queue *bt = &tags->bitmap_tags;
	unsigned long ret;

	if (data->shallow_depth || data->flags & BLK_MQ_REQ_RESERVED ||
	    data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		return 0;
	ret = __sbitmap_queue_get_batch(bt, nr_tags, offset);
	*offset += tags->nr_reserved_tags;
	return ret;
}
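
/*
 * Illustrative sketch (not upstream code): consuming the bitmask returned
 * by blk_mq_get_tags(). A set bit i means tag (*offset + i) was allocated,
 * mirroring how the batch allocation path in blk-mq.c walks the mask.
 */
static inline void example_walk_tag_mask(unsigned long tag_mask,
					 unsigned int tag_offset)
{
	unsigned int i;

	for (i = 0; tag_mask; i++) {
		if (!(tag_mask & (1UL << i)))
			continue;
		tag_mask &= ~(1UL << i);
		pr_debug("allocated tag %u\n", tag_offset + i);
	}
}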

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct sbitmap_queue *bt;
	struct sbq_wait_state *ws;
	DEFINE_SBQ_WAIT(wait);
	unsigned int tag_offset;
	int tag;

	if (data->flags & BLK_MQ_REQ_RESERVED) {
		if (unlikely(!tags->nr_reserved_tags)) {
			WARN_ON_ONCE(1);
			return BLK_MQ_NO_TAG;
		}
		bt = &tags->breserved_tags;
		tag_offset = 0;
	} else {
		bt = &tags->bitmap_tags;
		tag_offset = tags->nr_reserved_tags;
	}

	tag = __blk_mq_get_tag(data, bt);
	if (tag != BLK_MQ_NO_TAG)
		goto found_tag;

	if (data->flags & BLK_MQ_REQ_NOWAIT)
		return BLK_MQ_NO_TAG;

	ws = bt_wait_ptr(bt, data->hctx);
	do {
		struct sbitmap_queue *bt_prev;

		/*
		 * We're out of tags on this hardware queue, kick any
		 * pending IO submits before going to sleep waiting for
		 * some to complete.
		 */
		blk_mq_run_hw_queue(data->hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __blk_mq_get_tag(data, bt);
		if (tag != BLK_MQ_NO_TAG)
			break;

		sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);

		tag = __blk_mq_get_tag(data, bt);
		if (tag != BLK_MQ_NO_TAG)
			break;

		bt_prev = bt;
		io_schedule();

		sbitmap_finish_wait(bt, ws, &wait);

		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
						data->ctx);
		tags = blk_mq_tags_from_data(data);
		if (data->flags & BLK_MQ_REQ_RESERVED)
			bt = &tags->breserved_tags;
		else
			bt = &tags->bitmap_tags;

		/*
		 * If the destination hw queue changed, issue a fake wake-up
		 * on the previous queue to compensate for the missed wake-up,
		 * so other allocations on the previous queue won't be starved.
		 */
		if (bt != bt_prev)
			sbitmap_queue_wake_up(bt_prev, 1);

		ws = bt_wait_ptr(bt, data->hctx);
	} while (1);

	sbitmap_finish_wait(bt, ws, &wait);

found_tag:
	/*
	 * Give up this allocation if the hctx is inactive. The caller will
	 * retry on an active hctx.
	 */
	if (unlikely(test_bit(BLK_MQ_S_INACTIVE, &data->hctx->state))) {
		blk_mq_put_tag(tags, data->ctx, tag + tag_offset);
		return BLK_MQ_NO_TAG;
	}
	return tag + tag_offset;
}
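
/*
 * The retry loop above follows the standard sbitmap waiter protocol:
 * try, arm the waiter, try again, then sleep. A minimal sketch of the
 * same protocol against a bare sbitmap_queue ('sbq' is hypothetical;
 * illustrative only, the upstream loop additionally rechecks the hctx
 * mapping after sleeping):
 */
static inline int example_sbq_get_blocking(struct sbitmap_queue *sbq)
{
	struct sbq_wait_state *ws = sbq_wait_ptr(sbq, &sbq->wake_index);
	DEFINE_SBQ_WAIT(wait);
	int nr;

	do {
		nr = __sbitmap_queue_get(sbq);
		if (nr >= 0)
			break;
		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
		/* recheck after arming the waiter to avoid a lost wake-up */
		nr = __sbitmap_queue_get(sbq);
		if (nr >= 0)
			break;
		io_schedule();
	} while (1);
	sbitmap_finish_wait(sbq, ws, &wait);
	return nr;
}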

void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
		    unsigned int tag)
{
	if (!blk_mq_tag_is_reserved(tags, tag)) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
	} else {
		sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
	}
}
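
/*
 * Worked example of the tag-space layout assumed above (illustrative
 * numbers): with nr_reserved_tags = 4 and nr_tags = 64, external tags
 * 0..3 live in breserved_tags as bits 0..3, while external tags 4..63
 * live in bitmap_tags as bits 0..59 (external tag minus
 * nr_reserved_tags). This is the inverse of the tag_offset adjustment
 * performed in blk_mq_get_tag().
 */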

void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags)
{
	sbitmap_queue_clear_batch(&tags->bitmap_tags, tags->nr_reserved_tags,
					tag_array, nr_tags);
}

struct bt_iter_data {
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	busy_tag_iter_fn *fn;
	void *data;
	bool reserved;
};

static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
		unsigned int bitnr)
{
	struct request *rq;
	unsigned long flags;

	spin_lock_irqsave(&tags->lock, flags);
	rq = tags->rqs[bitnr];
	if (!rq || rq->tag != bitnr || !req_ref_inc_not_zero(rq))
		rq = NULL;
	spin_unlock_irqrestore(&tags->lock, flags);
	return rq;
}

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_iter_data *iter_data = data;
	struct blk_mq_hw_ctx *hctx = iter_data->hctx;
	struct request_queue *q = iter_data->q;
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_tags *tags;
	struct request *rq;
	bool ret = true;

	if (blk_mq_is_shared_tags(set->flags))
		tags = set->shared_tags;
	else
		tags = hctx->tags;

	if (!iter_data->reserved)
		bitnr += tags->nr_reserved_tags;

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	rq = blk_mq_find_and_get_req(tags, bitnr);
	if (!rq)
		return true;

	if (rq->q == q && (!hctx || rq->mq_hctx == hctx))
		ret = iter_data->fn(rq, iter_data->data);
	blk_mq_put_rq_ref(rq);
	return ret;
}

/**
 * bt_for_each - iterate over the requests associated with a hardware queue
 * @hctx:	Hardware queue to examine.
 * @q:		Request queue to examine.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each request
 *		associated with @hctx that has been assigned a driver tag.
 *		@fn will be called as follows: @fn(rq, @data) where rq is a
 *		pointer to a request. Return true to continue iterating
 *		tags, false to stop.
 * @data:	Will be passed as second argument to @fn.
 * @reserved:	Indicates whether @bt is the breserved_tags member or the
 *		bitmap_tags member of struct blk_mq_tags.
 */
static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct request_queue *q,
			struct sbitmap_queue *bt, busy_tag_iter_fn *fn,
			void *data, bool reserved)
{
	struct bt_iter_data iter_data = {
		.hctx = hctx,
		.fn = fn,
		.data = data,
		.reserved = reserved,
		.q = q,
	};

	sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

struct bt_tags_iter_data {
	struct blk_mq_tags *tags;
	busy_tag_iter_fn *fn;
	void *data;
	unsigned int flags;
};

#define BT_TAG_ITER_RESERVED		(1 << 0)
#define BT_TAG_ITER_STARTED		(1 << 1)
#define BT_TAG_ITER_STATIC_RQS		(1 << 2)

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_tags_iter_data *iter_data = data;
	struct blk_mq_tags *tags = iter_data->tags;
	struct request *rq;
	bool ret = true;
	bool iter_static_rqs = !!(iter_data->flags & BT_TAG_ITER_STATIC_RQS);

	if (!(iter_data->flags & BT_TAG_ITER_RESERVED))
		bitnr += tags->nr_reserved_tags;

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	if (iter_static_rqs)
		rq = tags->static_rqs[bitnr];
	else
		rq = blk_mq_find_and_get_req(tags, bitnr);
	if (!rq)
		return true;

	if (!(iter_data->flags & BT_TAG_ITER_STARTED) ||
	    blk_mq_request_started(rq))
		ret = iter_data->fn(rq, iter_data->data);
	if (!iter_static_rqs)
		blk_mq_put_rq_ref(rq);
	return ret;
}

/**
 * bt_tags_for_each - iterate over the requests in a tag map
 * @tags:	Tag map to iterate over.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @data) where
 *		rq is a pointer to a request. Return true to continue
 *		iterating tags, false to stop.
 * @data:	Will be passed as second argument to @fn.
 * @flags:	BT_TAG_ITER_*
 */
static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
			     busy_tag_iter_fn *fn, void *data, unsigned int flags)
{
	struct bt_tags_iter_data iter_data = {
		.tags = tags,
		.fn = fn,
		.data = data,
		.flags = flags,
	};

	if (tags->rqs)
		sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

static void __blk_mq_all_tag_iter(struct blk_mq_tags *tags,
		busy_tag_iter_fn *fn, void *priv, unsigned int flags)
{
	WARN_ON_ONCE(flags & BT_TAG_ITER_RESERVED);

	if (tags->nr_reserved_tags)
		bt_tags_for_each(tags, &tags->breserved_tags, fn, priv,
				 flags | BT_TAG_ITER_RESERVED);
	bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, flags);
}

/**
 * blk_mq_all_tag_iter - iterate over all requests in a tag map
 * @tags:	Tag map to iterate over.
 * @fn:		Pointer to the function that will be called for each
 *		request. @fn will be called as follows: @fn(rq, @priv) where
 *		rq is a pointer to a request. Return true to continue
 *		iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 *
 * Caller has to pass the tag map from which requests are allocated.
 */
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv)
{
	__blk_mq_all_tag_iter(tags, fn, priv, BT_TAG_ITER_STATIC_RQS);
}
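
/*
 * Illustrative usage sketch (hypothetical helpers, not upstream code):
 * determine whether a tag map has any allocated request at all, stopping
 * the iteration at the first hit by returning false from the callback.
 */
struct example_busy_data {
	bool has_rq;
};

static bool example_check_busy(struct request *rq, void *data)
{
	struct example_busy_data *busy = data;

	busy->has_rq = true;
	return false;	/* stop iterating at the first request found */
}

static __maybe_unused bool example_tags_busy(struct blk_mq_tags *tags)
{
	struct example_busy_data data = { .has_rq = false };

	blk_mq_all_tag_iter(tags, example_check_busy, &data);
	return data.has_rq;
}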

/**
 * blk_mq_tagset_busy_iter - iterate over all started requests in a tag set
 * @tagset:	Tag set to iterate over.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @priv) where
 *		rq is a pointer to a request. Return true to continue
 *		iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 *
 * We grab one request reference before calling @fn and release it after
 * @fn returns.
 */
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv)
{
	unsigned int flags = tagset->flags;
	int i, nr_tags;

	nr_tags = blk_mq_is_shared_tags(flags) ? 1 : tagset->nr_hw_queues;

	for (i = 0; i < nr_tags; i++) {
		if (tagset->tags && tagset->tags[i])
			__blk_mq_all_tag_iter(tagset->tags[i], fn, priv,
					      BT_TAG_ITER_STARTED);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
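
/*
 * Illustrative driver-side sketch (hypothetical helpers, not upstream
 * code): count in-flight started requests across a tag set, a common
 * pattern built on blk_mq_tagset_busy_iter().
 */
static bool example_count_inflight(struct request *rq, void *data)
{
	unsigned int *inflight = data;

	(*inflight)++;
	return true;	/* keep iterating over all started requests */
}

static __maybe_unused unsigned int
example_tagset_inflight(struct blk_mq_tag_set *set)
{
	unsigned int inflight = 0;

	blk_mq_tagset_busy_iter(set, example_count_inflight, &inflight);
	return inflight;
}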

static bool blk_mq_tagset_count_completed_rqs(struct request *rq, void *data)
{
	unsigned *count = data;

	if (blk_mq_request_completed(rq))
		(*count)++;
	return true;
}

/**
 * blk_mq_tagset_wait_completed_request - Wait until all scheduled request
 * completions have finished.
 * @tagset:	Tag set to drain completed requests from.
 *
 * Note: This function has to be run after all IO queues are shut down.
 */
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset)
{
	while (true) {
		unsigned count = 0;

		blk_mq_tagset_busy_iter(tagset,
				blk_mq_tagset_count_completed_rqs, &count);
		if (!count)
			break;
		msleep(5);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_wait_completed_request);

/**
 * blk_mq_queue_tag_busy_iter - iterate over all requests with a driver tag
 * @q:		Request queue to examine.
 * @fn:		Pointer to the function that will be called for each request
 *		on @q. @fn will be called as follows: @fn(rq, @priv) where
 *		rq is a pointer to a request. Return true to continue
 *		iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 *
 * Note: if @q->tag_set is shared with other request queues then the whole
 * shared tag map is walked, but @fn is only invoked for requests associated
 * with @q (bt_iter() filters on rq->q).
 */
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
		void *priv)
{
	/*
	 * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and hctx_table
	 * while the queue is frozen. So we can use q_usage_counter to avoid
	 * racing with it.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	if (blk_mq_is_shared_tags(q->tag_set->flags)) {
		struct blk_mq_tags *tags = q->tag_set->shared_tags;
		struct sbitmap_queue *bresv = &tags->breserved_tags;
		struct sbitmap_queue *btags = &tags->bitmap_tags;

		if (tags->nr_reserved_tags)
			bt_for_each(NULL, q, bresv, fn, priv, true);
		bt_for_each(NULL, q, btags, fn, priv, false);
	} else {
		struct blk_mq_hw_ctx *hctx;
		unsigned long i;

		queue_for_each_hw_ctx(q, hctx, i) {
			struct blk_mq_tags *tags = hctx->tags;
			struct sbitmap_queue *bresv = &tags->breserved_tags;
			struct sbitmap_queue *btags = &tags->bitmap_tags;

			/*
			 * If no software queues are currently mapped to this
			 * hardware queue, there's nothing to check
			 */
			if (!blk_mq_hw_queue_mapped(hctx))
				continue;

			if (tags->nr_reserved_tags)
				bt_for_each(hctx, q, bresv, fn, priv, true);
			bt_for_each(hctx, q, btags, fn, priv, false);
		}
	}
	blk_queue_exit(q);
}
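
/*
 * Illustrative usage sketch (hypothetical helpers, not upstream code):
 * count the driver-tagged requests visible on one request queue via
 * blk_mq_queue_tag_busy_iter().
 */
static bool example_count_queue_rqs(struct request *rq, void *data)
{
	unsigned int *count = data;

	(*count)++;
	return true;
}

static __maybe_unused unsigned int
example_queue_busy_count(struct request_queue *q)
{
	unsigned int count = 0;

	blk_mq_queue_tag_busy_iter(q, example_count_queue_rqs, &count);
	return count;
}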

static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
		    bool round_robin, int node)
{
	return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
				       node);
}

int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags,
			struct sbitmap_queue *breserved_tags,
			unsigned int queue_depth, unsigned int reserved,
			int node, int alloc_policy)
{
	unsigned int depth = queue_depth - reserved;
	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

	if (bt_alloc(bitmap_tags, depth, round_robin, node))
		return -ENOMEM;
	if (bt_alloc(breserved_tags, reserved, round_robin, node))
		goto free_bitmap_tags;

	return 0;

free_bitmap_tags:
	sbitmap_queue_free(bitmap_tags);
	return -ENOMEM;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags,
				     int node, int alloc_policy)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;
	spin_lock_init(&tags->lock);

	if (blk_mq_init_bitmaps(&tags->bitmap_tags, &tags->breserved_tags,
				total_tags, reserved_tags, node,
				alloc_policy) < 0) {
		kfree(tags);
		return NULL;
	}
	return tags;
}
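
/*
 * Illustrative usage sketch (hypothetical values, not upstream code):
 * create a tag map with 64 total tags, 2 of them reserved, using FIFO
 * tag allocation; pair it with blk_mq_free_tags() below.
 */
static __maybe_unused struct blk_mq_tags *example_make_tags(void)
{
	return blk_mq_init_tags(64, 2, NUMA_NO_NODE, BLK_TAG_ALLOC_FIFO);
}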

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	sbitmap_queue_free(&tags->bitmap_tags);
	sbitmap_queue_free(&tags->breserved_tags);
	kfree(tags);
}

int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
			    struct blk_mq_tags **tagsptr, unsigned int tdepth,
			    bool can_grow)
{
	struct blk_mq_tags *tags = *tagsptr;

	if (tdepth <= tags->nr_reserved_tags)
		return -EINVAL;

	/*
	 * If we are allowed to grow beyond the original size, allocate
	 * a new set of tags before freeing the old one.
	 */
	if (tdepth > tags->nr_tags) {
		struct blk_mq_tag_set *set = hctx->queue->tag_set;
		struct blk_mq_tags *new;

		if (!can_grow)
			return -EINVAL;

		/*
		 * We need some sort of upper limit, set it high enough that
		 * no valid use cases should require more.
		 */
		if (tdepth > MAX_SCHED_RQ)
			return -EINVAL;

		/*
		 * Only the sbitmap needs resizing since we allocated the max
		 * initially.
		 */
		if (blk_mq_is_shared_tags(set->flags))
			return 0;

		new = blk_mq_alloc_map_and_rqs(set, hctx->queue_num, tdepth);
		if (!new)
			return -ENOMEM;

		blk_mq_free_map_and_rqs(set, *tagsptr, hctx->queue_num);
		*tagsptr = new;
	} else {
		/*
		 * Don't need (or can't) update reserved tags here, they
		 * remain static and should never need resizing.
		 */
		sbitmap_queue_resize(&tags->bitmap_tags,
				tdepth - tags->nr_reserved_tags);
	}

	return 0;
}
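
/*
 * Worked example for the resize branch above (illustrative numbers):
 * shrinking a map created with nr_tags = 64 and nr_reserved_tags = 4 to
 * tdepth = 32 calls sbitmap_queue_resize(&tags->bitmap_tags, 28), i.e.
 * tdepth minus the reserved tags; the reserved bitmap itself is never
 * resized.
 */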

void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set, unsigned int size)
{
	struct blk_mq_tags *tags = set->shared_tags;

	sbitmap_queue_resize(&tags->bitmap_tags, size - set->reserved_tags);
}

void blk_mq_tag_update_sched_shared_tags(struct request_queue *q)
{
	sbitmap_queue_resize(&q->sched_shared_tags->bitmap_tags,
			     q->nr_requests - q->tag_set->reserved_tags);
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function that returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
	return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);
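
/*
 * Illustrative decode sketch: the hardware queue index and the per-queue
 * tag can be recovered from a unique tag with the helpers declared in
 * <linux/blk-mq.h>.
 */
static __maybe_unused void example_decode_unique_tag(struct request *rq)
{
	u32 unique = blk_mq_unique_tag(rq);
	u16 hwq = blk_mq_unique_tag_to_hwq(unique);	/* upper bits */
	u16 tag = blk_mq_unique_tag_to_tag(unique);	/* lower bits */

	pr_debug("hwq=%u tag=%u\n", hwq, tag);
}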