// SPDX-License-Identifier: GPL-2.0
/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/delay.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"

/*
 * Recalculate the wakeup batch when tags are shared across hctxs. Each
 * active user of a shared tag map is limited to roughly nr_tags / users
 * tags, so the sbitmap wake batch must shrink accordingly, or a waiter
 * could sleep forever waiting for a batch of frees that can never
 * accumulate.
 */
static void blk_mq_update_wake_batch(struct blk_mq_tags *tags,
		unsigned int users)
{
	if (!users)
		return;

	sbitmap_queue_recalculate_wake_batch(&tags->bitmap_tags,
			users);
	sbitmap_queue_recalculate_wake_batch(&tags->breserved_tags,
			users);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 * We need to do this before trying to allocate a driver tag, so that even
 * if the first allocation attempt fails, the other shared-tag users can
 * reserve budget for it.
 */
void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	unsigned int users;
	struct blk_mq_tags *tags = hctx->tags;

	/*
	 * Calling test_bit() prior to test_and_set_bit() is intentional:
	 * it avoids dirtying the cacheline if the queue is already active.
	 */
	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) ||
		    test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
			return;
	} else {
		if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) ||
		    test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return;
	}

	spin_lock_irq(&tags->lock);
	users = tags->active_queues + 1;
	WRITE_ONCE(tags->active_queues, users);
	blk_mq_update_wake_batch(tags, users);
	spin_unlock_irq(&tags->lock);
}
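
/*
 * Editor's sketch (not part of the original file): the test_bit() before
 * test_and_set_bit() pattern used above, in isolation. The plain read keeps
 * the cacheline in the shared state on the fast path; only the first caller
 * to observe the bit clear pays for the atomic read-modify-write. The
 * function name is hypothetical.
 */
static __maybe_unused bool example_mark_active(unsigned long *state)
{
	/* Cheap read first: no cacheline dirtying if the bit is already set. */
	if (test_bit(0, state) || test_and_set_bit(0, state))
		return false;	/* someone else already marked it active */
	return true;		/* we performed the 0 -> 1 transition */
}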

/*
 * Wake up all tasks potentially sleeping on tags.
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
	sbitmap_queue_wake_all(&tags->bitmap_tags);
	if (include_reserve)
		sbitmap_queue_wake_all(&tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;
	unsigned int users;

	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		if (!test_and_clear_bit(QUEUE_FLAG_HCTX_ACTIVE,
					&q->queue_flags))
			return;
	} else {
		if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return;
	}

	spin_lock_irq(&tags->lock);
	users = tags->active_queues - 1;
	WRITE_ONCE(tags->active_queues, users);
	blk_mq_update_wake_batch(tags, users);
	spin_unlock_irq(&tags->lock);

	blk_mq_tag_wakeup_all(tags, false);
}

static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
			    struct sbitmap_queue *bt)
{
	if (!data->q->elevator && !(data->flags & BLK_MQ_REQ_RESERVED) &&
	    !hctx_may_queue(data->hctx, bt))
		return BLK_MQ_NO_TAG;

	if (data->shallow_depth)
		return sbitmap_queue_get_shallow(bt, data->shallow_depth);
	else
		return __sbitmap_queue_get(bt);
}

unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags,
			      unsigned int *offset)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct sbitmap_queue *bt = &tags->bitmap_tags;
	unsigned long ret;

	if (data->shallow_depth || data->flags & BLK_MQ_REQ_RESERVED ||
	    data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		return 0;
	ret = __sbitmap_queue_get_batch(bt, nr_tags, offset);
	*offset += tags->nr_reserved_tags;
	return ret;
}
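
/*
 * Editor's sketch (not part of the original file): consuming the bitmask
 * returned by blk_mq_get_tags(). Bit i in the mask corresponds to driver
 * tag (offset + i), mirroring how the batch allocation path in blk-mq.c
 * walks the mask. The function name is hypothetical.
 */
static __maybe_unused void example_walk_tag_batch(struct blk_mq_alloc_data *data)
{
	unsigned int offset, i;
	unsigned long mask = blk_mq_get_tags(data, 4, &offset);

	for_each_set_bit(i, &mask, BITS_PER_LONG) {
		unsigned int tag = offset + i;	/* tag visible to the driver */

		pr_debug("allocated tag %u\n", tag);
	}
}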

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct sbitmap_queue *bt;
	struct sbq_wait_state *ws;
	DEFINE_SBQ_WAIT(wait);
	unsigned int tag_offset;
	int tag;

	if (data->flags & BLK_MQ_REQ_RESERVED) {
		if (unlikely(!tags->nr_reserved_tags)) {
			WARN_ON_ONCE(1);
			return BLK_MQ_NO_TAG;
		}
		bt = &tags->breserved_tags;
		tag_offset = 0;
	} else {
		bt = &tags->bitmap_tags;
		tag_offset = tags->nr_reserved_tags;
	}

	tag = __blk_mq_get_tag(data, bt);
	if (tag != BLK_MQ_NO_TAG)
		goto found_tag;

	if (data->flags & BLK_MQ_REQ_NOWAIT)
		return BLK_MQ_NO_TAG;

	ws = bt_wait_ptr(bt, data->hctx);
	do {
		struct sbitmap_queue *bt_prev;

		/*
		 * We're out of tags on this hardware queue, kick any
		 * pending IO submits before going to sleep waiting for
		 * some to complete.
		 */
		blk_mq_run_hw_queue(data->hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __blk_mq_get_tag(data, bt);
		if (tag != BLK_MQ_NO_TAG)
			break;

		sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);

		tag = __blk_mq_get_tag(data, bt);
		if (tag != BLK_MQ_NO_TAG)
			break;

		bt_prev = bt;
		io_schedule();

		sbitmap_finish_wait(bt, ws, &wait);

		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
						data->ctx);
		tags = blk_mq_tags_from_data(data);
		if (data->flags & BLK_MQ_REQ_RESERVED)
			bt = &tags->breserved_tags;
		else
			bt = &tags->bitmap_tags;

		/*
		 * If the destination hw queue changed, issue a fake wakeup
		 * on the previous queue to compensate for the missed wakeup,
		 * so other allocations on the previous queue won't starve.
		 */
		if (bt != bt_prev)
			sbitmap_queue_wake_up(bt_prev, 1);

		ws = bt_wait_ptr(bt, data->hctx);
	} while (1);

	sbitmap_finish_wait(bt, ws, &wait);

found_tag:
	/*
	 * Give up this allocation if the hctx is inactive. The caller will
	 * retry on an active hctx.
	 */
	if (unlikely(test_bit(BLK_MQ_S_INACTIVE, &data->hctx->state))) {
		blk_mq_put_tag(tags, data->ctx, tag + tag_offset);
		return BLK_MQ_NO_TAG;
	}
	return tag + tag_offset;
}
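
/*
 * Editor's sketch (not part of the original file): the prepare/recheck/
 * io_schedule() pattern used by blk_mq_get_tag() above, reduced to its core.
 * The second allocation attempt after sbitmap_prepare_to_wait() closes the
 * race with a free that happened before we were on the waitqueue. The
 * function name is hypothetical.
 */
static __maybe_unused int example_wait_for_tag(struct sbitmap_queue *bt,
					       struct blk_mq_hw_ctx *hctx)
{
	struct sbq_wait_state *ws = bt_wait_ptr(bt, hctx);
	DEFINE_SBQ_WAIT(wait);
	int nr;

	do {
		nr = __sbitmap_queue_get(bt);
		if (nr >= 0)
			break;
		sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);
		/* Recheck after queueing ourselves, then sleep. */
		nr = __sbitmap_queue_get(bt);
		if (nr >= 0)
			break;
		io_schedule();
		sbitmap_finish_wait(bt, ws, &wait);
	} while (1);
	sbitmap_finish_wait(bt, ws, &wait);
	return nr;
}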

void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
		    unsigned int tag)
{
	if (!blk_mq_tag_is_reserved(tags, tag)) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
	} else {
		sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
	}
}

void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags)
{
	sbitmap_queue_clear_batch(&tags->bitmap_tags, tags->nr_reserved_tags,
				  tag_array, nr_tags);
}

struct bt_iter_data {
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	busy_tag_iter_fn *fn;
	void *data;
	bool reserved;
};

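/*
 * Editor's note (not part of the original file): ->rqs[] entries may be
 * freed or reused while an iterator runs. Taking tags->lock and using
 * req_ref_inc_not_zero() ensures we only return a request whose refcount
 * was still nonzero, and the rq->tag != bitnr check filters out a slot
 * that already holds a stale pointer.
 */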
static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
		unsigned int bitnr)
{
	struct request *rq;
	unsigned long flags;

	spin_lock_irqsave(&tags->lock, flags);
	rq = tags->rqs[bitnr];
	if (!rq || rq->tag != bitnr || !req_ref_inc_not_zero(rq))
		rq = NULL;
	spin_unlock_irqrestore(&tags->lock, flags);
	return rq;
}

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_iter_data *iter_data = data;
	struct blk_mq_hw_ctx *hctx = iter_data->hctx;
	struct request_queue *q = iter_data->q;
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_tags *tags;
	struct request *rq;
	bool ret = true;

	if (blk_mq_is_shared_tags(set->flags))
		tags = set->shared_tags;
	else
		tags = hctx->tags;

	if (!iter_data->reserved)
		bitnr += tags->nr_reserved_tags;
	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	rq = blk_mq_find_and_get_req(tags, bitnr);
	if (!rq)
		return true;

	if (rq->q == q && (!hctx || rq->mq_hctx == hctx))
		ret = iter_data->fn(rq, iter_data->data);
	blk_mq_put_rq_ref(rq);
	return ret;
}

/**
 * bt_for_each - iterate over the requests associated with a hardware queue
 * @hctx:	Hardware queue to examine.
 * @q:		Request queue to examine.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each request
 *		associated with @hctx that has been assigned a driver tag.
 *		@fn will be called as follows: @fn(rq, @data) where rq is a
 *		pointer to a request. Return true to continue iterating
 *		tags, false to stop.
 * @data:	Will be passed as second argument to @fn.
 * @reserved:	Indicates whether @bt is the breserved_tags member or the
 *		bitmap_tags member of struct blk_mq_tags.
 */
static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct request_queue *q,
			struct sbitmap_queue *bt, busy_tag_iter_fn *fn,
			void *data, bool reserved)
{
	struct bt_iter_data iter_data = {
		.hctx = hctx,
		.fn = fn,
		.data = data,
		.reserved = reserved,
		.q = q,
	};

	sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

struct bt_tags_iter_data {
	struct blk_mq_tags *tags;
	busy_tag_iter_fn *fn;
	void *data;
	unsigned int flags;
};

#define BT_TAG_ITER_RESERVED		(1 << 0)
#define BT_TAG_ITER_STARTED		(1 << 1)
#define BT_TAG_ITER_STATIC_RQS		(1 << 2)

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_tags_iter_data *iter_data = data;
	struct blk_mq_tags *tags = iter_data->tags;
	struct request *rq;
	bool ret = true;
	bool iter_static_rqs = !!(iter_data->flags & BT_TAG_ITER_STATIC_RQS);

	if (!(iter_data->flags & BT_TAG_ITER_RESERVED))
		bitnr += tags->nr_reserved_tags;

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	if (iter_static_rqs)
		rq = tags->static_rqs[bitnr];
	else
		rq = blk_mq_find_and_get_req(tags, bitnr);
	if (!rq)
		return true;

	if (!(iter_data->flags & BT_TAG_ITER_STARTED) ||
	    blk_mq_request_started(rq))
		ret = iter_data->fn(rq, iter_data->data);
	if (!iter_static_rqs)
		blk_mq_put_rq_ref(rq);
	return ret;
}

/**
 * bt_tags_for_each - iterate over the requests in a tag map
 * @tags:	Tag map to iterate over.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @data) where
 *		rq is a pointer to a request. Return true to continue
 *		iterating tags, false to stop.
 * @data:	Will be passed as second argument to @fn.
 * @flags:	BT_TAG_ITER_*
 */
static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
			     busy_tag_iter_fn *fn, void *data, unsigned int flags)
{
	struct bt_tags_iter_data iter_data = {
		.tags = tags,
		.fn = fn,
		.data = data,
		.flags = flags,
	};

	if (tags->rqs)
		sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

static void __blk_mq_all_tag_iter(struct blk_mq_tags *tags,
		busy_tag_iter_fn *fn, void *priv, unsigned int flags)
{
	WARN_ON_ONCE(flags & BT_TAG_ITER_RESERVED);

	if (tags->nr_reserved_tags)
		bt_tags_for_each(tags, &tags->breserved_tags, fn, priv,
				 flags | BT_TAG_ITER_RESERVED);
	bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, flags);
}

/**
 * blk_mq_all_tag_iter - iterate over all requests in a tag map
 * @tags:	Tag map to iterate over.
 * @fn:		Pointer to the function that will be called for each
 *		request. @fn will be called as follows: @fn(rq, @priv) where
 *		rq is a pointer to a request. Return true to continue
 *		iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 *
 * Caller has to pass the tag map from which requests are allocated.
 */
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv)
{
	__blk_mq_all_tag_iter(tags, fn, priv, BT_TAG_ITER_STATIC_RQS);
}

/**
 * blk_mq_tagset_busy_iter - iterate over all started requests in a tag set
 * @tagset:	Tag set to iterate over.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @priv) where
 *		rq is a pointer to a request. Return true to continue
 *		iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 *
 * We grab one request reference before calling @fn and release it after
 * @fn returns.
 */
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv)
{
	unsigned int flags = tagset->flags;
	int i, nr_tags;

	nr_tags = blk_mq_is_shared_tags(flags) ? 1 : tagset->nr_hw_queues;

	for (i = 0; i < nr_tags; i++) {
		if (tagset->tags && tagset->tags[i])
			__blk_mq_all_tag_iter(tagset->tags[i], fn, priv,
					      BT_TAG_ITER_STARTED);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
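
/*
 * Editor's sketch (not part of the original file): a minimal
 * busy_tag_iter_fn as a driver might pass to blk_mq_tagset_busy_iter(),
 * modeled on blk_mq_tagset_count_completed_rqs() below. The function name
 * is hypothetical. Usage:
 *
 *	blk_mq_tagset_busy_iter(set, example_count_started, &count);
 */
static __maybe_unused bool example_count_started(struct request *rq, void *data)
{
	unsigned int *count = data;

	if (blk_mq_request_started(rq))
		(*count)++;
	return true;	/* keep iterating */
}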

static bool blk_mq_tagset_count_completed_rqs(struct request *rq, void *data)
{
	unsigned *count = data;

	if (blk_mq_request_completed(rq))
		(*count)++;
	return true;
}

/**
 * blk_mq_tagset_wait_completed_request - Wait until all scheduled request
 * completions have finished.
 * @tagset:	Tag set to drain completed requests from.
 *
 * Note: This function has to be run after all IO queues are shut down.
 */
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset)
{
	while (true) {
		unsigned count = 0;

		blk_mq_tagset_busy_iter(tagset,
				blk_mq_tagset_count_completed_rqs, &count);
		if (!count)
			break;
		msleep(5);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_wait_completed_request);

/**
 * blk_mq_queue_tag_busy_iter - iterate over all requests with a driver tag
 * @q:		Request queue to examine.
 * @fn:		Pointer to the function that will be called for each request
 *		on @q. @fn will be called as follows: @fn(rq, @priv) where rq
 *		is a pointer to a request. Return true to continue iterating
 *		tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 *
 * Note: if @q->tag_set is shared with other request queues then @fn will be
 * called for all requests on all queues that share that tag set and not only
 * for requests associated with @q.
 */
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
		void *priv)
{
	/*
	 * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and hctx_table
	 * while the queue is frozen. So we can use q_usage_counter to avoid
	 * racing with it.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	if (blk_mq_is_shared_tags(q->tag_set->flags)) {
		struct blk_mq_tags *tags = q->tag_set->shared_tags;
		struct sbitmap_queue *bresv = &tags->breserved_tags;
		struct sbitmap_queue *btags = &tags->bitmap_tags;

		if (tags->nr_reserved_tags)
			bt_for_each(NULL, q, bresv, fn, priv, true);
		bt_for_each(NULL, q, btags, fn, priv, false);
	} else {
		struct blk_mq_hw_ctx *hctx;
		unsigned long i;

		queue_for_each_hw_ctx(q, hctx, i) {
			struct blk_mq_tags *tags = hctx->tags;
			struct sbitmap_queue *bresv = &tags->breserved_tags;
			struct sbitmap_queue *btags = &tags->bitmap_tags;

			/*
			 * If no software queues are currently mapped to this
			 * hardware queue, there's nothing to check.
			 */
			if (!blk_mq_hw_queue_mapped(hctx))
				continue;

			if (tags->nr_reserved_tags)
				bt_for_each(hctx, q, bresv, fn, priv, true);
			bt_for_each(hctx, q, btags, fn, priv, false);
		}
	}
	blk_queue_exit(q);
}

static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
		    bool round_robin, int node)
{
	return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
				       node);
}

int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags,
			struct sbitmap_queue *breserved_tags,
			unsigned int queue_depth, unsigned int reserved,
			int node, int alloc_policy)
{
	unsigned int depth = queue_depth - reserved;
	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

	if (bt_alloc(bitmap_tags, depth, round_robin, node))
		return -ENOMEM;
	if (bt_alloc(breserved_tags, reserved, round_robin, node))
		goto free_bitmap_tags;

	return 0;

free_bitmap_tags:
	sbitmap_queue_free(bitmap_tags);
	return -ENOMEM;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags,
				     int node, int alloc_policy)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;
	spin_lock_init(&tags->lock);

	if (blk_mq_init_bitmaps(&tags->bitmap_tags, &tags->breserved_tags,
				total_tags, reserved_tags, node,
				alloc_policy) < 0) {
		kfree(tags);
		return NULL;
	}
	return tags;
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	sbitmap_queue_free(&tags->bitmap_tags);
	sbitmap_queue_free(&tags->breserved_tags);
	kfree(tags);
}

int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
			    struct blk_mq_tags **tagsptr, unsigned int tdepth,
			    bool can_grow)
{
	struct blk_mq_tags *tags = *tagsptr;

	if (tdepth <= tags->nr_reserved_tags)
		return -EINVAL;

	/*
	 * If we are allowed to grow beyond the original size, allocate
	 * a new set of tags before freeing the old one.
	 */
	if (tdepth > tags->nr_tags) {
		struct blk_mq_tag_set *set = hctx->queue->tag_set;
		struct blk_mq_tags *new;

		if (!can_grow)
			return -EINVAL;

		/*
		 * We need some sort of upper limit, set it high enough that
		 * no valid use cases should require more.
		 */
		if (tdepth > MAX_SCHED_RQ)
			return -EINVAL;

		/*
		 * Only the sbitmap needs resizing since we allocated the max
		 * initially.
		 */
		if (blk_mq_is_shared_tags(set->flags))
			return 0;

		new = blk_mq_alloc_map_and_rqs(set, hctx->queue_num, tdepth);
		if (!new)
			return -ENOMEM;

		blk_mq_free_map_and_rqs(set, *tagsptr, hctx->queue_num);
		*tagsptr = new;
	} else {
		/*
		 * Don't need (or can't) update reserved tags here, they
		 * remain static and should never need resizing.
		 */
		sbitmap_queue_resize(&tags->bitmap_tags,
				tdepth - tags->nr_reserved_tags);
	}

	return 0;
}

void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set, unsigned int size)
{
	struct blk_mq_tags *tags = set->shared_tags;

	sbitmap_queue_resize(&tags->bitmap_tags, size - set->reserved_tags);
}

void blk_mq_tag_update_sched_shared_tags(struct request_queue *q)
{
	sbitmap_queue_resize(&q->sched_shared_tags->bitmap_tags,
			     q->nr_requests - q->tag_set->reserved_tags);
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function that returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
	return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);
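
/*
 * Editor's sketch (not part of the original file): splitting a unique tag
 * back into its parts with the helpers from <linux/blk-mq.h>,
 * blk_mq_unique_tag_to_hwq() and blk_mq_unique_tag_to_tag(). The function
 * name is hypothetical.
 */
static __maybe_unused void example_decode_unique_tag(struct request *rq)
{
	u32 unique = blk_mq_unique_tag(rq);
	u16 hwq = blk_mq_unique_tag_to_hwq(unique);	/* upper bits */
	u16 tag = blk_mq_unique_tag_to_tag(unique);	/* lower bits */

	pr_debug("hwq %u, per-hwq tag %u\n", hwq, tag);
}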