// SPDX-License-Identifier: GPL-2.0
/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/blk-mq.h>
#include <linux/delay.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

/*
 * If a previously inactive queue goes active, bump the active user count.
 * We need to do this before trying to allocate a driver tag, so that even
 * if the first allocation attempt fails, the other shared-tag users can
 * reserve budget for it.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		atomic_inc(&hctx->tags->active_queues);

	return true;
}
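
/*
 * Note (added for clarity, not in the original source): callers normally
 * reach this through the blk_mq_tag_busy() wrapper in blk-mq-tag.h, which
 * checks BLK_MQ_F_TAG_SHARED first, so the accounting above only runs for
 * shared tag maps.
 */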

/*
 * Wake up all waiters that are potentially sleeping on tags.
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
	sbitmap_queue_wake_all(&tags->bitmap_tags);
	if (include_reserve)
		sbitmap_queue_wake_all(&tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return;

	atomic_dec(&tags->active_queues);

	blk_mq_tag_wakeup_all(tags, false);
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return true;
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->sb.depth == 1)
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return atomic_read(&hctx->nr_active) < depth;
}
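
/*
 * Worked example (illustrative, not from the original source): with a
 * shared map of 128 tags and 3 active queues, each queue may use up to
 * depth = max((128 + 3 - 1) / 3, 4U) = 43 tags. With 64 active queues
 * the fair share rounds to 2, so the 4U floor is what guarantees every
 * queue can still keep a few requests in flight.
 */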

static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
			    struct sbitmap_queue *bt)
{
	if (!(data->flags & BLK_MQ_REQ_INTERNAL) &&
	    !hctx_may_queue(data->hctx, bt))
		return BLK_MQ_NO_TAG;
	if (data->shallow_depth)
		return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
	else
		return __sbitmap_queue_get(bt);
}

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct sbitmap_queue *bt;
	struct sbq_wait_state *ws;
	DEFINE_SBQ_WAIT(wait);
	unsigned int tag_offset;
	int tag;

	if (data->flags & BLK_MQ_REQ_RESERVED) {
		if (unlikely(!tags->nr_reserved_tags)) {
			WARN_ON_ONCE(1);
			return BLK_MQ_NO_TAG;
		}
		bt = &tags->breserved_tags;
		tag_offset = 0;
	} else {
		bt = &tags->bitmap_tags;
		tag_offset = tags->nr_reserved_tags;
	}

	tag = __blk_mq_get_tag(data, bt);
	if (tag != BLK_MQ_NO_TAG)
		goto found_tag;

	if (data->flags & BLK_MQ_REQ_NOWAIT)
		return BLK_MQ_NO_TAG;

	ws = bt_wait_ptr(bt, data->hctx);
	do {
		struct sbitmap_queue *bt_prev;

		/*
		 * We're out of tags on this hardware queue, kick any
		 * pending IO submits before going to sleep waiting for
		 * some to complete.
		 */
		blk_mq_run_hw_queue(data->hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __blk_mq_get_tag(data, bt);
		if (tag != BLK_MQ_NO_TAG)
			break;

		sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);

		tag = __blk_mq_get_tag(data, bt);
		if (tag != BLK_MQ_NO_TAG)
			break;

		bt_prev = bt;
		io_schedule();

		sbitmap_finish_wait(bt, ws, &wait);

		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
					      data->ctx);
		tags = blk_mq_tags_from_data(data);
		if (data->flags & BLK_MQ_REQ_RESERVED)
			bt = &tags->breserved_tags;
		else
			bt = &tags->bitmap_tags;

		/*
		 * If the destination hw queue has changed, issue a fake
		 * wake-up on the previous queue to compensate for the missed
		 * wake-up, so other allocations on the previous queue won't
		 * be starved.
		 */
		if (bt != bt_prev)
			sbitmap_queue_wake_up(bt_prev);

		ws = bt_wait_ptr(bt, data->hctx);
	} while (1);

	sbitmap_finish_wait(bt, ws, &wait);

found_tag:
	/*
	 * Give up this allocation if the hctx is inactive. The caller will
	 * retry on an active hctx.
	 */
	if (unlikely(test_bit(BLK_MQ_S_INACTIVE, &data->hctx->state))) {
		blk_mq_put_tag(tags, data->ctx, tag + tag_offset);
		return BLK_MQ_NO_TAG;
	}
	return tag + tag_offset;
}
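
/*
 * Note on the returned value (added for clarity, not in the original
 * source): reserved tags occupy driver-visible values [0, nr_reserved_tags)
 * and regular tags follow them, so with nr_reserved_tags == 1 a regular
 * sbitmap index of 0 is returned as tag 1. blk_mq_put_tag() below reverses
 * this offset before clearing the sbitmap bit.
 */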

void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
		    unsigned int tag)
{
	if (!blk_mq_tag_is_reserved(tags, tag)) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
	} else {
		BUG_ON(tag >= tags->nr_reserved_tags);
		sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
	}
}

struct bt_iter_data {
	struct blk_mq_hw_ctx *hctx;
	busy_iter_fn *fn;
	void *data;
	bool reserved;
};

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_iter_data *iter_data = data;
	struct blk_mq_hw_ctx *hctx = iter_data->hctx;
	struct blk_mq_tags *tags = hctx->tags;
	bool reserved = iter_data->reserved;
	struct request *rq;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;
	rq = tags->rqs[bitnr];

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	if (rq && rq->q == hctx->queue)
		return iter_data->fn(hctx, rq, iter_data->data, reserved);
	return true;
}

/**
 * bt_for_each - iterate over the requests associated with a hardware queue
 * @hctx:	Hardware queue to examine.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each request
 *		associated with @hctx that has been assigned a driver tag.
 *		@fn will be called as follows: @fn(@hctx, rq, @data, @reserved)
 *		where rq is a pointer to a request. Return true to continue
 *		iterating tags, false to stop.
 * @data:	Will be passed as third argument to @fn.
 * @reserved:	Indicates whether @bt is the breserved_tags member or the
 *		bitmap_tags member of struct blk_mq_tags.
 */
static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
			busy_iter_fn *fn, void *data, bool reserved)
{
	struct bt_iter_data iter_data = {
		.hctx = hctx,
		.fn = fn,
		.data = data,
		.reserved = reserved,
	};

	sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

struct bt_tags_iter_data {
	struct blk_mq_tags *tags;
	busy_tag_iter_fn *fn;
	void *data;
	unsigned int flags;
};

#define BT_TAG_ITER_RESERVED	(1 << 0)
#define BT_TAG_ITER_STARTED	(1 << 1)

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_tags_iter_data *iter_data = data;
	struct blk_mq_tags *tags = iter_data->tags;
	bool reserved = iter_data->flags & BT_TAG_ITER_RESERVED;
	struct request *rq;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	rq = tags->rqs[bitnr];
	if (!rq)
		return true;
	if ((iter_data->flags & BT_TAG_ITER_STARTED) &&
	    !blk_mq_request_started(rq))
		return true;
	return iter_data->fn(rq, iter_data->data, reserved);
}

/**
 * bt_tags_for_each - iterate over the requests in a tag map
 * @tags:	Tag map to iterate over.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @data,
 *		@reserved) where rq is a pointer to a request. Return true
 *		to continue iterating tags, false to stop.
 * @data:	Will be passed as second argument to @fn.
 * @flags:	BT_TAG_ITER_*
 */
static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
			     busy_tag_iter_fn *fn, void *data, unsigned int flags)
{
	struct bt_tags_iter_data iter_data = {
		.tags = tags,
		.fn = fn,
		.data = data,
		.flags = flags,
	};

	if (tags->rqs)
		sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

static void __blk_mq_all_tag_iter(struct blk_mq_tags *tags,
		busy_tag_iter_fn *fn, void *priv, unsigned int flags)
{
	WARN_ON_ONCE(flags & BT_TAG_ITER_RESERVED);

	if (tags->nr_reserved_tags)
		bt_tags_for_each(tags, &tags->breserved_tags, fn, priv,
				 flags | BT_TAG_ITER_RESERVED);
	bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, flags);
}

/**
 * blk_mq_all_tag_iter - iterate over all requests in a tag map
 * @tags:	Tag map to iterate over.
 * @fn:		Pointer to the function that will be called for each
 *		request. @fn will be called as follows: @fn(rq, @priv,
 *		reserved) where rq is a pointer to a request. 'reserved'
 *		indicates whether or not @rq is a reserved request. Return
 *		true to continue iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 */
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv)
{
	return __blk_mq_all_tag_iter(tags, fn, priv, 0);
}

/**
 * blk_mq_tagset_busy_iter - iterate over all started requests in a tag set
 * @tagset:	Tag set to iterate over.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @priv,
 *		reserved) where rq is a pointer to a request. 'reserved'
 *		indicates whether or not @rq is a reserved request. Return
 *		true to continue iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 */
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv)
{
	int i;

	for (i = 0; i < tagset->nr_hw_queues; i++) {
		if (tagset->tags && tagset->tags[i])
			__blk_mq_all_tag_iter(tagset->tags[i], fn, priv,
					      BT_TAG_ITER_STARTED);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
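
/*
 * Illustrative usage (hypothetical driver code, not part of this file):
 * counting started requests across a tag set.
 *
 *	static bool my_count_inflight(struct request *rq, void *data,
 *				      bool reserved)
 *	{
 *		unsigned int *inflight = data;
 *
 *		(*inflight)++;
 *		return true;
 *	}
 *
 *	unsigned int inflight = 0;
 *
 *	blk_mq_tagset_busy_iter(&set, my_count_inflight, &inflight);
 */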

static bool blk_mq_tagset_count_completed_rqs(struct request *rq,
		void *data, bool reserved)
{
	unsigned *count = data;

	if (blk_mq_request_completed(rq))
		(*count)++;
	return true;
}

/**
 * blk_mq_tagset_wait_completed_request - wait until all completed requests'
 * completion functions have run
 * @tagset:	Tag set to drain completed requests from
 *
 * Note: This function has to be run after all IO queues are shutdown
 */
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset)
{
	while (true) {
		unsigned count = 0;

		blk_mq_tagset_busy_iter(tagset,
				blk_mq_tagset_count_completed_rqs, &count);
		if (!count)
			break;
		msleep(5);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_wait_completed_request);
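
/*
 * Illustrative shutdown ordering (hypothetical sketch, modelled on callers
 * such as nvme): cancel outstanding requests first, then wait for their
 * completion handlers to finish before tearing down driver resources.
 *
 *	blk_mq_tagset_busy_iter(&set, my_cancel_request, NULL);
 *	blk_mq_tagset_wait_completed_request(&set);
 */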

/**
 * blk_mq_queue_tag_busy_iter - iterate over all requests with a driver tag
 * @q:		Request queue to examine.
 * @fn:		Pointer to the function that will be called for each request
 *		on @q. @fn will be called as follows: @fn(hctx, rq, @priv,
 *		reserved) where rq is a pointer to a request and hctx points
 *		to the hardware queue associated with the request. 'reserved'
 *		indicates whether or not @rq is a reserved request.
 * @priv:	Will be passed as third argument to @fn.
 *
 * Note: if @q->tag_set is shared with other request queues then @fn will be
 * called for all requests on all queues that share that tag set and not only
 * for requests associated with @q.
 */
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
		void *priv)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	/*
	 * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and queue_hw_ctx
	 * while the queue is frozen. So we can use q_usage_counter to avoid
	 * racing with it. __blk_mq_update_nr_hw_queues() uses
	 * synchronize_rcu() to ensure this function has left the critical
	 * section below.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	queue_for_each_hw_ctx(q, hctx, i) {
		struct blk_mq_tags *tags = hctx->tags;

		/*
		 * If no software queues are currently mapped to this
		 * hardware queue, there's nothing to check
		 */
		if (!blk_mq_hw_queue_mapped(hctx))
			continue;

		if (tags->nr_reserved_tags)
			bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
		bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
	}
	blk_queue_exit(q);
}

static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
		    bool round_robin, int node)
{
	return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
				       node);
}

static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
						   int node, int alloc_policy)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

	if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node))
		goto free_tags;
	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, round_robin,
		     node))
		goto free_bitmap_tags;

	return tags;
free_bitmap_tags:
	sbitmap_queue_free(&tags->bitmap_tags);
free_tags:
	kfree(tags);
	return NULL;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags,
				     int node, int alloc_policy)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;

	return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
}
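
/*
 * Illustrative usage (hypothetical values, not from the original source):
 *
 *	struct blk_mq_tags *tags;
 *
 *	tags = blk_mq_init_tags(256, 1, NUMA_NO_NODE, BLK_TAG_ALLOC_FIFO);
 *	if (!tags)
 *		return -ENOMEM;
 *	...
 *	blk_mq_free_tags(tags);
 *
 * This would create a map with one reserved tag and 255 regular tags,
 * allocated FIFO rather than round-robin.
 */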

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	sbitmap_queue_free(&tags->bitmap_tags);
	sbitmap_queue_free(&tags->breserved_tags);
	kfree(tags);
}

int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
			    struct blk_mq_tags **tagsptr, unsigned int tdepth,
			    bool can_grow)
{
	struct blk_mq_tags *tags = *tagsptr;

	if (tdepth <= tags->nr_reserved_tags)
		return -EINVAL;

	/*
	 * If we are allowed to grow beyond the original size, allocate
	 * a new set of tags before freeing the old one.
	 */
	if (tdepth > tags->nr_tags) {
		struct blk_mq_tag_set *set = hctx->queue->tag_set;
		struct blk_mq_tags *new;
		bool ret;

		if (!can_grow)
			return -EINVAL;

		/*
		 * We need some sort of upper limit, set it high enough that
		 * no valid use cases should require more.
		 */
		if (tdepth > 16 * BLKDEV_MAX_RQ)
			return -EINVAL;

		new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth,
				tags->nr_reserved_tags);
		if (!new)
			return -ENOMEM;
		ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
		if (ret) {
			blk_mq_free_rq_map(new);
			return -ENOMEM;
		}

		blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
		blk_mq_free_rq_map(*tagsptr);
		*tagsptr = new;
	} else {
		/*
		 * Don't need (or can't) update reserved tags here, they
		 * remain static and should never need resizing.
		 */
		sbitmap_queue_resize(&tags->bitmap_tags,
				tdepth - tags->nr_reserved_tags);
	}

	return 0;
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function, which returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
	return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);
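
/*
 * Illustrative decode (the companion helpers blk_mq_unique_tag_to_hwq()
 * and blk_mq_unique_tag_to_tag() in include/linux/blk-mq.h do this):
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	u16 hwq = unique >> BLK_MQ_UNIQUE_TAG_BITS;
 *	u16 tag = unique & BLK_MQ_UNIQUE_TAG_MASK;
 */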