block/blk-mq.c
75bb4625
JA
1/*
2 * Block multiqueue core code
3 *
4 * Copyright (C) 2013-2014 Jens Axboe
5 * Copyright (C) 2013-2014 Christoph Hellwig
6 */
320ae51f
JA
7#include <linux/kernel.h>
8#include <linux/module.h>
9#include <linux/backing-dev.h>
10#include <linux/bio.h>
11#include <linux/blkdev.h>
f75782e4 12#include <linux/kmemleak.h>
320ae51f
JA
13#include <linux/mm.h>
14#include <linux/init.h>
15#include <linux/slab.h>
16#include <linux/workqueue.h>
17#include <linux/smp.h>
18#include <linux/llist.h>
19#include <linux/list_sort.h>
20#include <linux/cpu.h>
21#include <linux/cache.h>
22#include <linux/sched/sysctl.h>
23#include <linux/delay.h>
aedcd72f 24#include <linux/crash_dump.h>
88c7b2b7 25#include <linux/prefetch.h>
320ae51f
JA
26
27#include <trace/events/block.h>
28
29#include <linux/blk-mq.h>
30#include "blk.h"
31#include "blk-mq.h"
32#include "blk-mq-tag.h"
cf43e6be 33#include "blk-stat.h"
87760e5e 34#include "blk-wbt.h"
bd166ef1 35#include "blk-mq-sched.h"
320ae51f
JA
36
37static DEFINE_MUTEX(all_q_mutex);
38static LIST_HEAD(all_q_list);
39
320ae51f
JA
40/*
41 * Check if any of the ctx's have pending work in this hardware queue
42 */
50e1dab8 43bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
320ae51f 44{
bd166ef1
JA
45 return sbitmap_any_bit_set(&hctx->ctx_map) ||
46 !list_empty_careful(&hctx->dispatch) ||
47 blk_mq_sched_has_work(hctx);
1429d7c9
JA
48}
49
320ae51f
JA
50/*
51 * Mark this ctx as having pending work in this hardware queue
52 */
53static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
54 struct blk_mq_ctx *ctx)
55{
88459642
OS
56 if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw))
57 sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw);
1429d7c9
JA
58}
59
60static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
61 struct blk_mq_ctx *ctx)
62{
88459642 63 sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
320ae51f
JA
64}
65
b4c6a028 66void blk_mq_freeze_queue_start(struct request_queue *q)
43a5e4e2 67{
4ecd4fef 68 int freeze_depth;
cddd5d17 69
4ecd4fef
CH
70 freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
71 if (freeze_depth == 1) {
3ef28e83 72 percpu_ref_kill(&q->q_usage_counter);
b94ec296 73 blk_mq_run_hw_queues(q, false);
cddd5d17 74 }
f3af020b 75}
b4c6a028 76EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
f3af020b
TH
77
78static void blk_mq_freeze_queue_wait(struct request_queue *q)
79{
3ef28e83 80 wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
43a5e4e2
ML
81}
82
f3af020b
TH
83/*
84 * Guarantee no request is in use, so we can change any data structure of
85 * the queue afterward.
86 */
3ef28e83 87void blk_freeze_queue(struct request_queue *q)
f3af020b 88{
3ef28e83
DW
89 /*
90 * In the !blk_mq case we are only calling this to kill the
91 * q_usage_counter, otherwise this increases the freeze depth
92 * and waits for it to return to zero. For this reason there is
93 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
94 * exported to drivers as the only user for unfreeze is blk_mq.
95 */
f3af020b
TH
96 blk_mq_freeze_queue_start(q);
97 blk_mq_freeze_queue_wait(q);
98}
3ef28e83
DW
99
100void blk_mq_freeze_queue(struct request_queue *q)
101{
102 /*
103 * ...just an alias to keep freeze and unfreeze actions balanced
104 * in the blk_mq_* namespace
105 */
106 blk_freeze_queue(q);
107}
c761d96b 108EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
f3af020b 109
b4c6a028 110void blk_mq_unfreeze_queue(struct request_queue *q)
320ae51f 111{
4ecd4fef 112 int freeze_depth;
320ae51f 113
4ecd4fef
CH
114 freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
115 WARN_ON_ONCE(freeze_depth < 0);
116 if (!freeze_depth) {
3ef28e83 117 percpu_ref_reinit(&q->q_usage_counter);
320ae51f 118 wake_up_all(&q->mq_freeze_wq);
add703fd 119 }
320ae51f 120}
b4c6a028 121EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
320ae51f 122
6a83e74d
BVA
123/**
124 * blk_mq_quiesce_queue() - wait until all ongoing queue_rq calls have finished
125 * @q: request queue.
126 *
 127 * Note: this function does not prevent the struct request end_io()
 128 * callback from being invoked. Nor does it prevent new queue_rq()
 129 * calls from occurring unless the queue has been stopped first.
130 */
131void blk_mq_quiesce_queue(struct request_queue *q)
132{
133 struct blk_mq_hw_ctx *hctx;
134 unsigned int i;
135 bool rcu = false;
136
137 blk_mq_stop_hw_queues(q);
138
139 queue_for_each_hw_ctx(q, hctx, i) {
140 if (hctx->flags & BLK_MQ_F_BLOCKING)
141 synchronize_srcu(&hctx->queue_rq_srcu);
142 else
143 rcu = true;
144 }
145 if (rcu)
146 synchronize_rcu();
147}
148EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
149
aed3ea94
JA
150void blk_mq_wake_waiters(struct request_queue *q)
151{
152 struct blk_mq_hw_ctx *hctx;
153 unsigned int i;
154
155 queue_for_each_hw_ctx(q, hctx, i)
156 if (blk_mq_hw_queue_mapped(hctx))
157 blk_mq_tag_wakeup_all(hctx->tags, true);
3fd5940c
KB
158
159 /*
160 * If we are called because the queue has now been marked as
161 * dying, we need to ensure that processes currently waiting on
162 * the queue are notified as well.
163 */
164 wake_up_all(&q->mq_freeze_wq);
aed3ea94
JA
165}
166
320ae51f
JA
167bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
168{
169 return blk_mq_has_free_tags(hctx->tags);
170}
171EXPORT_SYMBOL(blk_mq_can_queue);
172
2c3ad667
JA
173void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
174 struct request *rq, unsigned int op)
320ae51f 175{
af76e555
CH
176 INIT_LIST_HEAD(&rq->queuelist);
177 /* csd/requeue_work/fifo_time is initialized before use */
178 rq->q = q;
320ae51f 179 rq->mq_ctx = ctx;
ef295ecf 180 rq->cmd_flags = op;
e8064021
CH
181 if (blk_queue_io_stat(q))
182 rq->rq_flags |= RQF_IO_STAT;
af76e555
CH
183 /* do not touch atomic flags, it needs atomic ops against the timer */
184 rq->cpu = -1;
af76e555
CH
185 INIT_HLIST_NODE(&rq->hash);
186 RB_CLEAR_NODE(&rq->rb_node);
af76e555
CH
187 rq->rq_disk = NULL;
188 rq->part = NULL;
3ee32372 189 rq->start_time = jiffies;
af76e555
CH
190#ifdef CONFIG_BLK_CGROUP
191 rq->rl = NULL;
0fec08b4 192 set_start_time_ns(rq);
af76e555
CH
193 rq->io_start_time_ns = 0;
194#endif
195 rq->nr_phys_segments = 0;
196#if defined(CONFIG_BLK_DEV_INTEGRITY)
197 rq->nr_integrity_segments = 0;
198#endif
af76e555
CH
199 rq->special = NULL;
200 /* tag was already set */
201 rq->errors = 0;
af76e555 202 rq->extra_len = 0;
af76e555 203
af76e555 204 INIT_LIST_HEAD(&rq->timeout_list);
f6be4fb4
JA
205 rq->timeout = 0;
206
af76e555
CH
207 rq->end_io = NULL;
208 rq->end_io_data = NULL;
209 rq->next_rq = NULL;
210
ef295ecf 211 ctx->rq_dispatched[op_is_sync(op)]++;
320ae51f 212}
2c3ad667 213EXPORT_SYMBOL_GPL(blk_mq_rq_ctx_init);
320ae51f 214
2c3ad667
JA
215struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
216 unsigned int op)
5dee8577
CH
217{
218 struct request *rq;
219 unsigned int tag;
220
cb96a42c 221 tag = blk_mq_get_tag(data);
5dee8577 222 if (tag != BLK_MQ_TAG_FAIL) {
bd166ef1
JA
223 struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
224
225 rq = tags->static_rqs[tag];
5dee8577 226
bd166ef1
JA
227 if (data->flags & BLK_MQ_REQ_INTERNAL) {
228 rq->tag = -1;
229 rq->internal_tag = tag;
230 } else {
200e86b3
JA
231 if (blk_mq_tag_busy(data->hctx)) {
232 rq->rq_flags = RQF_MQ_INFLIGHT;
233 atomic_inc(&data->hctx->nr_active);
234 }
bd166ef1
JA
235 rq->tag = tag;
236 rq->internal_tag = -1;
562bef42 237 data->hctx->tags->rqs[rq->tag] = rq;
bd166ef1
JA
238 }
239
ef295ecf 240 blk_mq_rq_ctx_init(data->q, data->ctx, rq, op);
5dee8577
CH
241 return rq;
242 }
243
244 return NULL;
245}
2c3ad667 246EXPORT_SYMBOL_GPL(__blk_mq_alloc_request);
5dee8577 247
6f3b0e8b
CH
248struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
249 unsigned int flags)
320ae51f 250{
5a797e00 251 struct blk_mq_alloc_data alloc_data = { .flags = flags };
bd166ef1 252 struct request *rq;
a492f075 253 int ret;
320ae51f 254
6f3b0e8b 255 ret = blk_queue_enter(q, flags & BLK_MQ_REQ_NOWAIT);
a492f075
JL
256 if (ret)
257 return ERR_PTR(ret);
320ae51f 258
bd166ef1 259 rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data);
841bac2c 260
bd166ef1
JA
261 blk_mq_put_ctx(alloc_data.ctx);
262 blk_queue_exit(q);
263
264 if (!rq)
a492f075 265 return ERR_PTR(-EWOULDBLOCK);
0c4de0f3
CH
266
267 rq->__data_len = 0;
268 rq->__sector = (sector_t) -1;
269 rq->bio = rq->biotail = NULL;
320ae51f
JA
270 return rq;
271}
4bb659b1 272EXPORT_SYMBOL(blk_mq_alloc_request);
320ae51f 273
1f5bd336
ML
274struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
275 unsigned int flags, unsigned int hctx_idx)
276{
6d2809d5 277 struct blk_mq_alloc_data alloc_data = { .flags = flags };
1f5bd336 278 struct request *rq;
6d2809d5 279 unsigned int cpu;
1f5bd336
ML
280 int ret;
281
282 /*
283 * If the tag allocator sleeps we could get an allocation for a
284 * different hardware context. No need to complicate the low level
285 * allocator for this for the rare use case of a command tied to
286 * a specific queue.
287 */
288 if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
289 return ERR_PTR(-EINVAL);
290
291 if (hctx_idx >= q->nr_hw_queues)
292 return ERR_PTR(-EIO);
293
294 ret = blk_queue_enter(q, true);
295 if (ret)
296 return ERR_PTR(ret);
297
c8712c6a
CH
298 /*
299 * Check if the hardware context is actually mapped to anything.
300 * If not tell the caller that it should skip this queue.
301 */
6d2809d5
OS
302 alloc_data.hctx = q->queue_hw_ctx[hctx_idx];
303 if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) {
304 blk_queue_exit(q);
305 return ERR_PTR(-EXDEV);
1f5bd336 306 }
6d2809d5
OS
307 cpu = cpumask_first(alloc_data.hctx->cpumask);
308 alloc_data.ctx = __blk_mq_get_ctx(q, cpu);
1f5bd336 309
6d2809d5 310 rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data);
c8712c6a 311
6d2809d5 312 blk_mq_put_ctx(alloc_data.ctx);
c8712c6a 313 blk_queue_exit(q);
6d2809d5
OS
314
315 if (!rq)
316 return ERR_PTR(-EWOULDBLOCK);
317
318 return rq;
1f5bd336
ML
319}
320EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
321
bd166ef1
JA
322void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
323 struct request *rq)
320ae51f 324{
bd166ef1 325 const int sched_tag = rq->internal_tag;
320ae51f
JA
326 struct request_queue *q = rq->q;
327
e8064021 328 if (rq->rq_flags & RQF_MQ_INFLIGHT)
0d2602ca 329 atomic_dec(&hctx->nr_active);
87760e5e
JA
330
331 wbt_done(q->rq_wb, &rq->issue_stat);
e8064021 332 rq->rq_flags = 0;
0d2602ca 333
af76e555 334 clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
06426adf 335 clear_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
bd166ef1
JA
336 if (rq->tag != -1)
337 blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
338 if (sched_tag != -1)
339 blk_mq_sched_completed_request(hctx, rq);
50e1dab8 340 blk_mq_sched_restart_queues(hctx);
3ef28e83 341 blk_queue_exit(q);
320ae51f
JA
342}
343
bd166ef1 344static void blk_mq_finish_hctx_request(struct blk_mq_hw_ctx *hctx,
16a3c2a7 345 struct request *rq)
320ae51f
JA
346{
347 struct blk_mq_ctx *ctx = rq->mq_ctx;
320ae51f
JA
348
349 ctx->rq_completed[rq_is_sync(rq)]++;
bd166ef1
JA
350 __blk_mq_finish_request(hctx, ctx, rq);
351}
352
353void blk_mq_finish_request(struct request *rq)
354{
355 blk_mq_finish_hctx_request(blk_mq_map_queue(rq->q, rq->mq_ctx->cpu), rq);
7c7f2f2b 356}
7c7f2f2b
JA
357
358void blk_mq_free_request(struct request *rq)
359{
bd166ef1 360 blk_mq_sched_put_request(rq);
320ae51f 361}
1a3b595a 362EXPORT_SYMBOL_GPL(blk_mq_free_request);
320ae51f 363
c8a446ad 364inline void __blk_mq_end_request(struct request *rq, int error)
320ae51f 365{
0d11e6ac
ML
366 blk_account_io_done(rq);
367
91b63639 368 if (rq->end_io) {
87760e5e 369 wbt_done(rq->q->rq_wb, &rq->issue_stat);
320ae51f 370 rq->end_io(rq, error);
91b63639
CH
371 } else {
372 if (unlikely(blk_bidi_rq(rq)))
373 blk_mq_free_request(rq->next_rq);
320ae51f 374 blk_mq_free_request(rq);
91b63639 375 }
320ae51f 376}
c8a446ad 377EXPORT_SYMBOL(__blk_mq_end_request);
63151a44 378
c8a446ad 379void blk_mq_end_request(struct request *rq, int error)
63151a44
CH
380{
381 if (blk_update_request(rq, error, blk_rq_bytes(rq)))
382 BUG();
c8a446ad 383 __blk_mq_end_request(rq, error);
63151a44 384}
c8a446ad 385EXPORT_SYMBOL(blk_mq_end_request);
320ae51f 386
30a91cb4 387static void __blk_mq_complete_request_remote(void *data)
320ae51f 388{
3d6efbf6 389 struct request *rq = data;
320ae51f 390
30a91cb4 391 rq->q->softirq_done_fn(rq);
320ae51f 392}
320ae51f 393
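/*
 * Complete the request on the submitting CPU via an IPI when
 * QUEUE_FLAG_SAME_COMP is set and the completing CPU does not share a
 * cache with it (or SAME_FORCE is set); otherwise run softirq_done_fn
 * locally.
 */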
ed851860 394static void blk_mq_ipi_complete_request(struct request *rq)
320ae51f
JA
395{
396 struct blk_mq_ctx *ctx = rq->mq_ctx;
38535201 397 bool shared = false;
320ae51f
JA
398 int cpu;
399
38535201 400 if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
30a91cb4
CH
401 rq->q->softirq_done_fn(rq);
402 return;
403 }
320ae51f
JA
404
405 cpu = get_cpu();
38535201
CH
406 if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
407 shared = cpus_share_cache(cpu, ctx->cpu);
408
409 if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
30a91cb4 410 rq->csd.func = __blk_mq_complete_request_remote;
3d6efbf6
CH
411 rq->csd.info = rq;
412 rq->csd.flags = 0;
c46fff2a 413 smp_call_function_single_async(ctx->cpu, &rq->csd);
3d6efbf6 414 } else {
30a91cb4 415 rq->q->softirq_done_fn(rq);
3d6efbf6 416 }
320ae51f
JA
417 put_cpu();
418}
30a91cb4 419
cf43e6be
JA
420static void blk_mq_stat_add(struct request *rq)
421{
422 if (rq->rq_flags & RQF_STATS) {
423 /*
 424 * We could use rq->mq_ctx here, but there's less of a risk
425 * of races if we have the completion event add the stats
426 * to the local software queue.
427 */
428 struct blk_mq_ctx *ctx;
429
430 ctx = __blk_mq_get_ctx(rq->q, raw_smp_processor_id());
431 blk_stat_add(&ctx->stat[rq_data_dir(rq)], rq);
432 }
433}
434
1fa8cc52 435static void __blk_mq_complete_request(struct request *rq)
ed851860
JA
436{
437 struct request_queue *q = rq->q;
438
cf43e6be
JA
439 blk_mq_stat_add(rq);
440
ed851860 441 if (!q->softirq_done_fn)
c8a446ad 442 blk_mq_end_request(rq, rq->errors);
ed851860
JA
443 else
444 blk_mq_ipi_complete_request(rq);
445}
446
30a91cb4
CH
447/**
448 * blk_mq_complete_request - end I/O on a request
449 * @rq: the request being processed
450 *
451 * Description:
452 * Ends all I/O on a request. It does not handle partial completions.
 453 * The actual completion happens out-of-order, through an IPI handler.
454 **/
f4829a9b 455void blk_mq_complete_request(struct request *rq, int error)
30a91cb4 456{
95f09684
JA
457 struct request_queue *q = rq->q;
458
459 if (unlikely(blk_should_fake_timeout(q)))
30a91cb4 460 return;
f4829a9b
CH
461 if (!blk_mark_rq_complete(rq)) {
462 rq->errors = error;
ed851860 463 __blk_mq_complete_request(rq);
f4829a9b 464 }
30a91cb4
CH
465}
466EXPORT_SYMBOL(blk_mq_complete_request);
320ae51f 467
973c0191
KB
468int blk_mq_request_started(struct request *rq)
469{
470 return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
471}
472EXPORT_SYMBOL_GPL(blk_mq_request_started);
473
e2490073 474void blk_mq_start_request(struct request *rq)
320ae51f
JA
475{
476 struct request_queue *q = rq->q;
477
bd166ef1
JA
478 blk_mq_sched_started_request(rq);
479
320ae51f
JA
480 trace_block_rq_issue(q, rq);
481
cf43e6be
JA
482 if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
483 blk_stat_set_issue_time(&rq->issue_stat);
484 rq->rq_flags |= RQF_STATS;
87760e5e 485 wbt_issue(q->rq_wb, &rq->issue_stat);
cf43e6be
JA
486 }
487
2b8393b4 488 blk_add_timer(rq);
87ee7b11 489
538b7534
JA
490 /*
 491 * Ensure that ->deadline is visible before we set the started
492 * flag and clear the completed flag.
493 */
494 smp_mb__before_atomic();
495
87ee7b11
JA
496 /*
497 * Mark us as started and clear complete. Complete might have been
498 * set if requeue raced with timeout, which then marked it as
499 * complete. So be sure to clear complete again when we start
500 * the request, otherwise we'll ignore the completion event.
501 */
4b570521
JA
502 if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
503 set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
504 if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
505 clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
49f5baa5
CH
506
507 if (q->dma_drain_size && blk_rq_bytes(rq)) {
508 /*
509 * Make sure space for the drain appears. We know we can do
510 * this because max_hw_segments has been adjusted to be one
511 * fewer than the device can handle.
512 */
513 rq->nr_phys_segments++;
514 }
320ae51f 515}
e2490073 516EXPORT_SYMBOL(blk_mq_start_request);
320ae51f 517
ed0791b2 518static void __blk_mq_requeue_request(struct request *rq)
320ae51f
JA
519{
520 struct request_queue *q = rq->q;
521
522 trace_block_rq_requeue(q, rq);
87760e5e 523 wbt_requeue(q->rq_wb, &rq->issue_stat);
bd166ef1 524 blk_mq_sched_requeue_request(rq);
49f5baa5 525
e2490073
CH
526 if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
527 if (q->dma_drain_size && blk_rq_bytes(rq))
528 rq->nr_phys_segments--;
529 }
320ae51f
JA
530}
531
2b053aca 532void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
ed0791b2 533{
ed0791b2 534 __blk_mq_requeue_request(rq);
ed0791b2 535
ed0791b2 536 BUG_ON(blk_queued_rq(rq));
2b053aca 537 blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
ed0791b2
CH
538}
539EXPORT_SYMBOL(blk_mq_requeue_request);
540
6fca6a61
CH
541static void blk_mq_requeue_work(struct work_struct *work)
542{
543 struct request_queue *q =
2849450a 544 container_of(work, struct request_queue, requeue_work.work);
6fca6a61
CH
545 LIST_HEAD(rq_list);
546 struct request *rq, *next;
547 unsigned long flags;
548
549 spin_lock_irqsave(&q->requeue_lock, flags);
550 list_splice_init(&q->requeue_list, &rq_list);
551 spin_unlock_irqrestore(&q->requeue_lock, flags);
552
553 list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
e8064021 554 if (!(rq->rq_flags & RQF_SOFTBARRIER))
6fca6a61
CH
555 continue;
556
e8064021 557 rq->rq_flags &= ~RQF_SOFTBARRIER;
6fca6a61 558 list_del_init(&rq->queuelist);
bd6737f1 559 blk_mq_sched_insert_request(rq, true, false, false, true);
6fca6a61
CH
560 }
561
562 while (!list_empty(&rq_list)) {
563 rq = list_entry(rq_list.next, struct request, queuelist);
564 list_del_init(&rq->queuelist);
bd6737f1 565 blk_mq_sched_insert_request(rq, false, false, false, true);
6fca6a61
CH
566 }
567
52d7f1b5 568 blk_mq_run_hw_queues(q, false);
6fca6a61
CH
569}
570
2b053aca
BVA
571void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
572 bool kick_requeue_list)
6fca6a61
CH
573{
574 struct request_queue *q = rq->q;
575 unsigned long flags;
576
577 /*
578 * We abuse this flag that is otherwise used by the I/O scheduler to
 579 * request head insertion from the workqueue.
580 */
e8064021 581 BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);
6fca6a61
CH
582
583 spin_lock_irqsave(&q->requeue_lock, flags);
584 if (at_head) {
e8064021 585 rq->rq_flags |= RQF_SOFTBARRIER;
6fca6a61
CH
586 list_add(&rq->queuelist, &q->requeue_list);
587 } else {
588 list_add_tail(&rq->queuelist, &q->requeue_list);
589 }
590 spin_unlock_irqrestore(&q->requeue_lock, flags);
2b053aca
BVA
591
592 if (kick_requeue_list)
593 blk_mq_kick_requeue_list(q);
6fca6a61
CH
594}
595EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
596
597void blk_mq_kick_requeue_list(struct request_queue *q)
598{
2849450a 599 kblockd_schedule_delayed_work(&q->requeue_work, 0);
6fca6a61
CH
600}
601EXPORT_SYMBOL(blk_mq_kick_requeue_list);
602
2849450a
MS
603void blk_mq_delay_kick_requeue_list(struct request_queue *q,
604 unsigned long msecs)
605{
606 kblockd_schedule_delayed_work(&q->requeue_work,
607 msecs_to_jiffies(msecs));
608}
609EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
610
1885b24d
JA
611void blk_mq_abort_requeue_list(struct request_queue *q)
612{
613 unsigned long flags;
614 LIST_HEAD(rq_list);
615
616 spin_lock_irqsave(&q->requeue_lock, flags);
617 list_splice_init(&q->requeue_list, &rq_list);
618 spin_unlock_irqrestore(&q->requeue_lock, flags);
619
620 while (!list_empty(&rq_list)) {
621 struct request *rq;
622
623 rq = list_first_entry(&rq_list, struct request, queuelist);
624 list_del_init(&rq->queuelist);
625 rq->errors = -EIO;
626 blk_mq_end_request(rq, rq->errors);
627 }
628}
629EXPORT_SYMBOL(blk_mq_abort_requeue_list);
630
0e62f51f
JA
631struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
632{
88c7b2b7
JA
633 if (tag < tags->nr_tags) {
634 prefetch(tags->rqs[tag]);
4ee86bab 635 return tags->rqs[tag];
88c7b2b7 636 }
4ee86bab
HR
637
638 return NULL;
24d2f903
CH
639}
640EXPORT_SYMBOL(blk_mq_tag_to_rq);
641
320ae51f 642struct blk_mq_timeout_data {
46f92d42
CH
643 unsigned long next;
644 unsigned int next_set;
320ae51f
JA
645};
646
90415837 647void blk_mq_rq_timed_out(struct request *req, bool reserved)
320ae51f 648{
f8a5b122 649 const struct blk_mq_ops *ops = req->q->mq_ops;
46f92d42 650 enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
87ee7b11
JA
651
652 /*
653 * We know that complete is set at this point. If STARTED isn't set
654 * anymore, then the request isn't active and the "timeout" should
655 * just be ignored. This can happen due to the bitflag ordering.
656 * Timeout first checks if STARTED is set, and if it is, assumes
657 * the request is active. But if we race with completion, then
 658 * both flags will get cleared. So check here again, and ignore
659 * a timeout event with a request that isn't active.
660 */
46f92d42
CH
661 if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
662 return;
87ee7b11 663
46f92d42 664 if (ops->timeout)
0152fb6b 665 ret = ops->timeout(req, reserved);
46f92d42
CH
666
667 switch (ret) {
668 case BLK_EH_HANDLED:
669 __blk_mq_complete_request(req);
670 break;
671 case BLK_EH_RESET_TIMER:
672 blk_add_timer(req);
673 blk_clear_rq_complete(req);
674 break;
675 case BLK_EH_NOT_HANDLED:
676 break;
677 default:
678 printk(KERN_ERR "block: bad eh return: %d\n", ret);
679 break;
680 }
87ee7b11 681}
5b3f25fc 682
81481eb4
CH
683static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
684 struct request *rq, void *priv, bool reserved)
685{
686 struct blk_mq_timeout_data *data = priv;
87ee7b11 687
eb130dbf
KB
688 if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
689 /*
690 * If a request wasn't started before the queue was
691 * marked dying, kill it here or it'll go unnoticed.
692 */
a59e0f57
KB
693 if (unlikely(blk_queue_dying(rq->q))) {
694 rq->errors = -EIO;
695 blk_mq_end_request(rq, rq->errors);
696 }
46f92d42 697 return;
eb130dbf 698 }
87ee7b11 699
46f92d42
CH
700 if (time_after_eq(jiffies, rq->deadline)) {
701 if (!blk_mark_rq_complete(rq))
0152fb6b 702 blk_mq_rq_timed_out(rq, reserved);
46f92d42
CH
703 } else if (!data->next_set || time_after(data->next, rq->deadline)) {
704 data->next = rq->deadline;
705 data->next_set = 1;
706 }
87ee7b11
JA
707}
708
287922eb 709static void blk_mq_timeout_work(struct work_struct *work)
320ae51f 710{
287922eb
CH
711 struct request_queue *q =
712 container_of(work, struct request_queue, timeout_work);
81481eb4
CH
713 struct blk_mq_timeout_data data = {
714 .next = 0,
715 .next_set = 0,
716 };
81481eb4 717 int i;
320ae51f 718
71f79fb3
GKB
719 /* A deadlock might occur if a request is stuck requiring a
 720 * timeout at the same time a queue freeze is waiting for
721 * completion, since the timeout code would not be able to
722 * acquire the queue reference here.
723 *
724 * That's why we don't use blk_queue_enter here; instead, we use
725 * percpu_ref_tryget directly, because we need to be able to
726 * obtain a reference even in the short window between the queue
727 * starting to freeze, by dropping the first reference in
728 * blk_mq_freeze_queue_start, and the moment the last request is
729 * consumed, marked by the instant q_usage_counter reaches
730 * zero.
731 */
732 if (!percpu_ref_tryget(&q->q_usage_counter))
287922eb
CH
733 return;
734
0bf6cd5b 735 blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);
320ae51f 736
81481eb4
CH
737 if (data.next_set) {
738 data.next = blk_rq_timeout(round_jiffies_up(data.next));
739 mod_timer(&q->timeout, data.next);
0d2602ca 740 } else {
0bf6cd5b
CH
741 struct blk_mq_hw_ctx *hctx;
742
f054b56c
ML
743 queue_for_each_hw_ctx(q, hctx, i) {
744 /* the hctx may be unmapped, so check it here */
745 if (blk_mq_hw_queue_mapped(hctx))
746 blk_mq_tag_idle(hctx);
747 }
0d2602ca 748 }
287922eb 749 blk_queue_exit(q);
320ae51f
JA
750}
751
752/*
753 * Reverse check our software queue for entries that we could potentially
754 * merge with. Currently includes a hand-wavy stop count of 8, to not spend
755 * too much time checking for merges.
756 */
757static bool blk_mq_attempt_merge(struct request_queue *q,
758 struct blk_mq_ctx *ctx, struct bio *bio)
759{
760 struct request *rq;
761 int checked = 8;
762
763 list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
34fe7c05 764 bool merged = false;
320ae51f
JA
765
766 if (!checked--)
767 break;
768
769 if (!blk_rq_merge_ok(rq, bio))
770 continue;
771
34fe7c05
CH
772 switch (blk_try_merge(rq, bio)) {
773 case ELEVATOR_BACK_MERGE:
774 if (blk_mq_sched_allow_merge(q, rq, bio))
775 merged = bio_attempt_back_merge(q, rq, bio);
bd166ef1 776 break;
34fe7c05
CH
777 case ELEVATOR_FRONT_MERGE:
778 if (blk_mq_sched_allow_merge(q, rq, bio))
779 merged = bio_attempt_front_merge(q, rq, bio);
320ae51f 780 break;
1e739730
CH
781 case ELEVATOR_DISCARD_MERGE:
782 merged = bio_attempt_discard_merge(q, rq, bio);
320ae51f 783 break;
34fe7c05
CH
784 default:
785 continue;
320ae51f 786 }
34fe7c05
CH
787
788 if (merged)
789 ctx->rq_merged++;
790 return merged;
320ae51f
JA
791 }
792
793 return false;
794}
795
88459642
OS
796struct flush_busy_ctx_data {
797 struct blk_mq_hw_ctx *hctx;
798 struct list_head *list;
799};
800
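/*
 * sbitmap_for_each_set() callback: clear the ctx's pending bit and splice
 * its rq_list onto the caller's dispatch list.
 */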
801static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
802{
803 struct flush_busy_ctx_data *flush_data = data;
804 struct blk_mq_hw_ctx *hctx = flush_data->hctx;
805 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
806
807 sbitmap_clear_bit(sb, bitnr);
808 spin_lock(&ctx->lock);
809 list_splice_tail_init(&ctx->rq_list, flush_data->list);
810 spin_unlock(&ctx->lock);
811 return true;
812}
813
1429d7c9
JA
814/*
815 * Process software queues that have been marked busy, splicing them
 816 * to the for-dispatch list.
817 */
2c3ad667 818void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
1429d7c9 819{
88459642
OS
820 struct flush_busy_ctx_data data = {
821 .hctx = hctx,
822 .list = list,
823 };
1429d7c9 824
88459642 825 sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
1429d7c9 826}
2c3ad667 827EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);
1429d7c9 828
703fd1c0
JA
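/*
 * Map a dispatch batch size to a bucket index for the hctx->dispatched[]
 * statistics: 0 for an empty batch, otherwise ilog2-based and capped at
 * the last bucket.
 */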
829static inline unsigned int queued_to_index(unsigned int queued)
830{
831 if (!queued)
832 return 0;
1429d7c9 833
703fd1c0 834 return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
1429d7c9
JA
835}
836
bd6737f1
JA
837bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
838 bool wait)
bd166ef1
JA
839{
840 struct blk_mq_alloc_data data = {
841 .q = rq->q,
bd166ef1
JA
842 .hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
843 .flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
844 };
845
bd166ef1
JA
846 if (rq->tag != -1) {
847done:
848 if (hctx)
849 *hctx = data.hctx;
850 return true;
851 }
852
415b806d
SG
853 if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
854 data.flags |= BLK_MQ_REQ_RESERVED;
855
bd166ef1
JA
856 rq->tag = blk_mq_get_tag(&data);
857 if (rq->tag >= 0) {
200e86b3
JA
858 if (blk_mq_tag_busy(data.hctx)) {
859 rq->rq_flags |= RQF_MQ_INFLIGHT;
860 atomic_inc(&data.hctx->nr_active);
861 }
bd166ef1
JA
862 data.hctx->tags->rqs[rq->tag] = rq;
863 goto done;
864 }
865
866 return false;
867}
868
99cf1dc5
JA
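/*
 * Return the driver tag held by @rq to the hardware queue's tag set and
 * drop the nr_active reference if the request was marked in flight.
 */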
869static void blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
870 struct request *rq)
871{
872 if (rq->tag == -1 || rq->internal_tag == -1)
873 return;
874
875 blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
876 rq->tag = -1;
877
878 if (rq->rq_flags & RQF_MQ_INFLIGHT) {
879 rq->rq_flags &= ~RQF_MQ_INFLIGHT;
880 atomic_dec(&hctx->nr_active);
881 }
882}
883
bd166ef1
JA
884/*
 885 * If we fail to get a driver tag because all the driver tags are already
886 * assigned and on the dispatch list, BUT the first entry does not have a
887 * tag, then we could deadlock. For that case, move entries with assigned
888 * driver tags to the front, leaving the set of tagged requests in the
889 * same order, and the untagged set in the same order.
890 */
891static bool reorder_tags_to_front(struct list_head *list)
892{
893 struct request *rq, *tmp, *first = NULL;
894
895 list_for_each_entry_safe_reverse(rq, tmp, list, queuelist) {
896 if (rq == first)
897 break;
898 if (rq->tag != -1) {
899 list_move(&rq->queuelist, list);
900 if (!first)
901 first = rq;
902 }
903 }
904
905 return first != NULL;
906}
907
da55f2cc
OS
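/*
 * Wake callback for hctx->dispatch_wait: remove it from the tag wait
 * queue, clear the TAG_WAITING state bit and rerun the hardware queue.
 */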
908static int blk_mq_dispatch_wake(wait_queue_t *wait, unsigned mode, int flags,
909 void *key)
910{
911 struct blk_mq_hw_ctx *hctx;
912
913 hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
914
915 list_del(&wait->task_list);
916 clear_bit_unlock(BLK_MQ_S_TAG_WAITING, &hctx->state);
917 blk_mq_run_hw_queue(hctx, true);
918 return 1;
919}
920
921static bool blk_mq_dispatch_wait_add(struct blk_mq_hw_ctx *hctx)
922{
923 struct sbq_wait_state *ws;
924
925 /*
926 * The TAG_WAITING bit serves as a lock protecting hctx->dispatch_wait.
927 * The thread which wins the race to grab this bit adds the hardware
928 * queue to the wait queue.
929 */
930 if (test_bit(BLK_MQ_S_TAG_WAITING, &hctx->state) ||
931 test_and_set_bit_lock(BLK_MQ_S_TAG_WAITING, &hctx->state))
932 return false;
933
934 init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
935 ws = bt_wait_ptr(&hctx->tags->bitmap_tags, hctx);
936
937 /*
938 * As soon as this returns, it's no longer safe to fiddle with
939 * hctx->dispatch_wait, since a completion can wake up the wait queue
940 * and unlock the bit.
941 */
942 add_wait_queue(&ws->wait, &hctx->dispatch_wait);
943 return true;
944}
945
f04c3df3 946bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
320ae51f
JA
947{
948 struct request_queue *q = hctx->queue;
320ae51f 949 struct request *rq;
74c45052
JA
950 LIST_HEAD(driver_list);
951 struct list_head *dptr;
f04c3df3 952 int queued, ret = BLK_MQ_RQ_QUEUE_OK;
320ae51f 953
74c45052
JA
954 /*
955 * Start off with dptr being NULL, so we start the first request
956 * immediately, even if we have more pending.
957 */
958 dptr = NULL;
959
320ae51f
JA
960 /*
961 * Now process all the entries, sending them to the driver.
962 */
1429d7c9 963 queued = 0;
f04c3df3 964 while (!list_empty(list)) {
74c45052 965 struct blk_mq_queue_data bd;
320ae51f 966
f04c3df3 967 rq = list_first_entry(list, struct request, queuelist);
bd166ef1
JA
968 if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
969 if (!queued && reorder_tags_to_front(list))
970 continue;
3c782d67
JA
971
972 /*
da55f2cc
OS
973 * The initial allocation attempt failed, so we need to
974 * rerun the hardware queue when a tag is freed.
3c782d67 975 */
da55f2cc
OS
976 if (blk_mq_dispatch_wait_add(hctx)) {
977 /*
978 * It's possible that a tag was freed in the
979 * window between the allocation failure and
980 * adding the hardware queue to the wait queue.
981 */
982 if (!blk_mq_get_driver_tag(rq, &hctx, false))
983 break;
984 } else {
3c782d67 985 break;
da55f2cc 986 }
bd166ef1 987 }
da55f2cc 988
320ae51f 989 list_del_init(&rq->queuelist);
320ae51f 990
74c45052
JA
991 bd.rq = rq;
992 bd.list = dptr;
f04c3df3 993 bd.last = list_empty(list);
74c45052
JA
994
995 ret = q->mq_ops->queue_rq(hctx, &bd);
320ae51f
JA
996 switch (ret) {
997 case BLK_MQ_RQ_QUEUE_OK:
998 queued++;
52b9c330 999 break;
320ae51f 1000 case BLK_MQ_RQ_QUEUE_BUSY:
99cf1dc5 1001 blk_mq_put_driver_tag(hctx, rq);
f04c3df3 1002 list_add(&rq->queuelist, list);
ed0791b2 1003 __blk_mq_requeue_request(rq);
320ae51f
JA
1004 break;
1005 default:
1006 pr_err("blk-mq: bad return on queue: %d\n", ret);
320ae51f 1007 case BLK_MQ_RQ_QUEUE_ERROR:
1e93b8c2 1008 rq->errors = -EIO;
c8a446ad 1009 blk_mq_end_request(rq, rq->errors);
320ae51f
JA
1010 break;
1011 }
1012
1013 if (ret == BLK_MQ_RQ_QUEUE_BUSY)
1014 break;
74c45052
JA
1015
1016 /*
1017 * We've done the first request. If we have more than 1
1018 * left in the list, set dptr to defer issue.
1019 */
f04c3df3 1020 if (!dptr && list->next != list->prev)
74c45052 1021 dptr = &driver_list;
320ae51f
JA
1022 }
1023
703fd1c0 1024 hctx->dispatched[queued_to_index(queued)]++;
320ae51f
JA
1025
1026 /*
1027 * Any items that need requeuing? Stuff them into hctx->dispatch,
1028 * that is where we will continue on next queue run.
1029 */
f04c3df3 1030 if (!list_empty(list)) {
320ae51f 1031 spin_lock(&hctx->lock);
c13660a0 1032 list_splice_init(list, &hctx->dispatch);
320ae51f 1033 spin_unlock(&hctx->lock);
f04c3df3 1034
9ba52e58
SL
1035 /*
 1036 * the queue is expected to be stopped with BLK_MQ_RQ_QUEUE_BUSY, but
1037 * it's possible the queue is stopped and restarted again
1038 * before this. Queue restart will dispatch requests. And since
1039 * requests in rq_list aren't added into hctx->dispatch yet,
1040 * the requests in rq_list might get lost.
1041 *
1042 * blk_mq_run_hw_queue() already checks the STOPPED bit
bd166ef1 1043 *
da55f2cc
OS
1044 * If RESTART or TAG_WAITING is set, then let completion restart
1045 * the queue instead of potentially looping here.
bd166ef1 1046 */
da55f2cc
OS
1047 if (!blk_mq_sched_needs_restart(hctx) &&
1048 !test_bit(BLK_MQ_S_TAG_WAITING, &hctx->state))
bd166ef1 1049 blk_mq_run_hw_queue(hctx, true);
320ae51f 1050 }
f04c3df3 1051
2aa0f21d 1052 return queued != 0;
f04c3df3
JA
1053}
1054
6a83e74d
BVA
1055static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
1056{
1057 int srcu_idx;
1058
1059 WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
1060 cpu_online(hctx->next_cpu));
1061
1062 if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
1063 rcu_read_lock();
bd166ef1 1064 blk_mq_sched_dispatch_requests(hctx);
6a83e74d
BVA
1065 rcu_read_unlock();
1066 } else {
1067 srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
bd166ef1 1068 blk_mq_sched_dispatch_requests(hctx);
6a83e74d
BVA
1069 srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
1070 }
1071}
1072
506e931f
JA
1073/*
1074 * It'd be great if the workqueue API had a way to pass
1075 * in a mask and had some smarts for more clever placement.
1076 * For now we just round-robin here, switching for every
1077 * BLK_MQ_CPU_WORK_BATCH queued items.
1078 */
1079static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
1080{
b657d7e6
CH
1081 if (hctx->queue->nr_hw_queues == 1)
1082 return WORK_CPU_UNBOUND;
506e931f
JA
1083
1084 if (--hctx->next_cpu_batch <= 0) {
c02ebfdd 1085 int next_cpu;
506e931f
JA
1086
1087 next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
1088 if (next_cpu >= nr_cpu_ids)
1089 next_cpu = cpumask_first(hctx->cpumask);
1090
1091 hctx->next_cpu = next_cpu;
1092 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1093 }
1094
b657d7e6 1095 return hctx->next_cpu;
506e931f
JA
1096}
1097
320ae51f
JA
1098void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1099{
5d1b25c1
BVA
1100 if (unlikely(blk_mq_hctx_stopped(hctx) ||
1101 !blk_mq_hw_queue_mapped(hctx)))
320ae51f
JA
1102 return;
1103
1b792f2f 1104 if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
2a90d4aa
PB
1105 int cpu = get_cpu();
1106 if (cpumask_test_cpu(cpu, hctx->cpumask)) {
398205b8 1107 __blk_mq_run_hw_queue(hctx);
2a90d4aa 1108 put_cpu();
398205b8
PB
1109 return;
1110 }
e4043dcf 1111
2a90d4aa 1112 put_cpu();
e4043dcf 1113 }
398205b8 1114
27489a3c 1115 kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work);
320ae51f
JA
1116}
1117
b94ec296 1118void blk_mq_run_hw_queues(struct request_queue *q, bool async)
320ae51f
JA
1119{
1120 struct blk_mq_hw_ctx *hctx;
1121 int i;
1122
1123 queue_for_each_hw_ctx(q, hctx, i) {
bd166ef1 1124 if (!blk_mq_hctx_has_pending(hctx) ||
5d1b25c1 1125 blk_mq_hctx_stopped(hctx))
320ae51f
JA
1126 continue;
1127
b94ec296 1128 blk_mq_run_hw_queue(hctx, async);
320ae51f
JA
1129 }
1130}
b94ec296 1131EXPORT_SYMBOL(blk_mq_run_hw_queues);
320ae51f 1132
fd001443
BVA
1133/**
1134 * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
1135 * @q: request queue.
1136 *
1137 * The caller is responsible for serializing this function against
1138 * blk_mq_{start,stop}_hw_queue().
1139 */
1140bool blk_mq_queue_stopped(struct request_queue *q)
1141{
1142 struct blk_mq_hw_ctx *hctx;
1143 int i;
1144
1145 queue_for_each_hw_ctx(q, hctx, i)
1146 if (blk_mq_hctx_stopped(hctx))
1147 return true;
1148
1149 return false;
1150}
1151EXPORT_SYMBOL(blk_mq_queue_stopped);
1152
320ae51f
JA
1153void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
1154{
27489a3c 1155 cancel_work(&hctx->run_work);
70f4db63 1156 cancel_delayed_work(&hctx->delay_work);
320ae51f
JA
1157 set_bit(BLK_MQ_S_STOPPED, &hctx->state);
1158}
1159EXPORT_SYMBOL(blk_mq_stop_hw_queue);
1160
280d45f6
CH
1161void blk_mq_stop_hw_queues(struct request_queue *q)
1162{
1163 struct blk_mq_hw_ctx *hctx;
1164 int i;
1165
1166 queue_for_each_hw_ctx(q, hctx, i)
1167 blk_mq_stop_hw_queue(hctx);
1168}
1169EXPORT_SYMBOL(blk_mq_stop_hw_queues);
1170
320ae51f
JA
1171void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
1172{
1173 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
e4043dcf 1174
0ffbce80 1175 blk_mq_run_hw_queue(hctx, false);
320ae51f
JA
1176}
1177EXPORT_SYMBOL(blk_mq_start_hw_queue);
1178
2f268556
CH
1179void blk_mq_start_hw_queues(struct request_queue *q)
1180{
1181 struct blk_mq_hw_ctx *hctx;
1182 int i;
1183
1184 queue_for_each_hw_ctx(q, hctx, i)
1185 blk_mq_start_hw_queue(hctx);
1186}
1187EXPORT_SYMBOL(blk_mq_start_hw_queues);
1188
ae911c5e
JA
1189void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1190{
1191 if (!blk_mq_hctx_stopped(hctx))
1192 return;
1193
1194 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1195 blk_mq_run_hw_queue(hctx, async);
1196}
1197EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
1198
1b4a3258 1199void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
320ae51f
JA
1200{
1201 struct blk_mq_hw_ctx *hctx;
1202 int i;
1203
ae911c5e
JA
1204 queue_for_each_hw_ctx(q, hctx, i)
1205 blk_mq_start_stopped_hw_queue(hctx, async);
320ae51f
JA
1206}
1207EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
1208
70f4db63 1209static void blk_mq_run_work_fn(struct work_struct *work)
320ae51f
JA
1210{
1211 struct blk_mq_hw_ctx *hctx;
1212
27489a3c 1213 hctx = container_of(work, struct blk_mq_hw_ctx, run_work);
e4043dcf 1214
320ae51f
JA
1215 __blk_mq_run_hw_queue(hctx);
1216}
1217
70f4db63
CH
1218static void blk_mq_delay_work_fn(struct work_struct *work)
1219{
1220 struct blk_mq_hw_ctx *hctx;
1221
1222 hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);
1223
1224 if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
1225 __blk_mq_run_hw_queue(hctx);
1226}
1227
1228void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
1229{
19c66e59
ML
1230 if (unlikely(!blk_mq_hw_queue_mapped(hctx)))
1231 return;
70f4db63 1232
7e79dadc 1233 blk_mq_stop_hw_queue(hctx);
b657d7e6
CH
1234 kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
1235 &hctx->delay_work, msecs_to_jiffies(msecs));
70f4db63
CH
1236}
1237EXPORT_SYMBOL(blk_mq_delay_queue);
1238
cfd0c552 1239static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
cfd0c552
ML
1240 struct request *rq,
1241 bool at_head)
320ae51f 1242{
e57690fe
JA
1243 struct blk_mq_ctx *ctx = rq->mq_ctx;
1244
01b983c9
JA
1245 trace_block_rq_insert(hctx->queue, rq);
1246
72a0a36e
CH
1247 if (at_head)
1248 list_add(&rq->queuelist, &ctx->rq_list);
1249 else
1250 list_add_tail(&rq->queuelist, &ctx->rq_list);
cfd0c552 1251}
4bb659b1 1252
2c3ad667
JA
1253void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
1254 bool at_head)
cfd0c552
ML
1255{
1256 struct blk_mq_ctx *ctx = rq->mq_ctx;
1257
e57690fe 1258 __blk_mq_insert_req_list(hctx, rq, at_head);
320ae51f 1259 blk_mq_hctx_mark_pending(hctx, ctx);
320ae51f
JA
1260}
1261
bd166ef1
JA
1262void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
1263 struct list_head *list)
320ae51f
JA
1264
1265{
320ae51f
JA
1266 /*
 1267 * preemption doesn't flush the plug list, so it's possible ctx->cpu is
 1268 * offline now.
1269 */
1270 spin_lock(&ctx->lock);
1271 while (!list_empty(list)) {
1272 struct request *rq;
1273
1274 rq = list_first_entry(list, struct request, queuelist);
e57690fe 1275 BUG_ON(rq->mq_ctx != ctx);
320ae51f 1276 list_del_init(&rq->queuelist);
e57690fe 1277 __blk_mq_insert_req_list(hctx, rq, false);
320ae51f 1278 }
cfd0c552 1279 blk_mq_hctx_mark_pending(hctx, ctx);
320ae51f 1280 spin_unlock(&ctx->lock);
320ae51f
JA
1281}
1282
1283static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
1284{
1285 struct request *rqa = container_of(a, struct request, queuelist);
1286 struct request *rqb = container_of(b, struct request, queuelist);
1287
1288 return !(rqa->mq_ctx < rqb->mq_ctx ||
1289 (rqa->mq_ctx == rqb->mq_ctx &&
1290 blk_rq_pos(rqa) < blk_rq_pos(rqb)));
1291}
1292
1293void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1294{
1295 struct blk_mq_ctx *this_ctx;
1296 struct request_queue *this_q;
1297 struct request *rq;
1298 LIST_HEAD(list);
1299 LIST_HEAD(ctx_list);
1300 unsigned int depth;
1301
1302 list_splice_init(&plug->mq_list, &list);
1303
1304 list_sort(NULL, &list, plug_ctx_cmp);
1305
1306 this_q = NULL;
1307 this_ctx = NULL;
1308 depth = 0;
1309
1310 while (!list_empty(&list)) {
1311 rq = list_entry_rq(list.next);
1312 list_del_init(&rq->queuelist);
1313 BUG_ON(!rq->q);
1314 if (rq->mq_ctx != this_ctx) {
1315 if (this_ctx) {
bd166ef1
JA
1316 trace_block_unplug(this_q, depth, from_schedule);
1317 blk_mq_sched_insert_requests(this_q, this_ctx,
1318 &ctx_list,
1319 from_schedule);
320ae51f
JA
1320 }
1321
1322 this_ctx = rq->mq_ctx;
1323 this_q = rq->q;
1324 depth = 0;
1325 }
1326
1327 depth++;
1328 list_add_tail(&rq->queuelist, &ctx_list);
1329 }
1330
1331 /*
1332 * If 'this_ctx' is set, we know we have entries to complete
1333 * on 'ctx_list'. Do those.
1334 */
1335 if (this_ctx) {
bd166ef1
JA
1336 trace_block_unplug(this_q, depth, from_schedule);
1337 blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
1338 from_schedule);
320ae51f
JA
1339 }
1340}
1341
1342static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1343{
1344 init_request_from_bio(rq, bio);
4b570521 1345
6e85eaf3 1346 blk_account_io_start(rq, true);
320ae51f
JA
1347}
1348
274a5843
JA
1349static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
1350{
1351 return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
1352 !blk_queue_nomerges(hctx->queue);
1353}
1354
07068d5b
JA
1355static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
1356 struct blk_mq_ctx *ctx,
1357 struct request *rq, struct bio *bio)
320ae51f 1358{
e18378a6 1359 if (!hctx_allow_merges(hctx) || !bio_mergeable(bio)) {
07068d5b
JA
1360 blk_mq_bio_to_request(rq, bio);
1361 spin_lock(&ctx->lock);
1362insert_rq:
1363 __blk_mq_insert_request(hctx, rq, false);
1364 spin_unlock(&ctx->lock);
1365 return false;
1366 } else {
274a5843
JA
1367 struct request_queue *q = hctx->queue;
1368
07068d5b
JA
1369 spin_lock(&ctx->lock);
1370 if (!blk_mq_attempt_merge(q, ctx, bio)) {
1371 blk_mq_bio_to_request(rq, bio);
1372 goto insert_rq;
1373 }
320ae51f 1374
07068d5b 1375 spin_unlock(&ctx->lock);
bd166ef1 1376 __blk_mq_finish_request(hctx, ctx, rq);
07068d5b 1377 return true;
14ec77f3 1378 }
07068d5b 1379}
14ec77f3 1380
fd2d3326
JA
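/*
 * Build the polling cookie for a request: encode the driver tag when one
 * is assigned, otherwise the scheduler's internal tag.
 */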
1381static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
1382{
bd166ef1
JA
1383 if (rq->tag != -1)
1384 return blk_tag_to_qc_t(rq->tag, hctx->queue_num, false);
1385
1386 return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
fd2d3326
JA
1387}
1388
066a4a73 1389static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
f984df1f 1390{
f984df1f 1391 struct request_queue *q = rq->q;
f984df1f
SL
1392 struct blk_mq_queue_data bd = {
1393 .rq = rq,
1394 .list = NULL,
1395 .last = 1
1396 };
bd166ef1
JA
1397 struct blk_mq_hw_ctx *hctx;
1398 blk_qc_t new_cookie;
1399 int ret;
f984df1f 1400
bd166ef1 1401 if (q->elevator)
2253efc8
BVA
1402 goto insert;
1403
bd166ef1
JA
1404 if (!blk_mq_get_driver_tag(rq, &hctx, false))
1405 goto insert;
1406
1407 new_cookie = request_to_qc_t(hctx, rq);
1408
f984df1f
SL
1409 /*
1410 * For OK queue, we are done. For error, kill it. Any other
1411 * error (busy), just add it to our list as we previously
1412 * would have done
1413 */
1414 ret = q->mq_ops->queue_rq(hctx, &bd);
7b371636
JA
1415 if (ret == BLK_MQ_RQ_QUEUE_OK) {
1416 *cookie = new_cookie;
2253efc8 1417 return;
7b371636 1418 }
f984df1f 1419
7b371636
JA
1420 __blk_mq_requeue_request(rq);
1421
1422 if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
1423 *cookie = BLK_QC_T_NONE;
1424 rq->errors = -EIO;
1425 blk_mq_end_request(rq, rq->errors);
2253efc8 1426 return;
f984df1f 1427 }
7b371636 1428
2253efc8 1429insert:
bd6737f1 1430 blk_mq_sched_insert_request(rq, false, true, true, false);
f984df1f
SL
1431}
1432
07068d5b
JA
1433/*
1434 * Multiple hardware queue variant. This will not use per-process plugs,
1435 * but will attempt to bypass the hctx queueing if we can go straight to
1436 * hardware for SYNC IO.
1437 */
dece1635 1438static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
07068d5b 1439{
ef295ecf 1440 const int is_sync = op_is_sync(bio->bi_opf);
f73f44eb 1441 const int is_flush_fua = op_is_flush(bio->bi_opf);
5a797e00 1442 struct blk_mq_alloc_data data = { .flags = 0 };
07068d5b 1443 struct request *rq;
6a83e74d 1444 unsigned int request_count = 0, srcu_idx;
f984df1f 1445 struct blk_plug *plug;
5b3f341f 1446 struct request *same_queue_rq = NULL;
7b371636 1447 blk_qc_t cookie;
87760e5e 1448 unsigned int wb_acct;
07068d5b
JA
1449
1450 blk_queue_bounce(q, &bio);
1451
1452 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
4246a0b6 1453 bio_io_error(bio);
dece1635 1454 return BLK_QC_T_NONE;
07068d5b
JA
1455 }
1456
54efd50b
KO
1457 blk_queue_split(q, &bio, q->bio_split);
1458
87c279e6
OS
1459 if (!is_flush_fua && !blk_queue_nomerges(q) &&
1460 blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
1461 return BLK_QC_T_NONE;
f984df1f 1462
bd166ef1
JA
1463 if (blk_mq_sched_bio_merge(q, bio))
1464 return BLK_QC_T_NONE;
1465
87760e5e
JA
1466 wb_acct = wbt_wait(q->rq_wb, bio, NULL);
1467
bd166ef1
JA
1468 trace_block_getrq(q, bio, bio->bi_opf);
1469
1470 rq = blk_mq_sched_get_request(q, bio, bio->bi_opf, &data);
87760e5e
JA
1471 if (unlikely(!rq)) {
1472 __wbt_done(q->rq_wb, wb_acct);
dece1635 1473 return BLK_QC_T_NONE;
87760e5e
JA
1474 }
1475
1476 wbt_track(&rq->issue_stat, wb_acct);
07068d5b 1477
fd2d3326 1478 cookie = request_to_qc_t(data.hctx, rq);
07068d5b
JA
1479
1480 if (unlikely(is_flush_fua)) {
0c2a6fe4
JA
1481 if (q->elevator)
1482 goto elv_insert;
07068d5b
JA
1483 blk_mq_bio_to_request(rq, bio);
1484 blk_insert_flush(rq);
0c2a6fe4 1485 goto run_queue;
07068d5b
JA
1486 }
1487
f984df1f 1488 plug = current->plug;
e167dfb5
JA
1489 /*
1490 * If the driver supports defer issued based on 'last', then
1491 * queue it up like normal since we can potentially save some
1492 * CPU this way.
1493 */
f984df1f
SL
1494 if (((plug && !blk_queue_nomerges(q)) || is_sync) &&
1495 !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) {
1496 struct request *old_rq = NULL;
07068d5b
JA
1497
1498 blk_mq_bio_to_request(rq, bio);
07068d5b
JA
1499
1500 /*
6a83e74d 1501 * We do limited plugging. If the bio can be merged, do that.
f984df1f
SL
1502 * Otherwise the existing request in the plug list will be
1503 * issued. So the plug list will have one request at most
07068d5b 1504 */
f984df1f 1505 if (plug) {
5b3f341f
SL
1506 /*
1507 * The plug list might get flushed before this. If that
b094f89c
JA
1508 * happens, same_queue_rq is invalid and plug list is
1509 * empty
1510 */
5b3f341f
SL
1511 if (same_queue_rq && !list_empty(&plug->mq_list)) {
1512 old_rq = same_queue_rq;
f984df1f 1513 list_del_init(&old_rq->queuelist);
07068d5b 1514 }
f984df1f
SL
1515 list_add_tail(&rq->queuelist, &plug->mq_list);
1516 } else /* is_sync */
1517 old_rq = rq;
1518 blk_mq_put_ctx(data.ctx);
1519 if (!old_rq)
7b371636 1520 goto done;
6a83e74d
BVA
1521
1522 if (!(data.hctx->flags & BLK_MQ_F_BLOCKING)) {
1523 rcu_read_lock();
066a4a73 1524 blk_mq_try_issue_directly(old_rq, &cookie);
6a83e74d
BVA
1525 rcu_read_unlock();
1526 } else {
1527 srcu_idx = srcu_read_lock(&data.hctx->queue_rq_srcu);
066a4a73 1528 blk_mq_try_issue_directly(old_rq, &cookie);
6a83e74d
BVA
1529 srcu_read_unlock(&data.hctx->queue_rq_srcu, srcu_idx);
1530 }
7b371636 1531 goto done;
07068d5b
JA
1532 }
1533
bd166ef1 1534 if (q->elevator) {
0c2a6fe4 1535elv_insert:
bd166ef1
JA
1536 blk_mq_put_ctx(data.ctx);
1537 blk_mq_bio_to_request(rq, bio);
0abad774 1538 blk_mq_sched_insert_request(rq, false, true,
bd6737f1 1539 !is_sync || is_flush_fua, true);
bd166ef1
JA
1540 goto done;
1541 }
07068d5b
JA
1542 if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1543 /*
1544 * For a SYNC request, send it to the hardware immediately. For
1545 * an ASYNC request, just ensure that we run it later on. The
1546 * latter allows for merging opportunities and more efficient
1547 * dispatching.
1548 */
0c2a6fe4 1549run_queue:
07068d5b
JA
1550 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1551 }
07068d5b 1552 blk_mq_put_ctx(data.ctx);
7b371636
JA
1553done:
1554 return cookie;
07068d5b
JA
1555}
1556
1557/*
1558 * Single hardware queue variant. This will attempt to use any per-process
1559 * plug for merging and IO deferral.
1560 */
dece1635 1561static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
07068d5b 1562{
ef295ecf 1563 const int is_sync = op_is_sync(bio->bi_opf);
f73f44eb 1564 const int is_flush_fua = op_is_flush(bio->bi_opf);
e6c4438b
JM
1565 struct blk_plug *plug;
1566 unsigned int request_count = 0;
5a797e00 1567 struct blk_mq_alloc_data data = { .flags = 0 };
07068d5b 1568 struct request *rq;
7b371636 1569 blk_qc_t cookie;
87760e5e 1570 unsigned int wb_acct;
07068d5b 1571
07068d5b
JA
1572 blk_queue_bounce(q, &bio);
1573
1574 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
4246a0b6 1575 bio_io_error(bio);
dece1635 1576 return BLK_QC_T_NONE;
07068d5b
JA
1577 }
1578
54efd50b
KO
1579 blk_queue_split(q, &bio, q->bio_split);
1580
87c279e6
OS
1581 if (!is_flush_fua && !blk_queue_nomerges(q)) {
1582 if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
1583 return BLK_QC_T_NONE;
1584 } else
1585 request_count = blk_plug_queued_count(q);
07068d5b 1586
bd166ef1
JA
1587 if (blk_mq_sched_bio_merge(q, bio))
1588 return BLK_QC_T_NONE;
1589
87760e5e
JA
1590 wb_acct = wbt_wait(q->rq_wb, bio, NULL);
1591
bd166ef1
JA
1592 trace_block_getrq(q, bio, bio->bi_opf);
1593
1594 rq = blk_mq_sched_get_request(q, bio, bio->bi_opf, &data);
87760e5e
JA
1595 if (unlikely(!rq)) {
1596 __wbt_done(q->rq_wb, wb_acct);
dece1635 1597 return BLK_QC_T_NONE;
87760e5e
JA
1598 }
1599
1600 wbt_track(&rq->issue_stat, wb_acct);
320ae51f 1601
fd2d3326 1602 cookie = request_to_qc_t(data.hctx, rq);
320ae51f
JA
1603
1604 if (unlikely(is_flush_fua)) {
0c2a6fe4
JA
1605 if (q->elevator)
1606 goto elv_insert;
320ae51f 1607 blk_mq_bio_to_request(rq, bio);
320ae51f 1608 blk_insert_flush(rq);
0c2a6fe4 1609 goto run_queue;
320ae51f
JA
1610 }
1611
1612 /*
1613 * A task plug currently exists. Since this is completely lockless,
1614 * utilize that to temporarily store requests until the task is
1615 * either done or scheduled away.
1616 */
e6c4438b
JM
1617 plug = current->plug;
1618 if (plug) {
600271d9
SL
1619 struct request *last = NULL;
1620
e6c4438b 1621 blk_mq_bio_to_request(rq, bio);
0a6219a9
ML
1622
1623 /*
 1624 * @request_count may become stale if the task is scheduled
 1625 * out, so check the list again.
1626 */
1627 if (list_empty(&plug->mq_list))
1628 request_count = 0;
676d0607 1629 if (!request_count)
e6c4438b 1630 trace_block_plug(q);
600271d9
SL
1631 else
1632 last = list_entry_rq(plug->mq_list.prev);
b094f89c
JA
1633
1634 blk_mq_put_ctx(data.ctx);
1635
600271d9
SL
1636 if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
1637 blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
e6c4438b
JM
1638 blk_flush_plug_list(plug, false);
1639 trace_block_plug(q);
320ae51f 1640 }
b094f89c 1641
e6c4438b 1642 list_add_tail(&rq->queuelist, &plug->mq_list);
7b371636 1643 return cookie;
320ae51f
JA
1644 }
1645
bd166ef1 1646 if (q->elevator) {
0c2a6fe4 1647elv_insert:
bd166ef1
JA
1648 blk_mq_put_ctx(data.ctx);
1649 blk_mq_bio_to_request(rq, bio);
0abad774 1650 blk_mq_sched_insert_request(rq, false, true,
bd6737f1 1651 !is_sync || is_flush_fua, true);
bd166ef1
JA
1652 goto done;
1653 }
07068d5b
JA
1654 if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1655 /*
1656 * For a SYNC request, send it to the hardware immediately. For
1657 * an ASYNC request, just ensure that we run it later on. The
1658 * latter allows for merging opportunities and more efficient
1659 * dispatching.
1660 */
0c2a6fe4 1661run_queue:
07068d5b 1662 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
320ae51f
JA
1663 }
1664
07068d5b 1665 blk_mq_put_ctx(data.ctx);
bd166ef1 1666done:
7b371636 1667 return cookie;
320ae51f
JA
1668}
1669
cc71a6f4
JA
1670void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
1671 unsigned int hctx_idx)
95363efd 1672{
e9b267d9 1673 struct page *page;
320ae51f 1674
24d2f903 1675 if (tags->rqs && set->ops->exit_request) {
e9b267d9 1676 int i;
320ae51f 1677
24d2f903 1678 for (i = 0; i < tags->nr_tags; i++) {
2af8cbe3
JA
1679 struct request *rq = tags->static_rqs[i];
1680
1681 if (!rq)
e9b267d9 1682 continue;
2af8cbe3 1683 set->ops->exit_request(set->driver_data, rq,
24d2f903 1684 hctx_idx, i);
2af8cbe3 1685 tags->static_rqs[i] = NULL;
e9b267d9 1686 }
320ae51f 1687 }
320ae51f 1688
24d2f903
CH
1689 while (!list_empty(&tags->page_list)) {
1690 page = list_first_entry(&tags->page_list, struct page, lru);
6753471c 1691 list_del_init(&page->lru);
f75782e4
CM
1692 /*
1693 * Remove kmemleak object previously allocated in
1694 * blk_mq_init_rq_map().
1695 */
1696 kmemleak_free(page_address(page));
320ae51f
JA
1697 __free_pages(page, page->private);
1698 }
cc71a6f4 1699}
320ae51f 1700
cc71a6f4
JA
1701void blk_mq_free_rq_map(struct blk_mq_tags *tags)
1702{
24d2f903 1703 kfree(tags->rqs);
cc71a6f4 1704 tags->rqs = NULL;
2af8cbe3
JA
1705 kfree(tags->static_rqs);
1706 tags->static_rqs = NULL;
320ae51f 1707
24d2f903 1708 blk_mq_free_tags(tags);
320ae51f
JA
1709}
1710
cc71a6f4
JA
1711struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
1712 unsigned int hctx_idx,
1713 unsigned int nr_tags,
1714 unsigned int reserved_tags)
320ae51f 1715{
24d2f903 1716 struct blk_mq_tags *tags;
59f082e4 1717 int node;
320ae51f 1718
59f082e4
SL
1719 node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
1720 if (node == NUMA_NO_NODE)
1721 node = set->numa_node;
1722
1723 tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
24391c0d 1724 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
24d2f903
CH
1725 if (!tags)
1726 return NULL;
320ae51f 1727
cc71a6f4 1728 tags->rqs = kzalloc_node(nr_tags * sizeof(struct request *),
36e1f3d1 1729 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
59f082e4 1730 node);
24d2f903
CH
1731 if (!tags->rqs) {
1732 blk_mq_free_tags(tags);
1733 return NULL;
1734 }
320ae51f 1735
2af8cbe3
JA
1736 tags->static_rqs = kzalloc_node(nr_tags * sizeof(struct request *),
1737 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
59f082e4 1738 node);
2af8cbe3
JA
1739 if (!tags->static_rqs) {
1740 kfree(tags->rqs);
1741 blk_mq_free_tags(tags);
1742 return NULL;
1743 }
1744
cc71a6f4
JA
1745 return tags;
1746}
1747
1748static size_t order_to_size(unsigned int order)
1749{
1750 return (size_t)PAGE_SIZE << order;
1751}
1752
1753int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
1754 unsigned int hctx_idx, unsigned int depth)
1755{
1756 unsigned int i, j, entries_per_page, max_order = 4;
1757 size_t rq_size, left;
59f082e4
SL
1758 int node;
1759
1760 node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
1761 if (node == NUMA_NO_NODE)
1762 node = set->numa_node;
cc71a6f4
JA
1763
1764 INIT_LIST_HEAD(&tags->page_list);
1765
320ae51f
JA
1766 /*
1767 * rq_size is the size of the request plus driver payload, rounded
1768 * to the cacheline size
1769 */
24d2f903 1770 rq_size = round_up(sizeof(struct request) + set->cmd_size,
320ae51f 1771 cache_line_size());
cc71a6f4 1772 left = rq_size * depth;
320ae51f 1773
cc71a6f4 1774 for (i = 0; i < depth; ) {
320ae51f
JA
1775 int this_order = max_order;
1776 struct page *page;
1777 int to_do;
1778 void *p;
1779
b3a834b1 1780 while (this_order && left < order_to_size(this_order - 1))
320ae51f
JA
1781 this_order--;
1782
1783 do {
59f082e4 1784 page = alloc_pages_node(node,
36e1f3d1 1785 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
a5164405 1786 this_order);
320ae51f
JA
1787 if (page)
1788 break;
1789 if (!this_order--)
1790 break;
1791 if (order_to_size(this_order) < rq_size)
1792 break;
1793 } while (1);
1794
1795 if (!page)
24d2f903 1796 goto fail;
320ae51f
JA
1797
1798 page->private = this_order;
24d2f903 1799 list_add_tail(&page->lru, &tags->page_list);
320ae51f
JA
1800
1801 p = page_address(page);
f75782e4
CM
1802 /*
1803 * Allow kmemleak to scan these pages as they contain pointers
1804 * to additional allocations, such as those made via ops->init_request().
1805 */
36e1f3d1 1806 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
320ae51f 1807 entries_per_page = order_to_size(this_order) / rq_size;
cc71a6f4 1808 to_do = min(entries_per_page, depth - i);
320ae51f
JA
1809 left -= to_do * rq_size;
1810 for (j = 0; j < to_do; j++) {
2af8cbe3
JA
1811 struct request *rq = p;
1812
1813 tags->static_rqs[i] = rq;
24d2f903
CH
1814 if (set->ops->init_request) {
1815 if (set->ops->init_request(set->driver_data,
2af8cbe3 1816 rq, hctx_idx, i,
59f082e4 1817 node)) {
2af8cbe3 1818 tags->static_rqs[i] = NULL;
24d2f903 1819 goto fail;
a5164405 1820 }
e9b267d9
CH
1821 }
1822
320ae51f
JA
1823 p += rq_size;
1824 i++;
1825 }
1826 }
cc71a6f4 1827 return 0;
320ae51f 1828
24d2f903 1829fail:
cc71a6f4
JA
1830 blk_mq_free_rqs(set, tags, hctx_idx);
1831 return -ENOMEM;
320ae51f
JA
1832}
1833
e57690fe
JA
1834/*
1835 * 'cpu' is going away. Splice any existing rq_list entries from this
1836 * software queue to the hw queue dispatch list, and ensure that it
1837 * gets run.
1838 */
9467f859 1839static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
484b4061 1840{
9467f859 1841 struct blk_mq_hw_ctx *hctx;
484b4061
JA
1842 struct blk_mq_ctx *ctx;
1843 LIST_HEAD(tmp);
1844
9467f859 1845 hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
e57690fe 1846 ctx = __blk_mq_get_ctx(hctx->queue, cpu);
484b4061
JA
1847
1848 spin_lock(&ctx->lock);
1849 if (!list_empty(&ctx->rq_list)) {
1850 list_splice_init(&ctx->rq_list, &tmp);
1851 blk_mq_hctx_clear_pending(hctx, ctx);
1852 }
1853 spin_unlock(&ctx->lock);
1854
1855 if (list_empty(&tmp))
9467f859 1856 return 0;
484b4061 1857
e57690fe
JA
1858 spin_lock(&hctx->lock);
1859 list_splice_tail_init(&tmp, &hctx->dispatch);
1860 spin_unlock(&hctx->lock);
484b4061
JA
1861
1862 blk_mq_run_hw_queue(hctx, true);
9467f859 1863 return 0;
484b4061
JA
1864}
1865
9467f859 1866static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
484b4061 1867{
9467f859
TG
1868 cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
1869 &hctx->cpuhp_dead);
484b4061
JA
1870}
1871
c3b4afca 1872/* hctx->ctxs will be freed in queue's release handler */
08e98fc6
ML
1873static void blk_mq_exit_hctx(struct request_queue *q,
1874 struct blk_mq_tag_set *set,
1875 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
1876{
f70ced09
ML
1877 unsigned flush_start_tag = set->queue_depth;
1878
08e98fc6
ML
1879 blk_mq_tag_idle(hctx);
1880
f70ced09
ML
1881 if (set->ops->exit_request)
1882 set->ops->exit_request(set->driver_data,
1883 hctx->fq->flush_rq, hctx_idx,
1884 flush_start_tag + hctx_idx);
1885
08e98fc6
ML
1886 if (set->ops->exit_hctx)
1887 set->ops->exit_hctx(hctx, hctx_idx);
1888
6a83e74d
BVA
1889 if (hctx->flags & BLK_MQ_F_BLOCKING)
1890 cleanup_srcu_struct(&hctx->queue_rq_srcu);
1891
9467f859 1892 blk_mq_remove_cpuhp(hctx);
f70ced09 1893 blk_free_flush_queue(hctx->fq);
88459642 1894 sbitmap_free(&hctx->ctx_map);
08e98fc6
ML
1895}
1896
624dbe47
ML
1897static void blk_mq_exit_hw_queues(struct request_queue *q,
1898 struct blk_mq_tag_set *set, int nr_queue)
1899{
1900 struct blk_mq_hw_ctx *hctx;
1901 unsigned int i;
1902
1903 queue_for_each_hw_ctx(q, hctx, i) {
1904 if (i == nr_queue)
1905 break;
08e98fc6 1906 blk_mq_exit_hctx(q, set, hctx, i);
624dbe47 1907 }
624dbe47
ML
1908}
1909
1910static void blk_mq_free_hw_queues(struct request_queue *q,
1911 struct blk_mq_tag_set *set)
1912{
1913 struct blk_mq_hw_ctx *hctx;
1914 unsigned int i;
1915
e09aae7e 1916 queue_for_each_hw_ctx(q, hctx, i)
624dbe47 1917 free_cpumask_var(hctx->cpumask);
624dbe47
ML
1918}
1919
08e98fc6
ML
1920static int blk_mq_init_hctx(struct request_queue *q,
1921 struct blk_mq_tag_set *set,
1922 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
320ae51f 1923{
08e98fc6 1924 int node;
f70ced09 1925 unsigned flush_start_tag = set->queue_depth;
08e98fc6
ML
1926
1927 node = hctx->numa_node;
1928 if (node == NUMA_NO_NODE)
1929 node = hctx->numa_node = set->numa_node;
1930
27489a3c 1931 INIT_WORK(&hctx->run_work, blk_mq_run_work_fn);
08e98fc6
ML
1932 INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
1933 spin_lock_init(&hctx->lock);
1934 INIT_LIST_HEAD(&hctx->dispatch);
1935 hctx->queue = q;
1936 hctx->queue_num = hctx_idx;
2404e607 1937 hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
08e98fc6 1938
9467f859 1939 cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
08e98fc6
ML
1940
1941 hctx->tags = set->tags[hctx_idx];
320ae51f
JA
1942
1943 /*
08e98fc6
ML
1944 * Allocate space for all possible cpus to avoid allocation at
1945 * runtime
320ae51f 1946 */
08e98fc6
ML
1947 hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
1948 GFP_KERNEL, node);
1949 if (!hctx->ctxs)
1950 goto unregister_cpu_notifier;
320ae51f 1951
88459642
OS
1952 if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
1953 node))
08e98fc6 1954 goto free_ctxs;
320ae51f 1955
08e98fc6 1956 hctx->nr_ctx = 0;
320ae51f 1957
08e98fc6
ML
1958 if (set->ops->init_hctx &&
1959 set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
1960 goto free_bitmap;
320ae51f 1961
f70ced09
ML
1962 hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
1963 if (!hctx->fq)
1964 goto exit_hctx;
320ae51f 1965
f70ced09
ML
1966 if (set->ops->init_request &&
1967 set->ops->init_request(set->driver_data,
1968 hctx->fq->flush_rq, hctx_idx,
1969 flush_start_tag + hctx_idx, node))
1970 goto free_fq;
320ae51f 1971
6a83e74d
BVA
1972 if (hctx->flags & BLK_MQ_F_BLOCKING)
1973 init_srcu_struct(&hctx->queue_rq_srcu);
1974
08e98fc6 1975 return 0;
320ae51f 1976
f70ced09
ML
1977 free_fq:
1978 kfree(hctx->fq);
1979 exit_hctx:
1980 if (set->ops->exit_hctx)
1981 set->ops->exit_hctx(hctx, hctx_idx);
08e98fc6 1982 free_bitmap:
88459642 1983 sbitmap_free(&hctx->ctx_map);
08e98fc6
ML
1984 free_ctxs:
1985 kfree(hctx->ctxs);
1986 unregister_cpu_notifier:
9467f859 1987 blk_mq_remove_cpuhp(hctx);
08e98fc6
ML
1988 return -1;
1989}
320ae51f 1990
320ae51f
JA
1991static void blk_mq_init_cpu_queues(struct request_queue *q,
1992 unsigned int nr_hw_queues)
1993{
1994 unsigned int i;
1995
1996 for_each_possible_cpu(i) {
1997 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
1998 struct blk_mq_hw_ctx *hctx;
1999
2000 memset(__ctx, 0, sizeof(*__ctx));
2001 __ctx->cpu = i;
2002 spin_lock_init(&__ctx->lock);
2003 INIT_LIST_HEAD(&__ctx->rq_list);
2004 __ctx->queue = q;
cf43e6be
JA
2005 blk_stat_init(&__ctx->stat[BLK_STAT_READ]);
2006 blk_stat_init(&__ctx->stat[BLK_STAT_WRITE]);
320ae51f
JA
2007
2008 /* If the cpu isn't online, the cpu is mapped to first hctx */
320ae51f
JA
2009 if (!cpu_online(i))
2010 continue;
2011
7d7e0f90 2012 hctx = blk_mq_map_queue(q, i);
e4043dcf 2013
320ae51f
JA
2014 /*
2015 * Set local node, IFF we have more than one hw queue. If
2016 * not, we remain on the home node of the device
2017 */
2018 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
bffed457 2019 hctx->numa_node = local_memory_node(cpu_to_node(i));
320ae51f
JA
2020 }
2021}
2022
cc71a6f4
JA
2023static bool __blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, int hctx_idx)
2024{
2025 int ret = 0;
2026
2027 set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
2028 set->queue_depth, set->reserved_tags);
2029 if (!set->tags[hctx_idx])
2030 return false;
2031
2032 ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
2033 set->queue_depth);
2034 if (!ret)
2035 return true;
2036
2037 blk_mq_free_rq_map(set->tags[hctx_idx]);
2038 set->tags[hctx_idx] = NULL;
2039 return false;
2040}
2041
2042static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
2043 unsigned int hctx_idx)
2044{
bd166ef1
JA
2045 if (set->tags[hctx_idx]) {
2046 blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
2047 blk_mq_free_rq_map(set->tags[hctx_idx]);
2048 set->tags[hctx_idx] = NULL;
2049 }
cc71a6f4
JA
2050}
2051
5778322e
AM
2052static void blk_mq_map_swqueue(struct request_queue *q,
2053 const struct cpumask *online_mask)
320ae51f 2054{
d1b1cea1 2055 unsigned int i, hctx_idx;
320ae51f
JA
2056 struct blk_mq_hw_ctx *hctx;
2057 struct blk_mq_ctx *ctx;
2a34c087 2058 struct blk_mq_tag_set *set = q->tag_set;
320ae51f 2059
60de074b
AM
2060 /*
2061 * Avoid others reading an incomplete hctx->cpumask through sysfs
2062 */
2063 mutex_lock(&q->sysfs_lock);
2064
320ae51f 2065 queue_for_each_hw_ctx(q, hctx, i) {
e4043dcf 2066 cpumask_clear(hctx->cpumask);
320ae51f
JA
2067 hctx->nr_ctx = 0;
2068 }
2069
2070 /*
2071 * Map software to hardware queues
2072 */
897bb0c7 2073 for_each_possible_cpu(i) {
320ae51f 2074 /* If the cpu isn't online, the cpu is mapped to first hctx */
5778322e 2075 if (!cpumask_test_cpu(i, online_mask))
e4043dcf
JA
2076 continue;
2077
d1b1cea1
GKB
2078 hctx_idx = q->mq_map[i];
2079 /* unmapped hw queue can be remapped after CPU topo changed */
cc71a6f4
JA
2080 if (!set->tags[hctx_idx] &&
2081 !__blk_mq_alloc_rq_map(set, hctx_idx)) {
d1b1cea1
GKB
2082 /*
2083 * If tags initialization fails for some hctx,
2084 * that hctx won't be brought online. In this
2085 * case, remap the current ctx to hctx[0] which
2086 * is guaranteed to always have tags allocated
2087 */
cc71a6f4 2088 q->mq_map[i] = 0;
d1b1cea1
GKB
2089 }
2090
897bb0c7 2091 ctx = per_cpu_ptr(q->queue_ctx, i);
7d7e0f90 2092 hctx = blk_mq_map_queue(q, i);
868f2f0b 2093
e4043dcf 2094 cpumask_set_cpu(i, hctx->cpumask);
320ae51f
JA
2095 ctx->index_hw = hctx->nr_ctx;
2096 hctx->ctxs[hctx->nr_ctx++] = ctx;
2097 }
506e931f 2098
60de074b
AM
2099 mutex_unlock(&q->sysfs_lock);
2100
506e931f 2101 queue_for_each_hw_ctx(q, hctx, i) {
484b4061 2102 /*
a68aafa5
JA
2103 * If no software queues are mapped to this hardware queue,
2104 * disable it and free the request entries.
484b4061
JA
2105 */
2106 if (!hctx->nr_ctx) {
d1b1cea1
GKB
2107 /* Never unmap queue 0. We need it as a
2108 * fallback in case allocation for a new
2109 * remap fails
2110 */
cc71a6f4
JA
2111 if (i && set->tags[i])
2112 blk_mq_free_map_and_requests(set, i);
2113
2a34c087 2114 hctx->tags = NULL;
484b4061
JA
2115 continue;
2116 }
2117
2a34c087
ML
2118 hctx->tags = set->tags[i];
2119 WARN_ON(!hctx->tags);
2120
889fa31f
CY
2121 /*
2122 * Set the map size to the number of mapped software queues.
2123 * This is more accurate and more efficient than looping
2124 * over all possibly mapped software queues.
2125 */
88459642 2126 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
889fa31f 2127
484b4061
JA
2128 /*
2129 * Initialize batch roundrobin counts
2130 */
506e931f
JA
2131 hctx->next_cpu = cpumask_first(hctx->cpumask);
2132 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
2133 }
320ae51f
JA
2134}
2135
2404e607 2136static void queue_set_hctx_shared(struct request_queue *q, bool shared)
0d2602ca
JA
2137{
2138 struct blk_mq_hw_ctx *hctx;
0d2602ca
JA
2139 int i;
2140
2404e607
JM
2141 queue_for_each_hw_ctx(q, hctx, i) {
2142 if (shared)
2143 hctx->flags |= BLK_MQ_F_TAG_SHARED;
2144 else
2145 hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
2146 }
2147}
2148
2149static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared)
2150{
2151 struct request_queue *q;
0d2602ca
JA
2152
2153 list_for_each_entry(q, &set->tag_list, tag_set_list) {
2154 blk_mq_freeze_queue(q);
2404e607 2155 queue_set_hctx_shared(q, shared);
0d2602ca
JA
2156 blk_mq_unfreeze_queue(q);
2157 }
2158}
2159
2160static void blk_mq_del_queue_tag_set(struct request_queue *q)
2161{
2162 struct blk_mq_tag_set *set = q->tag_set;
2163
0d2602ca
JA
2164 mutex_lock(&set->tag_list_lock);
2165 list_del_init(&q->tag_set_list);
2404e607
JM
2166 if (list_is_singular(&set->tag_list)) {
2167 /* just transitioned to unshared */
2168 set->flags &= ~BLK_MQ_F_TAG_SHARED;
2169 /* update existing queue */
2170 blk_mq_update_tag_set_depth(set, false);
2171 }
0d2602ca 2172 mutex_unlock(&set->tag_list_lock);
0d2602ca
JA
2173}
2174
2175static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
2176 struct request_queue *q)
2177{
2178 q->tag_set = set;
2179
2180 mutex_lock(&set->tag_list_lock);
2404e607
JM
2181
2182 /* Check to see if we're transitioning to shared (from 1 to 2 queues). */
2183 if (!list_empty(&set->tag_list) && !(set->flags & BLK_MQ_F_TAG_SHARED)) {
2184 set->flags |= BLK_MQ_F_TAG_SHARED;
2185 /* update existing queue */
2186 blk_mq_update_tag_set_depth(set, true);
2187 }
2188 if (set->flags & BLK_MQ_F_TAG_SHARED)
2189 queue_set_hctx_shared(q, true);
0d2602ca 2190 list_add_tail(&q->tag_set_list, &set->tag_list);
2404e607 2191
0d2602ca
JA
2192 mutex_unlock(&set->tag_list_lock);
2193}
2194
e09aae7e
ML
2195/*
2196 * This is the actual release handler for mq, but we run it from the
2197 * request queue's release handler to avoid use-after-free issues and
2198 * headaches; q->mq_kobj shouldn't have been introduced, but we can't
2199 * group the ctx/hctx kobjs without it.
2200 */
2201void blk_mq_release(struct request_queue *q)
2202{
2203 struct blk_mq_hw_ctx *hctx;
2204 unsigned int i;
2205
bd166ef1
JA
2206 blk_mq_sched_teardown(q);
2207
e09aae7e 2208 /* hctx kobj stays in hctx */
c3b4afca
ML
2209 queue_for_each_hw_ctx(q, hctx, i) {
2210 if (!hctx)
2211 continue;
2212 kfree(hctx->ctxs);
e09aae7e 2213 kfree(hctx);
c3b4afca 2214 }
e09aae7e 2215
a723bab3
AM
2216 q->mq_map = NULL;
2217
e09aae7e
ML
2218 kfree(q->queue_hw_ctx);
2219
2220 /* ctx kobj stays in queue_ctx */
2221 free_percpu(q->queue_ctx);
2222}
2223
24d2f903 2224struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
b62c21b7
MS
2225{
2226 struct request_queue *uninit_q, *q;
2227
2228 uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
2229 if (!uninit_q)
2230 return ERR_PTR(-ENOMEM);
2231
2232 q = blk_mq_init_allocated_queue(set, uninit_q);
2233 if (IS_ERR(q))
2234 blk_cleanup_queue(uninit_q);
2235
2236 return q;
2237}
2238EXPORT_SYMBOL(blk_mq_init_queue);
2239
868f2f0b
KB
2240static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
2241 struct request_queue *q)
320ae51f 2242{
868f2f0b
KB
2243 int i, j;
2244 struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
f14bbe77 2245
868f2f0b 2246 blk_mq_sysfs_unregister(q);
24d2f903 2247 for (i = 0; i < set->nr_hw_queues; i++) {
868f2f0b 2248 int node;
f14bbe77 2249
868f2f0b
KB
2250 if (hctxs[i])
2251 continue;
2252
2253 node = blk_mq_hw_queue_to_node(q->mq_map, i);
cdef54dd
CH
2254 hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
2255 GFP_KERNEL, node);
320ae51f 2256 if (!hctxs[i])
868f2f0b 2257 break;
320ae51f 2258
a86073e4 2259 if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
868f2f0b
KB
2260 node)) {
2261 kfree(hctxs[i]);
2262 hctxs[i] = NULL;
2263 break;
2264 }
e4043dcf 2265
0d2602ca 2266 atomic_set(&hctxs[i]->nr_active, 0);
f14bbe77 2267 hctxs[i]->numa_node = node;
320ae51f 2268 hctxs[i]->queue_num = i;
868f2f0b
KB
2269
2270 if (blk_mq_init_hctx(q, set, hctxs[i], i)) {
2271 free_cpumask_var(hctxs[i]->cpumask);
2272 kfree(hctxs[i]);
2273 hctxs[i] = NULL;
2274 break;
2275 }
2276 blk_mq_hctx_kobj_init(hctxs[i]);
320ae51f 2277 }
868f2f0b
KB
2278 for (j = i; j < q->nr_hw_queues; j++) {
2279 struct blk_mq_hw_ctx *hctx = hctxs[j];
2280
2281 if (hctx) {
cc71a6f4
JA
2282 if (hctx->tags)
2283 blk_mq_free_map_and_requests(set, j);
868f2f0b
KB
2284 blk_mq_exit_hctx(q, set, hctx, j);
2285 free_cpumask_var(hctx->cpumask);
2286 kobject_put(&hctx->kobj);
2287 kfree(hctx->ctxs);
2288 kfree(hctx);
2289 hctxs[j] = NULL;
2290
2291 }
2292 }
2293 q->nr_hw_queues = i;
2294 blk_mq_sysfs_register(q);
2295}
2296
2297struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
2298 struct request_queue *q)
2299{
66841672
ML
2300 /* mark the queue as mq asap */
2301 q->mq_ops = set->ops;
2302
868f2f0b
KB
2303 q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
2304 if (!q->queue_ctx)
c7de5726 2305 goto err_exit;
868f2f0b
KB
2306
2307 q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
2308 GFP_KERNEL, set->numa_node);
2309 if (!q->queue_hw_ctx)
2310 goto err_percpu;
2311
bdd17e75 2312 q->mq_map = set->mq_map;
868f2f0b
KB
2313
2314 blk_mq_realloc_hw_ctxs(set, q);
2315 if (!q->nr_hw_queues)
2316 goto err_hctxs;
320ae51f 2317
287922eb 2318 INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
e56f698b 2319 blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
320ae51f
JA
2320
2321 q->nr_queues = nr_cpu_ids;
320ae51f 2322
94eddfbe 2323 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
320ae51f 2324
05f1dd53
JA
2325 if (!(set->flags & BLK_MQ_F_SG_MERGE))
2326 q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
2327
1be036e9
CH
2328 q->sg_reserved_size = INT_MAX;
2329
2849450a 2330 INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
6fca6a61
CH
2331 INIT_LIST_HEAD(&q->requeue_list);
2332 spin_lock_init(&q->requeue_lock);
2333
07068d5b
JA
2334 if (q->nr_hw_queues > 1)
2335 blk_queue_make_request(q, blk_mq_make_request);
2336 else
2337 blk_queue_make_request(q, blk_sq_make_request);
2338
eba71768
JA
2339 /*
2340 * Do this after blk_queue_make_request() overrides it...
2341 */
2342 q->nr_requests = set->queue_depth;
2343
64f1c21e
JA
2344 /*
2345 * Default to classic polling
2346 */
2347 q->poll_nsec = -1;
2348
24d2f903
CH
2349 if (set->ops->complete)
2350 blk_queue_softirq_done(q, set->ops->complete);
30a91cb4 2351
24d2f903 2352 blk_mq_init_cpu_queues(q, set->nr_hw_queues);
320ae51f 2353
5778322e 2354 get_online_cpus();
320ae51f 2355 mutex_lock(&all_q_mutex);
320ae51f 2356
4593fdbe 2357 list_add_tail(&q->all_q_node, &all_q_list);
0d2602ca 2358 blk_mq_add_queue_tag_set(set, q);
5778322e 2359 blk_mq_map_swqueue(q, cpu_online_mask);
484b4061 2360
4593fdbe 2361 mutex_unlock(&all_q_mutex);
5778322e 2362 put_online_cpus();
4593fdbe 2363
d3484991
JA
2364 if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
2365 int ret;
2366
2367 ret = blk_mq_sched_init(q);
2368 if (ret)
2369 return ERR_PTR(ret);
2370 }
2371
320ae51f 2372 return q;
18741986 2373
320ae51f 2374err_hctxs:
868f2f0b 2375 kfree(q->queue_hw_ctx);
320ae51f 2376err_percpu:
868f2f0b 2377 free_percpu(q->queue_ctx);
c7de5726
ML
2378err_exit:
2379 q->mq_ops = NULL;
320ae51f
JA
2380 return ERR_PTR(-ENOMEM);
2381}
b62c21b7 2382EXPORT_SYMBOL(blk_mq_init_allocated_queue);
320ae51f
JA
2383
2384void blk_mq_free_queue(struct request_queue *q)
2385{
624dbe47 2386 struct blk_mq_tag_set *set = q->tag_set;
320ae51f 2387
0e626368
AM
2388 mutex_lock(&all_q_mutex);
2389 list_del_init(&q->all_q_node);
2390 mutex_unlock(&all_q_mutex);
2391
87760e5e
JA
2392 wbt_exit(q);
2393
0d2602ca
JA
2394 blk_mq_del_queue_tag_set(q);
2395
624dbe47
ML
2396 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
2397 blk_mq_free_hw_queues(q, set);
320ae51f 2398}
320ae51f
JA
2399
2400/* Basically redo blk_mq_init_queue with queue frozen */
5778322e
AM
2401static void blk_mq_queue_reinit(struct request_queue *q,
2402 const struct cpumask *online_mask)
320ae51f 2403{
4ecd4fef 2404 WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
320ae51f 2405
67aec14c
JA
2406 blk_mq_sysfs_unregister(q);
2407
320ae51f
JA
2408 /*
2409 * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
2410 * we should change hctx numa_node according to the new topology (this
2411 * involves freeing and re-allocating memory; worth doing?)
2412 */
2413
5778322e 2414 blk_mq_map_swqueue(q, online_mask);
320ae51f 2415
67aec14c 2416 blk_mq_sysfs_register(q);
320ae51f
JA
2417}
2418
65d5291e
SAS
2419/*
2420 * New online cpumask which is going to be set in this hotplug event.
2421 * Declare this cpumask as global, as cpu-hotplug operations are invoked
2422 * one-by-one and dynamically allocating this could result in a failure.
2423 */
2424static struct cpumask cpuhp_online_new;
2425
2426static void blk_mq_queue_reinit_work(void)
320ae51f
JA
2427{
2428 struct request_queue *q;
320ae51f
JA
2429
2430 mutex_lock(&all_q_mutex);
f3af020b
TH
2431 /*
2432 * We need to freeze and reinit all existing queues. Freezing
2433 * involves a synchronous wait for an RCU grace period, and doing it
2434 * one by one may take a long time. Start freezing all queues in
2435 * one swoop and then wait for the completions so that freezing can
2436 * take place in parallel.
2437 */
2438 list_for_each_entry(q, &all_q_list, all_q_node)
2439 blk_mq_freeze_queue_start(q);
415d3dab 2440 list_for_each_entry(q, &all_q_list, all_q_node)
f3af020b
TH
2441 blk_mq_freeze_queue_wait(q);
2442
320ae51f 2443 list_for_each_entry(q, &all_q_list, all_q_node)
65d5291e 2444 blk_mq_queue_reinit(q, &cpuhp_online_new);
f3af020b
TH
2445
2446 list_for_each_entry(q, &all_q_list, all_q_node)
2447 blk_mq_unfreeze_queue(q);
2448
320ae51f 2449 mutex_unlock(&all_q_mutex);
65d5291e
SAS
2450}
2451
2452static int blk_mq_queue_reinit_dead(unsigned int cpu)
2453{
97a32864 2454 cpumask_copy(&cpuhp_online_new, cpu_online_mask);
65d5291e
SAS
2455 blk_mq_queue_reinit_work();
2456 return 0;
2457}
2458
2459/*
2460 * Before a hot-added cpu starts handling requests, new mappings must be
2461 * established. Otherwise, these requests in the hw queue might never be
2462 * dispatched.
2463 *
2464 * For example, there is a single hw queue (hctx) and two CPU queues (ctx0
2465 * for CPU0, and ctx1 for CPU1).
2466 *
2467 * Now CPU1 is just onlined and a request is inserted into ctx1->rq_list
2468 * and bit0 is set in the pending bitmap, as ctx1->index_hw is still zero.
2469 *
2c3ad667
JA
2470 * And then while running hw queue, blk_mq_flush_busy_ctxs() finds bit0 is set
2471 * in pending bitmap and tries to retrieve requests in hctx->ctxs[0]->rq_list.
2472 * But hctx->ctxs[0] is a pointer to ctx0, so the request in ctx1->rq_list is
2473 * ignored.
65d5291e
SAS
2474 */
2475static int blk_mq_queue_reinit_prepare(unsigned int cpu)
2476{
2477 cpumask_copy(&cpuhp_online_new, cpu_online_mask);
2478 cpumask_set_cpu(cpu, &cpuhp_online_new);
2479 blk_mq_queue_reinit_work();
2480 return 0;
320ae51f
JA
2481}
2482
a5164405
JA
2483static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2484{
2485 int i;
2486
cc71a6f4
JA
2487 for (i = 0; i < set->nr_hw_queues; i++)
2488 if (!__blk_mq_alloc_rq_map(set, i))
a5164405 2489 goto out_unwind;
a5164405
JA
2490
2491 return 0;
2492
2493out_unwind:
2494 while (--i >= 0)
cc71a6f4 2495 blk_mq_free_rq_map(set->tags[i]);
a5164405 2496
a5164405
JA
2497 return -ENOMEM;
2498}
2499
2500/*
2501 * Allocate the request maps associated with this tag_set. Note that this
2502 * may reduce the depth asked for, if memory is tight. set->queue_depth
2503 * will be updated to reflect the allocated depth.
2504 */
2505static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2506{
2507 unsigned int depth;
2508 int err;
2509
2510 depth = set->queue_depth;
2511 do {
2512 err = __blk_mq_alloc_rq_maps(set);
2513 if (!err)
2514 break;
2515
2516 set->queue_depth >>= 1;
2517 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
2518 err = -ENOMEM;
2519 break;
2520 }
2521 } while (set->queue_depth);
2522
2523 if (!set->queue_depth || err) {
2524 pr_err("blk-mq: failed to allocate request map\n");
2525 return -ENOMEM;
2526 }
2527
2528 if (depth != set->queue_depth)
2529 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
2530 depth, set->queue_depth);
2531
2532 return 0;
2533}
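/*
 * Worked example of the fallback above (hypothetical numbers): a set that
 * asks for queue_depth == 1024 but fails allocation is retried at 512,
 * 256, and so on; the loop stops either when an allocation succeeds or
 * when the depth would fall below set->reserved_tags + BLK_MQ_TAG_MIN,
 * in which case -ENOMEM is returned.
 */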
2534
a4391c64
JA
2535/*
2536 * Alloc a tag set to be associated with one or more request queues.
2537 * May fail with EINVAL for various error conditions. May adjust the
2538 * requested depth down, if it is too large. In that case, the set
2539 * value will be stored in set->queue_depth.
2540 */
24d2f903
CH
2541int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2542{
da695ba2
CH
2543 int ret;
2544
205fb5f5
BVA
2545 BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
2546
24d2f903
CH
2547 if (!set->nr_hw_queues)
2548 return -EINVAL;
a4391c64 2549 if (!set->queue_depth)
24d2f903
CH
2550 return -EINVAL;
2551 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
2552 return -EINVAL;
2553
7d7e0f90 2554 if (!set->ops->queue_rq)
24d2f903
CH
2555 return -EINVAL;
2556
a4391c64
JA
2557 if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
2558 pr_info("blk-mq: reduced tag depth to %u\n",
2559 BLK_MQ_MAX_DEPTH);
2560 set->queue_depth = BLK_MQ_MAX_DEPTH;
2561 }
24d2f903 2562
6637fadf
SL
2563 /*
2564 * If a crashdump is active, then we are potentially in a very
2565 * memory constrained environment. Limit us to 1 queue and
2566 * 64 tags to prevent using too much memory.
2567 */
2568 if (is_kdump_kernel()) {
2569 set->nr_hw_queues = 1;
2570 set->queue_depth = min(64U, set->queue_depth);
2571 }
868f2f0b
KB
2572 /*
2573 * There is no use for more h/w queues than cpus.
2574 */
2575 if (set->nr_hw_queues > nr_cpu_ids)
2576 set->nr_hw_queues = nr_cpu_ids;
6637fadf 2577
868f2f0b 2578 set->tags = kzalloc_node(nr_cpu_ids * sizeof(struct blk_mq_tags *),
24d2f903
CH
2579 GFP_KERNEL, set->numa_node);
2580 if (!set->tags)
a5164405 2581 return -ENOMEM;
24d2f903 2582
da695ba2
CH
2583 ret = -ENOMEM;
2584 set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
2585 GFP_KERNEL, set->numa_node);
bdd17e75
CH
2586 if (!set->mq_map)
2587 goto out_free_tags;
2588
da695ba2
CH
2589 if (set->ops->map_queues)
2590 ret = set->ops->map_queues(set);
2591 else
2592 ret = blk_mq_map_queues(set);
2593 if (ret)
2594 goto out_free_mq_map;
2595
2596 ret = blk_mq_alloc_rq_maps(set);
2597 if (ret)
bdd17e75 2598 goto out_free_mq_map;
24d2f903 2599
0d2602ca
JA
2600 mutex_init(&set->tag_list_lock);
2601 INIT_LIST_HEAD(&set->tag_list);
2602
24d2f903 2603 return 0;
bdd17e75
CH
2604
2605out_free_mq_map:
2606 kfree(set->mq_map);
2607 set->mq_map = NULL;
2608out_free_tags:
5676e7b6
RE
2609 kfree(set->tags);
2610 set->tags = NULL;
da695ba2 2611 return ret;
24d2f903
CH
2612}
2613EXPORT_SYMBOL(blk_mq_alloc_tag_set);
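/*
 * Minimal driver-side sketch of the tag set / queue lifecycle. The names
 * my_mq_ops, my_queue_rq, struct my_cmd and the fail labels are
 * illustrative only and are not defined in this file:
 *
 *	static struct blk_mq_ops my_mq_ops = {
 *		.queue_rq	= my_queue_rq,
 *	};
 *
 *	set->ops		= &my_mq_ops;
 *	set->nr_hw_queues	= 1;
 *	set->queue_depth	= 128;
 *	set->numa_node		= NUMA_NO_NODE;
 *	set->cmd_size		= sizeof(struct my_cmd);
 *	set->flags		= BLK_MQ_F_SHOULD_MERGE;
 *	if (blk_mq_alloc_tag_set(set))
 *		goto fail;
 *	q = blk_mq_init_queue(set);
 *	if (IS_ERR(q))
 *		goto fail_free_tag_set;
 *	...
 *	blk_cleanup_queue(q);
 *	blk_mq_free_tag_set(set);
 */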
2614
2615void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2616{
2617 int i;
2618
cc71a6f4
JA
2619 for (i = 0; i < nr_cpu_ids; i++)
2620 blk_mq_free_map_and_requests(set, i);
484b4061 2621
bdd17e75
CH
2622 kfree(set->mq_map);
2623 set->mq_map = NULL;
2624
981bd189 2625 kfree(set->tags);
5676e7b6 2626 set->tags = NULL;
24d2f903
CH
2627}
2628EXPORT_SYMBOL(blk_mq_free_tag_set);
2629
e3a2b3f9
JA
2630int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2631{
2632 struct blk_mq_tag_set *set = q->tag_set;
2633 struct blk_mq_hw_ctx *hctx;
2634 int i, ret;
2635
bd166ef1 2636 if (!set)
e3a2b3f9
JA
2637 return -EINVAL;
2638
70f36b60
JA
2639 blk_mq_freeze_queue(q);
2640 blk_mq_quiesce_queue(q);
2641
e3a2b3f9
JA
2642 ret = 0;
2643 queue_for_each_hw_ctx(q, hctx, i) {
e9137d4b
KB
2644 if (!hctx->tags)
2645 continue;
bd166ef1
JA
2646 /*
2647 * If we're using an MQ scheduler, just update the scheduler
2648 * queue depth. This is similar to what the old code would do.
2649 */
70f36b60
JA
2650 if (!hctx->sched_tags) {
2651 ret = blk_mq_tag_update_depth(hctx, &hctx->tags,
2652 min(nr, set->queue_depth),
2653 false);
2654 } else {
2655 ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
2656 nr, true);
2657 }
e3a2b3f9
JA
2658 if (ret)
2659 break;
2660 }
2661
2662 if (!ret)
2663 q->nr_requests = nr;
2664
70f36b60
JA
2665 blk_mq_unfreeze_queue(q);
2666 blk_mq_start_stopped_hw_queues(q, true);
2667
e3a2b3f9
JA
2668 return ret;
2669}
2670
868f2f0b
KB
2671void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
2672{
2673 struct request_queue *q;
2674
2675 if (nr_hw_queues > nr_cpu_ids)
2676 nr_hw_queues = nr_cpu_ids;
2677 if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
2678 return;
2679
2680 list_for_each_entry(q, &set->tag_list, tag_set_list)
2681 blk_mq_freeze_queue(q);
2682
2683 set->nr_hw_queues = nr_hw_queues;
2684 list_for_each_entry(q, &set->tag_list, tag_set_list) {
2685 blk_mq_realloc_hw_ctxs(set, q);
2686
f6f94300
JB
2687 /*
2688 * Manually set the make_request_fn as blk_queue_make_request
2689 * resets a lot of the queue settings.
2690 */
868f2f0b 2691 if (q->nr_hw_queues > 1)
f6f94300 2692 q->make_request_fn = blk_mq_make_request;
868f2f0b 2693 else
f6f94300 2694 q->make_request_fn = blk_sq_make_request;
868f2f0b
KB
2695
2696 blk_mq_queue_reinit(q, cpu_online_mask);
2697 }
2698
2699 list_for_each_entry(q, &set->tag_list, tag_set_list)
2700 blk_mq_unfreeze_queue(q);
2701}
2702EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
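/*
 * Usage sketch: a driver that discovers a different number of hardware
 * queues after a controller reset can call, for example,
 * blk_mq_update_nr_hw_queues(&dev->tagset, new_nr_queues) on its live tag
 * set; the freeze/unfreeze above keeps I/O quiesced while the hctxs are
 * reallocated.  ("dev->tagset" and "new_nr_queues" are illustrative driver
 * names, not defined here.)
 */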
2703
64f1c21e
JA
2704static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
2705 struct blk_mq_hw_ctx *hctx,
2706 struct request *rq)
2707{
2708 struct blk_rq_stat stat[2];
2709 unsigned long ret = 0;
2710
2711 /*
2712 * If stats collection isn't on, don't sleep but turn it on for
2713 * future users
2714 */
2715 if (!blk_stat_enable(q))
2716 return 0;
2717
2718 /*
2719 * We don't have to do this once per IO; we should optimize this
2720 * to just use the current window of stats until it changes
2721 */
2722 memset(&stat, 0, sizeof(stat));
2723 blk_hctx_stat_get(hctx, stat);
2724
2725 /*
2726 * As an optimistic guess, use half of the mean service time
2727 * for this type of request. We can (and should) make this smarter.
2728 * For instance, if the completion latencies are tight, we can
2729 * get closer than just half the mean. This is especially
2730 * important on devices where the completion latencies are longer
2731 * than ~10 usec.
2732 */
2733 if (req_op(rq) == REQ_OP_READ && stat[BLK_STAT_READ].nr_samples)
2734 ret = (stat[BLK_STAT_READ].mean + 1) / 2;
2735 else if (req_op(rq) == REQ_OP_WRITE && stat[BLK_STAT_WRITE].nr_samples)
2736 ret = (stat[BLK_STAT_WRITE].mean + 1) / 2;
2737
2738 return ret;
2739}
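/*
 * For example (hypothetical figures): if the observed mean completion time
 * for reads on this hctx is 8000 ns, the hybrid poll path below sleeps for
 * roughly 4000 ns before falling back to busy polling.
 */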
2740
06426adf 2741static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
64f1c21e 2742 struct blk_mq_hw_ctx *hctx,
06426adf
JA
2743 struct request *rq)
2744{
2745 struct hrtimer_sleeper hs;
2746 enum hrtimer_mode mode;
64f1c21e 2747 unsigned int nsecs;
06426adf
JA
2748 ktime_t kt;
2749
64f1c21e
JA
2750 if (test_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags))
2751 return false;
2752
2753 /*
2754 * poll_nsec can be:
2755 *
2756 * -1: don't ever hybrid sleep
2757 * 0: use half of prev avg
2758 * >0: use this specific value
2759 */
2760 if (q->poll_nsec == -1)
2761 return false;
2762 else if (q->poll_nsec > 0)
2763 nsecs = q->poll_nsec;
2764 else
2765 nsecs = blk_mq_poll_nsecs(q, hctx, rq);
2766
2767 if (!nsecs)
06426adf
JA
2768 return false;
2769
2770 set_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
2771
2772 /*
2773 * This will be replaced with the stats tracking code, using
2774 * 'avg_completion_time / 2' as the pre-sleep target.
2775 */
8b0e1953 2776 kt = nsecs;
06426adf
JA
2777
2778 mode = HRTIMER_MODE_REL;
2779 hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
2780 hrtimer_set_expires(&hs.timer, kt);
2781
2782 hrtimer_init_sleeper(&hs, current);
2783 do {
2784 if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
2785 break;
2786 set_current_state(TASK_UNINTERRUPTIBLE);
2787 hrtimer_start_expires(&hs.timer, mode);
2788 if (hs.task)
2789 io_schedule();
2790 hrtimer_cancel(&hs.timer);
2791 mode = HRTIMER_MODE_ABS;
2792 } while (hs.task && !signal_pending(current));
2793
2794 __set_current_state(TASK_RUNNING);
2795 destroy_hrtimer_on_stack(&hs.timer);
2796 return true;
2797}
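/*
 * Note: q->poll_nsec is exposed through the queue's io_poll_delay sysfs
 * attribute, so the behaviour above can be tuned per device, e.g.
 * (illustrative path) "echo 0 > /sys/block/nvme0n1/queue/io_poll_delay"
 * selects the adaptive half-of-mean sleep, while -1 disables hybrid
 * sleeping entirely.
 */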
2798
bbd7bb70
JA
2799static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
2800{
2801 struct request_queue *q = hctx->queue;
2802 long state;
2803
06426adf
JA
2804 /*
2805 * If we sleep, have the caller restart the poll loop to reset
2806 * the state. Like for the other success return cases, the
2807 * caller is responsible for checking if the IO completed. If
2808 * the IO isn't complete, we'll get called again and will go
2809 * straight to the busy poll loop.
2810 */
64f1c21e 2811 if (blk_mq_poll_hybrid_sleep(q, hctx, rq))
06426adf
JA
2812 return true;
2813
bbd7bb70
JA
2814 hctx->poll_considered++;
2815
2816 state = current->state;
2817 while (!need_resched()) {
2818 int ret;
2819
2820 hctx->poll_invoked++;
2821
2822 ret = q->mq_ops->poll(hctx, rq->tag);
2823 if (ret > 0) {
2824 hctx->poll_success++;
2825 set_current_state(TASK_RUNNING);
2826 return true;
2827 }
2828
2829 if (signal_pending_state(state, current))
2830 set_current_state(TASK_RUNNING);
2831
2832 if (current->state == TASK_RUNNING)
2833 return true;
2834 if (ret < 0)
2835 break;
2836 cpu_relax();
2837 }
2838
2839 return false;
2840}
2841
2842bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
2843{
2844 struct blk_mq_hw_ctx *hctx;
2845 struct blk_plug *plug;
2846 struct request *rq;
2847
2848 if (!q->mq_ops || !q->mq_ops->poll || !blk_qc_t_valid(cookie) ||
2849 !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
2850 return false;
2851
2852 plug = current->plug;
2853 if (plug)
2854 blk_flush_plug_list(plug, false);
2855
2856 hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
bd166ef1
JA
2857 if (!blk_qc_t_is_internal(cookie))
2858 rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
2859 else
2860 rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
bbd7bb70
JA
2861
2862 return __blk_mq_poll(hctx, rq);
2863}
2864EXPORT_SYMBOL_GPL(blk_mq_poll);
2865
676141e4
JA
2866void blk_mq_disable_hotplug(void)
2867{
2868 mutex_lock(&all_q_mutex);
2869}
2870
2871void blk_mq_enable_hotplug(void)
2872{
2873 mutex_unlock(&all_q_mutex);
2874}
2875
320ae51f
JA
2876static int __init blk_mq_init(void)
2877{
9467f859
TG
2878 cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
2879 blk_mq_hctx_notify_dead);
320ae51f 2880
65d5291e
SAS
2881 cpuhp_setup_state_nocalls(CPUHP_BLK_MQ_PREPARE, "block/mq:prepare",
2882 blk_mq_queue_reinit_prepare,
2883 blk_mq_queue_reinit_dead);
320ae51f
JA
2884 return 0;
2885}
2886subsys_initcall(blk_mq_init);