/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/llist.h>
#include <linux/list_sort.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>

#include <trace/events/block.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);

/*
 * Check if any of the ctx's have pending work in this hardware queue
 */
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
	unsigned int i;

	for (i = 0; i < hctx->ctx_map.map_size; i++)
		if (hctx->ctx_map.map[i].word)
			return true;

	return false;
}

static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx,
					      struct blk_mq_ctx *ctx)
{
	return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word];
}

#define CTX_TO_BIT(hctx, ctx)	\
	((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1))

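/*
 * ctx_map is a two-level bitmap: ctx_map.map[] holds one word per
 * bits_per_word software queues, with each word padded out to its own
 * cache line by struct blk_align_bitmap. get_bm() picks the word for a
 * ctx and CTX_TO_BIT() the bit within it, so marking a ctx busy only
 * dirties one cache line.
 */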
/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
				     struct blk_mq_ctx *ctx)
{
	struct blk_align_bitmap *bm = get_bm(hctx, ctx);

	if (!test_bit(CTX_TO_BIT(hctx, ctx), &bm->word))
		set_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
				      struct blk_mq_ctx *ctx)
{
	struct blk_align_bitmap *bm = get_bm(hctx, ctx);

	clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
}

static int blk_mq_queue_enter(struct request_queue *q)
{
	while (true) {
		int ret;

		if (percpu_ref_tryget_live(&q->mq_usage_counter))
			return 0;

		ret = wait_event_interruptible(q->mq_freeze_wq,
				!q->mq_freeze_depth || blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
		if (ret)
			return ret;
	}
}

static void blk_mq_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->mq_usage_counter);
}

static void blk_mq_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, mq_usage_counter);

	wake_up_all(&q->mq_freeze_wq);
}

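/*
 * Queue freezing: the first freezer kills mq_usage_counter, so new
 * callers of blk_mq_queue_enter() block in its wait loop. Once the
 * in-flight references drain to zero, the release callback above wakes
 * blk_mq_freeze_queue_wait().
 */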
void blk_mq_freeze_queue_start(struct request_queue *q)
{
	bool freeze;

	spin_lock_irq(q->queue_lock);
	freeze = !q->mq_freeze_depth++;
	spin_unlock_irq(q->queue_lock);

	if (freeze) {
		percpu_ref_kill(&q->mq_usage_counter);
		blk_mq_run_queues(q, false);
	}
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);

static void blk_mq_freeze_queue_wait(struct request_queue *q)
{
	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
}

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_mq_freeze_queue(struct request_queue *q)
{
	blk_mq_freeze_queue_start(q);
	blk_mq_freeze_queue_wait(q);
}

void blk_mq_unfreeze_queue(struct request_queue *q)
{
	bool wake;

	spin_lock_irq(q->queue_lock);
	wake = !--q->mq_freeze_depth;
	WARN_ON_ONCE(q->mq_freeze_depth < 0);
	spin_unlock_irq(q->queue_lock);
	if (wake) {
		percpu_ref_reinit(&q->mq_usage_counter);
		wake_up_all(&q->mq_freeze_wq);
	}
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);

void blk_mq_wake_waiters(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hw_queue_mapped(hctx))
			blk_mq_tag_wakeup_all(hctx->tags, true);

	/*
	 * If we are called because the queue has now been marked as
	 * dying, we need to ensure that processes currently waiting on
	 * the queue are notified as well.
	 */
	wake_up_all(&q->mq_freeze_wq);
}

bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
{
	return blk_mq_has_free_tags(hctx->tags);
}
EXPORT_SYMBOL(blk_mq_can_queue);

static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
			       struct request *rq, unsigned int rw_flags)
{
	if (blk_queue_io_stat(q))
		rw_flags |= REQ_IO_STAT;

	INIT_LIST_HEAD(&rq->queuelist);
	/* csd/requeue_work/fifo_time is initialized before use */
	rq->q = q;
	rq->mq_ctx = ctx;
	rq->cmd_flags |= rw_flags;
	/* do not touch atomic flags, it needs atomic ops against the timer */
	rq->cpu = -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->rq_disk = NULL;
	rq->part = NULL;
	rq->start_time = jiffies;
#ifdef CONFIG_BLK_CGROUP
	rq->rl = NULL;
	set_start_time_ns(rq);
	rq->io_start_time_ns = 0;
#endif
	rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	rq->nr_integrity_segments = 0;
#endif
	rq->special = NULL;
	/* tag was already set */
	rq->errors = 0;

	rq->cmd = rq->__cmd;

	rq->extra_len = 0;
	rq->sense_len = 0;
	rq->resid_len = 0;
	rq->sense = NULL;

	INIT_LIST_HEAD(&rq->timeout_list);
	rq->timeout = 0;

	rq->end_io = NULL;
	rq->end_io_data = NULL;
	rq->next_rq = NULL;

	ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
}

static struct request *
__blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
{
	struct request *rq;
	unsigned int tag;

	tag = blk_mq_get_tag(data);
	if (tag != BLK_MQ_TAG_FAIL) {
		rq = data->hctx->tags->rqs[tag];

		if (blk_mq_tag_busy(data->hctx)) {
			rq->cmd_flags = REQ_MQ_INFLIGHT;
			atomic_inc(&data->hctx->nr_active);
		}

		rq->tag = tag;
		blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw);
		return rq;
	}

	return NULL;
}

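/*
 * Tag allocation is tried non-blocking first; if that fails and the
 * caller allows sleeping (__GFP_WAIT), the hardware queue is run to
 * complete requests and free up tags before retrying with blocking
 * semantics.
 */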
struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
		bool reserved)
{
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
	struct request *rq;
	struct blk_mq_alloc_data alloc_data;
	int ret;

	ret = blk_mq_queue_enter(q);
	if (ret)
		return ERR_PTR(ret);

	ctx = blk_mq_get_ctx(q);
	hctx = q->mq_ops->map_queue(q, ctx->cpu);
	blk_mq_set_alloc_data(&alloc_data, q, gfp & ~__GFP_WAIT,
			reserved, ctx, hctx);

	rq = __blk_mq_alloc_request(&alloc_data, rw);
	if (!rq && (gfp & __GFP_WAIT)) {
		__blk_mq_run_hw_queue(hctx);
		blk_mq_put_ctx(ctx);

		ctx = blk_mq_get_ctx(q);
		hctx = q->mq_ops->map_queue(q, ctx->cpu);
		blk_mq_set_alloc_data(&alloc_data, q, gfp, reserved, ctx,
				hctx);
		rq = __blk_mq_alloc_request(&alloc_data, rw);
		ctx = alloc_data.ctx;
	}
	blk_mq_put_ctx(ctx);
	if (!rq) {
		blk_mq_queue_exit(q);
		return ERR_PTR(-EWOULDBLOCK);
	}
	return rq;
}
EXPORT_SYMBOL(blk_mq_alloc_request);

static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_ctx *ctx, struct request *rq)
{
	const int tag = rq->tag;
	struct request_queue *q = rq->q;

	if (rq->cmd_flags & REQ_MQ_INFLIGHT)
		atomic_dec(&hctx->nr_active);
	rq->cmd_flags = 0;

	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
	blk_mq_put_tag(hctx, tag, &ctx->last_tag);
	blk_mq_queue_exit(q);
}

void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;

	ctx->rq_completed[rq_is_sync(rq)]++;
	__blk_mq_free_request(hctx, ctx, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_hctx_request);

void blk_mq_free_request(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q = rq->q;

	hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
	blk_mq_free_hctx_request(hctx, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);

inline void __blk_mq_end_request(struct request *rq, int error)
{
	blk_account_io_done(rq);

	if (rq->end_io) {
		rq->end_io(rq, error);
	} else {
		if (unlikely(blk_bidi_rq(rq)))
			blk_mq_free_request(rq->next_rq);
		blk_mq_free_request(rq);
	}
}
EXPORT_SYMBOL(__blk_mq_end_request);

void blk_mq_end_request(struct request *rq, int error)
{
	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
		BUG();
	__blk_mq_end_request(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_request);

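/*
 * Completions may be bounced back to the submitting CPU: if that CPU
 * does not share a cache with the completing one (and the queue flags
 * ask for same-CPU completion), softirq_done_fn is invoked there via an
 * async IPI; otherwise it is run locally to avoid the IPI.
 */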
static void __blk_mq_complete_request_remote(void *data)
{
	struct request *rq = data;

	rq->q->softirq_done_fn(rq);
}

static void blk_mq_ipi_complete_request(struct request *rq)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	bool shared = false;
	int cpu;

	if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
		rq->q->softirq_done_fn(rq);
		return;
	}

	cpu = get_cpu();
	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
		shared = cpus_share_cache(cpu, ctx->cpu);

	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
		rq->csd.func = __blk_mq_complete_request_remote;
		rq->csd.info = rq;
		rq->csd.flags = 0;
		smp_call_function_single_async(ctx->cpu, &rq->csd);
	} else {
		rq->q->softirq_done_fn(rq);
	}
	put_cpu();
}

void __blk_mq_complete_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	if (!q->softirq_done_fn)
		blk_mq_end_request(rq, rq->errors);
	else
		blk_mq_ipi_complete_request(rq);
}

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq:		the request being processed
 *
 * Description:
 *	Ends all I/O on a request. It does not handle partial completions.
 *	The actual completion happens out-of-order, through an IPI handler.
 **/
void blk_mq_complete_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	if (unlikely(blk_should_fake_timeout(q)))
		return;
	if (!blk_mark_rq_complete(rq))
		__blk_mq_complete_request(rq);
}
EXPORT_SYMBOL(blk_mq_complete_request);

int blk_mq_request_started(struct request *rq)
{
	return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
}
EXPORT_SYMBOL_GPL(blk_mq_request_started);

void blk_mq_start_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	trace_block_rq_issue(q, rq);

	rq->resid_len = blk_rq_bytes(rq);
	if (unlikely(blk_bidi_rq(rq)))
		rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);

	blk_add_timer(rq);

	/*
	 * Ensure that ->deadline is visible before we set the started
	 * flag and clear the completed flag.
	 */
	smp_mb__before_atomic();

	/*
	 * Mark us as started and clear complete. Complete might have been
	 * set if requeue raced with timeout, which then marked it as
	 * complete. So be sure to clear complete again when we start
	 * the request, otherwise we'll ignore the completion event.
	 */
	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
		set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
	if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
		clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);

	if (q->dma_drain_size && blk_rq_bytes(rq)) {
		/*
		 * Make sure space for the drain appears. We know we can do
		 * this because max_hw_segments has been adjusted to be one
		 * fewer than the device can handle.
		 */
		rq->nr_phys_segments++;
	}
}
EXPORT_SYMBOL(blk_mq_start_request);

static void __blk_mq_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	trace_block_rq_requeue(q, rq);

	if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
		if (q->dma_drain_size && blk_rq_bytes(rq))
			rq->nr_phys_segments--;
	}
}

void blk_mq_requeue_request(struct request *rq)
{
	__blk_mq_requeue_request(rq);

	BUG_ON(blk_queued_rq(rq));
	blk_mq_add_to_requeue_list(rq, true);
}
EXPORT_SYMBOL(blk_mq_requeue_request);

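/*
 * The requeue work processes the list in two passes: requests flagged
 * REQ_SOFTBARRIER were queued for head insertion and go in first, then
 * the remainder is tail-inserted in its original order.
 */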
static void blk_mq_requeue_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, requeue_work);
	LIST_HEAD(rq_list);
	struct request *rq, *next;
	unsigned long flags;

	spin_lock_irqsave(&q->requeue_lock, flags);
	list_splice_init(&q->requeue_list, &rq_list);
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
		if (!(rq->cmd_flags & REQ_SOFTBARRIER))
			continue;

		rq->cmd_flags &= ~REQ_SOFTBARRIER;
		list_del_init(&rq->queuelist);
		blk_mq_insert_request(rq, true, false, false);
	}

	while (!list_empty(&rq_list)) {
		rq = list_entry(rq_list.next, struct request, queuelist);
		list_del_init(&rq->queuelist);
		blk_mq_insert_request(rq, false, false, false);
	}

	/*
	 * Use the start variant of queue running here, so that running
	 * the requeue work will kick stopped queues.
	 */
	blk_mq_start_hw_queues(q);
}

void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	/*
	 * We abuse this flag that is otherwise used by the I/O scheduler to
	 * request head insertion from the workqueue.
	 */
	BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER);

	spin_lock_irqsave(&q->requeue_lock, flags);
	if (at_head) {
		rq->cmd_flags |= REQ_SOFTBARRIER;
		list_add(&rq->queuelist, &q->requeue_list);
	} else {
		list_add_tail(&rq->queuelist, &q->requeue_list);
	}
	spin_unlock_irqrestore(&q->requeue_lock, flags);
}
EXPORT_SYMBOL(blk_mq_add_to_requeue_list);

void blk_mq_cancel_requeue_work(struct request_queue *q)
{
	cancel_work_sync(&q->requeue_work);
}
EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work);

void blk_mq_kick_requeue_list(struct request_queue *q)
{
	kblockd_schedule_work(&q->requeue_work);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);

void blk_mq_abort_requeue_list(struct request_queue *q)
{
	unsigned long flags;
	LIST_HEAD(rq_list);

	spin_lock_irqsave(&q->requeue_lock, flags);
	list_splice_init(&q->requeue_list, &rq_list);
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	while (!list_empty(&rq_list)) {
		struct request *rq;

		rq = list_first_entry(&rq_list, struct request, queuelist);
		list_del_init(&rq->queuelist);
		rq->errors = -EIO;
		blk_mq_end_request(rq, rq->errors);
	}
}
EXPORT_SYMBOL(blk_mq_abort_requeue_list);

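/*
 * The flush request borrows the tag of the request it was cloned from,
 * so a tag lookup must check whether the tag currently belongs to the
 * flush_rq before trusting tags->rqs[].
 */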
static inline bool is_flush_request(struct request *rq,
		struct blk_flush_queue *fq, unsigned int tag)
{
	return ((rq->cmd_flags & REQ_FLUSH_SEQ) &&
			fq->flush_rq->tag == tag);
}

struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
{
	struct request *rq = tags->rqs[tag];
	/* mq_ctx of flush rq is always cloned from the corresponding req */
	struct blk_flush_queue *fq = blk_get_flush_queue(rq->q, rq->mq_ctx);

	if (!is_flush_request(rq, fq, tag))
		return rq;

	return fq->flush_rq;
}
EXPORT_SYMBOL(blk_mq_tag_to_rq);

struct blk_mq_timeout_data {
	unsigned long next;
	unsigned int next_set;
};

void blk_mq_rq_timed_out(struct request *req, bool reserved)
{
	struct blk_mq_ops *ops = req->q->mq_ops;
	enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;

	/*
	 * We know that complete is set at this point. If STARTED isn't set
	 * anymore, then the request isn't active and the "timeout" should
	 * just be ignored. This can happen due to the bitflag ordering.
	 * Timeout first checks if STARTED is set, and if it is, assumes
	 * the request is active. But if we race with completion, then
	 * both flags will get cleared. So check here again, and ignore
	 * a timeout event with a request that isn't active.
	 */
	if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
		return;

	if (ops->timeout)
		ret = ops->timeout(req, reserved);

	switch (ret) {
	case BLK_EH_HANDLED:
		__blk_mq_complete_request(req);
		break;
	case BLK_EH_RESET_TIMER:
		blk_add_timer(req);
		blk_clear_rq_complete(req);
		break;
	case BLK_EH_NOT_HANDLED:
		break;
	default:
		printk(KERN_ERR "block: bad eh return: %d\n", ret);
		break;
	}
}

static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
		struct request *rq, void *priv, bool reserved)
{
	struct blk_mq_timeout_data *data = priv;

	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
		return;

	if (time_after_eq(jiffies, rq->deadline)) {
		if (!blk_mark_rq_complete(rq))
			blk_mq_rq_timed_out(rq, reserved);
	} else if (!data->next_set || time_after(data->next, rq->deadline)) {
		data->next = rq->deadline;
		data->next_set = 1;
	}
}

static void blk_mq_rq_timer(unsigned long priv)
{
	struct request_queue *q = (struct request_queue *)priv;
	struct blk_mq_timeout_data data = {
		.next		= 0,
		.next_set	= 0,
	};
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		/*
		 * If no software queues are currently mapped to this
		 * hardware queue, there's nothing to check
		 */
		if (!blk_mq_hw_queue_mapped(hctx))
			continue;

		blk_mq_tag_busy_iter(hctx, blk_mq_check_expired, &data);
	}

	if (data.next_set) {
		data.next = blk_rq_timeout(round_jiffies_up(data.next));
		mod_timer(&q->timeout, data.next);
	} else {
		queue_for_each_hw_ctx(q, hctx, i)
			blk_mq_tag_idle(hctx);
	}
}

/*
 * Reverse check our software queue for entries that we could potentially
 * merge with. Currently includes a hand-wavy stop count of 8, to not spend
 * too much time checking for merges.
 */
static bool blk_mq_attempt_merge(struct request_queue *q,
				 struct blk_mq_ctx *ctx, struct bio *bio)
{
	struct request *rq;
	int checked = 8;

	list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
		int el_ret;

		if (!checked--)
			break;

		if (!blk_rq_merge_ok(rq, bio))
			continue;

		el_ret = blk_try_merge(rq, bio);
		if (el_ret == ELEVATOR_BACK_MERGE) {
			if (bio_attempt_back_merge(q, rq, bio)) {
				ctx->rq_merged++;
				return true;
			}
			break;
		} else if (el_ret == ELEVATOR_FRONT_MERGE) {
			if (bio_attempt_front_merge(q, rq, bio)) {
				ctx->rq_merged++;
				return true;
			}
			break;
		}
	}

	return false;
}

/*
 * Process software queues that have been marked busy, splicing them
 * to the for-dispatch list.
 */
static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
	struct blk_mq_ctx *ctx;
	int i;

	for (i = 0; i < hctx->ctx_map.map_size; i++) {
		struct blk_align_bitmap *bm = &hctx->ctx_map.map[i];
		unsigned int off, bit;

		if (!bm->word)
			continue;

		bit = 0;
		off = i * hctx->ctx_map.bits_per_word;
		do {
			bit = find_next_bit(&bm->word, bm->depth, bit);
			if (bit >= bm->depth)
				break;

			ctx = hctx->ctxs[bit + off];
			clear_bit(bit, &bm->word);
			spin_lock(&ctx->lock);
			list_splice_tail_init(&ctx->rq_list, list);
			spin_unlock(&ctx->lock);

			bit++;
		} while (1);
	}
}

/*
 * Run this hardware queue, pulling any software queues mapped to it in.
 * Note that this function currently has various problems around ordering
 * of IO. In particular, we'd like FIFO behaviour on handling existing
 * items on the hctx->dispatch list. Ignore that for now.
 */
static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct request *rq;
	LIST_HEAD(rq_list);
	LIST_HEAD(driver_list);
	struct list_head *dptr;
	int queued;

	WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask));

	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
		return;

	hctx->run++;

	/*
	 * Touch any software queue that has pending entries.
	 */
	flush_busy_ctxs(hctx, &rq_list);

	/*
	 * If we have previous entries on our dispatch list, grab them
	 * and stuff them at the front for more fair dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Start off with dptr being NULL, so we start the first request
	 * immediately, even if we have more pending.
	 */
	dptr = NULL;

	/*
	 * Now process all the entries, sending them to the driver.
	 */
	queued = 0;
	while (!list_empty(&rq_list)) {
		struct blk_mq_queue_data bd;
		int ret;

		rq = list_first_entry(&rq_list, struct request, queuelist);
		list_del_init(&rq->queuelist);

		bd.rq = rq;
		bd.list = dptr;
		bd.last = list_empty(&rq_list);

		ret = q->mq_ops->queue_rq(hctx, &bd);
		switch (ret) {
		case BLK_MQ_RQ_QUEUE_OK:
			queued++;
			continue;
		case BLK_MQ_RQ_QUEUE_BUSY:
			list_add(&rq->queuelist, &rq_list);
			__blk_mq_requeue_request(rq);
			break;
		default:
			pr_err("blk-mq: bad return on queue: %d\n", ret);
		case BLK_MQ_RQ_QUEUE_ERROR:
			rq->errors = -EIO;
			blk_mq_end_request(rq, rq->errors);
			break;
		}

		if (ret == BLK_MQ_RQ_QUEUE_BUSY)
			break;

		/*
		 * We've done the first request. If we have more than 1
		 * left in the list, set dptr to defer issue.
		 */
		if (!dptr && rq_list.next != rq_list.prev)
			dptr = &driver_list;
	}

	if (!queued)
		hctx->dispatched[0]++;
	else if (queued < (1 << (BLK_MQ_MAX_DISPATCH_ORDER - 1)))
		hctx->dispatched[ilog2(queued) + 1]++;

	/*
	 * Any items that need requeuing? Stuff them into hctx->dispatch,
	 * that is where we will continue on next queue run.
	 */
	if (!list_empty(&rq_list)) {
		spin_lock(&hctx->lock);
		list_splice(&rq_list, &hctx->dispatch);
		spin_unlock(&hctx->lock);
	}
}

/*
 * It'd be great if the workqueue API had a way to pass
 * in a mask and had some smarts for more clever placement.
 * For now we just round-robin here, switching for every
 * BLK_MQ_CPU_WORK_BATCH queued items.
 */
static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->queue->nr_hw_queues == 1)
		return WORK_CPU_UNBOUND;

	if (--hctx->next_cpu_batch <= 0) {
		int cpu = hctx->next_cpu, next_cpu;

		next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
		if (next_cpu >= nr_cpu_ids)
			next_cpu = cpumask_first(hctx->cpumask);

		hctx->next_cpu = next_cpu;
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;

		return cpu;
	}

	return hctx->next_cpu;
}

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state) ||
	    !blk_mq_hw_queue_mapped(hctx)))
		return;

	if (!async) {
		int cpu = get_cpu();
		if (cpumask_test_cpu(cpu, hctx->cpumask)) {
			__blk_mq_run_hw_queue(hctx);
			put_cpu();
			return;
		}

		put_cpu();
	}

	kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
			&hctx->run_work, 0);
}

void blk_mq_run_queues(struct request_queue *q, bool async)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if ((!blk_mq_hctx_has_pending(hctx) &&
		    list_empty_careful(&hctx->dispatch)) ||
		    test_bit(BLK_MQ_S_STOPPED, &hctx->state))
			continue;

		blk_mq_run_hw_queue(hctx, async);
	}
}
EXPORT_SYMBOL(blk_mq_run_queues);

void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	cancel_delayed_work(&hctx->run_work);
	cancel_delayed_work(&hctx->delay_work);
	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queue);

void blk_mq_stop_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_stop_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queues);

void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);

	blk_mq_run_hw_queue(hctx, false);
}
EXPORT_SYMBOL(blk_mq_start_hw_queue);

void blk_mq_start_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_start_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_start_hw_queues);

void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state))
			continue;

		clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
		blk_mq_run_hw_queue(hctx, async);
	}
}
EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);

static void blk_mq_run_work_fn(struct work_struct *work)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);

	__blk_mq_run_hw_queue(hctx);
}

static void blk_mq_delay_work_fn(struct work_struct *work)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);

	if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
		__blk_mq_run_hw_queue(hctx);
}

void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
{
	if (unlikely(!blk_mq_hw_queue_mapped(hctx)))
		return;

	kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
			&hctx->delay_work, msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_queue);

static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
				    struct request *rq, bool at_head)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;

	trace_block_rq_insert(hctx->queue, rq);

	if (at_head)
		list_add(&rq->queuelist, &ctx->rq_list);
	else
		list_add_tail(&rq->queuelist, &ctx->rq_list);

	blk_mq_hctx_mark_pending(hctx, ctx);
}

void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
		bool async)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;

	current_ctx = blk_mq_get_ctx(q);
	if (!cpu_online(ctx->cpu))
		rq->mq_ctx = ctx = current_ctx;

	hctx = q->mq_ops->map_queue(q, ctx->cpu);

	spin_lock(&ctx->lock);
	__blk_mq_insert_request(hctx, rq, at_head);
	spin_unlock(&ctx->lock);

	if (run_queue)
		blk_mq_run_hw_queue(hctx, async);

	blk_mq_put_ctx(current_ctx);
}

static void blk_mq_insert_requests(struct request_queue *q,
				   struct blk_mq_ctx *ctx,
				   struct list_head *list,
				   int depth,
				   bool from_schedule)
{
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *current_ctx;

	trace_block_unplug(q, depth, !from_schedule);

	current_ctx = blk_mq_get_ctx(q);

	if (!cpu_online(ctx->cpu))
		ctx = current_ctx;
	hctx = q->mq_ops->map_queue(q, ctx->cpu);

	/*
	 * preemption doesn't flush plug list, so it's possible ctx->cpu is
	 * offline now
	 */
	spin_lock(&ctx->lock);
	while (!list_empty(list)) {
		struct request *rq;

		rq = list_first_entry(list, struct request, queuelist);
		list_del_init(&rq->queuelist);
		rq->mq_ctx = ctx;
		__blk_mq_insert_request(hctx, rq, false);
	}
	spin_unlock(&ctx->lock);

	blk_mq_run_hw_queue(hctx, from_schedule);
	blk_mq_put_ctx(current_ctx);
}

static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct request *rqa = container_of(a, struct request, queuelist);
	struct request *rqb = container_of(b, struct request, queuelist);

	return !(rqa->mq_ctx < rqb->mq_ctx ||
		 (rqa->mq_ctx == rqb->mq_ctx &&
		  blk_rq_pos(rqa) < blk_rq_pos(rqb)));
}

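/*
 * Flushing a plug sorts the plugged requests so that those belonging to
 * the same software queue become adjacent, then hands each contiguous
 * run to blk_mq_insert_requests() as one batch under a single ctx lock.
 */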
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
	struct blk_mq_ctx *this_ctx;
	struct request_queue *this_q;
	struct request *rq;
	LIST_HEAD(list);
	LIST_HEAD(ctx_list);
	unsigned int depth;

	list_splice_init(&plug->mq_list, &list);

	list_sort(NULL, &list, plug_ctx_cmp);

	this_q = NULL;
	this_ctx = NULL;
	depth = 0;

	while (!list_empty(&list)) {
		rq = list_entry_rq(list.next);
		list_del_init(&rq->queuelist);
		BUG_ON(!rq->q);
		if (rq->mq_ctx != this_ctx) {
			if (this_ctx) {
				blk_mq_insert_requests(this_q, this_ctx,
							&ctx_list, depth,
							from_schedule);
			}

			this_ctx = rq->mq_ctx;
			this_q = rq->q;
			depth = 0;
		}

		depth++;
		list_add_tail(&rq->queuelist, &ctx_list);
	}

	/*
	 * If 'this_ctx' is set, we know we have entries to complete
	 * on 'ctx_list'. Do those.
	 */
	if (this_ctx) {
		blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth,
				       from_schedule);
	}
}

static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
{
	init_request_from_bio(rq, bio);

	if (blk_do_io_stat(rq))
		blk_account_io_start(rq, 1);
}

static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
{
	return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
		!blk_queue_nomerges(hctx->queue);
}

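/*
 * Try to merge the bio into an existing request on this software queue.
 * On success the preallocated request is returned to the tag pool and
 * true is returned; otherwise the bio is turned into the request, the
 * request is queued, and false is returned.
 */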
static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
					 struct blk_mq_ctx *ctx,
					 struct request *rq, struct bio *bio)
{
	if (!hctx_allow_merges(hctx)) {
		blk_mq_bio_to_request(rq, bio);
		spin_lock(&ctx->lock);
insert_rq:
		__blk_mq_insert_request(hctx, rq, false);
		spin_unlock(&ctx->lock);
		return false;
	} else {
		struct request_queue *q = hctx->queue;

		spin_lock(&ctx->lock);
		if (!blk_mq_attempt_merge(q, ctx, bio)) {
			blk_mq_bio_to_request(rq, bio);
			goto insert_rq;
		}

		spin_unlock(&ctx->lock);
		__blk_mq_free_request(hctx, ctx, rq);
		return true;
	}
}

struct blk_map_ctx {
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
};

static struct request *blk_mq_map_request(struct request_queue *q,
					  struct bio *bio,
					  struct blk_map_ctx *data)
{
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	struct request *rq;
	int rw = bio_data_dir(bio);
	struct blk_mq_alloc_data alloc_data;

	if (unlikely(blk_mq_queue_enter(q))) {
		bio_endio(bio, -EIO);
		return NULL;
	}

	ctx = blk_mq_get_ctx(q);
	hctx = q->mq_ops->map_queue(q, ctx->cpu);

	if (rw_is_sync(bio->bi_rw))
		rw |= REQ_SYNC;

	trace_block_getrq(q, bio, rw);
	blk_mq_set_alloc_data(&alloc_data, q, GFP_ATOMIC, false, ctx,
			hctx);
	rq = __blk_mq_alloc_request(&alloc_data, rw);
	if (unlikely(!rq)) {
		__blk_mq_run_hw_queue(hctx);
		blk_mq_put_ctx(ctx);
		trace_block_sleeprq(q, bio, rw);

		ctx = blk_mq_get_ctx(q);
		hctx = q->mq_ops->map_queue(q, ctx->cpu);
		blk_mq_set_alloc_data(&alloc_data, q,
				__GFP_WAIT|GFP_ATOMIC, false, ctx, hctx);
		rq = __blk_mq_alloc_request(&alloc_data, rw);
		ctx = alloc_data.ctx;
		hctx = alloc_data.hctx;
	}

	hctx->queued++;
	data->hctx = hctx;
	data->ctx = ctx;
	return rq;
}

1212
1213/*
1214 * Multiple hardware queue variant. This will not use per-process plugs,
1215 * but will attempt to bypass the hctx queueing if we can go straight to
1216 * hardware for SYNC IO.
1217 */
1218static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
1219{
1220 const int is_sync = rw_is_sync(bio->bi_rw);
1221 const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
1222 struct blk_map_ctx data;
1223 struct request *rq;
1224
1225 blk_queue_bounce(q, &bio);
1226
1227 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1228 bio_endio(bio, -EIO);
1229 return;
1230 }
1231
1232 rq = blk_mq_map_request(q, bio, &data);
1233 if (unlikely(!rq))
1234 return;
1235
1236 if (unlikely(is_flush_fua)) {
1237 blk_mq_bio_to_request(rq, bio);
1238 blk_insert_flush(rq);
1239 goto run_queue;
1240 }
1241
e167dfb5
JA
1242 /*
1243 * If the driver supports defer issued based on 'last', then
1244 * queue it up like normal since we can potentially save some
1245 * CPU this way.
1246 */
1247 if (is_sync && !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) {
74c45052
JA
1248 struct blk_mq_queue_data bd = {
1249 .rq = rq,
1250 .list = NULL,
1251 .last = 1
1252 };
07068d5b
JA
1253 int ret;
1254
1255 blk_mq_bio_to_request(rq, bio);
07068d5b
JA
1256
1257 /*
1258 * For OK queue, we are done. For error, kill it. Any other
1259 * error (busy), just add it to our list as we previously
1260 * would have done
1261 */
74c45052 1262 ret = q->mq_ops->queue_rq(data.hctx, &bd);
07068d5b
JA
1263 if (ret == BLK_MQ_RQ_QUEUE_OK)
1264 goto done;
1265 else {
1266 __blk_mq_requeue_request(rq);
1267
1268 if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
1269 rq->errors = -EIO;
c8a446ad 1270 blk_mq_end_request(rq, rq->errors);
07068d5b
JA
1271 goto done;
1272 }
1273 }
1274 }
1275
1276 if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1277 /*
1278 * For a SYNC request, send it to the hardware immediately. For
1279 * an ASYNC request, just ensure that we run it later on. The
1280 * latter allows for merging opportunities and more efficient
1281 * dispatching.
1282 */
1283run_queue:
1284 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1285 }
1286done:
1287 blk_mq_put_ctx(data.ctx);
1288}
1289
1290/*
1291 * Single hardware queue variant. This will attempt to use any per-process
1292 * plug for merging and IO deferral.
1293 */
1294static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
1295{
1296 const int is_sync = rw_is_sync(bio->bi_rw);
1297 const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
1298 unsigned int use_plug, request_count = 0;
1299 struct blk_map_ctx data;
1300 struct request *rq;
1301
1302 /*
1303 * If we have multiple hardware queues, just go directly to
1304 * one of those for sync IO.
1305 */
1306 use_plug = !is_flush_fua && !is_sync;
1307
1308 blk_queue_bounce(q, &bio);
1309
1310 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1311 bio_endio(bio, -EIO);
1312 return;
1313 }
1314
1315 if (use_plug && !blk_queue_nomerges(q) &&
1316 blk_attempt_plug_merge(q, bio, &request_count))
1317 return;
1318
1319 rq = blk_mq_map_request(q, bio, &data);
ff87bcec
JA
1320 if (unlikely(!rq))
1321 return;
320ae51f
JA
1322
1323 if (unlikely(is_flush_fua)) {
1324 blk_mq_bio_to_request(rq, bio);
320ae51f
JA
1325 blk_insert_flush(rq);
1326 goto run_queue;
1327 }
1328
1329 /*
1330 * A task plug currently exists. Since this is completely lockless,
1331 * utilize that to temporarily store requests until the task is
1332 * either done or scheduled away.
1333 */
1334 if (use_plug) {
1335 struct blk_plug *plug = current->plug;
1336
1337 if (plug) {
1338 blk_mq_bio_to_request(rq, bio);
92f399c7 1339 if (list_empty(&plug->mq_list))
320ae51f
JA
1340 trace_block_plug(q);
1341 else if (request_count >= BLK_MAX_REQUEST_COUNT) {
1342 blk_flush_plug_list(plug, false);
1343 trace_block_plug(q);
1344 }
1345 list_add_tail(&rq->queuelist, &plug->mq_list);
07068d5b 1346 blk_mq_put_ctx(data.ctx);
320ae51f
JA
1347 return;
1348 }
1349 }
1350
07068d5b
JA
1351 if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1352 /*
1353 * For a SYNC request, send it to the hardware immediately. For
1354 * an ASYNC request, just ensure that we run it later on. The
1355 * latter allows for merging opportunities and more efficient
1356 * dispatching.
1357 */
1358run_queue:
1359 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
320ae51f
JA
1360 }
1361
07068d5b 1362 blk_mq_put_ctx(data.ctx);
320ae51f
JA
1363}
1364
1365/*
1366 * Default mapping to a software queue, since we use one per CPU.
1367 */
1368struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
1369{
1370 return q->queue_hw_ctx[q->mq_map[cpu]];
1371}
1372EXPORT_SYMBOL(blk_mq_map_queue);
1373
static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
		struct blk_mq_tags *tags, unsigned int hctx_idx)
{
	struct page *page;

	if (tags->rqs && set->ops->exit_request) {
		int i;

		for (i = 0; i < tags->nr_tags; i++) {
			if (!tags->rqs[i])
				continue;
			set->ops->exit_request(set->driver_data, tags->rqs[i],
						hctx_idx, i);
			tags->rqs[i] = NULL;
		}
	}

	while (!list_empty(&tags->page_list)) {
		page = list_first_entry(&tags->page_list, struct page, lru);
		list_del_init(&page->lru);
		__free_pages(page, page->private);
	}

	kfree(tags->rqs);

	blk_mq_free_tags(tags);
}

static size_t order_to_size(unsigned int order)
{
	return (size_t)PAGE_SIZE << order;
}

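/*
 * Request entries are carved out of higher-order pages (starting at
 * order 4), falling back to smaller orders under memory pressure, so
 * each request plus its driver payload stays physically contiguous.
 */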
static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
		unsigned int hctx_idx)
{
	struct blk_mq_tags *tags;
	unsigned int i, j, entries_per_page, max_order = 4;
	size_t rq_size, left;

	tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
				set->numa_node);
	if (!tags)
		return NULL;

	INIT_LIST_HEAD(&tags->page_list);

	tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
				 GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
				 set->numa_node);
	if (!tags->rqs) {
		blk_mq_free_tags(tags);
		return NULL;
	}

	/*
	 * rq_size is the size of the request plus driver payload, rounded
	 * to the cacheline size
	 */
	rq_size = round_up(sizeof(struct request) + set->cmd_size,
				cache_line_size());
	left = rq_size * set->queue_depth;

	for (i = 0; i < set->queue_depth; ) {
		int this_order = max_order;
		struct page *page;
		int to_do;
		void *p;

		while (left < order_to_size(this_order - 1) && this_order)
			this_order--;

		do {
			page = alloc_pages_node(set->numa_node,
				GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
				this_order);
			if (page)
				break;
			if (!this_order--)
				break;
			if (order_to_size(this_order) < rq_size)
				break;
		} while (1);

		if (!page)
			goto fail;

		page->private = this_order;
		list_add_tail(&page->lru, &tags->page_list);

		p = page_address(page);
		entries_per_page = order_to_size(this_order) / rq_size;
		to_do = min(entries_per_page, set->queue_depth - i);
		left -= to_do * rq_size;
		for (j = 0; j < to_do; j++) {
			tags->rqs[i] = p;
			tags->rqs[i]->atomic_flags = 0;
			tags->rqs[i]->cmd_flags = 0;
			if (set->ops->init_request) {
				if (set->ops->init_request(set->driver_data,
						tags->rqs[i], hctx_idx, i,
						set->numa_node)) {
					tags->rqs[i] = NULL;
					goto fail;
				}
			}

			p += rq_size;
			i++;
		}
	}

	return tags;

fail:
	blk_mq_free_rq_map(set, tags, hctx_idx);
	return NULL;
}

static void blk_mq_free_bitmap(struct blk_mq_ctxmap *bitmap)
{
	kfree(bitmap->map);
}

static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
{
	unsigned int bpw = 8, total, num_maps, i;

	bitmap->bits_per_word = bpw;

	num_maps = ALIGN(nr_cpu_ids, bpw) / bpw;
	bitmap->map = kzalloc_node(num_maps * sizeof(struct blk_align_bitmap),
					GFP_KERNEL, node);
	if (!bitmap->map)
		return -ENOMEM;

	bitmap->map_size = num_maps;

	total = nr_cpu_ids;
	for (i = 0; i < num_maps; i++) {
		bitmap->map[i].depth = min(total, bitmap->bits_per_word);
		total -= bitmap->map[i].depth;
	}

	return 0;
}

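/*
 * CPU hotplug: when a CPU dies, requests still sitting on its software
 * queue are moved to the software queue of the CPU running the notifier,
 * and the hardware queue is kicked so they get dispatched.
 */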
static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_ctx *ctx;
	LIST_HEAD(tmp);

	/*
	 * Move ctx entries to new CPU, if this one is going away.
	 */
	ctx = __blk_mq_get_ctx(q, cpu);

	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->rq_list)) {
		list_splice_init(&ctx->rq_list, &tmp);
		blk_mq_hctx_clear_pending(hctx, ctx);
	}
	spin_unlock(&ctx->lock);

	if (list_empty(&tmp))
		return NOTIFY_OK;

	ctx = blk_mq_get_ctx(q);
	spin_lock(&ctx->lock);

	while (!list_empty(&tmp)) {
		struct request *rq;

		rq = list_first_entry(&tmp, struct request, queuelist);
		rq->mq_ctx = ctx;
		list_move_tail(&rq->queuelist, &ctx->rq_list);
	}

	hctx = q->mq_ops->map_queue(q, ctx->cpu);
	blk_mq_hctx_mark_pending(hctx, ctx);

	spin_unlock(&ctx->lock);

	blk_mq_run_hw_queue(hctx, true);
	blk_mq_put_ctx(ctx);
	return NOTIFY_OK;
}

static int blk_mq_hctx_cpu_online(struct blk_mq_hw_ctx *hctx, int cpu)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_tag_set *set = q->tag_set;

	if (set->tags[hctx->queue_num])
		return NOTIFY_OK;

	set->tags[hctx->queue_num] = blk_mq_init_rq_map(set, hctx->queue_num);
	if (!set->tags[hctx->queue_num])
		return NOTIFY_STOP;

	hctx->tags = set->tags[hctx->queue_num];
	return NOTIFY_OK;
}

static int blk_mq_hctx_notify(void *data, unsigned long action,
			      unsigned int cpu)
{
	struct blk_mq_hw_ctx *hctx = data;

	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
		return blk_mq_hctx_cpu_offline(hctx, cpu);
	else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
		return blk_mq_hctx_cpu_online(hctx, cpu);

	return NOTIFY_OK;
}

1591
08e98fc6
ML
1592static void blk_mq_exit_hctx(struct request_queue *q,
1593 struct blk_mq_tag_set *set,
1594 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
1595{
f70ced09
ML
1596 unsigned flush_start_tag = set->queue_depth;
1597
08e98fc6
ML
1598 blk_mq_tag_idle(hctx);
1599
f70ced09
ML
1600 if (set->ops->exit_request)
1601 set->ops->exit_request(set->driver_data,
1602 hctx->fq->flush_rq, hctx_idx,
1603 flush_start_tag + hctx_idx);
1604
08e98fc6
ML
1605 if (set->ops->exit_hctx)
1606 set->ops->exit_hctx(hctx, hctx_idx);
1607
1608 blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
f70ced09 1609 blk_free_flush_queue(hctx->fq);
08e98fc6
ML
1610 kfree(hctx->ctxs);
1611 blk_mq_free_bitmap(&hctx->ctx_map);
1612}
1613
static void blk_mq_exit_hw_queues(struct request_queue *q,
		struct blk_mq_tag_set *set, int nr_queue)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (i == nr_queue)
			break;
		blk_mq_exit_hctx(q, set, hctx, i);
	}
}

static void blk_mq_free_hw_queues(struct request_queue *q,
		struct blk_mq_tag_set *set)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		free_cpumask_var(hctx->cpumask);
		kfree(hctx);
	}
}

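/*
 * Set up a single hardware context: its work items, software queue map,
 * flush queue and driver private state. The error labels below unwind in
 * reverse order, each freeing only what was set up before the failure.
 */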
static int blk_mq_init_hctx(struct request_queue *q,
		struct blk_mq_tag_set *set,
		struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
{
	int node;
	unsigned flush_start_tag = set->queue_depth;

	node = hctx->numa_node;
	if (node == NUMA_NO_NODE)
		node = hctx->numa_node = set->numa_node;

	INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
	INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
	spin_lock_init(&hctx->lock);
	INIT_LIST_HEAD(&hctx->dispatch);
	hctx->queue = q;
	hctx->queue_num = hctx_idx;
	hctx->flags = set->flags;

	blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
					blk_mq_hctx_notify, hctx);
	blk_mq_register_cpu_notifier(&hctx->cpu_notifier);

	hctx->tags = set->tags[hctx_idx];

	/*
	 * Allocate space for all possible cpus to avoid allocation at
	 * runtime
	 */
	hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
					GFP_KERNEL, node);
	if (!hctx->ctxs)
		goto unregister_cpu_notifier;

	if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
		goto free_ctxs;

	hctx->nr_ctx = 0;

	if (set->ops->init_hctx &&
	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
		goto free_bitmap;

	hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
	if (!hctx->fq)
		goto exit_hctx;

	if (set->ops->init_request &&
	    set->ops->init_request(set->driver_data,
				   hctx->fq->flush_rq, hctx_idx,
				   flush_start_tag + hctx_idx, node))
		goto free_fq;

	return 0;

 free_fq:
	kfree(hctx->fq);
 exit_hctx:
	if (set->ops->exit_hctx)
		set->ops->exit_hctx(hctx, hctx_idx);
 free_bitmap:
	blk_mq_free_bitmap(&hctx->ctx_map);
 free_ctxs:
	kfree(hctx->ctxs);
 unregister_cpu_notifier:
	blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);

	return -1;
}

static int blk_mq_init_hw_queues(struct request_queue *q,
		struct blk_mq_tag_set *set)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	/*
	 * Initialize hardware queues
	 */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (blk_mq_init_hctx(q, set, hctx, i))
			break;
	}

	if (i == q->nr_hw_queues)
		return 0;

	/*
	 * Init failed
	 */
	blk_mq_exit_hw_queues(q, set, i);

	return 1;
}

1733
1734static void blk_mq_init_cpu_queues(struct request_queue *q,
1735 unsigned int nr_hw_queues)
1736{
1737 unsigned int i;
1738
1739 for_each_possible_cpu(i) {
1740 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
1741 struct blk_mq_hw_ctx *hctx;
1742
1743 memset(__ctx, 0, sizeof(*__ctx));
1744 __ctx->cpu = i;
1745 spin_lock_init(&__ctx->lock);
1746 INIT_LIST_HEAD(&__ctx->rq_list);
1747 __ctx->queue = q;
1748
1749 /* If the cpu isn't online, the cpu is mapped to first hctx */
320ae51f
JA
1750 if (!cpu_online(i))
1751 continue;
1752
e4043dcf
JA
1753 hctx = q->mq_ops->map_queue(q, i);
1754 cpumask_set_cpu(i, hctx->cpumask);
1755 hctx->nr_ctx++;
1756
320ae51f
JA
1757 /*
1758 * Set local node, IFF we have more than one hw queue. If
1759 * not, we remain on the home node of the device
1760 */
1761 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
1762 hctx->numa_node = cpu_to_node(i);
1763 }
1764}
1765
static void blk_mq_map_swqueue(struct request_queue *q)
{
	unsigned int i;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;

	queue_for_each_hw_ctx(q, hctx, i) {
		cpumask_clear(hctx->cpumask);
		hctx->nr_ctx = 0;
	}

	/*
	 * Map software to hardware queues
	 */
	queue_for_each_ctx(q, ctx, i) {
		/* If the cpu isn't online, the cpu is mapped to first hctx */
		if (!cpu_online(i))
			continue;

		hctx = q->mq_ops->map_queue(q, i);
		cpumask_set_cpu(i, hctx->cpumask);
		ctx->index_hw = hctx->nr_ctx;
		hctx->ctxs[hctx->nr_ctx++] = ctx;
	}

	queue_for_each_hw_ctx(q, hctx, i) {
		/*
		 * If no software queues are mapped to this hardware queue,
		 * disable it and free the request entries.
		 */
		if (!hctx->nr_ctx) {
			struct blk_mq_tag_set *set = q->tag_set;

			if (set->tags[i]) {
				blk_mq_free_rq_map(set, set->tags[i], i);
				set->tags[i] = NULL;
				hctx->tags = NULL;
			}
			continue;
		}

		/*
		 * Initialize batch roundrobin counts
		 */
		hctx->next_cpu = cpumask_first(hctx->cpumask);
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
	}
}

1815static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
1816{
1817 struct blk_mq_hw_ctx *hctx;
1818 struct request_queue *q;
1819 bool shared;
1820 int i;
1821
1822 if (set->tag_list.next == set->tag_list.prev)
1823 shared = false;
1824 else
1825 shared = true;
1826
1827 list_for_each_entry(q, &set->tag_list, tag_set_list) {
1828 blk_mq_freeze_queue(q);
1829
1830 queue_for_each_hw_ctx(q, hctx, i) {
1831 if (shared)
1832 hctx->flags |= BLK_MQ_F_TAG_SHARED;
1833 else
1834 hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
1835 }
1836 blk_mq_unfreeze_queue(q);
1837 }
1838}
1839
static void blk_mq_del_queue_tag_set(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;

	mutex_lock(&set->tag_list_lock);
	list_del_init(&q->tag_set_list);
	blk_mq_update_tag_set_depth(set);
	mutex_unlock(&set->tag_list_lock);
}

static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
				     struct request_queue *q)
{
	q->tag_set = set;

	mutex_lock(&set->tag_list_lock);
	list_add_tail(&q->tag_set_list, &set->tag_list);
	blk_mq_update_tag_set_depth(set);
	mutex_unlock(&set->tag_list_lock);
}

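/*
 * Build a request queue from a tag set: allocate the per-cpu software
 * queues and the hardware contexts, pick the make_request handler based
 * on the number of hardware queues, and register the queue on all_q_list
 * for CPU-hotplug driven remapping.
 */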
24d2f903 1861struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
320ae51f
JA
1862{
1863 struct blk_mq_hw_ctx **hctxs;
e6cdb092 1864 struct blk_mq_ctx __percpu *ctx;
320ae51f 1865 struct request_queue *q;
f14bbe77 1866 unsigned int *map;
320ae51f
JA
1867 int i;
1868
320ae51f
JA
1869 ctx = alloc_percpu(struct blk_mq_ctx);
1870 if (!ctx)
1871 return ERR_PTR(-ENOMEM);
1872
24d2f903
CH
1873 hctxs = kmalloc_node(set->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL,
1874 set->numa_node);
320ae51f
JA
1875
1876 if (!hctxs)
1877 goto err_percpu;
1878
f14bbe77
JA
1879 map = blk_mq_make_queue_map(set);
1880 if (!map)
1881 goto err_map;
1882
24d2f903 1883 for (i = 0; i < set->nr_hw_queues; i++) {
f14bbe77
JA
1884 int node = blk_mq_hw_queue_to_node(map, i);
1885
cdef54dd
CH
1886 hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
1887 GFP_KERNEL, node);
320ae51f
JA
1888 if (!hctxs[i])
1889 goto err_hctxs;
1890
a86073e4
JA
1891 if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
1892 node))
e4043dcf
JA
1893 goto err_hctxs;
1894
0d2602ca 1895 atomic_set(&hctxs[i]->nr_active, 0);
f14bbe77 1896 hctxs[i]->numa_node = node;
320ae51f
JA
1897 hctxs[i]->queue_num = i;
1898 }
1899
24d2f903 1900 q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
320ae51f
JA
1901 if (!q)
1902 goto err_hctxs;
1903
17497acb
TH
1904 /*
1905 * Init percpu_ref in atomic mode so that it's faster to shutdown.
1906 * See blk_register_queue() for details.
1907 */
a34375ef 1908 if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
17497acb 1909 PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
3d2936f4
ML
1910 goto err_map;
1911
320ae51f
JA
1912 setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
1913 blk_queue_rq_timeout(q, 30000);
1914
1915 q->nr_queues = nr_cpu_ids;
24d2f903 1916 q->nr_hw_queues = set->nr_hw_queues;
f14bbe77 1917 q->mq_map = map;
320ae51f
JA
1918
1919 q->queue_ctx = ctx;
1920 q->queue_hw_ctx = hctxs;
1921
24d2f903 1922 q->mq_ops = set->ops;
94eddfbe 1923 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
320ae51f 1924
05f1dd53
JA
1925 if (!(set->flags & BLK_MQ_F_SG_MERGE))
1926 q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
1927
1be036e9
CH
1928 q->sg_reserved_size = INT_MAX;
1929
6fca6a61
CH
1930 INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
1931 INIT_LIST_HEAD(&q->requeue_list);
1932 spin_lock_init(&q->requeue_lock);
1933
07068d5b
JA
1934 if (q->nr_hw_queues > 1)
1935 blk_queue_make_request(q, blk_mq_make_request);
1936 else
1937 blk_queue_make_request(q, blk_sq_make_request);
1938
24d2f903
CH
1939 if (set->timeout)
1940 blk_queue_rq_timeout(q, set->timeout);
320ae51f 1941
eba71768
JA
1942 /*
1943 * Do this after blk_queue_make_request() overrides it...
1944 */
1945 q->nr_requests = set->queue_depth;
1946
24d2f903
CH
1947 if (set->ops->complete)
1948 blk_queue_softirq_done(q, set->ops->complete);
30a91cb4 1949
24d2f903 1950 blk_mq_init_cpu_queues(q, set->nr_hw_queues);
320ae51f 1951
24d2f903 1952 if (blk_mq_init_hw_queues(q, set))
1bcb1ead 1953 goto err_hw;
18741986 1954
320ae51f
JA
1955 mutex_lock(&all_q_mutex);
1956 list_add_tail(&q->all_q_node, &all_q_list);
1957 mutex_unlock(&all_q_mutex);
1958
0d2602ca
JA
1959 blk_mq_add_queue_tag_set(set, q);
1960
484b4061
JA
1961 blk_mq_map_swqueue(q);
1962
320ae51f 1963 return q;
18741986 1964
320ae51f 1965err_hw:
320ae51f
JA
1966 blk_cleanup_queue(q);
1967err_hctxs:
f14bbe77 1968 kfree(map);
24d2f903 1969 for (i = 0; i < set->nr_hw_queues; i++) {
320ae51f
JA
1970 if (!hctxs[i])
1971 break;
e4043dcf 1972 free_cpumask_var(hctxs[i]->cpumask);
cdef54dd 1973 kfree(hctxs[i]);
320ae51f 1974 }
f14bbe77 1975err_map:
320ae51f
JA
1976 kfree(hctxs);
1977err_percpu:
1978 free_percpu(ctx);
1979 return ERR_PTR(-ENOMEM);
1980}
1981EXPORT_SYMBOL(blk_mq_init_queue);
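
/*
 * Editorial sketch: typical driver-side use of blk_mq_init_queue() in this
 * era of the API. The tag set is assumed to have been set up with
 * blk_mq_alloc_tag_set() beforehand (see that function below). On failure
 * an ERR_PTR() is returned, never NULL, so the result must be checked with
 * IS_ERR(). struct example_dev and its fields are assumptions.
 */
struct example_dev {
	struct blk_mq_tag_set	tag_set;
	struct request_queue	*queue;
};

static int example_attach_queue(struct example_dev *dev)
{
	struct request_queue *q;

	q = blk_mq_init_queue(&dev->tag_set);
	if (IS_ERR(q))
		return PTR_ERR(q);

	q->queuedata = dev;	/* recovered later by the driver's ->queue_rq() */
	dev->queue = q;
	return 0;
}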

void blk_mq_free_queue(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;

	blk_mq_del_queue_tag_set(q);

	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
	blk_mq_free_hw_queues(q, set);

	percpu_ref_exit(&q->mq_usage_counter);

	free_percpu(q->queue_ctx);
	kfree(q->queue_hw_ctx);
	kfree(q->mq_map);

	q->queue_ctx = NULL;
	q->queue_hw_ctx = NULL;
	q->mq_map = NULL;

	mutex_lock(&all_q_mutex);
	list_del_init(&q->all_q_node);
	mutex_unlock(&all_q_mutex);
}
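
/*
 * Editorial sketch: the matching teardown order for a driver. Drivers do
 * not call blk_mq_free_queue() directly; blk_cleanup_queue() ends up here
 * for an mq-enabled queue. The tag set is freed last, once no queue uses
 * it. Builds on the hypothetical example_dev above.
 */
static void example_detach_queue(struct example_dev *dev)
{
	blk_cleanup_queue(dev->queue);		/* exits and frees hw queues */
	blk_mq_free_tag_set(&dev->tag_set);	/* then drop the tag maps */
}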

/* Basically redo blk_mq_init_queue with queue frozen */
static void blk_mq_queue_reinit(struct request_queue *q)
{
	WARN_ON_ONCE(!q->mq_freeze_depth);

	blk_mq_sysfs_unregister(q);

	blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues);

	/*
	 * Redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
	 * we should change hctx numa_node according to the new topology (this
	 * involves freeing and re-allocating memory; is it worth doing?)
	 */

	blk_mq_map_swqueue(q);

	blk_mq_sysfs_register(q);
}

static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
				      unsigned long action, void *hcpu)
{
	struct request_queue *q;

	/*
	 * Before new mappings are established, a hot-added CPU might
	 * already have started handling requests. This doesn't break
	 * anything, as we map offline CPUs to the first hardware queue.
	 * We will re-init the queues below to get optimal settings.
	 */
	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN &&
	    action != CPU_ONLINE && action != CPU_ONLINE_FROZEN)
		return NOTIFY_OK;

	mutex_lock(&all_q_mutex);

	/*
	 * We need to freeze and reinit all existing queues. Freezing
	 * involves a synchronous wait for an RCU grace period, and doing
	 * it one by one may take a long time. Start freezing all queues
	 * in one swoop and then wait for the completions so that freezing
	 * can take place in parallel.
	 */
	list_for_each_entry(q, &all_q_list, all_q_node)
		blk_mq_freeze_queue_start(q);
	list_for_each_entry(q, &all_q_list, all_q_node)
		blk_mq_freeze_queue_wait(q);

	list_for_each_entry(q, &all_q_list, all_q_node)
		blk_mq_queue_reinit(q);

	list_for_each_entry(q, &all_q_list, all_q_node)
		blk_mq_unfreeze_queue(q);

	mutex_unlock(&all_q_mutex);
	return NOTIFY_OK;
}

static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
{
	int i;

	for (i = 0; i < set->nr_hw_queues; i++) {
		set->tags[i] = blk_mq_init_rq_map(set, i);
		if (!set->tags[i])
			goto out_unwind;
	}

	return 0;

out_unwind:
	while (--i >= 0)
		blk_mq_free_rq_map(set, set->tags[i], i);

	return -ENOMEM;
}
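
/*
 * Editorial note: on failure, i indexes the first map that could not be
 * allocated; the while (--i >= 0) loop walks back over exactly the maps
 * that did succeed, so nothing is leaked and nothing is freed twice.
 */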

/*
 * Allocate the request maps associated with this tag_set. Note that this
 * may reduce the depth asked for, if memory is tight. set->queue_depth
 * will be updated to reflect the allocated depth.
 */
static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
{
	unsigned int depth;
	int err;

	depth = set->queue_depth;
	do {
		err = __blk_mq_alloc_rq_maps(set);
		if (!err)
			break;

		set->queue_depth >>= 1;
		if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
			err = -ENOMEM;
			break;
		}
	} while (set->queue_depth);

	if (!set->queue_depth || err) {
		pr_err("blk-mq: failed to allocate request map\n");
		return -ENOMEM;
	}

	if (depth != set->queue_depth)
		pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
			depth, set->queue_depth);

	return 0;
}
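
/*
 * Editorial example: with queue_depth = 256, reserved_tags = 0 and two
 * failed attempts, the loop retries at 128 and then 64; if the 64-tag
 * allocation succeeds, set->queue_depth ends up as 64 and the pr_info()
 * above reports "reduced tag depth (256 -> 64)". The loop only gives up
 * once the depth would fall below reserved_tags + BLK_MQ_TAG_MIN.
 */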

/*
 * Alloc a tag set to be associated with one or more request queues.
 * May fail with EINVAL for various error conditions. May adjust the
 * requested depth down, if it is too large. In that case, the set
 * value will be stored in set->queue_depth.
 */
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
{
	BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);

	if (!set->nr_hw_queues)
		return -EINVAL;
	if (!set->queue_depth)
		return -EINVAL;
	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
		return -EINVAL;

	if (!set->nr_hw_queues || !set->ops->queue_rq || !set->ops->map_queue)
		return -EINVAL;

	if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
		pr_info("blk-mq: reduced tag depth to %u\n",
			BLK_MQ_MAX_DEPTH);
		set->queue_depth = BLK_MQ_MAX_DEPTH;
	}

	/*
	 * If a crashdump is active, then we are potentially in a very
	 * memory constrained environment. Limit us to 1 queue and
	 * 64 tags to prevent using too much memory.
	 */
	if (is_kdump_kernel()) {
		set->nr_hw_queues = 1;
		set->queue_depth = min(64U, set->queue_depth);
	}

	set->tags = kmalloc_node(set->nr_hw_queues *
				 sizeof(struct blk_mq_tags *),
				 GFP_KERNEL, set->numa_node);
	if (!set->tags)
		return -ENOMEM;

	if (blk_mq_alloc_rq_maps(set))
		goto enomem;

	mutex_init(&set->tag_list_lock);
	INIT_LIST_HEAD(&set->tag_list);

	return 0;
enomem:
	kfree(set->tags);
	set->tags = NULL;
	return -ENOMEM;
}
EXPORT_SYMBOL(blk_mq_alloc_tag_set);
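
/*
 * Editorial sketch: the minimum a caller must fill in to get past the
 * -EINVAL checks above: nonzero nr_hw_queues and queue_depth, plus
 * ->queue_rq and ->map_queue in the ops table (blk_mq_map_queue() is the
 * stock mapping helper of this era). All example_* names are assumptions,
 * and the ->queue_rq() prototype shown is the blk_mq_queue_data-based one;
 * older revisions passed the request directly, so check your tree.
 */
static int example_queue_rq(struct blk_mq_hw_ctx *hctx,
			    const struct blk_mq_queue_data *bd);

static struct blk_mq_ops example_mq_ops = {
	.queue_rq	= example_queue_rq,	/* driver-provided, hypothetical */
	.map_queue	= blk_mq_map_queue,
};

static int example_setup_tag_set(struct blk_mq_tag_set *set)
{
	memset(set, 0, sizeof(*set));
	set->ops = &example_mq_ops;
	set->nr_hw_queues = 1;
	set->queue_depth = 64;	/* must stay >= reserved_tags + BLK_MQ_TAG_MIN */
	set->numa_node = NUMA_NO_NODE;
	set->cmd_size = 0;	/* no extra per-request payload in this sketch */
	set->flags = BLK_MQ_F_SHOULD_MERGE;

	return blk_mq_alloc_tag_set(set);
}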

void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
{
	int i;

	for (i = 0; i < set->nr_hw_queues; i++) {
		if (set->tags[i])
			blk_mq_free_rq_map(set, set->tags[i], i);
	}

	kfree(set->tags);
	set->tags = NULL;
}
EXPORT_SYMBOL(blk_mq_free_tag_set);

int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
{
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_hw_ctx *hctx;
	int i, ret;

	if (!set || nr > set->queue_depth)
		return -EINVAL;

	ret = 0;
	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_tag_update_depth(hctx->tags, nr);
		if (ret)
			break;
	}

	if (!ret)
		q->nr_requests = nr;

	return ret;
}
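
/*
 * Editorial note: this is the mq-side backend for writes to
 * /sys/block/<dev>/queue/nr_requests; the sysfs store handler calls it
 * for mq queues. The depth can be lowered, or raised back up, but never
 * past the tag set's allocated queue_depth: e.g.
 * blk_mq_update_nr_requests(q, 32) on a set allocated with depth 64
 * succeeds, while asking for 128 returns -EINVAL.
 */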

void blk_mq_disable_hotplug(void)
{
	mutex_lock(&all_q_mutex);
}

void blk_mq_enable_hotplug(void)
{
	mutex_unlock(&all_q_mutex);
}
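
/*
 * Editorial note: these helpers simply expose all_q_mutex, so a caller
 * can keep blk_mq_queue_reinit_notify() from running concurrently while
 * it touches CPU-notifier or queue state:
 *
 *	blk_mq_disable_hotplug();
 *	...critical section that must not race with queue reinit...
 *	blk_mq_enable_hotplug();
 */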

static int __init blk_mq_init(void)
{
	blk_mq_cpu_init();

	hotcpu_notifier(blk_mq_queue_reinit_notify, 0);

	return 0;
}
subsys_initcall(blk_mq_init);