/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	- July 2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
#include <linux/debugfs.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
#include "blk-wbt.h"

#ifdef CONFIG_DEBUG_FS
struct dentry *blk_debugfs_root;
#endif

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);

DEFINE_IDA(blk_queue_ida);

/*
 * For the allocated request tables
 */
struct kmem_cache *request_cachep;

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

d40f75a0
TH
73static void blk_clear_congested(struct request_list *rl, int sync)
74{
d40f75a0
TH
75#ifdef CONFIG_CGROUP_WRITEBACK
76 clear_wb_congested(rl->blkg->wb_congested, sync);
77#else
482cf79c
TH
78 /*
79 * If !CGROUP_WRITEBACK, all blkg's map to bdi->wb and we shouldn't
80 * flip its congestion state for events on other blkcgs.
81 */
82 if (rl == &rl->q->root_rl)
dc3b17cc 83 clear_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
d40f75a0
TH
84#endif
85}
86
87static void blk_set_congested(struct request_list *rl, int sync)
88{
d40f75a0
TH
89#ifdef CONFIG_CGROUP_WRITEBACK
90 set_wb_congested(rl->blkg->wb_congested, sync);
91#else
482cf79c
TH
92 /* see blk_clear_congested() */
93 if (rl == &rl->q->root_rl)
dc3b17cc 94 set_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
d40f75a0
TH
95#endif
96}
97
8324aa91 98void blk_queue_congestion_threshold(struct request_queue *q)
1da177e4
LT
99{
100 int nr;
101
102 nr = q->nr_requests - (q->nr_requests / 8) + 1;
103 if (nr > q->nr_requests)
104 nr = q->nr_requests;
105 q->nr_congestion_on = nr;
106
107 nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
108 if (nr < 1)
109 nr = 1;
110 q->nr_congestion_off = nr;
111}
112
2a4aa30c 113void blk_rq_init(struct request_queue *q, struct request *rq)
1da177e4 114{
1afb20f3
FT
115 memset(rq, 0, sizeof(*rq));
116
1da177e4 117 INIT_LIST_HEAD(&rq->queuelist);
242f9dcb 118 INIT_LIST_HEAD(&rq->timeout_list);
c7c22e4d 119 rq->cpu = -1;
63a71386 120 rq->q = q;
a2dec7b3 121 rq->__sector = (sector_t) -1;
2e662b65
JA
122 INIT_HLIST_NODE(&rq->hash);
123 RB_CLEAR_NODE(&rq->rb_node);
63a71386 124 rq->tag = -1;
bd166ef1 125 rq->internal_tag = -1;
b243ddcb 126 rq->start_time = jiffies;
9195291e 127 set_start_time_ns(rq);
09e099d4 128 rq->part = NULL;
1da177e4 129}
2a4aa30c 130EXPORT_SYMBOL(blk_rq_init);
1da177e4 131
5bb23a68
N
132static void req_bio_endio(struct request *rq, struct bio *bio,
133 unsigned int nbytes, int error)
1da177e4 134{
78d8e58a 135 if (error)
4246a0b6 136 bio->bi_error = error;
797e7dbb 137
e8064021 138 if (unlikely(rq->rq_flags & RQF_QUIET))
b7c44ed9 139 bio_set_flag(bio, BIO_QUIET);
08bafc03 140
f79ea416 141 bio_advance(bio, nbytes);
7ba1ba12 142
143a87f4 143 /* don't actually finish bio if it's part of flush sequence */
e8064021 144 if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
4246a0b6 145 bio_endio(bio);
1da177e4 146}
1da177e4 147
1da177e4
LT
148void blk_dump_rq_flags(struct request *rq, char *msg)
149{
aebf526b
CH
150 printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
151 rq->rq_disk ? rq->rq_disk->disk_name : "?",
5953316d 152 (unsigned long long) rq->cmd_flags);
1da177e4 153
83096ebf
TH
154 printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n",
155 (unsigned long long)blk_rq_pos(rq),
156 blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
b4f42e28
JA
157 printk(KERN_INFO " bio %p, biotail %p, len %u\n",
158 rq->bio, rq->biotail, blk_rq_bytes(rq));
1da177e4 159}
1da177e4
LT
160EXPORT_SYMBOL(blk_dump_rq_flags);
161
3cca6dc1 162static void blk_delay_work(struct work_struct *work)
1da177e4 163{
3cca6dc1 164 struct request_queue *q;
1da177e4 165
3cca6dc1
JA
166 q = container_of(work, struct request_queue, delay_work.work);
167 spin_lock_irq(q->queue_lock);
24ecfbe2 168 __blk_run_queue(q);
3cca6dc1 169 spin_unlock_irq(q->queue_lock);
1da177e4 170}

/**
 * blk_delay_queue - restart queueing after defined interval
 * @q:		The &struct request_queue in question
 * @msecs:	Delay in msecs
 *
 * Description:
 *   Sometimes queueing needs to be postponed for a little while, to allow
 *   resources to come back. This function will make sure that queueing is
 *   restarted around the specified time. Queue lock must be held.
 */
void blk_delay_queue(struct request_queue *q, unsigned long msecs)
{
	if (likely(!blk_queue_dead(q)))
		queue_delayed_work(kblockd_workqueue, &q->delay_work,
				   msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_delay_queue);
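
/*
 * Example (illustrative sketch, not part of this file): a request_fn that
 * runs out of some driver resource can back off and ask the block layer to
 * retry a little later. example_driver_busy() and the 100 ms value are
 * hypothetical.
 *
 *	static void example_request_fn(struct request_queue *q)
 *	{
 *		if (example_driver_busy(q->queuedata)) {
 *			blk_delay_queue(q, 100);	// retry in ~100 ms
 *			return;
 *		}
 *		// ... otherwise dispatch requests, e.g. via blk_fetch_request() ...
 *	}
 */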

/**
 * blk_start_queue_async - asynchronously restart a previously stopped queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *    blk_start_queue_async() will clear the stop flag on the queue, and
 *    ensure that the request_fn for the queue is run from an async
 *    context.
 **/
void blk_start_queue_async(struct request_queue *q)
{
	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
	blk_run_queue_async(q);
}
EXPORT_SYMBOL(blk_start_queue_async);

/**
 * blk_start_queue - restart a previously stopped queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *    blk_start_queue() will clear the stop flag on the queue, and call
 *    the request_fn for the queue if it was in a stopped state when
 *    entered. Also see blk_stop_queue(). Queue lock must be held.
 **/
void blk_start_queue(struct request_queue *q)
{
	WARN_ON(!irqs_disabled());

	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
	__blk_run_queue(q);
}
EXPORT_SYMBOL(blk_start_queue);

/**
 * blk_stop_queue - stop a queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *    The Linux block layer assumes that a block driver will consume all
 *    entries on the request queue when the request_fn strategy is called.
 *    Often this will not happen, because of hardware limitations (queue
 *    depth settings). If a device driver gets a 'queue full' response,
 *    or if it simply chooses not to queue more I/O at one point, it can
 *    call this function to prevent the request_fn from being called until
 *    the driver has signalled it's ready to go again. This happens by calling
 *    blk_start_queue() to restart queue operations. Queue lock must be held.
 **/
void blk_stop_queue(struct request_queue *q)
{
	cancel_delayed_work(&q->delay_work);
	queue_flag_set(QUEUE_FLAG_STOPPED, q);
}
EXPORT_SYMBOL(blk_stop_queue);
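
/*
 * Example (illustrative sketch, not part of this file): the 'queue full'
 * pattern described above. The driver stops the queue when its hardware
 * cannot take more commands and restarts it from the completion path.
 * example_hw_full() is a hypothetical driver helper; the queue lock is
 * held as required by both functions.
 *
 *	// in the request_fn, with q->queue_lock already held:
 *	if (example_hw_full(q->queuedata)) {
 *		blk_stop_queue(q);
 *		return;
 *	}
 *
 *	// later, from the completion/IRQ path:
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	blk_start_queue(q);
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 */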

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->make_request_fn will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
70ed28b9 265 del_timer_sync(&q->timeout);
f04c1fe7
ML
266
267 if (q->mq_ops) {
268 struct blk_mq_hw_ctx *hctx;
269 int i;
270
70f4db63 271 queue_for_each_hw_ctx(q, hctx, i) {
27489a3c 272 cancel_work_sync(&hctx->run_work);
70f4db63
CH
273 cancel_delayed_work_sync(&hctx->delay_work);
274 }
f04c1fe7
ML
275 } else {
276 cancel_delayed_work_sync(&q->delay_work);
277 }
1da177e4
LT
278}
279EXPORT_SYMBOL(blk_sync_queue);
280
/**
 * __blk_run_queue_uncond - run a queue whether or not it has been stopped
 * @q:	The queue to run
 *
 * Description:
 *    Invoke request handling on a queue if there are any pending requests.
 *    May be used to restart request handling after a request has completed.
 *    This variant runs the queue whether or not the queue has been
 *    stopped. Must be called with the queue lock held and interrupts
 *    disabled. See also @blk_run_queue.
 */
inline void __blk_run_queue_uncond(struct request_queue *q)
293{
294 if (unlikely(blk_queue_dead(q)))
295 return;
296
24faf6f6
BVA
297 /*
298 * Some request_fn implementations, e.g. scsi_request_fn(), unlock
299 * the queue lock internally. As a result multiple threads may be
300 * running such a request function concurrently. Keep track of the
301 * number of active request_fn invocations such that blk_drain_queue()
302 * can wait until all these request_fn calls have finished.
303 */
304 q->request_fn_active++;
c246e80d 305 q->request_fn(q);
24faf6f6 306 q->request_fn_active--;
c246e80d 307}
a7928c15 308EXPORT_SYMBOL_GPL(__blk_run_queue_uncond);
c246e80d 309
/**
 * __blk_run_queue - run a single device queue
 * @q:	The queue to run
 *
 * Description:
 *    See @blk_run_queue. This variant must be called with the queue lock
 *    held and interrupts disabled.
 */
24ecfbe2 318void __blk_run_queue(struct request_queue *q)
1da177e4 319{
a538cd03
TH
320 if (unlikely(blk_queue_stopped(q)))
321 return;
322
c246e80d 323 __blk_run_queue_uncond(q);
75ad23bc
NP
324}
325EXPORT_SYMBOL(__blk_run_queue);
dac07ec1 326
/**
 * blk_run_queue_async - run a single device queue in workqueue context
 * @q:	The queue to run
 *
 * Description:
 *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
 *    of us. The caller must hold the queue lock.
 */
335void blk_run_queue_async(struct request_queue *q)
336{
70460571 337 if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
e7c2f967 338 mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
24ecfbe2 339}
c21e6beb 340EXPORT_SYMBOL(blk_run_queue_async);
24ecfbe2 341
/**
 * blk_run_queue - run a single device queue
 * @q: The queue to run
 *
 * Description:
 *    Invoke request handling on this queue, if it has pending work to do.
 *    May be used to restart queueing when a request has completed.
 */
350void blk_run_queue(struct request_queue *q)
351{
352 unsigned long flags;
353
354 spin_lock_irqsave(q->queue_lock, flags);
24ecfbe2 355 __blk_run_queue(q);
1da177e4
LT
356 spin_unlock_irqrestore(q->queue_lock, flags);
357}
358EXPORT_SYMBOL(blk_run_queue);
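
/*
 * Example (illustrative sketch, not part of this file): unlike
 * __blk_run_queue(), blk_run_queue() takes the queue lock itself, so a
 * driver can call it from a context that does not already hold the lock,
 * e.g. after freeing up a resource outside its request_fn.
 * example_release_resource() and dev->queue are hypothetical.
 *
 *	example_release_resource(dev);
 *	blk_run_queue(dev->queue);	// let the block layer retry dispatch
 */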
359
165125e1 360void blk_put_queue(struct request_queue *q)
483f4afc
AV
361{
362 kobject_put(&q->kobj);
363}
d86e0e83 364EXPORT_SYMBOL(blk_put_queue);
483f4afc 365
/**
 * __blk_drain_queue - drain requests from request_queue
 * @q: queue to drain
 * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
 *
 * Drain requests from @q. If @drain_all is set, all requests are drained.
 * If not, only ELVPRIV requests are drained. The caller is responsible
 * for ensuring that no new requests which need to be drained are queued.
 */
807592a4
BVA
375static void __blk_drain_queue(struct request_queue *q, bool drain_all)
376 __releases(q->queue_lock)
377 __acquires(q->queue_lock)
e3c78ca5 378{
458f27a9
AH
379 int i;
380
807592a4
BVA
381 lockdep_assert_held(q->queue_lock);
382
e3c78ca5 383 while (true) {
481a7d64 384 bool drain = false;
e3c78ca5 385
b855b04a
TH
386 /*
387 * The caller might be trying to drain @q before its
388 * elevator is initialized.
389 */
390 if (q->elevator)
391 elv_drain_elevator(q);
392
5efd6113 393 blkcg_drain_queue(q);
e3c78ca5 394
		/*
		 * This function might be called on a queue which failed
		 * driver init after queue creation or is not yet fully
		 * active. Some drivers (e.g. fd and loop) get unhappy
		 * in such cases. Kick queue iff dispatch queue has
		 * something on it and @q has request_fn set.
		 */
b855b04a 402 if (!list_empty(&q->queue_head) && q->request_fn)
4eabc941 403 __blk_run_queue(q);
c9a929dd 404
8a5ecdd4 405 drain |= q->nr_rqs_elvpriv;
24faf6f6 406 drain |= q->request_fn_active;
481a7d64
TH
407
408 /*
409 * Unfortunately, requests are queued at and tracked from
410 * multiple places and there's no single counter which can
411 * be drained. Check all the queues and counters.
412 */
413 if (drain_all) {
e97c293c 414 struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
481a7d64
TH
415 drain |= !list_empty(&q->queue_head);
416 for (i = 0; i < 2; i++) {
8a5ecdd4 417 drain |= q->nr_rqs[i];
481a7d64 418 drain |= q->in_flight[i];
7c94e1c1
ML
419 if (fq)
420 drain |= !list_empty(&fq->flush_queue[i]);
481a7d64
TH
421 }
422 }
e3c78ca5 423
481a7d64 424 if (!drain)
e3c78ca5 425 break;
807592a4
BVA
426
427 spin_unlock_irq(q->queue_lock);
428
e3c78ca5 429 msleep(10);
807592a4
BVA
430
431 spin_lock_irq(q->queue_lock);
e3c78ca5 432 }
458f27a9
AH
433
434 /*
435 * With queue marked dead, any woken up waiter will fail the
436 * allocation path, so the wakeup chaining is lost and we're
437 * left with hung waiters. We need to wake up those waiters.
438 */
439 if (q->request_fn) {
a051661c
TH
440 struct request_list *rl;
441
a051661c
TH
442 blk_queue_for_each_rl(rl, q)
443 for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
444 wake_up_all(&rl->wait[i]);
458f27a9 445 }
e3c78ca5
TH
446}
447
/**
 * blk_queue_bypass_start - enter queue bypass mode
 * @q: queue of interest
 *
 * In bypass mode, only the dispatch FIFO queue of @q is used. This
 * function makes @q enter bypass mode and drains all requests which were
 * throttled or issued before. On return, it's guaranteed that no request
 * is being throttled or has ELVPRIV set and blk_queue_bypass() %true
 * inside queue or RCU read lock.
 */
458void blk_queue_bypass_start(struct request_queue *q)
459{
460 spin_lock_irq(q->queue_lock);
776687bc 461 q->bypass_depth++;
d732580b
TH
462 queue_flag_set(QUEUE_FLAG_BYPASS, q);
463 spin_unlock_irq(q->queue_lock);
464
	/*
	 * Queues start drained. Skip actual draining till init is
	 * complete. This avoids lengthy delays during queue init which
	 * can happen many times during boot.
	 */
470 if (blk_queue_init_done(q)) {
807592a4
BVA
471 spin_lock_irq(q->queue_lock);
472 __blk_drain_queue(q, false);
473 spin_unlock_irq(q->queue_lock);
474
b82d4b19
TH
475 /* ensure blk_queue_bypass() is %true inside RCU read lock */
476 synchronize_rcu();
477 }
d732580b
TH
478}
479EXPORT_SYMBOL_GPL(blk_queue_bypass_start);
480
481/**
482 * blk_queue_bypass_end - leave queue bypass mode
483 * @q: queue of interest
484 *
485 * Leave bypass mode and restore the normal queueing behavior.
486 */
487void blk_queue_bypass_end(struct request_queue *q)
488{
489 spin_lock_irq(q->queue_lock);
490 if (!--q->bypass_depth)
491 queue_flag_clear(QUEUE_FLAG_BYPASS, q);
492 WARN_ON_ONCE(q->bypass_depth < 0);
493 spin_unlock_irq(q->queue_lock);
494}
495EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
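
/*
 * Example (illustrative sketch, not part of this file): bypass mode is used
 * to quiesce the elevator while queue-wide state is changed, e.g. by the
 * elevator-switch code. Calls nest via bypass_depth, so a simple bracket
 * suffices:
 *
 *	blk_queue_bypass_start(q);
 *	// ... modify elevator/cgroup state while only the dispatch FIFO
 *	//     is in use ...
 *	blk_queue_bypass_end(q);
 */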
496
aed3ea94
JA
497void blk_set_queue_dying(struct request_queue *q)
498{
1b856086
BVA
499 spin_lock_irq(q->queue_lock);
500 queue_flag_set(QUEUE_FLAG_DYING, q);
501 spin_unlock_irq(q->queue_lock);
aed3ea94
JA
502
503 if (q->mq_ops)
504 blk_mq_wake_waiters(q);
505 else {
506 struct request_list *rl;
507
508 blk_queue_for_each_rl(rl, q) {
509 if (rl->rq_pool) {
510 wake_up(&rl->wait[BLK_RW_SYNC]);
511 wake_up(&rl->wait[BLK_RW_ASYNC]);
512 }
513 }
514 }
515}
516EXPORT_SYMBOL_GPL(blk_set_queue_dying);
517
/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
 * put it. All future requests will be failed immediately with -ENODEV.
 */
6728cb0e 525void blk_cleanup_queue(struct request_queue *q)
483f4afc 526{
c9a929dd 527 spinlock_t *lock = q->queue_lock;
e3335de9 528
3f3299d5 529 /* mark @q DYING, no new request or merges will be allowed afterwards */
483f4afc 530 mutex_lock(&q->sysfs_lock);
aed3ea94 531 blk_set_queue_dying(q);
c9a929dd 532 spin_lock_irq(lock);
6ecf23af 533
80fd9979 534 /*
3f3299d5 535 * A dying queue is permanently in bypass mode till released. Note
80fd9979
TH
536 * that, unlike blk_queue_bypass_start(), we aren't performing
537 * synchronize_rcu() after entering bypass mode to avoid the delay
538 * as some drivers create and destroy a lot of queues while
539 * probing. This is still safe because blk_release_queue() will be
540 * called only after the queue refcnt drops to zero and nothing,
541 * RCU or not, would be traversing the queue by then.
542 */
6ecf23af
TH
543 q->bypass_depth++;
544 queue_flag_set(QUEUE_FLAG_BYPASS, q);
545
c9a929dd
TH
546 queue_flag_set(QUEUE_FLAG_NOMERGES, q);
547 queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
3f3299d5 548 queue_flag_set(QUEUE_FLAG_DYING, q);
c9a929dd
TH
549 spin_unlock_irq(lock);
550 mutex_unlock(&q->sysfs_lock);
551
c246e80d
BVA
552 /*
553 * Drain all requests queued before DYING marking. Set DEAD flag to
554 * prevent that q->request_fn() gets invoked after draining finished.
555 */
3ef28e83
DW
556 blk_freeze_queue(q);
557 spin_lock_irq(lock);
558 if (!q->mq_ops)
43a5e4e2 559 __blk_drain_queue(q, true);
c246e80d 560 queue_flag_set(QUEUE_FLAG_DEAD, q);
807592a4 561 spin_unlock_irq(lock);
c9a929dd 562
5a48fc14
DW
563 /* for synchronous bio-based driver finish in-flight integrity i/o */
564 blk_flush_integrity();
565
c9a929dd 566 /* @q won't process any more request, flush async actions */
dc3b17cc 567 del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
c9a929dd
TH
568 blk_sync_queue(q);
569
45a9c9d9
BVA
570 if (q->mq_ops)
571 blk_mq_free_queue(q);
3ef28e83 572 percpu_ref_exit(&q->q_usage_counter);
45a9c9d9 573
5e5cfac0
AH
574 spin_lock_irq(lock);
575 if (q->queue_lock != &q->__queue_lock)
576 q->queue_lock = &q->__queue_lock;
577 spin_unlock_irq(lock);
578
dc3b17cc 579 bdi_unregister(q->backing_dev_info);
0dba1314 580 put_disk_devt(q->disk_devt);
6cd18e71 581
c9a929dd 582 /* @q is and will stay empty, shutdown and put */
483f4afc
AV
583 blk_put_queue(q);
584}
1da177e4
LT
585EXPORT_SYMBOL(blk_cleanup_queue);
586
271508db 587/* Allocate memory local to the request queue */
6d247d7f 588static void *alloc_request_simple(gfp_t gfp_mask, void *data)
271508db 589{
6d247d7f
CH
590 struct request_queue *q = data;
591
592 return kmem_cache_alloc_node(request_cachep, gfp_mask, q->node);
271508db
DR
593}
594
6d247d7f 595static void free_request_simple(void *element, void *data)
271508db
DR
596{
597 kmem_cache_free(request_cachep, element);
598}
599
6d247d7f
CH
600static void *alloc_request_size(gfp_t gfp_mask, void *data)
601{
602 struct request_queue *q = data;
603 struct request *rq;
604
605 rq = kmalloc_node(sizeof(struct request) + q->cmd_size, gfp_mask,
606 q->node);
607 if (rq && q->init_rq_fn && q->init_rq_fn(q, rq, gfp_mask) < 0) {
608 kfree(rq);
609 rq = NULL;
610 }
611 return rq;
612}
613
614static void free_request_size(void *element, void *data)
615{
616 struct request_queue *q = data;
617
618 if (q->exit_rq_fn)
619 q->exit_rq_fn(q, element);
620 kfree(element);
621}
622
5b788ce3
TH
623int blk_init_rl(struct request_list *rl, struct request_queue *q,
624 gfp_t gfp_mask)
1da177e4 625{
1abec4fd
MS
626 if (unlikely(rl->rq_pool))
627 return 0;
628
5b788ce3 629 rl->q = q;
1faa16d2
JA
630 rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
631 rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
1faa16d2
JA
632 init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
633 init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
1da177e4 634
6d247d7f
CH
635 if (q->cmd_size) {
636 rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
637 alloc_request_size, free_request_size,
638 q, gfp_mask, q->node);
639 } else {
640 rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
641 alloc_request_simple, free_request_simple,
642 q, gfp_mask, q->node);
643 }
1da177e4
LT
644 if (!rl->rq_pool)
645 return -ENOMEM;
646
647 return 0;
648}
649
5b788ce3
TH
650void blk_exit_rl(struct request_list *rl)
651{
652 if (rl->rq_pool)
653 mempool_destroy(rl->rq_pool);
654}
655
165125e1 656struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
1da177e4 657{
c304a51b 658 return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
1946089a
CL
659}
660EXPORT_SYMBOL(blk_alloc_queue);
1da177e4 661
6f3b0e8b 662int blk_queue_enter(struct request_queue *q, bool nowait)
3ef28e83
DW
663{
664 while (true) {
665 int ret;
666
667 if (percpu_ref_tryget_live(&q->q_usage_counter))
668 return 0;
669
6f3b0e8b 670 if (nowait)
3ef28e83
DW
671 return -EBUSY;
672
673 ret = wait_event_interruptible(q->mq_freeze_wq,
674 !atomic_read(&q->mq_freeze_depth) ||
675 blk_queue_dying(q));
676 if (blk_queue_dying(q))
677 return -ENODEV;
678 if (ret)
679 return ret;
680 }
681}
682
683void blk_queue_exit(struct request_queue *q)
684{
685 percpu_ref_put(&q->q_usage_counter);
686}
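
/*
 * Example (illustrative sketch, not part of this file): submission paths use
 * blk_queue_enter()/blk_queue_exit() to hold a q_usage_counter reference so
 * the queue cannot be frozen or torn down underneath them:
 *
 *	if (blk_queue_enter(q, false))	// false: may sleep while frozen
 *		return;			// queue is dying
 *	// ... issue work against q ...
 *	blk_queue_exit(q);
 */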
687
688static void blk_queue_usage_counter_release(struct percpu_ref *ref)
689{
690 struct request_queue *q =
691 container_of(ref, struct request_queue, q_usage_counter);
692
693 wake_up_all(&q->mq_freeze_wq);
694}
695
287922eb
CH
696static void blk_rq_timed_out_timer(unsigned long data)
697{
698 struct request_queue *q = (struct request_queue *)data;
699
700 kblockd_schedule_work(&q->timeout_work);
701}
702
165125e1 703struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
1946089a 704{
165125e1 705 struct request_queue *q;
1946089a 706
8324aa91 707 q = kmem_cache_alloc_node(blk_requestq_cachep,
94f6030c 708 gfp_mask | __GFP_ZERO, node_id);
1da177e4
LT
709 if (!q)
710 return NULL;
711
00380a40 712 q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
a73f730d 713 if (q->id < 0)
3d2936f4 714 goto fail_q;
a73f730d 715
54efd50b
KO
716 q->bio_split = bioset_create(BIO_POOL_SIZE, 0);
717 if (!q->bio_split)
718 goto fail_id;
719
d03f6cdc
JK
720 q->backing_dev_info = bdi_alloc_node(gfp_mask, node_id);
721 if (!q->backing_dev_info)
722 goto fail_split;
723
dc3b17cc 724 q->backing_dev_info->ra_pages =
09cbfeaf 725 (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
dc3b17cc
JK
726 q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
727 q->backing_dev_info->name = "block";
5151412d 728 q->node = node_id;
0989a025 729
dc3b17cc 730 setup_timer(&q->backing_dev_info->laptop_mode_wb_timer,
31373d09 731 laptop_mode_timer_fn, (unsigned long) q);
242f9dcb 732 setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
b855b04a 733 INIT_LIST_HEAD(&q->queue_head);
242f9dcb 734 INIT_LIST_HEAD(&q->timeout_list);
a612fddf 735 INIT_LIST_HEAD(&q->icq_list);
4eef3049 736#ifdef CONFIG_BLK_CGROUP
e8989fae 737 INIT_LIST_HEAD(&q->blkg_list);
4eef3049 738#endif
3cca6dc1 739 INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);
483f4afc 740
8324aa91 741 kobject_init(&q->kobj, &blk_queue_ktype);
1da177e4 742
483f4afc 743 mutex_init(&q->sysfs_lock);
e7e72bf6 744 spin_lock_init(&q->__queue_lock);
483f4afc 745
c94a96ac
VG
746 /*
747 * By default initialize queue_lock to internal lock and driver can
748 * override it later if need be.
749 */
750 q->queue_lock = &q->__queue_lock;
751
b82d4b19
TH
752 /*
753 * A queue starts its life with bypass turned on to avoid
754 * unnecessary bypass on/off overhead and nasty surprises during
749fefe6
TH
755 * init. The initial bypass will be finished when the queue is
756 * registered by blk_register_queue().
b82d4b19
TH
757 */
758 q->bypass_depth = 1;
759 __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
760
320ae51f
JA
761 init_waitqueue_head(&q->mq_freeze_wq);
762
3ef28e83
DW
763 /*
764 * Init percpu_ref in atomic mode so that it's faster to shutdown.
765 * See blk_register_queue() for details.
766 */
767 if (percpu_ref_init(&q->q_usage_counter,
768 blk_queue_usage_counter_release,
769 PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
fff4996b 770 goto fail_bdi;
f51b802c 771
3ef28e83
DW
772 if (blkcg_init_queue(q))
773 goto fail_ref;
774
1da177e4 775 return q;
a73f730d 776
3ef28e83
DW
777fail_ref:
778 percpu_ref_exit(&q->q_usage_counter);
fff4996b 779fail_bdi:
d03f6cdc 780 bdi_put(q->backing_dev_info);
54efd50b
KO
781fail_split:
782 bioset_free(q->bio_split);
a73f730d
TH
783fail_id:
784 ida_simple_remove(&blk_queue_ida, q->id);
785fail_q:
786 kmem_cache_free(blk_requestq_cachep, q);
787 return NULL;
1da177e4 788}
1946089a 789EXPORT_SYMBOL(blk_alloc_queue_node);

/**
 * blk_init_queue - prepare a request queue for use with a block device
 * @rfn:  The function to be called to process requests that have been
 *        placed on the queue.
 * @lock: Request queue spin lock
 *
 * Description:
 *    If a block device wishes to use the standard request handling procedures,
 *    which sorts requests and coalesces adjacent requests, then it must
 *    call blk_init_queue(). The function @rfn will be called when there
 *    are requests on the queue that need to be processed. If the device
 *    supports plugging, then @rfn may not be called immediately when requests
 *    are available on the queue, but may be called at some time later instead.
 *    Plugged queues are generally unplugged when a buffer belonging to one
 *    of the requests on the queue is needed, or due to memory pressure.
 *
 *    @rfn is not required, or even expected, to remove all requests off the
 *    queue, but only as many as it can handle at a time. If it does leave
 *    requests on the queue, it is responsible for arranging that the requests
 *    get dealt with eventually.
 *
 *    The queue spin lock must be held while manipulating the requests on the
 *    request queue; this lock will be taken also from interrupt context, so irq
 *    disabling is needed for it.
 *
 *    Function returns a pointer to the initialized request queue, or %NULL if
 *    it didn't succeed.
 *
 * Note:
 *    blk_init_queue() must be paired with a blk_cleanup_queue() call
 *    when the block device is deactivated (such as at module unload).
 **/

struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
	return blk_init_queue_node(rfn, lock, NUMA_NO_NODE);
}
EXPORT_SYMBOL(blk_init_queue);

struct request_queue *
blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
{
	struct request_queue *q;

	q = blk_alloc_queue_node(GFP_KERNEL, node_id);
	if (!q)
		return NULL;

	q->request_fn = rfn;
	if (lock)
		q->queue_lock = lock;
	if (blk_init_allocated_queue(q) < 0) {
		blk_cleanup_queue(q);
		return NULL;
	}

	return q;
}
EXPORT_SYMBOL(blk_init_queue_node);
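
/*
 * Example (illustrative sketch, not part of this file): minimal legacy
 * (single-queue) driver setup and teardown using the functions above.
 * example_request_fn, dev and its fields are hypothetical.
 *
 *	spin_lock_init(&dev->lock);
 *	dev->queue = blk_init_queue(example_request_fn, &dev->lock);
 *	if (!dev->queue)
 *		return -ENOMEM;
 *	dev->queue->queuedata = dev;
 *	blk_queue_logical_block_size(dev->queue, 512);
 *
 *	// on removal/module unload, as the Note above requires:
 *	blk_cleanup_queue(dev->queue);
 */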
850
dece1635 851static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio);
336b7e1f 852
1da177e4 853
5ea708d1
CH
854int blk_init_allocated_queue(struct request_queue *q)
855{
6d247d7f 856 q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size);
ba483388 857 if (!q->fq)
5ea708d1 858 return -ENOMEM;
7982e90c 859
6d247d7f
CH
860 if (q->init_rq_fn && q->init_rq_fn(q, q->fq->flush_rq, GFP_KERNEL))
861 goto out_free_flush_queue;
862
a051661c 863 if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
6d247d7f 864 goto out_exit_flush_rq;
1da177e4 865
287922eb 866 INIT_WORK(&q->timeout_work, blk_timeout_work);
60ea8226 867 q->queue_flags |= QUEUE_FLAG_DEFAULT;
c94a96ac 868
f3b144aa
JA
869 /*
870 * This also sets hw/phys segments, boundary and size
871 */
c20e8de2 872 blk_queue_make_request(q, blk_queue_bio);
1da177e4 873
44ec9542
AS
874 q->sg_reserved_size = INT_MAX;
875
eb1c160b
TS
876 /* Protect q->elevator from elevator_change */
877 mutex_lock(&q->sysfs_lock);
878
b82d4b19 879 /* init elevator */
eb1c160b
TS
880 if (elevator_init(q, NULL)) {
881 mutex_unlock(&q->sysfs_lock);
6d247d7f 882 goto out_exit_flush_rq;
eb1c160b
TS
883 }
884
885 mutex_unlock(&q->sysfs_lock);
5ea708d1 886 return 0;
708f04d2 887
6d247d7f
CH
888out_exit_flush_rq:
889 if (q->exit_rq_fn)
890 q->exit_rq_fn(q, q->fq->flush_rq);
891out_free_flush_queue:
ba483388 892 blk_free_flush_queue(q->fq);
87760e5e 893 wbt_exit(q);
5ea708d1 894 return -ENOMEM;
1da177e4 895}
5151412d 896EXPORT_SYMBOL(blk_init_allocated_queue);
1da177e4 897
09ac46c4 898bool blk_get_queue(struct request_queue *q)
1da177e4 899{
3f3299d5 900 if (likely(!blk_queue_dying(q))) {
09ac46c4
TH
901 __blk_get_queue(q);
902 return true;
1da177e4
LT
903 }
904
09ac46c4 905 return false;
1da177e4 906}
d86e0e83 907EXPORT_SYMBOL(blk_get_queue);
1da177e4 908
5b788ce3 909static inline void blk_free_request(struct request_list *rl, struct request *rq)
1da177e4 910{
e8064021 911 if (rq->rq_flags & RQF_ELVPRIV) {
5b788ce3 912 elv_put_request(rl->q, rq);
f1f8cc94 913 if (rq->elv.icq)
11a3122f 914 put_io_context(rq->elv.icq->ioc);
f1f8cc94
TH
915 }
916
5b788ce3 917 mempool_free(rq, rl->rq_pool);
1da177e4
LT
918}
919
1da177e4
LT
920/*
921 * ioc_batching returns true if the ioc is a valid batching request and
922 * should be given priority access to a request.
923 */
165125e1 924static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
1da177e4
LT
925{
926 if (!ioc)
927 return 0;
928
929 /*
930 * Make sure the process is able to allocate at least 1 request
931 * even if the batch times out, otherwise we could theoretically
932 * lose wakeups.
933 */
934 return ioc->nr_batch_requests == q->nr_batching ||
935 (ioc->nr_batch_requests > 0
936 && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
937}
938
939/*
940 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
941 * will cause the process to be a "batcher" on all queues in the system. This
942 * is the behaviour we want though - once it gets a wakeup it should be given
943 * a nice run.
944 */
165125e1 945static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
1da177e4
LT
946{
947 if (!ioc || ioc_batching(q, ioc))
948 return;
949
950 ioc->nr_batch_requests = q->nr_batching;
951 ioc->last_waited = jiffies;
952}
953
5b788ce3 954static void __freed_request(struct request_list *rl, int sync)
1da177e4 955{
5b788ce3 956 struct request_queue *q = rl->q;
1da177e4 957
d40f75a0
TH
958 if (rl->count[sync] < queue_congestion_off_threshold(q))
959 blk_clear_congested(rl, sync);
1da177e4 960
1faa16d2
JA
961 if (rl->count[sync] + 1 <= q->nr_requests) {
962 if (waitqueue_active(&rl->wait[sync]))
963 wake_up(&rl->wait[sync]);
1da177e4 964
5b788ce3 965 blk_clear_rl_full(rl, sync);
1da177e4
LT
966 }
967}
968
969/*
970 * A request has just been released. Account for it, update the full and
971 * congestion status, wake up any waiters. Called under q->queue_lock.
972 */
e8064021
CH
973static void freed_request(struct request_list *rl, bool sync,
974 req_flags_t rq_flags)
1da177e4 975{
5b788ce3 976 struct request_queue *q = rl->q;
1da177e4 977
8a5ecdd4 978 q->nr_rqs[sync]--;
1faa16d2 979 rl->count[sync]--;
e8064021 980 if (rq_flags & RQF_ELVPRIV)
8a5ecdd4 981 q->nr_rqs_elvpriv--;
1da177e4 982
5b788ce3 983 __freed_request(rl, sync);
1da177e4 984
1faa16d2 985 if (unlikely(rl->starved[sync ^ 1]))
5b788ce3 986 __freed_request(rl, sync ^ 1);
1da177e4
LT
987}
988
e3a2b3f9
JA
989int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
990{
991 struct request_list *rl;
d40f75a0 992 int on_thresh, off_thresh;
e3a2b3f9
JA
993
994 spin_lock_irq(q->queue_lock);
995 q->nr_requests = nr;
996 blk_queue_congestion_threshold(q);
d40f75a0
TH
997 on_thresh = queue_congestion_on_threshold(q);
998 off_thresh = queue_congestion_off_threshold(q);
e3a2b3f9 999
d40f75a0
TH
1000 blk_queue_for_each_rl(rl, q) {
1001 if (rl->count[BLK_RW_SYNC] >= on_thresh)
1002 blk_set_congested(rl, BLK_RW_SYNC);
1003 else if (rl->count[BLK_RW_SYNC] < off_thresh)
1004 blk_clear_congested(rl, BLK_RW_SYNC);
e3a2b3f9 1005
d40f75a0
TH
1006 if (rl->count[BLK_RW_ASYNC] >= on_thresh)
1007 blk_set_congested(rl, BLK_RW_ASYNC);
1008 else if (rl->count[BLK_RW_ASYNC] < off_thresh)
1009 blk_clear_congested(rl, BLK_RW_ASYNC);
e3a2b3f9 1010
e3a2b3f9
JA
1011 if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
1012 blk_set_rl_full(rl, BLK_RW_SYNC);
1013 } else {
1014 blk_clear_rl_full(rl, BLK_RW_SYNC);
1015 wake_up(&rl->wait[BLK_RW_SYNC]);
1016 }
1017
1018 if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
1019 blk_set_rl_full(rl, BLK_RW_ASYNC);
1020 } else {
1021 blk_clear_rl_full(rl, BLK_RW_ASYNC);
1022 wake_up(&rl->wait[BLK_RW_ASYNC]);
1023 }
1024 }
1025
1026 spin_unlock_irq(q->queue_lock);
1027 return 0;
1028}
1029
/**
 * __get_request - get a free request
 * @rl: request list to allocate from
 * @op: operation and flags
 * @bio: bio to allocate request for (can be %NULL)
 * @gfp_mask: allocation mask
 *
 * Get a free request from @q. This function may fail under memory
 * pressure or if @q is dead.
 *
 * Must be called with @q->queue_lock held and,
 * Returns ERR_PTR on failure, with @q->queue_lock held.
 * Returns request pointer on success, with @q->queue_lock *not held*.
 */
static struct request *__get_request(struct request_list *rl, unsigned int op,
				     struct bio *bio, gfp_t gfp_mask)
{
5b788ce3 1047 struct request_queue *q = rl->q;
b679281a 1048 struct request *rq;
7f4b35d1
TH
1049 struct elevator_type *et = q->elevator->type;
1050 struct io_context *ioc = rq_ioc(bio);
f1f8cc94 1051 struct io_cq *icq = NULL;
ef295ecf 1052 const bool is_sync = op_is_sync(op);
75eb6c37 1053 int may_queue;
e8064021 1054 req_flags_t rq_flags = RQF_ALLOCED;
88ee5ef1 1055
3f3299d5 1056 if (unlikely(blk_queue_dying(q)))
a492f075 1057 return ERR_PTR(-ENODEV);
da8303c6 1058
ef295ecf 1059 may_queue = elv_may_queue(q, op);
88ee5ef1
JA
1060 if (may_queue == ELV_MQUEUE_NO)
1061 goto rq_starved;
1062
1faa16d2
JA
1063 if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
1064 if (rl->count[is_sync]+1 >= q->nr_requests) {
88ee5ef1
JA
1065 /*
1066 * The queue will fill after this allocation, so set
1067 * it as full, and mark this process as "batching".
1068 * This process will be allowed to complete a batch of
1069 * requests, others will be blocked.
1070 */
5b788ce3 1071 if (!blk_rl_full(rl, is_sync)) {
88ee5ef1 1072 ioc_set_batching(q, ioc);
5b788ce3 1073 blk_set_rl_full(rl, is_sync);
88ee5ef1
JA
1074 } else {
1075 if (may_queue != ELV_MQUEUE_MUST
1076 && !ioc_batching(q, ioc)) {
1077 /*
1078 * The queue is full and the allocating
1079 * process is not a "batcher", and not
1080 * exempted by the IO scheduler
1081 */
a492f075 1082 return ERR_PTR(-ENOMEM);
88ee5ef1
JA
1083 }
1084 }
1da177e4 1085 }
d40f75a0 1086 blk_set_congested(rl, is_sync);
1da177e4
LT
1087 }
1088
082cf69e
JA
1089 /*
1090 * Only allow batching queuers to allocate up to 50% over the defined
1091 * limit of requests, otherwise we could have thousands of requests
1092 * allocated with any setting of ->nr_requests
1093 */
1faa16d2 1094 if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
a492f075 1095 return ERR_PTR(-ENOMEM);
fd782a4a 1096
8a5ecdd4 1097 q->nr_rqs[is_sync]++;
1faa16d2
JA
1098 rl->count[is_sync]++;
1099 rl->starved[is_sync] = 0;
cb98fc8b 1100
f1f8cc94
TH
1101 /*
1102 * Decide whether the new request will be managed by elevator. If
e8064021 1103 * so, mark @rq_flags and increment elvpriv. Non-zero elvpriv will
f1f8cc94
TH
1104 * prevent the current elevator from being destroyed until the new
1105 * request is freed. This guarantees icq's won't be destroyed and
1106 * makes creating new ones safe.
1107 *
e6f7f93d
CH
1108 * Flush requests do not use the elevator so skip initialization.
1109 * This allows a request to share the flush and elevator data.
1110 *
f1f8cc94
TH
1111 * Also, lookup icq while holding queue_lock. If it doesn't exist,
1112 * it will be created after releasing queue_lock.
1113 */
e6f7f93d 1114 if (!op_is_flush(op) && !blk_queue_bypass(q)) {
e8064021 1115 rq_flags |= RQF_ELVPRIV;
8a5ecdd4 1116 q->nr_rqs_elvpriv++;
f1f8cc94
TH
1117 if (et->icq_cache && ioc)
1118 icq = ioc_lookup_icq(ioc, q);
9d5a4e94 1119 }
cb98fc8b 1120
f253b86b 1121 if (blk_queue_io_stat(q))
e8064021 1122 rq_flags |= RQF_IO_STAT;
1da177e4
LT
1123 spin_unlock_irq(q->queue_lock);
1124
29e2b09a 1125 /* allocate and init request */
5b788ce3 1126 rq = mempool_alloc(rl->rq_pool, gfp_mask);
29e2b09a 1127 if (!rq)
b679281a 1128 goto fail_alloc;
1da177e4 1129
29e2b09a 1130 blk_rq_init(q, rq);
a051661c 1131 blk_rq_set_rl(rq, rl);
5dc8b362 1132 blk_rq_set_prio(rq, ioc);
ef295ecf 1133 rq->cmd_flags = op;
e8064021 1134 rq->rq_flags = rq_flags;
29e2b09a 1135
aaf7c680 1136 /* init elvpriv */
e8064021 1137 if (rq_flags & RQF_ELVPRIV) {
aaf7c680 1138 if (unlikely(et->icq_cache && !icq)) {
7f4b35d1
TH
1139 if (ioc)
1140 icq = ioc_create_icq(ioc, q, gfp_mask);
aaf7c680
TH
1141 if (!icq)
1142 goto fail_elvpriv;
29e2b09a 1143 }
aaf7c680
TH
1144
1145 rq->elv.icq = icq;
1146 if (unlikely(elv_set_request(q, rq, bio, gfp_mask)))
1147 goto fail_elvpriv;
1148
1149 /* @rq->elv.icq holds io_context until @rq is freed */
29e2b09a
TH
1150 if (icq)
1151 get_io_context(icq->ioc);
1152 }
aaf7c680 1153out:
88ee5ef1
JA
1154 /*
1155 * ioc may be NULL here, and ioc_batching will be false. That's
1156 * OK, if the queue is under the request limit then requests need
1157 * not count toward the nr_batch_requests limit. There will always
1158 * be some limit enforced by BLK_BATCH_TIME.
1159 */
1da177e4
LT
1160 if (ioc_batching(q, ioc))
1161 ioc->nr_batch_requests--;
6728cb0e 1162
e6a40b09 1163 trace_block_getrq(q, bio, op);
1da177e4 1164 return rq;
b679281a 1165
aaf7c680
TH
1166fail_elvpriv:
	/*
	 * elvpriv init failed. ioc, icq and elvpriv aren't mempool backed
	 * and may fail indefinitely under memory pressure and thus
	 * shouldn't stall IO. Treat this request as !elvpriv. This will
	 * disturb iosched and blkcg but weird is better than dead.
	 */
7b2b10e0 1173 printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n",
dc3b17cc 1174 __func__, dev_name(q->backing_dev_info->dev));
aaf7c680 1175
e8064021 1176 rq->rq_flags &= ~RQF_ELVPRIV;
aaf7c680
TH
1177 rq->elv.icq = NULL;
1178
1179 spin_lock_irq(q->queue_lock);
8a5ecdd4 1180 q->nr_rqs_elvpriv--;
aaf7c680
TH
1181 spin_unlock_irq(q->queue_lock);
1182 goto out;
1183
b679281a
TH
1184fail_alloc:
1185 /*
1186 * Allocation failed presumably due to memory. Undo anything we
1187 * might have messed up.
1188 *
1189 * Allocating task should really be put onto the front of the wait
1190 * queue, but this is pretty rare.
1191 */
1192 spin_lock_irq(q->queue_lock);
e8064021 1193 freed_request(rl, is_sync, rq_flags);
b679281a
TH
1194
	/*
	 * In the very unlikely event that allocation failed and no
	 * requests for this direction were pending, mark us starved so that
	 * freeing of a request in the other direction will notice
	 * us. Another possible fix would be to split the rq mempool into
	 * READ and WRITE.
	 */
1202rq_starved:
1203 if (unlikely(rl->count[is_sync] == 0))
1204 rl->starved[is_sync] = 1;
a492f075 1205 return ERR_PTR(-ENOMEM);
1da177e4
LT
1206}
1207
/**
 * get_request - get a free request
 * @q: request_queue to allocate request from
 * @op: operation and flags
 * @bio: bio to allocate request for (can be %NULL)
 * @gfp_mask: allocation mask
 *
 * Get a free request from @q. If %__GFP_DIRECT_RECLAIM is set in @gfp_mask,
 * this function keeps retrying under memory pressure and fails iff @q is dead.
 *
 * Must be called with @q->queue_lock held and,
 * Returns ERR_PTR on failure, with @q->queue_lock held.
 * Returns request pointer on success, with @q->queue_lock *not held*.
 */
static struct request *get_request(struct request_queue *q, unsigned int op,
				   struct bio *bio, gfp_t gfp_mask)
{
ef295ecf 1225 const bool is_sync = op_is_sync(op);
a06e05e6 1226 DEFINE_WAIT(wait);
a051661c 1227 struct request_list *rl;
1da177e4 1228 struct request *rq;
a051661c
TH
1229
1230 rl = blk_get_rl(q, bio); /* transferred to @rq on success */
a06e05e6 1231retry:
ef295ecf 1232 rq = __get_request(rl, op, bio, gfp_mask);
a492f075 1233 if (!IS_ERR(rq))
a06e05e6 1234 return rq;
1da177e4 1235
d0164adc 1236 if (!gfpflags_allow_blocking(gfp_mask) || unlikely(blk_queue_dying(q))) {
a051661c 1237 blk_put_rl(rl);
a492f075 1238 return rq;
a051661c 1239 }
1da177e4 1240
a06e05e6
TH
1241 /* wait on @rl and retry */
1242 prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
1243 TASK_UNINTERRUPTIBLE);
1da177e4 1244
e6a40b09 1245 trace_block_sleeprq(q, bio, op);
1da177e4 1246
a06e05e6
TH
1247 spin_unlock_irq(q->queue_lock);
1248 io_schedule();
d6344532 1249
a06e05e6
TH
1250 /*
1251 * After sleeping, we become a "batching" process and will be able
1252 * to allocate at least one request, and up to a big batch of them
1253 * for a small period time. See ioc_batching, ioc_set_batching
1254 */
a06e05e6 1255 ioc_set_batching(q, current->io_context);
05caf8db 1256
a06e05e6
TH
1257 spin_lock_irq(q->queue_lock);
1258 finish_wait(&rl->wait[is_sync], &wait);
1da177e4 1259
a06e05e6 1260 goto retry;
1da177e4
LT
1261}
1262
320ae51f
JA
1263static struct request *blk_old_get_request(struct request_queue *q, int rw,
1264 gfp_t gfp_mask)
1da177e4
LT
1265{
1266 struct request *rq;
1267
7f4b35d1
TH
1268 /* create ioc upfront */
1269 create_io_context(gfp_mask, q->node);
1270
d6344532 1271 spin_lock_irq(q->queue_lock);
ef295ecf 1272 rq = get_request(q, rw, NULL, gfp_mask);
0c4de0f3 1273 if (IS_ERR(rq)) {
da8303c6 1274 spin_unlock_irq(q->queue_lock);
0c4de0f3
CH
1275 return rq;
1276 }
1da177e4 1277
0c4de0f3
CH
1278 /* q->queue_lock is unlocked at this point */
1279 rq->__data_len = 0;
1280 rq->__sector = (sector_t) -1;
1281 rq->bio = rq->biotail = NULL;
1da177e4
LT
1282 return rq;
1283}
320ae51f
JA
1284
1285struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
1286{
1287 if (q->mq_ops)
6f3b0e8b
CH
1288 return blk_mq_alloc_request(q, rw,
1289 (gfp_mask & __GFP_DIRECT_RECLAIM) ?
1290 0 : BLK_MQ_REQ_NOWAIT);
320ae51f
JA
1291 else
1292 return blk_old_get_request(q, rw, gfp_mask);
1293}
1da177e4
LT
1294EXPORT_SYMBOL(blk_get_request);
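
/*
 * Example (illustrative sketch, not part of this file): allocating a request
 * directly, e.g. for a driver-private or pass-through command. Failures are
 * returned as ERR_PTR values rather than NULL, so IS_ERR() is the right
 * check:
 *
 *	struct request *rq;
 *
 *	rq = blk_get_request(q, WRITE, GFP_KERNEL);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	// ... set up and issue rq, e.g. via blk_execute_rq() ...
 *	blk_put_request(rq);
 */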
1295
/**
 * blk_requeue_request - put a request back on queue
 * @q:		request queue where request should be inserted
 * @rq:		request to be inserted
 *
 * Description:
 *    Drivers often keep queueing requests until the hardware cannot accept
 *    more; when that condition happens we need to put the request back
 *    on the queue. Must be called with queue lock held.
 */
void blk_requeue_request(struct request_queue *q, struct request *rq)
1da177e4 1307{
242f9dcb
JA
1308 blk_delete_timer(rq);
1309 blk_clear_rq_complete(rq);
5f3ea37c 1310 trace_block_rq_requeue(q, rq);
87760e5e 1311 wbt_requeue(q->rq_wb, &rq->issue_stat);
2056a782 1312
e8064021 1313 if (rq->rq_flags & RQF_QUEUED)
1da177e4
LT
1314 blk_queue_end_tag(q, rq);
1315
ba396a6c
JB
1316 BUG_ON(blk_queued_rq(rq));
1317
1da177e4
LT
1318 elv_requeue_request(q, rq);
1319}
1da177e4
LT
1320EXPORT_SYMBOL(blk_requeue_request);
1321
73c10101
JA
1322static void add_acct_request(struct request_queue *q, struct request *rq,
1323 int where)
1324{
320ae51f 1325 blk_account_io_start(rq, true);
7eaceacc 1326 __elv_add_request(q, rq, where);
73c10101
JA
1327}
1328
074a7aca
TH
1329static void part_round_stats_single(int cpu, struct hd_struct *part,
1330 unsigned long now)
1331{
7276d02e
JA
1332 int inflight;
1333
074a7aca
TH
1334 if (now == part->stamp)
1335 return;
1336
7276d02e
JA
1337 inflight = part_in_flight(part);
1338 if (inflight) {
074a7aca 1339 __part_stat_add(cpu, part, time_in_queue,
7276d02e 1340 inflight * (now - part->stamp));
074a7aca
TH
1341 __part_stat_add(cpu, part, io_ticks, (now - part->stamp));
1342 }
1343 part->stamp = now;
1344}
1345
1346/**
496aa8a9
RD
1347 * part_round_stats() - Round off the performance stats on a struct disk_stats.
1348 * @cpu: cpu number for stats access
1349 * @part: target partition
1da177e4
LT
1350 *
1351 * The average IO queue length and utilisation statistics are maintained
1352 * by observing the current state of the queue length and the amount of
1353 * time it has been in this state for.
1354 *
1355 * Normally, that accounting is done on IO completion, but that can result
1356 * in more than a second's worth of IO being accounted for within any one
1357 * second, leading to >100% utilisation. To deal with that, we call this
1358 * function to do a round-off before returning the results when reading
1359 * /proc/diskstats. This accounts immediately for all queue usage up to
1360 * the current jiffies and restarts the counters again.
1361 */
c9959059 1362void part_round_stats(int cpu, struct hd_struct *part)
6f2576af
JM
1363{
1364 unsigned long now = jiffies;
1365
074a7aca
TH
1366 if (part->partno)
1367 part_round_stats_single(cpu, &part_to_disk(part)->part0, now);
1368 part_round_stats_single(cpu, part, now);
6f2576af 1369}
074a7aca 1370EXPORT_SYMBOL_GPL(part_round_stats);
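
/*
 * Example (illustrative sketch, not part of this file): callers such as the
 * diskstats reporting code round off the stats before reading them, bracketed
 * by the per-cpu stat lock:
 *
 *	cpu = part_stat_lock();
 *	part_round_stats(cpu, part);
 *	part_stat_unlock();
 */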
6f2576af 1371
47fafbc7 1372#ifdef CONFIG_PM
c8158819
LM
1373static void blk_pm_put_request(struct request *rq)
1374{
e8064021 1375 if (rq->q->dev && !(rq->rq_flags & RQF_PM) && !--rq->q->nr_pending)
c8158819
LM
1376 pm_runtime_mark_last_busy(rq->q->dev);
1377}
1378#else
1379static inline void blk_pm_put_request(struct request *rq) {}
1380#endif
1381
1da177e4
LT
1382/*
1383 * queue lock must be held
1384 */
165125e1 1385void __blk_put_request(struct request_queue *q, struct request *req)
1da177e4 1386{
e8064021
CH
1387 req_flags_t rq_flags = req->rq_flags;
1388
1da177e4
LT
1389 if (unlikely(!q))
1390 return;
1da177e4 1391
6f5ba581
CH
1392 if (q->mq_ops) {
1393 blk_mq_free_request(req);
1394 return;
1395 }
1396
c8158819
LM
1397 blk_pm_put_request(req);
1398
8922e16c
TH
1399 elv_completed_request(q, req);
1400
1cd96c24
BH
1401 /* this is a bio leak */
1402 WARN_ON(req->bio != NULL);
1403
87760e5e
JA
1404 wbt_done(q->rq_wb, &req->issue_stat);
1405
1da177e4
LT
1406 /*
1407 * Request may not have originated from ll_rw_blk. if not,
1408 * it didn't come out of our reserved rq pools
1409 */
e8064021 1410 if (rq_flags & RQF_ALLOCED) {
a051661c 1411 struct request_list *rl = blk_rq_rl(req);
ef295ecf 1412 bool sync = op_is_sync(req->cmd_flags);
1da177e4 1413
1da177e4 1414 BUG_ON(!list_empty(&req->queuelist));
360f92c2 1415 BUG_ON(ELV_ON_HASH(req));
1da177e4 1416
a051661c 1417 blk_free_request(rl, req);
e8064021 1418 freed_request(rl, sync, rq_flags);
a051661c 1419 blk_put_rl(rl);
1da177e4
LT
1420 }
1421}
6e39b69e
MC
1422EXPORT_SYMBOL_GPL(__blk_put_request);
1423
1da177e4
LT
1424void blk_put_request(struct request *req)
1425{
165125e1 1426 struct request_queue *q = req->q;
8922e16c 1427
320ae51f
JA
1428 if (q->mq_ops)
1429 blk_mq_free_request(req);
1430 else {
1431 unsigned long flags;
1432
1433 spin_lock_irqsave(q->queue_lock, flags);
1434 __blk_put_request(q, req);
1435 spin_unlock_irqrestore(q->queue_lock, flags);
1436 }
1da177e4 1437}
1da177e4
LT
1438EXPORT_SYMBOL(blk_put_request);
1439
320ae51f
JA
1440bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
1441 struct bio *bio)
73c10101 1442{
1eff9d32 1443 const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
73c10101 1444
73c10101
JA
1445 if (!ll_back_merge_fn(q, req, bio))
1446 return false;
1447
8c1cf6bb 1448 trace_block_bio_backmerge(q, req, bio);
73c10101
JA
1449
1450 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1451 blk_rq_set_mixed_merge(req);
1452
1453 req->biotail->bi_next = bio;
1454 req->biotail = bio;
4f024f37 1455 req->__data_len += bio->bi_iter.bi_size;
73c10101
JA
1456 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1457
320ae51f 1458 blk_account_io_start(req, false);
73c10101
JA
1459 return true;
1460}
1461
320ae51f
JA
1462bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
1463 struct bio *bio)
73c10101 1464{
1eff9d32 1465 const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
73c10101 1466
73c10101
JA
1467 if (!ll_front_merge_fn(q, req, bio))
1468 return false;
1469
8c1cf6bb 1470 trace_block_bio_frontmerge(q, req, bio);
73c10101
JA
1471
1472 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1473 blk_rq_set_mixed_merge(req);
1474
73c10101
JA
1475 bio->bi_next = req->bio;
1476 req->bio = bio;
1477
4f024f37
KO
1478 req->__sector = bio->bi_iter.bi_sector;
1479 req->__data_len += bio->bi_iter.bi_size;
73c10101
JA
1480 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1481
320ae51f 1482 blk_account_io_start(req, false);
73c10101
JA
1483 return true;
1484}
1485
/**
 * blk_attempt_plug_merge - try to merge with %current's plugged list
 * @q: request_queue new bio is being queued at
 * @bio: new bio being queued
 * @request_count: out parameter for number of traversed plugged requests
 * @same_queue_rq: pointer to &struct request that gets filled in when
 * another request associated with @q is found on the plug list
 * (optional, may be %NULL)
 *
 * Determine whether @bio being queued on @q can be merged with a request
 * on %current's plugged list. Returns %true if merge was successful,
 * otherwise %false.
 *
 * Plugging coalesces IOs from the same issuer for the same purpose without
 * going through @q->queue_lock. As such it's more of an issuing mechanism
 * than scheduling, and the request, while it may have elvpriv data, is not
 * added on the elevator at this point. In addition, we don't have
 * reliable access to the elevator outside the queue lock. Only check basic
 * merging parameters without querying the elevator.
 *
 * Caller must ensure !blk_queue_nomerges(q) beforehand.
 */
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
			    unsigned int *request_count,
			    struct request **same_queue_rq)
73c10101
JA
1511{
1512 struct blk_plug *plug;
1513 struct request *rq;
92f399c7 1514 struct list_head *plug_list;
73c10101 1515
bd87b589 1516 plug = current->plug;
73c10101 1517 if (!plug)
34fe7c05 1518 return false;
56ebdaf2 1519 *request_count = 0;
73c10101 1520
92f399c7
SL
1521 if (q->mq_ops)
1522 plug_list = &plug->mq_list;
1523 else
1524 plug_list = &plug->list;
1525
1526 list_for_each_entry_reverse(rq, plug_list, queuelist) {
34fe7c05 1527 bool merged = false;
73c10101 1528
5b3f341f 1529 if (rq->q == q) {
1b2e19f1 1530 (*request_count)++;
5b3f341f
SL
1531 /*
1532 * Only blk-mq multiple hardware queues case checks the
1533 * rq in the same queue, there should be only one such
1534 * rq in a queue
1535 **/
1536 if (same_queue_rq)
1537 *same_queue_rq = rq;
1538 }
56ebdaf2 1539
07c2bd37 1540 if (rq->q != q || !blk_rq_merge_ok(rq, bio))
73c10101
JA
1541 continue;
1542
34fe7c05
CH
1543 switch (blk_try_merge(rq, bio)) {
1544 case ELEVATOR_BACK_MERGE:
1545 merged = bio_attempt_back_merge(q, rq, bio);
1546 break;
1547 case ELEVATOR_FRONT_MERGE:
1548 merged = bio_attempt_front_merge(q, rq, bio);
1549 break;
1550 default:
1551 break;
73c10101 1552 }
34fe7c05
CH
1553
1554 if (merged)
1555 return true;
73c10101 1556 }
34fe7c05
CH
1557
1558 return false;
73c10101
JA
1559}
1560
0809e3ac
JM
1561unsigned int blk_plug_queued_count(struct request_queue *q)
1562{
1563 struct blk_plug *plug;
1564 struct request *rq;
1565 struct list_head *plug_list;
1566 unsigned int ret = 0;
1567
1568 plug = current->plug;
1569 if (!plug)
1570 goto out;
1571
1572 if (q->mq_ops)
1573 plug_list = &plug->mq_list;
1574 else
1575 plug_list = &plug->list;
1576
1577 list_for_each_entry(rq, plug_list, queuelist) {
1578 if (rq->q == q)
1579 ret++;
1580 }
1581out:
1582 return ret;
1583}
1584
86db1e29 1585void init_request_from_bio(struct request *req, struct bio *bio)
52d9e675 1586{
1eff9d32 1587 if (bio->bi_opf & REQ_RAHEAD)
a82afdfc 1588 req->cmd_flags |= REQ_FAILFAST_MASK;
b31dc66a 1589
52d9e675 1590 req->errors = 0;
4f024f37 1591 req->__sector = bio->bi_iter.bi_sector;
5dc8b362
AM
1592 if (ioprio_valid(bio_prio(bio)))
1593 req->ioprio = bio_prio(bio);
bc1c56fd 1594 blk_rq_bio_prep(req->q, req, bio);
52d9e675
TH
1595}
1596
dece1635 1597static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
1da177e4 1598{
73c10101 1599 struct blk_plug *plug;
34fe7c05 1600 int where = ELEVATOR_INSERT_SORT;
e4d750c9 1601 struct request *req, *free;
56ebdaf2 1602 unsigned int request_count = 0;
87760e5e 1603 unsigned int wb_acct;
1da177e4 1604
1da177e4
LT
1605 /*
1606 * low level driver can indicate that it wants pages above a
1607 * certain limit bounced to low memory (ie for highmem, or even
1608 * ISA dma in theory)
1609 */
1610 blk_queue_bounce(q, &bio);
1611
23688bf4
JN
1612 blk_queue_split(q, &bio, q->bio_split);
1613
ffecfd1a 1614 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
4246a0b6
CH
1615 bio->bi_error = -EIO;
1616 bio_endio(bio);
dece1635 1617 return BLK_QC_T_NONE;
ffecfd1a
DW
1618 }
1619
f73f44eb 1620 if (op_is_flush(bio->bi_opf)) {
73c10101 1621 spin_lock_irq(q->queue_lock);
ae1b1539 1622 where = ELEVATOR_INSERT_FLUSH;
28e7d184
TH
1623 goto get_rq;
1624 }
1625
73c10101
JA
1626 /*
1627 * Check if we can merge with the plugged list before grabbing
1628 * any locks.
1629 */
0809e3ac
JM
1630 if (!blk_queue_nomerges(q)) {
1631 if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
dece1635 1632 return BLK_QC_T_NONE;
0809e3ac
JM
1633 } else
1634 request_count = blk_plug_queued_count(q);
1da177e4 1635
73c10101 1636 spin_lock_irq(q->queue_lock);
2056a782 1637
34fe7c05
CH
1638 switch (elv_merge(q, &req, bio)) {
1639 case ELEVATOR_BACK_MERGE:
1640 if (!bio_attempt_back_merge(q, req, bio))
1641 break;
1642 elv_bio_merged(q, req, bio);
1643 free = attempt_back_merge(q, req);
1644 if (free)
1645 __blk_put_request(q, free);
1646 else
1647 elv_merged_request(q, req, ELEVATOR_BACK_MERGE);
1648 goto out_unlock;
1649 case ELEVATOR_FRONT_MERGE:
1650 if (!bio_attempt_front_merge(q, req, bio))
1651 break;
1652 elv_bio_merged(q, req, bio);
1653 free = attempt_front_merge(q, req);
1654 if (free)
1655 __blk_put_request(q, free);
1656 else
1657 elv_merged_request(q, req, ELEVATOR_FRONT_MERGE);
1658 goto out_unlock;
1659 default:
1660 break;
1da177e4
LT
1661 }
1662
450991bc 1663get_rq:
87760e5e
JA
1664 wb_acct = wbt_wait(q->rq_wb, bio, q->queue_lock);
1665
1da177e4 1666 /*
450991bc 1667 * Grab a free request. This might sleep but can not fail.
d6344532 1668 * Returns with the queue unlocked.
450991bc 1669 */
ef295ecf 1670 req = get_request(q, bio->bi_opf, bio, GFP_NOIO);
a492f075 1671 if (IS_ERR(req)) {
87760e5e 1672 __wbt_done(q->rq_wb, wb_acct);
4246a0b6
CH
1673 bio->bi_error = PTR_ERR(req);
1674 bio_endio(bio);
da8303c6
TH
1675 goto out_unlock;
1676 }
d6344532 1677
87760e5e
JA
1678 wbt_track(&req->issue_stat, wb_acct);
1679
450991bc
NP
1680 /*
1681 * After dropping the lock and possibly sleeping here, our request
1682 * may now be mergeable after it had proven unmergeable (above).
1683 * We don't worry about that case for efficiency. It won't happen
1684 * often, and the elevators are able to handle it.
1da177e4 1685 */
52d9e675 1686 init_request_from_bio(req, bio);
1da177e4 1687
9562ad9a 1688 if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
11ccf116 1689 req->cpu = raw_smp_processor_id();
73c10101
JA
1690
1691 plug = current->plug;
721a9602 1692 if (plug) {
dc6d36c9
JA
1693 /*
1694 * If this is the first request added after a plug, fire
7aef2e78 1695 * off a plug trace.
0a6219a9
ML
1696 *
1697 * @request_count may become stale because of schedule
1698 * out, so check plug list again.
dc6d36c9 1699 */
0a6219a9 1700 if (!request_count || list_empty(&plug->list))
dc6d36c9 1701 trace_block_plug(q);
3540d5e8 1702 else {
50d24c34
SL
1703 struct request *last = list_entry_rq(plug->list.prev);
1704 if (request_count >= BLK_MAX_REQUEST_COUNT ||
1705 blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE) {
3540d5e8 1706 blk_flush_plug_list(plug, false);
019ceb7d
SL
1707 trace_block_plug(q);
1708 }
73c10101 1709 }
73c10101 1710 list_add_tail(&req->queuelist, &plug->list);
320ae51f 1711 blk_account_io_start(req, true);
73c10101
JA
1712 } else {
1713 spin_lock_irq(q->queue_lock);
1714 add_acct_request(q, req, where);
24ecfbe2 1715 __blk_run_queue(q);
73c10101
JA
1716out_unlock:
1717 spin_unlock_irq(q->queue_lock);
1718 }
dece1635
JA
1719
1720 return BLK_QC_T_NONE;
1da177e4
LT
1721}
1722
1723/*
1724 * If bio->bi_bdev is a partition, remap the location
1725 */
1726static inline void blk_partition_remap(struct bio *bio)
1727{
1728 struct block_device *bdev = bio->bi_bdev;
1729
778889d8
ST
1730 /*
1731 * Zone reset does not include bi_size so bio_sectors() is always 0.
1732 * Include a test for the reset op code and perform the remap if needed.
1733 */
1734 if (bdev != bdev->bd_contains &&
1735 (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)) {
1da177e4
LT
1736 struct hd_struct *p = bdev->bd_part;
1737
4f024f37 1738 bio->bi_iter.bi_sector += p->start_sect;
1da177e4 1739 bio->bi_bdev = bdev->bd_contains;
c7149d6b 1740
d07335e5
MS
1741 trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
1742 bdev->bd_dev,
4f024f37 1743 bio->bi_iter.bi_sector - p->start_sect);
1da177e4
LT
1744 }
1745}
1746
1da177e4
LT
1747static void handle_bad_sector(struct bio *bio)
1748{
1749 char b[BDEVNAME_SIZE];
1750
1751 printk(KERN_INFO "attempt to access beyond end of device\n");
6296b960 1752 printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
1da177e4 1753 bdevname(bio->bi_bdev, b),
1eff9d32 1754 bio->bi_opf,
f73a1c7d 1755 (unsigned long long)bio_end_sector(bio),
77304d2a 1756 (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
1da177e4
LT
1757}
1758
c17bb495
AM
1759#ifdef CONFIG_FAIL_MAKE_REQUEST
1760
1761static DECLARE_FAULT_ATTR(fail_make_request);
1762
1763static int __init setup_fail_make_request(char *str)
1764{
1765 return setup_fault_attr(&fail_make_request, str);
1766}
1767__setup("fail_make_request=", setup_fail_make_request);
1768
b2c9cd37 1769static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
c17bb495 1770{
b2c9cd37 1771 return part->make_it_fail && should_fail(&fail_make_request, bytes);
c17bb495
AM
1772}
1773
1774static int __init fail_make_request_debugfs(void)
1775{
dd48c085
AM
1776 struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
1777 NULL, &fail_make_request);
1778
21f9fcd8 1779 return PTR_ERR_OR_ZERO(dir);
c17bb495
AM
1780}
1781
1782late_initcall(fail_make_request_debugfs);
1783
1784#else /* CONFIG_FAIL_MAKE_REQUEST */
1785
b2c9cd37
AM
1786static inline bool should_fail_request(struct hd_struct *part,
1787 unsigned int bytes)
c17bb495 1788{
b2c9cd37 1789 return false;
c17bb495
AM
1790}
1791
1792#endif /* CONFIG_FAIL_MAKE_REQUEST */
1793
c07e2b41
JA
1794/*
1795 * Check whether this bio extends beyond the end of the device.
1796 */
1797static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
1798{
1799 sector_t maxsector;
1800
1801 if (!nr_sectors)
1802 return 0;
1803
1804 /* Test device or partition size, when known. */
77304d2a 1805 maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
c07e2b41 1806 if (maxsector) {
4f024f37 1807 sector_t sector = bio->bi_iter.bi_sector;
c07e2b41
JA
1808
1809 if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
1810 /*
1811 * This may well happen - the kernel calls bread()
1812 * without checking the size of the device, e.g., when
1813 * mounting a device.
1814 */
1815 handle_bad_sector(bio);
1816 return 1;
1817 }
1818 }
1819
1820 return 0;
1821}
1822
27a84d54
CH
1823static noinline_for_stack bool
1824generic_make_request_checks(struct bio *bio)
1da177e4 1825{
165125e1 1826 struct request_queue *q;
5a7bbad2 1827 int nr_sectors = bio_sectors(bio);
51fd77bd 1828 int err = -EIO;
5a7bbad2
CH
1829 char b[BDEVNAME_SIZE];
1830 struct hd_struct *part;
1da177e4
LT
1831
1832 might_sleep();
1da177e4 1833
c07e2b41
JA
1834 if (bio_check_eod(bio, nr_sectors))
1835 goto end_io;
1da177e4 1836
5a7bbad2
CH
1837 q = bdev_get_queue(bio->bi_bdev);
1838 if (unlikely(!q)) {
1839 printk(KERN_ERR
1840 "generic_make_request: Trying to access "
1841 "nonexistent block-device %s (%Lu)\n",
1842 bdevname(bio->bi_bdev, b),
4f024f37 1843 (long long) bio->bi_iter.bi_sector);
5a7bbad2
CH
1844 goto end_io;
1845 }
c17bb495 1846
5a7bbad2 1847 part = bio->bi_bdev->bd_part;
4f024f37 1848 if (should_fail_request(part, bio->bi_iter.bi_size) ||
5a7bbad2 1849 should_fail_request(&part_to_disk(part)->part0,
4f024f37 1850 bio->bi_iter.bi_size))
5a7bbad2 1851 goto end_io;
2056a782 1852
5a7bbad2
CH
1853 /*
1854 * If this device has partitions, remap block n
1855 * of partition p to block n+start(p) of the disk.
1856 */
1857 blk_partition_remap(bio);
2056a782 1858
5a7bbad2
CH
1859 if (bio_check_eod(bio, nr_sectors))
1860 goto end_io;
1e87901e 1861
5a7bbad2
CH
1862 /*
1863 * Filter flush bio's early so that make_request based
1864 * drivers without flush support don't have to worry
1865 * about them.
1866 */
f3a8ab7d 1867 if (op_is_flush(bio->bi_opf) &&
c888a8f9 1868 !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
1eff9d32 1869 bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
5a7bbad2
CH
1870 if (!nr_sectors) {
1871 err = 0;
51fd77bd
JA
1872 goto end_io;
1873 }
5a7bbad2 1874 }
5ddfe969 1875
288dab8a
CH
1876 switch (bio_op(bio)) {
1877 case REQ_OP_DISCARD:
1878 if (!blk_queue_discard(q))
1879 goto not_supported;
1880 break;
1881 case REQ_OP_SECURE_ERASE:
1882 if (!blk_queue_secure_erase(q))
1883 goto not_supported;
1884 break;
1885 case REQ_OP_WRITE_SAME:
1886 if (!bdev_write_same(bio->bi_bdev))
1887 goto not_supported;
58886785 1888 break;
2d253440
ST
1889 case REQ_OP_ZONE_REPORT:
1890 case REQ_OP_ZONE_RESET:
1891 if (!bdev_is_zoned(bio->bi_bdev))
1892 goto not_supported;
288dab8a 1893 break;
a6f0788e
CK
1894 case REQ_OP_WRITE_ZEROES:
1895 if (!bdev_write_zeroes_sectors(bio->bi_bdev))
1896 goto not_supported;
1897 break;
288dab8a
CH
1898 default:
1899 break;
5a7bbad2 1900 }
01edede4 1901
7f4b35d1
TH
1902 /*
1903 * Various block parts want %current->io_context and lazy ioc
1904 * allocation ends up trading a lot of pain for a small amount of
1905 * memory. Just allocate it upfront. This may fail and the block
1906 * layer knows how to live with it.
1907 */
1908 create_io_context(GFP_ATOMIC, q->node);
1909
ae118896
TH
1910 if (!blkcg_bio_issue_check(q, bio))
1911 return false;
27a84d54 1912
5a7bbad2 1913 trace_block_bio_queue(q, bio);
27a84d54 1914 return true;
a7384677 1915
288dab8a
CH
1916not_supported:
1917 err = -EOPNOTSUPP;
a7384677 1918end_io:
4246a0b6
CH
1919 bio->bi_error = err;
1920 bio_endio(bio);
27a84d54 1921 return false;
1da177e4
LT
1922}
1923
27a84d54
CH
1924/**
1925 * generic_make_request - hand a buffer to its device driver for I/O
1926 * @bio: The bio describing the location in memory and on the device.
1927 *
1928 * generic_make_request() is used to make I/O requests of block
1929 * devices. It is passed a &struct bio, which describes the I/O that needs
1930 * to be done.
1931 *
1932 * generic_make_request() does not return any status. The
1933 * success/failure status of the request, along with notification of
1934 * completion, is delivered asynchronously through the bio->bi_end_io
1935 * function described (one day) elsewhere.
1936 *
1937 * The caller of generic_make_request must make sure that bi_io_vec
1938 * are set to describe the memory buffer, and that bi_bdev and bi_iter.bi_sector are
1939 * set to describe the device address, and the
1940 * bi_end_io and optionally bi_private are set to describe how
1941 * completion notification should be signaled.
1942 *
1943 * generic_make_request and the drivers it calls may use bi_next if this
1944 * bio happens to be merged with someone else, and may resubmit the bio to
1945 * a lower device by calling into generic_make_request recursively, which
1946 * means the bio should NOT be touched after the call to ->make_request_fn.
d89d8796 1947 */
dece1635 1948blk_qc_t generic_make_request(struct bio *bio)
d89d8796 1949{
bddd87c7 1950 struct bio_list bio_list_on_stack;
dece1635 1951 blk_qc_t ret = BLK_QC_T_NONE;
bddd87c7 1952
27a84d54 1953 if (!generic_make_request_checks(bio))
dece1635 1954 goto out;
27a84d54
CH
1955
1956 /*
1957 * We only want one ->make_request_fn to be active at a time, else
1958 * stack usage with stacked devices could be a problem. So use
1959 * current->bio_list to keep a list of requests submitted by a
1960 * make_request_fn function. current->bio_list is also used as a
1961 * flag to say if generic_make_request is currently active in this
1962 * task or not. If it is NULL, then no make_request is active. If
1963 * it is non-NULL, then a make_request is active, and new requests
1964 * should be added at the tail
1965 */
bddd87c7 1966 if (current->bio_list) {
bddd87c7 1967 bio_list_add(current->bio_list, bio);
dece1635 1968 goto out;
d89d8796 1969 }
27a84d54 1970
d89d8796
NB
1971 /* following loop may be a bit non-obvious, and so deserves some
1972 * explanation.
1973 * Before entering the loop, bio->bi_next is NULL (as all callers
1974 * ensure that) so we have a list with a single bio.
1975 * We pretend that we have just taken it off a longer list, so
bddd87c7
AM
1976 * we assign bio_list to a pointer to the bio_list_on_stack,
1977 * thus initialising the bio_list of new bios to be
27a84d54 1978 * added. ->make_request() may indeed add some more bios
d89d8796
NB
1979 * through a recursive call to generic_make_request. If it
1980 * did, we find a non-NULL value in bio_list and re-enter the loop
1981 * from the top. In this case we really did just take the bio
bddd87c7 1982 * off the top of the list (no pretending) and so remove it from
27a84d54 1983 * bio_list, and call into ->make_request() again.
d89d8796
NB
1984 */
1985 BUG_ON(bio->bi_next);
bddd87c7
AM
1986 bio_list_init(&bio_list_on_stack);
1987 current->bio_list = &bio_list_on_stack;
d89d8796 1988 do {
27a84d54
CH
1989 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
1990
6f3b0e8b 1991 if (likely(blk_queue_enter(q, false) == 0)) {
dece1635 1992 ret = q->make_request_fn(q, bio);
3ef28e83
DW
1993
1994 blk_queue_exit(q);
27a84d54 1995
3ef28e83
DW
1996 bio = bio_list_pop(current->bio_list);
1997 } else {
1998 struct bio *bio_next = bio_list_pop(current->bio_list);
1999
2000 bio_io_error(bio);
2001 bio = bio_next;
2002 }
d89d8796 2003 } while (bio);
bddd87c7 2004 current->bio_list = NULL; /* deactivate */
dece1635
JA
2005
2006out:
2007 return ret;
d89d8796 2008}
1da177e4
LT
2009EXPORT_SYMBOL(generic_make_request);
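
/*
 * Illustrative sketch (not part of blk-core.c): how a bio-based remapping
 * driver might recurse into generic_make_request() from its
 * ->make_request_fn.  All example_* names are hypothetical; real stacking
 * drivers (dm, md, ...) do considerably more bookkeeping.
 */
struct example_remap {
	struct block_device	*lower_bdev;	/* device we remap onto */
	sector_t		 start_sect;	/* offset of our region */
};

static blk_qc_t example_make_request(struct request_queue *q, struct bio *bio)
{
	struct example_remap *er = q->queuedata;

	/* redirect the bio to the lower device, shifted by our offset */
	bio->bi_bdev = er->lower_bdev;
	bio->bi_iter.bi_sector += er->start_sect;

	/*
	 * current->bio_list is non-NULL while generic_make_request() is
	 * active, so this recursive call only queues the bio; the loop
	 * above dispatches it once we return.
	 */
	return generic_make_request(bio);
}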
2010
2011/**
710027a4 2012 * submit_bio - submit a bio to the block device layer for I/O
1da177e4
LT
2013 * @bio: The &struct bio which describes the I/O
2014 *
2015 * submit_bio() is very similar in purpose to generic_make_request(), and
2016 * uses that function to do most of the work. Both are fairly rough
710027a4 2017 * interfaces; @bio must be set up and ready for I/O.
1da177e4
LT
2018 *
2019 */
4e49ea4a 2020blk_qc_t submit_bio(struct bio *bio)
1da177e4 2021{
bf2de6f5
JA
2022 /*
2023 * If it's a regular read/write or a barrier with data attached,
2024 * go through the normal accounting stuff before submission.
2025 */
e2a60da7 2026 if (bio_has_data(bio)) {
4363ac7c
MP
2027 unsigned int count;
2028
95fe6c1a 2029 if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
4363ac7c
MP
2030 count = bdev_logical_block_size(bio->bi_bdev) >> 9;
2031 else
2032 count = bio_sectors(bio);
2033
a8ebb056 2034 if (op_is_write(bio_op(bio))) {
bf2de6f5
JA
2035 count_vm_events(PGPGOUT, count);
2036 } else {
4f024f37 2037 task_io_account_read(bio->bi_iter.bi_size);
bf2de6f5
JA
2038 count_vm_events(PGPGIN, count);
2039 }
2040
2041 if (unlikely(block_dump)) {
2042 char b[BDEVNAME_SIZE];
8dcbdc74 2043 printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
ba25f9dc 2044 current->comm, task_pid_nr(current),
a8ebb056 2045 op_is_write(bio_op(bio)) ? "WRITE" : "READ",
4f024f37 2046 (unsigned long long)bio->bi_iter.bi_sector,
8dcbdc74
SM
2047 bdevname(bio->bi_bdev, b),
2048 count);
bf2de6f5 2049 }
1da177e4
LT
2050 }
2051
dece1635 2052 return generic_make_request(bio);
1da177e4 2053}
1da177e4
LT
2054EXPORT_SYMBOL(submit_bio);
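
/*
 * Illustrative sketch (not part of blk-core.c): the typical way an in-kernel
 * user builds a bio and hands it to submit_bio().  example_read_page() is
 * hypothetical; submit_bio_wait() is used so the example stays synchronous
 * and self-contained.
 */
static int example_read_page(struct block_device *bdev, sector_t sector,
			     struct page *page)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	int ret;

	if (!bio)
		return -ENOMEM;

	bio->bi_bdev = bdev;			/* target device */
	bio->bi_iter.bi_sector = sector;	/* in 512-byte units */
	bio_add_page(bio, page, PAGE_SIZE, 0);	/* always fits a 1-vec bio */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);	/* plain read, no flags */

	ret = submit_bio_wait(bio);		/* submit_bio() + completion wait */
	bio_put(bio);
	return ret;
}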
2055
82124d60 2056/**
bf4e6b4e
HR
2057 * blk_cloned_rq_check_limits - Helper function to check a cloned request
2058 * for the new queue limits
82124d60
KU
2059 * @q: the queue
2060 * @rq: the request being checked
2061 *
2062 * Description:
2063 * @rq may have been made based on weaker limitations of upper-level queues
2064 * in request stacking drivers, and it may violate the limitation of @q.
2065 * Since the block layer and the underlying device driver trust @rq
2066 * after it is inserted to @q, it should be checked against @q before
2067 * the insertion using this generic function.
2068 *
82124d60 2069 * Request stacking drivers like request-based dm may change the queue
bf4e6b4e
HR
2070 * limits when retrying requests on other queues. Those requests need
2071 * to be checked against the new queue limits again during dispatch.
82124d60 2072 */
bf4e6b4e
HR
2073static int blk_cloned_rq_check_limits(struct request_queue *q,
2074 struct request *rq)
82124d60 2075{
8fe0d473 2076 if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) {
82124d60
KU
2077 printk(KERN_ERR "%s: over max size limit.\n", __func__);
2078 return -EIO;
2079 }
2080
2081 /*
2082 * queue's settings related to segment counting like q->bounce_pfn
2083 * may differ from that of other stacking queues.
2084 * Recalculate it to check the request correctly on this queue's
2085 * limitation.
2086 */
2087 blk_recalc_rq_segments(rq);
8a78362c 2088 if (rq->nr_phys_segments > queue_max_segments(q)) {
82124d60
KU
2089 printk(KERN_ERR "%s: over max segments limit.\n", __func__);
2090 return -EIO;
2091 }
2092
2093 return 0;
2094}
82124d60
KU
2095
2096/**
2097 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
2098 * @q: the queue to submit the request
2099 * @rq: the request being queued
2100 */
2101int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
2102{
2103 unsigned long flags;
4853abaa 2104 int where = ELEVATOR_INSERT_BACK;
82124d60 2105
bf4e6b4e 2106 if (blk_cloned_rq_check_limits(q, rq))
82124d60
KU
2107 return -EIO;
2108
b2c9cd37
AM
2109 if (rq->rq_disk &&
2110 should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
82124d60 2111 return -EIO;
82124d60 2112
7fb4898e
KB
2113 if (q->mq_ops) {
2114 if (blk_queue_io_stat(q))
2115 blk_account_io_start(rq, true);
bd6737f1 2116 blk_mq_sched_insert_request(rq, false, true, false, false);
7fb4898e
KB
2117 return 0;
2118 }
2119
82124d60 2120 spin_lock_irqsave(q->queue_lock, flags);
3f3299d5 2121 if (unlikely(blk_queue_dying(q))) {
8ba61435
TH
2122 spin_unlock_irqrestore(q->queue_lock, flags);
2123 return -ENODEV;
2124 }
82124d60
KU
2125
2126 /*
2127 * Submitting request must be dequeued before calling this function
2128 * because it will be linked to another request_queue
2129 */
2130 BUG_ON(blk_queued_rq(rq));
2131
f73f44eb 2132 if (op_is_flush(rq->cmd_flags))
4853abaa
JM
2133 where = ELEVATOR_INSERT_FLUSH;
2134
2135 add_acct_request(q, rq, where);
e67b77c7
JM
2136 if (where == ELEVATOR_INSERT_FLUSH)
2137 __blk_run_queue(q);
82124d60
KU
2138 spin_unlock_irqrestore(q->queue_lock, flags);
2139
2140 return 0;
2141}
2142EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
2143
80a761fd
TH
2144/**
2145 * blk_rq_err_bytes - determine number of bytes till the next failure boundary
2146 * @rq: request to examine
2147 *
2148 * Description:
2149 * A request could be a merge of IOs which require different failure
2150 * handling. This function determines the number of bytes which
2151 * can be failed from the beginning of the request without
2152 * crossing into an area which needs to be retried further.
2153 *
2154 * Return:
2155 * The number of bytes to fail.
2156 *
2157 * Context:
2158 * queue_lock must be held.
2159 */
2160unsigned int blk_rq_err_bytes(const struct request *rq)
2161{
2162 unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
2163 unsigned int bytes = 0;
2164 struct bio *bio;
2165
e8064021 2166 if (!(rq->rq_flags & RQF_MIXED_MERGE))
80a761fd
TH
2167 return blk_rq_bytes(rq);
2168
2169 /*
2170 * Currently the only 'mixing' which can happen is between
2171 * different failfast types. We can safely fail portions
2172 * which have all the failfast bits that the first one has -
2173 * the ones which are at least as eager to fail as the first
2174 * one.
2175 */
2176 for (bio = rq->bio; bio; bio = bio->bi_next) {
1eff9d32 2177 if ((bio->bi_opf & ff) != ff)
80a761fd 2178 break;
4f024f37 2179 bytes += bio->bi_iter.bi_size;
80a761fd
TH
2180 }
2181
2182 /* this could lead to infinite loop */
2183 BUG_ON(blk_rq_bytes(rq) && !bytes);
2184 return bytes;
2185}
2186EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
2187
320ae51f 2188void blk_account_io_completion(struct request *req, unsigned int bytes)
bc58ba94 2189{
c2553b58 2190 if (blk_do_io_stat(req)) {
bc58ba94
JA
2191 const int rw = rq_data_dir(req);
2192 struct hd_struct *part;
2193 int cpu;
2194
2195 cpu = part_stat_lock();
09e099d4 2196 part = req->part;
bc58ba94
JA
2197 part_stat_add(cpu, part, sectors[rw], bytes >> 9);
2198 part_stat_unlock();
2199 }
2200}
2201
320ae51f 2202void blk_account_io_done(struct request *req)
bc58ba94 2203{
bc58ba94 2204 /*
dd4c133f
TH
2205 * Account IO completion. flush_rq isn't accounted as a
2206 * normal IO on queueing nor completion. Accounting the
2207 * containing request is enough.
bc58ba94 2208 */
e8064021 2209 if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) {
bc58ba94
JA
2210 unsigned long duration = jiffies - req->start_time;
2211 const int rw = rq_data_dir(req);
2212 struct hd_struct *part;
2213 int cpu;
2214
2215 cpu = part_stat_lock();
09e099d4 2216 part = req->part;
bc58ba94
JA
2217
2218 part_stat_inc(cpu, part, ios[rw]);
2219 part_stat_add(cpu, part, ticks[rw], duration);
2220 part_round_stats(cpu, part);
316d315b 2221 part_dec_in_flight(part, rw);
bc58ba94 2222
6c23a968 2223 hd_struct_put(part);
bc58ba94
JA
2224 part_stat_unlock();
2225 }
2226}
2227
47fafbc7 2228#ifdef CONFIG_PM
c8158819
LM
2229/*
2230 * Don't process normal requests when queue is suspended
2231 * or in the process of suspending/resuming
2232 */
2233static struct request *blk_pm_peek_request(struct request_queue *q,
2234 struct request *rq)
2235{
2236 if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
e8064021 2237 (q->rpm_status != RPM_ACTIVE && !(rq->rq_flags & RQF_PM))))
c8158819
LM
2238 return NULL;
2239 else
2240 return rq;
2241}
2242#else
2243static inline struct request *blk_pm_peek_request(struct request_queue *q,
2244 struct request *rq)
2245{
2246 return rq;
2247}
2248#endif
2249
320ae51f
JA
2250void blk_account_io_start(struct request *rq, bool new_io)
2251{
2252 struct hd_struct *part;
2253 int rw = rq_data_dir(rq);
2254 int cpu;
2255
2256 if (!blk_do_io_stat(rq))
2257 return;
2258
2259 cpu = part_stat_lock();
2260
2261 if (!new_io) {
2262 part = rq->part;
2263 part_stat_inc(cpu, part, merges[rw]);
2264 } else {
2265 part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
2266 if (!hd_struct_try_get(part)) {
2267 /*
2268 * The partition is already being removed,
2269 * the request will be accounted on the disk only
2270 *
2271 * We take a reference on disk->part0 although that
2272 * partition will never be deleted, so we can treat
2273 * it as any other partition.
2274 */
2275 part = &rq->rq_disk->part0;
2276 hd_struct_get(part);
2277 }
2278 part_round_stats(cpu, part);
2279 part_inc_in_flight(part, rw);
2280 rq->part = part;
2281 }
2282
2283 part_stat_unlock();
2284}
2285
3bcddeac 2286/**
9934c8c0
TH
2287 * blk_peek_request - peek at the top of a request queue
2288 * @q: request queue to peek at
2289 *
2290 * Description:
2291 * Return the request at the top of @q. The returned request
2292 * should be started using blk_start_request() before LLD starts
2293 * processing it.
2294 *
2295 * Return:
2296 * Pointer to the request at the top of @q if available. Null
2297 * otherwise.
2298 *
2299 * Context:
2300 * queue_lock must be held.
2301 */
2302struct request *blk_peek_request(struct request_queue *q)
158dbda0
TH
2303{
2304 struct request *rq;
2305 int ret;
2306
2307 while ((rq = __elv_next_request(q)) != NULL) {
c8158819
LM
2308
2309 rq = blk_pm_peek_request(q, rq);
2310 if (!rq)
2311 break;
2312
e8064021 2313 if (!(rq->rq_flags & RQF_STARTED)) {
158dbda0
TH
2314 /*
2315 * This is the first time the device driver
2316 * sees this request (possibly after
2317 * requeueing). Notify IO scheduler.
2318 */
e8064021 2319 if (rq->rq_flags & RQF_SORTED)
158dbda0
TH
2320 elv_activate_rq(q, rq);
2321
2322 /*
2323 * just mark it as started even if we don't start
2324 * it; a request that has been delayed should
2325 * not be passed by new incoming requests
2326 */
e8064021 2327 rq->rq_flags |= RQF_STARTED;
158dbda0
TH
2328 trace_block_rq_issue(q, rq);
2329 }
2330
2331 if (!q->boundary_rq || q->boundary_rq == rq) {
2332 q->end_sector = rq_end_sector(rq);
2333 q->boundary_rq = NULL;
2334 }
2335
e8064021 2336 if (rq->rq_flags & RQF_DONTPREP)
158dbda0
TH
2337 break;
2338
2e46e8b2 2339 if (q->dma_drain_size && blk_rq_bytes(rq)) {
158dbda0
TH
2340 /*
2341 * make sure space for the drain appears. We
2342 * know we can do this because max_hw_segments
2343 * has been adjusted to be one fewer than the
2344 * device can handle
2345 */
2346 rq->nr_phys_segments++;
2347 }
2348
2349 if (!q->prep_rq_fn)
2350 break;
2351
2352 ret = q->prep_rq_fn(q, rq);
2353 if (ret == BLKPREP_OK) {
2354 break;
2355 } else if (ret == BLKPREP_DEFER) {
2356 /*
2357 * the request may have been (partially) prepped.
2358 * we need to keep this request in the front to
e8064021 2359 * avoid resource deadlock. RQF_STARTED will
158dbda0
TH
2360 * prevent other fs requests from passing this one.
2361 */
2e46e8b2 2362 if (q->dma_drain_size && blk_rq_bytes(rq) &&
e8064021 2363 !(rq->rq_flags & RQF_DONTPREP)) {
158dbda0
TH
2364 /*
2365 * remove the space for the drain we added
2366 * so that we don't add it again
2367 */
2368 --rq->nr_phys_segments;
2369 }
2370
2371 rq = NULL;
2372 break;
0fb5b1fb
MP
2373 } else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) {
2374 int err = (ret == BLKPREP_INVALID) ? -EREMOTEIO : -EIO;
2375
e8064021 2376 rq->rq_flags |= RQF_QUIET;
c143dc90
JB
2377 /*
2378 * Mark this request as started so we don't trigger
2379 * any debug logic in the end I/O path.
2380 */
2381 blk_start_request(rq);
0fb5b1fb 2382 __blk_end_request_all(rq, err);
158dbda0
TH
2383 } else {
2384 printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
2385 break;
2386 }
2387 }
2388
2389 return rq;
2390}
9934c8c0 2391EXPORT_SYMBOL(blk_peek_request);
158dbda0 2392
9934c8c0 2393void blk_dequeue_request(struct request *rq)
158dbda0 2394{
9934c8c0
TH
2395 struct request_queue *q = rq->q;
2396
158dbda0
TH
2397 BUG_ON(list_empty(&rq->queuelist));
2398 BUG_ON(ELV_ON_HASH(rq));
2399
2400 list_del_init(&rq->queuelist);
2401
2402 /*
2403 * the time frame between a request being removed from the lists
2404 * and when it is freed is accounted as I/O that is in progress at
2405 * the driver side.
2406 */
9195291e 2407 if (blk_account_rq(rq)) {
0a7ae2ff 2408 q->in_flight[rq_is_sync(rq)]++;
9195291e
DS
2409 set_io_start_time_ns(rq);
2410 }
158dbda0
TH
2411}
2412
9934c8c0
TH
2413/**
2414 * blk_start_request - start request processing on the driver
2415 * @req: request to dequeue
2416 *
2417 * Description:
2418 * Dequeue @req and start timeout timer on it. This hands off the
2419 * request to the driver.
2420 *
2421 * Block internal functions which don't want to start timer should
2422 * call blk_dequeue_request().
2423 *
2424 * Context:
2425 * queue_lock must be held.
2426 */
2427void blk_start_request(struct request *req)
2428{
2429 blk_dequeue_request(req);
2430
cf43e6be
JA
2431 if (test_bit(QUEUE_FLAG_STATS, &req->q->queue_flags)) {
2432 blk_stat_set_issue_time(&req->issue_stat);
2433 req->rq_flags |= RQF_STATS;
87760e5e 2434 wbt_issue(req->q->rq_wb, &req->issue_stat);
cf43e6be
JA
2435 }
2436
4912aa6c 2437 BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
9934c8c0
TH
2438 blk_add_timer(req);
2439}
2440EXPORT_SYMBOL(blk_start_request);
2441
2442/**
2443 * blk_fetch_request - fetch a request from a request queue
2444 * @q: request queue to fetch a request from
2445 *
2446 * Description:
2447 * Return the request at the top of @q. The request is started on
2448 * return and LLD can start processing it immediately.
2449 *
2450 * Return:
2451 * Pointer to the request at the top of @q if available. Null
2452 * otherwise.
2453 *
2454 * Context:
2455 * queue_lock must be held.
2456 */
2457struct request *blk_fetch_request(struct request_queue *q)
2458{
2459 struct request *rq;
2460
2461 rq = blk_peek_request(q);
2462 if (rq)
2463 blk_start_request(rq);
2464 return rq;
2465}
2466EXPORT_SYMBOL(blk_fetch_request);
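
/*
 * Illustrative sketch (not part of blk-core.c): the canonical ->request_fn
 * loop of a simple single-queue driver, set up with
 * blk_init_queue(example_request_fn, &lock).  example_request_fn() is
 * hypothetical and completes everything, the way a null device would.
 */
static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	/* ->request_fn is invoked with q->queue_lock already held */
	while ((rq = blk_fetch_request(q)) != NULL) {
		/*
		 * A real driver would transfer blk_rq_pos(rq) /
		 * blk_rq_bytes(rq) here, usually dropping the queue lock
		 * around the hardware access and re-taking it before
		 * completing.
		 */
		__blk_end_request_all(rq, 0);
	}
}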
2467
3bcddeac 2468/**
2e60e022 2469 * blk_update_request - Special helper function for request stacking drivers
8ebf9756 2470 * @req: the request being processed
710027a4 2471 * @error: %0 for success, < %0 for error
8ebf9756 2472 * @nr_bytes: number of bytes to complete @req
3bcddeac
KU
2473 *
2474 * Description:
8ebf9756
RD
2475 * Ends I/O on a number of bytes attached to @req, but doesn't complete
2476 * the request structure even if @req doesn't have leftover.
2477 * If @req has leftover, sets it up for the next range of segments.
2e60e022
TH
2478 *
2479 * This special helper function is only for request stacking drivers
2480 * (e.g. request-based dm) so that they can handle partial completion.
2481 * Actual device drivers should use blk_end_request instead.
2482 *
2483 * Passing the result of blk_rq_bytes() as @nr_bytes guarantees
2484 * %false return from this function.
3bcddeac
KU
2485 *
2486 * Return:
2e60e022
TH
2487 * %false - this request doesn't have any more data
2488 * %true - this request has more data
3bcddeac 2489 **/
2e60e022 2490bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
1da177e4 2491{
f79ea416 2492 int total_bytes;
1da177e4 2493
4a0efdc9
HR
2494 trace_block_rq_complete(req->q, req, nr_bytes);
2495
2e60e022
TH
2496 if (!req->bio)
2497 return false;
2498
1da177e4 2499 /*
6f41469c
TH
2500 * For fs requests, rq is just a carrier of independent bios
2501 * and each partial completion should be handled separately.
2502 * Reset per-request error on each partial completion.
2503 *
2504 * TODO: tj: This is too subtle. It would be better to let
2505 * low level drivers do what they see fit.
1da177e4 2506 */
57292b58 2507 if (!blk_rq_is_passthrough(req))
1da177e4
LT
2508 req->errors = 0;
2509
57292b58 2510 if (error && !blk_rq_is_passthrough(req) &&
e8064021 2511 !(req->rq_flags & RQF_QUIET)) {
79775567
HR
2512 char *error_type;
2513
2514 switch (error) {
2515 case -ENOLINK:
2516 error_type = "recoverable transport";
2517 break;
2518 case -EREMOTEIO:
2519 error_type = "critical target";
2520 break;
2521 case -EBADE:
2522 error_type = "critical nexus";
2523 break;
d1ffc1f8
HR
2524 case -ETIMEDOUT:
2525 error_type = "timeout";
2526 break;
a9d6ceb8
HR
2527 case -ENOSPC:
2528 error_type = "critical space allocation";
2529 break;
7e782af5
HR
2530 case -ENODATA:
2531 error_type = "critical medium";
2532 break;
79775567
HR
2533 case -EIO:
2534 default:
2535 error_type = "I/O";
2536 break;
2537 }
ef3ecb66
RE
2538 printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n",
2539 __func__, error_type, req->rq_disk ?
37d7b34f
YZ
2540 req->rq_disk->disk_name : "?",
2541 (unsigned long long)blk_rq_pos(req));
2542
1da177e4
LT
2543 }
2544
bc58ba94 2545 blk_account_io_completion(req, nr_bytes);
d72d904a 2546
f79ea416
KO
2547 total_bytes = 0;
2548 while (req->bio) {
2549 struct bio *bio = req->bio;
4f024f37 2550 unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
1da177e4 2551
4f024f37 2552 if (bio_bytes == bio->bi_iter.bi_size)
1da177e4 2553 req->bio = bio->bi_next;
1da177e4 2554
f79ea416 2555 req_bio_endio(req, bio, bio_bytes, error);
1da177e4 2556
f79ea416
KO
2557 total_bytes += bio_bytes;
2558 nr_bytes -= bio_bytes;
1da177e4 2559
f79ea416
KO
2560 if (!nr_bytes)
2561 break;
1da177e4
LT
2562 }
2563
2564 /*
2565 * completely done
2566 */
2e60e022
TH
2567 if (!req->bio) {
2568 /*
2569 * Reset counters so that the request stacking driver
2570 * can find how many bytes remain in the request
2571 * later.
2572 */
a2dec7b3 2573 req->__data_len = 0;
2e60e022
TH
2574 return false;
2575 }
1da177e4 2576
f9d03f96
CH
2577 WARN_ON_ONCE(req->rq_flags & RQF_SPECIAL_PAYLOAD);
2578
a2dec7b3 2579 req->__data_len -= total_bytes;
2e46e8b2
TH
2580
2581 /* update sector only for requests with clear definition of sector */
57292b58 2582 if (!blk_rq_is_passthrough(req))
a2dec7b3 2583 req->__sector += total_bytes >> 9;
2e46e8b2 2584
80a761fd 2585 /* mixed attributes always follow the first bio */
e8064021 2586 if (req->rq_flags & RQF_MIXED_MERGE) {
80a761fd 2587 req->cmd_flags &= ~REQ_FAILFAST_MASK;
1eff9d32 2588 req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
80a761fd
TH
2589 }
2590
2e46e8b2
TH
2591 /*
2592 * If the total number of sectors is less than the first segment
2593 * size, something has gone terribly wrong.
2594 */
2595 if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
8182924b 2596 blk_dump_rq_flags(req, "request botched");
a2dec7b3 2597 req->__data_len = blk_rq_cur_bytes(req);
2e46e8b2
TH
2598 }
2599
2600 /* recalculate the number of segments */
1da177e4 2601 blk_recalc_rq_segments(req);
2e46e8b2 2602
2e60e022 2603 return true;
1da177e4 2604}
2e60e022 2605EXPORT_SYMBOL_GPL(blk_update_request);
1da177e4 2606
2e60e022
TH
2607static bool blk_update_bidi_request(struct request *rq, int error,
2608 unsigned int nr_bytes,
2609 unsigned int bidi_bytes)
5efccd17 2610{
2e60e022
TH
2611 if (blk_update_request(rq, error, nr_bytes))
2612 return true;
5efccd17 2613
2e60e022
TH
2614 /* Bidi request must be completed as a whole */
2615 if (unlikely(blk_bidi_rq(rq)) &&
2616 blk_update_request(rq->next_rq, error, bidi_bytes))
2617 return true;
5efccd17 2618
e2e1a148
JA
2619 if (blk_queue_add_random(rq->q))
2620 add_disk_randomness(rq->rq_disk);
2e60e022
TH
2621
2622 return false;
1da177e4
LT
2623}
2624
28018c24
JB
2625/**
2626 * blk_unprep_request - unprepare a request
2627 * @req: the request
2628 *
2629 * This function makes a request ready for complete resubmission (or
2630 * completion). It happens only after all error handling is complete,
2631 * so represents the appropriate moment to deallocate any resources
2632 * that were allocated to the request in the prep_rq_fn. The queue
2633 * lock is held when calling this.
2634 */
2635void blk_unprep_request(struct request *req)
2636{
2637 struct request_queue *q = req->q;
2638
e8064021 2639 req->rq_flags &= ~RQF_DONTPREP;
28018c24
JB
2640 if (q->unprep_rq_fn)
2641 q->unprep_rq_fn(q, req);
2642}
2643EXPORT_SYMBOL_GPL(blk_unprep_request);
2644
1da177e4
LT
2645/*
2646 * queue lock must be held
2647 */
12120077 2648void blk_finish_request(struct request *req, int error)
1da177e4 2649{
cf43e6be
JA
2650 struct request_queue *q = req->q;
2651
2652 if (req->rq_flags & RQF_STATS)
2653 blk_stat_add(&q->rq_stats[rq_data_dir(req)], req);
2654
e8064021 2655 if (req->rq_flags & RQF_QUEUED)
cf43e6be 2656 blk_queue_end_tag(q, req);
b8286239 2657
ba396a6c 2658 BUG_ON(blk_queued_rq(req));
1da177e4 2659
57292b58 2660 if (unlikely(laptop_mode) && !blk_rq_is_passthrough(req))
dc3b17cc 2661 laptop_io_completion(req->q->backing_dev_info);
1da177e4 2662
e78042e5
MA
2663 blk_delete_timer(req);
2664
e8064021 2665 if (req->rq_flags & RQF_DONTPREP)
28018c24
JB
2666 blk_unprep_request(req);
2667
bc58ba94 2668 blk_account_io_done(req);
b8286239 2669
87760e5e
JA
2670 if (req->end_io) {
2671 wbt_done(req->q->rq_wb, &req->issue_stat);
8ffdc655 2672 req->end_io(req, error);
87760e5e 2673 } else {
b8286239
KU
2674 if (blk_bidi_rq(req))
2675 __blk_put_request(req->next_rq->q, req->next_rq);
2676
cf43e6be 2677 __blk_put_request(q, req);
b8286239 2678 }
1da177e4 2679}
12120077 2680EXPORT_SYMBOL(blk_finish_request);
1da177e4 2681
3b11313a 2682/**
2e60e022
TH
2683 * blk_end_bidi_request - Complete a bidi request
2684 * @rq: the request to complete
2685 * @error: %0 for success, < %0 for error
2686 * @nr_bytes: number of bytes to complete @rq
2687 * @bidi_bytes: number of bytes to complete @rq->next_rq
a0cd1285
JA
2688 *
2689 * Description:
e3a04fe3 2690 * Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
2e60e022
TH
2691 * Drivers that support bidi can safely call this member for any
2692 * type of request, bidi or uni. In the latter case @bidi_bytes is
2693 * just ignored.
336cdb40
KU
2694 *
2695 * Return:
2e60e022
TH
2696 * %false - we are done with this request
2697 * %true - still buffers pending for this request
a0cd1285 2698 **/
b1f74493 2699static bool blk_end_bidi_request(struct request *rq, int error,
32fab448
KU
2700 unsigned int nr_bytes, unsigned int bidi_bytes)
2701{
336cdb40 2702 struct request_queue *q = rq->q;
2e60e022 2703 unsigned long flags;
32fab448 2704
2e60e022
TH
2705 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
2706 return true;
32fab448 2707
336cdb40 2708 spin_lock_irqsave(q->queue_lock, flags);
2e60e022 2709 blk_finish_request(rq, error);
336cdb40
KU
2710 spin_unlock_irqrestore(q->queue_lock, flags);
2711
2e60e022 2712 return false;
32fab448
KU
2713}
2714
336cdb40 2715/**
2e60e022
TH
2716 * __blk_end_bidi_request - Complete a bidi request with queue lock held
2717 * @rq: the request to complete
710027a4 2718 * @error: %0 for success, < %0 for error
e3a04fe3
KU
2719 * @nr_bytes: number of bytes to complete @rq
2720 * @bidi_bytes: number of bytes to complete @rq->next_rq
336cdb40
KU
2721 *
2722 * Description:
2e60e022
TH
2723 * Identical to blk_end_bidi_request() except that queue lock is
2724 * assumed to be locked on entry and remains so on return.
336cdb40
KU
2725 *
2726 * Return:
2e60e022
TH
2727 * %false - we are done with this request
2728 * %true - still buffers pending for this request
336cdb40 2729 **/
4853abaa 2730bool __blk_end_bidi_request(struct request *rq, int error,
b1f74493 2731 unsigned int nr_bytes, unsigned int bidi_bytes)
336cdb40 2732{
2e60e022
TH
2733 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
2734 return true;
336cdb40 2735
2e60e022 2736 blk_finish_request(rq, error);
336cdb40 2737
2e60e022 2738 return false;
336cdb40 2739}
e19a3ab0
KU
2740
2741/**
2742 * blk_end_request - Helper function for drivers to complete the request.
2743 * @rq: the request being processed
710027a4 2744 * @error: %0 for success, < %0 for error
e19a3ab0
KU
2745 * @nr_bytes: number of bytes to complete
2746 *
2747 * Description:
2748 * Ends I/O on a number of bytes attached to @rq.
2749 * If @rq has leftover, sets it up for the next range of segments.
2750 *
2751 * Return:
b1f74493
FT
2752 * %false - we are done with this request
2753 * %true - still buffers pending for this request
e19a3ab0 2754 **/
b1f74493 2755bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
e19a3ab0 2756{
b1f74493 2757 return blk_end_bidi_request(rq, error, nr_bytes, 0);
e19a3ab0 2758}
56ad1740 2759EXPORT_SYMBOL(blk_end_request);
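
/*
 * Illustrative sketch (not part of blk-core.c): piecewise completion from a
 * driver's interrupt handler.  example_irq_done() and the bytes-per-interrupt
 * argument are hypothetical.
 */
static void example_irq_done(struct request *rq, unsigned int bytes, int error)
{
	/*
	 * blk_end_request() returns true while @rq still has bytes left
	 * (more interrupts are expected) and false once the request has
	 * been fully completed and released.
	 */
	if (!blk_end_request(rq, error, bytes))
		pr_debug("%s: request fully completed\n", __func__);
}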
336cdb40
KU
2760
2761/**
b1f74493
FT
2762 * blk_end_request_all - Helper function for drivers to finish the request.
2763 * @rq: the request to finish
8ebf9756 2764 * @error: %0 for success, < %0 for error
336cdb40
KU
2765 *
2766 * Description:
b1f74493
FT
2767 * Completely finish @rq.
2768 */
2769void blk_end_request_all(struct request *rq, int error)
336cdb40 2770{
b1f74493
FT
2771 bool pending;
2772 unsigned int bidi_bytes = 0;
336cdb40 2773
b1f74493
FT
2774 if (unlikely(blk_bidi_rq(rq)))
2775 bidi_bytes = blk_rq_bytes(rq->next_rq);
336cdb40 2776
b1f74493
FT
2777 pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
2778 BUG_ON(pending);
2779}
56ad1740 2780EXPORT_SYMBOL(blk_end_request_all);
336cdb40 2781
b1f74493
FT
2782/**
2783 * blk_end_request_cur - Helper function to finish the current request chunk.
2784 * @rq: the request to finish the current chunk for
8ebf9756 2785 * @error: %0 for success, < %0 for error
b1f74493
FT
2786 *
2787 * Description:
2788 * Complete the current consecutively mapped chunk from @rq.
2789 *
2790 * Return:
2791 * %false - we are done with this request
2792 * %true - still buffers pending for this request
2793 */
2794bool blk_end_request_cur(struct request *rq, int error)
2795{
2796 return blk_end_request(rq, error, blk_rq_cur_bytes(rq));
336cdb40 2797}
56ad1740 2798EXPORT_SYMBOL(blk_end_request_cur);
336cdb40 2799
80a761fd
TH
2800/**
2801 * blk_end_request_err - Finish a request till the next failure boundary.
2802 * @rq: the request to finish till the next failure boundary for
2803 * @error: must be negative errno
2804 *
2805 * Description:
2806 * Complete @rq till the next failure boundary.
2807 *
2808 * Return:
2809 * %false - we are done with this request
2810 * %true - still buffers pending for this request
2811 */
2812bool blk_end_request_err(struct request *rq, int error)
2813{
2814 WARN_ON(error >= 0);
2815 return blk_end_request(rq, error, blk_rq_err_bytes(rq));
2816}
2817EXPORT_SYMBOL_GPL(blk_end_request_err);
2818
e3a04fe3 2819/**
b1f74493
FT
2820 * __blk_end_request - Helper function for drivers to complete the request.
2821 * @rq: the request being processed
2822 * @error: %0 for success, < %0 for error
2823 * @nr_bytes: number of bytes to complete
e3a04fe3
KU
2824 *
2825 * Description:
b1f74493 2826 * Must be called with queue lock held unlike blk_end_request().
e3a04fe3
KU
2827 *
2828 * Return:
b1f74493
FT
2829 * %false - we are done with this request
2830 * %true - still buffers pending for this request
e3a04fe3 2831 **/
b1f74493 2832bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
e3a04fe3 2833{
b1f74493 2834 return __blk_end_bidi_request(rq, error, nr_bytes, 0);
e3a04fe3 2835}
56ad1740 2836EXPORT_SYMBOL(__blk_end_request);
e3a04fe3 2837
32fab448 2838/**
b1f74493
FT
2839 * __blk_end_request_all - Helper function for drivers to finish the request.
2840 * @rq: the request to finish
8ebf9756 2841 * @error: %0 for success, < %0 for error
32fab448
KU
2842 *
2843 * Description:
b1f74493 2844 * Completely finish @rq. Must be called with queue lock held.
32fab448 2845 */
b1f74493 2846void __blk_end_request_all(struct request *rq, int error)
32fab448 2847{
b1f74493
FT
2848 bool pending;
2849 unsigned int bidi_bytes = 0;
2850
2851 if (unlikely(blk_bidi_rq(rq)))
2852 bidi_bytes = blk_rq_bytes(rq->next_rq);
2853
2854 pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
2855 BUG_ON(pending);
32fab448 2856}
56ad1740 2857EXPORT_SYMBOL(__blk_end_request_all);
32fab448 2858
e19a3ab0 2859/**
b1f74493
FT
2860 * __blk_end_request_cur - Helper function to finish the current request chunk.
2861 * @rq: the request to finish the current chunk for
8ebf9756 2862 * @error: %0 for success, < %0 for error
e19a3ab0
KU
2863 *
2864 * Description:
b1f74493
FT
2865 * Complete the current consecutively mapped chunk from @rq. Must
2866 * be called with queue lock held.
e19a3ab0
KU
2867 *
2868 * Return:
b1f74493
FT
2869 * %false - we are done with this request
2870 * %true - still buffers pending for this request
2871 */
2872bool __blk_end_request_cur(struct request *rq, int error)
e19a3ab0 2873{
b1f74493 2874 return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
e19a3ab0 2875}
56ad1740 2876EXPORT_SYMBOL(__blk_end_request_cur);
e19a3ab0 2877
80a761fd
TH
2878/**
2879 * __blk_end_request_err - Finish a request till the next failure boundary.
2880 * @rq: the request to finish till the next failure boundary for
2881 * @error: must be negative errno
2882 *
2883 * Description:
2884 * Complete @rq till the next failure boundary. Must be called
2885 * with queue lock held.
2886 *
2887 * Return:
2888 * %false - we are done with this request
2889 * %true - still buffers pending for this request
2890 */
2891bool __blk_end_request_err(struct request *rq, int error)
2892{
2893 WARN_ON(error >= 0);
2894 return __blk_end_request(rq, error, blk_rq_err_bytes(rq));
2895}
2896EXPORT_SYMBOL_GPL(__blk_end_request_err);
2897
86db1e29
JA
2898void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
2899 struct bio *bio)
1da177e4 2900{
b4f42e28 2901 if (bio_has_data(bio))
fb2dce86 2902 rq->nr_phys_segments = bio_phys_segments(q, bio);
b4f42e28 2903
4f024f37 2904 rq->__data_len = bio->bi_iter.bi_size;
1da177e4 2905 rq->bio = rq->biotail = bio;
1da177e4 2906
66846572
N
2907 if (bio->bi_bdev)
2908 rq->rq_disk = bio->bi_bdev->bd_disk;
2909}
1da177e4 2910
2d4dc890
IL
2911#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
2912/**
2913 * rq_flush_dcache_pages - Helper function to flush all pages in a request
2914 * @rq: the request to be flushed
2915 *
2916 * Description:
2917 * Flush all pages in @rq.
2918 */
2919void rq_flush_dcache_pages(struct request *rq)
2920{
2921 struct req_iterator iter;
7988613b 2922 struct bio_vec bvec;
2d4dc890
IL
2923
2924 rq_for_each_segment(bvec, rq, iter)
7988613b 2925 flush_dcache_page(bvec.bv_page);
2d4dc890
IL
2926}
2927EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
2928#endif
2929
ef9e3fac
KU
2930/**
2931 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
2932 * @q : the queue of the device being checked
2933 *
2934 * Description:
2935 * Check if underlying low-level drivers of a device are busy.
2936 * If the drivers want to export their busy state, they must set their own
2937 * exporting function using blk_queue_lld_busy() first.
2938 *
2939 * Basically, this function is used only by request stacking drivers
2940 * to stop dispatching requests to underlying devices when underlying
2942 * devices are busy. This behavior allows more I/O merging on the queue
2942 * of the request stacking driver and prevents I/O throughput regression
2943 * on burst I/O load.
2944 *
2945 * Return:
2946 * 0 - Not busy (The request stacking driver should dispatch request)
2947 * 1 - Busy (The request stacking driver should stop dispatching request)
2948 */
2949int blk_lld_busy(struct request_queue *q)
2950{
2951 if (q->lld_busy_fn)
2952 return q->lld_busy_fn(q);
2953
2954 return 0;
2955}
2956EXPORT_SYMBOL_GPL(blk_lld_busy);
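
/*
 * Illustrative sketch (not part of blk-core.c): a low-level driver exporting
 * its busy state so stacking drivers can poll it via blk_lld_busy().  The
 * example_hw structure and example_lld_busy() are hypothetical; SCSI is the
 * in-tree user of blk_queue_lld_busy().
 */
struct example_hw {
	unsigned int	inflight;	/* commands currently on the wire */
	unsigned int	depth;		/* hardware queue depth */
};

static int example_lld_busy(struct request_queue *q)
{
	struct example_hw *hw = q->queuedata;

	return hw->inflight >= hw->depth;	/* non-zero means "busy" */
}

/* registered once at init time:  blk_queue_lld_busy(q, example_lld_busy); */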
2957
78d8e58a
MS
2958/**
2959 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
2960 * @rq: the clone request to be cleaned up
2961 *
2962 * Description:
2963 * Free all bios in @rq for a cloned request.
2964 */
2965void blk_rq_unprep_clone(struct request *rq)
2966{
2967 struct bio *bio;
2968
2969 while ((bio = rq->bio) != NULL) {
2970 rq->bio = bio->bi_next;
2971
2972 bio_put(bio);
2973 }
2974}
2975EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
2976
2977/*
2978 * Copy attributes of the original request to the clone request.
2979 * The actual data parts (e.g. ->cmd, ->sense) are not copied.
2980 */
2981static void __blk_rq_prep_clone(struct request *dst, struct request *src)
b0fd271d
KU
2982{
2983 dst->cpu = src->cpu;
b0fd271d
KU
2984 dst->__sector = blk_rq_pos(src);
2985 dst->__data_len = blk_rq_bytes(src);
2986 dst->nr_phys_segments = src->nr_phys_segments;
2987 dst->ioprio = src->ioprio;
2988 dst->extra_len = src->extra_len;
78d8e58a
MS
2989}
2990
2991/**
2992 * blk_rq_prep_clone - Helper function to setup clone request
2993 * @rq: the request to be setup
2994 * @rq_src: original request to be cloned
2995 * @bs: bio_set that bios for clone are allocated from
2996 * @gfp_mask: memory allocation mask for bio
2997 * @bio_ctr: setup function to be called for each clone bio.
2998 * Returns %0 for success, non %0 for failure.
2999 * @data: private data to be passed to @bio_ctr
3000 *
3001 * Description:
3002 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
3003 * The actual data parts of @rq_src (e.g. ->cmd, ->sense)
3004 * are not copied, and copying such parts is the caller's responsibility.
3005 * Also, pages which the original bios are pointing to are not copied
3006 * and the cloned bios just point to the same pages.
3007 * So cloned bios must be completed before original bios, which means
3008 * the caller must complete @rq before @rq_src.
3009 */
3010int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
3011 struct bio_set *bs, gfp_t gfp_mask,
3012 int (*bio_ctr)(struct bio *, struct bio *, void *),
3013 void *data)
3014{
3015 struct bio *bio, *bio_src;
3016
3017 if (!bs)
3018 bs = fs_bio_set;
3019
3020 __rq_for_each_bio(bio_src, rq_src) {
3021 bio = bio_clone_fast(bio_src, gfp_mask, bs);
3022 if (!bio)
3023 goto free_and_out;
3024
3025 if (bio_ctr && bio_ctr(bio, bio_src, data))
3026 goto free_and_out;
3027
3028 if (rq->bio) {
3029 rq->biotail->bi_next = bio;
3030 rq->biotail = bio;
3031 } else
3032 rq->bio = rq->biotail = bio;
3033 }
3034
3035 __blk_rq_prep_clone(rq, rq_src);
3036
3037 return 0;
3038
3039free_and_out:
3040 if (bio)
3041 bio_put(bio);
3042 blk_rq_unprep_clone(rq);
3043
3044 return -ENOMEM;
b0fd271d
KU
3045}
3046EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
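
/*
 * Illustrative sketch (not part of blk-core.c): the clone-and-dispatch
 * pattern of a request stacking driver.  example_clone_and_dispatch() is
 * hypothetical; request-based dm is the real user, and it also allocates,
 * initialises and completes the clone itself.
 */
static int example_clone_and_dispatch(struct request *orig,
				      struct request *clone,
				      struct request_queue *lower_q)
{
	int ret;

	/* copy the bios and attributes of @orig into the preallocated clone */
	ret = blk_rq_prep_clone(clone, orig, NULL /* default bio_set */,
				GFP_ATOMIC, NULL, NULL);
	if (ret)
		return ret;

	/* a real driver points this at its own completion handler */
	clone->end_io = NULL;

	ret = blk_insert_cloned_request(lower_q, clone);
	if (ret)
		/* lower queue refused (-EIO) or is dying (-ENODEV): undo */
		blk_rq_unprep_clone(clone);

	return ret;
}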
3047
59c3d45e 3048int kblockd_schedule_work(struct work_struct *work)
1da177e4
LT
3049{
3050 return queue_work(kblockd_workqueue, work);
3051}
1da177e4
LT
3052EXPORT_SYMBOL(kblockd_schedule_work);
3053
ee63cfa7
JA
3054int kblockd_schedule_work_on(int cpu, struct work_struct *work)
3055{
3056 return queue_work_on(cpu, kblockd_workqueue, work);
3057}
3058EXPORT_SYMBOL(kblockd_schedule_work_on);
3059
59c3d45e
JA
3060int kblockd_schedule_delayed_work(struct delayed_work *dwork,
3061 unsigned long delay)
e43473b7
VG
3062{
3063 return queue_delayed_work(kblockd_workqueue, dwork, delay);
3064}
3065EXPORT_SYMBOL(kblockd_schedule_delayed_work);
3066
8ab14595
JA
3067int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
3068 unsigned long delay)
3069{
3070 return queue_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
3071}
3072EXPORT_SYMBOL(kblockd_schedule_delayed_work_on);
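
/*
 * Illustrative sketch (not part of blk-core.c): deferring a short piece of
 * block-layer work to the shared kblockd workqueue.  example_async and
 * example_run_queue_work() are hypothetical.
 */
struct example_async {
	struct request_queue	*queue;
	struct work_struct	 run_work;
};

static void example_run_queue_work(struct work_struct *work)
{
	struct example_async *ea = container_of(work, struct example_async,
						run_work);

	spin_lock_irq(ea->queue->queue_lock);
	__blk_run_queue(ea->queue);
	spin_unlock_irq(ea->queue->queue_lock);
}

/* at init:              INIT_WORK(&ea->run_work, example_run_queue_work); */
/* from atomic context:  kblockd_schedule_work(&ea->run_work);             */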
3073
75df7136
SJ
3074/**
3075 * blk_start_plug - initialize blk_plug and track it inside the task_struct
3076 * @plug: The &struct blk_plug that needs to be initialized
3077 *
3078 * Description:
3079 * Tracking blk_plug inside the task_struct will help with auto-flushing the
3080 * pending I/O should the task end up blocking between blk_start_plug() and
3081 * blk_finish_plug(). This is important from a performance perspective, but
3082 * also ensures that we don't deadlock. For instance, if the task is blocking
3083 * for a memory allocation, memory reclaim could end up wanting to free a
3084 * page belonging to that request that is currently residing in our private
3085 * plug. By flushing the pending I/O when the process goes to sleep, we avoid
3086 * this kind of deadlock.
3087 */
73c10101
JA
3088void blk_start_plug(struct blk_plug *plug)
3089{
3090 struct task_struct *tsk = current;
3091
dd6cf3e1
SL
3092 /*
3093 * If this is a nested plug, don't actually assign it.
3094 */
3095 if (tsk->plug)
3096 return;
3097
73c10101 3098 INIT_LIST_HEAD(&plug->list);
320ae51f 3099 INIT_LIST_HEAD(&plug->mq_list);
048c9374 3100 INIT_LIST_HEAD(&plug->cb_list);
73c10101 3101 /*
dd6cf3e1
SL
3102 * Store ordering should not be needed here, since a potential
3103 * preempt will imply a full memory barrier
73c10101 3104 */
dd6cf3e1 3105 tsk->plug = plug;
73c10101
JA
3106}
3107EXPORT_SYMBOL(blk_start_plug);
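
/*
 * Illustrative sketch (not part of blk-core.c): batching several submissions
 * under one plug, as readahead and similar callers do.
 * example_submit_many() is hypothetical.
 */
static void example_submit_many(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]);	/* requests collect on current->plug */
	blk_finish_plug(&plug);		/* one flush/dispatch for the whole batch */
}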
3108
3109static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
3110{
3111 struct request *rqa = container_of(a, struct request, queuelist);
3112 struct request *rqb = container_of(b, struct request, queuelist);
3113
975927b9
JM
3114 return !(rqa->q < rqb->q ||
3115 (rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb)));
73c10101
JA
3116}
3117
49cac01e
JA
3118/*
3119 * If 'from_schedule' is true, then postpone the dispatch of requests
3120 * until a safe kblockd context. We do this to avoid accidental big
3121 * additional stack usage in driver dispatch, in places where the original
3122 * plugger did not intend it.
3123 */
f6603783 3124static void queue_unplugged(struct request_queue *q, unsigned int depth,
49cac01e 3125 bool from_schedule)
99e22598 3126 __releases(q->queue_lock)
94b5eb28 3127{
49cac01e 3128 trace_block_unplug(q, depth, !from_schedule);
99e22598 3129
70460571 3130 if (from_schedule)
24ecfbe2 3131 blk_run_queue_async(q);
70460571 3132 else
24ecfbe2 3133 __blk_run_queue(q);
70460571 3134 spin_unlock(q->queue_lock);
94b5eb28
JA
3135}
3136
74018dc3 3137static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
048c9374
N
3138{
3139 LIST_HEAD(callbacks);
3140
2a7d5559
SL
3141 while (!list_empty(&plug->cb_list)) {
3142 list_splice_init(&plug->cb_list, &callbacks);
048c9374 3143
2a7d5559
SL
3144 while (!list_empty(&callbacks)) {
3145 struct blk_plug_cb *cb = list_first_entry(&callbacks,
048c9374
N
3146 struct blk_plug_cb,
3147 list);
2a7d5559 3148 list_del(&cb->list);
74018dc3 3149 cb->callback(cb, from_schedule);
2a7d5559 3150 }
048c9374
N
3151 }
3152}
3153
9cbb1750
N
3154struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
3155 int size)
3156{
3157 struct blk_plug *plug = current->plug;
3158 struct blk_plug_cb *cb;
3159
3160 if (!plug)
3161 return NULL;
3162
3163 list_for_each_entry(cb, &plug->cb_list, list)
3164 if (cb->callback == unplug && cb->data == data)
3165 return cb;
3166
3167 /* Not currently on the callback list */
3168 BUG_ON(size < sizeof(*cb));
3169 cb = kzalloc(size, GFP_ATOMIC);
3170 if (cb) {
3171 cb->data = data;
3172 cb->callback = unplug;
3173 list_add(&cb->list, &plug->cb_list);
3174 }
3175 return cb;
3176}
3177EXPORT_SYMBOL(blk_check_plugged);
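
/*
 * Illustrative sketch (not part of blk-core.c): a driver-private plug
 * callback in the style of md/raid.  example_plug_cb and example_unplug()
 * are hypothetical.  Note that flush_plug_callbacks() only unlinks the
 * entry, so the callback is expected to free it.
 */
struct example_plug_cb {
	struct blk_plug_cb	cb;		/* keep first: blk_check_plugged()
						 * hands back the start of the
						 * kzalloc()'d area */
	struct bio_list		pending;	/* bios batched while plugged */
};

static void example_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct example_plug_cb *ecb = container_of(cb, struct example_plug_cb, cb);
	struct bio *bio;

	while ((bio = bio_list_pop(&ecb->pending)) != NULL)
		generic_make_request(bio);
	kfree(ecb);
}

/*
 * Submission path:
 *	cb = blk_check_plugged(example_unplug, dev, sizeof(struct example_plug_cb));
 *	if (cb)  -> add the bio to the private pending list
 *	else     -> no plug active, submit the bio right away
 */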
3178
49cac01e 3179void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
73c10101
JA
3180{
3181 struct request_queue *q;
3182 unsigned long flags;
3183 struct request *rq;
109b8129 3184 LIST_HEAD(list);
94b5eb28 3185 unsigned int depth;
73c10101 3186
74018dc3 3187 flush_plug_callbacks(plug, from_schedule);
320ae51f
JA
3188
3189 if (!list_empty(&plug->mq_list))
3190 blk_mq_flush_plug_list(plug, from_schedule);
3191
73c10101
JA
3192 if (list_empty(&plug->list))
3193 return;
3194
109b8129
N
3195 list_splice_init(&plug->list, &list);
3196
422765c2 3197 list_sort(NULL, &list, plug_rq_cmp);
73c10101
JA
3198
3199 q = NULL;
94b5eb28 3200 depth = 0;
18811272
JA
3201
3202 /*
3203 * Save and disable interrupts here, to avoid doing it for every
3204 * queue lock we have to take.
3205 */
73c10101 3206 local_irq_save(flags);
109b8129
N
3207 while (!list_empty(&list)) {
3208 rq = list_entry_rq(list.next);
73c10101 3209 list_del_init(&rq->queuelist);
73c10101
JA
3210 BUG_ON(!rq->q);
3211 if (rq->q != q) {
99e22598
JA
3212 /*
3213 * This drops the queue lock
3214 */
3215 if (q)
49cac01e 3216 queue_unplugged(q, depth, from_schedule);
73c10101 3217 q = rq->q;
94b5eb28 3218 depth = 0;
73c10101
JA
3219 spin_lock(q->queue_lock);
3220 }
8ba61435
TH
3221
3222 /*
3223 * Short-circuit if @q is dead
3224 */
3f3299d5 3225 if (unlikely(blk_queue_dying(q))) {
8ba61435
TH
3226 __blk_end_request_all(rq, -ENODEV);
3227 continue;
3228 }
3229
73c10101
JA
3230 /*
3231 * rq is already accounted, so use raw insert
3232 */
f73f44eb 3233 if (op_is_flush(rq->cmd_flags))
401a18e9
JA
3234 __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
3235 else
3236 __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
94b5eb28
JA
3237
3238 depth++;
73c10101
JA
3239 }
3240
99e22598
JA
3241 /*
3242 * This drops the queue lock
3243 */
3244 if (q)
49cac01e 3245 queue_unplugged(q, depth, from_schedule);
73c10101 3246
73c10101
JA
3247 local_irq_restore(flags);
3248}
73c10101
JA
3249
3250void blk_finish_plug(struct blk_plug *plug)
3251{
dd6cf3e1
SL
3252 if (plug != current->plug)
3253 return;
f6603783 3254 blk_flush_plug_list(plug, false);
73c10101 3255
dd6cf3e1 3256 current->plug = NULL;
73c10101 3257}
88b996cd 3258EXPORT_SYMBOL(blk_finish_plug);
73c10101 3259
47fafbc7 3260#ifdef CONFIG_PM
6c954667
LM
3261/**
3262 * blk_pm_runtime_init - Block layer runtime PM initialization routine
3263 * @q: the queue of the device
3264 * @dev: the device the queue belongs to
3265 *
3266 * Description:
3267 * Initialize runtime-PM-related fields for @q and start auto suspend for
3268 * @dev. Drivers that want to take advantage of request-based runtime PM
3269 * should call this function after @dev has been initialized, and its
3270 * request queue @q has been allocated, and runtime PM for it can not happen
3271 * yet (either because it is disabled/forbidden or its usage_count > 0). In most
3272 * cases, driver should call this function before any I/O has taken place.
3273 *
3274 * This function takes care of setting up using auto suspend for the device,
3275 * the autosuspend delay is set to -1 to make runtime suspend impossible
3276 * until an updated value is either set by user or by driver. Drivers do
3277 * not need to touch other autosuspend settings.
3278 *
3279 * The block layer runtime PM is request based, so only works for drivers
3280 * that use request as their IO unit instead of those directly use bio's.
3281 */
3282void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
3283{
3284 q->dev = dev;
3285 q->rpm_status = RPM_ACTIVE;
3286 pm_runtime_set_autosuspend_delay(q->dev, -1);
3287 pm_runtime_use_autosuspend(q->dev);
3288}
3289EXPORT_SYMBOL(blk_pm_runtime_init);
3290
3291/**
3292 * blk_pre_runtime_suspend - Pre runtime suspend check
3293 * @q: the queue of the device
3294 *
3295 * Description:
3296 * This function will check if runtime suspend is allowed for the device
3297 * by examining if there are any requests pending in the queue. If there
3298 * are requests pending, the device can not be runtime suspended; otherwise,
3299 * the queue's status will be updated to SUSPENDING and the driver can
3300 * proceed to suspend the device.
3301 *
3302 * For the not allowed case, we mark last busy for the device so that
3303 * runtime PM core will try to autosuspend it some time later.
3304 *
3305 * This function should be called near the start of the device's
3306 * runtime_suspend callback.
3307 *
3308 * Return:
3309 * 0 - OK to runtime suspend the device
3310 * -EBUSY - Device should not be runtime suspended
3311 */
3312int blk_pre_runtime_suspend(struct request_queue *q)
3313{
3314 int ret = 0;
3315
4fd41a85
KX
3316 if (!q->dev)
3317 return ret;
3318
6c954667
LM
3319 spin_lock_irq(q->queue_lock);
3320 if (q->nr_pending) {
3321 ret = -EBUSY;
3322 pm_runtime_mark_last_busy(q->dev);
3323 } else {
3324 q->rpm_status = RPM_SUSPENDING;
3325 }
3326 spin_unlock_irq(q->queue_lock);
3327 return ret;
3328}
3329EXPORT_SYMBOL(blk_pre_runtime_suspend);
3330
3331/**
3332 * blk_post_runtime_suspend - Post runtime suspend processing
3333 * @q: the queue of the device
3334 * @err: return value of the device's runtime_suspend function
3335 *
3336 * Description:
3337 * Update the queue's runtime status according to the return value of the
3338 * device's runtime suspend function and mark last busy for the device so
3339 * that PM core will try to auto suspend the device at a later time.
3340 *
3341 * This function should be called near the end of the device's
3342 * runtime_suspend callback.
3343 */
3344void blk_post_runtime_suspend(struct request_queue *q, int err)
3345{
4fd41a85
KX
3346 if (!q->dev)
3347 return;
3348
6c954667
LM
3349 spin_lock_irq(q->queue_lock);
3350 if (!err) {
3351 q->rpm_status = RPM_SUSPENDED;
3352 } else {
3353 q->rpm_status = RPM_ACTIVE;
3354 pm_runtime_mark_last_busy(q->dev);
3355 }
3356 spin_unlock_irq(q->queue_lock);
3357}
3358EXPORT_SYMBOL(blk_post_runtime_suspend);
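/*
 * Sketch of the suspend-side pairing described above, not part of this file:
 * a driver's runtime_suspend callback calls blk_pre_runtime_suspend() near the
 * start and blk_post_runtime_suspend() near the end. my_dev_runtime_suspend(),
 * dev_to_queue() and my_dev_power_down() are hypothetical driver helpers.
 */
static int my_dev_runtime_suspend(struct device *dev)
{
	struct request_queue *q = dev_to_queue(dev);	/* driver-specific lookup */
	int err;

	/* Bail out (and mark last busy) if requests are still pending. */
	err = blk_pre_runtime_suspend(q);
	if (err)
		return err;

	/* Driver-specific hardware power-down. */
	err = my_dev_power_down(dev);

	/* Records RPM_SUSPENDED on success, rolls back to RPM_ACTIVE on error. */
	blk_post_runtime_suspend(q, err);
	return err;
}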
3359
3360/**
3361 * blk_pre_runtime_resume - Pre runtime resume processing
3362 * @q: the queue of the device
3363 *
3364 * Description:
3365 * Update the queue's runtime status to RESUMING in preparation for the
3366 * runtime resume of the device.
3367 *
3368 * This function should be called near the start of the device's
3369 * runtime_resume callback.
3370 */
3371void blk_pre_runtime_resume(struct request_queue *q)
3372{
4fd41a85
KX
3373 if (!q->dev)
3374 return;
3375
6c954667
LM
3376 spin_lock_irq(q->queue_lock);
3377 q->rpm_status = RPM_RESUMING;
3378 spin_unlock_irq(q->queue_lock);
3379}
3380EXPORT_SYMBOL(blk_pre_runtime_resume);
3381
3382/**
3383 * blk_post_runtime_resume - Post runtime resume processing
3384 * @q: the queue of the device
3385 * @err: return value of the device's runtime_resume function
3386 *
3387 * Description:
3388 * Update the queue's runtime status according to the return value of the
 3389 * device's runtime_resume function. If the device was successfully resumed,
 3390 * process the requests queued up while the device was suspended or resuming,
 3391 * then mark last busy and initiate autosuspend for it.
3392 *
3393 * This function should be called near the end of the device's
3394 * runtime_resume callback.
3395 */
3396void blk_post_runtime_resume(struct request_queue *q, int err)
3397{
4fd41a85
KX
3398 if (!q->dev)
3399 return;
3400
6c954667
LM
3401 spin_lock_irq(q->queue_lock);
3402 if (!err) {
3403 q->rpm_status = RPM_ACTIVE;
3404 __blk_run_queue(q);
3405 pm_runtime_mark_last_busy(q->dev);
c60855cd 3406 pm_request_autosuspend(q->dev);
6c954667
LM
3407 } else {
3408 q->rpm_status = RPM_SUSPENDED;
3409 }
3410 spin_unlock_irq(q->queue_lock);
3411}
3412EXPORT_SYMBOL(blk_post_runtime_resume);
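/*
 * Companion sketch for the resume side, not part of this file: the driver's
 * runtime_resume callback brackets its hardware power-up with
 * blk_pre_runtime_resume() and blk_post_runtime_resume(). dev_to_queue() and
 * my_dev_power_up() are again hypothetical driver helpers.
 */
static int my_dev_runtime_resume(struct device *dev)
{
	struct request_queue *q = dev_to_queue(dev);	/* driver-specific lookup */
	int err;

	/* Mark the queue RPM_RESUMING while the hardware comes back up. */
	blk_pre_runtime_resume(q);

	/* Driver-specific hardware power-up. */
	err = my_dev_power_up(dev);

	/* On success this restarts the queue and schedules a new autosuspend. */
	blk_post_runtime_resume(q, err);
	return err;
}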
d07ab6d1
MW
3413
3414/**
3415 * blk_set_runtime_active - Force runtime status of the queue to be active
3416 * @q: the queue of the device
3417 *
 3418 * If the device is left runtime suspended during system suspend, the resume
 3419 * hook typically resumes the device and corrects its runtime status
 3420 * accordingly. However, that does not affect the queue runtime PM status,
 3421 * which is still "suspended". This prevents processing requests from the
3422 * queue.
3423 *
 3424 * This function can be used in the driver's resume hook to correct the queue's
 3425 * runtime PM status and re-enable peeking at requests from the queue. It
 3426 * should be called before the first request is added to the queue.
3427 */
3428void blk_set_runtime_active(struct request_queue *q)
3429{
3430 spin_lock_irq(q->queue_lock);
3431 q->rpm_status = RPM_ACTIVE;
3432 pm_runtime_mark_last_busy(q->dev);
3433 pm_request_autosuspend(q->dev);
3434 spin_unlock_irq(q->queue_lock);
3435}
3436EXPORT_SYMBOL(blk_set_runtime_active);
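/*
 * Sketch of the system-resume case described above, not part of this file:
 * if the device was left runtime suspended across system suspend and the
 * resume hook powers it up directly, blk_set_runtime_active() brings the
 * queue's runtime PM status back in line before I/O restarts. my_dev_resume()
 * and the other helpers are hypothetical.
 */
static int my_dev_resume(struct device *dev)
{
	struct request_queue *q = dev_to_queue(dev);
	int err;

	err = my_dev_power_up(dev);
	if (err)
		return err;

	/* Tell the runtime PM core the device is active again... */
	pm_runtime_disable(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	/* ...and let the queue resume dispatching requests. */
	blk_set_runtime_active(q);
	return 0;
}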
6c954667
LM
3437#endif
3438
1da177e4
LT
3439int __init blk_dev_init(void)
3440{
ef295ecf
CH
3441 BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
3442 BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
0762b23d 3443 FIELD_SIZEOF(struct request, cmd_flags));
ef295ecf
CH
3444 BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
3445 FIELD_SIZEOF(struct bio, bi_opf));
9eb55b03 3446
89b90be2
TH
3447 /* used for unplugging and affects IO latency/throughput - HIGHPRI */
3448 kblockd_workqueue = alloc_workqueue("kblockd",
28747fcd 3449 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
1da177e4
LT
3450 if (!kblockd_workqueue)
3451 panic("Failed to create kblockd\n");
3452
3453 request_cachep = kmem_cache_create("blkdev_requests",
20c2df83 3454 sizeof(struct request), 0, SLAB_PANIC, NULL);
1da177e4 3455
c2789bd4 3456 blk_requestq_cachep = kmem_cache_create("request_queue",
165125e1 3457 sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
1da177e4 3458
18fbda91
OS
3459#ifdef CONFIG_DEBUG_FS
3460 blk_debugfs_root = debugfs_create_dir("block", NULL);
3461#endif
3462
d38ecf93 3463 return 0;
1da177e4 3464}