/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	- July2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"

#ifdef CONFIG_DEBUG_FS
struct dentry *blk_debugfs_root;
#endif

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);

DEFINE_IDA(blk_queue_ida);

/*
 * For the allocated request tables
 */
struct kmem_cache *request_cachep;

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

/**
 * blk_queue_flag_set - atomically set a queue flag
 * @flag: flag to be set
 * @q: request queue
 */
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	queue_flag_set(flag, q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_queue_flag_set);

/**
 * blk_queue_flag_clear - atomically clear a queue flag
 * @flag: flag to be cleared
 * @q: request queue
 */
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	queue_flag_clear(flag, q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_queue_flag_clear);

/**
 * blk_queue_flag_test_and_set - atomically test and set a queue flag
 * @flag: flag to be set
 * @q: request queue
 *
 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
 * the flag was already set.
 */
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
{
	unsigned long flags;
	bool res;

	spin_lock_irqsave(q->queue_lock, flags);
	res = queue_flag_test_and_set(flag, q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return res;
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);

/**
 * blk_queue_flag_test_and_clear - atomically test and clear a queue flag
 * @flag: flag to be cleared
 * @q: request queue
 *
 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
 * the flag was set.
 */
bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q)
{
	unsigned long flags;
	bool res;

	spin_lock_irqsave(q->queue_lock, flags);
	res = queue_flag_test_and_clear(flag, q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return res;
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_clear);
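
/*
 * Illustrative sketch (not part of the original file): a driver that wants to
 * toggle a queue flag from process context can use the helpers above instead
 * of open-coding q->queue_lock handling. The function name is hypothetical
 * and QUEUE_FLAG_NONROT is just an example flag.
 */
static __maybe_unused void example_update_rotational_hint(struct request_queue *q,
							   bool nonrot)
{
	if (nonrot)
		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
}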

static void blk_clear_congested(struct request_list *rl, int sync)
{
#ifdef CONFIG_CGROUP_WRITEBACK
	clear_wb_congested(rl->blkg->wb_congested, sync);
#else
	/*
	 * If !CGROUP_WRITEBACK, all blkg's map to bdi->wb and we shouldn't
	 * flip its congestion state for events on other blkcgs.
	 */
	if (rl == &rl->q->root_rl)
		clear_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
#endif
}

static void blk_set_congested(struct request_list *rl, int sync)
{
#ifdef CONFIG_CGROUP_WRITEBACK
	set_wb_congested(rl->blkg->wb_congested, sync);
#else
	/* see blk_clear_congested() */
	if (rl == &rl->q->root_rl)
		set_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
#endif
}

void blk_queue_congestion_threshold(struct request_queue *q)
{
	int nr;

	nr = q->nr_requests - (q->nr_requests / 8) + 1;
	if (nr > q->nr_requests)
		nr = q->nr_requests;
	q->nr_congestion_on = nr;

	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
	if (nr < 1)
		nr = 1;
	q->nr_congestion_off = nr;
}

void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	INIT_LIST_HEAD(&rq->timeout_list);
	rq->cpu = -1;
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->tag = -1;
	rq->internal_tag = -1;
	rq->start_time_ns = ktime_get_ns();
	rq->part = NULL;
}
EXPORT_SYMBOL(blk_rq_init);

static const struct {
	int		errno;
	const char	*name;
} blk_errors[] = {
	[BLK_STS_OK]		= { 0,		"" },
	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
	[BLK_STS_DEV_RESOURCE]	= { -EBUSY,	"device resource" },
	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },

	/* device mapper special case, should not leak out: */
	[BLK_STS_DM_REQUEUE]	= { -EREMCHG,	"dm internal retry" },

	/* everything else not covered above: */
	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
};

blk_status_t errno_to_blk_status(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
		if (blk_errors[i].errno == errno)
			return (__force blk_status_t)i;
	}

	return BLK_STS_IOERR;
}
EXPORT_SYMBOL_GPL(errno_to_blk_status);

int blk_status_to_errno(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return -EIO;
	return blk_errors[idx].errno;
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);
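
/*
 * Illustrative sketch (not part of the original file): translating between
 * Linux errno values and blk_status_t, e.g. when a driver maps a hardware
 * error to a block layer status and back again for logging. The function
 * name is hypothetical.
 */
static __maybe_unused int example_status_round_trip(int error)
{
	blk_status_t status = errno_to_blk_status(error);

	/* unknown errnos collapse to BLK_STS_IOERR, i.e. -EIO on the way back */
	return blk_status_to_errno(status);
}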

static void print_req_error(struct request *req, blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return;

	printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n",
				__func__, blk_errors[idx].name, req->rq_disk ?
				req->rq_disk->disk_name : "?",
				(unsigned long long)blk_rq_pos(req));
}

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, blk_status_t error)
{
	if (error)
		bio->bi_status = error;

	if (unlikely(rq->rq_flags & RQF_QUIET))
		bio_set_flag(bio, BIO_QUIET);

	bio_advance(bio, nbytes);

	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
		bio_endio(bio);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?",
		(unsigned long long) rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
	       rq->bio, rq->biotail, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(blk_dump_rq_flags);

static void blk_delay_work(struct work_struct *work)
{
	struct request_queue *q;

	q = container_of(work, struct request_queue, delay_work.work);
	spin_lock_irq(q->queue_lock);
	__blk_run_queue(q);
	spin_unlock_irq(q->queue_lock);
}

/**
 * blk_delay_queue - restart queueing after defined interval
 * @q:		The &struct request_queue in question
 * @msecs:	Delay in msecs
 *
 * Description:
 *   Sometimes queueing needs to be postponed for a little while, to allow
 *   resources to come back. This function will make sure that queueing is
 *   restarted around the specified time.
 */
void blk_delay_queue(struct request_queue *q, unsigned long msecs)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	if (likely(!blk_queue_dead(q)))
		queue_delayed_work(kblockd_workqueue, &q->delay_work,
				   msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_delay_queue);

/**
 * blk_start_queue_async - asynchronously restart a previously stopped queue
 * @q:	The &struct request_queue in question
 *
 * Description:
 *   blk_start_queue_async() will clear the stop flag on the queue, and
 *   ensure that the request_fn for the queue is run from an async
 *   context.
 **/
void blk_start_queue_async(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
	blk_run_queue_async(q);
}
EXPORT_SYMBOL(blk_start_queue_async);

/**
 * blk_start_queue - restart a previously stopped queue
 * @q:	The &struct request_queue in question
 *
 * Description:
 *   blk_start_queue() will clear the stop flag on the queue, and call
 *   the request_fn for the queue if it was in a stopped state when
 *   entered. Also see blk_stop_queue().
 **/
void blk_start_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
	__blk_run_queue(q);
}
EXPORT_SYMBOL(blk_start_queue);

/**
 * blk_stop_queue - stop a queue
 * @q:	The &struct request_queue in question
 *
 * Description:
 *   The Linux block layer assumes that a block driver will consume all
 *   entries on the request queue when the request_fn strategy is called.
 *   Often this will not happen, because of hardware limitations (queue
 *   depth settings). If a device driver gets a 'queue full' response,
 *   or if it simply chooses not to queue more I/O at one point, it can
 *   call this function to prevent the request_fn from being called until
 *   the driver has signalled it's ready to go again. This happens by calling
 *   blk_start_queue() to restart queue operations.
 **/
void blk_stop_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	cancel_delayed_work(&q->delay_work);
	queue_flag_set(QUEUE_FLAG_STOPPED, q);
}
EXPORT_SYMBOL(blk_stop_queue);
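
/*
 * Illustrative sketch (not part of the original file): the classic legacy
 * driver pattern the comment above describes. The driver's request_fn calls
 * blk_stop_queue() (with q->queue_lock held) when the device reports 'queue
 * full'; its completion path then restarts the queue, roughly as below. The
 * function name is hypothetical.
 */
static __maybe_unused void example_restart_after_completion(struct request_queue *q)
{
	unsigned long flags;

	/* completion (e.g. IRQ) context: the device can accept work again */
	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_queue_stopped(q))
		blk_start_queue(q);	/* clears STOPPED and reruns request_fn */
	spin_unlock_irqrestore(q->queue_lock, flags);
}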

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->make_request_fn will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_work_sync(&q->timeout_work);

	if (q->mq_ops) {
		struct blk_mq_hw_ctx *hctx;
		int i;

		cancel_delayed_work_sync(&q->requeue_work);
		queue_for_each_hw_ctx(q, hctx, i)
			cancel_delayed_work_sync(&hctx->run_work);
	} else {
		cancel_delayed_work_sync(&q->delay_work);
	}
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * blk_set_preempt_only - set QUEUE_FLAG_PREEMPT_ONLY
 * @q: request queue pointer
 *
 * Returns the previous value of the PREEMPT_ONLY flag - 0 if the flag was not
 * set and 1 if the flag was already set.
 */
int blk_set_preempt_only(struct request_queue *q)
{
	return blk_queue_flag_test_and_set(QUEUE_FLAG_PREEMPT_ONLY, q);
}
EXPORT_SYMBOL_GPL(blk_set_preempt_only);

void blk_clear_preempt_only(struct request_queue *q)
{
	blk_queue_flag_clear(QUEUE_FLAG_PREEMPT_ONLY, q);
	wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_clear_preempt_only);
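
/*
 * Illustrative sketch (not part of the original file): the intended use of
 * the PREEMPT_ONLY flag. While it is set, only callers that pass
 * BLK_MQ_REQ_PREEMPT get through blk_queue_enter(); SCSI uses this to
 * quiesce a device while still letting power-management requests through.
 * The function name is hypothetical and the real quiesce path does more
 * work (e.g. a freeze/unfreeze cycle) than shown here.
 */
static __maybe_unused void example_quiesce_and_resume(struct request_queue *q)
{
	blk_set_preempt_only(q);
	/* ... issue RQF_PREEMPT requests, e.g. to spin the device up or down ... */
	blk_clear_preempt_only(q);	/* wakes anyone waiting in blk_queue_enter() */
}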

/**
 * __blk_run_queue_uncond - run a queue whether or not it has been stopped
 * @q:	The queue to run
 *
 * Description:
 *    Invoke request handling on a queue if there are any pending requests.
 *    May be used to restart request handling after a request has completed.
 *    This variant runs the queue whether or not the queue has been
 *    stopped. Must be called with the queue lock held and interrupts
 *    disabled. See also @blk_run_queue.
 */
inline void __blk_run_queue_uncond(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	if (unlikely(blk_queue_dead(q)))
		return;

	/*
	 * Some request_fn implementations, e.g. scsi_request_fn(), unlock
	 * the queue lock internally. As a result multiple threads may be
	 * running such a request function concurrently. Keep track of the
	 * number of active request_fn invocations such that blk_drain_queue()
	 * can wait until all these request_fn calls have finished.
	 */
	q->request_fn_active++;
	q->request_fn(q);
	q->request_fn_active--;
}
EXPORT_SYMBOL_GPL(__blk_run_queue_uncond);

/**
 * __blk_run_queue - run a single device queue
 * @q:	The queue to run
 *
 * Description:
 *    See @blk_run_queue.
 */
void __blk_run_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	if (unlikely(blk_queue_stopped(q)))
		return;

	__blk_run_queue_uncond(q);
}
EXPORT_SYMBOL(__blk_run_queue);

/**
 * blk_run_queue_async - run a single device queue in workqueue context
 * @q:	The queue to run
 *
 * Description:
 *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
 *    of us.
 *
 * Note:
 *    Since it is not allowed to run q->delay_work after blk_cleanup_queue()
 *    has canceled q->delay_work, callers must hold the queue lock to avoid
 *    race conditions between blk_cleanup_queue() and blk_run_queue_async().
 */
void blk_run_queue_async(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
		mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
}
EXPORT_SYMBOL(blk_run_queue_async);

/**
 * blk_run_queue - run a single device queue
 * @q: The queue to run
 *
 * Description:
 *    Invoke request handling on this queue, if it has pending work to do.
 *    May be used to restart queueing when a request has completed.
 */
void blk_run_queue(struct request_queue *q)
{
	unsigned long flags;

	WARN_ON_ONCE(q->mq_ops);

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_run_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);
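
/*
 * Illustrative sketch (not part of the original file): choosing between the
 * run-queue variants. blk_run_queue() takes q->queue_lock itself, so it is
 * the one to call from plain driver context; the __blk_run_queue*() variants
 * expect the caller to already hold the lock. The function name is
 * hypothetical.
 */
static __maybe_unused void example_kick_queue(struct request_queue *q,
					      bool lock_already_held)
{
	if (lock_already_held)
		__blk_run_queue(q);	/* caller holds q->queue_lock */
	else
		blk_run_queue(q);	/* takes and releases the lock */
}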

void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

/**
 * __blk_drain_queue - drain requests from request_queue
 * @q: queue to drain
 * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
 *
 * Drain requests from @q.  If @drain_all is set, all requests are drained.
 * If not, only ELVPRIV requests are drained.  The caller is responsible
 * for ensuring that no new requests which need to be drained are queued.
 */
static void __blk_drain_queue(struct request_queue *q, bool drain_all)
	__releases(q->queue_lock)
	__acquires(q->queue_lock)
{
	int i;

	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	while (true) {
		bool drain = false;

		/*
		 * The caller might be trying to drain @q before its
		 * elevator is initialized.
		 */
		if (q->elevator)
			elv_drain_elevator(q);

		blkcg_drain_queue(q);

		/*
		 * This function might be called on a queue which failed
		 * driver init after queue creation or is not yet fully
		 * active. Some drivers (e.g. fd and loop) get unhappy
		 * in such cases. Kick queue iff dispatch queue has
		 * something on it and @q has request_fn set.
		 */
		if (!list_empty(&q->queue_head) && q->request_fn)
			__blk_run_queue(q);

		drain |= q->nr_rqs_elvpriv;
		drain |= q->request_fn_active;

		/*
		 * Unfortunately, requests are queued at and tracked from
		 * multiple places and there's no single counter which can
		 * be drained.  Check all the queues and counters.
		 */
		if (drain_all) {
			struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
			drain |= !list_empty(&q->queue_head);
			for (i = 0; i < 2; i++) {
				drain |= q->nr_rqs[i];
				drain |= q->in_flight[i];
				if (fq)
					drain |= !list_empty(&fq->flush_queue[i]);
			}
		}

		if (!drain)
			break;

		spin_unlock_irq(q->queue_lock);

		msleep(10);

		spin_lock_irq(q->queue_lock);
	}

	/*
	 * With queue marked dead, any woken up waiter will fail the
	 * allocation path, so the wakeup chaining is lost and we're
	 * left with hung waiters. We need to wake up those waiters.
	 */
	if (q->request_fn) {
		struct request_list *rl;

		blk_queue_for_each_rl(rl, q)
			for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
				wake_up_all(&rl->wait[i]);
	}
}

void blk_drain_queue(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	__blk_drain_queue(q, true);
	spin_unlock_irq(q->queue_lock);
}

/**
 * blk_queue_bypass_start - enter queue bypass mode
 * @q: queue of interest
 *
 * In bypass mode, only the dispatch FIFO queue of @q is used.  This
 * function makes @q enter bypass mode and drains all requests which were
 * throttled or issued before.  On return, it's guaranteed that no request
 * is being throttled or has ELVPRIV set and blk_queue_bypass() %true
 * inside queue or RCU read lock.
 */
void blk_queue_bypass_start(struct request_queue *q)
{
	WARN_ON_ONCE(q->mq_ops);

	spin_lock_irq(q->queue_lock);
	q->bypass_depth++;
	queue_flag_set(QUEUE_FLAG_BYPASS, q);
	spin_unlock_irq(q->queue_lock);

	/*
	 * Queues start drained.  Skip actual draining till init is
	 * complete.  This avoids lengthy delays during queue init which
	 * can happen many times during boot.
	 */
	if (blk_queue_init_done(q)) {
		spin_lock_irq(q->queue_lock);
		__blk_drain_queue(q, false);
		spin_unlock_irq(q->queue_lock);

		/* ensure blk_queue_bypass() is %true inside RCU read lock */
		synchronize_rcu();
	}
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_start);

/**
 * blk_queue_bypass_end - leave queue bypass mode
 * @q: queue of interest
 *
 * Leave bypass mode and restore the normal queueing behavior.
 *
 * Note: although blk_queue_bypass_start() is only called for blk-sq queues,
 * this function is called for both blk-sq and blk-mq queues.
 */
void blk_queue_bypass_end(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	if (!--q->bypass_depth)
		queue_flag_clear(QUEUE_FLAG_BYPASS, q);
	WARN_ON_ONCE(q->bypass_depth < 0);
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
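
/*
 * Illustrative sketch (not part of the original file): bypass mode is used
 * when the elevator must not see new requests, e.g. around an elevator
 * switch or while a blkcg policy is (de)activated. The pattern is simply a
 * start/end pair bracketing the critical section; the function name is
 * hypothetical.
 */
static __maybe_unused void example_bypass_section(struct request_queue *q)
{
	blk_queue_bypass_start(q);	/* drains throttled/ELVPRIV requests */
	/* ... reconfigure elevator or blkcg policy data here ... */
	blk_queue_bypass_end(q);	/* restore normal queueing */
}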

void blk_set_queue_dying(struct request_queue *q)
{
	blk_queue_flag_set(QUEUE_FLAG_DYING, q);

	/*
	 * When queue DYING flag is set, we need to block new req
	 * entering queue, so we call blk_freeze_queue_start() to
	 * prevent I/O from crossing blk_queue_enter().
	 */
	blk_freeze_queue_start(q);

	if (q->mq_ops)
		blk_mq_wake_waiters(q);
	else {
		struct request_list *rl;

		spin_lock_irq(q->queue_lock);
		blk_queue_for_each_rl(rl, q) {
			if (rl->rq_pool) {
				wake_up_all(&rl->wait[BLK_RW_SYNC]);
				wake_up_all(&rl->wait[BLK_RW_ASYNC]);
			}
		}
		spin_unlock_irq(q->queue_lock);
	}

	/* Make blk_queue_enter() reexamine the DYING flag. */
	wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);

/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
 * put it.  All future requests will be failed immediately with -ENODEV.
 */
void blk_cleanup_queue(struct request_queue *q)
{
	spinlock_t *lock = q->queue_lock;

	/* mark @q DYING, no new request or merges will be allowed afterwards */
	mutex_lock(&q->sysfs_lock);
	blk_set_queue_dying(q);
	spin_lock_irq(lock);

	/*
	 * A dying queue is permanently in bypass mode till released.  Note
	 * that, unlike blk_queue_bypass_start(), we aren't performing
	 * synchronize_rcu() after entering bypass mode to avoid the delay
	 * as some drivers create and destroy a lot of queues while
	 * probing.  This is still safe because blk_release_queue() will be
	 * called only after the queue refcnt drops to zero and nothing,
	 * RCU or not, would be traversing the queue by then.
	 */
	q->bypass_depth++;
	queue_flag_set(QUEUE_FLAG_BYPASS, q);

	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	queue_flag_set(QUEUE_FLAG_DYING, q);
	spin_unlock_irq(lock);
	mutex_unlock(&q->sysfs_lock);

	/*
	 * Drain all requests queued before DYING marking. Set DEAD flag to
	 * prevent that q->request_fn() gets invoked after draining finished.
	 */
	blk_freeze_queue(q);
	spin_lock_irq(lock);
	queue_flag_set(QUEUE_FLAG_DEAD, q);
	spin_unlock_irq(lock);

	/*
	 * make sure all in-progress dispatch are completed because
	 * blk_freeze_queue() can only complete all requests, and
	 * dispatch may still be in-progress since we dispatch requests
	 * from more than one context.
	 *
	 * No need to quiesce queue if it isn't initialized yet since
	 * blk_freeze_queue() should be enough for cases of passthrough
	 * request.
	 */
	if (q->mq_ops && blk_queue_init_done(q))
		blk_mq_quiesce_queue(q);

	/* for synchronous bio-based driver finish in-flight integrity i/o */
	blk_flush_integrity();

	/* @q won't process any more request, flush async actions */
	del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
	blk_sync_queue(q);

	/*
	 * I/O scheduler exit is only safe after the sysfs scheduler attribute
	 * has been removed.
	 */
	WARN_ON_ONCE(q->kobj.state_in_sysfs);

	/*
	 * Since the I/O scheduler exit code may access cgroup information,
	 * perform I/O scheduler exit before disassociating from the block
	 * cgroup controller.
	 */
	if (q->elevator) {
		ioc_clear_queue(q);
		elevator_exit(q, q->elevator);
		q->elevator = NULL;
	}

	/*
	 * Remove all references to @q from the block cgroup controller before
	 * restoring @q->queue_lock to avoid that restoring this pointer causes
	 * e.g. blkcg_print_blkgs() to crash.
	 */
	blkcg_exit_queue(q);

	/*
	 * Since the cgroup code may dereference the @q->backing_dev_info
	 * pointer, only decrease its reference count after having removed the
	 * association with the block cgroup controller.
	 */
	bdi_put(q->backing_dev_info);

	if (q->mq_ops)
		blk_mq_free_queue(q);
	percpu_ref_exit(&q->q_usage_counter);

	spin_lock_irq(lock);
	if (q->queue_lock != &q->__queue_lock)
		q->queue_lock = &q->__queue_lock;
	spin_unlock_irq(lock);

	/* @q is and will stay empty, shutdown and put */
	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);
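
/*
 * Illustrative sketch (not part of the original file): a driver's remove path
 * pairs every successful queue allocation/initialization with a single
 * blk_cleanup_queue() call, which drops the allocation's reference itself.
 * The function name is hypothetical.
 */
static __maybe_unused void example_remove_device(struct request_queue *q)
{
	/* stop accepting new I/O, fail what is already queued, drop the queue */
	blk_cleanup_queue(q);
	/* after this point the driver must not touch @q again */
}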

/* Allocate memory local to the request queue */
static void *alloc_request_simple(gfp_t gfp_mask, void *data)
{
	struct request_queue *q = data;

	return kmem_cache_alloc_node(request_cachep, gfp_mask, q->node);
}

static void free_request_simple(void *element, void *data)
{
	kmem_cache_free(request_cachep, element);
}

static void *alloc_request_size(gfp_t gfp_mask, void *data)
{
	struct request_queue *q = data;
	struct request *rq;

	rq = kmalloc_node(sizeof(struct request) + q->cmd_size, gfp_mask,
			q->node);
	if (rq && q->init_rq_fn && q->init_rq_fn(q, rq, gfp_mask) < 0) {
		kfree(rq);
		rq = NULL;
	}
	return rq;
}

static void free_request_size(void *element, void *data)
{
	struct request_queue *q = data;

	if (q->exit_rq_fn)
		q->exit_rq_fn(q, element);
	kfree(element);
}

int blk_init_rl(struct request_list *rl, struct request_queue *q,
		gfp_t gfp_mask)
{
	if (unlikely(rl->rq_pool) || q->mq_ops)
		return 0;

	rl->q = q;
	rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
	rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
	init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
	init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);

	if (q->cmd_size) {
		rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
				alloc_request_size, free_request_size,
				q, gfp_mask, q->node);
	} else {
		rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
				alloc_request_simple, free_request_simple,
				q, gfp_mask, q->node);
	}
	if (!rl->rq_pool)
		return -ENOMEM;

	if (rl != &q->root_rl)
		WARN_ON_ONCE(!blk_get_queue(q));

	return 0;
}

void blk_exit_rl(struct request_queue *q, struct request_list *rl)
{
	if (rl->rq_pool) {
		mempool_destroy(rl->rq_pool);
		if (rl != &q->root_rl)
			blk_put_queue(q);
	}
}

struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
	return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE, NULL);
}
EXPORT_SYMBOL(blk_alloc_queue);

/**
 * blk_queue_enter() - try to increase q->q_usage_counter
 * @q: request queue pointer
 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PREEMPT
 */
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
	const bool preempt = flags & BLK_MQ_REQ_PREEMPT;

	while (true) {
		bool success = false;

		rcu_read_lock();
		if (percpu_ref_tryget_live(&q->q_usage_counter)) {
			/*
			 * The code that sets the PREEMPT_ONLY flag is
			 * responsible for ensuring that that flag is globally
			 * visible before the queue is unfrozen.
			 */
			if (preempt || !blk_queue_preempt_only(q)) {
				success = true;
			} else {
				percpu_ref_put(&q->q_usage_counter);
			}
		}
		rcu_read_unlock();

		if (success)
			return 0;

		if (flags & BLK_MQ_REQ_NOWAIT)
			return -EBUSY;

		/*
		 * read pair of barrier in blk_freeze_queue_start(),
		 * we need to order reading __PERCPU_REF_DEAD flag of
		 * .q_usage_counter and reading .mq_freeze_depth or
		 * queue dying flag, otherwise the following wait may
		 * never return if the two reads are reordered.
		 */
		smp_rmb();

		wait_event(q->mq_freeze_wq,
			   (atomic_read(&q->mq_freeze_depth) == 0 &&
			    (preempt || !blk_queue_preempt_only(q))) ||
			   blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
	}
}

void blk_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->q_usage_counter);
}
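
/*
 * Illustrative sketch (not part of the original file): the enter/exit pair
 * that submission paths use to hold a q_usage_counter reference, keeping the
 * queue from being frozen or torn down while work is queued against it. The
 * function name is hypothetical.
 */
static __maybe_unused int example_with_queue_ref(struct request_queue *q)
{
	int ret;

	/* fail fast instead of sleeping if the queue is frozen or preempt-only */
	ret = blk_queue_enter(q, BLK_MQ_REQ_NOWAIT);
	if (ret)
		return ret;	/* -EBUSY or -ENODEV */

	/* ... safe to queue work against @q here ... */

	blk_queue_exit(q);
	return 0;
}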

static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, q_usage_counter);

	wake_up_all(&q->mq_freeze_wq);
}

static void blk_rq_timed_out_timer(struct timer_list *t)
{
	struct request_queue *q = from_timer(q, t, timeout);

	kblockd_schedule_work(&q->timeout_work);
}

/**
 * blk_alloc_queue_node - allocate a request queue
 * @gfp_mask: memory allocation flags
 * @node_id: NUMA node to allocate memory from
 * @lock: For legacy queues, pointer to a spinlock that will be used to e.g.
 *        serialize calls to the legacy .request_fn() callback. Ignored for
 *        blk-mq request queues.
 *
 * Note: pass the queue lock as the third argument to this function instead of
 * setting the queue lock pointer explicitly to avoid triggering a sporadic
 * crash in the blkcg code. This function namely calls blkcg_init_queue() and
 * the queue lock pointer must be set before blkcg_init_queue() is called.
 */
struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
					   spinlock_t *lock)
{
	struct request_queue *q;
	int ret;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				gfp_mask | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
	if (q->id < 0)
		goto fail_q;

	ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
	if (ret)
		goto fail_id;

	q->backing_dev_info = bdi_alloc_node(gfp_mask, node_id);
	if (!q->backing_dev_info)
		goto fail_split;

	q->stats = blk_alloc_queue_stats();
	if (!q->stats)
		goto fail_stats;

	q->backing_dev_info->ra_pages =
			(VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
	q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
	q->backing_dev_info->name = "block";
	q->node = node_id;

	timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
		    laptop_mode_timer_fn, 0);
	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
	INIT_WORK(&q->timeout_work, NULL);
	INIT_LIST_HEAD(&q->queue_head);
	INIT_LIST_HEAD(&q->timeout_list);
	INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
	INIT_LIST_HEAD(&q->blkg_list);
#endif
	INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);

	kobject_init(&q->kobj, &blk_queue_ktype);

#ifdef CONFIG_BLK_DEV_IO_TRACE
	mutex_init(&q->blk_trace_mutex);
#endif
	mutex_init(&q->sysfs_lock);
	spin_lock_init(&q->__queue_lock);

	if (!q->mq_ops)
		q->queue_lock = lock ? : &q->__queue_lock;

	/*
	 * A queue starts its life with bypass turned on to avoid
	 * unnecessary bypass on/off overhead and nasty surprises during
	 * init.  The initial bypass will be finished when the queue is
	 * registered by blk_register_queue().
	 */
	q->bypass_depth = 1;
	queue_flag_set_unlocked(QUEUE_FLAG_BYPASS, q);

	init_waitqueue_head(&q->mq_freeze_wq);

	/*
	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
	 * See blk_register_queue() for details.
	 */
	if (percpu_ref_init(&q->q_usage_counter,
				blk_queue_usage_counter_release,
				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
		goto fail_bdi;

	if (blkcg_init_queue(q))
		goto fail_ref;

	return q;

fail_ref:
	percpu_ref_exit(&q->q_usage_counter);
fail_bdi:
	blk_free_queue_stats(q->stats);
fail_stats:
	bdi_put(q->backing_dev_info);
fail_split:
	bioset_exit(&q->bio_split);
fail_id:
	ida_simple_remove(&blk_queue_ida, q->id);
fail_q:
	kmem_cache_free(blk_requestq_cachep, q);
	return NULL;
}
EXPORT_SYMBOL(blk_alloc_queue_node);

/**
 * blk_init_queue - prepare a request queue for use with a block device
 * @rfn:  The function to be called to process requests that have been
 *        placed on the queue.
 * @lock: Request queue spin lock
 *
 * Description:
 *    If a block device wishes to use the standard request handling procedures,
 *    which sorts requests and coalesces adjacent requests, then it must
 *    call blk_init_queue().  The function @rfn will be called when there
 *    are requests on the queue that need to be processed.  If the device
 *    supports plugging, then @rfn may not be called immediately when requests
 *    are available on the queue, but may be called at some time later instead.
 *    Plugged queues are generally unplugged when a buffer belonging to one
 *    of the requests on the queue is needed, or due to memory pressure.
 *
 *    @rfn is not required, or even expected, to remove all requests off the
 *    queue, but only as many as it can handle at a time.  If it does leave
 *    requests on the queue, it is responsible for arranging that the requests
 *    get dealt with eventually.
 *
 *    The queue spin lock must be held while manipulating the requests on the
 *    request queue; this lock will be taken also from interrupt context, so irq
 *    disabling is needed for it.
 *
 *    Function returns a pointer to the initialized request queue, or %NULL if
 *    it didn't succeed.
 *
 * Note:
 *    blk_init_queue() must be paired with a blk_cleanup_queue() call
 *    when the block device is deactivated (such as at module unload).
 **/

struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
	return blk_init_queue_node(rfn, lock, NUMA_NO_NODE);
}
EXPORT_SYMBOL(blk_init_queue);

struct request_queue *
blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
{
	struct request_queue *q;

	q = blk_alloc_queue_node(GFP_KERNEL, node_id, lock);
	if (!q)
		return NULL;

	q->request_fn = rfn;
	if (blk_init_allocated_queue(q) < 0) {
		blk_cleanup_queue(q);
		return NULL;
	}

	return q;
}
EXPORT_SYMBOL(blk_init_queue_node);
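
/*
 * Illustrative sketch (not part of the original file): minimal legacy
 * (single-queue) driver setup using blk_init_queue(). The request function,
 * spinlock and helper names are hypothetical; a real driver would also set
 * up a gendisk and queue limits, and call blk_cleanup_queue() on removal.
 */
static DEFINE_SPINLOCK(example_queue_lock);

static void example_do_request(struct request_queue *q)
{
	/* called with example_queue_lock held; fetch and service requests here */
}

static __maybe_unused struct request_queue *example_setup_legacy_queue(void)
{
	struct request_queue *q;

	q = blk_init_queue(example_do_request, &example_queue_lock);
	if (!q)
		return NULL;	/* allocation or elevator init failed */
	return q;
}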
1150
dece1635 1151static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio);
336b7e1f 1152
1da177e4 1153
5ea708d1
CH
1154int blk_init_allocated_queue(struct request_queue *q)
1155{
332ebbf7
BVA
1156 WARN_ON_ONCE(q->mq_ops);
1157
6d247d7f 1158 q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size);
ba483388 1159 if (!q->fq)
5ea708d1 1160 return -ENOMEM;
7982e90c 1161
6d247d7f
CH
1162 if (q->init_rq_fn && q->init_rq_fn(q, q->fq->flush_rq, GFP_KERNEL))
1163 goto out_free_flush_queue;
7982e90c 1164
a051661c 1165 if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
6d247d7f 1166 goto out_exit_flush_rq;
1da177e4 1167
287922eb 1168 INIT_WORK(&q->timeout_work, blk_timeout_work);
60ea8226 1169 q->queue_flags |= QUEUE_FLAG_DEFAULT;
c94a96ac 1170
f3b144aa
JA
1171 /*
1172 * This also sets hw/phys segments, boundary and size
1173 */
c20e8de2 1174 blk_queue_make_request(q, blk_queue_bio);
1da177e4 1175
44ec9542
AS
1176 q->sg_reserved_size = INT_MAX;
1177
acddf3b3 1178 if (elevator_init(q))
6d247d7f 1179 goto out_exit_flush_rq;
5ea708d1 1180 return 0;
eb1c160b 1181
6d247d7f
CH
1182out_exit_flush_rq:
1183 if (q->exit_rq_fn)
1184 q->exit_rq_fn(q, q->fq->flush_rq);
1185out_free_flush_queue:
ba483388 1186 blk_free_flush_queue(q->fq);
5ea708d1 1187 return -ENOMEM;
1da177e4 1188}
5151412d 1189EXPORT_SYMBOL(blk_init_allocated_queue);
1da177e4 1190
09ac46c4 1191bool blk_get_queue(struct request_queue *q)
1da177e4 1192{
3f3299d5 1193 if (likely(!blk_queue_dying(q))) {
09ac46c4
TH
1194 __blk_get_queue(q);
1195 return true;
1da177e4
LT
1196 }
1197
09ac46c4 1198 return false;
1da177e4 1199}
d86e0e83 1200EXPORT_SYMBOL(blk_get_queue);
1da177e4 1201
5b788ce3 1202static inline void blk_free_request(struct request_list *rl, struct request *rq)
1da177e4 1203{
e8064021 1204 if (rq->rq_flags & RQF_ELVPRIV) {
5b788ce3 1205 elv_put_request(rl->q, rq);
f1f8cc94 1206 if (rq->elv.icq)
11a3122f 1207 put_io_context(rq->elv.icq->ioc);
f1f8cc94
TH
1208 }
1209
5b788ce3 1210 mempool_free(rq, rl->rq_pool);
1da177e4
LT
1211}
1212
1da177e4
LT
1213/*
1214 * ioc_batching returns true if the ioc is a valid batching request and
1215 * should be given priority access to a request.
1216 */
165125e1 1217static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
1da177e4
LT
1218{
1219 if (!ioc)
1220 return 0;
1221
1222 /*
1223 * Make sure the process is able to allocate at least 1 request
1224 * even if the batch times out, otherwise we could theoretically
1225 * lose wakeups.
1226 */
1227 return ioc->nr_batch_requests == q->nr_batching ||
1228 (ioc->nr_batch_requests > 0
1229 && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
1230}
1231
1232/*
1233 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
1234 * will cause the process to be a "batcher" on all queues in the system. This
1235 * is the behaviour we want though - once it gets a wakeup it should be given
1236 * a nice run.
1237 */
165125e1 1238static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
1da177e4
LT
1239{
1240 if (!ioc || ioc_batching(q, ioc))
1241 return;
1242
1243 ioc->nr_batch_requests = q->nr_batching;
1244 ioc->last_waited = jiffies;
1245}
1246
5b788ce3 1247static void __freed_request(struct request_list *rl, int sync)
1da177e4 1248{
5b788ce3 1249 struct request_queue *q = rl->q;
1da177e4 1250
d40f75a0
TH
1251 if (rl->count[sync] < queue_congestion_off_threshold(q))
1252 blk_clear_congested(rl, sync);
1da177e4 1253
1faa16d2
JA
1254 if (rl->count[sync] + 1 <= q->nr_requests) {
1255 if (waitqueue_active(&rl->wait[sync]))
1256 wake_up(&rl->wait[sync]);
1da177e4 1257
5b788ce3 1258 blk_clear_rl_full(rl, sync);
1da177e4
LT
1259 }
1260}
1261
1262/*
1263 * A request has just been released. Account for it, update the full and
1264 * congestion status, wake up any waiters. Called under q->queue_lock.
1265 */
e8064021
CH
1266static void freed_request(struct request_list *rl, bool sync,
1267 req_flags_t rq_flags)
1da177e4 1268{
5b788ce3 1269 struct request_queue *q = rl->q;
1da177e4 1270
8a5ecdd4 1271 q->nr_rqs[sync]--;
1faa16d2 1272 rl->count[sync]--;
e8064021 1273 if (rq_flags & RQF_ELVPRIV)
8a5ecdd4 1274 q->nr_rqs_elvpriv--;
1da177e4 1275
5b788ce3 1276 __freed_request(rl, sync);
1da177e4 1277
1faa16d2 1278 if (unlikely(rl->starved[sync ^ 1]))
5b788ce3 1279 __freed_request(rl, sync ^ 1);
1da177e4
LT
1280}
1281
e3a2b3f9
JA
1282int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
1283{
1284 struct request_list *rl;
d40f75a0 1285 int on_thresh, off_thresh;
e3a2b3f9 1286
332ebbf7
BVA
1287 WARN_ON_ONCE(q->mq_ops);
1288
e3a2b3f9
JA
1289 spin_lock_irq(q->queue_lock);
1290 q->nr_requests = nr;
1291 blk_queue_congestion_threshold(q);
d40f75a0
TH
1292 on_thresh = queue_congestion_on_threshold(q);
1293 off_thresh = queue_congestion_off_threshold(q);
e3a2b3f9 1294
d40f75a0
TH
1295 blk_queue_for_each_rl(rl, q) {
1296 if (rl->count[BLK_RW_SYNC] >= on_thresh)
1297 blk_set_congested(rl, BLK_RW_SYNC);
1298 else if (rl->count[BLK_RW_SYNC] < off_thresh)
1299 blk_clear_congested(rl, BLK_RW_SYNC);
e3a2b3f9 1300
d40f75a0
TH
1301 if (rl->count[BLK_RW_ASYNC] >= on_thresh)
1302 blk_set_congested(rl, BLK_RW_ASYNC);
1303 else if (rl->count[BLK_RW_ASYNC] < off_thresh)
1304 blk_clear_congested(rl, BLK_RW_ASYNC);
e3a2b3f9 1305
e3a2b3f9
JA
1306 if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
1307 blk_set_rl_full(rl, BLK_RW_SYNC);
1308 } else {
1309 blk_clear_rl_full(rl, BLK_RW_SYNC);
1310 wake_up(&rl->wait[BLK_RW_SYNC]);
1311 }
1312
1313 if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
1314 blk_set_rl_full(rl, BLK_RW_ASYNC);
1315 } else {
1316 blk_clear_rl_full(rl, BLK_RW_ASYNC);
1317 wake_up(&rl->wait[BLK_RW_ASYNC]);
1318 }
1319 }
1320
1321 spin_unlock_irq(q->queue_lock);
1322 return 0;
1323}
1324
da8303c6 1325/**
a06e05e6 1326 * __get_request - get a free request
5b788ce3 1327 * @rl: request list to allocate from
ef295ecf 1328 * @op: operation and flags
da8303c6 1329 * @bio: bio to allocate request for (can be %NULL)
6a15674d 1330 * @flags: BLQ_MQ_REQ_* flags
4accf5fc 1331 * @gfp_mask: allocator flags
da8303c6
TH
1332 *
1333 * Get a free request from @q. This function may fail under memory
1334 * pressure or if @q is dead.
1335 *
da3dae54 1336 * Must be called with @q->queue_lock held and,
a492f075
JL
1337 * Returns ERR_PTR on failure, with @q->queue_lock held.
1338 * Returns request pointer on success, with @q->queue_lock *not held*.
1da177e4 1339 */
ef295ecf 1340static struct request *__get_request(struct request_list *rl, unsigned int op,
4accf5fc 1341 struct bio *bio, blk_mq_req_flags_t flags, gfp_t gfp_mask)
1da177e4 1342{
5b788ce3 1343 struct request_queue *q = rl->q;
b679281a 1344 struct request *rq;
7f4b35d1
TH
1345 struct elevator_type *et = q->elevator->type;
1346 struct io_context *ioc = rq_ioc(bio);
f1f8cc94 1347 struct io_cq *icq = NULL;
ef295ecf 1348 const bool is_sync = op_is_sync(op);
75eb6c37 1349 int may_queue;
e8064021 1350 req_flags_t rq_flags = RQF_ALLOCED;
88ee5ef1 1351
2fff8a92
BVA
1352 lockdep_assert_held(q->queue_lock);
1353
3f3299d5 1354 if (unlikely(blk_queue_dying(q)))
a492f075 1355 return ERR_PTR(-ENODEV);
da8303c6 1356
ef295ecf 1357 may_queue = elv_may_queue(q, op);
88ee5ef1
JA
1358 if (may_queue == ELV_MQUEUE_NO)
1359 goto rq_starved;
1360
1faa16d2
JA
1361 if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
1362 if (rl->count[is_sync]+1 >= q->nr_requests) {
88ee5ef1
JA
1363 /*
1364 * The queue will fill after this allocation, so set
1365 * it as full, and mark this process as "batching".
1366 * This process will be allowed to complete a batch of
1367 * requests, others will be blocked.
1368 */
5b788ce3 1369 if (!blk_rl_full(rl, is_sync)) {
88ee5ef1 1370 ioc_set_batching(q, ioc);
5b788ce3 1371 blk_set_rl_full(rl, is_sync);
88ee5ef1
JA
1372 } else {
1373 if (may_queue != ELV_MQUEUE_MUST
1374 && !ioc_batching(q, ioc)) {
1375 /*
1376 * The queue is full and the allocating
1377 * process is not a "batcher", and not
1378 * exempted by the IO scheduler
1379 */
a492f075 1380 return ERR_PTR(-ENOMEM);
88ee5ef1
JA
1381 }
1382 }
1da177e4 1383 }
d40f75a0 1384 blk_set_congested(rl, is_sync);
1da177e4
LT
1385 }
1386
082cf69e
JA
1387 /*
1388 * Only allow batching queuers to allocate up to 50% over the defined
1389 * limit of requests, otherwise we could have thousands of requests
1390 * allocated with any setting of ->nr_requests
1391 */
1faa16d2 1392 if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
a492f075 1393 return ERR_PTR(-ENOMEM);
fd782a4a 1394
8a5ecdd4 1395 q->nr_rqs[is_sync]++;
1faa16d2
JA
1396 rl->count[is_sync]++;
1397 rl->starved[is_sync] = 0;
cb98fc8b 1398
f1f8cc94
TH
1399 /*
1400 * Decide whether the new request will be managed by elevator. If
e8064021 1401 * so, mark @rq_flags and increment elvpriv. Non-zero elvpriv will
f1f8cc94
TH
1402 * prevent the current elevator from being destroyed until the new
1403 * request is freed. This guarantees icq's won't be destroyed and
1404 * makes creating new ones safe.
1405 *
e6f7f93d
CH
1406 * Flush requests do not use the elevator so skip initialization.
1407 * This allows a request to share the flush and elevator data.
1408 *
f1f8cc94
TH
1409 * Also, lookup icq while holding queue_lock. If it doesn't exist,
1410 * it will be created after releasing queue_lock.
1411 */
e6f7f93d 1412 if (!op_is_flush(op) && !blk_queue_bypass(q)) {
e8064021 1413 rq_flags |= RQF_ELVPRIV;
8a5ecdd4 1414 q->nr_rqs_elvpriv++;
f1f8cc94
TH
1415 if (et->icq_cache && ioc)
1416 icq = ioc_lookup_icq(ioc, q);
9d5a4e94 1417 }
cb98fc8b 1418
f253b86b 1419 if (blk_queue_io_stat(q))
e8064021 1420 rq_flags |= RQF_IO_STAT;
1da177e4
LT
1421 spin_unlock_irq(q->queue_lock);
1422
29e2b09a 1423 /* allocate and init request */
5b788ce3 1424 rq = mempool_alloc(rl->rq_pool, gfp_mask);
29e2b09a 1425 if (!rq)
b679281a 1426 goto fail_alloc;
1da177e4 1427
29e2b09a 1428 blk_rq_init(q, rq);
a051661c 1429 blk_rq_set_rl(rq, rl);
ef295ecf 1430 rq->cmd_flags = op;
e8064021 1431 rq->rq_flags = rq_flags;
1b6d65a0
BVA
1432 if (flags & BLK_MQ_REQ_PREEMPT)
1433 rq->rq_flags |= RQF_PREEMPT;
29e2b09a 1434
aaf7c680 1435 /* init elvpriv */
e8064021 1436 if (rq_flags & RQF_ELVPRIV) {
aaf7c680 1437 if (unlikely(et->icq_cache && !icq)) {
7f4b35d1
TH
1438 if (ioc)
1439 icq = ioc_create_icq(ioc, q, gfp_mask);
aaf7c680
TH
1440 if (!icq)
1441 goto fail_elvpriv;
29e2b09a 1442 }
aaf7c680
TH
1443
1444 rq->elv.icq = icq;
1445 if (unlikely(elv_set_request(q, rq, bio, gfp_mask)))
1446 goto fail_elvpriv;
1447
1448 /* @rq->elv.icq holds io_context until @rq is freed */
29e2b09a
TH
1449 if (icq)
1450 get_io_context(icq->ioc);
1451 }
aaf7c680 1452out:
88ee5ef1
JA
1453 /*
1454 * ioc may be NULL here, and ioc_batching will be false. That's
1455 * OK, if the queue is under the request limit then requests need
1456 * not count toward the nr_batch_requests limit. There will always
1457 * be some limit enforced by BLK_BATCH_TIME.
1458 */
1da177e4
LT
1459 if (ioc_batching(q, ioc))
1460 ioc->nr_batch_requests--;
6728cb0e 1461
e6a40b09 1462 trace_block_getrq(q, bio, op);
1da177e4 1463 return rq;
b679281a 1464
aaf7c680
TH
1465fail_elvpriv:
1466 /*
1467 * elvpriv init failed. ioc, icq and elvpriv aren't mempool backed
1468 * and may fail indefinitely under memory pressure and thus
1469 * shouldn't stall IO. Treat this request as !elvpriv. This will
1470 * disturb iosched and blkcg but weird is bettern than dead.
1471 */
7b2b10e0 1472 printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n",
dc3b17cc 1473 __func__, dev_name(q->backing_dev_info->dev));
aaf7c680 1474
e8064021 1475 rq->rq_flags &= ~RQF_ELVPRIV;
aaf7c680
TH
1476 rq->elv.icq = NULL;
1477
1478 spin_lock_irq(q->queue_lock);
8a5ecdd4 1479 q->nr_rqs_elvpriv--;
aaf7c680
TH
1480 spin_unlock_irq(q->queue_lock);
1481 goto out;
1482
b679281a
TH
1483fail_alloc:
1484 /*
1485 * Allocation failed presumably due to memory. Undo anything we
1486 * might have messed up.
1487 *
1488 * Allocating task should really be put onto the front of the wait
1489 * queue, but this is pretty rare.
1490 */
1491 spin_lock_irq(q->queue_lock);
e8064021 1492 freed_request(rl, is_sync, rq_flags);
b679281a
TH
1493
1494 /*
1495 * in the very unlikely event that allocation failed and no
1496 * requests for this direction was pending, mark us starved so that
1497 * freeing of a request in the other direction will notice
1498 * us. another possible fix would be to split the rq mempool into
1499 * READ and WRITE
1500 */
1501rq_starved:
1502 if (unlikely(rl->count[is_sync] == 0))
1503 rl->starved[is_sync] = 1;
a492f075 1504 return ERR_PTR(-ENOMEM);
1da177e4
LT
1505}
1506
da8303c6 1507/**
a06e05e6 1508 * get_request - get a free request
da8303c6 1509 * @q: request_queue to allocate request from
ef295ecf 1510 * @op: operation and flags
da8303c6 1511 * @bio: bio to allocate request for (can be %NULL)
6a15674d 1512 * @flags: BLK_MQ_REQ_* flags.
4accf5fc 1513 * @gfp: allocator flags
da8303c6 1514 *
a9a14d36 1515 * Get a free request from @q. If %BLK_MQ_REQ_NOWAIT is set in @flags,
d0164adc 1516 * this function keeps retrying under memory pressure and fails iff @q is dead.
d6344532 1517 *
da3dae54 1518 * Must be called with @q->queue_lock held and,
a492f075
JL
1519 * Returns ERR_PTR on failure, with @q->queue_lock held.
1520 * Returns request pointer on success, with @q->queue_lock *not held*.
1da177e4 1521 */
ef295ecf 1522static struct request *get_request(struct request_queue *q, unsigned int op,
4accf5fc 1523 struct bio *bio, blk_mq_req_flags_t flags, gfp_t gfp)
1da177e4 1524{
ef295ecf 1525 const bool is_sync = op_is_sync(op);
a06e05e6 1526 DEFINE_WAIT(wait);
a051661c 1527 struct request_list *rl;
1da177e4 1528 struct request *rq;
a051661c 1529
2fff8a92 1530 lockdep_assert_held(q->queue_lock);
332ebbf7 1531 WARN_ON_ONCE(q->mq_ops);
2fff8a92 1532
a051661c 1533 rl = blk_get_rl(q, bio); /* transferred to @rq on success */
a06e05e6 1534retry:
4accf5fc 1535 rq = __get_request(rl, op, bio, flags, gfp);
a492f075 1536 if (!IS_ERR(rq))
a06e05e6 1537 return rq;
1da177e4 1538
03a07c92
GR
1539 if (op & REQ_NOWAIT) {
1540 blk_put_rl(rl);
1541 return ERR_PTR(-EAGAIN);
1542 }
1543
6a15674d 1544 if ((flags & BLK_MQ_REQ_NOWAIT) || unlikely(blk_queue_dying(q))) {
a051661c 1545 blk_put_rl(rl);
a492f075 1546 return rq;
a051661c 1547 }
1da177e4 1548
a06e05e6
TH
1549 /* wait on @rl and retry */
1550 prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
1551 TASK_UNINTERRUPTIBLE);
1da177e4 1552
e6a40b09 1553 trace_block_sleeprq(q, bio, op);
1da177e4 1554
a06e05e6
TH
1555 spin_unlock_irq(q->queue_lock);
1556 io_schedule();
d6344532 1557
a06e05e6
TH
1558 /*
1559 * After sleeping, we become a "batching" process and will be able
1560 * to allocate at least one request, and up to a big batch of them
1561 * for a small period time. See ioc_batching, ioc_set_batching
1562 */
a06e05e6 1563 ioc_set_batching(q, current->io_context);
05caf8db 1564
a06e05e6
TH
1565 spin_lock_irq(q->queue_lock);
1566 finish_wait(&rl->wait[is_sync], &wait);
1da177e4 1567
a06e05e6 1568 goto retry;
1da177e4
LT
1569}
1570
6a15674d 1571/* flags: BLK_MQ_REQ_PREEMPT and/or BLK_MQ_REQ_NOWAIT. */
cd6ce148 1572static struct request *blk_old_get_request(struct request_queue *q,
9a95e4ef 1573 unsigned int op, blk_mq_req_flags_t flags)
1da177e4
LT
1574{
1575 struct request *rq;
c3036021 1576 gfp_t gfp_mask = flags & BLK_MQ_REQ_NOWAIT ? GFP_ATOMIC : GFP_NOIO;
055f6e18 1577 int ret = 0;
1da177e4 1578
332ebbf7
BVA
1579 WARN_ON_ONCE(q->mq_ops);
1580
7f4b35d1
TH
1581 /* create ioc upfront */
1582 create_io_context(gfp_mask, q->node);
1583
3a0a5299 1584 ret = blk_queue_enter(q, flags);
055f6e18
ML
1585 if (ret)
1586 return ERR_PTR(ret);
d6344532 1587 spin_lock_irq(q->queue_lock);
4accf5fc 1588 rq = get_request(q, op, NULL, flags, gfp_mask);
0c4de0f3 1589 if (IS_ERR(rq)) {
da8303c6 1590 spin_unlock_irq(q->queue_lock);
055f6e18 1591 blk_queue_exit(q);
0c4de0f3
CH
1592 return rq;
1593 }
1da177e4 1594
0c4de0f3
CH
1595 /* q->queue_lock is unlocked at this point */
1596 rq->__data_len = 0;
1597 rq->__sector = (sector_t) -1;
1598 rq->bio = rq->biotail = NULL;
1da177e4
LT
1599 return rq;
1600}
320ae51f 1601
6a15674d 1602/**
ff005a06 1603 * blk_get_request - allocate a request
6a15674d
BVA
1604 * @q: request queue to allocate a request for
1605 * @op: operation (REQ_OP_*) and REQ_* flags, e.g. REQ_SYNC.
1606 * @flags: BLK_MQ_REQ_* flags, e.g. BLK_MQ_REQ_NOWAIT.
1607 */
ff005a06
CH
1608struct request *blk_get_request(struct request_queue *q, unsigned int op,
1609 blk_mq_req_flags_t flags)
320ae51f 1610{
d280bab3
BVA
1611 struct request *req;
1612
6a15674d 1613 WARN_ON_ONCE(op & REQ_NOWAIT);
1b6d65a0 1614 WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PREEMPT));
6a15674d 1615
d280bab3 1616 if (q->mq_ops) {
6a15674d 1617 req = blk_mq_alloc_request(q, op, flags);
d280bab3
BVA
1618 if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
1619 q->mq_ops->initialize_rq_fn(req);
1620 } else {
6a15674d 1621 req = blk_old_get_request(q, op, flags);
d280bab3
BVA
1622 if (!IS_ERR(req) && q->initialize_rq_fn)
1623 q->initialize_rq_fn(req);
1624 }
1625
1626 return req;
320ae51f 1627}
1da177e4
LT
1628EXPORT_SYMBOL(blk_get_request);
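/*
 * Hypothetical caller sketch (not part of blk-core.c): allocating a
 * passthrough request, mapping a kernel buffer into it and executing it
 * synchronously. "mydev_send_cmd" and its minimal error handling are
 * assumptions for illustration; how the command result is read back is
 * driver specific.
 */
static int mydev_send_cmd(struct request_queue *q, void *buf, unsigned int len)
{
	struct request *rq;
	int ret;

	/* may sleep; only fails if the queue is dying (or with BLK_MQ_REQ_NOWAIT) */
	rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
	if (!ret)
		blk_execute_rq(q, NULL, rq, 0);	/* insert at the tail and wait */

	blk_put_request(rq);
	return ret;
}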
1629
1630/**
1631 * blk_requeue_request - put a request back on queue
1632 * @q: request queue where request should be inserted
1633 * @rq: request to be inserted
1634 *
1635 * Description:
 1636 * Drivers often keep queueing requests until the hardware cannot accept
 1637 * any more; when that happens we need to put the request back
 1638 * on the queue. Must be called with the queue lock held.
1639 */
165125e1 1640void blk_requeue_request(struct request_queue *q, struct request *rq)
1da177e4 1641{
2fff8a92 1642 lockdep_assert_held(q->queue_lock);
332ebbf7 1643 WARN_ON_ONCE(q->mq_ops);
2fff8a92 1644
242f9dcb
JA
1645 blk_delete_timer(rq);
1646 blk_clear_rq_complete(rq);
5f3ea37c 1647 trace_block_rq_requeue(q, rq);
a7905043 1648 rq_qos_requeue(q, rq);
2056a782 1649
e8064021 1650 if (rq->rq_flags & RQF_QUEUED)
1da177e4
LT
1651 blk_queue_end_tag(q, rq);
1652
ba396a6c
JB
1653 BUG_ON(blk_queued_rq(rq));
1654
1da177e4
LT
1655 elv_requeue_request(q, rq);
1656}
1da177e4
LT
1657EXPORT_SYMBOL(blk_requeue_request);
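/*
 * Hypothetical sketch (not part of this file): a legacy request_fn backing
 * off when the hardware has no free slots. "mydev_hw_busy", "mydev_issue"
 * and the 3ms retry delay are assumptions for illustration only.
 */
static void mydev_request_fn_busy_aware(struct request_queue *q)
{
	struct request *rq;

	/* called with q->queue_lock held */
	while ((rq = blk_fetch_request(q)) != NULL) {
		if (mydev_hw_busy(q->queuedata)) {
			/* give the dequeued request back and stop for now */
			blk_requeue_request(q, rq);
			blk_delay_queue(q, 3);	/* re-run the queue shortly */
			return;
		}
		mydev_issue(q->queuedata, rq);	/* completes asynchronously */
	}
}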
1658
73c10101
JA
1659static void add_acct_request(struct request_queue *q, struct request *rq,
1660 int where)
1661{
320ae51f 1662 blk_account_io_start(rq, true);
7eaceacc 1663 __elv_add_request(q, rq, where);
73c10101
JA
1664}
1665
d62e26b3 1666static void part_round_stats_single(struct request_queue *q, int cpu,
b8d62b3a
JA
1667 struct hd_struct *part, unsigned long now,
1668 unsigned int inflight)
074a7aca 1669{
b8d62b3a 1670 if (inflight) {
074a7aca 1671 __part_stat_add(cpu, part, time_in_queue,
b8d62b3a 1672 inflight * (now - part->stamp));
074a7aca
TH
1673 __part_stat_add(cpu, part, io_ticks, (now - part->stamp));
1674 }
1675 part->stamp = now;
1676}
1677
1678/**
496aa8a9 1679 * part_round_stats() - Round off the performance stats on a struct disk_stats.
d62e26b3 1680 * @q: target block queue
496aa8a9
RD
1681 * @cpu: cpu number for stats access
1682 * @part: target partition
1da177e4
LT
1683 *
1684 * The average IO queue length and utilisation statistics are maintained
1685 * by observing the current state of the queue length and the amount of
1686 * time it has been in this state for.
1687 *
1688 * Normally, that accounting is done on IO completion, but that can result
1689 * in more than a second's worth of IO being accounted for within any one
1690 * second, leading to >100% utilisation. To deal with that, we call this
1691 * function to do a round-off before returning the results when reading
1692 * /proc/diskstats. This accounts immediately for all queue usage up to
1693 * the current jiffies and restarts the counters again.
1694 */
d62e26b3 1695void part_round_stats(struct request_queue *q, int cpu, struct hd_struct *part)
6f2576af 1696{
b8d62b3a 1697 struct hd_struct *part2 = NULL;
6f2576af 1698 unsigned long now = jiffies;
b8d62b3a
JA
1699 unsigned int inflight[2];
1700 int stats = 0;
1701
1702 if (part->stamp != now)
1703 stats |= 1;
1704
1705 if (part->partno) {
1706 part2 = &part_to_disk(part)->part0;
1707 if (part2->stamp != now)
1708 stats |= 2;
1709 }
1710
1711 if (!stats)
1712 return;
1713
1714 part_in_flight(q, part, inflight);
6f2576af 1715
b8d62b3a
JA
1716 if (stats & 2)
1717 part_round_stats_single(q, cpu, part2, now, inflight[1]);
1718 if (stats & 1)
1719 part_round_stats_single(q, cpu, part, now, inflight[0]);
6f2576af 1720}
074a7aca 1721EXPORT_SYMBOL_GPL(part_round_stats);
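/*
 * Hypothetical reader sketch, modelled on (but not copied from) the
 * /proc/diskstats path: round the stats off first so that io_ticks and
 * time_in_queue include the interval since part->stamp.
 */
static void my_dump_part_stats(struct request_queue *q, struct hd_struct *part)
{
	int cpu = part_stat_lock();

	part_round_stats(q, cpu, part);
	pr_info("io_ticks=%lu time_in_queue=%lu\n",
		part_stat_read(part, io_ticks),
		part_stat_read(part, time_in_queue));
	part_stat_unlock();
}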
6f2576af 1722
47fafbc7 1723#ifdef CONFIG_PM
c8158819
LM
1724static void blk_pm_put_request(struct request *rq)
1725{
e8064021 1726 if (rq->q->dev && !(rq->rq_flags & RQF_PM) && !--rq->q->nr_pending)
c8158819
LM
1727 pm_runtime_mark_last_busy(rq->q->dev);
1728}
1729#else
1730static inline void blk_pm_put_request(struct request *rq) {}
1731#endif
1732
165125e1 1733void __blk_put_request(struct request_queue *q, struct request *req)
1da177e4 1734{
e8064021
CH
1735 req_flags_t rq_flags = req->rq_flags;
1736
1da177e4
LT
1737 if (unlikely(!q))
1738 return;
1da177e4 1739
6f5ba581
CH
1740 if (q->mq_ops) {
1741 blk_mq_free_request(req);
1742 return;
1743 }
1744
2fff8a92
BVA
1745 lockdep_assert_held(q->queue_lock);
1746
6cc77e9c 1747 blk_req_zone_write_unlock(req);
c8158819
LM
1748 blk_pm_put_request(req);
1749
8922e16c
TH
1750 elv_completed_request(q, req);
1751
1cd96c24
BH
1752 /* this is a bio leak */
1753 WARN_ON(req->bio != NULL);
1754
a7905043 1755 rq_qos_done(q, req);
87760e5e 1756
1da177e4
LT
1757 /*
 1758 * Request may not have originated from ll_rw_blk. If not,
1759 * it didn't come out of our reserved rq pools
1760 */
e8064021 1761 if (rq_flags & RQF_ALLOCED) {
a051661c 1762 struct request_list *rl = blk_rq_rl(req);
ef295ecf 1763 bool sync = op_is_sync(req->cmd_flags);
1da177e4 1764
1da177e4 1765 BUG_ON(!list_empty(&req->queuelist));
360f92c2 1766 BUG_ON(ELV_ON_HASH(req));
1da177e4 1767
a051661c 1768 blk_free_request(rl, req);
e8064021 1769 freed_request(rl, sync, rq_flags);
a051661c 1770 blk_put_rl(rl);
055f6e18 1771 blk_queue_exit(q);
1da177e4
LT
1772 }
1773}
6e39b69e
MC
1774EXPORT_SYMBOL_GPL(__blk_put_request);
1775
1da177e4
LT
1776void blk_put_request(struct request *req)
1777{
165125e1 1778 struct request_queue *q = req->q;
8922e16c 1779
320ae51f
JA
1780 if (q->mq_ops)
1781 blk_mq_free_request(req);
1782 else {
1783 unsigned long flags;
1784
1785 spin_lock_irqsave(q->queue_lock, flags);
1786 __blk_put_request(q, req);
1787 spin_unlock_irqrestore(q->queue_lock, flags);
1788 }
1da177e4 1789}
1da177e4
LT
1790EXPORT_SYMBOL(blk_put_request);
1791
320ae51f
JA
1792bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
1793 struct bio *bio)
73c10101 1794{
1eff9d32 1795 const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
73c10101 1796
73c10101
JA
1797 if (!ll_back_merge_fn(q, req, bio))
1798 return false;
1799
8c1cf6bb 1800 trace_block_bio_backmerge(q, req, bio);
73c10101
JA
1801
1802 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1803 blk_rq_set_mixed_merge(req);
1804
1805 req->biotail->bi_next = bio;
1806 req->biotail = bio;
4f024f37 1807 req->__data_len += bio->bi_iter.bi_size;
73c10101
JA
1808 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1809
320ae51f 1810 blk_account_io_start(req, false);
73c10101
JA
1811 return true;
1812}
1813
320ae51f
JA
1814bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
1815 struct bio *bio)
73c10101 1816{
1eff9d32 1817 const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
73c10101 1818
73c10101
JA
1819 if (!ll_front_merge_fn(q, req, bio))
1820 return false;
1821
8c1cf6bb 1822 trace_block_bio_frontmerge(q, req, bio);
73c10101
JA
1823
1824 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1825 blk_rq_set_mixed_merge(req);
1826
73c10101
JA
1827 bio->bi_next = req->bio;
1828 req->bio = bio;
1829
4f024f37
KO
1830 req->__sector = bio->bi_iter.bi_sector;
1831 req->__data_len += bio->bi_iter.bi_size;
73c10101
JA
1832 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1833
320ae51f 1834 blk_account_io_start(req, false);
73c10101
JA
1835 return true;
1836}
1837
1e739730
CH
1838bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
1839 struct bio *bio)
1840{
1841 unsigned short segments = blk_rq_nr_discard_segments(req);
1842
1843 if (segments >= queue_max_discard_segments(q))
1844 goto no_merge;
1845 if (blk_rq_sectors(req) + bio_sectors(bio) >
1846 blk_rq_get_max_sectors(req, blk_rq_pos(req)))
1847 goto no_merge;
1848
1849 req->biotail->bi_next = bio;
1850 req->biotail = bio;
1851 req->__data_len += bio->bi_iter.bi_size;
1852 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1853 req->nr_phys_segments = segments + 1;
1854
1855 blk_account_io_start(req, false);
1856 return true;
1857no_merge:
1858 req_set_nomerge(q, req);
1859 return false;
1860}
1861
bd87b589 1862/**
320ae51f 1863 * blk_attempt_plug_merge - try to merge with %current's plugged list
bd87b589
TH
1864 * @q: request_queue new bio is being queued at
1865 * @bio: new bio being queued
1866 * @request_count: out parameter for number of traversed plugged requests
ccc2600b
RD
1867 * @same_queue_rq: pointer to &struct request that gets filled in when
1868 * another request associated with @q is found on the plug list
1869 * (optional, may be %NULL)
bd87b589
TH
1870 *
1871 * Determine whether @bio being queued on @q can be merged with a request
1872 * on %current's plugged list. Returns %true if merge was successful,
1873 * otherwise %false.
1874 *
07c2bd37
TH
1875 * Plugging coalesces IOs from the same issuer for the same purpose without
1876 * going through @q->queue_lock. As such it's more of an issuing mechanism
 1877 * than scheduling, and the request, while it may have elvpriv data, is not
 1878 * added to the elevator at this point. In addition, we don't have
 1879 * reliable access to the elevator outside the queue lock. Only check basic
1880 * merging parameters without querying the elevator.
da41a589
RE
1881 *
1882 * Caller must ensure !blk_queue_nomerges(q) beforehand.
73c10101 1883 */
320ae51f 1884bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
5b3f341f
SL
1885 unsigned int *request_count,
1886 struct request **same_queue_rq)
73c10101
JA
1887{
1888 struct blk_plug *plug;
1889 struct request *rq;
92f399c7 1890 struct list_head *plug_list;
73c10101 1891
bd87b589 1892 plug = current->plug;
73c10101 1893 if (!plug)
34fe7c05 1894 return false;
56ebdaf2 1895 *request_count = 0;
73c10101 1896
92f399c7
SL
1897 if (q->mq_ops)
1898 plug_list = &plug->mq_list;
1899 else
1900 plug_list = &plug->list;
1901
1902 list_for_each_entry_reverse(rq, plug_list, queuelist) {
34fe7c05 1903 bool merged = false;
73c10101 1904
5b3f341f 1905 if (rq->q == q) {
1b2e19f1 1906 (*request_count)++;
5b3f341f
SL
1907 /*
 1908 * Only the blk-mq multiple hardware queues case checks the
 1909 * rq in the same queue; there should be only one such
 1910 * rq in a queue
1911 **/
1912 if (same_queue_rq)
1913 *same_queue_rq = rq;
1914 }
56ebdaf2 1915
07c2bd37 1916 if (rq->q != q || !blk_rq_merge_ok(rq, bio))
73c10101
JA
1917 continue;
1918
34fe7c05
CH
1919 switch (blk_try_merge(rq, bio)) {
1920 case ELEVATOR_BACK_MERGE:
1921 merged = bio_attempt_back_merge(q, rq, bio);
1922 break;
1923 case ELEVATOR_FRONT_MERGE:
1924 merged = bio_attempt_front_merge(q, rq, bio);
1925 break;
1e739730
CH
1926 case ELEVATOR_DISCARD_MERGE:
1927 merged = bio_attempt_discard_merge(q, rq, bio);
1928 break;
34fe7c05
CH
1929 default:
1930 break;
73c10101 1931 }
34fe7c05
CH
1932
1933 if (merged)
1934 return true;
73c10101 1935 }
34fe7c05
CH
1936
1937 return false;
73c10101
JA
1938}
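/*
 * Hypothetical submitter-side sketch (not from this file): plugging lets a
 * batch of bios be merged on current->plug before they ever take
 * q->queue_lock, which is exactly what blk_attempt_plug_merge() above
 * checks for. The bios array is an assumption for illustration.
 */
static void my_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);		/* install current->plug */
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]);	/* may merge into plugged requests */
	blk_finish_plug(&plug);		/* flush the plug list to the queue(s) */
}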
1939
0809e3ac
JM
1940unsigned int blk_plug_queued_count(struct request_queue *q)
1941{
1942 struct blk_plug *plug;
1943 struct request *rq;
1944 struct list_head *plug_list;
1945 unsigned int ret = 0;
1946
1947 plug = current->plug;
1948 if (!plug)
1949 goto out;
1950
1951 if (q->mq_ops)
1952 plug_list = &plug->mq_list;
1953 else
1954 plug_list = &plug->list;
1955
1956 list_for_each_entry(rq, plug_list, queuelist) {
1957 if (rq->q == q)
1958 ret++;
1959 }
1960out:
1961 return ret;
1962}
1963
da8d7f07 1964void blk_init_request_from_bio(struct request *req, struct bio *bio)
52d9e675 1965{
0be0dee6
BVA
1966 struct io_context *ioc = rq_ioc(bio);
1967
1eff9d32 1968 if (bio->bi_opf & REQ_RAHEAD)
a82afdfc 1969 req->cmd_flags |= REQ_FAILFAST_MASK;
b31dc66a 1970
4f024f37 1971 req->__sector = bio->bi_iter.bi_sector;
5dc8b362
AM
1972 if (ioprio_valid(bio_prio(bio)))
1973 req->ioprio = bio_prio(bio);
0be0dee6
BVA
1974 else if (ioc)
1975 req->ioprio = ioc->ioprio;
1976 else
1977 req->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
cb6934f8 1978 req->write_hint = bio->bi_write_hint;
bc1c56fd 1979 blk_rq_bio_prep(req->q, req, bio);
52d9e675 1980}
da8d7f07 1981EXPORT_SYMBOL_GPL(blk_init_request_from_bio);
52d9e675 1982
dece1635 1983static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
1da177e4 1984{
73c10101 1985 struct blk_plug *plug;
34fe7c05 1986 int where = ELEVATOR_INSERT_SORT;
e4d750c9 1987 struct request *req, *free;
56ebdaf2 1988 unsigned int request_count = 0;
1da177e4 1989
1da177e4
LT
1990 /*
1991 * low level driver can indicate that it wants pages above a
1992 * certain limit bounced to low memory (ie for highmem, or even
1993 * ISA dma in theory)
1994 */
1995 blk_queue_bounce(q, &bio);
1996
af67c31f 1997 blk_queue_split(q, &bio);
23688bf4 1998
e23947bd 1999 if (!bio_integrity_prep(bio))
dece1635 2000 return BLK_QC_T_NONE;
ffecfd1a 2001
f73f44eb 2002 if (op_is_flush(bio->bi_opf)) {
73c10101 2003 spin_lock_irq(q->queue_lock);
ae1b1539 2004 where = ELEVATOR_INSERT_FLUSH;
28e7d184
TH
2005 goto get_rq;
2006 }
2007
73c10101
JA
2008 /*
2009 * Check if we can merge with the plugged list before grabbing
2010 * any locks.
2011 */
0809e3ac
JM
2012 if (!blk_queue_nomerges(q)) {
2013 if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
dece1635 2014 return BLK_QC_T_NONE;
0809e3ac
JM
2015 } else
2016 request_count = blk_plug_queued_count(q);
1da177e4 2017
73c10101 2018 spin_lock_irq(q->queue_lock);
2056a782 2019
34fe7c05
CH
2020 switch (elv_merge(q, &req, bio)) {
2021 case ELEVATOR_BACK_MERGE:
2022 if (!bio_attempt_back_merge(q, req, bio))
2023 break;
2024 elv_bio_merged(q, req, bio);
2025 free = attempt_back_merge(q, req);
2026 if (free)
2027 __blk_put_request(q, free);
2028 else
2029 elv_merged_request(q, req, ELEVATOR_BACK_MERGE);
2030 goto out_unlock;
2031 case ELEVATOR_FRONT_MERGE:
2032 if (!bio_attempt_front_merge(q, req, bio))
2033 break;
2034 elv_bio_merged(q, req, bio);
2035 free = attempt_front_merge(q, req);
2036 if (free)
2037 __blk_put_request(q, free);
2038 else
2039 elv_merged_request(q, req, ELEVATOR_FRONT_MERGE);
2040 goto out_unlock;
2041 default:
2042 break;
1da177e4
LT
2043 }
2044
450991bc 2045get_rq:
c1c80384 2046 rq_qos_throttle(q, bio, q->queue_lock);
87760e5e 2047
1da177e4 2048 /*
450991bc 2049 * Grab a free request. This might sleep but cannot fail.
d6344532 2050 * Returns with the queue unlocked.
450991bc 2051 */
055f6e18 2052 blk_queue_enter_live(q);
c3036021 2053 req = get_request(q, bio->bi_opf, bio, 0, GFP_NOIO);
a492f075 2054 if (IS_ERR(req)) {
055f6e18 2055 blk_queue_exit(q);
c1c80384 2056 rq_qos_cleanup(q, bio);
4e4cbee9
CH
2057 if (PTR_ERR(req) == -ENOMEM)
2058 bio->bi_status = BLK_STS_RESOURCE;
2059 else
2060 bio->bi_status = BLK_STS_IOERR;
4246a0b6 2061 bio_endio(bio);
da8303c6
TH
2062 goto out_unlock;
2063 }
d6344532 2064
c1c80384 2065 rq_qos_track(q, req, bio);
87760e5e 2066
450991bc
NP
2067 /*
2068 * After dropping the lock and possibly sleeping here, our request
2069 * may now be mergeable after it had proven unmergeable (above).
2070 * We don't worry about that case for efficiency. It won't happen
2071 * often, and the elevators are able to handle it.
1da177e4 2072 */
da8d7f07 2073 blk_init_request_from_bio(req, bio);
1da177e4 2074
9562ad9a 2075 if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
11ccf116 2076 req->cpu = raw_smp_processor_id();
73c10101
JA
2077
2078 plug = current->plug;
721a9602 2079 if (plug) {
dc6d36c9
JA
2080 /*
2081 * If this is the first request added after a plug, fire
7aef2e78 2082 * off a plug trace.
0a6219a9
ML
2083 *
2084 * @request_count may become stale because of schedule
2085 * out, so check plug list again.
dc6d36c9 2086 */
0a6219a9 2087 if (!request_count || list_empty(&plug->list))
dc6d36c9 2088 trace_block_plug(q);
3540d5e8 2089 else {
50d24c34
SL
2090 struct request *last = list_entry_rq(plug->list.prev);
2091 if (request_count >= BLK_MAX_REQUEST_COUNT ||
2092 blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE) {
3540d5e8 2093 blk_flush_plug_list(plug, false);
019ceb7d
SL
2094 trace_block_plug(q);
2095 }
73c10101 2096 }
73c10101 2097 list_add_tail(&req->queuelist, &plug->list);
320ae51f 2098 blk_account_io_start(req, true);
73c10101
JA
2099 } else {
2100 spin_lock_irq(q->queue_lock);
2101 add_acct_request(q, req, where);
24ecfbe2 2102 __blk_run_queue(q);
73c10101
JA
2103out_unlock:
2104 spin_unlock_irq(q->queue_lock);
2105 }
dece1635
JA
2106
2107 return BLK_QC_T_NONE;
1da177e4
LT
2108}
2109
52c5e62d 2110static void handle_bad_sector(struct bio *bio, sector_t maxsector)
1da177e4
LT
2111{
2112 char b[BDEVNAME_SIZE];
2113
2114 printk(KERN_INFO "attempt to access beyond end of device\n");
6296b960 2115 printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
74d46992 2116 bio_devname(bio, b), bio->bi_opf,
f73a1c7d 2117 (unsigned long long)bio_end_sector(bio),
52c5e62d 2118 (long long)maxsector);
1da177e4
LT
2119}
2120
c17bb495
AM
2121#ifdef CONFIG_FAIL_MAKE_REQUEST
2122
2123static DECLARE_FAULT_ATTR(fail_make_request);
2124
2125static int __init setup_fail_make_request(char *str)
2126{
2127 return setup_fault_attr(&fail_make_request, str);
2128}
2129__setup("fail_make_request=", setup_fail_make_request);
2130
b2c9cd37 2131static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
c17bb495 2132{
b2c9cd37 2133 return part->make_it_fail && should_fail(&fail_make_request, bytes);
c17bb495
AM
2134}
2135
2136static int __init fail_make_request_debugfs(void)
2137{
dd48c085
AM
2138 struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
2139 NULL, &fail_make_request);
2140
21f9fcd8 2141 return PTR_ERR_OR_ZERO(dir);
c17bb495
AM
2142}
2143
2144late_initcall(fail_make_request_debugfs);
2145
2146#else /* CONFIG_FAIL_MAKE_REQUEST */
2147
b2c9cd37
AM
2148static inline bool should_fail_request(struct hd_struct *part,
2149 unsigned int bytes)
c17bb495 2150{
b2c9cd37 2151 return false;
c17bb495
AM
2152}
2153
2154#endif /* CONFIG_FAIL_MAKE_REQUEST */
2155
721c7fc7
ID
2156static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
2157{
2158 if (part->policy && op_is_write(bio_op(bio))) {
2159 char b[BDEVNAME_SIZE];
2160
2161 printk(KERN_ERR
2162 "generic_make_request: Trying to write "
2163 "to read-only block-device %s (partno %d)\n",
2164 bio_devname(bio, b), part->partno);
2165 return true;
2166 }
2167
2168 return false;
2169}
2170
30abb3a6
HM
2171static noinline int should_fail_bio(struct bio *bio)
2172{
2173 if (should_fail_request(&bio->bi_disk->part0, bio->bi_iter.bi_size))
2174 return -EIO;
2175 return 0;
2176}
2177ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);
2178
52c5e62d
CH
2179/*
2180 * Check whether this bio extends beyond the end of the device or partition.
2181 * This may well happen - the kernel calls bread() without checking the size of
2182 * the device, e.g., when mounting a file system.
2183 */
2184static inline int bio_check_eod(struct bio *bio, sector_t maxsector)
2185{
2186 unsigned int nr_sectors = bio_sectors(bio);
2187
2188 if (nr_sectors && maxsector &&
2189 (nr_sectors > maxsector ||
2190 bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
2191 handle_bad_sector(bio, maxsector);
2192 return -EIO;
2193 }
2194 return 0;
2195}
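/*
 * Worked example (illustrative numbers only): with maxsector == 1000 and a
 * bio of nr_sectors == 8 starting at bi_sector == 996, the second test
 * above fires because 996 > 1000 - 8 == 992, i.e. sectors 996..1003 would
 * run past the end of the device, so bio_check_eod() returns -EIO.
 */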
2196
74d46992
CH
2197/*
2198 * Remap block n of partition p to block n+start(p) of the disk.
2199 */
2200static inline int blk_partition_remap(struct bio *bio)
2201{
2202 struct hd_struct *p;
52c5e62d 2203 int ret = -EIO;
74d46992 2204
721c7fc7
ID
2205 rcu_read_lock();
2206 p = __disk_get_part(bio->bi_disk, bio->bi_partno);
52c5e62d
CH
2207 if (unlikely(!p))
2208 goto out;
2209 if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
2210 goto out;
2211 if (unlikely(bio_check_ro(bio, p)))
721c7fc7 2212 goto out;
721c7fc7 2213
74d46992
CH
2214 /*
2215 * Zone reset does not include bi_size so bio_sectors() is always 0.
2216 * Include a test for the reset op code and perform the remap if needed.
2217 */
52c5e62d
CH
2218 if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET) {
2219 if (bio_check_eod(bio, part_nr_sects_read(p)))
2220 goto out;
2221 bio->bi_iter.bi_sector += p->start_sect;
52c5e62d
CH
2222 trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
2223 bio->bi_iter.bi_sector - p->start_sect);
2224 }
c04fa44b 2225 bio->bi_partno = 0;
52c5e62d 2226 ret = 0;
721c7fc7
ID
2227out:
2228 rcu_read_unlock();
74d46992
CH
2229 return ret;
2230}
2231
27a84d54
CH
2232static noinline_for_stack bool
2233generic_make_request_checks(struct bio *bio)
1da177e4 2234{
165125e1 2235 struct request_queue *q;
5a7bbad2 2236 int nr_sectors = bio_sectors(bio);
4e4cbee9 2237 blk_status_t status = BLK_STS_IOERR;
5a7bbad2 2238 char b[BDEVNAME_SIZE];
1da177e4
LT
2239
2240 might_sleep();
1da177e4 2241
74d46992 2242 q = bio->bi_disk->queue;
5a7bbad2
CH
2243 if (unlikely(!q)) {
2244 printk(KERN_ERR
2245 "generic_make_request: Trying to access "
2246 "nonexistent block-device %s (%Lu)\n",
74d46992 2247 bio_devname(bio, b), (long long)bio->bi_iter.bi_sector);
5a7bbad2
CH
2248 goto end_io;
2249 }
c17bb495 2250
03a07c92
GR
2251 /*
2252 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
2253 * if queue is not a request based queue.
2254 */
03a07c92
GR
2255 if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_rq_based(q))
2256 goto not_supported;
2257
30abb3a6 2258 if (should_fail_bio(bio))
5a7bbad2 2259 goto end_io;
2056a782 2260
52c5e62d
CH
2261 if (bio->bi_partno) {
2262 if (unlikely(blk_partition_remap(bio)))
721c7fc7
ID
2263 goto end_io;
2264 } else {
52c5e62d
CH
2265 if (unlikely(bio_check_ro(bio, &bio->bi_disk->part0)))
2266 goto end_io;
2267 if (unlikely(bio_check_eod(bio, get_capacity(bio->bi_disk))))
721c7fc7
ID
2268 goto end_io;
2269 }
2056a782 2270
5a7bbad2
CH
2271 /*
 2272 * Filter flush bios early so that make_request based
2273 * drivers without flush support don't have to worry
2274 * about them.
2275 */
f3a8ab7d 2276 if (op_is_flush(bio->bi_opf) &&
c888a8f9 2277 !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
1eff9d32 2278 bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
5a7bbad2 2279 if (!nr_sectors) {
4e4cbee9 2280 status = BLK_STS_OK;
51fd77bd
JA
2281 goto end_io;
2282 }
5a7bbad2 2283 }
5ddfe969 2284
288dab8a
CH
2285 switch (bio_op(bio)) {
2286 case REQ_OP_DISCARD:
2287 if (!blk_queue_discard(q))
2288 goto not_supported;
2289 break;
2290 case REQ_OP_SECURE_ERASE:
2291 if (!blk_queue_secure_erase(q))
2292 goto not_supported;
2293 break;
2294 case REQ_OP_WRITE_SAME:
74d46992 2295 if (!q->limits.max_write_same_sectors)
288dab8a 2296 goto not_supported;
58886785 2297 break;
2d253440
ST
2298 case REQ_OP_ZONE_REPORT:
2299 case REQ_OP_ZONE_RESET:
74d46992 2300 if (!blk_queue_is_zoned(q))
2d253440 2301 goto not_supported;
288dab8a 2302 break;
a6f0788e 2303 case REQ_OP_WRITE_ZEROES:
74d46992 2304 if (!q->limits.max_write_zeroes_sectors)
a6f0788e
CK
2305 goto not_supported;
2306 break;
288dab8a
CH
2307 default:
2308 break;
5a7bbad2 2309 }
01edede4 2310
7f4b35d1
TH
2311 /*
2312 * Various block parts want %current->io_context and lazy ioc
2313 * allocation ends up trading a lot of pain for a small amount of
2314 * memory. Just allocate it upfront. This may fail and block
2315 * layer knows how to live with it.
2316 */
2317 create_io_context(GFP_ATOMIC, q->node);
2318
ae118896
TH
2319 if (!blkcg_bio_issue_check(q, bio))
2320 return false;
27a84d54 2321
fbbaf700
N
2322 if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
2323 trace_block_bio_queue(q, bio);
2324 /* Now that enqueuing has been traced, we need to trace
2325 * completion as well.
2326 */
2327 bio_set_flag(bio, BIO_TRACE_COMPLETION);
2328 }
27a84d54 2329 return true;
a7384677 2330
288dab8a 2331not_supported:
4e4cbee9 2332 status = BLK_STS_NOTSUPP;
a7384677 2333end_io:
4e4cbee9 2334 bio->bi_status = status;
4246a0b6 2335 bio_endio(bio);
27a84d54 2336 return false;
1da177e4
LT
2337}
2338
27a84d54
CH
2339/**
2340 * generic_make_request - hand a buffer to its device driver for I/O
2341 * @bio: The bio describing the location in memory and on the device.
2342 *
2343 * generic_make_request() is used to make I/O requests of block
2344 * devices. It is passed a &struct bio, which describes the I/O that needs
2345 * to be done.
2346 *
2347 * generic_make_request() does not return any status. The
2348 * success/failure status of the request, along with notification of
2349 * completion, is delivered asynchronously through the bio->bi_end_io
 2350 * function described (one day) elsewhere.
2351 *
2352 * The caller of generic_make_request must make sure that bi_io_vec
2353 * are set to describe the memory buffer, and that bi_dev and bi_sector are
2354 * set to describe the device address, and the
2355 * bi_end_io and optionally bi_private are set to describe how
2356 * completion notification should be signaled.
2357 *
2358 * generic_make_request and the drivers it calls may use bi_next if this
2359 * bio happens to be merged with someone else, and may resubmit the bio to
2360 * a lower device by calling into generic_make_request recursively, which
2361 * means the bio should NOT be touched after the call to ->make_request_fn.
d89d8796 2362 */
dece1635 2363blk_qc_t generic_make_request(struct bio *bio)
d89d8796 2364{
f5fe1b51
N
2365 /*
2366 * bio_list_on_stack[0] contains bios submitted by the current
2367 * make_request_fn.
2368 * bio_list_on_stack[1] contains bios that were submitted before
2369 * the current make_request_fn, but that haven't been processed
2370 * yet.
2371 */
2372 struct bio_list bio_list_on_stack[2];
37f9579f
BVA
2373 blk_mq_req_flags_t flags = 0;
2374 struct request_queue *q = bio->bi_disk->queue;
dece1635 2375 blk_qc_t ret = BLK_QC_T_NONE;
bddd87c7 2376
37f9579f
BVA
2377 if (bio->bi_opf & REQ_NOWAIT)
2378 flags = BLK_MQ_REQ_NOWAIT;
cd4a4ae4
JA
2379 if (bio_flagged(bio, BIO_QUEUE_ENTERED))
2380 blk_queue_enter_live(q);
2381 else if (blk_queue_enter(q, flags) < 0) {
37f9579f
BVA
2382 if (!blk_queue_dying(q) && (bio->bi_opf & REQ_NOWAIT))
2383 bio_wouldblock_error(bio);
2384 else
2385 bio_io_error(bio);
2386 return ret;
2387 }
2388
27a84d54 2389 if (!generic_make_request_checks(bio))
dece1635 2390 goto out;
27a84d54
CH
2391
2392 /*
2393 * We only want one ->make_request_fn to be active at a time, else
2394 * stack usage with stacked devices could be a problem. So use
 2395 * current->bio_list to keep a list of requests submitted by a
2396 * make_request_fn function. current->bio_list is also used as a
2397 * flag to say if generic_make_request is currently active in this
2398 * task or not. If it is NULL, then no make_request is active. If
2399 * it is non-NULL, then a make_request is active, and new requests
2400 * should be added at the tail
2401 */
bddd87c7 2402 if (current->bio_list) {
f5fe1b51 2403 bio_list_add(&current->bio_list[0], bio);
dece1635 2404 goto out;
d89d8796 2405 }
27a84d54 2406
d89d8796
NB
2407 /* following loop may be a bit non-obvious, and so deserves some
2408 * explanation.
2409 * Before entering the loop, bio->bi_next is NULL (as all callers
2410 * ensure that) so we have a list with a single bio.
2411 * We pretend that we have just taken it off a longer list, so
bddd87c7
AM
2412 * we assign bio_list to a pointer to the bio_list_on_stack,
2413 * thus initialising the bio_list of new bios to be
27a84d54 2414 * added. ->make_request() may indeed add some more bios
d89d8796
NB
2415 * through a recursive call to generic_make_request. If it
2416 * did, we find a non-NULL value in bio_list and re-enter the loop
2417 * from the top. In this case we really did just take the bio
bddd87c7 2418 * off the top of the list (no pretending) and so remove it from
27a84d54 2419 * bio_list, and call into ->make_request() again.
d89d8796
NB
2420 */
2421 BUG_ON(bio->bi_next);
f5fe1b51
N
2422 bio_list_init(&bio_list_on_stack[0]);
2423 current->bio_list = bio_list_on_stack;
d89d8796 2424 do {
37f9579f
BVA
2425 bool enter_succeeded = true;
2426
2427 if (unlikely(q != bio->bi_disk->queue)) {
2428 if (q)
2429 blk_queue_exit(q);
2430 q = bio->bi_disk->queue;
2431 flags = 0;
2432 if (bio->bi_opf & REQ_NOWAIT)
2433 flags = BLK_MQ_REQ_NOWAIT;
2434 if (blk_queue_enter(q, flags) < 0) {
2435 enter_succeeded = false;
2436 q = NULL;
2437 }
2438 }
27a84d54 2439
37f9579f 2440 if (enter_succeeded) {
79bd9959
N
2441 struct bio_list lower, same;
2442
2443 /* Create a fresh bio_list for all subordinate requests */
f5fe1b51
N
2444 bio_list_on_stack[1] = bio_list_on_stack[0];
2445 bio_list_init(&bio_list_on_stack[0]);
dece1635 2446 ret = q->make_request_fn(q, bio);
3ef28e83 2447
79bd9959
N
2448 /* sort new bios into those for a lower level
2449 * and those for the same level
2450 */
2451 bio_list_init(&lower);
2452 bio_list_init(&same);
f5fe1b51 2453 while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
74d46992 2454 if (q == bio->bi_disk->queue)
79bd9959
N
2455 bio_list_add(&same, bio);
2456 else
2457 bio_list_add(&lower, bio);
2458 /* now assemble so we handle the lowest level first */
f5fe1b51
N
2459 bio_list_merge(&bio_list_on_stack[0], &lower);
2460 bio_list_merge(&bio_list_on_stack[0], &same);
2461 bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
3ef28e83 2462 } else {
03a07c92
GR
2463 if (unlikely(!blk_queue_dying(q) &&
2464 (bio->bi_opf & REQ_NOWAIT)))
2465 bio_wouldblock_error(bio);
2466 else
2467 bio_io_error(bio);
3ef28e83 2468 }
f5fe1b51 2469 bio = bio_list_pop(&bio_list_on_stack[0]);
d89d8796 2470 } while (bio);
bddd87c7 2471 current->bio_list = NULL; /* deactivate */
dece1635
JA
2472
2473out:
37f9579f
BVA
2474 if (q)
2475 blk_queue_exit(q);
dece1635 2476 return ret;
d89d8796 2477}
1da177e4
LT
2478EXPORT_SYMBOL(generic_make_request);
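/*
 * Hypothetical stacking-driver sketch (not part of this file): a
 * ->make_request_fn that retargets each bio at a backing device and
 * resubmits it. The nested submission is safe because
 * generic_make_request() parks it on current->bio_list as described
 * above. "struct my_stack_dev" and its fields are assumptions.
 */
struct my_stack_dev {
	struct block_device *backing_bdev;
	sector_t data_offset;
};

static blk_qc_t my_stack_make_request(struct request_queue *q, struct bio *bio)
{
	struct my_stack_dev *dev = q->queuedata;

	bio_set_dev(bio, dev->backing_bdev);		/* point at the lower device */
	bio->bi_iter.bi_sector += dev->data_offset;	/* remap the start sector */

	return generic_make_request(bio);		/* queued, not deeply recursed */
}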
2479
f421e1d9
CH
2480/**
2481 * direct_make_request - hand a buffer directly to its device driver for I/O
2482 * @bio: The bio describing the location in memory and on the device.
2483 *
2484 * This function behaves like generic_make_request(), but does not protect
2485 * against recursion. Must only be used if the called driver is known
2486 * to not call generic_make_request (or direct_make_request) again from
2487 * its make_request function. (Calling direct_make_request again from
2488 * a workqueue is perfectly fine as that doesn't recurse).
2489 */
2490blk_qc_t direct_make_request(struct bio *bio)
2491{
2492 struct request_queue *q = bio->bi_disk->queue;
2493 bool nowait = bio->bi_opf & REQ_NOWAIT;
2494 blk_qc_t ret;
2495
2496 if (!generic_make_request_checks(bio))
2497 return BLK_QC_T_NONE;
2498
3a0a5299 2499 if (unlikely(blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0))) {
f421e1d9
CH
2500 if (nowait && !blk_queue_dying(q))
2501 bio->bi_status = BLK_STS_AGAIN;
2502 else
2503 bio->bi_status = BLK_STS_IOERR;
2504 bio_endio(bio);
2505 return BLK_QC_T_NONE;
2506 }
2507
2508 ret = q->make_request_fn(q, bio);
2509 blk_queue_exit(q);
2510 return ret;
2511}
2512EXPORT_SYMBOL_GPL(direct_make_request);
2513
1da177e4 2514/**
710027a4 2515 * submit_bio - submit a bio to the block device layer for I/O
1da177e4
LT
2516 * @bio: The &struct bio which describes the I/O
2517 *
2518 * submit_bio() is very similar in purpose to generic_make_request(), and
2519 * uses that function to do most of the work. Both are fairly rough
710027a4 2520 * interfaces; @bio must be presetup and ready for I/O.
1da177e4
LT
2521 *
2522 */
4e49ea4a 2523blk_qc_t submit_bio(struct bio *bio)
1da177e4 2524{
bf2de6f5
JA
2525 /*
2526 * If it's a regular read/write or a barrier with data attached,
2527 * go through the normal accounting stuff before submission.
2528 */
e2a60da7 2529 if (bio_has_data(bio)) {
4363ac7c
MP
2530 unsigned int count;
2531
95fe6c1a 2532 if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
7c5a0dcf 2533 count = queue_logical_block_size(bio->bi_disk->queue) >> 9;
4363ac7c
MP
2534 else
2535 count = bio_sectors(bio);
2536
a8ebb056 2537 if (op_is_write(bio_op(bio))) {
bf2de6f5
JA
2538 count_vm_events(PGPGOUT, count);
2539 } else {
4f024f37 2540 task_io_account_read(bio->bi_iter.bi_size);
bf2de6f5
JA
2541 count_vm_events(PGPGIN, count);
2542 }
2543
2544 if (unlikely(block_dump)) {
2545 char b[BDEVNAME_SIZE];
8dcbdc74 2546 printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
ba25f9dc 2547 current->comm, task_pid_nr(current),
a8ebb056 2548 op_is_write(bio_op(bio)) ? "WRITE" : "READ",
4f024f37 2549 (unsigned long long)bio->bi_iter.bi_sector,
74d46992 2550 bio_devname(bio, b), count);
bf2de6f5 2551 }
1da177e4
LT
2552 }
2553
dece1635 2554 return generic_make_request(bio);
1da177e4 2555}
1da177e4
LT
2556EXPORT_SYMBOL(submit_bio);
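/*
 * Hypothetical caller sketch (not part of this file): reading one page
 * synchronously through submit_bio()/submit_bio_wait(). The block device,
 * page and sector are assumptions for illustration; real callers also pick
 * a suitable bio_set and handle bio_add_page() failures.
 */
static int my_read_page(struct block_device *bdev, struct page *page,
			sector_t sector)
{
	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
	int ret;

	if (!bio)
		return -ENOMEM;

	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = REQ_OP_READ;
	bio_add_page(bio, page, PAGE_SIZE, 0);

	ret = submit_bio_wait(bio);	/* submit_bio() plus wait on bi_end_io */
	bio_put(bio);
	return ret;
}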
2557
ea435e1b
CH
2558bool blk_poll(struct request_queue *q, blk_qc_t cookie)
2559{
2560 if (!q->poll_fn || !blk_qc_t_valid(cookie))
2561 return false;
2562
2563 if (current->plug)
2564 blk_flush_plug_list(current->plug, false);
2565 return q->poll_fn(q, cookie);
2566}
2567EXPORT_SYMBOL_GPL(blk_poll);
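/*
 * Hypothetical polling sketch: issue a bio, remember the returned cookie,
 * and keep calling blk_poll() until the completion (signalled by the
 * caller's bi_end_io setting *done) has been reaped. Only meaningful on
 * queues that implement ->poll_fn; on others blk_poll() returns false.
 */
static void my_poll_for_completion(struct bio *bio, bool *done)
{
	struct request_queue *q = bio->bi_disk->queue;
	blk_qc_t cookie = submit_bio(bio);

	while (!READ_ONCE(*done))
		if (!blk_poll(q, cookie))
			cpu_relax();	/* nothing reaped this pass */
}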
2568
82124d60 2569/**
bf4e6b4e
HR
2570 * blk_cloned_rq_check_limits - Helper function to check a cloned request
 2571 * for the new queue limits
82124d60
KU
2572 * @q: the queue
2573 * @rq: the request being checked
2574 *
2575 * Description:
2576 * @rq may have been made based on weaker limitations of upper-level queues
2577 * in request stacking drivers, and it may violate the limitation of @q.
2578 * Since the block layer and the underlying device driver trust @rq
2579 * after it is inserted to @q, it should be checked against @q before
2580 * the insertion using this generic function.
2581 *
82124d60 2582 * Request stacking drivers like request-based dm may change the queue
bf4e6b4e
HR
2583 * limits when retrying requests on other queues. Those requests need
2584 * to be checked against the new queue limits again during dispatch.
82124d60 2585 */
bf4e6b4e
HR
2586static int blk_cloned_rq_check_limits(struct request_queue *q,
2587 struct request *rq)
82124d60 2588{
8fe0d473 2589 if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) {
82124d60
KU
2590 printk(KERN_ERR "%s: over max size limit.\n", __func__);
2591 return -EIO;
2592 }
2593
2594 /*
2595 * queue's settings related to segment counting like q->bounce_pfn
2596 * may differ from that of other stacking queues.
2597 * Recalculate it to check the request correctly on this queue's
2598 * limitation.
2599 */
2600 blk_recalc_rq_segments(rq);
8a78362c 2601 if (rq->nr_phys_segments > queue_max_segments(q)) {
82124d60
KU
2602 printk(KERN_ERR "%s: over max segments limit.\n", __func__);
2603 return -EIO;
2604 }
2605
2606 return 0;
2607}
82124d60
KU
2608
2609/**
2610 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
2611 * @q: the queue to submit the request
2612 * @rq: the request being queued
2613 */
2a842aca 2614blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
82124d60
KU
2615{
2616 unsigned long flags;
4853abaa 2617 int where = ELEVATOR_INSERT_BACK;
82124d60 2618
bf4e6b4e 2619 if (blk_cloned_rq_check_limits(q, rq))
2a842aca 2620 return BLK_STS_IOERR;
82124d60 2621
b2c9cd37
AM
2622 if (rq->rq_disk &&
2623 should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
2a842aca 2624 return BLK_STS_IOERR;
82124d60 2625
7fb4898e
KB
2626 if (q->mq_ops) {
2627 if (blk_queue_io_stat(q))
2628 blk_account_io_start(rq, true);
157f377b
JA
2629 /*
2630 * Since we have a scheduler attached on the top device,
2631 * bypass a potential scheduler on the bottom device for
2632 * insert.
2633 */
c77ff7fd 2634 return blk_mq_request_issue_directly(rq);
7fb4898e
KB
2635 }
2636
82124d60 2637 spin_lock_irqsave(q->queue_lock, flags);
3f3299d5 2638 if (unlikely(blk_queue_dying(q))) {
8ba61435 2639 spin_unlock_irqrestore(q->queue_lock, flags);
2a842aca 2640 return BLK_STS_IOERR;
8ba61435 2641 }
82124d60
KU
2642
2643 /*
2644 * Submitting request must be dequeued before calling this function
2645 * because it will be linked to another request_queue
2646 */
2647 BUG_ON(blk_queued_rq(rq));
2648
f73f44eb 2649 if (op_is_flush(rq->cmd_flags))
4853abaa
JM
2650 where = ELEVATOR_INSERT_FLUSH;
2651
2652 add_acct_request(q, rq, where);
e67b77c7
JM
2653 if (where == ELEVATOR_INSERT_FLUSH)
2654 __blk_run_queue(q);
82124d60
KU
2655 spin_unlock_irqrestore(q->queue_lock, flags);
2656
2a842aca 2657 return BLK_STS_OK;
82124d60
KU
2658}
2659EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
2660
80a761fd
TH
2661/**
2662 * blk_rq_err_bytes - determine number of bytes till the next failure boundary
2663 * @rq: request to examine
2664 *
2665 * Description:
 2666 * A request could be a merge of IOs which require different failure
2667 * handling. This function determines the number of bytes which
2668 * can be failed from the beginning of the request without
2669 * crossing into area which need to be retried further.
2670 *
2671 * Return:
2672 * The number of bytes to fail.
80a761fd
TH
2673 */
2674unsigned int blk_rq_err_bytes(const struct request *rq)
2675{
2676 unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
2677 unsigned int bytes = 0;
2678 struct bio *bio;
2679
e8064021 2680 if (!(rq->rq_flags & RQF_MIXED_MERGE))
80a761fd
TH
2681 return blk_rq_bytes(rq);
2682
2683 /*
2684 * Currently the only 'mixing' which can happen is between
2685 * different fastfail types. We can safely fail portions
2686 * which have all the failfast bits that the first one has -
2687 * the ones which are at least as eager to fail as the first
2688 * one.
2689 */
2690 for (bio = rq->bio; bio; bio = bio->bi_next) {
1eff9d32 2691 if ((bio->bi_opf & ff) != ff)
80a761fd 2692 break;
4f024f37 2693 bytes += bio->bi_iter.bi_size;
80a761fd
TH
2694 }
2695
2696 /* this could lead to infinite loop */
2697 BUG_ON(blk_rq_bytes(rq) && !bytes);
2698 return bytes;
2699}
2700EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
2701
320ae51f 2702void blk_account_io_completion(struct request *req, unsigned int bytes)
bc58ba94 2703{
c2553b58 2704 if (blk_do_io_stat(req)) {
ddcf35d3 2705 const int sgrp = op_stat_group(req_op(req));
bc58ba94
JA
2706 struct hd_struct *part;
2707 int cpu;
2708
2709 cpu = part_stat_lock();
09e099d4 2710 part = req->part;
ddcf35d3 2711 part_stat_add(cpu, part, sectors[sgrp], bytes >> 9);
bc58ba94
JA
2712 part_stat_unlock();
2713 }
2714}
2715
522a7775 2716void blk_account_io_done(struct request *req, u64 now)
bc58ba94 2717{
bc58ba94 2718 /*
dd4c133f
TH
2719 * Account IO completion. flush_rq isn't accounted as a
2720 * normal IO on queueing nor completion. Accounting the
2721 * containing request is enough.
bc58ba94 2722 */
e8064021 2723 if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) {
522a7775 2724 unsigned long duration;
ddcf35d3 2725 const int sgrp = op_stat_group(req_op(req));
bc58ba94
JA
2726 struct hd_struct *part;
2727 int cpu;
2728
522a7775 2729 duration = nsecs_to_jiffies(now - req->start_time_ns);
bc58ba94 2730 cpu = part_stat_lock();
09e099d4 2731 part = req->part;
bc58ba94 2732
ddcf35d3
MC
2733 part_stat_inc(cpu, part, ios[sgrp]);
2734 part_stat_add(cpu, part, ticks[sgrp], duration);
d62e26b3 2735 part_round_stats(req->q, cpu, part);
ddcf35d3 2736 part_dec_in_flight(req->q, part, rq_data_dir(req));
bc58ba94 2737
6c23a968 2738 hd_struct_put(part);
bc58ba94
JA
2739 part_stat_unlock();
2740 }
2741}
2742
47fafbc7 2743#ifdef CONFIG_PM
c8158819
LM
2744/*
2745 * Don't process normal requests when queue is suspended
2746 * or in the process of suspending/resuming
2747 */
e4f36b24 2748static bool blk_pm_allow_request(struct request *rq)
c8158819 2749{
e4f36b24
CH
2750 switch (rq->q->rpm_status) {
2751 case RPM_RESUMING:
2752 case RPM_SUSPENDING:
2753 return rq->rq_flags & RQF_PM;
2754 case RPM_SUSPENDED:
2755 return false;
e9a83853
GU
2756 default:
2757 return true;
e4f36b24 2758 }
c8158819
LM
2759}
2760#else
e4f36b24 2761static bool blk_pm_allow_request(struct request *rq)
c8158819 2762{
e4f36b24 2763 return true;
c8158819
LM
2764}
2765#endif
2766
320ae51f
JA
2767void blk_account_io_start(struct request *rq, bool new_io)
2768{
2769 struct hd_struct *part;
2770 int rw = rq_data_dir(rq);
2771 int cpu;
2772
2773 if (!blk_do_io_stat(rq))
2774 return;
2775
2776 cpu = part_stat_lock();
2777
2778 if (!new_io) {
2779 part = rq->part;
2780 part_stat_inc(cpu, part, merges[rw]);
2781 } else {
2782 part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
2783 if (!hd_struct_try_get(part)) {
2784 /*
2785 * The partition is already being removed,
2786 * the request will be accounted on the disk only
2787 *
2788 * We take a reference on disk->part0 although that
2789 * partition will never be deleted, so we can treat
2790 * it as any other partition.
2791 */
2792 part = &rq->rq_disk->part0;
2793 hd_struct_get(part);
2794 }
d62e26b3
JA
2795 part_round_stats(rq->q, cpu, part);
2796 part_inc_in_flight(rq->q, part, rw);
320ae51f
JA
2797 rq->part = part;
2798 }
2799
2800 part_stat_unlock();
2801}
2802
9c988374
CH
2803static struct request *elv_next_request(struct request_queue *q)
2804{
2805 struct request *rq;
2806 struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
2807
2808 WARN_ON_ONCE(q->mq_ops);
2809
2810 while (1) {
e4f36b24
CH
2811 list_for_each_entry(rq, &q->queue_head, queuelist) {
2812 if (blk_pm_allow_request(rq))
2813 return rq;
2814
2815 if (rq->rq_flags & RQF_SOFTBARRIER)
2816 break;
9c988374
CH
2817 }
2818
2819 /*
 2820 * A flush request is running and the flush request isn't queueable
 2821 * in the drive, so we can hold the queue until the flush request is
 2822 * finished. Even if we didn't do this, the driver couldn't dispatch
 2823 * the next requests and would have to requeue them, and holding can
 2824 * improve throughput too. For example, with requests flush1, write1,
 2825 * flush2: flush1 is dispatched, then the queue is held and write1
 2826 * isn't inserted into the queue. After flush1 finishes, flush2
 2827 * will be dispatched. Since the disk cache is already clean,
 2828 * flush2 will finish very soon, so it looks as if flush2 was
 2829 * folded into flush1.
 2830 * Since the queue is held, a flag is set to indicate that the queue
 2831 * should be restarted later. Please see flush_end_io() for
 2832 * details.
2833 */
2834 if (fq->flush_pending_idx != fq->flush_running_idx &&
2835 !queue_flush_queueable(q)) {
2836 fq->flush_queue_delayed = 1;
2837 return NULL;
2838 }
2839 if (unlikely(blk_queue_bypass(q)) ||
2840 !q->elevator->type->ops.sq.elevator_dispatch_fn(q, 0))
2841 return NULL;
2842 }
2843}
2844
3bcddeac 2845/**
9934c8c0
TH
2846 * blk_peek_request - peek at the top of a request queue
2847 * @q: request queue to peek at
2848 *
2849 * Description:
2850 * Return the request at the top of @q. The returned request
2851 * should be started using blk_start_request() before LLD starts
2852 * processing it.
2853 *
2854 * Return:
2855 * Pointer to the request at the top of @q if available. Null
2856 * otherwise.
9934c8c0
TH
2857 */
2858struct request *blk_peek_request(struct request_queue *q)
158dbda0
TH
2859{
2860 struct request *rq;
2861 int ret;
2862
2fff8a92 2863 lockdep_assert_held(q->queue_lock);
332ebbf7 2864 WARN_ON_ONCE(q->mq_ops);
2fff8a92 2865
9c988374 2866 while ((rq = elv_next_request(q)) != NULL) {
e8064021 2867 if (!(rq->rq_flags & RQF_STARTED)) {
158dbda0
TH
2868 /*
2869 * This is the first time the device driver
2870 * sees this request (possibly after
2871 * requeueing). Notify IO scheduler.
2872 */
e8064021 2873 if (rq->rq_flags & RQF_SORTED)
158dbda0
TH
2874 elv_activate_rq(q, rq);
2875
2876 /*
2877 * just mark as started even if we don't start
 2878 * it; a request that has been delayed should
2879 * not be passed by new incoming requests
2880 */
e8064021 2881 rq->rq_flags |= RQF_STARTED;
158dbda0
TH
2882 trace_block_rq_issue(q, rq);
2883 }
2884
2885 if (!q->boundary_rq || q->boundary_rq == rq) {
2886 q->end_sector = rq_end_sector(rq);
2887 q->boundary_rq = NULL;
2888 }
2889
e8064021 2890 if (rq->rq_flags & RQF_DONTPREP)
158dbda0
TH
2891 break;
2892
2e46e8b2 2893 if (q->dma_drain_size && blk_rq_bytes(rq)) {
158dbda0
TH
2894 /*
 2895 * make sure space for the drain appears; we
2896 * know we can do this because max_hw_segments
2897 * has been adjusted to be one fewer than the
2898 * device can handle
2899 */
2900 rq->nr_phys_segments++;
2901 }
2902
2903 if (!q->prep_rq_fn)
2904 break;
2905
2906 ret = q->prep_rq_fn(q, rq);
2907 if (ret == BLKPREP_OK) {
2908 break;
2909 } else if (ret == BLKPREP_DEFER) {
2910 /*
2911 * the request may have been (partially) prepped.
2912 * we need to keep this request in the front to
e8064021 2913 * avoid resource deadlock. RQF_STARTED will
158dbda0
TH
2914 * prevent other fs requests from passing this one.
2915 */
2e46e8b2 2916 if (q->dma_drain_size && blk_rq_bytes(rq) &&
e8064021 2917 !(rq->rq_flags & RQF_DONTPREP)) {
158dbda0
TH
2918 /*
2919 * remove the space for the drain we added
2920 * so that we don't add it again
2921 */
2922 --rq->nr_phys_segments;
2923 }
2924
2925 rq = NULL;
2926 break;
0fb5b1fb 2927 } else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) {
e8064021 2928 rq->rq_flags |= RQF_QUIET;
c143dc90
JB
2929 /*
2930 * Mark this request as started so we don't trigger
2931 * any debug logic in the end I/O path.
2932 */
2933 blk_start_request(rq);
2a842aca
CH
2934 __blk_end_request_all(rq, ret == BLKPREP_INVALID ?
2935 BLK_STS_TARGET : BLK_STS_IOERR);
158dbda0
TH
2936 } else {
2937 printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
2938 break;
2939 }
2940 }
2941
2942 return rq;
2943}
9934c8c0 2944EXPORT_SYMBOL(blk_peek_request);
158dbda0 2945
5034435c 2946static void blk_dequeue_request(struct request *rq)
158dbda0 2947{
9934c8c0
TH
2948 struct request_queue *q = rq->q;
2949
158dbda0
TH
2950 BUG_ON(list_empty(&rq->queuelist));
2951 BUG_ON(ELV_ON_HASH(rq));
2952
2953 list_del_init(&rq->queuelist);
2954
2955 /*
2956 * the time frame between a request being removed from the lists
 2957 * and when it is freed is accounted as io that is in progress at
2958 * the driver side.
2959 */
522a7775 2960 if (blk_account_rq(rq))
0a7ae2ff 2961 q->in_flight[rq_is_sync(rq)]++;
158dbda0
TH
2962}
2963
9934c8c0
TH
2964/**
2965 * blk_start_request - start request processing on the driver
2966 * @req: request to dequeue
2967 *
2968 * Description:
2969 * Dequeue @req and start timeout timer on it. This hands off the
2970 * request to the driver.
9934c8c0
TH
2971 */
2972void blk_start_request(struct request *req)
2973{
2fff8a92 2974 lockdep_assert_held(req->q->queue_lock);
332ebbf7 2975 WARN_ON_ONCE(req->q->mq_ops);
2fff8a92 2976
9934c8c0
TH
2977 blk_dequeue_request(req);
2978
cf43e6be 2979 if (test_bit(QUEUE_FLAG_STATS, &req->q->queue_flags)) {
544ccc8d
OS
2980 req->io_start_time_ns = ktime_get_ns();
2981#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2982 req->throtl_size = blk_rq_sectors(req);
2983#endif
cf43e6be 2984 req->rq_flags |= RQF_STATS;
a7905043 2985 rq_qos_issue(req->q, req);
cf43e6be
JA
2986 }
2987
e14575b3 2988 BUG_ON(blk_rq_is_complete(req));
9934c8c0
TH
2989 blk_add_timer(req);
2990}
2991EXPORT_SYMBOL(blk_start_request);
2992
2993/**
2994 * blk_fetch_request - fetch a request from a request queue
2995 * @q: request queue to fetch a request from
2996 *
2997 * Description:
2998 * Return the request at the top of @q. The request is started on
2999 * return and LLD can start processing it immediately.
3000 *
3001 * Return:
3002 * Pointer to the request at the top of @q if available. Null
3003 * otherwise.
9934c8c0
TH
3004 */
3005struct request *blk_fetch_request(struct request_queue *q)
3006{
3007 struct request *rq;
3008
2fff8a92 3009 lockdep_assert_held(q->queue_lock);
332ebbf7 3010 WARN_ON_ONCE(q->mq_ops);
2fff8a92 3011
9934c8c0
TH
3012 rq = blk_peek_request(q);
3013 if (rq)
3014 blk_start_request(rq);
3015 return rq;
3016}
3017EXPORT_SYMBOL(blk_fetch_request);
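/*
 * Hypothetical single-threaded request_fn sketch (not part of this file):
 * the classic legacy-driver pattern of fetching started requests and
 * completing them in full under the queue lock. "mydev_copy_sectors" is an
 * assumed, non-blocking driver helper (e.g. a memory-backed transfer).
 */
static void mydev_request_fn(struct request_queue *q)
{
	struct request *rq;

	/* called with q->queue_lock held */
	while ((rq = blk_fetch_request(q)) != NULL) {	/* dequeued + started */
		blk_status_t err = BLK_STS_OK;

		if (blk_rq_is_passthrough(rq))
			err = BLK_STS_IOERR;		/* only fs requests handled */
		else
			mydev_copy_sectors(q->queuedata, rq);

		__blk_end_request_all(rq, err);		/* queue lock held, as required */
	}
}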
3018
ef71de8b
CH
3019/*
3020 * Steal bios from a request and add them to a bio list.
3021 * The request must not have been partially completed before.
3022 */
3023void blk_steal_bios(struct bio_list *list, struct request *rq)
3024{
3025 if (rq->bio) {
3026 if (list->tail)
3027 list->tail->bi_next = rq->bio;
3028 else
3029 list->head = rq->bio;
3030 list->tail = rq->biotail;
3031
3032 rq->bio = NULL;
3033 rq->biotail = NULL;
3034 }
3035
3036 rq->__data_len = 0;
3037}
3038EXPORT_SYMBOL_GPL(blk_steal_bios);
3039
3bcddeac 3040/**
2e60e022 3041 * blk_update_request - Special helper function for request stacking drivers
8ebf9756 3042 * @req: the request being processed
2a842aca 3043 * @error: block status code
8ebf9756 3044 * @nr_bytes: number of bytes to complete @req
3bcddeac
KU
3045 *
3046 * Description:
8ebf9756
RD
3047 * Ends I/O on a number of bytes attached to @req, but doesn't complete
3048 * the request structure even if @req doesn't have leftover.
3049 * If @req has leftover, sets it up for the next range of segments.
2e60e022
TH
3050 *
3051 * This special helper function is only for request stacking drivers
3052 * (e.g. request-based dm) so that they can handle partial completion.
3053 * Actual device drivers should use blk_end_request instead.
3054 *
3055 * Passing the result of blk_rq_bytes() as @nr_bytes guarantees
3056 * %false return from this function.
3bcddeac 3057 *
1954e9a9
BVA
3058 * Note:
3059 * The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in both
3060 * blk_rq_bytes() and in blk_update_request().
3061 *
3bcddeac 3062 * Return:
2e60e022
TH
3063 * %false - this request doesn't have any more data
3064 * %true - this request has more data
3bcddeac 3065 **/
2a842aca
CH
3066bool blk_update_request(struct request *req, blk_status_t error,
3067 unsigned int nr_bytes)
1da177e4 3068{
f79ea416 3069 int total_bytes;
1da177e4 3070
2a842aca 3071 trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);
4a0efdc9 3072
2e60e022
TH
3073 if (!req->bio)
3074 return false;
3075
2a842aca
CH
3076 if (unlikely(error && !blk_rq_is_passthrough(req) &&
3077 !(req->rq_flags & RQF_QUIET)))
3078 print_req_error(req, error);
1da177e4 3079
bc58ba94 3080 blk_account_io_completion(req, nr_bytes);
d72d904a 3081
f79ea416
KO
3082 total_bytes = 0;
3083 while (req->bio) {
3084 struct bio *bio = req->bio;
4f024f37 3085 unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
1da177e4 3086
9c24c10a 3087 if (bio_bytes == bio->bi_iter.bi_size)
1da177e4 3088 req->bio = bio->bi_next;
1da177e4 3089
fbbaf700
N
3090 /* Completion has already been traced */
3091 bio_clear_flag(bio, BIO_TRACE_COMPLETION);
f79ea416 3092 req_bio_endio(req, bio, bio_bytes, error);
1da177e4 3093
f79ea416
KO
3094 total_bytes += bio_bytes;
3095 nr_bytes -= bio_bytes;
1da177e4 3096
f79ea416
KO
3097 if (!nr_bytes)
3098 break;
1da177e4
LT
3099 }
3100
3101 /*
3102 * completely done
3103 */
2e60e022
TH
3104 if (!req->bio) {
3105 /*
3106 * Reset counters so that the request stacking driver
3107 * can find how many bytes remain in the request
3108 * later.
3109 */
a2dec7b3 3110 req->__data_len = 0;
2e60e022
TH
3111 return false;
3112 }
1da177e4 3113
a2dec7b3 3114 req->__data_len -= total_bytes;
2e46e8b2
TH
3115
3116 /* update sector only for requests with clear definition of sector */
57292b58 3117 if (!blk_rq_is_passthrough(req))
a2dec7b3 3118 req->__sector += total_bytes >> 9;
2e46e8b2 3119
80a761fd 3120 /* mixed attributes always follow the first bio */
e8064021 3121 if (req->rq_flags & RQF_MIXED_MERGE) {
80a761fd 3122 req->cmd_flags &= ~REQ_FAILFAST_MASK;
1eff9d32 3123 req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
80a761fd
TH
3124 }
3125
ed6565e7
CH
3126 if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
3127 /*
3128 * If total number of sectors is less than the first segment
3129 * size, something has gone terribly wrong.
3130 */
3131 if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
3132 blk_dump_rq_flags(req, "request botched");
3133 req->__data_len = blk_rq_cur_bytes(req);
3134 }
2e46e8b2 3135
ed6565e7
CH
3136 /* recalculate the number of segments */
3137 blk_recalc_rq_segments(req);
3138 }
2e46e8b2 3139
2e60e022 3140 return true;
1da177e4 3141}
2e60e022 3142EXPORT_SYMBOL_GPL(blk_update_request);
1da177e4 3143
2a842aca 3144static bool blk_update_bidi_request(struct request *rq, blk_status_t error,
2e60e022
TH
3145 unsigned int nr_bytes,
3146 unsigned int bidi_bytes)
5efccd17 3147{
2e60e022
TH
3148 if (blk_update_request(rq, error, nr_bytes))
3149 return true;
5efccd17 3150
2e60e022
TH
3151 /* Bidi request must be completed as a whole */
3152 if (unlikely(blk_bidi_rq(rq)) &&
3153 blk_update_request(rq->next_rq, error, bidi_bytes))
3154 return true;
5efccd17 3155
e2e1a148
JA
3156 if (blk_queue_add_random(rq->q))
3157 add_disk_randomness(rq->rq_disk);
2e60e022
TH
3158
3159 return false;
1da177e4
LT
3160}
3161
28018c24
JB
3162/**
3163 * blk_unprep_request - unprepare a request
3164 * @req: the request
3165 *
3166 * This function makes a request ready for complete resubmission (or
3167 * completion). It happens only after all error handling is complete,
3168 * so represents the appropriate moment to deallocate any resources
3169 * that were allocated to the request in the prep_rq_fn. The queue
3170 * lock is held when calling this.
3171 */
3172void blk_unprep_request(struct request *req)
3173{
3174 struct request_queue *q = req->q;
3175
e8064021 3176 req->rq_flags &= ~RQF_DONTPREP;
28018c24
JB
3177 if (q->unprep_rq_fn)
3178 q->unprep_rq_fn(q, req);
3179}
3180EXPORT_SYMBOL_GPL(blk_unprep_request);
3181
2a842aca 3182void blk_finish_request(struct request *req, blk_status_t error)
1da177e4 3183{
cf43e6be 3184 struct request_queue *q = req->q;
522a7775 3185 u64 now = ktime_get_ns();
cf43e6be 3186
2fff8a92 3187 lockdep_assert_held(req->q->queue_lock);
332ebbf7 3188 WARN_ON_ONCE(q->mq_ops);
2fff8a92 3189
cf43e6be 3190 if (req->rq_flags & RQF_STATS)
522a7775 3191 blk_stat_add(req, now);
cf43e6be 3192
e8064021 3193 if (req->rq_flags & RQF_QUEUED)
cf43e6be 3194 blk_queue_end_tag(q, req);
b8286239 3195
ba396a6c 3196 BUG_ON(blk_queued_rq(req));
1da177e4 3197
57292b58 3198 if (unlikely(laptop_mode) && !blk_rq_is_passthrough(req))
dc3b17cc 3199 laptop_io_completion(req->q->backing_dev_info);
1da177e4 3200
e78042e5
MA
3201 blk_delete_timer(req);
3202
e8064021 3203 if (req->rq_flags & RQF_DONTPREP)
28018c24
JB
3204 blk_unprep_request(req);
3205
522a7775 3206 blk_account_io_done(req, now);
b8286239 3207
87760e5e 3208 if (req->end_io) {
a7905043 3209 rq_qos_done(q, req);
8ffdc655 3210 req->end_io(req, error);
87760e5e 3211 } else {
b8286239
KU
3212 if (blk_bidi_rq(req))
3213 __blk_put_request(req->next_rq->q, req->next_rq);
3214
cf43e6be 3215 __blk_put_request(q, req);
b8286239 3216 }
1da177e4 3217}
12120077 3218EXPORT_SYMBOL(blk_finish_request);
1da177e4 3219
3b11313a 3220/**
2e60e022
TH
3221 * blk_end_bidi_request - Complete a bidi request
3222 * @rq: the request to complete
2a842aca 3223 * @error: block status code
2e60e022
TH
3224 * @nr_bytes: number of bytes to complete @rq
3225 * @bidi_bytes: number of bytes to complete @rq->next_rq
a0cd1285
JA
3226 *
3227 * Description:
e3a04fe3 3228 * Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
2e60e022
TH
 3229 * Drivers that support bidi can safely call this member for any
 3230 * type of request, bidi or uni. In the latter case @bidi_bytes is
3231 * just ignored.
336cdb40
KU
3232 *
3233 * Return:
2e60e022
TH
3234 * %false - we are done with this request
3235 * %true - still buffers pending for this request
a0cd1285 3236 **/
2a842aca 3237static bool blk_end_bidi_request(struct request *rq, blk_status_t error,
32fab448
KU
3238 unsigned int nr_bytes, unsigned int bidi_bytes)
3239{
336cdb40 3240 struct request_queue *q = rq->q;
2e60e022 3241 unsigned long flags;
32fab448 3242
332ebbf7
BVA
3243 WARN_ON_ONCE(q->mq_ops);
3244
2e60e022
TH
3245 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
3246 return true;
32fab448 3247
336cdb40 3248 spin_lock_irqsave(q->queue_lock, flags);
2e60e022 3249 blk_finish_request(rq, error);
336cdb40
KU
3250 spin_unlock_irqrestore(q->queue_lock, flags);
3251
2e60e022 3252 return false;
32fab448
KU
3253}
3254
336cdb40 3255/**
2e60e022
TH
3256 * __blk_end_bidi_request - Complete a bidi request with queue lock held
3257 * @rq: the request to complete
2a842aca 3258 * @error: block status code
e3a04fe3
KU
3259 * @nr_bytes: number of bytes to complete @rq
3260 * @bidi_bytes: number of bytes to complete @rq->next_rq
336cdb40
KU
3261 *
3262 * Description:
2e60e022
TH
3263 * Identical to blk_end_bidi_request() except that queue lock is
3264 * assumed to be locked on entry and remains so on return.
336cdb40
KU
3265 *
3266 * Return:
2e60e022
TH
3267 * %false - we are done with this request
3268 * %true - still buffers pending for this request
336cdb40 3269 **/
2a842aca 3270static bool __blk_end_bidi_request(struct request *rq, blk_status_t error,
b1f74493 3271 unsigned int nr_bytes, unsigned int bidi_bytes)
336cdb40 3272{
2fff8a92 3273 lockdep_assert_held(rq->q->queue_lock);
332ebbf7 3274 WARN_ON_ONCE(rq->q->mq_ops);
2fff8a92 3275
2e60e022
TH
3276 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
3277 return true;
336cdb40 3278
2e60e022 3279 blk_finish_request(rq, error);
336cdb40 3280
2e60e022 3281 return false;
336cdb40 3282}
e19a3ab0
KU
3283
3284/**
3285 * blk_end_request - Helper function for drivers to complete the request.
3286 * @rq: the request being processed
2a842aca 3287 * @error: block status code
e19a3ab0
KU
3288 * @nr_bytes: number of bytes to complete
3289 *
3290 * Description:
3291 * Ends I/O on a number of bytes attached to @rq.
3292 * If @rq has leftover, sets it up for the next range of segments.
3293 *
3294 * Return:
b1f74493
FT
3295 * %false - we are done with this request
3296 * %true - still buffers pending for this request
e19a3ab0 3297 **/
2a842aca
CH
3298bool blk_end_request(struct request *rq, blk_status_t error,
3299 unsigned int nr_bytes)
e19a3ab0 3300{
332ebbf7 3301 WARN_ON_ONCE(rq->q->mq_ops);
b1f74493 3302 return blk_end_bidi_request(rq, error, nr_bytes, 0);
e19a3ab0 3303}
56ad1740 3304EXPORT_SYMBOL(blk_end_request);
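/*
 * Illustrative sketch (not part of blk-core.c): how a legacy, non-mq
 * driver's completion path might use blk_end_request().  The function and
 * device names are hypothetical.
 */
static void my_dev_complete_rq(struct request *rq, unsigned int bytes_done,
			       blk_status_t status)
{
	/*
	 * Returns false once the whole request has been completed; true
	 * means it was only partially completed and has been set up for
	 * the next range of segments.
	 */
	if (!blk_end_request(rq, status, bytes_done))
		return;

	/* Partial completion: re-issue the remainder of @rq here. */
}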
336cdb40
KU
3305
3306/**
b1f74493
FT
3307 * blk_end_request_all - Helper function for drivers to finish the request.
3308 * @rq: the request to finish
2a842aca 3309 * @error: block status code
336cdb40
KU
3310 *
3311 * Description:
b1f74493
FT
3312 * Completely finish @rq.
3313 */
2a842aca 3314void blk_end_request_all(struct request *rq, blk_status_t error)
336cdb40 3315{
b1f74493
FT
3316 bool pending;
3317 unsigned int bidi_bytes = 0;
336cdb40 3318
b1f74493
FT
3319 if (unlikely(blk_bidi_rq(rq)))
3320 bidi_bytes = blk_rq_bytes(rq->next_rq);
336cdb40 3321
b1f74493
FT
3322 pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
3323 BUG_ON(pending);
3324}
56ad1740 3325EXPORT_SYMBOL(blk_end_request_all);
336cdb40 3326
e3a04fe3 3327/**
b1f74493
FT
3328 * __blk_end_request - Helper function for drivers to complete the request.
3329 * @rq: the request being processed
2a842aca 3330 * @error: block status code
b1f74493 3331 * @nr_bytes: number of bytes to complete
e3a04fe3
KU
3332 *
3333 * Description:
b1f74493 3334 * Must be called with queue lock held, unlike blk_end_request().
e3a04fe3
KU
3335 *
3336 * Return:
b1f74493
FT
3337 * %false - we are done with this request
3338 * %true - still buffers pending for this request
e3a04fe3 3339 **/
2a842aca
CH
3340bool __blk_end_request(struct request *rq, blk_status_t error,
3341 unsigned int nr_bytes)
e3a04fe3 3342{
2fff8a92 3343 lockdep_assert_held(rq->q->queue_lock);
332ebbf7 3344 WARN_ON_ONCE(rq->q->mq_ops);
2fff8a92 3345
b1f74493 3346 return __blk_end_bidi_request(rq, error, nr_bytes, 0);
e3a04fe3 3347}
56ad1740 3348EXPORT_SYMBOL(__blk_end_request);
e3a04fe3 3349
32fab448 3350/**
b1f74493
FT
3351 * __blk_end_request_all - Helper function for drivers to finish the request.
3352 * @rq: the request to finish
2a842aca 3353 * @error: block status code
32fab448
KU
3354 *
3355 * Description:
b1f74493 3356 * Completely finish @rq. Must be called with queue lock held.
32fab448 3357 */
2a842aca 3358void __blk_end_request_all(struct request *rq, blk_status_t error)
32fab448 3359{
b1f74493
FT
3360 bool pending;
3361 unsigned int bidi_bytes = 0;
3362
2fff8a92 3363 lockdep_assert_held(rq->q->queue_lock);
332ebbf7 3364 WARN_ON_ONCE(rq->q->mq_ops);
2fff8a92 3365
b1f74493
FT
3366 if (unlikely(blk_bidi_rq(rq)))
3367 bidi_bytes = blk_rq_bytes(rq->next_rq);
3368
3369 pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
3370 BUG_ON(pending);
32fab448 3371}
56ad1740 3372EXPORT_SYMBOL(__blk_end_request_all);
32fab448 3373
e19a3ab0 3374/**
b1f74493
FT
3375 * __blk_end_request_cur - Helper function to finish the current request chunk.
3376 * @rq: the request to finish the current chunk for
2a842aca 3377 * @error: block status code
e19a3ab0
KU
3378 *
3379 * Description:
b1f74493
FT
3380 * Complete the current consecutively mapped chunk from @rq. Must
3381 * be called with queue lock held.
e19a3ab0
KU
3382 *
3383 * Return:
b1f74493
FT
3384 * %false - we are done with this request
3385 * %true - still buffers pending for this request
3386 */
2a842aca 3387bool __blk_end_request_cur(struct request *rq, blk_status_t error)
e19a3ab0 3388{
b1f74493 3389 return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
e19a3ab0 3390}
56ad1740 3391EXPORT_SYMBOL(__blk_end_request_cur);
e19a3ab0 3392
86db1e29
JA
3393void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
3394 struct bio *bio)
1da177e4 3395{
b4f42e28 3396 if (bio_has_data(bio))
fb2dce86 3397 rq->nr_phys_segments = bio_phys_segments(q, bio);
445251d0
JA
3398 else if (bio_op(bio) == REQ_OP_DISCARD)
3399 rq->nr_phys_segments = 1;
b4f42e28 3400
4f024f37 3401 rq->__data_len = bio->bi_iter.bi_size;
1da177e4 3402 rq->bio = rq->biotail = bio;
1da177e4 3403
74d46992
CH
3404 if (bio->bi_disk)
3405 rq->rq_disk = bio->bi_disk;
66846572 3406}
1da177e4 3407
2d4dc890
IL
3408#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
3409/**
3410 * rq_flush_dcache_pages - Helper function to flush all pages in a request
3411 * @rq: the request to be flushed
3412 *
3413 * Description:
3414 * Flush all pages in @rq.
3415 */
3416void rq_flush_dcache_pages(struct request *rq)
3417{
3418 struct req_iterator iter;
7988613b 3419 struct bio_vec bvec;
2d4dc890
IL
3420
3421 rq_for_each_segment(bvec, rq, iter)
7988613b 3422 flush_dcache_page(bvec.bv_page);
2d4dc890
IL
3423}
3424EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
3425#endif
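/*
 * Illustrative sketch (not part of blk-core.c): a hypothetical driver that
 * fills request pages via DMA flushes the D-cache before completing the
 * request, so the CPU sees the freshly written data on architectures that
 * need it (elsewhere the call is a no-op).
 */
static void my_dev_dma_done(struct request *rq)
{
	rq_flush_dcache_pages(rq);
	blk_end_request_all(rq, BLK_STS_OK);
}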
3426
ef9e3fac
KU
3427/**
3428 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
3429 * @q : the queue of the device being checked
3430 *
3431 * Description:
3432 * Check if underlying low-level drivers of a device are busy.
3433 * If the drivers want to export their busy state, they must set their
3434 * own exporting function using blk_queue_lld_busy() first.
3435 *
3436 * Basically, this function is used only by request stacking drivers
3437 * to stop dispatching requests to underlying devices when underlying
3438 * devices are busy. This behavior allows more I/O to be merged on the
3439 * queue of the request stacking driver and prevents I/O throughput
3440 * regression under bursty I/O load.
3441 *
3442 * Return:
3443 * 0 - Not busy (The request stacking driver should dispatch request)
3444 * 1 - Busy (The request stacking driver should stop dispatching request)
3445 */
3446int blk_lld_busy(struct request_queue *q)
3447{
3448 if (q->lld_busy_fn)
3449 return q->lld_busy_fn(q);
3450
3451 return 0;
3452}
3453EXPORT_SYMBOL_GPL(blk_lld_busy);
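/*
 * Illustrative sketch (not part of blk-core.c): a request stacking driver
 * (all "my_stack_*" names are hypothetical) exports its busy state with
 * blk_queue_lld_busy() and, before dispatching, consults the lower device
 * with blk_lld_busy().
 */
static int my_stack_lld_busy(struct request_queue *q)
{
	/* Assumes the lower queue was stashed in ->queuedata at init time. */
	struct request_queue *lower_q = q->queuedata;

	return blk_lld_busy(lower_q);
}

static void my_stack_init_queue(struct request_queue *q,
				struct request_queue *lower_q)
{
	q->queuedata = lower_q;
	blk_queue_lld_busy(q, my_stack_lld_busy);
}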
3454
78d8e58a
MS
3455/**
3456 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
3457 * @rq: the clone request to be cleaned up
3458 *
3459 * Description:
3460 * Free all bios in @rq for a cloned request.
3461 */
3462void blk_rq_unprep_clone(struct request *rq)
3463{
3464 struct bio *bio;
3465
3466 while ((bio = rq->bio) != NULL) {
3467 rq->bio = bio->bi_next;
3468
3469 bio_put(bio);
3470 }
3471}
3472EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
3473
3474/*
3475 * Copy attributes of the original request to the clone request.
3476 * The actual data parts (e.g. ->cmd, ->sense) are not copied.
3477 */
3478static void __blk_rq_prep_clone(struct request *dst, struct request *src)
b0fd271d
KU
3479{
3480 dst->cpu = src->cpu;
b0fd271d
KU
3481 dst->__sector = blk_rq_pos(src);
3482 dst->__data_len = blk_rq_bytes(src);
297ba57d
BVA
3483 if (src->rq_flags & RQF_SPECIAL_PAYLOAD) {
3484 dst->rq_flags |= RQF_SPECIAL_PAYLOAD;
3485 dst->special_vec = src->special_vec;
3486 }
b0fd271d
KU
3487 dst->nr_phys_segments = src->nr_phys_segments;
3488 dst->ioprio = src->ioprio;
3489 dst->extra_len = src->extra_len;
78d8e58a
MS
3490}
3491
3492/**
3493 * blk_rq_prep_clone - Helper function to setup clone request
3494 * @rq: the request to be setup
3495 * @rq_src: original request to be cloned
3496 * @bs: bio_set that bios for clone are allocated from
3497 * @gfp_mask: memory allocation mask for bio
3498 * @bio_ctr: setup function to be called for each clone bio.
3499 * Returns %0 for success, non %0 for failure.
3500 * @data: private data to be passed to @bio_ctr
3501 *
3502 * Description:
3503 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
3504 * The actual data parts of @rq_src (e.g. ->cmd, ->sense)
3505 * are not copied, and copying such parts is the caller's responsibility.
3506 * Also, pages which the original bios are pointing to are not copied
3507 * and the cloned bios just point to the same pages.
3508 * So cloned bios must be completed before original bios, which means
3509 * the caller must complete @rq before @rq_src.
3510 */
3511int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
3512 struct bio_set *bs, gfp_t gfp_mask,
3513 int (*bio_ctr)(struct bio *, struct bio *, void *),
3514 void *data)
3515{
3516 struct bio *bio, *bio_src;
3517
3518 if (!bs)
f4f8154a 3519 bs = &fs_bio_set;
78d8e58a
MS
3520
3521 __rq_for_each_bio(bio_src, rq_src) {
3522 bio = bio_clone_fast(bio_src, gfp_mask, bs);
3523 if (!bio)
3524 goto free_and_out;
3525
3526 if (bio_ctr && bio_ctr(bio, bio_src, data))
3527 goto free_and_out;
3528
3529 if (rq->bio) {
3530 rq->biotail->bi_next = bio;
3531 rq->biotail = bio;
3532 } else
3533 rq->bio = rq->biotail = bio;
3534 }
3535
3536 __blk_rq_prep_clone(rq, rq_src);
3537
3538 return 0;
3539
3540free_and_out:
3541 if (bio)
3542 bio_put(bio);
3543 blk_rq_unprep_clone(rq);
3544
3545 return -ENOMEM;
b0fd271d
KU
3546}
3547EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
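/*
 * Illustrative sketch (not part of blk-core.c): cloning an incoming request
 * before remapping it to a lower device, roughly in the style of dm-rq.
 * The "my_stack_*" names and the use of ->bi_private are hypothetical.
 */
static int my_stack_bio_ctr(struct bio *clone, struct bio *orig, void *data)
{
	clone->bi_private = data;	/* per-clone-bio setup */
	return 0;
}

static int my_stack_setup_clone(struct request *clone, struct request *orig,
				void *ctx, gfp_t gfp_mask)
{
	/* Passing a NULL bio_set makes the clone bios come from fs_bio_set. */
	int ret = blk_rq_prep_clone(clone, orig, NULL, gfp_mask,
				    my_stack_bio_ctr, ctx);

	/* On success, @clone must be completed before @orig. */
	return ret;
}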
3548
59c3d45e 3549int kblockd_schedule_work(struct work_struct *work)
1da177e4
LT
3550{
3551 return queue_work(kblockd_workqueue, work);
3552}
1da177e4
LT
3553EXPORT_SYMBOL(kblockd_schedule_work);
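/*
 * Illustrative sketch (not part of blk-core.c): deferring work from atomic
 * context to the kblockd workqueue.  The work item and handler below are
 * hypothetical.
 */
static void my_dev_requeue_fn(struct work_struct *work)
{
	/* Runs in process context on the WQ_MEM_RECLAIM kblockd workqueue. */
}

static DECLARE_WORK(my_dev_requeue_work, my_dev_requeue_fn);

static void my_dev_kick_requeue(void)
{
	kblockd_schedule_work(&my_dev_requeue_work);
}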
3554
ee63cfa7
JA
3555int kblockd_schedule_work_on(int cpu, struct work_struct *work)
3556{
3557 return queue_work_on(cpu, kblockd_workqueue, work);
3558}
3559EXPORT_SYMBOL(kblockd_schedule_work_on);
3560
818cd1cb
JA
3561int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
3562 unsigned long delay)
3563{
3564 return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
3565}
3566EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
3567
75df7136
SJ
3568/**
3569 * blk_start_plug - initialize blk_plug and track it inside the task_struct
3570 * @plug: The &struct blk_plug that needs to be initialized
3571 *
3572 * Description:
3573 * Tracking blk_plug inside the task_struct will help with auto-flushing the
3574 * pending I/O should the task end up blocking between blk_start_plug() and
3575 * blk_finish_plug(). This is important from a performance perspective, but
3576 * also ensures that we don't deadlock. For instance, if the task is blocking
3577 * for a memory allocation, memory reclaim could end up wanting to free a
3578 * page belonging to that request that is currently residing in our private
3579 * plug. By flushing the pending I/O when the process goes to sleep, we avoid
3580 * this kind of deadlock.
3581 */
73c10101
JA
3582void blk_start_plug(struct blk_plug *plug)
3583{
3584 struct task_struct *tsk = current;
3585
dd6cf3e1
SL
3586 /*
3587 * If this is a nested plug, don't actually assign it.
3588 */
3589 if (tsk->plug)
3590 return;
3591
73c10101 3592 INIT_LIST_HEAD(&plug->list);
320ae51f 3593 INIT_LIST_HEAD(&plug->mq_list);
048c9374 3594 INIT_LIST_HEAD(&plug->cb_list);
73c10101 3595 /*
dd6cf3e1
SL
3596 * Store ordering should not be needed here, since a potential
3597 * preempt will imply a full memory barrier
73c10101 3598 */
dd6cf3e1 3599 tsk->plug = plug;
73c10101
JA
3600}
3601EXPORT_SYMBOL(blk_start_plug);
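/*
 * Illustrative sketch (not part of blk-core.c): a submitter batching several
 * bios under one plug so the block layer can merge them and dispatch them in
 * one go when the plug is finished (or when the task sleeps).
 */
static void my_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]);
	blk_finish_plug(&plug);
}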
3602
3603static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
3604{
3605 struct request *rqa = container_of(a, struct request, queuelist);
3606 struct request *rqb = container_of(b, struct request, queuelist);
3607
975927b9
JM
3608 return !(rqa->q < rqb->q ||
3609 (rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb)));
73c10101
JA
3610}
3611
49cac01e
JA
3612/*
3613 * If 'from_schedule' is true, then postpone the dispatch of requests
3614 * until a safe kblockd context. We do this to avoid accidental big
3615 * additional stack usage in driver dispatch, in places where the original
3616 * plugger did not intend it.
3617 */
f6603783 3618static void queue_unplugged(struct request_queue *q, unsigned int depth,
49cac01e 3619 bool from_schedule)
99e22598 3620 __releases(q->queue_lock)
94b5eb28 3621{
2fff8a92
BVA
3622 lockdep_assert_held(q->queue_lock);
3623
49cac01e 3624 trace_block_unplug(q, depth, !from_schedule);
99e22598 3625
70460571 3626 if (from_schedule)
24ecfbe2 3627 blk_run_queue_async(q);
70460571 3628 else
24ecfbe2 3629 __blk_run_queue(q);
50864670 3630 spin_unlock_irq(q->queue_lock);
94b5eb28
JA
3631}
3632
74018dc3 3633static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
048c9374
N
3634{
3635 LIST_HEAD(callbacks);
3636
2a7d5559
SL
3637 while (!list_empty(&plug->cb_list)) {
3638 list_splice_init(&plug->cb_list, &callbacks);
048c9374 3639
2a7d5559
SL
3640 while (!list_empty(&callbacks)) {
3641 struct blk_plug_cb *cb = list_first_entry(&callbacks,
048c9374
N
3642 struct blk_plug_cb,
3643 list);
2a7d5559 3644 list_del(&cb->list);
74018dc3 3645 cb->callback(cb, from_schedule);
2a7d5559 3646 }
048c9374
N
3647 }
3648}
3649
9cbb1750
N
3650struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
3651 int size)
3652{
3653 struct blk_plug *plug = current->plug;
3654 struct blk_plug_cb *cb;
3655
3656 if (!plug)
3657 return NULL;
3658
3659 list_for_each_entry(cb, &plug->cb_list, list)
3660 if (cb->callback == unplug && cb->data == data)
3661 return cb;
3662
3663 /* Not currently on the callback list */
3664 BUG_ON(size < sizeof(*cb));
3665 cb = kzalloc(size, GFP_ATOMIC);
3666 if (cb) {
3667 cb->data = data;
3668 cb->callback = unplug;
3669 list_add(&cb->list, &plug->cb_list);
3670 }
3671 return cb;
3672}
3673EXPORT_SYMBOL(blk_check_plugged);
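/*
 * Illustrative sketch (not part of blk-core.c): per-task batching in the
 * style of the md/raid drivers.  A larger structure embeds struct
 * blk_plug_cb; blk_check_plugged() kzalloc()s it on first use, and the
 * callback is assumed to free it when the plug is flushed.  All "my_batch_*"
 * names are hypothetical.
 */
struct my_batch_cb {
	struct blk_plug_cb cb;
	struct bio_list pending;
};

static void my_batch_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct my_batch_cb *batch = container_of(cb, struct my_batch_cb, cb);
	struct bio *bio;

	while ((bio = bio_list_pop(&batch->pending)))
		submit_bio(bio);
	kfree(batch);
}

static bool my_batch_add_bio(void *dev, struct bio *bio)
{
	struct blk_plug_cb *cb;

	cb = blk_check_plugged(my_batch_unplug, dev, sizeof(struct my_batch_cb));
	if (!cb)
		return false;	/* no plug active; caller submits directly */

	bio_list_add(&container_of(cb, struct my_batch_cb, cb)->pending, bio);
	return true;
}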
3674
49cac01e 3675void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
73c10101
JA
3676{
3677 struct request_queue *q;
73c10101 3678 struct request *rq;
109b8129 3679 LIST_HEAD(list);
94b5eb28 3680 unsigned int depth;
73c10101 3681
74018dc3 3682 flush_plug_callbacks(plug, from_schedule);
320ae51f
JA
3683
3684 if (!list_empty(&plug->mq_list))
3685 blk_mq_flush_plug_list(plug, from_schedule);
3686
73c10101
JA
3687 if (list_empty(&plug->list))
3688 return;
3689
109b8129
N
3690 list_splice_init(&plug->list, &list);
3691
422765c2 3692 list_sort(NULL, &list, plug_rq_cmp);
73c10101
JA
3693
3694 q = NULL;
94b5eb28 3695 depth = 0;
18811272 3696
109b8129
N
3697 while (!list_empty(&list)) {
3698 rq = list_entry_rq(list.next);
73c10101 3699 list_del_init(&rq->queuelist);
73c10101
JA
3700 BUG_ON(!rq->q);
3701 if (rq->q != q) {
99e22598
JA
3702 /*
3703 * This drops the queue lock
3704 */
3705 if (q)
49cac01e 3706 queue_unplugged(q, depth, from_schedule);
73c10101 3707 q = rq->q;
94b5eb28 3708 depth = 0;
50864670 3709 spin_lock_irq(q->queue_lock);
73c10101 3710 }
8ba61435
TH
3711
3712 /*
3713 * Short-circuit if @q is dead
3714 */
3f3299d5 3715 if (unlikely(blk_queue_dying(q))) {
2a842aca 3716 __blk_end_request_all(rq, BLK_STS_IOERR);
8ba61435
TH
3717 continue;
3718 }
3719
73c10101
JA
3720 /*
3721 * rq is already accounted, so use raw insert
3722 */
f73f44eb 3723 if (op_is_flush(rq->cmd_flags))
401a18e9
JA
3724 __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
3725 else
3726 __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
94b5eb28
JA
3727
3728 depth++;
73c10101
JA
3729 }
3730
99e22598
JA
3731 /*
3732 * This drops the queue lock
3733 */
3734 if (q)
49cac01e 3735 queue_unplugged(q, depth, from_schedule);
73c10101 3736}
73c10101
JA
3737
3738void blk_finish_plug(struct blk_plug *plug)
3739{
dd6cf3e1
SL
3740 if (plug != current->plug)
3741 return;
f6603783 3742 blk_flush_plug_list(plug, false);
73c10101 3743
dd6cf3e1 3744 current->plug = NULL;
73c10101 3745}
88b996cd 3746EXPORT_SYMBOL(blk_finish_plug);
73c10101 3747
47fafbc7 3748#ifdef CONFIG_PM
6c954667
LM
3749/**
3750 * blk_pm_runtime_init - Block layer runtime PM initialization routine
3751 * @q: the queue of the device
3752 * @dev: the device the queue belongs to
3753 *
3754 * Description:
3755 * Initialize runtime-PM-related fields for @q and start auto suspend for
3756 * @dev. Drivers that want to take advantage of request-based runtime PM
3757 * should call this function after @dev has been initialized, and its
3758 * request queue @q has been allocated, and runtime PM for it cannot yet
3759 * happen (either because it is disabled/forbidden or its usage_count > 0).
3760 * In most cases, the driver should call this before any I/O has taken place.
3761 *
3762 * This function takes care of setting up autosuspend for the device; the
3763 * autosuspend delay is set to -1 to make runtime suspend impossible until
3764 * an updated value is set either by the user or by the driver. Drivers do
3765 * not need to touch other autosuspend settings.
3766 *
3767 * The block layer runtime PM is request based, so it only works for drivers
3768 * that use requests as their I/O unit rather than those that use bios directly.
3769 */
3770void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
3771{
765e40b6
CH
3772 /* not support for RQF_PM and ->rpm_status in blk-mq yet */
3773 if (q->mq_ops)
3774 return;
3775
6c954667
LM
3776 q->dev = dev;
3777 q->rpm_status = RPM_ACTIVE;
3778 pm_runtime_set_autosuspend_delay(q->dev, -1);
3779 pm_runtime_use_autosuspend(q->dev);
3780}
3781EXPORT_SYMBOL(blk_pm_runtime_init);
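/*
 * Illustrative sketch (not part of blk-core.c): a legacy request-based driver
 * enabling block layer runtime PM at probe time.  The 3000 ms delay and the
 * function name are hypothetical.
 */
static void my_dev_setup_runtime_pm(struct request_queue *q,
				    struct device *dev)
{
	blk_pm_runtime_init(q, dev);
	/*
	 * Override the -1 default so autosuspend can actually trigger;
	 * user space may also set it via power/autosuspend_delay_ms.
	 */
	pm_runtime_set_autosuspend_delay(dev, 3000);
}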
3782
3783/**
3784 * blk_pre_runtime_suspend - Pre runtime suspend check
3785 * @q: the queue of the device
3786 *
3787 * Description:
3788 * This function will check if runtime suspend is allowed for the device
3789 * by examining if there are any requests pending in the queue. If there
3790 * are requests pending, the device can not be runtime suspended; otherwise,
3791 * the queue's status will be updated to SUSPENDING and the driver can
3792 * proceed to suspend the device.
3793 *
3794 * For the not allowed case, we mark last busy for the device so that
3795 * runtime PM core will try to autosuspend it some time later.
3796 *
3797 * This function should be called near the start of the device's
3798 * runtime_suspend callback.
3799 *
3800 * Return:
3801 * 0 - OK to runtime suspend the device
3802 * -EBUSY - Device should not be runtime suspended
3803 */
3804int blk_pre_runtime_suspend(struct request_queue *q)
3805{
3806 int ret = 0;
3807
4fd41a85
KX
3808 if (!q->dev)
3809 return ret;
3810
6c954667
LM
3811 spin_lock_irq(q->queue_lock);
3812 if (q->nr_pending) {
3813 ret = -EBUSY;
3814 pm_runtime_mark_last_busy(q->dev);
3815 } else {
3816 q->rpm_status = RPM_SUSPENDING;
3817 }
3818 spin_unlock_irq(q->queue_lock);
3819 return ret;
3820}
3821EXPORT_SYMBOL(blk_pre_runtime_suspend);
3822
3823/**
3824 * blk_post_runtime_suspend - Post runtime suspend processing
3825 * @q: the queue of the device
3826 * @err: return value of the device's runtime_suspend function
3827 *
3828 * Description:
3829 * Update the queue's runtime status according to the return value of the
3830 * device's runtime suspend function and mark last busy for the device so
3831 * that PM core will try to auto suspend the device at a later time.
3832 *
3833 * This function should be called near the end of the device's
3834 * runtime_suspend callback.
3835 */
3836void blk_post_runtime_suspend(struct request_queue *q, int err)
3837{
4fd41a85
KX
3838 if (!q->dev)
3839 return;
3840
6c954667
LM
3841 spin_lock_irq(q->queue_lock);
3842 if (!err) {
3843 q->rpm_status = RPM_SUSPENDED;
3844 } else {
3845 q->rpm_status = RPM_ACTIVE;
3846 pm_runtime_mark_last_busy(q->dev);
3847 }
3848 spin_unlock_irq(q->queue_lock);
3849}
3850EXPORT_SYMBOL(blk_post_runtime_suspend);
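/*
 * Illustrative sketch (not part of blk-core.c): a driver's runtime_suspend
 * callback bracketing its own suspend work with the two helpers above.  The
 * queue is assumed to be reachable through drvdata; the hardware step is a
 * placeholder.
 */
static int my_dev_runtime_suspend(struct device *dev)
{
	struct request_queue *q = dev_get_drvdata(dev);
	int err;

	err = blk_pre_runtime_suspend(q);
	if (err)
		return err;	/* requests pending: stay RPM_ACTIVE */

	err = 0;		/* ... quiesce the hardware here ... */
	blk_post_runtime_suspend(q, err);
	return err;
}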
3851
3852/**
3853 * blk_pre_runtime_resume - Pre runtime resume processing
3854 * @q: the queue of the device
3855 *
3856 * Description:
3857 * Update the queue's runtime status to RESUMING in preparation for the
3858 * runtime resume of the device.
3859 *
3860 * This function should be called near the start of the device's
3861 * runtime_resume callback.
3862 */
3863void blk_pre_runtime_resume(struct request_queue *q)
3864{
4fd41a85
KX
3865 if (!q->dev)
3866 return;
3867
6c954667
LM
3868 spin_lock_irq(q->queue_lock);
3869 q->rpm_status = RPM_RESUMING;
3870 spin_unlock_irq(q->queue_lock);
3871}
3872EXPORT_SYMBOL(blk_pre_runtime_resume);
3873
3874/**
3875 * blk_post_runtime_resume - Post runtime resume processing
3876 * @q: the queue of the device
3877 * @err: return value of the device's runtime_resume function
3878 *
3879 * Description:
3880 * Update the queue's runtime status according to the return value of the
3881 * device's runtime_resume function. If it is successfully resumed, process
3882 * the requests that are queued into the device's queue when it is resuming
3883 * and then mark last busy and initiate autosuspend for it.
3884 *
3885 * This function should be called near the end of the device's
3886 * runtime_resume callback.
3887 */
3888void blk_post_runtime_resume(struct request_queue *q, int err)
3889{
4fd41a85
KX
3890 if (!q->dev)
3891 return;
3892
6c954667
LM
3893 spin_lock_irq(q->queue_lock);
3894 if (!err) {
3895 q->rpm_status = RPM_ACTIVE;
3896 __blk_run_queue(q);
3897 pm_runtime_mark_last_busy(q->dev);
c60855cd 3898 pm_request_autosuspend(q->dev);
6c954667
LM
3899 } else {
3900 q->rpm_status = RPM_SUSPENDED;
3901 }
3902 spin_unlock_irq(q->queue_lock);
3903}
3904EXPORT_SYMBOL(blk_post_runtime_resume);
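/*
 * Illustrative sketch (not part of blk-core.c): the matching runtime_resume
 * callback.  Again the queue is assumed to live in drvdata and the hardware
 * wakeup is a placeholder.
 */
static int my_dev_runtime_resume(struct device *dev)
{
	struct request_queue *q = dev_get_drvdata(dev);
	int err;

	blk_pre_runtime_resume(q);
	err = 0;		/* ... wake the hardware here ... */
	/* On success this restarts the queue and re-arms autosuspend. */
	blk_post_runtime_resume(q, err);
	return err;
}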
d07ab6d1
MW
3905
3906/**
3907 * blk_set_runtime_active - Force runtime status of the queue to be active
3908 * @q: the queue of the device
3909 *
3910 * If the device is left runtime suspended during system suspend, the resume
3911 * hook typically resumes the device and corrects runtime status
3912 * accordingly. However, that does not affect the queue runtime PM status
3913 * which is still "suspended". This prevents processing requests from the
3914 * queue.
3915 *
3916 * This function can be used in driver's resume hook to correct queue
3917 * runtime PM status and re-enable peeking requests from the queue. It
3918 * should be called before the first request is added to the queue.
3919 */
3920void blk_set_runtime_active(struct request_queue *q)
3921{
3922 spin_lock_irq(q->queue_lock);
3923 q->rpm_status = RPM_ACTIVE;
3924 pm_runtime_mark_last_busy(q->dev);
3925 pm_request_autosuspend(q->dev);
3926 spin_unlock_irq(q->queue_lock);
3927}
3928EXPORT_SYMBOL(blk_set_runtime_active);
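/*
 * Illustrative sketch (not part of blk-core.c): a system resume hook fixing
 * up the queue's runtime PM status when the device was left runtime
 * suspended across system sleep.  The hardware resume step is a placeholder
 * and any PM core bookkeeping the driver needs is omitted.
 */
static int my_dev_system_resume(struct device *dev)
{
	struct request_queue *q = dev_get_drvdata(dev);
	int err;

	err = 0;		/* ... bring the hardware back up here ... */
	if (!err)
		blk_set_runtime_active(q);
	return err;
}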
6c954667
LM
3929#endif
3930
1da177e4
LT
3931int __init blk_dev_init(void)
3932{
ef295ecf
CH
3933 BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
3934 BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
0762b23d 3935 FIELD_SIZEOF(struct request, cmd_flags));
ef295ecf
CH
3936 BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
3937 FIELD_SIZEOF(struct bio, bi_opf));
9eb55b03 3938
89b90be2
TH
3939 /* used for unplugging and affects IO latency/throughput - HIGHPRI */
3940 kblockd_workqueue = alloc_workqueue("kblockd",
28747fcd 3941 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
1da177e4
LT
3942 if (!kblockd_workqueue)
3943 panic("Failed to create kblockd\n");
3944
3945 request_cachep = kmem_cache_create("blkdev_requests",
20c2df83 3946 sizeof(struct request), 0, SLAB_PANIC, NULL);
1da177e4 3947
c2789bd4 3948 blk_requestq_cachep = kmem_cache_create("request_queue",
165125e1 3949 sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
1da177e4 3950
18fbda91
OS
3951#ifdef CONFIG_DEBUG_FS
3952 blk_debugfs_root = debugfs_create_dir("block", NULL);
3953#endif
3954
d38ecf93 3955 return 0;
1da177e4 3956}