// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	- July 2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-pm.h>
#include <linux/blk-integrity.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/t10-pi.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>
#include <linux/part_stat.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"

struct dentry *blk_debugfs_root;

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);

DEFINE_IDA(blk_queue_ida);

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;
struct kmem_cache *blk_requestq_srcu_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

/**
 * blk_queue_flag_set - atomically set a queue flag
 * @flag: flag to be set
 * @q: request queue
 */
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_set);

/**
 * blk_queue_flag_clear - atomically clear a queue flag
 * @flag: flag to be cleared
 * @q: request queue
 */
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	clear_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_clear);

/**
 * blk_queue_flag_test_and_set - atomically test and set a queue flag
 * @flag: flag to be set
 * @q: request queue
 *
 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
 * the flag was already set.
 */
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
{
	return test_and_set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);

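/*
 * Usage sketch (illustrative, not part of blk-core.c): a driver typically
 * sets or clears queue flags right after allocating its queue. The
 * QUEUE_FLAG_* names below are real flags; "q" is assumed to be the
 * driver's request_queue.
 *
 *	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 *	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 *	if (blk_queue_flag_test_and_set(QUEUE_FLAG_STOPPED, q))
 *		pr_debug("queue was already stopped\n");
 */
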
#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const blk_op_name[] = {
	REQ_OP_NAME(READ),
	REQ_OP_NAME(WRITE),
	REQ_OP_NAME(FLUSH),
	REQ_OP_NAME(DISCARD),
	REQ_OP_NAME(SECURE_ERASE),
	REQ_OP_NAME(ZONE_RESET),
	REQ_OP_NAME(ZONE_RESET_ALL),
	REQ_OP_NAME(ZONE_OPEN),
	REQ_OP_NAME(ZONE_CLOSE),
	REQ_OP_NAME(ZONE_FINISH),
	REQ_OP_NAME(ZONE_APPEND),
	REQ_OP_NAME(WRITE_ZEROES),
	REQ_OP_NAME(DRV_IN),
	REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME

/**
 * blk_op_str - Return the string for a REQ_OP_XXX operation
 * @op: REQ_OP_XXX.
 *
 * Description: Centralized block layer helper to convert a REQ_OP_XXX value
 * into its string representation. Useful when debugging or tracing a bio or
 * request. For an invalid REQ_OP_XXX it returns the string "UNKNOWN".
 */
inline const char *blk_op_str(enum req_op op)
{
	const char *op_str = "UNKNOWN";

	if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
		op_str = blk_op_name[op];

	return op_str;
}
EXPORT_SYMBOL_GPL(blk_op_str);

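/*
 * Usage sketch (illustrative, not part of blk-core.c): blk_op_str() is handy
 * when logging a bio from a driver's error or debug path.
 *
 *	pr_debug("%pg: %s at sector %llu\n", bio->bi_bdev,
 *		 blk_op_str(bio_op(bio)),
 *		 (unsigned long long)bio->bi_iter.bi_sector);
 */
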
static const struct {
	int		errno;
	const char	*name;
} blk_errors[] = {
	[BLK_STS_OK]		= { 0,		"" },
	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
	[BLK_STS_DEV_RESOURCE]	= { -EBUSY,	"device resource" },
	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },
	[BLK_STS_OFFLINE]	= { -ENODEV,	"device offline" },

	/* device mapper special case, should not leak out: */
	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },

	/* zone device specific errors */
	[BLK_STS_ZONE_OPEN_RESOURCE]	= { -ETOOMANYREFS, "open zones exceeded" },
	[BLK_STS_ZONE_ACTIVE_RESOURCE]	= { -EOVERFLOW, "active zones exceeded" },

	/* everything else not covered above: */
	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
};

blk_status_t errno_to_blk_status(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
		if (blk_errors[i].errno == errno)
			return (__force blk_status_t)i;
	}

	return BLK_STS_IOERR;
}
EXPORT_SYMBOL_GPL(errno_to_blk_status);

int blk_status_to_errno(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return -EIO;
	return blk_errors[idx].errno;
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);

const char *blk_status_to_str(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return "<null>";
	return blk_errors[idx].name;
}

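/*
 * Usage sketch (illustrative, not part of blk-core.c): a blk-mq driver
 * converts its internal errno into a blk_status_t when completing a request,
 * and the status can be mapped back for logging. "my_hw_submit" is a
 * hypothetical helper returning a negative errno on failure.
 *
 *	blk_status_t sts = errno_to_blk_status(my_hw_submit(rq));
 *
 *	if (sts != BLK_STS_OK)
 *		pr_warn("I/O failed: %s (%d)\n", blk_status_to_str(sts),
 *			blk_status_to_errno(sts));
 *	blk_mq_end_request(rq, sts);
 */
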
/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->submit_bio will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_work_sync(&q->timeout_work);
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * blk_set_pm_only - increment pm_only counter
 * @q: request queue pointer
 */
void blk_set_pm_only(struct request_queue *q)
{
	atomic_inc(&q->pm_only);
}
EXPORT_SYMBOL_GPL(blk_set_pm_only);

void blk_clear_pm_only(struct request_queue *q)
{
	int pm_only;

	pm_only = atomic_dec_return(&q->pm_only);
	WARN_ON_ONCE(pm_only < 0);
	if (pm_only == 0)
		wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_clear_pm_only);

/**
 * blk_put_queue - decrement the request_queue refcount
 * @q: the request_queue structure to decrement the refcount for
 *
 * Decrements the refcount of the request_queue kobject. When this reaches 0
 * we'll have blk_release_queue() called.
 *
 * Context: Any context, but the last reference must not be dropped from
 *	    atomic context.
 */
void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

void blk_queue_start_drain(struct request_queue *q)
{
	/*
	 * When queue DYING flag is set, we need to block new req
	 * entering queue, so we call blk_freeze_queue_start() to
	 * prevent I/O from crossing blk_queue_enter().
	 */
	blk_freeze_queue_start(q);
	if (queue_is_mq(q))
		blk_mq_wake_waiters(q);
	/* Make blk_queue_enter() reexamine the DYING flag. */
	wake_up_all(&q->mq_freeze_wq);
}

/**
 * blk_queue_enter() - try to increase q->q_usage_counter
 * @q: request queue pointer
 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
 */
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
	const bool pm = flags & BLK_MQ_REQ_PM;

	while (!blk_try_enter_queue(q, pm)) {
		if (flags & BLK_MQ_REQ_NOWAIT)
			return -EAGAIN;

		/*
		 * This is the read-side pairing of the barrier in
		 * blk_freeze_queue_start(): we need to order reading the
		 * __PERCPU_REF_DEAD flag of .q_usage_counter against reading
		 * .mq_freeze_depth or the queue dying flag, otherwise the
		 * following wait may never return if the two reads are
		 * reordered.
		 */
		smp_rmb();
		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    blk_pm_resume_queue(pm, q)) ||
			   blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
	}

	return 0;
}

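/*
 * Usage sketch (illustrative, not part of blk-core.c): code that touches a
 * queue outside the normal bio path brackets the access with
 * blk_queue_enter()/blk_queue_exit() so it cannot race with queue freezing
 * or teardown.
 *
 *	ret = blk_queue_enter(q, BLK_MQ_REQ_NOWAIT);
 *	if (ret)
 *		return ret;
 *	... access q ...
 *	blk_queue_exit(q);
 */
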
int __bio_queue_enter(struct request_queue *q, struct bio *bio)
{
	while (!blk_try_enter_queue(q, false)) {
		struct gendisk *disk = bio->bi_bdev->bd_disk;

		if (bio->bi_opf & REQ_NOWAIT) {
			if (test_bit(GD_DEAD, &disk->state))
				goto dead;
			bio_wouldblock_error(bio);
			return -EAGAIN;
		}

		/*
		 * This is the read-side pairing of the barrier in
		 * blk_freeze_queue_start(): we need to order reading the
		 * __PERCPU_REF_DEAD flag of .q_usage_counter against reading
		 * .mq_freeze_depth or the queue dying flag, otherwise the
		 * following wait may never return if the two reads are
		 * reordered.
		 */
		smp_rmb();
		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    blk_pm_resume_queue(false, q)) ||
			   test_bit(GD_DEAD, &disk->state));
		if (test_bit(GD_DEAD, &disk->state))
			goto dead;
	}

	return 0;
dead:
	bio_io_error(bio);
	return -ENODEV;
}

void blk_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->q_usage_counter);
}

static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, q_usage_counter);

	wake_up_all(&q->mq_freeze_wq);
}

static void blk_rq_timed_out_timer(struct timer_list *t)
{
	struct request_queue *q = from_timer(q, t, timeout);

	kblockd_schedule_work(&q->timeout_work);
}

static void blk_timeout_work(struct work_struct *work)
{
}

struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
{
	struct request_queue *q;

	q = kmem_cache_alloc_node(blk_get_queue_kmem_cache(alloc_srcu),
			GFP_KERNEL | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	if (alloc_srcu) {
		blk_queue_flag_set(QUEUE_FLAG_HAS_SRCU, q);
		if (init_srcu_struct(q->srcu) != 0)
			goto fail_q;
	}

	q->last_merge = NULL;

	q->id = ida_alloc(&blk_queue_ida, GFP_KERNEL);
	if (q->id < 0)
		goto fail_srcu;

	q->stats = blk_alloc_queue_stats();
	if (!q->stats)
		goto fail_id;

	q->node = node_id;

	atomic_set(&q->nr_active_requests_shared_tags, 0);

	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
	INIT_WORK(&q->timeout_work, blk_timeout_work);
	INIT_LIST_HEAD(&q->icq_list);

	kobject_init(&q->kobj, &blk_queue_ktype);

	mutex_init(&q->debugfs_mutex);
	mutex_init(&q->sysfs_lock);
	mutex_init(&q->sysfs_dir_lock);
	spin_lock_init(&q->queue_lock);

	init_waitqueue_head(&q->mq_freeze_wq);
	mutex_init(&q->mq_freeze_lock);

	/*
	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
	 * See blk_register_queue() for details.
	 */
	if (percpu_ref_init(&q->q_usage_counter,
				blk_queue_usage_counter_release,
				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
		goto fail_stats;

	blk_set_default_limits(&q->limits);
	q->nr_requests = BLKDEV_DEFAULT_RQ;

	return q;

fail_stats:
	blk_free_queue_stats(q->stats);
fail_id:
	ida_free(&blk_queue_ida, q->id);
fail_srcu:
	if (alloc_srcu)
		cleanup_srcu_struct(q->srcu);
fail_q:
	kmem_cache_free(blk_get_queue_kmem_cache(alloc_srcu), q);
	return NULL;
}

/**
 * blk_get_queue - increment the request_queue refcount
 * @q: the request_queue structure to increment the refcount for
 *
 * Increment the refcount of the request_queue kobject.
 *
 * Context: Any context.
 */
bool blk_get_queue(struct request_queue *q)
{
	if (unlikely(blk_queue_dying(q)))
		return false;
	kobject_get(&q->kobj);
	return true;
}
EXPORT_SYMBOL(blk_get_queue);

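/*
 * Usage sketch (illustrative, not part of blk-core.c): a caller that wants
 * to keep a request_queue alive beyond the current context takes a
 * reference and drops it when done; blk_get_queue() fails once the queue
 * is dying.
 *
 *	if (!blk_get_queue(q))
 *		return -ENXIO;
 *	... use q, possibly after sleeping ...
 *	blk_put_queue(q);
 */
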
#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

bool should_fail_request(struct block_device *part, unsigned int bytes)
{
	return part->bd_make_it_fail && should_fail(&fail_make_request, bytes);
}

static int __init fail_make_request_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
						NULL, &fail_make_request);

	return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_make_request_debugfs);
#endif /* CONFIG_FAIL_MAKE_REQUEST */

static inline void bio_check_ro(struct bio *bio)
{
	if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
		if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
			return;
		pr_warn("Trying to write to read-only block-device %pg\n",
			bio->bi_bdev);
		/* Older lvm-tools actually trigger this */
	}
}

static noinline int should_fail_bio(struct bio *bio)
{
	if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size))
		return -EIO;
	return 0;
}
ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);

/*
 * Check whether this bio extends beyond the end of the device or partition.
 * This may well happen - the kernel calls bread() without checking the size of
 * the device, e.g., when mounting a file system.
 */
static inline int bio_check_eod(struct bio *bio)
{
	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
	unsigned int nr_sectors = bio_sectors(bio);

	if (nr_sectors && maxsector &&
	    (nr_sectors > maxsector ||
	     bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
		pr_info_ratelimited("%s: attempt to access beyond end of device\n"
				    "%pg: rw=%d, sector=%llu, nr_sectors = %u limit=%llu\n",
				    current->comm, bio->bi_bdev, bio->bi_opf,
				    bio->bi_iter.bi_sector, nr_sectors, maxsector);
		return -EIO;
	}
	return 0;
}

/*
 * Remap block n of partition p to block n+start(p) of the disk.
 */
static int blk_partition_remap(struct bio *bio)
{
	struct block_device *p = bio->bi_bdev;

	if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
		return -EIO;
	if (bio_sectors(bio)) {
		bio->bi_iter.bi_sector += p->bd_start_sect;
		trace_block_bio_remap(bio, p->bd_dev,
				      bio->bi_iter.bi_sector -
				      p->bd_start_sect);
	}
	bio_set_flag(bio, BIO_REMAPPED);
	return 0;
}

/*
 * Check write append to a zoned block device.
 */
static inline blk_status_t blk_check_zone_append(struct request_queue *q,
						 struct bio *bio)
{
	int nr_sectors = bio_sectors(bio);

	/* Only applicable to zoned block devices */
	if (!bdev_is_zoned(bio->bi_bdev))
		return BLK_STS_NOTSUPP;

	/* The bio sector must point to the start of a sequential zone */
	if (bio->bi_iter.bi_sector & (bdev_zone_sectors(bio->bi_bdev) - 1) ||
	    !bio_zone_is_seq(bio))
		return BLK_STS_IOERR;

	/*
	 * Not allowed to cross zone boundaries. Otherwise, the BIO will be
	 * split and could result in non-contiguous sectors being written in
	 * different zones.
	 */
	if (nr_sectors > q->limits.chunk_sectors)
		return BLK_STS_IOERR;

	/* Make sure the BIO is small enough and will not get split */
	if (nr_sectors > q->limits.max_zone_append_sectors)
		return BLK_STS_IOERR;

	bio->bi_opf |= REQ_NOMERGE;

	return BLK_STS_OK;
}

static void __submit_bio(struct bio *bio)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;

	if (unlikely(!blk_crypto_bio_prep(&bio)))
		return;

	if (!disk->fops->submit_bio) {
		blk_mq_submit_bio(bio);
	} else if (likely(bio_queue_enter(bio) == 0)) {
		disk->fops->submit_bio(bio);
		blk_queue_exit(disk->queue);
	}
}

/*
 * The loop in this function may be a bit non-obvious, and so deserves some
 * explanation:
 *
 *  - Before entering the loop, bio->bi_next is NULL (as all callers ensure
 *    that), so we have a list with a single bio.
 *  - We pretend that we have just taken it off a longer list, so we assign
 *    bio_list to a pointer to the bio_list_on_stack, thus initialising the
 *    bio_list of new bios to be added. ->submit_bio() may indeed add some more
 *    bios through a recursive call to submit_bio_noacct. If it did, we find a
 *    non-NULL value in bio_list and re-enter the loop from the top.
 *  - In this case we really did just take the bio off the top of the list (no
 *    pretending) and so remove it from bio_list, and call into ->submit_bio()
 *    again.
 *
 * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio.
 * bio_list_on_stack[1] contains bios that were submitted before the current
 *	->submit_bio, but that haven't been processed yet.
 */
static void __submit_bio_noacct(struct bio *bio)
{
	struct bio_list bio_list_on_stack[2];

	BUG_ON(bio->bi_next);

	bio_list_init(&bio_list_on_stack[0]);
	current->bio_list = bio_list_on_stack;

	do {
		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
		struct bio_list lower, same;

		/*
		 * Create a fresh bio_list for all subordinate requests.
		 */
		bio_list_on_stack[1] = bio_list_on_stack[0];
		bio_list_init(&bio_list_on_stack[0]);

		__submit_bio(bio);

		/*
		 * Sort new bios into those for a lower level and those for the
		 * same level.
		 */
		bio_list_init(&lower);
		bio_list_init(&same);
		while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
			if (q == bdev_get_queue(bio->bi_bdev))
				bio_list_add(&same, bio);
			else
				bio_list_add(&lower, bio);

		/*
		 * Now assemble so we handle the lowest level first.
		 */
		bio_list_merge(&bio_list_on_stack[0], &lower);
		bio_list_merge(&bio_list_on_stack[0], &same);
		bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
	} while ((bio = bio_list_pop(&bio_list_on_stack[0])));

	current->bio_list = NULL;
}

static void __submit_bio_noacct_mq(struct bio *bio)
{
	struct bio_list bio_list[2] = { };

	current->bio_list = bio_list;

	do {
		__submit_bio(bio);
	} while ((bio = bio_list_pop(&bio_list[0])));

	current->bio_list = NULL;
}

void submit_bio_noacct_nocheck(struct bio *bio)
{
	/*
	 * We only want one ->submit_bio to be active at a time, else stack
	 * usage with stacked devices could be a problem. Use current->bio_list
	 * to collect a list of requests submitted by a ->submit_bio method
	 * while it is active, and then process them after it has returned.
	 */
	if (current->bio_list)
		bio_list_add(&current->bio_list[0], bio);
	else if (!bio->bi_bdev->bd_disk->fops->submit_bio)
		__submit_bio_noacct_mq(bio);
	else
		__submit_bio_noacct(bio);
}

/**
 * submit_bio_noacct - re-submit a bio to the block device layer for I/O
 * @bio: The bio describing the location in memory and on the device.
 *
 * This is a version of submit_bio() that shall only be used for I/O that is
 * resubmitted to lower level drivers by stacking block drivers. All file
 * systems and other upper level users of the block layer should use
 * submit_bio() instead.
 */
void submit_bio_noacct(struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	blk_status_t status = BLK_STS_IOERR;
	struct blk_plug *plug;

	might_sleep();

	plug = blk_mq_plug(bio);
	if (plug && plug->nowait)
		bio->bi_opf |= REQ_NOWAIT;

	/*
	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
	 * if queue does not support NOWAIT.
	 */
	if ((bio->bi_opf & REQ_NOWAIT) && !bdev_nowait(bdev))
		goto not_supported;

	if (should_fail_bio(bio))
		goto end_io;
	bio_check_ro(bio);
	if (!bio_flagged(bio, BIO_REMAPPED)) {
		if (unlikely(bio_check_eod(bio)))
			goto end_io;
		if (bdev->bd_partno && unlikely(blk_partition_remap(bio)))
			goto end_io;
	}

	/*
	 * Filter flush bio's early so that bio based drivers without flush
	 * support don't have to worry about them.
	 */
	if (op_is_flush(bio->bi_opf) &&
	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
		if (!bio_sectors(bio)) {
			status = BLK_STS_OK;
			goto end_io;
		}
	}

	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		bio_clear_polled(bio);

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
		if (!bdev_max_discard_sectors(bdev))
			goto not_supported;
		break;
	case REQ_OP_SECURE_ERASE:
		if (!bdev_max_secure_erase_sectors(bdev))
			goto not_supported;
		break;
	case REQ_OP_ZONE_APPEND:
		status = blk_check_zone_append(q, bio);
		if (status != BLK_STS_OK)
			goto end_io;
		break;
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		if (!bdev_is_zoned(bio->bi_bdev))
			goto not_supported;
		break;
	case REQ_OP_ZONE_RESET_ALL:
		if (!bdev_is_zoned(bio->bi_bdev) || !blk_queue_zone_resetall(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_ZEROES:
		if (!q->limits.max_write_zeroes_sectors)
			goto not_supported;
		break;
	default:
		break;
	}

	if (blk_throtl_bio(bio))
		return;

	blk_cgroup_bio_start(bio);
	blkcg_bio_issue_init(bio);

	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_queue(bio);
		/* Now that enqueuing has been traced, we need to trace
		 * completion as well.
		 */
		bio_set_flag(bio, BIO_TRACE_COMPLETION);
	}
	submit_bio_noacct_nocheck(bio);
	return;

not_supported:
	status = BLK_STS_NOTSUPP;
end_io:
	bio->bi_status = status;
	bio_endio(bio);
}
EXPORT_SYMBOL(submit_bio_noacct);

/**
 * submit_bio - submit a bio to the block device layer for I/O
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is used to submit I/O requests to block devices. It is passed a
 * fully set up &struct bio that describes the I/O that needs to be done. The
 * bio will be sent to the device described by the bi_bdev field.
 *
 * The success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the ->bi_end_io() callback
 * in @bio. The bio must NOT be touched by the caller until ->bi_end_io() has
 * been called.
 */
void submit_bio(struct bio *bio)
{
	if (blkcg_punt_bio_submit(bio))
		return;

	if (bio_op(bio) == REQ_OP_READ) {
		task_io_account_read(bio->bi_iter.bi_size);
		count_vm_events(PGPGIN, bio_sectors(bio));
	} else if (bio_op(bio) == REQ_OP_WRITE) {
		count_vm_events(PGPGOUT, bio_sectors(bio));
	}

	submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio);

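/*
 * Usage sketch (illustrative, not part of blk-core.c): a typical submitter
 * allocates a bio against a block_device, fills in the target sector, the
 * page to transfer and a completion callback, then hands it off with
 * submit_bio(). "my_end_io" and "ctx" are hypothetical.
 *
 *	struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_NOIO);
 *
 *	bio->bi_iter.bi_sector = sector;
 *	__bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio->bi_end_io = my_end_io;
 *	bio->bi_private = ctx;
 *	submit_bio(bio);
 */
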
/**
 * bio_poll - poll for BIO completions
 * @bio: bio to poll for
 * @iob: batches of IO
 * @flags: BLK_POLL_* flags that control the behavior
 *
 * Poll for completions on the queue associated with @bio. Returns the number
 * of completed entries found.
 *
 * Note: the caller must either be the context that submitted @bio, or
 * be in an RCU critical section to prevent freeing of @bio.
 */
int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	blk_qc_t cookie = READ_ONCE(bio->bi_cookie);
	int ret = 0;

	if (cookie == BLK_QC_T_NONE ||
	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		return 0;

	/*
	 * As the requests that require a zone lock are not plugged in the
	 * first place, directly accessing the plug instead of using
	 * blk_mq_plug() should not have any consequences during flushing for
	 * zoned devices.
	 */
	blk_flush_plug(current->plug, false);

	if (bio_queue_enter(bio))
		return 0;
	if (queue_is_mq(q)) {
		ret = blk_mq_poll(q, cookie, iob, flags);
	} else {
		struct gendisk *disk = q->disk;

		if (disk && disk->fops->poll_bio)
			ret = disk->fops->poll_bio(bio, iob, flags);
	}
	blk_queue_exit(q);
	return ret;
}
EXPORT_SYMBOL_GPL(bio_poll);

/*
 * Helper to implement file_operations.iopoll. Requires the bio to be stored
 * in iocb->private, and cleared before freeing the bio.
 */
int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
		    unsigned int flags)
{
	struct bio *bio;
	int ret = 0;

	/*
	 * Note: the bio cache only uses SLAB_TYPESAFE_BY_RCU, so bio can
	 * point to a freshly allocated bio at this point. If that happens
	 * we have a few cases to consider:
	 *
	 *  1) the bio is being initialized and bi_bdev is NULL. We can simply
	 *     do nothing in this case
	 *  2) the bio points to a device that is not poll enabled. bio_poll()
	 *     will catch this and return 0
	 *  3) the bio points to a poll capable device, including but not
	 *     limited to the one that the original bio pointed to. In this
	 *     case we will call into the actual poll method and poll for I/O,
	 *     even if we don't need to, but it won't cause harm either.
	 *
	 * For cases 2) and 3) above the RCU grace period ensures that bi_bdev
	 * is still allocated. Because partitions hold a reference to the whole
	 * device bdev and thus disk, the disk is also still valid. Grabbing
	 * a reference to the queue in bio_poll() ensures the hctxs and requests
	 * are still valid as well.
	 */
	rcu_read_lock();
	bio = READ_ONCE(kiocb->private);
	if (bio && bio->bi_bdev)
		ret = bio_poll(bio, iob, flags);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(iocb_bio_iopoll);

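/*
 * Usage sketch (illustrative, not part of blk-core.c): a driver or
 * filesystem that issues polled bios stores the in-flight bio in
 * kiocb->private and wires iocb_bio_iopoll() up as its ->iopoll handler.
 * "my_fops" and "my_read_iter" are hypothetical.
 *
 *	static const struct file_operations my_fops = {
 *		.read_iter	= my_read_iter,
 *		.iopoll		= iocb_bio_iopoll,
 *	};
 *
 * and in my_read_iter(), after allocating the bio:
 *
 *	WRITE_ONCE(kiocb->private, bio);
 */
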
void update_io_ticks(struct block_device *part, unsigned long now, bool end)
{
	unsigned long stamp;
again:
	stamp = READ_ONCE(part->bd_stamp);
	if (unlikely(time_after(now, stamp))) {
		if (likely(try_cmpxchg(&part->bd_stamp, &stamp, now)))
			__part_stat_add(part, io_ticks, end ? now - stamp : 1);
	}
	if (part->bd_partno) {
		part = bdev_whole(part);
		goto again;
	}
}

unsigned long bdev_start_io_acct(struct block_device *bdev,
				 unsigned int sectors, enum req_op op,
				 unsigned long start_time)
{
	const int sgrp = op_stat_group(op);

	part_stat_lock();
	update_io_ticks(bdev, start_time, false);
	part_stat_inc(bdev, ios[sgrp]);
	part_stat_add(bdev, sectors[sgrp], sectors);
	part_stat_local_inc(bdev, in_flight[op_is_write(op)]);
	part_stat_unlock();

	return start_time;
}
EXPORT_SYMBOL(bdev_start_io_acct);

/**
 * bio_start_io_acct_time - start I/O accounting for bio based drivers
 * @bio: bio to start account for
 * @start_time: start time that should be passed back to bio_end_io_acct().
 */
void bio_start_io_acct_time(struct bio *bio, unsigned long start_time)
{
	bdev_start_io_acct(bio->bi_bdev, bio_sectors(bio),
			   bio_op(bio), start_time);
}
EXPORT_SYMBOL_GPL(bio_start_io_acct_time);

/**
 * bio_start_io_acct - start I/O accounting for bio based drivers
 * @bio: bio to start account for
 *
 * Returns the start time that should be passed back to bio_end_io_acct().
 */
unsigned long bio_start_io_acct(struct bio *bio)
{
	return bdev_start_io_acct(bio->bi_bdev, bio_sectors(bio),
				  bio_op(bio), jiffies);
}
EXPORT_SYMBOL_GPL(bio_start_io_acct);

void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
		      unsigned long start_time)
{
	const int sgrp = op_stat_group(op);
	unsigned long now = READ_ONCE(jiffies);
	unsigned long duration = now - start_time;

	part_stat_lock();
	update_io_ticks(bdev, now, true);
	part_stat_add(bdev, nsecs[sgrp], jiffies_to_nsecs(duration));
	part_stat_local_dec(bdev, in_flight[op_is_write(op)]);
	part_stat_unlock();
}
EXPORT_SYMBOL(bdev_end_io_acct);

void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
			      struct block_device *orig_bdev)
{
	bdev_end_io_acct(orig_bdev, bio_op(bio), start_time);
}
EXPORT_SYMBOL_GPL(bio_end_io_acct_remapped);

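/*
 * Usage sketch (illustrative, not part of blk-core.c): a bio based driver
 * that wants per-device I/O statistics records the start time when it
 * accepts a bio and reports completion from its endio path. The orig_bdev
 * argument lets it account against the device the bio was originally
 * submitted to, even if the bio has since been remapped.
 *
 *	unsigned long start = bio_start_io_acct(bio);
 *	struct block_device *orig_bdev = bio->bi_bdev;
 *
 *	... remap and submit the bio ...
 *
 * and on completion:
 *
 *	bio_end_io_acct_remapped(bio, start, orig_bdev);
 */
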
/**
 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
 * @q: the queue of the device being checked
 *
 * Description:
 *    Check if underlying low-level drivers of a device are busy.
 *    If the drivers want to export their busy state, they must set their own
 *    exporting function using blk_queue_lld_busy() first.
 *
 *    Basically, this function is used only by request stacking drivers
 *    to stop dispatching requests to underlying devices when underlying
 *    devices are busy. This behavior helps improve I/O merging on the queue
 *    of the request stacking driver and prevents I/O throughput regression
 *    on burst I/O load.
 *
 * Return:
 *    0 - Not busy (The request stacking driver should dispatch request)
 *    1 - Busy     (The request stacking driver should stop dispatching request)
 */
int blk_lld_busy(struct request_queue *q)
{
	if (queue_is_mq(q) && q->mq_ops->busy)
		return q->mq_ops->busy(q);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_lld_busy);

int kblockd_schedule_work(struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);

int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
				unsigned long delay)
{
	return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
}
EXPORT_SYMBOL(kblockd_mod_delayed_work_on);

void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
{
	struct task_struct *tsk = current;

	/*
	 * If this is a nested plug, don't actually assign it.
	 */
	if (tsk->plug)
		return;

	plug->mq_list = NULL;
	plug->cached_rq = NULL;
	plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT);
	plug->rq_count = 0;
	plug->multiple_queues = false;
	plug->has_elevator = false;
	plug->nowait = false;
	INIT_LIST_HEAD(&plug->cb_list);

	/*
	 * Store ordering should not be needed here, since a potential
	 * preempt will imply a full memory barrier
	 */
	tsk->plug = plug;
}

/**
 * blk_start_plug - initialize blk_plug and track it inside the task_struct
 * @plug: The &struct blk_plug that needs to be initialized
 *
 * Description:
 *   blk_start_plug() indicates to the block layer an intent by the caller
 *   to submit multiple I/O requests in a batch. The block layer may use
 *   this hint to defer submitting I/Os from the caller until blk_finish_plug()
 *   is called. However, the block layer may choose to submit requests
 *   before a call to blk_finish_plug() if the number of queued I/Os
 *   exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
 *   %BLK_PLUG_FLUSH_SIZE. The queued I/Os may also be submitted early if
 *   the task schedules (see below).
 *
 *   Tracking blk_plug inside the task_struct will help with auto-flushing the
 *   pending I/O should the task end up blocking between blk_start_plug() and
 *   blk_finish_plug(). This is important from a performance perspective, but
 *   also ensures that we don't deadlock. For instance, if the task is blocking
 *   for a memory allocation, memory reclaim could end up wanting to free a
 *   page belonging to that request that is currently residing in our private
 *   plug. By flushing the pending I/O when the process goes to sleep, we avoid
 *   this kind of deadlock.
 */
void blk_start_plug(struct blk_plug *plug)
{
	blk_start_plug_nr_ios(plug, 1);
}
EXPORT_SYMBOL(blk_start_plug);

static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
{
	LIST_HEAD(callbacks);

	while (!list_empty(&plug->cb_list)) {
		list_splice_init(&plug->cb_list, &callbacks);

		while (!list_empty(&callbacks)) {
			struct blk_plug_cb *cb = list_first_entry(&callbacks,
							  struct blk_plug_cb,
							  list);
			list_del(&cb->list);
			cb->callback(cb, from_schedule);
		}
	}
}

struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
				      int size)
{
	struct blk_plug *plug = current->plug;
	struct blk_plug_cb *cb;

	if (!plug)
		return NULL;

	list_for_each_entry(cb, &plug->cb_list, list)
		if (cb->callback == unplug && cb->data == data)
			return cb;

	/* Not currently on the callback list */
	BUG_ON(size < sizeof(*cb));
	cb = kzalloc(size, GFP_ATOMIC);
	if (cb) {
		cb->data = data;
		cb->callback = unplug;
		list_add(&cb->list, &plug->cb_list);
	}
	return cb;
}
EXPORT_SYMBOL(blk_check_plugged);

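/*
 * Usage sketch (illustrative, not part of blk-core.c): stacking drivers
 * (md/raid is an in-tree user) embed a struct blk_plug_cb in their own
 * callback structure and let blk_check_plugged() allocate and register it
 * once per plug. "my_unplug" and "struct my_plug_cb" are hypothetical.
 *
 *	struct my_plug_cb {
 *		struct blk_plug_cb	cb;
 *		struct bio_list		pending;
 *	};
 *
 *	struct blk_plug_cb *cb = blk_check_plugged(my_unplug, dev,
 *						   sizeof(struct my_plug_cb));
 *	if (cb)
 *		bio_list_add(&container_of(cb, struct my_plug_cb, cb)->pending,
 *			     bio);
 *
 * my_unplug(cb, from_schedule) is then invoked when the plug is flushed.
 */
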
void __blk_flush_plug(struct blk_plug *plug, bool from_schedule)
{
	if (!list_empty(&plug->cb_list))
		flush_plug_callbacks(plug, from_schedule);
	if (!rq_list_empty(plug->mq_list))
		blk_mq_flush_plug_list(plug, from_schedule);
	/*
	 * Unconditionally flush out cached requests, even if the unplug
	 * event came from schedule. Since we hold references to the
	 * queue for cached requests, we don't want a blocked task holding
	 * up a queue freeze/quiesce event.
	 */
	if (unlikely(!rq_list_empty(plug->cached_rq)))
		blk_mq_free_plug_rqs(plug);
}

/**
 * blk_finish_plug - mark the end of a batch of submitted I/O
 * @plug: The &struct blk_plug passed to blk_start_plug()
 *
 * Description:
 * Indicate that a batch of I/O submissions is complete. This function
 * must be paired with an initial call to blk_start_plug(). The intent
 * is to allow the block layer to optimize I/O submission. See the
 * documentation for blk_start_plug() for more information.
 */
void blk_finish_plug(struct blk_plug *plug)
{
	if (plug == current->plug) {
		__blk_flush_plug(plug, false);
		current->plug = NULL;
	}
}
EXPORT_SYMBOL(blk_finish_plug);

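/*
 * Usage sketch (illustrative, not part of blk-core.c): a submitter batches
 * several bios under one plug so the block layer can merge and dispatch
 * them together.
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr_bios; i++)
 *		submit_bio(bios[i]);
 *	blk_finish_plug(&plug);
 */
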
void blk_io_schedule(void)
{
	/* Prevent hang_check timer from firing at us during very long I/O */
	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

	if (timeout)
		io_schedule_timeout(timeout);
	else
		io_schedule();
}
EXPORT_SYMBOL_GPL(blk_io_schedule);

int __init blk_dev_init(void)
{
	BUILD_BUG_ON((__force u32)REQ_OP_LAST >= (1 << REQ_OP_BITS));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct request, cmd_flags));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct bio, bi_opf));
	BUILD_BUG_ON(ALIGN(offsetof(struct request_queue, srcu),
			   __alignof__(struct request_queue)) !=
		     sizeof(struct request_queue));

	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
	kblockd_workqueue = alloc_workqueue("kblockd",
					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");

	blk_requestq_cachep = kmem_cache_create("request_queue",
			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);

	blk_requestq_srcu_cachep = kmem_cache_create("request_queue_srcu",
			sizeof(struct request_queue) +
			sizeof(struct srcu_struct), 0, SLAB_PANIC, NULL);

	blk_debugfs_root = debugfs_create_dir("block", NULL);

	return 0;
}