// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-pm.h>
#include <linux/blk-integrity.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/t10-pi.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>
#include <linux/part_stat.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>
#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"
struct dentry *blk_debugfs_root;

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);

static DEFINE_IDA(blk_queue_ida);
/*
 * For queue allocation
 */
static struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;
/**
 * blk_queue_flag_set - atomically set a queue flag
 * @flag: flag to be set
 * @q: request queue
 */
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_set);
/**
 * blk_queue_flag_clear - atomically clear a queue flag
 * @flag: flag to be cleared
 * @q: request queue
 */
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	clear_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_clear);
/**
 * blk_queue_flag_test_and_set - atomically test and set a queue flag
 * @flag: flag to be set
 * @q: request queue
 *
 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
 * the flag was already set.
 */
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
{
	return test_and_set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);
#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const blk_op_name[] = {
	REQ_OP_NAME(READ),
	REQ_OP_NAME(WRITE),
	REQ_OP_NAME(FLUSH),
	REQ_OP_NAME(DISCARD),
	REQ_OP_NAME(SECURE_ERASE),
	REQ_OP_NAME(ZONE_RESET),
	REQ_OP_NAME(ZONE_RESET_ALL),
	REQ_OP_NAME(ZONE_OPEN),
	REQ_OP_NAME(ZONE_CLOSE),
	REQ_OP_NAME(ZONE_FINISH),
	REQ_OP_NAME(ZONE_APPEND),
	REQ_OP_NAME(WRITE_ZEROES),
	REQ_OP_NAME(DRV_IN),
	REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME
/**
 * blk_op_str - Return string XXX in the REQ_OP_XXX.
 * @op: REQ_OP_XXX.
 *
 * Description: Centralize block layer function to convert REQ_OP_XXX into
 * string format. Useful for debugging and tracing bios or requests. For an
 * invalid REQ_OP_XXX it returns the string "UNKNOWN".
 */
inline const char *blk_op_str(enum req_op op)
{
	const char *op_str = "UNKNOWN";

	if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
		op_str = blk_op_name[op];

	return op_str;
}
EXPORT_SYMBOL_GPL(blk_op_str);
static const struct {
	int		errno;
	const char	*name;
} blk_errors[] = {
	[BLK_STS_OK]		= { 0,		"" },
	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
	[BLK_STS_DEV_RESOURCE]	= { -EBUSY,	"device resource" },
	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },
	[BLK_STS_OFFLINE]	= { -ENODEV,	"device offline" },

	/* device mapper special case, should not leak out: */
	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },

	/* zone device specific errors */
	[BLK_STS_ZONE_OPEN_RESOURCE]	= { -ETOOMANYREFS, "open zones exceeded" },
	[BLK_STS_ZONE_ACTIVE_RESOURCE]	= { -EOVERFLOW, "active zones exceeded" },

	/* everything else not covered above: */
	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
};
blk_status_t errno_to_blk_status(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
		if (blk_errors[i].errno == errno)
			return (__force blk_status_t)i;
	}

	return BLK_STS_IOERR;
}
EXPORT_SYMBOL_GPL(errno_to_blk_status);
int blk_status_to_errno(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return -EIO;
	return blk_errors[idx].errno;
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);
const char *blk_status_to_str(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return "<null>";
	return blk_errors[idx].name;
}
/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->submit_bio will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_work_sync(&q->timeout_work);
}
EXPORT_SYMBOL(blk_sync_queue);
/**
 * blk_set_pm_only - increment pm_only counter
 * @q: request queue pointer
 */
void blk_set_pm_only(struct request_queue *q)
{
	atomic_inc(&q->pm_only);
}
EXPORT_SYMBOL_GPL(blk_set_pm_only);

void blk_clear_pm_only(struct request_queue *q)
{
	int pm_only;

	pm_only = atomic_dec_return(&q->pm_only);
	WARN_ON_ONCE(pm_only < 0);
	if (pm_only == 0)
		wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_clear_pm_only);
static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head,
			struct request_queue, rcu_head);

	percpu_ref_exit(&q->q_usage_counter);
	kmem_cache_free(blk_requestq_cachep, q);
}
static void blk_free_queue(struct request_queue *q)
{
	blk_stat_remove_callback(q, q->poll_cb);
	blk_stat_free_callback(q->poll_cb);

	blk_free_queue_stats(q->stats);

	ida_free(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}
/**
 * blk_put_queue - decrement the request_queue refcount
 * @q: the request_queue structure to decrement the refcount for
 *
 * Decrements the refcount of the request_queue and frees it when the refcount
 * reaches 0.
 *
 * Context: Can sleep.
 */
void blk_put_queue(struct request_queue *q)
{
	if (refcount_dec_and_test(&q->refs))
		blk_free_queue(q);
}
EXPORT_SYMBOL(blk_put_queue);
void blk_queue_start_drain(struct request_queue *q)
{
	/*
	 * When the queue DYING flag is set, we need to block new req
	 * entering the queue, so we call blk_freeze_queue_start() to
	 * prevent I/O from crossing blk_queue_enter().
	 */
	blk_freeze_queue_start(q);
	if (queue_is_mq(q))
		blk_mq_wake_waiters(q);
	/* Make blk_queue_enter() reexamine the DYING flag. */
	wake_up_all(&q->mq_freeze_wq);
}
/**
 * blk_queue_enter() - try to increase q->q_usage_counter
 * @q: request queue pointer
 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
 */
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
	const bool pm = flags & BLK_MQ_REQ_PM;

	while (!blk_try_enter_queue(q, pm)) {
		if (flags & BLK_MQ_REQ_NOWAIT)
			return -EAGAIN;

		/*
		 * This read pairs with the barrier in
		 * blk_freeze_queue_start(): we need to order reading the
		 * __PERCPU_REF_DEAD flag of .q_usage_counter against reading
		 * .mq_freeze_depth or the queue dying flag, otherwise the
		 * following wait may never return if the two reads are
		 * reordered.
		 */
		smp_rmb();
		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    blk_pm_resume_queue(pm, q)) ||
			   blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
	}

	return 0;
}
int __bio_queue_enter(struct request_queue *q, struct bio *bio)
{
	while (!blk_try_enter_queue(q, false)) {
		struct gendisk *disk = bio->bi_bdev->bd_disk;

		if (bio->bi_opf & REQ_NOWAIT) {
			if (test_bit(GD_DEAD, &disk->state))
				goto dead;
			bio_wouldblock_error(bio);
			return -EAGAIN;
		}

		/*
		 * This read pairs with the barrier in
		 * blk_freeze_queue_start(): we need to order reading the
		 * __PERCPU_REF_DEAD flag of .q_usage_counter against reading
		 * .mq_freeze_depth or the queue dying flag, otherwise the
		 * following wait may never return if the two reads are
		 * reordered.
		 */
		smp_rmb();
		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    blk_pm_resume_queue(false, q)) ||
			   test_bit(GD_DEAD, &disk->state));
		if (test_bit(GD_DEAD, &disk->state))
			goto dead;
	}

	return 0;
dead:
	bio_io_error(bio);
	return -ENODEV;
}

void blk_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->q_usage_counter);
}
static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, q_usage_counter);

	wake_up_all(&q->mq_freeze_wq);
}
static void blk_rq_timed_out_timer(struct timer_list *t)
{
	struct request_queue *q = from_timer(q, t, timeout);

	kblockd_schedule_work(&q->timeout_work);
}

static void blk_timeout_work(struct work_struct *work)
{
}
struct request_queue *blk_alloc_queue(int node_id)
{
	struct request_queue *q;

	q = kmem_cache_alloc_node(blk_requestq_cachep, GFP_KERNEL | __GFP_ZERO,
				  node_id);
	if (!q)
		return NULL;

	q->last_merge = NULL;

	q->id = ida_alloc(&blk_queue_ida, GFP_KERNEL);
	if (q->id < 0)
		goto fail_q;

	q->stats = blk_alloc_queue_stats();
	if (!q->stats)
		goto fail_id;

	q->node = node_id;

	atomic_set(&q->nr_active_requests_shared_tags, 0);

	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
	INIT_WORK(&q->timeout_work, blk_timeout_work);
	INIT_LIST_HEAD(&q->icq_list);

	refcount_set(&q->refs, 1);
	mutex_init(&q->debugfs_mutex);
	mutex_init(&q->sysfs_lock);
	mutex_init(&q->sysfs_dir_lock);
	spin_lock_init(&q->queue_lock);

	init_waitqueue_head(&q->mq_freeze_wq);
	mutex_init(&q->mq_freeze_lock);

	/*
	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
	 * See blk_register_queue() for details.
	 */
	if (percpu_ref_init(&q->q_usage_counter,
				blk_queue_usage_counter_release,
				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
		goto fail_stats;

	blk_set_default_limits(&q->limits);
	q->nr_requests = BLKDEV_DEFAULT_RQ;

	return q;

fail_stats:
	blk_free_queue_stats(q->stats);
fail_id:
	ida_free(&blk_queue_ida, q->id);
fail_q:
	kmem_cache_free(blk_requestq_cachep, q);
	return NULL;
}
/**
 * blk_get_queue - increment the request_queue refcount
 * @q: the request_queue structure to increment the refcount for
 *
 * Increment the refcount of the request_queue kobject.
 *
 * Context: Any context.
 */
bool blk_get_queue(struct request_queue *q)
{
	if (unlikely(blk_queue_dying(q)))
		return false;
	refcount_inc(&q->refs);
	return true;
}
EXPORT_SYMBOL(blk_get_queue);
#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

bool should_fail_request(struct block_device *part, unsigned int bytes)
{
	return part->bd_make_it_fail && should_fail(&fail_make_request, bytes);
}

static int __init fail_make_request_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
						NULL, &fail_make_request);

	return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_make_request_debugfs);
#endif /* CONFIG_FAIL_MAKE_REQUEST */
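/*
 * Illustrative note (assumption, not part of the original file): with
 * CONFIG_FAIL_MAKE_REQUEST enabled, the fault attributes registered above
 * are typically tuned at runtime through debugfs and sysfs, e.g.:
 *
 *	echo 10 > /sys/kernel/debug/fail_make_request/probability
 *	echo 100 > /sys/kernel/debug/fail_make_request/times
 *	echo 1 > /sys/block/sda/sda1/make-it-fail
 *
 * The exact paths depend on where debugfs and sysfs are mounted.
 */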
static inline void bio_check_ro(struct bio *bio)
{
	if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
		if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
			return;
		pr_warn("Trying to write to read-only block-device %pg\n",
			bio->bi_bdev);
		/* Older lvm-tools actually trigger this */
	}
}
static noinline int should_fail_bio(struct bio *bio)
{
	if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size))
		return -EIO;
	return 0;
}
ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);
/*
 * Check whether this bio extends beyond the end of the device or partition.
 * This may well happen - the kernel calls bread() without checking the size of
 * the device, e.g., when mounting a file system.
 */
static inline int bio_check_eod(struct bio *bio)
{
	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
	unsigned int nr_sectors = bio_sectors(bio);

	if (nr_sectors && maxsector &&
	    (nr_sectors > maxsector ||
	     bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
		pr_info_ratelimited("%s: attempt to access beyond end of device\n"
				    "%pg: rw=%d, sector=%llu, nr_sectors = %u limit=%llu\n",
				    current->comm, bio->bi_bdev, bio->bi_opf,
				    bio->bi_iter.bi_sector, nr_sectors, maxsector);
		return -EIO;
	}
	return 0;
}
/*
 * Remap block n of partition p to block n+start(p) of the disk.
 */
static int blk_partition_remap(struct bio *bio)
{
	struct block_device *p = bio->bi_bdev;

	if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
		return -EIO;
	if (bio_sectors(bio)) {
		bio->bi_iter.bi_sector += p->bd_start_sect;
		trace_block_bio_remap(bio, p->bd_dev,
				      bio->bi_iter.bi_sector -
				      p->bd_start_sect);
	}
	bio_set_flag(bio, BIO_REMAPPED);
	return 0;
}
/*
 * Check a zone append write for a zoned block device.
 */
static inline blk_status_t blk_check_zone_append(struct request_queue *q,
						 struct bio *bio)
{
	int nr_sectors = bio_sectors(bio);

	/* Only applicable to zoned block devices */
	if (!bdev_is_zoned(bio->bi_bdev))
		return BLK_STS_NOTSUPP;

	/* The bio sector must point to the start of a sequential zone */
	if (bio->bi_iter.bi_sector & (bdev_zone_sectors(bio->bi_bdev) - 1) ||
	    !bio_zone_is_seq(bio))
		return BLK_STS_IOERR;

	/*
	 * Not allowed to cross zone boundaries. Otherwise, the BIO will be
	 * split and could result in non-contiguous sectors being written in
	 * different zones.
	 */
	if (nr_sectors > q->limits.chunk_sectors)
		return BLK_STS_IOERR;

	/* Make sure the BIO is small enough and will not get split */
	if (nr_sectors > q->limits.max_zone_append_sectors)
		return BLK_STS_IOERR;

	bio->bi_opf |= REQ_NOMERGE;

	return BLK_STS_OK;
}
static void __submit_bio(struct bio *bio)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;

	if (unlikely(!blk_crypto_bio_prep(&bio)))
		return;

	if (!disk->fops->submit_bio) {
		blk_mq_submit_bio(bio);
	} else if (likely(bio_queue_enter(bio) == 0)) {
		disk->fops->submit_bio(bio);
		blk_queue_exit(disk->queue);
	}
}
/*
 * The loop in this function may be a bit non-obvious, and so deserves some
 * explanation:
 *
 *  - Before entering the loop, bio->bi_next is NULL (as all callers ensure
 *    that), so we have a list with a single bio.
 *  - We pretend that we have just taken it off a longer list, so we assign
 *    bio_list to a pointer to the bio_list_on_stack, thus initialising the
 *    bio_list of new bios to be added.  ->submit_bio() may indeed add some more
 *    bios through a recursive call to submit_bio_noacct.  If it did, we find a
 *    non-NULL value in bio_list and re-enter the loop from the top.
 *  - In this case we really did just take the bio off the top of the list (no
 *    pretending) and so remove it from bio_list, and call into ->submit_bio()
 *    again.
 *
 * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio.
 * bio_list_on_stack[1] contains bios that were submitted before the current
 *	->submit_bio, but that haven't been processed yet.
 */
static void __submit_bio_noacct(struct bio *bio)
{
	struct bio_list bio_list_on_stack[2];

	BUG_ON(bio->bi_next);

	bio_list_init(&bio_list_on_stack[0]);
	current->bio_list = bio_list_on_stack;

	do {
		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
		struct bio_list lower, same;

		/*
		 * Create a fresh bio_list for all subordinate requests.
		 */
		bio_list_on_stack[1] = bio_list_on_stack[0];
		bio_list_init(&bio_list_on_stack[0]);

		__submit_bio(bio);

		/*
		 * Sort new bios into those for a lower level and those for the
		 * same level.
		 */
		bio_list_init(&lower);
		bio_list_init(&same);
		while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
			if (q == bdev_get_queue(bio->bi_bdev))
				bio_list_add(&same, bio);
			else
				bio_list_add(&lower, bio);

		/*
		 * Now assemble so we handle the lowest level first.
		 */
		bio_list_merge(&bio_list_on_stack[0], &lower);
		bio_list_merge(&bio_list_on_stack[0], &same);
		bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
	} while ((bio = bio_list_pop(&bio_list_on_stack[0])));

	current->bio_list = NULL;
}
static void __submit_bio_noacct_mq(struct bio *bio)
{
	struct bio_list bio_list[2] = { };

	current->bio_list = bio_list;

	do {
		__submit_bio(bio);
	} while ((bio = bio_list_pop(&bio_list[0])));

	current->bio_list = NULL;
}
void submit_bio_noacct_nocheck(struct bio *bio)
{
	/*
	 * We only want one ->submit_bio to be active at a time, else stack
	 * usage with stacked devices could be a problem.  Use current->bio_list
	 * to collect a list of requests submitted by a ->submit_bio method
	 * while it is active, and then process them after it returned.
	 */
	if (current->bio_list)
		bio_list_add(&current->bio_list[0], bio);
	else if (!bio->bi_bdev->bd_disk->fops->submit_bio)
		__submit_bio_noacct_mq(bio);
	else
		__submit_bio_noacct(bio);
}
/**
 * submit_bio_noacct - re-submit a bio to the block device layer for I/O
 * @bio:  The bio describing the location in memory and on the device.
 *
 * This is a version of submit_bio() that shall only be used for I/O that is
 * resubmitted to lower level drivers by stacking block drivers.  All file
 * systems and other upper level users of the block layer should use
 * submit_bio() instead.
 */
void submit_bio_noacct(struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	blk_status_t status = BLK_STS_IOERR;
	struct blk_plug *plug;

	might_sleep();

	plug = blk_mq_plug(bio);
	if (plug && plug->nowait)
		bio->bi_opf |= REQ_NOWAIT;

	/*
	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
	 * if the queue does not support NOWAIT.
	 */
	if ((bio->bi_opf & REQ_NOWAIT) && !bdev_nowait(bdev))
		goto not_supported;

	if (should_fail_bio(bio))
		goto end_io;
	bio_check_ro(bio);
	if (!bio_flagged(bio, BIO_REMAPPED)) {
		if (unlikely(bio_check_eod(bio)))
			goto end_io;
		if (bdev->bd_partno && unlikely(blk_partition_remap(bio)))
			goto end_io;
	}

	/*
	 * Filter flush bio's early so that bio based drivers without flush
	 * support don't have to worry about them.
	 */
	if (op_is_flush(bio->bi_opf) &&
	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
		if (!bio_sectors(bio)) {
			status = BLK_STS_OK;
			goto end_io;
		}
	}

	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		bio_clear_polled(bio);

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
		if (!bdev_max_discard_sectors(bdev))
			goto not_supported;
		break;
	case REQ_OP_SECURE_ERASE:
		if (!bdev_max_secure_erase_sectors(bdev))
			goto not_supported;
		break;
	case REQ_OP_ZONE_APPEND:
		status = blk_check_zone_append(q, bio);
		if (status != BLK_STS_OK)
			goto end_io;
		break;
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		if (!bdev_is_zoned(bio->bi_bdev))
			goto not_supported;
		break;
	case REQ_OP_ZONE_RESET_ALL:
		if (!bdev_is_zoned(bio->bi_bdev) || !blk_queue_zone_resetall(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_ZEROES:
		if (!q->limits.max_write_zeroes_sectors)
			goto not_supported;
		break;
	default:
		break;
	}

	if (blk_throtl_bio(bio))
		return;

	blk_cgroup_bio_start(bio);
	blkcg_bio_issue_init(bio);

	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_queue(bio);
		/* Now that enqueuing has been traced, we need to trace
		 * completion as well.
		 */
		bio_set_flag(bio, BIO_TRACE_COMPLETION);
	}
	submit_bio_noacct_nocheck(bio);
	return;

not_supported:
	status = BLK_STS_NOTSUPP;
end_io:
	bio->bi_status = status;
	bio_endio(bio);
}
EXPORT_SYMBOL(submit_bio_noacct);
/**
 * submit_bio - submit a bio to the block device layer for I/O
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is used to submit I/O requests to block devices.  It is passed a
 * fully set up &struct bio that describes the I/O that needs to be done.  The
 * bio will be sent to the device described by the bi_bdev field.
 *
 * The success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the ->bi_end_io() callback
 * in @bio.  The bio must NOT be touched by the caller until ->bi_end_io() has
 * been called.
 */
void submit_bio(struct bio *bio)
{
	if (blkcg_punt_bio_submit(bio))
		return;

	if (bio_op(bio) == REQ_OP_READ) {
		task_io_account_read(bio->bi_iter.bi_size);
		count_vm_events(PGPGIN, bio_sectors(bio));
	} else if (bio_op(bio) == REQ_OP_WRITE) {
		count_vm_events(PGPGOUT, bio_sectors(bio));
	}

	submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio);
/**
 * bio_poll - poll for BIO completions
 * @bio: bio to poll for
 * @iob: batches of IO
 * @flags: BLK_POLL_* flags that control the behavior
 *
 * Poll for completions on the queue associated with the bio. Returns number of
 * completed entries found.
 *
 * Note: the caller must either be the context that submitted @bio, or
 * be in an RCU critical section to prevent freeing of @bio.
 */
int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	blk_qc_t cookie = READ_ONCE(bio->bi_cookie);
	int ret = 0;

	if (cookie == BLK_QC_T_NONE ||
	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		return 0;

	/*
	 * As the requests that require a zone lock are not plugged in the
	 * first place, directly accessing the plug instead of using
	 * blk_mq_plug() should not have any consequences during flushing for
	 * zoned devices.
	 */
	blk_flush_plug(current->plug, false);

	if (bio_queue_enter(bio))
		return 0;
	if (queue_is_mq(q)) {
		ret = blk_mq_poll(q, cookie, iob, flags);
	} else {
		struct gendisk *disk = q->disk;

		if (disk && disk->fops->poll_bio)
			ret = disk->fops->poll_bio(bio, iob, flags);
	}
	blk_queue_exit(q);
	return ret;
}
EXPORT_SYMBOL_GPL(bio_poll);
/*
 * Helper to implement file_operations.iopoll.  Requires the bio to be stored
 * in iocb->private, and cleared before freeing the bio.
 */
int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
		    unsigned int flags)
{
	struct bio *bio;
	int ret = 0;

	/*
	 * Note: the bio cache only uses SLAB_TYPESAFE_BY_RCU, so bio can
	 * point to a freshly allocated bio at this point.  If that happens
	 * we have a few cases to consider:
	 *
	 *  1) the bio is being initialized and bi_bdev is NULL.  We can simply
	 *     do nothing in this case
	 *  2) the bio points to a device that is not poll enabled.  bio_poll
	 *     will catch this and return 0
	 *  3) the bio points to a poll capable device, including but not
	 *     limited to the one that the original bio pointed to.  In this
	 *     case we will call into the actual poll method and poll for I/O,
	 *     even if we don't need to, but it won't cause harm either.
	 *
	 * For cases 2) and 3) above the RCU grace period ensures that bi_bdev
	 * is still allocated. Because partitions hold a reference to the whole
	 * device bdev and thus disk, the disk is also still valid.  Grabbing
	 * a reference to the queue in bio_poll() ensures the hctxs and requests
	 * are still valid as well.
	 */
	rcu_read_lock();
	bio = READ_ONCE(kiocb->private);
	if (bio && bio->bi_bdev)
		ret = bio_poll(bio, iob, flags);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(iocb_bio_iopoll);
void update_io_ticks(struct block_device *part, unsigned long now, bool end)
{
	unsigned long stamp;
again:
	stamp = READ_ONCE(part->bd_stamp);
	if (unlikely(time_after(now, stamp))) {
		if (likely(try_cmpxchg(&part->bd_stamp, &stamp, now)))
			__part_stat_add(part, io_ticks, end ? now - stamp : 1);
	}
	if (part->bd_partno) {
		part = bdev_whole(part);
		goto again;
	}
}
unsigned long bdev_start_io_acct(struct block_device *bdev,
				 unsigned int sectors, enum req_op op,
				 unsigned long start_time)
{
	const int sgrp = op_stat_group(op);

	part_stat_lock();
	update_io_ticks(bdev, start_time, false);
	part_stat_inc(bdev, ios[sgrp]);
	part_stat_add(bdev, sectors[sgrp], sectors);
	part_stat_local_inc(bdev, in_flight[op_is_write(op)]);
	part_stat_unlock();

	return start_time;
}
EXPORT_SYMBOL(bdev_start_io_acct);
/**
 * bio_start_io_acct - start I/O accounting for bio based drivers
 * @bio: bio to start account for
 *
 * Returns the start time that should be passed back to bio_end_io_acct().
 */
unsigned long bio_start_io_acct(struct bio *bio)
{
	return bdev_start_io_acct(bio->bi_bdev, bio_sectors(bio),
				  bio_op(bio), jiffies);
}
EXPORT_SYMBOL_GPL(bio_start_io_acct);
void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
		      unsigned long start_time)
{
	const int sgrp = op_stat_group(op);
	unsigned long now = READ_ONCE(jiffies);
	unsigned long duration = now - start_time;

	part_stat_lock();
	update_io_ticks(bdev, now, true);
	part_stat_add(bdev, nsecs[sgrp], jiffies_to_nsecs(duration));
	part_stat_local_dec(bdev, in_flight[op_is_write(op)]);
	part_stat_unlock();
}
EXPORT_SYMBOL(bdev_end_io_acct);

void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
			      struct block_device *orig_bdev)
{
	bdev_end_io_acct(orig_bdev, bio_op(bio), start_time);
}
EXPORT_SYMBOL_GPL(bio_end_io_acct_remapped);
/**
 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
 * @q : the queue of the device being checked
 *
 * Description:
 *    Check if underlying low-level drivers of a device are busy.
 *    If the drivers want to export their busy state, they must set their own
 *    exporting function using blk_queue_lld_busy() first.
 *
 *    Basically, this function is used only by request stacking drivers
 *    to stop dispatching requests to underlying devices when underlying
 *    devices are busy.  This behavior helps more I/O merging on the queue
 *    of the request stacking driver and prevents I/O throughput regression
 *    on burst I/O load.
 *
 * Return:
 *    0 - Not busy (The request stacking driver should dispatch request)
 *    1 - Busy (The request stacking driver should stop dispatching request)
 */
int blk_lld_busy(struct request_queue *q)
{
	if (queue_is_mq(q) && q->mq_ops->busy)
		return q->mq_ops->busy(q);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_lld_busy);
int kblockd_schedule_work(struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);

int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
				unsigned long delay)
{
	return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
}
EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
{
	struct task_struct *tsk = current;

	/*
	 * If this is a nested plug, don't actually assign it.
	 */
	if (tsk->plug)
		return;

	plug->mq_list = NULL;
	plug->cached_rq = NULL;
	plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT);
	plug->rq_count = 0;
	plug->multiple_queues = false;
	plug->has_elevator = false;
	plug->nowait = false;
	INIT_LIST_HEAD(&plug->cb_list);

	/*
	 * Store ordering should not be needed here, since a potential
	 * preempt will imply a full memory barrier.
	 */
	tsk->plug = plug;
}
/**
 * blk_start_plug - initialize blk_plug and track it inside the task_struct
 * @plug: The &struct blk_plug that needs to be initialized
 *
 * Description:
 *   blk_start_plug() indicates to the block layer an intent by the caller
 *   to submit multiple I/O requests in a batch.  The block layer may use
 *   this hint to defer submitting I/Os from the caller until blk_finish_plug()
 *   is called.  However, the block layer may choose to submit requests
 *   before a call to blk_finish_plug() if the number of queued I/Os
 *   exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
 *   %BLK_PLUG_FLUSH_SIZE.  The queued I/Os may also be submitted early if
 *   the task schedules (see below).
 *
 *   Tracking blk_plug inside the task_struct will help with auto-flushing the
 *   pending I/O should the task end up blocking between blk_start_plug() and
 *   blk_finish_plug().  This is important from a performance perspective, but
 *   also ensures that we don't deadlock.  For instance, if the task is blocking
 *   for a memory allocation, memory reclaim could end up wanting to free a
 *   page belonging to that request that is currently residing in our private
 *   plug.  By flushing the pending I/O when the process goes to sleep, we avoid
 *   this kind of deadlock.
 */
void blk_start_plug(struct blk_plug *plug)
{
	blk_start_plug_nr_ios(plug, 1);
}
EXPORT_SYMBOL(blk_start_plug);
static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
{
	LIST_HEAD(callbacks);

	while (!list_empty(&plug->cb_list)) {
		list_splice_init(&plug->cb_list, &callbacks);

		while (!list_empty(&callbacks)) {
			struct blk_plug_cb *cb = list_first_entry(&callbacks,
							  struct blk_plug_cb,
							  list);
			list_del(&cb->list);
			cb->callback(cb, from_schedule);
		}
	}
}
struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
				      int size)
{
	struct blk_plug *plug = current->plug;
	struct blk_plug_cb *cb;

	if (!plug)
		return NULL;

	list_for_each_entry(cb, &plug->cb_list, list)
		if (cb->callback == unplug && cb->data == data)
			return cb;

	/* Not currently on the callback list */
	BUG_ON(size < sizeof(*cb));
	cb = kzalloc(size, GFP_ATOMIC);
	if (cb) {
		cb->data = data;
		cb->callback = unplug;
		list_add(&cb->list, &plug->cb_list);
	}
	return cb;
}
EXPORT_SYMBOL(blk_check_plugged);
void __blk_flush_plug(struct blk_plug *plug, bool from_schedule)
{
	if (!list_empty(&plug->cb_list))
		flush_plug_callbacks(plug, from_schedule);
	if (!rq_list_empty(plug->mq_list))
		blk_mq_flush_plug_list(plug, from_schedule);
	/*
	 * Unconditionally flush out cached requests, even if the unplug
	 * event came from schedule.  Since we hold references to the
	 * queue for cached requests, we don't want a blocked task holding
	 * up a queue freeze/quiesce event.
	 */
	if (unlikely(!rq_list_empty(plug->cached_rq)))
		blk_mq_free_plug_rqs(plug);
}
/**
 * blk_finish_plug - mark the end of a batch of submitted I/O
 * @plug: The &struct blk_plug passed to blk_start_plug()
 *
 * Description:
 * Indicate that a batch of I/O submissions is complete.  This function
 * must be paired with an initial call to blk_start_plug().  The intent
 * is to allow the block layer to optimize I/O submission.  See the
 * documentation for blk_start_plug() for more information.
 */
void blk_finish_plug(struct blk_plug *plug)
{
	if (plug == current->plug) {
		__blk_flush_plug(plug, false);
		current->plug = NULL;
	}
}
EXPORT_SYMBOL(blk_finish_plug);
void blk_io_schedule(void)
{
	/* Prevent hang_check timer from firing at us during very long I/O */
	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

	if (timeout)
		io_schedule_timeout(timeout);
	else
		io_schedule();
}
EXPORT_SYMBOL_GPL(blk_io_schedule);
int __init blk_dev_init(void)
{
	BUILD_BUG_ON((__force u32)REQ_OP_LAST >= (1 << REQ_OP_BITS));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct request, cmd_flags));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct bio, bi_opf));

	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
	kblockd_workqueue = alloc_workqueue("kblockd",
					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");

	blk_requestq_cachep = kmem_cache_create("request_queue",
			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);

	blk_debugfs_root = debugfs_create_dir("block", NULL);

	return 0;
}