// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	-  July2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-pm.h>
#include <linux/blk-integrity.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/t10-pi.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>
#include <linux/part_stat.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>
#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"
struct dentry *blk_debugfs_root;
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);

static DEFINE_IDA(blk_queue_ida);
/*
 * For queue allocation
 */
static struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;
/**
 * blk_queue_flag_set - atomically set a queue flag
 * @flag: flag to be set
 * @q: request queue
 */
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_set);
/**
 * blk_queue_flag_clear - atomically clear a queue flag
 * @flag: flag to be cleared
 * @q: request queue
 */
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	clear_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_clear);
/**
 * blk_queue_flag_test_and_set - atomically test and set a queue flag
 * @flag: flag to be set
 * @q: request queue
 *
 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
 * the flag was already set.
 */
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
{
	return test_and_set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);
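
/*
 * Illustrative sketch (not part of the original file): a driver that owns a
 * request queue can flip queue flags atomically with these helpers, e.g. to
 * advertise a volatile write cache. QUEUE_FLAG_WC and QUEUE_FLAG_INIT_DONE
 * are real flags; enable_wc and do_one_time_setup() are hypothetical.
 *
 *	if (enable_wc)
 *		blk_queue_flag_set(QUEUE_FLAG_WC, q);
 *	else
 *		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
 *
 *	if (!blk_queue_flag_test_and_set(QUEUE_FLAG_INIT_DONE, q))
 *		do_one_time_setup(q);
 */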
#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const blk_op_name[] = {
	REQ_OP_NAME(READ),
	REQ_OP_NAME(WRITE),
	REQ_OP_NAME(FLUSH),
	REQ_OP_NAME(DISCARD),
	REQ_OP_NAME(SECURE_ERASE),
	REQ_OP_NAME(ZONE_RESET),
	REQ_OP_NAME(ZONE_RESET_ALL),
	REQ_OP_NAME(ZONE_OPEN),
	REQ_OP_NAME(ZONE_CLOSE),
	REQ_OP_NAME(ZONE_FINISH),
	REQ_OP_NAME(ZONE_APPEND),
	REQ_OP_NAME(WRITE_ZEROES),
	REQ_OP_NAME(DRV_IN),
	REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME
/**
 * blk_op_str - Return the string name of a REQ_OP_XXX operation
 * @op: REQ_OP_XXX.
 *
 * Description: Centralized block layer helper to convert REQ_OP_XXX into
 * string format. Useful when debugging and tracing a bio or request. For an
 * invalid REQ_OP_XXX it returns the string "UNKNOWN".
 */
inline const char *blk_op_str(enum req_op op)
{
	const char *op_str = "UNKNOWN";

	if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
		op_str = blk_op_name[op];

	return op_str;
}
EXPORT_SYMBOL_GPL(blk_op_str);
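
/*
 * Illustrative sketch (not part of the original file): blk_op_str() is meant
 * for debug and trace output, e.g.:
 *
 *	pr_debug("%s: op=%s sector=%llu\n", bio->bi_bdev->bd_disk->disk_name,
 *		 blk_op_str(bio_op(bio)),
 *		 (unsigned long long)bio->bi_iter.bi_sector);
 */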
static const struct {
	int		errno;
	const char	*name;
} blk_errors[] = {
	[BLK_STS_OK]		= { 0,		"" },
	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
	[BLK_STS_RESV_CONFLICT]	= { -EBADE,	"reservation conflict" },
	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
	[BLK_STS_DEV_RESOURCE]	= { -EBUSY,	"device resource" },
	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },
	[BLK_STS_OFFLINE]	= { -ENODEV,	"device offline" },

	/* device mapper special case, should not leak out: */
	[BLK_STS_DM_REQUEUE]	= { -EREMCHG,	"dm internal retry" },

	/* zone device specific errors */
	[BLK_STS_ZONE_OPEN_RESOURCE]	= { -ETOOMANYREFS, "open zones exceeded" },
	[BLK_STS_ZONE_ACTIVE_RESOURCE]	= { -EOVERFLOW, "active zones exceeded" },

	/* Command duration limit device-side timeout */
	[BLK_STS_DURATION_LIMIT]	= { -ETIME, "duration limit exceeded" },

	/* everything else not covered above: */
	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
};
blk_status_t errno_to_blk_status(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
		if (blk_errors[i].errno == errno)
			return (__force blk_status_t)i;
	}

	return BLK_STS_IOERR;
}
EXPORT_SYMBOL_GPL(errno_to_blk_status);
int blk_status_to_errno(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return -EIO;
	return blk_errors[idx].errno;
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);
const char *blk_status_to_str(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return "<null>";
	return blk_errors[idx].name;
}
EXPORT_SYMBOL_GPL(blk_status_to_str);
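
/*
 * Illustrative sketch (not part of the original file): these helpers convert
 * between kernel errnos and block layer status codes in both directions:
 *
 *	blk_status_t sts = errno_to_blk_status(-ENOLINK);
 *	int err = blk_status_to_errno(sts);
 *
 * Here sts is BLK_STS_TRANSPORT and err is -ENOLINK again; errnos with no
 * dedicated status collapse to BLK_STS_IOERR.
 */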
/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->submit_bio will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_work_sync(&q->timeout_work);
}
EXPORT_SYMBOL(blk_sync_queue);
/**
 * blk_set_pm_only - increment pm_only counter
 * @q: request queue pointer
 */
void blk_set_pm_only(struct request_queue *q)
{
	atomic_inc(&q->pm_only);
}
EXPORT_SYMBOL_GPL(blk_set_pm_only);

void blk_clear_pm_only(struct request_queue *q)
{
	int pm_only;

	pm_only = atomic_dec_return(&q->pm_only);
	WARN_ON_ONCE(pm_only < 0);
	if (pm_only == 0)
		wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_clear_pm_only);
static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head,
			struct request_queue, rcu_head);

	percpu_ref_exit(&q->q_usage_counter);
	kmem_cache_free(blk_requestq_cachep, q);
}

static void blk_free_queue(struct request_queue *q)
{
	blk_free_queue_stats(q->stats);
	if (queue_is_mq(q))
		blk_mq_release(q);

	ida_free(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}
/**
 * blk_put_queue - decrement the request_queue refcount
 * @q: the request_queue structure to decrement the refcount for
 *
 * Decrements the refcount of the request_queue and frees it when the refcount
 * reaches 0.
 */
void blk_put_queue(struct request_queue *q)
{
	if (refcount_dec_and_test(&q->refs))
		blk_free_queue(q);
}
EXPORT_SYMBOL(blk_put_queue);
void blk_queue_start_drain(struct request_queue *q)
{
	/*
	 * When queue DYING flag is set, we need to block new req
	 * entering queue, so we call blk_freeze_queue_start() to
	 * prevent I/O from crossing blk_queue_enter().
	 */
	blk_freeze_queue_start(q);
	if (queue_is_mq(q))
		blk_mq_wake_waiters(q);
	/* Make blk_queue_enter() reexamine the DYING flag. */
	wake_up_all(&q->mq_freeze_wq);
}
/**
 * blk_queue_enter() - try to increase q->q_usage_counter
 * @q: request queue pointer
 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
 */
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
	const bool pm = flags & BLK_MQ_REQ_PM;

	while (!blk_try_enter_queue(q, pm)) {
		if (flags & BLK_MQ_REQ_NOWAIT)
			return -EAGAIN;

		/*
		 * read pair of barrier in blk_freeze_queue_start(), we need to
		 * order reading __PERCPU_REF_DEAD flag of .q_usage_counter and
		 * reading .mq_freeze_depth or queue dying flag, otherwise the
		 * following wait may never return if the two reads are
		 * reordered.
		 */
		smp_rmb();
		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    blk_pm_resume_queue(pm, q)) ||
			   blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
	}

	return 0;
}
int __bio_queue_enter(struct request_queue *q, struct bio *bio)
{
	while (!blk_try_enter_queue(q, false)) {
		struct gendisk *disk = bio->bi_bdev->bd_disk;

		if (bio->bi_opf & REQ_NOWAIT) {
			if (test_bit(GD_DEAD, &disk->state))
				goto dead;
			bio_wouldblock_error(bio);
			return -EAGAIN;
		}

		/*
		 * read pair of barrier in blk_freeze_queue_start(), we need to
		 * order reading __PERCPU_REF_DEAD flag of .q_usage_counter and
		 * reading .mq_freeze_depth or queue dying flag, otherwise the
		 * following wait may never return if the two reads are
		 * reordered.
		 */
		smp_rmb();
		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    blk_pm_resume_queue(false, q)) ||
			   test_bit(GD_DEAD, &disk->state));
		if (test_bit(GD_DEAD, &disk->state))
			goto dead;
	}

	return 0;
dead:
	bio_io_error(bio);
	return -ENODEV;
}

void blk_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->q_usage_counter);
}
static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, q_usage_counter);

	wake_up_all(&q->mq_freeze_wq);
}
static void blk_rq_timed_out_timer(struct timer_list *t)
{
	struct request_queue *q = from_timer(q, t, timeout);

	kblockd_schedule_work(&q->timeout_work);
}

static void blk_timeout_work(struct work_struct *work)
{
}
struct request_queue *blk_alloc_queue(int node_id)
{
	struct request_queue *q;

	q = kmem_cache_alloc_node(blk_requestq_cachep, GFP_KERNEL | __GFP_ZERO,
				  node_id);
	if (!q)
		return NULL;

	q->last_merge = NULL;

	q->id = ida_alloc(&blk_queue_ida, GFP_KERNEL);
	if (q->id < 0)
		goto fail_q;

	q->stats = blk_alloc_queue_stats();
	if (!q->stats)
		goto fail_id;

	q->node = node_id;

	atomic_set(&q->nr_active_requests_shared_tags, 0);

	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
	INIT_WORK(&q->timeout_work, blk_timeout_work);
	INIT_LIST_HEAD(&q->icq_list);

	refcount_set(&q->refs, 1);
	mutex_init(&q->debugfs_mutex);
	mutex_init(&q->sysfs_lock);
	mutex_init(&q->sysfs_dir_lock);
	mutex_init(&q->rq_qos_mutex);
	spin_lock_init(&q->queue_lock);

	init_waitqueue_head(&q->mq_freeze_wq);
	mutex_init(&q->mq_freeze_lock);

	/*
	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
	 * See blk_register_queue() for details.
	 */
	if (percpu_ref_init(&q->q_usage_counter,
				blk_queue_usage_counter_release,
				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
		goto fail_stats;

	blk_set_default_limits(&q->limits);
	q->nr_requests = BLKDEV_DEFAULT_RQ;

	return q;

fail_stats:
	blk_free_queue_stats(q->stats);
fail_id:
	ida_free(&blk_queue_ida, q->id);
fail_q:
	kmem_cache_free(blk_requestq_cachep, q);
	return NULL;
}
/**
 * blk_get_queue - increment the request_queue refcount
 * @q: the request_queue structure to increment the refcount for
 *
 * Increment the refcount of the request_queue kobject.
 *
 * Context: Any context.
 */
bool blk_get_queue(struct request_queue *q)
{
	if (unlikely(blk_queue_dying(q)))
		return false;
	refcount_inc(&q->refs);
	return true;
}
EXPORT_SYMBOL(blk_get_queue);
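
/*
 * Illustrative sketch (not part of the original file): code that stashes a
 * request_queue pointer must pair blk_get_queue() with blk_put_queue() so the
 * queue cannot be freed underneath it:
 *
 *	if (!blk_get_queue(q))
 *		return -ENODEV;	(the queue is dying)
 *	(... use q ...)
 *	blk_put_queue(q);
 */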
#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

bool should_fail_request(struct block_device *part, unsigned int bytes)
{
	return part->bd_make_it_fail && should_fail(&fail_make_request, bytes);
}

static int __init fail_make_request_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
						NULL, &fail_make_request);

	return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_make_request_debugfs);

#endif /* CONFIG_FAIL_MAKE_REQUEST */
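
/*
 * Usage sketch (per Documentation/fault-injection, not part of this file):
 * the fault attributes are tuned through debugfs and armed per-device via
 * sysfs, e.g.:
 *
 *	echo 10 > /sys/kernel/debug/fail_make_request/probability
 *	echo 100 > /sys/kernel/debug/fail_make_request/times
 *	echo 1 > /sys/block/sda/make-it-fail
 *
 * or at boot with fail_make_request=<interval>,<probability>,<space>,<times>.
 */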
static inline void bio_check_ro(struct bio *bio)
{
	if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
		if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
			return;
		pr_warn("Trying to write to read-only block-device %pg\n",
			bio->bi_bdev);
		/* Older lvm-tools actually trigger this */
	}
}
static noinline int should_fail_bio(struct bio *bio)
{
	if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size))
		return -EIO;
	return 0;
}
ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);
/*
 * Check whether this bio extends beyond the end of the device or partition.
 * This may well happen - the kernel calls bread() without checking the size of
 * the device, e.g., when mounting a file system.
 */
static inline int bio_check_eod(struct bio *bio)
{
	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
	unsigned int nr_sectors = bio_sectors(bio);

	if (nr_sectors &&
	    (nr_sectors > maxsector ||
	     bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
		pr_info_ratelimited("%s: attempt to access beyond end of device\n"
				    "%pg: rw=%d, sector=%llu, nr_sectors = %u limit=%llu\n",
				    current->comm, bio->bi_bdev, bio->bi_opf,
				    bio->bi_iter.bi_sector, nr_sectors, maxsector);
		return -EIO;
	}
	return 0;
}
/*
 * Remap block n of partition p to block n+start(p) of the disk.
 */
static int blk_partition_remap(struct bio *bio)
{
	struct block_device *p = bio->bi_bdev;

	if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
		return -EIO;
	if (bio_sectors(bio)) {
		bio->bi_iter.bi_sector += p->bd_start_sect;
		trace_block_bio_remap(bio, p->bd_dev,
				      bio->bi_iter.bi_sector -
				      p->bd_start_sect);
	}
	bio_set_flag(bio, BIO_REMAPPED);
	return 0;
}
/*
 * Check write append to a zoned block device.
 */
static inline blk_status_t blk_check_zone_append(struct request_queue *q,
						 struct bio *bio)
{
	int nr_sectors = bio_sectors(bio);

	/* Only applicable to zoned block devices */
	if (!bdev_is_zoned(bio->bi_bdev))
		return BLK_STS_NOTSUPP;

	/* The bio sector must point to the start of a sequential zone */
	if (!bdev_is_zone_start(bio->bi_bdev, bio->bi_iter.bi_sector) ||
	    !bio_zone_is_seq(bio))
		return BLK_STS_IOERR;

	/*
	 * Not allowed to cross zone boundaries. Otherwise, the BIO will be
	 * split and could result in non-contiguous sectors being written in
	 * different zones.
	 */
	if (nr_sectors > q->limits.chunk_sectors)
		return BLK_STS_IOERR;

	/* Make sure the BIO is small enough and will not get split */
	if (nr_sectors > q->limits.max_zone_append_sectors)
		return BLK_STS_IOERR;

	bio->bi_opf |= REQ_NOMERGE;

	return BLK_STS_OK;
}
static void __submit_bio(struct bio *bio)
{
	if (unlikely(!blk_crypto_bio_prep(&bio)))
		return;

	if (!bio->bi_bdev->bd_has_submit_bio) {
		blk_mq_submit_bio(bio);
	} else if (likely(bio_queue_enter(bio) == 0)) {
		struct gendisk *disk = bio->bi_bdev->bd_disk;

		disk->fops->submit_bio(bio);
		blk_queue_exit(disk->queue);
	}
}
/*
 * The loop in this function may be a bit non-obvious, and so deserves some
 * explanation:
 *
 *  - Before entering the loop, bio->bi_next is NULL (as all callers ensure
 *    that), so we have a list with a single bio.
 *  - We pretend that we have just taken it off a longer list, so we assign
 *    bio_list to a pointer to the bio_list_on_stack, thus initialising the
 *    bio_list of new bios to be added. ->submit_bio() may indeed add some more
 *    bios through a recursive call to submit_bio_noacct. If it did, we find a
 *    non-NULL value in bio_list and re-enter the loop from the top.
 *  - In this case we really did just take the bio off the top of the list (no
 *    pretending) and so remove it from bio_list, and call into ->submit_bio()
 *    again.
 *
 * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio.
 * bio_list_on_stack[1] contains bios that were submitted before the current
 *	->submit_bio, but that haven't been processed yet.
 */
static void __submit_bio_noacct(struct bio *bio)
{
	struct bio_list bio_list_on_stack[2];

	BUG_ON(bio->bi_next);

	bio_list_init(&bio_list_on_stack[0]);
	current->bio_list = bio_list_on_stack;

	do {
		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
		struct bio_list lower, same;

		/*
		 * Create a fresh bio_list for all subordinate requests.
		 */
		bio_list_on_stack[1] = bio_list_on_stack[0];
		bio_list_init(&bio_list_on_stack[0]);

		__submit_bio(bio);

		/*
		 * Sort new bios into those for a lower level and those for the
		 * same level.
		 */
		bio_list_init(&lower);
		bio_list_init(&same);
		while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
			if (q == bdev_get_queue(bio->bi_bdev))
				bio_list_add(&same, bio);
			else
				bio_list_add(&lower, bio);

		/*
		 * Now assemble so we handle the lowest level first.
		 */
		bio_list_merge(&bio_list_on_stack[0], &lower);
		bio_list_merge(&bio_list_on_stack[0], &same);
		bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
	} while ((bio = bio_list_pop(&bio_list_on_stack[0])));

	current->bio_list = NULL;
}
static void __submit_bio_noacct_mq(struct bio *bio)
{
	struct bio_list bio_list[2] = { };

	current->bio_list = bio_list;

	do {
		__submit_bio(bio);
	} while ((bio = bio_list_pop(&bio_list[0])));

	current->bio_list = NULL;
}
void submit_bio_noacct_nocheck(struct bio *bio)
{
	blk_cgroup_bio_start(bio);
	blkcg_bio_issue_init(bio);

	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_queue(bio);
		/*
		 * Now that enqueuing has been traced, we need to trace
		 * completion as well.
		 */
		bio_set_flag(bio, BIO_TRACE_COMPLETION);
	}

	/*
	 * We only want one ->submit_bio to be active at a time, else stack
	 * usage with stacked devices could be a problem. Use current->bio_list
	 * to collect a list of requests submitted by a ->submit_bio method
	 * while it is active, and then process them after it returned.
	 */
	if (current->bio_list)
		bio_list_add(&current->bio_list[0], bio);
	else if (!bio->bi_bdev->bd_has_submit_bio)
		__submit_bio_noacct_mq(bio);
	else
		__submit_bio_noacct(bio);
}
/**
 * submit_bio_noacct - re-submit a bio to the block device layer for I/O
 * @bio: The bio describing the location in memory and on the device.
 *
 * This is a version of submit_bio() that shall only be used for I/O that is
 * resubmitted to lower level drivers by stacking block drivers. All file
 * systems and other upper level users of the block layer should use
 * submit_bio() instead.
 */
void submit_bio_noacct(struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	blk_status_t status = BLK_STS_IOERR;

	might_sleep();

	/*
	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
	 * if queue does not support NOWAIT.
	 */
	if ((bio->bi_opf & REQ_NOWAIT) && !bdev_nowait(bdev))
		goto not_supported;

	if (should_fail_bio(bio))
		goto end_io;
	bio_check_ro(bio);
	if (!bio_flagged(bio, BIO_REMAPPED)) {
		if (unlikely(bio_check_eod(bio)))
			goto end_io;
		if (bdev->bd_partno && unlikely(blk_partition_remap(bio)))
			goto end_io;
	}

	/*
	 * Filter flush bio's early so that bio based drivers without flush
	 * support don't have to worry about them.
	 */
	if (op_is_flush(bio->bi_opf)) {
		if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_WRITE &&
				 bio_op(bio) != REQ_OP_ZONE_APPEND))
			goto end_io;
		if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
			bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
			if (!bio_sectors(bio)) {
				status = BLK_STS_OK;
				goto end_io;
			}
		}
	}

	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		bio_clear_polled(bio);

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
		if (!bdev_max_discard_sectors(bdev))
			goto not_supported;
		break;
	case REQ_OP_SECURE_ERASE:
		if (!bdev_max_secure_erase_sectors(bdev))
			goto not_supported;
		break;
	case REQ_OP_ZONE_APPEND:
		status = blk_check_zone_append(q, bio);
		if (status != BLK_STS_OK)
			goto end_io;
		break;
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		if (!bdev_is_zoned(bio->bi_bdev))
			goto not_supported;
		break;
	case REQ_OP_ZONE_RESET_ALL:
		if (!bdev_is_zoned(bio->bi_bdev) || !blk_queue_zone_resetall(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_ZEROES:
		if (!q->limits.max_write_zeroes_sectors)
			goto not_supported;
		break;
	default:
		break;
	}

	if (blk_throtl_bio(bio))
		return;
	submit_bio_noacct_nocheck(bio);
	return;

not_supported:
	status = BLK_STS_NOTSUPP;
end_io:
	bio->bi_status = status;
	bio_endio(bio);
}
EXPORT_SYMBOL(submit_bio_noacct);
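
/*
 * Illustrative sketch (not part of the original file): a stacking driver
 * (dm/md style) typically remaps a bio it received in its own ->submit_bio
 * and passes it down with submit_bio_noacct(); lower_bdev and my_data_offset
 * are hypothetical.
 *
 *	static void my_stack_submit_bio(struct bio *bio)
 *	{
 *		bio_set_dev(bio, lower_bdev);
 *		bio->bi_iter.bi_sector += my_data_offset;
 *		submit_bio_noacct(bio);
 *	}
 */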
/**
 * submit_bio - submit a bio to the block device layer for I/O
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is used to submit I/O requests to block devices. It is passed a
 * fully set up &struct bio that describes the I/O that needs to be done. The
 * bio will be sent to the device described by the bi_bdev field.
 *
 * The success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the ->bi_end_io() callback
 * in @bio. The bio must NOT be touched by the caller until ->bi_end_io() has
 * been called.
 */
void submit_bio(struct bio *bio)
{
	if (bio_op(bio) == REQ_OP_READ) {
		task_io_account_read(bio->bi_iter.bi_size);
		count_vm_events(PGPGIN, bio_sectors(bio));
	} else if (bio_op(bio) == REQ_OP_WRITE) {
		count_vm_events(PGPGOUT, bio_sectors(bio));
	}

	submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio);
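
/*
 * Illustrative sketch (not part of the original file): a minimal asynchronous
 * read as a file system might issue it; my_end_io, sector and page are
 * hypothetical, and the caller must not touch the bio until my_end_io runs.
 *
 *	static void my_end_io(struct bio *bio)
 *	{
 *		(check bio->bi_status here)
 *		bio_put(bio);
 *	}
 *
 *	bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_NOIO);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_end_io = my_end_io;
 *	__bio_add_page(bio, page, PAGE_SIZE, 0);
 *	submit_bio(bio);
 */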
/**
 * bio_poll - poll for BIO completions
 * @bio: bio to poll for
 * @iob: batches of IO
 * @flags: BLK_POLL_* flags that control the behavior
 *
 * Poll for completions on queue associated with the bio. Returns number of
 * completed entries found.
 *
 * Note: the caller must either be the context that submitted @bio, or
 * be in a RCU critical section to prevent freeing of @bio.
 */
int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
{
	blk_qc_t cookie = READ_ONCE(bio->bi_cookie);
	struct block_device *bdev;
	struct request_queue *q;
	int ret = 0;

	bdev = READ_ONCE(bio->bi_bdev);
	if (!bdev)
		return 0;

	q = bdev_get_queue(bdev);
	if (cookie == BLK_QC_T_NONE ||
	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		return 0;

	/*
	 * As the requests that require a zone lock are not plugged in the
	 * first place, directly accessing the plug instead of using
	 * blk_mq_plug() should not have any consequences during flushing for
	 * zoned devices.
	 */
	blk_flush_plug(current->plug, false);

	/*
	 * We need to be able to enter a frozen queue, similar to how
	 * timeouts also need to do that. If that is blocked, then we can
	 * have pending IO when a queue freeze is started, and then the
	 * wait for the freeze to finish will wait for polled requests to
	 * timeout as the poller is prevented from entering the queue and
	 * completing them. As long as we prevent new IO from being queued,
	 * that should be all that matters.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return 0;
	if (queue_is_mq(q)) {
		ret = blk_mq_poll(q, cookie, iob, flags);
	} else {
		struct gendisk *disk = q->disk;

		if (disk && disk->fops->poll_bio)
			ret = disk->fops->poll_bio(bio, iob, flags);
	}
	blk_queue_exit(q);
	return ret;
}
EXPORT_SYMBOL_GPL(bio_poll);
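
/*
 * Illustrative sketch (not part of the original file): a submitter of a
 * REQ_POLLED bio can spin for its completion instead of sleeping; the done
 * flag, set from the bio's end_io handler, is hypothetical.
 *
 *	while (!READ_ONCE(done))
 *		bio_poll(bio, NULL, 0);
 */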
/*
 * Helper to implement file_operations.iopoll. Requires the bio to be stored
 * in iocb->private, and cleared before freeing the bio.
 */
int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
		    unsigned int flags)
{
	struct bio *bio;
	int ret = 0;

	/*
	 * Note: the bio cache only uses SLAB_TYPESAFE_BY_RCU, so bio can
	 * point to a freshly allocated bio at this point. If that happens
	 * we have a few cases to consider:
	 *
	 *  1) the bio is being initialized and bi_bdev is NULL. We can just
	 *     do nothing in this case
	 *  2) the bio points to a not poll enabled device. bio_poll will catch
	 *     this and will do nothing
	 *  3) the bio points to a poll capable device, including but not
	 *     limited to the one that the original bio pointed to. In this
	 *     case we will call into the actual poll method and poll for I/O,
	 *     even if we don't need to, but it won't cause harm either.
	 *
	 * For cases 2) and 3) above the RCU grace period ensures that bi_bdev
	 * is still allocated. Because partitions hold a reference to the whole
	 * device bdev and thus disk, the disk is also still valid. Grabbing
	 * a reference to the queue in bio_poll() ensures the hctxs and requests
	 * are still valid as well.
	 */
	rcu_read_lock();
	bio = READ_ONCE(kiocb->private);
	if (bio)
		ret = bio_poll(bio, iob, flags);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(iocb_bio_iopoll);
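
/*
 * Illustrative sketch (not part of the original file): a driver or file
 * system wires this helper into its file_operations after storing the bio in
 * iocb->private at submission time (the block device code does the same):
 *
 *	static const struct file_operations my_fops = {
 *		(...)
 *		.iopoll		= iocb_bio_iopoll,
 *	};
 */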
void update_io_ticks(struct block_device *part, unsigned long now, bool end)
{
	unsigned long stamp;
again:
	stamp = READ_ONCE(part->bd_stamp);
	if (unlikely(time_after(now, stamp))) {
		if (likely(try_cmpxchg(&part->bd_stamp, &stamp, now)))
			__part_stat_add(part, io_ticks, end ? now - stamp : 1);
	}
	if (part->bd_partno) {
		part = bdev_whole(part);
		goto again;
	}
}

unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,
				 unsigned long start_time)
{
	part_stat_lock();
	update_io_ticks(bdev, start_time, false);
	part_stat_local_inc(bdev, in_flight[op_is_write(op)]);
	part_stat_unlock();

	return start_time;
}
EXPORT_SYMBOL(bdev_start_io_acct);
/**
 * bio_start_io_acct - start I/O accounting for bio based drivers
 * @bio: bio to start account for
 *
 * Returns the start time that should be passed back to bio_end_io_acct().
 */
unsigned long bio_start_io_acct(struct bio *bio)
{
	return bdev_start_io_acct(bio->bi_bdev, bio_op(bio), jiffies);
}
EXPORT_SYMBOL_GPL(bio_start_io_acct);
void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
		      unsigned int sectors, unsigned long start_time)
{
	const int sgrp = op_stat_group(op);
	unsigned long now = READ_ONCE(jiffies);
	unsigned long duration = now - start_time;

	part_stat_lock();
	update_io_ticks(bdev, now, true);
	part_stat_inc(bdev, ios[sgrp]);
	part_stat_add(bdev, sectors[sgrp], sectors);
	part_stat_add(bdev, nsecs[sgrp], jiffies_to_nsecs(duration));
	part_stat_local_dec(bdev, in_flight[op_is_write(op)]);
	part_stat_unlock();
}
EXPORT_SYMBOL(bdev_end_io_acct);

void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
			      struct block_device *orig_bdev)
{
	bdev_end_io_acct(orig_bdev, bio_op(bio), bio_sectors(bio), start_time);
}
EXPORT_SYMBOL_GPL(bio_end_io_acct_remapped);
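
/*
 * Illustrative sketch (not part of the original file): a bio-based driver
 * brackets each bio with the accounting pair, capturing the sector count at
 * submission since the bio's iterator advances as it completes:
 *
 *	unsigned long start = bio_start_io_acct(bio);
 *	unsigned int sectors = bio_sectors(bio);
 *	(... drive the I/O ...)
 *	bdev_end_io_acct(bio->bi_bdev, bio_op(bio), sectors, start);
 */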
/**
 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
 * @q : the queue of the device being checked
 *
 * Description:
 *    Check if underlying low-level drivers of a device are busy.
 *    If the drivers want to export their busy state, they must set their own
 *    exporting function using blk_queue_lld_busy() first.
 *
 *    Basically, this function is used only by request stacking drivers
 *    to stop dispatching requests to underlying devices when underlying
 *    devices are busy. This behavior helps more I/O merging on the queue
 *    of the request stacking driver and prevents I/O throughput regression
 *    on burst I/O load.
 *
 * Return:
 *    0 - Not busy (The request stacking driver should dispatch request)
 *    1 - Busy (The request stacking driver should stop dispatching request)
 */
int blk_lld_busy(struct request_queue *q)
{
	if (queue_is_mq(q) && q->mq_ops->busy)
		return q->mq_ops->busy(q);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_lld_busy);
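
/*
 * Illustrative sketch (not part of the original file): a blk-mq driver
 * exports its busy state through the optional ->busy callback of its
 * blk_mq_ops; my_queue_rq() and my_queue_busy() are hypothetical.
 *
 *	static const struct blk_mq_ops my_mq_ops = {
 *		.queue_rq	= my_queue_rq,
 *		.busy		= my_queue_busy,
 *	};
 */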
int kblockd_schedule_work(struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);

int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
				unsigned long delay)
{
	return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
}
EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
{
	struct task_struct *tsk = current;

	/*
	 * If this is a nested plug, don't actually assign it.
	 */
	if (tsk->plug)
		return;

	plug->mq_list = NULL;
	plug->cached_rq = NULL;
	plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT);
	plug->rq_count = 0;
	plug->multiple_queues = false;
	plug->has_elevator = false;
	INIT_LIST_HEAD(&plug->cb_list);

	/*
	 * Store ordering should not be needed here, since a potential
	 * preempt will imply a full memory barrier
	 */
	tsk->plug = plug;
}
/**
 * blk_start_plug - initialize blk_plug and track it inside the task_struct
 * @plug: The &struct blk_plug that needs to be initialized
 *
 * Description:
 * blk_start_plug() indicates to the block layer an intent by the caller
 * to submit multiple I/O requests in a batch. The block layer may use
 * this hint to defer submitting I/Os from the caller until blk_finish_plug()
 * is called. However, the block layer may choose to submit requests
 * before a call to blk_finish_plug() if the number of queued I/Os
 * exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
 * %BLK_PLUG_FLUSH_SIZE. The queued I/Os may also be submitted early if
 * the task schedules (see below).
 *
 * Tracking blk_plug inside the task_struct will help with auto-flushing the
 * pending I/O should the task end up blocking between blk_start_plug() and
 * blk_finish_plug(). This is important from a performance perspective, but
 * also ensures that we don't deadlock. For instance, if the task is blocking
 * for a memory allocation, memory reclaim could end up wanting to free a
 * page belonging to that request that is currently residing in our private
 * plug. By flushing the pending I/O when the process goes to sleep, we avoid
 * this kind of deadlock.
 */
void blk_start_plug(struct blk_plug *plug)
{
	blk_start_plug_nr_ios(plug, 1);
}
EXPORT_SYMBOL(blk_start_plug);
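
/*
 * Illustrative sketch (not part of the original file): the canonical plugging
 * pattern around a batch of submissions; next_bio() is hypothetical.
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	while ((bio = next_bio()))
 *		submit_bio(bio);
 *	blk_finish_plug(&plug);
 */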
static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
{
	LIST_HEAD(callbacks);

	while (!list_empty(&plug->cb_list)) {
		list_splice_init(&plug->cb_list, &callbacks);

		while (!list_empty(&callbacks)) {
			struct blk_plug_cb *cb = list_first_entry(&callbacks,
							  struct blk_plug_cb,
							  list);
			list_del(&cb->list);
			cb->callback(cb, from_schedule);
		}
	}
}
*blk_check_plugged(blk_plug_cb_fn unplug
, void *data
,
1116 struct blk_plug
*plug
= current
->plug
;
1117 struct blk_plug_cb
*cb
;
1122 list_for_each_entry(cb
, &plug
->cb_list
, list
)
1123 if (cb
->callback
== unplug
&& cb
->data
== data
)
1126 /* Not currently on the callback list */
1127 BUG_ON(size
< sizeof(*cb
));
1128 cb
= kzalloc(size
, GFP_ATOMIC
);
1131 cb
->callback
= unplug
;
1132 list_add(&cb
->list
, &plug
->cb_list
);
1136 EXPORT_SYMBOL(blk_check_plugged
);
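
/*
 * Illustrative sketch (not part of the original file): md/raid-style usage,
 * where a driver piggybacks per-device state on the current plug; struct
 * my_cb, my_unplug() and my_dev are hypothetical. The callback owns the
 * allocation and frees it once it has flushed the pending work.
 *
 *	static void my_unplug(struct blk_plug_cb *cb, bool from_schedule)
 *	{
 *		struct my_cb *mcb = container_of(cb, struct my_cb, cb);
 *
 *		(flush mcb->pending here)
 *		kfree(mcb);
 *	}
 *
 *	cb = blk_check_plugged(my_unplug, my_dev, sizeof(struct my_cb));
 *	if (cb)
 *		(queue work on the plug-local state)
 */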
void __blk_flush_plug(struct blk_plug *plug, bool from_schedule)
{
	if (!list_empty(&plug->cb_list))
		flush_plug_callbacks(plug, from_schedule);
	blk_mq_flush_plug_list(plug, from_schedule);
	/*
	 * Unconditionally flush out cached requests, even if the unplug
	 * event came from schedule. Since we hold references to the
	 * queue for cached requests, we don't want a blocked task holding
	 * up a queue freeze/quiesce event.
	 */
	if (unlikely(!rq_list_empty(plug->cached_rq)))
		blk_mq_free_plug_rqs(plug);
}
/**
 * blk_finish_plug - mark the end of a batch of submitted I/O
 * @plug: The &struct blk_plug passed to blk_start_plug()
 *
 * Description:
 * Indicate that a batch of I/O submissions is complete. This function
 * must be paired with an initial call to blk_start_plug(). The intent
 * is to allow the block layer to optimize I/O submission. See the
 * documentation for blk_start_plug() for more information.
 */
void blk_finish_plug(struct blk_plug *plug)
{
	if (plug == current->plug) {
		__blk_flush_plug(plug, false);
		current->plug = NULL;
	}
}
EXPORT_SYMBOL(blk_finish_plug);
void blk_io_schedule(void)
{
	/* Prevent hang_check timer from firing at us during very long I/O */
	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

	if (timeout)
		io_schedule_timeout(timeout);
	else
		io_schedule();
}
EXPORT_SYMBOL_GPL(blk_io_schedule);
int __init blk_dev_init(void)
{
	BUILD_BUG_ON((__force u32)REQ_OP_LAST >= (1 << REQ_OP_BITS));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct request, cmd_flags));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct bio, bi_opf));

	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
	kblockd_workqueue = alloc_workqueue("kblockd",
					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");

	blk_requestq_cachep = kmem_cache_create("request_queue",
			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);

	blk_debugfs_root = debugfs_create_dir("block", NULL);

	return 0;
}