1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 1991, 1992 Linus Torvalds
4 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
5 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
6 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
7 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
8 * - July 2000
9 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
10 */
11
12 /*
13 * This handles all read/write requests to block devices
14 */
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/bio.h>
18 #include <linux/blkdev.h>
19 #include <linux/blk-pm.h>
20 #include <linux/blk-integrity.h>
21 #include <linux/highmem.h>
22 #include <linux/mm.h>
23 #include <linux/pagemap.h>
24 #include <linux/kernel_stat.h>
25 #include <linux/string.h>
26 #include <linux/init.h>
27 #include <linux/completion.h>
28 #include <linux/slab.h>
29 #include <linux/swap.h>
30 #include <linux/writeback.h>
31 #include <linux/task_io_accounting_ops.h>
32 #include <linux/fault-inject.h>
33 #include <linux/list_sort.h>
34 #include <linux/delay.h>
35 #include <linux/ratelimit.h>
36 #include <linux/pm_runtime.h>
37 #include <linux/t10-pi.h>
38 #include <linux/debugfs.h>
39 #include <linux/bpf.h>
40 #include <linux/part_stat.h>
41 #include <linux/sched/sysctl.h>
42 #include <linux/blk-crypto.h>
43
44 #define CREATE_TRACE_POINTS
45 #include <trace/events/block.h>
46
47 #include "blk.h"
48 #include "blk-mq-sched.h"
49 #include "blk-pm.h"
50 #include "blk-cgroup.h"
51 #include "blk-throttle.h"
52 #include "blk-ioprio.h"
53
54 struct dentry *blk_debugfs_root;
55
56 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
57 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
58 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
59 EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
60 EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
61 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);
62
63 static DEFINE_IDA(blk_queue_ida);
64
65 /*
66 * For queue allocation
67 */
68 static struct kmem_cache *blk_requestq_cachep;
69
70 /*
71 * Controlling structure to kblockd
72 */
73 static struct workqueue_struct *kblockd_workqueue;
74
75 /**
76 * blk_queue_flag_set - atomically set a queue flag
77 * @flag: flag to be set
78 * @q: request queue
79 */
80 void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
81 {
82 set_bit(flag, &q->queue_flags);
83 }
84 EXPORT_SYMBOL(blk_queue_flag_set);
85
86 /**
87 * blk_queue_flag_clear - atomically clear a queue flag
88 * @flag: flag to be cleared
89 * @q: request queue
90 */
91 void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
92 {
93 clear_bit(flag, &q->queue_flags);
94 }
95 EXPORT_SYMBOL(blk_queue_flag_clear);
96
97 /**
98 * blk_queue_flag_test_and_set - atomically test and set a queue flag
99 * @flag: flag to be set
100 * @q: request queue
101 *
102 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
103 * the flag was already set.
104 */
105 bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
106 {
107 return test_and_set_bit(flag, &q->queue_flags);
108 }
109 EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);
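/*
 * Illustrative sketch (not part of the upstream file): a driver that knows
 * its device has no seek penalty could mark the queue non-rotational with
 * the helper above. QUEUE_FLAG_NONROT is assumed to be the flag of interest:
 *
 *	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 */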
110
111 #define REQ_OP_NAME(name) [REQ_OP_##name] = #name
112 static const char *const blk_op_name[] = {
113 REQ_OP_NAME(READ),
114 REQ_OP_NAME(WRITE),
115 REQ_OP_NAME(FLUSH),
116 REQ_OP_NAME(DISCARD),
117 REQ_OP_NAME(SECURE_ERASE),
118 REQ_OP_NAME(ZONE_RESET),
119 REQ_OP_NAME(ZONE_RESET_ALL),
120 REQ_OP_NAME(ZONE_OPEN),
121 REQ_OP_NAME(ZONE_CLOSE),
122 REQ_OP_NAME(ZONE_FINISH),
123 REQ_OP_NAME(ZONE_APPEND),
124 REQ_OP_NAME(WRITE_ZEROES),
125 REQ_OP_NAME(DRV_IN),
126 REQ_OP_NAME(DRV_OUT),
127 };
128 #undef REQ_OP_NAME
129
130 /**
131 * blk_op_str - Return the string XXX matching a REQ_OP_XXX value.
132 * @op: REQ_OP_XXX.
133 *
134 * Description: Centralized block layer helper to convert REQ_OP_XXX into a
135 * string. Useful when debugging and tracing a bio or request. For an
136 * invalid REQ_OP_XXX it returns the string "UNKNOWN".
137 */
138 inline const char *blk_op_str(enum req_op op)
139 {
140 const char *op_str = "UNKNOWN";
141
142 if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
143 op_str = blk_op_name[op];
144
145 return op_str;
146 }
147 EXPORT_SYMBOL_GPL(blk_op_str);
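/*
 * Illustrative use (editorial sketch, not taken from this file): callers
 * typically feed bio_op() or req_op() into blk_op_str() when emitting
 * diagnostics, e.g.:
 *
 *	pr_debug("%pg: op=%s sector=%llu\n", bio->bi_bdev,
 *		 blk_op_str(bio_op(bio)),
 *		 (unsigned long long)bio->bi_iter.bi_sector);
 */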
148
149 static const struct {
150 int errno;
151 const char *name;
152 } blk_errors[] = {
153 [BLK_STS_OK] = { 0, "" },
154 [BLK_STS_NOTSUPP] = { -EOPNOTSUPP, "operation not supported" },
155 [BLK_STS_TIMEOUT] = { -ETIMEDOUT, "timeout" },
156 [BLK_STS_NOSPC] = { -ENOSPC, "critical space allocation" },
157 [BLK_STS_TRANSPORT] = { -ENOLINK, "recoverable transport" },
158 [BLK_STS_TARGET] = { -EREMOTEIO, "critical target" },
159 [BLK_STS_RESV_CONFLICT] = { -EBADE, "reservation conflict" },
160 [BLK_STS_MEDIUM] = { -ENODATA, "critical medium" },
161 [BLK_STS_PROTECTION] = { -EILSEQ, "protection" },
162 [BLK_STS_RESOURCE] = { -ENOMEM, "kernel resource" },
163 [BLK_STS_DEV_RESOURCE] = { -EBUSY, "device resource" },
164 [BLK_STS_AGAIN] = { -EAGAIN, "nonblocking retry" },
165 [BLK_STS_OFFLINE] = { -ENODEV, "device offline" },
166
167 /* device mapper special case, should not leak out: */
168 [BLK_STS_DM_REQUEUE] = { -EREMCHG, "dm internal retry" },
169
170 /* zone device specific errors */
171 [BLK_STS_ZONE_OPEN_RESOURCE] = { -ETOOMANYREFS, "open zones exceeded" },
172 [BLK_STS_ZONE_ACTIVE_RESOURCE] = { -EOVERFLOW, "active zones exceeded" },
173
174 /* Command duration limit device-side timeout */
175 [BLK_STS_DURATION_LIMIT] = { -ETIME, "duration limit exceeded" },
176
177 /* everything else not covered above: */
178 [BLK_STS_IOERR] = { -EIO, "I/O" },
179 };
180
181 blk_status_t errno_to_blk_status(int errno)
182 {
183 int i;
184
185 for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
186 if (blk_errors[i].errno == errno)
187 return (__force blk_status_t)i;
188 }
189
190 return BLK_STS_IOERR;
191 }
192 EXPORT_SYMBOL_GPL(errno_to_blk_status);
193
194 int blk_status_to_errno(blk_status_t status)
195 {
196 int idx = (__force int)status;
197
198 if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
199 return -EIO;
200 return blk_errors[idx].errno;
201 }
202 EXPORT_SYMBOL_GPL(blk_status_to_errno);
203
204 const char *blk_status_to_str(blk_status_t status)
205 {
206 int idx = (__force int)status;
207
208 if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
209 return "<null>";
210 return blk_errors[idx].name;
211 }
212 EXPORT_SYMBOL_GPL(blk_status_to_str);
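/*
 * Round-trip sketch (editorial illustration): a driver that gets -ENOSPC from
 * a lower layer can map it to a blk_status_t for bio completion, and the
 * reverse mapping recovers a conventional errno:
 *
 *	blk_status_t sts = errno_to_blk_status(-ENOSPC);    (BLK_STS_NOSPC)
 *	int err = blk_status_to_errno(sts);                  (-ENOSPC again)
 */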
213
214 /**
215 * blk_sync_queue - cancel any pending callbacks on a queue
216 * @q: the queue
217 *
218 * Description:
219 * The block layer may perform asynchronous callback activity
220 * on a queue, such as calling the unplug function after a timeout.
221 * A block device may call blk_sync_queue to ensure that any
222 * such activity is cancelled, thus allowing it to release resources
223 * that the callbacks might use. The caller must already have made sure
224 * that its ->submit_bio will not re-add plugging prior to calling
225 * this function.
226 *
227 * This function does not cancel any asynchronous activity arising
228 * out of elevator or throttling code. That would require elevator_exit()
229 * and blkcg_exit_queue() to be called with queue lock initialized.
230 *
231 */
232 void blk_sync_queue(struct request_queue *q)
233 {
234 del_timer_sync(&q->timeout);
235 cancel_work_sync(&q->timeout_work);
236 }
237 EXPORT_SYMBOL(blk_sync_queue);
238
239 /**
240 * blk_set_pm_only - increment pm_only counter
241 * @q: request queue pointer
242 */
243 void blk_set_pm_only(struct request_queue *q)
244 {
245 atomic_inc(&q->pm_only);
246 }
247 EXPORT_SYMBOL_GPL(blk_set_pm_only);
248
249 void blk_clear_pm_only(struct request_queue *q)
250 {
251 int pm_only;
252
253 pm_only = atomic_dec_return(&q->pm_only);
254 WARN_ON_ONCE(pm_only < 0);
255 if (pm_only == 0)
256 wake_up_all(&q->mq_freeze_wq);
257 }
258 EXPORT_SYMBOL_GPL(blk_clear_pm_only);
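/*
 * Pairing sketch (illustrative): runtime PM code increments pm_only while a
 * device is suspended and decrements it again on resume, which wakes anybody
 * waiting in blk_queue_enter():
 *
 *	blk_set_pm_only(q);	    (suspend path)
 *	...
 *	blk_clear_pm_only(q);	    (resume path)
 */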
259
260 static void blk_free_queue_rcu(struct rcu_head *rcu_head)
261 {
262 struct request_queue *q = container_of(rcu_head,
263 struct request_queue, rcu_head);
264
265 percpu_ref_exit(&q->q_usage_counter);
266 kmem_cache_free(blk_requestq_cachep, q);
267 }
268
269 static void blk_free_queue(struct request_queue *q)
270 {
271 blk_free_queue_stats(q->stats);
272 if (queue_is_mq(q))
273 blk_mq_release(q);
274
275 ida_free(&blk_queue_ida, q->id);
276 call_rcu(&q->rcu_head, blk_free_queue_rcu);
277 }
278
279 /**
280 * blk_put_queue - decrement the request_queue refcount
281 * @q: the request_queue structure to decrement the refcount for
282 *
283 * Decrements the refcount of the request_queue and free it when the refcount
284 * reaches 0.
285 */
286 void blk_put_queue(struct request_queue *q)
287 {
288 if (refcount_dec_and_test(&q->refs))
289 blk_free_queue(q);
290 }
291 EXPORT_SYMBOL(blk_put_queue);
292
293 void blk_queue_start_drain(struct request_queue *q)
294 {
295 /*
296 * When the queue DYING flag is set, we need to block new requests
297 * from entering the queue, so we call blk_freeze_queue_start() to
298 * prevent I/O from crossing blk_queue_enter().
299 */
300 blk_freeze_queue_start(q);
301 if (queue_is_mq(q))
302 blk_mq_wake_waiters(q);
303 /* Make blk_queue_enter() reexamine the DYING flag. */
304 wake_up_all(&q->mq_freeze_wq);
305 }
306
307 /**
308 * blk_queue_enter() - try to increase q->q_usage_counter
309 * @q: request queue pointer
310 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
311 */
312 int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
313 {
314 const bool pm = flags & BLK_MQ_REQ_PM;
315
316 while (!blk_try_enter_queue(q, pm)) {
317 if (flags & BLK_MQ_REQ_NOWAIT)
318 return -EAGAIN;
319
320 /*
321 * This read pairs with the barrier in blk_freeze_queue_start(); we
322 * need to order reading the __PERCPU_REF_DEAD flag of
323 * .q_usage_counter against reading .mq_freeze_depth or the queue
324 * dying flag, otherwise the following wait may never return if the
325 * two reads are reordered.
326 */
327 smp_rmb();
328 wait_event(q->mq_freeze_wq,
329 (!q->mq_freeze_depth &&
330 blk_pm_resume_queue(pm, q)) ||
331 blk_queue_dying(q));
332 if (blk_queue_dying(q))
333 return -ENODEV;
334 }
335
336 return 0;
337 }
338
339 int __bio_queue_enter(struct request_queue *q, struct bio *bio)
340 {
341 while (!blk_try_enter_queue(q, false)) {
342 struct gendisk *disk = bio->bi_bdev->bd_disk;
343
344 if (bio->bi_opf & REQ_NOWAIT) {
345 if (test_bit(GD_DEAD, &disk->state))
346 goto dead;
347 bio_wouldblock_error(bio);
348 return -EAGAIN;
349 }
350
351 /*
352 * This read pairs with the barrier in blk_freeze_queue_start(); we
353 * need to order reading the __PERCPU_REF_DEAD flag of
354 * .q_usage_counter against reading .mq_freeze_depth or the queue
355 * dying flag, otherwise the following wait may never return if the
356 * two reads are reordered.
357 */
358 smp_rmb();
359 wait_event(q->mq_freeze_wq,
360 (!q->mq_freeze_depth &&
361 blk_pm_resume_queue(false, q)) ||
362 test_bit(GD_DEAD, &disk->state));
363 if (test_bit(GD_DEAD, &disk->state))
364 goto dead;
365 }
366
367 return 0;
368 dead:
369 bio_io_error(bio);
370 return -ENODEV;
371 }
372
373 void blk_queue_exit(struct request_queue *q)
374 {
375 percpu_ref_put(&q->q_usage_counter);
376 }
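/*
 * Typical usage pattern (illustrative only): code that needs the queue to
 * stay usable across an operation brackets it with blk_queue_enter() and
 * blk_queue_exit(), backing off when the queue is frozen or dying.
 * do_work_on() is a hypothetical placeholder:
 *
 *	if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT))
 *		return;			    (frozen, dying or would block)
 *	do_work_on(q);
 *	blk_queue_exit(q);
 */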
377
378 static void blk_queue_usage_counter_release(struct percpu_ref *ref)
379 {
380 struct request_queue *q =
381 container_of(ref, struct request_queue, q_usage_counter);
382
383 wake_up_all(&q->mq_freeze_wq);
384 }
385
386 static void blk_rq_timed_out_timer(struct timer_list *t)
387 {
388 struct request_queue *q = from_timer(q, t, timeout);
389
390 kblockd_schedule_work(&q->timeout_work);
391 }
392
393 static void blk_timeout_work(struct work_struct *work)
394 {
395 }
396
397 struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id)
398 {
399 struct request_queue *q;
400 int error;
401
402 q = kmem_cache_alloc_node(blk_requestq_cachep, GFP_KERNEL | __GFP_ZERO,
403 node_id);
404 if (!q)
405 return ERR_PTR(-ENOMEM);
406
407 q->last_merge = NULL;
408
409 q->id = ida_alloc(&blk_queue_ida, GFP_KERNEL);
410 if (q->id < 0) {
411 error = q->id;
412 goto fail_q;
413 }
414
415 q->stats = blk_alloc_queue_stats();
416 if (!q->stats) {
417 error = -ENOMEM;
418 goto fail_id;
419 }
420
421 error = blk_set_default_limits(lim);
422 if (error)
423 goto fail_stats;
424 q->limits = *lim;
425
426 q->node = node_id;
427
428 atomic_set(&q->nr_active_requests_shared_tags, 0);
429
430 timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
431 INIT_WORK(&q->timeout_work, blk_timeout_work);
432 INIT_LIST_HEAD(&q->icq_list);
433
434 refcount_set(&q->refs, 1);
435 mutex_init(&q->debugfs_mutex);
436 mutex_init(&q->sysfs_lock);
437 mutex_init(&q->sysfs_dir_lock);
438 mutex_init(&q->limits_lock);
439 mutex_init(&q->rq_qos_mutex);
440 spin_lock_init(&q->queue_lock);
441
442 init_waitqueue_head(&q->mq_freeze_wq);
443 mutex_init(&q->mq_freeze_lock);
444
445 /*
446 * Init percpu_ref in atomic mode so that it's faster to shutdown.
447 * See blk_register_queue() for details.
448 */
449 error = percpu_ref_init(&q->q_usage_counter,
450 blk_queue_usage_counter_release,
451 PERCPU_REF_INIT_ATOMIC, GFP_KERNEL);
452 if (error)
453 goto fail_stats;
454
455 q->nr_requests = BLKDEV_DEFAULT_RQ;
456
457 return q;
458
459 fail_stats:
460 blk_free_queue_stats(q->stats);
461 fail_id:
462 ida_free(&blk_queue_ida, q->id);
463 fail_q:
464 kmem_cache_free(blk_requestq_cachep, q);
465 return ERR_PTR(error);
466 }
467
468 /**
469 * blk_get_queue - increment the request_queue refcount
470 * @q: the request_queue structure to increment the refcount for
471 *
472 * Increment the refcount of the request_queue kobject.
473 *
474 * Context: Any context.
475 */
476 bool blk_get_queue(struct request_queue *q)
477 {
478 if (unlikely(blk_queue_dying(q)))
479 return false;
480 refcount_inc(&q->refs);
481 return true;
482 }
483 EXPORT_SYMBOL(blk_get_queue);
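/*
 * Refcounting sketch (editorial illustration): blk_get_queue() and
 * blk_put_queue() pair like any other reference count when a context needs
 * to keep @q alive beyond its caller's scope:
 *
 *	if (!blk_get_queue(q))
 *		return;			    (queue already dying)
 *	...
 *	blk_put_queue(q);
 */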
484
485 #ifdef CONFIG_FAIL_MAKE_REQUEST
486
487 static DECLARE_FAULT_ATTR(fail_make_request);
488
489 static int __init setup_fail_make_request(char *str)
490 {
491 return setup_fault_attr(&fail_make_request, str);
492 }
493 __setup("fail_make_request=", setup_fail_make_request);
494
495 bool should_fail_request(struct block_device *part, unsigned int bytes)
496 {
497 return part->bd_make_it_fail && should_fail(&fail_make_request, bytes);
498 }
499
500 static int __init fail_make_request_debugfs(void)
501 {
502 struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
503 NULL, &fail_make_request);
504
505 return PTR_ERR_OR_ZERO(dir);
506 }
507
508 late_initcall(fail_make_request_debugfs);
509 #endif /* CONFIG_FAIL_MAKE_REQUEST */
510
511 static inline void bio_check_ro(struct bio *bio)
512 {
513 if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
514 if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
515 return;
516
517 if (bio->bi_bdev->bd_ro_warned)
518 return;
519
520 bio->bi_bdev->bd_ro_warned = true;
521 /*
522 * Using an ioctl to set the underlying disk of raid/dm to
523 * read-only will trigger this.
524 */
525 pr_warn("Trying to write to read-only block-device %pg\n",
526 bio->bi_bdev);
527 }
528 }
529
530 static noinline int should_fail_bio(struct bio *bio)
531 {
532 if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size))
533 return -EIO;
534 return 0;
535 }
536 ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);
537
538 /*
539 * Check whether this bio extends beyond the end of the device or partition.
540 * This may well happen - the kernel calls bread() without checking the size of
541 * the device, e.g., when mounting a file system.
542 */
543 static inline int bio_check_eod(struct bio *bio)
544 {
545 sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
546 unsigned int nr_sectors = bio_sectors(bio);
547
548 if (nr_sectors &&
549 (nr_sectors > maxsector ||
550 bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
551 pr_info_ratelimited("%s: attempt to access beyond end of device\n"
552 "%pg: rw=%d, sector=%llu, nr_sectors = %u limit=%llu\n",
553 current->comm, bio->bi_bdev, bio->bi_opf,
554 bio->bi_iter.bi_sector, nr_sectors, maxsector);
555 return -EIO;
556 }
557 return 0;
558 }
559
560 /*
561 * Remap block n of partition p to block n+start(p) of the disk.
562 */
563 static int blk_partition_remap(struct bio *bio)
564 {
565 struct block_device *p = bio->bi_bdev;
566
567 if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
568 return -EIO;
569 if (bio_sectors(bio)) {
570 bio->bi_iter.bi_sector += p->bd_start_sect;
571 trace_block_bio_remap(bio, p->bd_dev,
572 bio->bi_iter.bi_sector -
573 p->bd_start_sect);
574 }
575 bio_set_flag(bio, BIO_REMAPPED);
576 return 0;
577 }
578
579 /*
580 * Check a zone append write to a zoned block device.
581 */
582 static inline blk_status_t blk_check_zone_append(struct request_queue *q,
583 struct bio *bio)
584 {
585 int nr_sectors = bio_sectors(bio);
586
587 /* Only applicable to zoned block devices */
588 if (!bdev_is_zoned(bio->bi_bdev))
589 return BLK_STS_NOTSUPP;
590
591 /* The bio sector must point to the start of a sequential zone */
592 if (!bdev_is_zone_start(bio->bi_bdev, bio->bi_iter.bi_sector) ||
593 !bio_zone_is_seq(bio))
594 return BLK_STS_IOERR;
595
596 /*
597 * Not allowed to cross zone boundaries. Otherwise, the BIO will be
598 * split and could result in non-contiguous sectors being written in
599 * different zones.
600 */
601 if (nr_sectors > q->limits.chunk_sectors)
602 return BLK_STS_IOERR;
603
604 /* Make sure the BIO is small enough and will not get split */
605 if (nr_sectors > q->limits.max_zone_append_sectors)
606 return BLK_STS_IOERR;
607
608 bio->bi_opf |= REQ_NOMERGE;
609
610 return BLK_STS_OK;
611 }
612
613 static void __submit_bio(struct bio *bio)
614 {
615 if (unlikely(!blk_crypto_bio_prep(&bio)))
616 return;
617
618 if (!bio->bi_bdev->bd_has_submit_bio) {
619 blk_mq_submit_bio(bio);
620 } else if (likely(bio_queue_enter(bio) == 0)) {
621 struct gendisk *disk = bio->bi_bdev->bd_disk;
622
623 disk->fops->submit_bio(bio);
624 blk_queue_exit(disk->queue);
625 }
626 }
627
628 /*
629 * The loop in this function may be a bit non-obvious, and so deserves some
630 * explanation:
631 *
632 * - Before entering the loop, bio->bi_next is NULL (as all callers ensure
633 * that), so we have a list with a single bio.
634 * - We pretend that we have just taken it off a longer list, so we assign
635 * bio_list to a pointer to the bio_list_on_stack, thus initialising the
636 * bio_list of new bios to be added. ->submit_bio() may indeed add some more
637 * bios through a recursive call to submit_bio_noacct. If it did, we find a
638 * non-NULL value in bio_list and re-enter the loop from the top.
639 * - In this case we really did just take the bio off the top of the list (no
640 * pretending) and so remove it from bio_list, and call into ->submit_bio()
641 * again.
642 *
643 * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio.
644 * bio_list_on_stack[1] contains bios that were submitted before the current
645 * ->submit_bio, but that haven't been processed yet.
646 */
647 static void __submit_bio_noacct(struct bio *bio)
648 {
649 struct bio_list bio_list_on_stack[2];
650
651 BUG_ON(bio->bi_next);
652
653 bio_list_init(&bio_list_on_stack[0]);
654 current->bio_list = bio_list_on_stack;
655
656 do {
657 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
658 struct bio_list lower, same;
659
660 /*
661 * Create a fresh bio_list for all subordinate requests.
662 */
663 bio_list_on_stack[1] = bio_list_on_stack[0];
664 bio_list_init(&bio_list_on_stack[0]);
665
666 __submit_bio(bio);
667
668 /*
669 * Sort new bios into those for a lower level and those for the
670 * same level.
671 */
672 bio_list_init(&lower);
673 bio_list_init(&same);
674 while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
675 if (q == bdev_get_queue(bio->bi_bdev))
676 bio_list_add(&same, bio);
677 else
678 bio_list_add(&lower, bio);
679
680 /*
681 * Now assemble so we handle the lowest level first.
682 */
683 bio_list_merge(&bio_list_on_stack[0], &lower);
684 bio_list_merge(&bio_list_on_stack[0], &same);
685 bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
686 } while ((bio = bio_list_pop(&bio_list_on_stack[0])));
687
688 current->bio_list = NULL;
689 }
690
691 static void __submit_bio_noacct_mq(struct bio *bio)
692 {
693 struct bio_list bio_list[2] = { };
694
695 current->bio_list = bio_list;
696
697 do {
698 __submit_bio(bio);
699 } while ((bio = bio_list_pop(&bio_list[0])));
700
701 current->bio_list = NULL;
702 }
703
704 void submit_bio_noacct_nocheck(struct bio *bio)
705 {
706 blk_cgroup_bio_start(bio);
707 blkcg_bio_issue_init(bio);
708
709 if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
710 trace_block_bio_queue(bio);
711 /*
712 * Now that enqueuing has been traced, we need to trace
713 * completion as well.
714 */
715 bio_set_flag(bio, BIO_TRACE_COMPLETION);
716 }
717
718 /*
719 * We only want one ->submit_bio to be active at a time, else stack
720 * usage with stacked devices could be a problem. Use current->bio_list
721 * to collect a list of requests submitted by a ->submit_bio method while
722 * it is active, and then process them after it returns.
723 */
724 if (current->bio_list)
725 bio_list_add(&current->bio_list[0], bio);
726 else if (!bio->bi_bdev->bd_has_submit_bio)
727 __submit_bio_noacct_mq(bio);
728 else
729 __submit_bio_noacct(bio);
730 }
731
732 /**
733 * submit_bio_noacct - re-submit a bio to the block device layer for I/O
734 * @bio: The bio describing the location in memory and on the device.
735 *
736 * This is a version of submit_bio() that shall only be used for I/O that is
737 * resubmitted to lower level drivers by stacking block drivers. All file
738 * systems and other upper level users of the block layer should use
739 * submit_bio() instead.
740 */
741 void submit_bio_noacct(struct bio *bio)
742 {
743 struct block_device *bdev = bio->bi_bdev;
744 struct request_queue *q = bdev_get_queue(bdev);
745 blk_status_t status = BLK_STS_IOERR;
746
747 might_sleep();
748
749 /*
750 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
751 * if the queue does not support NOWAIT.
752 */
753 if ((bio->bi_opf & REQ_NOWAIT) && !bdev_nowait(bdev))
754 goto not_supported;
755
756 if (should_fail_bio(bio))
757 goto end_io;
758 bio_check_ro(bio);
759 if (!bio_flagged(bio, BIO_REMAPPED)) {
760 if (unlikely(bio_check_eod(bio)))
761 goto end_io;
762 if (bdev->bd_partno && unlikely(blk_partition_remap(bio)))
763 goto end_io;
764 }
765
766 /*
767 * Filter flush bios early so that bio-based drivers without flush
768 * support don't have to worry about them.
769 */
770 if (op_is_flush(bio->bi_opf)) {
771 if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_WRITE &&
772 bio_op(bio) != REQ_OP_ZONE_APPEND))
773 goto end_io;
774 if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
775 bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
776 if (!bio_sectors(bio)) {
777 status = BLK_STS_OK;
778 goto end_io;
779 }
780 }
781 }
782
783 if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
784 bio_clear_polled(bio);
785
786 switch (bio_op(bio)) {
787 case REQ_OP_READ:
788 case REQ_OP_WRITE:
789 break;
790 case REQ_OP_FLUSH:
791 /*
792 * REQ_OP_FLUSH can't be submitted through bios, it is only
793 * synthesized in struct request by the flush state machine.
794 */
795 goto not_supported;
796 case REQ_OP_DISCARD:
797 if (!bdev_max_discard_sectors(bdev))
798 goto not_supported;
799 break;
800 case REQ_OP_SECURE_ERASE:
801 if (!bdev_max_secure_erase_sectors(bdev))
802 goto not_supported;
803 break;
804 case REQ_OP_ZONE_APPEND:
805 status = blk_check_zone_append(q, bio);
806 if (status != BLK_STS_OK)
807 goto end_io;
808 break;
809 case REQ_OP_WRITE_ZEROES:
810 if (!q->limits.max_write_zeroes_sectors)
811 goto not_supported;
812 break;
813 case REQ_OP_ZONE_RESET:
814 case REQ_OP_ZONE_OPEN:
815 case REQ_OP_ZONE_CLOSE:
816 case REQ_OP_ZONE_FINISH:
817 if (!bdev_is_zoned(bio->bi_bdev))
818 goto not_supported;
819 break;
820 case REQ_OP_ZONE_RESET_ALL:
821 if (!bdev_is_zoned(bio->bi_bdev) || !blk_queue_zone_resetall(q))
822 goto not_supported;
823 break;
824 case REQ_OP_DRV_IN:
825 case REQ_OP_DRV_OUT:
826 /*
827 * Driver private operations are only used with passthrough
828 * requests.
829 */
830 fallthrough;
831 default:
832 goto not_supported;
833 }
834
835 if (blk_throtl_bio(bio))
836 return;
837 submit_bio_noacct_nocheck(bio);
838 return;
839
840 not_supported:
841 status = BLK_STS_NOTSUPP;
842 end_io:
843 bio->bi_status = status;
844 bio_endio(bio);
845 }
846 EXPORT_SYMBOL(submit_bio_noacct);
847
848 static void bio_set_ioprio(struct bio *bio)
849 {
850 /* Nobody set ioprio so far? Initialize it based on task's nice value */
851 if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) == IOPRIO_CLASS_NONE)
852 bio->bi_ioprio = get_current_ioprio();
853 blkcg_set_ioprio(bio);
854 }
855
856 /**
857 * submit_bio - submit a bio to the block device layer for I/O
858 * @bio: The &struct bio which describes the I/O
859 *
860 * submit_bio() is used to submit I/O requests to block devices. It is passed a
861 * fully set up &struct bio that describes the I/O that needs to be done. The
862 * bio will be sent to the device described by the bi_bdev field.
863 *
864 * The success/failure status of the request, along with notification of
865 * completion, is delivered asynchronously through the ->bi_end_io() callback
866 * in @bio. The bio must NOT be touched by the caller until ->bi_end_io() has
867 * been called.
868 */
869 void submit_bio(struct bio *bio)
870 {
871 if (bio_op(bio) == REQ_OP_READ) {
872 task_io_account_read(bio->bi_iter.bi_size);
873 count_vm_events(PGPGIN, bio_sectors(bio));
874 } else if (bio_op(bio) == REQ_OP_WRITE) {
875 count_vm_events(PGPGOUT, bio_sectors(bio));
876 }
877
878 bio_set_ioprio(bio);
879 submit_bio_noacct(bio);
880 }
881 EXPORT_SYMBOL(submit_bio);
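/*
 * Minimal submission sketch, assuming the caller already holds a page and an
 * opened block device; my_end_io() is a hypothetical completion handler and
 * this snippet is illustrative, not part of the upstream file:
 *
 *	struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
 *
 *	bio->bi_iter.bi_sector = sector;
 *	__bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio->bi_end_io = my_end_io;
 *	submit_bio(bio);
 *
 * Completion is then reported asynchronously through ->bi_end_io().
 */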
882
883 /**
884 * bio_poll - poll for BIO completions
885 * @bio: bio to poll for
886 * @iob: batches of IO
887 * @flags: BLK_POLL_* flags that control the behavior
888 *
889 * Poll for completions on queue associated with the bio. Returns number of
890 * completed entries found.
891 *
892 * Note: the caller must either be the context that submitted @bio, or
893 * be in an RCU critical section to prevent freeing of @bio.
894 */
895 int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
896 {
897 blk_qc_t cookie = READ_ONCE(bio->bi_cookie);
898 struct block_device *bdev;
899 struct request_queue *q;
900 int ret = 0;
901
902 bdev = READ_ONCE(bio->bi_bdev);
903 if (!bdev)
904 return 0;
905
906 q = bdev_get_queue(bdev);
907 if (cookie == BLK_QC_T_NONE ||
908 !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
909 return 0;
910
911 /*
912 * As the requests that require a zone lock are not plugged in the
913 * first place, directly accessing the plug instead of using
914 * blk_mq_plug() should not have any consequences during flushing for
915 * zoned devices.
916 */
917 blk_flush_plug(current->plug, false);
918
919 /*
920 * We need to be able to enter a frozen queue, similar to how
921 * timeouts also need to do that. If that is blocked, then we can
922 * have pending IO when a queue freeze is started, and then the
923 * wait for the freeze to finish will wait for polled requests to
924 * time out as the poller is prevented from entering the queue and
925 * completing them. As long as we prevent new IO from being queued,
926 * that should be all that matters.
927 */
928 if (!percpu_ref_tryget(&q->q_usage_counter))
929 return 0;
930 if (queue_is_mq(q)) {
931 ret = blk_mq_poll(q, cookie, iob, flags);
932 } else {
933 struct gendisk *disk = q->disk;
934
935 if (disk && disk->fops->poll_bio)
936 ret = disk->fops->poll_bio(bio, iob, flags);
937 }
938 blk_queue_exit(q);
939 return ret;
940 }
941 EXPORT_SYMBOL_GPL(bio_poll);
942
943 /*
944 * Helper to implement file_operations.iopoll. Requires the bio to be stored
945 * in iocb->private, and cleared before freeing the bio.
946 */
947 int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
948 unsigned int flags)
949 {
950 struct bio *bio;
951 int ret = 0;
952
953 /*
954 * Note: the bio cache only uses SLAB_TYPESAFE_BY_RCU, so bio can
955 * point to a freshly allocated bio at this point. If that happens
956 * we have a few cases to consider:
957 *
958 * 1) the bio is being initialized and bi_bdev is NULL. We simply do
959 *    nothing in this case
960 * 2) the bio points to a device that does not support polling. bio_poll
961 *    will catch this and return 0
962 * 3) the bio points to a poll capable device, including but not
963 * limited to the one that the original bio pointed to. In this
964 * case we will call into the actual poll method and poll for I/O,
965 * even if we don't need to, but it won't cause harm either.
966 *
967 * For cases 2) and 3) above the RCU grace period ensures that bi_bdev
968 * is still allocated. Because partitions hold a reference to the whole
969 * device bdev and thus disk, the disk is also still valid. Grabbing
970 * a reference to the queue in bio_poll() ensures the hctxs and requests
971 * are still valid as well.
972 */
973 rcu_read_lock();
974 bio = READ_ONCE(kiocb->private);
975 if (bio)
976 ret = bio_poll(bio, iob, flags);
977 rcu_read_unlock();
978
979 return ret;
980 }
981 EXPORT_SYMBOL_GPL(iocb_bio_iopoll);
982
983 void update_io_ticks(struct block_device *part, unsigned long now, bool end)
984 {
985 unsigned long stamp;
986 again:
987 stamp = READ_ONCE(part->bd_stamp);
988 if (unlikely(time_after(now, stamp))) {
989 if (likely(try_cmpxchg(&part->bd_stamp, &stamp, now)))
990 __part_stat_add(part, io_ticks, end ? now - stamp : 1);
991 }
992 if (part->bd_partno) {
993 part = bdev_whole(part);
994 goto again;
995 }
996 }
997
998 unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,
999 unsigned long start_time)
1000 {
1001 part_stat_lock();
1002 update_io_ticks(bdev, start_time, false);
1003 part_stat_local_inc(bdev, in_flight[op_is_write(op)]);
1004 part_stat_unlock();
1005
1006 return start_time;
1007 }
1008 EXPORT_SYMBOL(bdev_start_io_acct);
1009
1010 /**
1011 * bio_start_io_acct - start I/O accounting for bio based drivers
1012 * @bio: bio to start account for
1013 *
1014 * Returns the start time that should be passed back to bio_end_io_acct().
1015 */
1016 unsigned long bio_start_io_acct(struct bio *bio)
1017 {
1018 return bdev_start_io_acct(bio->bi_bdev, bio_op(bio), jiffies);
1019 }
1020 EXPORT_SYMBOL_GPL(bio_start_io_acct);
1021
1022 void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
1023 unsigned int sectors, unsigned long start_time)
1024 {
1025 const int sgrp = op_stat_group(op);
1026 unsigned long now = READ_ONCE(jiffies);
1027 unsigned long duration = now - start_time;
1028
1029 part_stat_lock();
1030 update_io_ticks(bdev, now, true);
1031 part_stat_inc(bdev, ios[sgrp]);
1032 part_stat_add(bdev, sectors[sgrp], sectors);
1033 part_stat_add(bdev, nsecs[sgrp], jiffies_to_nsecs(duration));
1034 part_stat_local_dec(bdev, in_flight[op_is_write(op)]);
1035 part_stat_unlock();
1036 }
1037 EXPORT_SYMBOL(bdev_end_io_acct);
1038
1039 void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
1040 struct block_device *orig_bdev)
1041 {
1042 bdev_end_io_acct(orig_bdev, bio_op(bio), bio_sectors(bio), start_time);
1043 }
1044 EXPORT_SYMBOL_GPL(bio_end_io_acct_remapped);
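/*
 * Accounting sketch for a bio-based driver (illustrative): record the start
 * time at submission and report the completion against the original bdev if
 * the bio was remapped to another device in between:
 *
 *	unsigned long start = bio_start_io_acct(bio);
 *	...remap and issue the bio...
 *	bio_end_io_acct_remapped(bio, start, orig_bdev);
 */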
1045
1046 /**
1047 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
1048 * @q : the queue of the device being checked
1049 *
1050 * Description:
1051 * Check if underlying low-level drivers of a device are busy.
1052 * If the drivers want to export their busy state, they must set their own
1053 * exporting function using blk_queue_lld_busy() first.
1054 *
1055 * Basically, this function is used only by request stacking drivers
1056 * to stop dispatching requests to underlying devices when underlying
1057 * devices are busy. This behavior helps more I/O merging on the queue
1058 * of the request stacking driver and prevents I/O throughput regression
1059 * on burst I/O load.
1060 *
1061 * Return:
1062 * 0 - Not busy (The request stacking driver should dispatch request)
1063 * 1 - Busy (The request stacking driver should stop dispatching request)
1064 */
1065 int blk_lld_busy(struct request_queue *q)
1066 {
1067 if (queue_is_mq(q) && q->mq_ops->busy)
1068 return q->mq_ops->busy(q);
1069
1070 return 0;
1071 }
1072 EXPORT_SYMBOL_GPL(blk_lld_busy);
1073
1074 int kblockd_schedule_work(struct work_struct *work)
1075 {
1076 return queue_work(kblockd_workqueue, work);
1077 }
1078 EXPORT_SYMBOL(kblockd_schedule_work);
1079
1080 int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
1081 unsigned long delay)
1082 {
1083 return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
1084 }
1085 EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
1086
1087 void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
1088 {
1089 struct task_struct *tsk = current;
1090
1091 /*
1092 * If this is a nested plug, don't actually assign it.
1093 */
1094 if (tsk->plug)
1095 return;
1096
1097 plug->cur_ktime = 0;
1098 plug->mq_list = NULL;
1099 plug->cached_rq = NULL;
1100 plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT);
1101 plug->rq_count = 0;
1102 plug->multiple_queues = false;
1103 plug->has_elevator = false;
1104 INIT_LIST_HEAD(&plug->cb_list);
1105
1106 /*
1107 * Store ordering should not be needed here, since a potential
1108 * preempt will imply a full memory barrier
1109 */
1110 tsk->plug = plug;
1111 }
1112
1113 /**
1114 * blk_start_plug - initialize blk_plug and track it inside the task_struct
1115 * @plug: The &struct blk_plug that needs to be initialized
1116 *
1117 * Description:
1118 * blk_start_plug() indicates to the block layer an intent by the caller
1119 * to submit multiple I/O requests in a batch. The block layer may use
1120 * this hint to defer submitting I/Os from the caller until blk_finish_plug()
1121 * is called. However, the block layer may choose to submit requests
1122 * before a call to blk_finish_plug() if the number of queued I/Os
1123 * exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
1124 * %BLK_PLUG_FLUSH_SIZE. The queued I/Os may also be submitted early if
1125 * the task schedules (see below).
1126 *
1127 * Tracking blk_plug inside the task_struct will help with auto-flushing the
1128 * pending I/O should the task end up blocking between blk_start_plug() and
1129 * blk_finish_plug(). This is important from a performance perspective, but
1130 * also ensures that we don't deadlock. For instance, if the task is blocking
1131 * for a memory allocation, memory reclaim could end up wanting to free a
1132 * page belonging to that request that is currently residing in our private
1133 * plug. By flushing the pending I/O when the process goes to sleep, we avoid
1134 * this kind of deadlock.
1135 */
1136 void blk_start_plug(struct blk_plug *plug)
1137 {
1138 blk_start_plug_nr_ios(plug, 1);
1139 }
1140 EXPORT_SYMBOL(blk_start_plug);
1141
1142 static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
1143 {
1144 LIST_HEAD(callbacks);
1145
1146 while (!list_empty(&plug->cb_list)) {
1147 list_splice_init(&plug->cb_list, &callbacks);
1148
1149 while (!list_empty(&callbacks)) {
1150 struct blk_plug_cb *cb = list_first_entry(&callbacks,
1151 struct blk_plug_cb,
1152 list);
1153 list_del(&cb->list);
1154 cb->callback(cb, from_schedule);
1155 }
1156 }
1157 }
1158
1159 struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
1160 int size)
1161 {
1162 struct blk_plug *plug = current->plug;
1163 struct blk_plug_cb *cb;
1164
1165 if (!plug)
1166 return NULL;
1167
1168 list_for_each_entry(cb, &plug->cb_list, list)
1169 if (cb->callback == unplug && cb->data == data)
1170 return cb;
1171
1172 /* Not currently on the callback list */
1173 BUG_ON(size < sizeof(*cb));
1174 cb = kzalloc(size, GFP_ATOMIC);
1175 if (cb) {
1176 cb->data = data;
1177 cb->callback = unplug;
1178 list_add(&cb->list, &plug->cb_list);
1179 }
1180 return cb;
1181 }
1182 EXPORT_SYMBOL(blk_check_plugged);
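/*
 * Usage sketch (assumption, modelled on stacking drivers such as md): a
 * driver can defer work until unplug time by registering a callback sized to
 * hold its private state. my_unplug_fn() and the fallback are hypothetical:
 *
 *	struct blk_plug_cb *cb;
 *
 *	cb = blk_check_plugged(my_unplug_fn, my_data, sizeof(*cb));
 *	if (!cb)
 *		issue_immediately();	    (no plug active)
 */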
1183
1184 void __blk_flush_plug(struct blk_plug *plug, bool from_schedule)
1185 {
1186 if (!list_empty(&plug->cb_list))
1187 flush_plug_callbacks(plug, from_schedule);
1188 blk_mq_flush_plug_list(plug, from_schedule);
1189 /*
1190 * Unconditionally flush out cached requests, even if the unplug
1191 * event came from schedule. Since we hold references to the
1192 * queue for cached requests, we don't want a blocked task holding
1193 * up a queue freeze/quiesce event.
1194 */
1195 if (unlikely(!rq_list_empty(plug->cached_rq)))
1196 blk_mq_free_plug_rqs(plug);
1197
1198 current->flags &= ~PF_BLOCK_TS;
1199 }
1200
1201 /**
1202 * blk_finish_plug - mark the end of a batch of submitted I/O
1203 * @plug: The &struct blk_plug passed to blk_start_plug()
1204 *
1205 * Description:
1206 * Indicate that a batch of I/O submissions is complete. This function
1207 * must be paired with an initial call to blk_start_plug(). The intent
1208 * is to allow the block layer to optimize I/O submission. See the
1209 * documentation for blk_start_plug() for more information.
1210 */
1211 void blk_finish_plug(struct blk_plug *plug)
1212 {
1213 if (plug == current->plug) {
1214 __blk_flush_plug(plug, false);
1215 current->plug = NULL;
1216 }
1217 }
1218 EXPORT_SYMBOL(blk_finish_plug);
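/*
 * Plugging sketch (illustrative): a submitter batches several bios under one
 * plug so the block layer can merge and dispatch them together. next_bio()
 * is a hypothetical source of fully built bios:
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	while ((bio = next_bio()) != NULL)
 *		submit_bio(bio);
 *	blk_finish_plug(&plug);
 */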
1219
1220 void blk_io_schedule(void)
1221 {
1222 /* Prevent hang_check timer from firing at us during very long I/O */
1223 unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;
1224
1225 if (timeout)
1226 io_schedule_timeout(timeout);
1227 else
1228 io_schedule();
1229 }
1230 EXPORT_SYMBOL_GPL(blk_io_schedule);
1231
1232 int __init blk_dev_init(void)
1233 {
1234 BUILD_BUG_ON((__force u32)REQ_OP_LAST >= (1 << REQ_OP_BITS));
1235 BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
1236 sizeof_field(struct request, cmd_flags));
1237 BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
1238 sizeof_field(struct bio, bi_opf));
1239
1240 /* used for unplugging and affects IO latency/throughput - HIGHPRI */
1241 kblockd_workqueue = alloc_workqueue("kblockd",
1242 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
1243 if (!kblockd_workqueue)
1244 panic("Failed to create kblockd\n");
1245
1246 blk_requestq_cachep = KMEM_CACHE(request_queue, SLAB_PANIC);
1247
1248 blk_debugfs_root = debugfs_create_dir("block", NULL);
1249
1250 return 0;
1251 }