block/bio.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
4 */
5 #include <linux/mm.h>
6 #include <linux/swap.h>
7 #include <linux/bio.h>
8 #include <linux/blkdev.h>
9 #include <linux/uio.h>
10 #include <linux/iocontext.h>
11 #include <linux/slab.h>
12 #include <linux/init.h>
13 #include <linux/kernel.h>
14 #include <linux/export.h>
15 #include <linux/mempool.h>
16 #include <linux/workqueue.h>
17 #include <linux/cgroup.h>
18 #include <linux/blk-cgroup.h>
19 #include <linux/highmem.h>
20 #include <linux/sched/sysctl.h>
21
22 #include <trace/events/block.h>
23 #include "blk.h"
24 #include "blk-rq-qos.h"
25
26 /*
27 * Test patch to inline a certain number of bi_io_vec's inside the bio
28 * itself, to shrink a bio data allocation from two mempool calls to one
29 */
30 #define BIO_INLINE_VECS 4
31
32 /*
33 * if you change this list, also change bvec_alloc or things will
34 * break badly! cannot be bigger than what you can fit into an
35 * unsigned short
36 */
37 #define BV(x, n) { .nr_vecs = x, .name = "biovec-"#n }
38 static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = {
39 BV(1, 1), BV(4, 4), BV(16, 16), BV(64, 64), BV(128, 128), BV(BIO_MAX_PAGES, max),
40 };
41 #undef BV
42
43 /*
44 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
45 * IO code that does not need private memory pools.
46 */
47 struct bio_set fs_bio_set;
48 EXPORT_SYMBOL(fs_bio_set);
49
50 /*
51 * Our slab pool management
52 */
53 struct bio_slab {
54 struct kmem_cache *slab;
55 unsigned int slab_ref;
56 unsigned int slab_size;
57 char name[8];
58 };
59 static DEFINE_MUTEX(bio_slab_lock);
60 static struct bio_slab *bio_slabs;
61 static unsigned int bio_slab_nr, bio_slab_max;
62
63 static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
64 {
65 unsigned int sz = sizeof(struct bio) + extra_size;
66 struct kmem_cache *slab = NULL;
67 struct bio_slab *bslab, *new_bio_slabs;
68 unsigned int new_bio_slab_max;
69 unsigned int i, entry = -1;
70
71 mutex_lock(&bio_slab_lock);
72
73 i = 0;
74 while (i < bio_slab_nr) {
75 bslab = &bio_slabs[i];
76
77 if (!bslab->slab && entry == -1)
78 entry = i;
79 else if (bslab->slab_size == sz) {
80 slab = bslab->slab;
81 bslab->slab_ref++;
82 break;
83 }
84 i++;
85 }
86
87 if (slab)
88 goto out_unlock;
89
90 if (bio_slab_nr == bio_slab_max && entry == -1) {
91 new_bio_slab_max = bio_slab_max << 1;
92 new_bio_slabs = krealloc(bio_slabs,
93 new_bio_slab_max * sizeof(struct bio_slab),
94 GFP_KERNEL);
95 if (!new_bio_slabs)
96 goto out_unlock;
97 bio_slab_max = new_bio_slab_max;
98 bio_slabs = new_bio_slabs;
99 }
100 if (entry == -1)
101 entry = bio_slab_nr++;
102
103 bslab = &bio_slabs[entry];
104
105 snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
106 slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
107 SLAB_HWCACHE_ALIGN, NULL);
108 if (!slab)
109 goto out_unlock;
110
111 bslab->slab = slab;
112 bslab->slab_ref = 1;
113 bslab->slab_size = sz;
114 out_unlock:
115 mutex_unlock(&bio_slab_lock);
116 return slab;
117 }
118
119 static void bio_put_slab(struct bio_set *bs)
120 {
121 struct bio_slab *bslab = NULL;
122 unsigned int i;
123
124 mutex_lock(&bio_slab_lock);
125
126 for (i = 0; i < bio_slab_nr; i++) {
127 if (bs->bio_slab == bio_slabs[i].slab) {
128 bslab = &bio_slabs[i];
129 break;
130 }
131 }
132
133 if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
134 goto out;
135
136 WARN_ON(!bslab->slab_ref);
137
138 if (--bslab->slab_ref)
139 goto out;
140
141 kmem_cache_destroy(bslab->slab);
142 bslab->slab = NULL;
143
144 out:
145 mutex_unlock(&bio_slab_lock);
146 }
147
148 unsigned int bvec_nr_vecs(unsigned short idx)
149 {
150 return bvec_slabs[--idx].nr_vecs;
151 }
152
153 void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
154 {
155 if (!idx)
156 return;
157 idx--;
158
159 BIO_BUG_ON(idx >= BVEC_POOL_NR);
160
161 if (idx == BVEC_POOL_MAX) {
162 mempool_free(bv, pool);
163 } else {
164 struct biovec_slab *bvs = bvec_slabs + idx;
165
166 kmem_cache_free(bvs->slab, bv);
167 }
168 }
169
170 struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
171 mempool_t *pool)
172 {
173 struct bio_vec *bvl;
174
175 /*
176 * see comment near bvec_array define!
177 */
178 switch (nr) {
179 case 1:
180 *idx = 0;
181 break;
182 case 2 ... 4:
183 *idx = 1;
184 break;
185 case 5 ... 16:
186 *idx = 2;
187 break;
188 case 17 ... 64:
189 *idx = 3;
190 break;
191 case 65 ... 128:
192 *idx = 4;
193 break;
194 case 129 ... BIO_MAX_PAGES:
195 *idx = 5;
196 break;
197 default:
198 return NULL;
199 }
200
201 /*
202 * idx now points to the pool we want to allocate from. only the
203 * largest pool (BVEC_POOL_MAX) is mempool backed.
204 */
205 if (*idx == BVEC_POOL_MAX) {
206 fallback:
207 bvl = mempool_alloc(pool, gfp_mask);
208 } else {
209 struct biovec_slab *bvs = bvec_slabs + *idx;
210 gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);
211
212 /*
213 * Make this allocation restricted and don't dump info on
214 * allocation failures, since we'll fallback to the mempool
215 * in case of failure.
216 */
217 __gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
218
219 /*
220 * Try a slab allocation. If this fails and __GFP_DIRECT_RECLAIM
221 * is set, retry with the 1-entry mempool
222 */
223 bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
224 if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
225 *idx = BVEC_POOL_MAX;
226 goto fallback;
227 }
228 }
229
230 (*idx)++;
231 return bvl;
232 }
233
234 void bio_uninit(struct bio *bio)
235 {
236 bio_disassociate_blkg(bio);
237
238 if (bio_integrity(bio))
239 bio_integrity_free(bio);
240 }
241 EXPORT_SYMBOL(bio_uninit);
242
243 static void bio_free(struct bio *bio)
244 {
245 struct bio_set *bs = bio->bi_pool;
246 void *p;
247
248 bio_uninit(bio);
249
250 if (bs) {
251 bvec_free(&bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));
252
253 /*
254 * If we have front padding, adjust the bio pointer before freeing
255 */
256 p = bio;
257 p -= bs->front_pad;
258
259 mempool_free(p, &bs->bio_pool);
260 } else {
261 /* Bio was allocated by bio_kmalloc() */
262 kfree(bio);
263 }
264 }
265
266 /*
267 * Users of this function have their own bio allocation. Subsequently,
268 * they must remember to pair any call to bio_init() with bio_uninit()
269 * when IO has completed, or when the bio is released.
270 */
271 void bio_init(struct bio *bio, struct bio_vec *table,
272 unsigned short max_vecs)
273 {
274 memset(bio, 0, sizeof(*bio));
275 atomic_set(&bio->__bi_remaining, 1);
276 atomic_set(&bio->__bi_cnt, 1);
277
278 bio->bi_io_vec = table;
279 bio->bi_max_vecs = max_vecs;
280 }
281 EXPORT_SYMBOL(bio_init);
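/*
 * Sketch of typical bio_init() usage (illustrative only; "bdev", "sector"
 * and "page" are hypothetical): a short-lived, on-stack bio with a single
 * inline bio_vec, paired with bio_uninit() as required above.
 *
 *	struct bio bio;
 *	struct bio_vec bvec;
 *
 *	bio_init(&bio, &bvec, 1);
 *	bio_set_dev(&bio, bdev);
 *	bio.bi_iter.bi_sector = sector;
 *	bio.bi_opf = REQ_OP_READ;
 *	bio_add_page(&bio, page, PAGE_SIZE, 0);
 *	submit_bio_wait(&bio);
 *	bio_uninit(&bio);
 */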
282
283 /**
284 * bio_reset - reinitialize a bio
285 * @bio: bio to reset
286 *
287 * Description:
288 * After calling bio_reset(), @bio will be in the same state as a freshly
289 * allocated bio returned bio bio_alloc_bioset() - the only fields that are
290 * preserved are the ones that are initialized by bio_alloc_bioset(). See
291 * comment in struct bio.
292 */
293 void bio_reset(struct bio *bio)
294 {
295 unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
296
297 bio_uninit(bio);
298
299 memset(bio, 0, BIO_RESET_BYTES);
300 bio->bi_flags = flags;
301 atomic_set(&bio->__bi_remaining, 1);
302 }
303 EXPORT_SYMBOL(bio_reset);
304
305 static struct bio *__bio_chain_endio(struct bio *bio)
306 {
307 struct bio *parent = bio->bi_private;
308
309 if (!parent->bi_status)
310 parent->bi_status = bio->bi_status;
311 bio_put(bio);
312 return parent;
313 }
314
315 static void bio_chain_endio(struct bio *bio)
316 {
317 bio_endio(__bio_chain_endio(bio));
318 }
319
320 /**
321 * bio_chain - chain bio completions
322 * @bio: the target bio
323 * @parent: the @bio's parent bio
324 *
325 * The caller won't have a bi_end_io called when @bio completes - instead,
326 * @parent's bi_end_io won't be called until both @parent and @bio have
327 * completed; the chained bio will also be freed when it completes.
328 *
329 * The caller must not set bi_private or bi_end_io in @bio.
330 */
331 void bio_chain(struct bio *bio, struct bio *parent)
332 {
333 BUG_ON(bio->bi_private || bio->bi_end_io);
334
335 bio->bi_private = parent;
336 bio->bi_end_io = bio_chain_endio;
337 bio_inc_remaining(parent);
338 }
339 EXPORT_SYMBOL(bio_chain);
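/*
 * Sketch of the usual bio_chain() pattern (illustrative; "sectors" is a
 * hypothetical limit): carve the front off a large bio, chain it to the
 * remainder, resubmit the remainder and keep working on the split piece.
 * The original bio's bi_end_io then runs only once both halves complete.
 *
 *	struct bio *split = bio_split(bio, sectors, GFP_NOIO, &fs_bio_set);
 *
 *	bio_chain(split, bio);
 *	generic_make_request(bio);
 *	bio = split;
 */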
340
341 static void bio_alloc_rescue(struct work_struct *work)
342 {
343 struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
344 struct bio *bio;
345
346 while (1) {
347 spin_lock(&bs->rescue_lock);
348 bio = bio_list_pop(&bs->rescue_list);
349 spin_unlock(&bs->rescue_lock);
350
351 if (!bio)
352 break;
353
354 generic_make_request(bio);
355 }
356 }
357
358 static void punt_bios_to_rescuer(struct bio_set *bs)
359 {
360 struct bio_list punt, nopunt;
361 struct bio *bio;
362
363 if (WARN_ON_ONCE(!bs->rescue_workqueue))
364 return;
365 /*
366 * In order to guarantee forward progress we must punt only bios that
367 * were allocated from this bio_set; otherwise, if there was a bio on
368 * there for a stacking driver higher up in the stack, processing it
369 * could require allocating bios from this bio_set, and doing that from
370 * our own rescuer would be bad.
371 *
372 * Since bio lists are singly linked, pop them all instead of trying to
373 * remove from the middle of the list:
374 */
375
376 bio_list_init(&punt);
377 bio_list_init(&nopunt);
378
379 while ((bio = bio_list_pop(&current->bio_list[0])))
380 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
381 current->bio_list[0] = nopunt;
382
383 bio_list_init(&nopunt);
384 while ((bio = bio_list_pop(&current->bio_list[1])))
385 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
386 current->bio_list[1] = nopunt;
387
388 spin_lock(&bs->rescue_lock);
389 bio_list_merge(&bs->rescue_list, &punt);
390 spin_unlock(&bs->rescue_lock);
391
392 queue_work(bs->rescue_workqueue, &bs->rescue_work);
393 }
394
395 /**
396 * bio_alloc_bioset - allocate a bio for I/O
397 * @gfp_mask: the GFP_* mask given to the slab allocator
398 * @nr_iovecs: number of iovecs to pre-allocate
399 * @bs: the bio_set to allocate from.
400 *
401 * Description:
402 * If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
403 * backed by the @bs's mempool.
404 *
405 * When @bs is not NULL, if %__GFP_DIRECT_RECLAIM is set then bio_alloc will
406 * always be able to allocate a bio. This is due to the mempool guarantees.
407 * To make this work, callers must never allocate more than 1 bio at a time
408 * from this pool. Callers that need to allocate more than 1 bio must always
409 * submit the previously allocated bio for IO before attempting to allocate
410 * a new one. Failure to do so can cause deadlocks under memory pressure.
411 *
412 * Note that when running under generic_make_request() (i.e. any block
413 * driver), bios are not submitted until after you return - see the code in
414 * generic_make_request() that converts recursion into iteration, to prevent
415 * stack overflows.
416 *
417 * This would normally mean allocating multiple bios under
418 * generic_make_request() would be susceptible to deadlocks, but we have
419 * deadlock avoidance code that resubmits any blocked bios from a rescuer
420 * thread.
421 *
422 * However, we do not guarantee forward progress for allocations from other
423 * mempools. Doing multiple allocations from the same mempool under
424 * generic_make_request() should be avoided - instead, use bio_set's front_pad
425 * for per bio allocations.
426 *
427 * RETURNS:
428 * Pointer to new bio on success, NULL on failure.
429 */
430 struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
431 struct bio_set *bs)
432 {
433 gfp_t saved_gfp = gfp_mask;
434 unsigned front_pad;
435 unsigned inline_vecs;
436 struct bio_vec *bvl = NULL;
437 struct bio *bio;
438 void *p;
439
440 if (!bs) {
441 if (nr_iovecs > UIO_MAXIOV)
442 return NULL;
443
444 p = kmalloc(sizeof(struct bio) +
445 nr_iovecs * sizeof(struct bio_vec),
446 gfp_mask);
447 front_pad = 0;
448 inline_vecs = nr_iovecs;
449 } else {
450 /* should not use nobvec bioset for nr_iovecs > 0 */
451 if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) &&
452 nr_iovecs > 0))
453 return NULL;
454 /*
455 * generic_make_request() converts recursion to iteration; this
456 * means if we're running beneath it, any bios we allocate and
457 * submit will not be submitted (and thus freed) until after we
458 * return.
459 *
460 * This exposes us to a potential deadlock if we allocate
461 * multiple bios from the same bio_set() while running
462 * underneath generic_make_request(). If we were to allocate
463 * multiple bios (say a stacking block driver that was splitting
464 * bios), we would deadlock if we exhausted the mempool's
465 * reserve.
466 *
467 * We solve this, and guarantee forward progress, with a rescuer
468 * workqueue per bio_set. If we go to allocate and there are
469 * bios on current->bio_list, we first try the allocation
470 * without __GFP_DIRECT_RECLAIM; if that fails, we punt those
471 * bios we would be blocking to the rescuer workqueue before
472 * we retry with the original gfp_flags.
473 */
474
475 if (current->bio_list &&
476 (!bio_list_empty(&current->bio_list[0]) ||
477 !bio_list_empty(&current->bio_list[1])) &&
478 bs->rescue_workqueue)
479 gfp_mask &= ~__GFP_DIRECT_RECLAIM;
480
481 p = mempool_alloc(&bs->bio_pool, gfp_mask);
482 if (!p && gfp_mask != saved_gfp) {
483 punt_bios_to_rescuer(bs);
484 gfp_mask = saved_gfp;
485 p = mempool_alloc(&bs->bio_pool, gfp_mask);
486 }
487
488 front_pad = bs->front_pad;
489 inline_vecs = BIO_INLINE_VECS;
490 }
491
492 if (unlikely(!p))
493 return NULL;
494
495 bio = p + front_pad;
496 bio_init(bio, NULL, 0);
497
498 if (nr_iovecs > inline_vecs) {
499 unsigned long idx = 0;
500
501 bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
502 if (!bvl && gfp_mask != saved_gfp) {
503 punt_bios_to_rescuer(bs);
504 gfp_mask = saved_gfp;
505 bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
506 }
507
508 if (unlikely(!bvl))
509 goto err_free;
510
511 bio->bi_flags |= idx << BVEC_POOL_OFFSET;
512 } else if (nr_iovecs) {
513 bvl = bio->bi_inline_vecs;
514 }
515
516 bio->bi_pool = bs;
517 bio->bi_max_vecs = nr_iovecs;
518 bio->bi_io_vec = bvl;
519 return bio;
520
521 err_free:
522 mempool_free(p, &bs->bio_pool);
523 return NULL;
524 }
525 EXPORT_SYMBOL(bio_alloc_bioset);
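/*
 * Allocation sketch (illustrative; "nr_vecs", "bdev", "sector" and
 * "my_end_io" are hypothetical): with a mempool-backed bio_set and
 * __GFP_DIRECT_RECLAIM in the mask, the allocation below will not fail,
 * subject to the one-bio-at-a-time rule described above.
 *
 *	struct bio *bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, &fs_bio_set);
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_WRITE;
 *	bio->bi_end_io = my_end_io;
 *	submit_bio(bio);
 */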
526
527 void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
528 {
529 unsigned long flags;
530 struct bio_vec bv;
531 struct bvec_iter iter;
532
533 __bio_for_each_segment(bv, bio, iter, start) {
534 char *data = bvec_kmap_irq(&bv, &flags);
535 memset(data, 0, bv.bv_len);
536 flush_dcache_page(bv.bv_page);
537 bvec_kunmap_irq(data, &flags);
538 }
539 }
540 EXPORT_SYMBOL(zero_fill_bio_iter);
541
542 /**
543 * bio_truncate - truncate the bio to @new_size
544 * @bio: the bio to be truncated
545 * @new_size: new size for truncating the bio
546 *
547 * Description:
548 * Truncate the bio to the new size @new_size. If bio_op(bio) is
549 * REQ_OP_READ, zero the truncated part. This function should only
550 * be used for handling corner cases, such as bio eod.
551 */
552 void bio_truncate(struct bio *bio, unsigned new_size)
553 {
554 struct bio_vec bv;
555 struct bvec_iter iter;
556 unsigned int done = 0;
557 bool truncated = false;
558
559 if (new_size >= bio->bi_iter.bi_size)
560 return;
561
562 if (bio_op(bio) != REQ_OP_READ)
563 goto exit;
564
565 bio_for_each_segment(bv, bio, iter) {
566 if (done + bv.bv_len > new_size) {
567 unsigned offset;
568
569 if (!truncated)
570 offset = new_size - done;
571 else
572 offset = 0;
573 zero_user(bv.bv_page, offset, bv.bv_len - offset);
574 truncated = true;
575 }
576 done += bv.bv_len;
577 }
578
579 exit:
580 /*
581 * Don't touch bvec table here and make it really immutable, since
582 * fs bio user has to retrieve all pages via bio_for_each_segment_all
583 * in its ->bi_end_io() callback.
584 *
585 * It is enough to truncate bio by updating .bi_size since we can make
586 * correct bvec with the updated .bi_size for drivers.
587 */
588 bio->bi_iter.bi_size = new_size;
589 }
590
591 /**
592 * guard_bio_eod - truncate a BIO to fit the block device
593 * @bio: bio to truncate
594 *
595 * This allows us to do IO even on the odd last sectors of a device, even if the
596 * block size is some multiple of the physical sector size.
597 *
598 * We'll just truncate the bio to the size of the device, and clear the end of
599 * the buffer head manually. Truly out-of-range accesses will turn into actual
600 * I/O errors, this only handles the "we need to be able to do I/O at the final
601 * sector" case.
602 */
603 void guard_bio_eod(struct bio *bio)
604 {
605 sector_t maxsector;
606 struct hd_struct *part;
607
608 rcu_read_lock();
609 part = __disk_get_part(bio->bi_disk, bio->bi_partno);
610 if (part)
611 maxsector = part_nr_sects_read(part);
612 else
613 maxsector = get_capacity(bio->bi_disk);
614 rcu_read_unlock();
615
616 if (!maxsector)
617 return;
618
619 /*
620 * If the *whole* IO is past the end of the device,
621 * let it through, and the IO layer will turn it into
622 * an EIO.
623 */
624 if (unlikely(bio->bi_iter.bi_sector >= maxsector))
625 return;
626
627 maxsector -= bio->bi_iter.bi_sector;
628 if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
629 return;
630
631 bio_truncate(bio, maxsector << 9);
632 }
633
634 /**
635 * bio_put - release a reference to a bio
636 * @bio: bio to release reference to
637 *
638 * Description:
639 * Put a reference to a &struct bio, either one you have gotten with
640 * bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
641 **/
642 void bio_put(struct bio *bio)
643 {
644 if (!bio_flagged(bio, BIO_REFFED))
645 bio_free(bio);
646 else {
647 BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));
648
649 /*
650 * last put frees it
651 */
652 if (atomic_dec_and_test(&bio->__bi_cnt))
653 bio_free(bio);
654 }
655 }
656 EXPORT_SYMBOL(bio_put);
657
658 /**
659 * __bio_clone_fast - clone a bio that shares the original bio's biovec
660 * @bio: destination bio
661 * @bio_src: bio to clone
662 *
663 * Clone a &bio. Caller will own the returned bio, but not
664 * the actual data it points to. Reference count of returned
665 * bio will be one.
666 *
667 * Caller must ensure that @bio_src is not freed before @bio.
668 */
669 void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
670 {
671 BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));
672
673 /*
674 * most users will be overriding ->bi_disk with a new target,
675 * so we don't set nor calculate new physical/hw segment counts here
676 */
677 bio->bi_disk = bio_src->bi_disk;
678 bio->bi_partno = bio_src->bi_partno;
679 bio_set_flag(bio, BIO_CLONED);
680 if (bio_flagged(bio_src, BIO_THROTTLED))
681 bio_set_flag(bio, BIO_THROTTLED);
682 bio->bi_opf = bio_src->bi_opf;
683 bio->bi_ioprio = bio_src->bi_ioprio;
684 bio->bi_write_hint = bio_src->bi_write_hint;
685 bio->bi_iter = bio_src->bi_iter;
686 bio->bi_io_vec = bio_src->bi_io_vec;
687
688 bio_clone_blkg_association(bio, bio_src);
689 blkcg_bio_issue_init(bio);
690 }
691 EXPORT_SYMBOL(__bio_clone_fast);
692
693 /**
694 * bio_clone_fast - clone a bio that shares the original bio's biovec
695 * @bio: bio to clone
696 * @gfp_mask: allocation priority
697 * @bs: bio_set to allocate from
698 *
699 * Like __bio_clone_fast, only also allocates the returned bio
700 */
701 struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
702 {
703 struct bio *b;
704
705 b = bio_alloc_bioset(gfp_mask, 0, bs);
706 if (!b)
707 return NULL;
708
709 __bio_clone_fast(b, bio);
710
711 if (bio_integrity(bio)) {
712 int ret;
713
714 ret = bio_integrity_clone(b, bio, gfp_mask);
715
716 if (ret < 0) {
717 bio_put(b);
718 return NULL;
719 }
720 }
721
722 return b;
723 }
724 EXPORT_SYMBOL(bio_clone_fast);
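/*
 * Clone sketch (illustrative; "lower_bdev" and "my_clone_end_io" are
 * hypothetical): a stacking driver typically clones the incoming bio,
 * points the clone at the lower device and completes the original from
 * the clone's end_io.
 *
 *	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, &fs_bio_set);
 *
 *	bio_set_dev(clone, lower_bdev);
 *	clone->bi_end_io = my_clone_end_io;
 *	clone->bi_private = bio;
 *	submit_bio(clone);
 */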
725
726 const char *bio_devname(struct bio *bio, char *buf)
727 {
728 return disk_name(bio->bi_disk, bio->bi_partno, buf);
729 }
730 EXPORT_SYMBOL(bio_devname);
731
732 static inline bool page_is_mergeable(const struct bio_vec *bv,
733 struct page *page, unsigned int len, unsigned int off,
734 bool *same_page)
735 {
736 phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) +
737 bv->bv_offset + bv->bv_len - 1;
738 phys_addr_t page_addr = page_to_phys(page);
739
740 if (vec_end_addr + 1 != page_addr + off)
741 return false;
742 if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
743 return false;
744
745 *same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
746 if (!*same_page && pfn_to_page(PFN_DOWN(vec_end_addr)) + 1 != page)
747 return false;
748 return true;
749 }
750
751 static bool bio_try_merge_pc_page(struct request_queue *q, struct bio *bio,
752 struct page *page, unsigned len, unsigned offset,
753 bool *same_page)
754 {
755 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
756 unsigned long mask = queue_segment_boundary(q);
757 phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;
758 phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;
759
760 if ((addr1 | mask) != (addr2 | mask))
761 return false;
762 if (bv->bv_len + len > queue_max_segment_size(q))
763 return false;
764 return __bio_try_merge_page(bio, page, len, offset, same_page);
765 }
766
767 /**
768 * __bio_add_pc_page - attempt to add page to passthrough bio
769 * @q: the target queue
770 * @bio: destination bio
771 * @page: page to add
772 * @len: vec entry length
773 * @offset: vec entry offset
774 * @same_page: return whether the merge happened inside the same page
775 *
776 * Attempt to add a page to the bio_vec maplist. This can fail for a
777 * number of reasons, such as the bio being full or target block device
778 * limitations. The target block device must allow bio's up to PAGE_SIZE,
779 * so it is always possible to add a single page to an empty bio.
780 *
781 * This should only be used by passthrough bios.
782 */
783 int __bio_add_pc_page(struct request_queue *q, struct bio *bio,
784 struct page *page, unsigned int len, unsigned int offset,
785 bool *same_page)
786 {
787 struct bio_vec *bvec;
788
789 /*
790 * cloned bio must not modify vec list
791 */
792 if (unlikely(bio_flagged(bio, BIO_CLONED)))
793 return 0;
794
795 if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q))
796 return 0;
797
798 if (bio->bi_vcnt > 0) {
799 if (bio_try_merge_pc_page(q, bio, page, len, offset, same_page))
800 return len;
801
802 /*
803 * If the queue doesn't support SG gaps and adding this segment
804 * would create a gap, disallow it.
805 */
806 bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
807 if (bvec_gap_to_prev(q, bvec, offset))
808 return 0;
809 }
810
811 if (bio_full(bio, len))
812 return 0;
813
814 if (bio->bi_vcnt >= queue_max_segments(q))
815 return 0;
816
817 bvec = &bio->bi_io_vec[bio->bi_vcnt];
818 bvec->bv_page = page;
819 bvec->bv_len = len;
820 bvec->bv_offset = offset;
821 bio->bi_vcnt++;
822 bio->bi_iter.bi_size += len;
823 return len;
824 }
825
826 int bio_add_pc_page(struct request_queue *q, struct bio *bio,
827 struct page *page, unsigned int len, unsigned int offset)
828 {
829 bool same_page = false;
830 return __bio_add_pc_page(q, bio, page, len, offset, &same_page);
831 }
832 EXPORT_SYMBOL(bio_add_pc_page);
833
834 /**
835 * __bio_try_merge_page - try appending data to an existing bvec.
836 * @bio: destination bio
837 * @page: start page to add
838 * @len: length of the data to add
839 * @off: offset of the data relative to @page
840 * @same_page: return if the segment has been merged inside the same page
841 *
842 * Try to add the data at @page + @off to the last bvec of @bio. This is a
843 * useful optimisation for file systems with a block size smaller than the
844 * page size.
845 *
846 * Warn if (@len, @off) crosses pages when @same_page is true.
847 *
848 * Return %true on success or %false on failure.
849 */
850 bool __bio_try_merge_page(struct bio *bio, struct page *page,
851 unsigned int len, unsigned int off, bool *same_page)
852 {
853 if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
854 return false;
855
856 if (bio->bi_vcnt > 0) {
857 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
858
859 if (page_is_mergeable(bv, page, len, off, same_page)) {
860 if (bio->bi_iter.bi_size > UINT_MAX - len)
861 return false;
862 bv->bv_len += len;
863 bio->bi_iter.bi_size += len;
864 return true;
865 }
866 }
867 return false;
868 }
869 EXPORT_SYMBOL_GPL(__bio_try_merge_page);
870
871 /**
872 * __bio_add_page - add page(s) to a bio in a new segment
873 * @bio: destination bio
874 * @page: start page to add
875 * @len: length of the data to add, may cross pages
876 * @off: offset of the data relative to @page, may cross pages
877 *
878 * Add the data at @page + @off to @bio as a new bvec. The caller must ensure
879 * that @bio has space for another bvec.
880 */
881 void __bio_add_page(struct bio *bio, struct page *page,
882 unsigned int len, unsigned int off)
883 {
884 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];
885
886 WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
887 WARN_ON_ONCE(bio_full(bio, len));
888
889 bv->bv_page = page;
890 bv->bv_offset = off;
891 bv->bv_len = len;
892
893 bio->bi_iter.bi_size += len;
894 bio->bi_vcnt++;
895
896 if (!bio_flagged(bio, BIO_WORKINGSET) && unlikely(PageWorkingset(page)))
897 bio_set_flag(bio, BIO_WORKINGSET);
898 }
899 EXPORT_SYMBOL_GPL(__bio_add_page);
900
901 /**
902 * bio_add_page - attempt to add page(s) to bio
903 * @bio: destination bio
904 * @page: start page to add
905 * @len: vec entry length, may cross pages
906 * @offset: vec entry offset relative to @page, may cross pages
907 *
908 * Attempt to add page(s) to the bio_vec maplist. This will only fail
909 * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
910 */
911 int bio_add_page(struct bio *bio, struct page *page,
912 unsigned int len, unsigned int offset)
913 {
914 bool same_page = false;
915
916 if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
917 if (bio_full(bio, len))
918 return 0;
919 __bio_add_page(bio, page, len, offset);
920 }
921 return len;
922 }
923 EXPORT_SYMBOL(bio_add_page);
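/*
 * Fill-loop sketch (illustrative; "pages" and "nr_pages" are hypothetical):
 * bio_add_page() returns the number of bytes added, so a short return means
 * the bio is full and must be submitted before a new one is started.
 *
 *	unsigned int i;
 *
 *	for (i = 0; i < nr_pages; i++) {
 *		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE) {
 *			submit_bio(bio);
 *			break;
 *		}
 *	}
 */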
924
925 void bio_release_pages(struct bio *bio, bool mark_dirty)
926 {
927 struct bvec_iter_all iter_all;
928 struct bio_vec *bvec;
929
930 if (bio_flagged(bio, BIO_NO_PAGE_REF))
931 return;
932
933 bio_for_each_segment_all(bvec, bio, iter_all) {
934 if (mark_dirty && !PageCompound(bvec->bv_page))
935 set_page_dirty_lock(bvec->bv_page);
936 put_page(bvec->bv_page);
937 }
938 }
939
940 static int __bio_iov_bvec_add_pages(struct bio *bio, struct iov_iter *iter)
941 {
942 const struct bio_vec *bv = iter->bvec;
943 unsigned int len;
944 size_t size;
945
946 if (WARN_ON_ONCE(iter->iov_offset > bv->bv_len))
947 return -EINVAL;
948
949 len = min_t(size_t, bv->bv_len - iter->iov_offset, iter->count);
950 size = bio_add_page(bio, bv->bv_page, len,
951 bv->bv_offset + iter->iov_offset);
952 if (unlikely(size != len))
953 return -EINVAL;
954 iov_iter_advance(iter, size);
955 return 0;
956 }
957
958 #define PAGE_PTRS_PER_BVEC (sizeof(struct bio_vec) / sizeof(struct page *))
959
960 /**
961 * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
962 * @bio: bio to add pages to
963 * @iter: iov iterator describing the region to be mapped
964 *
965 * Pins pages from *iter and appends them to @bio's bvec array. The
966 * pages will have to be released using put_page() when done.
967 * For multi-segment *iter, this function only adds pages from the
968 * next non-empty segment of the iov iterator.
969 */
970 static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
971 {
972 unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
973 unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
974 struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
975 struct page **pages = (struct page **)bv;
976 bool same_page = false;
977 ssize_t size, left;
978 unsigned len, i;
979 size_t offset;
980
981 /*
982 * Move page array up in the allocated memory for the bio vecs as far as
983 * possible so that we can start filling biovecs from the beginning
984 * without overwriting the temporary page array.
985 */
986 BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
987 pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
988
989 size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
990 if (unlikely(size <= 0))
991 return size ? size : -EFAULT;
992
993 for (left = size, i = 0; left > 0; left -= len, i++) {
994 struct page *page = pages[i];
995
996 len = min_t(size_t, PAGE_SIZE - offset, left);
997
998 if (__bio_try_merge_page(bio, page, len, offset, &same_page)) {
999 if (same_page)
1000 put_page(page);
1001 } else {
1002 if (WARN_ON_ONCE(bio_full(bio, len)))
1003 return -EINVAL;
1004 __bio_add_page(bio, page, len, offset);
1005 }
1006 offset = 0;
1007 }
1008
1009 iov_iter_advance(iter, size);
1010 return 0;
1011 }
1012
1013 /**
1014 * bio_iov_iter_get_pages - add user or kernel pages to a bio
1015 * @bio: bio to add pages to
1016 * @iter: iov iterator describing the region to be added
1017 *
1018 * This takes either an iterator pointing to user memory, or one pointing to
1019 * kernel pages (BVEC iterator). If we're adding user pages, we pin them and
1020 * map them into the kernel. On IO completion, the caller should put those
1021 * pages. If we're adding kernel pages, and the caller told us it's safe to
1022 * do so, we just have to add the pages to the bio directly. We don't grab an
1023 * extra reference to those pages (the user should already have that), and we
1024 * don't put the page on IO completion. The caller needs to check if the bio is
1025 * flagged BIO_NO_PAGE_REF on IO completion. If it isn't, then pages should be
1026 * released.
1027 *
1028 * The function tries, but does not guarantee, to pin as many pages as
1029 * fit into the bio, or are requested in *iter, whichever is smaller. If
1030 * MM encounters an error pinning the requested pages, it stops. An error
1031 * is returned only if no pages could be pinned.
1032 */
1033 int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
1034 {
1035 const bool is_bvec = iov_iter_is_bvec(iter);
1036 int ret;
1037
1038 if (WARN_ON_ONCE(bio->bi_vcnt))
1039 return -EINVAL;
1040
1041 do {
1042 if (is_bvec)
1043 ret = __bio_iov_bvec_add_pages(bio, iter);
1044 else
1045 ret = __bio_iov_iter_get_pages(bio, iter);
1046 } while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));
1047
1048 if (is_bvec)
1049 bio_set_flag(bio, BIO_NO_PAGE_REF);
1050 return bio->bi_vcnt ? 0 : ret;
1051 }
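/*
 * Direct-I/O style sketch (illustrative; device setup and error handling
 * elided): callers usually loop, filling one bio at a time from the iter
 * and submitting it, until the iterator is drained.
 *
 *	while (iov_iter_count(iter)) {
 *		bio = bio_alloc_bioset(GFP_KERNEL, BIO_MAX_PAGES, &fs_bio_set);
 *		if (bio_iov_iter_get_pages(bio, iter)) {
 *			bio_put(bio);
 *			break;
 *		}
 *		submit_bio(bio);
 *	}
 */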
1052
1053 static void submit_bio_wait_endio(struct bio *bio)
1054 {
1055 complete(bio->bi_private);
1056 }
1057
1058 /**
1059 * submit_bio_wait - submit a bio, and wait until it completes
1060 * @bio: The &struct bio which describes the I/O
1061 *
1062 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
1063 * bio_endio() on failure.
1064 *
1065 * WARNING: Unlike how submit_bio() is usually used, this function does not
1066 * consume the bio reference. The caller must drop the reference
1067 * on their own.
1068 */
1069 int submit_bio_wait(struct bio *bio)
1070 {
1071 DECLARE_COMPLETION_ONSTACK_MAP(done, bio->bi_disk->lockdep_map);
1072 unsigned long hang_check;
1073
1074 bio->bi_private = &done;
1075 bio->bi_end_io = submit_bio_wait_endio;
1076 bio->bi_opf |= REQ_SYNC;
1077 submit_bio(bio);
1078
1079 /* Prevent hang_check timer from firing at us during very long I/O */
1080 hang_check = sysctl_hung_task_timeout_secs;
1081 if (hang_check)
1082 while (!wait_for_completion_io_timeout(&done,
1083 hang_check * (HZ/2)))
1084 ;
1085 else
1086 wait_for_completion_io(&done);
1087
1088 return blk_status_to_errno(bio->bi_status);
1089 }
1090 EXPORT_SYMBOL(submit_bio_wait);
1091
1092 /**
1093 * bio_advance - increment/complete a bio by some number of bytes
1094 * @bio: bio to advance
1095 * @bytes: number of bytes to complete
1096 *
1097 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
1098 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
1099 * be updated on the last bvec as well.
1100 *
1101 * @bio will then represent the remaining, uncompleted portion of the io.
1102 */
1103 void bio_advance(struct bio *bio, unsigned bytes)
1104 {
1105 if (bio_integrity(bio))
1106 bio_integrity_advance(bio, bytes);
1107
1108 bio_advance_iter(bio, &bio->bi_iter, bytes);
1109 }
1110 EXPORT_SYMBOL(bio_advance);
1111
1112 void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
1113 struct bio *src, struct bvec_iter *src_iter)
1114 {
1115 struct bio_vec src_bv, dst_bv;
1116 void *src_p, *dst_p;
1117 unsigned bytes;
1118
1119 while (src_iter->bi_size && dst_iter->bi_size) {
1120 src_bv = bio_iter_iovec(src, *src_iter);
1121 dst_bv = bio_iter_iovec(dst, *dst_iter);
1122
1123 bytes = min(src_bv.bv_len, dst_bv.bv_len);
1124
1125 src_p = kmap_atomic(src_bv.bv_page);
1126 dst_p = kmap_atomic(dst_bv.bv_page);
1127
1128 memcpy(dst_p + dst_bv.bv_offset,
1129 src_p + src_bv.bv_offset,
1130 bytes);
1131
1132 kunmap_atomic(dst_p);
1133 kunmap_atomic(src_p);
1134
1135 flush_dcache_page(dst_bv.bv_page);
1136
1137 bio_advance_iter(src, src_iter, bytes);
1138 bio_advance_iter(dst, dst_iter, bytes);
1139 }
1140 }
1141 EXPORT_SYMBOL(bio_copy_data_iter);
1142
1143 /**
1144 * bio_copy_data - copy contents of data buffers from one bio to another
1145 * @src: source bio
1146 * @dst: destination bio
1147 *
1148 * Stops when it reaches the end of either @src or @dst - that is, copies
1149 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
1150 */
1151 void bio_copy_data(struct bio *dst, struct bio *src)
1152 {
1153 struct bvec_iter src_iter = src->bi_iter;
1154 struct bvec_iter dst_iter = dst->bi_iter;
1155
1156 bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
1157 }
1158 EXPORT_SYMBOL(bio_copy_data);
1159
1160 /**
1161 * bio_list_copy_data - copy contents of data buffers from one chain of bios to
1162 * another
1163 * @src: source bio list
1164 * @dst: destination bio list
1165 *
1166 * Stops when it reaches the end of either the @src list or @dst list - that is,
1167 * copies min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of
1168 * bios).
1169 */
1170 void bio_list_copy_data(struct bio *dst, struct bio *src)
1171 {
1172 struct bvec_iter src_iter = src->bi_iter;
1173 struct bvec_iter dst_iter = dst->bi_iter;
1174
1175 while (1) {
1176 if (!src_iter.bi_size) {
1177 src = src->bi_next;
1178 if (!src)
1179 break;
1180
1181 src_iter = src->bi_iter;
1182 }
1183
1184 if (!dst_iter.bi_size) {
1185 dst = dst->bi_next;
1186 if (!dst)
1187 break;
1188
1189 dst_iter = dst->bi_iter;
1190 }
1191
1192 bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
1193 }
1194 }
1195 EXPORT_SYMBOL(bio_list_copy_data);
1196
1197 void bio_free_pages(struct bio *bio)
1198 {
1199 struct bio_vec *bvec;
1200 struct bvec_iter_all iter_all;
1201
1202 bio_for_each_segment_all(bvec, bio, iter_all)
1203 __free_page(bvec->bv_page);
1204 }
1205 EXPORT_SYMBOL(bio_free_pages);
1206
1207 /*
1208 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
1209 * for performing direct-IO in BIOs.
1210 *
1211 * The problem is that we cannot run set_page_dirty() from interrupt context
1212 * because the required locks are not interrupt-safe. So what we can do is to
1213 * mark the pages dirty _before_ performing IO. And in interrupt context,
1214 * check that the pages are still dirty. If so, fine. If not, redirty them
1215 * in process context.
1216 *
1217 * We special-case compound pages here: normally this means reads into hugetlb
1218 * pages. The logic in here doesn't really work right for compound pages
1219 * because the VM does not uniformly chase down the head page in all cases.
1220 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
1221 * handle them at all. So we skip compound pages here at an early stage.
1222 *
1223 * Note that this code is very hard to test under normal circumstances because
1224 * direct-io pins the pages with get_user_pages(). This makes
1225 * is_page_cache_freeable return false, and the VM will not clean the pages.
1226 * But other code (eg, flusher threads) could clean the pages if they are mapped
1227 * pagecache.
1228 *
1229 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
1230 * deferred bio dirtying paths.
1231 */
1232
1233 /*
1234 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
1235 */
1236 void bio_set_pages_dirty(struct bio *bio)
1237 {
1238 struct bio_vec *bvec;
1239 struct bvec_iter_all iter_all;
1240
1241 bio_for_each_segment_all(bvec, bio, iter_all) {
1242 if (!PageCompound(bvec->bv_page))
1243 set_page_dirty_lock(bvec->bv_page);
1244 }
1245 }
1246
1247 /*
1248 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
1249 * If they are, then fine. If, however, some pages are clean then they must
1250 * have been written out during the direct-IO read. So we take another ref on
1251 * the BIO and re-dirty the pages in process context.
1252 *
1253 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
1254 * here on. It will run one put_page() against each page and will run one
1255 * bio_put() against the BIO.
1256 */
1257
1258 static void bio_dirty_fn(struct work_struct *work);
1259
1260 static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
1261 static DEFINE_SPINLOCK(bio_dirty_lock);
1262 static struct bio *bio_dirty_list;
1263
1264 /*
1265 * This runs in process context
1266 */
1267 static void bio_dirty_fn(struct work_struct *work)
1268 {
1269 struct bio *bio, *next;
1270
1271 spin_lock_irq(&bio_dirty_lock);
1272 next = bio_dirty_list;
1273 bio_dirty_list = NULL;
1274 spin_unlock_irq(&bio_dirty_lock);
1275
1276 while ((bio = next) != NULL) {
1277 next = bio->bi_private;
1278
1279 bio_release_pages(bio, true);
1280 bio_put(bio);
1281 }
1282 }
1283
1284 void bio_check_pages_dirty(struct bio *bio)
1285 {
1286 struct bio_vec *bvec;
1287 unsigned long flags;
1288 struct bvec_iter_all iter_all;
1289
1290 bio_for_each_segment_all(bvec, bio, iter_all) {
1291 if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
1292 goto defer;
1293 }
1294
1295 bio_release_pages(bio, false);
1296 bio_put(bio);
1297 return;
1298 defer:
1299 spin_lock_irqsave(&bio_dirty_lock, flags);
1300 bio->bi_private = bio_dirty_list;
1301 bio_dirty_list = bio;
1302 spin_unlock_irqrestore(&bio_dirty_lock, flags);
1303 schedule_work(&bio_dirty_work);
1304 }
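/*
 * Direct-I/O read sketch (illustrative): the submission side marks the
 * user pages dirty up front, and the completion side hands the bio to
 * bio_check_pages_dirty(), which re-dirties anything cleaned in the
 * meantime and then drops the page and bio references itself.
 *
 *	Submission:
 *		bio_set_pages_dirty(bio);
 *		submit_bio(bio);
 *
 *	Completion (->bi_end_io):
 *		bio_check_pages_dirty(bio);
 */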
1305
1306 void update_io_ticks(struct hd_struct *part, unsigned long now, bool end)
1307 {
1308 unsigned long stamp;
1309 again:
1310 stamp = READ_ONCE(part->stamp);
1311 if (unlikely(stamp != now)) {
1312 if (likely(cmpxchg(&part->stamp, stamp, now) == stamp)) {
1313 __part_stat_add(part, io_ticks, end ? now - stamp : 1);
1314 }
1315 }
1316 if (part->partno) {
1317 part = &part_to_disk(part)->part0;
1318 goto again;
1319 }
1320 }
1321
1322 void generic_start_io_acct(struct request_queue *q, int op,
1323 unsigned long sectors, struct hd_struct *part)
1324 {
1325 const int sgrp = op_stat_group(op);
1326
1327 part_stat_lock();
1328
1329 update_io_ticks(part, jiffies, false);
1330 part_stat_inc(part, ios[sgrp]);
1331 part_stat_add(part, sectors[sgrp], sectors);
1332 part_inc_in_flight(q, part, op_is_write(op));
1333
1334 part_stat_unlock();
1335 }
1336 EXPORT_SYMBOL(generic_start_io_acct);
1337
1338 void generic_end_io_acct(struct request_queue *q, int req_op,
1339 struct hd_struct *part, unsigned long start_time)
1340 {
1341 unsigned long now = jiffies;
1342 unsigned long duration = now - start_time;
1343 const int sgrp = op_stat_group(req_op);
1344
1345 part_stat_lock();
1346
1347 update_io_ticks(part, now, true);
1348 part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration));
1349 part_dec_in_flight(q, part, op_is_write(req_op));
1350
1351 part_stat_unlock();
1352 }
1353 EXPORT_SYMBOL(generic_end_io_acct);
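/*
 * Accounting sketch (illustrative; "q", "disk" and "start" are
 * hypothetical): bio-based drivers bracket each I/O with the generic
 * accounting helpers, passing the same op and partition to both calls.
 *
 *	start = jiffies;
 *	generic_start_io_acct(q, bio_op(bio), bio_sectors(bio), &disk->part0);
 *	... perform the I/O ...
 *	generic_end_io_acct(q, bio_op(bio), &disk->part0, start);
 */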
1354
1355 static inline bool bio_remaining_done(struct bio *bio)
1356 {
1357 /*
1358 * If we're not chaining, then ->__bi_remaining is always 1 and
1359 * we always end io on the first invocation.
1360 */
1361 if (!bio_flagged(bio, BIO_CHAIN))
1362 return true;
1363
1364 BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
1365
1366 if (atomic_dec_and_test(&bio->__bi_remaining)) {
1367 bio_clear_flag(bio, BIO_CHAIN);
1368 return true;
1369 }
1370
1371 return false;
1372 }
1373
1374 /**
1375 * bio_endio - end I/O on a bio
1376 * @bio: bio
1377 *
1378 * Description:
1379 * bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
1380 * way to end I/O on a bio. No one should call bi_end_io() directly on a
1381 * bio unless they own it and thus know that it has an end_io function.
1382 *
1383 * bio_endio() can be called several times on a bio that has been chained
1384 * using bio_chain(). The ->bi_end_io() function will only be called the
1385 * last time. At this point the BLK_TA_COMPLETE tracing event will be
1386 * generated if BIO_TRACE_COMPLETION is set.
1387 **/
1388 void bio_endio(struct bio *bio)
1389 {
1390 again:
1391 if (!bio_remaining_done(bio))
1392 return;
1393 if (!bio_integrity_endio(bio))
1394 return;
1395
1396 if (bio->bi_disk)
1397 rq_qos_done_bio(bio->bi_disk->queue, bio);
1398
1399 /*
1400 * Need to have a real endio function for chained bios, otherwise
1401 * various corner cases will break (like stacking block devices that
1402 * save/restore bi_end_io) - however, we want to avoid unbounded
1403 * recursion and blowing the stack. Tail call optimization would
1404 * handle this, but compiling with frame pointers also disables
1405 * gcc's sibling call optimization.
1406 */
1407 if (bio->bi_end_io == bio_chain_endio) {
1408 bio = __bio_chain_endio(bio);
1409 goto again;
1410 }
1411
1412 if (bio->bi_disk && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
1413 trace_block_bio_complete(bio->bi_disk->queue, bio,
1414 blk_status_to_errno(bio->bi_status));
1415 bio_clear_flag(bio, BIO_TRACE_COMPLETION);
1416 }
1417
1418 blk_throtl_bio_endio(bio);
1419 /* release cgroup info */
1420 bio_uninit(bio);
1421 if (bio->bi_end_io)
1422 bio->bi_end_io(bio);
1423 }
1424 EXPORT_SYMBOL(bio_endio);
1425
1426 /**
1427 * bio_split - split a bio
1428 * @bio: bio to split
1429 * @sectors: number of sectors to split from the front of @bio
1430 * @gfp: gfp mask
1431 * @bs: bio set to allocate from
1432 *
1433 * Allocates and returns a new bio which represents @sectors from the start of
1434 * @bio, and updates @bio to represent the remaining sectors.
1435 *
1436 * Unless this is a discard request the newly allocated bio will point
1437 * to @bio's bi_io_vec. It is the caller's responsibility to ensure that
1438 * neither @bio nor @bs are freed before the split bio.
1439 */
1440 struct bio *bio_split(struct bio *bio, int sectors,
1441 gfp_t gfp, struct bio_set *bs)
1442 {
1443 struct bio *split;
1444
1445 BUG_ON(sectors <= 0);
1446 BUG_ON(sectors >= bio_sectors(bio));
1447
1448 split = bio_clone_fast(bio, gfp, bs);
1449 if (!split)
1450 return NULL;
1451
1452 split->bi_iter.bi_size = sectors << 9;
1453
1454 if (bio_integrity(split))
1455 bio_integrity_trim(split);
1456
1457 bio_advance(bio, split->bi_iter.bi_size);
1458
1459 if (bio_flagged(bio, BIO_TRACE_COMPLETION))
1460 bio_set_flag(split, BIO_TRACE_COMPLETION);
1461
1462 return split;
1463 }
1464 EXPORT_SYMBOL(bio_split);
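/*
 * Bounded-split sketch (illustrative; "max_sectors" is hypothetical): keep
 * splitting the front off @bio while it exceeds a per-request limit, chain
 * each piece, then submit whatever remains.
 *
 *	while (bio_sectors(bio) > max_sectors) {
 *		struct bio *split = bio_split(bio, max_sectors, GFP_NOIO,
 *					      &fs_bio_set);
 *
 *		bio_chain(split, bio);
 *		generic_make_request(split);
 *	}
 *	generic_make_request(bio);
 */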
1465
1466 /**
1467 * bio_trim - trim a bio
1468 * @bio: bio to trim
1469 * @offset: number of sectors to trim from the front of @bio
1470 * @size: size we want to trim @bio to, in sectors
1471 */
1472 void bio_trim(struct bio *bio, int offset, int size)
1473 {
1474 /* 'bio' is a cloned bio which we need to trim to match
1475 * the given offset and size.
1476 */
1477
1478 size <<= 9;
1479 if (offset == 0 && size == bio->bi_iter.bi_size)
1480 return;
1481
1482 bio_advance(bio, offset << 9);
1483 bio->bi_iter.bi_size = size;
1484
1485 if (bio_integrity(bio))
1486 bio_integrity_trim(bio);
1487
1488 }
1489 EXPORT_SYMBOL_GPL(bio_trim);
1490
1491 /*
1492 * create memory pools for biovec's in a bio_set.
1493 * use the global biovec slabs created for general use.
1494 */
1495 int biovec_init_pool(mempool_t *pool, int pool_entries)
1496 {
1497 struct biovec_slab *bp = bvec_slabs + BVEC_POOL_MAX;
1498
1499 return mempool_init_slab_pool(pool, pool_entries, bp->slab);
1500 }
1501
1502 /*
1503 * bioset_exit - exit a bioset initialized with bioset_init()
1504 *
1505 * May be called on a zeroed but uninitialized bioset (i.e. allocated with
1506 * kzalloc()).
1507 */
1508 void bioset_exit(struct bio_set *bs)
1509 {
1510 if (bs->rescue_workqueue)
1511 destroy_workqueue(bs->rescue_workqueue);
1512 bs->rescue_workqueue = NULL;
1513
1514 mempool_exit(&bs->bio_pool);
1515 mempool_exit(&bs->bvec_pool);
1516
1517 bioset_integrity_free(bs);
1518 if (bs->bio_slab)
1519 bio_put_slab(bs);
1520 bs->bio_slab = NULL;
1521 }
1522 EXPORT_SYMBOL(bioset_exit);
1523
1524 /**
1525 * bioset_init - Initialize a bio_set
1526 * @bs: pool to initialize
1527 * @pool_size: Number of bio and bio_vecs to cache in the mempool
1528 * @front_pad: Number of bytes to allocate in front of the returned bio
1529 * @flags: Flags to modify behavior, currently %BIOSET_NEED_BVECS
1530 * and %BIOSET_NEED_RESCUER
1531 *
1532 * Description:
1533 * Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
1534 * to ask for a number of bytes to be allocated in front of the bio.
1535 * Front pad allocation is useful for embedding the bio inside
1536 * another structure, to avoid allocating extra data to go with the bio.
1537 * Note that the bio must always be embedded at the END of that structure,
1538 * or things will break badly.
1539 * If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
1540 * for allocating iovecs. This pool is not needed e.g. for bio_clone_fast().
1541 * If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used to
1542 * dispatch queued requests when the mempool runs out of space.
1543 *
1544 */
1545 int bioset_init(struct bio_set *bs,
1546 unsigned int pool_size,
1547 unsigned int front_pad,
1548 int flags)
1549 {
1550 unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
1551
1552 bs->front_pad = front_pad;
1553
1554 spin_lock_init(&bs->rescue_lock);
1555 bio_list_init(&bs->rescue_list);
1556 INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
1557
1558 bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
1559 if (!bs->bio_slab)
1560 return -ENOMEM;
1561
1562 if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab))
1563 goto bad;
1564
1565 if ((flags & BIOSET_NEED_BVECS) &&
1566 biovec_init_pool(&bs->bvec_pool, pool_size))
1567 goto bad;
1568
1569 if (!(flags & BIOSET_NEED_RESCUER))
1570 return 0;
1571
1572 bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
1573 if (!bs->rescue_workqueue)
1574 goto bad;
1575
1576 return 0;
1577 bad:
1578 bioset_exit(bs);
1579 return -ENOMEM;
1580 }
1581 EXPORT_SYMBOL(bioset_init);
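/*
 * front_pad sketch (illustrative; "struct my_io", "my_bio_set" and
 * MY_POOL_SIZE are hypothetical): embed per-I/O driver state in front of
 * the bio by using the bio's offset inside the containing structure as
 * front_pad, then recover the container with container_of(). The bio must
 * be the last member, as noted above.
 *
 *	struct my_io {
 *		void		*private_data;
 *		struct bio	bio;
 *	};
 *
 *	bioset_init(&my_bio_set, MY_POOL_SIZE,
 *		    offsetof(struct my_io, bio), BIOSET_NEED_BVECS);
 *
 *	bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, &my_bio_set);
 *	io = container_of(bio, struct my_io, bio);
 */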
1582
1583 /*
1584 * Initialize and set up a new bio_set, based on the settings from
1585 * another bio_set.
1586 */
1587 int bioset_init_from_src(struct bio_set *bs, struct bio_set *src)
1588 {
1589 int flags;
1590
1591 flags = 0;
1592 if (src->bvec_pool.min_nr)
1593 flags |= BIOSET_NEED_BVECS;
1594 if (src->rescue_workqueue)
1595 flags |= BIOSET_NEED_RESCUER;
1596
1597 return bioset_init(bs, src->bio_pool.min_nr, src->front_pad, flags);
1598 }
1599 EXPORT_SYMBOL(bioset_init_from_src);
1600
1601 #ifdef CONFIG_BLK_CGROUP
1602
1603 /**
1604 * bio_disassociate_blkg - puts back the blkg reference if associated
1605 * @bio: target bio
1606 *
1607 * Helper to disassociate the blkg from @bio if a blkg is associated.
1608 */
1609 void bio_disassociate_blkg(struct bio *bio)
1610 {
1611 if (bio->bi_blkg) {
1612 blkg_put(bio->bi_blkg);
1613 bio->bi_blkg = NULL;
1614 }
1615 }
1616 EXPORT_SYMBOL_GPL(bio_disassociate_blkg);
1617
1618 /**
1619 * __bio_associate_blkg - associate a bio with a blkg
1620 * @bio: target bio
1621 * @blkg: the blkg to associate
1622 *
1623 * This tries to associate @bio with the specified @blkg. Association failure
1624 * is handled by walking up the blkg tree. Therefore, the blkg associated can
1625 * be anything between @blkg and the root_blkg. This situation only happens
1626 * when a cgroup is dying; the remaining bios then spill to the closest
1627 * alive blkg.
1628 *
1629 * A reference will be taken on the @blkg and will be released when @bio is
1630 * freed.
1631 */
1632 static void __bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
1633 {
1634 bio_disassociate_blkg(bio);
1635
1636 bio->bi_blkg = blkg_tryget_closest(blkg);
1637 }
1638
1639 /**
1640 * bio_associate_blkg_from_css - associate a bio with a specified css
1641 * @bio: target bio
1642 * @css: target css
1643 *
1644 * Associate @bio with the blkg found by combining the css's blkg and the
1645 * request_queue of the @bio. This falls back to the queue's root_blkg if
1646 * the association fails with the css.
1647 */
1648 void bio_associate_blkg_from_css(struct bio *bio,
1649 struct cgroup_subsys_state *css)
1650 {
1651 struct request_queue *q = bio->bi_disk->queue;
1652 struct blkcg_gq *blkg;
1653
1654 rcu_read_lock();
1655
1656 if (!css || !css->parent)
1657 blkg = q->root_blkg;
1658 else
1659 blkg = blkg_lookup_create(css_to_blkcg(css), q);
1660
1661 __bio_associate_blkg(bio, blkg);
1662
1663 rcu_read_unlock();
1664 }
1665 EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);
1666
1667 #ifdef CONFIG_MEMCG
1668 /**
1669 * bio_associate_blkg_from_page - associate a bio with the page's blkg
1670 * @bio: target bio
1671 * @page: the page to lookup the blkcg from
1672 *
1673 * Associate @bio with the blkg from @page's owning memcg and the respective
1674 * request_queue. If cgroup_e_css returns %NULL, fall back to the queue's
1675 * root_blkg.
1676 */
1677 void bio_associate_blkg_from_page(struct bio *bio, struct page *page)
1678 {
1679 struct cgroup_subsys_state *css;
1680
1681 if (!page->mem_cgroup)
1682 return;
1683
1684 rcu_read_lock();
1685
1686 css = cgroup_e_css(page->mem_cgroup->css.cgroup, &io_cgrp_subsys);
1687 bio_associate_blkg_from_css(bio, css);
1688
1689 rcu_read_unlock();
1690 }
1691 #endif /* CONFIG_MEMCG */
1692
1693 /**
1694 * bio_associate_blkg - associate a bio with a blkg
1695 * @bio: target bio
1696 *
1697 * Associate @bio with the blkg found from the bio's css and request_queue.
1698 * If one is not found, blkg_lookup_create() creates the blkg. If a blkg is
1699 * already associated, the css is reused and association redone as the
1700 * request_queue may have changed.
1701 */
1702 void bio_associate_blkg(struct bio *bio)
1703 {
1704 struct cgroup_subsys_state *css;
1705
1706 rcu_read_lock();
1707
1708 if (bio->bi_blkg)
1709 css = &bio_blkcg(bio)->css;
1710 else
1711 css = blkcg_css();
1712
1713 bio_associate_blkg_from_css(bio, css);
1714
1715 rcu_read_unlock();
1716 }
1717 EXPORT_SYMBOL_GPL(bio_associate_blkg);
1718
1719 /**
1720 * bio_clone_blkg_association - clone blkg association from src to dst bio
1721 * @dst: destination bio
1722 * @src: source bio
1723 */
1724 void bio_clone_blkg_association(struct bio *dst, struct bio *src)
1725 {
1726 rcu_read_lock();
1727
1728 if (src->bi_blkg)
1729 __bio_associate_blkg(dst, src->bi_blkg);
1730
1731 rcu_read_unlock();
1732 }
1733 EXPORT_SYMBOL_GPL(bio_clone_blkg_association);
1734 #endif /* CONFIG_BLK_CGROUP */
1735
1736 static void __init biovec_init_slabs(void)
1737 {
1738 int i;
1739
1740 for (i = 0; i < BVEC_POOL_NR; i++) {
1741 int size;
1742 struct biovec_slab *bvs = bvec_slabs + i;
1743
1744 if (bvs->nr_vecs <= BIO_INLINE_VECS) {
1745 bvs->slab = NULL;
1746 continue;
1747 }
1748
1749 size = bvs->nr_vecs * sizeof(struct bio_vec);
1750 bvs->slab = kmem_cache_create(bvs->name, size, 0,
1751 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
1752 }
1753 }
1754
1755 static int __init init_bio(void)
1756 {
1757 bio_slab_max = 2;
1758 bio_slab_nr = 0;
1759 bio_slabs = kcalloc(bio_slab_max, sizeof(struct bio_slab),
1760 GFP_KERNEL);
1761
1762 BUILD_BUG_ON(BIO_FLAG_LAST > BVEC_POOL_OFFSET);
1763
1764 if (!bio_slabs)
1765 panic("bio: can't allocate bios\n");
1766
1767 bio_integrity_init();
1768 biovec_init_slabs();
1769
1770 if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS))
1771 panic("bio: can't allocate bios\n");
1772
1773 if (bioset_integrity_create(&fs_bio_set, BIO_POOL_SIZE))
1774 panic("bio: can't create integrity pool\n");
1775
1776 return 0;
1777 }
1778 subsys_initcall(init_bio);