// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/iocontext.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <linux/blk-cgroup.h>
#include <linux/highmem.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>
#include <linux/xarray.h>

#include <trace/events/block.h>
#include "blk.h"
#include "blk-rq-qos.h"

static struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
} bvec_slabs[] __read_mostly = {
	{ .nr_vecs = 16, .name = "biovec-16" },
	{ .nr_vecs = 64, .name = "biovec-64" },
	{ .nr_vecs = 128, .name = "biovec-128" },
	{ .nr_vecs = BIO_MAX_VECS, .name = "biovec-max" },
};

static struct biovec_slab *biovec_slab(unsigned short nr_vecs)
{
	switch (nr_vecs) {
	/* smaller bios use inline vecs */
	case 5 ... 16:
		return &bvec_slabs[0];
	case 17 ... 64:
		return &bvec_slabs[1];
	case 65 ... 128:
		return &bvec_slabs[2];
	case 129 ... BIO_MAX_VECS:
		return &bvec_slabs[3];
	default:
		BUG();
		return NULL;
	}
}

/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
 */
struct bio_set fs_bio_set;
EXPORT_SYMBOL(fs_bio_set);

/*
 * Our slab pool management
 */
struct bio_slab {
	struct kmem_cache *slab;
	unsigned int slab_ref;
	unsigned int slab_size;
	char name[8];
};
static DEFINE_MUTEX(bio_slab_lock);
static DEFINE_XARRAY(bio_slabs);

static struct bio_slab *create_bio_slab(unsigned int size)
{
	struct bio_slab *bslab = kzalloc(sizeof(*bslab), GFP_KERNEL);

	if (!bslab)
		return NULL;

	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", size);
	bslab->slab = kmem_cache_create(bslab->name, size,
			ARCH_KMALLOC_MINALIGN, SLAB_HWCACHE_ALIGN, NULL);
	if (!bslab->slab)
		goto fail_alloc_slab;

	bslab->slab_ref = 1;
	bslab->slab_size = size;

	if (!xa_err(xa_store(&bio_slabs, size, bslab, GFP_KERNEL)))
		return bslab;

	kmem_cache_destroy(bslab->slab);

fail_alloc_slab:
	kfree(bslab);
	return NULL;
}

static inline unsigned int bs_bio_slab_size(struct bio_set *bs)
{
	return bs->front_pad + sizeof(struct bio) + bs->back_pad;
}

static struct kmem_cache *bio_find_or_create_slab(struct bio_set *bs)
{
	unsigned int size = bs_bio_slab_size(bs);
	struct bio_slab *bslab;

	mutex_lock(&bio_slab_lock);
	bslab = xa_load(&bio_slabs, size);
	if (bslab)
		bslab->slab_ref++;
	else
		bslab = create_bio_slab(size);
	mutex_unlock(&bio_slab_lock);

	if (bslab)
		return bslab->slab;
	return NULL;
}

static void bio_put_slab(struct bio_set *bs)
{
	struct bio_slab *bslab = NULL;
	unsigned int slab_size = bs_bio_slab_size(bs);

	mutex_lock(&bio_slab_lock);

	bslab = xa_load(&bio_slabs, slab_size);
	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
		goto out;

	WARN_ON_ONCE(bslab->slab != bs->bio_slab);

	WARN_ON(!bslab->slab_ref);

	if (--bslab->slab_ref)
		goto out;

	xa_erase(&bio_slabs, slab_size);

	kmem_cache_destroy(bslab->slab);
	kfree(bslab);

out:
	mutex_unlock(&bio_slab_lock);
}

void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs)
{
	BIO_BUG_ON(nr_vecs > BIO_MAX_VECS);

	if (nr_vecs == BIO_MAX_VECS)
		mempool_free(bv, pool);
	else if (nr_vecs > BIO_INLINE_VECS)
		kmem_cache_free(biovec_slab(nr_vecs)->slab, bv);
}

/*
 * Make the first allocation restricted and don't dump info on allocation
 * failures, since we'll fall back to the mempool in case of failure.
 */
static inline gfp_t bvec_alloc_gfp(gfp_t gfp)
{
	return (gfp & ~(__GFP_DIRECT_RECLAIM | __GFP_IO)) |
		__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
}

struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask)
{
	struct biovec_slab *bvs = biovec_slab(*nr_vecs);

	if (WARN_ON_ONCE(!bvs))
		return NULL;

	/*
	 * Upgrade the nr_vecs request to take full advantage of the allocation.
	 * We also rely on this in the bvec_free path.
	 */
	*nr_vecs = bvs->nr_vecs;

	/*
	 * Try a slab allocation first for all smaller allocations. If that
	 * fails and __GFP_DIRECT_RECLAIM is set retry with the mempool.
	 * The mempool is sized to handle up to BIO_MAX_VECS entries.
	 */
	if (*nr_vecs < BIO_MAX_VECS) {
		struct bio_vec *bvl;

		bvl = kmem_cache_alloc(bvs->slab, bvec_alloc_gfp(gfp_mask));
		if (likely(bvl) || !(gfp_mask & __GFP_DIRECT_RECLAIM))
			return bvl;
		*nr_vecs = BIO_MAX_VECS;
	}

	return mempool_alloc(pool, gfp_mask);
}

void bio_uninit(struct bio *bio)
{
#ifdef CONFIG_BLK_CGROUP
	if (bio->bi_blkg) {
		blkg_put(bio->bi_blkg);
		bio->bi_blkg = NULL;
	}
#endif
	if (bio_integrity(bio))
		bio_integrity_free(bio);

	bio_crypt_free_ctx(bio);
}
EXPORT_SYMBOL(bio_uninit);

static void bio_free(struct bio *bio)
{
	struct bio_set *bs = bio->bi_pool;
	void *p;

	bio_uninit(bio);

	if (bs) {
		bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs);

		/*
		 * If we have front padding, adjust the bio pointer before freeing
		 */
		p = bio;
		p -= bs->front_pad;

		mempool_free(p, &bs->bio_pool);
	} else {
		/* Bio was allocated by bio_kmalloc() */
		kfree(bio);
	}
}

/*
 * Users of this function have their own bio allocation. Subsequently,
 * they must remember to pair any call to bio_init() with bio_uninit()
 * when IO has completed, or when the bio is released.
 */
void bio_init(struct bio *bio, struct bio_vec *table,
	      unsigned short max_vecs)
{
	memset(bio, 0, sizeof(*bio));
	atomic_set(&bio->__bi_remaining, 1);
	atomic_set(&bio->__bi_cnt, 1);

	bio->bi_io_vec = table;
	bio->bi_max_vecs = max_vecs;
}
EXPORT_SYMBOL(bio_init);

/**
 * bio_reset - reinitialize a bio
 * @bio: bio to reset
 *
 * Description:
 *   After calling bio_reset(), @bio will be in the same state as a freshly
 *   allocated bio returned by bio_alloc_bioset() - the only fields that are
 *   preserved are the ones that are initialized by bio_alloc_bioset(). See
 *   comment in struct bio.
 */
void bio_reset(struct bio *bio)
{
	bio_uninit(bio);
	memset(bio, 0, BIO_RESET_BYTES);
	atomic_set(&bio->__bi_remaining, 1);
}
EXPORT_SYMBOL(bio_reset);

static struct bio *__bio_chain_endio(struct bio *bio)
{
	struct bio *parent = bio->bi_private;

	if (bio->bi_status && !parent->bi_status)
		parent->bi_status = bio->bi_status;
	bio_put(bio);
	return parent;
}

static void bio_chain_endio(struct bio *bio)
{
	bio_endio(__bio_chain_endio(bio));
}

/**
 * bio_chain - chain bio completions
 * @bio: the target bio
 * @parent: the parent bio of @bio
 *
 * The caller won't have a bi_end_io called when @bio completes - instead,
 * @parent's bi_end_io won't be called until both @parent and @bio have
 * completed; the chained bio will also be freed when it completes.
 *
 * The caller must not set bi_private or bi_end_io in @bio.
 */
void bio_chain(struct bio *bio, struct bio *parent)
{
	BUG_ON(bio->bi_private || bio->bi_end_io);

	bio->bi_private = parent;
	bio->bi_end_io = bio_chain_endio;
	bio_inc_remaining(parent);
}
EXPORT_SYMBOL(bio_chain);

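/*
 * Illustrative sketch (editor's addition, not part of the upstream file):
 * a driver that wants two bios to complete as one unit typically chains the
 * second bio to the first and submits both; only the parent's ->bi_end_io
 * runs, and only after both have completed. "parent" and "extra" below are
 * hypothetical names:
 *
 *	struct bio *extra = bio_alloc(GFP_NOIO, 1);
 *
 *	// fill in extra's device, sector and pages here
 *	bio_chain(extra, parent);
 *	submit_bio(extra);
 *	submit_bio(parent);	// parent->bi_end_io fires after both finish
 */
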
static void bio_alloc_rescue(struct work_struct *work)
{
	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
	struct bio *bio;

	while (1) {
		spin_lock(&bs->rescue_lock);
		bio = bio_list_pop(&bs->rescue_list);
		spin_unlock(&bs->rescue_lock);

		if (!bio)
			break;

		submit_bio_noacct(bio);
	}
}

static void punt_bios_to_rescuer(struct bio_set *bs)
{
	struct bio_list punt, nopunt;
	struct bio *bio;

	if (WARN_ON_ONCE(!bs->rescue_workqueue))
		return;
	/*
	 * In order to guarantee forward progress we must punt only bios that
	 * were allocated from this bio_set; otherwise, if there was a bio on
	 * there for a stacking driver higher up in the stack, processing it
	 * could require allocating bios from this bio_set, and doing that from
	 * our own rescuer would be bad.
	 *
	 * Since bio lists are singly linked, pop them all instead of trying to
	 * remove from the middle of the list:
	 */

	bio_list_init(&punt);
	bio_list_init(&nopunt);

	while ((bio = bio_list_pop(&current->bio_list[0])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[0] = nopunt;

	bio_list_init(&nopunt);
	while ((bio = bio_list_pop(&current->bio_list[1])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[1] = nopunt;

	spin_lock(&bs->rescue_lock);
	bio_list_merge(&bs->rescue_list, &punt);
	spin_unlock(&bs->rescue_lock);

	queue_work(bs->rescue_workqueue, &bs->rescue_work);
}

/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask: the GFP_* mask given to the slab allocator
 * @nr_iovecs: number of iovecs to pre-allocate
 * @bs: the bio_set to allocate from.
 *
 * Allocate a bio from the mempools in @bs.
 *
 * If %__GFP_DIRECT_RECLAIM is set then bio_alloc will always be able to
 * allocate a bio. This is due to the mempool guarantees. To make this work,
 * callers must never allocate more than 1 bio at a time from the general pool.
 * Callers that need to allocate more than 1 bio must always submit the
 * previously allocated bio for IO before attempting to allocate a new one.
 * Failure to do so can cause deadlocks under memory pressure.
 *
 * Note that when running under submit_bio_noacct() (i.e. any block driver),
 * bios are not submitted until after you return - see the code in
 * submit_bio_noacct() that converts recursion into iteration, to prevent
 * stack overflows.
 *
 * This would normally mean allocating multiple bios under submit_bio_noacct()
 * would be susceptible to deadlocks, but we have
 * deadlock avoidance code that resubmits any blocked bios from a rescuer
 * thread.
 *
 * However, we do not guarantee forward progress for allocations from other
 * mempools. Doing multiple allocations from the same mempool under
 * submit_bio_noacct() should be avoided - instead, use bio_set's front_pad
 * for per bio allocations.
 *
 * Returns: Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned short nr_iovecs,
			     struct bio_set *bs)
{
	gfp_t saved_gfp = gfp_mask;
	struct bio *bio;
	void *p;

	/* should not use nobvec bioset for nr_iovecs > 0 */
	if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_iovecs > 0))
		return NULL;

	/*
	 * submit_bio_noacct() converts recursion to iteration; this means if
	 * we're running beneath it, any bios we allocate and submit will not be
	 * submitted (and thus freed) until after we return.
	 *
	 * This exposes us to a potential deadlock if we allocate multiple bios
	 * from the same bio_set() while running underneath submit_bio_noacct().
	 * If we were to allocate multiple bios (say a stacking block driver
	 * that was splitting bios), we would deadlock if we exhausted the
	 * mempool's reserve.
	 *
	 * We solve this, and guarantee forward progress, with a rescuer
	 * workqueue per bio_set. If we go to allocate and there are bios on
	 * current->bio_list, we first try the allocation without
	 * __GFP_DIRECT_RECLAIM; if that fails, we punt those bios we would be
	 * blocking to the rescuer workqueue before we retry with the original
	 * gfp_flags.
	 */
	if (current->bio_list &&
	    (!bio_list_empty(&current->bio_list[0]) ||
	     !bio_list_empty(&current->bio_list[1])) &&
	    bs->rescue_workqueue)
		gfp_mask &= ~__GFP_DIRECT_RECLAIM;

	p = mempool_alloc(&bs->bio_pool, gfp_mask);
	if (!p && gfp_mask != saved_gfp) {
		punt_bios_to_rescuer(bs);
		gfp_mask = saved_gfp;
		p = mempool_alloc(&bs->bio_pool, gfp_mask);
	}
	if (unlikely(!p))
		return NULL;

	bio = p + bs->front_pad;
	if (nr_iovecs > BIO_INLINE_VECS) {
		struct bio_vec *bvl = NULL;

		bvl = bvec_alloc(&bs->bvec_pool, &nr_iovecs, gfp_mask);
		if (!bvl && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			bvl = bvec_alloc(&bs->bvec_pool, &nr_iovecs, gfp_mask);
		}
		if (unlikely(!bvl))
			goto err_free;

		bio_init(bio, bvl, nr_iovecs);
	} else if (nr_iovecs) {
		bio_init(bio, bio->bi_inline_vecs, BIO_INLINE_VECS);
	} else {
		bio_init(bio, NULL, 0);
	}

	bio->bi_pool = bs;
	return bio;

err_free:
	mempool_free(p, &bs->bio_pool);
	return NULL;
}
EXPORT_SYMBOL(bio_alloc_bioset);

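/*
 * Illustrative sketch (editor's addition): the front_pad mechanism described
 * above lets a driver embed the bio at the end of its own per-I/O structure.
 * "struct my_io" and "my_bio_set" are hypothetical names:
 *
 *	struct my_io {
 *		void *private_data;
 *		struct bio bio;		// must be the last member
 *	};
 *
 *	// my_bio_set was set up with front_pad = offsetof(struct my_io, bio)
 *	struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, &my_bio_set);
 *	struct my_io *io = container_of(bio, struct my_io, bio);
 */
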
/**
 * bio_kmalloc - kmalloc a bio for I/O
 * @gfp_mask: the GFP_* mask given to the slab allocator
 * @nr_iovecs: number of iovecs to pre-allocate
 *
 * Use kmalloc to allocate and initialize a bio.
 *
 * Returns: Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs)
{
	struct bio *bio;

	if (nr_iovecs > UIO_MAXIOV)
		return NULL;

	bio = kmalloc(struct_size(bio, bi_inline_vecs, nr_iovecs), gfp_mask);
	if (unlikely(!bio))
		return NULL;
	bio_init(bio, nr_iovecs ? bio->bi_inline_vecs : NULL, nr_iovecs);
	bio->bi_pool = NULL;
	return bio;
}
EXPORT_SYMBOL(bio_kmalloc);

void zero_fill_bio(struct bio *bio)
{
	unsigned long flags;
	struct bio_vec bv;
	struct bvec_iter iter;

	bio_for_each_segment(bv, bio, iter) {
		char *data = bvec_kmap_irq(&bv, &flags);
		memset(data, 0, bv.bv_len);
		flush_dcache_page(bv.bv_page);
		bvec_kunmap_irq(data, &flags);
	}
}
EXPORT_SYMBOL(zero_fill_bio);

/**
 * bio_truncate - truncate the bio to a new size of @new_size
 * @bio: the bio to be truncated
 * @new_size: new size for truncating the bio
 *
 * Description:
 *   Truncate the bio to the new size of @new_size. If bio_op(bio) is
 *   REQ_OP_READ, zero the truncated part. This function should only
 *   be used for handling corner cases, such as bio eod.
 */
void bio_truncate(struct bio *bio, unsigned new_size)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned int done = 0;
	bool truncated = false;

	if (new_size >= bio->bi_iter.bi_size)
		return;

	if (bio_op(bio) != REQ_OP_READ)
		goto exit;

	bio_for_each_segment(bv, bio, iter) {
		if (done + bv.bv_len > new_size) {
			unsigned offset;

			if (!truncated)
				offset = new_size - done;
			else
				offset = 0;
			zero_user(bv.bv_page, offset, bv.bv_len - offset);
			truncated = true;
		}
		done += bv.bv_len;
	}

 exit:
	/*
	 * Don't touch the bvec table here and make it really immutable, since
	 * the fs bio user has to retrieve all pages via bio_for_each_segment_all
	 * in its .bi_end_io() callback.
	 *
	 * It is enough to truncate the bio by updating .bi_size since we can
	 * make a correct bvec with the updated .bi_size for drivers.
	 */
	bio->bi_iter.bi_size = new_size;
}

/**
 * guard_bio_eod - truncate a BIO to fit the block device
 * @bio: bio to truncate
 *
 * This allows us to do IO even on the odd last sectors of a device, even if the
 * block size is some multiple of the physical sector size.
 *
 * We'll just truncate the bio to the size of the device, and clear the end of
 * the buffer head manually. Truly out-of-range accesses will turn into actual
 * I/O errors, this only handles the "we need to be able to do I/O at the final
 * sector" case.
 */
void guard_bio_eod(struct bio *bio)
{
	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);

	if (!maxsector)
		return;

	/*
	 * If the *whole* IO is past the end of the device,
	 * let it through, and the IO layer will turn it into
	 * an EIO.
	 */
	if (unlikely(bio->bi_iter.bi_sector >= maxsector))
		return;

	maxsector -= bio->bi_iter.bi_sector;
	if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
		return;

	bio_truncate(bio, maxsector << 9);
}

/**
 * bio_put - release a reference to a bio
 * @bio: bio to release reference to
 *
 * Description:
 *   Put a reference to a &struct bio, either one you have gotten with
 *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
 **/
void bio_put(struct bio *bio)
{
	if (!bio_flagged(bio, BIO_REFFED))
		bio_free(bio);
	else {
		BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));

		/*
		 * last put frees it
		 */
		if (atomic_dec_and_test(&bio->__bi_cnt))
			bio_free(bio);
	}
}
EXPORT_SYMBOL(bio_put);

/**
 * __bio_clone_fast - clone a bio that shares the original bio's biovec
 * @bio: destination bio
 * @bio_src: bio to clone
 *
 * Clone a &bio. Caller will own the returned bio, but not
 * the actual data it points to. Reference count of returned
 * bio will be one.
 *
 * Caller must ensure that @bio_src is not freed before @bio.
 */
void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
{
	WARN_ON_ONCE(bio->bi_pool && bio->bi_max_vecs);

	/*
	 * most users will be overriding ->bi_bdev with a new target,
	 * so we don't set nor calculate new physical/hw segment counts here
	 */
	bio->bi_bdev = bio_src->bi_bdev;
	bio_set_flag(bio, BIO_CLONED);
	if (bio_flagged(bio_src, BIO_THROTTLED))
		bio_set_flag(bio, BIO_THROTTLED);
	if (bio_flagged(bio_src, BIO_REMAPPED))
		bio_set_flag(bio, BIO_REMAPPED);
	bio->bi_opf = bio_src->bi_opf;
	bio->bi_ioprio = bio_src->bi_ioprio;
	bio->bi_write_hint = bio_src->bi_write_hint;
	bio->bi_iter = bio_src->bi_iter;
	bio->bi_io_vec = bio_src->bi_io_vec;

	bio_clone_blkg_association(bio, bio_src);
	blkcg_bio_issue_init(bio);
}
EXPORT_SYMBOL(__bio_clone_fast);

/**
 * bio_clone_fast - clone a bio that shares the original bio's biovec
 * @bio: bio to clone
 * @gfp_mask: allocation priority
 * @bs: bio_set to allocate from
 *
 * Like __bio_clone_fast, only also allocates the returned bio
 */
struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
{
	struct bio *b;

	b = bio_alloc_bioset(gfp_mask, 0, bs);
	if (!b)
		return NULL;

	__bio_clone_fast(b, bio);

	if (bio_crypt_clone(b, bio, gfp_mask) < 0)
		goto err_put;

	if (bio_integrity(bio) &&
	    bio_integrity_clone(b, bio, gfp_mask) < 0)
		goto err_put;

	return b;

err_put:
	bio_put(b);
	return NULL;
}
EXPORT_SYMBOL(bio_clone_fast);

const char *bio_devname(struct bio *bio, char *buf)
{
	return bdevname(bio->bi_bdev, buf);
}
EXPORT_SYMBOL(bio_devname);

static inline bool page_is_mergeable(const struct bio_vec *bv,
		struct page *page, unsigned int len, unsigned int off,
		bool *same_page)
{
	size_t bv_end = bv->bv_offset + bv->bv_len;
	phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1;
	phys_addr_t page_addr = page_to_phys(page);

	if (vec_end_addr + 1 != page_addr + off)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
		return false;

	*same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
	if (*same_page)
		return true;
	return (bv->bv_page + bv_end / PAGE_SIZE) == (page + off / PAGE_SIZE);
}

/*
 * Try to merge a page into a segment, while obeying the hardware segment
 * size limit. This is not for normal read/write bios, but for passthrough
 * or Zone Append operations that we can't split.
 */
static bool bio_try_merge_hw_seg(struct request_queue *q, struct bio *bio,
				 struct page *page, unsigned len,
				 unsigned offset, bool *same_page)
{
	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;
	phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;

	if ((addr1 | mask) != (addr2 | mask))
		return false;
	if (bv->bv_len + len > queue_max_segment_size(q))
		return false;
	return __bio_try_merge_page(bio, page, len, offset, same_page);
}

/**
 * bio_add_hw_page - attempt to add a page to a bio with hw constraints
 * @q: the target queue
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 * @max_sectors: maximum number of sectors that can be added
 * @same_page: return if the segment has been merged inside the same page
 *
 * Add a page to a bio while respecting the hardware max_sectors, max_segment
 * and gap limitations.
 */
int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page)
{
	struct bio_vec *bvec;

	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return 0;

	if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors)
		return 0;

	if (bio->bi_vcnt > 0) {
		if (bio_try_merge_hw_seg(q, bio, page, len, offset, same_page))
			return len;

		/*
		 * If the queue doesn't support SG gaps and adding this segment
		 * would create a gap, disallow it.
		 */
		bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
		if (bvec_gap_to_prev(q, bvec, offset))
			return 0;
	}

	if (bio_full(bio, len))
		return 0;

	if (bio->bi_vcnt >= queue_max_segments(q))
		return 0;

	bvec = &bio->bi_io_vec[bio->bi_vcnt];
	bvec->bv_page = page;
	bvec->bv_len = len;
	bvec->bv_offset = offset;
	bio->bi_vcnt++;
	bio->bi_iter.bi_size += len;
	return len;
}

/**
 * bio_add_pc_page - attempt to add page to passthrough bio
 * @q: the target queue
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist. This can fail for a
 * number of reasons, such as the bio being full or target block device
 * limitations. The target block device must allow bios up to PAGE_SIZE,
 * so it is always possible to add a single page to an empty bio.
 *
 * This should only be used by passthrough bios.
 */
int bio_add_pc_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset)
{
	bool same_page = false;
	return bio_add_hw_page(q, bio, page, len, offset,
			queue_max_hw_sectors(q), &same_page);
}
EXPORT_SYMBOL(bio_add_pc_page);

/**
 * bio_add_zone_append_page - attempt to add page to zone-append bio
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist of a bio that will be submitted
 * for a zone-append request. This can fail for a number of reasons, such as the
 * bio being full, the target block device not being a zoned block device, or
 * other limitations of the target block device. The target block device must
 * allow bios up to PAGE_SIZE, so it is always possible to add a single page
 * to an empty bio.
 *
 * Returns: number of bytes added to the bio, or 0 in case of a failure.
 */
int bio_add_zone_append_page(struct bio *bio, struct page *page,
			     unsigned int len, unsigned int offset)
{
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
	bool same_page = false;

	if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_ZONE_APPEND))
		return 0;

	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
		return 0;

	return bio_add_hw_page(q, bio, page, len, offset,
			       queue_max_zone_append_sectors(q), &same_page);
}
EXPORT_SYMBOL_GPL(bio_add_zone_append_page);

/**
 * __bio_try_merge_page - try appending data to an existing bvec.
 * @bio: destination bio
 * @page: start page to add
 * @len: length of the data to add
 * @off: offset of the data relative to @page
 * @same_page: return if the segment has been merged inside the same page
 *
 * Try to add the data at @page + @off to the last bvec of @bio. This is a
 * useful optimisation for file systems with a block size smaller than the
 * page size.
 *
 * Warn if (@len, @off) crosses pages in case that @same_page is true.
 *
 * Return %true on success or %false on failure.
 */
bool __bio_try_merge_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off, bool *same_page)
{
	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return false;

	if (bio->bi_vcnt > 0) {
		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page_is_mergeable(bv, page, len, off, same_page)) {
			if (bio->bi_iter.bi_size > UINT_MAX - len) {
				*same_page = false;
				return false;
			}
			bv->bv_len += len;
			bio->bi_iter.bi_size += len;
			return true;
		}
	}
	return false;
}
EXPORT_SYMBOL_GPL(__bio_try_merge_page);

/**
 * __bio_add_page - add page(s) to a bio in a new segment
 * @bio: destination bio
 * @page: start page to add
 * @len: length of the data to add, may cross pages
 * @off: offset of the data relative to @page, may cross pages
 *
 * Add the data at @page + @off to @bio as a new bvec. The caller must ensure
 * that @bio has space for another bvec.
 */
void __bio_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off)
{
	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];

	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	WARN_ON_ONCE(bio_full(bio, len));

	bv->bv_page = page;
	bv->bv_offset = off;
	bv->bv_len = len;

	bio->bi_iter.bi_size += len;
	bio->bi_vcnt++;

	if (!bio_flagged(bio, BIO_WORKINGSET) && unlikely(PageWorkingset(page)))
		bio_set_flag(bio, BIO_WORKINGSET);
}
EXPORT_SYMBOL_GPL(__bio_add_page);

/**
 * bio_add_page - attempt to add page(s) to bio
 * @bio: destination bio
 * @page: start page to add
 * @len: vec entry length, may cross pages
 * @offset: vec entry offset relative to @page, may cross pages
 *
 * Attempt to add page(s) to the bio_vec maplist. This will only fail
 * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
 */
int bio_add_page(struct bio *bio, struct page *page,
		 unsigned int len, unsigned int offset)
{
	bool same_page = false;

	if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
		if (bio_full(bio, len))
			return 0;
		__bio_add_page(bio, page, len, offset);
	}
	return len;
}
EXPORT_SYMBOL(bio_add_page);

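/*
 * Illustrative sketch (editor's addition): building and submitting a bio one
 * page at a time with bio_add_page(). "bdev", "pages", "npages" and "sector"
 * are hypothetical caller-provided values:
 *
 *	struct bio *bio = bio_alloc(GFP_KERNEL, npages);
 *	int i;
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_WRITE;
 *	for (i = 0; i < npages; i++)
 *		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE)
 *			break;	// bio full: submit it and start a new one
 *	submit_bio(bio);
 */
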
void bio_release_pages(struct bio *bio, bool mark_dirty)
{
	struct bvec_iter_all iter_all;
	struct bio_vec *bvec;

	if (bio_flagged(bio, BIO_NO_PAGE_REF))
		return;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		if (mark_dirty && !PageCompound(bvec->bv_page))
			set_page_dirty_lock(bvec->bv_page);
		put_page(bvec->bv_page);
	}
}
EXPORT_SYMBOL_GPL(bio_release_pages);

static void __bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
{
	WARN_ON_ONCE(bio->bi_max_vecs);

	bio->bi_vcnt = iter->nr_segs;
	bio->bi_io_vec = (struct bio_vec *)iter->bvec;
	bio->bi_iter.bi_bvec_done = iter->iov_offset;
	bio->bi_iter.bi_size = iter->count;
	bio_set_flag(bio, BIO_NO_PAGE_REF);
	bio_set_flag(bio, BIO_CLONED);
}

static int bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
{
	__bio_iov_bvec_set(bio, iter);
	iov_iter_advance(iter, iter->count);
	return 0;
}

static int bio_iov_bvec_set_append(struct bio *bio, struct iov_iter *iter)
{
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
	struct iov_iter i = *iter;

	iov_iter_truncate(&i, queue_max_zone_append_sectors(q) << 9);
	__bio_iov_bvec_set(bio, &i);
	iov_iter_advance(iter, i.count);
	return 0;
}

#define PAGE_PTRS_PER_BVEC	(sizeof(struct bio_vec) / sizeof(struct page *))

/**
 * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
 * @bio: bio to add pages to
 * @iter: iov iterator describing the region to be mapped
 *
 * Pins pages from *iter and appends them to @bio's bvec array. The
 * pages will have to be released using put_page() when done.
 * For multi-segment *iter, this function only adds pages from the
 * next non-empty segment of the iov iterator.
 */
static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
	struct page **pages = (struct page **)bv;
	bool same_page = false;
	ssize_t size, left;
	unsigned len, i;
	size_t offset;

	/*
	 * Move page array up in the allocated memory for the bio vecs as far as
	 * possible so that we can start filling biovecs from the beginning
	 * without overwriting the temporary page array.
	 */
	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);

	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
	if (unlikely(size <= 0))
		return size ? size : -EFAULT;

	for (left = size, i = 0; left > 0; left -= len, i++) {
		struct page *page = pages[i];

		len = min_t(size_t, PAGE_SIZE - offset, left);

		if (__bio_try_merge_page(bio, page, len, offset, &same_page)) {
			if (same_page)
				put_page(page);
		} else {
			if (WARN_ON_ONCE(bio_full(bio, len)))
				return -EINVAL;
			__bio_add_page(bio, page, len, offset);
		}
		offset = 0;
	}

	iov_iter_advance(iter, size);
	return 0;
}

static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter)
{
	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
	unsigned int max_append_sectors = queue_max_zone_append_sectors(q);
	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
	struct page **pages = (struct page **)bv;
	ssize_t size, left;
	unsigned len, i;
	size_t offset;
	int ret = 0;

	if (WARN_ON_ONCE(!max_append_sectors))
		return 0;

	/*
	 * Move page array up in the allocated memory for the bio vecs as far as
	 * possible so that we can start filling biovecs from the beginning
	 * without overwriting the temporary page array.
	 */
	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);

	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
	if (unlikely(size <= 0))
		return size ? size : -EFAULT;

	for (left = size, i = 0; left > 0; left -= len, i++) {
		struct page *page = pages[i];
		bool same_page = false;

		len = min_t(size_t, PAGE_SIZE - offset, left);
		if (bio_add_hw_page(q, bio, page, len, offset,
				max_append_sectors, &same_page) != len) {
			ret = -EINVAL;
			break;
		}
		if (same_page)
			put_page(page);
		offset = 0;
	}

	iov_iter_advance(iter, size - left);
	return ret;
}

/**
 * bio_iov_iter_get_pages - add user or kernel pages to a bio
 * @bio: bio to add pages to
 * @iter: iov iterator describing the region to be added
 *
 * This takes either an iterator pointing to user memory, or one pointing to
 * kernel pages (BVEC iterator). If we're adding user pages, we pin them and
 * map them into the kernel. On IO completion, the caller should put those
 * pages. For bvec based iterators bio_iov_iter_get_pages() uses the provided
 * bvecs rather than copying them. Hence anyone issuing kiocb based IO needs
 * to ensure the bvecs and pages stay referenced until the submitted I/O is
 * completed by a call to ->ki_complete() or returns with an error other than
 * -EIOCBQUEUED. The caller needs to check if the bio is flagged BIO_NO_PAGE_REF
 * on IO completion. If it isn't, then pages should be released.
 *
 * The function tries, but does not guarantee, to pin as many pages as
 * fit into the bio, or are requested in @iter, whatever is smaller. If
 * MM encounters an error pinning the requested pages, it stops. Error
 * is returned only if 0 pages could be pinned.
 *
 * It's intended for direct IO, so doesn't do PSI tracking, the caller is
 * responsible for setting BIO_WORKINGSET if necessary.
 */
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
	int ret = 0;

	if (iov_iter_is_bvec(iter)) {
		if (bio_op(bio) == REQ_OP_ZONE_APPEND)
			return bio_iov_bvec_set_append(bio, iter);
		return bio_iov_bvec_set(bio, iter);
	}

	do {
		if (bio_op(bio) == REQ_OP_ZONE_APPEND)
			ret = __bio_iov_append_get_pages(bio, iter);
		else
			ret = __bio_iov_iter_get_pages(bio, iter);
	} while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));

	/* don't account direct I/O as memory stall */
	bio_clear_flag(bio, BIO_WORKINGSET);
	return bio->bi_vcnt ? 0 : ret;
}
EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);

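/*
 * Illustrative sketch (editor's addition): a typical direct-I/O path pins the
 * user buffer into a bio and releases the pages on completion, as described
 * above. "nr_pages", "bdev" and "pos" are hypothetical caller-provided values:
 *
 *	struct bio *bio = bio_alloc(GFP_KERNEL, nr_pages);
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = pos >> 9;
 *	bio->bi_opf = REQ_OP_READ;
 *	if (bio_iov_iter_get_pages(bio, iter))
 *		goto out_put;		// hypothetical error label
 *	submit_bio(bio);
 *	// in ->bi_end_io: bio_release_pages(bio, false); bio_put(bio);
 */
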
static void submit_bio_wait_endio(struct bio *bio)
{
	complete(bio->bi_private);
}

/**
 * submit_bio_wait - submit a bio, and wait until it completes
 * @bio: The &struct bio which describes the I/O
 *
 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
 * bio_endio() on failure.
 *
 * WARNING: Unlike how submit_bio() is usually used, this function does not
 * consume a reference to the bio. The caller must drop the reference on
 * their own.
 */
int submit_bio_wait(struct bio *bio)
{
	DECLARE_COMPLETION_ONSTACK_MAP(done,
			bio->bi_bdev->bd_disk->lockdep_map);
	unsigned long hang_check;

	bio->bi_private = &done;
	bio->bi_end_io = submit_bio_wait_endio;
	bio->bi_opf |= REQ_SYNC;
	submit_bio(bio);

	/* Prevent hang_check timer from firing at us during very long I/O */
	hang_check = sysctl_hung_task_timeout_secs;
	if (hang_check)
		while (!wait_for_completion_io_timeout(&done,
					hang_check * (HZ/2)))
			;
	else
		wait_for_completion_io(&done);

	return blk_status_to_errno(bio->bi_status);
}
EXPORT_SYMBOL(submit_bio_wait);

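/*
 * Illustrative sketch (editor's addition): synchronous reading of a single
 * page with submit_bio_wait(). Note the explicit bio_put(), since this helper
 * does not consume the caller's reference. "bdev", "page" and "sector" are
 * hypothetical:
 *
 *	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
 *	int ret;
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_READ;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	ret = submit_bio_wait(bio);
 *	bio_put(bio);
 */
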
/**
 * bio_advance - increment/complete a bio by some number of bytes
 * @bio: bio to advance
 * @bytes: number of bytes to complete
 *
 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
 * be updated on the last bvec as well.
 *
 * @bio will then represent the remaining, uncompleted portion of the io.
 */
void bio_advance(struct bio *bio, unsigned bytes)
{
	if (bio_integrity(bio))
		bio_integrity_advance(bio, bytes);

	bio_crypt_advance(bio, bytes);
	bio_advance_iter(bio, &bio->bi_iter, bytes);
}
EXPORT_SYMBOL(bio_advance);

void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
			struct bio *src, struct bvec_iter *src_iter)
{
	struct bio_vec src_bv, dst_bv;
	void *src_p, *dst_p;
	unsigned bytes;

	while (src_iter->bi_size && dst_iter->bi_size) {
		src_bv = bio_iter_iovec(src, *src_iter);
		dst_bv = bio_iter_iovec(dst, *dst_iter);

		bytes = min(src_bv.bv_len, dst_bv.bv_len);

		src_p = kmap_atomic(src_bv.bv_page);
		dst_p = kmap_atomic(dst_bv.bv_page);

		memcpy(dst_p + dst_bv.bv_offset,
		       src_p + src_bv.bv_offset,
		       bytes);

		kunmap_atomic(dst_p);
		kunmap_atomic(src_p);

		flush_dcache_page(dst_bv.bv_page);

		bio_advance_iter_single(src, src_iter, bytes);
		bio_advance_iter_single(dst, dst_iter, bytes);
	}
}
EXPORT_SYMBOL(bio_copy_data_iter);

/**
 * bio_copy_data - copy contents of data buffers from one bio to another
 * @src: source bio
 * @dst: destination bio
 *
 * Stops when it reaches the end of either @src or @dst - that is, copies
 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
 */
void bio_copy_data(struct bio *dst, struct bio *src)
{
	struct bvec_iter src_iter = src->bi_iter;
	struct bvec_iter dst_iter = dst->bi_iter;

	bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
}
EXPORT_SYMBOL(bio_copy_data);

void bio_free_pages(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all)
		__free_page(bvec->bv_page);
}
EXPORT_SYMBOL(bio_free_pages);

/*
 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
 * for performing direct-IO in BIOs.
 *
 * The problem is that we cannot run set_page_dirty() from interrupt context
 * because the required locks are not interrupt-safe. So what we can do is to
 * mark the pages dirty _before_ performing IO. And in interrupt context,
 * check that the pages are still dirty. If so, fine. If not, redirty them
 * in process context.
 *
 * We special-case compound pages here: normally this means reads into hugetlb
 * pages. The logic in here doesn't really work right for compound pages
 * because the VM does not uniformly chase down the head page in all cases.
 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
 * handle them at all. So we skip compound pages here at an early stage.
 *
 * Note that this code is very hard to test under normal circumstances because
 * direct-io pins the pages with get_user_pages(). This makes
 * is_page_cache_freeable return false, and the VM will not clean the pages.
 * But other code (eg, flusher threads) could clean the pages if they are mapped
 * pagecache.
 *
 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
 * deferred bio dirtying paths.
 */

/*
 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
 */
void bio_set_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		if (!PageCompound(bvec->bv_page))
			set_page_dirty_lock(bvec->bv_page);
	}
}

/*
 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
 * If they are, then fine. If, however, some pages are clean then they must
 * have been written out during the direct-IO read. So we take another ref on
 * the BIO and re-dirty the pages in process context.
 *
 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
 * here on. It will run one put_page() against each page and will run one
 * bio_put() against the BIO.
 */

static void bio_dirty_fn(struct work_struct *work);

static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
static DEFINE_SPINLOCK(bio_dirty_lock);
static struct bio *bio_dirty_list;

/*
 * This runs in process context
 */
static void bio_dirty_fn(struct work_struct *work)
{
	struct bio *bio, *next;

	spin_lock_irq(&bio_dirty_lock);
	next = bio_dirty_list;
	bio_dirty_list = NULL;
	spin_unlock_irq(&bio_dirty_lock);

	while ((bio = next) != NULL) {
		next = bio->bi_private;

		bio_release_pages(bio, true);
		bio_put(bio);
	}
}

void bio_check_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	unsigned long flags;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
			goto defer;
	}

	bio_release_pages(bio, false);
	bio_put(bio);
	return;
defer:
	spin_lock_irqsave(&bio_dirty_lock, flags);
	bio->bi_private = bio_dirty_list;
	bio_dirty_list = bio;
	spin_unlock_irqrestore(&bio_dirty_lock, flags);
	schedule_work(&bio_dirty_work);
}

static inline bool bio_remaining_done(struct bio *bio)
{
	/*
	 * If we're not chaining, then ->__bi_remaining is always 1 and
	 * we always end io on the first invocation.
	 */
	if (!bio_flagged(bio, BIO_CHAIN))
		return true;

	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);

	if (atomic_dec_and_test(&bio->__bi_remaining)) {
		bio_clear_flag(bio, BIO_CHAIN);
		return true;
	}

	return false;
}

/**
 * bio_endio - end I/O on a bio
 * @bio: bio
 *
 * Description:
 *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
 *   way to end I/O on a bio. No one should call bi_end_io() directly on a
 *   bio unless they own it and thus know that it has an end_io function.
 *
 *   bio_endio() can be called several times on a bio that has been chained
 *   using bio_chain(). The ->bi_end_io() function will only be called the
 *   last time.
 **/
void bio_endio(struct bio *bio)
{
again:
	if (!bio_remaining_done(bio))
		return;
	if (!bio_integrity_endio(bio))
		return;

	if (bio->bi_bdev)
		rq_qos_done_bio(bio->bi_bdev->bd_disk->queue, bio);

	if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_complete(bio->bi_bdev->bd_disk->queue, bio);
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
	}

	/*
	 * Need to have a real endio function for chained bios, otherwise
	 * various corner cases will break (like stacking block devices that
	 * save/restore bi_end_io) - however, we want to avoid unbounded
	 * recursion and blowing the stack. Tail call optimization would
	 * handle this, but compiling with frame pointers also disables
	 * gcc's sibling call optimization.
	 */
	if (bio->bi_end_io == bio_chain_endio) {
		bio = __bio_chain_endio(bio);
		goto again;
	}

	blk_throtl_bio_endio(bio);
	/* release cgroup info */
	bio_uninit(bio);
	if (bio->bi_end_io)
		bio->bi_end_io(bio);
}
EXPORT_SYMBOL(bio_endio);

/**
 * bio_split - split a bio
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Allocates and returns a new bio which represents @sectors from the start of
 * @bio, and updates @bio to represent the remaining sectors.
 *
 * Unless this is a discard request the newly allocated bio will point
 * to @bio's bi_io_vec. It is the caller's responsibility to ensure that
 * neither @bio nor @bs are freed before the split bio.
 */
struct bio *bio_split(struct bio *bio, int sectors,
		      gfp_t gfp, struct bio_set *bs)
{
	struct bio *split;

	BUG_ON(sectors <= 0);
	BUG_ON(sectors >= bio_sectors(bio));

	/* Zone append commands cannot be split */
	if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
		return NULL;

	split = bio_clone_fast(bio, gfp, bs);
	if (!split)
		return NULL;

	split->bi_iter.bi_size = sectors << 9;

	if (bio_integrity(split))
		bio_integrity_trim(split);

	bio_advance(bio, split->bi_iter.bi_size);

	if (bio_flagged(bio, BIO_TRACE_COMPLETION))
		bio_set_flag(split, BIO_TRACE_COMPLETION);

	return split;
}
EXPORT_SYMBOL(bio_split);

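/*
 * Illustrative sketch (editor's addition): the common splitting pattern in
 * stacking drivers is to split off the front, chain it to the remainder, and
 * resubmit the remainder, roughly as below. "max_sectors" and "split_bs" are
 * hypothetical:
 *
 *	if (bio_sectors(bio) > max_sectors) {
 *		struct bio *split = bio_split(bio, max_sectors, GFP_NOIO,
 *					      &split_bs);
 *
 *		bio_chain(split, bio);
 *		submit_bio_noacct(bio);	// requeue the remainder
 *		bio = split;		// carry on with the front part
 *	}
 */
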
/**
 * bio_trim - trim a bio
 * @bio:	bio to trim
 * @offset:	number of sectors to trim from the front of @bio
 * @size:	size we want to trim @bio to, in sectors
 */
void bio_trim(struct bio *bio, int offset, int size)
{
	/* 'bio' is a cloned bio which we need to trim to match
	 * the given offset and size.
	 */

	size <<= 9;
	if (offset == 0 && size == bio->bi_iter.bi_size)
		return;

	bio_advance(bio, offset << 9);
	bio->bi_iter.bi_size = size;

	if (bio_integrity(bio))
		bio_integrity_trim(bio);

}
EXPORT_SYMBOL_GPL(bio_trim);

/*
 * create memory pools for biovec's in a bio_set.
 * use the global biovec slabs created for general use.
 */
int biovec_init_pool(mempool_t *pool, int pool_entries)
{
	struct biovec_slab *bp = bvec_slabs + ARRAY_SIZE(bvec_slabs) - 1;

	return mempool_init_slab_pool(pool, pool_entries, bp->slab);
}

/*
 * bioset_exit - exit a bioset initialized with bioset_init()
 *
 * May be called on a zeroed but uninitialized bioset (i.e. allocated with
 * kzalloc()).
 */
void bioset_exit(struct bio_set *bs)
{
	if (bs->rescue_workqueue)
		destroy_workqueue(bs->rescue_workqueue);
	bs->rescue_workqueue = NULL;

	mempool_exit(&bs->bio_pool);
	mempool_exit(&bs->bvec_pool);

	bioset_integrity_free(bs);
	if (bs->bio_slab)
		bio_put_slab(bs);
	bs->bio_slab = NULL;
}
EXPORT_SYMBOL(bioset_exit);

/**
 * bioset_init - Initialize a bio_set
 * @bs:		pool to initialize
 * @pool_size:	Number of bio and bio_vecs to cache in the mempool
 * @front_pad:	Number of bytes to allocate in front of the returned bio
 * @flags:	Flags to modify behavior, currently %BIOSET_NEED_BVECS
 *		and %BIOSET_NEED_RESCUER
 *
 * Description:
 *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
 *    to ask for a number of bytes to be allocated in front of the bio.
 *    Front pad allocation is useful for embedding the bio inside
 *    another structure, to avoid allocating extra data to go with the bio.
 *    Note that the bio must be embedded at the END of that structure always,
 *    or things will break badly.
 *    If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
 *    for allocating iovecs. This pool is not needed e.g. for bio_clone_fast().
 *    If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used
 *    to dispatch queued requests when the mempool runs out of space.
 *
 */
int bioset_init(struct bio_set *bs,
		unsigned int pool_size,
		unsigned int front_pad,
		int flags)
{
	bs->front_pad = front_pad;
	if (flags & BIOSET_NEED_BVECS)
		bs->back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
	else
		bs->back_pad = 0;

	spin_lock_init(&bs->rescue_lock);
	bio_list_init(&bs->rescue_list);
	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);

	bs->bio_slab = bio_find_or_create_slab(bs);
	if (!bs->bio_slab)
		return -ENOMEM;

	if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab))
		goto bad;

	if ((flags & BIOSET_NEED_BVECS) &&
	    biovec_init_pool(&bs->bvec_pool, pool_size))
		goto bad;

	if (!(flags & BIOSET_NEED_RESCUER))
		return 0;

	bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
	if (!bs->rescue_workqueue)
		goto bad;

	return 0;
bad:
	bioset_exit(bs);
	return -ENOMEM;
}
EXPORT_SYMBOL(bioset_init);

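/*
 * Illustrative sketch (editor's addition): a driver-private bio_set with
 * front padding, matching the embedding pattern shown for bio_alloc_bioset()
 * above. "struct my_io" and "my_bio_set" are hypothetical:
 *
 *	static struct bio_set my_bio_set;
 *
 *	ret = bioset_init(&my_bio_set, 64, offsetof(struct my_io, bio),
 *			  BIOSET_NEED_BVECS);
 *	if (ret)
 *		return ret;
 *	...
 *	bioset_exit(&my_bio_set);	// on teardown
 */
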
/*
 * Initialize and setup a new bio_set, based on the settings from
 * another bio_set.
 */
int bioset_init_from_src(struct bio_set *bs, struct bio_set *src)
{
	int flags;

	flags = 0;
	if (src->bvec_pool.min_nr)
		flags |= BIOSET_NEED_BVECS;
	if (src->rescue_workqueue)
		flags |= BIOSET_NEED_RESCUER;

	return bioset_init(bs, src->bio_pool.min_nr, src->front_pad, flags);
}
EXPORT_SYMBOL(bioset_init_from_src);

static int __init init_bio(void)
{
	int i;

	bio_integrity_init();

	for (i = 0; i < ARRAY_SIZE(bvec_slabs); i++) {
		struct biovec_slab *bvs = bvec_slabs + i;

		bvs->slab = kmem_cache_create(bvs->name,
				bvs->nr_vecs * sizeof(struct bio_vec), 0,
				SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
	}

	if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS))
		panic("bio: can't allocate bios\n");

	if (bioset_integrity_create(&fs_bio_set, BIO_POOL_SIZE))
		panic("bio: can't create integrity pool\n");

	return 0;
}
subsys_initcall(init_bio);