git.ipfire.org: people/ms/linux.git, blame of fs/bio.c at commit "block: Add bio_alloc_pages()"
1da177e4 1/*
0fe23479 2 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
1da177e4
LT
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public Licens
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
16 *
17 */
18#include <linux/mm.h>
19#include <linux/swap.h>
20#include <linux/bio.h>
21#include <linux/blkdev.h>
852c788f 22#include <linux/iocontext.h>
1da177e4
LT
23#include <linux/slab.h>
24#include <linux/init.h>
25#include <linux/kernel.h>
630d9c47 26#include <linux/export.h>
1da177e4
LT
27#include <linux/mempool.h>
28#include <linux/workqueue.h>
852c788f 29#include <linux/cgroup.h>
f1970baf 30#include <scsi/sg.h> /* for struct sg_iovec */
1da177e4 31
55782138 32#include <trace/events/block.h>
0bfc2455 33
392ddc32
JA
34/*
35 * Test patch to inline a certain number of bi_io_vec's inside the bio
36 * itself, to shrink a bio data allocation from two mempool calls to one
37 */
38#define BIO_INLINE_VECS 4
39
6feef531 40static mempool_t *bio_split_pool __read_mostly;
1da177e4 41
1da177e4
LT
42/*
43 * if you change this list, also change bvec_alloc or things will
44 * break badly! cannot be bigger than what you can fit into an
45 * unsigned short
46 */
1da177e4 47#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
df677140 48static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
1da177e4
LT
49 BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
50};
51#undef BV
52
1da177e4
LT
53/*
54 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
55 * IO code that does not need private memory pools.
56 */
51d654e1 57struct bio_set *fs_bio_set;
3f86a82a 58EXPORT_SYMBOL(fs_bio_set);
1da177e4 59
bb799ca0
JA
60/*
61 * Our slab pool management
62 */
63struct bio_slab {
64 struct kmem_cache *slab;
65 unsigned int slab_ref;
66 unsigned int slab_size;
67 char name[8];
68};
69static DEFINE_MUTEX(bio_slab_lock);
70static struct bio_slab *bio_slabs;
71static unsigned int bio_slab_nr, bio_slab_max;
72
73static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
74{
75 unsigned int sz = sizeof(struct bio) + extra_size;
76 struct kmem_cache *slab = NULL;
389d7b26 77 struct bio_slab *bslab, *new_bio_slabs;
386bc35a 78 unsigned int new_bio_slab_max;
bb799ca0
JA
79 unsigned int i, entry = -1;
80
81 mutex_lock(&bio_slab_lock);
82
83 i = 0;
84 while (i < bio_slab_nr) {
f06f135d 85 bslab = &bio_slabs[i];
bb799ca0
JA
86
87 if (!bslab->slab && entry == -1)
88 entry = i;
89 else if (bslab->slab_size == sz) {
90 slab = bslab->slab;
91 bslab->slab_ref++;
92 break;
93 }
94 i++;
95 }
96
97 if (slab)
98 goto out_unlock;
99
100 if (bio_slab_nr == bio_slab_max && entry == -1) {
386bc35a 101 new_bio_slab_max = bio_slab_max << 1;
389d7b26 102 new_bio_slabs = krealloc(bio_slabs,
386bc35a 103 new_bio_slab_max * sizeof(struct bio_slab),
389d7b26
AK
104 GFP_KERNEL);
105 if (!new_bio_slabs)
bb799ca0 106 goto out_unlock;
386bc35a 107 bio_slab_max = new_bio_slab_max;
389d7b26 108 bio_slabs = new_bio_slabs;
bb799ca0
JA
109 }
110 if (entry == -1)
111 entry = bio_slab_nr++;
112
113 bslab = &bio_slabs[entry];
114
115 snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
116 slab = kmem_cache_create(bslab->name, sz, 0, SLAB_HWCACHE_ALIGN, NULL);
117 if (!slab)
118 goto out_unlock;
119
80cdc6da 120 printk(KERN_INFO "bio: create slab <%s> at %d\n", bslab->name, entry);
bb799ca0
JA
121 bslab->slab = slab;
122 bslab->slab_ref = 1;
123 bslab->slab_size = sz;
124out_unlock:
125 mutex_unlock(&bio_slab_lock);
126 return slab;
127}
128
129static void bio_put_slab(struct bio_set *bs)
130{
131 struct bio_slab *bslab = NULL;
132 unsigned int i;
133
134 mutex_lock(&bio_slab_lock);
135
136 for (i = 0; i < bio_slab_nr; i++) {
137 if (bs->bio_slab == bio_slabs[i].slab) {
138 bslab = &bio_slabs[i];
139 break;
140 }
141 }
142
143 if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
144 goto out;
145
146 WARN_ON(!bslab->slab_ref);
147
148 if (--bslab->slab_ref)
149 goto out;
150
151 kmem_cache_destroy(bslab->slab);
152 bslab->slab = NULL;
153
154out:
155 mutex_unlock(&bio_slab_lock);
156}
157
7ba1ba12
MP
158unsigned int bvec_nr_vecs(unsigned short idx)
159{
160 return bvec_slabs[idx].nr_vecs;
161}
162
9f060e22 163void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
bb799ca0
JA
164{
165 BIO_BUG_ON(idx >= BIOVEC_NR_POOLS);
166
167 if (idx == BIOVEC_MAX_IDX)
9f060e22 168 mempool_free(bv, pool);
bb799ca0
JA
169 else {
170 struct biovec_slab *bvs = bvec_slabs + idx;
171
172 kmem_cache_free(bvs->slab, bv);
173 }
174}
175
9f060e22
KO
176struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
177 mempool_t *pool)
1da177e4
LT
178{
179 struct bio_vec *bvl;
1da177e4 180
7ff9345f
JA
181 /*
182 * see comment near bvec_array define!
183 */
184 switch (nr) {
185 case 1:
186 *idx = 0;
187 break;
188 case 2 ... 4:
189 *idx = 1;
190 break;
191 case 5 ... 16:
192 *idx = 2;
193 break;
194 case 17 ... 64:
195 *idx = 3;
196 break;
197 case 65 ... 128:
198 *idx = 4;
199 break;
200 case 129 ... BIO_MAX_PAGES:
201 *idx = 5;
202 break;
203 default:
204 return NULL;
205 }
206
207 /*
208 * idx now points to the pool we want to allocate from. only the
209 * 1-vec entry pool is mempool backed.
210 */
211 if (*idx == BIOVEC_MAX_IDX) {
212fallback:
9f060e22 213 bvl = mempool_alloc(pool, gfp_mask);
7ff9345f
JA
214 } else {
215 struct biovec_slab *bvs = bvec_slabs + *idx;
216 gfp_t __gfp_mask = gfp_mask & ~(__GFP_WAIT | __GFP_IO);
217
0a0d96b0 218 /*
7ff9345f
JA
219 * Make this allocation restricted and don't dump info on
220 * allocation failures, since we'll fallback to the mempool
221 * in case of failure.
0a0d96b0 222 */
7ff9345f 223 __gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
1da177e4 224
0a0d96b0 225 /*
7ff9345f
JA
226 * Try a slab allocation. If this fails and __GFP_WAIT
227 * is set, retry with the 1-entry mempool
0a0d96b0 228 */
7ff9345f
JA
229 bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
230 if (unlikely(!bvl && (gfp_mask & __GFP_WAIT))) {
231 *idx = BIOVEC_MAX_IDX;
232 goto fallback;
233 }
234 }
235
1da177e4
LT
236 return bvl;
237}
238
4254bba1 239static void __bio_free(struct bio *bio)
1da177e4 240{
4254bba1 241 bio_disassociate_task(bio);
1da177e4 242
7ba1ba12 243 if (bio_integrity(bio))
1e2a410f 244 bio_integrity_free(bio);
4254bba1 245}
7ba1ba12 246
4254bba1
KO
247static void bio_free(struct bio *bio)
248{
249 struct bio_set *bs = bio->bi_pool;
250 void *p;
251
252 __bio_free(bio);
253
254 if (bs) {
255 if (bio_has_allocated_vec(bio))
9f060e22 256 bvec_free(bs->bvec_pool, bio->bi_io_vec, BIO_POOL_IDX(bio));
4254bba1
KO
257
258 /*
259 * If we have front padding, adjust the bio pointer before freeing
260 */
261 p = bio;
bb799ca0
JA
262 p -= bs->front_pad;
263
4254bba1
KO
264 mempool_free(p, bs->bio_pool);
265 } else {
266 /* Bio was allocated by bio_kmalloc() */
267 kfree(bio);
268 }
3676347a
PO
269}
270
858119e1 271void bio_init(struct bio *bio)
1da177e4 272{
2b94de55 273 memset(bio, 0, sizeof(*bio));
1da177e4 274 bio->bi_flags = 1 << BIO_UPTODATE;
1da177e4 275 atomic_set(&bio->bi_cnt, 1);
1da177e4 276}
a112a71d 277EXPORT_SYMBOL(bio_init);
1da177e4 278
f44b48c7
KO
279/**
280 * bio_reset - reinitialize a bio
281 * @bio: bio to reset
282 *
283 * Description:
284 * After calling bio_reset(), @bio will be in the same state as a freshly
285 * allocated bio returned by bio_alloc_bioset() - the only fields that are
286 * preserved are the ones that are initialized by bio_alloc_bioset(). See
287 * comment in struct bio.
288 */
289void bio_reset(struct bio *bio)
290{
291 unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
292
4254bba1 293 __bio_free(bio);
f44b48c7
KO
294
295 memset(bio, 0, BIO_RESET_BYTES);
296 bio->bi_flags = flags|(1 << BIO_UPTODATE);
297}
298EXPORT_SYMBOL(bio_reset);
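
/*
 * Editorial illustration, not part of the original fs/bio.c: a minimal sketch
 * of reusing a long-lived bio with bio_reset().  The device, sector and
 * completion handler are assumed to come from the caller; after the reset,
 * everything below bi_max_vecs (bi_end_io, bi_private, ...) must be filled in
 * again before the bio is resubmitted.
 */
static void example_recycle_bio(struct bio *bio, struct block_device *bdev,
                                sector_t sector, bio_end_io_t *end_io)
{
        bio_reset(bio);                 /* back to freshly-allocated state */

        bio->bi_bdev   = bdev;
        bio->bi_sector = sector;
        bio->bi_end_io = end_io;        /* cleared by the reset above */
}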
299
df2cb6da
KO
300static void bio_alloc_rescue(struct work_struct *work)
301{
302 struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
303 struct bio *bio;
304
305 while (1) {
306 spin_lock(&bs->rescue_lock);
307 bio = bio_list_pop(&bs->rescue_list);
308 spin_unlock(&bs->rescue_lock);
309
310 if (!bio)
311 break;
312
313 generic_make_request(bio);
314 }
315}
316
317static void punt_bios_to_rescuer(struct bio_set *bs)
318{
319 struct bio_list punt, nopunt;
320 struct bio *bio;
321
322 /*
323 * In order to guarantee forward progress we must punt only bios that
324 * were allocated from this bio_set; otherwise, if there was a bio on
325 * there for a stacking driver higher up in the stack, processing it
326 * could require allocating bios from this bio_set, and doing that from
327 * our own rescuer would be bad.
328 *
329 * Since bio lists are singly linked, pop them all instead of trying to
330 * remove from the middle of the list:
331 */
332
333 bio_list_init(&punt);
334 bio_list_init(&nopunt);
335
336 while ((bio = bio_list_pop(current->bio_list)))
337 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
338
339 *current->bio_list = nopunt;
340
341 spin_lock(&bs->rescue_lock);
342 bio_list_merge(&bs->rescue_list, &punt);
343 spin_unlock(&bs->rescue_lock);
344
345 queue_work(bs->rescue_workqueue, &bs->rescue_work);
346}
347
1da177e4
LT
348/**
349 * bio_alloc_bioset - allocate a bio for I/O
350 * @gfp_mask: the GFP_ mask given to the slab allocator
351 * @nr_iovecs: number of iovecs to pre-allocate
db18efac 352 * @bs: the bio_set to allocate from.
1da177e4
LT
353 *
354 * Description:
3f86a82a
KO
355 * If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
356 * backed by the @bs's mempool.
357 *
358 * When @bs is not NULL, if %__GFP_WAIT is set then bio_alloc will always be
359 * able to allocate a bio. This is due to the mempool guarantees. To make this
360 * work, callers must never allocate more than 1 bio at a time from this pool.
361 * Callers that need to allocate more than 1 bio must always submit the
362 * previously allocated bio for IO before attempting to allocate a new one.
363 * Failure to do so can cause deadlocks under memory pressure.
364 *
df2cb6da
KO
365 * Note that when running under generic_make_request() (i.e. any block
366 * driver), bios are not submitted until after you return - see the code in
367 * generic_make_request() that converts recursion into iteration, to prevent
368 * stack overflows.
369 *
370 * This would normally mean allocating multiple bios under
371 * generic_make_request() would be susceptible to deadlocks, but we have
372 * deadlock avoidance code that resubmits any blocked bios from a rescuer
373 * thread.
374 *
375 * However, we do not guarantee forward progress for allocations from other
376 * mempools. Doing multiple allocations from the same mempool under
377 * generic_make_request() should be avoided - instead, use bio_set's front_pad
378 * for per bio allocations.
379 *
3f86a82a
KO
380 * RETURNS:
381 * Pointer to new bio on success, NULL on failure.
382 */
dd0fc66f 383struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
1da177e4 384{
df2cb6da 385 gfp_t saved_gfp = gfp_mask;
3f86a82a
KO
386 unsigned front_pad;
387 unsigned inline_vecs;
451a9ebf 388 unsigned long idx = BIO_POOL_NONE;
34053979 389 struct bio_vec *bvl = NULL;
451a9ebf
TH
390 struct bio *bio;
391 void *p;
392
3f86a82a
KO
393 if (!bs) {
394 if (nr_iovecs > UIO_MAXIOV)
395 return NULL;
396
397 p = kmalloc(sizeof(struct bio) +
398 nr_iovecs * sizeof(struct bio_vec),
399 gfp_mask);
400 front_pad = 0;
401 inline_vecs = nr_iovecs;
402 } else {
df2cb6da
KO
403 /*
404 * generic_make_request() converts recursion to iteration; this
405 * means if we're running beneath it, any bios we allocate and
406 * submit will not be submitted (and thus freed) until after we
407 * return.
408 *
409 * This exposes us to a potential deadlock if we allocate
410 * multiple bios from the same bio_set() while running
411 * underneath generic_make_request(). If we were to allocate
412 * multiple bios (say a stacking block driver that was splitting
413 * bios), we would deadlock if we exhausted the mempool's
414 * reserve.
415 *
416 * We solve this, and guarantee forward progress, with a rescuer
417 * workqueue per bio_set. If we go to allocate and there are
418 * bios on current->bio_list, we first try the allocation
419 * without __GFP_WAIT; if that fails, we punt those bios we
420 * would be blocking to the rescuer workqueue before we retry
421 * with the original gfp_flags.
422 */
423
424 if (current->bio_list && !bio_list_empty(current->bio_list))
425 gfp_mask &= ~__GFP_WAIT;
426
3f86a82a 427 p = mempool_alloc(bs->bio_pool, gfp_mask);
df2cb6da
KO
428 if (!p && gfp_mask != saved_gfp) {
429 punt_bios_to_rescuer(bs);
430 gfp_mask = saved_gfp;
431 p = mempool_alloc(bs->bio_pool, gfp_mask);
432 }
433
3f86a82a
KO
434 front_pad = bs->front_pad;
435 inline_vecs = BIO_INLINE_VECS;
436 }
437
451a9ebf
TH
438 if (unlikely(!p))
439 return NULL;
1da177e4 440
3f86a82a 441 bio = p + front_pad;
34053979
IM
442 bio_init(bio);
443
3f86a82a 444 if (nr_iovecs > inline_vecs) {
9f060e22 445 bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
df2cb6da
KO
446 if (!bvl && gfp_mask != saved_gfp) {
447 punt_bios_to_rescuer(bs);
448 gfp_mask = saved_gfp;
9f060e22 449 bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
df2cb6da
KO
450 }
451
34053979
IM
452 if (unlikely(!bvl))
453 goto err_free;
3f86a82a
KO
454 } else if (nr_iovecs) {
455 bvl = bio->bi_inline_vecs;
1da177e4 456 }
3f86a82a
KO
457
458 bio->bi_pool = bs;
34053979
IM
459 bio->bi_flags |= idx << BIO_POOL_OFFSET;
460 bio->bi_max_vecs = nr_iovecs;
34053979 461 bio->bi_io_vec = bvl;
1da177e4 462 return bio;
34053979
IM
463
464err_free:
451a9ebf 465 mempool_free(p, bs->bio_pool);
34053979 466 return NULL;
1da177e4 467}
a112a71d 468EXPORT_SYMBOL(bio_alloc_bioset);
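
/*
 * Editorial illustration, not part of the original fs/bio.c: the two
 * allocation flavours described in the comment above.  With a bio_set and a
 * gfp mask that includes __GFP_WAIT (GFP_NOIO does), the mempool guarantees
 * the allocation succeeds as long as the caller submits each bio before
 * allocating the next; with a NULL bio_set the bio comes from kmalloc() and
 * may simply fail.
 */
static struct bio *example_alloc_bio(unsigned int nr_vecs, bool need_guarantee)
{
        if (need_guarantee)
                return bio_alloc_bioset(GFP_NOIO, nr_vecs, fs_bio_set);

        return bio_alloc_bioset(GFP_NOIO, nr_vecs, NULL);  /* may return NULL */
}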
1da177e4 469
1da177e4
LT
470void zero_fill_bio(struct bio *bio)
471{
472 unsigned long flags;
473 struct bio_vec *bv;
474 int i;
475
476 bio_for_each_segment(bv, bio, i) {
477 char *data = bvec_kmap_irq(bv, &flags);
478 memset(data, 0, bv->bv_len);
479 flush_dcache_page(bv->bv_page);
480 bvec_kunmap_irq(data, &flags);
481 }
482}
483EXPORT_SYMBOL(zero_fill_bio);
484
485/**
486 * bio_put - release a reference to a bio
487 * @bio: bio to release reference to
488 *
489 * Description:
490 * Put a reference to a &struct bio, either one you have gotten with
ad0bf110 491 * bio_alloc, bio_get or bio_clone. The last put of a bio will free it.
1da177e4
LT
492 **/
493void bio_put(struct bio *bio)
494{
495 BIO_BUG_ON(!atomic_read(&bio->bi_cnt));
496
497 /*
498 * last put frees it
499 */
4254bba1
KO
500 if (atomic_dec_and_test(&bio->bi_cnt))
501 bio_free(bio);
1da177e4 502}
a112a71d 503EXPORT_SYMBOL(bio_put);
1da177e4 504
165125e1 505inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
1da177e4
LT
506{
507 if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
508 blk_recount_segments(q, bio);
509
510 return bio->bi_phys_segments;
511}
a112a71d 512EXPORT_SYMBOL(bio_phys_segments);
1da177e4 513
1da177e4
LT
514/**
515 * __bio_clone - clone a bio
516 * @bio: destination bio
517 * @bio_src: bio to clone
518 *
519 * Clone a &bio. Caller will own the returned bio, but not
520 * the actual data it points to. Reference count of returned
521 * bio will be one.
522 */
858119e1 523void __bio_clone(struct bio *bio, struct bio *bio_src)
1da177e4 524{
e525e153
AM
525 memcpy(bio->bi_io_vec, bio_src->bi_io_vec,
526 bio_src->bi_max_vecs * sizeof(struct bio_vec));
1da177e4 527
5d84070e
JA
528 /*
529 * most users will be overriding ->bi_bdev with a new target,
530 * so we don't set nor calculate new physical/hw segment counts here
531 */
1da177e4
LT
532 bio->bi_sector = bio_src->bi_sector;
533 bio->bi_bdev = bio_src->bi_bdev;
534 bio->bi_flags |= 1 << BIO_CLONED;
535 bio->bi_rw = bio_src->bi_rw;
1da177e4
LT
536 bio->bi_vcnt = bio_src->bi_vcnt;
537 bio->bi_size = bio_src->bi_size;
a5453be4 538 bio->bi_idx = bio_src->bi_idx;
1da177e4 539}
a112a71d 540EXPORT_SYMBOL(__bio_clone);
1da177e4
LT
541
542/**
bf800ef1 543 * bio_clone_bioset - clone a bio
1da177e4
LT
544 * @bio: bio to clone
545 * @gfp_mask: allocation priority
bf800ef1 546 * @bs: bio_set to allocate from
1da177e4
LT
547 *
548 * Like __bio_clone, only also allocates the returned bio
549 */
bf800ef1
KO
550struct bio *bio_clone_bioset(struct bio *bio, gfp_t gfp_mask,
551 struct bio_set *bs)
1da177e4 552{
bf800ef1 553 struct bio *b;
1da177e4 554
bf800ef1 555 b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, bs);
7ba1ba12
MP
556 if (!b)
557 return NULL;
558
7ba1ba12
MP
559 __bio_clone(b, bio);
560
561 if (bio_integrity(bio)) {
562 int ret;
563
1e2a410f 564 ret = bio_integrity_clone(b, bio, gfp_mask);
7ba1ba12 565
059ea331
LZ
566 if (ret < 0) {
567 bio_put(b);
7ba1ba12 568 return NULL;
059ea331 569 }
3676347a 570 }
1da177e4
LT
571
572 return b;
573}
bf800ef1 574EXPORT_SYMBOL(bio_clone_bioset);
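
/*
 * Editorial illustration, not part of the original fs/bio.c: roughly how a
 * stacking driver might clone an incoming bio before redirecting it to a
 * lower device.  The bio_set, lower block device and completion handler are
 * assumed to belong to the (hypothetical) driver.
 */
static struct bio *example_clone_and_retarget(struct bio *orig,
                                              struct bio_set *driver_bs,
                                              struct block_device *lower_bdev,
                                              bio_end_io_t *end_io)
{
        struct bio *clone = bio_clone_bioset(orig, GFP_NOIO, driver_bs);

        if (!clone)
                return NULL;

        clone->bi_bdev    = lower_bdev; /* retarget the clone */
        clone->bi_end_io  = end_io;
        clone->bi_private = orig;       /* completion finds the original */
        return clone;
}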
1da177e4
LT
575
576/**
577 * bio_get_nr_vecs - return approx number of vecs
578 * @bdev: I/O target
579 *
580 * Return the approximate number of pages we can send to this target.
581 * There's no guarantee that you will be able to fit this number of pages
582 * into a bio, it does not account for dynamic restrictions that vary
583 * on offset.
584 */
585int bio_get_nr_vecs(struct block_device *bdev)
586{
165125e1 587 struct request_queue *q = bdev_get_queue(bdev);
f908ee94
BS
588 int nr_pages;
589
590 nr_pages = min_t(unsigned,
5abebfdd
KO
591 queue_max_segments(q),
592 queue_max_sectors(q) / (PAGE_SIZE >> 9) + 1);
f908ee94
BS
593
594 return min_t(unsigned, nr_pages, BIO_MAX_PAGES);
595
1da177e4 596}
a112a71d 597EXPORT_SYMBOL(bio_get_nr_vecs);
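
/*
 * Editorial illustration, not part of the original fs/bio.c: typical use of
 * bio_get_nr_vecs() when sizing a bio for a multi-page transfer.  The count
 * is only an upper bound; bio_add_page() may still refuse pages later.
 */
static struct bio *example_alloc_for_pages(struct block_device *bdev,
                                           unsigned int nr_pages_wanted)
{
        unsigned int nr = min_t(unsigned int, nr_pages_wanted,
                                bio_get_nr_vecs(bdev));

        return bio_alloc(GFP_NOIO, nr);
}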
1da177e4 598
165125e1 599static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
defd94b7
MC
600 *page, unsigned int len, unsigned int offset,
601 unsigned short max_sectors)
1da177e4
LT
602{
603 int retried_segments = 0;
604 struct bio_vec *bvec;
605
606 /*
607 * cloned bio must not modify vec list
608 */
609 if (unlikely(bio_flagged(bio, BIO_CLONED)))
610 return 0;
611
80cfd548 612 if (((bio->bi_size + len) >> 9) > max_sectors)
1da177e4
LT
613 return 0;
614
80cfd548
JA
615 /*
616 * For filesystems with a blocksize smaller than the pagesize
617 * we will often be called with the same page as last time and
618 * a consecutive offset. Optimize this special case.
619 */
620 if (bio->bi_vcnt > 0) {
621 struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
622
623 if (page == prev->bv_page &&
624 offset == prev->bv_offset + prev->bv_len) {
1d616585 625 unsigned int prev_bv_len = prev->bv_len;
80cfd548 626 prev->bv_len += len;
cc371e66
AK
627
628 if (q->merge_bvec_fn) {
629 struct bvec_merge_data bvm = {
1d616585
DM
630 /* prev_bvec is already charged in
631 bi_size, discharge it in order to
632 simulate merging updated prev_bvec
633 as new bvec. */
cc371e66
AK
634 .bi_bdev = bio->bi_bdev,
635 .bi_sector = bio->bi_sector,
1d616585 636 .bi_size = bio->bi_size - prev_bv_len,
cc371e66
AK
637 .bi_rw = bio->bi_rw,
638 };
639
8bf8c376 640 if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len) {
cc371e66
AK
641 prev->bv_len -= len;
642 return 0;
643 }
80cfd548
JA
644 }
645
646 goto done;
647 }
648 }
649
650 if (bio->bi_vcnt >= bio->bi_max_vecs)
1da177e4
LT
651 return 0;
652
653 /*
654 * we might lose a segment or two here, but rather that than
655 * make this too complex.
656 */
657
8a78362c 658 while (bio->bi_phys_segments >= queue_max_segments(q)) {
1da177e4
LT
659
660 if (retried_segments)
661 return 0;
662
663 retried_segments = 1;
664 blk_recount_segments(q, bio);
665 }
666
667 /*
668 * setup the new entry, we might clear it again later if we
669 * cannot add the page
670 */
671 bvec = &bio->bi_io_vec[bio->bi_vcnt];
672 bvec->bv_page = page;
673 bvec->bv_len = len;
674 bvec->bv_offset = offset;
675
676 /*
677 * if queue has other restrictions (eg varying max sector size
678 * depending on offset), it can specify a merge_bvec_fn in the
679 * queue to get further control
680 */
681 if (q->merge_bvec_fn) {
cc371e66
AK
682 struct bvec_merge_data bvm = {
683 .bi_bdev = bio->bi_bdev,
684 .bi_sector = bio->bi_sector,
685 .bi_size = bio->bi_size,
686 .bi_rw = bio->bi_rw,
687 };
688
1da177e4
LT
689 /*
690 * merge_bvec_fn() returns number of bytes it can accept
691 * at this offset
692 */
8bf8c376 693 if (q->merge_bvec_fn(q, &bvm, bvec) < bvec->bv_len) {
1da177e4
LT
694 bvec->bv_page = NULL;
695 bvec->bv_len = 0;
696 bvec->bv_offset = 0;
697 return 0;
698 }
699 }
700
701 /* If we may be able to merge these biovecs, force a recount */
b8b3e16c 702 if (bio->bi_vcnt && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
1da177e4
LT
703 bio->bi_flags &= ~(1 << BIO_SEG_VALID);
704
705 bio->bi_vcnt++;
706 bio->bi_phys_segments++;
80cfd548 707 done:
1da177e4
LT
708 bio->bi_size += len;
709 return len;
710}
711
6e68af66
MC
712/**
713 * bio_add_pc_page - attempt to add page to bio
fddfdeaf 714 * @q: the target queue
6e68af66
MC
715 * @bio: destination bio
716 * @page: page to add
717 * @len: vec entry length
718 * @offset: vec entry offset
719 *
720 * Attempt to add a page to the bio_vec maplist. This can fail for a
c6428084
AG
721 * number of reasons, such as the bio being full or target block device
722 * limitations. The target block device must allow bio's up to PAGE_SIZE,
723 * so it is always possible to add a single page to an empty bio.
724 *
725 * This should only be used by REQ_PC bios.
6e68af66 726 */
165125e1 727int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
6e68af66
MC
728 unsigned int len, unsigned int offset)
729{
ae03bf63
MP
730 return __bio_add_page(q, bio, page, len, offset,
731 queue_max_hw_sectors(q));
6e68af66 732}
a112a71d 733EXPORT_SYMBOL(bio_add_pc_page);
6e68af66 734
1da177e4
LT
735/**
736 * bio_add_page - attempt to add page to bio
737 * @bio: destination bio
738 * @page: page to add
739 * @len: vec entry length
740 * @offset: vec entry offset
741 *
742 * Attempt to add a page to the bio_vec maplist. This can fail for a
c6428084
AG
743 * number of reasons, such as the bio being full or target block device
744 * limitations. The target block device must allow bio's up to PAGE_SIZE,
745 * so it is always possible to add a single page to an empty bio.
1da177e4
LT
746 */
747int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
748 unsigned int offset)
749{
defd94b7 750 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
ae03bf63 751 return __bio_add_page(q, bio, page, len, offset, queue_max_sectors(q));
1da177e4 752}
a112a71d 753EXPORT_SYMBOL(bio_add_page);
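
/*
 * Editorial illustration, not part of the original fs/bio.c: the usual
 * pattern for filling a bio page by page.  bio_add_page() returns the number
 * of bytes it accepted, so a short return means "bio is full, submit it and
 * start another".  The pages[] array is assumed to be supplied by the caller.
 */
static int example_fill_bio(struct bio *bio, struct page **pages, int nr_pages)
{
        int i;

        for (i = 0; i < nr_pages; i++)
                if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) < PAGE_SIZE)
                        break;          /* queue or segment limit reached */

        return i;                       /* pages actually added */
}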
1da177e4 754
9e882242
KO
755struct submit_bio_ret {
756 struct completion event;
757 int error;
758};
759
760static void submit_bio_wait_endio(struct bio *bio, int error)
761{
762 struct submit_bio_ret *ret = bio->bi_private;
763
764 ret->error = error;
765 complete(&ret->event);
766}
767
768/**
769 * submit_bio_wait - submit a bio, and wait until it completes
770 * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
771 * @bio: The &struct bio which describes the I/O
772 *
773 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
774 * bio_endio() on failure.
775 */
776int submit_bio_wait(int rw, struct bio *bio)
777{
778 struct submit_bio_ret ret;
779
780 rw |= REQ_SYNC;
781 init_completion(&ret.event);
782 bio->bi_private = &ret;
783 bio->bi_end_io = submit_bio_wait_endio;
784 submit_bio(rw, bio);
785 wait_for_completion(&ret.event);
786
787 return ret.error;
788}
789EXPORT_SYMBOL(submit_bio_wait);
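
/*
 * Editorial illustration, not part of the original fs/bio.c: a synchronous
 * one-page write built on submit_bio_wait().  The page is assumed to be
 * pinned by the caller and the sector to be valid for @bdev.
 */
static int example_sync_write_page(struct block_device *bdev, sector_t sector,
                                   struct page *page)
{
        struct bio *bio = bio_alloc(GFP_NOIO, 1);
        int ret;

        if (!bio)
                return -ENOMEM;

        bio->bi_bdev   = bdev;
        bio->bi_sector = sector;
        bio_add_page(bio, page, PAGE_SIZE, 0); /* always fits in an empty bio */

        ret = submit_bio_wait(WRITE, bio);
        bio_put(bio);
        return ret;
}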
790
054bdf64
KO
791/**
792 * bio_advance - increment/complete a bio by some number of bytes
793 * @bio: bio to advance
794 * @bytes: number of bytes to complete
795 *
796 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
797 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
798 * be updated on the last bvec as well.
799 *
800 * @bio will then represent the remaining, uncompleted portion of the io.
801 */
802void bio_advance(struct bio *bio, unsigned bytes)
803{
804 if (bio_integrity(bio))
805 bio_integrity_advance(bio, bytes);
806
807 bio->bi_sector += bytes >> 9;
808 bio->bi_size -= bytes;
809
810 if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
811 return;
812
813 while (bytes) {
814 if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
815 WARN_ONCE(1, "bio idx %d >= vcnt %d\n",
816 bio->bi_idx, bio->bi_vcnt);
817 break;
818 }
819
820 if (bytes >= bio_iovec(bio)->bv_len) {
821 bytes -= bio_iovec(bio)->bv_len;
822 bio->bi_idx++;
823 } else {
824 bio_iovec(bio)->bv_len -= bytes;
825 bio_iovec(bio)->bv_offset += bytes;
826 bytes = 0;
827 }
828 }
829}
830EXPORT_SYMBOL(bio_advance);
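
/*
 * Editorial illustration, not part of the original fs/bio.c: a driver that
 * completes a request in pieces can use bio_advance() to trim off the bytes
 * it has finished; once bi_size reaches zero the whole bio is done.
 */
static void example_complete_bytes(struct bio *bio, unsigned int done_bytes)
{
        bio_advance(bio, done_bytes);

        if (!bio->bi_size)              /* nothing left outstanding */
                bio_endio(bio, 0);
}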
831
a0787606
KO
832/**
833 * bio_alloc_pages - allocates a single page for each bvec in a bio
834 * @bio: bio to allocate pages for
835 * @gfp_mask: flags for allocation
836 *
837 * Allocates pages up to @bio->bi_vcnt.
838 *
839 * Returns 0 on success, -ENOMEM on failure. On failure, any allocated pages are
840 * freed.
841 */
842int bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
843{
844 int i;
845 struct bio_vec *bv;
846
847 bio_for_each_segment_all(bv, bio, i) {
848 bv->bv_page = alloc_page(gfp_mask);
849 if (!bv->bv_page) {
850 while (--bv >= bio->bi_io_vec)
851 __free_page(bv->bv_page);
852 return -ENOMEM;
853 }
854 }
855
856 return 0;
857}
858EXPORT_SYMBOL(bio_alloc_pages);
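
/*
 * Editorial illustration, not part of the original fs/bio.c: bio_alloc_pages()
 * only fills in bv_page for the first bi_vcnt vecs, so the caller sets up the
 * vector count and lengths first.  A sketch for @nr whole pages:
 */
static struct bio *example_page_backed_bio(unsigned int nr, gfp_t gfp)
{
        struct bio *bio = bio_alloc(gfp, nr);
        unsigned int i;

        if (!bio)
                return NULL;

        bio->bi_vcnt = nr;
        bio->bi_size = nr * PAGE_SIZE;
        for (i = 0; i < nr; i++) {
                bio->bi_io_vec[i].bv_len    = PAGE_SIZE;
                bio->bi_io_vec[i].bv_offset = 0;
        }

        if (bio_alloc_pages(bio, gfp)) {
                bio_put(bio);           /* pages already freed by the helper */
                return NULL;
        }
        return bio;
}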
859
16ac3d63
KO
860/**
861 * bio_copy_data - copy contents of data buffers from one chain of bios to
862 * another
863 * @src: source bio list
864 * @dst: destination bio list
865 *
866 * If @src and @dst are single bios, bi_next must be NULL - otherwise, treats
867 * @src and @dst as linked lists of bios.
868 *
869 * Stops when it reaches the end of either @src or @dst - that is, copies
870 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
871 */
872void bio_copy_data(struct bio *dst, struct bio *src)
873{
874 struct bio_vec *src_bv, *dst_bv;
875 unsigned src_offset, dst_offset, bytes;
876 void *src_p, *dst_p;
877
878 src_bv = bio_iovec(src);
879 dst_bv = bio_iovec(dst);
880
881 src_offset = src_bv->bv_offset;
882 dst_offset = dst_bv->bv_offset;
883
884 while (1) {
885 if (src_offset == src_bv->bv_offset + src_bv->bv_len) {
886 src_bv++;
887 if (src_bv == bio_iovec_idx(src, src->bi_vcnt)) {
888 src = src->bi_next;
889 if (!src)
890 break;
891
892 src_bv = bio_iovec(src);
893 }
894
895 src_offset = src_bv->bv_offset;
896 }
897
898 if (dst_offset == dst_bv->bv_offset + dst_bv->bv_len) {
899 dst_bv++;
900 if (dst_bv == bio_iovec_idx(dst, dst->bi_vcnt)) {
901 dst = dst->bi_next;
902 if (!dst)
903 break;
904
905 dst_bv = bio_iovec(dst);
906 }
907
908 dst_offset = dst_bv->bv_offset;
909 }
910
911 bytes = min(dst_bv->bv_offset + dst_bv->bv_len - dst_offset,
912 src_bv->bv_offset + src_bv->bv_len - src_offset);
913
914 src_p = kmap_atomic(src_bv->bv_page);
915 dst_p = kmap_atomic(dst_bv->bv_page);
916
917 memcpy(dst_p + dst_bv->bv_offset,
918 src_p + src_bv->bv_offset,
919 bytes);
920
921 kunmap_atomic(dst_p);
922 kunmap_atomic(src_p);
923
924 src_offset += bytes;
925 dst_offset += bytes;
926 }
927}
928EXPORT_SYMBOL(bio_copy_data);
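
/*
 * Editorial illustration, not part of the original fs/bio.c: one way a driver
 * might build a bounce copy of a bio it does not want to touch in place -
 * clone the geometry, back the clone with fresh pages, then copy the payload
 * across with bio_copy_data().  Error handling beyond the allocations, and
 * the eventual copy-back for reads, are left to the (hypothetical) caller.
 */
static struct bio *example_bounce_bio(struct bio *src, struct bio_set *bs)
{
        struct bio *bounce = bio_clone_bioset(src, GFP_NOIO, bs);

        if (!bounce)
                return NULL;

        if (bio_alloc_pages(bounce, GFP_NOIO)) {
                bio_put(bounce);
                return NULL;
        }

        bio_copy_data(bounce, src);     /* bounce now carries src's payload */
        return bounce;
}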
929
1da177e4
LT
930struct bio_map_data {
931 struct bio_vec *iovecs;
c5dec1c3 932 struct sg_iovec *sgvecs;
152e283f
FT
933 int nr_sgvecs;
934 int is_our_pages;
1da177e4
LT
935};
936
c5dec1c3 937static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
152e283f
FT
938 struct sg_iovec *iov, int iov_count,
939 int is_our_pages)
1da177e4
LT
940{
941 memcpy(bmd->iovecs, bio->bi_io_vec, sizeof(struct bio_vec) * bio->bi_vcnt);
c5dec1c3
FT
942 memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count);
943 bmd->nr_sgvecs = iov_count;
152e283f 944 bmd->is_our_pages = is_our_pages;
1da177e4
LT
945 bio->bi_private = bmd;
946}
947
948static void bio_free_map_data(struct bio_map_data *bmd)
949{
950 kfree(bmd->iovecs);
c5dec1c3 951 kfree(bmd->sgvecs);
1da177e4
LT
952 kfree(bmd);
953}
954
121f0994
DC
955static struct bio_map_data *bio_alloc_map_data(int nr_segs,
956 unsigned int iov_count,
76029ff3 957 gfp_t gfp_mask)
1da177e4 958{
f3f63c1c
JA
959 struct bio_map_data *bmd;
960
961 if (iov_count > UIO_MAXIOV)
962 return NULL;
1da177e4 963
f3f63c1c 964 bmd = kmalloc(sizeof(*bmd), gfp_mask);
1da177e4
LT
965 if (!bmd)
966 return NULL;
967
76029ff3 968 bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, gfp_mask);
c5dec1c3
FT
969 if (!bmd->iovecs) {
970 kfree(bmd);
971 return NULL;
972 }
973
76029ff3 974 bmd->sgvecs = kmalloc(sizeof(struct sg_iovec) * iov_count, gfp_mask);
c5dec1c3 975 if (bmd->sgvecs)
1da177e4
LT
976 return bmd;
977
c5dec1c3 978 kfree(bmd->iovecs);
1da177e4
LT
979 kfree(bmd);
980 return NULL;
981}
982
aefcc28a 983static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
ecb554a8
FT
984 struct sg_iovec *iov, int iov_count,
985 int to_user, int from_user, int do_free_page)
c5dec1c3
FT
986{
987 int ret = 0, i;
988 struct bio_vec *bvec;
989 int iov_idx = 0;
990 unsigned int iov_off = 0;
c5dec1c3 991
d74c6d51 992 bio_for_each_segment_all(bvec, bio, i) {
c5dec1c3 993 char *bv_addr = page_address(bvec->bv_page);
aefcc28a 994 unsigned int bv_len = iovecs[i].bv_len;
c5dec1c3
FT
995
996 while (bv_len && iov_idx < iov_count) {
997 unsigned int bytes;
0e0c6212 998 char __user *iov_addr;
c5dec1c3
FT
999
1000 bytes = min_t(unsigned int,
1001 iov[iov_idx].iov_len - iov_off, bv_len);
1002 iov_addr = iov[iov_idx].iov_base + iov_off;
1003
1004 if (!ret) {
ecb554a8 1005 if (to_user)
c5dec1c3
FT
1006 ret = copy_to_user(iov_addr, bv_addr,
1007 bytes);
1008
ecb554a8
FT
1009 if (from_user)
1010 ret = copy_from_user(bv_addr, iov_addr,
1011 bytes);
1012
c5dec1c3
FT
1013 if (ret)
1014 ret = -EFAULT;
1015 }
1016
1017 bv_len -= bytes;
1018 bv_addr += bytes;
1019 iov_addr += bytes;
1020 iov_off += bytes;
1021
1022 if (iov[iov_idx].iov_len == iov_off) {
1023 iov_idx++;
1024 iov_off = 0;
1025 }
1026 }
1027
152e283f 1028 if (do_free_page)
c5dec1c3
FT
1029 __free_page(bvec->bv_page);
1030 }
1031
1032 return ret;
1033}
1034
1da177e4
LT
1035/**
1036 * bio_uncopy_user - finish previously mapped bio
1037 * @bio: bio being terminated
1038 *
1039 * Free pages allocated from bio_copy_user() and write back data
1040 * to user space in case of a read.
1041 */
1042int bio_uncopy_user(struct bio *bio)
1043{
1044 struct bio_map_data *bmd = bio->bi_private;
81882766 1045 int ret = 0;
1da177e4 1046
81882766
FT
1047 if (!bio_flagged(bio, BIO_NULL_MAPPED))
1048 ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
ecb554a8
FT
1049 bmd->nr_sgvecs, bio_data_dir(bio) == READ,
1050 0, bmd->is_our_pages);
1da177e4
LT
1051 bio_free_map_data(bmd);
1052 bio_put(bio);
1053 return ret;
1054}
a112a71d 1055EXPORT_SYMBOL(bio_uncopy_user);
1da177e4
LT
1056
1057/**
c5dec1c3 1058 * bio_copy_user_iov - copy user data to bio
1da177e4 1059 * @q: destination block queue
152e283f 1060 * @map_data: pointer to the rq_map_data holding pages (if necessary)
c5dec1c3
FT
1061 * @iov: the iovec.
1062 * @iov_count: number of elements in the iovec
1da177e4 1063 * @write_to_vm: bool indicating writing to pages or not
a3bce90e 1064 * @gfp_mask: memory allocation flags
1da177e4
LT
1065 *
1066 * Prepares and returns a bio for indirect user io, bouncing data
1067 * to/from kernel pages as necessary. Must be paired with
1068 * a call to bio_uncopy_user() on io completion.
1069 */
152e283f
FT
1070struct bio *bio_copy_user_iov(struct request_queue *q,
1071 struct rq_map_data *map_data,
1072 struct sg_iovec *iov, int iov_count,
1073 int write_to_vm, gfp_t gfp_mask)
1da177e4 1074{
1da177e4
LT
1075 struct bio_map_data *bmd;
1076 struct bio_vec *bvec;
1077 struct page *page;
1078 struct bio *bio;
1079 int i, ret;
c5dec1c3
FT
1080 int nr_pages = 0;
1081 unsigned int len = 0;
56c451f4 1082 unsigned int offset = map_data ? map_data->offset & ~PAGE_MASK : 0;
1da177e4 1083
c5dec1c3
FT
1084 for (i = 0; i < iov_count; i++) {
1085 unsigned long uaddr;
1086 unsigned long end;
1087 unsigned long start;
1088
1089 uaddr = (unsigned long)iov[i].iov_base;
1090 end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1091 start = uaddr >> PAGE_SHIFT;
1092
cb4644ca
JA
1093 /*
1094 * Overflow, abort
1095 */
1096 if (end < start)
1097 return ERR_PTR(-EINVAL);
1098
c5dec1c3
FT
1099 nr_pages += end - start;
1100 len += iov[i].iov_len;
1101 }
1102
69838727
FT
1103 if (offset)
1104 nr_pages++;
1105
a3bce90e 1106 bmd = bio_alloc_map_data(nr_pages, iov_count, gfp_mask);
1da177e4
LT
1107 if (!bmd)
1108 return ERR_PTR(-ENOMEM);
1109
1da177e4 1110 ret = -ENOMEM;
a9e9dc24 1111 bio = bio_kmalloc(gfp_mask, nr_pages);
1da177e4
LT
1112 if (!bio)
1113 goto out_bmd;
1114
7b6d91da
CH
1115 if (!write_to_vm)
1116 bio->bi_rw |= REQ_WRITE;
1da177e4
LT
1117
1118 ret = 0;
56c451f4
FT
1119
1120 if (map_data) {
e623ddb4 1121 nr_pages = 1 << map_data->page_order;
56c451f4
FT
1122 i = map_data->offset / PAGE_SIZE;
1123 }
1da177e4 1124 while (len) {
e623ddb4 1125 unsigned int bytes = PAGE_SIZE;
1da177e4 1126
56c451f4
FT
1127 bytes -= offset;
1128
1da177e4
LT
1129 if (bytes > len)
1130 bytes = len;
1131
152e283f 1132 if (map_data) {
e623ddb4 1133 if (i == map_data->nr_entries * nr_pages) {
152e283f
FT
1134 ret = -ENOMEM;
1135 break;
1136 }
e623ddb4
FT
1137
1138 page = map_data->pages[i / nr_pages];
1139 page += (i % nr_pages);
1140
1141 i++;
1142 } else {
152e283f 1143 page = alloc_page(q->bounce_gfp | gfp_mask);
e623ddb4
FT
1144 if (!page) {
1145 ret = -ENOMEM;
1146 break;
1147 }
1da177e4
LT
1148 }
1149
56c451f4 1150 if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
1da177e4 1151 break;
1da177e4
LT
1152
1153 len -= bytes;
56c451f4 1154 offset = 0;
1da177e4
LT
1155 }
1156
1157 if (ret)
1158 goto cleanup;
1159
1160 /*
1161 * success
1162 */
ecb554a8
FT
1163 if ((!write_to_vm && (!map_data || !map_data->null_mapped)) ||
1164 (map_data && map_data->from_user)) {
1165 ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0, 1, 0);
c5dec1c3
FT
1166 if (ret)
1167 goto cleanup;
1da177e4
LT
1168 }
1169
152e283f 1170 bio_set_map_data(bmd, bio, iov, iov_count, map_data ? 0 : 1);
1da177e4
LT
1171 return bio;
1172cleanup:
152e283f 1173 if (!map_data)
d74c6d51 1174 bio_for_each_segment_all(bvec, bio, i)
152e283f 1175 __free_page(bvec->bv_page);
1da177e4
LT
1176
1177 bio_put(bio);
1178out_bmd:
1179 bio_free_map_data(bmd);
1180 return ERR_PTR(ret);
1181}
1182
c5dec1c3
FT
1183/**
1184 * bio_copy_user - copy user data to bio
1185 * @q: destination block queue
152e283f 1186 * @map_data: pointer to the rq_map_data holding pages (if necessary)
c5dec1c3
FT
1187 * @uaddr: start of user address
1188 * @len: length in bytes
1189 * @write_to_vm: bool indicating writing to pages or not
a3bce90e 1190 * @gfp_mask: memory allocation flags
c5dec1c3
FT
1191 *
1192 * Prepares and returns a bio for indirect user io, bouncing data
1193 * to/from kernel pages as necessary. Must be paired with
1194 * a call to bio_uncopy_user() on io completion.
1195 */
152e283f
FT
1196struct bio *bio_copy_user(struct request_queue *q, struct rq_map_data *map_data,
1197 unsigned long uaddr, unsigned int len,
1198 int write_to_vm, gfp_t gfp_mask)
c5dec1c3
FT
1199{
1200 struct sg_iovec iov;
1201
1202 iov.iov_base = (void __user *)uaddr;
1203 iov.iov_len = len;
1204
152e283f 1205 return bio_copy_user_iov(q, map_data, &iov, 1, write_to_vm, gfp_mask);
c5dec1c3 1206}
a112a71d 1207EXPORT_SYMBOL(bio_copy_user);
c5dec1c3 1208
165125e1 1209static struct bio *__bio_map_user_iov(struct request_queue *q,
f1970baf
JB
1210 struct block_device *bdev,
1211 struct sg_iovec *iov, int iov_count,
a3bce90e 1212 int write_to_vm, gfp_t gfp_mask)
1da177e4 1213{
f1970baf
JB
1214 int i, j;
1215 int nr_pages = 0;
1da177e4
LT
1216 struct page **pages;
1217 struct bio *bio;
f1970baf
JB
1218 int cur_page = 0;
1219 int ret, offset;
1da177e4 1220
f1970baf
JB
1221 for (i = 0; i < iov_count; i++) {
1222 unsigned long uaddr = (unsigned long)iov[i].iov_base;
1223 unsigned long len = iov[i].iov_len;
1224 unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1225 unsigned long start = uaddr >> PAGE_SHIFT;
1226
cb4644ca
JA
1227 /*
1228 * Overflow, abort
1229 */
1230 if (end < start)
1231 return ERR_PTR(-EINVAL);
1232
f1970baf
JB
1233 nr_pages += end - start;
1234 /*
ad2d7225 1235 * buffer must be aligned to at least hardsector size for now
f1970baf 1236 */
ad2d7225 1237 if (uaddr & queue_dma_alignment(q))
f1970baf
JB
1238 return ERR_PTR(-EINVAL);
1239 }
1240
1241 if (!nr_pages)
1da177e4
LT
1242 return ERR_PTR(-EINVAL);
1243
a9e9dc24 1244 bio = bio_kmalloc(gfp_mask, nr_pages);
1da177e4
LT
1245 if (!bio)
1246 return ERR_PTR(-ENOMEM);
1247
1248 ret = -ENOMEM;
a3bce90e 1249 pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
1da177e4
LT
1250 if (!pages)
1251 goto out;
1252
f1970baf
JB
1253 for (i = 0; i < iov_count; i++) {
1254 unsigned long uaddr = (unsigned long)iov[i].iov_base;
1255 unsigned long len = iov[i].iov_len;
1256 unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1257 unsigned long start = uaddr >> PAGE_SHIFT;
1258 const int local_nr_pages = end - start;
1259 const int page_limit = cur_page + local_nr_pages;
cb4644ca 1260
f5dd33c4
NP
1261 ret = get_user_pages_fast(uaddr, local_nr_pages,
1262 write_to_vm, &pages[cur_page]);
99172157
JA
1263 if (ret < local_nr_pages) {
1264 ret = -EFAULT;
f1970baf 1265 goto out_unmap;
99172157 1266 }
f1970baf
JB
1267
1268 offset = uaddr & ~PAGE_MASK;
1269 for (j = cur_page; j < page_limit; j++) {
1270 unsigned int bytes = PAGE_SIZE - offset;
1271
1272 if (len <= 0)
1273 break;
1274
1275 if (bytes > len)
1276 bytes = len;
1277
1278 /*
1279 * sorry...
1280 */
defd94b7
MC
1281 if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
1282 bytes)
f1970baf
JB
1283 break;
1284
1285 len -= bytes;
1286 offset = 0;
1287 }
1da177e4 1288
f1970baf 1289 cur_page = j;
1da177e4 1290 /*
f1970baf 1291 * release the pages we didn't map into the bio, if any
1da177e4 1292 */
f1970baf
JB
1293 while (j < page_limit)
1294 page_cache_release(pages[j++]);
1da177e4
LT
1295 }
1296
1da177e4
LT
1297 kfree(pages);
1298
1299 /*
1300 * set data direction, and check if mapped pages need bouncing
1301 */
1302 if (!write_to_vm)
7b6d91da 1303 bio->bi_rw |= REQ_WRITE;
1da177e4 1304
f1970baf 1305 bio->bi_bdev = bdev;
1da177e4
LT
1306 bio->bi_flags |= (1 << BIO_USER_MAPPED);
1307 return bio;
f1970baf
JB
1308
1309 out_unmap:
1310 for (i = 0; i < nr_pages; i++) {
1311 if(!pages[i])
1312 break;
1313 page_cache_release(pages[i]);
1314 }
1315 out:
1da177e4
LT
1316 kfree(pages);
1317 bio_put(bio);
1318 return ERR_PTR(ret);
1319}
1320
1321/**
1322 * bio_map_user - map user address into bio
165125e1 1323 * @q: the struct request_queue for the bio
1da177e4
LT
1324 * @bdev: destination block device
1325 * @uaddr: start of user address
1326 * @len: length in bytes
1327 * @write_to_vm: bool indicating writing to pages or not
a3bce90e 1328 * @gfp_mask: memory allocation flags
1da177e4
LT
1329 *
1330 * Map the user space address into a bio suitable for io to a block
1331 * device. Returns an error pointer in case of error.
1332 */
165125e1 1333struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
a3bce90e
FT
1334 unsigned long uaddr, unsigned int len, int write_to_vm,
1335 gfp_t gfp_mask)
f1970baf
JB
1336{
1337 struct sg_iovec iov;
1338
3f70353e 1339 iov.iov_base = (void __user *)uaddr;
f1970baf
JB
1340 iov.iov_len = len;
1341
a3bce90e 1342 return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm, gfp_mask);
f1970baf 1343}
a112a71d 1344EXPORT_SYMBOL(bio_map_user);
f1970baf
JB
1345
1346/**
1347 * bio_map_user_iov - map user sg_iovec table into bio
165125e1 1348 * @q: the struct request_queue for the bio
f1970baf
JB
1349 * @bdev: destination block device
1350 * @iov: the iovec.
1351 * @iov_count: number of elements in the iovec
1352 * @write_to_vm: bool indicating writing to pages or not
a3bce90e 1353 * @gfp_mask: memory allocation flags
f1970baf
JB
1354 *
1355 * Map the user space address into a bio suitable for io to a block
1356 * device. Returns an error pointer in case of error.
1357 */
165125e1 1358struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
f1970baf 1359 struct sg_iovec *iov, int iov_count,
a3bce90e 1360 int write_to_vm, gfp_t gfp_mask)
1da177e4
LT
1361{
1362 struct bio *bio;
1363
a3bce90e
FT
1364 bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm,
1365 gfp_mask);
1da177e4
LT
1366 if (IS_ERR(bio))
1367 return bio;
1368
1369 /*
1370 * subtle -- if __bio_map_user() ended up bouncing a bio,
1371 * it would normally disappear when its bi_end_io is run.
1372 * however, we need it for the unmap, so grab an extra
1373 * reference to it
1374 */
1375 bio_get(bio);
1376
0e75f906 1377 return bio;
1da177e4
LT
1378}
1379
1380static void __bio_unmap_user(struct bio *bio)
1381{
1382 struct bio_vec *bvec;
1383 int i;
1384
1385 /*
1386 * make sure we dirty pages we wrote to
1387 */
d74c6d51 1388 bio_for_each_segment_all(bvec, bio, i) {
1da177e4
LT
1389 if (bio_data_dir(bio) == READ)
1390 set_page_dirty_lock(bvec->bv_page);
1391
1392 page_cache_release(bvec->bv_page);
1393 }
1394
1395 bio_put(bio);
1396}
1397
1398/**
1399 * bio_unmap_user - unmap a bio
1400 * @bio: the bio being unmapped
1401 *
1402 * Unmap a bio previously mapped by bio_map_user(). Must be called with
1403 * a process context.
1404 *
1405 * bio_unmap_user() may sleep.
1406 */
1407void bio_unmap_user(struct bio *bio)
1408{
1409 __bio_unmap_user(bio);
1410 bio_put(bio);
1411}
a112a71d 1412EXPORT_SYMBOL(bio_unmap_user);
1da177e4 1413
6712ecf8 1414static void bio_map_kern_endio(struct bio *bio, int err)
b823825e 1415{
b823825e 1416 bio_put(bio);
b823825e
JA
1417}
1418
165125e1 1419static struct bio *__bio_map_kern(struct request_queue *q, void *data,
27496a8c 1420 unsigned int len, gfp_t gfp_mask)
df46b9a4
MC
1421{
1422 unsigned long kaddr = (unsigned long)data;
1423 unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1424 unsigned long start = kaddr >> PAGE_SHIFT;
1425 const int nr_pages = end - start;
1426 int offset, i;
1427 struct bio *bio;
1428
a9e9dc24 1429 bio = bio_kmalloc(gfp_mask, nr_pages);
df46b9a4
MC
1430 if (!bio)
1431 return ERR_PTR(-ENOMEM);
1432
1433 offset = offset_in_page(kaddr);
1434 for (i = 0; i < nr_pages; i++) {
1435 unsigned int bytes = PAGE_SIZE - offset;
1436
1437 if (len <= 0)
1438 break;
1439
1440 if (bytes > len)
1441 bytes = len;
1442
defd94b7
MC
1443 if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
1444 offset) < bytes)
df46b9a4
MC
1445 break;
1446
1447 data += bytes;
1448 len -= bytes;
1449 offset = 0;
1450 }
1451
b823825e 1452 bio->bi_end_io = bio_map_kern_endio;
df46b9a4
MC
1453 return bio;
1454}
1455
1456/**
1457 * bio_map_kern - map kernel address into bio
165125e1 1458 * @q: the struct request_queue for the bio
df46b9a4
MC
1459 * @data: pointer to buffer to map
1460 * @len: length in bytes
1461 * @gfp_mask: allocation flags for bio allocation
1462 *
1463 * Map the kernel address into a bio suitable for io to a block
1464 * device. Returns an error pointer in case of error.
1465 */
165125e1 1466struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
27496a8c 1467 gfp_t gfp_mask)
df46b9a4
MC
1468{
1469 struct bio *bio;
1470
1471 bio = __bio_map_kern(q, data, len, gfp_mask);
1472 if (IS_ERR(bio))
1473 return bio;
1474
1475 if (bio->bi_size == len)
1476 return bio;
1477
1478 /*
1479 * Don't support partial mappings.
1480 */
1481 bio_put(bio);
1482 return ERR_PTR(-EINVAL);
1483}
a112a71d 1484EXPORT_SYMBOL(bio_map_kern);
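
/*
 * Editorial illustration, not part of the original fs/bio.c: issuing
 * synchronous I/O on a kernel buffer via bio_map_kern().  The buffer must
 * stay valid until the I/O completes; queue, device and sector are assumed
 * to come from the caller.  Note that submit_bio_wait() replaces the
 * bi_end_io set up by bio_map_kern(), so the reference is dropped here.
 */
static int example_write_kernel_buf(struct request_queue *q,
                                    struct block_device *bdev,
                                    sector_t sector, void *data,
                                    unsigned int len)
{
        struct bio *bio = bio_map_kern(q, data, len, GFP_KERNEL);
        int ret;

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        bio->bi_bdev   = bdev;
        bio->bi_sector = sector;

        ret = submit_bio_wait(WRITE, bio);
        bio_put(bio);
        return ret;
}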
df46b9a4 1485
68154e90
FT
1486static void bio_copy_kern_endio(struct bio *bio, int err)
1487{
1488 struct bio_vec *bvec;
1489 const int read = bio_data_dir(bio) == READ;
76029ff3 1490 struct bio_map_data *bmd = bio->bi_private;
68154e90 1491 int i;
76029ff3 1492 char *p = bmd->sgvecs[0].iov_base;
68154e90 1493
d74c6d51 1494 bio_for_each_segment_all(bvec, bio, i) {
68154e90 1495 char *addr = page_address(bvec->bv_page);
76029ff3 1496 int len = bmd->iovecs[i].bv_len;
68154e90 1497
4fc981ef 1498 if (read)
76029ff3 1499 memcpy(p, addr, len);
68154e90
FT
1500
1501 __free_page(bvec->bv_page);
76029ff3 1502 p += len;
68154e90
FT
1503 }
1504
76029ff3 1505 bio_free_map_data(bmd);
68154e90
FT
1506 bio_put(bio);
1507}
1508
1509/**
1510 * bio_copy_kern - copy kernel address into bio
1511 * @q: the struct request_queue for the bio
1512 * @data: pointer to buffer to copy
1513 * @len: length in bytes
1514 * @gfp_mask: allocation flags for bio and page allocation
ffee0259 1515 * @reading: data direction is READ
68154e90
FT
1516 *
1517 * copy the kernel address into a bio suitable for io to a block
1518 * device. Returns an error pointer in case of error.
1519 */
1520struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
1521 gfp_t gfp_mask, int reading)
1522{
68154e90
FT
1523 struct bio *bio;
1524 struct bio_vec *bvec;
4d8ab62e 1525 int i;
68154e90 1526
4d8ab62e
FT
1527 bio = bio_copy_user(q, NULL, (unsigned long)data, len, 1, gfp_mask);
1528 if (IS_ERR(bio))
1529 return bio;
68154e90
FT
1530
1531 if (!reading) {
1532 void *p = data;
1533
d74c6d51 1534 bio_for_each_segment_all(bvec, bio, i) {
68154e90
FT
1535 char *addr = page_address(bvec->bv_page);
1536
1537 memcpy(addr, p, bvec->bv_len);
1538 p += bvec->bv_len;
1539 }
1540 }
1541
68154e90 1542 bio->bi_end_io = bio_copy_kern_endio;
76029ff3 1543
68154e90 1544 return bio;
68154e90 1545}
a112a71d 1546EXPORT_SYMBOL(bio_copy_kern);
68154e90 1547
1da177e4
LT
1548/*
1549 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
1550 * for performing direct-IO in BIOs.
1551 *
1552 * The problem is that we cannot run set_page_dirty() from interrupt context
1553 * because the required locks are not interrupt-safe. So what we can do is to
1554 * mark the pages dirty _before_ performing IO. And in interrupt context,
1555 * check that the pages are still dirty. If so, fine. If not, redirty them
1556 * in process context.
1557 *
1558 * We special-case compound pages here: normally this means reads into hugetlb
1559 * pages. The logic in here doesn't really work right for compound pages
1560 * because the VM does not uniformly chase down the head page in all cases.
1561 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
1562 * handle them at all. So we skip compound pages here at an early stage.
1563 *
1564 * Note that this code is very hard to test under normal circumstances because
1565 * direct-io pins the pages with get_user_pages(). This makes
1566 * is_page_cache_freeable return false, and the VM will not clean the pages.
0d5c3eba 1567 * But other code (eg, flusher threads) could clean the pages if they are mapped
1da177e4
LT
1568 * pagecache.
1569 *
1570 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
1571 * deferred bio dirtying paths.
1572 */
1573
1574/*
1575 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
1576 */
1577void bio_set_pages_dirty(struct bio *bio)
1578{
cb34e057 1579 struct bio_vec *bvec;
1da177e4
LT
1580 int i;
1581
cb34e057
KO
1582 bio_for_each_segment_all(bvec, bio, i) {
1583 struct page *page = bvec->bv_page;
1da177e4
LT
1584
1585 if (page && !PageCompound(page))
1586 set_page_dirty_lock(page);
1587 }
1588}
1589
86b6c7a7 1590static void bio_release_pages(struct bio *bio)
1da177e4 1591{
cb34e057 1592 struct bio_vec *bvec;
1da177e4
LT
1593 int i;
1594
cb34e057
KO
1595 bio_for_each_segment_all(bvec, bio, i) {
1596 struct page *page = bvec->bv_page;
1da177e4
LT
1597
1598 if (page)
1599 put_page(page);
1600 }
1601}
1602
1603/*
1604 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
1605 * If they are, then fine. If, however, some pages are clean then they must
1606 * have been written out during the direct-IO read. So we take another ref on
1607 * the BIO and the offending pages and re-dirty the pages in process context.
1608 *
1609 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
1610 * here on. It will run one page_cache_release() against each page and will
1611 * run one bio_put() against the BIO.
1612 */
1613
65f27f38 1614static void bio_dirty_fn(struct work_struct *work);
1da177e4 1615
65f27f38 1616static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
1da177e4
LT
1617static DEFINE_SPINLOCK(bio_dirty_lock);
1618static struct bio *bio_dirty_list;
1619
1620/*
1621 * This runs in process context
1622 */
65f27f38 1623static void bio_dirty_fn(struct work_struct *work)
1da177e4
LT
1624{
1625 unsigned long flags;
1626 struct bio *bio;
1627
1628 spin_lock_irqsave(&bio_dirty_lock, flags);
1629 bio = bio_dirty_list;
1630 bio_dirty_list = NULL;
1631 spin_unlock_irqrestore(&bio_dirty_lock, flags);
1632
1633 while (bio) {
1634 struct bio *next = bio->bi_private;
1635
1636 bio_set_pages_dirty(bio);
1637 bio_release_pages(bio);
1638 bio_put(bio);
1639 bio = next;
1640 }
1641}
1642
1643void bio_check_pages_dirty(struct bio *bio)
1644{
cb34e057 1645 struct bio_vec *bvec;
1da177e4
LT
1646 int nr_clean_pages = 0;
1647 int i;
1648
cb34e057
KO
1649 bio_for_each_segment_all(bvec, bio, i) {
1650 struct page *page = bvec->bv_page;
1da177e4
LT
1651
1652 if (PageDirty(page) || PageCompound(page)) {
1653 page_cache_release(page);
cb34e057 1654 bvec->bv_page = NULL;
1da177e4
LT
1655 } else {
1656 nr_clean_pages++;
1657 }
1658 }
1659
1660 if (nr_clean_pages) {
1661 unsigned long flags;
1662
1663 spin_lock_irqsave(&bio_dirty_lock, flags);
1664 bio->bi_private = bio_dirty_list;
1665 bio_dirty_list = bio;
1666 spin_unlock_irqrestore(&bio_dirty_lock, flags);
1667 schedule_work(&bio_dirty_work);
1668 } else {
1669 bio_put(bio);
1670 }
1671}
1672
2d4dc890
IL
1673#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
1674void bio_flush_dcache_pages(struct bio *bi)
1675{
1676 int i;
1677 struct bio_vec *bvec;
1678
1679 bio_for_each_segment(bvec, bi, i)
1680 flush_dcache_page(bvec->bv_page);
1681}
1682EXPORT_SYMBOL(bio_flush_dcache_pages);
1683#endif
1684
1da177e4
LT
1685/**
1686 * bio_endio - end I/O on a bio
1687 * @bio: bio
1da177e4
LT
1688 * @error: error, if any
1689 *
1690 * Description:
6712ecf8 1691 * bio_endio() will end I/O on the whole bio. bio_endio() is the
5bb23a68
N
1692 * preferred way to end I/O on a bio, it takes care of clearing
1693 * BIO_UPTODATE on error. @error is 0 on success, and one of the
1694 * established -Exxxx (-EIO, for instance) error values in case
25985edc 1695 * something went wrong. No one should call bi_end_io() directly on a
5bb23a68
N
1696 * bio unless they own it and thus know that it has an end_io
1697 * function.
1da177e4 1698 **/
6712ecf8 1699void bio_endio(struct bio *bio, int error)
1da177e4
LT
1700{
1701 if (error)
1702 clear_bit(BIO_UPTODATE, &bio->bi_flags);
9cc54d40
N
1703 else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
1704 error = -EIO;
1da177e4 1705
3a366e61
TH
1706 trace_block_bio_complete(bio, error);
1707
5bb23a68 1708 if (bio->bi_end_io)
6712ecf8 1709 bio->bi_end_io(bio, error);
1da177e4 1710}
a112a71d 1711EXPORT_SYMBOL(bio_endio);
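
/*
 * Editorial illustration, not part of the original fs/bio.c: the shape of a
 * typical bi_end_io handler.  "struct example_io" is an invented per-request
 * context reached through bi_private; @error arrives exactly as described in
 * the comment above (0 or a negative errno).
 */
struct example_io {
        struct completion       done;
        int                     error;
};

static void example_end_io(struct bio *bio, int error)
{
        struct example_io *io = bio->bi_private;

        io->error = error;
        complete(&io->done);
        bio_put(bio);                   /* drop the submitter's reference */
}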
1da177e4
LT
1712
1713void bio_pair_release(struct bio_pair *bp)
1714{
1715 if (atomic_dec_and_test(&bp->cnt)) {
1716 struct bio *master = bp->bio1.bi_private;
1717
6712ecf8 1718 bio_endio(master, bp->error);
1da177e4
LT
1719 mempool_free(bp, bp->bio2.bi_private);
1720 }
1721}
a112a71d 1722EXPORT_SYMBOL(bio_pair_release);
1da177e4 1723
6712ecf8 1724static void bio_pair_end_1(struct bio *bi, int err)
1da177e4
LT
1725{
1726 struct bio_pair *bp = container_of(bi, struct bio_pair, bio1);
1727
1728 if (err)
1729 bp->error = err;
1730
1da177e4 1731 bio_pair_release(bp);
1da177e4
LT
1732}
1733
6712ecf8 1734static void bio_pair_end_2(struct bio *bi, int err)
1da177e4
LT
1735{
1736 struct bio_pair *bp = container_of(bi, struct bio_pair, bio2);
1737
1738 if (err)
1739 bp->error = err;
1740
1da177e4 1741 bio_pair_release(bp);
1da177e4
LT
1742}
1743
1744/*
c7eee1b8 1745 * split a bio - only worry about a bio with a single page in its iovec
1da177e4 1746 */
6feef531 1747struct bio_pair *bio_split(struct bio *bi, int first_sectors)
1da177e4 1748{
6feef531 1749 struct bio_pair *bp = mempool_alloc(bio_split_pool, GFP_NOIO);
1da177e4
LT
1750
1751 if (!bp)
1752 return bp;
1753
5f3ea37c 1754 trace_block_split(bdev_get_queue(bi->bi_bdev), bi,
2056a782
JA
1755 bi->bi_sector + first_sectors);
1756
5b83636a 1757 BUG_ON(bio_segments(bi) > 1);
1da177e4
LT
1758 atomic_set(&bp->cnt, 3);
1759 bp->error = 0;
1760 bp->bio1 = *bi;
1761 bp->bio2 = *bi;
1762 bp->bio2.bi_sector += first_sectors;
1763 bp->bio2.bi_size -= first_sectors << 9;
1764 bp->bio1.bi_size = first_sectors << 9;
1765
02f3939e 1766 if (bi->bi_vcnt != 0) {
5b83636a
KO
1767 bp->bv1 = *bio_iovec(bi);
1768 bp->bv2 = *bio_iovec(bi);
4363ac7c 1769
02f3939e
SL
1770 if (bio_is_rw(bi)) {
1771 bp->bv2.bv_offset += first_sectors << 9;
1772 bp->bv2.bv_len -= first_sectors << 9;
1773 bp->bv1.bv_len = first_sectors << 9;
1774 }
1da177e4 1775
02f3939e
SL
1776 bp->bio1.bi_io_vec = &bp->bv1;
1777 bp->bio2.bi_io_vec = &bp->bv2;
1da177e4 1778
02f3939e
SL
1779 bp->bio1.bi_max_vecs = 1;
1780 bp->bio2.bi_max_vecs = 1;
1781 }
a2eb0c10 1782
1da177e4
LT
1783 bp->bio1.bi_end_io = bio_pair_end_1;
1784 bp->bio2.bi_end_io = bio_pair_end_2;
1785
1786 bp->bio1.bi_private = bi;
6feef531 1787 bp->bio2.bi_private = bio_split_pool;
1da177e4 1788
7ba1ba12
MP
1789 if (bio_integrity(bi))
1790 bio_integrity_split(bi, bp, first_sectors);
1791
1da177e4
LT
1792 return bp;
1793}
a112a71d 1794EXPORT_SYMBOL(bio_split);
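
/*
 * Editorial illustration, not part of the original fs/bio.c: splitting a bio
 * that straddles some boundary (a stripe edge, say) the way raid0 does,
 * assuming the bio starts at or before @boundary.  Per the BUG_ON above,
 * only single-segment bios may be split this way.
 */
static int example_split_and_submit(struct bio *bio, sector_t boundary)
{
        sector_t first = boundary - bio->bi_sector;

        if (first > 0 && first < bio_sectors(bio)) {
                struct bio_pair *bp = bio_split(bio, first);

                if (!bp)
                        return -ENOMEM;

                generic_make_request(&bp->bio1);
                generic_make_request(&bp->bio2);
                bio_pair_release(bp);
                return 0;
        }

        generic_make_request(bio);      /* no boundary crossed, pass through */
        return 0;
}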
1da177e4 1795
ad3316bf
MP
1796/**
1797 * bio_sector_offset - Find hardware sector offset in bio
1798 * @bio: bio to inspect
1799 * @index: bio_vec index
1800 * @offset: offset in bv_page
1801 *
1802 * Return the number of hardware sectors between beginning of bio
1803 * and an end point indicated by a bio_vec index and an offset
1804 * within that vector's page.
1805 */
1806sector_t bio_sector_offset(struct bio *bio, unsigned short index,
1807 unsigned int offset)
1808{
e1defc4f 1809 unsigned int sector_sz;
ad3316bf
MP
1810 struct bio_vec *bv;
1811 sector_t sectors;
1812 int i;
1813
e1defc4f 1814 sector_sz = queue_logical_block_size(bio->bi_bdev->bd_disk->queue);
ad3316bf
MP
1815 sectors = 0;
1816
1817 if (index >= bio->bi_idx)
1818 index = bio->bi_vcnt - 1;
1819
d74c6d51 1820 bio_for_each_segment_all(bv, bio, i) {
ad3316bf
MP
1821 if (i == index) {
1822 if (offset > bv->bv_offset)
1823 sectors += (offset - bv->bv_offset) / sector_sz;
1824 break;
1825 }
1826
1827 sectors += bv->bv_len / sector_sz;
1828 }
1829
1830 return sectors;
1831}
1832EXPORT_SYMBOL(bio_sector_offset);
1da177e4
LT
1833
1834/*
1835 * create memory pools for biovec's in a bio_set.
1836 * use the global biovec slabs created for general use.
1837 */
9f060e22 1838mempool_t *biovec_create_pool(struct bio_set *bs, int pool_entries)
1da177e4 1839{
7ff9345f 1840 struct biovec_slab *bp = bvec_slabs + BIOVEC_MAX_IDX;
1da177e4 1841
9f060e22 1842 return mempool_create_slab_pool(pool_entries, bp->slab);
1da177e4
LT
1843}
1844
1845void bioset_free(struct bio_set *bs)
1846{
df2cb6da
KO
1847 if (bs->rescue_workqueue)
1848 destroy_workqueue(bs->rescue_workqueue);
1849
1da177e4
LT
1850 if (bs->bio_pool)
1851 mempool_destroy(bs->bio_pool);
1852
9f060e22
KO
1853 if (bs->bvec_pool)
1854 mempool_destroy(bs->bvec_pool);
1855
7878cba9 1856 bioset_integrity_free(bs);
bb799ca0 1857 bio_put_slab(bs);
1da177e4
LT
1858
1859 kfree(bs);
1860}
a112a71d 1861EXPORT_SYMBOL(bioset_free);
1da177e4 1862
bb799ca0
JA
1863/**
1864 * bioset_create - Create a bio_set
1865 * @pool_size: Number of bio and bio_vecs to cache in the mempool
1866 * @front_pad: Number of bytes to allocate in front of the returned bio
1867 *
1868 * Description:
1869 * Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
1870 * to ask for a number of bytes to be allocated in front of the bio.
1871 * Front pad allocation is useful for embedding the bio inside
1872 * another structure, to avoid allocating extra data to go with the bio.
1873 * Note that the bio must be embedded at the END of that structure always,
1874 * or things will break badly.
1875 */
1876struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
1da177e4 1877{
392ddc32 1878 unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
1b434498 1879 struct bio_set *bs;
1da177e4 1880
1b434498 1881 bs = kzalloc(sizeof(*bs), GFP_KERNEL);
1da177e4
LT
1882 if (!bs)
1883 return NULL;
1884
bb799ca0 1885 bs->front_pad = front_pad;
1b434498 1886
df2cb6da
KO
1887 spin_lock_init(&bs->rescue_lock);
1888 bio_list_init(&bs->rescue_list);
1889 INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
1890
392ddc32 1891 bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
bb799ca0
JA
1892 if (!bs->bio_slab) {
1893 kfree(bs);
1894 return NULL;
1895 }
1896
1897 bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab);
1da177e4
LT
1898 if (!bs->bio_pool)
1899 goto bad;
1900
9f060e22
KO
1901 bs->bvec_pool = biovec_create_pool(bs, pool_size);
1902 if (!bs->bvec_pool)
df2cb6da
KO
1903 goto bad;
1904
1905 bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
1906 if (!bs->rescue_workqueue)
1907 goto bad;
1da177e4 1908
df2cb6da 1909 return bs;
1da177e4
LT
1910bad:
1911 bioset_free(bs);
1912 return NULL;
1913}
a112a71d 1914EXPORT_SYMBOL(bioset_create);
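
/*
 * Editorial illustration, not part of the original fs/bio.c: using front_pad
 * to embed a bio at the end of a per-request driver structure, as the comment
 * above requires.  "struct example_req" is invented for this sketch; a later
 * bio_alloc_bioset() from this set returns a bio that container_of() maps
 * back to the surrounding example_req.
 */
struct example_req {
        void            *driver_data;
        struct bio      bio;            /* must stay the last member */
};

static struct bio_set *example_create_bioset(void)
{
        return bioset_create(BIO_POOL_SIZE, offsetof(struct example_req, bio));
}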
1da177e4 1915
852c788f
TH
1916#ifdef CONFIG_BLK_CGROUP
1917/**
1918 * bio_associate_current - associate a bio with %current
1919 * @bio: target bio
1920 *
1921 * Associate @bio with %current if it hasn't been associated yet. Block
1922 * layer will treat @bio as if it were issued by %current no matter which
1923 * task actually issues it.
1924 *
1925 * This function takes an extra reference of @task's io_context and blkcg
1926 * which will be put when @bio is released. The caller must own @bio,
1927 * ensure %current->io_context exists, and is responsible for synchronizing
1928 * calls to this function.
1929 */
1930int bio_associate_current(struct bio *bio)
1931{
1932 struct io_context *ioc;
1933 struct cgroup_subsys_state *css;
1934
1935 if (bio->bi_ioc)
1936 return -EBUSY;
1937
1938 ioc = current->io_context;
1939 if (!ioc)
1940 return -ENOENT;
1941
1942 /* acquire active ref on @ioc and associate */
1943 get_io_context_active(ioc);
1944 bio->bi_ioc = ioc;
1945
1946 /* associate blkcg if exists */
1947 rcu_read_lock();
1948 css = task_subsys_state(current, blkio_subsys_id);
1949 if (css && css_tryget(css))
1950 bio->bi_css = css;
1951 rcu_read_unlock();
1952
1953 return 0;
1954}
1955
1956/**
1957 * bio_disassociate_task - undo bio_associate_current()
1958 * @bio: target bio
1959 */
1960void bio_disassociate_task(struct bio *bio)
1961{
1962 if (bio->bi_ioc) {
1963 put_io_context(bio->bi_ioc);
1964 bio->bi_ioc = NULL;
1965 }
1966 if (bio->bi_css) {
1967 css_put(bio->bi_css);
1968 bio->bi_css = NULL;
1969 }
1970}
1971
1972#endif /* CONFIG_BLK_CGROUP */
1973
1da177e4
LT
1974static void __init biovec_init_slabs(void)
1975{
1976 int i;
1977
1978 for (i = 0; i < BIOVEC_NR_POOLS; i++) {
1979 int size;
1980 struct biovec_slab *bvs = bvec_slabs + i;
1981
a7fcd37c
JA
1982 if (bvs->nr_vecs <= BIO_INLINE_VECS) {
1983 bvs->slab = NULL;
1984 continue;
1985 }
a7fcd37c 1986
1da177e4
LT
1987 size = bvs->nr_vecs * sizeof(struct bio_vec);
1988 bvs->slab = kmem_cache_create(bvs->name, size, 0,
20c2df83 1989 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
1da177e4
LT
1990 }
1991}
1992
1993static int __init init_bio(void)
1994{
bb799ca0
JA
1995 bio_slab_max = 2;
1996 bio_slab_nr = 0;
1997 bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
1998 if (!bio_slabs)
1999 panic("bio: can't allocate bios\n");
1da177e4 2000
7878cba9 2001 bio_integrity_init();
1da177e4
LT
2002 biovec_init_slabs();
2003
bb799ca0 2004 fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
1da177e4
LT
2005 if (!fs_bio_set)
2006 panic("bio: can't allocate bios\n");
2007
a91a2785
MP
2008 if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE))
2009 panic("bio: can't create integrity pool\n");
2010
0eaae62a
MD
2011 bio_split_pool = mempool_create_kmalloc_pool(BIO_SPLIT_ENTRIES,
2012 sizeof(struct bio_pair));
1da177e4
LT
2013 if (!bio_split_pool)
2014 panic("bio: can't create split pool\n");
2015
2016 return 0;
2017}
1da177e4 2018subsys_initcall(init_bio);