/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/mempool.h>
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>
#include <linux/uio.h>

#define BIO_MAX_VECS		256U
#define BIO_MAX_INLINE_VECS	UIO_MAXIOV
static inline unsigned int bio_max_segs(unsigned int nr_segs)
{
	return min(nr_segs, BIO_MAX_VECS);
}
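
/*
 * Example (an illustrative sketch, not code from this header): callers that
 * build a bio from an arbitrary number of pages typically clamp the segment
 * count first; "bdev" and "nr_pages" are assumed context here.
 *
 *	struct bio *bio = bio_alloc(bdev, bio_max_segs(nr_pages),
 *				    REQ_OP_READ, GFP_KERNEL);
 */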
#define bio_iter_iovec(bio, iter)				\
	bvec_iter_bvec((bio)->bi_io_vec, (iter))

#define bio_iter_page(bio, iter)				\
	bvec_iter_page((bio)->bi_io_vec, (iter))
#define bio_iter_len(bio, iter)					\
	bvec_iter_len((bio)->bi_io_vec, (iter))
#define bio_iter_offset(bio, iter)				\
	bvec_iter_offset((bio)->bi_io_vec, (iter))

#define bio_page(bio)		bio_iter_page((bio), (bio)->bi_iter)
#define bio_offset(bio)		bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio)		bio_iter_iovec((bio), (bio)->bi_iter)

#define bvec_iter_sectors(iter)	((iter).bi_size >> 9)
#define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))

#define bio_sectors(bio)	bvec_iter_sectors((bio)->bi_iter)
#define bio_end_sector(bio)	bvec_iter_end_sector((bio)->bi_iter)
/*
 * Return the data direction, READ or WRITE.
 */
#define bio_data_dir(bio) \
	(op_is_write(bio_op(bio)) ? WRITE : READ)
/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
	if (bio &&
	    bio->bi_iter.bi_size &&
	    bio_op(bio) != REQ_OP_DISCARD &&
	    bio_op(bio) != REQ_OP_SECURE_ERASE &&
	    bio_op(bio) != REQ_OP_WRITE_ZEROES)
		return true;

	return false;
}
static inline bool bio_no_advance_iter(const struct bio *bio)
{
	return bio_op(bio) == REQ_OP_DISCARD ||
	       bio_op(bio) == REQ_OP_SECURE_ERASE ||
	       bio_op(bio) == REQ_OP_WRITE_ZEROES;
}
static inline void *bio_data(struct bio *bio)
{
	if (bio_has_data(bio))
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}
static inline bool bio_next_segment(const struct bio *bio,
				    struct bvec_iter_all *iter)
{
	if (iter->idx >= bio->bi_vcnt)
		return false;

	bvec_advance(&bio->bi_io_vec[iter->idx], iter);
	return true;
}
/*
 * drivers should _never_ use the all version - the bio may have been split
 * before it got to the driver and the driver won't own all of it
 */
#define bio_for_each_segment_all(bvl, bio, iter) \
	for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); )
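
/*
 * Example (an illustrative sketch): the owner of a freshly built bio, such
 * as a filesystem completion handler, might walk every page it added; @bio
 * is assumed to be wholly owned by the caller.
 *
 *	struct bio_vec *bvec;
 *	struct bvec_iter_all iter_all;
 *
 *	bio_for_each_segment_all(bvec, bio, iter_all)
 *		flush_dcache_page(bvec->bv_page);
 */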
static inline void bio_advance_iter(const struct bio *bio,
				    struct bvec_iter *iter, unsigned int bytes)
{
	iter->bi_sector += bytes >> 9;

	if (bio_no_advance_iter(bio))
		iter->bi_size -= bytes;
	else
		bvec_iter_advance(bio->bi_io_vec, iter, bytes);
		/* TODO: It is reasonable to complete bio with error here. */
}
/* @bytes should be less than or equal to bvec[i->bi_idx].bv_len */
static inline void bio_advance_iter_single(const struct bio *bio,
					   struct bvec_iter *iter,
					   unsigned int bytes)
{
	iter->bi_sector += bytes >> 9;

	if (bio_no_advance_iter(bio))
		iter->bi_size -= bytes;
	else
		bvec_iter_advance_single(bio->bi_io_vec, iter, bytes);
}
void __bio_advance(struct bio *, unsigned bytes);
/**
 * bio_advance - increment/complete a bio by some number of bytes
 * @bio:	bio to advance
 * @nbytes:	number of bytes to complete
 *
 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
 * be updated on the last bvec as well.
 *
 * @bio will then represent the remaining, uncompleted portion of the io.
 */
static inline void bio_advance(struct bio *bio, unsigned int nbytes)
{
	if (nbytes == bio->bi_iter.bi_size) {
		bio->bi_iter.bi_size = 0;
		return;
	}
	__bio_advance(bio, nbytes);
}
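
/*
 * Example (an illustrative sketch): a stacking driver that has completed
 * "done" bytes of a bio can advance it and finish it once nothing remains;
 * "done" is assumed context.
 *
 *	bio_advance(bio, done);
 *	if (!bio->bi_iter.bi_size)
 *		bio_endio(bio);
 */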
#define __bio_for_each_segment(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = bio_iter_iovec((bio), (iter))), 1);		\
	     bio_advance_iter_single((bio), &(iter), (bvl).bv_len))

#define bio_for_each_segment(bvl, bio, iter)				\
	__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)

#define __bio_for_each_bvec(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \
	     bio_advance_iter_single((bio), &(iter), (bvl).bv_len))

/* iterate over multi-page bvec */
#define bio_for_each_bvec(bvl, bio, iter)				\
	__bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter)
/*
 * Iterate over all multi-page bvecs. Drivers shouldn't use this version for
 * the same reasons as bio_for_each_segment_all().
 */
#define bio_for_each_bvec_all(bvl, bio, i)		\
	for (i = 0, bvl = bio_first_bvec_all(bio);	\
	     i < (bio)->bi_vcnt; i++, bvl++)

#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
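
/*
 * Example (an illustrative sketch): a driver mapping a request might count
 * multi-page segments with bio_for_each_bvec(); each bvec here may span
 * several contiguous pages, unlike the single-page bio_for_each_segment().
 *
 *	struct bio_vec bv;
 *	struct bvec_iter iter;
 *	unsigned int nr_segs = 0;
 *
 *	bio_for_each_bvec(bv, bio, iter)
 *		nr_segs++;
 */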
static inline unsigned bio_segments(struct bio *bio)
{
	unsigned segs = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	/*
	 * We special case discard/write same/write zeroes, because they
	 * interpret bi_size differently:
	 */

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
	default:
		break;
	}

	bio_for_each_segment(bv, bio, iter)
		segs++;

	return segs;
}
/*
 * get a reference to a bio, so it won't disappear. the intended use is
 * something like:
 *
 * bio_get(bio);
 * submit_bio(rw, bio);
 * if (bio->bi_flags ...)
 *	do_something
 * bio_put(bio);
 *
 * without the bio_get(), it could potentially complete I/O before submit_bio
 * returns, and the bio could then already be freed memory by the time the
 * if (bio->bi_flags ...) check runs.
 */
static inline void bio_get(struct bio *bio)
{
	bio->bi_flags |= (1 << BIO_REFFED);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_cnt);
}
static inline void bio_cnt_set(struct bio *bio, unsigned int count)
{
	if (count != 1) {
		bio->bi_flags |= (1 << BIO_REFFED);
		smp_mb__before_atomic();
	}
	atomic_set(&bio->__bi_cnt, count);
}
static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
	return bio->bi_flags & (1U << bit);
}

static inline void bio_set_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags |= (1U << bit);
}

static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags &= ~(1U << bit);
}
static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return bio->bi_io_vec;
}

static inline struct page *bio_first_page_all(struct bio *bio)
{
	return bio_first_bvec_all(bio)->bv_page;
}

static inline struct folio *bio_first_folio_all(struct bio *bio)
{
	return page_folio(bio_first_page_all(bio));
}

static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return &bio->bi_io_vec[bio->bi_vcnt - 1];
}
/**
 * struct folio_iter - State for iterating all folios in a bio.
 * @folio: The current folio we're iterating.  NULL after the last folio.
 * @offset: The byte offset within the current folio.
 * @length: The number of bytes in this iteration (will not cross folio
 *	    boundary).
 */
struct folio_iter {
	struct folio *folio;
	size_t offset;
	size_t length;
	/* private: for use by the iterator */
	struct folio *_next;
	size_t _seg_count;
	int _i;
};
static inline void bio_first_folio(struct folio_iter *fi, struct bio *bio,
				   int i)
{
	struct bio_vec *bvec = bio_first_bvec_all(bio) + i;

	if (unlikely(i >= bio->bi_vcnt)) {
		fi->folio = NULL;
		return;
	}

	fi->folio = page_folio(bvec->bv_page);
	fi->offset = bvec->bv_offset +
			PAGE_SIZE * folio_page_idx(fi->folio, bvec->bv_page);
	fi->_seg_count = bvec->bv_len;
	fi->length = min(folio_size(fi->folio) - fi->offset, fi->_seg_count);
	fi->_next = folio_next(fi->folio);
	fi->_i = i;
}
static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio)
{
	fi->_seg_count -= fi->length;
	if (fi->_seg_count) {
		fi->folio = fi->_next;
		fi->offset = 0;
		fi->length = min(folio_size(fi->folio), fi->_seg_count);
		fi->_next = folio_next(fi->folio);
	} else {
		bio_first_folio(fi, bio, fi->_i + 1);
	}
}
/**
 * bio_for_each_folio_all - Iterate over each folio in a bio.
 * @fi: struct folio_iter which is updated for each folio.
 * @bio: struct bio to iterate over.
 */
#define bio_for_each_folio_all(fi, bio)				\
	for (bio_first_folio(&fi, bio, 0); fi.folio; bio_next_folio(&fi, bio))
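
/*
 * Example (an illustrative sketch): a read completion handler might mark
 * every folio covered by the bio up to date; error handling is omitted.
 *
 *	struct folio_iter fi;
 *
 *	bio_for_each_folio_all(fi, bio)
 *		folio_mark_uptodate(fi.folio);
 */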
void bio_trim(struct bio *bio, sector_t offset, sector_t size);
extern struct bio *bio_split(struct bio *bio, int sectors,
			     gfp_t gfp, struct bio_set *bs);
int bio_split_rw_at(struct bio *bio, const struct queue_limits *lim,
		unsigned *segs, unsigned max_bytes);
/**
 * bio_next_split - get next @sectors from a bio, splitting if necessary
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Return: a bio representing the next @sectors of @bio - if the bio is smaller
 * than @sectors, returns the original bio unchanged.
 */
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
					 gfp_t gfp, struct bio_set *bs)
{
	if (sectors >= bio_sectors(bio))
		return bio;

	return bio_split(bio, sectors, gfp, bs);
}
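
/*
 * Example (an illustrative sketch): a driver that can only handle
 * "max_sectors" per request might peel that much off the front, chain the
 * split to the original, and resubmit the remainder; "max_sectors" and
 * "my_bio_set" are assumed context, and @split is then at most max_sectors
 * long.
 *
 *	struct bio *split = bio_next_split(bio, max_sectors, GFP_NOIO,
 *					   &my_bio_set);
 *
 *	if (split != bio) {
 *		bio_chain(split, bio);
 *		submit_bio(bio);
 *	}
 */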
enum {
	BIOSET_NEED_BVECS	= BIT(0),
	BIOSET_NEED_RESCUER	= BIT(1),
	BIOSET_PERCPU_CACHE	= BIT(2),
};
extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
extern void bioset_exit(struct bio_set *);
extern int biovec_init_pool(mempool_t *pool, int pool_entries);

struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
			     blk_opf_t opf, gfp_t gfp_mask,
			     struct bio_set *bs);
struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask);
extern void bio_put(struct bio *);
struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
		gfp_t gfp, struct bio_set *bs);
int bio_init_clone(struct block_device *bdev, struct bio *bio,
		struct bio *bio_src, gfp_t gfp);

extern struct bio_set fs_bio_set;
static inline struct bio *bio_alloc(struct block_device *bdev,
		unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp_mask)
{
	return bio_alloc_bioset(bdev, nr_vecs, opf, gfp_mask, &fs_bio_set);
}
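
/*
 * Example (an illustrative sketch): allocating a single-page read from
 * fs_bio_set and submitting it; "bdev", "sector" and "page" are assumed
 * context, and my_end_io is a hypothetical completion handler.
 *
 *	struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_NOIO);
 *
 *	bio->bi_iter.bi_sector = sector;
 *	__bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio->bi_end_io = my_end_io;
 *	submit_bio(bio);
 */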
void submit_bio(struct bio *bio);

extern void bio_endio(struct bio *);
static inline void bio_io_error(struct bio *bio)
{
	bio->bi_status = BLK_STS_IOERR;
	bio_endio(bio);
}

static inline void bio_wouldblock_error(struct bio *bio)
{
	bio_set_flag(bio, BIO_QUIET);
	bio->bi_status = BLK_STS_AGAIN;
	bio_endio(bio);
}
/*
 * Calculate the number of bvec segments that should be allocated to fit the
 * data pointed to by @iter. If @iter is backed by a bvec, it's going to be
 * reused instead of allocating a new one.
 */
static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs)
{
	if (iov_iter_is_bvec(iter))
		return 0;
	return iov_iter_npages(iter, max_segs);
}
struct request_queue;

void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
	      unsigned short max_vecs, blk_opf_t opf);
extern void bio_uninit(struct bio *);
void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf);
void bio_chain(struct bio *, struct bio *);
int __must_check bio_add_page(struct bio *bio, struct page *page, unsigned len,
			      unsigned off);
bool __must_check bio_add_folio(struct bio *bio, struct folio *folio,
				size_t len, size_t off);
void __bio_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off);
void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len,
			  size_t off);
void bio_add_virt_nofail(struct bio *bio, void *vaddr, unsigned len);
/**
 * bio_add_max_vecs - number of bio_vecs needed to add data to a bio
 * @kaddr: kernel virtual address to add
 * @len: length in bytes to add
 *
 * Calculate how many bio_vecs need to be allocated to add the kernel virtual
 * address range in [@kaddr:@len] in the worst case.
 */
static inline unsigned int bio_add_max_vecs(void *kaddr, unsigned int len)
{
	if (is_vmalloc_addr(kaddr))
		return DIV_ROUND_UP(offset_in_page(kaddr) + len, PAGE_SIZE);
	return 1;
}
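
/*
 * Example (an illustrative sketch): sizing a kmalloc'ed bio for a kernel
 * buffer that may be vmalloc-backed; "buf" and "len" are assumed context,
 * and error handling is omitted.
 *
 *	unsigned int nr_vecs = bio_add_max_vecs(buf, len);
 *	struct bio *bio = bio_kmalloc(nr_vecs, GFP_KERNEL);
 *
 *	bio_init(bio, NULL, bio + 1, nr_vecs, REQ_OP_WRITE);
 */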
unsigned int bio_add_vmalloc_chunk(struct bio *bio, void *vaddr, unsigned len);
bool bio_add_vmalloc(struct bio *bio, void *vaddr, unsigned int len);

int submit_bio_wait(struct bio *bio);
int bdev_rw_virt(struct block_device *bdev, sector_t sector, void *data,
		size_t len, enum req_op op);
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
void bio_iov_bvec_set(struct bio *bio, const struct iov_iter *iter);
void __bio_release_pages(struct bio *bio, bool mark_dirty);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
			       struct bio *src, struct bvec_iter *src_iter);
extern void bio_copy_data(struct bio *dst, struct bio *src);
extern void bio_free_pages(struct bio *bio);
void guard_bio_eod(struct bio *bio);
void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);
static inline void zero_fill_bio(struct bio *bio)
{
	zero_fill_bio_iter(bio, bio->bi_iter);
}

static inline void bio_release_pages(struct bio *bio, bool mark_dirty)
{
	if (bio_flagged(bio, BIO_PAGE_PINNED))
		__bio_release_pages(bio, mark_dirty);
}

#define bio_dev(bio) \
	disk_devt((bio)->bi_bdev->bd_disk)
#ifdef CONFIG_BLK_CGROUP
void bio_associate_blkg(struct bio *bio);
void bio_associate_blkg_from_css(struct bio *bio,
				 struct cgroup_subsys_state *css);
void bio_clone_blkg_association(struct bio *dst, struct bio *src);
void blkcg_punt_bio_submit(struct bio *bio);
#else	/* CONFIG_BLK_CGROUP */
static inline void bio_associate_blkg(struct bio *bio) { }
static inline void bio_associate_blkg_from_css(struct bio *bio,
					       struct cgroup_subsys_state *css)
{ }
static inline void bio_clone_blkg_association(struct bio *dst,
					      struct bio *src) { }
static inline void blkcg_punt_bio_submit(struct bio *bio)
{
	submit_bio(bio);
}
#endif	/* CONFIG_BLK_CGROUP */
static inline void bio_set_dev(struct bio *bio, struct block_device *bdev)
{
	bio_clear_flag(bio, BIO_REMAPPED);
	if (bio->bi_bdev != bdev)
		bio_clear_flag(bio, BIO_BPS_THROTTLED);
	bio->bi_bdev = bdev;
	bio_associate_blkg(bio);
}
/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio.  The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
	struct bio *head;
	struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
	return bl->head == NULL;
}
static inline void bio_list_init(struct bio_list *bl)
{
	bl->head = bl->tail = NULL;
}

#define BIO_EMPTY_LIST	{ NULL, NULL }

#define bio_list_for_each(bio, bl) \
	for (bio = (bl)->head; bio; bio = bio->bi_next)
static inline unsigned bio_list_size(const struct bio_list *bl)
{
	unsigned sz = 0;
	struct bio *bio;

	bio_list_for_each(bio, bl)
		sz++;

	return sz;
}
static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;

	if (bl->tail)
		bl->tail->bi_next = bio;
	else
		bl->head = bio;

	bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = bl->head;

	if (!bl->tail)
		bl->tail = bio;

	bl->head = bio;
}
static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->tail)
		bl->tail->bi_next = bl2->head;
	else
		bl->head = bl2->head;

	bl->tail = bl2->tail;
}

static inline void bio_list_merge_init(struct bio_list *bl,
		struct bio_list *bl2)
{
	bio_list_merge(bl, bl2);
	bio_list_init(bl2);
}
static inline void bio_list_merge_head(struct bio_list *bl,
				       struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->head)
		bl2->tail->bi_next = bl->head;
	else
		bl->tail = bl2->tail;

	bl->head = bl2->head;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
	return bl->head;
}
static inline struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bl->head->bi_next;
		if (!bl->head)
			bl->tail = NULL;

		bio->bi_next = NULL;
	}

	return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	bl->head = bl->tail = NULL;

	return bio;
}
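
/*
 * Example (an illustrative sketch): a remapping driver often queues bios on
 * an on-stack bio_list while holding a lock, then submits them after
 * dropping it; "bio1", "bio2" and the locking are assumed context.
 *
 *	struct bio_list list;
 *	struct bio *bio;
 *
 *	bio_list_init(&list);
 *	bio_list_add(&list, bio1);
 *	bio_list_add(&list, bio2);
 *
 *	while ((bio = bio_list_pop(&list)))
 *		submit_bio(bio);
 */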
/*
 * Increment chain count for the bio. Make sure the CHAIN flag update
 * is visible before the raised count.
 */
static inline void bio_inc_remaining(struct bio *bio)
{
	bio_set_flag(bio, BIO_CHAIN);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_remaining);
}
/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2

struct bio_set {
	struct kmem_cache *bio_slab;
	unsigned int front_pad;

	/*
	 * per-cpu bio alloc cache
	 */
	struct bio_alloc_cache __percpu *cache;

	mempool_t bio_pool;
	mempool_t bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t bio_integrity_pool;
	mempool_t bvec_integrity_pool;
#endif

	unsigned int back_pad;
	/*
	 * Deadlock avoidance for stacking block drivers: see comments in
	 * bio_alloc_bioset() for details
	 */
	spinlock_t		rescue_lock;
	struct bio_list		rescue_list;
	struct work_struct	rescue_work;
	struct workqueue_struct	*rescue_workqueue;

	/*
	 * Hot un-plug notifier for the per-cpu cache, if used
	 */
	struct hlist_node cpuhp_dead;
};
static inline bool bioset_initialized(struct bio_set *bs)
{
	return bs->bio_slab != NULL;
}
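
/*
 * Example (an illustrative sketch): a driver typically embeds a bio_set in
 * its private state and initializes it once at setup time; "my_bio_set" is
 * a hypothetical name and error handling is abbreviated.
 *
 *	static struct bio_set my_bio_set;
 *
 *	int ret = bioset_init(&my_bio_set, BIO_POOL_SIZE, 0,
 *			      BIOSET_NEED_BVECS);
 *	if (ret)
 *		return ret;
 */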
/*
 * Mark a bio as polled. Note that for async polled IO, the caller must
 * expect -EWOULDBLOCK if we cannot allocate a request (or other resources).
 * We cannot block waiting for requests on polled IO, as those completions
 * must be found by the caller. This is different than IRQ driven IO, where
 * it's safe to wait for IO to complete.
 */
static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
{
	bio->bi_opf |= REQ_POLLED;
	if (kiocb->ki_flags & IOCB_NOWAIT)
		bio->bi_opf |= REQ_NOWAIT;
}

static inline void bio_clear_polled(struct bio *bio)
{
	bio->bi_opf &= ~REQ_POLLED;
}
/**
 * bio_is_zone_append - is this a zone append bio?
 * @bio:	bio to check
 *
 * Check if @bio is a zone append operation.  Core block layer code and end_io
 * handlers must use this instead of an open coded REQ_OP_ZONE_APPEND check
 * because the block layer can rewrite REQ_OP_ZONE_APPEND to REQ_OP_WRITE if
 * it is not natively supported.
 */
static inline bool bio_is_zone_append(struct bio *bio)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED))
		return false;
	return bio_op(bio) == REQ_OP_ZONE_APPEND ||
		bio_flagged(bio, BIO_EMULATES_ZONE_APPEND);
}
struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
		unsigned int nr_pages, blk_opf_t opf, gfp_t gfp);
struct bio *bio_chain_and_submit(struct bio *prev, struct bio *new);

struct bio *blk_alloc_discard_bio(struct block_device *bdev,
		sector_t *sector, sector_t *nr_sects, gfp_t gfp_mask);

#endif /* __LINUX_BIO_H */