From 12da89e8844ae16e86b75a32b34a4f0b0525f453 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 26 Jan 2026 06:53:34 +0100 Subject: [PATCH] block: open code bio_add_page and fix handling of mismatching P2P ranges bio_add_page fails to add data to the bio when mixing P2P with non-P2P ranges, or ranges that map to different P2P providers. In that case it will trigger that WARN_ON and return an error up the chain instead of simply starting a new bio as intended. Fix this by open coding bio_add_page and handling this case explicitly. While doing so, stop merging physically contiguous data that belongs to multiple folios. While this merge could lead to more efficient bio packing in some cases, dropping it will allow removing the handling of this corner case in other places and make the code more robust. Signed-off-by: Christoph Hellwig Reviewed-by: Anuj Gupta Reviewed-by: Damien Le Moal Reviewed-by: Johannes Thumshirn Reviewed-by: Darrick J. Wong Tested-by: Anuj Gupta Reviewed-by: Martin K. Petersen Signed-off-by: Jens Axboe --- block/bio.c | 37 +++++++++++++------------------ 1 file changed, 13 insertions(+), 24 deletions(-) diff --git a/block/bio.c b/block/bio.c index d633e80d821fb..4591f0ba90f55 100644 --- a/block/bio.c +++ b/block/bio.c @@ -1216,7 +1216,7 @@ static unsigned int get_contig_folio_len(struct page **pages, * For a multi-segment *iter, this function only adds pages from the next * non-empty segment of the iov iterator. 
*/ -static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) +static ssize_t __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) { iov_iter_extraction_t extraction_flags = 0; unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt; @@ -1226,7 +1226,6 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) ssize_t size; unsigned int i = 0; size_t offset, left, len; - int ret = 0; /* * Move page array up in the allocated memory for the bio vecs as far as @@ -1247,37 +1246,26 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) nr_pages = DIV_ROUND_UP(offset + size, PAGE_SIZE); for (left = size; left > 0; left -= len) { - unsigned int old_vcnt = bio->bi_vcnt; unsigned int nr_to_add; - len = get_contig_folio_len(&pages[i], &nr_to_add, left, offset); - if (!bio_add_page(bio, pages[i], len, offset)) { - WARN_ON_ONCE(1); - ret = -EINVAL; - goto out; - } + if (bio->bi_vcnt > 0) { + struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1]; - if (bio_flagged(bio, BIO_PAGE_PINNED)) { - /* - * We're adding another fragment of a page that already - * was part of the last segment. Undo our pin as the - * page was pinned when an earlier fragment of it was - * added to the bio and __bio_release_pages expects a - * single pin per page. 
- */ - if (offset && bio->bi_vcnt == old_vcnt) - unpin_user_folio(page_folio(pages[i]), 1); + if (!zone_device_pages_have_same_pgmap(prev->bv_page, + pages[i])) + break; } + + len = get_contig_folio_len(&pages[i], &nr_to_add, left, offset); + __bio_add_page(bio, pages[i], len, offset); i += nr_to_add; offset = 0; } iov_iter_revert(iter, left); -out: while (i < nr_pages) bio_release_page(bio, pages[i++]); - - return ret; + return size - left; } /* @@ -1337,7 +1325,7 @@ static int bio_iov_iter_align_down(struct bio *bio, struct iov_iter *iter, int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter, unsigned len_align_mask) { - int ret = 0; + ssize_t ret; if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED))) return -EIO; @@ -1350,9 +1338,10 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter, if (iov_iter_extract_will_pin(iter)) bio_set_flag(bio, BIO_PAGE_PINNED); + do { ret = __bio_iov_iter_get_pages(bio, iter); - } while (!ret && iov_iter_count(iter) && !bio_full(bio, 0)); + } while (ret > 0 && iov_iter_count(iter) && !bio_full(bio, 0)); if (bio->bi_vcnt) return bio_iov_iter_align_down(bio, iter, len_align_mask); -- 2.47.3