iomap: split out the per-bio logic from iomap_dio_bio_iter
author    Christoph Hellwig <hch@lst.de>
          Mon, 26 Jan 2026 05:53:40 +0000 (06:53 +0100)
committer Jens Axboe <axboe@kernel.dk>
          Wed, 28 Jan 2026 12:16:40 +0000 (05:16 -0700)
Factor out a separate helper that builds and submits a single bio.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Tested-by: Anuj Gupta <anuj20.g@samsung.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
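
For orientation, after this change the submission loop in iomap_dio_bio_iter reduces to roughly the following (a condensed sketch of the hunks below, not the verbatim kernel source; the tail-zeroing and cleanup paths are elided):

	do {
		/* Stop early if a completed bio already reported an error. */
		if (unlikely(data_race(dio->error)))
			goto out;

		/* Build and submit one bio; returns bytes queued or -errno. */
		ret = iomap_dio_bio_iter_one(iter, dio, pos, alignment, bio_opf);
		if (unlikely(ret < 0))
			break;	/* fall through to sub-block tail zeroing */

		dio->size += ret;
		copied += ret;
		pos += ret;
		ret = 0;
	} while (iov_iter_count(dio->submit.iter));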
fs/iomap/direct-io.c

index de03bc7cf4ed0b46df5ed3f11a3ac02a6690f29e..bb79519dec657517988857bc715b8de0cebeb9c5 100644
@@ -302,6 +302,56 @@ static int iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
        return 0;
 }
 
+static ssize_t iomap_dio_bio_iter_one(struct iomap_iter *iter,
+               struct iomap_dio *dio, loff_t pos, unsigned int alignment,
+               blk_opf_t op)
+{
+       struct bio *bio;
+       ssize_t ret;
+
+       bio = iomap_dio_alloc_bio(iter, dio,
+                       bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS),
+                       op);
+       fscrypt_set_bio_crypt_ctx(bio, iter->inode,
+                       pos >> iter->inode->i_blkbits, GFP_KERNEL);
+       bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
+       bio->bi_write_hint = iter->inode->i_write_hint;
+       bio->bi_ioprio = dio->iocb->ki_ioprio;
+       bio->bi_private = dio;
+       bio->bi_end_io = iomap_dio_bio_end_io;
+
+       ret = bio_iov_iter_get_pages(bio, dio->submit.iter, alignment - 1);
+       if (unlikely(ret))
+               goto out_put_bio;
+       ret = bio->bi_iter.bi_size;
+
+       /*
+        * An atomic write bio must cover the complete length.  If it doesn't,
+        * error out.
+        */
+       if ((op & REQ_ATOMIC) && WARN_ON_ONCE(ret != iomap_length(iter))) {
+               ret = -EINVAL;
+               goto out_put_bio;
+       }
+
+       if (dio->flags & IOMAP_DIO_WRITE)
+               task_io_account_write(ret);
+       else if (dio->flags & IOMAP_DIO_DIRTY)
+               bio_set_pages_dirty(bio);
+
+       /*
+        * We can only poll for single bio I/Os.
+        */
+       if (iov_iter_count(dio->submit.iter))
+               dio->iocb->ki_flags &= ~IOCB_HIPRI;
+       iomap_dio_submit_bio(iter, dio, bio, pos);
+       return ret;
+
+out_put_bio:
+       bio_put(bio);
+       return ret;
+}
+
 static int iomap_dio_bio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
 {
        const struct iomap *iomap = &iter->iomap;
@@ -310,12 +360,11 @@ static int iomap_dio_bio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
        const loff_t length = iomap_length(iter);
        loff_t pos = iter->pos;
        blk_opf_t bio_opf = REQ_SYNC | REQ_IDLE;
-       struct bio *bio;
        bool need_zeroout = false;
-       int ret = 0;
        u64 copied = 0;
        size_t orig_count;
        unsigned int alignment;
+       ssize_t ret = 0;
 
        /*
         * File systems that write out of place and always allocate new blocks
@@ -441,68 +490,27 @@ static int iomap_dio_bio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
        }
 
        do {
-               size_t n;
-
                /*
                 * If completions already occurred and reported errors, give up now and
                 * don't bother submitting more bios.
                 */
-               if (unlikely(data_race(dio->error))) {
-                       ret = 0;
+               if (unlikely(data_race(dio->error)))
                        goto out;
-               }
 
-               bio = iomap_dio_alloc_bio(iter, dio,
-                               bio_iov_vecs_to_alloc(dio->submit.iter,
-                                               BIO_MAX_VECS), bio_opf);
-               fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
-                                         GFP_KERNEL);
-               bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
-               bio->bi_write_hint = inode->i_write_hint;
-               bio->bi_ioprio = dio->iocb->ki_ioprio;
-               bio->bi_private = dio;
-               bio->bi_end_io = iomap_dio_bio_end_io;
-
-               ret = bio_iov_iter_get_pages(bio, dio->submit.iter,
-                                            alignment - 1);
-               if (unlikely(ret)) {
+               ret = iomap_dio_bio_iter_one(iter, dio, pos, alignment, bio_opf);
+               if (unlikely(ret < 0)) {
                        /*
                         * We have to stop part way through an IO. We must fall
                         * through to the sub-block tail zeroing here, otherwise
                         * this short IO may expose stale data in the tail of
                         * the block we haven't written data to.
                         */
-                       bio_put(bio);
-                       goto zero_tail;
-               }
-
-               n = bio->bi_iter.bi_size;
-               if (WARN_ON_ONCE((bio_opf & REQ_ATOMIC) && n != length)) {
-                       /*
-                        * An atomic write bio must cover the complete length,
-                        * which it doesn't, so error. We may need to zero out
-                        * the tail (complete FS block), similar to when
-                        * bio_iov_iter_get_pages() returns an error, above.
-                        */
-                       ret = -EINVAL;
-                       bio_put(bio);
-                       goto zero_tail;
+                       break;
                }
-               if (dio->flags & IOMAP_DIO_WRITE)
-                       task_io_account_write(n);
-               else if (dio->flags & IOMAP_DIO_DIRTY)
-                       bio_set_pages_dirty(bio);
-
-               dio->size += n;
-               copied += n;
-
-               /*
-                * We can only poll for single bio I/Os.
-                */
-               if (iov_iter_count(dio->submit.iter))
-                       dio->iocb->ki_flags &= ~IOCB_HIPRI;
-               iomap_dio_submit_bio(iter, dio, bio, pos);
-               pos += n;
+               dio->size += ret;
+               copied += ret;
+               pos += ret;
+               ret = 0;
        } while (iov_iter_count(dio->submit.iter));
 
        /*
@@ -511,7 +519,6 @@ static int iomap_dio_bio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
         * the block tail in the latter case, we can expose stale data via mmap
         * reads of the EOF block.
         */
-zero_tail:
        if (need_zeroout ||
            ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) {
                /* zero out from the end of the write to the end of the block */