return 0;
}
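+/*
+ * Allocate, set up and submit one bio for a direct I/O request.
+ *
+ * Returns the number of bytes submitted, which may be less than the
+ * length of the current mapping, or a negative errno. On failure the
+ * caller is responsible for any sub-block tail zeroing.
+ */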
+static ssize_t iomap_dio_bio_iter_one(struct iomap_iter *iter,
+ struct iomap_dio *dio, loff_t pos, unsigned int alignment,
+ blk_opf_t op)
+{
+ struct bio *bio;
+ ssize_t ret;
+
+ bio = iomap_dio_alloc_bio(iter, dio,
+ bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS),
+ op);
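+ /* Associate the bio with the inode's encryption context, if any. */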
+ fscrypt_set_bio_crypt_ctx(bio, iter->inode,
+ pos >> iter->inode->i_blkbits, GFP_KERNEL);
+ bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
+ bio->bi_write_hint = iter->inode->i_write_hint;
+ bio->bi_ioprio = dio->iocb->ki_ioprio;
+ bio->bi_private = dio;
+ bio->bi_end_io = iomap_dio_bio_end_io;
+
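+ /* Pin the user memory into the bio, respecting the length alignment. */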
+ ret = bio_iov_iter_get_pages(bio, dio->submit.iter, alignment - 1);
+ if (unlikely(ret))
+ goto out_put_bio;
+ ret = bio->bi_iter.bi_size;
+
+ /*
+ * An atomic write bio must cover the complete length. If it doesn't,
+ * error out and let the caller zero out the sub-block tail, just as
+ * it does when bio_iov_iter_get_pages() fails above.
+ */
+ if ((op & REQ_ATOMIC) && WARN_ON_ONCE(ret != iomap_length(iter))) {
+ ret = -EINVAL;
+ goto out_put_bio;
+ }
+
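+ /*
+ * Charge writes to the task's I/O accounting; for dirty-tracked
+ * reads, mark the pinned pages dirty before the bio is submitted.
+ */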
+ if (dio->flags & IOMAP_DIO_WRITE)
+ task_io_account_write(ret);
+ else if (dio->flags & IOMAP_DIO_DIRTY)
+ bio_set_pages_dirty(bio);
+
+ /* We can only poll for single bio I/Os. */
+ if (iov_iter_count(dio->submit.iter))
+ dio->iocb->ki_flags &= ~IOCB_HIPRI;
+ iomap_dio_submit_bio(iter, dio, bio, pos);
+ return ret;
+
+out_put_bio:
+ bio_put(bio);
+ return ret;
+}
+
static int iomap_dio_bio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
{
const struct iomap *iomap = &iter->iomap;
const loff_t length = iomap_length(iter);
loff_t pos = iter->pos;
blk_opf_t bio_opf = REQ_SYNC | REQ_IDLE;
- struct bio *bio;
bool need_zeroout = false;
- int ret = 0;
u64 copied = 0;
size_t orig_count;
unsigned int alignment;
+ ssize_t ret = 0;
/*
* File systems that write out of place and always allocate new blocks
}
do {
- size_t n;
-
/*
* If completions already occurred and reported errors, give up now and
* don't bother submitting more bios.
*/
- if (unlikely(data_race(dio->error))) {
- ret = 0;
+ if (unlikely(data_race(dio->error)))
goto out;
- }
- bio = iomap_dio_alloc_bio(iter, dio,
- bio_iov_vecs_to_alloc(dio->submit.iter,
- BIO_MAX_VECS), bio_opf);
- fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
- GFP_KERNEL);
- bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
- bio->bi_write_hint = inode->i_write_hint;
- bio->bi_ioprio = dio->iocb->ki_ioprio;
- bio->bi_private = dio;
- bio->bi_end_io = iomap_dio_bio_end_io;
-
- ret = bio_iov_iter_get_pages(bio, dio->submit.iter,
- alignment - 1);
- if (unlikely(ret)) {
+ ret = iomap_dio_bio_iter_one(iter, dio, pos, alignment, bio_opf);
+ if (unlikely(ret < 0)) {
/*
* We have to stop part way through an IO. We must fall
- * through to the sub-block tail zeroing here, otherwise
+ * through to the sub-block tail zeroing below, otherwise
* this short IO may expose stale data in the tail of
* the block we haven't written data to.
*/
- bio_put(bio);
- goto zero_tail;
- }
-
- n = bio->bi_iter.bi_size;
- if (WARN_ON_ONCE((bio_opf & REQ_ATOMIC) && n != length)) {
- /*
- * An atomic write bio must cover the complete length,
- * which it doesn't, so error. We may need to zero out
- * the tail (complete FS block), similar to when
- * bio_iov_iter_get_pages() returns an error, above.
- */
- ret = -EINVAL;
- bio_put(bio);
- goto zero_tail;
+ break;
}
- if (dio->flags & IOMAP_DIO_WRITE)
- task_io_account_write(n);
- else if (dio->flags & IOMAP_DIO_DIRTY)
- bio_set_pages_dirty(bio);
-
- dio->size += n;
- copied += n;
-
- /*
- * We can only poll for single bio I/Os.
- */
- if (iov_iter_count(dio->submit.iter))
- dio->iocb->ki_flags &= ~IOCB_HIPRI;
- iomap_dio_submit_bio(iter, dio, bio, pos);
- pos += n;
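+ /*
+ * Advance by the bytes just submitted, and clear ret so that a
+ * clean loop exit reports success.
+ */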
+ dio->size += ret;
+ copied += ret;
+ pos += ret;
+ ret = 0;
} while (iov_iter_count(dio->submit.iter));
/*
* the block tail in the latter case, we can expose stale data via mmap
* reads of the EOF block.
*/
-zero_tail:
if (need_zeroout ||
((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) {
/* zero out from the end of the write to the end of the block */