{
struct iomap_dio *dio = bio->bi_private;
- if (dio->flags & IOMAP_DIO_USER_BACKED) {
+ if (dio->flags & IOMAP_DIO_BOUNCE) {
+ bio_iov_iter_unbounce(bio, !!dio->error,
+ dio->flags & IOMAP_DIO_USER_BACKED);
+ bio_put(bio);
+ } else if (dio->flags & IOMAP_DIO_USER_BACKED) {
bio_check_pages_dirty(bio);
} else {
bio_release_pages(bio, false);
struct iomap_dio *dio, loff_t pos, unsigned int alignment,
blk_opf_t op)
{
+ unsigned int nr_vecs;
struct bio *bio;
ssize_t ret;
- bio = iomap_dio_alloc_bio(iter, dio,
- bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS),
- op);
+ if (dio->flags & IOMAP_DIO_BOUNCE)
+ nr_vecs = bio_iov_bounce_nr_vecs(dio->submit.iter, op);
+ else
+ nr_vecs = bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS);
+
+ bio = iomap_dio_alloc_bio(iter, dio, nr_vecs, op);
fscrypt_set_bio_crypt_ctx(bio, iter->inode,
pos >> iter->inode->i_blkbits, GFP_KERNEL);
bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
bio->bi_private = dio;
bio->bi_end_io = iomap_dio_bio_end_io;
- ret = bio_iov_iter_get_pages(bio, dio->submit.iter, alignment - 1);
+ if (dio->flags & IOMAP_DIO_BOUNCE)
+ ret = bio_iov_iter_bounce(bio, dio->submit.iter);
+ else
+ ret = bio_iov_iter_get_pages(bio, dio->submit.iter,
+ alignment - 1);
if (unlikely(ret))
goto out_put_bio;
ret = bio->bi_iter.bi_size;
if (dio->flags & IOMAP_DIO_WRITE)
task_io_account_write(ret);
- else if (dio->flags & IOMAP_DIO_USER_BACKED)
+ else if ((dio->flags & IOMAP_DIO_USER_BACKED) &&
+ !(dio->flags & IOMAP_DIO_BOUNCE))
bio_set_pages_dirty(bio);
/*
dio->i_size = i_size_read(inode);
dio->dops = dops;
dio->error = 0;
- dio->flags = 0;
+ dio->flags = dio_flags & (IOMAP_DIO_FSBLOCK_ALIGNED | IOMAP_DIO_BOUNCE);
dio->done_before = done_before;
dio->submit.iter = iter;
if (iocb->ki_flags & IOCB_NOWAIT)
iomi.flags |= IOMAP_NOWAIT;
- if (dio_flags & IOMAP_DIO_FSBLOCK_ALIGNED)
- dio->flags |= IOMAP_DIO_FSBLOCK_ALIGNED;
-
if (iov_iter_rw(iter) == READ) {
if (iomi.pos >= dio->i_size)
goto out_free_dio;
*/
#define IOMAP_DIO_FSBLOCK_ALIGNED (1 << 3)
+/*
+ * Use a bounce buffer instead of zero-copy access to the user pages.
+ *
+ * This is needed if the device needs stable data to checksum or generate
+ * parity. The file system must hook into the I/O submission and offload
+ * completions to user context for reads when this is set.
+ */
+#define IOMAP_DIO_BOUNCE (1 << 4)
+
ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
unsigned int dio_flags, void *private, size_t done_before);