struct dax_device *dax_dev;
void *inline_data;
void *private;
- const struct iomap_folio_ops *folio_ops;
u64 validity_cookie;
};
<https://lore.kernel.org/all/20180619164137.13720-7-hch@lst.de/>`_.
This value will be passed unchanged to ``->iomap_end``.
- * ``folio_ops`` will be covered in the section on pagecache operations.
-
* ``validity_cookie`` is a magic freshness value set by the filesystem
that should be used to detect stale mappings.
For pagecache operations this is critical for correct operation
* ``bmap``
* ``swap_activate``
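A filesystem that advances an internal sequence number whenever it
changes its extent map can implement staleness detection by comparing
that counter against ``validity_cookie``.  A minimal sketch of the
idea, using hypothetical ``example_``-prefixed names (the real
``xfs_iomap_valid`` appears later in this patch):

.. code-block:: c

 /* hypothetical helper returning the current mapping sequence number */
 u64 example_inode_seq(struct inode *inode);

 static bool example_iomap_valid(struct inode *inode,
 		const struct iomap *iomap)
 {
 	/* ->iomap_begin sampled the same counter into validity_cookie */
 	return iomap->validity_cookie == example_inode_seq(inode);
 }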
-``struct iomap_folio_ops``
+``struct iomap_write_ops``
--------------------------
-The ``->iomap_begin`` function for pagecache operations may set the
-``struct iomap::folio_ops`` field to an ops structure to override
-default behaviors of iomap:
-
+Callers of the pagecache operations can pass an optional
+``struct iomap_write_ops`` pointer to override default behaviors of
+iomap:
+
.. code-block:: c
- struct iomap_folio_ops {
+ struct iomap_write_ops {
struct folio *(*get_folio)(struct iomap_iter *iter, loff_t pos,
unsigned len);
void (*put_folio)(struct inode *inode, loff_t pos, unsigned copied,
static ssize_t blkdev_buffered_write(struct kiocb *iocb, struct iov_iter *from)
{
- return iomap_file_buffered_write(iocb, from, &blkdev_iomap_ops, NULL);
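+ /* block devices need neither write_ops overrides nor private data */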
+ return iomap_file_buffered_write(iocb, from, &blkdev_iomap_ops, NULL,
+ NULL);
}
/*
gfs2_iomap_get_folio(struct iomap_iter *iter, loff_t pos, unsigned len)
{
struct inode *inode = iter->inode;
+ struct gfs2_inode *ip = GFS2_I(inode);
unsigned int blockmask = i_blocksize(inode) - 1;
struct gfs2_sbd *sdp = GFS2_SB(inode);
unsigned int blocks;
struct folio *folio;
int status;
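+ /*
+ * Only journaled-data and stuffed (inline) files need a transaction
+ * around the folio; plain writes can use the default lookup.
+ */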
+ if (!gfs2_is_jdata(ip) && !gfs2_is_stuffed(ip))
+ return iomap_get_folio(iter, pos, len);
+
blocks = ((pos & blockmask) + len + blockmask) >> inode->i_blkbits;
status = gfs2_trans_begin(sdp, RES_DINODE + blocks, 0);
if (status)
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
- if (!gfs2_is_stuffed(ip))
+ if (gfs2_is_jdata(ip) && !gfs2_is_stuffed(ip))
gfs2_trans_add_databufs(ip->i_gl, folio,
offset_in_folio(folio, pos),
copied);
folio_unlock(folio);
folio_put(folio);
- if (tr->tr_num_buf_new)
- __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
-
- gfs2_trans_end(sdp);
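+ /* only journaled or stuffed writes started a transaction in ->get_folio */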
+ if (gfs2_is_jdata(ip) || gfs2_is_stuffed(ip)) {
+ if (tr->tr_num_buf_new)
+ __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+ gfs2_trans_end(sdp);
+ }
}
-static const struct iomap_folio_ops gfs2_iomap_folio_ops = {
+const struct iomap_write_ops gfs2_iomap_write_ops = {
.get_folio = gfs2_iomap_get_folio,
.put_folio = gfs2_iomap_put_folio,
};
gfs2_trans_end(sdp);
}
- if (gfs2_is_stuffed(ip) || gfs2_is_jdata(ip))
- iomap->folio_ops = &gfs2_iomap_folio_ops;
return 0;
out_trans_end:
return 0;
length = min(length, inode->i_size - from);
return iomap_zero_range(inode, from, length, NULL, &gfs2_iomap_ops,
- NULL);
+ &gfs2_iomap_write_ops, NULL);
}
#define GFS2_JTRUNC_REVOKES 8192
}
extern const struct iomap_ops gfs2_iomap_ops;
+extern const struct iomap_write_ops gfs2_iomap_write_ops;
extern const struct iomap_writeback_ops gfs2_writeback_ops;
int gfs2_unstuff_dinode(struct gfs2_inode *ip);
}
pagefault_disable();
- ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops, NULL);
+ ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops,
+ &gfs2_iomap_write_ops, NULL);
pagefault_enable();
if (ret > 0)
written += ret;
return 0;
}
-static struct folio *__iomap_get_folio(struct iomap_iter *iter, size_t len)
+static struct folio *__iomap_get_folio(struct iomap_iter *iter,
+ const struct iomap_write_ops *write_ops, size_t len)
{
- const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
loff_t pos = iter->pos;
if (!mapping_large_folio_support(iter->inode->i_mapping))
len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));
- if (folio_ops && folio_ops->get_folio)
- return folio_ops->get_folio(iter, pos, len);
- else
- return iomap_get_folio(iter, pos, len);
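+ /* let the filesystem override folio lookup, e.g. to wrap it in a transaction */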
+ if (write_ops && write_ops->get_folio)
+ return write_ops->get_folio(iter, pos, len);
+ return iomap_get_folio(iter, pos, len);
}
-static void __iomap_put_folio(struct iomap_iter *iter, size_t ret,
+static void __iomap_put_folio(struct iomap_iter *iter,
+ const struct iomap_write_ops *write_ops, size_t ret,
struct folio *folio)
{
- const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
loff_t pos = iter->pos;
- if (folio_ops && folio_ops->put_folio) {
- folio_ops->put_folio(iter->inode, pos, ret, folio);
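+ /* ->put_folio, if provided, takes over unlocking and putting the folio */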
+ if (write_ops && write_ops->put_folio) {
+ write_ops->put_folio(iter->inode, pos, ret, folio);
} else {
folio_unlock(folio);
folio_put(folio);
* offset, and length. Callers can optionally pass a max length *plen,
* otherwise init to zero.
*/
-static int iomap_write_begin(struct iomap_iter *iter, struct folio **foliop,
+static int iomap_write_begin(struct iomap_iter *iter,
+ const struct iomap_write_ops *write_ops, struct folio **foliop,
size_t *poffset, u64 *plen)
{
- const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
const struct iomap *srcmap = iomap_iter_srcmap(iter);
loff_t pos = iter->pos;
u64 len = min_t(u64, SIZE_MAX, iomap_length(iter));
if (fatal_signal_pending(current))
return -EINTR;
- folio = __iomap_get_folio(iter, len);
+ folio = __iomap_get_folio(iter, write_ops, len);
if (IS_ERR(folio))
return PTR_ERR(folio);
* could do the wrong thing here (zero a page range incorrectly or fail
* to zero) and corrupt data.
*/
- if (folio_ops && folio_ops->iomap_valid) {
- bool iomap_valid = folio_ops->iomap_valid(iter->inode,
+ if (write_ops && write_ops->iomap_valid) {
+ bool iomap_valid = write_ops->iomap_valid(iter->inode,
&iter->iomap);
if (!iomap_valid) {
iter->iomap.flags |= IOMAP_F_STALE;
return 0;
out_unlock:
- __iomap_put_folio(iter, 0, folio);
-
+ __iomap_put_folio(iter, write_ops, 0, folio);
return status;
}
return __iomap_write_end(iter->inode, pos, len, copied, folio);
}
-static int iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
+static int iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i,
+ const struct iomap_write_ops *write_ops)
{
ssize_t total_written = 0;
int status = 0;
break;
}
- status = iomap_write_begin(iter, &folio, &offset, &bytes);
+ status = iomap_write_begin(iter, write_ops, &folio, &offset,
+ &bytes);
if (unlikely(status)) {
iomap_write_failed(iter->inode, iter->pos, bytes);
break;
i_size_write(iter->inode, pos + written);
iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
}
- __iomap_put_folio(iter, written, folio);
+ __iomap_put_folio(iter, write_ops, written, folio);
if (old_size < pos)
pagecache_isize_extended(iter->inode, old_size, pos);
ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
- const struct iomap_ops *ops, void *private)
+ const struct iomap_ops *ops,
+ const struct iomap_write_ops *write_ops, void *private)
{
struct iomap_iter iter = {
.inode = iocb->ki_filp->f_mapping->host,
iter.flags |= IOMAP_DONTCACHE;
while ((ret = iomap_iter(&iter, ops)) > 0)
- iter.status = iomap_write_iter(&iter, i);
+ iter.status = iomap_write_iter(&iter, i, write_ops);
if (unlikely(iter.pos == iocb->ki_pos))
return ret;
}
EXPORT_SYMBOL_GPL(iomap_write_delalloc_release);
-static int iomap_unshare_iter(struct iomap_iter *iter)
+static int iomap_unshare_iter(struct iomap_iter *iter,
+ const struct iomap_write_ops *write_ops)
{
struct iomap *iomap = &iter->iomap;
u64 bytes = iomap_length(iter);
bool ret;
bytes = min_t(u64, SIZE_MAX, bytes);
- status = iomap_write_begin(iter, &folio, &offset, &bytes);
+ status = iomap_write_begin(iter, write_ops, &folio, &offset,
+ &bytes);
if (unlikely(status))
return status;
if (iomap->flags & IOMAP_F_STALE)
break;
ret = iomap_write_end(iter, bytes, bytes, folio);
- __iomap_put_folio(iter, bytes, folio);
+ __iomap_put_folio(iter, write_ops, bytes, folio);
if (WARN_ON_ONCE(!ret))
return -EIO;
int
iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
- const struct iomap_ops *ops)
+ const struct iomap_ops *ops,
+ const struct iomap_write_ops *write_ops)
{
struct iomap_iter iter = {
.inode = inode,
iter.len = min(len, size - pos);
while ((ret = iomap_iter(&iter, ops)) > 0)
- iter.status = iomap_unshare_iter(&iter);
+ iter.status = iomap_unshare_iter(&iter, write_ops);
return ret;
}
EXPORT_SYMBOL_GPL(iomap_file_unshare);
return filemap_write_and_wait_range(mapping, i->pos, end);
}
-static int iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
+static int iomap_zero_iter(struct iomap_iter *iter, bool *did_zero,
+ const struct iomap_write_ops *write_ops)
{
u64 bytes = iomap_length(iter);
int status;
bool ret;
bytes = min_t(u64, SIZE_MAX, bytes);
- status = iomap_write_begin(iter, &folio, &offset, &bytes);
+ status = iomap_write_begin(iter, write_ops, &folio, &offset,
+ &bytes);
if (status)
return status;
if (iter->iomap.flags & IOMAP_F_STALE)
folio_mark_accessed(folio);
ret = iomap_write_end(iter, bytes, bytes, folio);
- __iomap_put_folio(iter, bytes, folio);
+ __iomap_put_folio(iter, write_ops, bytes, folio);
if (WARN_ON_ONCE(!ret))
return -EIO;
int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
- const struct iomap_ops *ops, void *private)
+ const struct iomap_ops *ops,
+ const struct iomap_write_ops *write_ops, void *private)
{
struct iomap_iter iter = {
.inode = inode,
filemap_range_needs_writeback(mapping, pos, pos + plen - 1)) {
iter.len = plen;
while ((ret = iomap_iter(&iter, ops)) > 0)
- iter.status = iomap_zero_iter(&iter, did_zero);
+ iter.status = iomap_zero_iter(&iter, did_zero,
+ write_ops);
iter.len = len - (iter.pos - pos);
if (ret || !iter.len)
continue;
}
- iter.status = iomap_zero_iter(&iter, did_zero);
+ iter.status = iomap_zero_iter(&iter, did_zero, write_ops);
}
return ret;
}
int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
- const struct iomap_ops *ops, void *private)
+ const struct iomap_ops *ops,
+ const struct iomap_write_ops *write_ops, void *private)
{
unsigned int blocksize = i_blocksize(inode);
unsigned int off = pos & (blocksize - 1);
if (!off)
return 0;
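+ /* e.g. a 4096-byte block size and pos == 10000 give off == 1808, zeroing 2288 bytes */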
return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops,
- private);
+ write_ops, private);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);
trace_xfs_file_buffered_write(iocb, from);
ret = iomap_file_buffered_write(iocb, from,
- &xfs_buffered_write_iomap_ops, NULL);
+ &xfs_buffered_write_iomap_ops, &xfs_iomap_write_ops,
+ NULL);
/*
* If we hit a space limit, try to free up some lingering preallocated
retry:
trace_xfs_file_buffered_write(iocb, from);
ret = iomap_file_buffered_write(iocb, from,
- &xfs_buffered_write_iomap_ops, &ac);
+ &xfs_buffered_write_iomap_ops, &xfs_iomap_write_ops,
+ &ac);
if (ret == -ENOSPC && !cleared_space) {
/*
* Kick off writeback to convert delalloc space and release the
{
struct xfs_inode *ip = XFS_I(inode);
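+ /* ->iomap_valid is called for hole mappings too; treat them as always valid */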
+ if (iomap->type == IOMAP_HOLE)
+ return true;
+
if (iomap->validity_cookie !=
xfs_iomap_inode_sequence(ip, iomap->flags)) {
trace_xfs_iomap_invalid(ip, iomap);
return false;
}
-static const struct iomap_folio_ops xfs_iomap_folio_ops = {
+const struct iomap_write_ops xfs_iomap_write_ops = {
.iomap_valid = xfs_iomap_valid,
};
iomap->flags |= IOMAP_F_DIRTY;
iomap->validity_cookie = sequence_cookie;
- iomap->folio_ops = &xfs_iomap_folio_ops;
return 0;
}
return dax_zero_range(inode, pos, len, did_zero,
&xfs_dax_write_iomap_ops);
return iomap_zero_range(inode, pos, len, did_zero,
- &xfs_buffered_write_iomap_ops, ac);
+ &xfs_buffered_write_iomap_ops, &xfs_iomap_write_ops,
+ ac);
}
int
return dax_truncate_page(inode, pos, did_zero,
&xfs_dax_write_iomap_ops);
return iomap_truncate_page(inode, pos, did_zero,
- &xfs_buffered_write_iomap_ops, ac);
+ &xfs_buffered_write_iomap_ops, &xfs_iomap_write_ops,
+ ac);
}
extern const struct iomap_ops xfs_xattr_iomap_ops;
extern const struct iomap_ops xfs_dax_write_iomap_ops;
extern const struct iomap_ops xfs_atomic_write_cow_iomap_ops;
+extern const struct iomap_write_ops xfs_iomap_write_ops;
#endif /* __XFS_IOMAP_H__*/
&xfs_dax_write_iomap_ops);
else
error = iomap_file_unshare(inode, offset, len,
- &xfs_buffered_write_iomap_ops);
+ &xfs_buffered_write_iomap_ops,
+ &xfs_iomap_write_ops);
if (error)
goto out;
if (ret <= 0)
goto inode_unlock;
- ret = iomap_file_buffered_write(iocb, from, &zonefs_write_iomap_ops, NULL);
+ ret = iomap_file_buffered_write(iocb, from, &zonefs_write_iomap_ops,
+ NULL, NULL);
if (ret == -EIO)
zonefs_io_error(inode, true);
*/
#define IOMAP_NULL_ADDR -1ULL /* addr is not valid */
-struct iomap_folio_ops;
-
struct iomap {
u64 addr; /* disk offset of mapping, bytes */
loff_t offset; /* file offset of mapping, bytes */
struct dax_device *dax_dev; /* dax_dev for dax operations */
void *inline_data;
void *private; /* filesystem private */
- const struct iomap_folio_ops *folio_ops;
u64 validity_cookie; /* used with .iomap_valid() */
};
}
/*
- * When a filesystem sets folio_ops in an iomap mapping it returns, get_folio
- * and put_folio will be called for each folio written to. This only applies
- * to buffered writes as unbuffered writes will not typically have folios
- * associated with them.
- *
+ * get_folio and put_folio are called for each folio written to. This only
+ * applies to buffered writes, as unbuffered writes will not typically have
+ * folios associated with them.
+ *
* When get_folio succeeds, put_folio will always be called to do any
* cleanup work necessary. put_folio is responsible for unlocking and putting
* @folio.
*/
-struct iomap_folio_ops {
+struct iomap_write_ops {
struct folio *(*get_folio)(struct iomap_iter *iter, loff_t pos,
unsigned len);
void (*put_folio)(struct inode *inode, loff_t pos, unsigned copied,
}
ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
- const struct iomap_ops *ops, void *private);
+ const struct iomap_ops *ops,
+ const struct iomap_write_ops *write_ops, void *private);
int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops);
void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops);
bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count);
void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len);
bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio);
int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
- const struct iomap_ops *ops);
+ const struct iomap_ops *ops,
+ const struct iomap_write_ops *write_ops);
int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
- bool *did_zero, const struct iomap_ops *ops, void *private);
+ bool *did_zero, const struct iomap_ops *ops,
+ const struct iomap_write_ops *write_ops, void *private);
int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
- const struct iomap_ops *ops, void *private);
+ const struct iomap_ops *ops,
+ const struct iomap_write_ops *write_ops, void *private);
vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops,
void *private);
typedef void (*iomap_punch_t)(struct inode *inode, loff_t offset, loff_t length,