return 0;
}
-static struct folio *__iomap_get_folio(struct iomap_iter *iter, loff_t pos,
- size_t len)
+static struct folio *__iomap_get_folio(struct iomap_iter *iter, size_t len)
{
const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
+ loff_t pos = iter->pos;

if (folio_ops && folio_ops->get_folio)
return folio_ops->get_folio(iter, pos, len);
return iomap_get_folio(iter, pos, len);
}

-static void __iomap_put_folio(struct iomap_iter *iter, loff_t pos, size_t ret,
+static void __iomap_put_folio(struct iomap_iter *iter, size_t ret,
struct folio *folio)
{
const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
+ loff_t pos = iter->pos;

if (folio_ops && folio_ops->put_folio) {
folio_ops->put_folio(iter->inode, pos, ret, folio);
if (!mapping_large_folio_support(iter->inode->i_mapping))
len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));
- folio = __iomap_get_folio(iter, pos, len);
+ folio = __iomap_get_folio(iter, len);
if (IS_ERR(folio))
return PTR_ERR(folio);
return 0;
out_unlock:
- __iomap_put_folio(iter, pos, 0, folio);
+ __iomap_put_folio(iter, 0, folio);
return status;
}
i_size_write(iter->inode, pos + written);
iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
}
- __iomap_put_folio(iter, pos, written, folio);
+ __iomap_put_folio(iter, written, folio);
if (old_size < pos)
pagecache_isize_extended(iter->inode, old_size, pos);
bytes = folio_size(folio) - offset;
ret = iomap_write_end(iter, bytes, bytes, folio);
- __iomap_put_folio(iter, pos, bytes, folio);
+ __iomap_put_folio(iter, bytes, folio);
if (WARN_ON_ONCE(!ret))
return -EIO;
folio_mark_accessed(folio);
ret = iomap_write_end(iter, bytes, bytes, folio);
- __iomap_put_folio(iter, pos, bytes, folio);
+ __iomap_put_folio(iter, bytes, folio);
if (WARN_ON_ONCE(!ret))
return -EIO;