if (!mapping_large_folio_support(iter->inode->i_mapping))
len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));
- if (iter->fbatch) {
+ if (iter->iomap.flags & IOMAP_F_FOLIO_BATCH) {
struct folio *folio = folio_batch_next(iter->fbatch);
if (!folio)
	return NULL;
/*
 * No folio means the batch is exhausted but we still have range to
 * process so return and let the caller iterate and refill the batch.
*/
if (!folio) {
- WARN_ON_ONCE(!iter->fbatch);
+ WARN_ON_ONCE(!(iter->iomap.flags & IOMAP_F_FOLIO_BATCH));
return 0;
}
return status;
}
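The refill cycle implied by the comment above: a 0 return on batch exhaustion only ends the current mapping, and the next iomap_iter() pass re-enters ->iomap_begin, which repopulates the batch for the remaining range. A minimal sketch of that loop, assuming an illustrative zero_this_mapping() helper (not a kernel function):

	while ((ret = iomap_iter(&iter, ops)) > 0) {
		/*
		 * ->iomap_begin has just run and, for a dirty unwritten
		 * mapping, refilled iter.fbatch via iomap_fill_dirty_folios().
		 * A 0 status from the exhausted-batch case above only ended
		 * the previous mapping; the rest of the range lands here
		 * with a fresh batch.
		 */
		iter.status = zero_this_mapping(&iter);
	}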
-loff_t
+/**
+ * iomap_fill_dirty_folios - fill a folio batch with dirty folios
+ * @iter: Iteration structure
+ * @start: Start offset of range. Updated based on lookup progress.
+ * @end: End offset of range
+ * @iomap_flags: Flags to set on the associated iomap to track the batch.
+ *
+ * Returns the folio count directly. If the batch lookup is performed, the
+ * associated control flag and the expected offset of a subsequent lookup are
+ * also returned via out params. The caller is responsible for setting the
+ * flag on the associated iomap.
+ */
+unsigned int
iomap_fill_dirty_folios(
struct iomap_iter *iter,
- loff_t offset,
- loff_t length)
+ loff_t *start,
+ loff_t end,
+ unsigned int *iomap_flags)
{
struct address_space *mapping = iter->inode->i_mapping;
- pgoff_t start = offset >> PAGE_SHIFT;
- pgoff_t end = (offset + length - 1) >> PAGE_SHIFT;
+ pgoff_t pstart = *start >> PAGE_SHIFT;
+ pgoff_t pend = (end - 1) >> PAGE_SHIFT;
+ unsigned int count;
- iter->fbatch = kmalloc(sizeof(struct folio_batch), GFP_KERNEL);
- if (!iter->fbatch)
- return offset + length;
- folio_batch_init(iter->fbatch);
+ if (!iter->fbatch) {
+ *start = end;
+ return 0;
+ }
- filemap_get_folios_dirty(mapping, &start, end, iter->fbatch);
- return (start << PAGE_SHIFT);
+ count = filemap_get_folios_dirty(mapping, &pstart, pend, iter->fbatch);
+ *start = (pstart << PAGE_SHIFT);
+ *iomap_flags |= IOMAP_F_FOLIO_BATCH;
+ return count;
}
EXPORT_SYMBOL_GPL(iomap_fill_dirty_folios);
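Caller-side, the contract reads roughly as below, a hedged sketch with hypothetical myfs_* naming (the xfs hunk further down is the real user): batch dirty folios over the candidate range, trim the mapping to the lookup progress, and apply the returned flag to the iomap.

	static void myfs_batch_zero_range(struct iomap_iter *iter, loff_t offset,
			loff_t count, loff_t extent_bytes, struct iomap *iomap)
	{
		unsigned int iomap_flags = 0;
		loff_t foffset = offset;	/* advances to the next lookup offset */
		loff_t fend = offset + min_t(loff_t, count, extent_bytes);

		iomap_fill_dirty_folios(iter, &foffset, fend, &iomap_flags);

		/* map only what the batch covers; the rest is revisited later */
		iomap->offset = offset;
		iomap->length = foffset - offset;
		iomap->flags |= iomap_flags;	/* IOMAP_F_FOLIO_BATCH if batched */
	}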
const struct iomap_ops *ops,
const struct iomap_write_ops *write_ops, void *private)
{
+ struct folio_batch fbatch;
struct iomap_iter iter = {
.inode = inode,
.pos = pos,
.len = len,
.flags = IOMAP_ZERO,
.private = private,
+ .fbatch = &fbatch,
};
struct address_space *mapping = inode->i_mapping;
int ret;
bool range_dirty;
+ folio_batch_init(&fbatch);
+
/*
* To avoid an unconditional flush, check pagecache state and only flush
* if dirty and the fs returns a mapping that might convert on
while ((ret = iomap_iter(&iter, ops)) > 0) {
const struct iomap *srcmap = iomap_iter_srcmap(&iter);
- if (WARN_ON_ONCE(iter.fbatch &&
+ if (WARN_ON_ONCE((iter.iomap.flags & IOMAP_F_FOLIO_BATCH) &&
srcmap->type != IOMAP_UNWRITTEN))
return -EIO;
- if (!iter.fbatch &&
+ if (!(iter.iomap.flags & IOMAP_F_FOLIO_BATCH) &&
(srcmap->type == IOMAP_HOLE ||
srcmap->type == IOMAP_UNWRITTEN)) {
s64 status;
*/
if (flags & IOMAP_ZERO) {
xfs_fileoff_t eof_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
- u64 end;
if (isnullstartblock(imap.br_startblock) &&
offset_fsb >= eof_fsb)
*/
if (imap.br_state == XFS_EXT_UNWRITTEN &&
offset_fsb < eof_fsb) {
- loff_t len = min(count,
- XFS_FSB_TO_B(mp, imap.br_blockcount));
+ loff_t foffset = offset, fend;
- end = iomap_fill_dirty_folios(iter, offset, len);
+ fend = offset +
+ min(count, XFS_FSB_TO_B(mp, imap.br_blockcount));
+ iomap_fill_dirty_folios(iter, &foffset, fend,
+ &iomap_flags);
end_fsb = min_t(xfs_fileoff_t, end_fsb,
- XFS_B_TO_FSB(mp, end));
+ XFS_B_TO_FSB(mp, foffset));
}
xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb);
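To make the trim arithmetic concrete, a worked example with assumed numbers (4k filesystem blocks, hypothetical values):

	/*
	 * Zeroing 1MB over a 256k unwritten extent, with dirty pagecache
	 * covering only the first 64k:
	 *
	 *	foffset = 0, fend = 0 + min(1MB, 256k) = 256k
	 *	lookup batches dirty folios up to 64k  =>  foffset = 64k
	 *	end_fsb = min(end_fsb, XFS_B_TO_FSB(mp, 64k)) = 16 blocks
	 *
	 * The mapping is trimmed to those 16 blocks; the remainder of the
	 * range comes back through ->iomap_begin with a refilled batch.
	 */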
/*
* Flags set by the core iomap code during operations:
*
+ * IOMAP_F_FOLIO_BATCH indicates that the folio batch mechanism is active
+ * for this operation. The flag is returned by iomap_fill_dirty_folios() and
+ * set on the iomap by the filesystem.
+ *
* IOMAP_F_SIZE_CHANGED indicates to the iomap_end method that the file size
* has changed as the result of this write operation.
*
* range it covers needs to be remapped by the high level before the operation
* can proceed.
*/
+#define IOMAP_F_FOLIO_BATCH (1U << 13)
#define IOMAP_F_SIZE_CHANGED (1U << 14)
#define IOMAP_F_STALE (1U << 15)
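Consumers gate the batch path on the mapping's flags; a minimal predicate sketch (iomap_folio_batch_active() is illustrative, not an existing helper — the core code open-codes the test, as in the zero range hunks above):

	static inline bool iomap_folio_batch_active(const struct iomap_iter *iter)
	{
		return iter->iomap.flags & IOMAP_F_FOLIO_BATCH;
	}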
int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
const struct iomap_ops *ops,
const struct iomap_write_ops *write_ops);
-loff_t iomap_fill_dirty_folios(struct iomap_iter *iter, loff_t offset,
- loff_t length);
+unsigned int iomap_fill_dirty_folios(struct iomap_iter *iter, loff_t *start,
+ loff_t end, unsigned int *iomap_flags);
int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
bool *did_zero, const struct iomap_ops *ops,
const struct iomap_write_ops *write_ops, void *private);