iomap: replace folio_batch allocation with stack allocation
author    Brian Foster <bfoster@redhat.com>
          Mon, 8 Dec 2025 14:05:48 +0000 (09:05 -0500)
committer Christian Brauner <brauner@kernel.org>
          Mon, 15 Dec 2025 14:17:44 +0000 (15:17 +0100)
Zhang Yi points out that the dynamic folio_batch allocation in
iomap_fill_dirty_folios() is problematic for the ext4 on iomap work
that is under development because it doesn't sufficiently handle the
allocation failure case (by allowing a retry, for example). We've
also seen lockdep (via syzbot) complain recently about the scope of
the allocation.

The dynamic allocation was initially added for simplicity and to
help indicate whether the batch was used by the calling fs.
To address these issues, put the batch on the stack of
iomap_zero_range() and use a flag to control whether the batch
should be used in the iomap folio lookup path. This keeps things
simple and eliminates allocation issues with lockdep and for ext4 on
iomap.
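
In other words (simplified from the diff below), the batch now lives
on the caller's stack and its use is signalled by an iomap flag rather
than by a non-NULL pointer:

    struct folio_batch fbatch;
    struct iomap_iter iter = {
            ...
            .fbatch = &fbatch,      /* always points at the stack batch */
    };

    folio_batch_init(&fbatch);
    ...
    /* the folio lookup path now tests the flag, not the pointer */
    if (iter.iomap.flags & IOMAP_F_FOLIO_BATCH)
            folio = folio_batch_next(iter.fbatch);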

While here, also clean up the fill helper signature to be more
consistent with the underlying filemap helper. Pass through the
return value of the filemap helper (folio count) and update the
lookup offset via an out param.
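
The reworked helper is then called roughly like so (simplified from
the xfs hunk below; len stands for the zeroing range length and
nr_folios is an illustrative name, the xfs caller ignores the return
value):

    loff_t foffset = offset, fend = offset + len;
    unsigned int nr_folios;

    nr_folios = iomap_fill_dirty_folios(iter, &foffset, fend,
                                        &iomap_flags);
    /*
     * foffset now holds the expected offset of a subsequent lookup,
     * and iomap_flags has IOMAP_F_FOLIO_BATCH set if the batch
     * lookup was performed.
     */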

Fixes: 395ed1ef0012 ("iomap: optional zero range dirty folio processing")
Signed-off-by: Brian Foster <bfoster@redhat.com>
Link: https://patch.msgid.link/20251208140548.373411-1-bfoster@redhat.com
Acked-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Christian Brauner <brauner@kernel.org>
fs/iomap/buffered-io.c
fs/iomap/iter.c
fs/xfs/xfs_iomap.c
include/linux/iomap.h

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index e5c1ca440d93bd7468eb2fbf37760295672622e9..fd9a2cf9562024b8dd056a7f9c7d5e20822ce6ef 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -832,7 +832,7 @@ static struct folio *__iomap_get_folio(struct iomap_iter *iter,
        if (!mapping_large_folio_support(iter->inode->i_mapping))
                len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));
 
-       if (iter->fbatch) {
+       if (iter->iomap.flags & IOMAP_F_FOLIO_BATCH) {
                struct folio *folio = folio_batch_next(iter->fbatch);
 
                if (!folio)
@@ -929,7 +929,7 @@ static int iomap_write_begin(struct iomap_iter *iter,
         * process so return and let the caller iterate and refill the batch.
         */
        if (!folio) {
-               WARN_ON_ONCE(!iter->fbatch);
+               WARN_ON_ONCE(!(iter->iomap.flags & IOMAP_F_FOLIO_BATCH));
                return 0;
        }
 
@@ -1544,23 +1544,39 @@ static int iomap_zero_iter(struct iomap_iter *iter, bool *did_zero,
        return status;
 }
 
-loff_t
+/**
+ * iomap_fill_dirty_folios - fill a folio batch with dirty folios
+ * @iter: Iteration structure
+ * @start: Start offset of range. Updated based on lookup progress.
+ * @end: End offset of range
+ * @iomap_flags: Flags to set on the associated iomap to track the batch.
+ *
+ * Returns the folio count directly. Also returns the associated control flag
+ * if the batch lookup is performed and the expected offset of a subsequent
+ * lookup via out params. The caller is responsible for setting the flag on
+ * the associated iomap.
+ */
+unsigned int
 iomap_fill_dirty_folios(
        struct iomap_iter       *iter,
-       loff_t                  offset,
-       loff_t                  length)
+       loff_t                  *start,
+       loff_t                  end,
+       unsigned int            *iomap_flags)
 {
        struct address_space    *mapping = iter->inode->i_mapping;
-       pgoff_t                 start = offset >> PAGE_SHIFT;
-       pgoff_t                 end = (offset + length - 1) >> PAGE_SHIFT;
+       pgoff_t                 pstart = *start >> PAGE_SHIFT;
+       pgoff_t                 pend = (end - 1) >> PAGE_SHIFT;
+       unsigned int            count;
 
-       iter->fbatch = kmalloc(sizeof(struct folio_batch), GFP_KERNEL);
-       if (!iter->fbatch)
-               return offset + length;
-       folio_batch_init(iter->fbatch);
+       if (!iter->fbatch) {
+               *start = end;
+               return 0;
+       }
 
-       filemap_get_folios_dirty(mapping, &start, end, iter->fbatch);
-       return (start << PAGE_SHIFT);
+       count = filemap_get_folios_dirty(mapping, &pstart, pend, iter->fbatch);
+       *start = (pstart << PAGE_SHIFT);
+       *iomap_flags |= IOMAP_F_FOLIO_BATCH;
+       return count;
 }
 EXPORT_SYMBOL_GPL(iomap_fill_dirty_folios);
 
@@ -1569,17 +1585,21 @@ iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
                const struct iomap_ops *ops,
                const struct iomap_write_ops *write_ops, void *private)
 {
+       struct folio_batch fbatch;
        struct iomap_iter iter = {
                .inode          = inode,
                .pos            = pos,
                .len            = len,
                .flags          = IOMAP_ZERO,
                .private        = private,
+               .fbatch         = &fbatch,
        };
        struct address_space *mapping = inode->i_mapping;
        int ret;
        bool range_dirty;
 
+       folio_batch_init(&fbatch);
+
        /*
         * To avoid an unconditional flush, check pagecache state and only flush
         * if dirty and the fs returns a mapping that might convert on
@@ -1590,11 +1610,11 @@ iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
        while ((ret = iomap_iter(&iter, ops)) > 0) {
                const struct iomap *srcmap = iomap_iter_srcmap(&iter);
 
-               if (WARN_ON_ONCE(iter.fbatch &&
+               if (WARN_ON_ONCE((iter.iomap.flags & IOMAP_F_FOLIO_BATCH) &&
                                 srcmap->type != IOMAP_UNWRITTEN))
                        return -EIO;
 
-               if (!iter.fbatch &&
+               if (!(iter.iomap.flags & IOMAP_F_FOLIO_BATCH) &&
                    (srcmap->type == IOMAP_HOLE ||
                     srcmap->type == IOMAP_UNWRITTEN)) {
                        s64 status;
diff --git a/fs/iomap/iter.c b/fs/iomap/iter.c
index 8692e5e41c6df6fe789849b740ce7c982682cacc..c04796f6e57fab18229a912893fddba7511ee646 100644
--- a/fs/iomap/iter.c
+++ b/fs/iomap/iter.c
@@ -8,10 +8,10 @@
 
 static inline void iomap_iter_reset_iomap(struct iomap_iter *iter)
 {
-       if (iter->fbatch) {
+       if (iter->iomap.flags & IOMAP_F_FOLIO_BATCH) {
                folio_batch_release(iter->fbatch);
-               kfree(iter->fbatch);
-               iter->fbatch = NULL;
+               folio_batch_reinit(iter->fbatch);
+               iter->iomap.flags &= ~IOMAP_F_FOLIO_BATCH;
        }
 
        iter->status = 0;
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 04f39ea15898be43c09383faa13c23a4bf5601b2..37a1b33e90450f5e0159be9b368c2d2ab75ac6c1 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -1831,7 +1831,6 @@ xfs_buffered_write_iomap_begin(
         */
        if (flags & IOMAP_ZERO) {
                xfs_fileoff_t eof_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
-               u64 end;
 
                if (isnullstartblock(imap.br_startblock) &&
                    offset_fsb >= eof_fsb)
@@ -1851,12 +1850,14 @@ xfs_buffered_write_iomap_begin(
                 */
                if (imap.br_state == XFS_EXT_UNWRITTEN &&
                    offset_fsb < eof_fsb) {
-                       loff_t len = min(count,
-                                        XFS_FSB_TO_B(mp, imap.br_blockcount));
+                       loff_t foffset = offset, fend;
 
-                       end = iomap_fill_dirty_folios(iter, offset, len);
+                       fend = offset +
+                              min(count, XFS_FSB_TO_B(mp, imap.br_blockcount));
+                       iomap_fill_dirty_folios(iter, &foffset, fend,
+                                               &iomap_flags);
                        end_fsb = min_t(xfs_fileoff_t, end_fsb,
-                                       XFS_B_TO_FSB(mp, end));
+                                       XFS_B_TO_FSB(mp, foffset));
                }
 
                xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb);
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 520e967cb501d335fd8fc9361cbbeba0042a9a4e..6bb941707d12260e35d3b650608634779376594b 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -88,6 +88,9 @@ struct vm_fault;
 /*
  * Flags set by the core iomap code during operations:
  *
+ * IOMAP_F_FOLIO_BATCH indicates that the folio batch mechanism is active
+ * for this operation, set by iomap_fill_dirty_folios().
+ *
  * IOMAP_F_SIZE_CHANGED indicates to the iomap_end method that the file size
  * has changed as the result of this write operation.
  *
@@ -95,6 +98,7 @@ struct vm_fault;
  * range it covers needs to be remapped by the high level before the operation
  * can proceed.
  */
+#define IOMAP_F_FOLIO_BATCH    (1U << 13)
 #define IOMAP_F_SIZE_CHANGED   (1U << 14)
 #define IOMAP_F_STALE          (1U << 15)
 
@@ -352,8 +356,8 @@ bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio);
 int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
                const struct iomap_ops *ops,
                const struct iomap_write_ops *write_ops);
-loff_t iomap_fill_dirty_folios(struct iomap_iter *iter, loff_t offset,
-               loff_t length);
+unsigned int iomap_fill_dirty_folios(struct iomap_iter *iter, loff_t *start,
+               loff_t end, unsigned int *iomap_flags);
 int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
                bool *did_zero, const struct iomap_ops *ops,
                const struct iomap_write_ops *write_ops, void *private);