git.ipfire.org Git - thirdparty/linux.git/commitdiff
iomap: move folio_unlock out of iomap_writeback_folio
author: Joanne Koong <joannelkoong@gmail.com>
Thu, 10 Jul 2025 13:33:33 +0000 (15:33 +0200)
committer: Christian Brauner <brauner@kernel.org>
Mon, 14 Jul 2025 08:51:32 +0000 (10:51 +0200)
Move unlocking the folio out of iomap_writeback_folio into the caller.
This means the end writeback machinery is now run with the folio locked
when no writeback happened, or writeback completed extremely fast.

Note that having the folio locked over the call to folio_end_writeback in
iomap_writeback_folio means that the dropbehind handling there will never
run because the trylock fails.  The only way this can happen is if the
writepage either never wrote back any dirty data at all, in which case
the dropbehind handling isn't needed, or if all writeback finished
instantly, which is rather unlikely.  Even in the latter case the
dropbehind handling is an optional optimization so skipping it will not
cause correctness issues.

This prepares for exporting iomap_writeback_folio for use in folio
laundering.

Signed-off-by: Joanne Koong <joannelkoong@gmail.com>
[hch: split from a larger patch]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/20250710133343.399917-10-hch@lst.de
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Christian Brauner <brauner@kernel.org>
fs/iomap/buffered-io.c

index e6e4c2d1b3995b82a60f0084377a7eb295ad354b..ca45a6d1cb684177197cfd7b34873ca5e5421d9f 100644 (file)
@@ -1652,10 +1652,8 @@ static int iomap_writeback_folio(struct iomap_writepage_ctx *wpc,
 
        trace_iomap_writeback_folio(inode, pos, folio_size(folio));
 
-       if (!iomap_writeback_handle_eof(folio, inode, &end_pos)) {
-               folio_unlock(folio);
+       if (!iomap_writeback_handle_eof(folio, inode, &end_pos))
                return 0;
-       }
        WARN_ON_ONCE(end_pos <= pos);
 
        if (i_blocks_per_folio(inode, folio) > 1) {
@@ -1709,7 +1707,6 @@ static int iomap_writeback_folio(struct iomap_writepage_ctx *wpc,
         * already at this point.  In that case we need to clear the writeback
         * bit ourselves right after unlocking the page.
         */
-       folio_unlock(folio);
        if (ifs) {
                if (atomic_dec_and_test(&ifs->write_bytes_pending))
                        folio_end_writeback(folio);
@@ -1736,8 +1733,10 @@ iomap_writepages(struct iomap_writepage_ctx *wpc)
                        PF_MEMALLOC))
                return -EIO;
 
-       while ((folio = writeback_iter(mapping, wpc->wbc, folio, &error)))
+       while ((folio = writeback_iter(mapping, wpc->wbc, folio, &error))) {
                error = iomap_writeback_folio(wpc, folio);
+               folio_unlock(folio);
+       }
 
        /*
         * If @error is non-zero, it means that we have a situation where some