git.ipfire.org Git - thirdparty/linux.git/commitdiff
btrfs: remove folio parameter from ordered io related functions
authorQu Wenruo <wqu@suse.com>
Thu, 12 Feb 2026 09:13:56 +0000 (19:43 +1030)
committerDavid Sterba <dsterba@suse.com>
Tue, 7 Apr 2026 16:55:57 +0000 (18:55 +0200)
Both functions btrfs_finish_ordered_extent() and
btrfs_mark_ordered_io_finished() are accepting an optional folio
parameter.

That @folio is passed into can_finish_ordered_extent(), which later will
test and clear the ordered flag for the involved range.

However I do not think there is any other call site that can clear
the ordered flags of a page cache folio and thereby affect
can_finish_ordered_extent().

There are limited *_clear_ordered() callers out of
can_finish_ordered_extent() function:

- btrfs_migrate_folio()
  This is completely unrelated, it's just migrating the ordered flag to
  the new folio.

- btrfs_cleanup_ordered_extents()
  We manually clean the ordered flags of all involved folios, then call
  btrfs_mark_ordered_io_finished() without a @folio parameter.
  So it doesn't need and didn't pass a @folio parameter in the first
  place.

- btrfs_writepage_fixup_worker()
  This function is going to be removed soon, and we should not hit that
  function anymore.

- btrfs_invalidate_folio()
  This is the real call site we need to bother with.

  If we already have a bio running, btrfs_finish_ordered_extent() in
  end_bbio_data_write() will be executed first, as
  btrfs_invalidate_folio() will wait for the writeback to finish.

  Thus if there is a running bio, it will not see that the range has
  ordered flags set, and will just skip to the next range.

  If there is no bio running, it means the ordered extent is created but
  the folio is not yet submitted.

  In that case btrfs_invalidate_folio() will manually clear the folio
  ordered range, but then manually finish the ordered extent with
  btrfs_dec_test_ordered_pending() without bothering the folio ordered
  flags.

  Meaning the OE range with folio ordered flags set will be finished
  manually, without the need to call can_finish_ordered_extent().

This means all can_finish_ordered_extent() call sites should get a range
that has folio ordered flag set, thus the old "return false" branch
should never be triggered.

Now we can:

- Remove the @folio parameter from involved functions
  * btrfs_mark_ordered_io_finished()
  * btrfs_finish_ordered_extent()

  For call sites passing a @folio into those functions, let them
  manually clear the ordered flag of involved folios.

- Move btrfs_finish_ordered_extent() out of the loop in
  end_bbio_data_write()

  We only need to call btrfs_finish_ordered_extent() once per bbio,
  not per folio.

- Add an ASSERT() to make sure all folio ranges have ordered flags
  It's only for end_bbio_data_write().

  And we already have enough safety nets to catch over-accounting of ordered
  extents.

Reviewed-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/compression.c
fs/btrfs/direct-io.c
fs/btrfs/extent_io.c
fs/btrfs/inode.c
fs/btrfs/ordered-data.c
fs/btrfs/ordered-data.h

index 85199944c1ebe5d7430674ff59b24bf7f2a85cdc..3a5701f68172e61c5b689f807b4244c1d1e185ce 100644 (file)
@@ -292,7 +292,7 @@ static void end_bbio_compressed_write(struct btrfs_bio *bbio)
        struct compressed_bio *cb = to_compressed_bio(bbio);
        struct folio_iter fi;
 
-       btrfs_finish_ordered_extent(cb->bbio.ordered, NULL, cb->start, cb->len,
+       btrfs_finish_ordered_extent(cb->bbio.ordered, cb->start, cb->len,
                                    cb->bbio.bio.bi_status == BLK_STS_OK);
 
        if (cb->writeback)
index 9a63200d7a53d9cc67ea2d1c351bbf3e1e842c4a..837306254f73681328d0522514253d239ce6a6ff 100644 (file)
@@ -625,7 +625,7 @@ static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length,
                pos += submitted;
                length -= submitted;
                if (write)
-                       btrfs_finish_ordered_extent(dio_data->ordered, NULL,
+                       btrfs_finish_ordered_extent(dio_data->ordered,
                                                    pos, length, false);
                else
                        btrfs_unlock_dio_extent(&BTRFS_I(inode)->io_tree, pos,
@@ -657,9 +657,8 @@ static void btrfs_dio_end_io(struct btrfs_bio *bbio)
        }
 
        if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
-               btrfs_finish_ordered_extent(bbio->ordered, NULL,
-                                           dip->file_offset, dip->bytes,
-                                           !bio->bi_status);
+               btrfs_finish_ordered_extent(bbio->ordered, dip->file_offset,
+                                           dip->bytes, !bio->bi_status);
        } else {
                btrfs_unlock_dio_extent(&inode->io_tree, dip->file_offset,
                                        dip->file_offset + dip->bytes - 1, NULL);
@@ -735,7 +734,7 @@ static void btrfs_dio_submit_io(const struct iomap_iter *iter, struct bio *bio,
 
                ret = btrfs_extract_ordered_extent(bbio, dio_data->ordered);
                if (ret) {
-                       btrfs_finish_ordered_extent(dio_data->ordered, NULL,
+                       btrfs_finish_ordered_extent(dio_data->ordered,
                                                    file_offset, dip->bytes,
                                                    !ret);
                        bio->bi_status = errno_to_blk_status(ret);
index cfafe05aa6f7046655fdf8e5e1b20fabc4a7d47a..33b1afbee0a6630fd3abda4bfaefef86746451f7 100644 (file)
@@ -521,6 +521,7 @@ static void end_bbio_data_write(struct btrfs_bio *bbio)
        int error = blk_status_to_errno(bio->bi_status);
        struct folio_iter fi;
        const u32 sectorsize = fs_info->sectorsize;
+       u32 bio_size = 0;
 
        ASSERT(!bio_flagged(bio, BIO_CLONED));
        bio_for_each_folio_all(fi, bio) {
@@ -528,6 +529,7 @@ static void end_bbio_data_write(struct btrfs_bio *bbio)
                u64 start = folio_pos(folio) + fi.offset;
                u32 len = fi.length;
 
+               bio_size += len;
                /* Our read/write should always be sector aligned. */
                if (!IS_ALIGNED(fi.offset, sectorsize))
                        btrfs_err(fs_info,
@@ -538,13 +540,15 @@ static void end_bbio_data_write(struct btrfs_bio *bbio)
                "incomplete page write with offset %zu and length %zu",
                                   fi.offset, fi.length);
 
-               btrfs_finish_ordered_extent(bbio->ordered, folio, start, len,
-                                           !error);
                if (error)
                        mapping_set_error(folio->mapping, error);
+
+               ASSERT(btrfs_folio_test_ordered(fs_info, folio, start, len));
+               btrfs_folio_clear_ordered(fs_info, folio, start, len);
                btrfs_folio_clear_writeback(fs_info, folio, start, len);
        }
 
+       btrfs_finish_ordered_extent(bbio->ordered, bbio->file_offset, bio_size, !error);
        bio_put(bio);
 }
 
@@ -1587,7 +1591,8 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
                        u64 start = page_start + (start_bit << fs_info->sectorsize_bits);
                        u32 len = (end_bit - start_bit) << fs_info->sectorsize_bits;
 
-                       btrfs_mark_ordered_io_finished(inode, folio, start, len, false);
+                       btrfs_folio_clear_ordered(fs_info, folio, start, len);
+                       btrfs_mark_ordered_io_finished(inode, start, len, false);
                }
                return ret;
        }
@@ -1663,6 +1668,7 @@ static int submit_one_sector(struct btrfs_inode *inode,
                 * ordered extent.
                 */
                btrfs_folio_clear_dirty(fs_info, folio, filepos, sectorsize);
+               btrfs_folio_clear_ordered(fs_info, folio, filepos, sectorsize);
                btrfs_folio_set_writeback(fs_info, folio, filepos, sectorsize);
                btrfs_folio_clear_writeback(fs_info, folio, filepos, sectorsize);
 
@@ -1670,8 +1676,8 @@ static int submit_one_sector(struct btrfs_inode *inode,
                 * Since there is no bio submitted to finish the ordered
                 * extent, we have to manually finish this sector.
                 */
-               btrfs_mark_ordered_io_finished(inode, folio, filepos,
-                                              fs_info->sectorsize, false);
+               btrfs_mark_ordered_io_finished(inode, filepos, fs_info->sectorsize,
+                                              false);
                return PTR_ERR(em);
        }
 
@@ -1783,8 +1789,8 @@ static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode,
                        spin_unlock(&inode->ordered_tree_lock);
                        btrfs_put_ordered_extent(ordered);
 
-                       btrfs_mark_ordered_io_finished(inode, folio, cur,
-                                                      fs_info->sectorsize, true);
+                       btrfs_folio_clear_ordered(fs_info, folio, cur, fs_info->sectorsize);
+                       btrfs_mark_ordered_io_finished(inode, cur, fs_info->sectorsize, true);
                        /*
                         * This range is beyond i_size, thus we don't need to
                         * bother writing back.
@@ -2633,8 +2639,7 @@ void extent_write_locked_range(struct inode *inode, const struct folio *locked_f
                if (IS_ERR(folio)) {
                        cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
                        cur_len = cur_end + 1 - cur;
-                       btrfs_mark_ordered_io_finished(BTRFS_I(inode), NULL,
-                                                      cur, cur_len, false);
+                       btrfs_mark_ordered_io_finished(BTRFS_I(inode), cur, cur_len, false);
                        mapping_set_error(mapping, PTR_ERR(folio));
                        cur = cur_end;
                        continue;
index 70b56fcaaccca6f8badb2c074334c9be3e29d28c..6daa7bb027fc98107a629d60b0a20a15597caccd 100644 (file)
@@ -424,7 +424,7 @@ static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
                folio_put(folio);
        }
 
-       return btrfs_mark_ordered_io_finished(inode, NULL, offset, bytes, false);
+       return btrfs_mark_ordered_io_finished(inode, offset, bytes, false);
 }
 
 static int btrfs_dirty_inode(struct btrfs_inode *inode);
@@ -2959,7 +2959,9 @@ out_page:
                 * to reflect the errors and clean the page.
                 */
                mapping_set_error(folio->mapping, ret);
-               btrfs_mark_ordered_io_finished(inode, folio, page_start,
+               btrfs_folio_clear_ordered(fs_info, folio, page_start,
+                                         folio_size(folio));
+               btrfs_mark_ordered_io_finished(inode, page_start,
                                               folio_size(folio), !ret);
                folio_clear_dirty_for_io(folio);
        }
index e47c3a3a619abbbc3bef4a99d4e3ca9527d3665c..8405d07b49cdf15d9158bf564387f2004afd71c0 100644 (file)
@@ -348,30 +348,13 @@ static void finish_ordered_fn(struct btrfs_work *work)
 }
 
 static bool can_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
-                                     struct folio *folio, u64 file_offset,
-                                     u64 len, bool uptodate)
+                                     u64 file_offset, u64 len, bool uptodate)
 {
        struct btrfs_inode *inode = ordered->inode;
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
 
        lockdep_assert_held(&inode->ordered_tree_lock);
 
-       if (folio) {
-               ASSERT(folio->mapping);
-               ASSERT(folio_pos(folio) <= file_offset);
-               ASSERT(file_offset + len <= folio_next_pos(folio));
-
-               /*
-                * Ordered flag indicates whether we still have
-                * pending io unfinished for the ordered extent.
-                *
-                * If it's not set, we need to skip to next range.
-                */
-               if (!btrfs_folio_test_ordered(fs_info, folio, file_offset, len))
-                       return false;
-               btrfs_folio_clear_ordered(fs_info, folio, file_offset, len);
-       }
-
        /* Now we're fine to update the accounting. */
        if (WARN_ON_ONCE(len > ordered->bytes_left)) {
                btrfs_crit(fs_info,
@@ -413,8 +396,7 @@ static void btrfs_queue_ordered_fn(struct btrfs_ordered_extent *ordered)
 }
 
 void btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
-                                struct folio *folio, u64 file_offset, u64 len,
-                                bool uptodate)
+                                u64 file_offset, u64 len, bool uptodate)
 {
        struct btrfs_inode *inode = ordered->inode;
        bool ret;
@@ -422,7 +404,7 @@ void btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
        trace_btrfs_finish_ordered_extent(inode, file_offset, len, uptodate);
 
        spin_lock(&inode->ordered_tree_lock);
-       ret = can_finish_ordered_extent(ordered, folio, file_offset, len,
+       ret = can_finish_ordered_extent(ordered, file_offset, len,
                                        uptodate);
        spin_unlock(&inode->ordered_tree_lock);
 
@@ -475,8 +457,7 @@ void btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
  * extent(s) covering it.
  */
 void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
-                                   struct folio *folio, u64 file_offset,
-                                   u64 num_bytes, bool uptodate)
+                                   u64 file_offset, u64 num_bytes, bool uptodate)
 {
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;
@@ -536,7 +517,7 @@ void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
                len = this_end - cur;
                ASSERT(len < U32_MAX);
 
-               if (can_finish_ordered_extent(entry, folio, cur, len, uptodate)) {
+               if (can_finish_ordered_extent(entry, cur, len, uptodate)) {
                        spin_unlock(&inode->ordered_tree_lock);
                        btrfs_queue_ordered_fn(entry);
                        spin_lock(&inode->ordered_tree_lock);
index 86e69de9e9ff50ad770348255ddde75e5dbbc58b..cd74c5ecfd67876c74cd103110260cb9da30f72a 100644 (file)
@@ -163,11 +163,9 @@ int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
 void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry);
 void btrfs_remove_ordered_extent(struct btrfs_ordered_extent *entry);
 void btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
-                                struct folio *folio, u64 file_offset, u64 len,
-                                bool uptodate);
+                                u64 file_offset, u64 len, bool uptodate);
 void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
-                                   struct folio *folio, u64 file_offset,
-                                   u64 num_bytes, bool uptodate);
+                                   u64 file_offset, u64 num_bytes, bool uptodate);
 bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
                                    struct btrfs_ordered_extent **cached,
                                    u64 file_offset, u64 io_size);