From: Josef Bacik
Date: Wed, 24 Jul 2024 20:56:32 +0000 (-0400)
Subject: btrfs: convert btrfs_run_delalloc_range() to take a folio
X-Git-Tag: v6.12-rc1~207^2~73
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=2609c9289f423e3d2d4044ddace4a0fb1939e9c3;p=thirdparty%2Flinux.git

btrfs: convert btrfs_run_delalloc_range() to take a folio

Now that every function that btrfs_run_delalloc_range calls takes a
folio, update it to take a folio and update the callers.

Signed-off-by: Josef Bacik
Reviewed-by: David Sterba
Signed-off-by: David Sterba
---

diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 3056c8aed8ef4..5599b458a9a98 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -596,7 +596,7 @@ int btrfs_prealloc_file_range_trans(struct inode *inode,
                                     struct btrfs_trans_handle *trans, int mode,
                                     u64 start, u64 num_bytes, u64 min_size,
                                     loff_t actual_len, u64 *alloc_hint);
-int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page,
+int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct folio *locked_folio,
                              u64 start, u64 end, struct writeback_control *wbc);
 int btrfs_writepage_cow_fixup(struct page *page);
 int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info,
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 9ae17c9fd89b2..5ff38e3f28e6b 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1254,7 +1254,7 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
 
                 if (ret >= 0) {
                         /* No errors hit so far, run the current delalloc range. */
-                        ret = btrfs_run_delalloc_range(inode, &folio->page,
+                        ret = btrfs_run_delalloc_range(inode, folio,
                                                        found_start,
                                                        found_start + found_len - 1,
                                                        wbc);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 3ee2c863adeed..e189dc9b6a3b0 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -2287,42 +2287,40 @@ static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end)
  * Function to process delayed allocation (create CoW) for ranges which are
  * being touched for the first time.
  */
-int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page,
+int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct folio *locked_folio,
                              u64 start, u64 end, struct writeback_control *wbc)
 {
         const bool zoned = btrfs_is_zoned(inode->root->fs_info);
         int ret;
 
         /*
-         * The range must cover part of the @locked_page, or a return of 1
+         * The range must cover part of the @locked_folio, or a return of 1
          * can confuse the caller.
          */
-        ASSERT(!(end <= page_offset(locked_page) ||
-                 start >= page_offset(locked_page) + PAGE_SIZE));
+        ASSERT(!(end <= folio_pos(locked_folio) ||
+                 start >= folio_pos(locked_folio) + folio_size(locked_folio)));
 
         if (should_nocow(inode, start, end)) {
-                ret = run_delalloc_nocow(inode, page_folio(locked_page), start,
-                                         end);
+                ret = run_delalloc_nocow(inode, locked_folio, start, end);
                 goto out;
         }
 
         if (btrfs_inode_can_compress(inode) &&
             inode_need_compress(inode, start, end) &&
-            run_delalloc_compressed(inode, page_folio(locked_page), start, end,
-                                    wbc))
+            run_delalloc_compressed(inode, locked_folio, start, end, wbc))
                 return 1;
 
         if (zoned)
-                ret = run_delalloc_cow(inode, page_folio(locked_page), start,
-                                       end, wbc, true);
+                ret = run_delalloc_cow(inode, locked_folio, start, end, wbc,
+                                       true);
         else
-                ret = cow_file_range(inode, page_folio(locked_page), start, end,
-                                     NULL, false, false);
+                ret = cow_file_range(inode, locked_folio, start, end, NULL,
+                                     false, false);
 
 out:
         if (ret < 0)
-                btrfs_cleanup_ordered_extents(inode, page_folio(locked_page),
-                                              start, end - start + 1);
+                btrfs_cleanup_ordered_extents(inode, locked_folio, start,
+                                              end - start + 1);
         return ret;
 }
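
A note for readers following the page-to-folio conversion: the one functional
subtlety in this patch is the overlap assertion, where page_offset()/PAGE_SIZE
become folio_pos()/folio_size(), so the check stays correct when the locked
folio spans more than one page. Below is a minimal userspace sketch of that
overlap check, not kernel code; struct fake_folio and range_covers_folio()
are made-up names used only to model the logic.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Userspace model of the check in btrfs_run_delalloc_range(): the delalloc
 * range [start, end] must overlap the locked folio, i.e. it must not end
 * at/before the folio start nor begin at/after the folio end.  With folios
 * the upper bound is folio_pos() + folio_size() rather than
 * page_offset() + PAGE_SIZE, so the check remains valid for large folios.
 */
struct fake_folio {
        uint64_t pos;   /* byte offset of the folio in the file */
        uint64_t size;  /* folio size in bytes (may be larger than 4K) */
};

static bool range_covers_folio(const struct fake_folio *folio,
                               uint64_t start, uint64_t end)
{
        return !(end <= folio->pos || start >= folio->pos + folio->size);
}

int main(void)
{
        /* A 64K folio starting at file offset 128K. */
        struct fake_folio folio = { .pos = 128 * 1024, .size = 64 * 1024 };

        /* Range ending inside the folio: covered. */
        assert(range_covers_folio(&folio, 100 * 1024, 140 * 1024 - 1));
        /* Range entirely before the folio: not covered. */
        assert(!range_covers_folio(&folio, 0, 128 * 1024 - 1));
        /*
         * Range starting in the tail pages of the large folio: still
         * covered, which a fixed PAGE_SIZE bound would have missed.
         */
        assert(range_covers_folio(&folio, 160 * 1024, 200 * 1024));

        printf("overlap checks passed\n");
        return 0;
}

Built with any C compiler, all three assertions should pass; the last one is
the case that only works because the upper bound now follows the folio size.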