u32 iosize;
if (cur >= i_size) {
- btrfs_mark_ordered_io_finished(inode, &folio->page, cur,
- len, true);
+ btrfs_mark_ordered_io_finished(inode, folio, cur, len,
+ true);
/*
* This range is beyond i_size, thus we don't need to
 * bother writing back.
 */
folio_end_writeback(folio);
}
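
This first hunk is the heart of the conversion: a caller that already has a folio stops reaching into it for the embedded page and passes the folio itself. The old form worked because struct folio embeds its head page, roughly as below (a simplified sketch, not the full kernel definition, which wraps the page in a union in include/linux/mm_types.h):

    /* Simplified sketch of the layout the old &folio->page code relied on. */
    struct folio {
            struct page page;       /* head page; &folio->page points here */
            /* ... */
    };

Dropping the &folio->page round trip lets the prototype state the real requirement, so the compiler now rejects bare-page arguments at these call sites.
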
if (ret) {
- btrfs_mark_ordered_io_finished(BTRFS_I(inode), &folio->page,
+ btrfs_mark_ordered_io_finished(BTRFS_I(inode), folio,
page_start, PAGE_SIZE, !ret);
mapping_set_error(folio->mapping, ret);
}
btrfs_folio_clear_writeback(fs_info, folio, cur, cur_len);
}
if (ret) {
- btrfs_mark_ordered_io_finished(BTRFS_I(inode), &folio->page,
+ btrfs_mark_ordered_io_finished(BTRFS_I(inode), folio,
cur, cur_len, !ret);
mapping_set_error(mapping, ret);
}
set_page_writeback(locked_page);
end_page_writeback(locked_page);
- btrfs_mark_ordered_io_finished(inode, locked_page,
+ btrfs_mark_ordered_io_finished(inode,
+ page_folio(locked_page),
page_start, PAGE_SIZE,
!ret);
mapping_set_error(locked_page->mapping, ret);
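
Call sites like this one still hold a bare struct page, so they convert at the call boundary with page_folio(), which resolves the folio containing a given page. A minimal sketch of that adapter pattern, with a hypothetical wrapper name that is not part of the patch:

    /*
     * Hypothetical helper, for illustration only: a page-only caller
     * adapts to the folio-taking prototype. For an order-0 page the
     * behavior is unchanged; only the argument type differs.
     */
    static void finish_ordered_for_page(struct btrfs_inode *inode,
                                        struct page *page, u64 start,
                                        u64 len, bool uptodate)
    {
            btrfs_mark_ordered_io_finished(inode, page_folio(page),
                                           start, len, uptodate);
    }
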
* to reflect the errors and clean the page.
*/
mapping_set_error(page->mapping, ret);
- btrfs_mark_ordered_io_finished(inode, page, page_start,
- PAGE_SIZE, !ret);
+ btrfs_mark_ordered_io_finished(inode, page_folio(page),
+ page_start, PAGE_SIZE, !ret);
clear_page_dirty_for_io(page);
}
btrfs_folio_clear_checked(fs_info, page_folio(page), page_start, PAGE_SIZE);
/*
* Mark all ordered extents io inside the specified range finished.
*
- * @page: The involved page for the operation.
- * For uncompressed buffered IO, the page status also needs to be
+ * @folio: The involved folio for the operation.
+ * For uncompressed buffered IO, the folio status also needs to be
* updated to indicate whether the pending ordered io is finished.
* Can be NULL for direct IO and compressed write.
* For these cases, callers are ensured they won't execute the
 *		 endio function twice.
 *
 * This function is called for endio, thus the range must have ordered
 * extent(s) covering it.
 */
void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
- struct page *page, u64 file_offset,
+ struct folio *folio, u64 file_offset,
u64 num_bytes, bool uptodate)
{
struct rb_node *node;
ASSERT(end + 1 - cur < U32_MAX);
len = end + 1 - cur;
- if (can_finish_ordered_extent(entry, page_folio(page), cur, len,
- uptodate)) {
+ if (can_finish_ordered_extent(entry, folio, cur, len, uptodate)) {
spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
btrfs_queue_ordered_fn(entry);
spin_lock_irqsave(&inode->ordered_tree_lock, flags);
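
With the folio now arriving as a parameter, the walk over the inode's ordered extents forwards it straight into can_finish_ordered_extent() instead of reconstructing it via page_folio(). Note the lock juggling in the hunk: ordered_tree_lock may not be held across btrfs_queue_ordered_fn(), so it is dropped around the call and re-acquired before the walk continues. A condensed sketch of that pattern, assuming a stand-in find_next_ordered() for the real tree lookup:

    spin_lock_irqsave(&inode->ordered_tree_lock, flags);
    while ((entry = find_next_ordered(inode, &cur, &len)) != NULL) {
            if (can_finish_ordered_extent(entry, folio, cur, len, uptodate)) {
                    /* The queued completion must not run under the tree lock. */
                    spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
                    btrfs_queue_ordered_fn(entry);
                    spin_lock_irqsave(&inode->ordered_tree_lock, flags);
            }
            cur += len;
    }
    spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
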
struct folio *folio, u64 file_offset, u64 len,
bool uptodate);
void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
- struct page *page, u64 file_offset,
- u64 num_bytes, bool uptodate);
+ struct folio *folio, u64 file_offset,
+ u64 num_bytes, bool uptodate);
bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
struct btrfs_ordered_extent **cached,
u64 file_offset, u64 io_size);
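
The header now matches: btrfs_mark_ordered_io_finished() lines up with btrfs_finish_ordered_extent() above it, both taking a folio. Per the updated comment, the folio may be NULL when there is no folio state to update. Illustrative calls (offsets and lengths hypothetical):

    /* Uncompressed buffered IO: the folio's ordered/writeback state is updated. */
    btrfs_mark_ordered_io_finished(inode, folio, file_offset, num_bytes, true);

    /* Direct IO or compressed write: nothing to update on a folio, pass NULL. */
    btrfs_mark_ordered_io_finished(inode, NULL, file_offset, num_bytes, true);
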