struct folio *folio = fi.folio;
if (!f2fs_is_compressed_page(folio) &&
- !fsverity_verify_page(&folio->page)) {
++ !fsverity_verify_folio(vi, folio)) {
bio->bi_status = BLK_STS_IOERR;
break;
}
if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
!f2fs_is_checkpoint_ready(sbi))
__submit_merged_bio(io);
- f2fs_up_write(&io->io_rwsem);
+ f2fs_up_write_trace(&io->io_rwsem, &lc);
}
-static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
+static struct bio *f2fs_grab_read_bio(struct inode *inode,
+ struct fsverity_info *vi, block_t blkaddr,
unsigned nr_pages, blk_opf_t op_flag,
pgoff_t first_idx, bool for_write)
{
}
#endif
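+ /*
+ * Return the f2fs_folio_state attached to folio->private, allocating
+ * and attaching a new one on first use.
+ */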
+ static struct f2fs_folio_state *ffs_find_or_alloc(struct folio *folio)
+ {
+ struct f2fs_folio_state *ffs = folio->private;
+
+ if (ffs)
+ return ffs;
+
+ ffs = f2fs_kmem_cache_alloc(ffs_entry_slab,
+ GFP_NOIO | __GFP_ZERO, true, NULL);
+
+ spin_lock_init(&ffs->state_lock);
+ folio_attach_private(folio, ffs);
+ return ffs;
+ }
+
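+ /*
+ * Detach folio->private; for a large folio this is an f2fs_folio_state
+ * that must have no reads pending and is returned to the slab.
+ */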
+ static void ffs_detach_free(struct folio *folio)
+ {
+ struct f2fs_folio_state *ffs;
+
+ if (!folio_test_large(folio)) {
+ folio_detach_private(folio);
+ return;
+ }
+
+ ffs = folio_detach_private(folio);
+ if (!ffs)
+ return;
+
+ WARN_ON_ONCE(ffs->read_pages_pending != 0);
+ kmem_cache_free(ffs_entry_slab, ffs);
+ }
+
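+ /*
+ * Read a large folio (or each folio of a readahead batch): map its
+ * blocks, zero unmapped ranges and merge mapped blocks into read bios.
+ */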
+ static int f2fs_read_data_large_folio(struct inode *inode,
++ struct fsverity_info *vi,
+ struct readahead_control *rac, struct folio *folio)
+ {
+ struct bio *bio = NULL;
+ sector_t last_block_in_bio = 0;
+ struct f2fs_map_blocks map = {0, };
+ pgoff_t index, offset, next_pgofs = 0;
+ unsigned max_nr_pages = rac ? readahead_count(rac) :
+ folio_nr_pages(folio);
+ unsigned nrpages;
+ struct f2fs_folio_state *ffs;
+ int ret = 0;
+ bool folio_in_bio;
+
+ if (!IS_IMMUTABLE(inode) || f2fs_compressed_file(inode)) {
+ if (folio)
+ folio_unlock(folio);
+ return -EOPNOTSUPP;
+ }
+
+ map.m_seg_type = NO_CHECK_TYPE;
+
+ if (rac)
+ folio = readahead_folio(rac);
+ next_folio:
+ if (!folio)
+ goto out;
+
+ folio_in_bio = false;
+ index = folio->index;
+ offset = 0;
+ ffs = NULL;
+ nrpages = folio_nr_pages(folio);
+
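+ /* Map and read the folio one block at a time. */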
+ for (; nrpages; nrpages--, max_nr_pages--, index++, offset++) {
+ sector_t block_nr;
+ /*
+ * Map blocks using the previous result first.
+ */
+ if (map.m_flags & F2FS_MAP_MAPPED) {
+ if (index > map.m_lblk &&
+ index < (map.m_lblk + map.m_len))
+ goto got_it;
+ } else if (index < next_pgofs) {
+ /* hole case */
+ goto got_it;
+ }
+
+ /*
+ * Then do more f2fs_map_blocks() calls until we are
+ * done with this page.
+ */
+ memset(&map, 0, sizeof(map));
+ map.m_next_pgofs = &next_pgofs;
+ map.m_seg_type = NO_CHECK_TYPE;
+ map.m_lblk = index;
+ map.m_len = max_nr_pages;
+
+ ret = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT);
+ if (ret)
+ goto err_out;
+ got_it:
+ if ((map.m_flags & F2FS_MAP_MAPPED)) {
+ block_nr = map.m_pblk + index - map.m_lblk;
+ if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
+ DATA_GENERIC_ENHANCE_READ)) {
+ ret = -EFSCORRUPTED;
+ goto err_out;
+ }
+ } else {
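+ /* Hole: zero the block and verify it for fs-verity files. */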
- folio_zero_range(folio, offset << PAGE_SHIFT, PAGE_SIZE);
- if (f2fs_need_verity(inode, index) &&
- !fsverity_verify_page(folio_file_page(folio,
- index))) {
++ size_t page_offset = offset << PAGE_SHIFT;
++ folio_zero_range(folio, page_offset, PAGE_SIZE);
++ if (vi && !fsverity_verify_blocks(vi, folio, PAGE_SIZE, page_offset)) {
+ ret = -EIO;
+ goto err_out;
+ }
+ continue;
+ }
+
+ /*
+ * We must increment read_pages_pending before submitting any bio,
+ * to prevent a premature folio_end_read() call on the folio.
+ */
+ if (folio_test_large(folio)) {
+ ffs = ffs_find_or_alloc(folio);
+
+ /* account this block as a pending read */
+ spin_lock_irq(&ffs->state_lock);
+ ffs->read_pages_pending++;
+ spin_unlock_irq(&ffs->state_lock);
+ }
+
+ /*
+ * This page will go to BIO. Do we need to send this
+ * BIO off first?
+ */
+ if (bio && (!page_is_mergeable(F2FS_I_SB(inode), bio,
+ last_block_in_bio, block_nr) ||
+ !f2fs_crypt_mergeable_bio(bio, inode, index, NULL))) {
+ submit_and_realloc:
+ f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
+ bio = NULL;
+ }
+ if (bio == NULL)
- bio = f2fs_grab_read_bio(inode, block_nr,
- max_nr_pages,
++ bio = f2fs_grab_read_bio(inode, vi,
++ block_nr, max_nr_pages,
+ f2fs_ra_op_flags(rac),
+ index, false);
+
+ /*
+ * If the page is under writeback, we need to wait for
+ * its completion to see the correct decrypted data.
+ */
+ f2fs_wait_on_block_writeback(inode, block_nr);
+
+ if (!bio_add_folio(bio, folio, F2FS_BLKSIZE,
+ offset << PAGE_SHIFT))
+ goto submit_and_realloc;
+
+ folio_in_bio = true;
+ inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
+ f2fs_update_iostat(F2FS_I_SB(inode), NULL, FS_DATA_READ_IO,
+ F2FS_BLKSIZE);
+ last_block_in_bio = block_nr;
+ }
+ trace_f2fs_read_folio(folio, DATA);
+ err_out:
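+ /*
+ * If no part of this folio was added to a bio, end the read here;
+ * otherwise bio completion will end it.
+ */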
+ if (!folio_in_bio) {
+ folio_end_read(folio, !ret);
+ if (ret)
+ return ret;
+ }
+ if (rac) {
+ folio = readahead_folio(rac);
+ goto next_folio;
+ }
+ out:
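+ /* Submit any remaining bio; on error, leave the folio not uptodate. */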
+ f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
+ if (ret && folio) {
+ /* Wait for in-flight bios and clear the uptodate flag. */
+ folio_lock(folio);
+ folio_clear_uptodate(folio);
+ folio_unlock(folio);
+ }
+ return ret;
+ }
+
/*
* This function was originally taken from fs/mpage.c, and customized for f2fs.
* Major change was from block_size == page_size in f2fs by default.
unsigned max_nr_pages = nr_pages;
int ret = 0;
- return f2fs_read_data_large_folio(inode, rac, folio);
+ if (mapping_large_folio_support(mapping))
++ return f2fs_read_data_large_folio(inode, vi, rac, folio);
+
#ifdef CONFIG_F2FS_FS_COMPRESSION
if (f2fs_compressed_file(inode)) {
index = rac ? readahead_index(rac) : folio->index;