struct bio_post_read_ctx {
struct bio *bio;
struct f2fs_sb_info *sbi;
+ struct fsverity_info *vi;
struct work_struct work;
unsigned int enabled_steps;
/*
container_of(work, struct bio_post_read_ctx, work);
struct bio *bio = ctx->bio;
bool may_have_compressed_pages = (ctx->enabled_steps & STEP_DECOMPRESS);
+ struct fsverity_info *vi = ctx->vi;
/*
* fsverity_verify_bio() may call readahead() again, and while verity
bio_for_each_folio_all(fi, bio) {
struct folio *folio = fi.folio;
- struct fsverity_info *vi =
- *fsverity_info_addr(folio->mapping->host);
if (!f2fs_is_compressed_page(folio) &&
!fsverity_verify_page(vi, &folio->page)) {
}
}
} else {
- struct inode *inode = bio_first_folio_all(bio)->mapping->host;
-
- fsverity_verify_bio(*fsverity_info_addr(inode), bio);
+ fsverity_verify_bio(vi, bio);
}
f2fs_finish_read_bio(bio, true);
f2fs_up_write(&io->io_rwsem);
}
-static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
+static struct bio *f2fs_grab_read_bio(struct inode *inode,
+ struct fsverity_info *vi, block_t blkaddr,
unsigned nr_pages, blk_opf_t op_flag,
pgoff_t first_idx, bool for_write)
{
if (fscrypt_inode_uses_fs_layer_crypto(inode))
post_read_steps |= STEP_DECRYPT;
- if (f2fs_need_verity(inode, first_idx))
+ if (vi)
post_read_steps |= STEP_VERITY;
/*
ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
ctx->bio = bio;
ctx->sbi = sbi;
+ ctx->vi = vi;
ctx->enabled_steps = post_read_steps;
ctx->fs_blkaddr = blkaddr;
ctx->decompression_attempted = false;
}
/* This can handle encryption stuffs */
-static void f2fs_submit_page_read(struct inode *inode, struct folio *folio,
- block_t blkaddr, blk_opf_t op_flags,
- bool for_write)
+static void f2fs_submit_page_read(struct inode *inode, struct fsverity_info *vi,
+ struct folio *folio, block_t blkaddr,
+ blk_opf_t op_flags, bool for_write)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct bio *bio;
- bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags,
- folio->index, for_write);
+ bio = f2fs_grab_read_bio(inode, vi, blkaddr, 1, op_flags, folio->index,
+ for_write);
/* wait for GCed page writeback via META_MAPPING */
f2fs_wait_on_block_writeback(inode, blkaddr);
return err;
}
+/*
+ * f2fs_need_verity() - return the inode's fsverity_info if the page at
+ * @idx needs fs-verity verification when read, or NULL otherwise.
+ *
+ * Only page indexes below DIV_ROUND_UP(i_size, PAGE_SIZE) — i.e. pages
+ * holding file data — are considered; for indexes at or past EOF no
+ * verification is requested.  NOTE(review): assumes fsverity_get_info()
+ * returns NULL for inodes without verity enabled, so callers can use the
+ * returned pointer directly as the "needs verity" flag — confirm against
+ * the fs/verity API.
+ */
+static inline struct fsverity_info *f2fs_need_verity(const struct inode *inode,
+ pgoff_t idx)
+{
+ /* Pages within i_size are the only ones covered by verity. */
+ if (idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE))
+ return fsverity_get_info(inode);
+ return NULL;
+}
+
struct folio *f2fs_get_read_data_folio(struct inode *inode, pgoff_t index,
blk_opf_t op_flags, bool for_write, pgoff_t *next_pgofs)
{
return folio;
}
- f2fs_submit_page_read(inode, folio, dn.data_blkaddr,
- op_flags, for_write);
+ f2fs_submit_page_read(inode, f2fs_need_verity(inode, folio->index),
+ folio, dn.data_blkaddr, op_flags, for_write);
return folio;
put_err:
return rac ? REQ_RAHEAD : 0;
}
-static int f2fs_read_single_page(struct inode *inode, struct folio *folio,
- unsigned nr_pages,
- struct f2fs_map_blocks *map,
- struct bio **bio_ret,
- sector_t *last_block_in_bio,
- struct readahead_control *rac)
+static int f2fs_read_single_page(struct inode *inode, struct fsverity_info *vi,
+ struct folio *folio, unsigned int nr_pages,
+ struct f2fs_map_blocks *map,
+ struct bio **bio_ret,
+ sector_t *last_block_in_bio,
+ struct readahead_control *rac)
{
struct bio *bio = *bio_ret;
const unsigned int blocksize = F2FS_BLKSIZE;
} else {
zero_out:
folio_zero_segment(folio, 0, folio_size(folio));
- if (f2fs_need_verity(inode, index) &&
- !fsverity_verify_folio(
- *fsverity_info_addr(folio->mapping->host),
- folio)) {
+ if (vi && !fsverity_verify_folio(vi, folio)) {
ret = -EIO;
goto out;
}
bio = NULL;
}
if (bio == NULL)
- bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
- f2fs_ra_op_flags(rac), index,
- false);
+ bio = f2fs_grab_read_bio(inode, vi, block_nr, nr_pages,
+ f2fs_ra_op_flags(rac), index, false);
/*
* If the page is under writeback, we need to wait for
}
if (!bio)
- bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages - i,
- f2fs_ra_op_flags(rac),
- folio->index, for_write);
+ bio = f2fs_grab_read_bio(inode, cc->vi, blkaddr,
+ nr_pages - i,
+ f2fs_ra_op_flags(rac),
+ folio->index, for_write);
if (!bio_add_folio(bio, folio, blocksize, 0))
goto submit_and_realloc;
* This function was originally taken from fs/mpage.c, and customized for f2fs.
* Major change was from block_size == page_size in f2fs by default.
*/
-static int f2fs_mpage_readpages(struct inode *inode,
+static int f2fs_mpage_readpages(struct inode *inode, struct fsverity_info *vi,
struct readahead_control *rac, struct folio *folio)
{
struct bio *bio = NULL;
/* there are remained compressed pages, submit them */
if (!f2fs_cluster_can_merge_page(&cc, index)) {
+ cc.vi = vi;
ret = f2fs_read_multi_pages(&cc, &bio,
max_nr_pages,
&last_block_in_bio,
read_single_page:
#endif
- ret = f2fs_read_single_page(inode, folio, max_nr_pages, &map,
- &bio, &last_block_in_bio, rac);
+ ret = f2fs_read_single_page(inode, vi, folio, max_nr_pages,
+ &map, &bio, &last_block_in_bio,
+ rac);
if (ret) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
set_error_page:
if (f2fs_compressed_file(inode)) {
/* last page */
if (nr_pages == 1 && !f2fs_cluster_is_empty(&cc)) {
+ cc.vi = vi;
ret = f2fs_read_multi_pages(&cc, &bio,
max_nr_pages,
&last_block_in_bio,
static int f2fs_read_data_folio(struct file *file, struct folio *folio)
{
struct inode *inode = folio->mapping->host;
+ struct fsverity_info *vi = NULL;
int ret;
trace_f2fs_readpage(folio, DATA);
return ret;
}
- if (f2fs_need_verity(inode, folio->index))
- fsverity_readahead(*fsverity_info_addr(inode), folio->index,
- folio_nr_pages(folio));
- return f2fs_mpage_readpages(inode, NULL, folio);
+ vi = f2fs_need_verity(inode, folio->index);
+ if (vi)
+ fsverity_readahead(vi, folio->index, folio_nr_pages(folio));
+ return f2fs_mpage_readpages(inode, vi, NULL, folio);
}
static void f2fs_readahead(struct readahead_control *rac)
{
struct inode *inode = rac->mapping->host;
+ struct fsverity_info *vi = NULL;
trace_f2fs_readpages(inode, readahead_index(rac), readahead_count(rac));
if (f2fs_has_inline_data(inode))
return;
- if (f2fs_need_verity(inode, readahead_index(rac)))
- fsverity_readahead(*fsverity_info_addr(inode),
- readahead_index(rac), readahead_count(rac));
- f2fs_mpage_readpages(inode, rac, NULL);
+ vi = f2fs_need_verity(inode, readahead_index(rac));
+ if (vi)
+ fsverity_readahead(vi, readahead_index(rac),
+ readahead_count(rac));
+ f2fs_mpage_readpages(inode, vi, rac, NULL);
}
int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
err = -EFSCORRUPTED;
goto put_folio;
}
- f2fs_submit_page_read(use_cow ?
- F2FS_I(inode)->cow_inode : inode,
- folio, blkaddr, 0, true);
+ f2fs_submit_page_read(use_cow ? F2FS_I(inode)->cow_inode :
+ inode,
+ NULL, /* can't write to fsverity files */
+ folio, blkaddr, 0, true);
folio_lock(folio);
if (unlikely(folio->mapping != mapping)) {