btrfs_folio_test_uptodate(fs_info, folio, start, len) ||
start >= i_size_read(folio->mapping->host))
return true;
- return fsverity_verify_folio(folio);
+ return fsverity_verify_folio(*fsverity_info_addr(folio->mapping->host),
+ folio);
}
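fsverity_info_addr() itself does not appear in this excerpt. For orientation only, one plausible shape is sketched below, assuming the info pointer is still reachable from the VFS inode as before this series; the real definition may well differ.

/* Hypothetical sketch, not the actual definition from this series. */
static inline struct fsverity_info **fsverity_info_addr(const struct inode *inode)
{
	return (struct fsverity_info **)&inode->i_verity_info;
}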
static void end_folio_read(struct folio *folio, bool uptodate, u64 start, u32 len)
struct postprocess_bh_ctx *ctx =
container_of(work, struct postprocess_bh_ctx, work);
struct buffer_head *bh = ctx->bh;
+ struct inode *inode = bh->b_folio->mapping->host;
bool valid;
- valid = fsverity_verify_blocks(bh->b_folio, bh->b_size, bh_offset(bh));
+ valid = fsverity_verify_blocks(*fsverity_info_addr(inode), bh->b_folio,
+ bh->b_size, bh_offset(bh));
end_buffer_async_read(bh, valid);
kfree(ctx);
}
struct bio_post_read_ctx *ctx =
container_of(work, struct bio_post_read_ctx, work);
struct bio *bio = ctx->bio;
+ struct inode *inode = bio_first_folio_all(bio)->mapping->host;
/*
* fsverity_verify_bio() may call readahead() again, and although verity
mempool_free(ctx, bio_post_read_ctx_pool);
bio->bi_private = NULL;
- fsverity_verify_bio(bio);
+ fsverity_verify_bio(*fsverity_info_addr(inode), bio);
__read_end_io(bio);
}
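For context: as the truncated comment above hints, verification cannot run in the bio completion path itself, so these filesystems bounce it to the fs-verity workqueue. A sketch of that hand-off follows; the endio and worker names are hypothetical, while fsverity_enqueue_verify_work() is the real entry point declared later in this patch.

static void example_read_endio(struct bio *bio)
{
	struct bio_post_read_ctx *ctx = bio->bi_private;

	/* Defer to process context; the worker then calls
	 * fsverity_verify_bio() and completes the read, as above. */
	INIT_WORK(&ctx->work, example_verity_work);
	fsverity_enqueue_verify_work(&ctx->work);
}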
folio_size(folio));
if (first_hole == 0) {
if (ext4_need_verity(inode, folio->index) &&
- !fsverity_verify_folio(folio))
+ !fsverity_verify_folio(*fsverity_info_addr(inode),
+ folio))
goto set_error_page;
folio_end_read(folio, true);
continue;
}
if (ext4_need_verity(inode, folio->index))
- fsverity_readahead(inode, folio->index, folio_nr_pages(folio));
+ fsverity_readahead(*fsverity_info_addr(inode), folio->index,
+ folio_nr_pages(folio));
return ext4_mpage_readpages(inode, NULL, folio);
}
return;
if (ext4_need_verity(inode, readahead_index(rac)))
- fsverity_readahead(inode, readahead_index(rac),
- readahead_count(rac));
+ fsverity_readahead(*fsverity_info_addr(inode),
+ readahead_index(rac), readahead_count(rac));
ext4_mpage_readpages(inode, rac, NULL);
}
if (!rpage)
continue;
- if (fsverity_verify_page(rpage))
+ if (fsverity_verify_page(*fsverity_info_addr(rpage->mapping->host),
+ rpage))
SetPageUptodate(rpage);
else
ClearPageUptodate(rpage);
bio_for_each_folio_all(fi, bio) {
struct folio *folio = fi.folio;
+ struct fsverity_info *vi =
+ *fsverity_info_addr(folio->mapping->host);
if (!f2fs_is_compressed_page(folio) &&
- !fsverity_verify_page(&folio->page)) {
+ !fsverity_verify_page(vi, &folio->page)) {
bio->bi_status = BLK_STS_IOERR;
break;
}
}
} else {
- fsverity_verify_bio(bio);
+ struct inode *inode = bio_first_folio_all(bio)->mapping->host;
+
+ fsverity_verify_bio(*fsverity_info_addr(inode), bio);
}
f2fs_finish_read_bio(bio, true);
zero_out:
folio_zero_segment(folio, 0, folio_size(folio));
if (f2fs_need_verity(inode, index) &&
- !fsverity_verify_folio(folio)) {
+ !fsverity_verify_folio(*fsverity_info_addr(folio->mapping->host),
+ folio)) {
ret = -EIO;
goto out;
}
}
if (f2fs_need_verity(inode, folio->index))
- fsverity_readahead(inode, folio->index, folio_nr_pages(folio));
+ fsverity_readahead(*fsverity_info_addr(inode), folio->index,
+ folio_nr_pages(folio));
return f2fs_mpage_readpages(inode, NULL, folio);
}
return;
if (f2fs_need_verity(inode, readahead_index(rac)))
- fsverity_readahead(inode, readahead_index(rac),
- readahead_count(rac));
+ fsverity_readahead(*fsverity_info_addr(inode),
+ readahead_index(rac), readahead_count(rac));
f2fs_mpage_readpages(inode, rac, NULL);
}
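The ext4 and f2fs readahead hooks above share one shape, condensed here as a sketch; fs_need_verity() stands in for ext4_need_verity()/f2fs_need_verity(), both of which imply IS_VERITY(), so the fsverity_info is set up by the time the call is reached.

static void example_fs_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;

	/* Start fetching Merkle tree pages before the data read completes. */
	if (fs_need_verity(inode, readahead_index(rac)))
		fsverity_readahead(*fsverity_info_addr(inode),
				   readahead_index(rac),
				   readahead_count(rac));
	/* ...then issue the data readahead itself. */
}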
/**
* fsverity_readahead() - kick off readahead on fsverity hashes
- * @inode: inode that is being read
+ * @vi: fsverity_info for the inode to be read
* @index: first file data page index that is being read
* @nr_pages: number of file data pages to be read
*
* ensure that the hashes are already cached on completion of the file data
* read, if possible.
*/
-void fsverity_readahead(struct inode *inode, pgoff_t index,
+void fsverity_readahead(struct fsverity_info *vi, pgoff_t index,
unsigned long nr_pages)
{
- const struct fsverity_info *vi = *fsverity_info_addr(inode);
+ struct inode *inode = vi->inode;
const struct merkle_tree_params *params = &vi->tree_params;
u64 start_hidx = (u64)index << params->log_blocks_per_page;
u64 end_hidx =
static void
fsverity_init_verification_context(struct fsverity_verification_context *ctx,
- struct inode *inode)
+ struct fsverity_info *vi)
{
- struct fsverity_info *vi = *fsverity_info_addr(inode);
-
- ctx->inode = inode;
+ ctx->inode = vi->inode;
ctx->vi = vi;
ctx->num_pending = 0;
if (vi->tree_params.hash_alg->algo_id == HASH_ALGO_SHA256 &&
/**
* fsverity_verify_blocks() - verify data in a folio
+ * @vi: fsverity_info for the inode whose data is being verified
* @folio: the folio containing the data to verify
* @len: the length of the data to verify in the folio
* @offset: the offset of the data to verify in the folio
*
* Return: %true if the data is valid, else %false.
*/
-bool fsverity_verify_blocks(struct folio *folio, size_t len, size_t offset)
+bool fsverity_verify_blocks(struct fsverity_info *vi, struct folio *folio,
+ size_t len, size_t offset)
{
struct fsverity_verification_context ctx;
- fsverity_init_verification_context(&ctx, folio->mapping->host);
+ fsverity_init_verification_context(&ctx, vi);
if (fsverity_add_data_blocks(&ctx, folio, len, offset) &&
fsverity_verify_pending_blocks(&ctx))
#ifdef CONFIG_BLOCK
/**
* fsverity_verify_bio() - verify a 'read' bio that has just completed
+ * @vi: fsverity_info for the inode whose data is being verified
* @bio: the bio to verify
*
* Verify the bio's data against the file's Merkle tree. All bio data segments
* filesystems) must instead call fsverity_verify_page() directly on each page.
* All filesystems must also call fsverity_verify_page() on holes.
*/
-void fsverity_verify_bio(struct bio *bio)
+void fsverity_verify_bio(struct fsverity_info *vi, struct bio *bio)
{
- struct inode *inode = bio_first_folio_all(bio)->mapping->host;
struct fsverity_verification_context ctx;
struct folio_iter fi;
- fsverity_init_verification_context(&ctx, inode);
+ fsverity_init_verification_context(&ctx, vi);
bio_for_each_folio_all(fi, bio) {
if (!fsverity_add_data_blocks(&ctx, fi.folio, fi.length,
/* verify.c */
-bool fsverity_verify_blocks(struct folio *folio, size_t len, size_t offset);
-void fsverity_verify_bio(struct bio *bio);
+bool fsverity_verify_blocks(struct fsverity_info *vi, struct folio *folio,
+ size_t len, size_t offset);
+void fsverity_verify_bio(struct fsverity_info *vi, struct bio *bio);
void fsverity_enqueue_verify_work(struct work_struct *work);
#else /* !CONFIG_FS_VERITY */
+/*
+ * Declared but deliberately not defined: this lets code that references it
+ * compile when !CONFIG_FS_VERITY. Every call site must be eliminated as dead
+ * code by the compiler; any call that survives then forces a link error.
+ */
+struct fsverity_info **fsverity_info_addr(const struct inode *inode);
+
static inline struct fsverity_info *fsverity_get_info(const struct inode *inode)
{
return NULL;
/* verify.c */
-static inline bool fsverity_verify_blocks(struct folio *folio, size_t len,
+static inline bool fsverity_verify_blocks(struct fsverity_info *vi,
+ struct folio *folio, size_t len,
size_t offset)
{
WARN_ON_ONCE(1);
return false;
}
-static inline void fsverity_verify_bio(struct bio *bio)
+static inline void fsverity_verify_bio(struct fsverity_info *vi,
+ struct bio *bio)
{
WARN_ON_ONCE(1);
}
#endif /* !CONFIG_FS_VERITY */
-static inline bool fsverity_verify_folio(struct folio *folio)
+static inline bool fsverity_verify_folio(struct fsverity_info *vi,
+ struct folio *folio)
{
- return fsverity_verify_blocks(folio, folio_size(folio), 0);
+ return fsverity_verify_blocks(vi, folio, folio_size(folio), 0);
}
-static inline bool fsverity_verify_page(struct page *page)
+static inline bool fsverity_verify_page(struct fsverity_info *vi,
+ struct page *page)
{
- return fsverity_verify_blocks(page_folio(page), PAGE_SIZE, 0);
+ return fsverity_verify_blocks(vi, page_folio(page), PAGE_SIZE, 0);
}
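Tying the !CONFIG_FS_VERITY pieces together with a hypothetical caller: fsverity_active() (the long-standing helper built on fsverity_get_info()) is compile-time false in that configuration, so the compiler drops the fsverity_info_addr() reference as dead code and the declaration-only stub above never reaches the linker.

static bool example_guarded_verify(struct folio *folio)
{
	struct inode *inode = folio->mapping->host;

	/* Constant false when !CONFIG_FS_VERITY: fsverity_get_info()
	 * is a stub returning NULL, so everything below is eliminated. */
	if (!fsverity_active(inode))
		return true;
	return fsverity_verify_folio(*fsverity_info_addr(inode), folio);
}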
/**
}
void fsverity_cleanup_inode(struct inode *inode);
-void fsverity_readahead(struct inode *inode, pgoff_t index,
+void fsverity_readahead(struct fsverity_info *vi, pgoff_t index,
unsigned long nr_pages);
struct page *generic_read_merkle_tree_page(struct inode *inode, pgoff_t index);