end, page_ops);
}
-static bool btrfs_verify_folio(struct folio *folio, u64 start, u32 len)
+static bool btrfs_verify_folio(struct fsverity_info *vi, struct folio *folio,
+ u64 start, u32 len)
{
struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
- if (!fsverity_active(folio->mapping->host) ||
- btrfs_folio_test_uptodate(fs_info, folio, start, len) ||
- start >= i_size_read(folio->mapping->host))
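+ /*
+ * A NULL vi means verity is not enabled, or the caller determined the
+ * range is entirely beyond EOF; either way there is nothing to verify.
+ */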
+ if (!vi || btrfs_folio_test_uptodate(fs_info, folio, start, len))
return true;
- return fsverity_verify_folio(*fsverity_info_addr(folio->mapping->host),
- folio);
+ return fsverity_verify_folio(vi, folio);
}
-static void end_folio_read(struct folio *folio, bool uptodate, u64 start, u32 len)
+static void end_folio_read(struct fsverity_info *vi, struct folio *folio,
+ bool uptodate, u64 start, u32 len)
{
struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
ASSERT(folio_pos(folio) <= start &&
start + len <= folio_next_pos(folio));
- if (uptodate && btrfs_verify_folio(folio, start, len))
+ if (uptodate && btrfs_verify_folio(vi, folio, start, len))
btrfs_folio_set_uptodate(fs_info, folio, start, len);
else
btrfs_folio_clear_uptodate(fs_info, folio, start, len);
static void end_bbio_data_read(struct btrfs_bio *bbio)
{
struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;
+ struct inode *inode = &bbio->inode->vfs_inode;
struct bio *bio = &bbio->bio;
+ struct fsverity_info *vi = NULL;
struct folio_iter fi;
ASSERT(!bio_flagged(bio, BIO_CLONED));
+
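+ /*
+ * Look up the verity info once for the whole bio instead of once per
+ * folio. Data past i_size is not covered by fs-verity, so a bio that
+ * starts beyond EOF needs no verification.
+ */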
+ if (bbio->file_offset < i_size_read(inode))
+ vi = fsverity_get_info(inode);
+
bio_for_each_folio_all(fi, &bbio->bio) {
bool uptodate = !bio->bi_status;
struct folio *folio = fi.folio;
- struct inode *inode = folio->mapping->host;
u64 start = folio_pos(folio) + fi.offset;
btrfs_debug(fs_info,
}
/* Update page status and unlock. */
- end_folio_read(folio, uptodate, start, fi.length);
+ end_folio_read(vi, folio, uptodate, start, fi.length);
}
bio_put(bio);
}
* return 0 on success, otherwise return error
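+ * vi is the inode's fsverity info, or NULL when the read needs no
+ * verification.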
*/
static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
- struct btrfs_bio_ctrl *bio_ctrl)
+ struct btrfs_bio_ctrl *bio_ctrl,
+ struct fsverity_info *vi)
{
struct inode *inode = folio->mapping->host;
struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
ASSERT(IS_ALIGNED(cur, fs_info->sectorsize));
if (cur >= last_byte) {
folio_zero_range(folio, pg_offset, end - cur + 1);
- end_folio_read(folio, true, cur, end - cur + 1);
+ end_folio_read(vi, folio, true, cur, end - cur + 1);
break;
}
if (btrfs_folio_test_uptodate(fs_info, folio, cur, blocksize)) {
- end_folio_read(folio, true, cur, blocksize);
+ end_folio_read(vi, folio, true, cur, blocksize);
continue;
}
em = get_extent_map(BTRFS_I(inode), folio, cur, end - cur + 1, em_cached);
if (IS_ERR(em)) {
- end_folio_read(folio, false, cur, end + 1 - cur);
+ end_folio_read(vi, folio, false, cur, end + 1 - cur);
return PTR_ERR(em);
}
extent_offset = cur - em->start;
/* we've found a hole, just zero and go on */
if (block_start == EXTENT_MAP_HOLE) {
folio_zero_range(folio, pg_offset, blocksize);
- end_folio_read(folio, true, cur, blocksize);
+ end_folio_read(vi, folio, true, cur, blocksize);
continue;
}
/* the get_extent function already copied into the folio */
if (block_start == EXTENT_MAP_INLINE) {
- end_folio_read(folio, true, cur, blocksize);
+ end_folio_read(vi, folio, true, cur, blocksize);
continue;
}
int btrfs_read_folio(struct file *file, struct folio *folio)
{
- struct btrfs_inode *inode = folio_to_inode(folio);
+ struct inode *vfs_inode = folio->mapping->host;
+ struct btrfs_inode *inode = BTRFS_I(vfs_inode);
const u64 start = folio_pos(folio);
const u64 end = start + folio_size(folio) - 1;
struct extent_state *cached_state = NULL;
.last_em_start = U64_MAX,
};
struct extent_map *em_cached = NULL;
+ struct fsverity_info *vi = NULL;
int ret;
lock_extents_for_read(inode, start, end, &cached_state);
- ret = btrfs_do_readpage(folio, &em_cached, &bio_ctrl);
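+ /* Reads entirely past EOF are not verified, skip the verity lookup. */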
+ if (start < i_size_read(vfs_inode))
+ vi = fsverity_get_info(vfs_inode);
+ ret = btrfs_do_readpage(folio, &em_cached, &bio_ctrl, vi);
btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
btrfs_free_extent_map(em_cached);
.last_em_start = U64_MAX,
};
struct folio *folio;
- struct btrfs_inode *inode = BTRFS_I(rac->mapping->host);
+ struct inode *vfs_inode = rac->mapping->host;
+ struct btrfs_inode *inode = BTRFS_I(vfs_inode);
const u64 start = readahead_pos(rac);
const u64 end = start + readahead_length(rac) - 1;
struct extent_state *cached_state = NULL;
struct extent_map *em_cached = NULL;
+ struct fsverity_info *vi = NULL;
lock_extents_for_read(inode, start, end, &cached_state);
-
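+ /* Like btrfs_read_folio(), skip the verity lookup for ranges past EOF. */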
+ if (start < i_size_read(vfs_inode))
+ vi = fsverity_get_info(vfs_inode);
while ((folio = readahead_folio(rac)) != NULL)
- btrfs_do_readpage(folio, &em_cached, &bio_ctrl);
+ btrfs_do_readpage(folio, &em_cached, &bio_ctrl, vi);
btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);