/*
 * Context for deferred post-read processing of one buffer_head:
 * fscrypt decryption and/or fsverity verification, run from a
 * workqueue because both may sleep.
 */
struct postprocess_bh_ctx {
	struct work_struct work;	/* queued as decrypt_bh or verify_bh */
	struct buffer_head *bh;		/* the buffer being post-processed */
	/*
	 * fsverity info for the inode, or NULL if this buffer does not
	 * need verification.  Looked up at read-submission time so the
	 * work function does not have to re-derive it from the inode.
	 */
	struct fsverity_info *vi;
};
static void verify_bh(struct work_struct *work)
struct postprocess_bh_ctx *ctx =
container_of(work, struct postprocess_bh_ctx, work);
struct buffer_head *bh = ctx->bh;
- struct inode *inode = bh->b_folio->mapping->host;
bool valid;
- valid = fsverity_verify_blocks(*fsverity_info_addr(inode), bh->b_folio,
- bh->b_size, bh_offset(bh));
+ valid = fsverity_verify_blocks(ctx->vi, bh->b_folio, bh->b_size,
+ bh_offset(bh));
end_buffer_async_read(bh, valid);
kfree(ctx);
}
-static bool need_fsverity(struct buffer_head *bh)
-{
- struct folio *folio = bh->b_folio;
- struct inode *inode = folio->mapping->host;
-
- return fsverity_active(inode) &&
- /* needed by ext4 */
- folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
-}
-
static void decrypt_bh(struct work_struct *work)
{
struct postprocess_bh_ctx *ctx =
err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size,
bh_offset(bh));
- if (err == 0 && need_fsverity(bh)) {
+ if (err == 0 && ctx->vi) {
/*
* We use different work queues for decryption and for verity
* because verity may require reading metadata pages that need
{
struct inode *inode = bh->b_folio->mapping->host;
bool decrypt = fscrypt_inode_uses_fs_layer_crypto(inode);
- bool verify = need_fsverity(bh);
+ struct fsverity_info *vi = NULL;
+
+ /* needed by ext4 */
+ if (bh->b_folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE))
+ vi = fsverity_get_info(inode);
/* Decrypt (with fscrypt) and/or verify (with fsverity) if needed. */
- if (uptodate && (decrypt || verify)) {
+ if (uptodate && (decrypt || vi)) {
struct postprocess_bh_ctx *ctx =
kmalloc(sizeof(*ctx), GFP_ATOMIC);
if (ctx) {
ctx->bh = bh;
+ ctx->vi = vi;
if (decrypt) {
INIT_WORK(&ctx->work, decrypt_bh);
fscrypt_enqueue_decrypt_work(&ctx->work);