ext4 and f2fs use largely identical code to read a page full of Merkle
tree blocks from the page cache, and the upcoming xfs fsverity support
would add yet another copy.

Move the ext4 code to fs/verity/ and use it in f2fs as well.  For f2fs
this removes the previous f2fs-specific error injection (the
f2fs_filemap_get_folio wrapper), but otherwise the behavior remains
unchanged.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Andrey Albershteyn <aalbersh@redhat.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: "Darrick J. Wong" <djwong@kernel.org>
Link: https://lore.kernel.org/r/20260128152630.627409-7-hch@lst.de
Signed-off-by: Eric Biggers <ebiggers@kernel.org>
					       pgoff_t index,
					       unsigned long num_ra_pages)
{
-	struct folio *folio;
-
	index += ext4_verity_metadata_pos(inode) >> PAGE_SHIFT;
-
-	folio = __filemap_get_folio(inode->i_mapping, index, FGP_ACCESSED, 0);
-	if (IS_ERR(folio) || !folio_test_uptodate(folio)) {
-		DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, index);
-
-		if (!IS_ERR(folio))
-			folio_put(folio);
-		else if (num_ra_pages > 1)
-			page_cache_ra_unbounded(&ractl, num_ra_pages, 0);
-		folio = read_mapping_folio(inode->i_mapping, index, NULL);
-		if (IS_ERR(folio))
-			return ERR_CAST(folio);
-	}
-	return folio_file_page(folio, index);
+	return generic_read_merkle_tree_page(inode, index, num_ra_pages);
}
static int ext4_write_merkle_tree_block(struct file *file, const void *buf,
					      pgoff_t index,
					      unsigned long num_ra_pages)
{
-	struct folio *folio;
-
	index += f2fs_verity_metadata_pos(inode) >> PAGE_SHIFT;
-
-	folio = f2fs_filemap_get_folio(inode->i_mapping, index, FGP_ACCESSED, 0);
-	if (IS_ERR(folio) || !folio_test_uptodate(folio)) {
-		DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, index);
-
-		if (!IS_ERR(folio))
-			folio_put(folio);
-		else if (num_ra_pages > 1)
-			page_cache_ra_unbounded(&ractl, num_ra_pages, 0);
-		folio = read_mapping_folio(inode->i_mapping, index, NULL);
-		if (IS_ERR(folio))
-			return ERR_CAST(folio);
-	}
-	return folio_file_page(folio, index);
+	return generic_read_merkle_tree_page(inode, index, num_ra_pages);
}
static int f2fs_write_merkle_tree_block(struct file *file, const void *buf,
			   init.o \
			   measure.o \
			   open.o \
+			   pagecache.o \
			   read_metadata.o \
			   verify.o
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Page cache helpers for fs-verity.
+ *
+ * Copyright 2019 Google LLC
+ */
+
+#include <linux/fsverity.h>
+#include <linux/pagemap.h>
+
+/**
+ * generic_read_merkle_tree_page - generic ->read_merkle_tree_page helper
+ * @inode: inode containing the Merkle tree
+ * @index: 0-based index of the page in the page cache of @inode
+ * @num_ra_pages: number of Merkle tree pages that should be prefetched
+ *
+ * The caller needs to adjust @index from the Merkle-tree relative index passed
+ * to ->read_merkle_tree_page to the actual index where the Merkle tree is
+ * stored in the page cache for @inode.
+ *
+ * Return: the uptodate page for @index on success, or an ERR_PTR() on failure.
+ */
+struct page *generic_read_merkle_tree_page(struct inode *inode, pgoff_t index,
+					   unsigned long num_ra_pages)
+{
+	struct folio *folio;
+
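+	/* Look for the page in the page cache without triggering any I/O. */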
+	folio = __filemap_get_folio(inode->i_mapping, index, FGP_ACCESSED, 0);
+	if (IS_ERR(folio) || !folio_test_uptodate(folio)) {
+		DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, index);
+
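+		/*
+		 * If a folio exists but is not uptodate, drop our reference
+		 * and fall through to read it synchronously.  If there is no
+		 * folio at all, kick off readahead on the Merkle tree pages
+		 * first (but only if the caller asked for more than one).
+		 */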
+		if (!IS_ERR(folio))
+			folio_put(folio);
+		else if (num_ra_pages > 1)
+			page_cache_ra_unbounded(&ractl, num_ra_pages, 0);
+		folio = read_mapping_folio(inode->i_mapping, index, NULL);
+		if (IS_ERR(folio))
+			return ERR_CAST(folio);
+	}
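+	/* Return the exact page for @index in case this is a large folio. */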
+	return folio_file_page(folio, index);
+}
+EXPORT_SYMBOL_GPL(generic_read_merkle_tree_page);
void fsverity_cleanup_inode(struct inode *inode);
+struct page *generic_read_merkle_tree_page(struct inode *inode, pgoff_t index,
+					   unsigned long num_ra_pages);
+
#endif /* _LINUX_FSVERITY_H */