From: Matthew Wilcox (Oracle)
Date: Fri, 13 Jun 2025 19:47:43 +0000 (+0100)
Subject: secretmem: remove uses of struct page
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=32925ee63beb7e1a3fad25e2f54cefa2d32612de;p=thirdparty%2Fkernel%2Flinux.git

secretmem: remove uses of struct page

Use filemap_lock_folio() instead of find_lock_page() to retrieve a folio
from the page cache.

[lorenzo.stoakes@oracle.com: fix check of filemap_lock_folio() return value]
  Link: https://lkml.kernel.org/r/fdbca1d0-01a3-4653-85ed-cf257bb848be@lucifer.local
Link: https://lkml.kernel.org/r/20250613194744.3175157-1-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle)
Signed-off-by: Lorenzo Stoakes
Reviewed-by: Mike Rapoport (Microsoft)
Acked-by: David Hildenbrand
Signed-off-by: Andrew Morton
---

diff --git a/mm/secretmem.c b/mm/secretmem.c
index 9a11a38a67708..62ae71907fe43 100644
--- a/mm/secretmem.c
+++ b/mm/secretmem.c
@@ -54,7 +54,6 @@ static vm_fault_t secretmem_fault(struct vm_fault *vmf)
 	pgoff_t offset = vmf->pgoff;
 	gfp_t gfp = vmf->gfp_mask;
 	unsigned long addr;
-	struct page *page;
 	struct folio *folio;
 	vm_fault_t ret;
 	int err;
@@ -65,16 +64,15 @@ static vm_fault_t secretmem_fault(struct vm_fault *vmf)
 	filemap_invalidate_lock_shared(mapping);
 
 retry:
-	page = find_lock_page(mapping, offset);
-	if (!page) {
+	folio = filemap_lock_folio(mapping, offset);
+	if (IS_ERR(folio)) {
 		folio = folio_alloc(gfp | __GFP_ZERO, 0);
 		if (!folio) {
 			ret = VM_FAULT_OOM;
 			goto out;
 		}
 
-		page = &folio->page;
-		err = set_direct_map_invalid_noflush(page);
+		err = set_direct_map_invalid_noflush(folio_page(folio, 0));
 		if (err) {
 			folio_put(folio);
 			ret = vmf_error(err);
@@ -90,7 +88,7 @@ retry:
 			 * already happened when we marked the page invalid
 			 * which guarantees that this call won't fail
 			 */
-			set_direct_map_default_noflush(page);
+			set_direct_map_default_noflush(folio_page(folio, 0));
 			if (err == -EEXIST)
 				goto retry;
 
@@ -98,11 +96,11 @@ retry:
 			goto out;
 		}
 
-		addr = (unsigned long)page_address(page);
+		addr = (unsigned long)folio_address(folio);
 		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
 	}
 
-	vmf->page = page;
+	vmf->page = folio_file_page(folio, vmf->pgoff);
 	ret = VM_FAULT_LOCKED;
 
 out:
@@ -154,7 +152,7 @@ static int secretmem_migrate_folio(struct address_space *mapping,
 
 static void secretmem_free_folio(struct folio *folio)
 {
-	set_direct_map_default_noflush(&folio->page);
+	set_direct_map_default_noflush(folio_page(folio, 0));
 	folio_zero_segment(folio, 0, folio_size(folio));
 }
 
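
The subtlety the follow-up fix addresses is the return convention: find_lock_page()
reports a cache miss as NULL, while filemap_lock_folio() reports it as
ERR_PTR(-ENOENT), so the miss check must use IS_ERR() rather than a NULL test.
Below is a minimal user-space sketch of that convention, with ERR_PTR()/IS_ERR()
helpers modelled on the kernel's include/linux/err.h and a hypothetical
lookup_slot() standing in for the page-cache lookup (neither is the real kernel
API):

	#include <errno.h>
	#include <stdio.h>
	#include <stdint.h>

	/*
	 * Minimal user-space copies of the kernel's ERR_PTR()/IS_ERR()
	 * helpers: errnos are encoded as pointers in the last page of the
	 * address space.
	 */
	#define MAX_ERRNO	4095
	#define IS_ERR_VALUE(x)	((uintptr_t)(x) >= (uintptr_t)-MAX_ERRNO)

	static inline void *ERR_PTR(long error)
	{
		return (void *)(uintptr_t)error;
	}

	static inline long PTR_ERR(const void *ptr)
	{
		return (long)(intptr_t)ptr;
	}

	static inline int IS_ERR(const void *ptr)
	{
		return IS_ERR_VALUE((uintptr_t)ptr);
	}

	struct folio { int dummy; };

	/*
	 * Hypothetical stand-in for filemap_lock_folio(): a cache miss is
	 * reported as ERR_PTR(-ENOENT), never as NULL.
	 */
	static struct folio *lookup_slot(int present)
	{
		static struct folio f;

		if (!present)
			return ERR_PTR(-ENOENT);
		return &f;
	}

	int main(void)
	{
		struct folio *folio = lookup_slot(0);

		/*
		 * A NULL check (the old find_lock_page() convention) would
		 * wrongly treat this error pointer as a valid folio.
		 */
		if (IS_ERR(folio))
			printf("miss: error %ld, allocate a new folio here\n",
			       PTR_ERR(folio));
		else
			printf("hit: folio at %p\n", (void *)folio);

		return 0;
	}

Built with a plain cc, the sketch takes the miss branch; in secretmem_fault()
that is the branch which falls through to folio_alloc() above.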