secretmem: remove uses of struct page
author     Matthew Wilcox (Oracle) <willy@infradead.org>
Fri, 13 Jun 2025 19:47:43 +0000 (20:47 +0100)
committer  Andrew Morton <akpm@linux-foundation.org>
Thu, 10 Jul 2025 05:42:10 +0000 (22:42 -0700)
Use filemap_lock_folio() instead of find_lock_page() to retrieve a folio
from the page cache.  This removes the struct page local from
secretmem_fault(); the direct-map helpers, which still take a struct page,
now operate on the folio's first page via folio_page().  (A short
caller-side sketch of the API difference follows the tags below.)

[lorenzo.stoakes@oracle.com: fix check of filemap_lock_folio() return value]
Link: https://lkml.kernel.org/r/fdbca1d0-01a3-4653-85ed-cf257bb848be@lucifer.local
Link: https://lkml.kernel.org/r/20250613194744.3175157-1-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Acked-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
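
For context, here is a minimal caller-side sketch of the API difference driving
this patch (the lookup helper below is hypothetical and not part of
mm/secretmem.c): find_lock_page() reports a page-cache miss by returning NULL,
while filemap_lock_folio() returns an ERR_PTR(), which is why the miss check in
the first hunk below tests IS_ERR(folio) rather than !page.

#include <linux/err.h>
#include <linux/pagemap.h>

/*
 * Hypothetical helper, for illustration only: look up and lock the folio
 * at @index, returning NULL on a page-cache miss so the caller can go and
 * allocate one.
 */
static struct folio *secretmem_lookup_sketch(struct address_space *mapping,
					     pgoff_t index)
{
	/* Old style: struct page *page = find_lock_page(mapping, index);
	 * with a miss reported as page == NULL. */
	struct folio *folio = filemap_lock_folio(mapping, index);

	/* New style: a miss (or other failure) comes back as an ERR_PTR(),
	 * so it must be tested with IS_ERR(), not !folio. */
	if (IS_ERR(folio))
		return NULL;

	/* On success the folio is returned locked with a reference held. */
	return folio;
}
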
diff --git a/mm/secretmem.c b/mm/secretmem.c
index 9a11a38a677084f08398caf8de3279ca1f617dec..62ae71907fe43382987fb0ada6beac2e3b005108 100644
--- a/mm/secretmem.c
+++ b/mm/secretmem.c
@@ -54,7 +54,6 @@ static vm_fault_t secretmem_fault(struct vm_fault *vmf)
        pgoff_t offset = vmf->pgoff;
        gfp_t gfp = vmf->gfp_mask;
        unsigned long addr;
-       struct page *page;
        struct folio *folio;
        vm_fault_t ret;
        int err;
@@ -65,16 +64,15 @@ static vm_fault_t secretmem_fault(struct vm_fault *vmf)
        filemap_invalidate_lock_shared(mapping);
 
 retry:
-       page = find_lock_page(mapping, offset);
-       if (!page) {
+       folio = filemap_lock_folio(mapping, offset);
+       if (IS_ERR(folio)) {
                folio = folio_alloc(gfp | __GFP_ZERO, 0);
                if (!folio) {
                        ret = VM_FAULT_OOM;
                        goto out;
                }
 
-               page = &folio->page;
-               err = set_direct_map_invalid_noflush(page);
+               err = set_direct_map_invalid_noflush(folio_page(folio, 0));
                if (err) {
                        folio_put(folio);
                        ret = vmf_error(err);
@@ -90,7 +88,7 @@ retry:
                         * already happened when we marked the page invalid
                         * which guarantees that this call won't fail
                         */
-                       set_direct_map_default_noflush(page);
+                       set_direct_map_default_noflush(folio_page(folio, 0));
                        if (err == -EEXIST)
                                goto retry;
 
@@ -98,11 +96,11 @@ retry:
                        goto out;
                }
 
-               addr = (unsigned long)page_address(page);
+               addr = (unsigned long)folio_address(folio);
                flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
        }
 
-       vmf->page = page;
+       vmf->page = folio_file_page(folio, vmf->pgoff);
        ret = VM_FAULT_LOCKED;
 
 out:
@@ -154,7 +152,7 @@ static int secretmem_migrate_folio(struct address_space *mapping,
 
 static void secretmem_free_folio(struct folio *folio)
 {
-       set_direct_map_default_noflush(&folio->page);
+       set_direct_map_default_noflush(folio_page(folio, 0));
        folio_zero_segment(folio, 0, folio_size(folio));
 }
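
As a closing aside, a hedged sketch (illustrative only, not part of the patch)
of why the folio_page()/folio_address() substitutions above match the old
struct page code for the order-0 folios secretmem allocates:

#include <linux/mm.h>

/*
 * Illustrative only; assumes an order-0 folio such as the one allocated in
 * secretmem_fault() above.
 */
static void secretmem_folio_sketch(struct folio *folio)
{
	/* folio_page(folio, 0) is the folio's first (here, only) page; it is
	 * what the direct-map helpers, which still take a struct page,
	 * operate on in the hunks above. */
	struct page *page = folio_page(folio, 0);

	/* For an order-0 folio, folio_address() and page_address() resolve
	 * to the same kernel virtual address, so the PAGE_SIZE-wide
	 * flush_tlb_kernel_range() span is unchanged. */
	WARN_ON(folio_address(folio) != page_address(page));
}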