@@ ... @@ static vm_fault_t secretmem_fault(struct vm_fault *vmf)
 	pgoff_t offset = vmf->pgoff;
 	gfp_t gfp = vmf->gfp_mask;
 	unsigned long addr;
-	struct page *page;
 	struct folio *folio;
 	vm_fault_t ret;
 	int err;
@@ ... @@ static vm_fault_t secretmem_fault(struct vm_fault *vmf)
 	filemap_invalidate_lock_shared(mapping);
 
 retry:
-	page = find_lock_page(mapping, offset);
-	if (!page) {
+	folio = filemap_lock_folio(mapping, offset);
+	if (IS_ERR(folio)) {
 		folio = folio_alloc(gfp | __GFP_ZERO, 0);
 		if (!folio) {
 			ret = VM_FAULT_OOM;
 			goto out;
 		}
 
-		page = &folio->page;
-		err = set_direct_map_invalid_noflush(page);
+		err = set_direct_map_invalid_noflush(folio_page(folio, 0));
 		if (err) {
 			folio_put(folio);
 			ret = vmf_error(err);
@@ ... @@ static vm_fault_t secretmem_fault(struct vm_fault *vmf)
 			 * already happened when we marked the page invalid
 			 * which guarantees that this call won't fail
 			 */
-			set_direct_map_default_noflush(page);
+			set_direct_map_default_noflush(folio_page(folio, 0));
 			if (err == -EEXIST)
 				goto retry;
 
@@ ... @@ static vm_fault_t secretmem_fault(struct vm_fault *vmf)
 			goto out;
 		}
 
-		addr = (unsigned long)page_address(page);
+		addr = (unsigned long)folio_address(folio);
 		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
 	}
 
-	vmf->page = page;
+	vmf->page = folio_file_page(folio, vmf->pgoff);
 	ret = VM_FAULT_LOCKED;
 
 out:
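Two pagecache API details drive the shape of these hunks: filemap_lock_folio()
reports a cache miss as ERR_PTR(-ENOENT) rather than NULL, which is why the old
"if (!page)" test becomes "if (IS_ERR(folio))", and folio_file_page() selects
the sub-page of the folio that backs vmf->pgoff. For reference, a sketch of the
two helpers, paraphrased from <linux/pagemap.h> in recent kernels rather than
quoted; check the exact definitions in your tree:

	/* Paraphrased, not authoritative: see <linux/pagemap.h>. */
	static inline struct folio *filemap_lock_folio(struct address_space *mapping,
						       pgoff_t index)
	{
		/* Returns the locked folio, or ERR_PTR(-ENOENT) on a miss. */
		return __filemap_get_folio(mapping, index, FGP_LOCK, 0);
	}

	static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
	{
		/* For the order-0 folios secretmem uses, this is &folio->page. */
		return folio_page(folio, index & (folio_nr_pages(folio) - 1));
	}

Going through folio_file_page() instead of open-coding &folio->page keeps the
fault handler correct if secretmem ever starts allocating folios larger than a
single page.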
@@ ... @@
 static void secretmem_free_folio(struct folio *folio)
 {
-	set_direct_map_default_noflush(&folio->page);
+	set_direct_map_default_noflush(folio_page(folio, 0));
 	folio_zero_segment(folio, 0, folio_size(folio));
 }
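For context, secretmem_fault() runs the first time userspace touches a page of
a memfd_secret(2) mapping: the folio is allocated, dropped from the kernel's
direct map, and only then inserted into the page cache. A minimal userspace
sketch that exercises this path, assuming a kernel built with CONFIG_SECRETMEM
and booted with secretmem.enable=1 (the fallback syscall number below is the
x86_64 value):

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	#ifndef SYS_memfd_secret
	#define SYS_memfd_secret 447	/* x86_64; differs on other architectures */
	#endif

	int main(void)
	{
		long psize = sysconf(_SC_PAGESIZE);
		int fd = (int)syscall(SYS_memfd_secret, 0);

		if (fd < 0)
			return 1;
		/* A memfd_secret file must be sized before it can be touched. */
		if (ftruncate(fd, psize) < 0)
			return 1;

		char *p = mmap(NULL, psize, PROT_READ | PROT_WRITE,
			       MAP_SHARED, fd, 0);
		if (p == MAP_FAILED)
			return 1;

		/* The first write faults and drives secretmem_fault() above. */
		strcpy(p, "hidden from the direct map");
		printf("%s\n", p);

		munmap(p, psize);
		close(fd);
		return 0;
	}

The set_direct_map_invalid_noflush() call in the fault path is what removes the
page from the linear mapping, and secretmem_free_folio() is the matching undo:
it restores the direct-map entry and zeroes the folio before the page goes back
to the allocator.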