mm: return the address from page_mapped_in_vma()
author     Matthew Wilcox (Oracle) <willy@infradead.org>
           Fri, 12 Apr 2024 19:35:00 +0000 (20:35 +0100)
committer  Andrew Morton <akpm@linux-foundation.org>
           Mon, 6 May 2024 00:53:45 +0000 (17:53 -0700)
The only user of this function calls page_address_in_vma() immediately
after page_mapped_in_vma() calculates it and uses it to return true/false.
Return the address instead, allowing memory-failure to skip the call to
page_address_in_vma().

Link: https://lkml.kernel.org/r/20240412193510.2356957-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Miaohe Lin <linmiaohe@huawei.com>
Reviewed-by: Jane Chu <jane.chu@oracle.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Oscar Salvador <osalvador@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
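
In caller terms, the change removes a second rmap lookup: memory-failure no longer asks "is the page mapped?" and then separately "at what address?". A condensed, illustrative sketch of the collect_procs_anon() call site before and after (not the exact kernel code; see the diffs below for the real change):

	/* Before: boolean test, then a second walk to find the address. */
	if (!page_mapped_in_vma(page, vma))
		continue;
	addr = page_address_in_vma(page, vma);	/* repeats the lookup */
	__add_to_kill(t, page, vma, to_kill, addr);

	/* After: one call yields the address, or -EFAULT if not mapped. */
	addr = page_mapped_in_vma(page, vma);
	if (addr == -EFAULT)
		continue;
	__add_to_kill(t, page, vma, to_kill, addr);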
include/linux/rmap.h
mm/memory-failure.c
mm/page_vma_mapped.c

diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 0f906dc6d28002ee285cc834f19dffa2ad5a6020..7229b9baf20d87e6d5ebb710a6ee20f9e6c2ad80 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -730,7 +730,7 @@ int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
 
 void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked);
 
-int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
+unsigned long page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
 
 /*
  * rmap_walk_control: To control rmap traversing for specific needs
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 9e1a7d8ca745494e036c0e5892342c9608a21435..12e5d2844cb15190d93f0a74d80079659558ac9a 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -473,10 +473,11 @@ static void __add_to_kill(struct task_struct *tsk, struct page *p,
 }
 
 static void add_to_kill_anon_file(struct task_struct *tsk, struct page *p,
-                                 struct vm_area_struct *vma,
-                                 struct list_head *to_kill)
+               struct vm_area_struct *vma, struct list_head *to_kill,
+               unsigned long addr)
 {
-       unsigned long addr = page_address_in_vma(p, vma);
+       if (addr == -EFAULT)
+               return;
        __add_to_kill(tsk, p, vma, to_kill, addr);
 }
 
@@ -601,7 +602,6 @@ struct task_struct *task_early_kill(struct task_struct *tsk, int force_early)
 static void collect_procs_anon(struct folio *folio, struct page *page,
                struct list_head *to_kill, int force_early)
 {
-       struct vm_area_struct *vma;
        struct task_struct *tsk;
        struct anon_vma *av;
        pgoff_t pgoff;
@@ -613,8 +613,10 @@ static void collect_procs_anon(struct folio *folio, struct page *page,
        pgoff = page_to_pgoff(page);
        rcu_read_lock();
        for_each_process(tsk) {
+               struct vm_area_struct *vma;
                struct anon_vma_chain *vmac;
                struct task_struct *t = task_early_kill(tsk, force_early);
+               unsigned long addr;
 
                if (!t)
                        continue;
@@ -623,9 +625,8 @@ static void collect_procs_anon(struct folio *folio, struct page *page,
                        vma = vmac->vma;
                        if (vma->vm_mm != t->mm)
                                continue;
-                       if (!page_mapped_in_vma(page, vma))
-                               continue;
-                       add_to_kill_anon_file(t, page, vma, to_kill);
+                       addr = page_mapped_in_vma(page, vma);
+                       add_to_kill_anon_file(t, page, vma, to_kill, addr);
                }
        }
        rcu_read_unlock();
@@ -648,6 +649,7 @@ static void collect_procs_file(struct folio *folio, struct page *page,
        pgoff = page_to_pgoff(page);
        for_each_process(tsk) {
                struct task_struct *t = task_early_kill(tsk, force_early);
+               unsigned long addr;
 
                if (!t)
                        continue;
@@ -660,8 +662,10 @@ static void collect_procs_file(struct folio *folio, struct page *page,
                         * Assume applications who requested early kill want
                         * to be informed of all such data corruptions.
                         */
-                       if (vma->vm_mm == t->mm)
-                               add_to_kill_anon_file(t, page, vma, to_kill);
+                       if (vma->vm_mm != t->mm)
+                               continue;
+                       addr = page_address_in_vma(page, vma);
+                       add_to_kill_anon_file(t, page, vma, to_kill, addr);
                }
        }
        rcu_read_unlock();
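
Because page_mapped_in_vma() now returns unsigned long, the -EFAULT error is carried as its unsigned representation, the same convention vma_address() and page_address_in_vma() already use, which is why add_to_kill_anon_file() can simply compare addr == -EFAULT. A small standalone sketch of that sentinel convention (userspace C, purely illustrative; the names and addresses are made up):

	#include <errno.h>
	#include <stdio.h>

	/* Hypothetical lookup: return an address, or -EFAULT as an unsigned sentinel. */
	static unsigned long fake_lookup(int mapped)
	{
		if (!mapped)
			return -EFAULT;		/* -14 converts to ULONG_MAX - 13 */
		return 0x7f1234560000UL;	/* made-up "mapped" address */
	}

	int main(void)
	{
		unsigned long addr = fake_lookup(0);

		/* -EFAULT undergoes the same conversion here, so the compare matches. */
		if (addr == -EFAULT)
			printf("not mapped\n");
		else
			printf("mapped at %#lx\n", addr);
		return 0;
	}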
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 53b8868ede61c609510c538c5d7b70a68c9a68da..c202eab84936d8ca281b23210b021a29ae33efcc 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -319,11 +319,12 @@ next_pte:
  * @page: the page to test
  * @vma: the VMA to test
  *
- * Returns 1 if the page is mapped into the page tables of the VMA, 0
- * if the page is not mapped into the page tables of this VMA.  Only
- * valid for normal file or anonymous VMAs.
+ * Return: The address the page is mapped at if the page is in the range
+ * covered by the VMA and present in the page table.  If the page is
+ * outside the VMA or not present, returns -EFAULT.
+ * Only valid for normal file or anonymous VMAs.
  */
-int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
+unsigned long page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
 {
        struct folio *folio = page_folio(page);
        pgoff_t pgoff = folio->index + folio_page_idx(folio, page);
@@ -336,9 +337,10 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
 
        pvmw.address = vma_address(vma, pgoff, 1);
        if (pvmw.address == -EFAULT)
-               return 0;
+               goto out;
        if (!page_vma_mapped_walk(&pvmw))
-               return 0;
+               return -EFAULT;
        page_vma_mapped_walk_done(&pvmw);
-       return 1;
+out:
+       return pvmw.address;
 }
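
Per the updated kernel-doc, callers treat the return value as either the user virtual address of the mapping or -EFAULT. A hedged sketch of a caller following that convention (report_mapping() is hypothetical, not part of this patch):

	/* Hypothetical caller, only to illustrate the documented return value. */
	static bool report_mapping(struct page *page, struct vm_area_struct *vma)
	{
		unsigned long addr = page_mapped_in_vma(page, vma);

		if (addr == -EFAULT)
			return false;	/* outside the VMA, or not present in the page table */

		pr_info("page mapped at %#lx\n", addr);
		return true;
	}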