mm, rmap: check all VMAs that PTE-mapped THP can be part of
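The conversion below replaces the single page_check_address_transhuge() lookup in
page_referenced_one() with a page_vma_mapped_walk() loop, so every PTE (or PMD) in the
VMA that maps any subpage of the page is visited and references to a PTE-mapped THP are
no longer missed; rmap_walk_anon() and rmap_walk_file() are widened to search their
interval trees over the whole pgoff range of the compound page
(pgoff_start .. pgoff_start + hpage_nr_pages(page) - 1). A minimal sketch of the walker
idiom follows, for illustration only (example_rmap_one() is a hypothetical rmap_one-style
callback and is not part of this patch):

/*
 * Illustration only, not part of this patch: the page_vma_mapped_walk()
 * idiom that page_referenced_one() adopts below. example_rmap_one() is a
 * hypothetical rmap_one-style callback.
 */
#include <linux/mm.h>
#include <linux/rmap.h>

static int example_rmap_one(struct page *page, struct vm_area_struct *vma,
			    unsigned long address, void *arg)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};

	while (page_vma_mapped_walk(&pvmw)) {
		if (pvmw.pte) {
			/* one PTE-mapped subpage at pvmw.address */
		} else {
			/* a PMD-mapped THP; pvmw.pmd is valid */
		}
		/*
		 * To stop early, drop the locks first:
		 *	page_vma_mapped_walk_done(&pvmw);
		 *	return SWAP_FAIL;
		 */
	}

	return SWAP_AGAIN;
}

Each successful iteration of page_vma_mapped_walk() returns with the relevant page table
lock held; page_vma_mapped_walk_done() drops it, which is why the VM_LOCKED bail-out in
the patch calls it before returning SWAP_FAIL.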
diff --git a/mm/rmap.c b/mm/rmap.c
index 91619fd709399a428a5a65fff6367d14cbef3db3..c4bad599cc7bdf08a22b8869b522c64c0383f662 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -886,45 +886,48 @@ struct page_referenced_arg {
 static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
                        unsigned long address, void *arg)
 {
-       struct mm_struct *mm = vma->vm_mm;
        struct page_referenced_arg *pra = arg;
-       pmd_t *pmd;
-       pte_t *pte;
-       spinlock_t *ptl;
+       struct page_vma_mapped_walk pvmw = {
+               .page = page,
+               .vma = vma,
+               .address = address,
+       };
        int referenced = 0;
 
-       if (!page_check_address_transhuge(page, mm, address, &pmd, &pte, &ptl))
-               return SWAP_AGAIN;
+       while (page_vma_mapped_walk(&pvmw)) {
+               address = pvmw.address;
 
-       if (vma->vm_flags & VM_LOCKED) {
-               if (pte)
-                       pte_unmap(pte);
-               spin_unlock(ptl);
-               pra->vm_flags |= VM_LOCKED;
-               return SWAP_FAIL; /* To break the loop */
-       }
+               if (vma->vm_flags & VM_LOCKED) {
+                       page_vma_mapped_walk_done(&pvmw);
+                       pra->vm_flags |= VM_LOCKED;
+                       return SWAP_FAIL; /* To break the loop */
+               }
 
-       if (pte) {
-               if (ptep_clear_flush_young_notify(vma, address, pte)) {
-                       /*
-                        * Don't treat a reference through a sequentially read
-                        * mapping as such.  If the page has been used in
-                        * another mapping, we will catch it; if this other
-                        * mapping is already gone, the unmap path will have
-                        * set PG_referenced or activated the page.
-                        */
-                       if (likely(!(vma->vm_flags & VM_SEQ_READ)))
+               if (pvmw.pte) {
+                       if (ptep_clear_flush_young_notify(vma, address,
+                                               pvmw.pte)) {
+                               /*
+                                * Don't treat a reference through
+                                * a sequentially read mapping as such.
+                                * If the page has been used in another mapping,
+                                * we will catch it; if this other mapping is
+                                * already gone, the unmap path will have set
+                                * PG_referenced or activated the page.
+                                */
+                               if (likely(!(vma->vm_flags & VM_SEQ_READ)))
+                                       referenced++;
+                       }
+               } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
+                       if (pmdp_clear_flush_young_notify(vma, address,
+                                               pvmw.pmd))
                                referenced++;
+               } else {
+                       /* unexpected pmd-mapped page? */
+                       WARN_ON_ONCE(1);
                }
-               pte_unmap(pte);
-       } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
-               if (pmdp_clear_flush_young_notify(vma, address, pmd))
-                       referenced++;
-       } else {
-               /* unexpected pmd-mapped page? */
-               WARN_ON_ONCE(1);
+
+               pra->mapcount--;
        }
-       spin_unlock(ptl);
 
        if (referenced)
                clear_page_idle(page);
@@ -936,7 +939,6 @@ static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
                pra->vm_flags |= vma->vm_flags;
        }
 
-       pra->mapcount--;
        if (!pra->mapcount)
                return SWAP_SUCCESS; /* To break the loop */
 
@@ -1755,7 +1757,7 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
                bool locked)
 {
        struct anon_vma *anon_vma;
-       pgoff_t pgoff;
+       pgoff_t pgoff_start, pgoff_end;
        struct anon_vma_chain *avc;
        int ret = SWAP_AGAIN;
 
@@ -1769,8 +1771,10 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
        if (!anon_vma)
                return ret;
 
-       pgoff = page_to_pgoff(page);
-       anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
+       pgoff_start = page_to_pgoff(page);
+       pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
+       anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
+                       pgoff_start, pgoff_end) {
                struct vm_area_struct *vma = avc->vma;
                unsigned long address = vma_address(page, vma);
 
@@ -1808,7 +1812,7 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
                bool locked)
 {
        struct address_space *mapping = page_mapping(page);
-       pgoff_t pgoff;
+       pgoff_t pgoff_start, pgoff_end;
        struct vm_area_struct *vma;
        int ret = SWAP_AGAIN;
 
@@ -1823,10 +1827,12 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
        if (!mapping)
                return ret;
 
-       pgoff = page_to_pgoff(page);
+       pgoff_start = page_to_pgoff(page);
+       pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
        if (!locked)
                i_mmap_lock_read(mapping);
-       vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
+       vma_interval_tree_foreach(vma, &mapping->i_mmap,
+                       pgoff_start, pgoff_end) {
                unsigned long address = vma_address(page, vma);
 
                cond_resched();