diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 705a3e9cc910e16472159a2d20f9585e1ff7c13d..ad75aea1344fdf2bbb696d09fbd80b7aa30c8567 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1248,10 +1248,11 @@ void free_huge_page(struct page *page)
                (struct hugepage_subpool *)page_private(page);
        bool restore_reserve;
 
-       set_page_private(page, 0);
-       page->mapping = NULL;
        VM_BUG_ON_PAGE(page_count(page), page);
        VM_BUG_ON_PAGE(page_mapcount(page), page);
+
+       set_page_private(page, 0);
+       page->mapping = NULL;
        restore_reserve = PagePrivate(page);
        ClearPagePrivate(page);
 
@@ -3624,7 +3625,6 @@ retry_avoidcopy:
        copy_user_huge_page(new_page, old_page, address, vma,
                            pages_per_huge_page(h));
        __SetPageUptodate(new_page);
-       set_page_huge_active(new_page);
 
        mmun_start = haddr;
        mmun_end = mmun_start + huge_page_size(h);
@@ -3646,6 +3646,7 @@ retry_avoidcopy:
                                make_huge_pte(vma, new_page, 1));
                page_remove_rmap(old_page, true);
                hugepage_add_new_anon_rmap(new_page, vma, haddr);
+               set_page_huge_active(new_page);
                /* Make the old page be freed below */
                new_page = old_page;
        }
@@ -3730,6 +3731,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
        pte_t new_pte;
        spinlock_t *ptl;
        unsigned long haddr = address & huge_page_mask(h);
+       bool new_page = false;
 
        /*
         * Currently, we are forced to kill the process in the event the
@@ -3791,7 +3793,7 @@ retry:
                }
                clear_huge_page(page, address, pages_per_huge_page(h));
                __SetPageUptodate(page);
-               set_page_huge_active(page);
+               new_page = true;
 
                if (vma->vm_flags & VM_MAYSHARE) {
                        int err = huge_add_to_page_cache(page, mapping, idx);
@@ -3862,6 +3864,15 @@ retry:
        }
 
        spin_unlock(ptl);
+
+       /*
+        * Only make newly allocated pages active.  Existing pages found
+        * in the pagecache could be !page_huge_active() if they have been
+        * isolated for migration.
+        */
+       if (new_page)
+               set_page_huge_active(page);
+
        unlock_page(page);
 out:
        return ret;
@@ -4096,7 +4107,6 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
         * the set_pte_at() write.
         */
        __SetPageUptodate(page);
-       set_page_huge_active(page);
 
        mapping = dst_vma->vm_file->f_mapping;
        idx = vma_hugecache_offset(h, dst_vma, dst_addr);
@@ -4164,6 +4174,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
        update_mmu_cache(dst_vma, dst_addr, dst_pte);
 
        spin_unlock(ptl);
+       set_page_huge_active(page);
        if (vm_shared)
                unlock_page(page);
        ret = 0;
@@ -4269,7 +4280,8 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                                break;
                        }
                        if (ret & VM_FAULT_RETRY) {
-                               if (nonblocking)
+                               if (nonblocking &&
+                                   !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
                                        *nonblocking = 0;
                                *nr_pages = 0;
                                /*