mm: prevent get_user_pages() from overflowing page refcount
author	Linus Torvalds <torvalds@linux-foundation.org>
	Thu, 11 Apr 2019 17:49:19 +0000 (10:49 -0700)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
	Tue, 11 Jun 2019 10:22:45 +0000 (12:22 +0200)
commit 8fde12ca79aff9b5ba951fce1a2641901b8d8e64 upstream.

If the page refcount wraps around past zero, it will be freed while
there are still four billion references to it.  One of the possible
avenues for an attacker to try to make this happen is by doing direct IO
on a page multiple times.  This patch makes get_user_pages() refuse to
take a new page reference if there are already more than two billion
references to the page.
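
As a rough userspace sketch of the failure mode and the guard (the toy
types and helper names below are invented for illustration; the real
helpers used by this series are try_get_page() and page_ref_count()):
an unchecked increment of a 32-bit signed refcount silently wraps once
it passes INT_MAX, while a guarded take refuses new references as soon
as the count is zero or negative, stopping roughly two billion
references short of wrapping back around to zero.

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for the 32-bit _refcount field in struct page. */
struct toy_page {
	atomic_int refcount;
};

/* Unchecked take: silently wraps once the count passes INT_MAX. */
static void toy_get_page(struct toy_page *p)
{
	atomic_fetch_add(&p->refcount, 1);
}

/*
 * Guarded take, in the spirit of try_get_page(): refuse the new
 * reference once the count is zero or negative, i.e. the page is
 * already free or so over-referenced that the signed counter has
 * gone negative on its way toward wrapping back to zero.
 */
static bool toy_try_get_page(struct toy_page *p)
{
	if (atomic_load(&p->refcount) <= 0)
		return false;
	atomic_fetch_add(&p->refcount, 1);
	return true;
}

int main(void)
{
	struct toy_page page;

	/* Unchecked: one more reference at INT_MAX wraps the counter. */
	atomic_init(&page.refcount, INT_MAX);
	toy_get_page(&page);
	printf("unchecked take: refcount is now %d\n",
	       atomic_load(&page.refcount));

	/* Guarded: once the count has gone negative, refuse new refs. */
	atomic_store(&page.refcount, INT_MIN + 42);
	if (!toy_try_get_page(&page))
		printf("guarded take: refused, counter cannot reach zero\n");
	return 0;
}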

Reported-by: Jann Horn <jannh@google.com>
Acked-by: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
[bwh: Backported to 4.9:
 - Add the "err" variable in follow_hugetlb_page()
 - Adjust context]
Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
mm/gup.c
mm/hugetlb.c

index a736aef408233ecc61e1959725a5b61f663e8873..6bb7a8eb7f820ce8143c02abaed90db2d25fb135 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -153,7 +153,10 @@ retry:
        }
 
        if (flags & FOLL_GET) {
-               get_page(page);
+               if (unlikely(!try_get_page(page))) {
+                       page = ERR_PTR(-ENOMEM);
+                       goto out;
+               }
 
                /* drop the pgmap reference now that we hold the page */
                if (pgmap) {
@@ -292,7 +295,10 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
                        if (pmd_trans_unstable(pmd))
                                ret = -EBUSY;
                } else {
-                       get_page(page);
+                       if (unlikely(!try_get_page(page))) {
+                               spin_unlock(ptl);
+                               return ERR_PTR(-ENOMEM);
+                       }
                        spin_unlock(ptl);
                        lock_page(page);
                        ret = split_huge_page(page);
@@ -348,7 +354,10 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
                        goto unmap;
                *page = pte_page(*pte);
        }
-       get_page(*page);
+       if (unlikely(!try_get_page(*page))) {
+               ret = -ENOMEM;
+               goto unmap;
+       }
 out:
        ret = 0;
 unmap:
@@ -1231,6 +1240,20 @@ struct page *get_dump_page(unsigned long addr)
  */
 #ifdef CONFIG_HAVE_GENERIC_RCU_GUP
 
+/*
+ * Return the compound head page with ref appropriately incremented,
+ * or NULL if that failed.
+ */
+static inline struct page *try_get_compound_head(struct page *page, int refs)
+{
+       struct page *head = compound_head(page);
+       if (WARN_ON_ONCE(page_ref_count(head) < 0))
+               return NULL;
+       if (unlikely(!page_cache_add_speculative(head, refs)))
+               return NULL;
+       return head;
+}
+
 #ifdef __HAVE_ARCH_PTE_SPECIAL
 static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
                         int write, struct page **pages, int *nr)
@@ -1263,9 +1286,9 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
 
                VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
                page = pte_page(pte);
-               head = compound_head(page);
 
-               if (!page_cache_get_speculative(head))
+               head = try_get_compound_head(page, 1);
+               if (!head)
                        goto pte_unmap;
 
                if (unlikely(pte_val(pte) != pte_val(*ptep))) {
@@ -1321,8 +1344,8 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
                refs++;
        } while (addr += PAGE_SIZE, addr != end);
 
-       head = compound_head(pmd_page(orig));
-       if (!page_cache_add_speculative(head, refs)) {
+       head = try_get_compound_head(pmd_page(orig), refs);
+       if (!head) {
                *nr -= refs;
                return 0;
        }
@@ -1355,8 +1378,8 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
                refs++;
        } while (addr += PAGE_SIZE, addr != end);
 
-       head = compound_head(pud_page(orig));
-       if (!page_cache_add_speculative(head, refs)) {
+       head = try_get_compound_head(pud_page(orig), refs);
+       if (!head) {
                *nr -= refs;
                return 0;
        }
@@ -1390,8 +1413,8 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
                refs++;
        } while (addr += PAGE_SIZE, addr != end);
 
-       head = compound_head(pgd_page(orig));
-       if (!page_cache_add_speculative(head, refs)) {
+       head = try_get_compound_head(pgd_page(orig), refs);
+       if (!head) {
                *nr -= refs;
                return 0;
        }
index 75d8bd7e8798b5924b5bdce2002890e4525ffe66..6b03cd9b6d37ee718b9606671047084c5a757508 100644 (file)
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3984,6 +3984,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
        unsigned long vaddr = *position;
        unsigned long remainder = *nr_pages;
        struct hstate *h = hstate_vma(vma);
+       int err = -EFAULT;
 
        while (vaddr < vma->vm_end && remainder) {
                pte_t *pte;
@@ -4055,6 +4056,19 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
                pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
                page = pte_page(huge_ptep_get(pte));
+
+               /*
+                * Instead of doing 'try_get_page()' below in the same_page
+                * loop, just check the count once here.
+                */
+               if (unlikely(page_count(page) <= 0)) {
+                       if (pages) {
+                               spin_unlock(ptl);
+                               remainder = 0;
+                               err = -ENOMEM;
+                               break;
+                       }
+               }
 same_page:
                if (pages) {
                        pages[i] = mem_map_offset(page, pfn_offset);
@@ -4081,7 +4095,7 @@ same_page:
        *nr_pages = remainder;
        *position = vaddr;
 
-       return i ? i : -EFAULT;
+       return i ? i : err;
 }
 
 #ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
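
From a caller's point of view, the lookup paths above now surface
-ENOMEM (or ERR_PTR(-ENOMEM)) rather than silently taking one more
reference, so a get_user_pages() user can see a partial page count or
this new error code. A hedged sketch of the caller-side handling
follows; the helper pin_one_user_page() is invented for illustration,
and the five-argument get_user_pages() prototype is assumed to be the
v4.9-era one this backport targets.

#include <linux/mm.h>
#include <linux/err.h>

/*
 * Illustrative only: pin a single user page for later kernel access.
 * Assumes the caller holds current->mm->mmap_sem for read, as
 * get_user_pages() requires.
 */
static int pin_one_user_page(unsigned long uaddr, struct page **pagep)
{
	long pinned;

	pinned = get_user_pages(uaddr & PAGE_MASK, 1, FOLL_WRITE, pagep, NULL);
	if (pinned < 0)
		return pinned;	/* now possibly -ENOMEM, not only -EFAULT */
	if (pinned == 0)
		return -EFAULT;	/* nothing was pinned */
	return 0;		/* drop with put_page(*pagep) when done */
}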