From 11ac552477e32835cb6970bf0a70c210807f5673 Mon Sep 17 00:00:00 2001
From: Linus Torvalds <torvalds@linux-foundation.org>
Date: Sat, 14 Aug 2010 11:44:56 -0700
Subject: mm: fix page table unmap for stack guard page properly
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

From: Linus Torvalds <torvalds@linux-foundation.org>

commit 11ac552477e32835cb6970bf0a70c210807f5673 upstream.

We do in fact need to unmap the page table _before_ doing the whole
stack guard page logic, because if it is needed (mainly 32-bit x86 with
PAE and CONFIG_HIGHPTE, but other architectures may use it too) then it
will do a kmap_atomic/kunmap_atomic.

And those kmaps will create an atomic region that we cannot do
allocations in.  However, the whole stack expand code will need to do
anon_vma_prepare() and vma_lock_anon_vma() and they cannot do that in an
atomic region.

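As an illustration (not part of the patch), the ordering constraint can
be modeled in plain user-space C.  kmap_atomic_sim(),
kunmap_atomic_sim() and might_allocate() below are hypothetical
stand-ins for kmap_atomic(), kunmap_atomic() and anon_vma_prepare();
atomic context is modeled as a simple counter:

#include <stdio.h>

static int atomic_depth;	/* models "in atomic context" */

/* stand-in for the kmap_atomic() done when mapping a highmem pte */
static void kmap_atomic_sim(void)   { atomic_depth++; }

/* stand-in for pte_unmap() -> kunmap_atomic() */
static void kunmap_atomic_sim(void) { atomic_depth--; }

/* stand-in for anon_vma_prepare(): may allocate and sleep, so it
 * must not be called while an atomic kmap is still live */
static int might_allocate(void)
{
	if (atomic_depth > 0) {
		fprintf(stderr, "BUG: allocation in atomic region\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	/* old ordering: the guard-page/stack-expand work (which may
	 * allocate) ran while the pte kmap was still mapped */
	kmap_atomic_sim();
	if (might_allocate())
		printf("old ordering: broken\n");
	kunmap_atomic_sim();

	/* fixed ordering: unmap first, then do work that may sleep */
	kmap_atomic_sim();
	kunmap_atomic_sim();
	if (!might_allocate())
		printf("new ordering: ok\n");
	return 0;
}
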
Now, a better model might actually be to do the anon_vma_prepare() when
_creating_ a VM_GROWSDOWN segment, and not have to worry about any of
this at page fault time.  But in the meantime, this is the
straightforward fix for the issue.

See https://bugzilla.kernel.org/show_bug.cgi?id=16588 for details.

Reported-by: Wylda <wylda@volny.cz>
Reported-by: Sedat Dilek <sedat.dilek@gmail.com>
Reported-by: Mike Pagano <mpagano@gentoo.org>
Reported-by: François Valenduc <francois.valenduc@tvcablenet.be>
Tested-by: Ed Tomlinson <edt@aei.ca>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>

---
 mm/memory.c |   13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2783,24 +2783,23 @@ static int do_anonymous_page(struct mm_s
 	spinlock_t *ptl;
 	pte_t entry;
 
-	if (check_stack_guard_page(vma, address) < 0) {
-		pte_unmap(page_table);
+	pte_unmap(page_table);
+
+	/* Check if we need to add a guard page to the stack */
+	if (check_stack_guard_page(vma, address) < 0)
 		return VM_FAULT_SIGBUS;
-	}
 
+	/* Use the zero-page for reads */
 	if (!(flags & FAULT_FLAG_WRITE)) {
 		entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
 						vma->vm_page_prot));
-		ptl = pte_lockptr(mm, pmd);
-		spin_lock(ptl);
+		page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
 		if (!pte_none(*page_table))
 			goto unlock;
 		goto setpte;
 	}
 
 	/* Allocate our own private page. */
-	pte_unmap(page_table);
-
 	if (unlikely(anon_vma_prepare(vma)))
 		goto oom;
 	page = alloc_zeroed_user_highpage_movable(vma, address);