From: Greg Kroah-Hartman
Date: Wed, 18 Aug 2010 15:49:01 +0000 (-0700)
Subject: .32 patches
X-Git-Tag: v2.6.27.52~6
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=cecdb85be7ed2255f00c9e7499f9ca8097056d42;p=thirdparty%2Fkernel%2Fstable-queue.git

.32 patches
---

diff --git a/queue-2.6.32/mm-fix-page-table-unmap-for-stack-guard-page-properly.patch b/queue-2.6.32/mm-fix-page-table-unmap-for-stack-guard-page-properly.patch
new file mode 100644
index 00000000000..3581fbd32d8
--- /dev/null
+++ b/queue-2.6.32/mm-fix-page-table-unmap-for-stack-guard-page-properly.patch
@@ -0,0 +1,75 @@
+From 11ac552477e32835cb6970bf0a70c210807f5673 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds
+Date: Sat, 14 Aug 2010 11:44:56 -0700
+Subject: mm: fix page table unmap for stack guard page properly
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Linus Torvalds
+
+commit 11ac552477e32835cb6970bf0a70c210807f5673 upstream.
+
+We do in fact need to unmap the page table _before_ doing the whole
+stack guard page logic, because if it is needed (mainly 32-bit x86 with
+PAE and CONFIG_HIGHPTE, but other architectures may use it too) then it
+will do a kmap_atomic/kunmap_atomic.
+
+And those kmaps will create an atomic region that we cannot do
+allocations in.  However, the whole stack expand code will need to do
+anon_vma_prepare() and vma_lock_anon_vma() and they cannot do that in an
+atomic region.
+
+Now, a better model might actually be to do the anon_vma_prepare() when
+_creating_ a VM_GROWSDOWN segment, and not have to worry about any of
+this at page fault time.  But in the meantime, this is the
+straightforward fix for the issue.
+
+See https://bugzilla.kernel.org/show_bug.cgi?id=16588 for details.
+
+Reported-by: Wylda
+Reported-by: Sedat Dilek
+Reported-by: Mike Pagano
+Reported-by: François Valenduc
+Tested-by: Ed Tomlinson
+Cc: Pekka Enberg
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ mm/memory.c |   13 ++++++-------
+ 1 file changed, 6 insertions(+), 7 deletions(-)
+
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2662,24 +2662,23 @@ static int do_anonymous_page(struct mm_s
+ 	spinlock_t *ptl;
+ 	pte_t entry;
+ 
+-	if (check_stack_guard_page(vma, address) < 0) {
+-		pte_unmap(page_table);
++	pte_unmap(page_table);
++
++	/* Check if we need to add a guard page to the stack */
++	if (check_stack_guard_page(vma, address) < 0)
+ 		return VM_FAULT_SIGBUS;
+-	}
+ 
++	/* Use the zero-page for reads */
+ 	if (!(flags & FAULT_FLAG_WRITE)) {
+ 		entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
+ 						vma->vm_page_prot));
+-		ptl = pte_lockptr(mm, pmd);
+-		spin_lock(ptl);
++		page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
+ 		if (!pte_none(*page_table))
+ 			goto unlock;
+ 		goto setpte;
+ 	}
+ 
+ 	/* Allocate our own private page. */
+-	pte_unmap(page_table);
+-
+ 	if (unlikely(anon_vma_prepare(vma)))
+ 		goto oom;
+ 	page = alloc_zeroed_user_highpage_movable(vma, address);
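The fault ordering fixed above is exercised from user space simply by
growing the stack one page at a time, so that each new frame takes the
do_anonymous_page() / check_stack_guard_page() path. The following is a
minimal reproducer sketch, not part of the queued patch; it assumes a
4096-byte page size and the default 8 MB stack rlimit.

/* Hypothetical user-space trigger sketch -- not part of the patch. */
#include <string.h>

static int grow(int depth)
{
	char pad[4096];			/* roughly one page per frame */

	/* Touch the page: faults in a fresh anonymous stack page. */
	memset(pad, depth & 0xff, sizeof(pad));
	if (depth > 0)			/* use pad after the call so the
					 * compiler cannot tail-call and
					 * reuse the frame */
		return grow(depth - 1) + pad[0];
	return pad[0];
}

int main(void)
{
	return grow(1024);	/* grow the stack by ~4 MB, well under
				 * the default 8 MB rlimit */
}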
diff --git a/queue-2.6.32/mm-fix-up-some-user-visible-effects-of-the-stack-guard-page.patch b/queue-2.6.32/mm-fix-up-some-user-visible-effects-of-the-stack-guard-page.patch
new file mode 100644
index 00000000000..059f4767ba4
--- /dev/null
+++ b/queue-2.6.32/mm-fix-up-some-user-visible-effects-of-the-stack-guard-page.patch
@@ -0,0 +1,89 @@
+From d7824370e26325c881b665350ce64fb0a4fde24a Mon Sep 17 00:00:00 2001
+From: Linus Torvalds
+Date: Sun, 15 Aug 2010 11:35:52 -0700
+Subject: mm: fix up some user-visible effects of the stack guard page
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Linus Torvalds
+
+commit d7824370e26325c881b665350ce64fb0a4fde24a upstream.
+
+This commit makes the stack guard page somewhat less visible to user
+space. It does this by:
+
+ - not showing the guard page in /proc/<pid>/maps
+
+   It looks like lvm-tools will actually read /proc/self/maps to figure
+   out where all its mappings are, and effectively do a specialized
+   "mlockall()" in user space.  By not showing the guard page as part of
+   the mapping (by just adding PAGE_SIZE to the start for grows-up
+   pages), lvm-tools ends up not being aware of it.
+
+ - by also teaching the _real_ mlock() functionality not to try to lock
+   the guard page.
+
+   That would just expand the mapping down to create a new guard page,
+   so there really is no point in trying to lock it in place.
+
+It would perhaps be nice to show the guard page specially in
+/proc/<pid>/maps (or at least mark grow-down segments some way), but
+let's not open ourselves up to more breakage by user space from programs
+that depend on the exact details of the 'maps' file.
+
+Special thanks to Henrique de Moraes Holschuh for diving into lvm-tools
+source code to see what was going on with the whole new warning.
+
+Reported-and-tested-by: François Valenduc
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ fs/proc/task_mmu.c |    8 +++++++-
+ mm/mlock.c         |    8 ++++++++
+ 2 files changed, 15 insertions(+), 1 deletion(-)
+
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -206,6 +206,7 @@ static void show_map_vma(struct seq_file
+ 	int flags = vma->vm_flags;
+ 	unsigned long ino = 0;
+ 	unsigned long long pgoff = 0;
++	unsigned long start;
+ 	dev_t dev = 0;
+ 	int len;
+ 
+@@ -216,8 +217,13 @@ static void show_map_vma(struct seq_file
+ 		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
+ 	}
+ 
++	/* We don't show the stack guard page in /proc/maps */
++	start = vma->vm_start;
++	if (vma->vm_flags & VM_GROWSDOWN)
++		start += PAGE_SIZE;
++
+ 	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
+-			vma->vm_start,
++			start,
+ 			vma->vm_end,
+ 			flags & VM_READ ? 'r' : '-',
+ 			flags & VM_WRITE ? 'w' : '-',
+--- a/mm/mlock.c
++++ b/mm/mlock.c
+@@ -170,6 +170,14 @@ static long __mlock_vma_pages_range(stru
+ 	if (vma->vm_flags & VM_WRITE)
+ 		gup_flags |= FOLL_WRITE;
+ 
++	/* We don't try to access the guard page of a stack vma */
++	if (vma->vm_flags & VM_GROWSDOWN) {
++		if (start == vma->vm_start) {
++			start += PAGE_SIZE;
++			nr_pages--;
++		}
++	}
++
+ 	while (nr_pages > 0) {
+ 		int i;
+
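The lvm-tools behaviour the commit message describes, reading
/proc/self/maps and mlock()ing each mapping from user space, can be
sketched as below. This is hypothetical test code, not part of the
queued patch; with the change applied, the start address reported for
the stack vma already excludes the guard page, so locking the reported
range no longer touches it (and no longer forces the mapping to grow
downwards to create a new guard page).

/* Hypothetical user-space sketch -- not part of the patch. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	FILE *f = fopen("/proc/self/maps", "r");
	char line[256];
	unsigned long start, end;

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		if (!strstr(line, "[stack]"))
			continue;
		/* Lock exactly the range the kernel reports; after this
		 * patch the guard page is not part of that range. */
		if (sscanf(line, "%lx-%lx", &start, &end) == 2)
			mlock((void *)start, end - start);
	}
	fclose(f);
	return 0;
}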