From: Greg Kroah-Hartman
Date: Sun, 26 Apr 2015 10:43:51 +0000 (+0200)
Subject: 3.10-stable patches
X-Git-Tag: v4.0.1~10
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=c31b0177fb5d47390e39ad025876f06ee575eae1;p=thirdparty%2Fkernel%2Fstable-queue.git

3.10-stable patches

added patches:
      vm-add-vm_fault_sigsegv-handling-support.patch
      vm-make-stack-guard-page-errors-return-vm_fault_sigsegv-rather-than-sigbus.patch
      x86-mm-move-mmap_sem-unlock-from-mm_fault_error-to-caller.patch
---

diff --git a/queue-3.10/series b/queue-3.10/series
index 5a766a75ecd..16d0e4ed8f2 100644
--- a/queue-3.10/series
+++ b/queue-3.10/series
@@ -25,3 +25,6 @@ kvm-x86-sysenter-emulation-is-broken.patch
 utf-8-q-kconfig-20fix-20warning-20-e2-80-98jump.patch
 move-d_rcu-from-overlapping-d_child-to-overlapping-d_alias.patch
 deal-with-deadlock-in-d_walk.patch
+vm-add-vm_fault_sigsegv-handling-support.patch
+vm-make-stack-guard-page-errors-return-vm_fault_sigsegv-rather-than-sigbus.patch
+x86-mm-move-mmap_sem-unlock-from-mm_fault_error-to-caller.patch
diff --git a/queue-3.10/vm-add-vm_fault_sigsegv-handling-support.patch b/queue-3.10/vm-add-vm_fault_sigsegv-handling-support.patch
new file mode 100644
index 00000000000..8d42d40d34d
--- /dev/null
+++ b/queue-3.10/vm-add-vm_fault_sigsegv-handling-support.patch
@@ -0,0 +1,427 @@
+From 33692f27597fcab536d7cbbcc8f52905133e4aa7 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds
+Date: Thu, 29 Jan 2015 10:51:32 -0800
+Subject: vm: add VM_FAULT_SIGSEGV handling support
+
+From: Linus Torvalds
+
+commit 33692f27597fcab536d7cbbcc8f52905133e4aa7 upstream.
+
+The core VM already knows about VM_FAULT_SIGBUS, but cannot return a
+"you should SIGSEGV" error, because the SIGSEGV case was generally
+handled by the caller - usually the architecture fault handler.
+
+That results in lots of duplication - all the architecture fault
+handlers end up doing very similar "look up vma, check permissions, do
+retries etc" - but it generally works.  However, there are cases where
+the VM actually wants to SIGSEGV, and applications _expect_ SIGSEGV.
+
+In particular, when accessing the stack guard page, libsigsegv expects a
+SIGSEGV.  And it usually got one, because the stack growth is handled by
+that duplicated architecture fault handler.
+
+However, when the generic VM layer started propagating the error return
+from the stack expansion in commit fee7e49d4514 ("mm: propagate error
+from stack expansion even for guard page"), that now exposed the
+existing VM_FAULT_SIGBUS result to user space.  And user space really
+expected SIGSEGV, not SIGBUS.
+
+To fix that case, we need to add a VM_FAULT_SIGSEGV, and teach all those
+duplicate architecture fault handlers about it.  They all already have
+the code to handle SIGSEGV, so it's about just tying that new return
+value to the existing code, but it's all a bit annoying.
+
+This is the mindless minimal patch to do this.  A more extensive patch
+would be to try to gather up the mostly shared fault handling logic into
+one generic helper routine, and long-term we really should do that
+cleanup.
+
+Just from this patch, you can generally see that most architectures just
+copied (directly or indirectly) the old x86 way of doing things, but in
+the meantime that original x86 model has been improved to hold the VM
+semaphore for shorter times etc and to handle VM_FAULT_RETRY and other
+"newer" things, so it would be a good idea to bring all those
+improvements to the generic case and teach other architectures about
+them too.
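+
+[editor's note: not part of the upstream commit.  As a sketch of the
+"generic helper routine" suggested above, the dispatch that every
+architecture currently duplicates could be captured roughly as follows;
+the enum and the function name are hypothetical, for illustration only:
+
+	enum fault_kind { FAULT_HANDLED, FAULT_OOM, FAULT_SIGSEGV, FAULT_SIGBUS };
+
+	/* Classify a handle_mm_fault() result; each architecture would
+	 * map these onto its own out_of_memory/bad_area/do_sigbus labels. */
+	static enum fault_kind classify_mm_fault(unsigned int fault)
+	{
+		if (!(fault & VM_FAULT_ERROR))
+			return FAULT_HANDLED;
+		if (fault & VM_FAULT_OOM)
+			return FAULT_OOM;
+		if (fault & VM_FAULT_SIGSEGV)	/* new with this patch */
+			return FAULT_SIGSEGV;
+		if (fault & VM_FAULT_SIGBUS)
+			return FAULT_SIGBUS;
+		BUG();
+	}
+]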
+
+Reported-and-tested-by: Takashi Iwai
+Tested-by: Jan Engelhardt
+Acked-by: Heiko Carstens # "s390 still compiles and boots"
+Cc: linux-arch@vger.kernel.org
+Signed-off-by: Linus Torvalds
+[shengyong: Backport to 3.10
+ - adjust context
+ - ignore modification for arch nios2, because 3.10 does not support it
+ - ignore modification for driver lustre, because 3.10 does not support it
+ - ignore VM_FAULT_FALLBACK in VM_FAULT_ERROR, because 3.10 does not
+   support this flag
+ - add SIGSEGV handling to powerpc/cell spu_fault.c, because 3.10 does not
+   separate it into copro_fault.c
+ - add SIGSEGV handling in mm/memory.c, because 3.10 does not separate it
+   into gup.c
+]
+Signed-off-by: Sheng Yong
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/alpha/mm/fault.c                   |    2 ++
+ arch/arc/mm/fault.c                     |    2 ++
+ arch/avr32/mm/fault.c                   |    2 ++
+ arch/cris/mm/fault.c                    |    2 ++
+ arch/frv/mm/fault.c                     |    2 ++
+ arch/ia64/mm/fault.c                    |    2 ++
+ arch/m32r/mm/fault.c                    |    2 ++
+ arch/m68k/mm/fault.c                    |    2 ++
+ arch/metag/mm/fault.c                   |    2 ++
+ arch/microblaze/mm/fault.c              |    2 ++
+ arch/mips/mm/fault.c                    |    2 ++
+ arch/mn10300/mm/fault.c                 |    2 ++
+ arch/openrisc/mm/fault.c                |    2 ++
+ arch/parisc/mm/fault.c                  |    2 ++
+ arch/powerpc/mm/fault.c                 |    2 ++
+ arch/powerpc/platforms/cell/spu_fault.c |    2 +-
+ arch/s390/mm/fault.c                    |    6 ++++++
+ arch/score/mm/fault.c                   |    2 ++
+ arch/sh/mm/fault.c                      |    2 ++
+ arch/sparc/mm/fault_32.c                |    2 ++
+ arch/sparc/mm/fault_64.c                |    2 ++
+ arch/tile/mm/fault.c                    |    2 ++
+ arch/um/kernel/trap.c                   |    2 ++
+ arch/x86/mm/fault.c                     |    2 ++
+ arch/xtensa/mm/fault.c                  |    2 ++
+ include/linux/mm.h                      |    5 +++--
+ mm/ksm.c                                |    2 +-
+ mm/memory.c                             |    5 +++--
+ 28 files changed, 60 insertions(+), 6 deletions(-)
+
+--- a/arch/alpha/mm/fault.c
++++ b/arch/alpha/mm/fault.c
+@@ -156,6 +156,8 @@ retry:
+ 	if (unlikely(fault & VM_FAULT_ERROR)) {
+ 		if (fault & VM_FAULT_OOM)
+ 			goto out_of_memory;
++		else if (fault & VM_FAULT_SIGSEGV)
++			goto bad_area;
+ 		else if (fault & VM_FAULT_SIGBUS)
+ 			goto do_sigbus;
+ 		BUG();
+--- a/arch/arc/mm/fault.c
++++ b/arch/arc/mm/fault.c
+@@ -160,6 +160,8 @@ good_area:
+ 		/* TBD: switch to pagefault_out_of_memory() */
+ 		if (fault & VM_FAULT_OOM)
+ 			goto out_of_memory;
++		else if (fault & VM_FAULT_SIGSEGV)
++			goto bad_area;
+ 		else if (fault & VM_FAULT_SIGBUS)
+ 			goto do_sigbus;
+ 
+--- a/arch/avr32/mm/fault.c
++++ b/arch/avr32/mm/fault.c
+@@ -142,6 +142,8 @@ good_area:
+ 	if (unlikely(fault & VM_FAULT_ERROR)) {
+ 		if (fault & VM_FAULT_OOM)
+ 			goto out_of_memory;
++		else if (fault & VM_FAULT_SIGSEGV)
++			goto bad_area;
+ 		else if (fault & VM_FAULT_SIGBUS)
+ 			goto do_sigbus;
+ 		BUG();
+--- a/arch/cris/mm/fault.c
++++ b/arch/cris/mm/fault.c
+@@ -176,6 +176,8 @@ retry:
+ 	if (unlikely(fault & VM_FAULT_ERROR)) {
+ 		if (fault & VM_FAULT_OOM)
+ 			goto out_of_memory;
++		else if (fault & VM_FAULT_SIGSEGV)
++			goto bad_area;
+ 		else if (fault & VM_FAULT_SIGBUS)
+ 			goto do_sigbus;
+ 		BUG();
+--- a/arch/frv/mm/fault.c
++++ b/arch/frv/mm/fault.c
+@@ -168,6 +168,8 @@ asmlinkage void do_page_fault(int datamm
+ 	if (unlikely(fault & VM_FAULT_ERROR)) {
+ 		if (fault & VM_FAULT_OOM)
+ 			goto out_of_memory;
++		else if (fault & VM_FAULT_SIGSEGV)
++			goto bad_area;
+ 		else if (fault & VM_FAULT_SIGBUS)
+ 			goto do_sigbus;
+ 		BUG();
+--- a/arch/ia64/mm/fault.c
++++ b/arch/ia64/mm/fault.c
+@@ -172,6 +172,8 @@ retry:
+ 		 */
+ 		if (fault & VM_FAULT_OOM) {
+ 			goto out_of_memory;
++		} else if (fault & VM_FAULT_SIGSEGV) {
++			goto bad_area;
+ 		} else if (fault & VM_FAULT_SIGBUS) {
+ 			signal = SIGBUS;
+ 			goto bad_area;
+--- a/arch/m32r/mm/fault.c
++++ b/arch/m32r/mm/fault.c
+@@ -200,6 +200,8 @@ good_area:
+ 	if (unlikely(fault & VM_FAULT_ERROR)) {
+ 		if (fault & VM_FAULT_OOM)
+ 			goto out_of_memory;
++		else if (fault & VM_FAULT_SIGSEGV)
++			goto bad_area;
+ 		else if (fault & VM_FAULT_SIGBUS)
+ 			goto do_sigbus;
+ 		BUG();
+--- a/arch/m68k/mm/fault.c
++++ b/arch/m68k/mm/fault.c
+@@ -153,6 +153,8 @@ good_area:
+ 	if (unlikely(fault & VM_FAULT_ERROR)) {
+ 		if (fault & VM_FAULT_OOM)
+ 			goto out_of_memory;
++		else if (fault & VM_FAULT_SIGSEGV)
++			goto map_err;
+ 		else if (fault & VM_FAULT_SIGBUS)
+ 			goto bus_err;
+ 		BUG();
+--- a/arch/metag/mm/fault.c
++++ b/arch/metag/mm/fault.c
+@@ -141,6 +141,8 @@ good_area:
+ 	if (unlikely(fault & VM_FAULT_ERROR)) {
+ 		if (fault & VM_FAULT_OOM)
+ 			goto out_of_memory;
++		else if (fault & VM_FAULT_SIGSEGV)
++			goto bad_area;
+ 		else if (fault & VM_FAULT_SIGBUS)
+ 			goto do_sigbus;
+ 		BUG();
+--- a/arch/microblaze/mm/fault.c
++++ b/arch/microblaze/mm/fault.c
+@@ -224,6 +224,8 @@ good_area:
+ 	if (unlikely(fault & VM_FAULT_ERROR)) {
+ 		if (fault & VM_FAULT_OOM)
+ 			goto out_of_memory;
++		else if (fault & VM_FAULT_SIGSEGV)
++			goto bad_area;
+ 		else if (fault & VM_FAULT_SIGBUS)
+ 			goto do_sigbus;
+ 		BUG();
+--- a/arch/mips/mm/fault.c
++++ b/arch/mips/mm/fault.c
+@@ -157,6 +157,8 @@ good_area:
+ 	if (unlikely(fault & VM_FAULT_ERROR)) {
+ 		if (fault & VM_FAULT_OOM)
+ 			goto out_of_memory;
++		else if (fault & VM_FAULT_SIGSEGV)
++			goto bad_area;
+ 		else if (fault & VM_FAULT_SIGBUS)
+ 			goto do_sigbus;
+ 		BUG();
+--- a/arch/mn10300/mm/fault.c
++++ b/arch/mn10300/mm/fault.c
+@@ -262,6 +262,8 @@ good_area:
+ 	if (unlikely(fault & VM_FAULT_ERROR)) {
+ 		if (fault & VM_FAULT_OOM)
+ 			goto out_of_memory;
++		else if (fault & VM_FAULT_SIGSEGV)
++			goto bad_area;
+ 		else if (fault & VM_FAULT_SIGBUS)
+ 			goto do_sigbus;
+ 		BUG();
+--- a/arch/openrisc/mm/fault.c
++++ b/arch/openrisc/mm/fault.c
+@@ -171,6 +171,8 @@ good_area:
+ 	if (unlikely(fault & VM_FAULT_ERROR)) {
+ 		if (fault & VM_FAULT_OOM)
+ 			goto out_of_memory;
++		else if (fault & VM_FAULT_SIGSEGV)
++			goto bad_area;
+ 		else if (fault & VM_FAULT_SIGBUS)
+ 			goto do_sigbus;
+ 		BUG();
+--- a/arch/parisc/mm/fault.c
++++ b/arch/parisc/mm/fault.c
+@@ -220,6 +220,8 @@ good_area:
+ 		 */
+ 		if (fault & VM_FAULT_OOM)
+ 			goto out_of_memory;
++		else if (fault & VM_FAULT_SIGSEGV)
++			goto bad_area;
+ 		else if (fault & VM_FAULT_SIGBUS)
+ 			goto bad_area;
+ 		BUG();
+--- a/arch/powerpc/mm/fault.c
++++ b/arch/powerpc/mm/fault.c
+@@ -425,6 +425,8 @@ good_area:
+ 	 */
+ 	fault = handle_mm_fault(mm, vma, address, flags);
+ 	if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
++		if (fault & VM_FAULT_SIGSEGV)
++			goto bad_area;
+ 		rc = mm_fault_error(regs, address, fault);
+ 		if (rc >= MM_FAULT_RETURN)
+ 			goto bail;
+--- a/arch/powerpc/platforms/cell/spu_fault.c
++++ b/arch/powerpc/platforms/cell/spu_fault.c
+@@ -75,7 +75,7 @@ int spu_handle_mm_fault(struct mm_struct
+ 		if (*flt & VM_FAULT_OOM) {
+ 			ret = -ENOMEM;
+ 			goto out_unlock;
+-		} else if (*flt & VM_FAULT_SIGBUS) {
++		} else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
+ 			ret = -EFAULT;
+ 			goto out_unlock;
+ 		}
+--- a/arch/s390/mm/fault.c
++++ b/arch/s390/mm/fault.c
+@@ -244,6 +244,12 @@ static noinline void do_fault_error(stru
+ 			do_no_context(regs);
+ 		else
+ 			pagefault_out_of_memory();
++	} else if (fault & VM_FAULT_SIGSEGV) {
++		/* Kernel mode? Handle exceptions or die */
++		if (!user_mode(regs))
++			do_no_context(regs);
++		else
++			do_sigsegv(regs, SEGV_MAPERR);
+ 	} else if (fault & VM_FAULT_SIGBUS) {
+ 		/* Kernel mode? Handle exceptions or die */
+ 		if (!user_mode(regs))
+--- a/arch/score/mm/fault.c
++++ b/arch/score/mm/fault.c
+@@ -114,6 +114,8 @@ good_area:
+ 	if (unlikely(fault & VM_FAULT_ERROR)) {
+ 		if (fault & VM_FAULT_OOM)
+ 			goto out_of_memory;
++		else if (fault & VM_FAULT_SIGSEGV)
++			goto bad_area;
+ 		else if (fault & VM_FAULT_SIGBUS)
+ 			goto do_sigbus;
+ 		BUG();
+--- a/arch/sh/mm/fault.c
++++ b/arch/sh/mm/fault.c
+@@ -353,6 +353,8 @@ mm_fault_error(struct pt_regs *regs, uns
+ 	} else {
+ 		if (fault & VM_FAULT_SIGBUS)
+ 			do_sigbus(regs, error_code, address);
++		else if (fault & VM_FAULT_SIGSEGV)
++			bad_area(regs, error_code, address);
+ 		else
+ 			BUG();
+ 	}
+--- a/arch/sparc/mm/fault_32.c
++++ b/arch/sparc/mm/fault_32.c
+@@ -252,6 +252,8 @@ good_area:
+ 	if (unlikely(fault & VM_FAULT_ERROR)) {
+ 		if (fault & VM_FAULT_OOM)
+ 			goto out_of_memory;
++		else if (fault & VM_FAULT_SIGSEGV)
++			goto bad_area;
+ 		else if (fault & VM_FAULT_SIGBUS)
+ 			goto do_sigbus;
+ 		BUG();
+--- a/arch/sparc/mm/fault_64.c
++++ b/arch/sparc/mm/fault_64.c
+@@ -443,6 +443,8 @@ good_area:
+ 	if (unlikely(fault & VM_FAULT_ERROR)) {
+ 		if (fault & VM_FAULT_OOM)
+ 			goto out_of_memory;
++		else if (fault & VM_FAULT_SIGSEGV)
++			goto bad_area;
+ 		else if (fault & VM_FAULT_SIGBUS)
+ 			goto do_sigbus;
+ 		BUG();
+--- a/arch/tile/mm/fault.c
++++ b/arch/tile/mm/fault.c
+@@ -446,6 +446,8 @@ good_area:
+ 	if (unlikely(fault & VM_FAULT_ERROR)) {
+ 		if (fault & VM_FAULT_OOM)
+ 			goto out_of_memory;
++		else if (fault & VM_FAULT_SIGSEGV)
++			goto bad_area;
+ 		else if (fault & VM_FAULT_SIGBUS)
+ 			goto do_sigbus;
+ 		BUG();
+--- a/arch/um/kernel/trap.c
++++ b/arch/um/kernel/trap.c
+@@ -80,6 +80,8 @@ good_area:
+ 		if (unlikely(fault & VM_FAULT_ERROR)) {
+ 			if (fault & VM_FAULT_OOM) {
+ 				goto out_of_memory;
++			} else if (fault & VM_FAULT_SIGSEGV) {
++				goto out;
+ 			} else if (fault & VM_FAULT_SIGBUS) {
+ 				err = -EACCES;
+ 				goto out;
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -873,6 +873,8 @@ mm_fault_error(struct pt_regs *regs, uns
+ 		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
+ 			     VM_FAULT_HWPOISON_LARGE))
+ 			do_sigbus(regs, error_code, address, fault);
++		else if (fault & VM_FAULT_SIGSEGV)
++			bad_area_nosemaphore(regs, error_code, address);
+ 		else
+ 			BUG();
+ 	}
+--- a/arch/xtensa/mm/fault.c
++++ b/arch/xtensa/mm/fault.c
+@@ -117,6 +117,8 @@ good_area:
+ 	if (unlikely(fault & VM_FAULT_ERROR)) {
+ 		if (fault & VM_FAULT_OOM)
+ 			goto out_of_memory;
++		else if (fault & VM_FAULT_SIGSEGV)
++			goto bad_area;
+ 		else if (fault & VM_FAULT_SIGBUS)
+ 			goto do_sigbus;
+ 		BUG();
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -891,6 +891,7 @@ static inline int page_mapped(struct pag
+ #define VM_FAULT_WRITE	0x0008	/* Special case for get_user_pages */
+ #define VM_FAULT_HWPOISON 0x0010	/* Hit poisoned small page */
+ #define VM_FAULT_HWPOISON_LARGE 0x0020	/* Hit poisoned large page. Index encoded in upper bits */
++#define VM_FAULT_SIGSEGV 0x0040
+ 
+ #define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, not return page */
+ #define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */
+@@ -898,8 +899,8 @@ static inline int page_mapped(struct pag
+ 
+ #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
+ 
+-#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
+-			 VM_FAULT_HWPOISON_LARGE)
++#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
++			 VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)
+ 
+ /* Encode hstate index for a hwpoisoned large page */
+ #define VM_FAULT_SET_HINDEX(x) ((x) << 12)
+--- a/mm/ksm.c
++++ b/mm/ksm.c
+@@ -376,7 +376,7 @@ static int break_ksm(struct vm_area_stru
+ 		else
+ 			ret = VM_FAULT_WRITE;
+ 		put_page(page);
+-	} while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM)));
++	} while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
+ 	/*
+ 	 * We must loop because handle_mm_fault() may back out if there's
+ 	 * any difficulty e.g. if pte accessed bit gets updated concurrently.
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1844,7 +1844,8 @@ long __get_user_pages(struct task_struct
+ 				else
+ 					return -EFAULT;
+ 			}
+-			if (ret & VM_FAULT_SIGBUS)
++			if (ret & (VM_FAULT_SIGBUS |
++				   VM_FAULT_SIGSEGV))
+ 				return i ? i : -EFAULT;
+ 			BUG();
+ 		}
+@@ -1954,7 +1955,7 @@ int fixup_user_fault(struct task_struct
+ 			return -ENOMEM;
+ 		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
+ 			return -EHWPOISON;
+-		if (ret & VM_FAULT_SIGBUS)
++		if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
+ 			return -EFAULT;
+ 		BUG();
+ 	}
diff --git a/queue-3.10/vm-make-stack-guard-page-errors-return-vm_fault_sigsegv-rather-than-sigbus.patch b/queue-3.10/vm-make-stack-guard-page-errors-return-vm_fault_sigsegv-rather-than-sigbus.patch
new file mode 100644
index 00000000000..ecd40cf31f2
--- /dev/null
+++ b/queue-3.10/vm-make-stack-guard-page-errors-return-vm_fault_sigsegv-rather-than-sigbus.patch
@@ -0,0 +1,43 @@
+From 9c145c56d0c8a0b62e48c8d71e055ad0fb2012ba Mon Sep 17 00:00:00 2001
+From: Linus Torvalds
+Date: Thu, 29 Jan 2015 11:15:17 -0800
+Subject: vm: make stack guard page errors return VM_FAULT_SIGSEGV rather than SIGBUS
+
+From: Linus Torvalds
+
+commit 9c145c56d0c8a0b62e48c8d71e055ad0fb2012ba upstream.
+
+The stack guard page error case has long incorrectly caused a SIGBUS
+rather than a SIGSEGV, but nobody actually noticed until commit
+fee7e49d4514 ("mm: propagate error from stack expansion even for guard
+page") because that error case was never actually triggered in any
+normal situations.
+
+Now that we actually report the error, people noticed the wrong signal
+that resulted.  So far, only the test suite of libsigsegv seems to have
+actually cared, but there are real applications that use libsigsegv, so
+let's not wait for any of those to break.
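+
+[editor's note: not part of the upstream commit.  A minimal userspace
+sketch, in the spirit of the libsigsegv test suite, showing which signal
+a stack overflow into the guard page delivers; build with -O0 so the
+recursion is not optimized away.  On a kernel with this series the
+program prints SIGSEGV; without it, the guard-page case could report
+SIGBUS:
+
+	#include <signal.h>
+	#include <stdlib.h>
+	#include <string.h>
+	#include <unistd.h>
+
+	static void handler(int sig)
+	{
+		/* only async-signal-safe calls in a signal handler */
+		const char *msg = (sig == SIGSEGV) ? "SIGSEGV\n" : "SIGBUS\n";
+		write(STDERR_FILENO, msg, strlen(msg));
+		_exit(sig == SIGSEGV ? 0 : 1);
+	}
+
+	static int recurse(int depth)
+	{
+		char pad[4096];			/* one page of stack per frame */
+		memset(pad, depth, sizeof(pad));
+		return recurse(depth + 1) + pad[0];
+	}
+
+	int main(void)
+	{
+		/* the handler must run on an alternate stack, because the
+		 * normal stack is exhausted when the signal is delivered */
+		stack_t ss = { .ss_sp = malloc(SIGSTKSZ), .ss_size = SIGSTKSZ };
+		struct sigaction sa = { .sa_handler = handler, .sa_flags = SA_ONSTACK };
+
+		sigaltstack(&ss, NULL);
+		sigaction(SIGSEGV, &sa, NULL);
+		sigaction(SIGBUS, &sa, NULL);
+		return recurse(0);
+	}
+]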
+
+Reported-and-tested-by: Takashi Iwai
+Tested-by: Jan Engelhardt
+Acked-by: Heiko Carstens # "s390 still compiles and boots"
+Cc: linux-arch@vger.kernel.org
+Cc: stable@vger.kernel.org
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ mm/memory.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3232,7 +3232,7 @@ static int do_anonymous_page(struct mm_s
+ 
+ 	/* Check if we need to add a guard page to the stack */
+ 	if (check_stack_guard_page(vma, address) < 0)
+-		return VM_FAULT_SIGBUS;
++		return VM_FAULT_SIGSEGV;
+ 
+ 	/* Use the zero-page for reads */
+ 	if (!(flags & FAULT_FLAG_WRITE)) {
diff --git a/queue-3.10/x86-mm-move-mmap_sem-unlock-from-mm_fault_error-to-caller.patch b/queue-3.10/x86-mm-move-mmap_sem-unlock-from-mm_fault_error-to-caller.patch
new file mode 100644
index 00000000000..30e2d564438
--- /dev/null
+++ b/queue-3.10/x86-mm-move-mmap_sem-unlock-from-mm_fault_error-to-caller.patch
@@ -0,0 +1,65 @@
+From 7fb08eca45270d0ae86e1ad9d39c40b7a55d0190 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds
+Date: Mon, 15 Dec 2014 14:46:06 -0800
+Subject: x86: mm: move mmap_sem unlock from mm_fault_error() to caller
+
+From: Linus Torvalds
+
+commit 7fb08eca45270d0ae86e1ad9d39c40b7a55d0190 upstream.
+
+This replaces four copies in various stages of mm_fault_error() handling
+with just a single one.  It will also allow for more natural placement
+of the unlocking after some further cleanup.
+
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/x86/mm/fault.c |    8 +-------
+ 1 file changed, 1 insertion(+), 7 deletions(-)
+
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -812,11 +812,8 @@ do_sigbus(struct pt_regs *regs, unsigned
+ 	  unsigned int fault)
+ {
+ 	struct task_struct *tsk = current;
+-	struct mm_struct *mm = tsk->mm;
+ 	int code = BUS_ADRERR;
+ 
+-	up_read(&mm->mmap_sem);
+-
+ 	/* Kernel mode? Handle exceptions or die: */
+ 	if (!(error_code & PF_USER)) {
+ 		no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
+@@ -847,7 +844,6 @@ mm_fault_error(struct pt_regs *regs, uns
+ 	unsigned long address, unsigned int fault)
+ {
+ 	if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
+-		up_read(&current->mm->mmap_sem);
+ 		no_context(regs, error_code, address, 0, 0);
+ 		return;
+ 	}
+@@ -855,14 +851,11 @@ mm_fault_error(struct pt_regs *regs, uns
+ 	if (fault & VM_FAULT_OOM) {
+ 		/* Kernel mode? Handle exceptions or die: */
+ 		if (!(error_code & PF_USER)) {
+-			up_read(&current->mm->mmap_sem);
+ 			no_context(regs, error_code, address,
+ 				   SIGSEGV, SEGV_MAPERR);
+ 			return;
+ 		}
+ 
+-		up_read(&current->mm->mmap_sem);
+-
+ 		/*
+ 		 * We ran out of memory, call the OOM killer, and return the
+ 		 * userspace (which will retry the fault, or kill us if we got
+@@ -1195,6 +1188,7 @@ good_area:
+ 		return;
+ 
+ 	if (unlikely(fault & VM_FAULT_ERROR)) {
++		up_read(&mm->mmap_sem);
+ 		mm_fault_error(regs, error_code, address, fault);
+ 		return;
+ 	}