3.14-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 26 Apr 2015 10:44:07 +0000 (12:44 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 26 Apr 2015 10:44:07 +0000 (12:44 +0200)
added patches:
arm-8108-1-mm-introduce-pte-pmd-_isset-and-pte-pmd-_isclear.patch
arm-8109-1-mm-modify-pte_write-and-pmd_write-logic-for-lpae.patch
vm-add-vm_fault_sigsegv-handling-support.patch
vm-make-stack-guard-page-errors-return-vm_fault_sigsegv-rather-than-sigbus.patch
x86-mm-move-mmap_sem-unlock-from-mm_fault_error-to-caller.patch

queue-3.14/arm-8108-1-mm-introduce-pte-pmd-_isset-and-pte-pmd-_isclear.patch [new file with mode: 0644]
queue-3.14/arm-8109-1-mm-modify-pte_write-and-pmd_write-logic-for-lpae.patch [new file with mode: 0644]
queue-3.14/series
queue-3.14/vm-add-vm_fault_sigsegv-handling-support.patch [new file with mode: 0644]
queue-3.14/vm-make-stack-guard-page-errors-return-vm_fault_sigsegv-rather-than-sigbus.patch [new file with mode: 0644]
queue-3.14/x86-mm-move-mmap_sem-unlock-from-mm_fault_error-to-caller.patch [new file with mode: 0644]

diff --git a/queue-3.14/arm-8108-1-mm-introduce-pte-pmd-_isset-and-pte-pmd-_isclear.patch b/queue-3.14/arm-8108-1-mm-introduce-pte-pmd-_isset-and-pte-pmd-_isclear.patch
new file mode 100644
index 0000000..e0465ff
--- /dev/null
@@ -0,0 +1,90 @@
+From f2950706871c4b6e8c0f0d7c3f62d35930b8de63 Mon Sep 17 00:00:00 2001
+From: Steve Capper <steve.capper@linaro.org>
+Date: Fri, 18 Jul 2014 16:15:27 +0100
+Subject: ARM: 8108/1: mm: Introduce {pte,pmd}_isset and {pte,pmd}_isclear
+
+From: Steve Capper <steve.capper@linaro.org>
+
+commit f2950706871c4b6e8c0f0d7c3f62d35930b8de63 upstream.
+
+Long descriptors on ARM are 64 bits, and some pte functions such as
+pte_dirty return a bitwise-and of a flag with the pte value. If the
+flag to be tested resides in the upper 32 bits of the pte, then we run
+into the danger of the result being dropped if downcast.
+
+For example:
+       gather_stats(page, md, pte_dirty(*pte), 1);
+where pte_dirty(*pte) is downcast to an int.
+
+This patch introduces a new macro pte_isset which performs the bitwise
+and, then performs a double logical invert (where needed) to ensure
+predictable downcasting. The logical inverse pte_isclear is also
+introduced.
+
+Equivalent pmd functions for Transparent HugePages have also been
+added.
+
+Signed-off-by: Steve Capper <steve.capper@linaro.org>
+Reviewed-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+[hpy: Backported to 3.14
+ - adjust the context ]
+Signed-off-by: Hou Pengyang <houpengyang@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/pgtable-3level.h |   12 ++++++++----
+ arch/arm/include/asm/pgtable.h        |   14 +++++++++-----
+ 2 files changed, 17 insertions(+), 9 deletions(-)
+
+--- a/arch/arm/include/asm/pgtable-3level.h
++++ b/arch/arm/include/asm/pgtable-3level.h
+@@ -207,17 +207,21 @@ static inline pmd_t *pmd_offset(pud_t *p
+ #define pte_huge(pte)         (pte_val(pte) && !(pte_val(pte) & PTE_TABLE_BIT))
+ #define pte_mkhuge(pte)               (__pte(pte_val(pte) & ~PTE_TABLE_BIT))
+-#define pmd_young(pmd)                (pmd_val(pmd) & PMD_SECT_AF)
++#define pmd_isset(pmd, val)   ((u32)(val) == (val) ? pmd_val(pmd) & (val) \
++                                              : !!(pmd_val(pmd) & (val)))
++#define pmd_isclear(pmd, val) (!(pmd_val(pmd) & (val)))
++
++#define pmd_young(pmd)                (pmd_isset((pmd), PMD_SECT_AF))
+ #define __HAVE_ARCH_PMD_WRITE
+-#define pmd_write(pmd)                (!(pmd_val(pmd) & PMD_SECT_RDONLY))
++#define pmd_write(pmd)                (pmd_isclear((pmd), PMD_SECT_RDONLY))
+ #define pmd_hugewillfault(pmd)        (!pmd_young(pmd) || !pmd_write(pmd))
+ #define pmd_thp_or_huge(pmd)  (pmd_huge(pmd) || pmd_trans_huge(pmd))
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+-#define pmd_trans_huge(pmd)   (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
+-#define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING)
++#define pmd_trans_huge(pmd)   (pmd_val(pmd) && !pmd_table(pmd))
++#define pmd_trans_splitting(pmd) (pmd_isset((pmd), PMD_SECT_SPLITTING))
+ #endif
+ #define PMD_BIT_FUNC(fn,op) \
+--- a/arch/arm/include/asm/pgtable.h
++++ b/arch/arm/include/asm/pgtable.h
+@@ -214,12 +214,16 @@ static inline pte_t *pmd_page_vaddr(pmd_
+ #define pte_clear(mm,addr,ptep)       set_pte_ext(ptep, __pte(0), 0)
++#define pte_isset(pte, val)   ((u32)(val) == (val) ? pte_val(pte) & (val) \
++                                              : !!(pte_val(pte) & (val)))
++#define pte_isclear(pte, val) (!(pte_val(pte) & (val)))
++
+ #define pte_none(pte)         (!pte_val(pte))
+-#define pte_present(pte)      (pte_val(pte) & L_PTE_PRESENT)
+-#define pte_write(pte)                (!(pte_val(pte) & L_PTE_RDONLY))
+-#define pte_dirty(pte)                (pte_val(pte) & L_PTE_DIRTY)
+-#define pte_young(pte)                (pte_val(pte) & L_PTE_YOUNG)
+-#define pte_exec(pte)         (!(pte_val(pte) & L_PTE_XN))
++#define pte_present(pte)      (pte_isset((pte), L_PTE_PRESENT))
++#define pte_write(pte)                (pte_isclear((pte), L_PTE_RDONLY))
++#define pte_dirty(pte)                (pte_isset((pte), L_PTE_DIRTY))
++#define pte_young(pte)                (pte_isset((pte), L_PTE_YOUNG))
++#define pte_exec(pte)         (pte_isclear((pte), L_PTE_XN))
+ #define pte_special(pte)      (0)
+ #define pte_present_user(pte)  (pte_present(pte) && (pte_val(pte) & L_PTE_USER))
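
The truncation bug is easiest to see outside the kernel. Below is a small
standalone C sketch (userspace code, not part of the patch; the pteval_t
type and the bit-55 flag mirror the LPAE definitions in the hunks above)
showing how a flag above bit 31 vanishes when the bitwise-and result is
downcast to int, and how the pte_isset() construction avoids it:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t pteval_t;                 /* LPAE ptes are 64 bits */

#define L_PTE_DIRTY   ((pteval_t)1 << 55)  /* flag above bit 31 */

/* Naive test: the and-result is 1ULL << 55; storing it in an int keeps
 * only the low 32 bits, which are all zero on typical targets. */
#define pte_dirty_naive(pte)  ((pte) & L_PTE_DIRTY)

/* pte_isset-style test: when the flag does not fit in 32 bits, collapse
 * the result to 0/1 with a double logical negation before any downcast
 * can happen. */
#define pte_isset(pte, val)  ((uint32_t)(val) == (val) ? (pte) & (val) \
                                               : !!((pte) & (val)))

int main(void)
{
	pteval_t pte = L_PTE_DIRTY;              /* a dirty pte */

	int naive = pte_dirty_naive(pte);        /* downcast drops bit 55 */
	int safe  = pte_isset(pte, L_PTE_DIRTY);

	printf("naive: %d, isset: %d\n", naive, safe);  /* naive: 0, isset: 1 */
	return 0;
}

The (u32)(val) == (val) comparison is a compile-time constant for any given
flag, so the compiler keeps the cheap plain-and form for flags in the low
32 bits and emits the !! normalisation only where it is actually needed.
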
diff --git a/queue-3.14/arm-8109-1-mm-modify-pte_write-and-pmd_write-logic-for-lpae.patch b/queue-3.14/arm-8109-1-mm-modify-pte_write-and-pmd_write-logic-for-lpae.patch
new file mode 100644
index 0000000..34cdf3b
--- /dev/null
@@ -0,0 +1,172 @@
+From ded9477984690d026e46dd75e8157392cea3f13f Mon Sep 17 00:00:00 2001
+From: Steve Capper <steve.capper@linaro.org>
+Date: Fri, 18 Jul 2014 16:16:15 +0100
+Subject: ARM: 8109/1: mm: Modify pte_write and pmd_write logic for LPAE
+
+From: Steve Capper <steve.capper@linaro.org>
+
+commit ded9477984690d026e46dd75e8157392cea3f13f upstream.
+
+For LPAE, we have the following means for encoding writable or dirty
+ptes:
+                              L_PTE_DIRTY       L_PTE_RDONLY
+    !pte_dirty && !pte_write        0               1
+    !pte_dirty && pte_write         0               1
+    pte_dirty && !pte_write         1               1
+    pte_dirty && pte_write          1               0
+
+So we can't distinguish between writeable clean ptes and read only
+ptes. This can cause problems with ptes being incorrectly flagged as
+read only when they are writeable but not dirty.
+
+This patch renumbers L_PTE_RDONLY from AP[2] to a software bit #58,
+and adds additional logic to set AP[2] whenever the pte is read only
+or not dirty. That way we can distinguish between clean writeable ptes
+and read only ptes.
+
+HugeTLB pages will use this new logic automatically.
+
+We need to add some logic to Transparent HugePages to ensure that they
+correctly interpret the revised pgprot permissions (L_PTE_RDONLY has
+moved and no longer matches PMD_SECT_AP2). In the process of revising
+THP, the names of the PMD software bits have been prefixed with L_ to
+make them easier to distinguish from their hardware bit counterparts.
+
+Signed-off-by: Steve Capper <steve.capper@linaro.org>
+Reviewed-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+[hpy: Backported to 3.14
+ - adjust the context ]
+Signed-off-by: Hou Pengyang <houpengyang@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/pgtable-3level-hwdef.h |    3 +-
+ arch/arm/include/asm/pgtable-3level.h       |   41 ++++++++++++++++------------
+ arch/arm/mm/proc-v7-3level.S                |    9 ++++--
+ 3 files changed, 33 insertions(+), 20 deletions(-)
+
+--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
++++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
+@@ -43,7 +43,7 @@
+ #define PMD_SECT_BUFFERABLE   (_AT(pmdval_t, 1) << 2)
+ #define PMD_SECT_CACHEABLE    (_AT(pmdval_t, 1) << 3)
+ #define PMD_SECT_USER         (_AT(pmdval_t, 1) << 6)         /* AP[1] */
+-#define PMD_SECT_RDONLY               (_AT(pmdval_t, 1) << 7)         /* AP[2] */
++#define PMD_SECT_AP2          (_AT(pmdval_t, 1) << 7)         /* read only */
+ #define PMD_SECT_S            (_AT(pmdval_t, 3) << 8)
+ #define PMD_SECT_AF           (_AT(pmdval_t, 1) << 10)
+ #define PMD_SECT_nG           (_AT(pmdval_t, 1) << 11)
+@@ -72,6 +72,7 @@
+ #define PTE_TABLE_BIT         (_AT(pteval_t, 1) << 1)
+ #define PTE_BUFFERABLE                (_AT(pteval_t, 1) << 2)         /* AttrIndx[0] */
+ #define PTE_CACHEABLE         (_AT(pteval_t, 1) << 3)         /* AttrIndx[1] */
++#define PTE_AP2                       (_AT(pteval_t, 1) << 7)         /* AP[2] */
+ #define PTE_EXT_SHARED                (_AT(pteval_t, 3) << 8)         /* SH[1:0], inner shareable */
+ #define PTE_EXT_AF            (_AT(pteval_t, 1) << 10)        /* Access Flag */
+ #define PTE_EXT_NG            (_AT(pteval_t, 1) << 11)        /* nG */
+--- a/arch/arm/include/asm/pgtable-3level.h
++++ b/arch/arm/include/asm/pgtable-3level.h
+@@ -79,18 +79,19 @@
+ #define L_PTE_PRESENT         (_AT(pteval_t, 3) << 0)         /* Present */
+ #define L_PTE_FILE            (_AT(pteval_t, 1) << 2)         /* only when !PRESENT */
+ #define L_PTE_USER            (_AT(pteval_t, 1) << 6)         /* AP[1] */
+-#define L_PTE_RDONLY          (_AT(pteval_t, 1) << 7)         /* AP[2] */
+ #define L_PTE_SHARED          (_AT(pteval_t, 3) << 8)         /* SH[1:0], inner shareable */
+ #define L_PTE_YOUNG           (_AT(pteval_t, 1) << 10)        /* AF */
+ #define L_PTE_XN              (_AT(pteval_t, 1) << 54)        /* XN */
+-#define L_PTE_DIRTY           (_AT(pteval_t, 1) << 55)        /* unused */
+-#define L_PTE_SPECIAL         (_AT(pteval_t, 1) << 56)        /* unused */
++#define L_PTE_DIRTY           (_AT(pteval_t, 1) << 55)
++#define L_PTE_SPECIAL         (_AT(pteval_t, 1) << 56)
+ #define L_PTE_NONE            (_AT(pteval_t, 1) << 57)        /* PROT_NONE */
++#define L_PTE_RDONLY          (_AT(pteval_t, 1) << 58)        /* READ ONLY */
+-#define PMD_SECT_VALID                (_AT(pmdval_t, 1) << 0)
+-#define PMD_SECT_DIRTY                (_AT(pmdval_t, 1) << 55)
+-#define PMD_SECT_SPLITTING    (_AT(pmdval_t, 1) << 56)
+-#define PMD_SECT_NONE         (_AT(pmdval_t, 1) << 57)
++#define L_PMD_SECT_VALID      (_AT(pmdval_t, 1) << 0)
++#define L_PMD_SECT_DIRTY      (_AT(pmdval_t, 1) << 55)
++#define L_PMD_SECT_SPLITTING  (_AT(pmdval_t, 1) << 56)
++#define L_PMD_SECT_NONE               (_AT(pmdval_t, 1) << 57)
++#define L_PMD_SECT_RDONLY     (_AT(pteval_t, 1) << 58)
+ /*
+  * To be used in assembly code with the upper page attributes.
+@@ -214,24 +215,25 @@ static inline pmd_t *pmd_offset(pud_t *p
+ #define pmd_young(pmd)                (pmd_isset((pmd), PMD_SECT_AF))
+ #define __HAVE_ARCH_PMD_WRITE
+-#define pmd_write(pmd)                (pmd_isclear((pmd), PMD_SECT_RDONLY))
++#define pmd_write(pmd)                (pmd_isclear((pmd), L_PMD_SECT_RDONLY))
++#define pmd_dirty(pmd)                (pmd_isset((pmd), L_PMD_SECT_DIRTY))
+ #define pmd_hugewillfault(pmd)        (!pmd_young(pmd) || !pmd_write(pmd))
+ #define pmd_thp_or_huge(pmd)  (pmd_huge(pmd) || pmd_trans_huge(pmd))
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ #define pmd_trans_huge(pmd)   (pmd_val(pmd) && !pmd_table(pmd))
+-#define pmd_trans_splitting(pmd) (pmd_isset((pmd), PMD_SECT_SPLITTING))
++#define pmd_trans_splitting(pmd) (pmd_isset((pmd), L_PMD_SECT_SPLITTING))
+ #endif
+ #define PMD_BIT_FUNC(fn,op) \
+ static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }
+-PMD_BIT_FUNC(wrprotect,       |= PMD_SECT_RDONLY);
++PMD_BIT_FUNC(wrprotect,       |= L_PMD_SECT_RDONLY);
+ PMD_BIT_FUNC(mkold,   &= ~PMD_SECT_AF);
+-PMD_BIT_FUNC(mksplitting, |= PMD_SECT_SPLITTING);
+-PMD_BIT_FUNC(mkwrite,   &= ~PMD_SECT_RDONLY);
+-PMD_BIT_FUNC(mkdirty,   |= PMD_SECT_DIRTY);
++PMD_BIT_FUNC(mksplitting, |= L_PMD_SECT_SPLITTING);
++PMD_BIT_FUNC(mkwrite,   &= ~L_PMD_SECT_RDONLY);
++PMD_BIT_FUNC(mkdirty,   |= L_PMD_SECT_DIRTY);
+ PMD_BIT_FUNC(mkyoung,   |= PMD_SECT_AF);
+ #define pmd_mkhuge(pmd)               (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
+@@ -245,8 +247,8 @@ PMD_BIT_FUNC(mkyoung,   |= PMD_SECT_AF);
+ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+ {
+-      const pmdval_t mask = PMD_SECT_USER | PMD_SECT_XN | PMD_SECT_RDONLY |
+-                              PMD_SECT_VALID | PMD_SECT_NONE;
++      const pmdval_t mask = PMD_SECT_USER | PMD_SECT_XN | L_PMD_SECT_RDONLY |
++                              L_PMD_SECT_VALID | L_PMD_SECT_NONE;
+       pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask);
+       return pmd;
+ }
+@@ -257,8 +259,13 @@ static inline void set_pmd_at(struct mm_
+       BUG_ON(addr >= TASK_SIZE);
+       /* create a faulting entry if PROT_NONE protected */
+-      if (pmd_val(pmd) & PMD_SECT_NONE)
+-              pmd_val(pmd) &= ~PMD_SECT_VALID;
++      if (pmd_val(pmd) & L_PMD_SECT_NONE)
++              pmd_val(pmd) &= ~L_PMD_SECT_VALID;
++
++      if (pmd_write(pmd) && pmd_dirty(pmd))
++              pmd_val(pmd) &= ~PMD_SECT_AP2;
++      else
++              pmd_val(pmd) |= PMD_SECT_AP2;
+       *pmdp = __pmd(pmd_val(pmd) | PMD_SECT_nG);
+       flush_pmd_entry(pmdp);
+--- a/arch/arm/mm/proc-v7-3level.S
++++ b/arch/arm/mm/proc-v7-3level.S
+@@ -86,8 +86,13 @@ ENTRY(cpu_v7_set_pte_ext)
+       tst     rh, #1 << (57 - 32)             @ L_PTE_NONE
+       bicne   rl, #L_PTE_VALID
+       bne     1f
+-      tst     rh, #1 << (55 - 32)             @ L_PTE_DIRTY
+-      orreq   rl, #L_PTE_RDONLY
++
++      eor     ip, rh, #1 << (55 - 32) @ toggle L_PTE_DIRTY in temp reg to
++                                      @ test for !L_PTE_DIRTY || L_PTE_RDONLY
++      tst     ip, #1 << (55 - 32) | 1 << (58 - 32)
++      orrne   rl, #PTE_AP2
++      biceq   rl, #PTE_AP2
++
+ 1:    strd    r2, r3, [r0]
+       ALT_SMP(W(nop))
+       ALT_UP (mcr     p15, 0, r0, c7, c10, 1)         @ flush_pte
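
The renumbering is easiest to check as a truth table over the four
dirty/write states. The standalone C model below (illustrative only; bit
positions taken from the hunks above) mirrors the set_pmd_at() and
cpu_v7_set_pte_ext logic that derives the hardware AP[2] bit from the two
software bits: hardware write permission is granted only to entries that
are both writable and dirty, so the first write to a clean writable page
faults and lets the kernel mark it dirty.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t pteval_t;

#define PTE_AP2       ((pteval_t)1 << 7)   /* hardware read-only bit */
#define L_PTE_DIRTY   ((pteval_t)1 << 55)  /* software dirty bit */
#define L_PTE_RDONLY  ((pteval_t)1 << 58)  /* software read-only, moved off bit 7 */

#define pte_write(p)  (!((p) & L_PTE_RDONLY))
#define pte_dirty(p)  (!!((p) & L_PTE_DIRTY))

static pteval_t encode_hw(pteval_t pte)
{
	if (pte_write(pte) && pte_dirty(pte))
		pte &= ~PTE_AP2;	/* writable and dirty: allow writes */
	else
		pte |= PTE_AP2;		/* clean or read-only: fault on write */
	return pte;
}

int main(void)
{
	const struct { const char *name; pteval_t sw; } t[] = {
		{ "clean, read-only", L_PTE_RDONLY },
		{ "clean, writable ", 0 },
		{ "dirty, read-only", L_PTE_DIRTY | L_PTE_RDONLY },
		{ "dirty, writable ", L_PTE_DIRTY },
	};

	for (unsigned int i = 0; i < 4; i++)	/* all four states stay distinct */
		printf("%s -> AP2=%d\n", t[i].name,
		       (int)!!(encode_hw(t[i].sw) & PTE_AP2));
	return 0;
}
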
diff --git a/queue-3.14/series b/queue-3.14/series
index 876e617fa942525b13836e6881fe412f7b349bca..78d9f1750a08084df1ed76513a41665d946b9b3a 100644
@@ -32,3 +32,8 @@ netfilter-conntrack-disable-generic-tracking-for-known-protocols.patch
 kvm-x86-sysenter-emulation-is-broken.patch
 move-d_rcu-from-overlapping-d_child-to-overlapping-d_alias.patch
 sched-declare-pid_alive-as-inline.patch
+vm-add-vm_fault_sigsegv-handling-support.patch
+vm-make-stack-guard-page-errors-return-vm_fault_sigsegv-rather-than-sigbus.patch
+arm-8108-1-mm-introduce-pte-pmd-_isset-and-pte-pmd-_isclear.patch
+arm-8109-1-mm-modify-pte_write-and-pmd_write-logic-for-lpae.patch
+x86-mm-move-mmap_sem-unlock-from-mm_fault_error-to-caller.patch
diff --git a/queue-3.14/vm-add-vm_fault_sigsegv-handling-support.patch b/queue-3.14/vm-add-vm_fault_sigsegv-handling-support.patch
new file mode 100644
index 0000000..9c6aed9
--- /dev/null
@@ -0,0 +1,438 @@
+From 33692f27597fcab536d7cbbcc8f52905133e4aa7 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Thu, 29 Jan 2015 10:51:32 -0800
+Subject: vm: add VM_FAULT_SIGSEGV handling support
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit 33692f27597fcab536d7cbbcc8f52905133e4aa7 upstream.
+
+The core VM already knows about VM_FAULT_SIGBUS, but cannot return a
+"you should SIGSEGV" error, because the SIGSEGV case was generally
+handled by the caller - usually the architecture fault handler.
+
+That results in lots of duplication - all the architecture fault
+handlers end up doing very similar "look up vma, check permissions, do
+retries etc" - but it generally works.  However, there are cases where
+the VM actually wants to SIGSEGV, and applications _expect_ SIGSEGV.
+
+In particular, when accessing the stack guard page, libsigsegv expects a
+SIGSEGV.  And it usually got one, because the stack growth is handled by
+that duplicated architecture fault handler.
+
+However, when the generic VM layer started propagating the error return
+from the stack expansion in commit fee7e49d4514 ("mm: propagate error
+from stack expansion even for guard page"), that now exposed the
+existing VM_FAULT_SIGBUS result to user space.  And user space really
+expected SIGSEGV, not SIGBUS.
+
+To fix that case, we need to add a VM_FAULT_SIGSEGV, and teach all those
+duplicate architecture fault handlers about it.  They all already have
+the code to handle SIGSEGV, so it's about just tying that new return
+value to the existing code, but it's all a bit annoying.
+
+This is the mindless minimal patch to do this.  A more extensive patch
+would be to try to gather up the mostly shared fault handling logic into
+one generic helper routine, and long-term we really should do that
+cleanup.
+
+Just from this patch, you can generally see that most architectures just
+copied (directly or indirectly) the old x86 way of doing things, but in
+the meantime that original x86 model has been improved to hold the VM
+semaphore for shorter times etc and to handle VM_FAULT_RETRY and other
+"newer" things, so it would be a good idea to bring all those
+improvements to the generic case and teach other architectures about
+them too.
+
+Reported-and-tested-by: Takashi Iwai <tiwai@suse.de>
+Tested-by: Jan Engelhardt <jengelh@inai.de>
+Acked-by: Heiko Carstens <heiko.carstens@de.ibm.com> # "s390 still compiles and boots"
+Cc: linux-arch@vger.kernel.org
+Cc: stable@vger.kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[shengyong: Backport to 3.14
+ - adjust context
+ - ignore modification for arch nios2, because 3.14 does not support it
+ - add SIGSEGV handling to powerpc/cell spu_fault.c, because 3.14 does not
+   separate it to copro_fault.c
+ - add SIGSEGV handling to mm/memory.c, because 3.14 does not separate it
+   to gup.c
+]
+Signed-off-by: Sheng Yong <shengyong1@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/alpha/mm/fault.c                        |    2 ++
+ arch/arc/mm/fault.c                          |    2 ++
+ arch/avr32/mm/fault.c                        |    2 ++
+ arch/cris/mm/fault.c                         |    2 ++
+ arch/frv/mm/fault.c                          |    2 ++
+ arch/ia64/mm/fault.c                         |    2 ++
+ arch/m32r/mm/fault.c                         |    2 ++
+ arch/m68k/mm/fault.c                         |    2 ++
+ arch/metag/mm/fault.c                        |    2 ++
+ arch/microblaze/mm/fault.c                   |    2 ++
+ arch/mips/mm/fault.c                         |    2 ++
+ arch/mn10300/mm/fault.c                      |    2 ++
+ arch/openrisc/mm/fault.c                     |    2 ++
+ arch/parisc/mm/fault.c                       |    2 ++
+ arch/powerpc/mm/fault.c                      |    2 ++
+ arch/powerpc/platforms/cell/spu_fault.c      |    2 +-
+ arch/s390/mm/fault.c                         |    6 ++++++
+ arch/score/mm/fault.c                        |    2 ++
+ arch/sh/mm/fault.c                           |    2 ++
+ arch/sparc/mm/fault_32.c                     |    2 ++
+ arch/sparc/mm/fault_64.c                     |    2 ++
+ arch/tile/mm/fault.c                         |    2 ++
+ arch/um/kernel/trap.c                        |    2 ++
+ arch/x86/mm/fault.c                          |    2 ++
+ arch/xtensa/mm/fault.c                       |    2 ++
+ drivers/staging/lustre/lustre/llite/vvp_io.c |    2 +-
+ include/linux/mm.h                           |    6 ++++--
+ mm/ksm.c                                     |    2 +-
+ mm/memory.c                                  |    5 +++--
+ 29 files changed, 62 insertions(+), 7 deletions(-)
+
+--- a/arch/alpha/mm/fault.c
++++ b/arch/alpha/mm/fault.c
+@@ -156,6 +156,8 @@ retry:
+       if (unlikely(fault & VM_FAULT_ERROR)) {
+               if (fault & VM_FAULT_OOM)
+                       goto out_of_memory;
++              else if (fault & VM_FAULT_SIGSEGV)
++                      goto bad_area;
+               else if (fault & VM_FAULT_SIGBUS)
+                       goto do_sigbus;
+               BUG();
+--- a/arch/arc/mm/fault.c
++++ b/arch/arc/mm/fault.c
+@@ -162,6 +162,8 @@ good_area:
+       /* TBD: switch to pagefault_out_of_memory() */
+       if (fault & VM_FAULT_OOM)
+               goto out_of_memory;
+      else if (fault & VM_FAULT_SIGSEGV)
++              goto bad_area;
+       else if (fault & VM_FAULT_SIGBUS)
+               goto do_sigbus;
+--- a/arch/avr32/mm/fault.c
++++ b/arch/avr32/mm/fault.c
+@@ -142,6 +142,8 @@ good_area:
+       if (unlikely(fault & VM_FAULT_ERROR)) {
+               if (fault & VM_FAULT_OOM)
+                       goto out_of_memory;
++              else if (fault & VM_FAULT_SIGSEGV)
++                      goto bad_area;
+               else if (fault & VM_FAULT_SIGBUS)
+                       goto do_sigbus;
+               BUG();
+--- a/arch/cris/mm/fault.c
++++ b/arch/cris/mm/fault.c
+@@ -176,6 +176,8 @@ retry:
+       if (unlikely(fault & VM_FAULT_ERROR)) {
+               if (fault & VM_FAULT_OOM)
+                       goto out_of_memory;
++              else if (fault & VM_FAULT_SIGSEGV)
++                      goto bad_area;
+               else if (fault & VM_FAULT_SIGBUS)
+                       goto do_sigbus;
+               BUG();
+--- a/arch/frv/mm/fault.c
++++ b/arch/frv/mm/fault.c
+@@ -168,6 +168,8 @@ asmlinkage void do_page_fault(int datamm
+       if (unlikely(fault & VM_FAULT_ERROR)) {
+               if (fault & VM_FAULT_OOM)
+                       goto out_of_memory;
++              else if (fault & VM_FAULT_SIGSEGV)
++                      goto bad_area;
+               else if (fault & VM_FAULT_SIGBUS)
+                       goto do_sigbus;
+               BUG();
+--- a/arch/ia64/mm/fault.c
++++ b/arch/ia64/mm/fault.c
+@@ -172,6 +172,8 @@ retry:
+                */
+               if (fault & VM_FAULT_OOM) {
+                       goto out_of_memory;
++              } else if (fault & VM_FAULT_SIGSEGV) {
++                      goto bad_area;
+               } else if (fault & VM_FAULT_SIGBUS) {
+                       signal = SIGBUS;
+                       goto bad_area;
+--- a/arch/m32r/mm/fault.c
++++ b/arch/m32r/mm/fault.c
+@@ -200,6 +200,8 @@ good_area:
+       if (unlikely(fault & VM_FAULT_ERROR)) {
+               if (fault & VM_FAULT_OOM)
+                       goto out_of_memory;
++              else if (fault & VM_FAULT_SIGSEGV)
++                      goto bad_area;
+               else if (fault & VM_FAULT_SIGBUS)
+                       goto do_sigbus;
+               BUG();
+--- a/arch/m68k/mm/fault.c
++++ b/arch/m68k/mm/fault.c
+@@ -145,6 +145,8 @@ good_area:
+       if (unlikely(fault & VM_FAULT_ERROR)) {
+               if (fault & VM_FAULT_OOM)
+                       goto out_of_memory;
++              else if (fault & VM_FAULT_SIGSEGV)
++                      goto map_err;
+               else if (fault & VM_FAULT_SIGBUS)
+                       goto bus_err;
+               BUG();
+--- a/arch/metag/mm/fault.c
++++ b/arch/metag/mm/fault.c
+@@ -141,6 +141,8 @@ good_area:
+       if (unlikely(fault & VM_FAULT_ERROR)) {
+               if (fault & VM_FAULT_OOM)
+                       goto out_of_memory;
++              else if (fault & VM_FAULT_SIGSEGV)
++                      goto bad_area;
+               else if (fault & VM_FAULT_SIGBUS)
+                       goto do_sigbus;
+               BUG();
+--- a/arch/microblaze/mm/fault.c
++++ b/arch/microblaze/mm/fault.c
+@@ -224,6 +224,8 @@ good_area:
+       if (unlikely(fault & VM_FAULT_ERROR)) {
+               if (fault & VM_FAULT_OOM)
+                       goto out_of_memory;
++              else if (fault & VM_FAULT_SIGSEGV)
++                      goto bad_area;
+               else if (fault & VM_FAULT_SIGBUS)
+                       goto do_sigbus;
+               BUG();
+--- a/arch/mips/mm/fault.c
++++ b/arch/mips/mm/fault.c
+@@ -158,6 +158,8 @@ good_area:
+       if (unlikely(fault & VM_FAULT_ERROR)) {
+               if (fault & VM_FAULT_OOM)
+                       goto out_of_memory;
++              else if (fault & VM_FAULT_SIGSEGV)
++                      goto bad_area;
+               else if (fault & VM_FAULT_SIGBUS)
+                       goto do_sigbus;
+               BUG();
+--- a/arch/mn10300/mm/fault.c
++++ b/arch/mn10300/mm/fault.c
+@@ -262,6 +262,8 @@ good_area:
+       if (unlikely(fault & VM_FAULT_ERROR)) {
+               if (fault & VM_FAULT_OOM)
+                       goto out_of_memory;
++              else if (fault & VM_FAULT_SIGSEGV)
++                      goto bad_area;
+               else if (fault & VM_FAULT_SIGBUS)
+                       goto do_sigbus;
+               BUG();
+--- a/arch/openrisc/mm/fault.c
++++ b/arch/openrisc/mm/fault.c
+@@ -171,6 +171,8 @@ good_area:
+       if (unlikely(fault & VM_FAULT_ERROR)) {
+               if (fault & VM_FAULT_OOM)
+                       goto out_of_memory;
++              else if (fault & VM_FAULT_SIGSEGV)
++                      goto bad_area;
+               else if (fault & VM_FAULT_SIGBUS)
+                       goto do_sigbus;
+               BUG();
+--- a/arch/parisc/mm/fault.c
++++ b/arch/parisc/mm/fault.c
+@@ -256,6 +256,8 @@ good_area:
+                */
+               if (fault & VM_FAULT_OOM)
+                       goto out_of_memory;
++              else if (fault & VM_FAULT_SIGSEGV)
++                      goto bad_area;
+               else if (fault & VM_FAULT_SIGBUS)
+                       goto bad_area;
+               BUG();
+--- a/arch/powerpc/mm/fault.c
++++ b/arch/powerpc/mm/fault.c
+@@ -432,6 +432,8 @@ good_area:
+        */
+       fault = handle_mm_fault(mm, vma, address, flags);
+       if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
++              if (fault & VM_FAULT_SIGSEGV)
++                      goto bad_area;
+               rc = mm_fault_error(regs, address, fault);
+               if (rc >= MM_FAULT_RETURN)
+                       goto bail;
+--- a/arch/powerpc/platforms/cell/spu_fault.c
++++ b/arch/powerpc/platforms/cell/spu_fault.c
+@@ -75,7 +75,7 @@ int spu_handle_mm_fault(struct mm_struct
+               if (*flt & VM_FAULT_OOM) {
+                       ret = -ENOMEM;
+                       goto out_unlock;
+-              } else if (*flt & VM_FAULT_SIGBUS) {
++              } else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
+                       ret = -EFAULT;
+                       goto out_unlock;
+               }
+--- a/arch/s390/mm/fault.c
++++ b/arch/s390/mm/fault.c
+@@ -239,6 +239,12 @@ static noinline void do_fault_error(stru
+                               do_no_context(regs);
+                       else
+                               pagefault_out_of_memory();
++              } else if (fault & VM_FAULT_SIGSEGV) {
++                      /* Kernel mode? Handle exceptions or die */
++                      if (!user_mode(regs))
++                              do_no_context(regs);
++                      else
++                              do_sigsegv(regs, SEGV_MAPERR);
+               } else if (fault & VM_FAULT_SIGBUS) {
+                       /* Kernel mode? Handle exceptions or die */
+                       if (!user_mode(regs))
+--- a/arch/score/mm/fault.c
++++ b/arch/score/mm/fault.c
+@@ -114,6 +114,8 @@ good_area:
+       if (unlikely(fault & VM_FAULT_ERROR)) {
+               if (fault & VM_FAULT_OOM)
+                       goto out_of_memory;
++              else if (fault & VM_FAULT_SIGSEGV)
++                      goto bad_area;
+               else if (fault & VM_FAULT_SIGBUS)
+                       goto do_sigbus;
+               BUG();
+--- a/arch/sh/mm/fault.c
++++ b/arch/sh/mm/fault.c
+@@ -353,6 +353,8 @@ mm_fault_error(struct pt_regs *regs, uns
+       } else {
+               if (fault & VM_FAULT_SIGBUS)
+                       do_sigbus(regs, error_code, address);
++              else if (fault & VM_FAULT_SIGSEGV)
++                      bad_area(regs, error_code, address);
+               else
+                       BUG();
+       }
+--- a/arch/sparc/mm/fault_32.c
++++ b/arch/sparc/mm/fault_32.c
+@@ -252,6 +252,8 @@ good_area:
+       if (unlikely(fault & VM_FAULT_ERROR)) {
+               if (fault & VM_FAULT_OOM)
+                       goto out_of_memory;
++              else if (fault & VM_FAULT_SIGSEGV)
++                      goto bad_area;
+               else if (fault & VM_FAULT_SIGBUS)
+                       goto do_sigbus;
+               BUG();
+--- a/arch/sparc/mm/fault_64.c
++++ b/arch/sparc/mm/fault_64.c
+@@ -448,6 +448,8 @@ good_area:
+       if (unlikely(fault & VM_FAULT_ERROR)) {
+               if (fault & VM_FAULT_OOM)
+                       goto out_of_memory;
++              else if (fault & VM_FAULT_SIGSEGV)
++                      goto bad_area;
+               else if (fault & VM_FAULT_SIGBUS)
+                       goto do_sigbus;
+               BUG();
+--- a/arch/tile/mm/fault.c
++++ b/arch/tile/mm/fault.c
+@@ -444,6 +444,8 @@ good_area:
+       if (unlikely(fault & VM_FAULT_ERROR)) {
+               if (fault & VM_FAULT_OOM)
+                       goto out_of_memory;
++              else if (fault & VM_FAULT_SIGSEGV)
++                      goto bad_area;
+               else if (fault & VM_FAULT_SIGBUS)
+                       goto do_sigbus;
+               BUG();
+--- a/arch/um/kernel/trap.c
++++ b/arch/um/kernel/trap.c
+@@ -80,6 +80,8 @@ good_area:
+               if (unlikely(fault & VM_FAULT_ERROR)) {
+                       if (fault & VM_FAULT_OOM) {
+                               goto out_of_memory;
++                      } else if (fault & VM_FAULT_SIGSEGV) {
++                              goto out;
+                       } else if (fault & VM_FAULT_SIGBUS) {
+                               err = -EACCES;
+                               goto out;
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -894,6 +894,8 @@ mm_fault_error(struct pt_regs *regs, uns
+               if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
+                            VM_FAULT_HWPOISON_LARGE))
+                       do_sigbus(regs, error_code, address, fault);
++              else if (fault & VM_FAULT_SIGSEGV)
++                      bad_area_nosemaphore(regs, error_code, address);
+               else
+                       BUG();
+       }
+--- a/arch/xtensa/mm/fault.c
++++ b/arch/xtensa/mm/fault.c
+@@ -117,6 +117,8 @@ good_area:
+       if (unlikely(fault & VM_FAULT_ERROR)) {
+               if (fault & VM_FAULT_OOM)
+                       goto out_of_memory;
++              else if (fault & VM_FAULT_SIGSEGV)
++                      goto bad_area;
+               else if (fault & VM_FAULT_SIGBUS)
+                       goto do_sigbus;
+               BUG();
+--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
++++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
+@@ -642,7 +642,7 @@ static int vvp_io_kernel_fault(struct vv
+               return 0;
+       }
+-      if (cfio->fault.ft_flags & VM_FAULT_SIGBUS) {
++      if (cfio->fault.ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
+               CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
+               return -EFAULT;
+       }
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1009,6 +1009,7 @@ static inline int page_mapped(struct pag
+ #define VM_FAULT_WRITE        0x0008  /* Special case for get_user_pages */
+ #define VM_FAULT_HWPOISON 0x0010      /* Hit poisoned small page */
+ #define VM_FAULT_HWPOISON_LARGE 0x0020  /* Hit poisoned large page. Index encoded in upper bits */
++#define VM_FAULT_SIGSEGV 0x0040
+ #define VM_FAULT_NOPAGE       0x0100  /* ->fault installed the pte, not return page */
+ #define VM_FAULT_LOCKED       0x0200  /* ->fault locked the returned page */
+@@ -1017,8 +1018,9 @@ static inline int page_mapped(struct pag
+ #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
+-#define VM_FAULT_ERROR        (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
+-                       VM_FAULT_FALLBACK | VM_FAULT_HWPOISON_LARGE)
++#define VM_FAULT_ERROR        (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
++                       VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
++                       VM_FAULT_FALLBACK)
+ /* Encode hstate index for a hwpoisoned large page */
+ #define VM_FAULT_SET_HINDEX(x) ((x) << 12)
+--- a/mm/ksm.c
++++ b/mm/ksm.c
+@@ -376,7 +376,7 @@ static int break_ksm(struct vm_area_stru
+               else
+                       ret = VM_FAULT_WRITE;
+               put_page(page);
+-      } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM)));
++      } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
+       /*
+        * We must loop because handle_mm_fault() may back out if there's
+        * any difficulty e.g. if pte accessed bit gets updated concurrently.
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1836,7 +1836,8 @@ long __get_user_pages(struct task_struct
+                                               else
+                                                       return -EFAULT;
+                                       }
+-                                      if (ret & VM_FAULT_SIGBUS)
++                                      if (ret & (VM_FAULT_SIGBUS |
++                                                 VM_FAULT_SIGSEGV))
+                                               return i ? i : -EFAULT;
+                                       BUG();
+                               }
+@@ -1946,7 +1947,7 @@ int fixup_user_fault(struct task_struct
+                       return -ENOMEM;
+               if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
+                       return -EHWPOISON;
+-              if (ret & VM_FAULT_SIGBUS)
++              if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
+                       return -EFAULT;
+               BUG();
+       }
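
Every architecture hunk above is an instance of the same dispatch. The
commit message suggests hoisting it into one generic helper long-term; the
sketch below (the helper name and the enum are hypothetical, not part of
the patch; the flag values match the include/linux/mm.h hunk above plus
the long-standing OOM/SIGBUS values) shows the shape such a helper could
take:

#include <assert.h>

#define VM_FAULT_OOM            0x0001
#define VM_FAULT_SIGBUS         0x0002
#define VM_FAULT_HWPOISON       0x0010
#define VM_FAULT_HWPOISON_LARGE 0x0020
#define VM_FAULT_SIGSEGV        0x0040

enum fault_action { ACT_OOM, ACT_SIGSEGV, ACT_SIGBUS };

/* Map a handle_mm_fault() error result to the action each architecture
 * currently open-codes: OOM first, then the new SIGSEGV case, then the
 * SIGBUS family. */
static enum fault_action fault_error_action(unsigned int fault)
{
	if (fault & VM_FAULT_OOM)
		return ACT_OOM;		/* per-arch: goto out_of_memory */
	if (fault & VM_FAULT_SIGSEGV)
		return ACT_SIGSEGV;	/* per-arch: goto bad_area */
	assert(fault & (VM_FAULT_SIGBUS | VM_FAULT_HWPOISON |
			VM_FAULT_HWPOISON_LARGE));
	return ACT_SIGBUS;		/* per-arch: goto do_sigbus */
}

int main(void)
{
	return fault_error_action(VM_FAULT_SIGSEGV) == ACT_SIGSEGV ? 0 : 1;
}
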
diff --git a/queue-3.14/vm-make-stack-guard-page-errors-return-vm_fault_sigsegv-rather-than-sigbus.patch b/queue-3.14/vm-make-stack-guard-page-errors-return-vm_fault_sigsegv-rather-than-sigbus.patch
new file mode 100644
index 0000000..633892b
--- /dev/null
@@ -0,0 +1,43 @@
+From 9c145c56d0c8a0b62e48c8d71e055ad0fb2012ba Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Thu, 29 Jan 2015 11:15:17 -0800
+Subject: vm: make stack guard page errors return VM_FAULT_SIGSEGV rather than SIGBUS
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit 9c145c56d0c8a0b62e48c8d71e055ad0fb2012ba upstream.
+
+The stack guard page error case has long incorrectly caused a SIGBUS
+rather than a SIGSEGV, but nobody actually noticed until commit
+fee7e49d4514 ("mm: propagate error from stack expansion even for guard
+page") because that error case was never actually triggered in any
+normal situations.
+
+Now that we actually report the error, people noticed the wrong signal
+that resulted.  So far, only the test suite of libsigsegv seems to have
+actually cared, but there are real applications that use libsigsegv, so
+let's not wait for any of those to break.
+
+Reported-and-tested-by: Takashi Iwai <tiwai@suse.de>
+Tested-by: Jan Engelhardt <jengelh@inai.de>
+Acked-by: Heiko Carstens <heiko.carstens@de.ibm.com> # "s390 still compiles and boots"
+Cc: linux-arch@vger.kernel.org
+Cc: stable@vger.kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/memory.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3236,7 +3236,7 @@ static int do_anonymous_page(struct mm_s
+       /* Check if we need to add a guard page to the stack */
+       if (check_stack_guard_page(vma, address) < 0)
+-              return VM_FAULT_SIGBUS;
++              return VM_FAULT_SIGSEGV;
+       /* Use the zero-page for reads */
+       if (!(flags & FAULT_FLAG_WRITE)) {
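
The user-visible change is easy to observe: overflow the stack and see
which signal arrives. The minimal test program below (illustrative;
roughly what the libsigsegv test suite exercises) prints SIGSEGV on
kernels with this fix, where a guard-page hit previously surfaced as
SIGBUS. The handler must run on an alternate stack, since the normal
stack is exactly what has been exhausted:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void handler(int sig)
{
	/* write() is async-signal-safe; printf() is not */
	if (sig == SIGSEGV)
		write(STDOUT_FILENO, "got SIGSEGV\n", 12);
	else
		write(STDOUT_FILENO, "got SIGBUS\n", 11);
	_exit(0);
}

static int recurse(int i)
{
	volatile char pad[4096];	/* burn one page per frame */

	pad[0] = (char)i;
	return recurse(i + 1) + pad[0];	/* not a tail call: frames pile up */
}

int main(void)
{
	static char altstack[64 * 1024];
	stack_t ss = { .ss_sp = altstack, .ss_size = sizeof(altstack) };
	struct sigaction sa = { .sa_handler = handler, .sa_flags = SA_ONSTACK };

	sigaltstack(&ss, NULL);		/* handler gets its own stack */
	sigaction(SIGSEGV, &sa, NULL);
	sigaction(SIGBUS, &sa, NULL);
	return recurse(0);
}
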
diff --git a/queue-3.14/x86-mm-move-mmap_sem-unlock-from-mm_fault_error-to-caller.patch b/queue-3.14/x86-mm-move-mmap_sem-unlock-from-mm_fault_error-to-caller.patch
new file mode 100644
index 0000000..18668c9
--- /dev/null
@@ -0,0 +1,65 @@
+From 7fb08eca45270d0ae86e1ad9d39c40b7a55d0190 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Mon, 15 Dec 2014 14:46:06 -0800
+Subject: x86: mm: move mmap_sem unlock from mm_fault_error() to caller
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit 7fb08eca45270d0ae86e1ad9d39c40b7a55d0190 upstream.
+
+This replaces four copies in various stages of mm_fault_error() handling
+with just a single one.  It will also allow for more natural placement
+of the unlocking after some further cleanup.
+
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/mm/fault.c |    8 +-------
+ 1 file changed, 1 insertion(+), 7 deletions(-)
+
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -833,11 +833,8 @@ do_sigbus(struct pt_regs *regs, unsigned
+         unsigned int fault)
+ {
+       struct task_struct *tsk = current;
+-      struct mm_struct *mm = tsk->mm;
+       int code = BUS_ADRERR;
+-      up_read(&mm->mmap_sem);
+-
+       /* Kernel mode? Handle exceptions or die: */
+       if (!(error_code & PF_USER)) {
+               no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
+@@ -868,7 +865,6 @@ mm_fault_error(struct pt_regs *regs, uns
+              unsigned long address, unsigned int fault)
+ {
+       if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
+-              up_read(&current->mm->mmap_sem);
+               no_context(regs, error_code, address, 0, 0);
+               return;
+       }
+@@ -876,14 +872,11 @@ mm_fault_error(struct pt_regs *regs, uns
+       if (fault & VM_FAULT_OOM) {
+               /* Kernel mode? Handle exceptions or die: */
+               if (!(error_code & PF_USER)) {
+-                      up_read(&current->mm->mmap_sem);
+                       no_context(regs, error_code, address,
+                                  SIGSEGV, SEGV_MAPERR);
+                       return;
+               }
+-              up_read(&current->mm->mmap_sem);
+-
+               /*
+                * We ran out of memory, call the OOM killer, and return the
+                * userspace (which will retry the fault, or kill us if we got
+@@ -1218,6 +1211,7 @@ good_area:
+               return;
+       if (unlikely(fault & VM_FAULT_ERROR)) {
++              up_read(&mm->mmap_sem);
+               mm_fault_error(regs, error_code, address, fault);
+               return;
+       }
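
The pattern here is general: rather than duplicating the unlock on each of
a callee's error paths, release the lock once in the caller before
dispatching. A minimal userspace sketch of the resulting shape, with a
pthread rwlock standing in for mmap_sem (illustrative only; none of this
code is from the patch):

#include <pthread.h>

static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;

/* Runs with mmap_sem already released, so no error branch needs its own
 * unlock -- the four up_read() copies collapse into one. */
static void fault_error(unsigned int fault)
{
	(void)fault;	/* decide between OOM, SIGSEGV, SIGBUS here */
}

static void do_page_fault(unsigned int fault, int is_error)
{
	pthread_rwlock_rdlock(&mmap_sem);
	/* ... look up the vma, call the fault handler ... */
	if (is_error) {
		pthread_rwlock_unlock(&mmap_sem);	/* single unlock site */
		fault_error(fault);
		return;
	}
	/* ... success path ... */
	pthread_rwlock_unlock(&mmap_sem);
}

int main(void)
{
	do_page_fault(0, 0);
	do_page_fault(1, 1);
	return 0;
}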