--- /dev/null
+From f6f91b0d9fd971c630cef908dde8fe8795aefbf8 Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+Date: Tue, 23 Jul 2013 18:37:00 +0100
+Subject: ARM: allow kuser helpers to be removed from the vector page
+
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+
+commit f6f91b0d9fd971c630cef908dde8fe8795aefbf8 upstream.
+
+Provide a kernel configuration option to allow the kernel user helpers
+to be removed from the vector page, thereby preventing their use with
+ROP (return-oriented programming) attacks. This option is only
+visible for CPU architectures which natively support all the operations
+which kernel user helpers would normally provide, and must be enabled
+with caution.
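+
+As a sketch of what is at stake (the helper address below comes from
+the long-standing kuser ABI, not from this patch): generic userspace
+reaches the TLS helper through its fixed address, while a
+platform-specific binary can use the native instruction and never
+touch the vector page:
+
+  /* hypothetical userspace snippet, assuming the documented kuser ABI */
+  typedef unsigned int (*kuser_get_tls_t)(void);
+  #define __kuser_get_tls ((kuser_get_tls_t)0xffff0fe0)
+
+  static unsigned int get_tls(void)
+  {
+  #ifdef PLATFORM_HAS_TLS_REG     /* hypothetical build-time flag */
+      unsigned int tls;
+      /* ARMv6K+: read TPIDRURO directly, no kuser helper needed */
+      asm("mrc p15, 0, %0, c13, c0, 3" : "=r" (tls));
+      return tls;
+  #else
+      return __kuser_get_tls();   /* SIGILLs once helpers are removed */
+  #endif
+  }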
+
+Acked-by: Nicolas Pitre <nico@linaro.org>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kernel/entry-armv.S | 3 +++
+ arch/arm/kernel/traps.c | 23 ++++++++++++++---------
+ arch/arm/mm/Kconfig | 34 ++++++++++++++++++++++++++++++++++
+ 3 files changed, 51 insertions(+), 9 deletions(-)
+
+--- a/arch/arm/kernel/entry-armv.S
++++ b/arch/arm/kernel/entry-armv.S
+@@ -752,6 +752,7 @@ ENDPROC(__switch_to)
+ .endr
+ .endm
+
++#ifdef CONFIG_KUSER_HELPERS
+ .align 5
+ .globl __kuser_helper_start
+ __kuser_helper_start:
+@@ -938,6 +939,8 @@ __kuser_helper_version: @ 0xffff0ffc
+ .globl __kuser_helper_end
+ __kuser_helper_end:
+
++#endif
++
+ THUMB( .thumb )
+
+ /*
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -800,23 +800,32 @@ void __init trap_init(void)
+ return;
+ }
+
+-static void __init kuser_get_tls_init(unsigned long vectors)
++#ifdef CONFIG_KUSER_HELPERS
++static void __init kuser_init(void *vectors)
+ {
++ extern char __kuser_helper_start[], __kuser_helper_end[];
++ int kuser_sz = __kuser_helper_end - __kuser_helper_start;
++
++ memcpy(vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
++
+ /*
+ * vectors + 0xfe0 = __kuser_get_tls
+ * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8
+ */
+ if (tls_emu || has_tls_reg)
+- memcpy((void *)vectors + 0xfe0, (void *)vectors + 0xfe8, 4);
++ memcpy(vectors + 0xfe0, vectors + 0xfe8, 4);
++}
++#else
++static void __init kuser_init(void *vectors)
++{
+ }
++#endif
+
+ void __init early_trap_init(void *vectors_base)
+ {
+ unsigned long vectors = (unsigned long)vectors_base;
+ extern char __stubs_start[], __stubs_end[];
+ extern char __vectors_start[], __vectors_end[];
+- extern char __kuser_helper_start[], __kuser_helper_end[];
+- int kuser_sz = __kuser_helper_end - __kuser_helper_start;
+ unsigned i;
+
+ vectors_page = vectors_base;
+@@ -837,12 +846,8 @@ void __init early_trap_init(void *vector
+ */
+ memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
+ memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);
+- memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
+
+- /*
+- * Do processor specific fixups for the kuser helpers
+- */
+- kuser_get_tls_init(vectors);
++ kuser_init(vectors_base);
+
+ /*
+ * Copy signal return handlers into the vector page, and
+--- a/arch/arm/mm/Kconfig
++++ b/arch/arm/mm/Kconfig
+@@ -411,24 +411,28 @@ config CPU_32v3
+ select CPU_USE_DOMAINS if MMU
+ select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
+ select TLS_REG_EMUL if SMP || !MMU
++ select NEED_KUSER_HELPERS
+
+ config CPU_32v4
+ bool
+ select CPU_USE_DOMAINS if MMU
+ select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
+ select TLS_REG_EMUL if SMP || !MMU
++ select NEED_KUSER_HELPERS
+
+ config CPU_32v4T
+ bool
+ select CPU_USE_DOMAINS if MMU
+ select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
+ select TLS_REG_EMUL if SMP || !MMU
++ select NEED_KUSER_HELPERS
+
+ config CPU_32v5
+ bool
+ select CPU_USE_DOMAINS if MMU
+ select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
+ select TLS_REG_EMUL if SMP || !MMU
++ select NEED_KUSER_HELPERS
+
+ config CPU_32v6
+ bool
+@@ -756,6 +760,7 @@ config CPU_BPREDICT_DISABLE
+
+ config TLS_REG_EMUL
+ bool
++ select NEED_KUSER_HELPERS
+ help
+ An SMP system using a pre-ARMv6 processor (there are apparently
+ a few prototypes like that in existence) and therefore access to
+@@ -763,11 +768,40 @@ config TLS_REG_EMUL
+
+ config NEEDS_SYSCALL_FOR_CMPXCHG
+ bool
++ select NEED_KUSER_HELPERS
+ help
+ SMP on a pre-ARMv6 processor? Well OK then.
+ Forget about fast user space cmpxchg support.
+ It is just not possible.
+
++config NEED_KUSER_HELPERS
++ bool
++
++config KUSER_HELPERS
++ bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
++ default y
++ help
++ Warning: disabling this option may break user programs.
++
++ Provide kuser helpers in the vector page. The kernel provides
++ helper code to userspace in read-only form at a fixed location
++ in the high vector page to allow userspace to be independent of
++ the CPU type fitted to the system. This permits binaries to be
++ run on ARMv4 through to ARMv7 without modification.
++
++ However, the fixed address nature of these helpers can be used
++ by ROP (return-oriented programming) authors when creating
++ exploits.
++
++ If all of the binaries and libraries which run on your platform
++ are built specifically for your platform, and make no use of
++ these helpers, then you can turn this option off. However,
++ when such a binary or library is run, it will receive a SIGILL
++ signal, which will terminate the program.
++
++ Say N here only if you are absolutely certain that you do not
++ need these helpers; otherwise, the safe option is to say Y.
++
+ config DMA_CACHE_RWFO
+ bool "Enable read/write for ownership DMA cache maintenance"
+ depends on CPU_V6K && SMP
--- /dev/null
+From e0d407564b532d978b03ceccebd224a05d02f111 Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+Date: Sat, 3 Aug 2013 10:30:05 +0100
+Subject: ARM: fix a cockup in 48be69a02 (ARM: move signal handlers into a vdso-like page)
+
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+
+commit e0d407564b532d978b03ceccebd224a05d02f111 upstream.
+
+Unfortunately, I never committed the fix to a nasty oops which can
+occur as a result of that commit:
+
+------------[ cut here ]------------
+kernel BUG at /home/olof/work/batch/include/linux/mm.h:414!
+Internal error: Oops - BUG: 0 [#1] PREEMPT SMP ARM
+Modules linked in:
+CPU: 0 PID: 490 Comm: killall5 Not tainted 3.11.0-rc3-00288-gabe0308 #53
+task: e90acac0 ti: e9be8000 task.ti: e9be8000
+PC is at special_mapping_fault+0xa4/0xc4
+LR is at __do_fault+0x68/0x48c
+
+This doesn't show up unless you do quite a bit of testing; a simple
+boot test does not do this, so all my nightly tests were passing fine.
+
+The reason for this is that install_special_mapping() expects the
+page array to stick around, but the single page pointer inserted
+here was stored on the kernel stack, so the array was gone by the
+time the page was faulted back in.
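+
+A minimal sketch of the broken pattern (hypothetical function name):
+
+  int broken_setup(struct mm_struct *mm, unsigned long addr)
+  {
+      struct page *page = get_signal_page();  /* lives on the stack */
+
+      /* install_special_mapping() stores &page for use at fault
+       * time, but the array dies when this frame returns. */
+      return install_special_mapping(mm, addr, PAGE_SIZE,
+                                     VM_READ | VM_EXEC, &page);
+  }
+
+Keeping the page pointer in a static, as below, gives the array the
+lifetime that install_special_mapping() assumes.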
+
+Reported-by: Olof Johansson <olof@lixom.net>
+Tested-by: Olof Johansson <olof@lixom.net>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kernel/process.c | 9 +++++----
+ arch/arm/kernel/signal.c | 41 +++++++++++++++++++----------------------
+ 2 files changed, 24 insertions(+), 26 deletions(-)
+
+--- a/arch/arm/kernel/process.c
++++ b/arch/arm/kernel/process.c
+@@ -478,17 +478,18 @@ const char *arch_vma_name(struct vm_area
+ "[sigpage]" : NULL;
+ }
+
++static struct page *signal_page;
+ extern struct page *get_signal_page(void);
+
+ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+ {
+ struct mm_struct *mm = current->mm;
+- struct page *page;
+ unsigned long addr;
+ int ret;
+
+- page = get_signal_page();
+- if (!page)
++ if (!signal_page)
++ signal_page = get_signal_page();
++ if (!signal_page)
+ return -ENOMEM;
+
+ down_write(&mm->mmap_sem);
+@@ -500,7 +501,7 @@ int arch_setup_additional_pages(struct l
+
+ ret = install_special_mapping(mm, addr, PAGE_SIZE,
+ VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
+- &page);
++ &signal_page);
+
+ if (ret == 0)
+ mm->context.sigpage = addr;
+--- a/arch/arm/kernel/signal.c
++++ b/arch/arm/kernel/signal.c
+@@ -610,35 +610,32 @@ do_work_pending(struct pt_regs *regs, un
+ return 0;
+ }
+
+-static struct page *signal_page;
+-
+ struct page *get_signal_page(void)
+ {
+- if (!signal_page) {
+- unsigned long ptr;
+- unsigned offset;
+- void *addr;
++ unsigned long ptr;
++ unsigned offset;
++ struct page *page;
++ void *addr;
+
+- signal_page = alloc_pages(GFP_KERNEL, 0);
++ page = alloc_pages(GFP_KERNEL, 0);
+
+- if (!signal_page)
+- return NULL;
++ if (!page)
++ return NULL;
+
+- addr = page_address(signal_page);
++ addr = page_address(page);
+
+- /* Give the signal return code some randomness */
+- offset = 0x200 + (get_random_int() & 0x7fc);
+- signal_return_offset = offset;
++ /* Give the signal return code some randomness */
++ offset = 0x200 + (get_random_int() & 0x7fc);
++ signal_return_offset = offset;
+
+- /*
+- * Copy signal return handlers into the vector page, and
+- * set sigreturn to be a pointer to these.
+- */
+- memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
++ /*
++ * Copy signal return handlers into the vector page, and
++ * set sigreturn to be a pointer to these.
++ */
++ memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
+
+- ptr = (unsigned long)addr + offset;
+- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
+- }
++ ptr = (unsigned long)addr + offset;
++ flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
+
+- return signal_page;
++ return page;
+ }
--- /dev/null
+From 8c0cc8a5d90bc7373a7a9e7f7a40eb41f51e03fc Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+Date: Sat, 3 Aug 2013 10:39:51 +0100
+Subject: ARM: fix nommu builds with 48be69a02 (ARM: move signal handlers into a vdso-like page)
+
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+
+commit 8c0cc8a5d90bc7373a7a9e7f7a40eb41f51e03fc upstream.
+
+Olof reports that noMMU builds error out with:
+
+arch/arm/kernel/signal.c: In function 'setup_return':
+arch/arm/kernel/signal.c:413:25: error: 'mm_context_t' has no member named 'sigpage'
+
+This shows one of the evilnesses of IS_ENABLED(). Get rid of it here
+and replace it with #ifdef's - and as no noMMU platform can make use
+of sigpage, depend on CONFIG_MMU, not CONFIG_ARM_MPU.
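+
+The underlying problem, sketched: IS_ENABLED() only yields a constant
+condition, so the dead branch must still compile, and on noMMU the
+mm_context_t member it touches does not exist:
+
+  if (IS_ENABLED(CONFIG_MMU)) {
+      /* still parsed when CONFIG_MMU=n: there is no 'sigpage' */
+      retcode = mm->context.sigpage + signal_return_offset;
+  }
+
+An #ifdef removes the text before the compiler ever sees it.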
+
+Reported-by: Olof Johansson <olof@lixom.net>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/include/asm/elf.h | 2 ++
+ arch/arm/kernel/signal.c | 5 ++++-
+ 2 files changed, 6 insertions(+), 1 deletion(-)
+
+--- a/arch/arm/include/asm/elf.h
++++ b/arch/arm/include/asm/elf.h
+@@ -130,8 +130,10 @@ struct mm_struct;
+ extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+ #define arch_randomize_brk arch_randomize_brk
+
++#ifdef CONFIG_MMU
+ #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+ struct linux_binprm;
+ int arch_setup_additional_pages(struct linux_binprm *, int);
++#endif
+
+ #endif
+--- a/arch/arm/kernel/signal.c
++++ b/arch/arm/kernel/signal.c
+@@ -398,6 +398,7 @@ setup_return(struct pt_regs *regs, struc
+ __put_user(sigreturn_codes[idx+1], rc+1))
+ return 1;
+
++#ifdef CONFIG_MMU
+ if (cpsr & MODE32_BIT) {
+ struct mm_struct *mm = current->mm;
+ /*
+@@ -408,7 +409,9 @@ setup_return(struct pt_regs *regs, struc
+ retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
+ retcode = mm->context.sigpage + signal_return_offset +
+ (idx << 2) + thumb;
+- } else {
++ } else
++#endif
++ {
+ /*
+ * Ensure that the instruction cache sees
+ * the return code written onto the stack.
--- /dev/null
+From a5463cd3435475386cbbe7b06e01292ac169d36f Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+Date: Wed, 31 Jul 2013 21:58:56 +0100
+Subject: ARM: make vectors page inaccessible from userspace
+
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+
+commit a5463cd3435475386cbbe7b06e01292ac169d36f upstream.
+
+If kuser helpers are not provided by the kernel, disable user access to
+the vectors page. With the kuser helpers gone, there is no reason for
+this page to be visible to userspace.
+
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/include/asm/page.h | 2 ++
+ arch/arm/kernel/process.c | 7 ++++++-
+ arch/arm/mm/mmu.c | 4 ++++
+ 3 files changed, 12 insertions(+), 1 deletion(-)
+
+--- a/arch/arm/include/asm/page.h
++++ b/arch/arm/include/asm/page.h
+@@ -142,7 +142,9 @@ extern void __cpu_copy_user_highpage(str
+ #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
+ extern void copy_page(void *to, const void *from);
+
++#ifdef CONFIG_KUSER_HELPERS
+ #define __HAVE_ARCH_GATE_AREA 1
++#endif
+
+ #ifdef CONFIG_ARM_LPAE
+ #include <asm/pgtable-3level-types.h>
+--- a/arch/arm/kernel/process.c
++++ b/arch/arm/kernel/process.c
+@@ -433,6 +433,7 @@ unsigned long arch_randomize_brk(struct
+ }
+
+ #ifdef CONFIG_MMU
++#ifdef CONFIG_KUSER_HELPERS
+ /*
+ * The vectors page is always readable from user space for the
+ * atomic helpers. Insert it into the gate_vma so that it is visible
+@@ -465,10 +466,14 @@ int in_gate_area_no_mm(unsigned long add
+ {
+ return in_gate_area(NULL, addr);
+ }
++#define is_gate_vma(vma) ((vma) == &gate_vma)
++#else
++#define is_gate_vma(vma) 0
++#endif
+
+ const char *arch_vma_name(struct vm_area_struct *vma)
+ {
+- return (vma == &gate_vma) ? "[vectors]" :
++ return is_gate_vma(vma) ? "[vectors]" :
+ (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
+ "[sigpage]" : NULL;
+ }
+--- a/arch/arm/mm/mmu.c
++++ b/arch/arm/mm/mmu.c
+@@ -1220,7 +1220,11 @@ static void __init devicemaps_init(struc
+ map.pfn = __phys_to_pfn(virt_to_phys(vectors));
+ map.virtual = 0xffff0000;
+ map.length = PAGE_SIZE;
++#ifdef CONFIG_KUSER_HELPERS
+ map.type = MT_HIGH_VECTORS;
++#else
++ map.type = MT_LOW_VECTORS;
++#endif
+ create_mapping(&map);
+
+ if (!vectors_high()) {
--- /dev/null
+From 48be69a026b2c17350a5ef18a1959a919f60be7d Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+Date: Wed, 24 Jul 2013 00:29:18 +0100
+Subject: ARM: move signal handlers into a vdso-like page
+
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+
+commit 48be69a026b2c17350a5ef18a1959a919f60be7d upstream.
+
+Move the signal handlers into a VDSO page rather than keeping them in
+the vectors page. This allows us to place them randomly within this
+page, and also map the page at a random location within userspace
+further protecting these code fragments from ROP attacks. The new
+VDSO page is also poisoned in the same way as the vector page.
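+
+For reference, the in-page randomisation used below is just this
+(a sketch of the arithmetic only):
+
+  /* 0x7fc keeps the low two bits clear, so the handlers land at a
+   * 4-byte aligned offset in [0x200, 0x9fc] within the page. */
+  offset = 0x200 + (get_random_int() & 0x7fc);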
+
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/include/asm/elf.h | 4 +++
+ arch/arm/include/asm/mmu.h | 1
+ arch/arm/kernel/process.c | 40 +++++++++++++++++++++++++++++++++---
+ arch/arm/kernel/signal.c | 49 ++++++++++++++++++++++++++++++++++++++++-----
+ arch/arm/kernel/signal.h | 12 -----------
+ arch/arm/kernel/traps.c | 9 --------
+ 6 files changed, 86 insertions(+), 29 deletions(-)
+
+--- a/arch/arm/include/asm/elf.h
++++ b/arch/arm/include/asm/elf.h
+@@ -130,4 +130,8 @@ struct mm_struct;
+ extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+ #define arch_randomize_brk arch_randomize_brk
+
++#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
++struct linux_binprm;
++int arch_setup_additional_pages(struct linux_binprm *, int);
++
+ #endif
+--- a/arch/arm/include/asm/mmu.h
++++ b/arch/arm/include/asm/mmu.h
+@@ -8,6 +8,7 @@ typedef struct {
+ atomic64_t id;
+ #endif
+ unsigned int vmalloc_seq;
++ unsigned long sigpage;
+ } mm_context_t;
+
+ #ifdef CONFIG_CPU_HAS_ASID
+--- a/arch/arm/kernel/process.c
++++ b/arch/arm/kernel/process.c
+@@ -435,8 +435,8 @@ unsigned long arch_randomize_brk(struct
+ #ifdef CONFIG_MMU
+ /*
+ * The vectors page is always readable from user space for the
+- * atomic helpers and the signal restart code. Insert it into the
+- * gate_vma so that it is visible through ptrace and /proc/<pid>/mem.
++ * atomic helpers. Insert it into the gate_vma so that it is visible
++ * through ptrace and /proc/<pid>/mem.
+ */
+ static struct vm_area_struct gate_vma = {
+ .vm_start = 0xffff0000,
+@@ -468,6 +468,40 @@ int in_gate_area_no_mm(unsigned long add
+
+ const char *arch_vma_name(struct vm_area_struct *vma)
+ {
+- return (vma == &gate_vma) ? "[vectors]" : NULL;
++ return (vma == &gate_vma) ? "[vectors]" :
++ (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
++ "[sigpage]" : NULL;
++}
++
++extern struct page *get_signal_page(void);
++
++int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
++{
++ struct mm_struct *mm = current->mm;
++ struct page *page;
++ unsigned long addr;
++ int ret;
++
++ page = get_signal_page();
++ if (!page)
++ return -ENOMEM;
++
++ down_write(&mm->mmap_sem);
++ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
++ if (IS_ERR_VALUE(addr)) {
++ ret = addr;
++ goto up_fail;
++ }
++
++ ret = install_special_mapping(mm, addr, PAGE_SIZE,
++ VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
++ &page);
++
++ if (ret == 0)
++ mm->context.sigpage = addr;
++
++ up_fail:
++ up_write(&mm->mmap_sem);
++ return ret;
+ }
+ #endif
+--- a/arch/arm/kernel/signal.c
++++ b/arch/arm/kernel/signal.c
+@@ -8,6 +8,7 @@
+ * published by the Free Software Foundation.
+ */
+ #include <linux/errno.h>
++#include <linux/random.h>
+ #include <linux/signal.h>
+ #include <linux/personality.h>
+ #include <linux/uaccess.h>
+@@ -15,12 +16,11 @@
+
+ #include <asm/elf.h>
+ #include <asm/cacheflush.h>
++#include <asm/traps.h>
+ #include <asm/ucontext.h>
+ #include <asm/unistd.h>
+ #include <asm/vfp.h>
+
+-#include "signal.h"
+-
+ /*
+ * For ARM syscalls, we encode the syscall number into the instruction.
+ */
+@@ -40,11 +40,13 @@
+ #define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
+ #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
+
+-const unsigned long sigreturn_codes[7] = {
++static const unsigned long sigreturn_codes[7] = {
+ MOV_R7_NR_SIGRETURN, SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN,
+ MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
+ };
+
++static unsigned long signal_return_offset;
++
+ #ifdef CONFIG_CRUNCH
+ static int preserve_crunch_context(struct crunch_sigframe __user *frame)
+ {
+@@ -397,11 +399,15 @@ setup_return(struct pt_regs *regs, struc
+ return 1;
+
+ if (cpsr & MODE32_BIT) {
++ struct mm_struct *mm = current->mm;
+ /*
+- * 32-bit code can use the new high-page
+- * signal return code support.
++ * 32-bit code can use the signal return page
++ * except when the MPU has protected the vectors
++ * page from PL0
+ */
+ retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
++ retcode = mm->context.sigpage + signal_return_offset +
++ (idx << 2) + thumb;
+ } else {
+ /*
+ * Ensure that the instruction cache sees
+@@ -603,3 +609,36 @@ do_work_pending(struct pt_regs *regs, un
+ } while (thread_flags & _TIF_WORK_MASK);
+ return 0;
+ }
++
++static struct page *signal_page;
++
++struct page *get_signal_page(void)
++{
++ if (!signal_page) {
++ unsigned long ptr;
++ unsigned offset;
++ void *addr;
++
++ signal_page = alloc_pages(GFP_KERNEL, 0);
++
++ if (!signal_page)
++ return NULL;
++
++ addr = page_address(signal_page);
++
++ /* Give the signal return code some randomness */
++ offset = 0x200 + (get_random_int() & 0x7fc);
++ signal_return_offset = offset;
++
++ /*
++ * Copy signal return handlers into the vector page, and
++ * set sigreturn to be a pointer to these.
++ */
++ memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
++
++ ptr = (unsigned long)addr + offset;
++ flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
++ }
++
++ return signal_page;
++}
+--- a/arch/arm/kernel/signal.h
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/*
+- * linux/arch/arm/kernel/signal.h
+- *
+- * Copyright (C) 2005-2009 Russell King.
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- */
+-#define KERN_SIGRETURN_CODE (CONFIG_VECTORS_BASE + 0x00000500)
+-
+-extern const unsigned long sigreturn_codes[7];
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -35,8 +35,6 @@
+ #include <asm/tls.h>
+ #include <asm/system_misc.h>
+
+-#include "signal.h"
+-
+ static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
+
+ void *vectors_page;
+@@ -849,13 +847,6 @@ void __init early_trap_init(void *vector
+
+ kuser_init(vectors_base);
+
+- /*
+- * Copy signal return handlers into the vector page, and
+- * set sigreturn to be a pointer to these.
+- */
+- memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
+- sigreturn_codes, sizeof(sigreturn_codes));
+-
+ flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
+ modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
+ }
--- /dev/null
+From 19accfd373847ac3d10623c5d20f948846299741 Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+Date: Thu, 4 Jul 2013 11:40:32 +0100
+Subject: ARM: move vector stubs
+
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+
+commit 19accfd373847ac3d10623c5d20f948846299741 upstream.
+
+Move the machine vector stubs into the page above the vector page,
+which we can prevent from being visible to userspace. Also move
+the reset stub, and place the swi vector at a location that the
+'ldr' can reach.
+
+This hides pointers into the kernel which could give valuable
+information to attackers, and reduces the number of exploitable
+instructions at a fixed address.
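+
+A hedged aside on "a location that the 'ldr' can reach": an ARM ldr
+literal can only address pc +/- 4095 bytes, and the arithmetic works
+out roughly as follows:
+
+  ldr sits at 0xffff0008, so pc reads as 0xffff0010 (+8)
+  first word of the stubs page:  0xffff1000
+  offset:  0xffff1000 - 0xffff0010 = 0xff0  (4080 <= 4095)
+
+Only the first few words of the stubs page are therefore addressable
+from the vector entry, which is why the swi pointer must be placed at
+the very start of that page.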
+
+Acked-by: Nicolas Pitre <nico@linaro.org>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/Kconfig | 3 +-
+ arch/arm/kernel/entry-armv.S | 50 ++++++++++++++++++++-----------------------
+ arch/arm/kernel/traps.c | 4 +--
+ arch/arm/mm/mmu.c | 10 +++++++-
+ 4 files changed, 37 insertions(+), 30 deletions(-)
+
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -213,7 +213,8 @@ config VECTORS_BASE
+ default DRAM_BASE if REMAP_VECTORS_TO_RAM
+ default 0x00000000
+ help
+- The base address of exception vectors.
++ The base address of exception vectors. This must be two pages
++ in size.
+
+ config ARM_PATCH_PHYS_VIRT
+ bool "Patch physical to virtual translations at runtime" if EMBEDDED
+--- a/arch/arm/kernel/entry-armv.S
++++ b/arch/arm/kernel/entry-armv.S
+@@ -943,9 +943,9 @@ __kuser_helper_end:
+ /*
+ * Vector stubs.
+ *
+- * This code is copied to 0xffff0200 so we can use branches in the
+- * vectors, rather than ldr's. Note that this code must not
+- * exceed 0x300 bytes.
++ * This code is copied to 0xffff1000 so we can use branches in the
++ * vectors, rather than ldr's. Note that this code must not exceed
++ * a page size.
+ *
+ * Common stub entry macro:
+ * Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
+@@ -994,6 +994,15 @@ ENDPROC(vector_\name)
+
+ .globl __stubs_start
+ __stubs_start:
++ @ This must be the first word
++ .word vector_swi
++
++vector_rst:
++ ARM( swi SYS_ERROR0 )
++ THUMB( svc #0 )
++ THUMB( nop )
++ b vector_und
++
+ /*
+ * Interrupt dispatcher
+ */
+@@ -1088,6 +1097,16 @@ __stubs_start:
+ .align 5
+
+ /*=============================================================================
++ * Address exception handler
++ *-----------------------------------------------------------------------------
++ * These aren't too critical.
++ * (they're not supposed to happen, and won't happen in 32-bit data mode).
++ */
++
++vector_addrexcptn:
++ b vector_addrexcptn
++
++/*=============================================================================
+ * Undefined FIQs
+ *-----------------------------------------------------------------------------
+ * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
+@@ -1100,35 +1119,14 @@ __stubs_start:
+ vector_fiq:
+ subs pc, lr, #4
+
+-/*=============================================================================
+- * Address exception handler
+- *-----------------------------------------------------------------------------
+- * These aren't too critical.
+- * (they're not supposed to happen, and won't happen in 32-bit data mode).
+- */
+-
+-vector_addrexcptn:
+- b vector_addrexcptn
+-
+-/*
+- * We group all the following data together to optimise
+- * for CPUs with separate I & D caches.
+- */
+- .align 5
+-
+-.LCvswi:
+- .word vector_swi
+-
+ .globl __stubs_end
+ __stubs_end:
+
+- .equ stubs_offset, __vectors_start + 0x200 - __stubs_start
++ .equ stubs_offset, __vectors_start + 0x1000 - __stubs_start
+
+ .globl __vectors_start
+ __vectors_start:
+- ARM( swi SYS_ERROR0 )
+- THUMB( svc #0 )
+- THUMB( nop )
++ W(b) vector_rst + stubs_offset
+ W(b) vector_und + stubs_offset
+ W(ldr) pc, .LCvswi + stubs_offset
+ W(b) vector_pabt + stubs_offset
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -836,7 +836,7 @@ void __init early_trap_init(void *vector
+ * are visible to the instruction stream.
+ */
+ memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
+- memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start);
++ memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);
+ memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
+
+ /*
+@@ -851,6 +851,6 @@ void __init early_trap_init(void *vector
+ memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
+ sigreturn_codes, sizeof(sigreturn_codes));
+
+- flush_icache_range(vectors, vectors + PAGE_SIZE);
++ flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
+ modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
+ }
+--- a/arch/arm/mm/mmu.c
++++ b/arch/arm/mm/mmu.c
+@@ -1175,7 +1175,7 @@ static void __init devicemaps_init(struc
+ /*
+ * Allocate the vector page early.
+ */
+- vectors = early_alloc(PAGE_SIZE);
++ vectors = early_alloc(PAGE_SIZE * 2);
+
+ early_trap_init(vectors);
+
+@@ -1225,10 +1225,18 @@ static void __init devicemaps_init(struc
+
+ if (!vectors_high()) {
+ map.virtual = 0;
++ map.length = PAGE_SIZE * 2;
+ map.type = MT_LOW_VECTORS;
+ create_mapping(&map);
+ }
+
++ /* Now create a kernel read-only mapping */
++ map.pfn += 1;
++ map.virtual = 0xffff0000 + PAGE_SIZE;
++ map.length = PAGE_SIZE;
++ map.type = MT_LOW_VECTORS;
++ create_mapping(&map);
++
+ /*
+ * Ask the machine support to map in the statically mapped devices.
+ */
--- /dev/null
+From 5b43e7a383d69381ffe53423e46dd0fafae07da3 Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+Date: Thu, 4 Jul 2013 11:32:04 +0100
+Subject: ARM: poison memory between kuser helpers
+
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+
+commit 5b43e7a383d69381ffe53423e46dd0fafae07da3 upstream.
+
+Poison the memory between each kuser helper. This ensures that any
+branch between the kuser helpers will be appropriately trapped.
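+
+What the new kuser_pad macro computes, modelled in C for illustration
+(the assembler version in the diff is the authoritative one):
+
+  #include <stdint.h>
+  #include <stddef.h>
+
+  /* Fill the rest of a helper's fixed slot with poison words. */
+  static size_t kuser_pad(uint8_t *sym, size_t size, size_t used)
+  {
+      while (used & 3)               /* byte-pad to a word boundary */
+          sym[used++] = 0;
+      while (used < size) {          /* poison the remaining words  */
+          *(uint32_t *)(sym + used) = 0xe7fddef1;
+          used += 4;
+      }
+      return used;
+  }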
+
+Acked-by: Nicolas Pitre <nico@linaro.org>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kernel/entry-armv.S | 25 ++++++++++++++++---------
+ 1 file changed, 16 insertions(+), 9 deletions(-)
+
+--- a/arch/arm/kernel/entry-armv.S
++++ b/arch/arm/kernel/entry-armv.S
+@@ -741,6 +741,17 @@ ENDPROC(__switch_to)
+ #endif
+ .endm
+
++ .macro kuser_pad, sym, size
++ .if (. - \sym) & 3
++ .rept 4 - (. - \sym) & 3
++ .byte 0
++ .endr
++ .endif
++ .rept (\size - (. - \sym)) / 4
++ .word 0xe7fddef1
++ .endr
++ .endm
++
+ .align 5
+ .globl __kuser_helper_start
+ __kuser_helper_start:
+@@ -831,18 +842,13 @@ kuser_cmpxchg64_fixup:
+ #error "incoherent kernel configuration"
+ #endif
+
+- /* pad to next slot */
+- .rept (16 - (. - __kuser_cmpxchg64)/4)
+- .word 0
+- .endr
+-
+- .align 5
++ kuser_pad __kuser_cmpxchg64, 64
+
+ __kuser_memory_barrier: @ 0xffff0fa0
+ smp_dmb arm
+ usr_ret lr
+
+- .align 5
++ kuser_pad __kuser_memory_barrier, 32
+
+ __kuser_cmpxchg: @ 0xffff0fc0
+
+@@ -915,13 +921,14 @@ kuser_cmpxchg32_fixup:
+
+ #endif
+
+- .align 5
++ kuser_pad __kuser_cmpxchg, 32
+
+ __kuser_get_tls: @ 0xffff0fe0
+ ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init
+ usr_ret lr
+ mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code
+- .rep 4
++ kuser_pad __kuser_get_tls, 16
++ .rep 3
+ .word 0 @ 0xffff0ff0 software TLS value, then
+ .endr @ pad up to __kuser_helper_version
+
--- /dev/null
+From f928d4f2a86f46b030fa0850385b4391fc2b5918 Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+Date: Thu, 4 Jul 2013 11:00:23 +0100
+Subject: ARM: poison the vectors page
+
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+
+commit f928d4f2a86f46b030fa0850385b4391fc2b5918 upstream.
+
+Fill the empty regions of the vectors page with an exception-generating
+instruction. This ensures that any inappropriate branch to the vector
+page is appropriately trapped, rather than just encountering some code
+to execute. (The vectors page was filled with zero before, which
+corresponds with the "andeq r0, r0, r0" instruction - a no-op.)
+
+Acked-by: Nicolas Pitre <nico@linaro.org>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kernel/traps.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -817,10 +817,20 @@ void __init early_trap_init(void *vector
+ extern char __vectors_start[], __vectors_end[];
+ extern char __kuser_helper_start[], __kuser_helper_end[];
+ int kuser_sz = __kuser_helper_end - __kuser_helper_start;
++ unsigned i;
+
+ vectors_page = vectors_base;
+
+ /*
++ * Poison the vectors page with an undefined instruction. This
++ * instruction is chosen to be undefined for both ARM and Thumb
++ * ISAs. The Thumb version is an undefined instruction with a
++ * branch back to the undefined instruction.
++ */
++ for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
++ ((u32 *)vectors_base)[i] = 0xe7fddef1;
++
++ /*
+ * Copy the vectors, stubs and kuser helpers (in entry-armv.S)
+ * into the vector page, mapped at 0xffff0000, and ensure these
+ * are visible to the instruction stream.
--- /dev/null
+From e39e3f3ebfef03450cf7bfa7a974a8c61f7980c8 Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+Date: Tue, 9 Jul 2013 01:03:17 +0100
+Subject: ARM: update FIQ support for relocation of vectors
+
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+
+commit e39e3f3ebfef03450cf7bfa7a974a8c61f7980c8 upstream.
+
+FIQ should no longer copy the FIQ code into the user-visible vector
+page. Instead, it should use the hidden page. This change makes
+that happen.
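+
+The FIQ_OFFSET macro added below relies on a common kernel idiom,
+shown here with a hypothetical symbol: for a symbol defined in
+assembly (via .equ) or in a linker script, the symbol's address *is*
+the value, so C code recovers it by taking that address:
+
+  /* hypothetical illustration of the idiom, not a kernel API */
+  extern char __my_offset[];    /* .equ __my_offset, <constant> */
+  unsigned long off = (unsigned long)__my_offset;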
+
+Acked-by: Nicolas Pitre <nico@linaro.org>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kernel/entry-armv.S | 3 +++
+ arch/arm/kernel/fiq.c | 19 ++++++++++++++-----
+ 2 files changed, 17 insertions(+), 5 deletions(-)
+
+--- a/arch/arm/kernel/entry-armv.S
++++ b/arch/arm/kernel/entry-armv.S
+@@ -1119,6 +1119,9 @@ vector_addrexcptn:
+ vector_fiq:
+ subs pc, lr, #4
+
++ .globl vector_fiq_offset
++ .equ vector_fiq_offset, vector_fiq
++
+ .section .vectors, "ax", %progbits
+ __vectors_start:
+ W(b) vector_rst
+--- a/arch/arm/kernel/fiq.c
++++ b/arch/arm/kernel/fiq.c
+@@ -47,6 +47,11 @@
+ #include <asm/irq.h>
+ #include <asm/traps.h>
+
++#define FIQ_OFFSET ({ \
++ extern void *vector_fiq_offset; \
++ (unsigned)&vector_fiq_offset; \
++ })
++
+ static unsigned long no_fiq_insn;
+
+ /* Default reacquire function
+@@ -80,13 +85,16 @@ int show_fiq_list(struct seq_file *p, in
+ void set_fiq_handler(void *start, unsigned int length)
+ {
+ #if defined(CONFIG_CPU_USE_DOMAINS)
+- memcpy((void *)0xffff001c, start, length);
++ void *base = (void *)0xffff0000;
+ #else
+- memcpy(vectors_page + 0x1c, start, length);
++ void *base = vectors_page;
+ #endif
+- flush_icache_range(0xffff001c, 0xffff001c + length);
++ unsigned offset = FIQ_OFFSET;
++
++ memcpy(base + offset, start, length);
++ flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
+ if (!vectors_high())
+- flush_icache_range(0x1c, 0x1c + length);
++ flush_icache_range(offset, offset + length);
+ }
+
+ int claim_fiq(struct fiq_handler *f)
+@@ -144,6 +152,7 @@ EXPORT_SYMBOL(disable_fiq);
+
+ void __init init_FIQ(int start)
+ {
+- no_fiq_insn = *(unsigned long *)0xffff001c;
++ unsigned offset = FIQ_OFFSET;
++ no_fiq_insn = *(unsigned long *)(0xffff0000 + offset);
+ fiq_start = start;
+ }
--- /dev/null
+From b9b32bf70f2fb710b07c94e13afbc729afe221da Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+Date: Thu, 4 Jul 2013 12:03:31 +0100
+Subject: ARM: use linker magic for vectors and vector stubs
+
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+
+commit b9b32bf70f2fb710b07c94e13afbc729afe221da upstream.
+
+Use linker magic to create the vectors and vector stubs: we can tell the
+linker to place them at an appropriate VMA, but keep the LMA within the
+kernel. This gets rid of some unnecessary symbol manipulation, and
+lets the linker calculate the relocations appropriately.
+
+Acked-by: Nicolas Pitre <nico@linaro.org>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kernel/entry-armv.S | 28 ++++++++++------------------
+ arch/arm/kernel/vmlinux.lds.S | 17 +++++++++++++++++
+ 2 files changed, 27 insertions(+), 18 deletions(-)
+
+--- a/arch/arm/kernel/entry-armv.S
++++ b/arch/arm/kernel/entry-armv.S
+@@ -992,7 +992,7 @@ ENDPROC(vector_\name)
+ 1:
+ .endm
+
+- .globl __stubs_start
++ .section .stubs, "ax", %progbits
+ __stubs_start:
+ @ This must be the first word
+ .word vector_swi
+@@ -1119,24 +1119,16 @@ vector_addrexcptn:
+ vector_fiq:
+ subs pc, lr, #4
+
+- .globl __stubs_end
+-__stubs_end:
+-
+- .equ stubs_offset, __vectors_start + 0x1000 - __stubs_start
+-
+- .globl __vectors_start
++ .section .vectors, "ax", %progbits
+ __vectors_start:
+- W(b) vector_rst + stubs_offset
+- W(b) vector_und + stubs_offset
+- W(ldr) pc, .LCvswi + stubs_offset
+- W(b) vector_pabt + stubs_offset
+- W(b) vector_dabt + stubs_offset
+- W(b) vector_addrexcptn + stubs_offset
+- W(b) vector_irq + stubs_offset
+- W(b) vector_fiq + stubs_offset
+-
+- .globl __vectors_end
+-__vectors_end:
++ W(b) vector_rst
++ W(b) vector_und
++ W(ldr) pc, __vectors_start + 0x1000
++ W(b) vector_pabt
++ W(b) vector_dabt
++ W(b) vector_addrexcptn
++ W(b) vector_irq
++ W(b) vector_fiq
+
+ .data
+
+--- a/arch/arm/kernel/vmlinux.lds.S
++++ b/arch/arm/kernel/vmlinux.lds.S
+@@ -152,6 +152,23 @@ SECTIONS
+ . = ALIGN(PAGE_SIZE);
+ __init_begin = .;
+ #endif
++ /*
++ * The vectors and stubs are relocatable code, and the
++ * only thing that matters is their relative offsets
++ */
++ __vectors_start = .;
++ .vectors 0 : AT(__vectors_start) {
++ *(.vectors)
++ }
++ . = __vectors_start + SIZEOF(.vectors);
++ __vectors_end = .;
++
++ __stubs_start = .;
++ .stubs 0x1000 : AT(__stubs_start) {
++ *(.stubs)
++ }
++ . = __stubs_start + SIZEOF(.stubs);
++ __stubs_end = .;
+
+ INIT_TEXT_SECTION(8)
+ .exit.text : {
--- /dev/null
+arm-poison-the-vectors-page.patch
+arm-poison-memory-between-kuser-helpers.patch
+arm-move-vector-stubs.patch
+arm-use-linker-magic-for-vectors-and-vector-stubs.patch
+arm-update-fiq-support-for-relocation-of-vectors.patch
+arm-allow-kuser-helpers-to-be-removed-from-the-vector-page.patch
+arm-move-signal-handlers-into-a-vdso-like-page.patch
+arm-make-vectors-page-inaccessible-from-userspace.patch
+arm-fix-a-cockup-in-48be69a02-arm-move-signal-handlers-into-a-vdso-like-page.patch
+arm-fix-nommu-builds-with-48be69a02-arm-move-signal-handlers-into-a-vdso-like-page.patch