From: Greg Kroah-Hartman
Date: Sun, 4 Aug 2013 10:29:15 +0000 (+0800)
Subject: 3.4-stable patches
X-Git-Tag: v3.0.90~31
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=b5ac3d4354f65b5dc98d3d9db9bcab6bc107a13a;p=thirdparty%2Fkernel%2Fstable-queue.git

3.4-stable patches

added patches:
arm-allow-kuser-helpers-to-be-removed-from-the-vector-page.patch
arm-make-vectors-page-inaccessible-from-userspace.patch
arm-move-vector-stubs.patch
arm-poison-memory-between-kuser-helpers.patch
arm-poison-the-vectors-page.patch
arm-update-fiq-support-for-relocation-of-vectors.patch
arm-use-linker-magic-for-vectors-and-vector-stubs.patch
---

diff --git a/queue-3.4/arm-allow-kuser-helpers-to-be-removed-from-the-vector-page.patch b/queue-3.4/arm-allow-kuser-helpers-to-be-removed-from-the-vector-page.patch
new file mode 100644
index 00000000000..be376f9c072
--- /dev/null
+++ b/queue-3.4/arm-allow-kuser-helpers-to-be-removed-from-the-vector-page.patch
@@ -0,0 +1,178 @@
+From f6f91b0d9fd971c630cef908dde8fe8795aefbf8 Mon Sep 17 00:00:00 2001
+From: Russell King
+Date: Tue, 23 Jul 2013 18:37:00 +0100
+Subject: ARM: allow kuser helpers to be removed from the vector page
+
+From: Russell King
+
+commit f6f91b0d9fd971c630cef908dde8fe8795aefbf8 upstream.
+
+Provide a kernel configuration option to allow the kernel user helpers
+to be removed from the vector page, thereby preventing their use with
+ROP (return orientated programming) attacks. This option is only
+visible for CPU architectures which natively support all the operations
+which kernel user helpers would normally provide, and must be enabled
+with caution.
+
+Acked-by: Nicolas Pitre
+Signed-off-by: Russell King
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/arm/kernel/entry-armv.S |  3 +++
+ arch/arm/kernel/traps.c      | 23 ++++++++++++++---------
+ arch/arm/mm/Kconfig          | 34 ++++++++++++++++++++++++++++++++++
+ 3 files changed, 51 insertions(+), 9 deletions(-)
+
+--- a/arch/arm/kernel/entry-armv.S
++++ b/arch/arm/kernel/entry-armv.S
+@@ -798,6 +798,7 @@ ENDPROC(__switch_to)
+ .endr
+ .endm
+
++#ifdef CONFIG_KUSER_HELPERS
+ .align 5
+ .globl __kuser_helper_start
+ __kuser_helper_start:
+@@ -984,6 +985,8 @@ __kuser_helper_version: @ 0xffff0ffc
+ .globl __kuser_helper_end
+ __kuser_helper_end:
+
++#endif
++
+ THUMB( .thumb )
+
+ /*
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -776,23 +776,32 @@ void __init trap_init(void)
+ return;
+ }
+
+-static void __init kuser_get_tls_init(unsigned long vectors)
++#ifdef CONFIG_KUSER_HELPERS
++static void __init kuser_init(void *vectors)
+ {
++ extern char __kuser_helper_start[], __kuser_helper_end[];
++ int kuser_sz = __kuser_helper_end - __kuser_helper_start;
++
++ memcpy(vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
++
+ /*
+ * vectors + 0xfe0 = __kuser_get_tls
+ * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8
+ */
+ if (tls_emu || has_tls_reg)
+- memcpy((void *)vectors + 0xfe0, (void *)vectors + 0xfe8, 4);
++ memcpy(vectors + 0xfe0, vectors + 0xfe8, 4);
++}
++#else
++static void __init kuser_init(void *vectors)
++{
+ }
++#endif
+
+ void __init early_trap_init(void *vectors_base)
+ {
+ unsigned long vectors = (unsigned long)vectors_base;
+ extern char __stubs_start[], __stubs_end[];
+ extern char __vectors_start[], __vectors_end[];
+- extern char __kuser_helper_start[], __kuser_helper_end[];
+- int kuser_sz = __kuser_helper_end - __kuser_helper_start;
+ unsigned i;
+
+ vectors_page = vectors_base;
+@@ -813,12 +822,8 @@ void __init early_trap_init(void *vector
+ */
+ memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
+ memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);
+- memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
+
+- /*
+- * Do processor specific fixups for the kuser helpers
+- */
+- kuser_get_tls_init(vectors);
++ kuser_init(vectors_base);
+
+ /*
+ * Copy signal return handlers into the vector page, and
+--- a/arch/arm/mm/Kconfig
++++ b/arch/arm/mm/Kconfig
+@@ -436,24 +436,28 @@ config CPU_32v3
+ select TLS_REG_EMUL if SMP || !MMU
+ select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
+ select CPU_USE_DOMAINS if MMU
++ select NEED_KUSER_HELPERS
+
+ config CPU_32v4
+ bool
+ select TLS_REG_EMUL if SMP || !MMU
+ select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
+ select CPU_USE_DOMAINS if MMU
++ select NEED_KUSER_HELPERS
+
+ config CPU_32v4T
+ bool
+ select TLS_REG_EMUL if SMP || !MMU
+ select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
+ select CPU_USE_DOMAINS if MMU
++ select NEED_KUSER_HELPERS
+
+ config CPU_32v5
+ bool
+ select TLS_REG_EMUL if SMP || !MMU
+ select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
+ select CPU_USE_DOMAINS if MMU
++ select NEED_KUSER_HELPERS
+
+ config CPU_32v6
+ bool
+@@ -779,6 +783,7 @@ config CPU_BPREDICT_DISABLE
+
+ config TLS_REG_EMUL
+ bool
++ select NEED_KUSER_HELPERS
+ help
+   An SMP system using a pre-ARMv6 processor (there are apparently
+   a few prototypes like that in existence) and therefore access to
+@@ -786,11 +791,40 @@ config TLS_REG_EMUL
+
+ config NEEDS_SYSCALL_FOR_CMPXCHG
+ bool
++ select NEED_KUSER_HELPERS
+ help
+   SMP on a pre-ARMv6 processor? Well OK then.
+   Forget about fast user space cmpxchg support.
+   It is just not possible.
+
++config NEED_KUSER_HELPERS
++ bool
++
++config KUSER_HELPERS
++ bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
++ default y
++ help
++   Warning: disabling this option may break user programs.
++
++   Provide kuser helpers in the vector page. The kernel provides
++   helper code to userspace in read only form at a fixed location
++   in the high vector page to allow userspace to be independent of
++   the CPU type fitted to the system. This permits binaries to be
++   run on ARMv4 through to ARMv7 without modification.
++
++   However, the fixed address nature of these helpers can be used
++   by ROP (return orientated programming) authors when creating
++   exploits.
++
++   If all of the binaries and libraries which run on your platform
++   are built specifically for your platform, and make no use of
++   these helpers, then you can turn this option off. However,
++   when such an binary or library is run, it will receive a SIGILL
++   signal, which will terminate the program.
++
++   Say N here only if you are absolutely certain that you do not
++   need these helpers; otherwise, the safe option is to say Y.
++
+ config DMA_CACHE_RWFO
+ bool "Enable read/write for ownership DMA cache maintenance"
+ depends on CPU_V6K && SMP
diff --git a/queue-3.4/arm-make-vectors-page-inaccessible-from-userspace.patch b/queue-3.4/arm-make-vectors-page-inaccessible-from-userspace.patch
new file mode 100644
index 00000000000..4942c366b61
--- /dev/null
+++ b/queue-3.4/arm-make-vectors-page-inaccessible-from-userspace.patch
@@ -0,0 +1,73 @@
+From a5463cd3435475386cbbe7b06e01292ac169d36f Mon Sep 17 00:00:00 2001
+From: Russell King
+Date: Wed, 31 Jul 2013 21:58:56 +0100
+Subject: ARM: make vectors page inaccessible from userspace
+
+From: Russell King
+
+commit a5463cd3435475386cbbe7b06e01292ac169d36f upstream.
+
+If kuser helpers are not provided by the kernel, disable user access to
+the vectors page. With the kuser helpers gone, there is no reason for
+this page to be visible to userspace.
+
+Signed-off-by: Russell King
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/arm/include/asm/page.h | 2 ++
+ arch/arm/kernel/process.c   | 7 ++++++-
+ arch/arm/mm/mmu.c           | 4 ++++
+ 3 files changed, 12 insertions(+), 1 deletion(-)
+
+--- a/arch/arm/include/asm/page.h
++++ b/arch/arm/include/asm/page.h
+@@ -151,7 +151,9 @@ extern void __cpu_copy_user_highpage(str
+ #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
+ extern void copy_page(void *to, const void *from);
+
++#ifdef CONFIG_KUSER_HELPERS
+ #define __HAVE_ARCH_GATE_AREA 1
++#endif
+
+ #ifdef CONFIG_ARM_LPAE
+ #include
+--- a/arch/arm/kernel/process.c
++++ b/arch/arm/kernel/process.c
+@@ -528,6 +528,7 @@ unsigned long arch_randomize_brk(struct
+ }
+
+ #ifdef CONFIG_MMU
++#ifdef CONFIG_KUSER_HELPERS
+ /*
+ * The vectors page is always readable from user space for the
+ * atomic helpers and the signal restart code. Insert it into the
+@@ -560,9 +561,13 @@ int in_gate_area_no_mm(unsigned long add
+ {
+ return in_gate_area(NULL, addr);
+ }
++#define is_gate_vma(vma) ((vma) == &gate_vma)
++#else
++#define is_gate_vma(vma) 0
++#endif
+
+ const char *arch_vma_name(struct vm_area_struct *vma)
+ {
+- return (vma == &gate_vma) ? "[vectors]" : NULL;
++ return is_gate_vma(vma) ? "[vectors]" : NULL;
+ }
+ #endif
+--- a/arch/arm/mm/mmu.c
++++ b/arch/arm/mm/mmu.c
+@@ -1123,7 +1123,11 @@ static void __init devicemaps_init(struc
+ map.pfn = __phys_to_pfn(virt_to_phys(vectors));
+ map.virtual = 0xffff0000;
+ map.length = PAGE_SIZE;
++#ifdef CONFIG_KUSER_HELPERS
+ map.type = MT_HIGH_VECTORS;
++#else
++ map.type = MT_LOW_VECTORS;
++#endif
+ create_mapping(&map);
+
+ if (!vectors_high()) {
diff --git a/queue-3.4/arm-move-vector-stubs.patch b/queue-3.4/arm-move-vector-stubs.patch
new file mode 100644
index 00000000000..11e3c759635
--- /dev/null
+++ b/queue-3.4/arm-move-vector-stubs.patch
@@ -0,0 +1,176 @@
+From 19accfd373847ac3d10623c5d20f948846299741 Mon Sep 17 00:00:00 2001
+From: Russell King
+Date: Thu, 4 Jul 2013 11:40:32 +0100
+Subject: ARM: move vector stubs
+
+From: Russell King
+
+commit 19accfd373847ac3d10623c5d20f948846299741 upstream.
+
+Move the machine vector stubs into the page above the vector page,
+which we can prevent from being visible to userspace. Also move
+the reset stub, and place the swi vector at a location that the
+'ldr' can get to it.
+
+This hides pointers into the kernel which could give valuable
+information to attackers, and reduces the number of exploitable
+instructions at a fixed address.
+
+Acked-by: Nicolas Pitre
+Signed-off-by: Russell King
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/arm/Kconfig             |  3 +-
+ arch/arm/kernel/entry-armv.S | 50 ++++++++++++++++++++-----------------------
+ arch/arm/kernel/traps.c      |  4 +--
+ arch/arm/mm/mmu.c            | 10 +++++++-
+ 4 files changed, 37 insertions(+), 30 deletions(-)
+
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -200,7 +200,8 @@ config VECTORS_BASE
+ default DRAM_BASE if REMAP_VECTORS_TO_RAM
+ default 0x00000000
+ help
+-  The base address of exception vectors.
++  The base address of exception vectors. This must be two pages
++  in size.
+
+ config ARM_PATCH_PHYS_VIRT
+ bool "Patch physical to virtual translations at runtime" if EMBEDDED
+--- a/arch/arm/kernel/entry-armv.S
++++ b/arch/arm/kernel/entry-armv.S
+@@ -989,9 +989,9 @@ __kuser_helper_end:
+ /*
+ * Vector stubs.
+ *
+- * This code is copied to 0xffff0200 so we can use branches in the
+- * vectors, rather than ldr's. Note that this code must not
+- * exceed 0x300 bytes.
++ * This code is copied to 0xffff1000 so we can use branches in the
++ * vectors, rather than ldr's. Note that this code must not exceed
++ * a page size.
+ *
+ * Common stub entry macro:
+ * Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
+@@ -1040,6 +1040,15 @@ ENDPROC(vector_\name)
+
+ .globl __stubs_start
+ __stubs_start:
++ @ This must be the first word
++ .word vector_swi
++
++vector_rst:
++ ARM( swi SYS_ERROR0 )
++ THUMB( svc #0 )
++ THUMB( nop )
++ b vector_und
++
+ /*
+ * Interrupt dispatcher
+ */
+@@ -1134,6 +1143,16 @@ __stubs_start:
+ .align 5
+
+ /*=============================================================================
++ * Address exception handler
++ *-----------------------------------------------------------------------------
++ * These aren't too critical.
++ * (they're not supposed to happen, and won't happen in 32-bit data mode).
++ */
++
++vector_addrexcptn:
++ b vector_addrexcptn
++
++/*=============================================================================
+ * Undefined FIQs
+ *-----------------------------------------------------------------------------
+ * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
+@@ -1146,35 +1165,14 @@ __stubs_start:
+ vector_fiq:
+ subs pc, lr, #4
+
+-/*=============================================================================
+- * Address exception handler
+- *-----------------------------------------------------------------------------
+- * These aren't too critical.
+- * (they're not supposed to happen, and won't happen in 32-bit data mode).
+- */
+-
+-vector_addrexcptn:
+- b vector_addrexcptn
+-
+-/*
+- * We group all the following data together to optimise
+- * for CPUs with separate I & D caches.
+- */
+- .align 5
+-
+-.LCvswi:
+- .word vector_swi
+-
+ .globl __stubs_end
+ __stubs_end:
+
+- .equ stubs_offset, __vectors_start + 0x200 - __stubs_start
++ .equ stubs_offset, __vectors_start + 0x1000 - __stubs_start
+
+ .globl __vectors_start
+ __vectors_start:
+- ARM( swi SYS_ERROR0 )
+- THUMB( svc #0 )
+- THUMB( nop )
++ W(b) vector_rst + stubs_offset
+ W(b) vector_und + stubs_offset
+ W(ldr) pc, .LCvswi + stubs_offset
+ W(b) vector_pabt + stubs_offset
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -812,7 +812,7 @@ void __init early_trap_init(void *vector
+ * are visible to the instruction stream.
+ */
+ memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
+- memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start);
++ memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);
+ memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
+
+ /*
+@@ -829,6 +829,6 @@ void __init early_trap_init(void *vector
+ memcpy((void *)(vectors + KERN_RESTART_CODE - CONFIG_VECTORS_BASE),
+ syscall_restart_code, sizeof(syscall_restart_code));
+
+- flush_icache_range(vectors, vectors + PAGE_SIZE);
++ flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
+ modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
+ }
+--- a/arch/arm/mm/mmu.c
++++ b/arch/arm/mm/mmu.c
+@@ -1078,7 +1078,7 @@ static void __init devicemaps_init(struc
+ /*
+ * Allocate the vector page early.
+ */
+- vectors = early_alloc(PAGE_SIZE);
++ vectors = early_alloc(PAGE_SIZE * 2);
+
+ early_trap_init(vectors);
+
+@@ -1128,10 +1128,18 @@ static void __init devicemaps_init(struc
+
+ if (!vectors_high()) {
+ map.virtual = 0;
++ map.length = PAGE_SIZE * 2;
+ map.type = MT_LOW_VECTORS;
+ create_mapping(&map);
+ }
+
++ /* Now create a kernel read-only mapping */
++ map.pfn += 1;
++ map.virtual = 0xffff0000 + PAGE_SIZE;
++ map.length = PAGE_SIZE;
++ map.type = MT_LOW_VECTORS;
++ create_mapping(&map);
++
+ /*
+ * Ask the machine support to map in the statically mapped devices.
+ */
diff --git a/queue-3.4/arm-poison-memory-between-kuser-helpers.patch b/queue-3.4/arm-poison-memory-between-kuser-helpers.patch
new file mode 100644
index 00000000000..332de2c948b
--- /dev/null
+++ b/queue-3.4/arm-poison-memory-between-kuser-helpers.patch
@@ -0,0 +1,78 @@
+From 5b43e7a383d69381ffe53423e46dd0fafae07da3 Mon Sep 17 00:00:00 2001
+From: Russell King
+Date: Thu, 4 Jul 2013 11:32:04 +0100
+Subject: ARM: poison memory between kuser helpers
+
+From: Russell King
+
+commit 5b43e7a383d69381ffe53423e46dd0fafae07da3 upstream.
+
+Poison the memory between each kuser helper. This ensures that any
+branch between the kuser helpers will be appropriately trapped.
+
+Acked-by: Nicolas Pitre
+Signed-off-by: Russell King
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/arm/kernel/entry-armv.S | 25 ++++++++++++++---------
+ 1 file changed, 16 insertions(+), 9 deletions(-)
+
+--- a/arch/arm/kernel/entry-armv.S
++++ b/arch/arm/kernel/entry-armv.S
+@@ -787,6 +787,17 @@ ENDPROC(__switch_to)
+ #endif
+ .endm
+
++ .macro kuser_pad, sym, size
++ .if (. - \sym) & 3
++ .rept 4 - (. - \sym) & 3
++ .byte 0
++ .endr
++ .endif
++ .rept (\size - (. - \sym)) / 4
++ .word 0xe7fddef1
++ .endr
++ .endm
++
+ .align 5
+ .globl __kuser_helper_start
+ __kuser_helper_start:
+@@ -877,18 +888,13 @@ kuser_cmpxchg64_fixup:
+ #error "incoherent kernel configuration"
+ #endif
+
+- /* pad to next slot */
+- .rept (16 - (. - __kuser_cmpxchg64)/4)
+- .word 0
+- .endr
+-
+- .align 5
++ kuser_pad __kuser_cmpxchg64, 64
+
+ __kuser_memory_barrier: @ 0xffff0fa0
+ smp_dmb arm
+ usr_ret lr
+
+- .align 5
++ kuser_pad __kuser_memory_barrier, 32
+
+ __kuser_cmpxchg: @ 0xffff0fc0
+
+@@ -961,13 +967,14 @@ kuser_cmpxchg32_fixup:
+
+ #endif
+
+- .align 5
++ kuser_pad __kuser_cmpxchg, 32
+
+ __kuser_get_tls: @ 0xffff0fe0
+ ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init
+ usr_ret lr
+ mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code
+- .rep 4
++ kuser_pad __kuser_get_tls, 16
++ .rep 3
+ .word 0 @ 0xffff0ff0 software TLS value, then
+ .endr @ pad up to __kuser_helper_version
+
diff --git a/queue-3.4/arm-poison-the-vectors-page.patch b/queue-3.4/arm-poison-the-vectors-page.patch
new file mode 100644
index 00000000000..f412857043b
--- /dev/null
+++ b/queue-3.4/arm-poison-the-vectors-page.patch
@@ -0,0 +1,46 @@
+From f928d4f2a86f46b030fa0850385b4391fc2b5918 Mon Sep 17 00:00:00 2001
+From: Russell King
+Date: Thu, 4 Jul 2013 11:00:23 +0100
+Subject: ARM: poison the vectors page
+
+From: Russell King
+
+commit f928d4f2a86f46b030fa0850385b4391fc2b5918 upstream.
+
+Fill the empty regions of the vectors page with an exception generating
+instruction. This ensures that any inappropriate branch to the vector
+page is appropriately trapped, rather than just encountering some code
+to execute. (The vectors page was filled with zero before, which
+corresponds with the "andeq r0, r0, r0" instruction - a no-op.)
+
+Acked-by: Nicolas Pitre
+Signed-off-by: Russell King
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/arm/kernel/traps.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -793,10 +793,20 @@ void __init early_trap_init(void *vector
+ extern char __vectors_start[], __vectors_end[];
+ extern char __kuser_helper_start[], __kuser_helper_end[];
+ int kuser_sz = __kuser_helper_end - __kuser_helper_start;
++ unsigned i;
+
+ vectors_page = vectors_base;
+
+ /*
++ * Poison the vectors page with an undefined instruction. This
++ * instruction is chosen to be undefined for both ARM and Thumb
++ * ISAs. The Thumb version is an undefined instruction with a
++ * branch back to the undefined instruction.
++ */
++ for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
++ ((u32 *)vectors_base)[i] = 0xe7fddef1;
++
++ /*
+ * Copy the vectors, stubs and kuser helpers (in entry-armv.S)
+ * into the vector page, mapped at 0xffff0000, and ensure these
+ * are visible to the instruction stream.
diff --git a/queue-3.4/arm-update-fiq-support-for-relocation-of-vectors.patch b/queue-3.4/arm-update-fiq-support-for-relocation-of-vectors.patch
new file mode 100644
index 00000000000..af711d7ebaa
--- /dev/null
+++ b/queue-3.4/arm-update-fiq-support-for-relocation-of-vectors.patch
@@ -0,0 +1,77 @@
+From e39e3f3ebfef03450cf7bfa7a974a8c61f7980c8 Mon Sep 17 00:00:00 2001
+From: Russell King
+Date: Tue, 9 Jul 2013 01:03:17 +0100
+Subject: ARM: update FIQ support for relocation of vectors
+
+From: Russell King
+
+commit e39e3f3ebfef03450cf7bfa7a974a8c61f7980c8 upstream.
+
+FIQ should no longer copy the FIQ code into the user visible vector
+page. Instead, it should use the hidden page. This change makes
+that happen.
+
+Acked-by: Nicolas Pitre
+Signed-off-by: Russell King
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/arm/kernel/entry-armv.S |  3 +++
+ arch/arm/kernel/fiq.c        | 19 ++++++++++++++-----
+ 2 files changed, 17 insertions(+), 5 deletions(-)
+
+--- a/arch/arm/kernel/entry-armv.S
++++ b/arch/arm/kernel/entry-armv.S
+@@ -1165,6 +1165,9 @@ vector_addrexcptn:
+ vector_fiq:
+ subs pc, lr, #4
+
++ .globl vector_fiq_offset
++ .equ vector_fiq_offset, vector_fiq
++
+ .section .vectors, "ax", %progbits
+ __vectors_start:
+ W(b) vector_rst
+--- a/arch/arm/kernel/fiq.c
++++ b/arch/arm/kernel/fiq.c
+@@ -47,6 +47,11 @@
+ #include
+ #include
+
++#define FIQ_OFFSET ({ \
++ extern void *vector_fiq_offset; \
++ (unsigned)&vector_fiq_offset; \
++ })
++
+ static unsigned long no_fiq_insn;
+
+ /* Default reacquire function
+@@ -80,13 +85,16 @@ int show_fiq_list(struct seq_file *p, in
+ void set_fiq_handler(void *start, unsigned int length)
+ {
+ #if defined(CONFIG_CPU_USE_DOMAINS)
+- memcpy((void *)0xffff001c, start, length);
++ void *base = (void *)0xffff0000;
+ #else
+- memcpy(vectors_page + 0x1c, start, length);
++ void *base = vectors_page;
+ #endif
+- flush_icache_range(0xffff001c, 0xffff001c + length);
++ unsigned offset = FIQ_OFFSET;
++
++ memcpy(base + offset, start, length);
++ flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
+ if (!vectors_high())
+- flush_icache_range(0x1c, 0x1c + length);
++ flush_icache_range(offset, offset + length);
+ }
+
+ int claim_fiq(struct fiq_handler *f)
+@@ -142,5 +150,6 @@ EXPORT_SYMBOL(disable_fiq);
+
+ void __init init_FIQ(void)
+ {
+- no_fiq_insn = *(unsigned long *)0xffff001c;
++ unsigned offset = FIQ_OFFSET;
++ no_fiq_insn = *(unsigned long *)(0xffff0000 + offset);
+ }
diff --git a/queue-3.4/arm-use-linker-magic-for-vectors-and-vector-stubs.patch b/queue-3.4/arm-use-linker-magic-for-vectors-and-vector-stubs.patch
new file mode 100644
index 00000000000..ecf86d6bff3
--- /dev/null
+++ b/queue-3.4/arm-use-linker-magic-for-vectors-and-vector-stubs.patch
@@ -0,0 +1,94 @@
+From b9b32bf70f2fb710b07c94e13afbc729afe221da Mon Sep 17 00:00:00 2001
+From: Russell King
+Date: Thu, 4 Jul 2013 12:03:31 +0100
+Subject: ARM: use linker magic for vectors and vector stubs
+
+From: Russell King
+
+commit b9b32bf70f2fb710b07c94e13afbc729afe221da upstream.
+
+Use linker magic to create the vectors and vector stubs: we can tell the
+linker to place them at an appropriate VMA, but keep the LMA within the
+kernel. This gets rid of some unnecessary symbol manipulation, and
+have the linker calculate the relocations appropriately.
+
+Acked-by: Nicolas Pitre
+Signed-off-by: Russell King
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/arm/kernel/entry-armv.S  | 28 ++++++++++------------------
+ arch/arm/kernel/vmlinux.lds.S | 17 +++++++++++++++++
+ 2 files changed, 27 insertions(+), 18 deletions(-)
+
+--- a/arch/arm/kernel/entry-armv.S
++++ b/arch/arm/kernel/entry-armv.S
+@@ -1038,7 +1038,7 @@ ENDPROC(vector_\name)
+ 1:
+ .endm
+
+- .globl __stubs_start
++ .section .stubs, "ax", %progbits
+ __stubs_start:
+ @ This must be the first word
+ .word vector_swi
+@@ -1165,24 +1165,16 @@ vector_addrexcptn:
+ vector_fiq:
+ subs pc, lr, #4
+
+- .globl __stubs_end
+-__stubs_end:
+-
+- .equ stubs_offset, __vectors_start + 0x1000 - __stubs_start
+-
+- .globl __vectors_start
++ .section .vectors, "ax", %progbits
+ __vectors_start:
+- W(b) vector_rst + stubs_offset
+- W(b) vector_und + stubs_offset
+- W(ldr) pc, .LCvswi + stubs_offset
+- W(b) vector_pabt + stubs_offset
+- W(b) vector_dabt + stubs_offset
+- W(b) vector_addrexcptn + stubs_offset
+- W(b) vector_irq + stubs_offset
+- W(b) vector_fiq + stubs_offset
+-
+- .globl __vectors_end
+-__vectors_end:
++ W(b) vector_rst
++ W(b) vector_und
++ W(ldr) pc, __vectors_start + 0x1000
++ W(b) vector_pabt
++ W(b) vector_dabt
++ W(b) vector_addrexcptn
++ W(b) vector_irq
++ W(b) vector_fiq
+
+ .data
+
+--- a/arch/arm/kernel/vmlinux.lds.S
++++ b/arch/arm/kernel/vmlinux.lds.S
+@@ -137,6 +137,23 @@ SECTIONS
+ . = ALIGN(PAGE_SIZE);
+ __init_begin = .;
+ #endif
++ /*
++ * The vectors and stubs are relocatable code, and the
++ * only thing that matters is their relative offsets
++ */
++ __vectors_start = .;
++ .vectors 0 : AT(__vectors_start) {
++ *(.vectors)
++ }
++ . = __vectors_start + SIZEOF(.vectors);
++ __vectors_end = .;
++
++ __stubs_start = .;
++ .stubs 0x1000 : AT(__stubs_start) {
++ *(.stubs)
++ }
++ . = __stubs_start + SIZEOF(.stubs);
++ __stubs_end = .;
+
+ INIT_TEXT_SECTION(8)
+ .exit.text : {
diff --git a/queue-3.4/series b/queue-3.4/series
new file mode 100644
index 00000000000..2cf6f8ef7ad
--- /dev/null
+++ b/queue-3.4/series
@@ -0,0 +1,7 @@
+arm-poison-the-vectors-page.patch
+arm-poison-memory-between-kuser-helpers.patch
+arm-move-vector-stubs.patch
+arm-use-linker-magic-for-vectors-and-vector-stubs.patch
+arm-update-fiq-support-for-relocation-of-vectors.patch
+arm-allow-kuser-helpers-to-be-removed-from-the-vector-page.patch
+arm-make-vectors-page-inaccessible-from-userspace.patch
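
---

For context on the user-visible effect of this series: the kuser helpers
being made optional are fixed entry points in the vector page that ARM
userspace can call directly. Below is a minimal sketch of the userspace
side. It is not part of the patches above; it assumes the kuser ABI
documented in Documentation/arm/kernel_user_helpers.txt (the entry
addresses also appear in the entry-armv.S comments above), and the
try_lock/read_tls wrappers are illustrative names, not kernel
interfaces. On a kernel built with CONFIG_KUSER_HELPERS=n these calls
land on the 0xe7fddef1 poison words installed by the patches, and the
process receives SIGILL, which is the failure mode the KUSER_HELPERS
help text warns about.

/*
 * Sketch: calling two of the fixed-address kuser helpers from
 * userspace. Addresses and contracts follow the documented ARM
 * kuser ABI; nothing here is exported by the kernel headers.
 */
typedef int (*kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
typedef unsigned int (*kuser_get_tls_t)(void);

#define __kuser_cmpxchg ((kuser_cmpxchg_t)0xffff0fc0) /* atomic cmpxchg */
#define __kuser_get_tls ((kuser_get_tls_t)0xffff0fe0) /* read TLS value */

static volatile int lock;

static int try_lock(void)
{
	/* __kuser_cmpxchg returns 0 when it swapped *ptr from 0 to 1 */
	return __kuser_cmpxchg(0, 1, &lock) == 0;
}

static unsigned int read_tls(void)
{
	return __kuser_get_tls();
}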