From: Greg Kroah-Hartman
Date: Wed, 15 Sep 2010 23:05:28 +0000 (-0700)
Subject: .27 patches
X-Git-Tag: v2.6.27.54~19
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=1ace06f18943a17e3f3915f9480b4b64a379f63f;p=thirdparty%2Fkernel%2Fstable-queue.git

.27 patches
---

diff --git a/queue-2.6.27/bounce-call-flush_dcache_page-after-bounce_copy_vec.patch b/queue-2.6.27/bounce-call-flush_dcache_page-after-bounce_copy_vec.patch
new file mode 100644
index 00000000000..2a47d961b1b
--- /dev/null
+++ b/queue-2.6.27/bounce-call-flush_dcache_page-after-bounce_copy_vec.patch
@@ -0,0 +1,43 @@
+From ac8456d6f9a3011c824176bd6084d39e5f70a382 Mon Sep 17 00:00:00 2001
+From: Gary King
+Date: Thu, 9 Sep 2010 16:38:05 -0700
+Subject: bounce: call flush_dcache_page() after bounce_copy_vec()
+
+From: Gary King
+
+commit ac8456d6f9a3011c824176bd6084d39e5f70a382 upstream.
+
+I have been seeing problems on Tegra 2 (ARMv7 SMP) systems with HIGHMEM
+enabled on 2.6.35 (plus some patches targetted at 2.6.36 to perform cache
+maintenance lazily), and the root cause appears to be that the mm bouncing
+code is calling flush_dcache_page before it copies the bounce buffer into
+the bio.
+
+The bounced page needs to be flushed after data is copied into it, to
+ensure that architecture implementations can synchronize instruction and
+data caches if necessary.
+
+Signed-off-by: Gary King
+Cc: Tejun Heo
+Cc: Russell King
+Acked-by: Jens Axboe
+Signed-off-by: Andrew Morton
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ mm/bounce.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/bounce.c
++++ b/mm/bounce.c
+@@ -114,8 +114,8 @@ static void copy_to_high_bio_irq(struct
+ 		 */
+ 		vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;
+
+-		flush_dcache_page(tovec->bv_page);
+ 		bounce_copy_vec(tovec, vfrom);
++		flush_dcache_page(tovec->bv_page);
+ 	}
+ }
+
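
For context, a minimal sketch of the "copy first, flush afterwards" ordering the
commit message describes, assuming the 2.6.27-era kmap_atomic() that takes a
km_type slot; fill_bounced_page() is a hypothetical helper, not code from the
patch:

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Hypothetical helper illustrating the ordering rule; not from mm/bounce.c. */
    static void fill_bounced_page(struct page *page, const void *src, size_t len)
    {
            void *dst = kmap_atomic(page, KM_USER0);

            memcpy(dst, src, len);          /* write the data first...               */
            kunmap_atomic(dst, KM_USER0);
            flush_dcache_page(page);        /* ...then flush so I/D caches can sync  */
    }
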
diff --git a/queue-2.6.27/compat-make-compat_alloc_user_space-incorporate-the-access_ok.patch b/queue-2.6.27/compat-make-compat_alloc_user_space-incorporate-the-access_ok.patch
new file mode 100644
index 00000000000..708af9718ef
--- /dev/null
+++ b/queue-2.6.27/compat-make-compat_alloc_user_space-incorporate-the-access_ok.patch
@@ -0,0 +1,172 @@
+From c41d68a513c71e35a14f66d71782d27a79a81ea6 Mon Sep 17 00:00:00 2001
+From: H. Peter Anvin
+Date: Tue, 7 Sep 2010 16:16:18 -0700
+Subject: compat: Make compat_alloc_user_space() incorporate the access_ok()
+
+From: H. Peter Anvin
+
+commit c41d68a513c71e35a14f66d71782d27a79a81ea6 upstream.
+
+compat_alloc_user_space() expects the caller to independently call
+access_ok() to verify the returned area.  A missing call could
+introduce problems on some architectures.
+
+This patch incorporates the access_ok() check into
+compat_alloc_user_space() and also adds a sanity check on the length.
+The existing compat_alloc_user_space() implementations are renamed
+arch_compat_alloc_user_space() and are used as part of the
+implementation of the new global function.
+
+This patch assumes NULL will cause __get_user()/__put_user() to either
+fail or access userspace on all architectures.  This should be
+followed by checking the return value of compat_access_user_space()
+for NULL in the callers, at which time the access_ok() in the callers
+can also be removed.
+
+Reported-by: Ben Hawkes
+Signed-off-by: H. Peter Anvin
+Acked-by: Benjamin Herrenschmidt
+Acked-by: Chris Metcalf
+Acked-by: David S. Miller
+Acked-by: Ingo Molnar
+Acked-by: Thomas Gleixner
+Acked-by: Tony Luck
+Cc: Andrew Morton
+Cc: Arnd Bergmann
+Cc: Fenghua Yu
+Cc: H. Peter Anvin
+Cc: Heiko Carstens
+Cc: Helge Deller
+Cc: James Bottomley
+Cc: Kyle McMartin
+Cc: Martin Schwidefsky
+Cc: Paul Mackerras
+Cc: Ralf Baechle
+Signed-off-by: Greg Kroah-Hartman

+
+---
+ arch/ia64/include/asm/compat.h    |    2 +-
+ arch/powerpc/include/asm/compat.h |    2 +-
+ arch/s390/include/asm/compat.h    |    2 +-
+ arch/sparc/include/asm/compat.h   |    2 +-
+ include/asm-mips/compat.h         |    2 +-
+ include/asm-parisc/compat.h       |    2 +-
+ include/asm-x86/compat.h          |    2 +-
+ include/linux/compat.h            |    2 ++
+ kernel/compat.c                   |   21 +++++++++++++++++++++
+ 9 files changed, 30 insertions(+), 7 deletions(-)
+
+--- a/arch/ia64/include/asm/compat.h
++++ b/arch/ia64/include/asm/compat.h
+@@ -198,7 +198,7 @@ ptr_to_compat(void __user *uptr)
+ }
+
+ static __inline__ void __user *
+-compat_alloc_user_space (long len)
++arch_compat_alloc_user_space (long len)
+ {
+ 	struct pt_regs *regs = task_pt_regs(current);
+ 	return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len);
+--- a/arch/powerpc/include/asm/compat.h
++++ b/arch/powerpc/include/asm/compat.h
+@@ -133,7 +133,7 @@ static inline compat_uptr_t ptr_to_compa
+ 	return (u32)(unsigned long)uptr;
+ }
+
+-static inline void __user *compat_alloc_user_space(long len)
++static inline void __user *arch_compat_alloc_user_space(long len)
+ {
+ 	struct pt_regs *regs = current->thread.regs;
+ 	unsigned long usp = regs->gpr[1];
+--- a/arch/s390/include/asm/compat.h
++++ b/arch/s390/include/asm/compat.h
+@@ -163,7 +163,7 @@ static inline compat_uptr_t ptr_to_compa
+ 	return (u32)(unsigned long)uptr;
+ }
+
+-static inline void __user *compat_alloc_user_space(long len)
++static inline void __user *arch_compat_alloc_user_space(long len)
+ {
+ 	unsigned long stack;
+
+--- a/arch/sparc/include/asm/compat.h
++++ b/arch/sparc/include/asm/compat.h
+@@ -166,7 +166,7 @@ static inline compat_uptr_t ptr_to_compa
+ 	return (u32)(unsigned long)uptr;
+ }
+
+-static inline void __user *compat_alloc_user_space(long len)
++static inline void __user *arch_compat_alloc_user_space(long len)
+ {
+ 	struct pt_regs *regs = current_thread_info()->kregs;
+ 	unsigned long usp = regs->u_regs[UREG_I6];
+--- a/include/asm-mips/compat.h
++++ b/include/asm-mips/compat.h
+@@ -145,7 +145,7 @@ static inline compat_uptr_t ptr_to_compa
+ 	return (u32)(unsigned long)uptr;
+ }
+
+-static inline void __user *compat_alloc_user_space(long len)
++static inline void __user *arch_compat_alloc_user_space(long len)
+ {
+ 	struct pt_regs *regs = (struct pt_regs *)
+ 		((unsigned long) current_thread_info() + THREAD_SIZE - 32) - 1;
+--- a/include/asm-parisc/compat.h
++++ b/include/asm-parisc/compat.h
+@@ -146,7 +146,7 @@ static inline compat_uptr_t ptr_to_compa
+ 	return (u32)(unsigned long)uptr;
+ }
+
+-static __inline__ void __user *compat_alloc_user_space(long len)
++static __inline__ void __user *arch_compat_alloc_user_space(long len)
+ {
+ 	struct pt_regs *regs = &current->thread.regs;
+ 	return (void __user *)regs->gr[30];
+--- a/include/asm-x86/compat.h
++++ b/include/asm-x86/compat.h
+@@ -204,7 +204,7 @@ static inline compat_uptr_t ptr_to_compa
+ 	return (u32)(unsigned long)uptr;
+ }
+
+-static inline void __user *compat_alloc_user_space(long len)
++static inline void __user *arch_compat_alloc_user_space(long len)
+ {
+ 	struct pt_regs *regs = task_pt_regs(current);
+ 	return (void __user *)regs->sp - len;
+--- a/include/linux/compat.h
++++ b/include/linux/compat.h
+@@ -291,5 +291,7 @@ asmlinkage long compat_sys_newfstatat(un
+ asmlinkage long compat_sys_openat(unsigned int dfd, const char __user *filename,
+ 				   int flags, int mode);
+
++extern void __user *compat_alloc_user_space(unsigned long len);
++
+ #endif /* CONFIG_COMPAT */
+ #endif /* _LINUX_COMPAT_H */
+--- a/kernel/compat.c
++++ b/kernel/compat.c
+@@ -1081,3 +1081,24 @@ compat_sys_sysinfo(struct compat_sysinfo
+
+ 	return 0;
+ }
++
++/*
++ * Allocate user-space memory for the duration of a single system call,
++ * in order to marshall parameters inside a compat thunk.
++ */
++void __user *compat_alloc_user_space(unsigned long len)
++{
++	void __user *ptr;
++
++	/* If len would occupy more than half of the entire compat space... */
++	if (unlikely(len > (((compat_uptr_t)~0) >> 1)))
++		return NULL;
++
++	ptr = arch_compat_alloc_user_space(len);
++
++	if (unlikely(!access_ok(VERIFY_WRITE, ptr, len)))
++		return NULL;
++
++	return ptr;
++}
++EXPORT_SYMBOL_GPL(compat_alloc_user_space);
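
As background, a minimal sketch of the compat-thunk pattern the commit message
refers to; struct native_args and compat_marshal_args() are hypothetical
stand-ins, while compat_alloc_user_space(), compat_ptr() and put_user() are the
real APIs involved:

    #include <linux/compat.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    struct native_args {                    /* hypothetical 64-bit layout */
            u64 addr;
            u32 len;
    };

    static long compat_marshal_args(compat_uptr_t addr32, u32 len)
    {
            struct native_args __user *p;

            p = compat_alloc_user_space(sizeof(*p));

            /*
             * With this change the helper itself performs the length sanity
             * check and access_ok(), returning NULL on failure, so a caller
             * that checks for NULL no longer needs its own access_ok().
             */
            if (!p ||
                put_user((u64)(unsigned long)compat_ptr(addr32), &p->addr) ||
                put_user(len, &p->len))
                    return -EFAULT;

            /* ...hand p to the native 64-bit system call here... */
            return 0;
    }
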
diff --git a/queue-2.6.27/series b/queue-2.6.27/series
index 8eb37cb5334..a36e6f51b8e 100644
--- a/queue-2.6.27/series
+++ b/queue-2.6.27/series
@@ -2,3 +2,7 @@ alsa-seq-oss-fix-double-free-at-error-path-of-snd_seq_oss_open.patch
 ath9k_hw-fix-parsing-of-ht40-5-ghz-ctls.patch
 tracing-do-not-allow-llseek-to-set_ftrace_filter.patch
 irda-off-by-one.patch
+bounce-call-flush_dcache_page-after-bounce_copy_vec.patch
+x86-64-compat-test-rax-for-the-syscall-number-not-eax.patch
+compat-make-compat_alloc_user_space-incorporate-the-access_ok.patch
+x86-64-compat-retruncate-rax-after-ia32-syscall-entry-tracing.patch
diff --git a/queue-2.6.27/x86-64-compat-retruncate-rax-after-ia32-syscall-entry-tracing.patch b/queue-2.6.27/x86-64-compat-retruncate-rax-after-ia32-syscall-entry-tracing.patch
new file mode 100644
index 00000000000..f00ba7e9e02
--- /dev/null
+++ b/queue-2.6.27/x86-64-compat-retruncate-rax-after-ia32-syscall-entry-tracing.patch
@@ -0,0 +1,50 @@
+From eefdca043e8391dcd719711716492063030b55ac Mon Sep 17 00:00:00 2001
+From: Roland McGrath
+Date: Tue, 14 Sep 2010 12:22:58 -0700
+Subject: x86-64, compat: Retruncate rax after ia32 syscall entry tracing
+
+From: Roland McGrath
+
+commit eefdca043e8391dcd719711716492063030b55ac upstream.
+
+In commit d4d6715, we reopened an old hole for a 64-bit ptracer touching a
+32-bit tracee in system call entry.  A %rax value set via ptrace at the
+entry tracing stop gets used whole as a 32-bit syscall number, while we
+only check the low 32 bits for validity.
+
+Fix it by truncating %rax back to 32 bits after syscall_trace_enter,
+in addition to testing the full 64 bits as has already been added.
+
+Reported-by: Ben Hawkes
+Signed-off-by: Roland McGrath
+Signed-off-by: H. Peter Anvin
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/x86/ia32/ia32entry.S |    8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/ia32/ia32entry.S
++++ b/arch/x86/ia32/ia32entry.S
+@@ -50,7 +50,12 @@
+ 	/*
+ 	 * Reload arg registers from stack in case ptrace changed them.
+ 	 * We don't reload %eax because syscall_trace_enter() returned
+-	 * the value it wants us to use in the table lookup.
++	 * the %rax value we should see.  Instead, we just truncate that
++	 * value to 32 bits again as we did on entry from user mode.
++	 * If it's a new value set by user_regset during entry tracing,
++	 * this matches the normal truncation of the user-mode value.
++	 * If it's -1 to make us punt the syscall, then (u32)-1 is still
++	 * an appropriately invalid value.
+ 	 */
+ 	.macro LOAD_ARGS32 offset, _r9=0
+ 	.if \_r9
+@@ -60,6 +65,7 @@
+ 	movl \offset+48(%rsp),%edx
+ 	movl \offset+56(%rsp),%esi
+ 	movl \offset+64(%rsp),%edi
++	movl %eax,%eax			/* zero extension */
+ 	.endm
+
+ 	.macro CFI_STARTPROC32 simple
diff --git a/queue-2.6.27/x86-64-compat-test-rax-for-the-syscall-number-not-eax.patch b/queue-2.6.27/x86-64-compat-test-rax-for-the-syscall-number-not-eax.patch
new file mode 100644
index 00000000000..8bf27c2078c
--- /dev/null
+++ b/queue-2.6.27/x86-64-compat-test-rax-for-the-syscall-number-not-eax.patch
@@ -0,0 +1,97 @@
+From 36d001c70d8a0144ac1d038f6876c484849a74de Mon Sep 17 00:00:00 2001
+From: H. Peter Anvin
+Date: Tue, 14 Sep 2010 12:42:41 -0700
+Subject: x86-64, compat: Test %rax for the syscall number, not %eax
+
+From: H. Peter Anvin
+
+commit 36d001c70d8a0144ac1d038f6876c484849a74de upstream.
+
+On 64 bits, we always, by necessity, jump through the system call
+table via %rax.  For 32-bit system calls, in theory the system call
+number is stored in %eax, and the code was testing %eax for a valid
+system call number.  At one point we loaded the stored value back from
+the stack to enforce zero-extension, but that was removed in checkin
+d4d67150165df8bf1cc05e532f6efca96f907cab.  An actual 32-bit process
+will not be able to introduce a non-zero-extended number, but it can
+happen via ptrace.
+
+Instead of re-introducing the zero-extension, test what we are
+actually going to use, i.e. %rax.  This only adds a handful of REX
+prefixes to the code.
+
+Reported-by: Ben Hawkes
+Signed-off-by: H. Peter Anvin
+Cc: Roland McGrath
+Cc: Andrew Morton
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/x86/ia32/ia32entry.S |   14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+--- a/arch/x86/ia32/ia32entry.S
++++ b/arch/x86/ia32/ia32entry.S
+@@ -153,7 +153,7 @@ ENTRY(ia32_sysenter_target)
+ 	testl  $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
+ 	CFI_REMEMBER_STATE
+ 	jnz  sysenter_tracesys
+-	cmpl	$(IA32_NR_syscalls-1),%eax
++	cmpq	$(IA32_NR_syscalls-1),%rax
+ 	ja	ia32_badsys
+ sysenter_do_call:
+ 	IA32_ARG_FIXUP
+@@ -195,7 +195,7 @@ sysexit_from_sys_call:
+ 	movl $AUDIT_ARCH_I386,%edi	/* 1st arg: audit arch */
+ 	call audit_syscall_entry
+ 	movl RAX-ARGOFFSET(%rsp),%eax	/* reload syscall number */
+-	cmpl $(IA32_NR_syscalls-1),%eax
++	cmpq $(IA32_NR_syscalls-1),%rax
+ 	ja ia32_badsys
+ 	movl %ebx,%edi			/* reload 1st syscall arg */
+ 	movl RCX-ARGOFFSET(%rsp),%esi	/* reload 2nd syscall arg */
+@@ -248,7 +248,7 @@ sysenter_tracesys:
+ 	call	syscall_trace_enter
+ 	LOAD_ARGS32 ARGOFFSET  /* reload args from stack in case ptrace changed it */
+ 	RESTORE_REST
+-	cmpl	$(IA32_NR_syscalls-1),%eax
++	cmpq	$(IA32_NR_syscalls-1),%rax
+ 	ja	int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
+ 	jmp	sysenter_do_call
+ 	CFI_ENDPROC
+@@ -314,7 +314,7 @@ ENTRY(ia32_cstar_target)
+ 	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
+ 	CFI_REMEMBER_STATE
+ 	jnz   cstar_tracesys
+-	cmpl $IA32_NR_syscalls-1,%eax
++	cmpq $IA32_NR_syscalls-1,%rax
+ 	ja  ia32_badsys
+ cstar_do_call:
+ 	IA32_ARG_FIXUP 1
+@@ -367,7 +367,7 @@ cstar_tracesys:
+ 	LOAD_ARGS32 ARGOFFSET, 1  /* reload args from stack in case ptrace changed it */
+ 	RESTORE_REST
+ 	xchgl %ebp,%r9d
+-	cmpl $(IA32_NR_syscalls-1),%eax
++	cmpq $(IA32_NR_syscalls-1),%rax
+ 	ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
+ 	jmp cstar_do_call
+ END(ia32_cstar_target)
+@@ -425,7 +425,7 @@ ENTRY(ia32_syscall)
+ 	orl   $TS_COMPAT,TI_status(%r10)
+ 	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
+ 	jnz ia32_tracesys
+-	cmpl $(IA32_NR_syscalls-1),%eax
++	cmpq $(IA32_NR_syscalls-1),%rax
+ 	ja ia32_badsys
+ ia32_do_call:
+ 	IA32_ARG_FIXUP
+@@ -444,7 +444,7 @@ ia32_tracesys:
+ 	call syscall_trace_enter
+ 	LOAD_ARGS32 ARGOFFSET  /* reload args from stack in case ptrace changed it */
+ 	RESTORE_REST
+-	cmpl	$(IA32_NR_syscalls-1),%eax
++	cmpq	$(IA32_NR_syscalls-1),%rax
+ 	ja  int_ret_from_sys_call	/* ia32_tracesys has set RAX(%rsp) */
+ 	jmp ia32_do_call
+ END(ia32_syscall)
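
To make the ptrace scenario both x86-64 patches describe concrete, a small
user-space sketch (illustrative values only; the syscall count below is a
representative assumption, not the exact 2.6.27 figure) of why a 32-bit compare
is insufficient when the full 64-bit register indexes the syscall table:

    #include <stdio.h>
    #include <stdint.h>

    #define IA32_NR_SYSCALLS 337u   /* representative value, not the exact count */

    int main(void)
    {
            /* A 64-bit ptracer can plant a non-zero-extended syscall number. */
            uint64_t rax = 0x100000001ULL;
            uint32_t eax = (uint32_t)rax;   /* the low half looks like syscall 1 */

            printf("cmpl-style (32-bit) check passes: %d\n", eax <= IA32_NR_SYSCALLS - 1);
            printf("cmpq-style (64-bit) check passes: %d\n", rax <= IA32_NR_SYSCALLS - 1);
            printf("index actually used via %%rax:    %#llx\n", (unsigned long long)rax);
            return 0;
    }

The cmpq test above and the movl %eax,%eax retruncation in the companion patch
close this hole from both directions: the full register is validated, and the
traced value is truncated back to 32 bits before it can be used.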