more 2.6.30 patches
author Greg Kroah-Hartman <gregkh@suse.de>
Tue, 30 Jun 2009 00:19:45 +0000 (17:19 -0700)
committer Greg Kroah-Hartman <gregkh@suse.de>
Tue, 30 Jun 2009 00:19:45 +0000 (17:19 -0700)
queue-2.6.30/cifs-fix-fh_mutex-locking-in-cifs_reopen_file.patch [new file with mode: 0644]
queue-2.6.30/md-raid5-add-missing-call-to-schedule-after-prepare_to_wait.patch [new file with mode: 0644]
queue-2.6.30/series
queue-2.6.30/tracing-urgent-fix-unbalanced-ftrace_start_up.patch [new file with mode: 0644]
queue-2.6.30/vt_ioctl-fix-lock-imbalance.patch [new file with mode: 0644]
queue-2.6.30/x86-fix-non-lazy-gs-handling-in-sys_vm86.patch [new file with mode: 0644]
queue-2.6.30/x86-set-cpu_llc_id-on-amd-cpus.patch [new file with mode: 0644]

diff --git a/queue-2.6.30/cifs-fix-fh_mutex-locking-in-cifs_reopen_file.patch b/queue-2.6.30/cifs-fix-fh_mutex-locking-in-cifs_reopen_file.patch
new file mode 100644 (file)
index 0000000..2e8ad8f
--- /dev/null
@@ -0,0 +1,63 @@
+From f0a71eb820596bd8f6abf64beb4cb181edaa2341 Mon Sep 17 00:00:00 2001
+From: Jeff Layton <jlayton@redhat.com>
+Date: Sat, 27 Jun 2009 07:04:55 -0400
+Subject: cifs: fix fh_mutex locking in cifs_reopen_file
+
+From: Jeff Layton <jlayton@redhat.com>
+
+commit f0a71eb820596bd8f6abf64beb4cb181edaa2341 upstream.
+
+Fixes a regression caused by commit a6ce4932fbdbcd8f8e8c6df76812014351c32892
+
+When this lock was converted to a mutex, the locks were turned into
+unlocks and vice-versa.
+
+Signed-off-by: Jeff Layton <jlayton@redhat.com>
+Acked-by: Shirish Pargaonkar <shirishp@us.ibm.com>
+Signed-off-by: Steve French <sfrench@us.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/cifs/file.c |   10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -491,9 +491,9 @@ static int cifs_reopen_file(struct file 
+               return -EBADF;
+       xid = GetXid();
+-      mutex_unlock(&pCifsFile->fh_mutex);
++      mutex_lock(&pCifsFile->fh_mutex);
+       if (!pCifsFile->invalidHandle) {
+-              mutex_lock(&pCifsFile->fh_mutex);
++              mutex_unlock(&pCifsFile->fh_mutex);
+               FreeXid(xid);
+               return 0;
+       }
+@@ -524,7 +524,7 @@ static int cifs_reopen_file(struct file 
+       if (full_path == NULL) {
+               rc = -ENOMEM;
+ reopen_error_exit:
+-              mutex_lock(&pCifsFile->fh_mutex);
++              mutex_unlock(&pCifsFile->fh_mutex);
+               FreeXid(xid);
+               return rc;
+       }
+@@ -566,14 +566,14 @@ reopen_error_exit:
+                        cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
+                               CIFS_MOUNT_MAP_SPECIAL_CHR);
+       if (rc) {
+-              mutex_lock(&pCifsFile->fh_mutex);
++              mutex_unlock(&pCifsFile->fh_mutex);
+               cFYI(1, ("cifs_open returned 0x%x", rc));
+               cFYI(1, ("oplock: %d", oplock));
+       } else {
+ reopen_success:
+               pCifsFile->netfid = netfid;
+               pCifsFile->invalidHandle = false;
+-              mutex_lock(&pCifsFile->fh_mutex);
++              mutex_unlock(&pCifsFile->fh_mutex);
+               pCifsInode = CIFS_I(inode);
+               if (pCifsInode) {
+                       if (can_flush) {
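
The fix restores the standard locking discipline: take fh_mutex on entry and release it on every exit path. Here is a minimal userspace sketch of that invariant, with pthreads standing in for the kernel mutex API (reopen_file and handle_valid are illustrative names, not the CIFS identifiers):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t fh_mutex = PTHREAD_MUTEX_INITIALIZER;
    static bool handle_valid;

    /* Lock on entry, unlock on every exit path -- the pre-patch
     * code had each of these calls inverted. */
    static int reopen_file(void)
    {
            pthread_mutex_lock(&fh_mutex);           /* bug: was unlock */

            if (handle_valid) {
                    pthread_mutex_unlock(&fh_mutex); /* bug: was lock */
                    return 0;
            }

            /* ... reopen the remote handle ... */
            handle_valid = true;

            pthread_mutex_unlock(&fh_mutex);         /* bug: was lock */
            return 0;
    }
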
diff --git a/queue-2.6.30/md-raid5-add-missing-call-to-schedule-after-prepare_to_wait.patch b/queue-2.6.30/md-raid5-add-missing-call-to-schedule-after-prepare_to_wait.patch
new file mode 100644 (file)
index 0000000..265f0b1
--- /dev/null
@@ -0,0 +1,35 @@
+From 7a3ab908948b6296ee7e81d42f7c176361c51975 Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Tue, 16 Jun 2009 16:00:33 -0700
+Subject: md/raid5: add missing call to schedule() after prepare_to_wait()
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+commit 7a3ab908948b6296ee7e81d42f7c176361c51975 upstream.
+
+In the unlikely event that reshape progresses past the current request
+while it is waiting for a stripe, we need to schedule() before retrying
+for two reasons:
+1/ Prevent list corruption from duplicated list_add() calls without
+   intervening list_del().
+2/ Give the reshape code a chance to make some progress to resolve the
+   conflict.
+
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/md/raid5.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -3696,6 +3696,7 @@ static int make_request(struct request_q
+                               spin_unlock_irq(&conf->device_lock);
+                               if (must_retry) {
+                                       release_stripe(sh);
++                                      schedule();
+                                       goto retry;
+                               }
+                       }
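
The added schedule() restores the canonical open-coded wait loop. A generic kernel-style sketch of that idiom follows (illustrative only, not the raid5 code; wq and condition are stand-ins):

    #include <linux/wait.h>
    #include <linux/sched.h>

    static DECLARE_WAIT_QUEUE_HEAD(wq);
    static int condition;

    /* prepare_to_wait() queues the wait entry and sets the task
     * state; the task must then schedule() before looping back to
     * prepare_to_wait(), which is the step the raid5 retry path
     * skipped. */
    static void wait_for_condition(void)
    {
            DEFINE_WAIT(wait);

            for (;;) {
                    prepare_to_wait(&wq, &wait, TASK_UNINTERRUPTIBLE);
                    if (condition)
                            break;
                    schedule();     /* the call this patch adds */
            }
            finish_wait(&wq, &wait);
    }
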
diff --git a/queue-2.6.30/series b/queue-2.6.30/series
index 32ca8c4384126ca602fc13d388844aaa7331d7bf..308760c1bc6a7626a4f9ab14fc22c71adf39ce4b 100644 (file)
--- a/queue-2.6.30/series
@@ -73,3 +73,9 @@ ath9k-fix-pci-fatal-interrupts-by-restoring-retry_timeout-disabling.patch
 shift-current_cred-from-__f_setown-to-f_modown.patch
 send_sigio_to_task-sanitize-the-usage-of-fown-signum.patch
 crypto-aes-ni-fix-cbc-mode-iv-saving.patch
+md-raid5-add-missing-call-to-schedule-after-prepare_to_wait.patch
+tracing-urgent-fix-unbalanced-ftrace_start_up.patch
+cifs-fix-fh_mutex-locking-in-cifs_reopen_file.patch
+vt_ioctl-fix-lock-imbalance.patch
+x86-fix-non-lazy-gs-handling-in-sys_vm86.patch
+x86-set-cpu_llc_id-on-amd-cpus.patch
diff --git a/queue-2.6.30/tracing-urgent-fix-unbalanced-ftrace_start_up.patch b/queue-2.6.30/tracing-urgent-fix-unbalanced-ftrace_start_up.patch
new file mode 100644 (file)
index 0000000..cfb5413
--- /dev/null
@@ -0,0 +1,85 @@
+From c85a17e22695969aa24a7ffa40cf26d6e6fcfd50 Mon Sep 17 00:00:00 2001
+From: Frederic Weisbecker <fweisbec@gmail.com>
+Date: Sat, 20 Jun 2009 05:45:14 +0200
+Subject: tracing/urgent: fix unbalanced ftrace_start_up
+
+From: Frederic Weisbecker <fweisbec@gmail.com>
+
+commit c85a17e22695969aa24a7ffa40cf26d6e6fcfd50 upstream.
+
+Perfcounter reports the following stats for system-wide
+profiling:
+
+ #
+ # (2364 samples)
+ #
+ # Overhead  Symbol
+ # ........  ......
+ #
+    15.40%  [k] mwait_idle_with_hints
+     8.29%  [k] read_hpet
+     5.75%  [k] ftrace_caller
+     3.60%  [k] ftrace_call
+     [...]
+
+This snapshot has been taken while neither the function tracer nor
+the function graph tracer was running.
+With dynamic ftrace, such results indicate broken ftrace behaviour,
+because all calls to ftrace_caller or ftrace_graph_caller (the patched
+mcount call sites) are supposed to be patched into nops when neither
+of those tracers is running.
+
+The problem occurs after the first run of the function tracer. Once we
+launch it a second time, the callsites will never be nopped back,
+unless you set custom filters.
+For example, this happens during the self-tests at boot time.
+The function tracer selftest runs, and then the dynamic tracing is
+tested too. After that, the callsites are left un-nopped.
+
+This is because the reset callback of the function tracer tries to
+unregister two ftrace callbacks at once: the common function tracer
+and the function tracer with stack backtrace, regardless of which
+one is currently in use.
+This unbalances the ftrace_start_up count, which is expected to be
+zero when the last ftrace callback is unregistered. When it reaches
+zero, FTRACE_DISABLE_CALLS is set on the next ftrace command,
+triggering the patching into nops. But once the count is unbalanced,
+i.e. drops below zero, the kernel functions will never be nopped
+back, even though they are patched again on every further
+function tracer run.
+
+Note that ftrace_call and ftrace_graph_call are still patched back
+to ftrace_stub in the off case, but not the callers of ftrace_call
+and ftrace_graph_caller. This means that tracing is indeed
+deactivated, but every kernel function still wastes a call into the stub.
+
+This patch simply unregisters the right ftrace_ops for the function
+tracer in its reset callback and ignores the one that is not
+registered, fixing the unbalance. The problem also exists
+in 2.6.30.
+
+Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/trace/trace_functions.c |    8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/kernel/trace/trace_functions.c
++++ b/kernel/trace/trace_functions.c
+@@ -193,9 +193,11 @@ static void tracing_start_function_trace
+ static void tracing_stop_function_trace(void)
+ {
+       ftrace_function_enabled = 0;
+-      /* OK if they are not registered */
+-      unregister_ftrace_function(&trace_stack_ops);
+-      unregister_ftrace_function(&trace_ops);
++
++      if (func_flags.val & TRACE_FUNC_OPT_STACK)
++              unregister_ftrace_function(&trace_stack_ops);
++      else
++              unregister_ftrace_function(&trace_ops);
+ }
+ static int func_set_flag(u32 old_flags, u32 bit, int set)
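
To see why the double unregister matters, here is a toy userspace model of the ftrace_start_up accounting (illustrative only; the real bookkeeping lives in kernel/trace/ftrace.c). Callsites are patched to nops only when the count returns exactly to zero, so one extra decrement makes zero unreachable on every later run:

    #include <stdio.h>

    static int start_up;    /* models the kernel's ftrace_start_up */

    static void reg(void)   { start_up++; }
    static void unreg(void) { if (--start_up == 0) puts("nop the callsites"); }

    int main(void)
    {
            reg();      /* run #1: register trace_ops                 */
            unreg();    /* reset: unregister trace_ops -> prints      */
            unreg();    /* reset also drops trace_stack_ops -> -1     */

            reg();      /* run #2: count climbs back to 0 ...         */
            unreg();    /* ... then falls to -1: zero is never hit    */
            return 0;
    }
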
diff --git a/queue-2.6.30/vt_ioctl-fix-lock-imbalance.patch b/queue-2.6.30/vt_ioctl-fix-lock-imbalance.patch
new file mode 100644 (file)
index 0000000..4c5c20a
--- /dev/null
@@ -0,0 +1,33 @@
+From a115902f67ef51fbbe83e214fb761aaa9734c1ce Mon Sep 17 00:00:00 2001
+From: Jiri Slaby <jirislaby@gmail.com>
+Date: Mon, 22 Jun 2009 18:42:18 +0100
+Subject: vt_ioctl: fix lock imbalance
+
+From: Jiri Slaby <jirislaby@gmail.com>
+
+commit a115902f67ef51fbbe83e214fb761aaa9734c1ce upstream.
+
+Don't return from switch/case directly in vt_ioctl. Set ret and break
+instead so that we unlock the BKL.
+
+Signed-off-by: Jiri Slaby <jirislaby@gmail.com>
+Signed-off-by: Alan Cox <alan@linux.intel.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/char/vt_ioctl.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/char/vt_ioctl.c
++++ b/drivers/char/vt_ioctl.c
+@@ -396,7 +396,8 @@ int vt_ioctl(struct tty_struct *tty, str
+       kbd = kbd_table + console;
+       switch (cmd) {
+       case TIOCLINUX:
+-              return tioclinux(tty, arg);
++              ret = tioclinux(tty, arg);
++              break;
+       case KIOCSOUND:
+               if (!perm)
+                       goto eperm;
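
The bug pattern, reduced to a userspace sketch (a pthread mutex standing in for the BKL; do_ioctl and handle_cmd are made-up names): the function unlocks only at its single exit, so a bare return from inside the switch leaks the lock.

    #include <pthread.h>

    static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

    static int handle_cmd(void) { return 0; }

    static int do_ioctl(int cmd)
    {
            int ret = 0;

            pthread_mutex_lock(&big_lock);
            switch (cmd) {
            case 1:
                    ret = handle_cmd();   /* was: return handle_cmd(); */
                    break;
            default:
                    ret = -1;
                    break;
            }
            pthread_mutex_unlock(&big_lock);  /* skipped by a bare return */
            return ret;
    }
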
diff --git a/queue-2.6.30/x86-fix-non-lazy-gs-handling-in-sys_vm86.patch b/queue-2.6.30/x86-fix-non-lazy-gs-handling-in-sys_vm86.patch
new file mode 100644 (file)
index 0000000..f33d705
--- /dev/null
@@ -0,0 +1,53 @@
+From 3aa6b186f86c5d06d6d92d14311ffed51f091f40 Mon Sep 17 00:00:00 2001
+From: Lubomir Rintel <lkundrak@v3.sk>
+Date: Sun, 7 Jun 2009 16:23:48 +0200
+Subject: x86: Fix non-lazy GS handling in sys_vm86()
+
+From: Lubomir Rintel <lkundrak@v3.sk>
+
+commit 3aa6b186f86c5d06d6d92d14311ffed51f091f40 upstream.
+
+This fixes a stack corruption panic or null dereference oops
+due to a bad GS in resume_userspace() when returning from
+sys_vm86() and calling lockdep_sys_exit().
+
+This is only a problem when CONFIG_LOCKDEP and
+CONFIG_CC_STACKPROTECTOR are enabled.
+
+Signed-off-by: Lubomir Rintel <lkundrak@v3.sk>
+Cc: H. Peter Anvin <hpa@zytor.com>
+LKML-Reference: <1244384628.2323.4.camel@bimbo>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Cc: Steven Noonan <steven@uplinklabs.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/vm86_32.c |    9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/kernel/vm86_32.c
++++ b/arch/x86/kernel/vm86_32.c
+@@ -287,10 +287,9 @@ static void do_sys_vm86(struct kernel_vm
+       info->regs.pt.ds = 0;
+       info->regs.pt.es = 0;
+       info->regs.pt.fs = 0;
+-
+-/* we are clearing gs later just before "jmp resume_userspace",
+- * because it is not saved/restored.
+- */
++#ifndef CONFIG_X86_32_LAZY_GS
++      info->regs.pt.gs = 0;
++#endif
+ /*
+  * The flags register is also special: we cannot trust that the user
+@@ -343,7 +342,9 @@ static void do_sys_vm86(struct kernel_vm
+       __asm__ __volatile__(
+               "movl %0,%%esp\n\t"
+               "movl %1,%%ebp\n\t"
++#ifdef CONFIG_X86_32_LAZY_GS
+               "mov  %2, %%gs\n\t"
++#endif
+               "jmp resume_userspace"
+               : /* no outputs */
+               :"r" (&info->regs), "r" (task_thread_info(tsk)), "r" (0));
diff --git a/queue-2.6.30/x86-set-cpu_llc_id-on-amd-cpus.patch b/queue-2.6.30/x86-set-cpu_llc_id-on-amd-cpus.patch
new file mode 100644 (file)
index 0000000..108740d
--- /dev/null
@@ -0,0 +1,76 @@
+From 99bd0c0fc4b04da54cb311953ef9489931c19c63 Mon Sep 17 00:00:00 2001
+From: Andreas Herrmann <andreas.herrmann3@amd.com>
+Date: Fri, 19 Jun 2009 10:59:09 +0200
+Subject: x86: Set cpu_llc_id on AMD CPUs
+
+From: Andreas Herrmann <andreas.herrmann3@amd.com>
+
+commit 99bd0c0fc4b04da54cb311953ef9489931c19c63 upstream.
+
+This matters when building sched domains in case NUMA information
+is not available.
+
+( See cpu_coregroup_mask() which uses llc_shared_map which in turn is
+  created based on cpu_llc_id. )
+
+Currently Linux builds domains as follows:
+(example from a dual socket quad-core system)
+
+ CPU0 attaching sched-domain:
+  domain 0: span 0-7 level CPU
+   groups: 0 1 2 3 4 5 6 7
+
+  ...
+
+ CPU7 attaching sched-domain:
+  domain 0: span 0-7 level CPU
+   groups: 7 0 1 2 3 4 5 6
+
+Ever since, this has been broken for multi-core AMD CPU systems.
+This patch fixes it, and now we get the proper:
+
+ CPU0 attaching sched-domain:
+  domain 0: span 0-3 level MC
+   groups: 0 1 2 3
+   domain 1: span 0-7 level CPU
+    groups: 0-3 4-7
+
+  ...
+
+ CPU7 attaching sched-domain:
+  domain 0: span 4-7 level MC
+   groups: 7 4 5 6
+   domain 1: span 0-7 level CPU
+    groups: 4-7 0-3
+
+This allows the scheduler to assign tasks to cores on different sockets
+(i.e. cores that don't share a last-level cache) for performance reasons.
+
+Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
+LKML-Reference: <20090619085909.GJ5218@alberich.amd.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/cpu/amd.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -258,13 +258,15 @@ static void __cpuinit amd_detect_cmp(str
+ {
+ #ifdef CONFIG_X86_HT
+       unsigned bits;
++      int cpu = smp_processor_id();
+       bits = c->x86_coreid_bits;
+-
+       /* Low order bits define the core id (index of core in socket) */
+       c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
+       /* Convert the initial APIC ID into the socket ID */
+       c->phys_proc_id = c->initial_apicid >> bits;
++      /* use socket ID also for last level cache */
++      per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
+ #endif
+ }
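
The APIC-ID arithmetic is easy to check in isolation. A small userspace sketch (toy values for a dual-socket quad-core box; not kernel code) decomposes the initial APIC ID the way amd_detect_cmp() does, reusing the socket ID as the last-level-cache ID as the patch now does:

    #include <stdio.h>

    int main(void)
    {
            unsigned coreid_bits = 2;   /* log2(4 cores per socket) */

            for (unsigned apicid = 0; apicid < 8; apicid++) {
                    /* low bits: index of the core within its socket */
                    unsigned core_id = apicid & ((1u << coreid_bits) - 1);
                    /* remaining bits: socket (physical package) ID */
                    unsigned phys_proc_id = apicid >> coreid_bits;
                    /* the patch: socket ID doubles as the LLC ID */
                    unsigned llc_id = phys_proc_id;

                    printf("apicid %u -> core %u, socket %u, llc %u\n",
                           apicid, core_id, phys_proc_id, llc_id);
            }
            return 0;
    }
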