git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
2.6.29 patches
author Greg Kroah-Hartman <gregkh@suse.de>
Tue, 30 Jun 2009 00:21:01 +0000 (17:21 -0700)
committer Greg Kroah-Hartman <gregkh@suse.de>
Tue, 30 Jun 2009 00:21:01 +0000 (17:21 -0700)
queue-2.6.29/md-raid5-add-missing-call-to-schedule-after-prepare_to_wait.patch [new file with mode: 0644]
queue-2.6.29/series
queue-2.6.29/vt_ioctl-fix-lock-imbalance.patch [new file with mode: 0644]
queue-2.6.29/x86-set-cpu_llc_id-on-amd-cpus.patch [new file with mode: 0644]

diff --git a/queue-2.6.29/md-raid5-add-missing-call-to-schedule-after-prepare_to_wait.patch b/queue-2.6.29/md-raid5-add-missing-call-to-schedule-after-prepare_to_wait.patch
new file mode 100644
index 0000000..f84f7d6
--- /dev/null
+++ b/queue-2.6.29/md-raid5-add-missing-call-to-schedule-after-prepare_to_wait.patch
@@ -0,0 +1,35 @@
+From 7a3ab908948b6296ee7e81d42f7c176361c51975 Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Tue, 16 Jun 2009 16:00:33 -0700
+Subject: md/raid5: add missing call to schedule() after prepare_to_wait()
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+commit 7a3ab908948b6296ee7e81d42f7c176361c51975 upstream.
+
+In the unlikely event that reshape progresses past the current request
+while it is waiting for a stripe we need to schedule() before retrying
+for 2 reasons:
+1/ Prevent list corruption from duplicated list_add() calls without
+   intervening list_del().
+2/ Give the reshape code a chance to make some progress to resolve the
+   conflict.
+
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/md/raid5.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -3465,6 +3465,7 @@ static int make_request(struct request_q
+                               spin_unlock_irq(&conf->device_lock);
+                               if (must_retry) {
+                                       release_stripe(sh);
++                                      schedule();
+                                       goto retry;
+                               }
+                       }
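
The commit message above gives two reasons the retry loop must actually block: without a schedule() between prepare_to_wait() and the retry, the task's wait-queue entry can be list_add()ed again without an intervening list_del(), and the reshape code it is waiting on never gets a chance to run. The following is a minimal userspace sketch of the same "block before retrying" idea, using a pthread condition variable in place of the kernel wait queue; the producer/consumer roles and all names are illustrative only, not taken from the raid5 code.

/*
 * Userspace analogue (hypothetical, not kernel code) of the loop fixed
 * above: a consumer must wait for a "reshape"-like producer to advance
 * before its request can proceed.  pthread_cond_wait() plays the role of
 * schedule(): it atomically drops the lock and blocks, so the producer
 * can make progress before the consumer re-checks and retries.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int progress;                    /* advanced by the producer */

static void *producer(void *arg)
{
	(void)arg;
	for (int i = 0; i < 5; i++) {
		pthread_mutex_lock(&lock);
		progress++;
		pthread_cond_broadcast(&cond);
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

static void *consumer(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (progress < 5) {
		/*
		 * Blocking here is the point: retrying without giving up
		 * the CPU (and the lock) would be the userspace version of
		 * "goto retry" with no schedule().
		 */
		pthread_cond_wait(&cond, &lock);
	}
	printf("request can proceed, progress=%d\n", progress);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t p, c;

	pthread_create(&c, NULL, consumer, NULL);
	pthread_create(&p, NULL, producer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);
	return 0;
}

Compile with "gcc -pthread"; the kernel code of course uses wait queues and schedule() rather than pthreads, but the shape of the loop is the same.
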
diff --git a/queue-2.6.29/series b/queue-2.6.29/series
index f39857591f4b72d71ac6954e50993bc99bbd852c..58ca2de7494f9cd0d3de013f6af4be1b09aff89c 100644
--- a/queue-2.6.29/series
+++ b/queue-2.6.29/series
@@ -21,3 +21,6 @@ lockdep-select-frame-pointers-on-x86.patch
 mac80211-fix-minstrel-single-rate-memory-corruption.patch
 shift-current_cred-from-__f_setown-to-f_modown.patch
 send_sigio_to_task-sanitize-the-usage-of-fown-signum.patch
+md-raid5-add-missing-call-to-schedule-after-prepare_to_wait.patch
+vt_ioctl-fix-lock-imbalance.patch
+x86-set-cpu_llc_id-on-amd-cpus.patch
diff --git a/queue-2.6.29/vt_ioctl-fix-lock-imbalance.patch b/queue-2.6.29/vt_ioctl-fix-lock-imbalance.patch
new file mode 100644
index 0000000..4c5c20a
--- /dev/null
+++ b/queue-2.6.29/vt_ioctl-fix-lock-imbalance.patch
@@ -0,0 +1,33 @@
+From a115902f67ef51fbbe83e214fb761aaa9734c1ce Mon Sep 17 00:00:00 2001
+From: Jiri Slaby <jirislaby@gmail.com>
+Date: Mon, 22 Jun 2009 18:42:18 +0100
+Subject: vt_ioctl: fix lock imbalance
+
+From: Jiri Slaby <jirislaby@gmail.com>
+
+commit a115902f67ef51fbbe83e214fb761aaa9734c1ce upstream.
+
+Don't return from switch/case directly in vt_ioctl. Set ret and break
+instead so that we unlock BKL.
+
+Signed-off-by: Jiri Slaby <jirislaby@gmail.com>
+Signed-off-by: Alan Cox <alan@linux.intel.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/char/vt_ioctl.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/char/vt_ioctl.c
++++ b/drivers/char/vt_ioctl.c
+@@ -396,7 +396,8 @@ int vt_ioctl(struct tty_struct *tty, str
+       kbd = kbd_table + console;
+       switch (cmd) {
+       case TIOCLINUX:
+-              return tioclinux(tty, arg);
++              ret = tioclinux(tty, arg);
++              break;
+       case KIOCSOUND:
+               if (!perm)
+                       goto eperm;
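
The pattern being fixed is a classic single-exit locking bug: vt_ioctl() takes the BKL on entry and is expected to release it at one place on the way out, so returning straight out of a case leaks the lock, while assigning ret and breaking lets control reach the common unlock. A hypothetical userspace sketch of the same structure, with a pthread mutex standing in for the BKL and made-up command names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

enum { CMD_QUERY, CMD_SOUND };

static int do_ioctl(int cmd)
{
	int ret = 0;

	pthread_mutex_lock(&big_lock);		/* lock taken on entry        */
	switch (cmd) {
	case CMD_QUERY:
		/* BAD:  return 42;  -- would skip the unlock below          */
		ret = 42;			/* GOOD: record the result...  */
		break;				/* ...and fall out to unlock   */
	case CMD_SOUND:
		ret = 0;
		break;
	default:
		ret = -22;			/* -EINVAL-style error code    */
		break;
	}
	pthread_mutex_unlock(&big_lock);	/* single unlock on exit       */
	return ret;
}

int main(void)
{
	printf("CMD_QUERY -> %d\n", do_ioctl(CMD_QUERY));
	printf("CMD_SOUND -> %d\n", do_ioctl(CMD_SOUND));
	return 0;
}
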
diff --git a/queue-2.6.29/x86-set-cpu_llc_id-on-amd-cpus.patch b/queue-2.6.29/x86-set-cpu_llc_id-on-amd-cpus.patch
new file mode 100644
index 0000000..6d5a2fd
--- /dev/null
+++ b/queue-2.6.29/x86-set-cpu_llc_id-on-amd-cpus.patch
@@ -0,0 +1,76 @@
+From 99bd0c0fc4b04da54cb311953ef9489931c19c63 Mon Sep 17 00:00:00 2001
+From: Andreas Herrmann <andreas.herrmann3@amd.com>
+Date: Fri, 19 Jun 2009 10:59:09 +0200
+Subject: x86: Set cpu_llc_id on AMD CPUs
+
+From: Andreas Herrmann <andreas.herrmann3@amd.com>
+
+commit 99bd0c0fc4b04da54cb311953ef9489931c19c63 upstream.
+
+This counts when building sched domains in case NUMA information
+is not available.
+
+( See cpu_coregroup_mask() which uses llc_shared_map which in turn is
+  created based on cpu_llc_id. )
+
+Currently Linux builds domains as follows:
+(example from a dual socket quad-core system)
+
+ CPU0 attaching sched-domain:
+  domain 0: span 0-7 level CPU
+   groups: 0 1 2 3 4 5 6 7
+
+  ...
+
+ CPU7 attaching sched-domain:
+  domain 0: span 0-7 level CPU
+   groups: 7 0 1 2 3 4 5 6
+
+Ever since that is borked for multi-core AMD CPU systems.
+This patch fixes that and now we get a proper:
+
+ CPU0 attaching sched-domain:
+  domain 0: span 0-3 level MC
+   groups: 0 1 2 3
+   domain 1: span 0-7 level CPU
+    groups: 0-3 4-7
+
+  ...
+
+ CPU7 attaching sched-domain:
+  domain 0: span 4-7 level MC
+   groups: 7 4 5 6
+   domain 1: span 0-7 level CPU
+    groups: 4-7 0-3
+
+This allows scheduler to assign tasks to cores on different sockets
+(i.e. that don't share last level cache) for performance reasons.
+
+Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
+LKML-Reference: <20090619085909.GJ5218@alberich.amd.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/cpu/amd.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -207,13 +207,15 @@ static void __cpuinit amd_detect_cmp(str
+ {
+ #ifdef CONFIG_X86_HT
+       unsigned bits;
++      int cpu = smp_processor_id();
+       bits = c->x86_coreid_bits;
+-
+       /* Low order bits define the core id (index of core in socket) */
+       c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
+       /* Convert the initial APIC ID into the socket ID */
+       c->phys_proc_id = c->initial_apicid >> bits;
++      /* use socket ID also for last level cache */
++      per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
+ #endif
+ }
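
The hunk above derives the core id from the low x86_coreid_bits bits of the initial APIC ID and the socket id from the remaining bits, and now also records the socket id as cpu_llc_id so that llc_shared_map (and hence cpu_coregroup_mask()) groups cores per socket when NUMA information is absent. Below is a standalone sketch of that same bit arithmetic, not kernel code: the APIC IDs are made up and x86_coreid_bits is assumed to be 2, matching the dual-socket quad-core example in the commit message.

#include <stdio.h>

int main(void)
{
	unsigned int coreid_bits = 2;	/* log2(cores per socket), assumed */
	unsigned int apicid;

	for (apicid = 0; apicid < 8; apicid++) {
		/* low order bits: index of the core within its socket */
		unsigned int core_id = apicid & ((1u << coreid_bits) - 1);
		/* remaining bits: the socket id */
		unsigned int sock_id = apicid >> coreid_bits;

		/* the patch also reuses the socket id as the LLC id */
		printf("apicid %u: core_id=%u phys_proc_id=%u llc_id=%u\n",
		       apicid, core_id, sock_id, sock_id);
	}
	return 0;
}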