--- /dev/null
+From 054b9108e01ef27e2e6b32b4226abb6024626f06 Mon Sep 17 00:00:00 2001
+From: Kirill Korotaev <dev@openvz.org>
+Date: Sun, 10 Dec 2006 02:20:11 -0800
+Subject: move_task_off_dead_cpu() should be called with disabled ints
+
+move_task_off_dead_cpu() requires interrupts to be disabled, while
+migrate_dead() calls it with interrupts enabled.  Add appropriate
+comments to both functions and add BUG_ON(!irqs_disabled()) checks to
+double_rq_lock() and double_lock_balance(), which are where such bugs
+originate.
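+
+With this change the CPU-hotplug path keeps interrupts disabled across
+the whole migration; the intended shape of migrate_dead() after the
+patch is roughly the following (an illustrative sketch, not a verbatim
+copy of the resulting code):
+
+	/* rq->lock is held and interrupts are already disabled here */
+	spin_unlock(&rq->lock);                 /* irqs stay off */
+	move_task_off_dead_cpu(dead_cpu, p);    /* may take two rq locks */
+	spin_lock(&rq->lock);
+
+Taking rq->lock with interrupts enabled is unsafe because the lock is
+also taken from interrupt context (e.g. by scheduler_tick()); an
+interrupt arriving while the lock is held would deadlock, which is what
+the new BUG_ON(!irqs_disabled()) checks are meant to catch early.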
+
+Signed-off-by: Kirill Korotaev <dev@openvz.org>
+Acked-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Andrew Morton <akpm@osdl.org>
+Signed-off-by: Linus Torvalds <torvalds@osdl.org>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ kernel/sched.c | 17 ++++++++++++++---
+ 1 file changed, 14 insertions(+), 3 deletions(-)
+
+--- linux-2.6.19.2.orig/kernel/sched.c
++++ linux-2.6.19.2/kernel/sched.c
+@@ -1941,6 +1941,7 @@ static void double_rq_lock(struct rq *rq
+ __acquires(rq1->lock)
+ __acquires(rq2->lock)
+ {
++ BUG_ON(!irqs_disabled());
+ if (rq1 == rq2) {
+ spin_lock(&rq1->lock);
+ __acquire(rq2->lock); /* Fake it out ;) */
+@@ -1980,6 +1981,11 @@ static void double_lock_balance(struct r
+ __acquires(busiest->lock)
+ __acquires(this_rq->lock)
+ {
++ if (unlikely(!irqs_disabled())) {
++ /* printk() doesn't work good under rq->lock */
++ spin_unlock(&this_rq->lock);
++ BUG_ON(1);
++ }
+ if (unlikely(!spin_trylock(&busiest->lock))) {
+ if (busiest < this_rq) {
+ spin_unlock(&this_rq->lock);
+@@ -5050,7 +5056,10 @@ wait_to_die:
+ }
+
+ #ifdef CONFIG_HOTPLUG_CPU
+-/* Figure out where task on dead CPU should go, use force if neccessary. */
++/*
++ * Figure out where task on dead CPU should go, use force if neccessary.
++ * NOTE: interrupts should be disabled by the caller
++ */
+ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
+ {
+ unsigned long flags;
+@@ -5170,6 +5179,7 @@ void idle_task_exit(void)
+ mmdrop(mm);
+ }
+
++/* called under rq->lock with disabled interrupts */
+ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
+ {
+ struct rq *rq = cpu_rq(dead_cpu);
+@@ -5186,10 +5196,11 @@ static void migrate_dead(unsigned int de
+ * Drop lock around migration; if someone else moves it,
+ * that's OK. No task can be added to this CPU, so iteration is
+ * fine.
++ * NOTE: interrupts should be left disabled --dev@
+ */
+- spin_unlock_irq(&rq->lock);
++ spin_unlock(&rq->lock);
+ move_task_off_dead_cpu(dead_cpu, p);
+- spin_lock_irq(&rq->lock);
++ spin_lock(&rq->lock);
+
+ put_task_struct(p);
+ }
--- /dev/null
+From 9414232fa0cc28e2f51b8c76d260f2748f7953fc Mon Sep 17 00:00:00 2001
+From: Ingo Molnar <mingo@elte.hu>
+Date: Fri, 29 Dec 2006 16:48:13 -0800
+Subject: sched: fix cond_resched_softirq() offset
+
+Remove the __resched_legal() check: it is conceptually broken.  Its
+biggest problem is that it can mask buggy cond_resched() calls.  A
+cond_resched() call is only legal if we are not in an atomic context,
+with two narrow exceptions (put in condition form below):
+
+ - the system is still booting
+ - a reacquire_kernel_lock() down() performed while PREEMPT_ACTIVE is set
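+
+That is, roughly (an illustrative sketch, not the exact check
+introduced by this patch):
+
+	/* a cond_resched() call is legal iff: */
+	!preempt_count() ||                     /* not atomic, or ...     */
+	(preempt_count() & PREEMPT_ACTIVE) ||   /* BKL-reacquire path, or */
+	system_state != SYSTEM_RUNNING          /* the system is booting  */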
+
+But __resched_legal() hid this and just silently returned whenever
+these primitives were called from invalid contexts.  (The same goes for
+cond_resched_lock() and cond_resched_softirq().)
+
+Furthermore, the __resched_legal(0) call was buggy in that it caused
+unnecessarily long softirq latencies via cond_resched_softirq(): that
+function is only called from softirq-off sections, where preempt_count()
+is never zero, so the check always failed and the code did nothing.
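+
+Concretely, under the old check a caller pattern like the one below
+could never yield the CPU, because local_bh_disable() makes
+preempt_count() non-zero and __resched_legal(0) therefore always failed
+(an illustrative sketch; more_work() is a hypothetical helper):
+
+	local_bh_disable();             /* preempt_count() += SOFTIRQ_OFFSET */
+	while (more_work())             /* hypothetical long-running loop */
+		cond_resched_softirq();  /* old code: silently did nothing */
+	local_bh_enable();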
+
+The fix is to restore the effectiveness of the might_sleep() checks and
+to allow only the narrow exceptions listed above.
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Andrew Morton <akpm@osdl.org>
+Signed-off-by: Linus Torvalds <torvalds@osdl.org>
+[chrisw: backport to 2.6.19.2]
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ kernel/sched.c | 16 ++++------------
+ 1 file changed, 4 insertions(+), 12 deletions(-)
+
+--- linux-2.6.19.2.orig/kernel/sched.c
++++ linux-2.6.19.2/kernel/sched.c
+@@ -4524,15 +4524,6 @@ asmlinkage long sys_sched_yield(void)
+ return 0;
+ }
+
+-static inline int __resched_legal(int expected_preempt_count)
+-{
+- if (unlikely(preempt_count() != expected_preempt_count))
+- return 0;
+- if (unlikely(system_state != SYSTEM_RUNNING))
+- return 0;
+- return 1;
+-}
+-
+ static void __cond_resched(void)
+ {
+ #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
+@@ -4552,7 +4543,8 @@ static void __cond_resched(void)
+
+ int __sched cond_resched(void)
+ {
+- if (need_resched() && __resched_legal(0)) {
++ if (need_resched() && !(preempt_count() & PREEMPT_ACTIVE) &&
++ system_state == SYSTEM_RUNNING) {
+ __cond_resched();
+ return 1;
+ }
+@@ -4578,7 +4570,7 @@ int cond_resched_lock(spinlock_t *lock)
+ ret = 1;
+ spin_lock(lock);
+ }
+- if (need_resched() && __resched_legal(1)) {
++ if (need_resched() && system_state == SYSTEM_RUNNING) {
+ spin_release(&lock->dep_map, 1, _THIS_IP_);
+ _raw_spin_unlock(lock);
+ preempt_enable_no_resched();
+@@ -4594,7 +4586,7 @@ int __sched cond_resched_softirq(void)
+ {
+ BUG_ON(!in_softirq());
+
+- if (need_resched() && __resched_legal(0)) {
++ if (need_resched() && system_state == SYSTEM_RUNNING) {
+ raw_local_irq_disable();
+ _local_bh_enable();
+ raw_local_irq_enable();
tcp-skb-is-unexpectedly-freed.patch
netfilter-xt_connbytes-fix-division-by-zero.patch
sunrpc-give-cloned-rpc-clients-their-own-rpc_pipefs-directory.patch
+move_task_off_dead_cpu-should-be-called-with-disabled-ints.patch
+sched-fix-cond_resched_softirq-offset.patch