git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.8-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 17 Apr 2013 14:41:36 +0000 (07:41 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 17 Apr 2013 14:41:36 +0000 (07:41 -0700)
added patches:
arm-do-15e0d9e37c-arm-pm-let-platforms-select-cpu_suspend-support-properly.patch
hrtimer-don-t-reinitialize-a-cpu_base-lock-on-cpu_up.patch
kthread-prevent-unpark-race-which-puts-threads-on-the-wrong-cpu.patch
kvm-powerpc-e500mc-fix-tlb-invalidation-on-cpu-migration.patch
powerpc-add-a-missing-label-in-resume_kernel.patch

queue-3.8/arm-do-15e0d9e37c-arm-pm-let-platforms-select-cpu_suspend-support-properly.patch [new file with mode: 0644]
queue-3.8/hrtimer-don-t-reinitialize-a-cpu_base-lock-on-cpu_up.patch [new file with mode: 0644]
queue-3.8/kthread-prevent-unpark-race-which-puts-threads-on-the-wrong-cpu.patch [new file with mode: 0644]
queue-3.8/kvm-powerpc-e500mc-fix-tlb-invalidation-on-cpu-migration.patch [new file with mode: 0644]
queue-3.8/powerpc-add-a-missing-label-in-resume_kernel.patch [new file with mode: 0644]
queue-3.8/series [new file with mode: 0644]

diff --git a/queue-3.8/arm-do-15e0d9e37c-arm-pm-let-platforms-select-cpu_suspend-support-properly.patch b/queue-3.8/arm-do-15e0d9e37c-arm-pm-let-platforms-select-cpu_suspend-support-properly.patch
new file mode 100644 (file)
index 0000000..2254a51
--- /dev/null
@@ -0,0 +1,102 @@
+From b6c7aabd923a17af993c5a5d5d7995f0b27c000a Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+Date: Mon, 8 Apr 2013 11:44:57 +0100
+Subject: ARM: Do 15e0d9e37c (ARM: pm: let platforms select cpu_suspend support) properly
+
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+
+commit b6c7aabd923a17af993c5a5d5d7995f0b27c000a upstream.
+
+Let's do the changes properly and fix the same problem everywhere, not
+just for one case.
+
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/mm/proc-arm920.S |    2 +-
+ arch/arm/mm/proc-arm926.S |    2 +-
+ arch/arm/mm/proc-mohawk.S |    2 +-
+ arch/arm/mm/proc-sa1100.S |    2 +-
+ arch/arm/mm/proc-v6.S     |    2 +-
+ arch/arm/mm/proc-xsc3.S   |    2 +-
+ arch/arm/mm/proc-xscale.S |    2 +-
+ 7 files changed, 7 insertions(+), 7 deletions(-)
+
+--- a/arch/arm/mm/proc-arm920.S
++++ b/arch/arm/mm/proc-arm920.S
+@@ -387,7 +387,7 @@ ENTRY(cpu_arm920_set_pte_ext)
+ /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
+ .globl        cpu_arm920_suspend_size
+ .equ  cpu_arm920_suspend_size, 4 * 3
+-#ifdef CONFIG_PM_SLEEP
++#ifdef CONFIG_ARM_CPU_SUSPEND
+ ENTRY(cpu_arm920_do_suspend)
+       stmfd   sp!, {r4 - r6, lr}
+       mrc     p15, 0, r4, c13, c0, 0  @ PID
+--- a/arch/arm/mm/proc-arm926.S
++++ b/arch/arm/mm/proc-arm926.S
+@@ -402,7 +402,7 @@ ENTRY(cpu_arm926_set_pte_ext)
+ /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
+ .globl        cpu_arm926_suspend_size
+ .equ  cpu_arm926_suspend_size, 4 * 3
+-#ifdef CONFIG_PM_SLEEP
++#ifdef CONFIG_ARM_CPU_SUSPEND
+ ENTRY(cpu_arm926_do_suspend)
+       stmfd   sp!, {r4 - r6, lr}
+       mrc     p15, 0, r4, c13, c0, 0  @ PID
+--- a/arch/arm/mm/proc-mohawk.S
++++ b/arch/arm/mm/proc-mohawk.S
+@@ -350,7 +350,7 @@ ENTRY(cpu_mohawk_set_pte_ext)
+ .globl        cpu_mohawk_suspend_size
+ .equ  cpu_mohawk_suspend_size, 4 * 6
+-#ifdef CONFIG_PM_SLEEP
++#ifdef CONFIG_ARM_CPU_SUSPEND
+ ENTRY(cpu_mohawk_do_suspend)
+       stmfd   sp!, {r4 - r9, lr}
+       mrc     p14, 0, r4, c6, c0, 0   @ clock configuration, for turbo mode
+--- a/arch/arm/mm/proc-sa1100.S
++++ b/arch/arm/mm/proc-sa1100.S
+@@ -172,7 +172,7 @@ ENTRY(cpu_sa1100_set_pte_ext)
+ .globl        cpu_sa1100_suspend_size
+ .equ  cpu_sa1100_suspend_size, 4 * 3
+-#ifdef CONFIG_PM_SLEEP
++#ifdef CONFIG_ARM_CPU_SUSPEND
+ ENTRY(cpu_sa1100_do_suspend)
+       stmfd   sp!, {r4 - r6, lr}
+       mrc     p15, 0, r4, c3, c0, 0           @ domain ID
+--- a/arch/arm/mm/proc-v6.S
++++ b/arch/arm/mm/proc-v6.S
+@@ -138,7 +138,7 @@ ENTRY(cpu_v6_set_pte_ext)
+ /* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */
+ .globl        cpu_v6_suspend_size
+ .equ  cpu_v6_suspend_size, 4 * 6
+-#ifdef CONFIG_PM_SLEEP
++#ifdef CONFIG_ARM_CPU_SUSPEND
+ ENTRY(cpu_v6_do_suspend)
+       stmfd   sp!, {r4 - r9, lr}
+       mrc     p15, 0, r4, c13, c0, 0  @ FCSE/PID
+--- a/arch/arm/mm/proc-xsc3.S
++++ b/arch/arm/mm/proc-xsc3.S
+@@ -413,7 +413,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
+ .globl        cpu_xsc3_suspend_size
+ .equ  cpu_xsc3_suspend_size, 4 * 6
+-#ifdef CONFIG_PM_SLEEP
++#ifdef CONFIG_ARM_CPU_SUSPEND
+ ENTRY(cpu_xsc3_do_suspend)
+       stmfd   sp!, {r4 - r9, lr}
+       mrc     p14, 0, r4, c6, c0, 0   @ clock configuration, for turbo mode
+--- a/arch/arm/mm/proc-xscale.S
++++ b/arch/arm/mm/proc-xscale.S
+@@ -528,7 +528,7 @@ ENTRY(cpu_xscale_set_pte_ext)
+ .globl        cpu_xscale_suspend_size
+ .equ  cpu_xscale_suspend_size, 4 * 6
+-#ifdef CONFIG_PM_SLEEP
++#ifdef CONFIG_ARM_CPU_SUSPEND
+ ENTRY(cpu_xscale_do_suspend)
+       stmfd   sp!, {r4 - r9, lr}
+       mrc     p14, 0, r4, c6, c0, 0   @ clock configuration, for turbo mode
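
A note on the guard being changed throughout this patch: CONFIG_ARM_CPU_SUSPEND can be selected independently of CONFIG_PM_SLEEP (for example by cpuidle support), and the generic cpu_suspend/cpu_resume glue built under that symbol references each processor's do_suspend/do_resume hooks. Guarding the hooks with CONFIG_PM_SLEEP therefore leaves undefined references whenever ARM_CPU_SUSPEND is on while PM_SLEEP is off. A minimal C sketch of the corrected pattern, with illustrative names (the real hooks are ARM assembly in the proc-*.S files above):

    /*
     * Sketch only; names are illustrative, not the kernel's.  The
     * per-processor suspend hooks must be compiled under the same
     * config symbol as the generic code that calls them.
     */
    #ifdef CONFIG_ARM_CPU_SUSPEND          /* was: CONFIG_PM_SLEEP */
    void cpu_demo_do_suspend(u32 *save)
    {
            /* save CP15 state: PID, domain access control, control reg */
    }

    void cpu_demo_do_resume(const u32 *save)
    {
            /* restore the registers saved by cpu_demo_do_suspend() */
    }
    #endif /* CONFIG_ARM_CPU_SUSPEND */
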
diff --git a/queue-3.8/hrtimer-don-t-reinitialize-a-cpu_base-lock-on-cpu_up.patch b/queue-3.8/hrtimer-don-t-reinitialize-a-cpu_base-lock-on-cpu_up.patch
new file mode 100644 (file)
index 0000000..40fc6bb
--- /dev/null
@@ -0,0 +1,89 @@
+From 84cc8fd2fe65866e49d70b38b3fdf7219dd92fe0 Mon Sep 17 00:00:00 2001
+From: Michael Bohan <mbohan@codeaurora.org>
+Date: Tue, 19 Mar 2013 19:19:25 -0700
+Subject: hrtimer: Don't reinitialize a cpu_base lock on CPU_UP
+
+From: Michael Bohan <mbohan@codeaurora.org>
+
+commit 84cc8fd2fe65866e49d70b38b3fdf7219dd92fe0 upstream.
+
+The current code makes the assumption that a cpu_base lock won't be
+held if the CPU corresponding to that cpu_base is offline, which isn't
+always true.
+
+If a hrtimer is not queued, then it will not be migrated by
+migrate_hrtimers() when a CPU is offlined. Therefore, the hrtimer's
+cpu_base may still point to a CPU which has subsequently gone offline
+if the timer wasn't enqueued at the time the CPU went down.
+
+Normally this wouldn't be a problem, but a cpu_base's lock is blindly
+reinitialized each time a CPU is brought up. If a CPU is brought
+online during the period that another thread is performing a hrtimer
+operation on a stale hrtimer, then the lock will be reinitialized
+under its feet, and a SPIN_BUG() like the following will be observed:
+
+<0>[   28.082085] BUG: spinlock already unlocked on CPU#0, swapper/0/0
+<0>[   28.087078]  lock: 0xc4780b40, value 0x0 .magic: dead4ead, .owner: <none>/-1, .owner_cpu: -1
+<4>[   42.451150] [<c0014398>] (unwind_backtrace+0x0/0x120) from [<c0269220>] (do_raw_spin_unlock+0x44/0xdc)
+<4>[   42.460430] [<c0269220>] (do_raw_spin_unlock+0x44/0xdc) from [<c071b5bc>] (_raw_spin_unlock+0x8/0x30)
+<4>[   42.469632] [<c071b5bc>] (_raw_spin_unlock+0x8/0x30) from [<c00a9ce0>] (__hrtimer_start_range_ns+0x1e4/0x4f8)
+<4>[   42.479521] [<c00a9ce0>] (__hrtimer_start_range_ns+0x1e4/0x4f8) from [<c00aa014>] (hrtimer_start+0x20/0x28)
+<4>[   42.489247] [<c00aa014>] (hrtimer_start+0x20/0x28) from [<c00e6190>] (rcu_idle_enter_common+0x1ac/0x320)
+<4>[   42.498709] [<c00e6190>] (rcu_idle_enter_common+0x1ac/0x320) from [<c00e6440>] (rcu_idle_enter+0xa0/0xb8)
+<4>[   42.508259] [<c00e6440>] (rcu_idle_enter+0xa0/0xb8) from [<c000f268>] (cpu_idle+0x24/0xf0)
+<4>[   42.516503] [<c000f268>] (cpu_idle+0x24/0xf0) from [<c06ed3c0>] (rest_init+0x88/0xa0)
+<4>[   42.524319] [<c06ed3c0>] (rest_init+0x88/0xa0) from [<c0c00978>] (start_kernel+0x3d0/0x434)
+
+As an example, this particular crash occurred when hrtimer_start() was
+executed on CPU #0. The code locked the hrtimer's current cpu_base
+corresponding to CPU #1. CPU #0 then tried to switch the hrtimer's
+cpu_base to an optimal CPU which was online. In this case, it selected
+the cpu_base corresponding to CPU #3.
+
+Before it could proceed, CPU #1 came online and reinitialized the
+spinlock corresponding to its cpu_base. Thus now CPU #0 held a lock
+which was reinitialized. When CPU #0 finally ended up unlocking the
+old cpu_base corresponding to CPU #1 so that it could switch to CPU
+#3, we hit this SPIN_BUG() above while in switch_hrtimer_base().
+
+CPU #0                            CPU #1
+----                              ----
+...                               <offline>
+hrtimer_start()
+lock_hrtimer_base(base #1)
+...                               init_hrtimers_cpu()
+switch_hrtimer_base()             ...
+...                               raw_spin_lock_init(&cpu_base->lock)
+raw_spin_unlock(&cpu_base->lock)  ...
+<spin_bug>
+
+Solve this by statically initializing the lock.
+
+Signed-off-by: Michael Bohan <mbohan@codeaurora.org>
+Link: http://lkml.kernel.org/r/1363745965-23475-1-git-send-email-mbohan@codeaurora.org
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/hrtimer.c |    3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/kernel/hrtimer.c
++++ b/kernel/hrtimer.c
+@@ -61,6 +61,7 @@
+ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
+ {
++      .lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
+       .clock_base =
+       {
+               {
+@@ -1640,8 +1641,6 @@ static void __cpuinit init_hrtimers_cpu(
+       struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
+       int i;
+-      raw_spin_lock_init(&cpu_base->lock);
+-
+       for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
+               cpu_base->clock_base[i].cpu_base = cpu_base;
+               timerqueue_init_head(&cpu_base->clock_base[i].active);
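
The fix works because hrtimer_bases is a static per-CPU variable: storage for every possible CPU exists from boot, so the lock can be initialized once at build time and CPU bring-up never needs to touch it again. A minimal sketch of the pattern, using a simplified stand-in for the real hrtimer_cpu_base:

    /*
     * Simplified sketch of the static-initialization pattern; the
     * struct is a stand-in, not the real hrtimer_cpu_base.
     */
    struct demo_cpu_base {
            raw_spinlock_t lock;
            /* ... per-CPU timer state ... */
    };

    static DEFINE_PER_CPU(struct demo_cpu_base, demo_bases) = {
            /* initialized at build time, never re-run on hotplug */
            .lock = __RAW_SPIN_LOCK_UNLOCKED(demo_bases.lock),
    };

    /* CPU_UP callback: set up only state private to the incoming CPU;
     * the lock is deliberately left alone, since another CPU may be
     * holding it for a stale timer at this very moment. */
    static void init_demo_cpu(int cpu)
    {
            struct demo_cpu_base *base = &per_cpu(demo_bases, cpu);
            /* ... initialize queues etc., but never base->lock ... */
    }
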
diff --git a/queue-3.8/kthread-prevent-unpark-race-which-puts-threads-on-the-wrong-cpu.patch b/queue-3.8/kthread-prevent-unpark-race-which-puts-threads-on-the-wrong-cpu.patch
new file mode 100644 (file)
index 0000000..64d34b6
--- /dev/null
@@ -0,0 +1,205 @@
+From f2530dc71cf0822f90bb63ea4600caaef33a66bb Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 9 Apr 2013 09:33:34 +0200
+Subject: kthread: Prevent unpark race which puts threads on the wrong cpu
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit f2530dc71cf0822f90bb63ea4600caaef33a66bb upstream.
+
+The smpboot threads rely on the park/unpark mechanism, which binds per-cpu
+threads to a particular core. However, the functionality is racy:
+
+CPU0                   CPU1                CPU2
+unpark(T)                                  wake_up_process(T)
+  clear(SHOULD_PARK)   T runs
+                       leave parkme() due to !SHOULD_PARK
+  bind_to(CPU2)                BUG_ON(wrong CPU)
+
+We cannot let the tasks move themselves to the target CPU, as one of
+those tasks is actually the migration thread itself, which requires
+that it start running on the target CPU right away.
+
+The solution to this problem is to prevent wakeups in park mode which
+are not from unpark(). That way we can guarantee that the association
+of the task to the target cpu is working correctly.
+
+Add a new task state (TASK_PARKED) which prevents other wakeups and
+use this state explicitly for the unpark wakeup.
+
+Peter noticed: also, since the task state is visible to userspace and
+all the parked tasks are still in the PID space, it's a good hint in ps
+and friends that these tasks aren't really there for the moment.
+
+The migration thread has another related issue.
+
+CPU0                    CPU1
+Bring up CPU2
+create_thread(T)
+park(T)
+ wait_for_completion()
+                        parkme()
+                        complete()
+sched_set_stop_task()
+                        schedule(TASK_PARKED)
+
+The sched_set_stop_task() call is issued while the task is on the
+runqueue of CPU1, and that confuses the hell out of the stop_task class
+on that CPU. So we need the same synchronization before
+sched_set_stop_task().
+
+Reported-by: Dave Jones <davej@redhat.com>
+Reported-and-tested-by: Dave Hansen <dave@sr71.net>
+Reported-and-tested-by: Borislav Petkov <bp@alien8.de>
+Acked-by: Peter Zijlstra <peterz@infradead.org>
+Cc: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
+Cc: dhillf@gmail.com
+Cc: Ingo Molnar <mingo@kernel.org>
+Link: http://lkml.kernel.org/r/alpine.LFD.2.02.1304091635430.21884@ionos
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/proc/array.c              |    1 +
+ include/linux/sched.h        |    5 ++--
+ include/trace/events/sched.h |    2 +-
+ kernel/kthread.c             |   52 +++++++++++++++++++++++--------------------
+ 4 files changed, 33 insertions(+), 27 deletions(-)
+
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -143,6 +143,7 @@ static const char * const task_state_arr
+       "x (dead)",             /*  64 */
+       "K (wakekill)",         /* 128 */
+       "W (waking)",           /* 256 */
++      "P (parked)",           /* 512 */
+ };
+ static inline const char *get_task_state(struct task_struct *tsk)
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -163,9 +163,10 @@ print_cfs_rq(struct seq_file *m, int cpu
+ #define TASK_DEAD             64
+ #define TASK_WAKEKILL         128
+ #define TASK_WAKING           256
+-#define TASK_STATE_MAX                512
++#define TASK_PARKED           512
++#define TASK_STATE_MAX                1024
+-#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW"
++#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
+ extern char ___assert_task_state[1 - 2*!!(
+               sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
+--- a/include/trace/events/sched.h
++++ b/include/trace/events/sched.h
+@@ -147,7 +147,7 @@ TRACE_EVENT(sched_switch,
+                 __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
+                               { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
+                               { 16, "Z" }, { 32, "X" }, { 64, "x" },
+-                              { 128, "W" }) : "R",
++                              { 128, "K" }, { 256, "W" }, { 512, "P" }) : "R",
+               __entry->prev_state & TASK_STATE_MAX ? "+" : "",
+               __entry->next_comm, __entry->next_pid, __entry->next_prio)
+ );
+--- a/kernel/kthread.c
++++ b/kernel/kthread.c
+@@ -124,12 +124,12 @@ void *kthread_data(struct task_struct *t
+ static void __kthread_parkme(struct kthread *self)
+ {
+-      __set_current_state(TASK_INTERRUPTIBLE);
++      __set_current_state(TASK_PARKED);
+       while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
+               if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
+                       complete(&self->parked);
+               schedule();
+-              __set_current_state(TASK_INTERRUPTIBLE);
++              __set_current_state(TASK_PARKED);
+       }
+       clear_bit(KTHREAD_IS_PARKED, &self->flags);
+       __set_current_state(TASK_RUNNING);
+@@ -256,8 +256,13 @@ struct task_struct *kthread_create_on_no
+ }
+ EXPORT_SYMBOL(kthread_create_on_node);
+-static void __kthread_bind(struct task_struct *p, unsigned int cpu)
++static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
+ {
++      /* Must have done schedule() in kthread() before we set_task_cpu */
++      if (!wait_task_inactive(p, state)) {
++              WARN_ON(1);
++              return;
++      }
+       /* It's safe because the task is inactive. */
+       do_set_cpus_allowed(p, cpumask_of(cpu));
+       p->flags |= PF_THREAD_BOUND;
+@@ -274,12 +279,7 @@ static void __kthread_bind(struct task_s
+  */
+ void kthread_bind(struct task_struct *p, unsigned int cpu)
+ {
+-      /* Must have done schedule() in kthread() before we set_task_cpu */
+-      if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
+-              WARN_ON(1);
+-              return;
+-      }
+-      __kthread_bind(p, cpu);
++      __kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
+ }
+ EXPORT_SYMBOL(kthread_bind);
+@@ -324,6 +324,22 @@ static struct kthread *task_get_live_kth
+       return NULL;
+ }
++static void __kthread_unpark(struct task_struct *k, struct kthread *kthread)
++{
++      clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
++      /*
++       * We clear the IS_PARKED bit here as we don't wait
++       * until the task has left the park code. So if we'd
++       * park before that happens we'd see the IS_PARKED bit
++       * which might be about to be cleared.
++       */
++      if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
++              if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
++                      __kthread_bind(k, kthread->cpu, TASK_PARKED);
++              wake_up_state(k, TASK_PARKED);
++      }
++}
++
+ /**
+  * kthread_unpark - unpark a thread created by kthread_create().
+  * @k:                thread created by kthread_create().
+@@ -336,20 +352,8 @@ void kthread_unpark(struct task_struct *
+ {
+       struct kthread *kthread = task_get_live_kthread(k);
+-      if (kthread) {
+-              clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
+-              /*
+-               * We clear the IS_PARKED bit here as we don't wait
+-               * until the task has left the park code. So if we'd
+-               * park before that happens we'd see the IS_PARKED bit
+-               * which might be about to be cleared.
+-               */
+-              if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
+-                      if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
+-                              __kthread_bind(k, kthread->cpu);
+-                      wake_up_process(k);
+-              }
+-      }
++      if (kthread)
++              __kthread_unpark(k, kthread);
+       put_task_struct(k);
+ }
+@@ -407,7 +411,7 @@ int kthread_stop(struct task_struct *k)
+       trace_sched_kthread_stop(k);
+       if (kthread) {
+               set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
+-              clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
++              __kthread_unpark(k, kthread);
+               wake_up_process(k);
+               wait_for_completion(&kthread->exited);
+       }
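
The patch hinges on how the scheduler filters wakeups by task state: wake_up_process() wakes TASK_NORMAL (interruptible and uninterruptible) sleepers only, so a thread sleeping in the new TASK_PARKED state is immune to stray wakeups and runs again only when unpark issues a wakeup naming that state explicitly, after the rebind has been done. A simplified sketch of the two sides of the protocol (condensed from the hunks above; the completion handshake is omitted):

    /* Parked side: sleep in TASK_PARKED so that only an explicit
     * wake_up_state(task, TASK_PARKED) can end the park loop. */
    static void park_loop(struct kthread *self)
    {
            __set_current_state(TASK_PARKED);
            while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
                    schedule();     /* stray wakeups cannot end this sleep */
                    __set_current_state(TASK_PARKED);
            }
            __set_current_state(TASK_RUNNING);
    }

    /* Unpark side: rebind while the task is verifiably off-CPU in
     * TASK_PARKED, then issue the one wakeup that matches it. */
    static void unpark(struct task_struct *k, unsigned int cpu)
    {
            if (wait_task_inactive(k, TASK_PARKED))
                    do_set_cpus_allowed(k, cpumask_of(cpu));
            wake_up_state(k, TASK_PARKED);
    }
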
diff --git a/queue-3.8/kvm-powerpc-e500mc-fix-tlb-invalidation-on-cpu-migration.patch b/queue-3.8/kvm-powerpc-e500mc-fix-tlb-invalidation-on-cpu-migration.patch
new file mode 100644 (file)
index 0000000..cf077c6
--- /dev/null
@@ -0,0 +1,49 @@
+From c5e6cb051c5f7d56f05bd6a4af22cb300a4ced79 Mon Sep 17 00:00:00 2001
+From: Scott Wood <scottwood@freescale.com>
+Date: Mon, 18 Feb 2013 18:13:09 +0000
+Subject: kvm/powerpc/e500mc: fix tlb invalidation on cpu migration
+
+From: Scott Wood <scottwood@freescale.com>
+
+commit c5e6cb051c5f7d56f05bd6a4af22cb300a4ced79 upstream.
+
+The existing check handles the case where we've migrated to a different
+core than we last ran on, but it doesn't handle the case where we're
+still on the same CPU we last ran on but some other vcpu has run on
+this CPU in the meantime.
+
+Without this fix, guest segfaults (and other misbehavior) have been
+seen in SMP guests.
+
+Signed-off-by: Scott Wood <scottwood@freescale.com>
+Signed-off-by: Alexander Graf <agraf@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kvm/e500mc.c |    7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/kvm/e500mc.c
++++ b/arch/powerpc/kvm/e500mc.c
+@@ -108,6 +108,8 @@ void kvmppc_mmu_msr_notify(struct kvm_vc
+ {
+ }
++static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu);
++
+ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ {
+       struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+@@ -136,8 +138,11 @@ void kvmppc_core_vcpu_load(struct kvm_vc
+       mtspr(SPRN_GDEAR, vcpu->arch.shared->dar);
+       mtspr(SPRN_GESR, vcpu->arch.shared->esr);
+-      if (vcpu->arch.oldpir != mfspr(SPRN_PIR))
++      if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
++          __get_cpu_var(last_vcpu_on_cpu) != vcpu) {
+               kvmppc_e500_tlbil_all(vcpu_e500);
++              __get_cpu_var(last_vcpu_on_cpu) = vcpu;
++      }
+       kvmppc_load_guest_fp(vcpu);
+ }
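
The new condition covers both ways this CPU's TLB can hold stale guest entries: the vcpu migrated to a different physical CPU (the existing PIR comparison), or it stayed on the same CPU while another vcpu ran here and installed its own translations. The per-CPU record of the last vcpu loaded catches the second case; condensed from the hunk above:

    /*
     * Condensed from the patch: flush if (a) this vcpu last ran on a
     * different physical CPU, or (b) this CPU ran another vcpu since
     * we were last here.  The PIR check alone only catches (a).
     */
    static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu);

    static void vcpu_load_tlb_check(struct kvm_vcpu *vcpu)
    {
            if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||        /* (a) */
                __get_cpu_var(last_vcpu_on_cpu) != vcpu) {     /* (b) */
                    kvmppc_e500_tlbil_all(to_e500(vcpu));
                    __get_cpu_var(last_vcpu_on_cpu) = vcpu;
            }
    }
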
diff --git a/queue-3.8/powerpc-add-a-missing-label-in-resume_kernel.patch b/queue-3.8/powerpc-add-a-missing-label-in-resume_kernel.patch
new file mode 100644 (file)
index 0000000..3b2d478
--- /dev/null
@@ -0,0 +1,34 @@
+From d8b92292408831d86ff7b781e66bf79301934b99 Mon Sep 17 00:00:00 2001
+From: Kevin Hao <haokexin@gmail.com>
+Date: Tue, 9 Apr 2013 22:31:24 +0000
+Subject: powerpc: add a missing label in resume_kernel
+
+From: Kevin Hao <haokexin@gmail.com>
+
+commit d8b92292408831d86ff7b781e66bf79301934b99 upstream.
+
+A label "0:" was missed in patch a9c4e541 (powerpc/kprobe: Complete
+kprobe and migrate exception frame). This will cause the kernel to
+branch to an undetermined address if there is a conflict when updating
+the thread flags.
+
+Signed-off-by: Kevin Hao <haokexin@gmail.com>
+Acked-By: Tiejun Chen <tiejun.chen@windriver.com>
+Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/entry_64.S |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/powerpc/kernel/entry_64.S
++++ b/arch/powerpc/kernel/entry_64.S
+@@ -634,7 +634,7 @@ resume_kernel:
+       /* Clear _TIF_EMULATE_STACK_STORE flag */
+       lis     r11,_TIF_EMULATE_STACK_STORE@h
+       addi    r5,r9,TI_FLAGS
+-      ldarx   r4,0,r5
++0:    ldarx   r4,0,r5
+       andc    r4,r4,r11
+       stdcx.  r4,0,r5
+       bne-    0b
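
The fixed sequence is a load-reserve/store-conditional retry loop: ldarx loads the thread flags and sets a reservation, stdcx. stores the cleared flags only if nothing else wrote the word in between, and bne- 0b retries on failure. Without the "0:" label the retry branch has no intended target, which is the undetermined branch the changelog describes. In C terms the loop amounts to an atomic fetch-and-clear, roughly:

    /*
     * Rough C equivalent of the fixed assembly loop: atomically clear
     * _TIF_EMULATE_STACK_STORE, retrying when another CPU changed the
     * flags between the load and the store (the contended case that
     * the missing "0:" retry label broke).
     */
    static void clear_emulate_stack_store(unsigned long *flags)
    {
            unsigned long old, new;

            do {
                    old = READ_ONCE(*flags);
                    new = old & ~_TIF_EMULATE_STACK_STORE;
            } while (cmpxchg(flags, old, new) != old);
    }
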
diff --git a/queue-3.8/series b/queue-3.8/series
new file mode 100644 (file)
index 0000000..5fcde36
--- /dev/null
@@ -0,0 +1,5 @@
+powerpc-add-a-missing-label-in-resume_kernel.patch
+kvm-powerpc-e500mc-fix-tlb-invalidation-on-cpu-migration.patch
+arm-do-15e0d9e37c-arm-pm-let-platforms-select-cpu_suspend-support-properly.patch
+kthread-prevent-unpark-race-which-puts-threads-on-the-wrong-cpu.patch
+hrtimer-don-t-reinitialize-a-cpu_base-lock-on-cpu_up.patch