--- /dev/null
+From 6baea433bc84cd148af1c524389a8d756f67412e Mon Sep 17 00:00:00 2001
+From: "Naveen N. Rao" <naveen.n.rao@linux.vnet.ibm.com>
+Date: Fri, 22 Sep 2017 14:40:47 +0530
+Subject: powerpc/jprobes: Disable preemption when triggered through ftrace
+
+From: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+
+commit 6baea433bc84cd148af1c524389a8d756f67412e upstream.
+
+KPROBES_SANITY_TEST throws the below splat when CONFIG_PREEMPT is
+enabled:
+
+ Kprobe smoke test: started
+ DEBUG_LOCKS_WARN_ON(val > preempt_count())
+ ------------[ cut here ]------------
+ WARNING: CPU: 19 PID: 1 at kernel/sched/core.c:3094 preempt_count_sub+0xcc/0x140
+ Modules linked in:
+ CPU: 19 PID: 1 Comm: swapper/0 Not tainted 4.13.0-rc7-nnr+ #97
+ task: c0000000fea80000 task.stack: c0000000feb00000
+ NIP: c00000000011d3dc LR: c00000000011d3d8 CTR: c000000000a090d0
+ REGS: c0000000feb03400 TRAP: 0700 Not tainted (4.13.0-rc7-nnr+)
+ MSR: 8000000000021033 <SF,ME,IR,DR,RI,LE> CR: 28000282 XER: 00000000
+ CFAR: c00000000015aa18 SOFTE: 0
+ <snip>
+ NIP preempt_count_sub+0xcc/0x140
+ LR preempt_count_sub+0xc8/0x140
+ Call Trace:
+ preempt_count_sub+0xc8/0x140 (unreliable)
+ kprobe_handler+0x228/0x4b0
+ program_check_exception+0x58/0x3b0
+ program_check_common+0x16c/0x170
+ --- interrupt: 0 at kprobe_target+0x8/0x20
+ LR = init_test_probes+0x248/0x7d0
+ kp+0x0/0x80 (unreliable)
+ livepatch_handler+0x38/0x74
+ init_kprobes+0x1d8/0x208
+ do_one_initcall+0x68/0x1d0
+ kernel_init_freeable+0x298/0x374
+ kernel_init+0x24/0x160
+ ret_from_kernel_thread+0x5c/0x70
+ Instruction dump:
+ 419effdc 3d22001b 39299240 81290000 2f890000 409effc8 3c82ffcb 3c62ffcb
+ 3884bc68 3863bc18 4803d5fd 60000000 <0fe00000> 4bffffa8 60000000 60000000
+ ---[ end trace 432dd46b4ce3d29f ]---
+ Kprobe smoke test: passed successfully
+
+The issue is that we aren't disabling preemption in
+kprobe_ftrace_handler(). Disable it.
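+
+The fix is to bracket the handler with an explicit preempt_disable()/
+preempt_enable_no_resched() pair, similar to what the trap-based
+kprobe path does. As a rough sketch of the intended shape of the
+handler (illustrative only, the actual change is in the diff below):
+
+  void kprobe_ftrace_handler(...)
+  {
+          local_irq_save(flags);
+          hard_irq_disable();
+          preempt_disable();
+
+          /* kprobe lookup and pre-handler invocation run here,
+           * with both interrupts and preemption disabled */
+
+  end:
+          preempt_enable_no_resched();
+          local_irq_restore(flags);
+  }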
+
+Fixes: ead514d5fb30a0 ("powerpc/kprobes: Add support for KPROBES_ON_FTRACE")
+Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+[mpe: Trim oops a little for formatting]
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/kprobes-ftrace.c | 15 +++++++++++----
+ 1 file changed, 11 insertions(+), 4 deletions(-)
+
+--- a/arch/powerpc/kernel/kprobes-ftrace.c
++++ b/arch/powerpc/kernel/kprobes-ftrace.c
+@@ -65,6 +65,7 @@ void kprobe_ftrace_handler(unsigned long
+ /* Disable irq for emulating a breakpoint and avoiding preempt */
+ local_irq_save(flags);
+ hard_irq_disable();
++ preempt_disable();
+
+ p = get_kprobe((kprobe_opcode_t *)nip);
+ if (unlikely(!p) || kprobe_disabled(p))
+@@ -86,12 +87,18 @@ void kprobe_ftrace_handler(unsigned long
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+ if (!p->pre_handler || !p->pre_handler(p, regs))
+ __skip_singlestep(p, regs, kcb, orig_nip);
+- /*
+- * If pre_handler returns !0, it sets regs->nip and
+- * resets current kprobe.
+- */
++ else {
++ /*
++ * If pre_handler returns !0, it sets regs->nip and
++ * resets current kprobe. In this case, we still need
++ * to restore irq, but not preemption.
++ */
++ local_irq_restore(flags);
++ return;
++ }
+ }
+ end:
++ preempt_enable_no_resched();
+ local_irq_restore(flags);
+ }
+ NOKPROBE_SYMBOL(kprobe_ftrace_handler);
--- /dev/null
+From 8a2d71a3f2737e2448aa68de2b6052cb570d3d2a Mon Sep 17 00:00:00 2001
+From: "Naveen N. Rao" <naveen.n.rao@linux.vnet.ibm.com>
+Date: Mon, 23 Oct 2017 22:07:38 +0530
+Subject: powerpc/kprobes: Disable preemption before invoking probe handler for optprobes
+
+From: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+
+commit 8a2d71a3f2737e2448aa68de2b6052cb570d3d2a upstream.
+
+Per Documentation/kprobes.txt, probe handlers need to be invoked with
+preemption disabled. Update optimized_callback() to do so. Also move
+the get_kprobe_ctlblk() invocation to after preempt_disable(), since it
+accesses per-cpu data.
+
+This was not an issue so far since optprobes wasn't selected if
+CONFIG_PREEMPT was enabled. Commit a30b85df7d599f ("kprobes: Use
+synchronize_rcu_tasks() for optprobe with CONFIG_PREEMPT=y") changes
+this.
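+
+For reference, get_kprobe_ctlblk() is a per-cpu accessor along these
+lines (illustrative sketch, may differ slightly in this tree):
+
+  static DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+  static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
+  {
+          return this_cpu_ptr(&kprobe_ctlblk);
+  }
+
+Calling it while preemption is still enabled means the task could
+migrate to another CPU between fetching the control block and using
+it, which is why the call is moved to after preempt_disable() in
+addition to disabling preemption in the first place.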
+
+Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/optprobes.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/kernel/optprobes.c
++++ b/arch/powerpc/kernel/optprobes.c
+@@ -115,7 +115,6 @@ static unsigned long can_optimize(struct
+ static void optimized_callback(struct optimized_kprobe *op,
+ struct pt_regs *regs)
+ {
+- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ unsigned long flags;
+
+ /* This is possible if op is under delayed unoptimizing */
+@@ -124,13 +123,14 @@ static void optimized_callback(struct op
+
+ local_irq_save(flags);
+ hard_irq_disable();
++ preempt_disable();
+
+ if (kprobe_running()) {
+ kprobes_inc_nmissed_count(&op->kp);
+ } else {
+ __this_cpu_write(current_kprobe, &op->kp);
+ regs->nip = (unsigned long)op->kp.addr;
+- kcb->kprobe_status = KPROBE_HIT_ACTIVE;
++ get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
+ opt_pre_handler(&op->kp, regs);
+ __this_cpu_write(current_kprobe, NULL);
+ }
+@@ -140,6 +140,7 @@ static void optimized_callback(struct op
+ * local_irq_restore() will re-enable interrupts,
+ * if they were hard disabled.
+ */
++ preempt_enable_no_resched();
+ local_irq_restore(flags);
+ }
+ NOKPROBE_SYMBOL(optimized_callback);