--- /dev/null
+From 9e298e8604088a600d8100a111a532a9d342af09 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Wed, 1 May 2019 15:11:17 +0200
+Subject: ftrace/x86_64: Emulate call function while updating in breakpoint handler
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 9e298e8604088a600d8100a111a532a9d342af09 upstream.
+
+Nicolai Stange discovered[1] that if live kernel patching is enabled, and the
+function tracer started tracing the same function that was patched, the
+conversion of the fentry call site, when going from calling the live kernel
+patch trampoline to calling the iterator trampoline, would have a slight
+window where it didn't call anything.
+
+Live kernel patching depends on ftrace always calling its code, to prevent
+the function being traced from being called directly (ftrace redirects it).
+This small window would allow the old buggy function to be called, which can
+cause undesirable results.
+
+Nicolai submitted new patches[2], but these were controversial, as the
+problem is similar to the static call emulation issues that came up a while
+ago[3]. After some debate[4][5], the solution settled on was to add a gap to
+the stack when entering the breakpoint handler, which makes it possible to
+push a return address onto the stack and easily emulate a call (see the
+sketch below).
+
+[1] http://lkml.kernel.org/r/20180726104029.7736-1-nstange@suse.de
+[2] http://lkml.kernel.org/r/20190427100639.15074-1-nstange@suse.de
+[3] http://lkml.kernel.org/r/3cf04e113d71c9f8e4be95fb84a510f085aa4afa.1541711457.git.jpoimboe@redhat.com
+[4] http://lkml.kernel.org/r/CAHk-=wh5OpheSU8Em_Q3Hg8qw_JtoijxOdPtHru6d+5K8TWM=A@mail.gmail.com
+[5] http://lkml.kernel.org/r/CAHk-=wjvQxY4DvPrJ6haPgAa6b906h=MwZXO6G8OtiTGe=N7_w@mail.gmail.com
+
+[
+ Live kernel patching is not implemented on x86_32, thus the emulate
+ calls are only for x86_64.
+]
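+
+[
+ Editor's sketch: a rough, user-space C illustration (not the kernel's
+ actual code) of the three-step breakpoint-based update that creates the
+ window discussed above. sync_cores() is a hypothetical stand-in for the
+ kernel's IPI-based core serialization. Any CPU hitting the site between
+ the steps traps into the int3 handler, which must emulate the call
+ rather than skip it.
+
+	#include <string.h>
+
+	/* Hypothetical stand-in for the kernel's core serialization. */
+	static void sync_cores(void) { }
+
+	static void update_call_site(unsigned char *site,
+				     const unsigned char new_call[5])
+	{
+		site[0] = 0xcc;		/* 1: arm int3 over the call opcode */
+		sync_cores();
+		memcpy(site + 1, new_call + 1, 4);  /* 2: patch the target */
+		sync_cores();
+		site[0] = new_call[0];	/* 3: restore the call opcode */
+		sync_cores();
+	}
+]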
+
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Nicolai Stange <nstange@suse.de>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: the arch/x86 maintainers <x86@kernel.org>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Jiri Kosina <jikos@kernel.org>
+Cc: Miroslav Benes <mbenes@suse.cz>
+Cc: Petr Mladek <pmladek@suse.com>
+Cc: Joe Lawrence <joe.lawrence@redhat.com>
+Cc: Shuah Khan <shuah@kernel.org>
+Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Cc: Mimi Zohar <zohar@linux.ibm.com>
+Cc: Juergen Gross <jgross@suse.com>
+Cc: Nick Desaulniers <ndesaulniers@google.com>
+Cc: Nayna Jain <nayna@linux.ibm.com>
+Cc: Masahiro Yamada <yamada.masahiro@socionext.com>
+Cc: Joerg Roedel <jroedel@suse.de>
+Cc: "open list:KERNEL SELFTEST FRAMEWORK" <linux-kselftest@vger.kernel.org>
+Cc: stable@vger.kernel.org
+Fixes: b700e7f03df5 ("livepatch: kernel: add support for live patching")
+Tested-by: Nicolai Stange <nstange@suse.de>
+Reviewed-by: Nicolai Stange <nstange@suse.de>
+Reviewed-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+[ Changed to only implement emulated calls for x86_64 ]
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/ftrace.c | 32 +++++++++++++++++++++++++++-----
+ 1 file changed, 27 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kernel/ftrace.c
++++ b/arch/x86/kernel/ftrace.c
+@@ -29,6 +29,7 @@
+ #include <asm/kprobes.h>
+ #include <asm/ftrace.h>
+ #include <asm/nops.h>
++#include <asm/text-patching.h>
+
+ #ifdef CONFIG_DYNAMIC_FTRACE
+
+@@ -231,6 +232,7 @@ int ftrace_modify_call(struct dyn_ftrace
+ }
+
+ static unsigned long ftrace_update_func;
++static unsigned long ftrace_update_func_call;
+
+ static int update_ftrace_func(unsigned long ip, void *new)
+ {
+@@ -259,6 +261,8 @@ int ftrace_update_ftrace_func(ftrace_fun
+ unsigned char *new;
+ int ret;
+
++ ftrace_update_func_call = (unsigned long)func;
++
+ new = ftrace_call_replace(ip, (unsigned long)func);
+ ret = update_ftrace_func(ip, new);
+
+@@ -294,13 +298,28 @@ int ftrace_int3_handler(struct pt_regs *
+ if (WARN_ON_ONCE(!regs))
+ return 0;
+
+- ip = regs->ip - 1;
+- if (!ftrace_location(ip) && !is_ftrace_caller(ip))
+- return 0;
++ ip = regs->ip - INT3_INSN_SIZE;
+
+- regs->ip += MCOUNT_INSN_SIZE - 1;
++#ifdef CONFIG_X86_64
++ if (ftrace_location(ip)) {
++ int3_emulate_call(regs, (unsigned long)ftrace_regs_caller);
++ return 1;
++ } else if (is_ftrace_caller(ip)) {
++ if (!ftrace_update_func_call) {
++ int3_emulate_jmp(regs, ip + CALL_INSN_SIZE);
++ return 1;
++ }
++ int3_emulate_call(regs, ftrace_update_func_call);
++ return 1;
++ }
++#else
++ if (ftrace_location(ip) || is_ftrace_caller(ip)) {
++ int3_emulate_jmp(regs, ip + CALL_INSN_SIZE);
++ return 1;
++ }
++#endif
+
+- return 1;
++ return 0;
+ }
+
+ static int ftrace_write(unsigned long ip, const char *val, int size)
+@@ -858,6 +877,8 @@ void arch_ftrace_update_trampoline(struc
+
+ func = ftrace_ops_get_func(ops);
+
++ ftrace_update_func_call = (unsigned long)func;
++
+ /* Do a safe modify in case the trampoline is executing */
+ new = ftrace_call_replace(ip, (unsigned long)func);
+ ret = update_ftrace_func(ip, new);
+@@ -959,6 +980,7 @@ static int ftrace_mod_jmp(unsigned long
+ {
+ unsigned char *new;
+
++ ftrace_update_func_call = 0UL;
+ new = ftrace_jmp_replace(ip, (unsigned long)func);
+
+ return update_ftrace_func(ip, new);
--- /dev/null
+From 2700fefdb2d9751c416ad56897e27d41e409324a Mon Sep 17 00:00:00 2001
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+Date: Fri, 30 Nov 2018 12:39:17 -0600
+Subject: x86_64: Add gap to int3 to allow for call emulation
+
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+
+commit 2700fefdb2d9751c416ad56897e27d41e409324a upstream.
+
+To allow an int3 handler to emulate a call instruction, it must be able to
+push a return address onto the stack. Add a gap to the stack so that the
+int3 handler can push the return address, and change the return from the
+int3 to jump straight to the emulated call's target function (a sketch of
+the gap creation follows below).
+
+Link: http://lkml.kernel.org/r/20181130183917.hxmti5josgq4clti@treble
+Link: http://lkml.kernel.org/r/20190502162133.GX2623@hirez.programming.kicks-ass.net
+
+[
+ Note, this is needed to allow Live Kernel Patching to not miss calling a
+ patched function when tracing is enabled. -- Steven Rostedt
+]
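+
+[
+ Editor's sketch: what the ".rept 6; pushq 5*8(%rsp)" sequence in this
+ patch achieves, written as plain C under the assumption that rsp[0]
+ holds ORIG_RAX followed by the 5-word hardware IRET frame (RIP, CS,
+ RFLAGS, RSP, SS). Note that a push with an rsp-relative memory operand
+ computes the address before rsp is decremented.
+
+	static unsigned long *create_gap(unsigned long *rsp)
+	{
+		int i;
+
+		for (i = 0; i < 6; i++) {
+			unsigned long val = rsp[5];	/* read, then push */
+			*--rsp = val;
+		}
+		/*
+		 * rsp now points at an identical copy of the 6-word
+		 * frame; the original 6 words above it become the
+		 * scratch space the int3 handler can push into.
+		 */
+		return rsp;
+	}
+]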
+
+Cc: stable@vger.kernel.org
+Fixes: b700e7f03df5 ("livepatch: kernel: add support for live patching")
+Tested-by: Nicolai Stange <nstange@suse.de>
+Reviewed-by: Nicolai Stange <nstange@suse.de>
+Reviewed-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/entry/entry_64.S | 18 ++++++++++++++++--
+ 1 file changed, 16 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -881,7 +881,7 @@ apicinterrupt IRQ_WORK_VECTOR irq_work
+ * @paranoid == 2 is special: the stub will never switch stacks. This is for
+ * #DF: if the thread stack is somehow unusable, we'll still get a useful OOPS.
+ */
+-.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
++.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 create_gap=0
+ ENTRY(\sym)
+ UNWIND_HINT_IRET_REGS offset=\has_error_code*8
+
+@@ -901,6 +901,20 @@ ENTRY(\sym)
+ jnz .Lfrom_usermode_switch_stack_\@
+ .endif
+
++ .if \create_gap == 1
++ /*
++ * If coming from kernel space, create a 6-word gap to allow the
++ * int3 handler to emulate a call instruction.
++ */
++ testb $3, CS-ORIG_RAX(%rsp)
++ jnz .Lfrom_usermode_no_gap_\@
++ .rept 6
++ pushq 5*8(%rsp)
++ .endr
++ UNWIND_HINT_IRET_REGS offset=8
++.Lfrom_usermode_no_gap_\@:
++ .endif
++
+ .if \paranoid
+ call paranoid_entry
+ .else
+@@ -1132,7 +1146,7 @@ apicinterrupt3 HYPERV_STIMER0_VECTOR \
+ #endif /* CONFIG_HYPERV */
+
+ idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
+-idtentry int3 do_int3 has_error_code=0
++idtentry int3 do_int3 has_error_code=0 create_gap=1
+ idtentry stack_segment do_stack_segment has_error_code=1
+
+ #ifdef CONFIG_XEN_PV
--- /dev/null
+From 4b33dadf37666c0860b88f9e52a16d07bf6d0b03 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Wed, 1 May 2019 15:11:17 +0200
+Subject: x86_64: Allow breakpoints to emulate call instructions
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 4b33dadf37666c0860b88f9e52a16d07bf6d0b03 upstream.
+
+In order to allow breakpoints to emulate call instructions, they need to push
+the return address onto the stack. The x86_64 int3 handler adds a small gap
+to allow the stack to grow. Use this gap to push the return address, so that
+a call instruction can be emulated at the breakpoint location (see the sketch
+after the list of helpers below).
+
+These helper functions are added:
+
+ int3_emulate_jmp():  changes regs->ip so that the return from the int3
+                      lands at the given location.
+
+ (The next two are only for x86_64.)
+
+ int3_emulate_push(): pushes a value into the gap added to the stack.
+ int3_emulate_call(): pushes the return address and changes regs->ip.
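+
+[
+ Editor's sketch: the return-address arithmetic the x86_64 helpers
+ perform, as self-contained C with a hypothetical fake_regs type. The
+ trap leaves regs->ip just past the 1-byte int3, while the pushed
+ return address must point just past the original 5-byte call.
+
+	#define INT3_INSN_SIZE 1
+	#define CALL_INSN_SIZE 5
+
+	struct fake_regs { unsigned long ip, sp; };
+
+	static void emulate_call(struct fake_regs *regs, unsigned long func)
+	{
+		/* Return address: skip the rest of the patched call. */
+		unsigned long ret = regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE;
+
+		regs->sp -= sizeof(unsigned long);
+		*(unsigned long *)regs->sp = ret; /* lands in the entry gap */
+		regs->ip = func;		  /* "jmp" to the target */
+	}
+]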
+
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Nicolai Stange <nstange@suse.de>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: the arch/x86 maintainers <x86@kernel.org>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Jiri Kosina <jikos@kernel.org>
+Cc: Miroslav Benes <mbenes@suse.cz>
+Cc: Petr Mladek <pmladek@suse.com>
+Cc: Joe Lawrence <joe.lawrence@redhat.com>
+Cc: Shuah Khan <shuah@kernel.org>
+Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Cc: Mimi Zohar <zohar@linux.ibm.com>
+Cc: Juergen Gross <jgross@suse.com>
+Cc: Nick Desaulniers <ndesaulniers@google.com>
+Cc: Nayna Jain <nayna@linux.ibm.com>
+Cc: Masahiro Yamada <yamada.masahiro@socionext.com>
+Cc: Joerg Roedel <jroedel@suse.de>
+Cc: "open list:KERNEL SELFTEST FRAMEWORK" <linux-kselftest@vger.kernel.org>
+Cc: stable@vger.kernel.org
+Fixes: b700e7f03df5 ("livepatch: kernel: add support for live patching")
+Tested-by: Nicolai Stange <nstange@suse.de>
+Reviewed-by: Nicolai Stange <nstange@suse.de>
+Reviewed-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+[ Modified to only work for x86_64 and added comment to int3_emulate_push() ]
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/text-patching.h | 28 ++++++++++++++++++++++++++++
+ 1 file changed, 28 insertions(+)
+
+--- a/arch/x86/include/asm/text-patching.h
++++ b/arch/x86/include/asm/text-patching.h
+@@ -39,4 +39,32 @@ extern int poke_int3_handler(struct pt_r
+ extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
+ extern int after_bootmem;
+
++static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
++{
++ regs->ip = ip;
++}
++
++#define INT3_INSN_SIZE 1
++#define CALL_INSN_SIZE 5
++
++#ifdef CONFIG_X86_64
++static inline void int3_emulate_push(struct pt_regs *regs, unsigned long val)
++{
++ /*
++ * The int3 handler in entry_64.S adds a gap between the
++ * stack where the break point happened, and the saving of
++ * pt_regs. We can extend the original stack because of
++ * this gap. See the idtentry macro's create_gap option.
++ */
++ regs->sp -= sizeof(unsigned long);
++ *(unsigned long *)regs->sp = val;
++}
++
++static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func)
++{
++ int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
++ int3_emulate_jmp(regs, func);
++}
++#endif
++
+ #endif /* _ASM_X86_TEXT_PATCHING_H */