From: Greg Kroah-Hartman
Date: Thu, 23 May 2019 09:52:47 +0000 (+0200)
Subject: 4.14-stable patches
X-Git-Tag: v5.1.5~34
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=53fbdb96362158f77c5339e41cab64fff669578c;p=thirdparty%2Fkernel%2Fstable-queue.git

4.14-stable patches

added patches:
	ftrace-x86_64-emulate-call-function-while-updating-in-breakpoint-handler.patch
	x86_64-add-gap-to-int3-to-allow-for-call-emulation.patch
	x86_64-allow-breakpoints-to-emulate-call-instructions.patch
---

diff --git a/queue-4.14/ftrace-x86_64-emulate-call-function-while-updating-in-breakpoint-handler.patch b/queue-4.14/ftrace-x86_64-emulate-call-function-while-updating-in-breakpoint-handler.patch
new file mode 100644
index 00000000000..3a03385fd40
--- /dev/null
+++ b/queue-4.14/ftrace-x86_64-emulate-call-function-while-updating-in-breakpoint-handler.patch
@@ -0,0 +1,152 @@
+From 9e298e8604088a600d8100a111a532a9d342af09 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra
+Date: Wed, 1 May 2019 15:11:17 +0200
+Subject: ftrace/x86_64: Emulate call function while updating in breakpoint handler
+
+From: Peter Zijlstra
+
+commit 9e298e8604088a600d8100a111a532a9d342af09 upstream.
+
+Nicolai Stange discovered[1] that if live kernel patching is enabled, and
+the function tracer starts tracing the same function that was patched,
+the conversion of the fentry call site from calling the live kernel patch
+trampoline to calling the iterator trampoline has a slight window where
+it doesn't call anything.
+
+Live kernel patching depends on ftrace always calling its code (to
+prevent the function being traced from being called, as it will redirect
+it), so this small window would allow the old buggy function to be
+called, and this can cause undesirable results.
+
+Nicolai submitted new patches[2], but these were controversial, as the
+problem is similar to the static call emulation issues that came up a
+while ago[3]. After some debate[4][5], the approach taken here is to add
+a gap in the stack when entering the breakpoint handler, which allows the
+return address to be pushed onto the stack so that a call can easily be
+emulated.
+
+[1] http://lkml.kernel.org/r/20180726104029.7736-1-nstange@suse.de
+[2] http://lkml.kernel.org/r/20190427100639.15074-1-nstange@suse.de
+[3] http://lkml.kernel.org/r/3cf04e113d71c9f8e4be95fb84a510f085aa4afa.1541711457.git.jpoimboe@redhat.com
+[4] http://lkml.kernel.org/r/CAHk-=wh5OpheSU8Em_Q3Hg8qw_JtoijxOdPtHru6d+5K8TWM=A@mail.gmail.com
+[5] http://lkml.kernel.org/r/CAHk-=wjvQxY4DvPrJ6haPgAa6b906h=MwZXO6G8OtiTGe=N7_w@mail.gmail.com
+
+[
+ Live kernel patching is not implemented on x86_32, thus the emulated
+ calls are only for x86_64.
+]
+
+Cc: Andy Lutomirski
+Cc: Nicolai Stange
+Cc: Thomas Gleixner
+Cc: Ingo Molnar
+Cc: Borislav Petkov
+Cc: "H. Peter Anvin"
+Cc: the arch/x86 maintainers
+Cc: Josh Poimboeuf
+Cc: Jiri Kosina
+Cc: Miroslav Benes
+Cc: Petr Mladek
+Cc: Joe Lawrence
+Cc: Shuah Khan
+Cc: Konrad Rzeszutek Wilk
+Cc: Tim Chen
+Cc: Sebastian Andrzej Siewior
+Cc: Mimi Zohar
+Cc: Juergen Gross
+Cc: Nick Desaulniers
+Cc: Nayna Jain
+Cc: Masahiro Yamada
+Cc: Joerg Roedel
+Cc: "open list:KERNEL SELFTEST FRAMEWORK"
+Cc: stable@vger.kernel.org
+Fixes: b700e7f03df5 ("livepatch: kernel: add support for live patching")
+Tested-by: Nicolai Stange
+Reviewed-by: Nicolai Stange
+Reviewed-by: Masami Hiramatsu
+Signed-off-by: Peter Zijlstra (Intel)
+[ Changed to only implement emulated calls for x86_64 ]
+Signed-off-by: Steven Rostedt (VMware)
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/x86/kernel/ftrace.c |   32 +++++++++++++++++++++++++++-----
+ 1 file changed, 27 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kernel/ftrace.c
++++ b/arch/x86/kernel/ftrace.c
+@@ -30,6 +30,7 @@
+ #include <asm/kprobes.h>
+ #include <asm/ftrace.h>
+ #include <asm/nops.h>
++#include <asm/text-patching.h>
+ 
+ #ifdef CONFIG_DYNAMIC_FTRACE
+ 
+@@ -229,6 +230,7 @@ int ftrace_modify_call(struct dyn_ftrace
+ }
+ 
+ static unsigned long ftrace_update_func;
++static unsigned long ftrace_update_func_call;
+ 
+ static int update_ftrace_func(unsigned long ip, void *new)
+ {
+@@ -257,6 +259,8 @@ int ftrace_update_ftrace_func(ftrace_fun
+ 	unsigned char *new;
+ 	int ret;
+ 
++	ftrace_update_func_call = (unsigned long)func;
++
+ 	new = ftrace_call_replace(ip, (unsigned long)func);
+ 	ret = update_ftrace_func(ip, new);
+ 
+@@ -292,13 +296,28 @@ int ftrace_int3_handler(struct pt_regs *
+ 	if (WARN_ON_ONCE(!regs))
+ 		return 0;
+ 
+-	ip = regs->ip - 1;
+-	if (!ftrace_location(ip) && !is_ftrace_caller(ip))
+-		return 0;
++	ip = regs->ip - INT3_INSN_SIZE;
+ 
+-	regs->ip += MCOUNT_INSN_SIZE - 1;
++#ifdef CONFIG_X86_64
++	if (ftrace_location(ip)) {
++		int3_emulate_call(regs, (unsigned long)ftrace_regs_caller);
++		return 1;
++	} else if (is_ftrace_caller(ip)) {
++		if (!ftrace_update_func_call) {
++			int3_emulate_jmp(regs, ip + CALL_INSN_SIZE);
++			return 1;
++		}
++		int3_emulate_call(regs, ftrace_update_func_call);
++		return 1;
++	}
++#else
++	if (ftrace_location(ip) || is_ftrace_caller(ip)) {
++		int3_emulate_jmp(regs, ip + CALL_INSN_SIZE);
++		return 1;
++	}
++#endif
+ 
+-	return 1;
++	return 0;
+ }
+ 
+ static int ftrace_write(unsigned long ip, const char *val, int size)
+@@ -869,6 +888,8 @@ void arch_ftrace_update_trampoline(struc
+ 
+ 	func = ftrace_ops_get_func(ops);
+ 
++	ftrace_update_func_call = (unsigned long)func;
++
+ 	/* Do a safe modify in case the trampoline is executing */
+ 	new = ftrace_call_replace(ip, (unsigned long)func);
+ 	ret = update_ftrace_func(ip, new);
+@@ -965,6 +986,7 @@ static int ftrace_mod_jmp(unsigned long
+ {
+ 	unsigned char *new;
+ 
++	ftrace_update_func_call = 0UL;
+ 	new = ftrace_jmp_replace(ip, (unsigned long)func);
+ 
+ 	return update_ftrace_func(ip, new);
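
To make the window described in the commit message above concrete: converting a fentry
call site is done in steps (int3 on the first byte, then the tail bytes, then the first
byte again), and a CPU can hit the site in any intermediate state. The following
standalone C sketch is purely illustrative -- names like hit_call_site() and the state
machine itself are invented stand-ins for the real text-poke sequence -- and contrasts
the old int3 behaviour (skip the call) with the call emulation this patch introduces:

#include <stdio.h>

/* Illustrative stand-ins for the two trampolines involved. */
static int klp_trampoline(void)  { return 1; }	/* live kernel patch */
static int iter_trampoline(void) { return 2; }	/* ftrace_regs_caller */

/*
 * States a fentry call site passes through while its 5-byte call is
 * rewritten with breakpoints: first byte -> int3, then the tail four
 * bytes, then the first byte again.
 */
enum site_state { CALL_KLP, INT3_OLD_TAIL, INT3_NEW_TAIL, CALL_ITER };

static int emulate_calls;	/* 0: behaviour before this patch */

static int hit_call_site(enum site_state s)
{
	switch (s) {
	case CALL_KLP:
		return klp_trampoline();
	case INT3_OLD_TAIL:
	case INT3_NEW_TAIL:
		if (emulate_calls)		/* int3_emulate_call() */
			return iter_trampoline();
		return 0;	/* old handler skipped the call entirely,
				 * so the unpatched function body runs */
	case CALL_ITER:
		return iter_trampoline();
	}
	return 0;
}

int main(void)
{
	for (emulate_calls = 0; emulate_calls <= 1; emulate_calls++)
		for (int s = CALL_KLP; s <= CALL_ITER; s++)
			printf("emulate=%d state=%d -> trampoline %d\n",
			       emulate_calls, s, hit_call_site(s));
	return 0;
}

Run it and the emulate=0 rows show the two int3 states reaching no trampoline at all,
which for live patching means the old buggy function executes; with emulate=1 every
state lands in a trampoline, closing the window.
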
Peter Anvin" +Cc: the arch/x86 maintainers +Cc: Josh Poimboeuf +Cc: Jiri Kosina +Cc: Miroslav Benes +Cc: Petr Mladek +Cc: Joe Lawrence +Cc: Shuah Khan +Cc: Konrad Rzeszutek Wilk +Cc: Tim Chen +Cc: Sebastian Andrzej Siewior +Cc: Mimi Zohar +Cc: Juergen Gross +Cc: Nick Desaulniers +Cc: Nayna Jain +Cc: Masahiro Yamada +Cc: Joerg Roedel +Cc: "open list:KERNEL SELFTEST FRAMEWORK" +Cc: stable@vger.kernel.org +Fixes: b700e7f03df5 ("livepatch: kernel: add support for live patching") +Tested-by: Nicolai Stange +Reviewed-by: Nicolai Stange +Reviewed-by: Masami Hiramatsu +Signed-off-by: Peter Zijlstra (Intel) +[ Changed to only implement emulated calls for x86_64 ] +Signed-off-by: Steven Rostedt (VMware) +Signed-off-by: Greg Kroah-Hartman + +--- + arch/x86/kernel/ftrace.c | 32 +++++++++++++++++++++++++++----- + 1 file changed, 27 insertions(+), 5 deletions(-) + +--- a/arch/x86/kernel/ftrace.c ++++ b/arch/x86/kernel/ftrace.c +@@ -30,6 +30,7 @@ + #include + #include + #include ++#include + + #ifdef CONFIG_DYNAMIC_FTRACE + +@@ -229,6 +230,7 @@ int ftrace_modify_call(struct dyn_ftrace + } + + static unsigned long ftrace_update_func; ++static unsigned long ftrace_update_func_call; + + static int update_ftrace_func(unsigned long ip, void *new) + { +@@ -257,6 +259,8 @@ int ftrace_update_ftrace_func(ftrace_fun + unsigned char *new; + int ret; + ++ ftrace_update_func_call = (unsigned long)func; ++ + new = ftrace_call_replace(ip, (unsigned long)func); + ret = update_ftrace_func(ip, new); + +@@ -292,13 +296,28 @@ int ftrace_int3_handler(struct pt_regs * + if (WARN_ON_ONCE(!regs)) + return 0; + +- ip = regs->ip - 1; +- if (!ftrace_location(ip) && !is_ftrace_caller(ip)) +- return 0; ++ ip = regs->ip - INT3_INSN_SIZE; + +- regs->ip += MCOUNT_INSN_SIZE - 1; ++#ifdef CONFIG_X86_64 ++ if (ftrace_location(ip)) { ++ int3_emulate_call(regs, (unsigned long)ftrace_regs_caller); ++ return 1; ++ } else if (is_ftrace_caller(ip)) { ++ if (!ftrace_update_func_call) { ++ int3_emulate_jmp(regs, ip + CALL_INSN_SIZE); ++ return 1; ++ } ++ int3_emulate_call(regs, ftrace_update_func_call); ++ return 1; ++ } ++#else ++ if (ftrace_location(ip) || is_ftrace_caller(ip)) { ++ int3_emulate_jmp(regs, ip + CALL_INSN_SIZE); ++ return 1; ++ } ++#endif + +- return 1; ++ return 0; + } + + static int ftrace_write(unsigned long ip, const char *val, int size) +@@ -869,6 +888,8 @@ void arch_ftrace_update_trampoline(struc + + func = ftrace_ops_get_func(ops); + ++ ftrace_update_func_call = (unsigned long)func; ++ + /* Do a safe modify in case the trampoline is executing */ + new = ftrace_call_replace(ip, (unsigned long)func); + ret = update_ftrace_func(ip, new); +@@ -965,6 +986,7 @@ static int ftrace_mod_jmp(unsigned long + { + unsigned char *new; + ++ ftrace_update_func_call = 0UL; + new = ftrace_jmp_replace(ip, (unsigned long)func); + + return update_ftrace_func(ip, new); diff --git a/queue-4.14/series b/queue-4.14/series index 9762f56711f..68c2dfbd40b 100644 --- a/queue-4.14/series +++ b/queue-4.14/series @@ -30,3 +30,6 @@ fuse-fix-writepages-on-32bit.patch fuse-honor-rlimit_fsize-in-fuse_file_fallocate.patch iommu-tegra-smmu-fix-invalid-asid-bits-on-tegra30-114.patch ceph-flush-dirty-inodes-before-proceeding-with-remount.patch +x86_64-add-gap-to-int3-to-allow-for-call-emulation.patch +x86_64-allow-breakpoints-to-emulate-call-instructions.patch +ftrace-x86_64-emulate-call-function-while-updating-in-breakpoint-handler.patch diff --git a/queue-4.14/x86_64-add-gap-to-int3-to-allow-for-call-emulation.patch 
diff --git a/queue-4.14/x86_64-allow-breakpoints-to-emulate-call-instructions.patch b/queue-4.14/x86_64-allow-breakpoints-to-emulate-call-instructions.patch
new file mode 100644
index 00000000000..99291f0976c
--- /dev/null
+++ b/queue-4.14/x86_64-allow-breakpoints-to-emulate-call-instructions.patch
@@ -0,0 +1,94 @@
+From 4b33dadf37666c0860b88f9e52a16d07bf6d0b03 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra
+Date: Wed, 1 May 2019 15:11:17 +0200
+Subject: x86_64: Allow breakpoints to emulate call instructions
+
+From: Peter Zijlstra
+
+commit 4b33dadf37666c0860b88f9e52a16d07bf6d0b03 upstream.
+
+In order to allow breakpoints to emulate call instructions, they need to
+push the return address onto the stack. The x86_64 int3 handler adds a
+small gap to allow the stack to grow.
+Use this gap to push the return address, making it possible to emulate a
+call instruction at the breakpoint location.
+
+These helper functions are added:
+
+  int3_emulate_jmp(): changes regs->ip so that the return from int3
+                      resumes execution at the given location
+
+  (The next two are only for x86_64)
+  int3_emulate_push(): pushes a value onto the gap in the stack
+  int3_emulate_call(): pushes the return address and changes regs->ip
+
+Cc: Andy Lutomirski
+Cc: Nicolai Stange
+Cc: Thomas Gleixner
+Cc: Ingo Molnar
+Cc: Borislav Petkov
+Cc: "H. Peter Anvin"
+Cc: the arch/x86 maintainers
+Cc: Josh Poimboeuf
+Cc: Jiri Kosina
+Cc: Miroslav Benes
+Cc: Petr Mladek
+Cc: Joe Lawrence
+Cc: Shuah Khan
+Cc: Konrad Rzeszutek Wilk
+Cc: Tim Chen
+Cc: Sebastian Andrzej Siewior
+Cc: Mimi Zohar
+Cc: Juergen Gross
+Cc: Nick Desaulniers
+Cc: Nayna Jain
+Cc: Masahiro Yamada
+Cc: Joerg Roedel
+Cc: "open list:KERNEL SELFTEST FRAMEWORK"
+Cc: stable@vger.kernel.org
+Fixes: b700e7f03df5 ("livepatch: kernel: add support for live patching")
+Tested-by: Nicolai Stange
+Reviewed-by: Nicolai Stange
+Reviewed-by: Masami Hiramatsu
+Signed-off-by: Peter Zijlstra (Intel)
+[ Modified to only work for x86_64 and added comment to int3_emulate_push() ]
+Signed-off-by: Steven Rostedt (VMware)
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/x86/include/asm/text-patching.h |   28 ++++++++++++++++++++++++++++
+ 1 file changed, 28 insertions(+)
+
+--- a/arch/x86/include/asm/text-patching.h
++++ b/arch/x86/include/asm/text-patching.h
+@@ -38,4 +38,32 @@ extern void *text_poke(void *addr, const
+ extern int poke_int3_handler(struct pt_regs *regs);
+ extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
+ 
++static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
++{
++	regs->ip = ip;
++}
++
++#define INT3_INSN_SIZE 1
++#define CALL_INSN_SIZE 5
++
++#ifdef CONFIG_X86_64
++static inline void int3_emulate_push(struct pt_regs *regs, unsigned long val)
++{
++	/*
++	 * The int3 handler in entry_64.S adds a gap between the
++	 * stack where the break point happened, and the saving of
++	 * pt_regs. We can extend the original stack because of
++	 * this gap. See the idtentry macro's create_gap option.
++	 */
++	regs->sp -= sizeof(unsigned long);
++	*(unsigned long *)regs->sp = val;
++}
++
++static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func)
++{
++	int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
++	int3_emulate_jmp(regs, func);
++}
++#endif
++
+ #endif /* _ASM_X86_TEXT_PATCHING_H */
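
As a sanity check on the arithmetic in int3_emulate_call(): an int3 placed on the first
byte of a 5-byte call at address 0x1000 traps with regs->ip == 0x1001, and emulating
the call must leave exactly what the real instruction would have left, i.e. return
address 0x1005 on the stack and execution resuming at the target. The helpers can be
exercised verbatim in userspace with pt_regs and the stack gap mocked up as ordinary C
objects (the 0x1000/0x2000 addresses are arbitrary; nothing below is kernel code):

#include <assert.h>
#include <stdio.h>

#define INT3_INSN_SIZE 1
#define CALL_INSN_SIZE 5

/* Stand-in for the kernel's struct pt_regs; only ip and sp matter here. */
struct pt_regs { unsigned long ip; unsigned long sp; };

static void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
{
	regs->ip = ip;
}

static void int3_emulate_push(struct pt_regs *regs, unsigned long val)
{
	/* In the kernel this write lands in the gap created by the
	 * idtentry create_gap option; here it lands in a plain array. */
	regs->sp -= sizeof(unsigned long);
	*(unsigned long *)regs->sp = val;
}

static void int3_emulate_call(struct pt_regs *regs, unsigned long func)
{
	int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
	int3_emulate_jmp(regs, func);
}

int main(void)
{
	unsigned long gap[6];	/* the 6-word gap from the entry patch */
	struct pt_regs regs = {
		/* The int3 byte at 0x1000 has executed: ip is past it. */
		.ip = 0x1000 + INT3_INSN_SIZE,
		.sp = (unsigned long)&gap[6],
	};

	int3_emulate_call(&regs, 0x2000);

	/* Exactly what a real "call 0x2000" at 0x1000 would have done. */
	assert(regs.ip == 0x2000);
	assert(*(unsigned long *)regs.sp == 0x1005);
	printf("ip=%#lx ret=%#lx\n", regs.ip, *(unsigned long *)regs.sp);
	return 0;
}

The key identity: regs->ip - INT3_INSN_SIZE recovers the start of the patched
instruction, and adding CALL_INSN_SIZE yields the address of the next instruction,
which is the return address a hardware call would have pushed.
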