From 9e298e8604088a600d8100a111a532a9d342af09 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Wed, 1 May 2019 15:11:17 +0200
Subject: ftrace/x86_64: Emulate call function while updating in breakpoint handler

From: Peter Zijlstra <peterz@infradead.org>

commit 9e298e8604088a600d8100a111a532a9d342af09 upstream.

Nicolai Stange discovered[1] that if live kernel patching is enabled, and
the function tracer starts tracing the same function that was patched,
the conversion of the fentry call site from calling the live kernel patch
trampoline to calling the iterator trampoline has a slight window where
it doesn't call anything.
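
To see where the window comes from: the call site is rewritten in three
steps, with a breakpoint armed in between. A minimal sketch of that flow
(a condensation of the existing add_break()/add_update()/finish_update()
logic in arch/x86/kernel/ftrace.c, not part of this patch;
update_call_site() is a hypothetical name, while ftrace_write() and
run_sync() are the real helpers):

	/*
	 * Hypothetical condensation of the x86 call-site update flow;
	 * illustration only, validation of the old bytes trimmed.
	 */
	static int update_call_site(unsigned long ip, const char *new_call)
	{
		unsigned char brk = 0xcc;	/* int3 opcode */

		/* 1) Arm a breakpoint on the first byte of the old call. */
		if (ftrace_write(ip, (const char *)&brk, 1))
			return -EPERM;
		run_sync();

		/* 2) Write the new call's tail: the four target bytes. */
		if (ftrace_write(ip + 1, new_call + 1, MCOUNT_INSN_SIZE - 1))
			return -EPERM;
		run_sync();

		/* 3) Restore the first byte (0xe8, the call opcode). */
		if (ftrace_write(ip, new_call, 1))
			return -EPERM;
		run_sync();

		return 0;
	}

Between steps 1 and 3, any task that hits the call site traps into
ftrace_int3_handler(). Before this patch the handler simply advanced
regs->ip past the instruction, so nothing was called in that window.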

Live kernel patching depends on ftrace always calling its code (to
prevent the function being traced from being called, as ftrace redirects
it). This small window would allow the old, buggy function to be called,
which can cause undesirable results.

Nicolai submitted new patches[2], but these were controversial, as the
problem is similar to the static call emulation issues that came up a
while ago[3]. After some debate[4][5] it was agreed that adding a gap in
the stack when entering the breakpoint handler allows a return address to
be pushed onto the stack, making it easy to emulate a call (sketched
below the links).

[1] http://lkml.kernel.org/r/20180726104029.7736-1-nstange@suse.de
[2] http://lkml.kernel.org/r/20190427100639.15074-1-nstange@suse.de
[3] http://lkml.kernel.org/r/3cf04e113d71c9f8e4be95fb84a510f085aa4afa.1541711457.git.jpoimboe@redhat.com
[4] http://lkml.kernel.org/r/CAHk-=wh5OpheSU8Em_Q3Hg8qw_JtoijxOdPtHru6d+5K8TWM=A@mail.gmail.com
[5] http://lkml.kernel.org/r/CAHk-=wjvQxY4DvPrJ6haPgAa6b906h=MwZXO6G8OtiTGe=N7_w@mail.gmail.com
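
The int3_emulate_*() helpers used in the diff below come from
<asm/text-patching.h>, added by the companion x86_64 stack-gap change
queued alongside this patch. Roughly (a sketch of that interface based on
the description above, not a verbatim copy of the header):

	static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
	{
		regs->ip = ip;
	}

	static inline void int3_emulate_push(struct pt_regs *regs, unsigned long val)
	{
		/*
		 * The int3 entry code reserves a gap below the saved
		 * registers, so moving regs->sp down and storing here
		 * lands in that gap instead of corrupting pt_regs.
		 */
		regs->sp -= sizeof(unsigned long);
		*(unsigned long *)regs->sp = val;
	}

	static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func)
	{
		/* Return address: the instruction after the patched call. */
		int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
		int3_emulate_jmp(regs, func);
	}

This is why the stack gap matters: without it, the push would overwrite
the register state saved on the int3 entry path.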

[
  Live kernel patching is not implemented on x86_32, thus the emulated
  calls are only for x86_64.
]

Cc: Andy Lutomirski <luto@kernel.org>
Cc: Nicolai Stange <nstange@suse.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: the arch/x86 maintainers <x86@kernel.org>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Jiri Kosina <jikos@kernel.org>
Cc: Miroslav Benes <mbenes@suse.cz>
Cc: Petr Mladek <pmladek@suse.com>
Cc: Joe Lawrence <joe.lawrence@redhat.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Mimi Zohar <zohar@linux.ibm.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Nick Desaulniers <ndesaulniers@google.com>
Cc: Nayna Jain <nayna@linux.ibm.com>
Cc: Masahiro Yamada <yamada.masahiro@socionext.com>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: "open list:KERNEL SELFTEST FRAMEWORK" <linux-kselftest@vger.kernel.org>
Cc: stable@vger.kernel.org
Fixes: b700e7f03df5 ("livepatch: kernel: add support for live patching")
Tested-by: Nicolai Stange <nstange@suse.de>
Reviewed-by: Nicolai Stange <nstange@suse.de>
Reviewed-by: Masami Hiramatsu <mhiramat@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
[ Changed to only implement emulated calls for x86_64 ]
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/x86/kernel/ftrace.c |   32 +++++++++++++++++++++++++++-----
 1 file changed, 27 insertions(+), 5 deletions(-)

--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -29,6 +29,7 @@
 #include <asm/kprobes.h>
 #include <asm/ftrace.h>
 #include <asm/nops.h>
+#include <asm/text-patching.h>
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
@@ -228,6 +229,7 @@ int ftrace_modify_call(struct dyn_ftrace
 }
 
 static unsigned long ftrace_update_func;
+static unsigned long ftrace_update_func_call;
 
 static int update_ftrace_func(unsigned long ip, void *new)
 {
@@ -256,6 +258,8 @@ int ftrace_update_ftrace_func(ftrace_fun
 	unsigned char *new;
 	int ret;
 
+	ftrace_update_func_call = (unsigned long)func;
+
 	new = ftrace_call_replace(ip, (unsigned long)func);
 	ret = update_ftrace_func(ip, new);
 
@@ -291,13 +295,28 @@ int ftrace_int3_handler(struct pt_regs *
 	if (WARN_ON_ONCE(!regs))
 		return 0;
 
-	ip = regs->ip - 1;
-	if (!ftrace_location(ip) && !is_ftrace_caller(ip))
-		return 0;
+	ip = regs->ip - INT3_INSN_SIZE;
 
-	regs->ip += MCOUNT_INSN_SIZE - 1;
+#ifdef CONFIG_X86_64
+	if (ftrace_location(ip)) {
+		int3_emulate_call(regs, (unsigned long)ftrace_regs_caller);
+		return 1;
+	} else if (is_ftrace_caller(ip)) {
+		if (!ftrace_update_func_call) {
+			int3_emulate_jmp(regs, ip + CALL_INSN_SIZE);
+			return 1;
+		}
+		int3_emulate_call(regs, ftrace_update_func_call);
+		return 1;
+	}
+#else
+	if (ftrace_location(ip) || is_ftrace_caller(ip)) {
+		int3_emulate_jmp(regs, ip + CALL_INSN_SIZE);
+		return 1;
+	}
+#endif
 
-	return 1;
+	return 0;
 }
 
 static int ftrace_write(unsigned long ip, const char *val, int size)
@@ -868,6 +887,8 @@ void arch_ftrace_update_trampoline(struc
 
 	func = ftrace_ops_get_func(ops);
 
+	ftrace_update_func_call = (unsigned long)func;
+
 	/* Do a safe modify in case the trampoline is executing */
 	new = ftrace_call_replace(ip, (unsigned long)func);
 	ret = update_ftrace_func(ip, new);
@@ -964,6 +985,7 @@ static int ftrace_mod_jmp(unsigned long
 {
 	unsigned char *new;
 
+	ftrace_update_func_call = 0UL;
 	new = ftrace_jmp_replace(ip, (unsigned long)func);
 
 	return update_ftrace_func(ip, new);