]>
Commit | Line | Data |
---|---|---|
b2441318 | 1 | // SPDX-License-Identifier: GPL-2.0 |
3d083395 | 2 | /* |
9d2099ab | 3 | * Dynamic function tracing support. |
3d083395 SR |
4 | * |
5 | * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> | |
6 | * | |
7 | * Thanks goes to Ingo Molnar, for suggesting the idea. | |
8 | * Mathieu Desnoyers, for suggesting postponing the modifications. | |
9 | * Arjan van de Ven, for keeping me straight, and explaining to me | |
10 | * the dangers of modifying code on the run. | |
11 | */ | |
12 | ||
3bb258bf JP |
13 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
14 | ||
3d083395 SR |
15 | #include <linux/spinlock.h> |
16 | #include <linux/hardirq.h> | |
6f93fc07 | 17 | #include <linux/uaccess.h> |
3d083395 SR |
18 | #include <linux/ftrace.h> |
19 | #include <linux/percpu.h> | |
19b3e967 | 20 | #include <linux/sched.h> |
f3bea491 | 21 | #include <linux/slab.h> |
3d083395 SR |
22 | #include <linux/init.h> |
23 | #include <linux/list.h> | |
84e1c6bb | 24 | #include <linux/module.h> |
d5b844a2 | 25 | #include <linux/memory.h> |
ac0b14dc | 26 | #include <linux/vmalloc.h> |
3d083395 | 27 | |
47788c58 FW |
28 | #include <trace/syscall.h> |
29 | ||
d1163651 | 30 | #include <asm/set_memory.h> |
59a094c9 | 31 | #include <asm/kprobes.h> |
395a59d0 | 32 | #include <asm/ftrace.h> |
732f3ca7 | 33 | #include <asm/nops.h> |
9e298e86 | 34 | #include <asm/text-patching.h> |
3d083395 | 35 | |
caf4b323 | 36 | #ifdef CONFIG_DYNAMIC_FTRACE |
3d083395 | 37 | |
/*
 * Set (under text_mutex) between ftrace_arch_code_modify_prepare() and
 * ftrace_arch_code_modify_post_process().  While non-zero,
 * ftrace_modify_code_direct() must use the batched text_poke_queue()
 * path rather than text_poke_early().
 */
static int ftrace_poke_late = 0;
/*
 * Called by the ftrace core before a batch of code modifications.
 * Takes text_mutex and flips ftrace_poke_late so subsequent direct
 * modifications go through text_poke_queue().
 */
int ftrace_arch_code_modify_prepare(void)
    __acquires(&text_mutex)
{
	/*
	 * Need to grab text_mutex to prevent a race from module loading
	 * and live kernel patching from changing the text permissions while
	 * ftrace has it set to "read/write".
	 */
	mutex_lock(&text_mutex);
	ftrace_poke_late = 1;
	return 0;
}
52 | ||
/*
 * Counterpart of ftrace_arch_code_modify_prepare(): flushes any queued
 * text pokes, clears ftrace_poke_late and drops text_mutex.
 */
int ftrace_arch_code_modify_post_process(void)
    __releases(&text_mutex)
{
	/*
	 * ftrace_make_{call,nop}() may be called during
	 * module load, and we need to finish the text_poke_queue()
	 * that they do, here.
	 */
	text_poke_finish();
	ftrace_poke_late = 0;
	mutex_unlock(&text_mutex);
	return 0;
}
66 | ||
/*
 * Return the ideal atomic 5-byte NOP used to disable an fentry/mcount
 * call site (NOP_ATOMIC5 matches MCOUNT_INSN_SIZE on x86).
 */
static const char *ftrace_nop_replace(void)
{
	return ideal_nops[NOP_ATOMIC5];
}
71 | ||
/*
 * Build the 5-byte "call addr" instruction (rel32 call, opcode 0xE8)
 * that would live at @ip.  Returns a pointer to a static buffer
 * owned by text_gen_insn().
 */
static const char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
}
76 | ||
/*
 * Verify that the instruction bytes at @ip match @old_code before we
 * overwrite them.  Returns 0 on match, -EFAULT if the text could not be
 * read, -EINVAL on a mismatch; both failure paths WARN.
 */
static int ftrace_verify_code(unsigned long ip, const char *old_code)
{
	char cur_code[MCOUNT_INSN_SIZE];

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug was to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read and modify the code with probe_kernel_*(), and make
	 * sure what we read is what we expected it to be before modifying it.
	 */
	/* read the text we want to modify */
	if (probe_kernel_read(cur_code, (void *)ip, MCOUNT_INSN_SIZE)) {
		WARN_ON(1);
		return -EFAULT;
	}

	/* Make sure it is what we expect it to be */
	if (memcmp(cur_code, old_code, MCOUNT_INSN_SIZE) != 0) {
		WARN_ON(1);
		return -EINVAL;
	}

	return 0;
}
6f93fc07 | 102 | |
/*
 * Replace the MCOUNT_INSN_SIZE bytes at @ip with @new_code, after
 * verifying they currently equal @old_code.  "Direct" means no
 * breakpoint-based live patching: this runs either at early boot or
 * for code that is not yet executable (module load).
 *
 * Marked __ref because it calls text_poke_early() which is .init.text. That is
 * ok because that call will happen early, during boot, when .init sections are
 * still present.
 */
static int __ref
ftrace_modify_code_direct(unsigned long ip, const char *old_code,
			  const char *new_code)
{
	int ret = ftrace_verify_code(ip, old_code);
	if (ret)
		return ret;

	/* replace the text with the new text */
	if (ftrace_poke_late)
		text_poke_queue((void *)ip, new_code, MCOUNT_INSN_SIZE, NULL);
	else
		text_poke_early((void *)ip, new_code, MCOUNT_INSN_SIZE);
	return 0;
}
123 | ||
/*
 * Turn the call site of @rec back into a NOP.  Only legitimate for the
 * initial mcount -> nop conversion (addr == MCOUNT_ADDR); all other
 * conversions on x86 go through the overridden ftrace_replace_code().
 */
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	const char *new, *old;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace();

	/*
	 * On boot up, and when modules are loaded, the MCOUNT_ADDR
	 * is converted to a nop, and will never become MCOUNT_ADDR
	 * again. This code is either running before SMP (on boot up)
	 * or before the code will ever be executed (module load).
	 * We do not want to use the breakpoint version in this case,
	 * just modify the code directly.
	 */
	if (addr == MCOUNT_ADDR)
		return ftrace_modify_code_direct(ip, old, new);

	/*
	 * x86 overrides ftrace_replace_code -- this function will never be used
	 * in this case.
	 */
	WARN_ONCE(1, "invalid use of ftrace_make_nop");
	return -EINVAL;
}
150 | ||
151 | int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) | |
152 | { | |
31e88909 | 153 | unsigned long ip = rec->ip; |
768ae440 | 154 | const char *new, *old; |
31e88909 SR |
155 | |
156 | old = ftrace_nop_replace(); | |
157 | new = ftrace_call_replace(ip, addr); | |
158 | ||
8a4d0a68 SR |
159 | /* Should only be called when module is loaded */ |
160 | return ftrace_modify_code_direct(rec->ip, old, new); | |
d61f82d0 SR |
161 | } |
162 | ||
/*
 * Should never be called:
 * As it is only called by __ftrace_replace_code() which is called by
 * ftrace_replace_code() that x86 overrides, and by ftrace_update_code()
 * which is called to turn mcount into nops or nops into function calls
 * but not to convert a function from not using regs to one that uses
 * regs, which ftrace_modify_call() is for.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	WARN_ON(1);
	return -EINVAL;
}
08f6fba5 | 177 | |
/*
 * Live-patch both ftrace_call and ftrace_regs_call dispatch sites to
 * call @func.  Uses text_poke_bp() (breakpoint-based patching) since
 * other CPUs may be executing these call sites concurrently.
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip;
	const char *new;

	ip = (unsigned long)(&ftrace_call);
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

	ip = (unsigned long)(&ftrace_regs_call);
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

	return 0;
}
193 | ||
/*
 * x86 override of the weak ftrace_replace_code(): batch-update every
 * ftrace record.  Two passes:
 *   1) verify the current bytes of every record to be changed (abort
 *      everything via ftrace_bug() on the first mismatch),
 *   2) queue the new bytes with text_poke_queue() and update the record
 *      state, then flush the whole queue at once.
 */
void ftrace_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	const char *new, *old;
	int ret;

	/* Pass 1: verify that each site contains the bytes we expect. */
	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		switch (ftrace_test_record(rec, enable)) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;

		case FTRACE_UPDATE_MAKE_CALL:
			/* currently a NOP */
			old = ftrace_nop_replace();
			break;

		case FTRACE_UPDATE_MODIFY_CALL:
		case FTRACE_UPDATE_MAKE_NOP:
			/* currently a call to the current handler */
			old = ftrace_call_replace(rec->ip, ftrace_get_addr_curr(rec));
			break;
		}

		ret = ftrace_verify_code(rec->ip, old);
		if (ret) {
			ftrace_bug(ret, rec);
			return;
		}
	}

	/* Pass 2: queue the replacement bytes and update record state. */
	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		switch (ftrace_test_record(rec, enable)) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;

		case FTRACE_UPDATE_MAKE_CALL:
		case FTRACE_UPDATE_MODIFY_CALL:
			new = ftrace_call_replace(rec->ip, ftrace_get_addr_new(rec));
			break;

		case FTRACE_UPDATE_MAKE_NOP:
			new = ftrace_nop_replace();
			break;
		}

		text_poke_queue((void *)rec->ip, new, MCOUNT_INSN_SIZE, NULL);
		ftrace_update_record(rec, enable);
	}
	text_poke_finish();
}
249 | ||
08d636b6 SR |
250 | void arch_ftrace_update_code(int command) |
251 | { | |
e4f5d544 | 252 | ftrace_modify_all_code(command); |
08d636b6 SR |
253 | } |
254 | ||
/* No arch-specific dynamic-ftrace initialization needed on x86. */
int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
5a45cfe1 | 259 | |
f3bea491 SRRH |
260 | /* Currently only x86_64 supports dynamic trampolines */ |
261 | #ifdef CONFIG_X86_64 | |
262 | ||
#ifdef CONFIG_MODULES
#include <linux/moduleloader.h>
/* Module allocation simplifies allocating memory for code */
static inline void *alloc_tramp(unsigned long size)
{
	return module_alloc(size);
}
static inline void tramp_free(void *tramp)
{
	module_memfree(tramp);
}
#else
/* Trampolines can only be created if modules are supported */
static inline void *alloc_tramp(unsigned long size)
{
	/* NULL makes create_trampoline() fail cleanly */
	return NULL;
}
static inline void tramp_free(void *tramp) { }
#endif
282 | ||
/* Defined as markers to the end of the ftrace default trampolines */
extern void ftrace_regs_caller_end(void);
extern void ftrace_epilogue(void);
extern void ftrace_caller_op_ptr(void);
extern void ftrace_regs_caller_op_ptr(void);

/* movq function_trace_op(%rip), %rdx */
/* 0x48 0x8b 0x15 <offset-to-ftrace_trace_op (4 bytes)> */
#define OP_REF_SIZE	7

/*
 * The ftrace_ops is passed to the function callback. Since the
 * trampoline only services a single ftrace_ops, we can pass in
 * that ops directly.
 *
 * The ftrace_op_code_union is used to create a pointer to the
 * ftrace_ops that will be passed to the callback function.
 */
union ftrace_op_code_union {
	char code[OP_REF_SIZE];
	struct {
		char op[3];	/* the 3 opcode bytes (0x48 0x8b 0x15) */
		int offset;	/* RIP-relative displacement to the ops pointer */
	} __attribute__((packed));
};

/* Size of the ret instruction copied to the end of a trampoline */
#define RET_SIZE		1
/*
 * Allocate and build a per-ops trampoline: a copy of ftrace_caller (or
 * ftrace_regs_caller when FTRACE_OPS_FL_SAVE_REGS is set) followed by a
 * ret and, at the very end, the address of @ops.  The copied code's
 * "movq function_trace_op(%rip), %rdx" is re-pointed at that embedded
 * ops pointer, and the call slot is patched to call ops' handler.
 *
 * Returns the trampoline address, or 0 on failure.  On success,
 * *tramp_size is set and FTRACE_OPS_FL_ALLOC_TRAMP is flagged so the
 * trampoline is later freed by arch_ftrace_trampoline_free().
 */
static unsigned long
create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
{
	unsigned long start_offset;
	unsigned long end_offset;
	unsigned long op_offset;
	unsigned long call_offset;
	unsigned long offset;
	unsigned long npages;
	unsigned long size;
	unsigned long retq;
	unsigned long *ptr;
	void *trampoline;
	void *ip;
	/* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
	unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
	union ftrace_op_code_union op_ptr;
	int ret;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
		start_offset = (unsigned long)ftrace_regs_caller;
		end_offset = (unsigned long)ftrace_regs_caller_end;
		op_offset = (unsigned long)ftrace_regs_caller_op_ptr;
		call_offset = (unsigned long)ftrace_regs_call;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		end_offset = (unsigned long)ftrace_epilogue;
		op_offset = (unsigned long)ftrace_caller_op_ptr;
		call_offset = (unsigned long)ftrace_call;
	}

	size = end_offset - start_offset;

	/*
	 * Allocate enough size to store the ftrace_caller code,
	 * the iret , as well as the address of the ftrace_ops this
	 * trampoline is used for.
	 */
	trampoline = alloc_tramp(size + RET_SIZE + sizeof(void *));
	if (!trampoline)
		return 0;

	*tramp_size = size + RET_SIZE + sizeof(void *);
	npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE);

	/* Copy ftrace_caller onto the trampoline memory */
	ret = probe_kernel_read(trampoline, (void *)start_offset, size);
	if (WARN_ON(ret < 0))
		goto fail;

	ip = trampoline + size;

	/* The trampoline ends with ret(q) */
	retq = (unsigned long)ftrace_stub;
	ret = probe_kernel_read(ip, (void *)retq, RET_SIZE);
	if (WARN_ON(ret < 0))
		goto fail;

	/*
	 * The address of the ftrace_ops that is used for this trampoline
	 * is stored at the end of the trampoline. This will be used to
	 * load the third parameter for the callback. Basically, that
	 * location at the end of the trampoline takes the place of
	 * the global function_trace_op variable.
	 */

	ptr = (unsigned long *)(trampoline + size + RET_SIZE);
	*ptr = (unsigned long)ops;

	op_offset -= start_offset;
	memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);

	/* Are we pointing to the reference? */
	if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0))
		goto fail;

	/* Load the contents of ptr into the callback parameter */
	offset = (unsigned long)ptr;
	offset -= (unsigned long)trampoline + op_offset + OP_REF_SIZE;

	op_ptr.offset = offset;

	/* put in the new offset to the ftrace_ops */
	memcpy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);

	/* put in the call to the function */
	mutex_lock(&text_mutex);
	call_offset -= start_offset;
	memcpy(trampoline + call_offset,
	       text_gen_insn(CALL_INSN_OPCODE,
			     trampoline + call_offset,
			     ftrace_ops_get_func(ops)), CALL_INSN_SIZE);
	mutex_unlock(&text_mutex);

	/* ALLOC_TRAMP flags lets us know we created it */
	ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;

	set_vm_flush_reset_perms(trampoline);

	/*
	 * During boot, set_memory_ro() is deferred; set_ftrace_ops_ro()
	 * makes boot-time trampolines read-only later.
	 */
	if (likely(system_state != SYSTEM_BOOTING))
		set_memory_ro((unsigned long)trampoline, npages);
	set_memory_x((unsigned long)trampoline, npages);
	return (unsigned long)trampoline;
fail:
	tramp_free(trampoline);
	return 0;
}
418 | ||
/*
 * Walk every registered ftrace_ops and mark its allocated trampoline
 * read-only.  Complements create_trampoline(), which skips
 * set_memory_ro() while system_state == SYSTEM_BOOTING.
 */
void set_ftrace_ops_ro(void)
{
	struct ftrace_ops *ops;
	unsigned long start_offset;
	unsigned long end_offset;
	unsigned long npages;
	unsigned long size;

	do_for_each_ftrace_op(ops, ftrace_ops_list) {
		if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
			continue;

		/* Recompute the size exactly as create_trampoline() did. */
		if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
			start_offset = (unsigned long)ftrace_regs_caller;
			end_offset = (unsigned long)ftrace_regs_caller_end;
		} else {
			start_offset = (unsigned long)ftrace_caller;
			end_offset = (unsigned long)ftrace_epilogue;
		}
		size = end_offset - start_offset;
		size = size + RET_SIZE + sizeof(void *);
		npages = DIV_ROUND_UP(size, PAGE_SIZE);
		set_memory_ro((unsigned long)ops->trampoline, npages);
	} while_for_each_ftrace_op(ops);
}
444 | ||
15d5b02c SRRH |
445 | static unsigned long calc_trampoline_call_offset(bool save_regs) |
446 | { | |
447 | unsigned long start_offset; | |
448 | unsigned long call_offset; | |
449 | ||
450 | if (save_regs) { | |
451 | start_offset = (unsigned long)ftrace_regs_caller; | |
452 | call_offset = (unsigned long)ftrace_regs_call; | |
453 | } else { | |
454 | start_offset = (unsigned long)ftrace_caller; | |
455 | call_offset = (unsigned long)ftrace_call; | |
456 | } | |
457 | ||
458 | return call_offset - start_offset; | |
459 | } | |
460 | ||
/*
 * Create the ops' trampoline on first use, or live-patch the call slot
 * of an existing one (only if we allocated it) to the ops' current
 * handler function.
 */
void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
{
	ftrace_func_t func;
	unsigned long offset;
	unsigned long ip;
	unsigned int size;
	const char *new;

	if (!ops->trampoline) {
		ops->trampoline = create_trampoline(ops, &size);
		if (!ops->trampoline)
			return;
		ops->trampoline_size = size;
		/* create_trampoline() already installed the right call */
		return;
	}

	/*
	 * The ftrace_ops caller may set up its own trampoline.
	 * In such a case, this code must not modify it.
	 */
	if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	ip = ops->trampoline + offset;
	func = ftrace_ops_get_func(ops);

	mutex_lock(&text_mutex);
	/* Do a safe modify in case the trampoline is executing */
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
	mutex_unlock(&text_mutex);
}
15d5b02c SRRH |
494 | |
495 | /* Return the address of the function the trampoline calls */ | |
496 | static void *addr_from_call(void *ptr) | |
497 | { | |
67c1d4a2 | 498 | union text_poke_insn call; |
15d5b02c SRRH |
499 | int ret; |
500 | ||
67c1d4a2 | 501 | ret = probe_kernel_read(&call, ptr, CALL_INSN_SIZE); |
15d5b02c SRRH |
502 | if (WARN_ON_ONCE(ret < 0)) |
503 | return NULL; | |
504 | ||
505 | /* Make sure this is a call */ | |
67c1d4a2 PZ |
506 | if (WARN_ON_ONCE(call.opcode != CALL_INSN_OPCODE)) { |
507 | pr_warn("Expected E8, got %x\n", call.opcode); | |
15d5b02c SRRH |
508 | return NULL; |
509 | } | |
510 | ||
67c1d4a2 | 511 | return ptr + CALL_INSN_SIZE + call.disp; |
15d5b02c SRRH |
512 | } |
513 | ||
6a06bdbf | 514 | void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent, |
15d5b02c SRRH |
515 | unsigned long frame_pointer); |
516 | ||
/*
 * If the ops->trampoline was not allocated, then it probably
 * has a static trampoline func, or is the ftrace caller itself.
 */
static void *static_tramp_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;
	bool save_regs = rec->flags & FTRACE_FL_REGS_EN;
	void *ptr;

	if (ops && ops->trampoline) {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
		/*
		 * We only know about function graph tracer setting as static
		 * trampoline.
		 */
		if (ops->trampoline == FTRACE_GRAPH_ADDR)
			return (void *)prepare_ftrace_return;
#endif
		/* Unknown static trampoline: can't resolve the callee */
		return NULL;
	}

	offset = calc_trampoline_call_offset(save_regs);

	/* Decode the call inside the default ftrace caller itself */
	if (save_regs)
		ptr = (void *)FTRACE_REGS_ADDR + offset;
	else
		ptr = (void *)FTRACE_ADDR + offset;

	return addr_from_call(ptr);
}
548 | ||
/*
 * Return the function that the trampoline of @ops (for record @rec)
 * currently calls, by decoding the call instruction in the trampoline.
 */
void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;

	/* If we didn't allocate this trampoline, consider it static */
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return static_tramp_func(ops, rec);

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	return addr_from_call((void *)ops->trampoline + offset);
}
560 | ||
/* Free a trampoline we allocated in create_trampoline(); no-op otherwise. */
void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	tramp_free((void *)ops->trampoline);
	ops->trampoline = 0;
}
15d5b02c | 569 | |
f3bea491 SRRH |
570 | #endif /* CONFIG_X86_64 */ |
571 | #endif /* CONFIG_DYNAMIC_FTRACE */ | |
572 | ||
573 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | |
574 | ||
575 | #ifdef CONFIG_DYNAMIC_FTRACE | |
576 | extern void ftrace_graph_call(void); | |
5a45cfe1 | 577 | |
/*
 * Build the 5-byte "jmp addr" (rel32, JMP32_INSN_OPCODE) that would
 * live at @ip.  Returns a pointer to text_gen_insn()'s static buffer.
 */
static const char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
{
	return text_gen_insn(JMP32_INSN_OPCODE, (void *)ip, (void *)addr);
}
582 | ||
/*
 * Live-patch the jump at @ip to target @func using breakpoint-based
 * patching (text_poke_bp).  Always returns 0.
 */
static int ftrace_mod_jmp(unsigned long ip, void *func)
{
	const char *new;

	new = ftrace_jmp_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
	return 0;
}
591 | ||
592 | int ftrace_enable_ftrace_graph_caller(void) | |
593 | { | |
594 | unsigned long ip = (unsigned long)(&ftrace_graph_call); | |
5a45cfe1 | 595 | |
87fbb2ac | 596 | return ftrace_mod_jmp(ip, &ftrace_graph_caller); |
5a45cfe1 SR |
597 | } |
598 | ||
599 | int ftrace_disable_ftrace_graph_caller(void) | |
600 | { | |
601 | unsigned long ip = (unsigned long)(&ftrace_graph_call); | |
5a45cfe1 | 602 | |
87fbb2ac | 603 | return ftrace_mod_jmp(ip, &ftrace_stub); |
5a45cfe1 SR |
604 | } |
605 | ||
e7d3737e FW |
606 | #endif /* !CONFIG_DYNAMIC_FTRACE */ |
607 | ||
/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;
	int faulted;

	/*
	 * When resuming from suspend-to-ram, this function can be indirectly
	 * called from early CPU startup code while the CPU is in real mode,
	 * which would fail miserably. Make sure the stack pointer is a
	 * virtual address.
	 *
	 * This check isn't as accurate as virt_addr_valid(), but it should be
	 * good enough for this purpose, and it's fast.
	 */
	if (unlikely((long)__builtin_frame_address(0) >= 0))
		return;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Protect against fault, even if it shouldn't
	 * happen. This tool is too much intrusive to
	 * ignore such a protection.
	 *
	 * Reads the caller's return address from *parent and replaces it
	 * with return_hooker; the extable entries recover from a fault in
	 * either access by setting faulted = 1.
	 */
	asm volatile(
		"1: " _ASM_MOV " (%[parent]), %[old]\n"
		"2: " _ASM_MOV " %[return_hooker], (%[parent])\n"
		"   movl $0, %[faulted]\n"
		"3:\n"

		".section .fixup, \"ax\"\n"
		"4: movl $1, %[faulted]\n"
		"   jmp 3b\n"
		".previous\n"

		_ASM_EXTABLE(1b, 4b)
		_ASM_EXTABLE(2b, 4b)

		: [old] "=&r" (old), [faulted] "=r" (faulted)
		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
		: "memory"
	);

	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	/* On failure to push to the return stack, restore the original address */
	if (function_graph_enter(old, self_addr, frame_pointer, parent))
		*parent = old;
}
fb52607a | 670 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |