// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>

#include <trace/syscall.h>

#include <asm/kprobes.h>
#include <asm/ftrace.h>
#include <asm/nops.h>
#include <asm/text-patching.h>
#ifdef CONFIG_DYNAMIC_FTRACE

static int ftrace_poke_late = 0;
void ftrace_arch_code_modify_prepare(void)
    __acquires(&text_mutex)
{
        /*
         * Need to grab text_mutex to prevent a race from module loading
         * and live kernel patching from changing the text permissions while
         * ftrace has it set to "read/write".
         */
        mutex_lock(&text_mutex);
        ftrace_poke_late = 1;
}
void ftrace_arch_code_modify_post_process(void)
    __releases(&text_mutex)
{
        /*
         * ftrace_make_{call,nop}() may be called during
         * module load, and we need to finish the text_poke_queue()
         * that they do, here.
         */
        text_poke_finish();
        ftrace_poke_late = 0;
        mutex_unlock(&text_mutex);
}
static const char *ftrace_nop_replace(void)
{
        return x86_nops[5];
}

static const char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
        /*
         * No need to translate into a callthunk. The trampoline does
         * the depth accounting itself.
         */
        return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
}
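/*
 * text_gen_insn() emits the 5-byte near call "e8 <rel32>", where
 * rel32 = addr - (ip + CALL_INSN_SIZE): the displacement is relative to
 * the first byte after the call instruction. MCOUNT_INSN_SIZE is likewise
 * 5 bytes, so the generated call exactly overwrites the mcount/fentry site.
 */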
static int ftrace_verify_code(unsigned long ip, const char *old_code)
{
        char cur_code[MCOUNT_INSN_SIZE];

        /*
         * Note:
         * We are paranoid about modifying text, as if a bug was to happen, it
         * could cause us to read or write to someplace that could cause harm.
         * Carefully read and modify the code with probe_kernel_*(), and make
         * sure what we read is what we expected it to be before modifying it.
         */

        /* read the text we want to modify */
        if (copy_from_kernel_nofault(cur_code, (void *)ip, MCOUNT_INSN_SIZE)) {
                WARN_ON(1);
                return -EFAULT;
        }

        /* Make sure it is what we expect it to be */
        if (memcmp(cur_code, old_code, MCOUNT_INSN_SIZE) != 0) {
                ftrace_expected = old_code;
                WARN_ON(1);
                return -EINVAL;
        }

        return 0;
}
/*
 * Marked __ref because it calls text_poke_early() which is .init.text. That is
 * ok because that call will happen early, during boot, when .init sections are
 * still present.
 */
static int __ref
ftrace_modify_code_direct(unsigned long ip, const char *old_code,
                          const char *new_code)
{
        int ret = ftrace_verify_code(ip, old_code);
        if (ret)
                return ret;

        /* replace the text with the new text */
        if (ftrace_poke_late)
                text_poke_queue((void *)ip, new_code, MCOUNT_INSN_SIZE, NULL);
        else
                text_poke_early((void *)ip, new_code, MCOUNT_INSN_SIZE);
        return 0;
}
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned long ip = rec->ip;
        const char *new, *old;

        old = ftrace_call_replace(ip, addr);
        new = ftrace_nop_replace();

        /*
         * On boot up, and when modules are loaded, the MCOUNT_ADDR
         * is converted to a nop, and will never become MCOUNT_ADDR
         * again. This code is either running before SMP (on boot up)
         * or before the code will ever be executed (module load).
         * We do not want to use the breakpoint version in this case,
         * just modify the code directly.
         */
        if (addr == MCOUNT_ADDR)
                return ftrace_modify_code_direct(ip, old, new);

        /*
         * x86 overrides ftrace_replace_code -- this function will never be used
         * in this case.
         */
        WARN_ONCE(1, "invalid use of ftrace_make_nop");
        return -EINVAL;
}
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned long ip = rec->ip;
        const char *new, *old;

        old = ftrace_nop_replace();
        new = ftrace_call_replace(ip, addr);

        /* Should only be called when module is loaded */
        return ftrace_modify_code_direct(rec->ip, old, new);
}
/*
 * Should never be called:
 *  As it is only called by __ftrace_replace_code() which is called by
 *  ftrace_replace_code() that x86 overrides, and by ftrace_update_code()
 *  which is called to turn mcount into nops or nops into function calls
 *  but not to convert a function from not using regs to one that uses
 *  regs, which ftrace_modify_call() is for.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                       unsigned long addr)
{
        WARN_ON(1);
        return -EINVAL;
}
int ftrace_update_ftrace_func(ftrace_func_t func)
{
        unsigned long ip;
        const char *new;

        ip = (unsigned long)(&ftrace_call);
        new = ftrace_call_replace(ip, (unsigned long)func);
        text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

        ip = (unsigned long)(&ftrace_regs_call);
        new = ftrace_call_replace(ip, (unsigned long)func);
        text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

        return 0;
}
void ftrace_replace_code(int enable)
{
        struct ftrace_rec_iter *iter;
        struct dyn_ftrace *rec;
        const char *new, *old;
        int ret;

        for_ftrace_rec_iter(iter) {
                rec = ftrace_rec_iter_record(iter);

                switch (ftrace_test_record(rec, enable)) {
                case FTRACE_UPDATE_IGNORE:
                default:
                        continue;

                case FTRACE_UPDATE_MAKE_CALL:
                        old = ftrace_nop_replace();
                        break;

                case FTRACE_UPDATE_MODIFY_CALL:
                case FTRACE_UPDATE_MAKE_NOP:
                        old = ftrace_call_replace(rec->ip, ftrace_get_addr_curr(rec));
                        break;
                }

                ret = ftrace_verify_code(rec->ip, old);
                if (ret) {
                        ftrace_expected = old;
                        ftrace_bug(ret, rec);
                        ftrace_expected = NULL;
                        return;
                }
        }

        for_ftrace_rec_iter(iter) {
                rec = ftrace_rec_iter_record(iter);

                switch (ftrace_test_record(rec, enable)) {
                case FTRACE_UPDATE_IGNORE:
                default:
                        continue;

                case FTRACE_UPDATE_MAKE_CALL:
                case FTRACE_UPDATE_MODIFY_CALL:
                        new = ftrace_call_replace(rec->ip, ftrace_get_addr_new(rec));
                        break;

                case FTRACE_UPDATE_MAKE_NOP:
                        new = ftrace_nop_replace();
                        break;
                }

                text_poke_queue((void *)rec->ip, new, MCOUNT_INSN_SIZE, NULL);
                ftrace_update_record(rec, enable);
        }
        text_poke_finish();
}
void arch_ftrace_update_code(int command)
{
        ftrace_modify_all_code(command);
}
/* Currently only x86_64 supports dynamic trampolines */
#ifdef CONFIG_X86_64
#ifdef CONFIG_MODULES
#include <linux/moduleloader.h>
/* Module allocation simplifies allocating memory for code */
static inline void *alloc_tramp(unsigned long size)
{
        return module_alloc(size);
}
static inline void tramp_free(void *tramp)
{
        module_memfree(tramp);
}
#else
/* Trampolines can only be created if modules are supported */
static inline void *alloc_tramp(unsigned long size)
{
        return NULL;
}
static inline void tramp_free(void *tramp) { }
#endif
/* Defined as markers to the end of the ftrace default trampolines */
extern void ftrace_regs_caller_end(void);
extern void ftrace_caller_end(void);
extern void ftrace_caller_op_ptr(void);
extern void ftrace_regs_caller_op_ptr(void);
extern void ftrace_regs_caller_jmp(void);
/* movq function_trace_op(%rip), %rdx */
/* 0x48 0x8b 0x15 <offset-to-ftrace_trace_op (4 bytes)> */
#define OP_REF_SIZE     7
/*
 * The ftrace_ops is passed to the function callback. Since the
 * trampoline only services a single ftrace_ops, we can pass in
 * that ops directly.
 *
 * The ftrace_op_code_union is used to create a pointer to the
 * ftrace_ops that will be passed to the callback function.
 */
union ftrace_op_code_union {
        char code[OP_REF_SIZE];
        struct {
                char op[3];
                int offset;
        } __attribute__((packed));
};
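/*
 * Viewed through the union, the 7-byte reference breaks down as:
 *
 *   code[0..2]  op[3]   48 8b 15  (movq opcode + RIP-relative ModRM)
 *   code[3..6]  offset  <imm32>   (displacement from the next insn)
 *
 * so writing op_ptr.offset rewrites only the displacement of the movq
 * while the opcode bytes are left untouched.
 */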
#define RET_SIZE \
        (IS_ENABLED(CONFIG_RETPOLINE) ? 5 : 1 + IS_ENABLED(CONFIG_SLS))
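/*
 * RET_SIZE is the size of the return sequence appended after the copied
 * caller body: a 5-byte jmp to the return thunk when retpolines are
 * enabled, otherwise a 1-byte ret plus one int3 byte of padding when
 * CONFIG_SLS (straight-line-speculation mitigation) is set.
 */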
static unsigned long
create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
{
        unsigned long start_offset;
        unsigned long end_offset;
        unsigned long op_offset;
        unsigned long call_offset;
        unsigned long jmp_offset;
        unsigned long offset;
        unsigned long npages;
        unsigned long size;
        unsigned long *ptr;
        void *trampoline;
        void *ip, *dest;
        /* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
        unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
        unsigned const char retq[] = { RET_INSN_OPCODE, INT3_INSN_OPCODE };
        union ftrace_op_code_union op_ptr;
        int ret;
        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
                start_offset = (unsigned long)ftrace_regs_caller;
                end_offset = (unsigned long)ftrace_regs_caller_end;
                op_offset = (unsigned long)ftrace_regs_caller_op_ptr;
                call_offset = (unsigned long)ftrace_regs_call;
                jmp_offset = (unsigned long)ftrace_regs_caller_jmp;
        } else {
                start_offset = (unsigned long)ftrace_caller;
                end_offset = (unsigned long)ftrace_caller_end;
                op_offset = (unsigned long)ftrace_caller_op_ptr;
                call_offset = (unsigned long)ftrace_call;
                jmp_offset = 0;
        }

        size = end_offset - start_offset;
        /*
         * Allocate enough size to store the ftrace_caller code,
         * the iret, as well as the address of the ftrace_ops this
         * trampoline is used for.
         */
        trampoline = alloc_tramp(size + RET_SIZE + sizeof(void *));
        if (!trampoline)
                return 0;

        *tramp_size = size + RET_SIZE + sizeof(void *);
        npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE);
        /* Copy ftrace_caller onto the trampoline memory */
        ret = copy_from_kernel_nofault(trampoline, (void *)start_offset, size);
        if (WARN_ON(ret < 0))
                goto fail;

        ip = trampoline + size;
        if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
                __text_gen_insn(ip, JMP32_INSN_OPCODE, ip, x86_return_thunk, JMP32_INSN_SIZE);
        else
                memcpy(ip, retq, sizeof(retq));
        /* No need to test direct calls on created trampolines */
        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
                /* NOP the jnz 1f; but make sure it's a 2 byte jnz */
                ip = trampoline + (jmp_offset - start_offset);
                if (WARN_ON(*(char *)ip != 0x75))
                        goto fail;
                ret = copy_from_kernel_nofault(ip, x86_nops[2], 2);
                if (ret < 0)
                        goto fail;
        }
        /*
         * The address of the ftrace_ops that is used for this trampoline
         * is stored at the end of the trampoline. This will be used to
         * load the third parameter for the callback. Basically, that
         * location at the end of the trampoline takes the place of
         * the global function_trace_op variable.
         */
        ptr = (unsigned long *)(trampoline + size + RET_SIZE);
        *ptr = (unsigned long)ops;
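        /*
         * Resulting layout of the allocated block (sketch):
         *
         *   trampoline                     copy of ftrace_(regs_)caller  (size)
         *   trampoline + size              ret or jmp to return thunk    (RET_SIZE)
         *   trampoline + size + RET_SIZE   pointer to this ftrace_ops    (sizeof(void *))
         */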
        op_offset -= start_offset;
        memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);

        /* Are we pointing to the reference? */
        if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0))
                goto fail;
        /* Load the contents of ptr into the callback parameter */
        offset = (unsigned long)ptr;
        offset -= (unsigned long)trampoline + op_offset + OP_REF_SIZE;

        op_ptr.offset = offset;
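        /*
         * Worked out: at execution time RIP points at the instruction
         * after the movq, i.e. trampoline + op_offset + OP_REF_SIZE, so
         * this displacement makes the RIP-relative load hit ptr exactly.
         */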
        /* put in the new offset to the ftrace_ops */
        memcpy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);
        /* put in the call to the function */
        mutex_lock(&text_mutex);
        call_offset -= start_offset;
        /*
         * No need to translate into a callthunk. The trampoline does
         * the depth accounting before the call already.
         */
        dest = ftrace_ops_get_func(ops);
        memcpy(trampoline + call_offset,
               text_gen_insn(CALL_INSN_OPCODE, trampoline + call_offset, dest),
               CALL_INSN_SIZE);
        mutex_unlock(&text_mutex);
        /* The ALLOC_TRAMP flag lets us know we created it */
        ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;

        set_memory_rox((unsigned long)trampoline, npages);
        return (unsigned long)trampoline;
fail:
        tramp_free(trampoline);
        return 0;
}
void set_ftrace_ops_ro(void)
{
        struct ftrace_ops *ops;
        unsigned long start_offset;
        unsigned long end_offset;
        unsigned long npages;
        unsigned long size;

        do_for_each_ftrace_op(ops, ftrace_ops_list) {
                if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
                        continue;

                if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
                        start_offset = (unsigned long)ftrace_regs_caller;
                        end_offset = (unsigned long)ftrace_regs_caller_end;
                } else {
                        start_offset = (unsigned long)ftrace_caller;
                        end_offset = (unsigned long)ftrace_caller_end;
                }
                size = end_offset - start_offset;
                size = size + RET_SIZE + sizeof(void *);
                npages = DIV_ROUND_UP(size, PAGE_SIZE);
                set_memory_ro((unsigned long)ops->trampoline, npages);
        } while_for_each_ftrace_op(ops);
}
static unsigned long calc_trampoline_call_offset(bool save_regs)
{
        unsigned long start_offset;
        unsigned long call_offset;

        if (save_regs) {
                start_offset = (unsigned long)ftrace_regs_caller;
                call_offset = (unsigned long)ftrace_regs_call;
        } else {
                start_offset = (unsigned long)ftrace_caller;
                call_offset = (unsigned long)ftrace_call;
        }

        return call_offset - start_offset;
}
void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
{
        ftrace_func_t func;
        unsigned long offset;
        unsigned long ip;
        unsigned int size;
        const char *new;

        if (!ops->trampoline) {
                ops->trampoline = create_trampoline(ops, &size);
                if (!ops->trampoline)
                        return;
                ops->trampoline_size = size;
                return;
        }

        /*
         * The ftrace_ops caller may set up its own trampoline.
         * In such a case, this code must not modify it.
         */
        if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
                return;

        offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
        ip = ops->trampoline + offset;
        func = ftrace_ops_get_func(ops);

        mutex_lock(&text_mutex);
        /* Do a safe modify in case the trampoline is executing */
        new = ftrace_call_replace(ip, (unsigned long)func);
        text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
        mutex_unlock(&text_mutex);
}
/* Return the address of the function the trampoline calls */
static void *addr_from_call(void *ptr)
{
        union text_poke_insn call;
        int ret;

        ret = copy_from_kernel_nofault(&call, ptr, CALL_INSN_SIZE);
        if (WARN_ON_ONCE(ret < 0))
                return NULL;

        /* Make sure this is a call */
        if (WARN_ON_ONCE(call.opcode != CALL_INSN_OPCODE)) {
                pr_warn("Expected E8, got %x\n", call.opcode);
                return NULL;
        }

        return ptr + CALL_INSN_SIZE + call.disp;
}
/*
 * If the ops->trampoline was not allocated, then it probably
 * has a static trampoline func, or is the ftrace caller itself.
 */
static void *static_tramp_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
        unsigned long offset;
        bool save_regs = rec->flags & FTRACE_FL_REGS_EN;
        void *ptr;

        if (ops && ops->trampoline) {
#if !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS) && \
        defined(CONFIG_FUNCTION_GRAPH_TRACER)
                /*
                 * We only know about function graph tracer setting as static
                 * trampoline.
                 */
                if (ops->trampoline == FTRACE_GRAPH_ADDR)
                        return (void *)prepare_ftrace_return;
#endif
                return NULL;
        }

        offset = calc_trampoline_call_offset(save_regs);

        if (save_regs)
                ptr = (void *)FTRACE_REGS_ADDR + offset;
        else
                ptr = (void *)FTRACE_ADDR + offset;

        return addr_from_call(ptr);
}
void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
        unsigned long offset;

        /* If we didn't allocate this trampoline, consider it static */
        if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
                return static_tramp_func(ops, rec);

        offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
        return addr_from_call((void *)ops->trampoline + offset);
}
void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
        if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
                return;

        tramp_free((void *)ops->trampoline);
        ops->trampoline = 0;
}
#endif /* CONFIG_X86_64 */
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#if defined(CONFIG_DYNAMIC_FTRACE) && !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS)
extern void ftrace_graph_call(void);
static const char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
{
        return text_gen_insn(JMP32_INSN_OPCODE, (void *)ip, (void *)addr);
}
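/*
 * Same scheme as ftrace_call_replace(), but with the e9 near-jmp opcode:
 * "e9 <rel32>", rel32 relative to the next instruction. JMP32_INSN_SIZE
 * equals MCOUNT_INSN_SIZE (5 bytes), so the patched site keeps its width.
 */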
static int ftrace_mod_jmp(unsigned long ip, void *func)
{
        const char *new;

        new = ftrace_jmp_replace(ip, (unsigned long)func);
        text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
        return 0;
}
int ftrace_enable_ftrace_graph_caller(void)
{
        unsigned long ip = (unsigned long)(&ftrace_graph_call);

        return ftrace_mod_jmp(ip, &ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
        unsigned long ip = (unsigned long)(&ftrace_graph_call);

        return ftrace_mod_jmp(ip, &ftrace_stub);
}
#endif /* CONFIG_DYNAMIC_FTRACE && !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
                           unsigned long frame_pointer)
{
        unsigned long return_hooker = (unsigned long)&return_to_handler;
        int bit;

        /*
         * When resuming from suspend-to-ram, this function can be indirectly
         * called from early CPU startup code while the CPU is in real mode,
         * which would fail miserably. Make sure the stack pointer is a
         * virtual address.
         *
         * This check isn't as accurate as virt_addr_valid(), but it should be
         * good enough for this purpose, and it's fast.
         */
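        /*
         * The sign test below works because kernel virtual addresses occupy
         * the upper half of the address space (bit 31/63 set), so casting a
         * valid kernel frame address to long yields a negative value, while
         * a real-mode stack address stays positive.
         */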
        if (unlikely((long)__builtin_frame_address(0) >= 0))
                return;
        if (unlikely(ftrace_graph_is_dead()))
                return;

        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;

        bit = ftrace_test_recursion_trylock(ip, *parent);
        if (bit < 0)
                return;

        if (!function_graph_enter(*parent, ip, frame_pointer, parent))
                *parent = return_hooker;

        ftrace_test_recursion_unlock(bit);
}
#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
                       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
        struct pt_regs *regs = &fregs->regs;
        unsigned long *stack = (unsigned long *)kernel_stack_pointer(regs);

        prepare_ftrace_return(ip, (unsigned long *)stack, 0);
}
#endif

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */