/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Ftrace header. For implementation details beyond the random comments
 * scattered below, see: Documentation/trace/ftrace-design.rst
 */

#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#include <linux/trace_clock.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/ptrace.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/ftrace.h>

/*
 * If the arch supports passing the variable contents of
 * function_trace_op as the third parameter back from the
 * mcount call, then the arch should define this as 1.
 */
#ifndef ARCH_SUPPORTS_FTRACE_OPS
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif

/*
 * If the arch's mcount caller does not support all of ftrace's
 * features, then it must call an indirect function that
 * does, or at least does enough to prevent any unwelcome side effects.
 */
#if !ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
#else
# define FTRACE_FORCE_LIST_FUNC 0
#endif

/* Main tracing buffer and events set up */
#ifdef CONFIG_TRACING
void trace_init(void);
void early_trace_init(void);
#else
static inline void trace_init(void) { }
static inline void early_trace_init(void) { }
#endif

struct module;
struct ftrace_hash;
struct ftrace_direct_func;

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
        defined(CONFIG_DYNAMIC_FTRACE)
const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
                          unsigned long *off, char **modname, char *sym);
int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
                           char *type, char *name,
                           char *module_name, int *exported);
#else
static inline const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
                          unsigned long *off, char **modname, char *sym)
{
        return NULL;
}
static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
                                         char *type, char *name,
                                         char *module_name, int *exported)
{
        return -1;
}
#endif


#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
                     void __user *buffer, size_t *lenp,
                     loff_t *ppos);

struct ftrace_ops;

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
                              struct ftrace_ops *op, struct pt_regs *regs);

ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);

/*
 * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
 * set in the flags member.
 * SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and
 * IPMODIFY are a kind of attribute flags which can be set only before
 * registering the ftrace_ops, and can not be modified while registered.
 * Changing those attribute flags after registering ftrace_ops will
 * cause unexpected results.
 *
 * ENABLED - set/unset when ftrace_ops is registered/unregistered
 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
 *           allocated ftrace_ops which need special care
 * SAVE_REGS - The ftrace_ops wants regs saved at each function called
 *           and passed to the callback. If this flag is set, but the
 *           architecture does not support passing regs
 *           (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
 *           ftrace_ops will fail to register, unless the next flag
 *           is set.
 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
 *           handler can handle an arch that does not save regs
 *           (the handler tests if regs == NULL), then it can set
 *           this flag instead. It will not fail registering the ftrace_ops
 *           but the regs field will be NULL if the arch does not support
 *           passing regs to the handler.
 *           Note, if this flag is set, the SAVE_REGS flag will automatically
 *           get set upon registering the ftrace_ops, if the arch supports it.
 * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure
 *           that the callback has its own recursion protection. If it does
 *           not set this, then the ftrace infrastructure will add recursion
 *           protection for the caller.
 * STUB - The ftrace_ops is just a placeholder.
 * INITIALIZED - The ftrace_ops has already been initialized (the first time
 *           register_ftrace_function() is called, it will initialize the ops)
 * DELETED - The ops are being deleted, do not let them be registered again.
 * ADDING - The ops is in the process of being added.
 * REMOVING - The ops is in the process of being removed.
 * MODIFYING - The ops is in the process of changing its filter functions.
 * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
 *           The arch specific code sets this flag when it allocated a
 *           trampoline. This lets the arch know that it can update the
 *           trampoline in case the callback function changes.
 *           The ftrace_ops trampoline can be set by the ftrace users, and
 *           in such cases the arch must not modify it. Only the arch ftrace
 *           core code should set this flag.
 * IPMODIFY - The ops can modify the IP register. This can only be set with
 *           SAVE_REGS. If another ops with this flag set is already registered
 *           for any of the functions that this ops will be registered for, then
 *           this ops will fail to register or set_filter_ip.
 * PID - Is affected by set_ftrace_pid (allows filtering on those pids)
 * RCU - Set when the ops can only be called when RCU is watching.
 * TRACE_ARRAY - The ops->private points to a trace_array descriptor.
 * PERMANENT - Set when the ops is permanent and should not be affected by
 *           ftrace_enabled.
 * DIRECT - Used by the direct ftrace_ops helper for direct functions
 *           (internal ftrace only, should not be used by others)
 */
enum {
        FTRACE_OPS_FL_ENABLED                   = BIT(0),
        FTRACE_OPS_FL_DYNAMIC                   = BIT(1),
        FTRACE_OPS_FL_SAVE_REGS                 = BIT(2),
        FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED    = BIT(3),
        FTRACE_OPS_FL_RECURSION_SAFE            = BIT(4),
        FTRACE_OPS_FL_STUB                      = BIT(5),
        FTRACE_OPS_FL_INITIALIZED               = BIT(6),
        FTRACE_OPS_FL_DELETED                   = BIT(7),
        FTRACE_OPS_FL_ADDING                    = BIT(8),
        FTRACE_OPS_FL_REMOVING                  = BIT(9),
        FTRACE_OPS_FL_MODIFYING                 = BIT(10),
        FTRACE_OPS_FL_ALLOC_TRAMP               = BIT(11),
        FTRACE_OPS_FL_IPMODIFY                  = BIT(12),
        FTRACE_OPS_FL_PID                       = BIT(13),
        FTRACE_OPS_FL_RCU                       = BIT(14),
        FTRACE_OPS_FL_TRACE_ARRAY               = BIT(15),
        FTRACE_OPS_FL_PERMANENT                 = BIT(16),
        FTRACE_OPS_FL_DIRECT                    = BIT(17),
};
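
/*
 * Example (illustrative sketch, not kernel code; my_callback and my_ops
 * are hypothetical names): a callback that sets SAVE_REGS_IF_SUPPORTED
 * must tolerate a NULL regs pointer on arches that cannot pass regs:
 *
 *        static void my_callback(unsigned long ip, unsigned long parent_ip,
 *                                struct ftrace_ops *op, struct pt_regs *regs)
 *        {
 *                if (regs)
 *                        trace_printk("ip: %lx\n", instruction_pointer(regs));
 *        }
 *
 *        static struct ftrace_ops my_ops = {
 *                .func  = my_callback,
 *                .flags = FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
 *        };
 */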

#ifdef CONFIG_DYNAMIC_FTRACE
/* The hash used to know which functions the callbacks trace */
struct ftrace_ops_hash {
        struct ftrace_hash __rcu *notrace_hash;
        struct ftrace_hash __rcu *filter_hash;
        struct mutex regex_lock;
};

void ftrace_free_init_mem(void);
void ftrace_free_mem(struct module *mod, void *start, void *end);
#else
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif

/*
 * Note, ftrace_ops can be referenced outside of RCU protection, unless
 * the RCU flag is set. If ftrace_ops is allocated and not part of kernel
 * core data, unregistering it will perform a scheduling on all CPUs
 * to make sure that there are no more users. Depending on the load of the
 * system that may take a bit of time.
 *
 * Any private data added must also take care not to be freed, and if private
 * data is added to an ftrace_ops that is in core code, the user of the
 * ftrace_ops must perform a schedule_on_each_cpu() before freeing it.
 */
struct ftrace_ops {
        ftrace_func_t func;
        struct ftrace_ops __rcu *next;
        unsigned long flags;
        void *private;
        ftrace_func_t saved_func;
#ifdef CONFIG_DYNAMIC_FTRACE
        struct ftrace_ops_hash local_hash;
        struct ftrace_ops_hash *func_hash;
        struct ftrace_ops_hash old_hash;
        unsigned long trampoline;
        unsigned long trampoline_size;
#endif
};

extern struct ftrace_ops __rcu *ftrace_ops_list;
extern struct ftrace_ops ftrace_list_end;

/*
 * Traverse the ftrace_global_list, invoking all entries. The reason that we
 * can use rcu_dereference_raw_check() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism. The rcu_dereference_raw_check() calls are needed to handle
 * concurrent insertions into the ftrace_global_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
#define do_for_each_ftrace_op(op, list)                 \
        op = rcu_dereference_raw_check(list);           \
        do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 */
#define while_for_each_ftrace_op(op)                                    \
        while (likely(op = rcu_dereference_raw_check((op)->next)) &&    \
               unlikely((op) != &ftrace_list_end))
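
/*
 * Example (sketch): these two macros are meant to be used as a pair,
 * the way the function tracer core iterates all registered ops:
 *
 *        struct ftrace_ops *op;
 *
 *        do_for_each_ftrace_op(op, ftrace_ops_list) {
 *                op->func(ip, parent_ip, op, regs);
 *        } while_for_each_ftrace_op(op);
 */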

/*
 * Type of the current tracing.
 */
enum ftrace_tracing_type_t {
        FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
        FTRACE_TYPE_RETURN,    /* Hook the return of the function */
};

/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;

/*
 * The ftrace_ops must be static and should also
 * be read_mostly. These functions do modify read_mostly variables
 * so use them sparingly. Never free an ftrace_ops or modify the
 * next pointer after it has been registered. Even after unregistering
 * it, the next pointer may still be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);

extern void ftrace_stub(unsigned long a0, unsigned long a1,
                        struct ftrace_ops *op, struct pt_regs *regs);

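/*
 * Example (minimal sketch with hypothetical names): per the rules above,
 * the ops is static, is never freed, and its ->next is never touched:
 *
 *        static void my_trace_func(unsigned long ip, unsigned long parent_ip,
 *                                  struct ftrace_ops *op, struct pt_regs *regs)
 *        {
 *                trace_printk("hit %ps\n", (void *)ip);
 *        }
 *
 *        static struct ftrace_ops my_trace_ops __read_mostly = {
 *                .func = my_trace_func,
 *        };
 *
 *        ret = register_ftrace_function(&my_trace_ops);
 *        // tracing is active here
 *        unregister_ftrace_function(&my_trace_ops);
 */
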
#else /* !CONFIG_FUNCTION_TRACER */
/*
 * (un)register_ftrace_function must be a macro since the ops parameter
 * must not be evaluated.
 */
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
static inline void ftrace_kill(void) { }
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif /* CONFIG_FUNCTION_TRACER */

struct ftrace_func_entry {
        struct hlist_node hlist;
        unsigned long ip;
        unsigned long direct; /* for direct lookup only */
};

struct dyn_ftrace;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
extern int ftrace_direct_func_count;
int register_ftrace_direct(unsigned long ip, unsigned long addr);
int unregister_ftrace_direct(unsigned long ip, unsigned long addr);
int modify_ftrace_direct(unsigned long ip, unsigned long old_addr, unsigned long new_addr);
struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr);
int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
                                struct dyn_ftrace *rec,
                                unsigned long old_addr,
                                unsigned long new_addr);
unsigned long ftrace_find_rec_direct(unsigned long ip);
#else
# define ftrace_direct_func_count 0
static inline int register_ftrace_direct(unsigned long ip, unsigned long addr)
{
        return -ENOTSUPP;
}
static inline int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
{
        return -ENOTSUPP;
}
static inline int modify_ftrace_direct(unsigned long ip,
                                       unsigned long old_addr, unsigned long new_addr)
{
        return -ENOTSUPP;
}
static inline struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
{
        return NULL;
}
static inline int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
                                              struct dyn_ftrace *rec,
                                              unsigned long old_addr,
                                              unsigned long new_addr)
{
        return -ENODEV;
}
static inline unsigned long ftrace_find_rec_direct(unsigned long ip)
{
        return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
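
/*
 * Example (sketch; my_func is the traced function and my_tramp is a
 * hypothetical arch-level assembly trampoline that preserves all
 * registers before calling out and restores them after): attach a
 * direct call to a function's entry:
 *
 *        ret = register_ftrace_direct((unsigned long)my_func,
 *                                     (unsigned long)my_tramp);
 *        // my_tramp now runs on every entry to my_func
 *        unregister_ftrace_direct((unsigned long)my_func,
 *                                 (unsigned long)my_tramp);
 */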

#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
/*
 * This must be implemented by the architecture.
 * It is the way the ftrace direct_ops helper, when called
 * via ftrace (because there are other callbacks besides the
 * direct call), can inform the architecture's trampoline that this
 * routine has a direct caller, and what the caller is.
 *
 * For example, in x86, it returns the direct caller
 * callback function via the regs->orig_ax parameter.
 * Then in the ftrace trampoline, if this is set, it makes
 * the return from the trampoline jump to the direct caller
 * instead of going back to the function it just traced.
 */
static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs,
                                                 unsigned long addr) { }
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */

#ifdef CONFIG_STACK_TRACER

extern int stack_tracer_enabled;

int stack_trace_sysctl(struct ctl_table *table, int write,
                       void __user *buffer, size_t *lenp,
                       loff_t *ppos);

/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
DECLARE_PER_CPU(int, disable_stack_tracer);

/**
 * stack_tracer_disable - temporarily disable the stack tracer
 *
 * There are a few locations (namely in RCU) where stack tracing
 * cannot be executed. This function is used to disable stack
 * tracing during those critical sections.
 *
 * This function must be called with preemption or interrupts
 * disabled and stack_tracer_enable() must be called shortly after
 * while preemption or interrupts are still disabled.
 */
static inline void stack_tracer_disable(void)
{
        /* Preemption or interrupts must be disabled */
        if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
                WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
        this_cpu_inc(disable_stack_tracer);
}

/**
 * stack_tracer_enable - re-enable the stack tracer
 *
 * After stack_tracer_disable() is called, stack_tracer_enable()
 * must be called shortly afterward.
 */
static inline void stack_tracer_enable(void)
{
        if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
                WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
        this_cpu_dec(disable_stack_tracer);
}
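
/*
 * Example (sketch): the required pairing, with preemption held off
 * across both calls:
 *
 *        preempt_disable_notrace();
 *        stack_tracer_disable();
 *        // code that must not be sampled by the stack tracer
 *        stack_tracer_enable();
 *        preempt_enable_notrace();
 */
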
#else
static inline void stack_tracer_disable(void) { }
static inline void stack_tracer_enable(void) { }
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void);

enum ftrace_bug_type {
        FTRACE_BUG_UNKNOWN,
        FTRACE_BUG_INIT,
        FTRACE_BUG_NOP,
        FTRACE_BUG_CALL,
        FTRACE_BUG_UPDATE,
};
extern enum ftrace_bug_type ftrace_bug_type;

/*
 * Archs can set this to point to a variable that holds the value that was
 * expected at the call site before calling ftrace_bug().
 */
extern const void *ftrace_expected;

void ftrace_bug(int err, struct dyn_ftrace *rec);

struct seq_file;

extern int ftrace_text_reserved(const void *start, const void *end);

struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);

bool is_ftrace_trampoline(unsigned long addr);

/*
 * The dyn_ftrace record's flags field is split into two parts.
 * The first part, which is '0-FTRACE_REF_MAX', is a counter of
 * the number of callbacks that have registered the function that
 * the dyn_ftrace descriptor represents.
 *
 * The second part is a mask:
 *  ENABLED - the function is being traced
 *  REGS - the record wants the function to save regs
 *  REGS_EN - the function is set up to save regs.
 *  IPMODIFY - the record allows for the IP address to be changed.
 *  DISABLED - the record is not ready to be touched yet
 *  DIRECT - there is a direct function to call
 *
 * When a new ftrace_ops is registered and wants a function to save
 * pt_regs, the rec->flags REGS bit is set. When the function has been
 * set up to save regs, the REGS_EN flag is set. Once a function
 * starts saving regs it will do so until all ftrace_ops are removed
 * from tracing that function.
 */
enum {
        FTRACE_FL_ENABLED       = (1UL << 31),
        FTRACE_FL_REGS          = (1UL << 30),
        FTRACE_FL_REGS_EN       = (1UL << 29),
        FTRACE_FL_TRAMP         = (1UL << 28),
        FTRACE_FL_TRAMP_EN      = (1UL << 27),
        FTRACE_FL_IPMODIFY      = (1UL << 26),
        FTRACE_FL_DISABLED      = (1UL << 25),
        FTRACE_FL_DIRECT        = (1UL << 24),
        FTRACE_FL_DIRECT_EN     = (1UL << 23),
};

#define FTRACE_REF_MAX_SHIFT    23
#define FTRACE_FL_BITS          9
#define FTRACE_FL_MASKED_BITS   ((1UL << FTRACE_FL_BITS) - 1)
#define FTRACE_FL_MASK          (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
#define FTRACE_REF_MAX          ((1UL << FTRACE_REF_MAX_SHIFT) - 1)

#define ftrace_rec_count(rec)   ((rec)->flags & ~FTRACE_FL_MASK)
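
/*
 * Worked example of the split above: with FTRACE_REF_MAX_SHIFT = 23 and
 * FTRACE_FL_BITS = 9, the flag mask occupies the top 9 bits (23..31):
 *
 *        FTRACE_FL_MASK = ((1UL << 9) - 1) << 23 = 0xff800000
 *        FTRACE_REF_MAX = (1UL << 23) - 1        = 0x007fffff
 *
 * so ftrace_rec_count() extracts the low 23 bits of rec->flags: the
 * number of registered callbacks currently tracing this record.
 */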

struct dyn_ftrace {
        unsigned long ip; /* address of mcount call-site */
        unsigned long flags;
        struct dyn_arch_ftrace arch;
};

int ftrace_force_update(void);
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
                         int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
                      int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
                       int len, int reset);
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);
void ftrace_ops_set_global_filter(struct ftrace_ops *ops);
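
/*
 * Example (sketch with the hypothetical my_trace_ops from above): narrow
 * a callback to one function before registering it; a non-zero reset
 * clears any previously set filter first:
 *
 *        ftrace_set_filter(&my_trace_ops, "schedule", strlen("schedule"), 1);
 *        register_ftrace_function(&my_trace_ops);
 *
 * or, filtering by address instead of by name:
 *
 *        ftrace_set_filter_ip(&my_trace_ops, (unsigned long)schedule, 0, 1);
 */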

enum {
        FTRACE_UPDATE_CALLS             = (1 << 0),
        FTRACE_DISABLE_CALLS            = (1 << 1),
        FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
        FTRACE_START_FUNC_RET           = (1 << 3),
        FTRACE_STOP_FUNC_RET            = (1 << 4),
        FTRACE_MAY_SLEEP                = (1 << 5),
};

/*
 * The FTRACE_UPDATE_* enum is used to pass information back
 * from the ftrace_update_record() and ftrace_test_record()
 * functions. These are called by the code update routines
 * to find out what is to be done for a given function.
 *
 *  IGNORE - The function is already what we want it to be
 *  MAKE_CALL - Start tracing the function
 *  MODIFY_CALL - Change what the traced function calls (e.g. to a
 *                caller that does or does not save regs)
 *  MAKE_NOP - Stop tracing the function
 */
enum {
        FTRACE_UPDATE_IGNORE,
        FTRACE_UPDATE_MAKE_CALL,
        FTRACE_UPDATE_MODIFY_CALL,
        FTRACE_UPDATE_MAKE_NOP,
};

enum {
        FTRACE_ITER_FILTER      = (1 << 0),
        FTRACE_ITER_NOTRACE     = (1 << 1),
        FTRACE_ITER_PRINTALL    = (1 << 2),
        FTRACE_ITER_DO_PROBES   = (1 << 3),
        FTRACE_ITER_PROBE       = (1 << 4),
        FTRACE_ITER_MOD         = (1 << 5),
        FTRACE_ITER_ENABLED     = (1 << 6),
};

void arch_ftrace_update_code(int command);
void arch_ftrace_update_trampoline(struct ftrace_ops *ops);
void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec);
void arch_ftrace_trampoline_free(struct ftrace_ops *ops);

struct ftrace_rec_iter;

struct ftrace_rec_iter *ftrace_rec_iter_start(void);
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);

#define for_ftrace_rec_iter(iter)                       \
        for (iter = ftrace_rec_iter_start();            \
             iter;                                      \
             iter = ftrace_rec_iter_next(iter))

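/*
 * Example (sketch of the pattern arch update routines follow; the patch
 * steps are illustrative comments, not a real arch API): walk every
 * record and ask the core what needs to happen to it:
 *
 *        struct ftrace_rec_iter *iter;
 *        struct dyn_ftrace *rec;
 *
 *        for_ftrace_rec_iter(iter) {
 *                rec = ftrace_rec_iter_record(iter);
 *                switch (ftrace_test_record(rec, true)) {
 *                case FTRACE_UPDATE_IGNORE:
 *                        break;
 *                case FTRACE_UPDATE_MAKE_CALL:
 *                        // patch the nop at rec->ip into a call
 *                        break;
 *                case FTRACE_UPDATE_MAKE_NOP:
 *                        // patch the call at rec->ip back into a nop
 *                        break;
 *                case FTRACE_UPDATE_MODIFY_CALL:
 *                        // retarget the existing call at rec->ip
 *                        break;
 *                }
 *        }
 */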

int ftrace_update_record(struct dyn_ftrace *rec, bool enable);
int ftrace_test_record(struct dyn_ftrace *rec, bool enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);
unsigned long ftrace_location_range(unsigned long start, unsigned long end);
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);

extern ftrace_func_t ftrace_trace_function;

int ftrace_regex_open(struct ftrace_ops *ops, int flag,
                      struct inode *inode, struct file *file);
ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
                            size_t cnt, loff_t *ppos);
ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
                             size_t cnt, loff_t *ppos);
int ftrace_regex_release(struct inode *inode, struct file *file);

void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);

/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void);
extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_regs_caller(void);
extern void ftrace_call(void);
extern void ftrace_regs_call(void);
extern void mcount_call(void);

void ftrace_modify_all_code(int command);

#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif

#ifndef FTRACE_GRAPH_ADDR
#define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
#endif

#ifndef FTRACE_REGS_ADDR
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
#else
# define FTRACE_REGS_ADDR FTRACE_ADDR
#endif
#endif

/*
 * If an arch would like functions that are only traced
 * by the function graph tracer to jump directly to its own
 * trampoline, then they can define FTRACE_GRAPH_TRAMP_ADDR
 * to be that address to jump to.
 */
#ifndef FTRACE_GRAPH_TRAMP_ADDR
#define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif

/**
 * ftrace_make_nop - convert code into nop
 * @mod: module structure if called by module load initialization
 * @rec: the call site record (e.g. mcount/fentry)
 * @addr: the address that the call site should be calling
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_nop(struct module *mod,
                           struct dyn_ftrace *rec, unsigned long addr);


/**
 * ftrace_init_nop - initialize a nop call site
 * @mod: module structure if called by module load initialization
 * @rec: the call site record (e.g. mcount/fentry)
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should contain the contents created by
 * the compiler
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM on error writing to the location
 * Any other value will be considered a failure.
 */
#ifndef ftrace_init_nop
static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
        return ftrace_make_nop(mod, rec, MCOUNT_ADDR);
}
#endif

/**
 * ftrace_make_call - convert a nop call site into a call to addr
 * @rec: the call site record (e.g. mcount/fentry)
 * @addr: the address that the call site should call
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a nop
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);

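/*
 * Example (sketch of the read/compare/write discipline the comments
 * above require; probe_kernel_read() is a real helper, my_text_poke()
 * is a hypothetical stand-in for an arch's code-patching primitive):
 *
 *        unsigned char cur[MCOUNT_INSN_SIZE];
 *
 *        if (probe_kernel_read(cur, (void *)rec->ip, MCOUNT_INSN_SIZE))
 *                return -EFAULT;        // could not read the location
 *        if (memcmp(cur, expected, MCOUNT_INSN_SIZE))
 *                return -EINVAL;        // contents not what we expected
 *        if (my_text_poke((void *)rec->ip, new_insn, MCOUNT_INSN_SIZE))
 *                return -EPERM;         // could not write the location
 *        return 0;
 */
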
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/**
 * ftrace_modify_call - convert from one addr to another (no nop)
 * @rec: the call site record (e.g. mcount/fentry)
 * @old_addr: the address expected to be currently called to
 * @addr: the address to change to
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @old_addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                              unsigned long addr);
#else
/* Should never be called */
static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                                     unsigned long addr)
{
        return -EINVAL;
}
#endif

/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);

extern int skip_trace(unsigned long ip);
extern void ftrace_module_init(struct module *mod);
extern void ftrace_module_enable(struct module *mod);
extern void ftrace_release_mod(struct module *mod);

extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
static inline int ftrace_force_update(void) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_module_init(struct module *mod) { }
static inline void ftrace_module_enable(struct module *mod) { }
static inline void ftrace_release_mod(struct module *mod) { }
static inline int ftrace_text_reserved(const void *start, const void *end)
{
        return 0;
}
static inline unsigned long ftrace_location(unsigned long ip)
{
        return 0;
}

/*
 * Again, users of functions that have ftrace_ops may not
 * have them defined when ftrace is not enabled, but these
 * functions may still be called. Use macros instead of inlines.
 */
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)
#define ftrace_ops_set_global_filter(ops) do { } while (0)

static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
                                          size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
                                           size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }

static inline bool is_ftrace_trampoline(unsigned long addr)
{
        return false;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

/* totally disable ftrace - cannot re-enable after this */
void ftrace_kill(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
        ftrace_enabled = 0;
#endif
}

/*
 * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
        int saved_ftrace_enabled = ftrace_enabled;
        ftrace_enabled = 0;
        return saved_ftrace_enabled;
#else
        return 0;
#endif
}

static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
        ftrace_enabled = enabled;
#endif
}
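
/*
 * Example (sketch): the intended pairing, with the caller providing its
 * own serialization against other writers of ftrace_enabled:
 *
 *        int saved = __ftrace_enabled_save();
 *        // run code that must not be function-traced
 *        __ftrace_enabled_restore(saved);
 */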

/* All archs should have this, but we define it for consistency */
#ifndef ftrace_return_address0
# define ftrace_return_address0 __builtin_return_address(0)
#endif

/* Archs may use other ways for ADDR1 and beyond */
#ifndef ftrace_return_address
# ifdef CONFIG_FRAME_POINTER
#  define ftrace_return_address(n) __builtin_return_address(n)
# else
#  define ftrace_return_address(n) 0UL
# endif
#endif

#define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
#define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
#define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
#define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
#define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))

static inline unsigned long get_lock_parent_ip(void)
{
        unsigned long addr = CALLER_ADDR0;

        if (!in_lock_functions(addr))
                return addr;
        addr = CALLER_ADDR1;
        if (!in_lock_functions(addr))
                return addr;
        return CALLER_ADDR2;
}

#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
extern void trace_preempt_on(unsigned long a0, unsigned long a1);
extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
/*
 * Use defines instead of static inlines because some arches will make code out
 * of the CALLER_ADDR, when we really want these to be a real nop.
 */
# define trace_preempt_on(a0, a1) do { } while (0)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
#ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
#define FTRACE_CALLSITE_SECTION "__patchable_function_entries"
#else
#define FTRACE_CALLSITE_SECTION "__mcount_loc"
#endif
#else
static inline void ftrace_init(void) { }
#endif

/*
 * Structure that defines an entry function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ent {
        unsigned long func; /* Current function */
        int depth;
} __packed;

/*
 * Structure that defines a return function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ret {
        unsigned long func; /* Current function */
        /* Number of functions that overran the depth limit for current task */
        unsigned long overrun;
        unsigned long long calltime;
        unsigned long long rettime;
        int depth;
} __packed;

/* Type of the callback handlers for the function graph tracer */
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */

extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

struct fgraph_ops {
        trace_func_graph_ent_t entryfunc;
        trace_func_graph_ret_t retfunc;
};

/*
 * Stack of return addresses for functions
 * of a thread.
 * Used in struct thread_info
 */
struct ftrace_ret_stack {
        unsigned long ret;
        unsigned long func;
        unsigned long long calltime;
#ifdef CONFIG_FUNCTION_PROFILER
        unsigned long long subtime;
#endif
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
        unsigned long fp;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
        unsigned long *retp;
#endif
};

/*
 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
 * Defined in entry_32/64.S
 */
extern void return_to_handler(void);

extern int
function_graph_enter(unsigned long ret, unsigned long func,
                     unsigned long frame_pointer, unsigned long *retp);

struct ftrace_ret_stack *
ftrace_graph_get_ret_stack(struct task_struct *task, int idx);

unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
                                    unsigned long ret, unsigned long *retp);

/*
 * Sometimes we don't want to trace a function with the function
 * graph tracer but we still want it to be traced by the normal function
 * tracer if the function graph tracer is not configured.
 */
#define __notrace_funcgraph notrace

#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32

extern int register_ftrace_graph(struct fgraph_ops *ops);
extern void unregister_ftrace_graph(struct fgraph_ops *ops);
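
/*
 * Example (minimal sketch with hypothetical names): a graph tracer
 * registers one entry and one return handler. A non-zero return from
 * the entry handler means "trace this function":
 *
 *        static int my_entry(struct ftrace_graph_ent *trace)
 *        {
 *                return 1;
 *        }
 *
 *        static void my_return(struct ftrace_graph_ret *trace)
 *        {
 *                trace_printk("%ps ran %llu ns\n", (void *)trace->func,
 *                             trace->rettime - trace->calltime);
 *        }
 *
 *        static struct fgraph_ops my_gops = {
 *                .entryfunc = my_entry,
 *                .retfunc   = my_return,
 *        };
 *
 *        ret = register_ftrace_graph(&my_gops);
 */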

extern bool ftrace_graph_is_dead(void);
extern void ftrace_graph_stop(void);

/* The current handlers in use */
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;

extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);

static inline void pause_graph_tracing(void)
{
        atomic_inc(&current->tracing_graph_pause);
}

static inline void unpause_graph_tracing(void)
{
        atomic_dec(&current->tracing_graph_pause);
}
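
/*
 * Example (sketch): keep the graph tracer away from the current task
 * while running code that must not be graph-traced:
 *
 *        pause_graph_tracing();
 *        // callees here are not graph-traced for this task
 *        unpause_graph_tracing();
 */
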
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */

#define __notrace_funcgraph

static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }

/* Define as macros as fgraph_ops may not be defined */
#define register_ftrace_graph(ops) ({ -1; })
#define unregister_ftrace_graph(ops) do { } while (0)

static inline unsigned long
ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret,
                      unsigned long *retp)
{
        return ret;
}

static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_TRACING

/* flags for current->trace */
enum {
        TSK_TRACE_FL_TRACE_BIT  = 0,
        TSK_TRACE_FL_GRAPH_BIT  = 1,
};
enum {
        TSK_TRACE_FL_TRACE      = 1 << TSK_TRACE_FL_TRACE_BIT,
        TSK_TRACE_FL_GRAPH      = 1 << TSK_TRACE_FL_GRAPH_BIT,
};

static inline void set_tsk_trace_trace(struct task_struct *tsk)
{
        set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_trace(struct task_struct *tsk)
{
        clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline int test_tsk_trace_trace(struct task_struct *tsk)
{
        return tsk->trace & TSK_TRACE_FL_TRACE;
}

static inline void set_tsk_trace_graph(struct task_struct *tsk)
{
        set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_graph(struct task_struct *tsk)
{
        clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline int test_tsk_trace_graph(struct task_struct *tsk)
{
        return tsk->trace & TSK_TRACE_FL_GRAPH;
}

enum ftrace_dump_mode;

extern enum ftrace_dump_mode ftrace_dump_on_oops;
extern int tracepoint_printk;

extern void disable_trace_on_warning(void);
extern int __disable_trace_on_warning;

int tracepoint_printk_sysctl(struct ctl_table *table, int write,
                             void __user *buffer, size_t *lenp,
                             loff_t *ppos);

#else /* CONFIG_TRACING */
static inline void disable_trace_on_warning(void) { }
#endif /* CONFIG_TRACING */

#ifdef CONFIG_FTRACE_SYSCALLS

unsigned long arch_syscall_addr(int nr);

#endif /* CONFIG_FTRACE_SYSCALLS */

#endif /* _LINUX_FTRACE_H */